Lines Matching defs:nskb

917 struct sk_buff *skb = *pskb, *nskb;
937 nskb = napi_build_skb(data, truesize);
938 if (!nskb) {
943 skb_reserve(nskb, headroom);
944 skb_copy_header(nskb, skb);
945 skb_mark_for_recycle(nskb);
947 err = skb_copy_bits(skb, 0, nskb->data, size);
949 consume_skb(nskb);
952 skb_put(nskb, size);
954 head_off = skb_headroom(nskb) - skb_headroom(skb);
955 skb_headers_offset_update(nskb, head_off);
968 consume_skb(nskb);
972 skb_add_rx_frag(nskb, i, page, page_off, size, truesize);
976 consume_skb(nskb);
985 *pskb = nskb;
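
The matches at lines 917-985 follow the copy-and-rebuild pattern used when an skb has to be re-laid-out into page_pool-backed memory (in recent trees this shape lives in net/core/skbuff.c, e.g. skb_pp_cow_data()). Below is a minimal sketch of that pattern under stated assumptions: the helper name my_pp_cow_linear(), the single-page allocation, the sizing and the error handling are illustrative; only the skb calls mirror the listed lines, and the page_pool header location varies between kernel versions.

#include <linux/mm.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>      /* header location differs on older kernels */

/* Hypothetical helper: rebuild the linear part of *pskb into freshly
 * allocated page_pool memory and swap the new skb in. */
static int my_pp_cow_linear(struct page_pool *pool, struct sk_buff **pskb,
                            unsigned int headroom)
{
        struct sk_buff *skb = *pskb, *nskb;             /* line 917 */
        unsigned int size = skb_headlen(skb);
        unsigned int truesize = SKB_DATA_ALIGN(headroom + size) +
                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        struct page *page;
        int err;

        if (truesize > PAGE_SIZE)
                return -E2BIG;

        page = page_pool_dev_alloc_pages(pool);
        if (!page)
                return -ENOMEM;

        nskb = napi_build_skb(page_address(page), truesize);    /* line 937 */
        if (!nskb) {
                page_pool_put_full_page(pool, page, false);
                return -ENOMEM;
        }

        skb_reserve(nskb, headroom);                    /* line 943 */
        skb_copy_header(nskb, skb);                     /* line 944 */
        skb_mark_for_recycle(nskb);                     /* line 945: return data to the pool on free */

        err = skb_copy_bits(skb, 0, nskb->data, size);  /* line 947 */
        if (err) {
                consume_skb(nskb);                      /* line 949 */
                return err;
        }
        skb_put(nskb, size);                            /* line 952 */

        /* lines 954-955: adjust header offsets for the new headroom */
        skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));

        consume_skb(skb);
        *pskb = nskb;                                   /* line 985 */
        return 0;
}
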
1901 static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
1905 if (skb_zcopy(nskb)) {
1906 /* !gfp_mask callers are verified to !skb_zcopy(nskb) */
1911 if (skb_uarg(nskb) == skb_uarg(orig))
1913 if (skb_copy_ubufs(nskb, GFP_ATOMIC))
1916 skb_zcopy_set(nskb, skb_uarg(orig), NULL);
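
Lines 1901-1916 are the body of skb_zerocopy_clone(), which is local to net/core/skbuff.c. A hedged reconstruction of the logic those matches suggest follows; the function below is a sketch (renamed my_zerocopy_clone(), error codes assumed), not an exported API.

#include <linux/skbuff.h>

/* Sketch: make nskb share orig's zerocopy ubuf.  If nskb already carries a
 * different ubuf, its page frags are first copied into private pages so the
 * old ubuf can be dropped. */
static int my_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
                             gfp_t gfp_mask)
{
        if (skb_zcopy(orig)) {
                if (skb_zcopy(nskb)) {
                        /* !gfp_mask callers are verified to !skb_zcopy(nskb) */
                        if (!gfp_mask) {
                                WARN_ON_ONCE(1);
                                return -ENOMEM;
                        }
                        if (skb_uarg(nskb) == skb_uarg(orig))   /* line 1911 */
                                return 0;
                        if (skb_copy_ubufs(nskb, GFP_ATOMIC))   /* line 1913 */
                                return -EIO;
                }
                skb_zcopy_set(nskb, skb_uarg(orig), NULL);      /* line 1916 */
        }
        return 0;
}
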
2402 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2404 if (unlikely(!nskb))
2408 skb_set_owner_w(nskb, sk);
2410 skb = nskb;
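
Lines 2402-2410 are the clone-and-reown step used when a shared skb must become privately writable before its head is reworked (this shape appears in skb_expand_head()). A small sketch of the pattern; my_unshare_for_sk() and its NULL-on-failure convention are illustrative assumptions.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Sketch: if skb is shared, continue with a private clone, charge it to the
 * socket (when there is one) and drop our reference to the original. */
static struct sk_buff *my_unshare_for_sk(struct sk_buff *skb, struct sock *sk)
{
        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);      /* line 2402 */

                if (unlikely(!nskb))
                        return NULL;                                    /* line 2404 */

                if (sk)
                        skb_set_owner_w(nskb, sk);                      /* line 2408 */
                consume_skb(skb);
                skb = nskb;                                             /* line 2410 */
        }
        return skb;
}
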
4497 struct sk_buff *nskb, *tmp;
4510 nskb = list_skb;
4514 delta_truesize += nskb->truesize;
4515 if (skb_shared(nskb)) {
4516 tmp = skb_clone(nskb, GFP_ATOMIC);
4518 consume_skb(nskb);
4519 nskb = tmp;
4520 err = skb_unclone(nskb, GFP_ATOMIC);
4527 skb->next = nskb;
4529 tail->next = nskb;
4532 nskb->next = list_skb;
4536 tail = nskb;
4538 delta_len += nskb->len;
4540 skb_push(nskb, -skb_network_offset(nskb) + offset);
4542 skb_release_head_state(nskb);
4543 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb);
4544 __copy_skb_header(nskb, skb);
4546 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
4547 nskb->transport_header += len_diff;
4549 nskb->data - tnl_hlen,
4552 if (skb_needs_linearize(nskb, features) &&
4553 __skb_linearize(nskb))
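
The group at lines 4497-4553 walks an skb's frag_list and turns each fragment into an independent, writable segment (the skb_segment_list() style of GSO_BY_FRAGS segmentation). The sketch below covers only the unshare-and-relink core of that loop; the my_detach_frag_list() name, the error handling and the omission of the header fixups at lines 4540-4553 are assumptions made for brevity.

#include <linux/skbuff.h>

/* Sketch: detach skb's frag_list and rebuild it as a chain of private,
 * writable skbs, cloning any fragment that is still shared. */
static struct sk_buff *my_detach_frag_list(struct sk_buff *skb, int *errp)
{
        struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
        struct sk_buff *segs = NULL, *tail = NULL, *nskb, *tmp;
        int err;

        skb_shinfo(skb)->frag_list = NULL;

        while (list_skb) {
                nskb = list_skb;                        /* line 4510 */
                list_skb = list_skb->next;

                if (skb_shared(nskb)) {                 /* line 4515 */
                        tmp = skb_clone(nskb, GFP_ATOMIC);
                        consume_skb(nskb);              /* drop our ref to the shared skb */
                        nskb = tmp;
                        err = -ENOMEM;
                        if (!nskb)
                                goto err_out;
                }
                err = skb_unclone(nskb, GFP_ATOMIC);    /* line 4520: private head data */
                if (err)
                        goto err_out;

                nskb->next = NULL;
                if (!tail)
                        segs = nskb;
                else
                        tail->next = nskb;              /* line 4529 */
                tail = nskb;
        }

        *errp = 0;
        return segs;

err_out:
        kfree_skb(nskb);                /* NULL-safe: may be the failed clone */
        kfree_skb_list(segs);
        kfree_skb_list(list_skb);       /* not-yet-processed fragments */
        *errp = err;
        return NULL;
}
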
4694 struct sk_buff *nskb;
4713 nskb = skb_clone(list_skb, GFP_ATOMIC);
4714 if (unlikely(!nskb))
4737 if (unlikely(pskb_trim(nskb, len))) {
4738 kfree_skb(nskb);
4742 hsize = skb_end_offset(nskb);
4743 if (skb_cow_head(nskb, doffset + headroom)) {
4744 kfree_skb(nskb);
4748 nskb->truesize += skb_end_offset(nskb) - hsize;
4749 skb_release_head_state(nskb);
4750 __skb_push(nskb, doffset);
4757 nskb = __alloc_skb(hsize + doffset + headroom,
4761 if (unlikely(!nskb))
4764 skb_reserve(nskb, headroom);
4765 __skb_put(nskb, doffset);
4769 tail->next = nskb;
4771 segs = nskb;
4772 tail = nskb;
4774 __copy_skb_header(nskb, head_skb);
4776 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
4777 skb_reset_mac_len(nskb);
4780 nskb->data - tnl_hlen,
4783 if (nskb->len == len + doffset)
4788 if (!nskb->remcsum_offload)
4789 nskb->ip_summed = CHECKSUM_NONE;
4790 SKB_GSO_CB(nskb)->csum =
4792 skb_put(nskb,
4795 SKB_GSO_CB(nskb)->csum_start =
4796 skb_headroom(nskb) + doffset;
4798 if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len))
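
Lines 4757-4798 belong to the per-segment setup in an skb_segment()-style loop: a fresh head skb is allocated, linked onto the output chain, given copies of the original headers, and finally the payload is copied in (the listing continues below with the frag-sharing path). The sketch shows just that last copy step, checksumming on the fly when no hardware checksum can be kept; the sw_csum flag and the helper name stand in for the !csum case in the listed code and are assumptions.

#include <linux/skbuff.h>

/* Sketch: copy len bytes of payload from the original GSO skb into a linear
 * segment.  nskb is assumed to have len bytes of tailroom and doffset bytes
 * of already-copied headers. */
static int my_copy_segment_payload(struct sk_buff *nskb,
                                   const struct sk_buff *head_skb,
                                   int offset, int len, int doffset,
                                   bool sw_csum)
{
        if (sw_csum) {
                if (!nskb->remcsum_offload)             /* line 4788 */
                        nskb->ip_summed = CHECKSUM_NONE;
                SKB_GSO_CB(nskb)->csum =
                        skb_copy_and_csum_bits(head_skb, offset,
                                               skb_put(nskb, len), len);
                SKB_GSO_CB(nskb)->csum_start =
                        skb_headroom(nskb) + doffset;   /* line 4796 */
                return 0;
        }

        /* Plain copy when checksumming is handled elsewhere (line 4798). */
        return skb_copy_bits(head_skb, offset, skb_put(nskb, len), len);
}
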
4804 nskb_frag = skb_shinfo(nskb)->frags;
4807 skb_put(nskb, hsize), hsize);
4809 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags &
4812 if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
4818 skb_zerocopy_clone(nskb, list_skb,
4839 if (unlikely(skb_shinfo(nskb)->nr_frags >=
4857 skb_shinfo(nskb)->nr_frags++;
4872 nskb->data_len = len - hsize;
4873 nskb->len += nskb->data_len;
4874 nskb->truesize += nskb->data_len;
4878 if (skb_has_shared_frag(nskb) &&
4879 __skb_linearize(nskb))
4882 if (!nskb->remcsum_offload)
4883 nskb->ip_summed = CHECKSUM_NONE;
4884 SKB_GSO_CB(nskb)->csum =
4885 skb_checksum(nskb, doffset,
4886 nskb->len - doffset, 0);
4887 SKB_GSO_CB(nskb)->csum_start =
4888 skb_headroom(nskb) + doffset;
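
The tail of the listing (lines 4878-4888) is the post-assembly fixup for a segment that did take page frags: any frag still shared with the original skb is linearized away, and when hardware checksumming cannot be kept the checksum of the now-complete payload is computed in software. A sketch, again with the sw_csum flag and the helper name as assumptions; it differs from the copy-time checksum above in that it runs over an already assembled segment.

#include <linux/skbuff.h>

/* Sketch: finalize one segment.  doffset is the length of the copied
 * protocol headers; the payload checksum area starts right after them. */
static int my_finish_segment(struct sk_buff *nskb, int doffset, bool sw_csum)
{
        if (skb_has_shared_frag(nskb) && __skb_linearize(nskb))
                return -ENOMEM;                         /* lines 4878-4879 */

        if (!sw_csum)
                return 0;

        if (!nskb->remcsum_offload)                     /* line 4882 */
                nskb->ip_summed = CHECKSUM_NONE;
        SKB_GSO_CB(nskb)->csum = skb_checksum(nskb, doffset,
                                              nskb->len - doffset, 0);
        SKB_GSO_CB(nskb)->csum_start = skb_headroom(nskb) + doffset;
        return 0;
}
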