Lines matching defs:gl — uses of the const struct pkt_gl *gl gather list in the Linux cxgbit iSCSI target driver (cxgbit_main.c). The number at the start of each entry is the line number in that source file.

219 cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
226 __skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
227 gl->frags[0].offset + offset,
228 gl->frags[0].size - offset);
229 for (i = 1; i < gl->nfrags; i++)
231 gl->frags[i].page,
232 gl->frags[i].offset,
233 gl->frags[i].size);
235 skb_shinfo(skb)->nr_frags += gl->nfrags;
238 get_page(gl->frags[gl->nfrags - 1].page);
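
Lines 219-238 are the body of cxgbit_copy_frags(), which appends a gather list's pages to an skb as page frags. Reassembled into a readable whole below; the locals and braces elided by the listing are filled in here, so treat this as a reconstruction sketch rather than a verbatim copy:

	#include <linux/skbuff.h>
	#include <linux/mm.h>	/* get_page() */
	#include "cxgb4.h"	/* struct pkt_gl */

	static void
	cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
			  unsigned int offset)
	{
		u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
		u8 i;

		/* first frag: skip offset bytes of CPL header (lines 226-228) */
		__skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
				     gl->frags[0].offset + offset,
				     gl->frags[0].size - offset);
		/* remaining frags are taken verbatim (lines 229-233) */
		for (i = 1; i < gl->nfrags; i++)
			__skb_fill_page_desc(skb, skb_frag_idx + i,
					     gl->frags[i].page,
					     gl->frags[i].offset,
					     gl->frags[i].size);

		skb_shinfo(skb)->nr_frags += gl->nfrags;

		/* the last free-list page is still owned by the driver, so
		 * take an extra reference on it (line 238) */
		get_page(gl->frags[gl->nfrags - 1].page);
	}

Only the last page needs get_page(): references to the earlier pages are handed over with the gather list. The same pattern appears in cxgb4's own copy_frags() in sge.c.
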
242 cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
250 struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;
256 pdu_cb->hdr = gl->va + offset;
260 if (unlikely(gl->nfrags > 1))
265 struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;
272 pdu_cb->nr_dfrags = gl->nfrags;
278 cpl = (struct cpl_rx_iscsi_cmp *)gl->va;
282 pdu_cb->hdr = gl->va + offset;
288 if (unlikely(gl->nfrags > 1))
306 cxgbit_copy_frags(skb, gl, offset);
308 pdu_cb->frags += gl->nfrags;
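
Lines 242-308 are from cxgbit_lro_add_packet_gl(), which inspects the CPL message at gl->va, records where the iSCSI header or payload begins, then hands the gather list to cxgbit_copy_frags() with the CPL header skipped. A reduced sketch of that dispatch follows; the per-PDU control block is trimmed to the fields visible in the listing (the real driver also tracks flags, lengths, and digests), and the if/else chain is rendered as a switch:

	#include <linux/skbuff.h>
	#include "cxgb4.h"	/* struct pkt_gl */
	#include "t4_msg.h"	/* CPL opcodes and message structs */

	/* stand-in for the driver's per-PDU control block */
	struct pdu_cb_sketch {
		void *hdr;	/* start of the iSCSI BHS inside gl->va */
		u8 nr_dfrags;	/* gather-list frags carrying PDU payload */
		u8 frags;	/* total frags consumed by this PDU so far */
	};

	/* as reconstructed above */
	static void cxgbit_copy_frags(struct sk_buff *skb,
				      const struct pkt_gl *gl,
				      unsigned int offset);

	static void
	lro_add_packet_gl_sketch(struct sk_buff *skb, u8 op,
				 const struct pkt_gl *gl,
				 struct pdu_cb_sketch *pdu_cb)
	{
		unsigned int offset = 0;

		switch (op) {
		case CPL_ISCSI_HDR: {
			/* the BHS follows the CPL header (lines 250-256) */
			struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;

			offset = sizeof(*cpl);
			pdu_cb->hdr = gl->va + offset;
			break;
		}
		case CPL_ISCSI_DATA: {
			/* every frag past the CPL header is payload (line 272) */
			struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;

			offset = sizeof(*cpl);
			pdu_cb->nr_dfrags = gl->nfrags;
			break;
		}
		case CPL_RX_ISCSI_CMP: {
			/* completion carries the final header (lines 278-282) */
			struct cpl_rx_iscsi_cmp *cpl = (struct cpl_rx_iscsi_cmp *)gl->va;

			offset = sizeof(*cpl);
			pdu_cb->hdr = gl->va + offset;
			break;
		}
		}

		/* append the pages, skipping the CPL header bytes (line 306) */
		cxgbit_copy_frags(skb, gl, offset);
		pdu_cb->frags += gl->nfrags;	/* line 308 */
	}

The gl->nfrags > 1 checks at lines 260 and 288 guard the header paths: a BHS spanning multiple fragments gets special handling in the real function, which this sketch elides.
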
316 cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
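
Line 316 is the signature of cxgbit_lro_init_skb(), which cxgbit_lro_receive() calls (line 400, below) to start a fresh aggregate. A plausible shape, with the headroom size and any control-block accessors treated as assumptions, since the listing shows only the signature:

	#include <linux/netdevice.h>	/* struct napi_struct */
	#include <linux/skbuff.h>
	#include <linux/string.h>

	/* hypothetical stand-in for the driver's control-block headroom */
	#define LRO_CB_HEADROOM 256

	static struct sk_buff *lro_init_skb_sketch(struct napi_struct *napi)
	{
		struct sk_buff *skb = napi_alloc_skb(napi, LRO_CB_HEADROOM);

		if (unlikely(!skb))
			return NULL;

		/* the linear area only carries bookkeeping; payload arrives
		 * later as page frags via cxgbit_copy_frags() */
		memset(skb->data, 0, LRO_CB_HEADROOM);
		return skb;
	}
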
380 const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
400 skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
413 if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
420 if (gl)
421 cxgbit_lro_add_packet_gl(skb, op, gl);
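
Lines 380-421 are from cxgbit_lro_receive(). On first use it allocates an aggregate skb (line 400); before merging another gather list it checks at line 413 whether the skb can still take it, flushing otherwise; the merge itself is the cxgbit_lro_add_packet_gl() call at line 421. The overflow test reduces to the sketch below; the helper name and the max_len parameter (standing in for the driver's accumulated-length cap, LRO_FLUSH_LEN_MAX) are ours:

	#include <linux/skbuff.h>
	#include "cxgb4.h"	/* struct pkt_gl */

	static bool
	lro_skb_full(const struct sk_buff *skb, const struct pkt_gl *gl,
		     u32 pdu_totallen, u32 max_len)
	{
		/* flush when the next gather list would overflow the skb's
		 * frag array, or the aggregate has hit the length cap */
		return (skb_shinfo(skb)->nr_frags + gl->nfrags > MAX_SKB_FRAGS) ||
		       (pdu_totallen >= max_len);
	}
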
435 const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
462 rpl = gl ? (struct cpl_tx_data *)gl->va :
474 if (!gl) {
490 if (unlikely(op != *(u8 *)gl->va)) {
492 gl->va, be64_to_cpu(*rsp),
493 get_unaligned_be64(gl->va),
494 gl->tot_len);
500 if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
506 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
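
Lines 435-506 are from the ULD rx handler. It first locates the CPL (line 462): at gl->va when a gather list is present, otherwise immediately after the 64-bit RSS header in the response descriptor. The !gl branch (line 474) copies the inline message into a fresh skb; with a gather list it sanity-checks that the opcode at gl->va matches the RSS one (lines 490-494), offers iSCSI CPLs to cxgbit_lro_receive() first (line 500), and only on refusal falls back to cxgb4_pktgl_to_skb() (line 506). A reduced sketch of that shape, with the inline length passed in rather than computed from the descriptor layout, and with our own log message text:

	#include <linux/printk.h>
	#include <linux/skbuff.h>
	#include <asm/unaligned.h>	/* get_unaligned_be64() */
	#include "cxgb4.h"		/* struct pkt_gl, cxgb4_pktgl_to_skb() */

	static struct sk_buff *
	rx_to_skb_sketch(const __be64 *rsp, const struct pkt_gl *gl,
			 unsigned int inline_len, unsigned int pull_len)
	{
		u8 op = *(u8 *)rsp;
		struct sk_buff *skb;

		if (!gl) {
			/* inline delivery (line 474): the CPL starts right
			 * after the RSS header, so copy it into a fresh skb;
			 * inline_len stands in for the driver's computation */
			skb = alloc_skb(inline_len, GFP_ATOMIC);
			if (!skb)
				return NULL;
			__skb_put(skb, inline_len);
			skb_copy_to_linear_data(skb, rsp + 1, inline_len);
			return skb;
		}

		/* opcode sanity check (lines 490-494) */
		if (unlikely(op != *(u8 *)gl->va)) {
			pr_info("opcode mismatch: rss %#llx fl %#llx len %u\n",
				be64_to_cpu(*rsp), get_unaligned_be64(gl->va),
				gl->tot_len);
			return NULL;
		}

		/* default path (line 506): build a linear+frags skb, pulling
		 * pull_len bytes (the driver's RX_PULL_LEN) into the linear
		 * area */
		return cxgb4_pktgl_to_skb(gl, pull_len, pull_len);
	}
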