Lines matching defs:fl — occurrences of the SGE free-list member fl (struct sge_fl)

559 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
561 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
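
The hits at 559-561 show the top-up helper: a refill is always bounded by both a per-call cap (MAX_RX_REFILL) and the remaining headroom fl->size - fl->credits. A minimal userspace model of that pattern; the struct and refill function below are simplified stand-ins, not the driver's own:

    #include <stdio.h>

    #define MAX_RX_REFILL 16U   /* illustrative; the driver defines its own */

    struct fl_model {
        unsigned int size;      /* capacity of the free-list ring */
        unsigned int credits;   /* buffers currently posted to HW */
    };

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    /* Stand-in for refill_fl(): post n buffers, assume all succeed. */
    static void refill_model(struct fl_model *fl, unsigned int n)
    {
        fl->credits += n;
    }

    int main(void)
    {
        struct fl_model fl = { .size = 64, .credits = 40 };

        /* __refill_fl() caps each top-up so one call cannot hog the CPU
         * when the list is deeply depleted. */
        refill_model(&fl, min_u(MAX_RX_REFILL, fl.size - fl.credits));
        printf("credits after refill: %u/%u\n", fl.credits, fl.size);
        return 0;
    }
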
653 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
678 if (q->fl[i].desc) {
680 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
682 free_rx_bufs(pdev, &q->fl[i]);
683 kfree(q->fl[i].sdesc);
685 q->fl[i].size *
686 sizeof(struct rx_desc), q->fl[i].desc,
687 q->fl[i].phys_addr);
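
Lines 678-687 tear a populated list down in a strict order: quiesce the hardware context first (t3_sge_disable_fl), then return the posted buffers, then free the software descriptor array, and finally the descriptor ring itself, all guarded by the fl[i].desc NULL check at 678. A simplified model of that ordering, with plain malloc/free standing in for the kernel allocators:

    #include <stdlib.h>

    struct fl_model {
        void *desc;    /* stand-in for the DMA-coherent descriptor ring */
        void *sdesc;   /* stand-in for the kmalloc'ed sw descriptor array */
    };

    static void hw_disable_fl(struct fl_model *fl) { (void)fl; }   /* stop HW DMA */
    static void release_rx_bufs(struct fl_model *fl) { (void)fl; } /* unmap+free bufs */

    static void free_fl(struct fl_model *fl)
    {
        if (!fl->desc)
            return;                /* list was never allocated */
        hw_disable_fl(fl);         /* 1: HW must stop before memory goes away */
        release_rx_bufs(fl);       /* 2: return every posted buffer */
        free(fl->sdesc);           /* 3: sw descriptor array (kfree) */
        free(fl->desc);            /* 4: the ring itself (dma_free_coherent) */
        fl->desc = NULL;
    }

    int main(void)
    {
        struct fl_model fl = { malloc(64), malloc(64) };
        free_fl(&fl);
        return 0;
    }
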
729 qs->fl[0].cntxt_id = 2 * id;
730 qs->fl[1].cntxt_id = 2 * id + 1;
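
Lines 729-730 pin down the id scheme: each queue set owns two free lists whose hardware context ids are derived from the queue-set id, the odd id going to fl[1] (the jumbo list, per the sizing at 3040-3043 below). A tiny model of the mapping:

    #include <assert.h>

    static unsigned int fl_cntxt_id(unsigned int qset_id, int jumbo)
    {
        return 2 * qset_id + (jumbo ? 1 : 0);
    }

    int main(void)
    {
        assert(fl_cntxt_id(3, 0) == 6);   /* fl[0] of queue set 3 */
        assert(fl_cntxt_id(3, 1) == 7);   /* fl[1] (jumbo) of queue set 3 */
        return 0;
    }
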
767 * @fl: the SGE free list holding the packet
779 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
783 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
786 fl->credits--;
802 recycle_rx_buf(adap, fl, fl->cidx);
806 if (unlikely(fl->credits < drop_thres) &&
807 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
813 fl->buf_size, DMA_FROM_DEVICE);
816 __refill_fl(adap, fl);
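
get_packet() (779-816) makes a copy-or-consume decision: a short frame is copied into a fresh skb so the original DMA buffer can be recycled in place (the credit taken at 786 is given back by recycle_rx_buf at 802), while a long frame is unmapped (813) and handed up, with an eager refill once credits fall below drop_thres (806-807). A userspace model of that decision; the threshold and buffer handling are simplified stand-ins:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define COPY_THRES 128U   /* illustrative small-packet threshold */

    struct fl_model { unsigned int credits; };

    /* Returns the buffer to hand to the stack: a copy for small frames
     * (the original stays on the free list), the original otherwise. */
    static void *rx_packet(struct fl_model *fl, void *buf, unsigned int len,
                           unsigned int drop_thres)
    {
        fl->credits--;                 /* the HW credit is consumed up front */

        if (len <= COPY_THRES) {
            void *copy = malloc(len);
            if (copy) {
                memcpy(copy, buf, len);
                fl->credits++;         /* recycle_rx_buf(): repost same buffer */
                return copy;
            }
        }
        if (fl->credits < drop_thres)
            fprintf(stderr, "below drop_thres: refill now\n");
        return buf;                    /* buffer leaves the free list */
    }

    int main(void)
    {
        struct fl_model fl = { .credits = 4 };
        char frame[64] = "payload";
        void *skb = rx_packet(&fl, frame, sizeof(frame), 8);

        puts(skb == frame ? "handed up the original buffer"
                          : "copied the small frame");
        if (skb != frame)
            free(skb);
        return 0;
    }

Copying small frames trades one memcpy for keeping an already-mapped DMA buffer hot on the free list.
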
823 * @fl: the SGE free list holding the packet
839 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
844 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
861 fl->credits--;
862 recycle_rx_buf(adap, fl, fl->cidx);
867 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
884 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
886 fl->alloc_size, DMA_FROM_DEVICE);
905 fl->credits--;
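
get_packet_pg() (839-905) works on page-chunk lists: one DMA mapping covers an entire higher-order allocation (fl->alloc_size), the chunks carved from it share that mapping through the p_cnt refcount, and the unmap at 884-886 fires only when the last chunk is consumed and the page is no longer the one the list is currently carving (fl->pg_chunk.page). A refcount model of that rule; all names here are illustrative:

    #include <stdio.h>

    struct page_model {
        int refcnt;    /* stand-in for *sd->pg_chunk.p_cnt */
        int mapped;    /* is the alloc_size-wide DMA mapping still live? */
    };

    static void put_chunk(struct page_model *pg, int still_being_carved)
    {
        if (--pg->refcnt == 0 && !still_being_carved) {
            pg->mapped = 0;   /* dma_unmap_page(..., fl->alloc_size, ...) */
            printf("last chunk gone: page unmapped\n");
        }
    }

    int main(void)
    {
        struct page_model pg = { .refcnt = 2, .mapped = 1 };
        put_chunk(&pg, 0);    /* mapping must outlive the first chunk */
        put_chunk(&pg, 0);    /* refcount hits zero: safe to unmap */
        return 0;
    }
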
2127 * @fl: the free list containing the page chunk to add
2135 struct sge_fl *fl, int len, int complete)
2137 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2150 fl->credits--;
2154 fl->buf_size - SGE_PG_RSVD, DMA_FROM_DEVICE);
2157 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
2159 fl->alloc_size, DMA_FROM_DEVICE);
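
lro_add_page() (2135-2159) consumes a chunk the same way but, judging by its name and its complete parameter, appends the chunk to a packet being aggregated instead of building a standalone skb; the sync at 2154 covers only the usable part of the buffer (buf_size - SGE_PG_RSVD). A toy fragment list sketching that append-until-complete shape; the structures are invented for illustration:

    #include <stdio.h>

    #define MAX_FRAGS 4

    struct frag { const void *page; unsigned int len; };

    struct agg_model {
        struct frag frags[MAX_FRAGS];
        int nr_frags;
    };

    /* Append one received chunk; the caller flushes when complete != 0. */
    static int add_frag(struct agg_model *a, const void *page,
                        unsigned int len, int complete)
    {
        if (a->nr_frags == MAX_FRAGS)
            return -1;
        a->frags[a->nr_frags].page = page;
        a->frags[a->nr_frags].len = len;
        a->nr_frags++;
        return complete;
    }

    int main(void)
    {
        struct agg_model a = { .nr_frags = 0 };
        char chunk[256];

        add_frag(&a, chunk, sizeof(chunk), 0);
        add_frag(&a, chunk, sizeof(chunk), 1);   /* final chunk of the packet */
        printf("aggregated %d fragments\n", a.nr_frags);
        return 0;
    }
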
2369 struct sge_fl *fl;
2373 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2374 if (fl->use_pages) {
2375 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2378 __refill_fl(adap, fl);
2380 lro_add_page(adap, qs, fl,
2386 skb = get_packet_pg(adap, fl, q,
2392 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2401 if (++fl->cidx == fl->size)
2402 fl->cidx = 0;
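
The response-processing hits (2369-2402) show two details worth calling out: the hardware reports which list a packet came from (F_RSPD_FLQ selects fl[1], line 2373), and the consumer index wraps by explicit comparison (2401-2402), so ring sizes need not be powers of two. A minimal model of both:

    #include <assert.h>

    struct fl_model { unsigned int cidx, size; };

    static void fl_consume(struct fl_model *fl)
    {
        if (++fl->cidx == fl->size)   /* compare-and-reset, not a mask */
            fl->cidx = 0;
    }

    int main(void)
    {
        struct fl_model fls[2] = { { 0, 3 }, { 0, 3 } };
        unsigned int rsp_flags = 1;   /* pretend the FLQ flag is bit 0 */
        struct fl_model *fl = (rsp_flags & 1) ? &fls[1] : &fls[0];

        fl_consume(fl);
        fl_consume(fl);
        fl_consume(fl);
        assert(fls[1].cidx == 0);     /* wrapped after 'size' consumes */
        return 0;
    }
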
2979 if (qs->fl[0].credits < qs->fl[0].size)
2980 __refill_fl(adap, &qs->fl[0]);
2981 if (qs->fl[1].credits < qs->fl[1].size)
2982 __refill_fl(adap, &qs->fl[1]);
3033 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
3036 &q->fl[0].phys_addr, &q->fl[0].sdesc);
3037 if (!q->fl[0].desc)
3040 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
3043 &q->fl[1].phys_addr, &q->fl[1].sdesc);
3044 if (!q->fl[1].desc)
3076 q->fl[0].gen = q->fl[1].gen = 1;
3077 q->fl[0].size = p->fl_size;
3078 q->fl[1].size = p->jumbo_size;
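
Lines 3033-3044 allocate each free list as two parallel arrays of the same length, sized from the fl_size/jumbo_size parameters echoed at 3077-3078: a DMA-visible ring of hardware descriptors (desc, with its bus address in phys_addr) and a driver-side array of software descriptors (sdesc) that remembers what was posted in each slot. A userspace sketch of that shape, with calloc standing in for the kernel's DMA-coherent and kmalloc allocations:

    #include <stdlib.h>

    struct rx_desc_model { unsigned long long addr; unsigned int len_gen; };
    struct rx_sw_desc_model { void *buf; };

    struct fl_model {
        struct rx_desc_model *desc;       /* what the NIC reads */
        struct rx_sw_desc_model *sdesc;   /* what the driver remembers */
        unsigned int size;
    };

    static int fl_alloc(struct fl_model *fl, unsigned int nelem)
    {
        fl->desc = calloc(nelem, sizeof(*fl->desc));
        fl->sdesc = calloc(nelem, sizeof(*fl->sdesc));
        if (!fl->desc || !fl->sdesc) {
            free(fl->desc);               /* free(NULL) is a no-op */
            free(fl->sdesc);
            return -1;
        }
        fl->size = nelem;
        return 0;
    }

    int main(void)
    {
        struct fl_model fl;

        if (fl_alloc(&fl, 512))
            return 1;
        free(fl.desc);
        free(fl.sdesc);
        return 0;
    }
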
3089 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
3091 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
3094 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
3096 q->fl[1].buf_size = is_offload(adapter) ?
3101 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
3102 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
3103 q->fl[0].order = FL0_PG_ORDER;
3104 q->fl[1].order = FL1_PG_ORDER;
3105 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3106 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
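
The sizing hits at 3089-3106 configure the pair asymmetrically: fl[0] carries small receive buffers (page chunks when FL0_PG_CHUNK_SIZE > 0, otherwise SGE_RX_SM_BUF_SIZE plus room for the cpl_rx_data header), while fl[1] carries jumbo-sized buffers whose size depends on offload support; use_pages, order, and alloc_size then describe how page-mode chunks are carved. A sketch of the resulting shape, with illustrative sizes rather than the driver's real, PAGE_SIZE-dependent values:

    #include <stdio.h>

    struct fl_cfg {
        unsigned int buf_size;     /* bytes handed to HW per buffer */
        unsigned int order;        /* page allocation order in page mode */
        int use_pages;             /* page-chunk mode vs whole-skb mode */
    };

    int main(void)
    {
        struct fl_cfg fl[2] = {
            { .buf_size = 2048,  .order = 0, .use_pages = 1 },  /* small RX */
            { .buf_size = 16384, .order = 2, .use_pages = 1 },  /* jumbo RX */
        };

        for (int i = 0; i < 2; i++)
            printf("fl[%d]: %u-byte %s buffers\n", i, fl[i].buf_size,
                   fl[i].use_pages ? "page-chunk" : "skb");
        return 0;
    }
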
3113 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
3118 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
3119 q->fl[i].phys_addr, q->fl[i].size,
3120 q->fl[i].buf_size - SGE_PG_RSVD,
3159 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3166 if (avail < q->fl[0].size)
3170 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3172 if (avail < q->fl[1].size)
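
Finally, 3159-3172 show the bring-up fill: it requests the full ring and compares the count refill_fl actually delivered against fl->size, so a short initial fill is detected immediately rather than discovered under load. A sketch of that check (how the driver reacts to a short fill, warn versus abort, is not visible in these hits):

    #include <stdio.h>

    /* Stand-in refill: tries to post n buffers, may come up short. */
    static unsigned int refill_model(unsigned int n)
    {
        return n - 1;   /* simulate one failed buffer allocation */
    }

    int main(void)
    {
        unsigned int size = 512;
        unsigned int avail = refill_model(size);

        if (avail < size)   /* same comparison as lines 3166 and 3172 */
            fprintf(stderr, "free list brought up with only %u of %u buffers\n",
                    avail, size);
        return 0;
    }
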