Lines Matching refs:rq

165 static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
166 static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq);
169 static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
170 static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m);
1567 oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m)
1569 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1576 if (rq->ring->cidx == rq->ring->pidx) {
1581 pd = &rq->pckts[rq->ring->cidx];
1583 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1584 bus_dmamap_unload(rq->tag, pd->map);
1585 RING_GET(rq->ring, 1);
1586 rq->pending--;
1588 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1595 if (rq->islro)
1601 if (rq->islro)
1610 if (rq->islro) {
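
The fragment walk above (source lines 1576-1586) is a single-producer, single-consumer ring: the consumer index cidx chases the producer index pidx, each consumed slot has its DMA map synced and unloaded, and pending tracks how many posted buffers remain. A minimal standalone sketch of the same consume step follows; the toy_rq and toy_ring types are stand-ins for the driver's oce_rq and its ring, and the bus_dma calls are elided.

#include <stdio.h>

#define RING_SIZE 8          /* stands in for OCE_RQ_PACKET_ARRAY_SIZE */

struct toy_ring {
        int cidx;            /* consumer index, advanced by RING_GET() */
        int pidx;            /* producer index, advanced by RING_PUT() */
};

struct toy_rq {
        struct toy_ring ring;
        int pending;         /* buffers posted but not yet consumed */
        int frag_size;
};

/* Consume one posted buffer; returns the slot index, or -1 if empty. */
static int
toy_ring_get(struct toy_rq *rq)
{
        if (rq->ring.cidx == rq->ring.pidx)   /* cidx == pidx: ring empty */
                return (-1);
        int slot = rq->ring.cidx;
        rq->ring.cidx = (rq->ring.cidx + 1) % RING_SIZE; /* RING_GET(ring, 1) */
        rq->pending--;
        return (slot);
}

int
main(void)
{
        struct toy_rq rq = { .ring = { .cidx = 0, .pidx = 3 },
                             .pending = 3, .frag_size = 2048 };
        int len = 5000, slot;

        while (len > 0 && (slot = toy_ring_get(&rq)) != -1) {
                /* frag_len = min(len, frag_size), as on source line 1588 */
                int frag_len = (len > rq.frag_size) ? rq.frag_size : len;
                printf("slot %d carries %d bytes\n", slot, frag_len);
                len -= frag_len;
        }
        return (0);
}
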
1632 oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2)
1634 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1661 cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
1662 if (cq_info.pkt_size % rq->cfg.frag_size)
1665 oce_rx_mbuf_chain(rq, &cq_info, &m);
1677 if (rq->queue_index)
1678 m->m_pkthdr.flowid = (rq->queue_index - 1);
1680 m->m_pkthdr.flowid = rq->queue_index;
1705 rq->rx_stats.rx_pkts++;
1706 rq->rx_stats.rx_bytes += cq_info.pkt_size;
1707 rq->rx_stats.rx_frags += cq_info.num_frags;
1708 rq->rx_stats.rx_ucast_pkts++;
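
Source lines 1661-1662 compute the fragment count as a ceiling division: pkt_size / frag_size, rounded up when a remainder is left over. The same arithmetic as a self-contained example (frag_count is a hypothetical helper, not a driver function):

#include <assert.h>
#include <stdint.h>

/* Number of frag_size buffers needed to hold pkt_size bytes
 * (same ceiling division as source lines 1661-1662). */
static uint32_t
frag_count(uint32_t pkt_size, uint32_t frag_size)
{
        uint32_t n = pkt_size / frag_size;
        if (pkt_size % frag_size)
                n++;
        return (n);
}

int
main(void)
{
        assert(frag_count(4096, 2048) == 2);   /* exact multiple */
        assert(frag_count(4097, 2048) == 3);   /* remainder adds a frag */
        assert(frag_count(1, 2048) == 1);
        return (0);
}
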
1714 oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1716 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1729 oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1734 oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1750 oce_rx_mbuf_chain(rq, &cq_info, &m);
1755 if (rq->queue_index)
1756 m->m_pkthdr.flowid = (rq->queue_index - 1);
1758 m->m_pkthdr.flowid = rq->queue_index;
1786 (rq->lro.lro_cnt != 0)) {
1788 if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1789 rq->lro_pkts_queued++;
1801 rq->rx_stats.rx_pkts++;
1802 rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1803 rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1805 rq->rx_stats.rx_mcast_pkts++;
1807 rq->rx_stats.rx_ucast_pkts++;
1815 oce_discard_rx_comp(struct oce_rq *rq, int num_frags)
1819 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1822 if (rq->ring->cidx == rq->ring->pidx) {
1827 pd = &rq->pckts[rq->ring->cidx];
1828 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1829 bus_dmamap_unload(rq->tag, pd->map);
1835 RING_GET(rq->ring, 1);
1836 rq->pending--;
1878 oce_rx_flush_lro(struct oce_rq *rq)
1880 struct lro_ctrl *lro = &rq->lro;
1881 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1887 rq->lro_pkts_queued = 0;
1900 lro = &sc->rq[i]->lro;
1920 lro = &sc->rq[i]->lro;
1928 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1930 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1942 in = (rq->ring->pidx + 1) % OCE_RQ_PACKET_ARRAY_SIZE;
1944 pd = &rq->pckts[rq->ring->pidx];
1952 pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;
1954 rc = bus_dmamap_load_mbuf_sg(rq->tag,
1969 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1971 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1975 RING_PUT(rq->ring, 1);
1977 rq->pending++;
1983 rxdb_reg.bits.qid = rq->rq_id;
1984 if (rq->islro) {
1985 val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
1994 rxdb_reg.bits.qid = rq->rq_id;
1996 if (rq->islro) {
1997 val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
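
Source lines 1983-1997 show two doorbell formats: the normal RQ doorbell fills a qid bitfield (rxdb_reg.bits.qid = rq->rq_id), while the LRO variant ORs the queue id into a raw 32-bit word under DB_LRO_RQ_ID_MASK. A sketch of that mask-and-OR packing is below; the TOY_* mask value and posted-count shift are illustrative assumptions, not the hardware's actual register layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only -- not the real doorbell format. */
#define TOY_LRO_RQ_ID_MASK   0x7FFU   /* low bits: receive queue id */
#define TOY_NUM_POSTED_SHIFT 16       /* high bits: buffers just posted */

static uint32_t
toy_lro_doorbell(uint32_t rq_id, uint32_t nposted)
{
        uint32_t val = 0;
        val |= rq_id & TOY_LRO_RQ_ID_MASK;    /* cf. source line 1985 */
        val |= nposted << TOY_NUM_POSTED_SHIFT;
        return (val);
}

int
main(void)
{
        printf("doorbell word: 0x%08x\n", toy_lro_doorbell(5, 64));
        return (0);
}
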
2010 oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq)
2013 oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
2015 if ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
2016 oce_alloc_rx_bufs(rq, ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1));
2018 if ((OCE_RQ_PACKET_ARRAY_SIZE - 1 - rq->pending) > 64)
2019 oce_alloc_rx_bufs(rq, 64);
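
oce_check_rx_bufs (source lines 2010-2019) re-arms the completion queue and then tops the ring back up. The LRO path refills to (OCE_RQ_PACKET_ARRAY_SIZE - pending) - 1, plausibly keeping one slot free so a full ring (pidx just behind cidx) is not confused with an empty one (pidx == cidx); the plain path refills in fixed batches of 64. A sketch of that headroom arithmetic, with refill_count as a hypothetical helper:

#include <stdio.h>

#define RQ_SIZE 1024   /* stands in for OCE_RQ_PACKET_ARRAY_SIZE */

/* How many buffers to post, leaving one ring slot unused.
 * Mirrors the arithmetic on source lines 2015-2019. */
static int
refill_count(int pending, int islro)
{
        int headroom = RQ_SIZE - 1 - pending;

        if (islro)
                return (headroom > 0 ? headroom : 0); /* fill to the brim */
        /* non-LRO path refills in fixed batches of 64 */
        return (headroom > 64 ? 64 : 0);
}

int
main(void)
{
        printf("lro, 1000 pending -> post %d\n", refill_count(1000, 1));
        printf("plain, 900 pending -> post %d\n", refill_count(900, 0));
        printf("plain, 1000 pending -> post %d\n", refill_count(1000, 0));
        return (0);
}
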
2029 struct oce_rq *rq = (struct oce_rq *)arg;
2030 struct oce_cq *cq = rq->cq;
2031 POCE_SOFTC sc = rq->parent;
2036 LOCK(&rq->rx_lock);
2041 /* we should not get singleton cqe after cqe1 on same rq */
2042 if (rq->cqe_firstpart != NULL) {
2047 rq->rx_stats.rxcp_err++;
2050 oce_rx_lro(rq, cqe, NULL);
2051 rq->rx_stats.rx_compl++;
2058 /* we should not get cqe1 after cqe1 on same rq */
2059 if (rq->cqe_firstpart != NULL) {
2063 rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
2068 rq->rx_stats.rxcp_err++;
2072 if (rq->cqe_firstpart == NULL) {
2076 oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);
2078 rq->rx_stats.rx_compl++;
2079 rq->cqe_firstpart->valid = 0;
2081 rq->cqe_firstpart = NULL;
2092 oce_check_rx_bufs(sc, num_cqes, rq);
2094 UNLOCK(&rq->rx_lock);
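
The hardware-LRO completion handler (source lines 2029-2094) pairs split completions: a part-1 CQE is stashed in rq->cqe_firstpart, the following part-2 CQE is passed to oce_rx_lro() together with it, and a singleton arriving while a part 1 is pending, a part 1 after a part 1, or a part 2 with no stashed part 1 bumps rxcp_err. A minimal state-machine sketch of that pairing with toy CQE types; the error handling is simplified relative to the driver:

#include <stddef.h>
#include <stdio.h>

enum toy_cqe_type { CQE_SINGLETON, CQE_PART1, CQE_PART2 };

struct toy_cqe { enum toy_cqe_type type; };

struct toy_rq {
        struct toy_cqe *cqe_firstpart;  /* stashed part 1, if any */
        int rx_compl;
        int rxcp_err;
};

static void
toy_lro_cq_handler(struct toy_rq *rq, struct toy_cqe *cqe)
{
        switch (cqe->type) {
        case CQE_SINGLETON:
                if (rq->cqe_firstpart != NULL) { /* singleton after part 1 */
                        rq->rxcp_err++;
                        return;
                }
                rq->rx_compl++;                  /* complete on its own */
                break;
        case CQE_PART1:
                if (rq->cqe_firstpart != NULL) { /* part 1 after part 1 */
                        rq->rxcp_err++;
                        return;
                }
                rq->cqe_firstpart = cqe;         /* wait for part 2 */
                break;
        case CQE_PART2:
                if (rq->cqe_firstpart == NULL) { /* part 2 with no part 1 */
                        rq->rxcp_err++;
                        return;
                }
                rq->rx_compl++;                  /* pair consumed */
                rq->cqe_firstpart = NULL;
                break;
        }
}

int
main(void)
{
        struct toy_rq rq = { NULL, 0, 0 };
        struct toy_cqe p1 = { CQE_PART1 }, p2 = { CQE_PART2 };

        toy_lro_cq_handler(&rq, &p1);
        toy_lro_cq_handler(&rq, &p2);
        printf("completions %d, errors %d\n", rq.rx_compl, rq.rxcp_err);
        return (0);
}
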
2102 struct oce_rq *rq = (struct oce_rq *)arg;
2103 struct oce_cq *cq = rq->cq;
2104 POCE_SOFTC sc = rq->parent;
2108 if (rq->islro) {
2112 LOCK(&rq->rx_lock);
2120 oce_rx(rq, cqe);
2122 rq->rx_stats.rxcp_err++;
2125 oce_rx(rq, cqe);
2127 rq->rx_stats.rx_compl++;
2131 if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
2132 oce_rx_flush_lro(rq);
2148 oce_rx_flush_lro(rq);
2151 oce_check_rx_bufs(sc, num_cqes, rq);
2152 UNLOCK(&rq->rx_lock);
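
Source line 2131 caps software-LRO batching: once 16 packets sit in the per-RQ lro control block, oce_rx_flush_lro() pushes them up the stack, and the handler flushes once more on exit (source line 2148). A sketch of that threshold pattern, with toy_ names standing in for the driver's:

#include <stdio.h>

#define LRO_FLUSH_THRESHOLD 16   /* cf. source line 2131 */

struct toy_rq { int lro_pkts_queued; };

static void
toy_flush_lro(struct toy_rq *rq)
{
        printf("flushing %d queued packets\n", rq->lro_pkts_queued);
        rq->lro_pkts_queued = 0;          /* cf. source line 1887 */
}

static void
toy_queue_pkt(struct toy_rq *rq, int lro_enabled)
{
        rq->lro_pkts_queued++;
        /* Flush eagerly once enough packets have aggregated. */
        if (lro_enabled && rq->lro_pkts_queued >= LRO_FLUSH_THRESHOLD)
                toy_flush_lro(rq);
}

int
main(void)
{
        struct toy_rq rq = { 0 };

        for (int i = 0; i < 40; i++)
                toy_queue_pkt(&rq, 1);
        if (rq.lro_pkts_queued > 0)       /* final flush on handler exit */
                toy_flush_lro(&rq);
        return (0);
}
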
2383 struct oce_rq *rq;
2399 rq = sc->rq[0];
2400 rxpkts = rq->rx_stats.rx_pkts;
2404 rq = sc->rq[i + 1];
2405 rxpkts += rq->rx_stats.rx_pkts;
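
Source lines 2399-2405 total rx_pkts across the receive queues, seeding from sc->rq[0] and folding in sc->rq[i + 1] for the rest. The same aggregation as a toy loop:

#include <stdint.h>
#include <stdio.h>

#define NQUEUES 4

struct toy_rq { uint64_t rx_pkts; };

int
main(void)
{
        struct toy_rq rq[NQUEUES] = { {100}, {200}, {300}, {400} };
        uint64_t rxpkts;
        int i;

        /* Seed from queue 0, then fold in the remaining queues,
         * mirroring the rq[0] / rq[i + 1] indexing on source
         * lines 2399-2405. */
        rxpkts = rq[0].rx_pkts;
        for (i = 0; i < NQUEUES - 1; i++)
                rxpkts += rq[i + 1].rx_pkts;

        printf("total rx packets: %llu\n", (unsigned long long)rxpkts);
        return (0);
}
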
2588 struct oce_rq *rq;
2617 for_all_rq_queues(sc, rq, i)
2618 oce_drain_rq_cq(rq);
2636 struct oce_rq *rq;
2646 for_all_rq_queues(sc, rq, i) {
2647 rc = oce_start_rq(rq);