Lines Matching refs:wq

142 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
143 static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
146 struct oce_wq *wq);
562 struct oce_wq *wq = NULL;
569 wq = sc->wq[queue_index];
571 LOCK(&wq->tx_lock);
572 status = oce_multiq_transmit(ifp, m, wq);
573 UNLOCK(&wq->tx_lock);
588 while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
869 struct oce_wq *wq = sc->wq[wq_index];
908 pd = &wq->pckts[wq->pkt_desc_head];
910 rc = bus_dmamap_load_mbuf_sg(wq->tag,
920 if (num_wqes >= RING_NUM_FREE(wq->ring)) {
921 bus_dmamap_unload(wq->tag, pd->map);
924 atomic_store_rel_int(&wq->pkt_desc_head,
925 (wq->pkt_desc_head + 1) % \
927 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
931 RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
963 RING_PUT(wq->ring, 1);
964 atomic_add_int(&wq->ring->num_used, 1);
968 RING_GET_PRODUCER_ITEM_VA(wq->ring,
974 pd->wqe_idx = wq->ring->pidx;
975 RING_PUT(wq->ring, 1);
976 atomic_add_int(&wq->ring->num_used, 1);
980 RING_GET_PRODUCER_ITEM_VA(wq->ring,
986 pd->wqe_idx = wq->ring->pidx;
987 RING_PUT(wq->ring, 1);
988 atomic_add_int(&wq->ring->num_used, 1);
993 wq->tx_stats.tx_reqs++;
994 wq->tx_stats.tx_wrbs += num_wqes;
995 wq->tx_stats.tx_bytes += m->m_pkthdr.len;
996 wq->tx_stats.tx_pkts++;
998 bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
1000 reg_value = (num_wqes << 16) | wq->wq_id;
1001 OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
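
Taken together, the matches above from the transmit path (source lines 869-1001) show the posting-side bookkeeping: a packet-descriptor slot is claimed, the packet is refused if the WQE ring lacks room, pkt_desc_head advances modulo OCE_WQ_PACKET_ARRAY_SIZE, one WQE is put on the ring per header/fragment, and the doorbell is finally written with the WQE count packed above the WQ id. The following is a minimal, self-contained sketch of just that index and doorbell arithmetic; struct toy_wq, toy_tx_post and the array size are hypothetical stand-ins, not the driver's own ring macros or busdma calls.

#include <stdint.h>
#include <stdio.h>

#define OCE_WQ_PACKET_ARRAY_SIZE 64   /* hypothetical size, for illustration only */

/* Hypothetical stand-in for the driver's per-WQ transmit state. */
struct toy_wq {
	uint32_t pkt_desc_head;   /* next free packet-descriptor slot */
	uint32_t pidx;            /* WQE ring producer index */
	uint32_t num_items;       /* WQE ring size */
	uint32_t num_used;        /* WQEs currently outstanding */
	uint16_t wq_id;           /* hardware work-queue id */
};

/* Post num_wqes WQEs for one packet and compute the doorbell word,
 * mirroring reg_value = (num_wqes << 16) | wq->wq_id from the listing. */
static int
toy_tx_post(struct toy_wq *wq, uint32_t num_wqes, uint32_t *doorbell)
{
	/* Mirror of the RING_NUM_FREE() check: back off if the ring is full. */
	if (num_wqes >= wq->num_items - wq->num_used)
		return (-1);              /* caller would requeue the mbuf */

	/* Advance the packet-descriptor head modulo the array size. */
	wq->pkt_desc_head = (wq->pkt_desc_head + 1) % OCE_WQ_PACKET_ARRAY_SIZE;

	/* One RING_PUT()/num_used increment per WQE posted. */
	wq->pidx = (wq->pidx + num_wqes) % wq->num_items;
	wq->num_used += num_wqes;

	/* WQE count in the upper half-word, WQ id in the lower half-word. */
	*doorbell = (num_wqes << 16) | wq->wq_id;
	return (0);
}

int
main(void)
{
	struct toy_wq wq = { .num_items = 256, .wq_id = 3 };
	uint32_t db;

	if (toy_tx_post(&wq, 2, &db) == 0)
		printf("doorbell word 0x%08x, ring entries used %u\n", db, wq.num_used);
	return (0);
}
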
1029 oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
1032 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1035 pd = &wq->pckts[wq->pkt_desc_tail];
1036 atomic_store_rel_int(&wq->pkt_desc_tail,
1037 (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1038 atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1039 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1040 bus_dmamap_unload(wq->tag, pd->map);
1048 if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1050 oce_tx_restart(sc, wq);
1057 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1064 if (!drbr_empty(sc->ifp, wq->br))
1068 taskqueue_enqueue(taskqueue_swi, &wq->txtask);
1145 struct oce_wq *wq = arg;
1146 POCE_SOFTC sc = wq->parent;
1151 LOCK(&wq->tx_lock);
1152 rc = oce_multiq_transmit(ifp, NULL, wq);
1155 "TX[%d] restart failed\n", wq->queue_index);
1157 UNLOCK(&wq->tx_lock);
1185 LOCK(&sc->wq[def_q]->tx_lock);
1187 UNLOCK(&sc->wq[def_q]->tx_lock);
1190 sc->wq[def_q]->tx_stats.tx_stops ++;
1210 struct oce_wq *wq = (struct oce_wq *)arg;
1211 POCE_SOFTC sc = wq->parent;
1212 struct oce_cq *cq = wq->cq;
1222 wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1223 if (wq->ring->cidx >= wq->ring->num_items)
1224 wq->ring->cidx -= wq->ring->num_items;
1226 oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
1227 wq->tx_stats.tx_compl++;
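
The completion-side matches (source lines 1029-1068 and 1210-1227) show the reverse bookkeeping: oce_wq_handler() advances the ring's consumer index past the CQE's wqe_index with an explicit wrap, oce_tx_complete() retires the descriptor at pkt_desc_tail and returns nsegs + 1 ring entries, and oce_tx_restart() is invoked once the ring drops below half full. Below is a small sketch of that arithmetic under the same caveat as above: toy_wq and toy_tx_complete are hypothetical stand-ins for illustration, not the driver's structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OCE_WQ_PACKET_ARRAY_SIZE 64   /* hypothetical size, for illustration only */

/* Hypothetical stand-in for the per-WQ completion state. */
struct toy_wq {
	uint32_t pkt_desc_tail;   /* oldest in-flight packet descriptor */
	uint32_t cidx;            /* WQE ring consumer index */
	uint32_t num_items;       /* WQE ring size */
	uint32_t num_used;        /* WQEs currently outstanding */
};

/* Retire one completed packet that consumed nsegs fragment WQEs plus a
 * header WQE.  Returns true when the ring has drained below half full,
 * the point at which the driver would call oce_tx_restart(). */
static bool
toy_tx_complete(struct toy_wq *wq, uint32_t wqe_index, uint32_t nsegs)
{
	/* Consumer index follows the completed WQE, wrapping the same way
	 * wq->ring->cidx does in the listing. */
	wq->cidx = wqe_index + 1;
	if (wq->cidx >= wq->num_items)
		wq->cidx -= wq->num_items;

	/* Free the packet-descriptor slot and the header + fragment WQEs. */
	wq->pkt_desc_tail = (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE;
	wq->num_used -= nsegs + 1;

	return (wq->num_used < wq->num_items / 2);
}

int
main(void)
{
	struct toy_wq wq = { .num_items = 256, .num_used = 130 };

	if (toy_tx_complete(&wq, 255, 2))
		printf("ring below half full, restart transmit (used=%u)\n", wq.num_used);
	else
		printf("ring still busy (used=%u)\n", wq.num_used);
	return (0);
}
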
1245 oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1252 br = wq->br;
1253 queue_index = wq->queue_index;
1272 wq->tx_stats.tx_stops ++;
2031 oce_tx_restart(sc, sc->wq[i]);
2050 struct oce_wq *wq;
2058 for_all_wq_queues(sc, wq, i) {
2059 if (wq->ring->num_used) {
2093 for_all_wq_queues(sc, wq, i)
2094 oce_drain_wq_cq(wq);
2111 struct oce_wq *wq;
2126 for_all_wq_queues(sc, wq, i) {
2127 rc = oce_start_wq(wq);