Lines matching refs:que

67 static inline u32 ixl_get_tx_head(struct ixl_queue *que);
164 struct ixl_queue *que;
190 que = &vsi->queues[i];
191 txr = &que->txr;
200 taskqueue_enqueue(que->tq, &que->tx_task);
208 struct ixl_queue *que = txr->que;
209 struct ixl_vsi *vsi = que->vsi;
220 if ((err = ixl_xmit(que, &next)) != 0) {
235 ixl_txeof(que);
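
The cluster at lines 208-235 is the locked transmit path: pull packets off the software ring, hand each to ixl_xmit(), stop on the first error, then call ixl_txeof() to reclaim finished descriptors. (The identifiers here — ixl_*, struct ixl_queue, struct i40e_tx_desc — place these matches in the FreeBSD ixl(4) driver's TX/RX code.) A minimal user-space sketch of that drain-then-reap shape, with a plain array standing in for the drbr(9) buf_ring the driver actually uses:

#include <stdio.h>

/* 0 means "accepted", matching the convention at line 220 where a
 * nonzero return from ixl_xmit() breaks the loop. */
static int
fake_xmit(int pkt)
{
	printf("xmit packet %d\n", pkt);
	return (0);
}

int
main(void)
{
	int pending[3] = { 11, 12, 13 };
	int err = 0;

	for (int i = 0; i < 3 && err == 0; i++)
		err = fake_xmit(pending[i]);

	/* cf. line 235: reclaim completed descriptors after the burst */
	printf("txeof: reclaim completed descriptors\n");
	return (0);
}
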
246 struct ixl_queue *que = arg;
247 struct tx_ring *txr = &que->txr;
248 struct ixl_vsi *vsi = que->vsi;
266 struct ixl_queue *que = &vsi->queues[i];
267 struct tx_ring *txr = &que->txr;
315 ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
317 struct ixl_vsi *vsi = que->vsi;
319 struct tx_ring *txr = &que->txr;
369 que->mbuf_defrag_failed++;
381 que->tx_dmamap_failed++;
387 que->tx_dmamap_failed++;
403 error = ixl_tx_setup_offload(que, m_head, &cmd, &off);
434 if (++i == que->num_tx_desc)
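
Line 434 shows the driver's descriptor-ring index idiom, repeated throughout the file (lines 896, 966, 995, 1009, 1084, 1117, 1134, 1189, 1255): post-increment, compare against the ring size, wrap to zero. A runnable sketch, with a hypothetical NUM_TX_DESC standing in for que->num_tx_desc:

#include <stdio.h>

#define NUM_TX_DESC 8	/* stand-in for que->num_tx_desc */

/* Advance a ring index exactly the way the matched lines do. */
static unsigned int
ring_next(unsigned int i)
{
	if (++i == NUM_TX_DESC)
		i = 0;
	return (i);
}

int
main(void)
{
	unsigned int i = NUM_TX_DESC - 2;

	/* Walk across the end of the ring: prints 6, 7, 0, 1. */
	for (int n = 0; n < 4; n++) {
		printf("descriptor index %u\n", i);
		i = ring_next(i);
	}
	return (0);
}
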
486 ixl_allocate_tx_data(struct ixl_queue *que)
488 struct tx_ring *txr = &que->txr;
489 struct ixl_vsi *vsi = que->vsi;
532 que->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
540 for (i = 0; i < que->num_tx_desc; i++, buf++) {
578 ixl_init_tx_ring(struct ixl_queue *que)
581 struct netmap_adapter *na = NA(que->vsi->ifp);
584 struct tx_ring *txr = &que->txr;
595 slot = netmap_reset(na, NR_TX, que->me, 0);
599 (sizeof(struct i40e_tx_desc)) * que->num_tx_desc);
610 for (int i = 0; i < que->num_tx_desc; i++, buf++) {
627 int si = netmap_idx_n2k(na->tx_rings[que->me], i);
636 txr->avail = que->num_tx_desc;
650 ixl_free_que_tx(struct ixl_queue *que)
652 struct tx_ring *txr = &que->txr;
655 INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
657 for (int i = 0; i < que->num_tx_desc; i++) {
681 INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
692 ixl_tx_setup_offload(struct ixl_queue *que,
710 tso = ixl_tso_setup(que, mp);
712 ++que->tso;
798 ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
800 struct tx_ring *txr = &que->txr;
881 que->mss_too_small++;
896 if (++idx == que->num_tx_desc)
910 ixl_get_tx_head(struct ixl_queue *que)
912 struct tx_ring *txr = &que->txr;
913 void *head = &txr->base[que->num_tx_desc];
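
Line 913 explains the head-writeback scheme: the ring is allocated with one extra descriptor-sized slot past the end, and the hardware writes the consumed-head index into that slot, so ixl_get_tx_head() simply dereferences &txr->base[que->num_tx_desc]. A user-space model of that layout, with fake_tx_desc as a size-only stand-in for struct i40e_tx_desc:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_TX_DESC 8	/* stand-in for que->num_tx_desc */

/* Size-only stand-in for struct i40e_tx_desc (two 64-bit words). */
struct fake_tx_desc {
	uint64_t buffer_addr;
	uint64_t cmd_type_offset_bsz;
};

int
main(void)
{
	/* Ring plus one extra slot for the hardware's head report. */
	struct fake_tx_desc *base = calloc(NUM_TX_DESC + 1, sizeof(*base));
	if (base == NULL)
		return (1);

	uint32_t *head = (uint32_t *)&base[NUM_TX_DESC];
	*head = 5;	/* pretend the NIC reported descriptor 5 done */
	printf("tx head: %u\n", *head);

	free(base);
	return (0);
}
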
924 ixl_txeof_hwb(struct ixl_queue *que)
926 struct tx_ring *txr = &que->txr;
935 if (netmap_tx_irq(que->vsi->ifp, que->me))
940 if (txr->avail == que->num_tx_desc) {
958 head = ixl_get_tx_head(que);
966 if (++last == que->num_tx_desc) last = 0;
995 if (++first == que->num_tx_desc)
1009 if (++last == que->num_tx_desc) last = 0;
1022 if (txr->avail == que->num_tx_desc) {
1043 ixl_txeof_dwb(struct ixl_queue *que)
1045 struct tx_ring *txr = &que->txr;
1054 if (txr->avail == que->num_tx_desc) {
1084 if (++last == que->num_tx_desc)
1117 if (++first == que->num_tx_desc)
1134 if (++last == que->num_tx_desc) last = 0;
1148 if (txr->avail == que->num_tx_desc) {
1157 ixl_txeof(struct ixl_queue *que)
1159 struct ixl_vsi *vsi = que->vsi;
1161 return (vsi->enable_head_writeback) ? ixl_txeof_hwb(que)
1162 : ixl_txeof_dwb(que);
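
Lines 1157-1162 show that ixl_txeof() is only a dispatcher: a per-VSI flag selects between the head-writeback and descriptor-writeback completion routines. The shape, as a sketch (the real functions take a struct ixl_queue *, and the bool return type here is an assumption):

#include <stdbool.h>
#include <stdio.h>

static bool
txeof_hwb(void)
{
	puts("head-writeback completion path");
	return (false);
}

static bool
txeof_dwb(void)
{
	puts("descriptor-writeback completion path");
	return (false);
}

/* Mirrors lines 1161-1162: one entry point, two strategies. */
static bool
txeof(bool enable_head_writeback)
{
	return (enable_head_writeback ? txeof_hwb() : txeof_dwb());
}

int
main(void)
{
	(void)txeof(true);
	(void)txeof(false);
	return (0);
}
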
1176 ixl_refresh_mbufs(struct ixl_queue *que, int limit)
1178 struct ixl_vsi *vsi = que->vsi;
1179 struct rx_ring *rxr = &que->rxr;
1189 if (++j == que->num_rx_desc)
1255 if (++j == que->num_rx_desc)
1274 ixl_allocate_rx_data(struct ixl_queue *que)
1276 struct rx_ring *rxr = &que->rxr;
1277 struct ixl_vsi *vsi = que->vsi;
1314 bsize = sizeof(struct ixl_rx_buf) * que->num_rx_desc;
1323 for (i = 0; i < que->num_rx_desc; i++) {
1365 ixl_init_rx_ring(struct ixl_queue *que)
1367 struct rx_ring *rxr = &que->rxr;
1368 struct ixl_vsi *vsi = que->vsi;
1377 struct netmap_adapter *na = NA(que->vsi->ifp);
1384 slot = netmap_reset(na, NR_RX, que->me, 0);
1387 rsize = roundup2(que->num_rx_desc *
1391 for (int i = 0; i < que->num_rx_desc; i++) {
1415 for (int j = 0; j != que->num_rx_desc; ++j) {
1428 int sj = netmap_idx_n2k(na->rx_rings[que->me], j);
1500 wr32(vsi->hw, rxr->tail, que->num_rx_desc - 1);
1510 if_printf(ifp, "queue %d: LRO Initialization failed!\n", que->me);
1513 INIT_DBG_IF(ifp, "queue %d: RX Soft LRO Initialized", que->me);
1534 ixl_free_que_rx(struct ixl_queue *que)
1536 struct rx_ring *rxr = &que->rxr;
1541 for (int i = 0; i < que->num_rx_desc; i++) {
1601 KASSERT(i < rxr->que->num_rx_desc, ("Descriptor index must be less than que->num_rx_desc"));
1701 ixl_rxeof(struct ixl_queue *que, int count)
1703 struct ixl_vsi *vsi = que->vsi;
1704 struct rx_ring *rxr = &que->rxr;
1716 if (netmap_rx_irq(ifp, que->me, &count)) {
1793 if (nextp == que->num_rx_desc)
1906 sendmp->m_pkthdr.flowid = que->msix;
1915 if (++i == que->num_rx_desc)
1931 ixl_refresh_mbufs(que, i);
1937 if (ixl_rx_unrefreshed(que))
1938 ixl_refresh_mbufs(que, i);
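
Lines 1931-1938 show ixl_rxeof() replenishing RX buffers after harvesting packets, either unconditionally or when ixl_rx_unrefreshed() says slots are still outstanding. The refresh walk itself (lines 1176-1255) runs from the first unrefreshed slot up to the last consumed one, wrapping at the ring size. A small model of that walk; the parameter names are illustrative, and replenishing a real mbuf is replaced by a printf:

#include <stdio.h>

#define NUM_RX_DESC 8	/* stand-in for que->num_rx_desc */

static int
refresh_mbufs(int next_refresh, int limit)
{
	int j = next_refresh;

	/* Stop at "limit", the slot the receive loop last consumed;
	 * wrap as lines 1189 and 1255 do. */
	while (j != limit) {
		printf("refresh rx slot %d\n", j);
		if (++j == NUM_RX_DESC)
			j = 0;
	}
	return (j);	/* becomes the new next_refresh */
}

int
main(void)
{
	/* Refresh across the wrap: slots 6, 7, 0, 1. */
	(void)refresh_mbufs(6, 2);
	return (0);
}
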
2102 snprintf(queue_namebuf, IXL_QUEUE_NAME_LEN, "que%d", q);
2231 struct ixl_queue *que;
2235 que = ((struct ixl_queue *)oidp->oid_arg1);
2236 if (!que) return 0;
2238 val = rd32(que->vsi->hw, que->txr.tail);
2253 struct ixl_queue *que;
2257 que = ((struct ixl_queue *)oidp->oid_arg1);
2258 if (!que) return 0;
2260 val = rd32(que->vsi->hw, que->rxr.tail);
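
Lines 2231-2260 are a pair of read-only sysctl handlers exporting the queues' tail registers. A reconstruction from the matched lines (kernel-side, so not standalone-compilable; the lines not shown in the matches are assumptions following the usual sysctl(9) handler shape):

static int
ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);

	/* Line 2238: read this queue's TX tail register. */
	val = rd32(que->vsi->hw, que->txr.tail);

	/* Export the value via sysctl(9); writes are ignored. */
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}

The RX variant at lines 2253-2260 is identical except that it reads que->rxr.tail.
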
2295 struct ixl_queue *que = vsi->queues;
2301 for (int i = 0; i < vsi->num_queues; i++, que++) {
2302 txr = &que->txr;
2322 "appears to be hung!\n", que->me);