• Home
  • History
  • Annotate
  • Raw
  • Download
  • only in /freebsd-13-stable/sys/dev/netmap/

Lines Matching refs:pq

198 static int	ptnet_drain_transmit_queue(struct ptnet_queue *pq,
227 static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget,
269 static inline void ptnet_kick(struct ptnet_queue *pq)
272 pq->stats.kicks ++;
274 bus_write_4(pq->sc->iomem, pq->kick, 0);
377 struct ptnet_queue *pq = sc->queues + i;
379 pq->sc = sc;
380 pq->kring_id = i;
381 pq->kick = PTNET_IO_KICK_BASE + 4 * i;
382 pq->atok = sc->csb_gh + i;
383 pq->ktoa = sc->csb_hg + i;
384 snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
386 mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
389 pq->kring_id -= num_tx_rings;
392 pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE,
393 M_DEVBUF, M_NOWAIT, &pq->lock);
394 if (pq->bufring == NULL) {
537 struct ptnet_queue *pq = sc->queues + i;
539 if (pq->taskq) {
540 taskqueue_drain(pq->taskq, &pq->task);
566 struct ptnet_queue *pq = sc->queues + i;
568 if (mtx_initialized(&pq->lock)) {
569 mtx_destroy(&pq->lock);
571 if (pq->bufring != NULL) {
572 buf_ring_free(pq->bufring, M_DEVBUF);
656 struct ptnet_queue *pq = sc->queues + i;
659 pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
661 if (pq->irq == NULL) {
671 struct ptnet_queue *pq = sc->queues + i;
677 err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
679 pq, &pq->cookie);
686 bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);
688 bus_bind_intr(sc->dev, pq->irq, cpu_cur);
697 struct ptnet_queue *pq = sc->queues + i;
700 TASK_INIT(&pq->task, 0, ptnet_tx_task, pq);
702 NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq);
704 pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
705 taskqueue_thread_enqueue, &pq->taskq);
706 taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
724 struct ptnet_queue *pq = sc->queues + i;
726 if (pq->taskq) {
727 taskqueue_free(pq->taskq);
728 pq->taskq = NULL;
731 if (pq->cookie) {
732 bus_teardown_intr(dev, pq->irq, pq->cookie);
733 pq->cookie = NULL;
736 if (pq->irq) {
737 bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
738 pq->irq = NULL;
792 struct ptnet_queue *pq;
803 pq = sc->queues + i;
806 PTNET_Q_LOCK(pq);
807 pq->atok->appl_need_kick = 0;
808 PTNET_Q_UNLOCK(pq);
810 if (pq->taskq) {
811 taskqueue_drain(pq->taskq,
812 &pq->task);
819 pq = sc->queues + i;
820 PTNET_Q_LOCK(pq);
821 pq->atok->appl_need_kick = 1;
822 PTNET_Q_UNLOCK(pq);
990 struct ptnet_queue *pq = sc->queues + i;
993 PTNET_Q_LOCK(pq);
994 if (pq->bufring) {
995 while ((m = buf_ring_dequeue_sc(pq->bufring))) {
999 PTNET_Q_UNLOCK(pq);
1029 struct ptnet_queue *pq = sc->queues + i;
1032 stats[idx].packets += pq->stats.packets;
1033 stats[idx].bytes += pq->stats.bytes;
1034 stats[idx].errors += pq->stats.errors;
1035 stats[idx].iqdrops += pq->stats.iqdrops;
1036 stats[idx].mcasts += pq->stats.mcasts;
1068 struct ptnet_queue *pq = sc->queues + i;
1069 struct ptnet_queue_stats cur = pq->stats;
1083 (cur.packets - pq->last_stats.packets),
1084 (cur.kicks - pq->last_stats.kicks),
1085 (cur.intrs - pq->last_stats.intrs));
1086 pq->last_stats = cur;
1182 struct ptnet_queue *pq;
1199 pq = sc->queues + i;
1200 pq->atok->appl_need_kick = 1;
1208 pq = sc->queues + i;
1209 pq->ktoa->kern_need_kick = 1;
1210 pq->atok->appl_need_kick =
1259 struct ptnet_queue *pq = sc->queues + kring->ring_id;
1262 notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags);
1264 ptnet_kick(pq);
1274 struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
1277 notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags);
1279 ptnet_kick(pq);
1292 struct ptnet_queue *pq = sc->queues + i;
1293 pq->atok->appl_need_kick = onoff;
1300 struct ptnet_queue *pq = opaque;
1301 struct ptnet_softc *sc = pq->sc;
1303 DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
1305 pq->stats.intrs ++;
1308 if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
1316 taskqueue_enqueue(pq->taskq, &pq->task);
1322 struct ptnet_queue *pq = opaque;
1323 struct ptnet_softc *sc = pq->sc;
1326 DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
1328 pq->stats.intrs ++;
1331 if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
1338 ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
1357 ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
1361 struct nm_csb_atok *atok = pq->atok;
1362 struct nm_csb_ktoa *ktoa = pq->ktoa;
1377 ptnet_kick(pq);
1388 ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
1391 struct ptnet_softc *sc = pq->sc;
1410 if (!PTNET_Q_TRYLOCK(pq)) {
1414 taskqueue_enqueue(pq->taskq, &pq->task);
1421 PTNET_Q_UNLOCK(pq);
1426 atok = pq->atok;
1427 ktoa = pq->ktoa;
1428 kring = na->tx_rings[pq->kring_id];
1467 mhead = drbr_peek(ifp, pq->bufring);
1494 pq->stats.errors ++;
1495 drbr_advance(ifp, pq->bufring);
1549 drbr_advance(ifp, pq->bufring);
1554 pq->stats.packets ++;
1555 pq->stats.bytes += mhead->m_pkthdr.len;
1557 pq->stats.mcasts ++;
1564 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
1570 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
1575 drbr_inuse(ifp, pq->bufring)));
1576 taskqueue_enqueue(pq->taskq, &pq->task);
1579 PTNET_Q_UNLOCK(pq);
1588 struct ptnet_queue *pq;
1611 pq = sc->queues + queue_idx;
1613 err = drbr_enqueue(ifp, pq->bufring, m);
1618 pq->stats.errors ++;
1628 err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
1689 ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
1691 struct ptnet_softc *sc = pq->sc;
1693 struct nm_csb_atok *atok = pq->atok;
1694 struct nm_csb_ktoa *ktoa = pq->ktoa;
1696 struct netmap_kring *kring = na->rx_rings[pq->kring_id];
1704 PTNET_Q_LOCK(pq);
1763 pq->stats.iqdrops ++;
1784 pq->stats.errors ++;
1811 pq->stats.errors ++;
1813 taskqueue_enqueue(pq->taskq,
1814 &pq->task);
1833 pq->stats.iqdrops ++;
1847 mhead->m_pkthdr.flowid = pq->kring_id;
1868 pq->stats.iqdrops ++;
1878 ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
1883 pq->stats.packets ++;
1884 pq->stats.bytes += mhead->m_pkthdr.len;
1886 PTNET_Q_UNLOCK(pq);
1888 PTNET_Q_LOCK(pq);
1890 * updated under pq lock by ptnet_ring_update().
1905 ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
1914 taskqueue_enqueue(pq->taskq, &pq->task);
1917 PTNET_Q_UNLOCK(pq);
1925 struct ptnet_queue *pq = context;
1927 DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
1928 ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
1934 struct ptnet_queue *pq = context;
1936 DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
1937 ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
1960 struct ptnet_queue *pq = sc->queues + i;
1970 rcnt += ptnet_drain_transmit_queue(pq,
1973 rcnt += ptnet_rx_eof(pq, queue_budget,