Lines Matching defs:kring

287 *                kring->nm_sync() == DEVICE_netmap_txsync()
293 * kring->nm_sync() == DEVICE_netmap_rxsync()
302 * kring->nm_sync() == netmap_rxsync_from_host_compat
306 * kring->nm_sync() == netmap_txsync_to_host_compat
691 /* kring->nm_sync callback for the host tx ring */
693 netmap_txsync_to_host_compat(struct netmap_kring *kring, int flags)
696 netmap_txsync_to_host(kring->na);
700 /* kring->nm_sync callback for the host rx ring */
702 netmap_rxsync_from_host_compat(struct netmap_kring *kring, int flags)
705 netmap_rxsync_from_host(kring->na, NULL, NULL);
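The two host-ring callbacks matched above (source lines 691-705) are thin wrappers around the host-stack sync routines. A minimal reconstruction is below; only the inner calls appear in the listing, so the unused-flags cast and the `return 0` are assumptions about the omitted lines.

```c
/* Reconstruction of the wrappers at source lines 691-705. The inner
 * calls are as matched; the (void)flags cast and the return value are
 * assumptions (the listing shows only the matching lines). */
static int
netmap_txsync_to_host_compat(struct netmap_kring *kring, int flags)
{
        (void)flags;
        netmap_txsync_to_host(kring->na);
        return 0;
}

static int
netmap_rxsync_from_host_compat(struct netmap_kring *kring, int flags)
{
        (void)flags;
        netmap_rxsync_from_host(kring->na, NULL, NULL);
        return 0;
}
```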
719 * | | host tx kring
725 * | | host rx kring
740 struct netmap_kring *kring;
758 * but better be explicit on important kring fields.
762 kring = &na->tx_rings[i];
763 bzero(kring, sizeof(*kring));
764 kring->na = na;
765 kring->ring_id = i;
766 kring->nkr_num_slots = ndesc;
768 kring->nm_sync = na->nm_txsync;
770 kring->nm_sync = netmap_txsync_to_host_compat;
775 kring->rhead = kring->rcur = kring->nr_hwcur = 0;
776 kring->rtail = kring->nr_hwtail = ndesc - 1;
777 snprintf(kring->name, sizeof(kring->name) - 1, "%s TX%d", na->name, i);
779 kring->name, kring->rhead, kring->rcur, kring->rtail);
780 mtx_init(&kring->q_lock, "nm_txq_lock", NULL, MTX_DEF);
781 init_waitqueue_head(&kring->si);
786 kring = &na->rx_rings[i];
787 bzero(kring, sizeof(*kring));
788 kring->na = na;
789 kring->ring_id = i;
790 kring->nkr_num_slots = ndesc;
792 kring->nm_sync = na->nm_rxsync;
794 kring->nm_sync = netmap_rxsync_from_host_compat;
796 kring->rhead = kring->rcur = kring->nr_hwcur = 0;
797 kring->rtail = kring->nr_hwtail = 0;
798 snprintf(kring->name, sizeof(kring->name) - 1, "%s RX%d", na->name, i);
800 kring->name, kring->rhead, kring->rcur, kring->rtail);
801 mtx_init(&kring->q_lock, "nm_rxq_lock", NULL, MTX_DEF);
802 init_waitqueue_head(&kring->si);
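The matched fragments at source lines 740-802 come from the kring allocation path: each adapter gets one kring per hardware ring plus one extra for the host stack, and the host kring gets the compat wrapper as its nm_sync callback. A sketch stitching the TX half back together:

```c
/* TX half of the init loop, reassembled from the matched lines 762-781.
 * The loop bound (num_tx_rings + 1, the extra element being the host
 * ring) and the branch condition are assumptions inferred from the two
 * nm_sync assignments; kring, na, ndesc and i come from the enclosing
 * function, whose declarations the listing omits. */
for (i = 0; i < na->num_tx_rings + 1; i++) {
        kring = &na->tx_rings[i];
        bzero(kring, sizeof(*kring));
        kring->na = na;
        kring->ring_id = i;
        kring->nkr_num_slots = ndesc;
        if (i < na->num_tx_rings)
                kring->nm_sync = na->nm_txsync; /* hardware ring */
        else
                kring->nm_sync = netmap_txsync_to_host_compat; /* host ring */
        /* a fresh TX ring is fully writable: tail one slot behind head */
        kring->rhead = kring->rcur = kring->nr_hwcur = 0;
        kring->rtail = kring->nr_hwtail = ndesc - 1;
        snprintf(kring->name, sizeof(kring->name) - 1, "%s TX%d", na->name, i);
        mtx_init(&kring->q_lock, "nm_txq_lock", NULL, MTX_DEF);
        init_waitqueue_head(&kring->si);
}
```

The RX loop (lines 786-802) is identical except that both rtail and nr_hwtail start at 0: a fresh RX ring is empty rather than fully writable.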
831 struct netmap_kring *kring = na->tx_rings;
834 for ( ; kring != na->tailroom; kring++) {
835 mtx_destroy(&kring->q_lock);
836 netmap_knlist_destroy(&kring->si);
1113 * kring->nr_hwcur and ring->cur with NS_FORWARD still set are moved
1148 netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
1150 u_int const lim = kring->nkr_num_slots - 1;
1151 u_int const head = kring->ring->head;
1153 struct netmap_adapter *na = kring->na;
1155 for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) {
1157 struct netmap_slot *slot = &kring->ring->slot[n];
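netmap_grab_packets (lines 1148-1157) walks the TX ring from kring->nr_hwcur up to the user-visible head, copying slots marked NS_FORWARD into an mbuf queue. The standalone demo below shows the wraparound traversal driven by nm_next(); the macro mirrors the netmap inline helper, minus the branch-prediction hint.

```c
/* Standalone demo of the ring traversal at source line 1155:
 * nm_next(n, lim) advances an index and wraps at lim = num_slots - 1. */
#include <stdio.h>

#define nm_next(i, lim) (((i) == (lim)) ? 0 : (i) + 1)

int
main(void)
{
        unsigned const num_slots = 4, lim = num_slots - 1;
        unsigned hwcur = 2, head = 1; /* example: the scan wraps past slot 3 */
        unsigned n;

        for (n = hwcur; n != head; n = nm_next(n, lim))
                printf("visit slot %u\n", n); /* prints 2, 3, 0 */
        return 0;
}
```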
1178 * kring->nr_hwcur and kring->rhead
1179 * Called under kring->rx_queue.lock on the sw rx ring,
1184 struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
1185 struct netmap_slot *rxslot = kring->ring->slot;
1186 u_int i, rxcur = kring->nr_hwcur;
1187 u_int const head = kring->rhead;
1188 u_int const src_lim = kring->nkr_num_slots - 1;
1197 /* XXX do we trust ring or kring->rcur,rtail ? */
1237 struct netmap_kring *kring = &na->tx_rings[na->num_tx_rings];
1238 struct netmap_ring *ring = kring->ring;
1239 u_int const lim = kring->nkr_num_slots - 1;
1240 u_int const head = kring->rhead;
1250 netmap_grab_packets(kring, &q, 1 /* force */);
1252 kring->nr_hwcur = head;
1253 kring->nr_hwtail = head + lim;
1254 if (kring->nr_hwtail > lim)
1255 kring->nr_hwtail -= lim + 1;
1256 nm_txsync_finalize(kring);
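At source lines 1252-1255, once the host TX ring has consumed everything up to head, hwcur is set to head and hwtail to head + lim wrapped modulo the ring size, i.e. one slot behind head, so the entire ring becomes writable again. A standalone arithmetic check:

```c
/* Worked example of the wrap at source lines 1253-1255: hwtail ends up
 * lim slots ahead of head (mod num_slots), equivalently one slot behind
 * head, leaving all lim slots available. */
#include <stdio.h>

int
main(void)
{
        unsigned const num_slots = 8, lim = num_slots - 1;
        unsigned head, hwtail;

        for (head = 0; head < num_slots; head++) {
                hwtail = head + lim;
                if (hwtail > lim)          /* same test as the source */
                        hwtail -= lim + 1; /* i.e. (head + lim) % num_slots */
                printf("head %u -> hwtail %u\n", head, hwtail);
        }
        return 0;
}
```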
1264 * They have been put in kring->rx_queue by netmap_transmit().
1265 * We protect access to the kring using kring->rx_queue.lock
1278 struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
1279 struct netmap_ring *ring = kring->ring;
1281 u_int const lim = kring->nkr_num_slots - 1;
1282 u_int const head = kring->rhead;
1284 struct mbq *q = &kring->rx_queue;
1297 nm_i = kring->nr_hwtail;
1309 slot->flags = kring->nkr_slot_flags;
1313 kring->nr_hwtail = nm_i;
1319 nm_i = kring->nr_hwcur;
1321 if (netmap_fwd || kring->ring->flags & NR_FORWARD)
1323 kring->nr_hwcur = head;
1326 nm_rxsync_finalize(kring);
1328 /* access copies of cur,tail in the kring */
1329 if (kring->rcur == kring->rtail && td) /* no bufs available */
1330 OS_selrecord(td, &kring->si);
1533 * Returns ring->head if ok, or something >= kring->nkr_num_slots
1545 nm_txsync_prologue(struct netmap_kring *kring)
1547 struct netmap_ring *ring = kring->ring;
1550 u_int n = kring->nkr_num_slots;
1553 kring->name,
1554 kring->nr_hwcur, kring->nr_hwtail,
1556 #if 1 /* kernel sanity checks; but we can trust the kring. */
1557 if (kring->nr_hwcur >= n || kring->rhead >= n ||
1558 kring->rtail >= n || kring->nr_hwtail >= n)
1570 if (kring->rtail >= kring->rhead) {
1572 if (head < kring->rhead || head > kring->rtail)
1575 if (cur < head || cur > kring->rtail)
1579 if (head > kring->rtail && head < kring->rhead)
1583 if (head <= kring->rtail) {
1585 if (cur < head || cur > kring->rtail)
1589 if (cur > kring->rtail && cur < head)
1593 if (ring->tail != kring->rtail) {
1595 ring->tail, kring->rtail);
1596 ring->tail = kring->rtail;
1598 kring->rhead = head;
1599 kring->rcur = cur;
1603 RD(5, "%s kring error: hwcur %d rcur %d hwtail %d cur %d tail %d",
1604 kring->name,
1605 kring->nr_hwcur,
1606 kring->rcur, kring->nr_hwtail,
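nm_txsync_prologue bounds-checks the user-supplied head/cur against the kernel's saved rhead/rtail, with separate cases for a ring that wraps past slot 0 (source lines 1570-1589). A hedged user-space rendering of just that check, with the error paths reduced to a boolean:

```c
/* User-space rendering of the checks at source lines 1570-1589: head
 * must lie between rhead and rtail (accounting for wraparound) and cur
 * between head and rtail. Returns 1 if the pointers are consistent. */
#include <stdio.h>

static int
txsync_pointers_ok(unsigned rhead, unsigned rtail,
    unsigned head, unsigned cur)
{
        if (rtail >= rhead) {           /* ring does not wrap */
                if (head < rhead || head > rtail)
                        return 0;
                if (cur < head || cur > rtail)
                        return 0;
        } else {                        /* ring wraps past slot 0 */
                if (head > rtail && head < rhead)
                        return 0;
                if (head <= rtail) {
                        if (cur < head || cur > rtail)
                                return 0;
                } else {
                        if (cur > rtail && cur < head)
                                return 0;
                }
        }
        return 1;
}

int
main(void)
{
        printf("%d\n", txsync_pointers_ok(2, 6, 3, 5)); /* 1: in range */
        printf("%d\n", txsync_pointers_ok(2, 6, 7, 7)); /* 0: head past tail */
        printf("%d\n", txsync_pointers_ok(6, 2, 7, 1)); /* 1: wrapped case */
        return 0;
}
```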
1614 * Returns ring->head if ok, kring->nkr_num_slots on error.
1624 nm_rxsync_prologue(struct netmap_kring *kring)
1626 struct netmap_ring *ring = kring->ring;
1627 uint32_t const n = kring->nkr_num_slots;
1631 kring->name,
1632 kring->nr_hwcur, kring->nr_hwtail,
1641 cur = kring->rcur = ring->cur; /* read only once */
1642 head = kring->rhead = ring->head; /* read only once */
1644 if (kring->nr_hwcur >= n || kring->nr_hwtail >= n)
1648 if (kring->nr_hwtail >= kring->nr_hwcur) {
1650 if (head < kring->nr_hwcur || head > kring->nr_hwtail)
1653 if (cur < head || cur > kring->nr_hwtail)
1657 if (head < kring->nr_hwcur && head > kring->nr_hwtail)
1660 if (head <= kring->nr_hwtail) {
1662 if (cur < head || cur > kring->nr_hwtail)
1666 if (cur < head && cur > kring->nr_hwtail)
1670 if (ring->tail != kring->rtail) {
1672 kring->name,
1673 ring->tail, kring->rtail);
1674 ring->tail = kring->rtail;
1679 RD(5, "kring error: hwcur %d rcur %d hwtail %d head %d cur %d tail %d",
1680 kring->nr_hwcur,
1681 kring->rcur, kring->nr_hwtail,
1682 kring->rhead, kring->rcur, ring->tail);
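nm_rxsync_prologue (lines 1624-1682) runs the same two-case validation, but against the kernel's own nr_hwcur/nr_hwtail rather than the previously saved user pointers. For completeness, the matching check under the same assumptions as the TX sketch above; it drops into the same demo harness:

```c
/* RX variant of the pointer check, per source lines 1648-1666. */
int
rxsync_pointers_ok(unsigned hwcur, unsigned hwtail,
    unsigned head, unsigned cur)
{
        if (hwtail >= hwcur) {          /* ring does not wrap */
                if (head < hwcur || head > hwtail)
                        return 0;
                if (cur < head || cur > hwtail)
                        return 0;
        } else {                        /* ring wraps past slot 0 */
                if (head < hwcur && head > hwtail)
                        return 0;
                if (head <= hwtail) {
                        if (cur < head || cur > hwtail)
                                return 0;
                } else {
                        if (cur < head && cur > hwtail)
                                return 0;
                }
        }
        return 1;
}
```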
1700 netmap_ring_reinit(struct netmap_kring *kring)
1702 struct netmap_ring *ring = kring->ring;
1703 u_int i, lim = kring->nkr_num_slots - 1;
1707 RD(10, "called for %s", kring->name);
1709 kring->rhead = ring->head;
1710 kring->rcur = ring->cur;
1711 kring->rtail = ring->tail;
1726 } else if (len > NETMAP_BUF_SIZE(kring->na)) {
1734 kring->name,
1735 ring->cur, kring->nr_hwcur,
1736 ring->tail, kring->nr_hwtail);
1737 ring->head = kring->rhead = kring->nr_hwcur;
1738 ring->cur = kring->rcur = kring->nr_hwcur;
1739 ring->tail = kring->rtail = kring->nr_hwtail;
2196 struct netmap_kring *kring = krings + i;
2197 if (nm_kr_tryget(kring)) {
2204 i, kring->ring->cur,
2205 kring->nr_hwcur);
2206 if (nm_txsync_prologue(kring) >= kring->nkr_num_slots) {
2207 netmap_ring_reinit(kring);
2209 kring->nm_sync(kring, NAF_FORCE_RECLAIM);
2213 i, kring->ring->cur,
2214 kring->nr_hwcur);
2216 kring->nm_sync(kring, NAF_FORCE_READ);
2219 nm_kr_put(kring);
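Source lines 2196-2219 are the NIOCTXSYNC/NIOCRXSYNC ioctl path: take the kring busy-lock, validate the user pointers (TX only), run the nm_sync callback with a force flag, release. The sketch below stitches the matched lines into readable control flow:

```c
/* Control flow reconstructed from source lines 2196-2219. The loop
 * bounds (qfirst/qlast), 'cmd', 'error' and the skip-on-busy policy are
 * assumptions; the tryget/prologue/nm_sync/put sequence is as matched. */
for (i = qfirst; i < qlast; i++) {
        struct netmap_kring *kring = krings + i;

        if (nm_kr_tryget(kring)) {      /* ring busy or stopped */
                error = EBUSY;
                continue;
        }
        if (cmd == NIOCTXSYNC) {
                if (nm_txsync_prologue(kring) >= kring->nkr_num_slots)
                        netmap_ring_reinit(kring);      /* bad user pointers */
                else
                        kring->nm_sync(kring, NAF_FORCE_RECLAIM);
        } else {        /* NIOCRXSYNC */
                kring->nm_sync(kring, NAF_FORCE_READ);
        }
        nm_kr_put(kring);
}
```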
2288 struct netmap_kring *kring;
2358 kring = &na->rx_rings[i];
2359 /* XXX compare ring->cur and kring->tail */
2360 if (!nm_ring_empty(kring->ring)) {
2366 kring = &na->tx_rings[i];
2367 /* XXX compare ring->cur and kring->tail */
2368 if (!nm_ring_empty(kring->ring)) {
2392 kring = &na->tx_rings[i];
2393 if (!want_tx && kring->ring->cur == kring->nr_hwcur)
2396 if (nm_kr_tryget(kring)) {
2407 if (nm_txsync_prologue(kring) >= kring->nkr_num_slots) {
2408 netmap_ring_reinit(kring);
2411 if (kring->nm_sync(kring, 0))
2419 * of cur,tail in the kring.
2421 found = kring->rcur != kring->rtail;
2422 nm_kr_put(kring);
2448 kring = &na->rx_rings[i];
2450 if (nm_kr_tryget(kring)) {
2463 if (netmap_fwd || kring->ring->flags & NR_FORWARD) {
2465 kring->nr_hwcur, kring->ring->cur);
2466 netmap_grab_packets(kring, &q, netmap_fwd);
2469 if (kring->nm_sync(kring, 0))
2472 kring->ring->flags & NR_TIMESTAMP) {
2473 microtime(&kring->ring->ts);
2475 /* after an rxsync we can use kring->rcur, rtail */
2476 found = kring->rcur != kring->rtail;
2477 nm_kr_put(kring);
2487 kring = &na->rx_rings[na->num_rx_rings];
2489 && (netmap_fwd || kring->ring->flags & NR_FORWARD)) {
2490 /* XXX fix to use kring fields */
2491 if (nm_ring_empty(kring->ring))
2493 if (!nm_ring_empty(kring->ring))
2512 * kring->nr_hwcur and ring->head
2538 struct netmap_kring *kring;
2541 kring = na->tx_rings + n_ring;
2542 OS_selwakeup(&kring->si, PI_NET);
2550 kring = na->rx_rings + n_ring;
2551 OS_selwakeup(&kring->si, PI_NET);
2664 * kring N is for the host stack queue
2665 * kring N+1 is only used for the selinfo for all queues. // XXX still true ?
2821 struct netmap_kring *kring;
2837 kring = &na->rx_rings[na->num_rx_rings];
2838 q = &kring->rx_queue;
2854 space = kring->nr_hwtail - kring->nr_hwcur;
2856 space += kring->nkr_num_slots;
2857 if (space + mbq_len(q) >= kring->nkr_num_slots - 1) { // XXX
2859 na->name, kring->nr_hwcur, kring->nr_hwtail, mbq_len(q),
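The space test in netmap_transmit (source lines 2854-2857) counts the slots already occupied in the host RX kring (between nr_hwcur and nr_hwtail), adds the mbufs already backlogged in rx_queue, and drops the packet if the total would reach the usable capacity of num_slots - 1. A worked standalone example:

```c
/* Worked example of the space computation at source lines 2854-2857. */
#include <stdio.h>

int
main(void)
{
        int const num_slots = 8;
        int hwcur = 5, hwtail = 2;  /* example snapshot of the kring */
        int queued = 3;             /* mbq_len(&kring->rx_queue) */
        int space = hwtail - hwcur; /* slots already holding packets */

        if (space < 0)
                space += num_slots; /* here: -3 + 8 = 5 occupied */
        if (space + queued >= num_slots - 1)
                printf("queue full, drop\n"); /* 5 + 3 >= 7 -> drop */
        else
                printf("enqueue\n");
        return 0;
}
```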
2887 * a ring. The driver is in charge of locking to protect the kring.
2894 struct netmap_kring *kring;
2906 * - set a RESET flag somewhere in the kring
2913 kring = na->tx_rings + n;
2915 new_hwofs = kring->nr_hwcur - new_cur;
2919 kring = na->rx_rings + n;
2920 new_hwofs = kring->nr_hwtail - new_cur;
2922 lim = kring->nkr_num_slots - 1;
2931 kring->nkr_hwofs, new_hwofs,
2932 kring->nr_hwtail,
2933 tx == NR_TX ? lim : kring->nr_hwtail);
2934 kring->nkr_hwofs = new_hwofs;
2936 kring->nr_hwtail = kring->nr_hwcur + lim;
2937 if (kring->nr_hwtail > lim)
2938 kring->nr_hwtail -= lim + 1;
2957 return kring->ring->slot;
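netmap_reset (lines 2887-2957) re-bases a kring after a device reinit: new_hwofs records the distance between the kernel's pointer (nr_hwcur for TX, nr_hwtail for RX) and the driver's restart position, so user-visible slot indices stay stable, and on TX the whole ring is made writable again. The demo reproduces the arithmetic; the example values keep the signed subtraction non-negative (the kernel fields are unsigned):

```c
/* Worked example of the offset update at source lines 2913-2938. */
#include <stdio.h>

int
main(void)
{
        int const num_slots = 8, lim = num_slots - 1;
        int nr_hwcur = 5, new_cur = 2;
        int new_hwofs = nr_hwcur - new_cur; /* TX case: 3 */
        int nr_hwtail;

        if (new_hwofs > lim)
                new_hwofs -= lim + 1;   /* wrap into [0, lim] */
        nr_hwtail = nr_hwcur + lim;     /* TX: whole ring writable again */
        if (nr_hwtail > lim)
                nr_hwtail -= lim + 1;   /* 5 + 7 - 8 = 4 */
        printf("hwofs %d hwtail %d\n", new_hwofs, nr_hwtail);
        return 0;
}
```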
2979 struct netmap_kring *kring;
2990 kring = na->rx_rings + q;
2991 kring->nr_kflags |= NKR_PENDINTR; // XXX atomic ?
2997 kring = na->tx_rings + q;