Lines Matching refs:na

163  * 	netmap_adapter (fields na->nm_txsync and na->nm_rxsync).  Then, they
286 * na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach()
293 * na->nm_notify() == netmap_notify()
299 * na->nm_notify() == netmap_notify()
304 * na->nm_notify == netmap_notify()
307 * netmap_rxsync_from_host(na, NULL, NULL)
311 * netmap_txsync_to_host(na)
313 * FreeBSD: na->if_input() == ether_input()
319 * na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach()
329 * FreeBSD: na->if_transmit() == orig. dev if_transmit
331 * na->nm_notify() == netmap_notify()
339 * na->nm_notify() == netmap_notify()
347 * na->nm_notify() == netmap_notify()
362 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
368 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
375 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
381 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
395 * na->nm_notify() == netmap_notify()
399 * na->nm_notify() == netmap_bwrap_notify()
410 * na->nm_notify() == netmap_bwrap_notify()
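
The dispatch chains above determine which callbacks each adapter type ends up with. As a hedged sketch (a hypothetical foo(4) driver and softc; netmap_attach() and the netmap_adapter fields are the real API), a native driver supplies only the hardware-facing callbacks and lets netmap fill in the rest:

    static int
    foo_netmap_init(struct foo_softc *sc)     /* hypothetical driver */
    {
            struct netmap_adapter na;

            bzero(&na, sizeof(na));
            na.ifp = sc->ifp;                 /* backing ifnet */
            na.num_tx_desc = FOO_NTXD;        /* ring geometry (illustrative) */
            na.num_rx_desc = FOO_NRXD;
            na.num_tx_rings = na.num_rx_rings = sc->nqueues;
            na.nm_txsync = foo_netmap_txsync; /* becomes na->nm_txsync above */
            na.nm_rxsync = foo_netmap_rxsync; /* becomes na->nm_rxsync above */
            na.nm_register = foo_netmap_reg;  /* netmap mode on/off */
            /* nm_notify left NULL: netmap_attach_common() defaults it
             * to netmap_notify(), exactly as the chains above show. */
            return netmap_attach(&na);
    }
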
602 netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped)
605 netmap_disable_ring(NMR(na, t)[ring_id], stopped);
607 NMR(na, t)[ring_id]->nkr_stopped = 0;
611 /* stop or enable all the rings of na */
613 netmap_set_all_rings(struct netmap_adapter *na, int stopped)
618 if (!nm_netmap_on(na))
622 nm_prinf("%s: %sable all rings", na->name,
626 for (i = 0; i < netmap_real_rings(na, t); i++) {
627 netmap_set_ring(na, i, t, stopped);
663 struct netmap_adapter *na = NA(ifp);
664 netmap_set_all_rings(na, NM_KR_LOCKED);
665 na->na_flags |= NAF_ZOMBIE;
666 netmap_set_all_rings(na, 0);
674 struct netmap_adapter *na = NA(ifp);
675 if (na->na_flags & NAF_ZOMBIE) {
676 netmap_set_all_rings(na, NM_KR_LOCKED);
677 na->na_flags &= ~NAF_ZOMBIE;
678 netmap_set_all_rings(na, 0);
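
netmap_make_zombie() and netmap_undo_zombie() above show the canonical bracket: lock every kring with NM_KR_LOCKED, change state, then re-enable. Drivers reach the same primitive through the exported netmap_disable_all_rings()/netmap_enable_all_rings() wrappers; a minimal sketch (foo_reinit_hw() is hypothetical):

    static void
    foo_reset(struct foo_softc *sc)          /* hypothetical */
    {
            if_t ifp = sc->ifp;

            netmap_disable_all_rings(ifp);  /* krings -> NM_KR_LOCKED */
            foo_reinit_hw(sc);              /* no txsync/rxsync can run now */
            netmap_enable_all_rings(ifp);   /* krings -> enabled again */
    }
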
761 netmap_update_config(struct netmap_adapter *na)
765 if (na->ifp && !nm_is_bwrap(na)) {
766 strlcpy(na->name, if_name(na->ifp), sizeof(na->name));
770 if (na->nm_config == NULL ||
771 na->nm_config(na, &info)) {
773 info.num_tx_rings = na->num_tx_rings;
774 info.num_tx_descs = na->num_tx_desc;
775 info.num_rx_rings = na->num_rx_rings;
776 info.num_rx_descs = na->num_rx_desc;
777 info.rx_buf_maxsize = na->rx_buf_maxsize;
780 if (na->num_tx_rings == info.num_tx_rings &&
781 na->num_tx_desc == info.num_tx_descs &&
782 na->num_rx_rings == info.num_rx_rings &&
783 na->num_rx_desc == info.num_rx_descs &&
784 na->rx_buf_maxsize == info.rx_buf_maxsize)
786 if (na->active_fds == 0) {
787 na->num_tx_rings = info.num_tx_rings;
788 na->num_tx_desc = info.num_tx_descs;
789 na->num_rx_rings = info.num_rx_rings;
790 na->num_rx_desc = info.num_rx_descs;
791 na->rx_buf_maxsize = info.rx_buf_maxsize;
795 na->name, na->num_tx_rings, na->num_tx_desc,
796 na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize);
801 na->name, info.num_tx_rings, info.num_tx_descs,
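
netmap_update_config() above queries the driver through na->nm_config() and commits the answer only while active_fds == 0. A hedged sketch of the driver side (the nm_config_info fields are the ones consumed above; the softc fields are illustrative):

    static int
    foo_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
    {
            struct foo_softc *sc = if_getsoftc(na->ifp);

            info->num_tx_rings = sc->ntxq;
            info->num_rx_rings = sc->nrxq;
            info->num_tx_descs = sc->ntxd;
            info->num_rx_descs = sc->nrxd;
            info->rx_buf_maxsize = sc->rx_buf_size;
            return 0;  /* nonzero makes netmap keep the current na values */
    }
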
823 * na->tx_rings ----->|          | \
824 *                    |          |  } na->num_tx_rings
828 * na->rx_rings ----> +----------+
830 *                    |          |  } na->num_rx_rings
835 * na->tailroom ----->|          | \
845 netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
853 if (na->tx_rings != NULL) {
860 n[NR_TX] = netmap_all_rings(na, NR_TX);
861 n[NR_RX] = netmap_all_rings(na, NR_RX);
867 na->tx_rings = nm_os_malloc((size_t)len);
868 if (na->tx_rings == NULL) {
872 na->rx_rings = na->tx_rings + n[NR_TX];
873 na->tailroom = na->rx_rings + n[NR_RX];
876 kring = (struct netmap_kring *)((char *)na->tailroom + tailroom);
878 na->tx_rings[i] = kring;
887 ndesc = nma_get_ndesc(na, t);
889 kring = NMR(na, t)[i];
891 kring->notify_na = na;
897 if (i < nma_get_nrings(na, t)) {
898 kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync);
899 kring->nm_bufcfg = na->nm_bufcfg;
903 if (!(na->na_flags & NAF_HOST_RINGS))
910 kring->nm_notify = na->nm_notify;
916 snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name,
922 netmap_krings_delete(na);
926 kring->na = na; /* setting this field marks the mutex as initialized */
928 err = nm_os_selinfo_init(&na->si[t], na->name);
930 netmap_krings_delete(na);
942 netmap_krings_delete(struct netmap_adapter *na)
944 struct netmap_kring **kring = na->tx_rings;
947 if (na->tx_rings == NULL) {
954 nm_os_selinfo_uninit(&na->si[t]);
957 for ( ; kring != na->tailroom; kring++) {
958 if ((*kring)->na != NULL)
962 nm_os_free(na->tx_rings);
963 na->tx_rings = na->rx_rings = na->tailroom = NULL;
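
netmap_krings_create() above carves the tx/rx kring pointer arrays, the kring structures themselves, and the caller-requested tailroom out of a single nm_os_malloc() allocation, so netmap_krings_delete() can release everything with one nm_os_free(). Derived adapters that need private per-adapter state typically request it as tailroom; a sketch (struct foo_extra is hypothetical):

    static int
    foo_krings_create(struct netmap_adapter *na)
    {
            /* the extra bytes land at na->tailroom, same allocation */
            return netmap_krings_create(na, sizeof(struct foo_extra));
    }

    static void
    foo_krings_delete(struct netmap_adapter *na)
    {
            netmap_krings_delete(na);  /* frees krings and tailroom together */
    }
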
974 netmap_hw_krings_delete(struct netmap_adapter *na)
976 u_int lim = netmap_real_rings(na, NR_RX), i;
978 for (i = nma_get_nrings(na, NR_RX); i < lim; i++) {
979 struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue;
984 netmap_krings_delete(na);
988 netmap_mem_restore(struct netmap_adapter *na)
990 if (na->nm_mem_prev) {
991 netmap_mem_put(na->nm_mem);
992 na->nm_mem = na->nm_mem_prev;
993 na->nm_mem_prev = NULL;
998 netmap_mem_drop(struct netmap_adapter *na)
1000 netmap_mem_deref(na->nm_mem, na);
1002 if (na->active_fds <= 0) {
1006 netmap_mem_restore(na);
1011 netmap_update_hostrings_mode(struct netmap_adapter *na)
1018 for (i = nma_get_nrings(na, t);
1019 i < netmap_real_rings(na, t); i++) {
1020 kring = NMR(na, t)[i];
1037 struct netmap_adapter *na = priv->np_na;
1040 na->active_fds--;
1047 if (na->active_fds <= 0) {
1051 netmap_monitor_stop(na);
1055 if (na->active_fds <= 0 || nm_kring_pending(priv)) {
1056 netmap_set_all_rings(na, NM_KR_LOCKED);
1057 na->nm_register(na, 0);
1058 netmap_set_all_rings(na, 0);
1062 netmap_mem_rings_delete(na);
1064 if (na->active_fds <= 0) { /* last instance */
1080 nm_prinf("deleting last instance for %s", na->name);
1082 if (nm_netmap_on(na)) {
1086 na->nm_krings_delete(na);
1089 if (na->na_flags & NAF_HOST_RINGS) {
1090 na->num_host_tx_rings = 1;
1091 na->num_host_rx_rings = 1;
1093 na->num_host_tx_rings = 0;
1094 na->num_host_rx_rings = 0;
1101 netmap_mem_if_delete(na, priv->np_nifp);
1103 netmap_mem_drop(na);
1134 struct netmap_adapter *na = priv->np_na;
1141 if (na) {
1144 netmap_unget_na(na, priv->np_ifp);
1235 struct netmap_adapter *na = kring->na;
1243 if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) {
1249 m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL);
1261 kring->na->na_flags & NAF_HOST_RINGS &&
1269 kring->ring_id != kring->na->num_rx_rings;
1277 kring->ring_id == kring->na->num_rx_rings;
1293 netmap_sw_to_nic(struct netmap_adapter *na)
1295 struct netmap_kring *kring = na->rx_rings[na->num_rx_rings];
1303 for (i = 0; i < na->num_tx_rings; i++) {
1304 struct netmap_kring *kdst = na->tx_rings[i];
1348 struct netmap_adapter *na = kring->na;
1364 netmap_send_up(na->ifp, &q);
1381 struct netmap_adapter *na = kring->na;
1405 m_copydata(m, 0, len, NMB(na, slot));
1408 nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));
1424 ret = netmap_sw_to_nic(na);
1445 * with *na containing the netmap adapter found.
1446 * Otherwise return an error code, with *na containing NULL.
1451 * then we unconditionally return the existing adapter into *na.
1452 * In all the other cases, we return (into *na) either native,
1467 netmap_get_hw_na(if_t ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na)
1474 *na = NULL; /* default */
1497 *na = prev_na;
1526 *na = NA(ifp);
1529 if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) &&
1530 (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) {
1531 (*na)->nm_mem_prev = (*na)->nm_mem;
1532 (*na)->nm_mem = netmap_mem_get(nmd);
1557 struct netmap_adapter **na, if_t *ifp,
1565 *na = NULL; /* default return value */
1593 * All netmap_get_*_na() functions return an error and an na,
1596 * error    na
1598 *    !0    NULL     type matches, but na creation/lookup failed
1599 *     0   !NULL     type matches and na created/found
1602 error = netmap_get_null_na(hdr, na, nmd, create);
1603 if (error || *na != NULL)
1607 error = netmap_get_monitor_na(hdr, na, nmd, create);
1608 if (error || *na != NULL)
1612 error = netmap_get_pipe_na(hdr, na, nmd, create);
1613 if (error || *na != NULL)
1617 error = netmap_get_vale_na(hdr, na, nmd, create);
1621 if (*na != NULL) /* valid match in netmap_get_bdg_na() */
1625 * This must be a hardware na, lookup the name in the system.
1640 *na = ret;
1647 if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) {
1649 (*na)->num_host_tx_rings = req->nr_host_tx_rings;
1651 (*na)->num_host_rx_rings = req->nr_host_rx_rings;
1653 nm_prdis("%s: host tx %d rx %u", (*na)->name, (*na)->num_host_tx_rings,
1654 (*na)->num_host_rx_rings);
1673 netmap_unget_na(struct netmap_adapter *na, if_t ifp)
1677 if (na)
1678 netmap_adapter_put(na);
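
netmap_get_na() and netmap_unget_na() above always come in pairs: the lookup takes a reference on the adapter (and, for hardware ports, on the ifp), and the unget releases both. A sketch of the calling convention used by the ioctl handlers further down (hdr is an already-validated struct nmreq_header):

    struct netmap_adapter *na = NULL;
    if_t ifp = NULL;
    int error;

    error = netmap_get_na(hdr, &na, &ifp, NULL /* nmd */, 0 /* no create */);
    if (error)
            return error;
    /* ... na cannot disappear while we hold the reference ... */
    netmap_unget_na(na, ifp);
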
1858 if (idx < 2 || idx >= kring->na->na_lut.objtotal) {
1862 } else if (len > NETMAP_BUF_SIZE(kring->na)) {
1890 struct netmap_adapter *na = priv->np_na;
1907 priv->np_qlast[t] = nma_get_nrings(na, t);
1913 if (!(na->na_flags & NAF_HOST_RINGS)) {
1918 nma_get_nrings(na, t) : 0);
1919 priv->np_qlast[t] = netmap_all_rings(na, t);
1925 if (nr_ringid >= na->num_tx_rings &&
1926 nr_ringid >= na->num_rx_rings) {
1932 if (j >= nma_get_nrings(na, t))
1940 if (!(na->na_flags & NAF_HOST_RINGS)) {
1944 if (nr_ringid >= na->num_host_tx_rings &&
1945 nr_ringid >= na->num_host_rx_rings) {
1951 if (j >= nma_get_host_nrings(na, t))
1953 priv->np_qfirst[t] = nma_get_nrings(na, t) + j;
1954 priv->np_qlast[t] = nma_get_nrings(na, t) + j + 1;
1968 priv->np_qlast[NR_TX] >= na->num_tx_rings) {
1974 na->name,
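
Pulling the checks above together, the ring-id interpretation resolves each registration mode to a [np_qfirst, np_qlast) range per direction; a hedged reconstruction from the fragments shown:

    /*
     * NR_REG_ALL_NIC:  [0, nma_get_nrings(na, t))            all hw rings
     * NR_REG_NIC_SW:   [0, netmap_all_rings(na, t))          hw + host rings
     * NR_REG_SW:       [nma_get_nrings(na, t),
     *                    netmap_all_rings(na, t))            host rings only
     * NR_REG_ONE_NIC:  [j, j + 1), j = nr_ringid             one hw ring
     * NR_REG_ONE_SW:   [nma_get_nrings(na, t) + j,
     *                    nma_get_nrings(na, t) + j + 1)      one host ring
     */
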
1992 struct netmap_adapter *na = priv->np_na;
2011 na->si_users[t]++;
2019 struct netmap_adapter *na = priv->np_na;
2024 na->si_users[t]--;
2053 struct netmap_adapter *na = priv->np_na;
2061 na->name,
2106 na->name,
2232 * @return EINVAL if the na cannot operate at the given mtu, 0 otherwise.
2235 netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu) {
2236 unsigned nbs = NETMAP_BUF_SIZE(na);
2238 if (mtu <= na->rx_buf_maxsize) {
2254 if (!(na->na_flags & NAF_MOREFRAG)) {
2258 if_name(na->ifp));
2260 } else if (nbs < na->rx_buf_maxsize) {
2263 ">= %u", if_name(na->ifp),
2264 na->rx_buf_maxsize);
2271 if_name(na->ifp), mtu, nbs);
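
A worked instance of the validation above, with illustrative numbers: an adapter advertising rx_buf_maxsize = 2048 can enter netmap mode at mtu = 9000 only if it sets NAF_MOREFRAG (so one packet may span several slots) and NETMAP_BUF_SIZE(na) is at least 2048; at mtu = 1500 a single default-sized netmap buffer already holds a full packet and no multi-slot support is needed.
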
2284 struct netmap_adapter *na = priv->np_na;
2297 if (!(na->na_flags & NAF_OFFSETS)) {
2300 na->name);
2327 if (max_offset > NETMAP_BUF_SIZE(na)) {
2330 (unsigned long long)max_offset, NETMAP_BUF_SIZE(na));
2353 struct netmap_kring *kring = NMR(na, t)[i];
2421 struct netmap_adapter *na = priv->np_na;
2442 target = NETMAP_BUF_SIZE(kring->na) -
2456 NETMAP_BUF_SIZE(kring->na);
2467 if (!(na->na_flags & NAF_MOREFRAG)) {
2471 na->name);
2493 * The following na callbacks are called in the process:
2495 * na->nm_config() [by netmap_update_config]
2503 * na->nm_krings_create()
2527 * na->nm_register(, 1)
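
Condensing the callback sequence the comment above documents, and the netmap_do_regif() code below implements (error unwinding omitted):

    struct netmap_if *nifp;

    netmap_mem_finalize(na->nm_mem, na);  /* allocator ready, lut cached */
    netmap_update_config(na);             /* -> na->nm_config() */
    na->nm_krings_create(na);             /* kring array + tailroom */
    netmap_mem_rings_create(na);          /* netmap rings and buffers */
    nifp = netmap_mem_if_new(na, priv);   /* netmap_if shared with userspace */
    na->nm_register(na, 1);               /* enter netmap mode */
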
2559 netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
2566 priv->np_na = na; /* store the reference */
2567 error = netmap_mem_finalize(na->nm_mem, na);
2571 if (na->active_fds == 0) {
2573 /* cache the allocator info in the na */
2574 error = netmap_mem_get_lut(na->nm_mem, &na->na_lut);
2577 nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
2578 na->na_lut.objsize);
2581 netmap_update_config(na);
2589 if (na->active_fds == 0) {
2595 if (na->ifp && nm_priv_rx_enabled(priv)) {
2597 unsigned mtu = nm_os_ifnet_mtu(na->ifp);
2600 na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na));
2602 if (na->rx_buf_maxsize == 0) {
2603 nm_prerr("%s: error: rx_buf_maxsize == 0", na->name);
2608 error = netmap_buf_size_validate(na, mtu);
2617 error = na->nm_krings_create(na);
2632 error = netmap_mem_rings_create(na);
2647 nifp = netmap_mem_if_new(na, priv);
2656 netmap_set_all_rings(na, NM_KR_LOCKED);
2657 error = na->nm_register(na, 1);
2658 netmap_set_all_rings(na, 0);
2664 na->active_fds++;
2677 netmap_mem_if_delete(na, nifp);
2680 netmap_mem_rings_delete(na);
2682 if (na->active_fds == 0)
2683 na->nm_krings_delete(na);
2685 if (na->active_fds == 0)
2686 memset(&na->na_lut, 0, sizeof(na->na_lut));
2688 netmap_mem_drop(na);
2744 struct netmap_adapter *na = NULL;
2822 error = netmap_get_na(hdr, &na, &ifp, nmd,
2826 if (NETMAP_OWNED_BY_KERN(na)) {
2831 if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) {
2833 "not accept it", na->virt_hdr_len);
2838 error = netmap_do_regif(priv, na, hdr);
2858 req->nr_rx_rings = na->num_rx_rings;
2859 req->nr_tx_rings = na->num_tx_rings;
2860 req->nr_rx_slots = na->num_rx_desc;
2861 req->nr_tx_slots = na->num_tx_desc;
2862 req->nr_host_tx_rings = na->num_host_tx_rings;
2863 req->nr_host_rx_rings = na->num_host_rx_rings;
2864 error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags,
2875 &na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si;
2882 req->nr_extra_bufs = netmap_extra_alloc(na,
2889 req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp);
2901 netmap_unget_na(na, ifp);
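
For reference, the userspace side of the register path handled above; a hedged sketch against the nmreq API (error handling trimmed, "em0"-style interface name and the function name are illustrative):

    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/netmap.h>
    #include <net/netmap_user.h>

    static int
    netmap_register_example(const char *ifname)
    {
            struct nmreq_header hdr;
            struct nmreq_register req;
            int fd = open("/dev/netmap", O_RDWR);

            if (fd < 0)
                    return -1;
            memset(&hdr, 0, sizeof(hdr));
            memset(&req, 0, sizeof(req));
            hdr.nr_version = NETMAP_API;
            hdr.nr_reqtype = NETMAP_REQ_REGISTER;
            strlcpy(hdr.nr_name, ifname, sizeof(hdr.nr_name));
            hdr.nr_body = (uintptr_t)&req;
            req.nr_mode = NR_REG_ALL_NIC;     /* all hw rings, see above */
            if (ioctl(fd, NIOCCTRL, &hdr) < 0)
                    return -1;
            /* the handler above has filled req.nr_{tx,rx}_rings,
             * req.nr_{tx,rx}_slots, req.nr_memsize and req.nr_offset,
             * ready for the subsequent mmap() of the shared region */
            return fd;
    }
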
2938 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2942 na = NULL;
2946 nmd = na->nm_mem; /* get memory allocator */
2964 if (na == NULL) /* only memory info */
2966 netmap_update_config(na);
2967 req->nr_rx_rings = na->num_rx_rings;
2968 req->nr_tx_rings = na->num_tx_rings;
2969 req->nr_rx_slots = na->num_rx_desc;
2970 req->nr_tx_slots = na->num_tx_desc;
2971 req->nr_host_tx_rings = na->num_host_tx_rings;
2972 req->nr_host_rx_rings = na->num_host_rx_rings;
2974 netmap_unget_na(na, ifp);
3014 error = netmap_get_vale_na(hdr, &na, NULL, 0);
3017 if (na && !error) {
3019 (struct netmap_vp_adapter *)na;
3020 na->virt_hdr_len = req->nr_hdr_len;
3021 if (na->virt_hdr_len) {
3022 vpna->mfs = NETMAP_BUF_SIZE(na);
3025 nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na);
3026 netmap_adapter_put(na);
3027 } else if (!na) {
3048 error = netmap_get_na(hdr, &na, &ifp, NULL, 0);
3051 if (na && !error) {
3052 req->nr_hdr_len = na->virt_hdr_len;
3054 netmap_unget_na(na, ifp);
3096 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
3100 na = NULL;
3104 nmd = na->nm_mem; /* grab the memory allocator */
3112 error = netmap_mem_finalize(nmd, na);
3117 netmap_mem_drop(na);
3119 netmap_unget_na(na, ifp);
3176 na = priv->np_na; /* we have a reference */
3180 krings = NMR(na, t);
3225 netmap_send_up(na->ifp, &q);
3656 struct netmap_adapter *na;
3688 na = priv->np_na;
3690 if (unlikely(!nm_netmap_on(na)))
3699 nm_prinf("device %s events 0x%x", na->name, events);
3729 kring = NMR(na, t)[i];
3744 kring = NMR(na, t)[i];
3785 kring = na->tx_rings[i];
3847 kring = na->rx_rings[i];
3909 netmap_send_up(na->ifp, &q);
3918 nma_intr_enable(struct netmap_adapter *na, int onoff)
3925 for (i = 0; i < nma_get_nrings(na, t); i++) {
3926 struct netmap_kring *kring = NMR(na, t)[i];
3944 if (!na->nm_intr) {
3946 na->name);
3950 na->nm_intr(na, onoff);
3962 struct netmap_adapter *na = kring->notify_na;
3970 if (na->si_users[t] > 0)
3971 nm_os_selwakeup(&na->si[t]);
3981 netmap_attach_common(struct netmap_adapter *na)
3983 if (!na->rx_buf_maxsize) {
3985 na->rx_buf_maxsize = PAGE_SIZE;
3989 if (na->na_flags & NAF_HOST_RINGS && na->ifp) {
3990 na->if_input = if_getinputfn(na->ifp); /* for netmap_send_up */
3992 na->pdev = na; /* make sure netmap_mem_map() is called */
3994 if (na->na_flags & NAF_HOST_RINGS) {
3995 if (na->num_host_rx_rings == 0)
3996 na->num_host_rx_rings = 1;
3997 if (na->num_host_tx_rings == 0)
3998 na->num_host_tx_rings = 1;
4000 if (na->nm_krings_create == NULL) {
4005 na->nm_krings_create = netmap_hw_krings_create;
4006 na->nm_krings_delete = netmap_hw_krings_delete;
4008 if (na->nm_notify == NULL)
4009 na->nm_notify = netmap_notify;
4010 na->active_fds = 0;
4012 if (na->nm_mem == NULL) {
4014 na->nm_mem = netmap_mem_get_iommu(na);
4016 if (na->nm_bdg_attach == NULL)
4020 na->nm_bdg_attach = netmap_default_bdg_attach;
4027 * nm_iszombie(na) means that the driver module has been
4033 netmap_hw_reg(struct netmap_adapter *na, int onoff)
4036 (struct netmap_hw_adapter*)na;
4041 if (nm_iszombie(na)) {
4044 } else if (na != NULL) {
4045 na->na_flags &= ~NAF_NETMAP_ON;
4050 error = hwna->nm_hw_register(na, onoff);
4059 netmap_hw_dtor(struct netmap_adapter *na)
4061 if (na->ifp == NULL)
4064 NM_DETACH_NA(na->ifp);
4141 nm_prerr("fail, arg %p ifp %p na %p", arg, ifp, hwna);
4155 NM_DBG(netmap_adapter_get)(struct netmap_adapter *na)
4157 if (!na) {
4161 refcount_acquire(&na->na_refcount);
4167 NM_DBG(netmap_adapter_put)(struct netmap_adapter *na)
4169 if (!na)
4172 if (!refcount_release(&na->na_refcount))
4175 if (na->nm_dtor)
4176 na->nm_dtor(na);
4178 if (na->tx_rings) { /* XXX should not happen */
4181 na->nm_krings_delete(na);
4183 netmap_pipe_dealloc(na);
4184 if (na->nm_mem)
4185 netmap_mem_put(na->nm_mem);
4186 bzero(na, sizeof(*na));
4187 nm_os_free(na);
4194 netmap_hw_krings_create(struct netmap_adapter *na)
4196 int ret = netmap_krings_create(na, 0);
4199 u_int lim = netmap_real_rings(na, NR_RX), i;
4200 for (i = na->num_rx_rings; i < lim; i++) {
4201 mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue);
4203 nm_prdis("initialized sw rx queue %d", na->num_rx_rings);
4216 struct netmap_adapter *na;
4225 na = NA(ifp);
4226 netmap_set_all_rings(na, NM_KR_LOCKED);
4233 if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) {
4234 na->na_flags |= NAF_ZOMBIE;
4239 * therefore, the put() above has deleted the na, since now NA(ifp) is
4254 * We rely on the OS to make sure that the ifp and na do not go
4262 struct netmap_adapter *na = NA(ifp);
4272 if (i >= na->num_host_rx_rings) {
4273 i = i % na->num_host_rx_rings;
4275 kring = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX) + i];
4279 // mtx_lock(&na->core_lock);
4281 if (!nm_netmap_on(na)) {
4282 nm_prerr("%s not in netmap mode anymore", na->name);
4288 if (txr >= na->num_tx_rings) {
4289 txr %= na->num_tx_rings;
4291 tx_kring = NMR(na, NR_TX)[txr];
4294 return MBUF_TRANSMIT(na, ifp, m);
4300 if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */
4301 nm_prerr("%s from_host, drop packet size %d > %d", na->name,
4302 len, NETMAP_BUF_SIZE(na));
4308 nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name);
4314 nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name);
4334 nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
4338 nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q));
4374 netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n,
4380 if (!nm_native_on(na)) {
4386 if (n >= na->num_tx_rings)
4388 kring = na->tx_rings[n];
4400 if (n >= na->num_rx_rings)
4402 kring = na->rx_rings[n];
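
netmap_reset() above is the hook drivers call from their (re)init path: it returns the ring's slot array when ring n is under netmap control, NULL otherwise. A sketch of the intended caller (every name other than netmap_reset(), NA() and NR_TX is hypothetical):

    static void
    foo_init_tx_ring(struct foo_softc *sc, u_int ring_nr)
    {
            struct netmap_adapter *na = NA(sc->ifp);
            struct netmap_slot *slot = netmap_reset(na, NR_TX, ring_nr, 0);

            if (slot != NULL) {
                    /* netmap mode: rebuild hw descriptors from slot[] */
                    foo_load_netmap_buffers(sc, ring_nr, slot);
            } else {
                    /* normal mode: mbuf-based setup */
                    foo_load_mbuf_buffers(sc, ring_nr);
            }
    }
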
4454 netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
4465 if (q >= nma_get_nrings(na, t))
4468 kring = NMR(na, t)[q];
4502 struct netmap_adapter *na = NA(ifp);
4510 if (!nm_netmap_on(na))
4513 if (na->na_flags & NAF_SKIP_INTR) {
4518 return netmap_common_irq(na, q, work_done);
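
And the matching driver side of netmap_rx_irq()/netmap_common_irq() above: interrupt handlers offer the event to netmap first and skip their normal processing when it is consumed. Hedged sketch (queue index is illustrative; NM_IRQ_PASS is the "not in netmap mode" return):

    /* in the driver's rx interrupt handler */
    u_int work_done = 0;

    if (netmap_rx_irq(ifp, rxq_index, &work_done) != NM_IRQ_PASS)
            return;  /* netmap woke its waiters; skip the normal rx path */
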
4523 nm_set_native_flags(struct netmap_adapter *na)
4525 if_t ifp = na->ifp;
4529 if (na->active_fds > 0) {
4533 na->na_flags |= NAF_NETMAP_ON;
4535 netmap_update_hostrings_mode(na);
4539 nm_clear_native_flags(struct netmap_adapter *na)
4541 if_t ifp = na->ifp;
4545 if (na->active_fds > 0) {
4549 netmap_update_hostrings_mode(na);
4552 na->na_flags &= ~NAF_NETMAP_ON;
4556 netmap_krings_mode_commit(struct netmap_adapter *na, int onoff)
4563 for (i = 0; i < netmap_real_rings(na, t); i++) {
4564 struct netmap_kring *kring = NMR(na, t)[i];