Lines matching refs:na — every use of the netmap_adapter pointer na in the netmap generic (emulated) adapter code; each match below is prefixed with its line number in the source file.

207 netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
209 if (unlikely(!nm_netmap_on(na)))
212 netmap_common_irq(na, q, work_done);
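
A minimal user-space sketch of the guard-then-forward pattern visible at lines 207-212: interrupts are ignored unless the adapter is in netmap mode, otherwise they are handed to the common dispatcher. Only the identifiers netmap_generic_irq, nm_netmap_on and netmap_common_irq come from the listing; the reduced types, the flag value and both helper bodies are assumptions.

    #include <stdio.h>

    #define NAF_NETMAP_ON 0x1                /* model flag, value assumed */

    struct netmap_adapter {                  /* heavily reduced model */
        unsigned int na_flags;
        const char *name;
    };

    static int
    nm_netmap_on(struct netmap_adapter *na)
    {
        return na->na_flags & NAF_NETMAP_ON;
    }

    static void
    netmap_common_irq(struct netmap_adapter *na, unsigned int q,
                      unsigned int *work_done)
    {
        printf("irq: %s ring %u\n", na->name, q);
        if (work_done)
            *work_done = 1;
    }

    /* Drop the interrupt unless the adapter is in netmap mode. */
    void
    netmap_generic_irq(struct netmap_adapter *na, unsigned int q,
                       unsigned int *work_done)
    {
        if (!nm_netmap_on(na))
            return;
        netmap_common_irq(na, q, work_done);
    }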
222 generic_netmap_unregister(struct netmap_adapter *na)
224 struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
228 if (na->active_fds == 0) {
229 na->na_flags &= ~NAF_NETMAP_ON;
238 netmap_krings_mode_commit(na, /*onoff=*/0);
240 for_each_rx_kring(r, kring, na) {
252 for_each_tx_kring(r, kring, na) {
265 if (na->active_fds == 0) {
268 for_each_rx_kring(r, kring, na) {
272 for_each_tx_kring(r, kring, na) {
279 for (i=0; i<na->num_tx_desc; i++) {
296 nm_prinf("Emulated adapter for %s deactivated", na->name);
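
Lines 222-296 are the unregister path. The matches suggest a two-phase teardown: clear NAF_NETMAP_ON only when the last open file descriptor disappears (active_fds == 0, lines 228-229), switch every kring out of netmap mode (line 238), and only then release per-ring state such as the per-descriptor TX mbufs freed by the loop at line 279. A compressed user-space model of that ordering; the tx_pool layout and all types are assumptions, only the flow mirrors the listing.

    #include <stdio.h>
    #include <stdlib.h>

    #define NAF_NETMAP_ON 0x1                /* model flag, value assumed */

    struct netmap_adapter {                  /* heavily reduced model */
        unsigned int na_flags;
        int active_fds;
        unsigned int num_tx_rings, num_tx_desc;
        void ***tx_pool;                     /* [ring][descriptor], assumed */
        const char *name;
    };

    int
    generic_netmap_unregister(struct netmap_adapter *na)
    {
        unsigned int r, i;

        if (na->active_fds == 0)
            na->na_flags &= ~NAF_NETMAP_ON;  /* last user: leave netmap mode */

        /* ... krings switched back to normal mode here (line 238) ... */

        if (na->active_fds == 0) {
            for (r = 0; r < na->num_tx_rings; r++) {
                for (i = 0; i < na->num_tx_desc; i++)   /* cf. line 279 */
                    free(na->tx_pool[r][i]);
                free(na->tx_pool[r]);
            }
            free(na->tx_pool);
            na->tx_pool = NULL;
            printf("Emulated adapter for %s deactivated\n", na->name);
        }
        return 0;
    }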
304 generic_netmap_register(struct netmap_adapter *na, int enable)
306 struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
311 if (!na) {
317 return generic_netmap_unregister(na);
320 if (na->active_fds == 0) {
321 nm_prinf("Emulated adapter for %s activated", na->name);
322 /* Do all memory allocations when (na->active_fds == 0), to
326 gna->mit = nm_os_malloc(na->num_rx_rings * sizeof(struct nm_generic_mit));
333 for_each_rx_kring(r, kring, na) {
335 nm_os_mitigation_init(&gna->mit[r], r, na);
348 for_each_tx_kring(r, kring, na) {
351 for_each_tx_kring(r, kring, na) {
353 nm_os_malloc(na->num_tx_desc * sizeof(struct mbuf *));
367 netmap_krings_mode_commit(na, /*onoff=*/1);
369 for_each_tx_kring(r, kring, na) {
371 for (i=0; i<na->num_tx_desc; i++) {
378 if (na->active_fds == 0) {
393 na->na_flags |= NAF_NETMAP_ON;
410 /* Here (na->active_fds == 0) holds. */
414 for_each_tx_kring(r, kring, na) {
422 for_each_rx_kring(r, kring, na) {
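
Lines 304-422 are generic_netmap_register() together with its error unwind. The shape the matches give away: a NULL check (line 311), enable == 0 routed to the unregister path (line 317), all allocations done only for the first descriptor (active_fds == 0, lines 320-326), and NAF_NETMAP_ON raised only after everything succeeded (line 393), with the labels around lines 410-422 undoing the work in reverse on failure. A minimal model of that ordering; the types, sizes and label name are assumptions.

    #include <stdlib.h>
    #include <errno.h>

    #define NAF_NETMAP_ON 0x1                /* model flag, value assumed */

    struct netmap_adapter {                  /* heavily reduced model */
        unsigned int na_flags;
        int active_fds;
        unsigned int num_tx_rings, num_tx_desc;
        void ***tx_pool;                     /* [ring][descriptor], assumed */
    };

    static int
    generic_netmap_unregister(struct netmap_adapter *na)
    {
        (void)na;                            /* teardown modeled above */
        return 0;
    }

    int
    generic_netmap_register(struct netmap_adapter *na, int enable)
    {
        unsigned int r;

        if (na == NULL)
            return EINVAL;                   /* cf. the !na check, line 311 */
        if (!enable)
            return generic_netmap_unregister(na);   /* cf. line 317 */

        if (na->active_fds == 0) {           /* first fd: allocate everything */
            na->tx_pool = calloc(na->num_tx_rings, sizeof(*na->tx_pool));
            if (na->tx_pool == NULL)
                return ENOMEM;
            for (r = 0; r < na->num_tx_rings; r++) {
                na->tx_pool[r] = calloc(na->num_tx_desc, sizeof(void *));
                if (na->tx_pool[r] == NULL)
                    goto free_tx_pool;       /* unwind, cf. lines 410-422 */
            }
        }
        na->na_flags |= NAF_NETMAP_ON;       /* enable last, cf. line 393 */
        return 0;

    free_tx_pool:
        while (r-- > 0)
            free(na->tx_pool[r]);
        free(na->tx_pool);
        na->tx_pool = NULL;
        return ENOMEM;
    }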
439 struct netmap_adapter *na = GEN_TX_MBUF_NA(m);
444 if (unlikely(!nm_netmap_on(na) || r >= na->num_tx_rings)) {
461 kring = na->tx_rings[r];
489 if (++r == na->num_tx_rings) r = 0;
506 netmap_generic_irq(na, r, NULL);
608 netmap_generic_irq(kring->na, kring->ring_id, NULL);
655 SET_MBUF_DESTRUCTOR(m, generic_mbuf_dtor, kring->na);
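
Lines 439-506, 608 and 655 are the TX-completion machinery: an emulated adapter gets no hardware completion interrupt, so a destructor is armed on selected mbufs (SET_MBUF_DESTRUCTOR, line 655) and, when the host stack finally frees such an mbuf, the destructor recovers the adapter (GEN_TX_MBUF_NA, line 439), validates the ring index (line 444), scans rings round-robin if needed (line 489) and raises a synthetic interrupt (line 506). A self-contained model of the callback pattern; the mbuf layout and all bodies are assumptions.

    #include <stdio.h>
    #include <stdlib.h>

    struct netmap_adapter {                  /* heavily reduced model */
        unsigned int num_tx_rings;
        const char *name;
    };

    struct mbuf {                            /* model: carries its destructor,
                                              * cf. SET_MBUF_DESTRUCTOR, 655 */
        void (*dtor)(struct mbuf *);
        struct netmap_adapter *na;
        unsigned int ring;
    };

    static void
    netmap_generic_irq(struct netmap_adapter *na, unsigned int q,
                       unsigned int *work_done)
    {
        (void)work_done;
        printf("tx completion: wake %s ring %u\n", na->name, q);
    }

    /* The stack freeing the mbuf is the only completion signal we get;
     * turn it into a ring notification (cf. lines 439-506). */
    static void
    generic_mbuf_dtor(struct mbuf *m)
    {
        struct netmap_adapter *na = m->na;
        unsigned int r = m->ring;

        if (r >= na->num_tx_rings)           /* cf. the guard at line 444 */
            return;
        netmap_generic_irq(na, r, NULL);
    }

    int
    main(void)
    {
        struct netmap_adapter na = { 1, "em0" };
        struct mbuf *m = malloc(sizeof(*m));

        m->dtor = generic_mbuf_dtor;         /* arm the destructor */
        m->na = &na;
        m->ring = 0;
        m->dtor(m);                          /* "the stack frees the mbuf" */
        free(m);
        return 0;
    }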
695 struct netmap_adapter *na = kring->na;
696 struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
697 if_t ifp = na->ifp;
738 void *addr = NMB(na, slot);
743 NM_CHECK_ADDR_LEN(na, addr, len);
750 nm_os_get_mbuf(ifp, NETMAP_BUF_SIZE(na));
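
Lines 695-750 are the heart of the TX path: for each new slot, resolve the netmap buffer address (NMB, line 738), sanity-check address and length (NM_CHECK_ADDR_LEN, line 743), and obtain an mbuf sized NETMAP_BUF_SIZE(na) to carry the payload into the host stack (line 750). A user-space sketch of one slot's worth of that work; NMB() here is my stand-in, and the flat buffer-arena layout is assumed.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NETMAP_BUF_SIZE 2048             /* typical value, assumed */

    struct netmap_slot { unsigned int buf_idx; unsigned int len; };

    struct netmap_adapter {                  /* heavily reduced model */
        char *buf_base;                      /* start of the buffer arena */
        unsigned int num_bufs;
    };

    /* Stand-in for NMB(): slot index -> buffer address. */
    static char *
    NMB(struct netmap_adapter *na, struct netmap_slot *slot)
    {
        if (slot->buf_idx >= na->num_bufs)
            return na->buf_base;             /* buffer 0 doubles as "bad" */
        return na->buf_base + (size_t)slot->buf_idx * NETMAP_BUF_SIZE;
    }

    /* One TX slot: validate, copy into a freshly obtained buffer (the
     * model's nm_os_get_mbuf, cf. line 750), hand it to the stack. */
    int
    emit_one_slot(struct netmap_adapter *na, struct netmap_slot *slot)
    {
        char *addr = NMB(na, slot);          /* cf. line 738 */
        char *m;

        if (slot->len > NETMAP_BUF_SIZE)     /* cf. NM_CHECK_ADDR_LEN, 743 */
            return -1;
        m = malloc(NETMAP_BUF_SIZE);
        if (m == NULL)
            return -1;
        memcpy(m, addr, slot->len);
        printf("transmit %u bytes\n", slot->len);
        free(m);                             /* the stack would own this */
        return 0;
    }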
865 struct netmap_adapter *na = NA(ifp);
866 struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
871 if (r >= na->num_rx_rings) {
872 r = r % na->num_rx_rings;
875 kring = na->rx_rings[r];
883 if (unlikely(!gna->rxsg && MBUF_LEN(m) > NETMAP_BUF_SIZE(na))) {
891 } else if (unlikely(mbq_len(&kring->rx_queue) > na->num_rx_desc)) {
900 netmap_generic_irq(na, r, &work_done);
909 netmap_generic_irq(na, r, &work_done);
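
Lines 865-909 are the RX interception hook: every frame the host stack would have delivered is steered into a netmap RX ring instead. The matches show the ring index folded with a modulo (lines 871-872), oversized frames dropped when scatter-gather RX is off (line 883), a backlog cap at num_rx_desc queued mbufs (line 891), and a (possibly mitigated) notification via netmap_generic_irq (lines 900 and 909). A sketch of those checks; the queue bookkeeping is modeled with a plain counter.

    #include <stdio.h>

    #define MAX_RINGS 8                      /* model limit, assumed */

    struct netmap_adapter {                  /* heavily reduced model */
        unsigned int num_rx_rings, num_rx_desc;
        unsigned int buf_size;               /* NETMAP_BUF_SIZE(na) stand-in */
        int rxsg;                            /* scatter-gather RX allowed? */
        unsigned int queued[MAX_RINGS];      /* mbufs queued per ring */
    };

    static void
    netmap_generic_irq(struct netmap_adapter *na, unsigned int q,
                       unsigned int *work_done)
    {
        (void)na;
        if (work_done)
            *work_done = 1;
        printf("rx notify ring %u\n", q);
    }

    int
    generic_rx_handler(struct netmap_adapter *na, unsigned int r,
                       unsigned int pkt_len)
    {
        unsigned int work_done;

        if (r >= na->num_rx_rings)
            r = r % na->num_rx_rings;        /* cf. lines 871-872 */

        if (!na->rxsg && pkt_len > na->buf_size)
            return -1;                       /* one-slot limit, cf. line 883 */
        if (na->queued[r] > na->num_rx_desc)
            return -1;                       /* backlog full, cf. line 891 */

        na->queued[r]++;                     /* enqueue (kring->rx_queue) */
        netmap_generic_irq(na, r, &work_done);   /* cf. lines 900/909 */
        return 0;
    }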
928 struct netmap_adapter *na = kring->na;
936 u_int nm_buf_len = NETMAP_BUF_SIZE(na);
1037 nmaddr = NMB(na, &ring->slot[nm_i]);
1039 if (nmaddr == NETMAP_BUF_BASE(na)) { /* Bad buffer */
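
Lines 928-1039 are the RX sync path that drains the queue built above: each mbuf's payload is copied into the netmap buffer of the next free slot, with nm_buf_len = NETMAP_BUF_SIZE(na) as the per-slot limit (line 936), and a slot whose NMB() address resolves to NETMAP_BUF_BASE(na) treated as a bad buffer (lines 1037-1039). A one-frame model; NMB() and NETMAP_BUF_BASE() here are my stand-ins over an assumed flat arena in which buffer 0 is the reserved bad-buffer sentinel.

    #include <stdio.h>
    #include <string.h>

    #define NETMAP_BUF_SIZE 2048             /* typical value, assumed */

    struct netmap_slot { unsigned int buf_idx; unsigned int len; };

    struct netmap_adapter {                  /* heavily reduced model */
        char *buf_base;
        unsigned int num_bufs;
    };

    #define NETMAP_BUF_BASE(na) ((na)->buf_base)

    static char *
    NMB(struct netmap_adapter *na, struct netmap_slot *slot)
    {
        if (slot->buf_idx >= na->num_bufs)
            return NETMAP_BUF_BASE(na);      /* out of range: sentinel */
        return na->buf_base + (size_t)slot->buf_idx * NETMAP_BUF_SIZE;
    }

    /* Copy one received frame into the slot's netmap buffer. */
    int
    deliver_one(struct netmap_adapter *na, struct netmap_slot *slot,
                const char *frame, unsigned int len)
    {
        char *nmaddr = NMB(na, slot);        /* cf. line 1037 */

        if (nmaddr == NETMAP_BUF_BASE(na)) { /* bad buffer, cf. line 1039 */
            fprintf(stderr, "bad buffer index %u\n", slot->buf_idx);
            return -1;
        }
        if (len > NETMAP_BUF_SIZE)
            len = NETMAP_BUF_SIZE;           /* single-slot model: truncate */
        memcpy(nmaddr, frame, len);
        slot->len = len;
        return 0;
    }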
1068 generic_netmap_dtor(struct netmap_adapter *na)
1070 struct netmap_generic_adapter *gna = (struct netmap_generic_adapter*)na;
1076 if (nm_iszombie(na)) {
1086 na->ifp = NULL;
1087 nm_prinf("Emulated netmap adapter for %s destroyed", na->name);
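
Lines 1068-1087 are the destructor: when the emulated adapter goes away it hands the interface back to whatever adapter it displaced (saved in gna->prev at attach time), with a separate branch for the zombie case where the interface itself already vanished (nm_iszombie, line 1076). A speculative model of the hand-back; the ifnet back-pointer and both struct layouts are assumptions.

    #include <stdio.h>

    struct netmap_adapter;
    struct ifnet { struct netmap_adapter *na; };

    struct netmap_adapter {                  /* heavily reduced model */
        struct ifnet *ifp;
        const char *name;
    };

    struct netmap_generic_adapter {          /* generic wraps a plain na */
        struct netmap_adapter up;
        struct netmap_adapter *prev;         /* displaced adapter, if any */
    };

    void
    generic_netmap_dtor(struct netmap_adapter *na)
    {
        struct netmap_generic_adapter *gna =
            (struct netmap_generic_adapter *)na;

        if (na->ifp != NULL)                 /* skip for zombies, cf. 1076 */
            na->ifp->na = gna->prev;         /* hand the ifnet back */
        na->ifp = NULL;                      /* cf. line 1086 */
        printf("Emulated netmap adapter for %s destroyed\n", na->name);
    }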
1091 na_is_generic(struct netmap_adapter *na)
1093 return na->nm_register == generic_netmap_register;
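
Lines 1091-1093 show how adapter flavors are told apart: there is no type tag, just a comparison of the nm_register callback against generic_netmap_register. A self-contained illustration of that idiom (all types here are mine):

    #include <stdio.h>

    struct netmap_adapter;

    struct netmap_adapter {
        int (*nm_register)(struct netmap_adapter *, int);
    };

    static int
    generic_netmap_register(struct netmap_adapter *na, int enable)
    {
        (void)na; (void)enable;
        return 0;
    }

    /* An adapter is "generic" iff the generic callbacks were wired in. */
    static int
    na_is_generic(struct netmap_adapter *na)
    {
        return na->nm_register == generic_netmap_register;
    }

    int
    main(void)
    {
        struct netmap_adapter na = { generic_netmap_register };
        printf("generic? %d\n", na_is_generic(&na));
        return 0;
    }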
1110 struct netmap_adapter *na;
1144 na = (struct netmap_adapter *)gna;
1145 strlcpy(na->name, if_name(ifp), sizeof(na->name));
1146 na->ifp = ifp;
1147 na->num_tx_desc = num_tx_desc;
1148 na->num_rx_desc = num_rx_desc;
1149 na->rx_buf_maxsize = 32768;
1150 na->nm_register = &generic_netmap_register;
1151 na->nm_txsync = &generic_netmap_txsync;
1152 na->nm_rxsync = &generic_netmap_rxsync;
1153 na->nm_dtor = &generic_netmap_dtor;
1157 na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS;
1165 nm_os_generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);
1167 retval = netmap_attach_common(na);
1174 gna->prev = NA(ifp); /* save old na */
1177 NM_ATTACH_NA(ifp, na);
1181 nm_prinf("Emulated adapter for %s created (prev was %s)", na->name,
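
Lines 1110-1181 are the attach routine: allocate the wrapper, copy the interface name (strlcpy, line 1145), fill in descriptor counts and rx_buf_maxsize, wire the four generic callbacks (lines 1150-1153), mark the adapter NAF_SKIP_INTR | NAF_HOST_RINGS (line 1157), ask the OS layer for the queue counts (line 1165), run the common attach (line 1167), and finally save the displaced adapter in gna->prev before installing the new one with NM_ATTACH_NA (lines 1174-1177). A portable model of that sequence; the defaults, the ifnet layout and the stand-ins for the NM_* helpers are assumptions.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NAF_SKIP_INTR  0x01              /* model flags, values assumed */
    #define NAF_HOST_RINGS 0x02

    struct netmap_adapter;
    struct ifnet { char name[16]; struct netmap_adapter *na; };

    struct netmap_adapter {                  /* heavily reduced model */
        char name[16];
        struct ifnet *ifp;
        unsigned int num_tx_desc, num_rx_desc;
        unsigned int num_tx_rings, num_rx_rings;
        unsigned int rx_buf_maxsize;
        unsigned int na_flags;
        int (*nm_register)(struct netmap_adapter *, int);
    };

    struct netmap_generic_adapter {
        struct netmap_adapter up;
        struct netmap_adapter *prev;         /* previous owner of the ifnet */
    };

    static int
    generic_netmap_register(struct netmap_adapter *na, int enable)
    {
        (void)na; (void)enable;
        return 0;
    }

    int
    generic_netmap_attach(struct ifnet *ifp)
    {
        struct netmap_generic_adapter *gna = calloc(1, sizeof(*gna));
        struct netmap_adapter *na;

        if (gna == NULL)
            return ENOMEM;
        na = &gna->up;
        snprintf(na->name, sizeof(na->name), "%s", ifp->name);  /* cf. 1145 */
        na->ifp = ifp;
        na->num_tx_desc = na->num_rx_desc = 1024;   /* defaults, assumed */
        na->rx_buf_maxsize = 32768;                 /* cf. line 1149 */
        na->nm_register = generic_netmap_register;  /* cf. lines 1150-1153 */
        na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS;  /* cf. line 1157 */
        na->num_tx_rings = na->num_rx_rings = 1;    /* stand-in, cf. 1165 */

        gna->prev = ifp->na;                 /* save old na, cf. line 1174 */
        ifp->na = na;                        /* NM_ATTACH_NA stand-in, 1177 */
        printf("Emulated adapter for %s created\n", na->name);
        return 0;
    }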