Lines Matching refs:na

207 generic_netmap_register(struct netmap_adapter *na, int enable)
209 struct ifnet *ifp = na->ifp;
210 struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
215 if (!na)
227 gna->mit = malloc(na->num_rx_rings * sizeof(struct nm_generic_mit),
234 for (r=0; r<na->num_rx_rings; r++)
235 netmap_mitigation_init(&gna->mit[r], na);
240 for (r=0; r<na->num_rx_rings; r++) {
241 mbq_safe_init(&na->rx_rings[r].rx_queue);
247 for (r=0; r<na->num_tx_rings; r++)
248 na->tx_rings[r].tx_pool = NULL;
249 for (r=0; r<na->num_tx_rings; r++) {
250 na->tx_rings[r].tx_pool = malloc(na->num_tx_desc * sizeof(struct mbuf *),
252 if (!na->tx_rings[r].tx_pool) {
257 for (i=0; i<na->num_tx_desc; i++)
258 na->tx_rings[r].tx_pool[i] = NULL;
259 for (i=0; i<na->num_tx_desc; i++) {
266 na->tx_rings[r].tx_pool[i] = m;
271 error = netmap_catch_rx(na, 1);
295 } else if (na->tx_rings[0].tx_pool) {
297 generic_netmap_register(na, 1) was successful.
298 If it was not, na->tx_rings[0].tx_pool was set to NULL by the
308 netmap_catch_rx(na, 0);
313 for (r=0; r<na->num_rx_rings; r++) {
314 mbq_safe_purge(&na->rx_rings[r].rx_queue);
315 mbq_safe_destroy(&na->rx_rings[r].rx_queue);
318 for (r=0; r<na->num_rx_rings; r++)
322 for (r=0; r<na->num_tx_rings; r++) {
323 for (i=0; i<na->num_tx_desc; i++) {
324 m_freem(na->tx_rings[r].tx_pool[i]);
326 free(na->tx_rings[r].tx_pool, M_DEVBUF);
349 for (r=0; r<na->num_tx_rings; r++) {
350 if (na->tx_rings[r].tx_pool == NULL)
352 for (i=0; i<na->num_tx_desc; i++)
353 if (na->tx_rings[r].tx_pool[i])
354 m_freem(na->tx_rings[r].tx_pool[i]);
355 free(na->tx_rings[r].tx_pool, M_DEVBUF);
356 na->tx_rings[r].tx_pool = NULL;
358 for (r=0; r<na->num_rx_rings; r++) {
360 mbq_safe_destroy(&na->rx_rings[r].rx_queue);
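
The matches above (file lines 207-271 for the enable path, 295-360 for the teardown and error paths) all come from generic_netmap_register(). Below is a compressed sketch of the enable path they trace; the malloc flags, the nm_alloc_tx_mbuf() helper, and parts of the error-path ordering are assumptions, since the listing only shows the statements that reference na.

static int
generic_register_sketch(struct netmap_adapter *na)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	u_int r, i;

	if (!na)
		return EINVAL;

	/* One mitigation state per RX ring (file lines 227-235). */
	gna->mit = malloc(na->num_rx_rings * sizeof(struct nm_generic_mit),
	                  M_DEVBUF, M_NOWAIT | M_ZERO);	/* flags assumed */
	if (gna->mit == NULL)
		return ENOMEM;
	for (r = 0; r < na->num_rx_rings; r++)
		netmap_mitigation_init(&gna->mit[r], na);

	/* A safe mbuf queue per RX ring, filled by the RX intercept (line 241). */
	for (r = 0; r < na->num_rx_rings; r++)
		mbq_safe_init(&na->rx_rings[r].rx_queue);

	/* Pre-allocate one mbuf per TX slot (lines 247-266). */
	for (r = 0; r < na->num_tx_rings; r++)
		na->tx_rings[r].tx_pool = NULL;
	for (r = 0; r < na->num_tx_rings; r++) {
		na->tx_rings[r].tx_pool = malloc(na->num_tx_desc * sizeof(struct mbuf *),
		                                 M_DEVBUF, M_NOWAIT | M_ZERO);
		if (na->tx_rings[r].tx_pool == NULL)
			goto free_tx_pools;
		for (i = 0; i < na->num_tx_desc; i++) {
			/* nm_alloc_tx_mbuf() is a hypothetical stand-in for the
			 * per-slot allocation, which the matched lines do not show. */
			na->tx_rings[r].tx_pool[i] = nm_alloc_tx_mbuf(na);
			if (na->tx_rings[r].tx_pool[i] == NULL)
				goto free_tx_pools;
		}
	}

	/* Start diverting incoming traffic into the rx_queues (line 271). */
	return netmap_catch_rx(na, 1);

free_tx_pools:
	/* Error path, mirroring file lines 349-360. */
	for (r = 0; r < na->num_tx_rings; r++) {
		if (na->tx_rings[r].tx_pool == NULL)
			continue;
		for (i = 0; i < na->num_tx_desc; i++)
			if (na->tx_rings[r].tx_pool[i])
				m_freem(na->tx_rings[r].tx_pool[i]);
		free(na->tx_rings[r].tx_pool, M_DEVBUF);
		na->tx_rings[r].tx_pool = NULL;
	}
	for (r = 0; r < na->num_rx_rings; r++)
		mbq_safe_destroy(&na->rx_rings[r].rx_queue);
	free(gna->mit, M_DEVBUF);	/* assumed; line 359 is not among the matches */
	return ENOMEM;
}
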
498 generic_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
500 struct ifnet *ifp = na->ifp;
501 struct netmap_kring *kring = &na->tx_rings[ring_nr];
612 struct netmap_adapter *na = NA(ifp);
613 struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
617 if (rr >= na->num_rx_rings) {
618 rr = rr % na->num_rx_rings; // XXX expensive...
622 if (unlikely(mbq_len(&na->rx_rings[rr].rx_queue) > 1024)) {
625 mbq_safe_enqueue(&na->rx_rings[rr].rx_queue, m);
630 netmap_generic_irq(na->ifp, rr, &work_done);
640 netmap_generic_irq(na->ifp, rr, &work_done);
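
File lines 612-640 belong to the RX interception handler that the register path installs through netmap_catch_rx(). A simplified sketch of the flow those matches outline follows; the function name, its signature, and the drop action on overflow are assumptions, and the two netmap_generic_irq() calls at lines 630 and 640 (with and without mitigation) are collapsed into a single unconditional notification.

static void
generic_rx_sketch(struct ifnet *ifp, struct mbuf *m, u_int rr)
{
	struct netmap_adapter *na = NA(ifp);
	u_int work_done;

	/* Clamp the ring index; flagged "XXX expensive" in the original (line 618). */
	if (rr >= na->num_rx_rings)
		rr = rr % na->num_rx_rings;

	/* Bound the backlog: the original refuses to queue more than 1024
	 * mbufs per ring (line 622); dropping the mbuf here is assumed. */
	if (mbq_len(&na->rx_rings[rr].rx_queue) > 1024) {
		m_freem(m);
		return;
	}

	/* Park the mbuf on the ring's queue for the next rxsync (line 625). */
	mbq_safe_enqueue(&na->rx_rings[rr].rx_queue, m);

	/* Report a (software) RX interrupt on ring rr. */
	netmap_generic_irq(na->ifp, rr, &work_done);
}
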
654 generic_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
656 struct netmap_kring *kring = &na->rx_rings[ring_nr];
732 generic_netmap_dtor(struct netmap_adapter *na)
734 struct ifnet *ifp = na->ifp;
735 struct netmap_generic_adapter *gna = (struct netmap_generic_adapter*)na;
740 if_rele(na->ifp);
746 na->ifp = NULL;
764 struct netmap_adapter *na;
779 na = (struct netmap_adapter *)gna;
780 na->ifp = ifp;
781 na->num_tx_desc = num_tx_desc;
782 na->num_rx_desc = num_rx_desc;
783 na->nm_register = &generic_netmap_register;
784 na->nm_txsync = &generic_netmap_txsync;
785 na->nm_rxsync = &generic_netmap_rxsync;
786 na->nm_dtor = &generic_netmap_dtor;
790 na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS;
798 generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);
800 retval = netmap_attach_common(na);
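
File lines 764-800 show the generic attach routine wiring a netmap_generic_adapter to the interface. The sketch below restates that sequence; the allocation of gna and the malloc flags are assumptions, the rest is taken directly from the matched lines.

static int
generic_attach_sketch(struct ifnet *ifp, u_int num_tx_desc, u_int num_rx_desc)
{
	struct netmap_generic_adapter *gna;
	struct netmap_adapter *na;

	gna = malloc(sizeof(*gna), M_DEVBUF, M_NOWAIT | M_ZERO);	/* assumed */
	if (gna == NULL)
		return ENOMEM;

	na = (struct netmap_adapter *)gna;
	na->ifp = ifp;
	na->num_tx_desc = num_tx_desc;
	na->num_rx_desc = num_rx_desc;

	/* Generic (emulated) callbacks replace the hardware-specific ones. */
	na->nm_register = &generic_netmap_register;
	na->nm_txsync = &generic_netmap_txsync;
	na->nm_rxsync = &generic_netmap_rxsync;
	na->nm_dtor = &generic_netmap_dtor;

	/* No netmap-aware interrupt handlers; host rings are provided. */
	na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS;

	/* Ring counts come from the interface itself. */
	generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);

	return netmap_attach_common(na);
}
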