Lines matching refs:na
Cross-reference hits for the symbol na (the struct netmap_adapter pointer). The leading number on each entry is the line number in the indexed source file, followed by the matching line.

103 #define MBUF_TRANSMIT(na, ifp, m)	((na)->if_transmit(ifp, m))
144 #define MBUF_TRANSMIT(na, ifp, m) \
148 (((struct net_device_ops *)(na)->if_transmit)->ndo_start_xmit(m, ifp)); \
472 struct netmap_adapter *na;
934 nma_get_ndesc(struct netmap_adapter *na, enum txrx t)
936 return (t == NR_TX ? na->num_tx_desc : na->num_rx_desc);
940 nma_set_ndesc(struct netmap_adapter *na, enum txrx t, u_int v)
943 na->num_tx_desc = v;
945 na->num_rx_desc = v;
949 nma_get_nrings(struct netmap_adapter *na, enum txrx t)
951 return (t == NR_TX ? na->num_tx_rings : na->num_rx_rings);
955 nma_get_host_nrings(struct netmap_adapter *na, enum txrx t)
957 return (t == NR_TX ? na->num_host_tx_rings : na->num_host_rx_rings);
961 nma_set_nrings(struct netmap_adapter *na, enum txrx t, u_int v)
964 na->num_tx_rings = v;
966 na->num_rx_rings = v;
970 nma_set_host_nrings(struct netmap_adapter *na, enum txrx t, u_int v)
973 na->num_host_tx_rings = v;
975 na->num_host_rx_rings = v;
979 NMR(struct netmap_adapter *na, enum txrx t)
981 return (t == NR_TX ? na->tx_rings : na->rx_rings);
984 int nma_intr_enable(struct netmap_adapter *na, int onoff);
992 #define NETMAP_OWNED_BY_KERN(na) ((na)->na_flags & NAF_BUSY)
993 #define NETMAP_OWNED_BY_ANY(na) \
994 (NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0))
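
The two ownership macros just above are what the control path consults before granting a new binding. A minimal, hedged sketch of that guard; the surrounding helper and error handling are hypothetical:

    /* Hedged sketch: refuse a new binding while the adapter is busy,
     * either held by the kernel (NAF_BUSY) or by open file descriptors.
     * EBUSY comes from the kernel's errno definitions. */
    static int
    try_claim_adapter(struct netmap_adapter *na)    /* hypothetical helper */
    {
        if (NETMAP_OWNED_BY_ANY(na))
            return EBUSY;
        /* ... proceed to bind the adapter ... */
        return 0;
    }
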
1065 netmap_real_rings(struct netmap_adapter *na, enum txrx t)
1067 return nma_get_nrings(na, t) +
1068 !!(na->na_flags & NAF_HOST_RINGS) * nma_get_host_nrings(na, t);
1073 netmap_all_rings(struct netmap_adapter *na, enum txrx t)
1075 return max(nma_get_nrings(na, t) + 1, netmap_real_rings(na, t));
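
Taken together, the accessors above are enough to walk every kring of one direction, hardware rings plus the optional host ring(s). A hedged sketch using only the helpers matched here; the loop body is illustrative:

    /* Hedged sketch: iterate all TX krings of an adapter. */
    static void
    walk_tx_krings(struct netmap_adapter *na)    /* hypothetical helper */
    {
        /* hw rings, plus host ring(s) when NAF_HOST_RINGS is set */
        u_int i, n = netmap_real_rings(na, NR_TX);

        for (i = 0; i < n; i++) {
            struct netmap_kring *kring = NMR(na, NR_TX)[i];
            /* ... inspect or reconfigure the kring here ... */
            (void)kring;
        }
    }
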
1078 int netmap_default_bdg_attach(const char *name, struct netmap_adapter *na,
1145 int nm_is_bwrap(struct netmap_adapter *na);
1243 static inline int nm_iszombie(struct netmap_adapter *na);
1279 if (unlikely(nm_iszombie(kr->na))) {
1349 struct netmap_slot *netmap_reset(struct netmap_adapter *na,
1391 nm_netmap_on(struct netmap_adapter *na)
1393 return na && na->na_flags & NAF_NETMAP_ON;
1397 nm_native_on(struct netmap_adapter *na)
1399 return nm_netmap_on(na) && (na->na_flags & NAF_NATIVE);
1403 netmap_kring_on(struct netmap_adapter *na, u_int q, enum txrx t)
1407 if (!nm_native_on(na))
1410 if (t == NR_RX && q < na->num_rx_rings)
1411 kring = na->rx_rings[q];
1412 else if (t == NR_TX && q < na->num_tx_rings)
1413 kring = na->tx_rings[q];
1421 nm_iszombie(struct netmap_adapter *na)
1423 return na == NULL || (na->na_flags & NAF_ZOMBIE);
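
These predicates are what a native driver checks on its datapath. A hedged sketch of an RX path deferring to netmap when the ring is in netmap mode; netmap_kring_on() is assumed, per the matched body above, to return the kring when ring q of direction t is active under native netmap and NULL otherwise:

    /* Hedged sketch: hypothetical driver RX-completion path. */
    static int
    drv_rxeof(struct netmap_adapter *na, u_int q)
    {
        if (netmap_kring_on(na, q, NR_RX)) {
            /* netmap owns this ring: skip host-stack processing and
             * just notify the netmap poller (e.g. via netmap_rx_irq()). */
            return 1;
        }
        /* ... normal host-stack receive processing ... */
        return 0;
    }
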
1429 void netmap_krings_mode_commit(struct netmap_adapter *na, int onoff);
1486 * - if the na points to an ifp, mark the ifp as netmap capable
1487 * using na as its native adapter;
1498 int netmap_update_config(struct netmap_adapter *na);
1500 * using the information that must be already available in the na.
1506 int netmap_krings_create(struct netmap_adapter *na, u_int tailroom);
1510 void netmap_krings_delete(struct netmap_adapter *na);
1512 int netmap_hw_krings_create(struct netmap_adapter *na);
1513 void netmap_hw_krings_delete(struct netmap_adapter *na);
1517 * terminate. The status change is then notified using the na nm_notify
1527 int netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu);
1528 int netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
1533 int netmap_get_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1535 void netmap_unget_na(struct netmap_adapter *na, if_t ifp);
1537 struct netmap_mem_d *nmd, struct netmap_adapter **na);
1538 void netmap_mem_restore(struct netmap_adapter *na);
1545 int netmap_get_vale_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1564 int netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1575 int netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1577 void netmap_monitor_stop(struct netmap_adapter *na);
1584 int netmap_get_null_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1624 void __netmap_adapter_get(struct netmap_adapter *na);
1626 #define netmap_adapter_get(na) \
1628 struct netmap_adapter *__na = na; \
1633 int __netmap_adapter_put(struct netmap_adapter *na);
1635 #define netmap_adapter_put(na) \
1637 struct netmap_adapter *__na = na; \
1648 void netmap_adapter_get(struct netmap_adapter *na);
1649 int netmap_adapter_put(struct netmap_adapter *na);
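
The get/put pair above is plain reference counting on the adapter. A hedged sketch of the usual pairing; the container type and helpers are hypothetical:

    struct my_hook {                    /* hypothetical container */
        struct netmap_adapter *na;
    };

    static void
    my_hook_attach(struct my_hook *h, struct netmap_adapter *na)
    {
        netmap_adapter_get(na);         /* hold a reference while the pointer is stored */
        h->na = na;
    }

    static void
    my_hook_detach(struct my_hook *h)
    {
        netmap_adapter_put(h->na);      /* drops the reference; may destroy the adapter */
        h->na = NULL;
    }
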
1714 #define NM_ATTACH_NA(ifp, na) do { \
1715 if_setnetmapadapter(ifp, na); \
1720 #define NM_RESTORE_NA(ifp, na) if_setnetmapadapter(ifp, na);
1745 netmap_load_map(struct netmap_adapter *na,
1749 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
1755 netmap_unload_map(struct netmap_adapter *na,
1762 #define netmap_sync_map(na, tag, map, sz, t)
1766 netmap_reload_map(struct netmap_adapter *na,
1771 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
1811 netmap_load_map(struct netmap_adapter *na,
1815 *map = dma_map_single(na->pdev, buf, size,
1817 if (dma_mapping_error(na->pdev, *map)) {
1826 netmap_unload_map(struct netmap_adapter *na,
1830 dma_unmap_single(na->pdev, *map, sz,
1837 netmap_sync_map_cpu(struct netmap_adapter *na,
1841 dma_sync_single_for_cpu(na->pdev, *map, sz,
1847 netmap_sync_map_dev(struct netmap_adapter *na,
1851 dma_sync_single_for_device(na->pdev, *map, sz,
1857 netmap_reload_map(struct netmap_adapter *na,
1860 u_int sz = NETMAP_BUF_SIZE(na);
1863 dma_unmap_single(na->pdev, *map, sz,
1867 *map = dma_map_single(na->pdev, buf, sz,
1871 #define netmap_sync_map_cpu(na, tag, map, sz, t)
1872 #define netmap_sync_map_dev(na, tag, map, sz, t)
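
The map/sync helpers above paper over the bus_dma (FreeBSD) vs. dma_map_single (Linux) split. Only parts of their signatures are visible in these matches, so the following is a hedged sketch of a common use: reloading a slot's DMA map after userspace swapped the buffer (NS_BUF_CHANGED). The txr/txbuf structures are hypothetical per-queue/per-slot driver state, and the four-argument call shape (na, tag, map, buf) is an assumption based on the lines above:

    /* Hedged sketch: fragment of a hypothetical driver txsync loop. */
    void *addr = NMB(na, slot);    /* buffer lookup; NMB() appears further down */

    if (slot->flags & NS_BUF_CHANGED) {
        /* userspace attached a different buffer to this slot: remap it */
        netmap_reload_map(na, txr->txtag, txbuf->map, addr);
        slot->flags &= ~NS_BUF_CHANGED;
    }
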
1928 * physical-address look-up table for each na.
1951 NMB(struct netmap_adapter *na, struct netmap_slot *slot)
1953 struct lut_entry *lut = na->na_lut.lut;
1955 return (unlikely(i >= na->na_lut.objtotal)) ?
1960 PNMB(struct netmap_adapter *na, struct netmap_slot *slot, uint64_t *pp)
1963 struct lut_entry *lut = na->na_lut.lut;
1964 struct plut_entry *plut = na->na_lut.plut;
1965 void *ret = (i >= na->na_lut.objtotal) ? lut[0].vaddr : lut[i].vaddr;
1968 *pp = (i >= na->na_lut.objtotal) ? (uint64_t)plut[0].paddr.QuadPart : (uint64_t)plut[i].paddr.QuadPart;
1970 *pp = (i >= na->na_lut.objtotal) ? plut[0].paddr : plut[i].paddr;
1995 void *addr = NMB(kring->na, slot);
2002 void *addr = PNMB(kring->na, slot, pp);
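
NMB() resolves a slot's buffer index to a virtual address through the per-adapter lookup table; PNMB() additionally returns the physical address needed for DMA, and both fall back to buffer 0 for out-of-range indices, per the bodies above. A hedged sketch of the usual TX-side lookup; the descriptor-programming part is hypothetical:

    /* Hedged sketch: resolve a slot's buffer before filling a NIC descriptor. */
    static void
    fill_tx_desc(struct netmap_adapter *na, struct netmap_slot *slot)    /* hypothetical */
    {
        uint64_t paddr;
        void *vaddr = PNMB(na, slot, &paddr);    /* virtual + physical address of the buffer */

        /* ... write paddr and slot->len into the NIC's TX descriptor,
         * optionally touch the frame contents via vaddr ... */
        (void)vaddr;
    }
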
2072 struct netmap_adapter *na = np->np_na;
2078 struct netmap_kring *kring = NMR(na, t)[i];
2098 int netmap_pipe_krings_create_both(struct netmap_adapter *na,
2100 void netmap_pipe_krings_delete_both(struct netmap_adapter *na,
2102 int netmap_pipe_reg_both(struct netmap_adapter *na,
2129 int na_is_generic(struct netmap_adapter *na);
2165 void netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done);
2180 struct netmap_adapter *na);
2187 #define na_is_generic(na) (0)
2286 void bdg_mismatch_datapath(struct netmap_vp_adapter *na,
2354 int netmap_pt_guest_attach(struct netmap_adapter *na,
2363 int ptnet_nm_krings_create(struct netmap_adapter *na);
2364 void ptnet_nm_krings_delete(struct netmap_adapter *na);
2365 void ptnet_nm_dtor(struct netmap_adapter *na);
2399 #define SET_MBUF_DESTRUCTOR(m, fn, na) do { \
2402 (m)->m_ext.ext_arg1 = na; \