Lines Matching refs:na

190 nm_free_bdgfwd(struct netmap_adapter *na)
196 nrings = na->num_tx_rings;
197 kring = na->tx_rings;
211 nm_alloc_bdgfwd(struct netmap_adapter *na)
223 nrings = netmap_real_rings(na, NR_TX);
224 kring = na->tx_rings;
232 nm_free_bdgfwd(na);
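The nm_alloc_bdgfwd()/nm_free_bdgfwd() pair above gives every TX kring its own forwarding table and tears the tables down again. A condensed sketch of that pattern, reconstructed from the matched lines and assuming the usual netmap helpers (nm_os_malloc(), the kring nkr_ft field) while omitting the destination-queue bookkeeping the real allocation appends to the same buffer:

	/* Hedged sketch: attach one nm_bdg_fwd batch to each TX kring,
	 * rolling back everything already allocated if one malloc fails. */
	static int
	nm_alloc_bdgfwd_sketch(struct netmap_adapter *na)
	{
		u_int nrings = netmap_real_rings(na, NR_TX);
		struct netmap_kring **kring = na->tx_rings;
		u_int i;

		for (i = 0; i < nrings; i++) {
			struct nm_bdg_fwd *ft;

			ft = nm_os_malloc(sizeof(*ft) * NM_BDG_BATCH_MAX);
			if (ft == NULL) {
				nm_free_bdgfwd(na);	/* undo partial work */
				return ENOMEM;
			}
			kring[i]->nkr_ft = ft;
		}
		return 0;
	}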
398 netmap_vale_vp_dtor(struct netmap_adapter *na)
400 struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter*)na;
403 nm_prdis("%s has %d references", na->name, na->na_refcount);
409 if (na->ifp != NULL && !nm_iszombie(na)) {
410 NM_DETACH_NA(na->ifp);
412 nm_prdis("releasing %s", if_name(na->ifp));
414 nm_os_vi_detach(na->ifp);
427 netmap_vale_vp_krings_create(struct netmap_adapter *na)
432 u_int nrx = netmap_real_rings(na, NR_RX);
437 tailroom = sizeof(uint32_t) * na->num_rx_desc * nrx;
439 error = netmap_krings_create(na, tailroom);
443 leases = na->tailroom;
446 na->rx_rings[i]->nkr_leases = leases;
447 leases += na->num_rx_desc;
450 error = nm_alloc_bdgfwd(na);
452 netmap_krings_delete(na);
462 netmap_vale_vp_krings_delete(struct netmap_adapter *na)
464 nm_free_bdgfwd(na);
465 netmap_krings_delete(na);
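Lines 427-465 show how a VALE port sizes the kring tailroom for its RX leases and then carves one lease array per RX ring out of it. Put together from the matched lines, with the error paths condensed:

	/* Sketch reconstructed from the matched lines: one uint32_t lease per
	 * RX descriptor, for every RX ring, placed in the krings' tailroom. */
	static int
	netmap_vale_vp_krings_create_sketch(struct netmap_adapter *na)
	{
		u_int nrx = netmap_real_rings(na, NR_RX);
		u_int tailroom = sizeof(uint32_t) * na->num_rx_desc * nrx;
		uint32_t *leases;
		int error;
		u_int i;

		error = netmap_krings_create(na, tailroom);
		if (error)
			return error;

		leases = na->tailroom;
		for (i = 0; i < nrx; i++) {
			na->rx_rings[i]->nkr_leases = leases;
			leases += na->num_rx_desc;
		}

		error = nm_alloc_bdgfwd(na);
		if (error)
			netmap_krings_delete(na);	/* undo on failure */
		return error;
	}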
471 struct netmap_vp_adapter *na, u_int ring_nr);
484 struct netmap_vp_adapter *na =
485 (struct netmap_vp_adapter*)kring->na;
492 struct nm_bridge *b = na->na_bdg;
499 if (na->up.na_flags & NAF_BDG_MAYSLEEP)
523 slot->len > NETMAP_BUF_SIZE(&na->up) - nm_get_offset(kring, slot))) {
527 buf = ft[ft_i].ft_buf = NETMAP_BUF_BASE(&na->up);
542 ft_i = nm_vale_flush(ft, ft_i, na, ring_nr);
553 ft_i = nm_vale_flush(ft, ft_i, na, ring_nr);
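The txsync-side lines above (471-553) come from the batching loop that feeds nm_vale_flush(). A rough, hedged sketch of that loop, not the actual function: the 'batch' parameter stands in for the VALE batch limit (the dev.netmap.bridge_batch sysctl) and slot validation is reduced to a comment.

	static u_int
	vale_preflush_sketch(struct netmap_kring *kring, struct nm_bdg_fwd *ft,
		struct netmap_vp_adapter *na, u_int ring_nr, u_int batch)
	{
		u_int j, ft_i = 0;
		const u_int lim = kring->nkr_num_slots - 1, head = kring->rhead;

		for (j = kring->nr_hwcur; j != head; j = nm_next(j, lim)) {
			struct netmap_slot *slot = &kring->ring->slot[j];

			ft[ft_i].ft_len = slot->len;
			ft[ft_i].ft_buf = NMB(&na->up, slot);
			/* oversized or bad slots fall back to NETMAP_BUF_BASE(&na->up)
			 * with a zero length, as in the matched line 527 */
			if (++ft_i >= batch)
				ft_i = nm_vale_flush(ft, ft_i, na, ring_nr);
		}
		if (ft_i)	/* flush the leftovers */
			ft_i = nm_vale_flush(ft, ft_i, na, ring_nr);
		return ft_i;
	}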
609 struct netmap_vp_adapter *na, void *private_data)
615 u_int dst, mysrc = na->bdg_port;
638 if (((buf[6] & 1) == 0) && (na->last_smac != smac)) { /* valid src */
642 na->last_smac = ht[sh].mac = smac; /* XXX expire ? */
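Lines 609-642 belong to the default learning-bridge lookup. The following self-contained sketch shows the algorithm those lines imply; the entry struct, table size, hash function and names are hypothetical stand-ins, not netmap's own definitions.

	#include <stdint.h>

	struct mac_ent { uint64_t mac; uint32_t port; };	/* hypothetical entry */
	#define TABLE_SZ	1024
	#define BROADCAST_DST	0xffffffffU	/* stands in for NM_BDG_BROADCAST */

	static uint32_t
	mac_hash(const uint8_t *mac)	/* stand-in for the real hash */
	{
		uint32_t h = 2166136261u;
		int i;

		for (i = 0; i < 6; i++)
			h = (h ^ mac[i]) * 16777619u;
		return h & (TABLE_SZ - 1);
	}

	static uint32_t
	learning_lookup(const uint8_t *buf, uint64_t smac, uint64_t dmac,
		uint32_t mysrc, struct mac_ent *ht, uint64_t *last_smac)
	{
		uint32_t dst = BROADCAST_DST;	/* default: flood */

		if ((buf[6] & 1) == 0 && *last_smac != smac) {	/* unicast src, not cached */
			uint32_t sh = mac_hash(buf + 6);

			*last_smac = ht[sh].mac = smac;	/* learn: source MAC -> ingress port */
			ht[sh].port = mysrc;
		}
		if ((buf[0] & 1) == 0) {	/* unicast dst: try the table */
			uint32_t dh = mac_hash(buf);

			if (ht[dh].mac == dmac)
				dst = ht[dh].port;
		}
		return dst;
	}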
727 k->na->name,
741 nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
746 struct nm_bridge *b = na->na_bdg;
747 u_int i, me = na->bdg_port;
767 if (na->up.virt_hdr_len < ft[i].ft_len) {
768 ft[i].ft_offset = na->up.virt_hdr_len;
770 } else if (na->up.virt_hdr_len == ft[i].ft_len && ft[i].ft_flags & NS_MOREFRAG) {
779 dst_port = b->bdg_ops.lookup(start_ft, &dst_ring, na, b->private_data);
853 * - when na is attached but not activated yet;
854 * - when na is being deactivated but is still attached.
873 if (unlikely(dst_na->up.virt_hdr_len != na->up.virt_hdr_len)) {
875 nm_prlim(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len,
883 if (dst_na->mfs < na->mfs) {
888 * needed * na->mfs + x * H <= x * dst_na->mfs
895 needed = (needed * na->mfs) /
897 nm_prdis(3, "srcmtu=%u, dstmtu=%u, x=%u", na->mfs, dst_na->mfs, needed);
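The comment above describes picking the smallest x with needed * na->mfs + x * H <= x * dst_na->mfs when the destination frame size is smaller, i.e. roughly x = needed * na->mfs / (dst_na->mfs - H) rounded up. A standalone check of that arithmetic with made-up numbers:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned needed = 2;		/* source slots to forward */
		unsigned src_mfs = 9000;	/* na->mfs, e.g. jumbo frames */
		unsigned dst_mfs = 1514;	/* dst_na->mfs */
		unsigned hdr = 66;		/* made-up worst-case replicated header H */
		unsigned x = (needed * src_mfs) / (dst_mfs - hdr) + 1;

		/* 2 * 9000 = 18000 bytes to move; each destination slot carries
		 * at most 1514 - 66 = 1448 of them, so ceil(18000/1448) = 13
		 * slots are required; the formula gives 18000/1448 + 1 = 13. */
		printf("destination slots needed: %u\n", x);
		return 0;
	}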
970 bdg_mismatch_datapath(na, dst_na, ft_p, ring, &j, lim, &howmany);
1104 struct netmap_vp_adapter *na =
1105 (struct netmap_vp_adapter *)kring->na;
1114 if (!na->na_bdg) {
1131 nm_prinf("%s ring %d flags %d", na->up.name, kring->ring_id, flags);
1145 struct netmap_adapter *na;
1158 na = &vpna->up;
1160 na->ifp = ifp;
1161 strlcpy(na->name, hdr->nr_name, sizeof(na->name));
1164 na->num_tx_rings = req->nr_tx_rings;
1165 nm_bound_var(&na->num_tx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);
1166 req->nr_tx_rings = na->num_tx_rings; /* write back */
1167 na->num_rx_rings = req->nr_rx_rings;
1168 nm_bound_var(&na->num_rx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);
1169 req->nr_rx_rings = na->num_rx_rings; /* write back */
1172 na->num_tx_desc = req->nr_tx_slots;
1185 na->num_rx_desc = req->nr_rx_slots;
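Lines 1164-1185 use a clamp-and-write-back idiom: each user-requested count is bounded and the effective value is copied back into the request so user space sees the geometry it actually got. A standalone illustration with a hypothetical helper that only shows the idiom, not nm_bound_var()'s exact out-of-range policy:

	#include <stdio.h>

	/* Hypothetical stand-in: force *v into [lo, hi], falling back to dflt. */
	static unsigned
	bound_var(unsigned *v, unsigned dflt, unsigned lo, unsigned hi)
	{
		if (*v < lo || *v > hi)
			*v = dflt;
		return *v;
	}

	int
	main(void)
	{
		unsigned nr_tx_rings = 0;	/* user asked for 0 rings */

		bound_var(&nr_tx_rings, 1, 1, 16);	/* 16 is a made-up maximum */
		printf("effective tx rings: %u\n", nr_tx_rings);	/* prints 1 */
		return 0;
	}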
1196 na->na_flags |= (NAF_BDG_MAYSLEEP | NAF_OFFSETS);
1201 na->na_flags |= NAF_NATIVE;
1202 na->nm_txsync = netmap_vale_vp_txsync;
1203 na->nm_rxsync = netmap_vp_rxsync; /* use the one provided by bdg */
1204 na->nm_register = netmap_vp_reg; /* use the one provided by bdg */
1205 na->nm_krings_create = netmap_vale_vp_krings_create;
1206 na->nm_krings_delete = netmap_vale_vp_krings_delete;
1207 na->nm_dtor = netmap_vale_vp_dtor;
1209 na->nm_mem = nmd ?
1212 na->num_tx_rings, na->num_tx_desc,
1213 na->num_rx_rings, na->num_rx_desc,
1215 if (na->nm_mem == NULL)
1217 na->nm_bdg_attach = netmap_vale_vp_bdg_attach;
1219 error = netmap_attach_common(na);
1226 if (na->nm_mem != NULL)
1227 netmap_mem_put(na->nm_mem);
1236 netmap_vale_vp_bdg_attach(const char *name, struct netmap_adapter *na,
1239 struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na;
1244 na->na_vp = vpna;
1245 strlcpy(na->name, name, sizeof(na->name));
1246 na->na_hostvp = NULL;
1251 netmap_vale_bwrap_krings_create(struct netmap_adapter *na)
1256 error = netmap_vale_vp_krings_create(na);
1259 error = netmap_bwrap_krings_create_common(na);
1261 netmap_vale_vp_krings_delete(na);
1267 netmap_vale_bwrap_krings_delete(struct netmap_adapter *na)
1269 netmap_bwrap_krings_delete_common(na);
1270 netmap_vale_vp_krings_delete(na);
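netmap_vale_bwrap_krings_create()/_delete() (lines 1251-1270) compose the VALE-port kring setup with the bwrap-common one and unwind in reverse order on failure and on delete. Reassembled from the matched lines:

	/* Sketch of the compose/rollback pattern: build the VALE-port krings
	 * first, then the bwrap-common state, undoing step 1 if step 2 fails. */
	static int
	netmap_vale_bwrap_krings_create_sketch(struct netmap_adapter *na)
	{
		int error = netmap_vale_vp_krings_create(na);

		if (error)
			return error;
		error = netmap_bwrap_krings_create_common(na);
		if (error)
			netmap_vale_vp_krings_delete(na);	/* undo step 1 */
		return error;
	}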
1277 struct netmap_adapter *na = NULL;
1285 na = &bna->up.up;
1286 strlcpy(na->name, nr_name, sizeof(na->name));
1287 na->nm_register = netmap_bwrap_reg;
1288 na->nm_txsync = netmap_vale_vp_txsync;
1289 // na->nm_rxsync = netmap_bwrap_rxsync;
1290 na->nm_krings_create = netmap_vale_bwrap_krings_create;
1291 na->nm_krings_delete = netmap_vale_bwrap_krings_delete;
1292 na->nm_notify = netmap_bwrap_notify;
1304 error = netmap_bwrap_attach_common(na, hwna);
1312 netmap_get_vale_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1315 return netmap_get_bdg_na(hdr, na, nmd, create, &vale_bdg_ops);
1401 nm_update_info(struct nmreq_register *req, struct netmap_adapter *na)
1403 req->nr_rx_rings = na->num_rx_rings;
1404 req->nr_tx_rings = na->num_tx_rings;
1405 req->nr_rx_slots = na->num_rx_desc;
1406 req->nr_tx_slots = na->num_tx_desc;
1407 return netmap_mem_get_info(na->nm_mem, &req->nr_memsize, NULL,