netmap.c (239149) | netmap.c (241719) |
---|---|
1/* 2 * Copyright (C) 2011-2012 Matteo Landi, Luigi Rizzo. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. --- 51 unchanged lines hidden (view full) --- 60#endif /* linux */ 61 62#ifdef __APPLE__ 63#include "osx_glue.h" 64#endif /* __APPLE__ */ 65 66#ifdef __FreeBSD__ 67#include <sys/cdefs.h> /* prerequisite */ | 1/* 2 * Copyright (C) 2011-2012 Matteo Landi, Luigi Rizzo. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. --- 51 unchanged lines hidden (view full) --- 60#endif /* linux */ 61 62#ifdef __APPLE__ 63#include "osx_glue.h" 64#endif /* __APPLE__ */ 65 66#ifdef __FreeBSD__ 67#include <sys/cdefs.h> /* prerequisite */ |
68__FBSDID("$FreeBSD: head/sys/dev/netmap/netmap.c 239149 2012-08-09 14:46:52Z emaste $"); | 68__FBSDID("$FreeBSD: head/sys/dev/netmap/netmap.c 241719 2012-10-19 04:13:12Z luigi $"); |
69 70#include <sys/types.h> 71#include <sys/module.h> 72#include <sys/errno.h> 73#include <sys/param.h> /* defines used in kernel.h */ 74#include <sys/jail.h> 75#include <sys/kernel.h> /* types used in module initialization */ 76#include <sys/conf.h> /* cdevsw struct */ --- 16 unchanged lines hidden (view full) --- 93#include <machine/bus.h> /* bus_dmamap_* */ 94 95MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map"); 96#endif /* __FreeBSD__ */ 97 98#include <net/netmap.h> 99#include <dev/netmap/netmap_kern.h> 100 | 69 70#include <sys/types.h> 71#include <sys/module.h> 72#include <sys/errno.h> 73#include <sys/param.h> /* defines used in kernel.h */ 74#include <sys/jail.h> 75#include <sys/kernel.h> /* types used in module initialization */ 76#include <sys/conf.h> /* cdevsw struct */ --- 16 unchanged lines hidden (view full) --- 93#include <machine/bus.h> /* bus_dmamap_* */ 94 95MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map"); 96#endif /* __FreeBSD__ */ 97 98#include <net/netmap.h> 99#include <dev/netmap/netmap_kern.h> 100 |
101/* 102 * lock and unlock for the netmap memory allocator 103 */ 104#define NMA_LOCK() mtx_lock(&nm_mem->nm_mtx); 105#define NMA_UNLOCK() mtx_unlock(&nm_mem->nm_mtx); 106struct netmap_mem_d; 107static struct netmap_mem_d *nm_mem; /* Our memory allocator. */ 108 | |
109u_int netmap_total_buffers; | 101u_int netmap_total_buffers; |
102u_int netmap_buf_size; |
|
110char *netmap_buffer_base; /* address of an invalid buffer */ 111 112/* user-controlled variables */ 113int netmap_verbose; 114 115static int netmap_no_timestamp; /* don't timestamp on rxsync */ 116 117SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args"); 118SYSCTL_INT(_dev_netmap, OID_AUTO, verbose, 119 CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode"); 120SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp, 121 CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp"); | 103char *netmap_buffer_base; /* address of an invalid buffer */ 104 105/* user-controlled variables */ 106int netmap_verbose; 107 108static int netmap_no_timestamp; /* don't timestamp on rxsync */ 109 110SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args"); 111SYSCTL_INT(_dev_netmap, OID_AUTO, verbose, 112 CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode"); 113SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp, 114 CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp"); |
122u_int netmap_buf_size = 2048; 123TUNABLE_INT("hw.netmap.buf_size", (u_int *)&netmap_buf_size); 124SYSCTL_INT(_dev_netmap, OID_AUTO, buf_size, 125 CTLFLAG_RD, &netmap_buf_size, 0, "Size of packet buffers"); | |
126int netmap_mitigate = 1; 127SYSCTL_INT(_dev_netmap, OID_AUTO, mitigate, CTLFLAG_RW, &netmap_mitigate, 0, ""); 128int netmap_no_pendintr = 1; 129SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, 130 CTLFLAG_RW, &netmap_no_pendintr, 0, "Always look for new received packets."); 131 132int netmap_drop = 0; /* debugging */ 133int netmap_flags = 0; /* debug flags */ --- 155 unchanged lines hidden (view full) --- 289/*------------- memory allocator -----------------*/ 290#ifdef NETMAP_MEM2 291#include "netmap_mem2.c" 292#else /* !NETMAP_MEM2 */ 293#include "netmap_mem1.c" 294#endif /* !NETMAP_MEM2 */ 295/*------------ end of memory allocator ----------*/ 296 | 115int netmap_mitigate = 1; 116SYSCTL_INT(_dev_netmap, OID_AUTO, mitigate, CTLFLAG_RW, &netmap_mitigate, 0, ""); 117int netmap_no_pendintr = 1; 118SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, 119 CTLFLAG_RW, &netmap_no_pendintr, 0, "Always look for new received packets."); 120 121int netmap_drop = 0; /* debugging */ 122int netmap_flags = 0; /* debug flags */ --- 155 unchanged lines hidden (view full) --- 278/*------------- memory allocator -----------------*/ 279#ifdef NETMAP_MEM2 280#include "netmap_mem2.c" 281#else /* !NETMAP_MEM2 */ 282#include "netmap_mem1.c" 283#endif /* !NETMAP_MEM2 */ 284/*------------ end of memory allocator ----------*/ 285 |
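Annotator's note: the `SYSCTL_INT(_dev_netmap, ...)` declarations above export these knobs under the `dev.netmap.*` tree. A minimal userspace sketch of reading and writing them via FreeBSD's sysctlbyname(3) — illustrative only, not part of the driver, and the chosen knob values are arbitrary:

```c
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int v;
	size_t len = sizeof(v);

	/* read a knob declared with SYSCTL_INT(_dev_netmap, ...) */
	if (sysctlbyname("dev.netmap.verbose", &v, &len, NULL, 0) == 0)
		printf("dev.netmap.verbose = %d\n", v);

	/* enable "always look for new received packets" */
	v = 1;
	sysctlbyname("dev.netmap.no_pendintr", NULL, NULL, &v, sizeof(v));
	return (0);
}
```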
297/* Structure associated to each thread which registered an interface. */ | 286 287/* Structure associated to each thread which registered an interface. 288 * 289 * The first 4 fields of this structure are written by NIOCREGIF and 290 * read by poll() and NIOC?XSYNC. 291 * There is low contention among writers (actually, a correct user program 292 * should have no contention among writers) and among writers and readers, 293 * so we use a single global lock to protect the structure initialization. 294 * Since initialization involves the allocation of memory, we reuse the memory 295 * allocator lock. 296 * Read access to the structure is lock free. Readers must check that 297 * np_nifp is not NULL before using the other fields. 298 * If np_nifp is NULL initialization has not been performed, so they should 299 * return an error to userlevel. 300 * 301 * The ref_done field is used to regulate access to the refcount in the 302 * memory allocator. The refcount must be incremented at most once for 303 * each open("/dev/netmap"). The increment is performed by the first 304 * function that calls netmap_get_memory() (currently called by 305 * mmap(), NIOCGINFO and NIOCREGIF). 306 * If the refcount is incremented, it is then decremented when the 307 * private structure is destroyed. 308 */ |
298struct netmap_priv_d { | 309struct netmap_priv_d { |
299 struct netmap_if *np_nifp; /* netmap interface descriptor. */ | 310 struct netmap_if * volatile np_nifp; /* netmap interface descriptor. */ |
300 301 struct ifnet *np_ifp; /* device for which we hold a reference */ 302 int np_ringid; /* from the ioctl */ 303 u_int np_qfirst, np_qlast; /* range of rings to scan */ 304 uint16_t np_txpoll; | 311 312 struct ifnet *np_ifp; /* device for which we hold a reference */ 313 int np_ringid; /* from the ioctl */ 314 u_int np_qfirst, np_qlast; /* range of rings to scan */ 315 uint16_t np_txpoll; |
316 317 unsigned long ref_done; /* use with NMA_LOCK held */ |
|
305}; 306 307 | 318}; 319 320 |
321static int 322netmap_get_memory(struct netmap_priv_d* p) 323{ 324 int error = 0; 325 NMA_LOCK(); 326 if (!p->ref_done) { 327 error = netmap_memory_finalize(); 328 if (!error) 329 p->ref_done = 1; 330 } 331 NMA_UNLOCK(); 332 return error; 333} 334 |
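Annotator's note: the discipline described in the comment above amounts to a publish/subscribe handshake between NIOCREGIF and the lock-free readers. Condensed from the ioctl and poll hunks later in this diff (a simplified sketch, not a verbatim excerpt):

```c
/* Writer side: tail of the NIOCREGIF handler (simplified). */
priv->np_ifp = ifp;		/* ... and the other private fields ... */
wmb();				/* make the writes above visible first */
priv->np_nifp = nifp;		/* the commitment: readers may proceed */

/* Reader side: poll() and NIOC?XSYNC (simplified). */
if (priv->np_nifp == NULL)
	return (ENXIO);		/* registration not complete yet */
rmb();				/* don't let reads overtake the check */
ifp = priv->np_ifp;		/* guaranteed non-NULL past this point */
```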
|
308/* 309 * File descriptor's private data destructor. 310 * 311 * Call nm_register(ifp,0) to stop netmap mode on the interface and 312 * revert to normal operation. We expect that np_ifp has not gone. 313 */ | 335/* 336 * File descriptor's private data destructor. 337 * 338 * Call nm_register(ifp,0) to stop netmap mode on the interface and 339 * revert to normal operation. We expect that np_ifp has not gone. 340 */ |
341/* call with NMA_LOCK held */ |
|
314static void 315netmap_dtor_locked(void *data) 316{ 317 struct netmap_priv_d *priv = data; 318 struct ifnet *ifp = priv->np_ifp; 319 struct netmap_adapter *na = NA(ifp); 320 struct netmap_if *nifp = priv->np_nifp; 321 --- 23 unchanged lines hidden (view full) --- 345 */ 346 for (i = 0; i < na->num_tx_rings + 1; i++) 347 selwakeuppri(&na->tx_rings[i].si, PI_NET); 348 for (i = 0; i < na->num_rx_rings + 1; i++) 349 selwakeuppri(&na->rx_rings[i].si, PI_NET); 350 selwakeuppri(&na->tx_si, PI_NET); 351 selwakeuppri(&na->rx_si, PI_NET); 352 /* release all buffers */ | 342static void 343netmap_dtor_locked(void *data) 344{ 345 struct netmap_priv_d *priv = data; 346 struct ifnet *ifp = priv->np_ifp; 347 struct netmap_adapter *na = NA(ifp); 348 struct netmap_if *nifp = priv->np_nifp; 349 --- 23 unchanged lines hidden (view full) --- 373 */ 374 for (i = 0; i < na->num_tx_rings + 1; i++) 375 selwakeuppri(&na->tx_rings[i].si, PI_NET); 376 for (i = 0; i < na->num_rx_rings + 1; i++) 377 selwakeuppri(&na->rx_rings[i].si, PI_NET); 378 selwakeuppri(&na->tx_si, PI_NET); 379 selwakeuppri(&na->rx_si, PI_NET); 380 /* release all buffers */ |
353 NMA_LOCK(); | |
354 for (i = 0; i < na->num_tx_rings + 1; i++) { 355 struct netmap_ring *ring = na->tx_rings[i].ring; 356 lim = na->tx_rings[i].nkr_num_slots; 357 for (j = 0; j < lim; j++) 358 netmap_free_buf(nifp, ring->slot[j].buf_idx); 359 /* knlist_destroy(&na->tx_rings[i].si.si_note); */ 360 mtx_destroy(&na->tx_rings[i].q_lock); 361 } 362 for (i = 0; i < na->num_rx_rings + 1; i++) { 363 struct netmap_ring *ring = na->rx_rings[i].ring; 364 lim = na->rx_rings[i].nkr_num_slots; 365 for (j = 0; j < lim; j++) 366 netmap_free_buf(nifp, ring->slot[j].buf_idx); 367 /* knlist_destroy(&na->rx_rings[i].si.si_note); */ 368 mtx_destroy(&na->rx_rings[i].q_lock); 369 } 370 /* XXX kqueue(9) needed; these will mirror knlist_init. */ 371 /* knlist_destroy(&na->tx_si.si_note); */ 372 /* knlist_destroy(&na->rx_si.si_note); */ | 381 for (i = 0; i < na->num_tx_rings + 1; i++) { 382 struct netmap_ring *ring = na->tx_rings[i].ring; 383 lim = na->tx_rings[i].nkr_num_slots; 384 for (j = 0; j < lim; j++) 385 netmap_free_buf(nifp, ring->slot[j].buf_idx); 386 /* knlist_destroy(&na->tx_rings[i].si.si_note); */ 387 mtx_destroy(&na->tx_rings[i].q_lock); 388 } 389 for (i = 0; i < na->num_rx_rings + 1; i++) { 390 struct netmap_ring *ring = na->rx_rings[i].ring; 391 lim = na->rx_rings[i].nkr_num_slots; 392 for (j = 0; j < lim; j++) 393 netmap_free_buf(nifp, ring->slot[j].buf_idx); 394 /* knlist_destroy(&na->rx_rings[i].si.si_note); */ 395 mtx_destroy(&na->rx_rings[i].q_lock); 396 } 397 /* XXX kqueue(9) needed; these will mirror knlist_init. */ 398 /* knlist_destroy(&na->tx_si.si_note); */ 399 /* knlist_destroy(&na->rx_si.si_note); */ |
373 NMA_UNLOCK(); | |
374 netmap_free_rings(na); 375 wakeup(na); 376 } 377 netmap_if_free(nifp); 378} 379 380static void 381nm_if_rele(struct ifnet *ifp) --- 16 unchanged lines hidden (view full) --- 398 ND("want to disconnect %s from the bridge", ifp->if_xname); 399 full = 0; 400 for (i = 0; i < NM_BDG_MAXPORTS; i++) { 401 if (b->bdg_ports[i] == ifp) { 402 b->bdg_ports[i] = NULL; 403 bzero(ifp, sizeof(*ifp)); 404 free(ifp, M_DEVBUF); 405 break; | 400 netmap_free_rings(na); 401 wakeup(na); 402 } 403 netmap_if_free(nifp); 404} 405 406static void 407nm_if_rele(struct ifnet *ifp) --- 16 unchanged lines hidden (view full) --- 424 ND("want to disconnect %s from the bridge", ifp->if_xname); 425 full = 0; 426 for (i = 0; i < NM_BDG_MAXPORTS; i++) { 427 if (b->bdg_ports[i] == ifp) { 428 b->bdg_ports[i] = NULL; 429 bzero(ifp, sizeof(*ifp)); 430 free(ifp, M_DEVBUF); 431 break; |
406 } | 432 } |
407 else if (b->bdg_ports[i] != NULL) 408 full = 1; 409 } 410 BDG_UNLOCK(b); 411 if (full == 0) { 412 ND("freeing bridge %d", b - nm_bridges); 413 b->namelen = 0; 414 } 415 BDG_UNLOCK(nm_bridges); 416 if (i == NM_BDG_MAXPORTS) 417 D("ouch, cannot find ifp to remove"); 418#endif /* NM_BRIDGE */ 419} 420 421static void 422netmap_dtor(void *data) 423{ 424 struct netmap_priv_d *priv = data; 425 struct ifnet *ifp = priv->np_ifp; | 433 else if (b->bdg_ports[i] != NULL) 434 full = 1; 435 } 436 BDG_UNLOCK(b); 437 if (full == 0) { 438 ND("freeing bridge %d", b - nm_bridges); 439 b->namelen = 0; 440 } 441 BDG_UNLOCK(nm_bridges); 442 if (i == NM_BDG_MAXPORTS) 443 D("ouch, cannot find ifp to remove"); 444#endif /* NM_BRIDGE */ 445} 446 447static void 448netmap_dtor(void *data) 449{ 450 struct netmap_priv_d *priv = data; 451 struct ifnet *ifp = priv->np_ifp; |
426 struct netmap_adapter *na = NA(ifp); | 452 struct netmap_adapter *na; |
427 | 453 |
428 na->nm_lock(ifp, NETMAP_REG_LOCK, 0); 429 netmap_dtor_locked(data); 430 na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0); | 454 NMA_LOCK(); 455 if (ifp) { 456 na = NA(ifp); 457 na->nm_lock(ifp, NETMAP_REG_LOCK, 0); 458 netmap_dtor_locked(data); 459 na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0); |
431 | 460 |
432 nm_if_rele(ifp); | 461 nm_if_rele(ifp); 462 } 463 if (priv->ref_done) { 464 netmap_memory_deref(); 465 } 466 NMA_UNLOCK(); |
433 bzero(priv, sizeof(*priv)); /* XXX for safety */ 434 free(priv, M_DEVBUF); 435} 436 | 467 bzero(priv, sizeof(*priv)); /* XXX for safety */ 468 free(priv, M_DEVBUF); 469} 470 |
471#ifdef __FreeBSD__ 472#include <vm/vm.h> 473#include <vm/vm_param.h> 474#include <vm/vm_object.h> 475#include <vm/vm_page.h> 476#include <vm/vm_pager.h> 477#include <vm/uma.h> |
|
437 | 478 |
479static struct cdev_pager_ops saved_cdev_pager_ops; 480 481static int 482netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 483 vm_ooffset_t foff, struct ucred *cred, u_short *color) 484{ 485 D("first mmap for %p", handle); 486 return saved_cdev_pager_ops.cdev_pg_ctor(handle, 487 size, prot, foff, cred, color); 488} 489 490static void 491netmap_dev_pager_dtor(void *handle) 492{ 493 saved_cdev_pager_ops.cdev_pg_dtor(handle); 494 D("ready to release memory for %p", handle); 495} 496 497 498static struct cdev_pager_ops netmap_cdev_pager_ops = { 499 .cdev_pg_ctor = netmap_dev_pager_ctor, 500 .cdev_pg_dtor = netmap_dev_pager_dtor, 501 .cdev_pg_fault = NULL, 502}; 503 504static int 505netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff, 506 vm_size_t objsize, vm_object_t *objp, int prot) 507{ 508 vm_object_t obj; 509 510 D("cdev %p foff %d size %d objp %p prot %d", cdev, *foff, 511 objsize, objp, prot); 512 obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff, 513 curthread->td_ucred); 514 ND("returns obj %p", obj); 515 if (obj == NULL) 516 return EINVAL; 517 if (saved_cdev_pager_ops.cdev_pg_fault == NULL) { 518 D("initialize cdev_pager_ops"); 519 saved_cdev_pager_ops = *(obj->un_pager.devp.ops); 520 netmap_cdev_pager_ops.cdev_pg_fault = 521 saved_cdev_pager_ops.cdev_pg_fault; 522 }; 523 obj->un_pager.devp.ops = &netmap_cdev_pager_ops; 524 *objp = obj; 525 return 0; 526} 527#endif /* __FreeBSD__ */ 528 529 |
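Annotator's note: the new FreeBSD-only block above interposes on the device pager so the driver can observe the first mmap() of the region (cdev_pg_ctor) and its final teardown (cdev_pg_dtor), while page faults keep going to the stock handler captured on first use. The save-and-forward pattern, reduced to its essence with hypothetical names (illustration only, not the kernel API):

```c
/* Hypothetical ops table standing in for struct cdev_pager_ops. */
struct pg_ops {
	int  (*ctor)(void *handle);
	void (*dtor)(void *handle);
};

static struct pg_ops saved_ops;		/* stock entries, captured once */

static int
my_ctor(void *handle)
{
	/* ... observe the first mapping here ... */
	return (saved_ops.ctor(handle));	/* then forward */
}

static void
my_dtor(void *handle)
{
	saved_ops.dtor(handle);			/* forward first */
	/* ... observe the teardown here ... */
}

static struct pg_ops my_ops = { my_ctor, my_dtor };

static void
interpose(const struct pg_ops **opsp)
{
	if (saved_ops.ctor == NULL)
		saved_ops = **opsp;	/* remember the defaults once */
	*opsp = &my_ops;		/* route future calls through us */
}
```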
|
438/* 439 * mmap(2) support for the "netmap" device. 440 * 441 * Expose all the memory previously allocated by our custom memory 442 * allocator: this way the user has only to issue a single mmap(2), and 443 * can work on all the data structures flawlessly. 444 * 445 * Return 0 on success, -1 otherwise. --- 5 unchanged lines hidden (view full) --- 451#if __FreeBSD_version < 900000 452 vm_offset_t offset, vm_paddr_t *paddr, int nprot 453#else 454 vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, 455 __unused vm_memattr_t *memattr 456#endif 457 ) 458{ | 530/* 531 * mmap(2) support for the "netmap" device. 532 * 533 * Expose all the memory previously allocated by our custom memory 534 * allocator: this way the user has only to issue a single mmap(2), and 535 * can work on all the data structures flawlessly. 536 * 537 * Return 0 on success, -1 otherwise. --- 5 unchanged lines hidden (view full) --- 543#if __FreeBSD_version < 900000 544 vm_offset_t offset, vm_paddr_t *paddr, int nprot 545#else 546 vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, 547 __unused vm_memattr_t *memattr 548#endif 549 ) 550{ |
551 int error = 0; 552 struct netmap_priv_d *priv; 553 |
|
459 if (nprot & PROT_EXEC) 460 return (-1); // XXX -1 or EINVAL ? 461 | 554 if (nprot & PROT_EXEC) 555 return (-1); // XXX -1 or EINVAL ? 556 |
557 error = devfs_get_cdevpriv((void **)&priv); 558 if (error == EBADF) { /* called on fault, memory is initialized */ 559 ND(5, "handling fault at ofs 0x%x", offset); 560 error = 0; 561 } else if (error == 0) /* make sure memory is set */ 562 error = netmap_get_memory(priv); 563 if (error) 564 return (error); 565 |
|
462 ND("request for offset 0x%x", (uint32_t)offset); 463 *paddr = netmap_ofstophys(offset); 464 | 566 ND("request for offset 0x%x", (uint32_t)offset); 567 *paddr = netmap_ofstophys(offset); 568 |
465 return (0); | 569 return (*paddr ? 0 : ENOMEM); |
466} | 570} |
571 572static int 573netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td) 574{ 575 D("dev %p fflag 0x%x devtype %d td %p", dev, fflag, devtype, td); 576 return 0; 577} 578 579static int 580netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 581{ 582 struct netmap_priv_d *priv; 583 int error; 584 585 priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF, 586 M_NOWAIT | M_ZERO); 587 if (priv == NULL) 588 return ENOMEM; 589 590 error = devfs_set_cdevpriv(priv, netmap_dtor); 591 if (error) 592 return error; 593 594 return 0; 595} |
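Annotator's note: for context, the canonical userspace sequence that reaches these entry points — open() now allocates the private structure, NIOCREGIF binds an interface and triggers the first netmap_get_memory() call, and a single mmap() exposes the whole region — looks roughly like this. A sketch with error handling elided; NETMAP_IF() comes from net/netmap_user.h, and "em0" is a placeholder interface name:

```c
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <net/netmap.h>
#include <net/netmap_user.h>

int
main(void)
{
	struct nmreq req;
	struct netmap_if *nifp;
	void *mem;
	int fd;

	fd = open("/dev/netmap", O_RDWR);	/* priv allocated here */
	memset(&req, 0, sizeof(req));
	req.nr_version = NETMAP_API;
	strncpy(req.nr_name, "em0", sizeof(req.nr_name));
	ioctl(fd, NIOCREGIF, &req);	/* binds em0, finalizes memory */
	mem = mmap(NULL, req.nr_memsize, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);		/* one mapping covers everything */
	nifp = NETMAP_IF(mem, req.nr_offset);	/* our netmap_if inside it */
	/* ... rings are then reachable via NETMAP_TXRING()/NETMAP_RXRING() ... */
	return (0);
}
```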
|
467#endif /* __FreeBSD__ */ 468 469 470/* 471 * Handlers for synchronization of the queues from/to the host. 472 * 473 * netmap_sync_to_host() passes packets up. We are called from a 474 * system call in user process context, and the only contention --- 170 unchanged lines hidden (view full) --- 645 if (! *ifp) 646#endif /* NM_BRIDGE */ 647 *ifp = ifunit_ref(name); 648 if (*ifp == NULL) 649 return (ENXIO); 650 /* can do this if the capability exists and if_pspare[0] 651 * points to the netmap descriptor. 652 */ | 596#endif /* __FreeBSD__ */ 597 598 599/* 600 * Handlers for synchronization of the queues from/to the host. 601 * 602 * netmap_sync_to_host() passes packets up. We are called from a 603 * system call in user process context, and the only contention --- 170 unchanged lines hidden (view full) --- 774 if (! *ifp) 775#endif /* NM_BRIDGE */ 776 *ifp = ifunit_ref(name); 777 if (*ifp == NULL) 778 return (ENXIO); 779 /* can do this if the capability exists and if_pspare[0] 780 * points to the netmap descriptor. 781 */ |
653 if ((*ifp)->if_capabilities & IFCAP_NETMAP && NA(*ifp)) | 782 if (NETMAP_CAPABLE(*ifp)) |
654 return 0; /* valid pointer, we hold the refcount */ 655 nm_if_rele(*ifp); 656 return EINVAL; // not NETMAP capable 657} 658 659 660/* 661 * Error routine called when txsync/rxsync detects an error. --- 9 unchanged lines hidden (view full) --- 671 */ 672int 673netmap_ring_reinit(struct netmap_kring *kring) 674{ 675 struct netmap_ring *ring = kring->ring; 676 u_int i, lim = kring->nkr_num_slots - 1; 677 int errors = 0; 678 | 783 return 0; /* valid pointer, we hold the refcount */ 784 nm_if_rele(*ifp); 785 return EINVAL; // not NETMAP capable 786} 787 788 789/* 790 * Error routine called when txsync/rxsync detects an error. --- 9 unchanged lines hidden (view full) --- 800 */ 801int 802netmap_ring_reinit(struct netmap_kring *kring) 803{ 804 struct netmap_ring *ring = kring->ring; 805 u_int i, lim = kring->nkr_num_slots - 1; 806 int errors = 0; 807 |
679 D("called for %s", kring->na->ifp->if_xname); | 808 RD(10, "called for %s", kring->na->ifp->if_xname); |
680 if (ring->cur > lim) 681 errors++; 682 for (i = 0; i <= lim; i++) { 683 u_int idx = ring->slot[i].buf_idx; 684 u_int len = ring->slot[i].len; 685 if (idx < 2 || idx >= netmap_total_buffers) { 686 if (!errors++) 687 D("bad buffer at slot %d idx %d len %d ", i, idx, len); --- 5 unchanged lines hidden (view full) --- 693 D("bad len %d at slot %d idx %d", 694 len, i, idx); 695 } 696 } 697 if (errors) { 698 int pos = kring - kring->na->tx_rings; 699 int n = kring->na->num_tx_rings + 1; 700 | 809 if (ring->cur > lim) 810 errors++; 811 for (i = 0; i <= lim; i++) { 812 u_int idx = ring->slot[i].buf_idx; 813 u_int len = ring->slot[i].len; 814 if (idx < 2 || idx >= netmap_total_buffers) { 815 if (!errors++) 816 D("bad buffer at slot %d idx %d len %d ", i, idx, len); --- 5 unchanged lines hidden (view full) --- 822 D("bad len %d at slot %d idx %d", 823 len, i, idx); 824 } 825 } 826 if (errors) { 827 int pos = kring - kring->na->tx_rings; 828 int n = kring->na->num_tx_rings + 1; 829 |
701 D("total %d errors", errors); | 830 RD(10, "total %d errors", errors); |
702 errors++; | 831 errors++; |
703 D("%s %s[%d] reinit, cur %d -> %d avail %d -> %d", | 832 RD(10, "%s %s[%d] reinit, cur %d -> %d avail %d -> %d", |
704 kring->na->ifp->if_xname, 705 pos < n ? "TX" : "RX", pos < n ? pos : pos - n, 706 ring->cur, kring->nr_hwcur, 707 ring->avail, kring->nr_hwavail); 708 ring->cur = kring->nr_hwcur; 709 ring->avail = kring->nr_hwavail; 710 } 711 return (errors ? 1 : 0); --- 86 unchanged lines hidden (view full) --- 798#define devfs_clear_cdevpriv() do { \ 799 netmap_dtor(priv); ((struct file *)td)->private_data = 0; \ 800 } while (0) 801#endif /* linux */ 802 803 CURVNET_SET(TD_TO_VNET(td)); 804 805 error = devfs_get_cdevpriv((void **)&priv); | 833 kring->na->ifp->if_xname, 834 pos < n ? "TX" : "RX", pos < n ? pos : pos - n, 835 ring->cur, kring->nr_hwcur, 836 ring->avail, kring->nr_hwavail); 837 ring->cur = kring->nr_hwcur; 838 ring->avail = kring->nr_hwavail; 839 } 840 return (errors ? 1 : 0); --- 86 unchanged lines hidden (view full) --- 927#define devfs_clear_cdevpriv() do { \ 928 netmap_dtor(priv); ((struct file *)td)->private_data = 0; \ 929 } while (0) 930#endif /* linux */ 931 932 CURVNET_SET(TD_TO_VNET(td)); 933 934 error = devfs_get_cdevpriv((void **)&priv); |
806 if (error != ENOENT && error != 0) { | 935 if (error) { |
807 CURVNET_RESTORE(); | 936 CURVNET_RESTORE(); |
808 return (error); | 937 /* XXX ENOENT should be impossible, since the priv 938 * is now created in the open */ 939 return (error == ENOENT ? ENXIO : error); |
809 } 810 | 940 } 941 |
811 error = 0; /* Could be ENOENT */ | |
812 nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0'; /* truncate name */ 813 switch (cmd) { 814 case NIOCGINFO: /* return capabilities etc */ | 942 nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0'; /* truncate name */ 943 switch (cmd) { 944 case NIOCGINFO: /* return capabilities etc */ |
815 /* memsize is always valid */ 816 nmr->nr_memsize = nm_mem->nm_totalsize; 817 nmr->nr_offset = 0; 818 nmr->nr_rx_rings = nmr->nr_tx_rings = 0; 819 nmr->nr_rx_slots = nmr->nr_tx_slots = 0; | |
820 if (nmr->nr_version != NETMAP_API) { 821 D("API mismatch got %d have %d", 822 nmr->nr_version, NETMAP_API); 823 nmr->nr_version = NETMAP_API; 824 error = EINVAL; 825 break; 826 } | 945 if (nmr->nr_version != NETMAP_API) { 946 D("API mismatch got %d have %d", 947 nmr->nr_version, NETMAP_API); 948 nmr->nr_version = NETMAP_API; 949 error = EINVAL; 950 break; 951 } |
952 /* update configuration */ 953 error = netmap_get_memory(priv); 954 ND("get_memory returned %d", error); 955 if (error) 956 break; 957 /* memsize is always valid */ 958 nmr->nr_memsize = nm_mem.nm_totalsize; 959 nmr->nr_offset = 0; 960 nmr->nr_rx_rings = nmr->nr_tx_rings = 0; 961 nmr->nr_rx_slots = nmr->nr_tx_slots = 0; |
|
827 if (nmr->nr_name[0] == '\0') /* just get memory info */ 828 break; 829 error = get_ifp(nmr->nr_name, &ifp); /* get a refcount */ 830 if (error) 831 break; 832 na = NA(ifp); /* retrieve netmap_adapter */ 833 nmr->nr_rx_rings = na->num_rx_rings; 834 nmr->nr_tx_rings = na->num_tx_rings; 835 nmr->nr_rx_slots = na->num_rx_desc; 836 nmr->nr_tx_slots = na->num_tx_desc; 837 nm_if_rele(ifp); /* return the refcount */ 838 break; 839 840 case NIOCREGIF: 841 if (nmr->nr_version != NETMAP_API) { 842 nmr->nr_version = NETMAP_API; 843 error = EINVAL; 844 break; 845 } | 962 if (nmr->nr_name[0] == '\0') /* just get memory info */ 963 break; 964 error = get_ifp(nmr->nr_name, &ifp); /* get a refcount */ 965 if (error) 966 break; 967 na = NA(ifp); /* retrieve netmap_adapter */ 968 nmr->nr_rx_rings = na->num_rx_rings; 969 nmr->nr_tx_rings = na->num_tx_rings; 970 nmr->nr_rx_slots = na->num_rx_desc; 971 nmr->nr_tx_slots = na->num_tx_desc; 972 nm_if_rele(ifp); /* return the refcount */ 973 break; 974 975 case NIOCREGIF: 976 if (nmr->nr_version != NETMAP_API) { 977 nmr->nr_version = NETMAP_API; 978 error = EINVAL; 979 break; 980 } |
846 if (priv != NULL) { /* thread already registered */ | 981 /* ensure allocators are ready */ 982 error = netmap_get_memory(priv); 983 ND("get_memory returned %d", error); 984 if (error) 985 break; 986 987 /* protect access to priv from concurrent NIOCREGIF */ 988 NMA_LOCK(); 989 if (priv->np_ifp != NULL) { /* thread already registered */ |
847 error = netmap_set_ringid(priv, nmr->nr_ringid); | 990 error = netmap_set_ringid(priv, nmr->nr_ringid); |
991 NMA_UNLOCK(); |
|
848 break; 849 } 850 /* find the interface and a reference */ 851 error = get_ifp(nmr->nr_name, &ifp); /* keep reference */ | 992 break; 993 } 994 /* find the interface and a reference */ 995 error = get_ifp(nmr->nr_name, &ifp); /* keep reference */ |
852 if (error) | 996 if (error) { 997 NMA_UNLOCK(); |
853 break; | 998 break; |
854 na = NA(ifp); /* retrieve netmap adapter */ 855 /* 856 * Allocate the private per-thread structure. 857 * XXX perhaps we can use a blocking malloc ? 858 */ 859 priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF, 860 M_NOWAIT | M_ZERO); 861 if (priv == NULL) { 862 error = ENOMEM; 863 nm_if_rele(ifp); /* return the refcount */ 864 break; | |
865 } | 999 } |
1000 na = NA(ifp); /* retrieve netmap adapter */ |
|
866 867 for (i = 10; i > 0; i--) { 868 na->nm_lock(ifp, NETMAP_REG_LOCK, 0); 869 if (!NETMAP_DELETING(na)) 870 break; 871 na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0); 872 tsleep(na, 0, "NIOCREGIF", hz/10); 873 } 874 if (i == 0) { 875 D("too many NIOCREGIF attempts, give up"); 876 error = EINVAL; | 1001 1002 for (i = 10; i > 0; i--) { 1003 na->nm_lock(ifp, NETMAP_REG_LOCK, 0); 1004 if (!NETMAP_DELETING(na)) 1005 break; 1006 na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0); 1007 tsleep(na, 0, "NIOCREGIF", hz/10); 1008 } 1009 if (i == 0) { 1010 D("too many NIOCREGIF attempts, give up"); 1011 error = EINVAL; |
877 free(priv, M_DEVBUF); | |
878 nm_if_rele(ifp); /* return the refcount */ | 1012 nm_if_rele(ifp); /* return the refcount */ |
1013 NMA_UNLOCK(); |
|
879 break; 880 } 881 882 priv->np_ifp = ifp; /* store the reference */ 883 error = netmap_set_ringid(priv, nmr->nr_ringid); 884 if (error) 885 goto error; | 1014 break; 1015 } 1016 1017 priv->np_ifp = ifp; /* store the reference */ 1018 error = netmap_set_ringid(priv, nmr->nr_ringid); 1019 if (error) 1020 goto error; |
886 priv->np_nifp = nifp = netmap_if_new(nmr->nr_name, na); | 1021 nifp = netmap_if_new(nmr->nr_name, na); |
887 if (nifp == NULL) { /* allocation failed */ 888 error = ENOMEM; 889 } else if (ifp->if_capenable & IFCAP_NETMAP) { 890 /* was already set */ 891 } else { 892 /* Otherwise set the card in netmap mode 893 * and make it use the shared buffers. 894 */ 895 for (i = 0 ; i < na->num_tx_rings + 1; i++) 896 mtx_init(&na->tx_rings[i].q_lock, "nm_txq_lock", MTX_NETWORK_LOCK, MTX_DEF); 897 for (i = 0 ; i < na->num_rx_rings + 1; i++) { 898 mtx_init(&na->rx_rings[i].q_lock, "nm_rxq_lock", MTX_NETWORK_LOCK, MTX_DEF); 899 } 900 error = na->nm_register(ifp, 1); /* mode on */ | 1022 if (nifp == NULL) { /* allocation failed */ 1023 error = ENOMEM; 1024 } else if (ifp->if_capenable & IFCAP_NETMAP) { 1025 /* was already set */ 1026 } else { 1027 /* Otherwise set the card in netmap mode 1028 * and make it use the shared buffers. 1029 */ 1030 for (i = 0 ; i < na->num_tx_rings + 1; i++) 1031 mtx_init(&na->tx_rings[i].q_lock, "nm_txq_lock", MTX_NETWORK_LOCK, MTX_DEF); 1032 for (i = 0 ; i < na->num_rx_rings + 1; i++) { 1033 mtx_init(&na->rx_rings[i].q_lock, "nm_rxq_lock", MTX_NETWORK_LOCK, MTX_DEF); 1034 } 1035 error = na->nm_register(ifp, 1); /* mode on */ |
901 if (error) | 1036 if (error) { |
902 netmap_dtor_locked(priv); | 1037 netmap_dtor_locked(priv); |
1038 netmap_if_free(nifp); 1039 } |
|
903 } 904 905 if (error) { /* reg. failed, release priv and ref */ 906error: 907 na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0); 908 nm_if_rele(ifp); /* return the refcount */ | 1040 } 1041 1042 if (error) { /* reg. failed, release priv and ref */ 1043error: 1044 na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0); 1045 nm_if_rele(ifp); /* return the refcount */ |
909 bzero(priv, sizeof(*priv)); 910 free(priv, M_DEVBUF); | 1046 priv->np_ifp = NULL; 1047 priv->np_nifp = NULL; 1048 NMA_UNLOCK(); |
911 break; 912 } 913 914 na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0); | 1049 break; 1050 } 1051 1052 na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0); |
915 error = devfs_set_cdevpriv(priv, netmap_dtor); | |
916 | 1053 |
917 if (error != 0) { 918 /* could not assign the private storage for the 919 * thread, call the destructor explicitly. 920 */ 921 netmap_dtor(priv); 922 break; 923 } | 1054 /* the following assignment is a commitment. 1055 * Readers (i.e., poll and *SYNC) check for 1056 * np_nifp != NULL without locking 1057 */ 1058 wmb(); /* make sure previous writes are visible to all CPUs */ 1059 priv->np_nifp = nifp; 1060 NMA_UNLOCK(); |
924 925 /* return the offset of the netmap_if object */ 926 nmr->nr_rx_rings = na->num_rx_rings; 927 nmr->nr_tx_rings = na->num_tx_rings; 928 nmr->nr_rx_slots = na->num_rx_desc; 929 nmr->nr_tx_slots = na->num_tx_desc; | 1061 1062 /* return the offset of the netmap_if object */ 1063 nmr->nr_rx_rings = na->num_rx_rings; 1064 nmr->nr_tx_rings = na->num_tx_rings; 1065 nmr->nr_rx_slots = na->num_rx_desc; 1066 nmr->nr_tx_slots = na->num_tx_desc; |
930 nmr->nr_memsize = nm_mem->nm_totalsize; | 1067 nmr->nr_memsize = nm_mem.nm_totalsize; |
931 nmr->nr_offset = netmap_if_offset(nifp); 932 break; 933 934 case NIOCUNREGIF: | 1068 nmr->nr_offset = netmap_if_offset(nifp); 1069 break; 1070 1071 case NIOCUNREGIF: |
935 if (priv == NULL) { | 1072 // XXX we have no data here ? 1073 D("deprecated, data is %p", nmr); 1074 error = EINVAL; 1075 break; 1076 1077 case NIOCTXSYNC: 1078 case NIOCRXSYNC: 1079 nifp = priv->np_nifp; 1080 1081 if (nifp == NULL) { |
936 error = ENXIO; 937 break; 938 } | 1082 error = ENXIO; 1083 break; 1084 } |
1085 rmb(); /* make sure following reads are not from cache */ |
|
939 | 1086 |
940 /* the interface is unregistered inside the 941 destructor of the private data. */ 942 devfs_clear_cdevpriv(); 943 break; | |
944 | 1087 |
945 case NIOCTXSYNC: 946 case NIOCRXSYNC: 947 if (priv == NULL) { | 1088 ifp = priv->np_ifp; /* we have a reference */ 1089 1090 if (ifp == NULL) { 1091 D("Internal error: nifp != NULL && ifp == NULL"); |
948 error = ENXIO; 949 break; 950 } | 1092 error = ENXIO; 1093 break; 1094 } |
951 ifp = priv->np_ifp; /* we have a reference */ | 1095 |
952 na = NA(ifp); /* retrieve netmap adapter */ 953 if (priv->np_qfirst == NETMAP_SW_RING) { /* host rings */ 954 if (cmd == NIOCTXSYNC) 955 netmap_sync_to_host(na); 956 else 957 netmap_sync_from_host(na, NULL, NULL); 958 break; 959 } --- 82 unchanged lines hidden (view full) --- 1042 enum {NO_CL, NEED_CL, LOCKED_CL }; /* see below */ 1043 void *pwait = dev; /* linux compatibility */ 1044 1045 (void)pwait; 1046 1047 if (devfs_get_cdevpriv((void **)&priv) != 0 || priv == NULL) 1048 return POLLERR; 1049 | 1096 na = NA(ifp); /* retrieve netmap adapter */ 1097 if (priv->np_qfirst == NETMAP_SW_RING) { /* host rings */ 1098 if (cmd == NIOCTXSYNC) 1099 netmap_sync_to_host(na); 1100 else 1101 netmap_sync_from_host(na, NULL, NULL); 1102 break; 1103 } --- 82 unchanged lines hidden (view full) --- 1186 enum {NO_CL, NEED_CL, LOCKED_CL }; /* see below */ 1187 void *pwait = dev; /* linux compatibility */ 1188 1189 (void)pwait; 1190 1191 if (devfs_get_cdevpriv((void **)&priv) != 0 || priv == NULL) 1192 return POLLERR; 1193 |
1194 if (priv->np_nifp == NULL) { 1195 D("No if registered"); 1196 return POLLERR; 1197 } 1198 rmb(); /* make sure following reads are not from cache */ 1199 |
|
1050 ifp = priv->np_ifp; 1051 // XXX check for deleting() ? 1052 if ( (ifp->if_capenable & IFCAP_NETMAP) == 0) 1053 return POLLERR; 1054 1055 if (netmap_verbose & 0x8000) 1056 D("device %s events 0x%x", ifp->if_xname, events); 1057 want_tx = events & (POLLOUT | POLLWRNORM); --- 259 unchanged lines hidden (view full) --- 1317 size = sizeof(*na) + n * sizeof(struct netmap_kring); 1318 1319 buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 1320 if (buf) { 1321 WNA(ifp) = buf; 1322 na->tx_rings = (void *)((char *)buf + sizeof(*na)); 1323 na->rx_rings = na->tx_rings + na->num_tx_rings + 1; 1324 bcopy(na, buf, sizeof(*na)); | 1200 ifp = priv->np_ifp; 1201 // XXX check for deleting() ? 1202 if ( (ifp->if_capenable & IFCAP_NETMAP) == 0) 1203 return POLLERR; 1204 1205 if (netmap_verbose & 0x8000) 1206 D("device %s events 0x%x", ifp->if_xname, events); 1207 want_tx = events & (POLLOUT | POLLWRNORM); --- 259 unchanged lines hidden (view full) --- 1467 size = sizeof(*na) + n * sizeof(struct netmap_kring); 1468 1469 buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 1470 if (buf) { 1471 WNA(ifp) = buf; 1472 na->tx_rings = (void *)((char *)buf + sizeof(*na)); 1473 na->rx_rings = na->tx_rings + na->num_tx_rings + 1; 1474 bcopy(na, buf, sizeof(*na)); |
1325 ifp->if_capabilities |= IFCAP_NETMAP; | 1475 NETMAP_SET_CAPABLE(ifp); |
1326 1327 na = buf; 1328 /* Core lock initialized here. Others are initialized after 1329 * netmap_if_new. 1330 */ 1331 mtx_init(&na->core_lock, "netmap core lock", MTX_NETWORK_LOCK, 1332 MTX_DEF); 1333 if (na->nm_lock == NULL) { 1334 ND("using default locks for %s", ifp->if_xname); 1335 na->nm_lock = netmap_lock_wrapper; 1336 } 1337 } 1338#ifdef linux 1339 if (ifp->netdev_ops) { | 1476 1477 na = buf; 1478 /* Core lock initialized here. Others are initialized after 1479 * netmap_if_new. 1480 */ 1481 mtx_init(&na->core_lock, "netmap core lock", MTX_NETWORK_LOCK, 1482 MTX_DEF); 1483 if (na->nm_lock == NULL) { 1484 ND("using default locks for %s", ifp->if_xname); 1485 na->nm_lock = netmap_lock_wrapper; 1486 } 1487 } 1488#ifdef linux 1489 if (ifp->netdev_ops) { |
1340 D("netdev_ops %p", ifp->netdev_ops); | 1490 ND("netdev_ops %p", ifp->netdev_ops); |
1341 /* prepare a clone of the netdev ops */ 1342 na->nm_ndo = *ifp->netdev_ops; 1343 } 1344 na->nm_ndo.ndo_start_xmit = linux_netmap_start; 1345#endif 1346 D("%s for %s", buf ? "ok" : "failed", ifp->if_xname); 1347 1348 return (buf ? 0 : ENOMEM); --- 86 unchanged lines hidden (view full) --- 1435 int new_hwofs, lim; 1436 1437 if (na == NULL) 1438 return NULL; /* no netmap support here */ 1439 if (!(na->ifp->if_capenable & IFCAP_NETMAP)) 1440 return NULL; /* nothing to reinitialize */ 1441 1442 if (tx == NR_TX) { | 1491 /* prepare a clone of the netdev ops */ 1492 na->nm_ndo = *ifp->netdev_ops; 1493 } 1494 na->nm_ndo.ndo_start_xmit = linux_netmap_start; 1495#endif 1496 D("%s for %s", buf ? "ok" : "failed", ifp->if_xname); 1497 1498 return (buf ? 0 : ENOMEM); --- 86 unchanged lines hidden (view full) --- 1585 int new_hwofs, lim; 1586 1587 if (na == NULL) 1588 return NULL; /* no netmap support here */ 1589 if (!(na->ifp->if_capenable & IFCAP_NETMAP)) 1590 return NULL; /* nothing to reinitialize */ 1591 1592 if (tx == NR_TX) { |
1593 if (n >= na->num_tx_rings) 1594 return NULL; |
|
1443 kring = na->tx_rings + n; 1444 new_hwofs = kring->nr_hwcur - new_cur; 1445 } else { | 1595 kring = na->tx_rings + n; 1596 new_hwofs = kring->nr_hwcur - new_cur; 1597 } else { |
1598 if (n >= na->num_rx_rings) 1599 return NULL; |
|
 1446 kring = na->rx_rings + n; 1447 new_hwofs = kring->nr_hwcur + kring->nr_hwavail - new_cur; 1448 } 1449 lim = kring->nkr_num_slots - 1; 1450 if (new_hwofs > lim) 1451 new_hwofs -= lim + 1; 1452 1453 /* Always set the new offset value and realign the ring. */ 1454 kring->nkr_hwofs = new_hwofs; 1455 if (tx == NR_TX) 1456 kring->nr_hwavail = kring->nkr_num_slots - 1; | 1600 kring = na->rx_rings + n; 1601 new_hwofs = kring->nr_hwcur + kring->nr_hwavail - new_cur; 1602 } 1603 lim = kring->nkr_num_slots - 1; 1604 if (new_hwofs > lim) 1605 new_hwofs -= lim + 1; 1606 1607 /* Always set the new offset value and realign the ring. */ 1608 kring->nkr_hwofs = new_hwofs; 1609 if (tx == NR_TX) 1610 kring->nr_hwavail = kring->nkr_num_slots - 1; |
1457 D("new hwofs %d on %s %s[%d]", | 1611 ND(10, "new hwofs %d on %s %s[%d]", |
1458 kring->nkr_hwofs, na->ifp->if_xname, 1459 tx == NR_TX ? "TX" : "RX", n); 1460 1461#if 0 // def linux 1462 /* XXX check that the mappings are correct */ 1463 /* need ring_nr, adapter->pdev, direction */ 1464 buffer_info->dma = dma_map_single(&pdev->dev, addr, adapter->rx_buffer_len, DMA_FROM_DEVICE); 1465 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { --- 30 unchanged lines hidden (view full) --- 1496netmap_rx_irq(struct ifnet *ifp, int q, int *work_done) 1497{ 1498 struct netmap_adapter *na; 1499 struct netmap_kring *r; 1500 NM_SELINFO_T *main_wq; 1501 1502 if (!(ifp->if_capenable & IFCAP_NETMAP)) 1503 return 0; | 1612 kring->nkr_hwofs, na->ifp->if_xname, 1613 tx == NR_TX ? "TX" : "RX", n); 1614 1615#if 0 // def linux 1616 /* XXX check that the mappings are correct */ 1617 /* need ring_nr, adapter->pdev, direction */ 1618 buffer_info->dma = dma_map_single(&pdev->dev, addr, adapter->rx_buffer_len, DMA_FROM_DEVICE); 1619 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { --- 30 unchanged lines hidden (view full) --- 1650netmap_rx_irq(struct ifnet *ifp, int q, int *work_done) 1651{ 1652 struct netmap_adapter *na; 1653 struct netmap_kring *r; 1654 NM_SELINFO_T *main_wq; 1655 1656 if (!(ifp->if_capenable & IFCAP_NETMAP)) 1657 return 0; |
1658 ND(5, "received %s queue %d", work_done ? "RX" : "TX" , q); |
|
1504 na = NA(ifp); | 1659 na = NA(ifp); |
1660 if (na->na_flags & NAF_SKIP_INTR) { 1661 ND("use regular interrupt"); 1662 return 0; 1663 } 1664 |
|
1505 if (work_done) { /* RX path */ | 1665 if (work_done) { /* RX path */ |
1666 if (q >= na->num_rx_rings) 1667 return 0; // regular queue |
|
1506 r = na->rx_rings + q; 1507 r->nr_kflags |= NKR_PENDINTR; 1508 main_wq = (na->num_rx_rings > 1) ? &na->rx_si : NULL; 1509 } else { /* tx path */ | 1668 r = na->rx_rings + q; 1669 r->nr_kflags |= NKR_PENDINTR; 1670 main_wq = (na->num_rx_rings > 1) ? &na->rx_si : NULL; 1671 } else { /* tx path */ |
1672 if (q >= na->num_tx_rings) 1673 return 0; // regular queue |
|
1510 r = na->tx_rings + q; 1511 main_wq = (na->num_tx_rings > 1) ? &na->tx_si : NULL; 1512 work_done = &q; /* dummy */ 1513 } 1514 if (na->separate_locks) { 1515 mtx_lock(&r->q_lock); 1516 selwakeuppri(&r->si, PI_NET); 1517 mtx_unlock(&r->q_lock); --- 37 unchanged lines hidden (view full) --- 1555} 1556 1557static int 1558linux_netmap_mmap(struct file *f, struct vm_area_struct *vma) 1559{ 1560 int lut_skip, i, j; 1561 int user_skip = 0; 1562 struct lut_entry *l_entry; | 1674 r = na->tx_rings + q; 1675 main_wq = (na->num_tx_rings > 1) ? &na->tx_si : NULL; 1676 work_done = &q; /* dummy */ 1677 } 1678 if (na->separate_locks) { 1679 mtx_lock(&r->q_lock); 1680 selwakeuppri(&r->si, PI_NET); 1681 mtx_unlock(&r->q_lock); --- 37 unchanged lines hidden (view full) --- 1719} 1720 1721static int 1722linux_netmap_mmap(struct file *f, struct vm_area_struct *vma) 1723{ 1724 int lut_skip, i, j; 1725 int user_skip = 0; 1726 struct lut_entry *l_entry; |
1563 const struct netmap_obj_pool *p[] = { 1564 nm_mem->nm_if_pool, 1565 nm_mem->nm_ring_pool, 1566 nm_mem->nm_buf_pool }; | 1727 int error = 0; 1728 unsigned long off, tomap; |
1567 /* 1568 * vma->vm_start: start of mapping user address space 1569 * vma->vm_end: end of the mapping user address space | 1729 /* 1730 * vma->vm_start: start of mapping user address space 1731 * vma->vm_end: end of the mapping user address space |
1732 * vma->vm_pgoff: offset of first page in the device |
|
1570 */ 1571 | 1733 */ 1734 |
1572 (void)f; /* UNUSED */ | |
1573 // XXX security checks 1574 | 1735 // XXX security checks 1736 |
1575 for (i = 0; i < 3; i++) { /* loop through obj_pools */ | 1737 error = netmap_get_memory(f->private_data); 1738 ND("get_memory returned %d", error); 1739 if (error) 1740 return -error; 1741 1742 off = vma->vm_pgoff << PAGE_SHIFT; /* offset in bytes */ 1743 tomap = vma->vm_end - vma->vm_start; 1744 for (i = 0; i < NETMAP_POOLS_NR; i++) { /* loop through obj_pools */ 1745 const struct netmap_obj_pool *p = &nm_mem.pools[i]; |
1576 /* 1577 * In each pool memory is allocated in clusters | 1746 /* 1747 * In each pool memory is allocated in clusters |
1578 * of size _clustsize , each containing clustentries | 1748 * of size _clustsize, each containing clustentries |
1579 * entries. For each object k we already store the | 1749 * entries. For each object k we already store the |
1580 * vtophys malling in lut[k] so we use that, scanning | 1750 * vtophys mapping in lut[k] so we use that, scanning |
1581 * the lut[] array in steps of clustentries, 1582 * and we map each cluster (not individual pages, 1583 * it would be overkill). 1584 */ | 1751 * the lut[] array in steps of clustentries, 1752 * and we map each cluster (not individual pages, 1753 * it would be overkill). 1754 */ |
1585 for (lut_skip = 0, j = 0; j < p[i]->_numclusters; j++) { 1586 l_entry = &p[i]->lut[lut_skip]; | 1755 1756 /* 1757 * We interpret vm_pgoff as an offset into the whole 1758 * netmap memory, as if all clusters where contiguous. 1759 */ 1760 for (lut_skip = 0, j = 0; j < p->_numclusters; j++, lut_skip += p->clustentries) { 1761 unsigned long paddr, mapsize; 1762 if (p->_clustsize <= off) { 1763 off -= p->_clustsize; 1764 continue; 1765 } 1766 l_entry = &p->lut[lut_skip]; /* first obj in the cluster */ 1767 paddr = l_entry->paddr + off; 1768 mapsize = p->_clustsize - off; 1769 off = 0; 1770 if (mapsize > tomap) 1771 mapsize = tomap; 1772 ND("remap_pfn_range(%lx, %lx, %lx)", 1773 vma->vm_start + user_skip, 1774 paddr >> PAGE_SHIFT, mapsize); |
1587 if (remap_pfn_range(vma, vma->vm_start + user_skip, | 1775 if (remap_pfn_range(vma, vma->vm_start + user_skip, |
1588 l_entry->paddr >> PAGE_SHIFT, p[i]->_clustsize, | 1776 paddr >> PAGE_SHIFT, mapsize, |
1589 vma->vm_page_prot)) 1590 return -EAGAIN; // XXX check return value | 1777 vma->vm_page_prot)) 1778 return -EAGAIN; // XXX check return value |
1591 lut_skip += p[i]->clustentries; 1592 user_skip += p[i]->_clustsize; | 1779 user_skip += mapsize; 1780 tomap -= mapsize; 1781 if (tomap == 0) 1782 goto done; |
1593 } 1594 } | 1783 } 1784 } |
1785done: |
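Annotator's note: to make the walk above concrete, suppose (made-up numbers) a pool with 4 KB clusters and an mmap() whose vm_pgoff works out to off = 6 KB. The first iteration charges a whole cluster against off (leaving off = 2 KB), the second maps from l_entry->paddr + 2 KB for at most _clustsize - 2 KB bytes (clipped to tomap), and every later cluster maps from its top with off = 0. The per-cluster step, restated in isolation (a sketch of one loop iteration, not standalone code):

```c
/* One iteration of the cluster walk above, restated (sketch). */
if (p->_clustsize <= off) {
	off -= p->_clustsize;	/* cluster lies wholly before the window */
	continue;
}
paddr   = l_entry->paddr + off;	/* start somewhere inside this cluster */
mapsize = p->_clustsize - off;	/* rest of the cluster ...             */
if (mapsize > tomap)
	mapsize = tomap;	/* ... clipped to what is left to map   */
off = 0;			/* subsequent clusters map from the top */
```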
|
1595 1596 return 0; 1597} 1598 1599static netdev_tx_t 1600linux_netmap_start(struct sk_buff *skb, struct net_device *dev) 1601{ 1602 netmap_start(dev, skb); --- 28 unchanged lines hidden (view full) --- 1631netmap_release(struct inode *inode, struct file *file) 1632{ 1633 (void)inode; /* UNUSED */ 1634 if (file->private_data) 1635 netmap_dtor(file->private_data); 1636 return (0); 1637} 1638 | 1786 1787 return 0; 1788} 1789 1790static netdev_tx_t 1791linux_netmap_start(struct sk_buff *skb, struct net_device *dev) 1792{ 1793 netmap_start(dev, skb); --- 28 unchanged lines hidden (view full) --- 1822netmap_release(struct inode *inode, struct file *file) 1823{ 1824 (void)inode; /* UNUSED */ 1825 if (file->private_data) 1826 netmap_dtor(file->private_data); 1827 return (0); 1828} 1829 |
1830static int 1831linux_netmap_open(struct inode *inode, struct file *file) 1832{ 1833 struct netmap_priv_d *priv; 1834 (void)inode; /* UNUSED */ |
|
1639 | 1835 |
1836 priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF, 1837 M_NOWAIT | M_ZERO); 1838 if (priv == NULL) 1839 return -ENOMEM; 1840 1841 file->private_data = priv; 1842 1843 return (0); 1844} 1845 |
|
1640static struct file_operations netmap_fops = { | 1846static struct file_operations netmap_fops = { |
1847 .open = linux_netmap_open, |
|
1641 .mmap = linux_netmap_mmap, 1642 LIN_IOCTL_NAME = linux_netmap_ioctl, 1643 .poll = linux_netmap_poll, 1644 .release = netmap_release, 1645}; 1646 1647static struct miscdevice netmap_cdevsw = { /* same name as FreeBSD */ 1648 MISC_DYNAMIC_MINOR, --- 29 unchanged lines hidden (view full) --- 1678MODULE_DESCRIPTION("The netmap packet I/O framework"); 1679MODULE_LICENSE("Dual BSD/GPL"); /* the code here is all BSD. */ 1680 1681#else /* __FreeBSD__ */ 1682 1683static struct cdevsw netmap_cdevsw = { 1684 .d_version = D_VERSION, 1685 .d_name = "netmap", | 1848 .mmap = linux_netmap_mmap, 1849 LIN_IOCTL_NAME = linux_netmap_ioctl, 1850 .poll = linux_netmap_poll, 1851 .release = netmap_release, 1852}; 1853 1854static struct miscdevice netmap_cdevsw = { /* same name as FreeBSD */ 1855 MISC_DYNAMIC_MINOR, --- 29 unchanged lines hidden (view full) --- 1885MODULE_DESCRIPTION("The netmap packet I/O framework"); 1886MODULE_LICENSE("Dual BSD/GPL"); /* the code here is all BSD. */ 1887 1888#else /* __FreeBSD__ */ 1889 1890static struct cdevsw netmap_cdevsw = { 1891 .d_version = D_VERSION, 1892 .d_name = "netmap", |
1893 .d_open = netmap_open, |
|
1686 .d_mmap = netmap_mmap, | 1894 .d_mmap = netmap_mmap, |
1895 .d_mmap_single = netmap_mmap_single, |
|
1687 .d_ioctl = netmap_ioctl, 1688 .d_poll = netmap_poll, | 1896 .d_ioctl = netmap_ioctl, 1897 .d_poll = netmap_poll, |
1898 .d_close = netmap_close, |
|
1689}; 1690#endif /* __FreeBSD__ */ 1691 1692#ifdef NM_BRIDGE 1693/* 1694 *---- support for virtual bridge ----- 1695 */ 1696 --- 346 unchanged lines hidden (view full) --- 2043{ 2044 int error; 2045 2046 error = netmap_memory_init(); 2047 if (error != 0) { 2048 printf("netmap: unable to initialize the memory allocator.\n"); 2049 return (error); 2050 } | 1899}; 1900#endif /* __FreeBSD__ */ 1901 1902#ifdef NM_BRIDGE 1903/* 1904 *---- support for virtual bridge ----- 1905 */ 1906 --- 346 unchanged lines hidden (view full) --- 2253{ 2254 int error; 2255 2256 error = netmap_memory_init(); 2257 if (error != 0) { 2258 printf("netmap: unable to initialize the memory allocator.\n"); 2259 return (error); 2260 } |
2051 printf("netmap: loaded module with %d Mbytes\n", 2052 (int)(nm_mem->nm_totalsize >> 20)); | 2261 printf("netmap: loaded module\n"); |
2053 netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660, 2054 "netmap"); 2055 2056#ifdef NM_BRIDGE 2057 { 2058 int i; 2059 for (i = 0; i < NM_BRIDGES; i++) 2060 mtx_init(&nm_bridges[i].bdg_lock, "bdg lock", "bdg_lock", MTX_DEF); --- 53 unchanged lines hidden --- | 2262 netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660, 2263 "netmap"); 2264 2265#ifdef NM_BRIDGE 2266 { 2267 int i; 2268 for (i = 0; i < NM_BRIDGES; i++) 2269 mtx_init(&nm_bridges[i].bdg_lock, "bdg lock", "bdg_lock", MTX_DEF); --- 53 unchanged lines hidden --- |