/*	$NetBSD: if_vioif.c,v 1.101 2023/03/23 02:57:54 yamaguchi Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.101 2023/03/23 02:57:54 yamaguchi Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/device.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/sockio.h>
#include <sys/syslog.h>
#include <sys/cpu.h>
#include <sys/module.h>
#include <sys/pcq.h>
#include <sys/workqueue.h>
#include <sys/xcall.h>

#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include "ioconf.h"

#ifdef NET_MPSAFE
#define VIOIF_MPSAFE	1
#define VIOIF_MULTIQ	1
#endif

/*
 * if_vioifreg.h:
 */
/* Configuration registers */
#define VIRTIO_NET_CONFIG_MAC		 0 /* 8bit x 6byte */
#define VIRTIO_NET_CONFIG_STATUS	 6 /* 16bit */
#define VIRTIO_NET_CONFIG_MAX_VQ_PAIRS	 8 /* 16bit */
#define VIRTIO_NET_CONFIG_MTU		10 /* 16bit */

/* Feature bits */
#define VIRTIO_NET_F_CSUM		__BIT(0)
#define VIRTIO_NET_F_GUEST_CSUM		__BIT(1)
#define VIRTIO_NET_F_MAC		__BIT(5)
#define VIRTIO_NET_F_GSO		__BIT(6)
#define VIRTIO_NET_F_GUEST_TSO4		__BIT(7)
#define VIRTIO_NET_F_GUEST_TSO6		__BIT(8)
#define VIRTIO_NET_F_GUEST_ECN		__BIT(9)
#define VIRTIO_NET_F_GUEST_UFO		__BIT(10)
#define VIRTIO_NET_F_HOST_TSO4		__BIT(11)
#define VIRTIO_NET_F_HOST_TSO6		__BIT(12)
#define VIRTIO_NET_F_HOST_ECN		__BIT(13)
#define VIRTIO_NET_F_HOST_UFO		__BIT(14)
#define VIRTIO_NET_F_MRG_RXBUF		__BIT(15)
#define VIRTIO_NET_F_STATUS		__BIT(16)
#define VIRTIO_NET_F_CTRL_VQ		__BIT(17)
#define VIRTIO_NET_F_CTRL_RX		__BIT(18)
#define VIRTIO_NET_F_CTRL_VLAN		__BIT(19)
#define VIRTIO_NET_F_CTRL_RX_EXTRA	__BIT(20)
#define VIRTIO_NET_F_GUEST_ANNOUNCE	__BIT(21)
#define VIRTIO_NET_F_MQ			__BIT(22)
#define VIRTIO_NET_F_CTRL_MAC_ADDR	__BIT(23)

#define VIRTIO_NET_FLAG_BITS			\
	VIRTIO_COMMON_FLAG_BITS			\
	"b\x17" "CTRL_MAC\0"			\
	"b\x16" "MQ\0"				\
	"b\x15" "GUEST_ANNOUNCE\0"		\
	"b\x14" "CTRL_RX_EXTRA\0"		\
	"b\x13" "CTRL_VLAN\0"			\
	"b\x12" "CTRL_RX\0"			\
	"b\x11" "CTRL_VQ\0"			\
	"b\x10" "STATUS\0"			\
	"b\x0f" "MRG_RXBUF\0"			\
	"b\x0e" "HOST_UFO\0"			\
	"b\x0d" "HOST_ECN\0"			\
	"b\x0c" "HOST_TSO6\0"			\
	"b\x0b" "HOST_TSO4\0"			\
	"b\x0a" "GUEST_UFO\0"			\
	"b\x09" "GUEST_ECN\0"			\
	"b\x08" "GUEST_TSO6\0"			\
	"b\x07" "GUEST_TSO4\0"			\
	"b\x06" "GSO\0"				\
	"b\x05" "MAC\0"				\
	"b\x01" "GUEST_CSUM\0"			\
	"b\x00" "CSUM\0"
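/*
 * Illustration (editor's note, not part of the original source):
 * VIRTIO_NET_FLAG_BITS is an snprintb(3)-style format string; each
 * "b\xNN" entry names one feature bit, so negotiated features can be
 * logged symbolically, e.g.
 *
 *	char buf[256];
 *	snprintb(buf, sizeof(buf), VIRTIO_NET_FLAG_BITS, features);
 */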

/* Status */
#define VIRTIO_NET_S_LINK_UP	1

/* Packet header structure */
struct virtio_net_hdr {
	uint8_t		flags;
	uint8_t		gso_type;
	uint16_t	hdr_len;
	uint16_t	gso_size;
	uint16_t	csum_start;
	uint16_t	csum_offset;

	uint16_t	num_buffers;	/* VIRTIO_NET_F_MRG_RXBUF enabled or v1 */
} __packed;

#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* flags */
#define VIRTIO_NET_HDR_GSO_NONE		0	/* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* gso_type */
#define VIRTIO_NET_HDR_GSO_UDP		3	/* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV6	4	/* gso_type */
#define VIRTIO_NET_HDR_GSO_ECN		0x80	/* gso_type, |'ed */

#define VIRTIO_NET_MAX_GSO_LEN		(65536+ETHER_HDR_LEN)

/* Control virtqueue */
struct virtio_net_ctrl_cmd {
	uint8_t	class;
	uint8_t	command;
} __packed;
#define VIRTIO_NET_CTRL_RX		0
# define VIRTIO_NET_CTRL_RX_PROMISC	0
# define VIRTIO_NET_CTRL_RX_ALLMULTI	1

#define VIRTIO_NET_CTRL_MAC		1
# define VIRTIO_NET_CTRL_MAC_TABLE_SET	0
# define VIRTIO_NET_CTRL_MAC_ADDR_SET	1

#define VIRTIO_NET_CTRL_VLAN		2
# define VIRTIO_NET_CTRL_VLAN_ADD	0
# define VIRTIO_NET_CTRL_VLAN_DEL	1

#define VIRTIO_NET_CTRL_MQ			4
# define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET	0
# define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN	1
# define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX	0x8000

struct virtio_net_ctrl_status {
	uint8_t	ack;
} __packed;
#define VIRTIO_NET_OK			0
#define VIRTIO_NET_ERR			1

struct virtio_net_ctrl_rx {
	uint8_t	onoff;
} __packed;

struct virtio_net_ctrl_mac_tbl {
	uint32_t nentries;
	uint8_t macs[][ETHER_ADDR_LEN];
} __packed;

struct virtio_net_ctrl_mac_addr {
	uint8_t mac[ETHER_ADDR_LEN];
} __packed;

struct virtio_net_ctrl_vlan {
	uint16_t id;
} __packed;

struct virtio_net_ctrl_mq {
	uint16_t virtqueue_pairs;
} __packed;

/*
 * if_vioifvar.h:
 */

/*
 * Locking notes:
 * + a field in vioif_netqueue is protected by netq_lock (a spin mutex)
 *      - more than one lock cannot be held at once
 * + a field in vioif_tx_context and vioif_rx_context is also protected
 *   by netq_lock.
 * + ctrlq_inuse is protected by ctrlq_wait_lock.
 *      - other fields in vioif_ctrlqueue are protected by ctrlq_inuse
 *      - netq_lock cannot be held along with ctrlq_wait_lock
 * + fields in vioif_softc except queues are protected by
 *   sc->sc_lock (an adaptive mutex)
 *      - the lock is held before acquisition of other locks
 */
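/*
 * Illustrative lock-ordering sketch (editor's addition, not part of the
 * original source): a thread that needs both the softc lock and a queue
 * lock must take sc_lock first, per the notes above:
 *
 *	mutex_enter(&sc->sc_lock);
 *	mutex_enter(&netq->netq_lock);
 *	...
 *	mutex_exit(&netq->netq_lock);
 *	mutex_exit(&sc->sc_lock);
 *
 * netq_lock and ctrlq_wait_lock must never be held at the same time.
 */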

struct vioif_ctrl_cmdspec {
	bus_dmamap_t	dmamap;
	void		*buf;
	bus_size_t	bufsize;
};

struct vioif_work {
	struct work	 cookie;
	void		(*func)(void *);
	void		*arg;
	unsigned int	 added;
};

struct vioif_net_map {
	struct virtio_net_hdr	*vnm_hdr;
	bus_dmamap_t		 vnm_hdr_map;
	struct mbuf		*vnm_mbuf;
	bus_dmamap_t		 vnm_mbuf_map;
};

#define VIOIF_NETQ_RX		0
#define VIOIF_NETQ_TX		1
#define VIOIF_NETQ_IDX		2
#define VIOIF_NETQ_DIR(n)	((n) % VIOIF_NETQ_IDX)
#define VIOIF_NETQ_PAIRIDX(n)	((n) / VIOIF_NETQ_IDX)
#define VIOIF_NETQ_RXQID(n)	((n) * VIOIF_NETQ_IDX + VIOIF_NETQ_RX)
#define VIOIF_NETQ_TXQID(n)	((n) * VIOIF_NETQ_IDX + VIOIF_NETQ_TX)

struct vioif_netqueue {
	kmutex_t		 netq_lock;
	struct virtqueue	*netq_vq;
	bool			 netq_stopping;
	bool			 netq_running_handle;
	void			*netq_maps_kva;
	struct vioif_net_map	*netq_maps;

	void			*netq_softint;
	struct vioif_work	 netq_work;
	bool			 netq_workqueue;

	char			 netq_evgroup[32];
	struct evcnt		 netq_mbuf_load_failed;
	struct evcnt		 netq_enqueue_failed;

	void			*netq_ctx;
};

struct vioif_tx_context {
	bool			 txc_link_active;
	pcq_t			*txc_intrq;
	void			*txc_deferred_transmit;

	struct evcnt		 txc_defrag_failed;
};

struct vioif_rx_context {
	struct evcnt		 rxc_mbuf_enobufs;
};

struct vioif_ctrlqueue {
	struct virtqueue		*ctrlq_vq;
	enum {
		FREE, INUSE, DONE
	}				ctrlq_inuse;
	kcondvar_t			ctrlq_wait;
	kmutex_t			ctrlq_wait_lock;
	struct lwp			*ctrlq_owner;

	struct virtio_net_ctrl_cmd	*ctrlq_cmd;
	struct virtio_net_ctrl_status	*ctrlq_status;
	struct virtio_net_ctrl_rx	*ctrlq_rx;
	struct virtio_net_ctrl_mac_tbl	*ctrlq_mac_tbl_uc;
	struct virtio_net_ctrl_mac_tbl	*ctrlq_mac_tbl_mc;
	struct virtio_net_ctrl_mac_addr	*ctrlq_mac_addr;
	struct virtio_net_ctrl_mq	*ctrlq_mq;

	bus_dmamap_t			ctrlq_cmd_dmamap;
	bus_dmamap_t			ctrlq_status_dmamap;
	bus_dmamap_t			ctrlq_rx_dmamap;
	bus_dmamap_t			ctrlq_tbl_uc_dmamap;
	bus_dmamap_t			ctrlq_tbl_mc_dmamap;
	bus_dmamap_t			ctrlq_mac_addr_dmamap;
	bus_dmamap_t			ctrlq_mq_dmamap;

	struct evcnt			ctrlq_cmd_load_failed;
	struct evcnt			ctrlq_cmd_failed;
};

struct vioif_softc {
	device_t		sc_dev;
	kmutex_t		sc_lock;
	struct sysctllog	*sc_sysctllog;

	struct virtio_softc	*sc_virtio;
	struct virtqueue	*sc_vqs;
	u_int			 sc_hdr_size;

	int			 sc_max_nvq_pairs;
	int			 sc_req_nvq_pairs;
	int			 sc_act_nvq_pairs;

	uint8_t			 sc_mac[ETHER_ADDR_LEN];
	struct ethercom		 sc_ethercom;
	int			 sc_link_state;

	struct vioif_netqueue	*sc_netqs;

	bool			 sc_has_ctrl;
	struct vioif_ctrlqueue	 sc_ctrlq;

	bus_dma_segment_t	 sc_segs[1];
	void			*sc_dmamem;
	void			*sc_kmem;

	void			*sc_cfg_softint;

	struct workqueue	*sc_txrx_workqueue;
	bool			 sc_txrx_workqueue_sysctl;
	u_int			 sc_tx_intr_process_limit;
	u_int			 sc_tx_process_limit;
	u_int			 sc_rx_intr_process_limit;
	u_int			 sc_rx_process_limit;
};
#define VIRTIO_NET_TX_MAXNSEGS		(16)	/* XXX */
#define VIRTIO_NET_CTRL_MAC_MAXENTRIES	(64)	/* XXX */

#define VIOIF_TX_INTR_PROCESS_LIMIT	256
#define VIOIF_TX_PROCESS_LIMIT		256
#define VIOIF_RX_INTR_PROCESS_LIMIT	0U
#define VIOIF_RX_PROCESS_LIMIT		256
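/*
 * Editor's note on the defaults above: each limit bounds how many
 * packets a single handler pass dequeues before it reschedules itself.
 * VIOIF_RX_INTR_PROCESS_LIMIT of 0 makes the receive interrupt handler
 * report "more work" immediately, so rx processing always moves from
 * hard interrupt context to softint/workqueue context.
 */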

#define VIOIF_WORKQUEUE_PRI		PRI_SOFTNET
#define VIOIF_IS_LINK_ACTIVE(_sc)	((_sc)->sc_link_state == LINK_STATE_UP ? \
					    true : false)

/* cfattach interface functions */
static int	vioif_match(device_t, cfdata_t, void *);
static void	vioif_attach(device_t, device_t, void *);
static int	vioif_finalize_teardown(device_t);

/* ifnet interface functions */
static int	vioif_init(struct ifnet *);
static void	vioif_stop(struct ifnet *, int);
static void	vioif_start(struct ifnet *);
static int	vioif_transmit(struct ifnet *, struct mbuf *);
static int	vioif_ioctl(struct ifnet *, u_long, void *);
static void	vioif_watchdog(struct ifnet *);
static int	vioif_ifflags(struct vioif_softc *);
static int	vioif_ifflags_cb(struct ethercom *);

/* tx & rx */
static int	vioif_netqueue_init(struct vioif_softc *,
		    struct virtio_softc *, size_t, u_int);
static void	vioif_netqueue_teardown(struct vioif_softc *,
		    struct virtio_softc *, size_t);
static void	vioif_net_intr_enable(struct vioif_softc *,
		    struct virtio_softc *);
static void	vioif_net_intr_disable(struct vioif_softc *,
		    struct virtio_softc *);
static void	vioif_net_sched_handle(struct vioif_softc *,
		    struct vioif_netqueue *);

/* rx */
static void	vioif_populate_rx_mbufs_locked(struct vioif_softc *,
		    struct vioif_netqueue *);
static int	vioif_rx_intr(void *);
static void	vioif_rx_handle(void *);
static void	vioif_rx_queue_clear(struct vioif_softc *,
		    struct virtio_softc *, struct vioif_netqueue *);

/* tx */
static void	vioif_start_locked(struct ifnet *, struct vioif_netqueue *);
static void	vioif_transmit_locked(struct ifnet *, struct vioif_netqueue *);
static void	vioif_deferred_transmit(void *);
static int	vioif_tx_intr(void *);
static void	vioif_tx_handle(void *);
static void	vioif_tx_queue_clear(struct vioif_softc *, struct virtio_softc *,
		    struct vioif_netqueue *);

/* controls */
static int	vioif_ctrl_intr(void *);
static int	vioif_ctrl_rx(struct vioif_softc *, int, bool);
static int	vioif_set_promisc(struct vioif_softc *, bool);
static int	vioif_set_allmulti(struct vioif_softc *, bool);
static int	vioif_set_rx_filter(struct vioif_softc *);
static int	vioif_rx_filter(struct vioif_softc *);
static int	vioif_set_mac_addr(struct vioif_softc *);
static int	vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *, int);

/* config interrupt */
static int	vioif_config_change(struct virtio_softc *);
static void	vioif_cfg_softint(void *);
static void	vioif_update_link_status(struct vioif_softc *);

/* others */
static void	vioif_alloc_queues(struct vioif_softc *);
static void	vioif_free_queues(struct vioif_softc *);
static int	vioif_alloc_mems(struct vioif_softc *);
static struct workqueue *
		vioif_workq_create(const char *, pri_t, int, int);
static void	vioif_workq_destroy(struct workqueue *);
static void	vioif_work_set(struct vioif_work *, void (*)(void *), void *);
static void	vioif_work_add(struct workqueue *, struct vioif_work *);
static void	vioif_work_wait(struct workqueue *, struct vioif_work *);
static int	vioif_setup_sysctl(struct vioif_softc *);
static void	vioif_setup_stats(struct vioif_softc *);

CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc),
    vioif_match, vioif_attach, NULL, NULL);

static void
vioif_intr_barrier(void)
{

	/* wait for all interrupt handlers to finish */
	xc_barrier(0);
}

static void
vioif_notify(struct virtio_softc *vsc, struct virtqueue *vq)
{

	virtio_enqueue_commit(vsc, vq, -1, true);
}

static int
vioif_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_attach_args *va = aux;

	if (va->sc_childdevid == VIRTIO_DEVICE_ID_NETWORK)
		return 1;

	return 0;
}

static void
vioif_attach(device_t parent, device_t self, void *aux)
{
	struct vioif_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct vioif_netqueue *txq0;
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
	uint64_t features, req_features;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int softint_flags;
	int r, i, req_flags;
	char xnamebuf[MAXCOMLEN];
	size_t netq_num;

	if (virtio_child(vsc) != NULL) {
		aprint_normal(": child already attached for %s; "
		    "something wrong...\n", device_xname(parent));
		return;
	}

	sc->sc_dev = self;
	sc->sc_virtio = vsc;
	sc->sc_link_state = LINK_STATE_UNKNOWN;

	sc->sc_max_nvq_pairs = 1;
	sc->sc_req_nvq_pairs = 1;
	sc->sc_act_nvq_pairs = 1;
	sc->sc_txrx_workqueue_sysctl = true;
	sc->sc_tx_intr_process_limit = VIOIF_TX_INTR_PROCESS_LIMIT;
	sc->sc_tx_process_limit = VIOIF_TX_PROCESS_LIMIT;
	sc->sc_rx_intr_process_limit = VIOIF_RX_INTR_PROCESS_LIMIT;
	sc->sc_rx_process_limit = VIOIF_RX_PROCESS_LIMIT;

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	snprintf(xnamebuf, sizeof(xnamebuf), "%s_txrx", device_xname(self));
	sc->sc_txrx_workqueue = vioif_workq_create(xnamebuf, VIOIF_WORKQUEUE_PRI,
	    IPL_NET, WQ_PERCPU | WQ_MPSAFE);
	if (sc->sc_txrx_workqueue == NULL)
		goto err;

	req_flags = 0;

#ifdef VIOIF_MPSAFE
	req_flags |= VIRTIO_F_INTR_MPSAFE;
#endif
	req_flags |= VIRTIO_F_INTR_MSIX;

	req_features =
	    VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | VIRTIO_NET_F_CTRL_VQ |
	    VIRTIO_NET_F_CTRL_RX | VIRTIO_F_NOTIFY_ON_EMPTY;
	req_features |= VIRTIO_F_RING_EVENT_IDX;
	req_features |= VIRTIO_NET_F_CTRL_MAC_ADDR;
#ifdef VIOIF_MULTIQ
	req_features |= VIRTIO_NET_F_MQ;
#endif
	virtio_child_attach_start(vsc, self, IPL_NET, NULL,
	    vioif_config_change, virtio_vq_intrhand, req_flags,
	    req_features, VIRTIO_NET_FLAG_BITS);

	features = virtio_features(vsc);
	if (features == 0)
		goto err;

	if (features & VIRTIO_NET_F_MAC) {
		for (i = 0; i < __arraycount(sc->sc_mac); i++) {
			sc->sc_mac[i] = virtio_read_device_config_1(vsc,
			    VIRTIO_NET_CONFIG_MAC + i);
		}
	} else {
		/* code stolen from sys/net/if_tap.c */
		struct timeval tv;
		uint32_t ui;
		getmicrouptime(&tv);
		ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
		memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3);
		for (i = 0; i < __arraycount(sc->sc_mac); i++) {
			virtio_write_device_config_1(vsc,
			    VIRTIO_NET_CONFIG_MAC + i, sc->sc_mac[i]);
		}
	}

	/* 'Ethernet' with capital follows other ethernet driver attachment */
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_mac));

	if (features & (VIRTIO_NET_F_MRG_RXBUF | VIRTIO_F_VERSION_1)) {
		sc->sc_hdr_size = sizeof(struct virtio_net_hdr);
	} else {
		sc->sc_hdr_size = offsetof(struct virtio_net_hdr, num_buffers);
	}
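	/*
	 * Example (editor's illustration): a legacy device without
	 * VIRTIO_NET_F_MRG_RXBUF uses a 10-byte header, i.e. struct
	 * virtio_net_hdr up to but not including num_buffers; with
	 * MRG_RXBUF or a VIRTIO_F_VERSION_1 device the full 12-byte
	 * header is used.
	 */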

	if ((features & VIRTIO_NET_F_CTRL_VQ) &&
	    (features & VIRTIO_NET_F_CTRL_RX)) {
		sc->sc_has_ctrl = true;

		cv_init(&ctrlq->ctrlq_wait, "ctrl_vq");
		mutex_init(&ctrlq->ctrlq_wait_lock, MUTEX_DEFAULT, IPL_NET);
		ctrlq->ctrlq_inuse = FREE;
	} else {
		sc->sc_has_ctrl = false;
	}

	if (sc->sc_has_ctrl && (features & VIRTIO_NET_F_MQ)) {
		sc->sc_max_nvq_pairs = virtio_read_device_config_2(vsc,
		    VIRTIO_NET_CONFIG_MAX_VQ_PAIRS);

		if (sc->sc_max_nvq_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
			goto err;

		/* Limit the number of queue pairs to use */
		sc->sc_req_nvq_pairs = MIN(sc->sc_max_nvq_pairs, ncpu);
	}

	vioif_alloc_queues(sc);
	virtio_child_attach_set_vqs(vsc, sc->sc_vqs, sc->sc_req_nvq_pairs);

#ifdef VIOIF_MPSAFE
	softint_flags = SOFTINT_NET | SOFTINT_MPSAFE;
#else
	softint_flags = SOFTINT_NET;
#endif

	/*
	 * Initialize network queues
	 */
	netq_num = sc->sc_max_nvq_pairs * 2;
	for (i = 0; i < netq_num; i++) {
		r = vioif_netqueue_init(sc, vsc, i, softint_flags);
		if (r != 0)
			goto err;
	}

	if (sc->sc_has_ctrl) {
		int ctrlq_idx = sc->sc_max_nvq_pairs * 2;
		/*
		 * Allocating a virtqueue for control channel
		 */
		sc->sc_ctrlq.ctrlq_vq = &sc->sc_vqs[ctrlq_idx];
		r = virtio_alloc_vq(vsc, ctrlq->ctrlq_vq, ctrlq_idx,
		    NBPG, 1, "control");
		if (r != 0) {
			aprint_error_dev(self, "failed to allocate "
			    "a virtqueue for control channel, error code %d\n",
			    r);

			sc->sc_has_ctrl = false;
			cv_destroy(&ctrlq->ctrlq_wait);
			mutex_destroy(&ctrlq->ctrlq_wait_lock);
		} else {
			ctrlq->ctrlq_vq->vq_intrhand = vioif_ctrl_intr;
			ctrlq->ctrlq_vq->vq_intrhand_arg = (void *) ctrlq;
		}
	}

	sc->sc_cfg_softint = softint_establish(softint_flags,
	    vioif_cfg_softint, sc);
	if (sc->sc_cfg_softint == NULL) {
		aprint_error_dev(self, "cannot establish ctl softint\n");
		goto err;
	}

	if (vioif_alloc_mems(sc) < 0)
		goto err;

	if (virtio_child_attach_finish(vsc) != 0)
		goto err;

	if (vioif_setup_sysctl(sc) != 0) {
		aprint_error_dev(self, "unable to create sysctl node\n");
		/* continue */
	}

	vioif_setup_stats(sc);

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef VIOIF_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_start = vioif_start;
	if (sc->sc_req_nvq_pairs > 1)
		ifp->if_transmit = vioif_transmit;
	ifp->if_ioctl = vioif_ioctl;
	ifp->if_init = vioif_init;
	ifp->if_stop = vioif_stop;
	ifp->if_capabilities = 0;
	ifp->if_watchdog = vioif_watchdog;
	txq0 = &sc->sc_netqs[VIOIF_NETQ_TXQID(0)];
	IFQ_SET_MAXLEN(&ifp->if_snd, MAX(txq0->netq_vq->vq_num, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_mac);
	ether_set_ifflags_cb(&sc->sc_ethercom, vioif_ifflags_cb);

	return;

err:
	netq_num = sc->sc_max_nvq_pairs * 2;
	for (i = 0; i < netq_num; i++) {
		vioif_netqueue_teardown(sc, vsc, i);
	}

	if (sc->sc_has_ctrl) {
		cv_destroy(&ctrlq->ctrlq_wait);
		mutex_destroy(&ctrlq->ctrlq_wait_lock);
		virtio_free_vq(vsc, ctrlq->ctrlq_vq);
		ctrlq->ctrlq_vq = NULL;
	}

	vioif_free_queues(sc);
	mutex_destroy(&sc->sc_lock);
	virtio_child_attach_failed(vsc);
	config_finalize_register(self, vioif_finalize_teardown);

	return;
}

static int
vioif_finalize_teardown(device_t self)
{
	struct vioif_softc *sc = device_private(self);

	if (sc->sc_txrx_workqueue != NULL) {
		vioif_workq_destroy(sc->sc_txrx_workqueue);
		sc->sc_txrx_workqueue = NULL;
	}

	return 0;
}

/*
 * Interface functions for ifnet
 */
static int
vioif_init(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct vioif_netqueue *netq;
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
	int r, i;

	vioif_stop(ifp, 0);

	r = virtio_reinit_start(vsc);
	if (r != 0) {
		log(LOG_ERR, "%s: reset failed\n", ifp->if_xname);
		return EIO;
	}

	virtio_negotiate_features(vsc, virtio_features(vsc));

	for (i = 0; i < sc->sc_req_nvq_pairs; i++) {
		netq = &sc->sc_netqs[VIOIF_NETQ_RXQID(i)];

		mutex_enter(&netq->netq_lock);
		vioif_populate_rx_mbufs_locked(sc, netq);
		mutex_exit(&netq->netq_lock);
	}

	virtio_reinit_end(vsc);

	if (sc->sc_has_ctrl)
		virtio_start_vq_intr(vsc, ctrlq->ctrlq_vq);

	r = vioif_ctrl_mq_vq_pairs_set(sc, sc->sc_req_nvq_pairs);
	if (r == 0)
		sc->sc_act_nvq_pairs = sc->sc_req_nvq_pairs;
	else
		sc->sc_act_nvq_pairs = 1;

	SET(ifp->if_flags, IFF_RUNNING);
	CLR(ifp->if_flags, IFF_OACTIVE);

	vioif_net_intr_enable(sc, vsc);

	vioif_update_link_status(sc);
	r = vioif_rx_filter(sc);

	return r;
}
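/*
 * Editor's summary of vioif_init() above: stop the interface, reset
 * and reinitialize the device, re-negotiate the previously accepted
 * features, pre-fill every active receive virtqueue, then size the
 * active queue pairs over the control vq before enabling per-queue
 * interrupts and reloading the rx filter.
 */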

static void
vioif_stop(struct ifnet *ifp, int disable)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct vioif_netqueue *netq;
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
	size_t i, act_qnum;

	act_qnum = sc->sc_act_nvq_pairs * 2;

	CLR(ifp->if_flags, IFF_RUNNING);
	for (i = 0; i < act_qnum; i++) {
		netq = &sc->sc_netqs[i];

		mutex_enter(&netq->netq_lock);
		netq->netq_stopping = true;
		mutex_exit(&netq->netq_lock);
	}

	/* disable interrupts */
	vioif_net_intr_disable(sc, vsc);
	if (sc->sc_has_ctrl)
		virtio_stop_vq_intr(vsc, ctrlq->ctrlq_vq);

	/*
	 * only way to stop interrupt, I/O and DMA is resetting...
	 *
	 * NOTE: Devices based on the VirtIO draft specification cannot
	 * stop interrupts completely even if virtio_stop_vq_intr() is called.
	 */
	virtio_reset(vsc);

	vioif_intr_barrier();

	for (i = 0; i < act_qnum; i++) {
		netq = &sc->sc_netqs[i];
		vioif_work_wait(sc->sc_txrx_workqueue, &netq->netq_work);
	}

	for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
		netq = &sc->sc_netqs[VIOIF_NETQ_RXQID(i)];
		vioif_rx_queue_clear(sc, vsc, netq);

		netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];
		vioif_tx_queue_clear(sc, vsc, netq);
	}

	/* all packet processing is stopped */
	for (i = 0; i < act_qnum; i++) {
		netq = &sc->sc_netqs[i];

		mutex_enter(&netq->netq_lock);
		netq->netq_stopping = false;
		mutex_exit(&netq->netq_lock);
	}
}

static void
vioif_start(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct vioif_netqueue *txq0 = &sc->sc_netqs[VIOIF_NETQ_TXQID(0)];

#ifdef VIOIF_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(&txq0->netq_lock);
	vioif_start_locked(ifp, txq0);
	mutex_exit(&txq0->netq_lock);
}

static inline int
vioif_select_txqueue(struct ifnet *ifp, struct mbuf *m)
{
	struct vioif_softc *sc = ifp->if_softc;
	u_int cpuid = cpu_index(curcpu());

	return VIOIF_NETQ_TXQID(cpuid % sc->sc_act_nvq_pairs);
}

static int
vioif_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct vioif_netqueue *netq;
	struct vioif_tx_context *txc;
	int qid;

	qid = vioif_select_txqueue(ifp, m);
	netq = &sc->sc_netqs[qid];
	txc = netq->netq_ctx;

	if (__predict_false(!pcq_put(txc->txc_intrq, m))) {
		m_freem(m);
		return ENOBUFS;
	}

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
	if (m->m_flags & M_MCAST)
		if_statinc_ref(nsr, if_omcasts);
	IF_STAT_PUTREF(ifp);

	if (mutex_tryenter(&netq->netq_lock)) {
		vioif_transmit_locked(ifp, netq);
		mutex_exit(&netq->netq_lock);
	}

	return 0;
}

static void
vioif_watchdog(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct vioif_netqueue *netq;
	int i;

	if (ifp->if_flags & IFF_RUNNING) {
		for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
			netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];

			mutex_enter(&netq->netq_lock);
			if (!netq->netq_running_handle) {
				netq->netq_running_handle = true;
				vioif_net_sched_handle(sc, netq);
			}
			mutex_exit(&netq->netq_lock);
		}
	}
}

static int
vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, r;

	s = splnet();

	r = ether_ioctl(ifp, cmd, data);
	if (r == ENETRESET && (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI)) {
		if (ifp->if_flags & IFF_RUNNING) {
			r = vioif_rx_filter(ifp->if_softc);
		} else {
			r = 0;
		}
	}

	splx(s);

	return r;
}
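/*
 * Editor's note: ether_ioctl() returns ENETRESET when the multicast
 * filter must be reprogrammed; vioif_ioctl() above reloads the hardware
 * rx filter only while the interface is running and otherwise reports
 * success, since vioif_init() will program the filter later.
 */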

static int
vioif_ifflags(struct vioif_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bool onoff;
	int r;

	if (!sc->sc_has_ctrl) {
		/* no ctrl vq; always promisc and allmulti */
		ifp->if_flags |= (IFF_PROMISC | IFF_ALLMULTI);
		return 0;
	}

	onoff = ifp->if_flags & IFF_ALLMULTI ? true : false;
	r = vioif_set_allmulti(sc, onoff);
	if (r != 0) {
		log(LOG_WARNING,
		    "%s: couldn't %sable ALLMULTI\n",
		    ifp->if_xname, onoff ? "en" : "dis");
		if (onoff == false) {
			ifp->if_flags |= IFF_ALLMULTI;
		}
	}

	onoff = ifp->if_flags & IFF_PROMISC ? true : false;
	r = vioif_set_promisc(sc, onoff);
	if (r != 0) {
		log(LOG_WARNING,
		    "%s: couldn't %sable PROMISC\n",
		    ifp->if_xname, onoff ? "en" : "dis");
		if (onoff == false) {
			ifp->if_flags |= IFF_PROMISC;
		}
	}

	return 0;
}

static int
vioif_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct vioif_softc *sc = ifp->if_softc;

	return vioif_ifflags(sc);
}

static int
vioif_setup_sysctl(struct vioif_softc *sc)
{
	const char *devname;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *rxnode, *txnode;
	int error;

	log = &sc->sc_sysctllog;
	devname = device_xname(sc->sc_dev);

	error = sysctl_createv(log, 0, NULL, &rnode,
	    0, CTLTYPE_NODE, devname,
	    SYSCTL_DESCR("virtio-net information and settings"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &rnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
	    SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &sc->sc_txrx_workqueue_sysctl, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &rnode, &rxnode,
	    0, CTLTYPE_NODE, "rx",
	    SYSCTL_DESCR("virtio-net information and settings for Rx"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &rxnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
	    SYSCTL_DESCR("max number of Rx packets to process for interrupt processing"),
	    NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &rxnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
	    SYSCTL_DESCR("max number of Rx packets to process for deferred processing"),
	    NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &rnode, &txnode,
	    0, CTLTYPE_NODE, "tx",
	    SYSCTL_DESCR("virtio-net information and settings for Tx"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &txnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
	    SYSCTL_DESCR("max number of Tx packets to process for interrupt processing"),
	    NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &txnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
	    SYSCTL_DESCR("max number of Tx packets to process for deferred processing"),
	    NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);

out:
	if (error)
		sysctl_teardown(log);

	return error;
}
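/*
 * Editor's sketch: assuming unit 0, the nodes created above surface as
 *
 *	hw.vioif0.txrx_workqueue
 *	hw.vioif0.rx.intr_process_limit		hw.vioif0.rx.process_limit
 *	hw.vioif0.tx.intr_process_limit		hw.vioif0.tx.process_limit
 */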

static void
vioif_setup_stats(struct vioif_softc *sc)
{
	struct vioif_netqueue *netq;
	struct vioif_tx_context *txc;
	struct vioif_rx_context *rxc;
	size_t i, netq_num;

	netq_num = sc->sc_max_nvq_pairs * 2;
	for (i = 0; i < netq_num; i++) {
		netq = &sc->sc_netqs[i];
		evcnt_attach_dynamic(&netq->netq_mbuf_load_failed, EVCNT_TYPE_MISC,
		    NULL, netq->netq_evgroup, "failed to load mbuf to DMA");
		evcnt_attach_dynamic(&netq->netq_enqueue_failed,
		    EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
		    "virtqueue enqueue failed");

		switch (VIOIF_NETQ_DIR(i)) {
		case VIOIF_NETQ_RX:
			rxc = netq->netq_ctx;
			evcnt_attach_dynamic(&rxc->rxc_mbuf_enobufs,
			    EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
			    "no receive buffer");
			break;
		case VIOIF_NETQ_TX:
			txc = netq->netq_ctx;
			evcnt_attach_dynamic(&txc->txc_defrag_failed,
			    EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
			    "m_defrag() failed");
			break;
		}
	}

	evcnt_attach_dynamic(&sc->sc_ctrlq.ctrlq_cmd_load_failed, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "control command dmamap load failed");
	evcnt_attach_dynamic(&sc->sc_ctrlq.ctrlq_cmd_failed, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "control command failed");
}

/*
 * allocate memory
 */
static int
vioif_dmamap_create(struct vioif_softc *sc, bus_dmamap_t *map,
    bus_size_t size, int nsegs, const char *usage)
{
	int r;

	r = bus_dmamap_create(virtio_dmat(sc->sc_virtio), size,
	    nsegs, size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, map);

	if (r != 0) {
		aprint_error_dev(sc->sc_dev, "%s dmamap creation failed, "
		    "error code %d\n", usage, r);
	}

	return r;
}

static void
vioif_dmamap_destroy(struct vioif_softc *sc, bus_dmamap_t *map)
{

	if (*map) {
		bus_dmamap_destroy(virtio_dmat(sc->sc_virtio), *map);
		*map = NULL;
	}
}

static int
vioif_dmamap_create_load(struct vioif_softc *sc, bus_dmamap_t *map,
    void *buf, bus_size_t size, int nsegs, int rw, const char *usage)
{
	int r;

	r = vioif_dmamap_create(sc, map, size, nsegs, usage);
	if (r != 0)
		return 1;

	r = bus_dmamap_load(virtio_dmat(sc->sc_virtio), *map, buf,
	    size, NULL, rw | BUS_DMA_NOWAIT);
	if (r != 0) {
		vioif_dmamap_destroy(sc, map);
		aprint_error_dev(sc->sc_dev, "%s dmamap load failed. "
		    "error code %d\n", usage, r);
	}

	return r;
}

static void *
vioif_assign_mem(intptr_t *p, size_t size)
{
	intptr_t rv;

	rv = *p;
	*p += size;

	return (void *)rv;
}

/*
 * dma memory is used for:
 *   netq_maps_kva:	 metadata array for received frames (READ) and
 *			 sent frames (WRITE)
 *   ctrlq_cmd:		 command to be sent via ctrl vq (WRITE)
 *   ctrlq_status:	 return value for a command via ctrl vq (READ)
 *   ctrlq_rx:		 parameter for a VIRTIO_NET_CTRL_RX class command
 *			 (WRITE)
 *   ctrlq_mac_tbl_uc:	 unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
 *			 class command (WRITE)
 *   ctrlq_mac_tbl_mc:	 multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
 *			 class command (WRITE)
 * ctrlq_* structures are allocated only one each; they are protected by
 * ctrlq_inuse variable and ctrlq_wait condvar.
 */
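/*
 * Editor's illustration of the resulting layout for a single queue pair
 * without a control queue:
 *
 *	sc_dmamem: | rx hdrs x vq_num | tx hdrs x vq_num |
 *	sc_kmem:   | rx vioif_net_map x vq_num | tx vioif_net_map x vq_num |
 */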
static int
vioif_alloc_mems(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct vioif_netqueue *netq;
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
	struct vioif_net_map *maps;
	unsigned int vq_num;
	int r, rsegs;
	bus_size_t dmamemsize;
	size_t qid, i, netq_num, kmemsize;
	void *vaddr;
	intptr_t p;

	netq_num = sc->sc_max_nvq_pairs * 2;

	/* allocate DMA memory */
	dmamemsize = 0;

	for (qid = 0; qid < netq_num; qid++) {
		maps = sc->sc_netqs[qid].netq_maps;
		vq_num = sc->sc_netqs[qid].netq_vq->vq_num;
		dmamemsize += sizeof(*maps[0].vnm_hdr) * vq_num;
	}

	if (sc->sc_has_ctrl) {
		dmamemsize += sizeof(struct virtio_net_ctrl_cmd);
		dmamemsize += sizeof(struct virtio_net_ctrl_status);
		dmamemsize += sizeof(struct virtio_net_ctrl_rx);
		dmamemsize += sizeof(struct virtio_net_ctrl_mac_tbl)
		    + ETHER_ADDR_LEN;
		dmamemsize += sizeof(struct virtio_net_ctrl_mac_tbl)
		    + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
		dmamemsize += sizeof(struct virtio_net_ctrl_mac_addr);
		dmamemsize += sizeof(struct virtio_net_ctrl_mq);
	}

	r = bus_dmamem_alloc(virtio_dmat(vsc), dmamemsize, 0, 0,
	    &sc->sc_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "DMA memory allocation failed, size %zu, "
		    "error code %d\n", dmamemsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_segs[0], 1,
	    dmamemsize, &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "DMA memory map failed, error code %d\n", r);
		goto err_dmamem_alloc;
	}

	/* assign DMA memory */
	memset(vaddr, 0, dmamemsize);
	sc->sc_dmamem = vaddr;
	p = (intptr_t) vaddr;

	for (qid = 0; qid < netq_num; qid++) {
		netq = &sc->sc_netqs[qid];
		maps = netq->netq_maps;
		vq_num = netq->netq_vq->vq_num;

		netq->netq_maps_kva = vioif_assign_mem(&p,
		    sizeof(*maps[0].vnm_hdr) * vq_num);
	}

	if (sc->sc_has_ctrl) {
		ctrlq->ctrlq_cmd = vioif_assign_mem(&p,
		    sizeof(*ctrlq->ctrlq_cmd));
		ctrlq->ctrlq_status = vioif_assign_mem(&p,
		    sizeof(*ctrlq->ctrlq_status));
		ctrlq->ctrlq_rx = vioif_assign_mem(&p,
		    sizeof(*ctrlq->ctrlq_rx));
		ctrlq->ctrlq_mac_tbl_uc = vioif_assign_mem(&p,
		    sizeof(*ctrlq->ctrlq_mac_tbl_uc)
		    + ETHER_ADDR_LEN);
		ctrlq->ctrlq_mac_tbl_mc = vioif_assign_mem(&p,
		    sizeof(*ctrlq->ctrlq_mac_tbl_mc)
		    + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES);
		ctrlq->ctrlq_mac_addr = vioif_assign_mem(&p,
		    sizeof(*ctrlq->ctrlq_mac_addr));
		ctrlq->ctrlq_mq = vioif_assign_mem(&p, sizeof(*ctrlq->ctrlq_mq));
	}

	/* allocate kmem */
	kmemsize = 0;

	for (qid = 0; qid < netq_num; qid++) {
		netq = &sc->sc_netqs[qid];
		vq_num = netq->netq_vq->vq_num;

		kmemsize += sizeof(netq->netq_maps[0]) * vq_num;
	}

	vaddr = kmem_zalloc(kmemsize, KM_SLEEP);
	sc->sc_kmem = vaddr;

	/* assign allocated kmem */
	p = (intptr_t) vaddr;

	for (qid = 0; qid < netq_num; qid++) {
		netq = &sc->sc_netqs[qid];
		vq_num = netq->netq_vq->vq_num;

		netq->netq_maps = vioif_assign_mem(&p,
		    sizeof(netq->netq_maps[0]) * vq_num);
	}

	/* prepare dmamaps */
	for (qid = 0; qid < netq_num; qid++) {
		static const struct {
			const char	*msg_hdr;
			const char	*msg_payload;
			int		 dma_flag;
			bus_size_t	 dma_size;
			int		 dma_nsegs;
		} dmaparams[VIOIF_NETQ_IDX] = {
			[VIOIF_NETQ_RX] = {
				.msg_hdr	= "rx header",
				.msg_payload	= "rx payload",
				.dma_flag	= BUS_DMA_READ,
				.dma_size	= MCLBYTES - ETHER_ALIGN,
				.dma_nsegs	= 1,
			},
			[VIOIF_NETQ_TX] = {
				.msg_hdr	= "tx header",
				.msg_payload	= "tx payload",
				.dma_flag	= BUS_DMA_WRITE,
				.dma_size	= ETHER_MAX_LEN,
				.dma_nsegs	= VIRTIO_NET_TX_MAXNSEGS,
			}
		};

		struct virtio_net_hdr *hdrs;
		int dir;

		dir = VIOIF_NETQ_DIR(qid);
		netq = &sc->sc_netqs[qid];
		vq_num = netq->netq_vq->vq_num;
		maps = netq->netq_maps;
		hdrs = netq->netq_maps_kva;

		for (i = 0; i < vq_num; i++) {
			maps[i].vnm_hdr = &hdrs[i];

			r = vioif_dmamap_create_load(sc, &maps[i].vnm_hdr_map,
			    maps[i].vnm_hdr, sc->sc_hdr_size, 1,
			    dmaparams[dir].dma_flag, dmaparams[dir].msg_hdr);
			if (r != 0)
				goto err_reqs;

			r = vioif_dmamap_create(sc, &maps[i].vnm_mbuf_map,
			    dmaparams[dir].dma_size, dmaparams[dir].dma_nsegs,
			    dmaparams[dir].msg_payload);
			if (r != 0)
				goto err_reqs;
		}
	}

	if (sc->sc_has_ctrl) {
		/* control vq class & command */
		r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_cmd_dmamap,
		    ctrlq->ctrlq_cmd, sizeof(*ctrlq->ctrlq_cmd), 1,
		    BUS_DMA_WRITE, "control command");
		if (r != 0)
			goto err_reqs;

		r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_status_dmamap,
		    ctrlq->ctrlq_status, sizeof(*ctrlq->ctrlq_status), 1,
		    BUS_DMA_READ, "control status");
		if (r != 0)
			goto err_reqs;

		/* control vq rx mode command parameter */
		r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_rx_dmamap,
		    ctrlq->ctrlq_rx, sizeof(*ctrlq->ctrlq_rx), 1,
		    BUS_DMA_WRITE, "rx mode control command");
		if (r != 0)
			goto err_reqs;

		/* multiqueue set command */
		r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_mq_dmamap,
		    ctrlq->ctrlq_mq, sizeof(*ctrlq->ctrlq_mq), 1,
		    BUS_DMA_WRITE, "multiqueue set command");
		if (r != 0)
			goto err_reqs;

		/* control vq MAC filter table for unicast */
		/* do not load now since its length is variable */
		r = vioif_dmamap_create(sc, &ctrlq->ctrlq_tbl_uc_dmamap,
		    sizeof(*ctrlq->ctrlq_mac_tbl_uc)
		    + ETHER_ADDR_LEN, 1,
		    "unicast MAC address filter command");
		if (r != 0)
			goto err_reqs;

		/* control vq MAC filter table for multicast */
		r = vioif_dmamap_create(sc, &ctrlq->ctrlq_tbl_mc_dmamap,
		    sizeof(*ctrlq->ctrlq_mac_tbl_mc)
		    + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES, 1,
		    "multicast MAC address filter command");
		if (r != 0)
			goto err_reqs;

		/* control vq MAC address set command */
		r = vioif_dmamap_create_load(sc,
		    &ctrlq->ctrlq_mac_addr_dmamap,
		    ctrlq->ctrlq_mac_addr,
		    sizeof(*ctrlq->ctrlq_mac_addr), 1,
		    BUS_DMA_WRITE, "mac addr set command");
		if (r != 0)
			goto err_reqs;
	}

	return 0;

err_reqs:
	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_tbl_mc_dmamap);
	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_tbl_uc_dmamap);
	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_rx_dmamap);
	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_status_dmamap);
	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_cmd_dmamap);
	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_mac_addr_dmamap);
	for (qid = 0; qid < netq_num; qid++) {
		vq_num = sc->sc_netqs[qid].netq_vq->vq_num;
		maps = sc->sc_netqs[qid].netq_maps;

		for (i = 0; i < vq_num; i++) {
			vioif_dmamap_destroy(sc, &maps[i].vnm_mbuf_map);
			vioif_dmamap_destroy(sc, &maps[i].vnm_hdr_map);
		}
	}
	if (sc->sc_kmem) {
		kmem_free(sc->sc_kmem, kmemsize);
		sc->sc_kmem = NULL;
	}
	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_dmamem, dmamemsize);
err_dmamem_alloc:
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_segs[0], 1);
err_none:
	return -1;
}

static void
vioif_alloc_queues(struct vioif_softc *sc)
{
	int nvq_pairs = sc->sc_max_nvq_pairs;
	size_t nvqs, netq_num;

	KASSERT(nvq_pairs <= VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX);

	nvqs = netq_num = sc->sc_max_nvq_pairs * 2;
	if (sc->sc_has_ctrl)
		nvqs++;

	sc->sc_vqs = kmem_zalloc(sizeof(sc->sc_vqs[0]) * nvqs, KM_SLEEP);
	sc->sc_netqs = kmem_zalloc(sizeof(sc->sc_vqs[0]) * netq_num,
	    KM_SLEEP);
}

static void
vioif_free_queues(struct vioif_softc *sc)
{
	size_t nvqs, netq_num;

	nvqs = netq_num = sc->sc_max_nvq_pairs * 2;
	if (sc->sc_ctrlq.ctrlq_vq)
		nvqs++;

	kmem_free(sc->sc_netqs, sizeof(sc->sc_netqs[0]) * netq_num);
	kmem_free(sc->sc_vqs, sizeof(sc->sc_vqs[0]) * nvqs);
	sc->sc_netqs = NULL;
	sc->sc_vqs = NULL;
}
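/*
 * Editor's sketch of the virtqueue array built by vioif_alloc_queues()
 * for sc_max_nvq_pairs == 2 with a control queue:
 *
 *	sc_vqs[0] rx0	sc_vqs[1] tx0
 *	sc_vqs[2] rx1	sc_vqs[3] tx1
 *	sc_vqs[4] ctrl
 */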

/*
 * Network queues
 */
static int
vioif_netqueue_init(struct vioif_softc *sc, struct virtio_softc *vsc,
    size_t qid, u_int softint_flags)
{
	static const struct {
		const char	*dirname;
		int		 segsize;
		int		 nsegs;
		int		(*intrhand)(void *);
		void		(*sihand)(void *);
	} params[VIOIF_NETQ_IDX] = {
		[VIOIF_NETQ_RX] = {
			.dirname	= "rx",
			.segsize	= MCLBYTES,
			.nsegs		= 2,
			.intrhand	= vioif_rx_intr,
			.sihand		= vioif_rx_handle,
		},
		[VIOIF_NETQ_TX] = {
			.dirname	= "tx",
			.segsize	= ETHER_MAX_LEN - ETHER_HDR_LEN,
			.nsegs		= 2,
			.intrhand	= vioif_tx_intr,
			.sihand		= vioif_tx_handle,
		}
	};

	struct virtqueue *vq;
	struct vioif_netqueue *netq;
	struct vioif_tx_context *txc;
	struct vioif_rx_context *rxc;
	char qname[32];
	int r, dir;

	txc = NULL;
	rxc = NULL;
	netq = &sc->sc_netqs[qid];
	vq = &sc->sc_vqs[qid];
	dir = VIOIF_NETQ_DIR(qid);

	netq->netq_vq = &sc->sc_vqs[qid];
	netq->netq_stopping = false;
	netq->netq_running_handle = false;

	snprintf(qname, sizeof(qname), "%s%zu",
	    params[dir].dirname, VIOIF_NETQ_PAIRIDX(qid));
	snprintf(netq->netq_evgroup, sizeof(netq->netq_evgroup),
	    "%s-%s", device_xname(sc->sc_dev), qname);

	mutex_init(&netq->netq_lock, MUTEX_DEFAULT, IPL_NET);
	r = virtio_alloc_vq(vsc, vq, qid,
	    params[dir].segsize + sc->sc_hdr_size,
	    params[dir].nsegs, qname);
	if (r != 0)
		goto err;
	netq->netq_vq = vq;

	netq->netq_vq->vq_intrhand = params[dir].intrhand;
	netq->netq_vq->vq_intrhand_arg = netq;
	netq->netq_softint = softint_establish(softint_flags,
	    params[dir].sihand, netq);
	if (netq->netq_softint == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish %s softint\n",
		    params[dir].dirname);
		goto err;
	}
	vioif_work_set(&netq->netq_work, params[dir].sihand, netq);

	switch (dir) {
	case VIOIF_NETQ_RX:
		rxc = kmem_zalloc(sizeof(*rxc), KM_SLEEP);
		netq->netq_ctx = rxc;
		/* nothing to do */
		break;
	case VIOIF_NETQ_TX:
		txc = kmem_zalloc(sizeof(*txc), KM_SLEEP);
		netq->netq_ctx = (void *)txc;
		txc->txc_deferred_transmit = softint_establish(softint_flags,
		    vioif_deferred_transmit, netq);
		if (txc->txc_deferred_transmit == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't establish softint for "
			    "tx deferred transmit\n");
			goto err;
		}
		txc->txc_link_active = VIOIF_IS_LINK_ACTIVE(sc);
		txc->txc_intrq = pcq_create(vq->vq_num, KM_SLEEP);
		break;
	}

	return 0;

err:
	netq->netq_ctx = NULL;

	if (rxc != NULL) {
		kmem_free(rxc, sizeof(*rxc));
	}

	if (txc != NULL) {
		if (txc->txc_deferred_transmit != NULL)
			softint_disestablish(txc->txc_deferred_transmit);
		if (txc->txc_intrq != NULL)
			pcq_destroy(txc->txc_intrq);
		kmem_free(txc, sizeof(*txc));
	}

	vioif_work_set(&netq->netq_work, NULL, NULL);
	if (netq->netq_softint != NULL) {
		softint_disestablish(netq->netq_softint);
		netq->netq_softint = NULL;
	}
	netq->netq_vq->vq_intrhand = NULL;
	netq->netq_vq->vq_intrhand_arg = NULL;

	virtio_free_vq(vsc, vq);
	mutex_destroy(&netq->netq_lock);
	netq->netq_vq = NULL;

	return -1;
}

static void
vioif_netqueue_teardown(struct vioif_softc *sc, struct virtio_softc *vsc,
    size_t qid)
{
	struct vioif_netqueue *netq;
	struct vioif_rx_context *rxc;
	struct vioif_tx_context *txc;
	int dir;

	netq = &sc->sc_netqs[qid];

	if (netq->netq_vq == NULL)
		return;

	dir = VIOIF_NETQ_DIR(qid);
	switch (dir) {
	case VIOIF_NETQ_RX:
		rxc = netq->netq_ctx;
		netq->netq_ctx = NULL;
		kmem_free(rxc, sizeof(*rxc));
		break;
	case VIOIF_NETQ_TX:
		txc = netq->netq_ctx;
		netq->netq_ctx = NULL;
		softint_disestablish(txc->txc_deferred_transmit);
		pcq_destroy(txc->txc_intrq);
		kmem_free(txc, sizeof(*txc));
		break;
	}

	softint_disestablish(netq->netq_softint);
	virtio_free_vq(vsc, netq->netq_vq);
	mutex_destroy(&netq->netq_lock);
	netq->netq_vq = NULL;
}

static void
vioif_net_sched_handle(struct vioif_softc *sc, struct vioif_netqueue *netq)
{

	KASSERT(mutex_owned(&netq->netq_lock));
	KASSERT(!netq->netq_stopping);

	if (netq->netq_workqueue) {
		vioif_work_add(sc->sc_txrx_workqueue, &netq->netq_work);
	} else {
		softint_schedule(netq->netq_softint);
	}
}

static int
vioif_net_load_mbuf(struct virtio_softc *vsc, struct vioif_net_map *map,
    struct mbuf *m, int dma_flags)
{
	int r;

	KASSERT(map->vnm_mbuf == NULL);

	r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
	    map->vnm_mbuf_map, m, dma_flags | BUS_DMA_NOWAIT);
	if (r == 0) {
		map->vnm_mbuf = m;
	}

	return r;
}

static void
vioif_net_unload_mbuf(struct virtio_softc *vsc, struct vioif_net_map *map)
{

	KASSERT(map->vnm_mbuf != NULL);
	bus_dmamap_unload(virtio_dmat(vsc), map->vnm_mbuf_map);
	map->vnm_mbuf = NULL;
}

static int
vioif_net_enqueue(struct virtio_softc *vsc, struct virtqueue *vq,
    int slot, struct vioif_net_map *map, int dma_ops, bool is_write)
{
	int r;

	KASSERT(map->vnm_mbuf != NULL);

	/* This should actually never fail */
	r = virtio_enqueue_reserve(vsc, vq, slot,
	    map->vnm_mbuf_map->dm_nsegs + 1);
	if (r != 0) {
		/* slot already freed by virtio_enqueue_reserve */
		return r;
	}

	bus_dmamap_sync(virtio_dmat(vsc), map->vnm_mbuf_map,
	    0, map->vnm_mbuf_map->dm_mapsize, dma_ops);
	bus_dmamap_sync(virtio_dmat(vsc), map->vnm_hdr_map,
	    0, map->vnm_hdr_map->dm_mapsize, dma_ops);

	virtio_enqueue(vsc, vq, slot, map->vnm_hdr_map, is_write);
	virtio_enqueue(vsc, vq, slot, map->vnm_mbuf_map, is_write);
	virtio_enqueue_commit(vsc, vq, slot, false);

	return 0;
}

static int
vioif_net_enqueue_tx(struct virtio_softc *vsc, struct virtqueue *vq,
    int slot, struct vioif_net_map *map)
{

	return vioif_net_enqueue(vsc, vq, slot, map,
	    BUS_DMASYNC_PREWRITE, true);
}

static int
vioif_net_enqueue_rx(struct virtio_softc *vsc, struct virtqueue *vq,
    int slot, struct vioif_net_map *map)
{

	return vioif_net_enqueue(vsc, vq, slot, map,
	    BUS_DMASYNC_PREREAD, false);
}

static struct mbuf *
vioif_net_dequeue_commit(struct virtio_softc *vsc, struct virtqueue *vq,
    int slot, struct vioif_net_map *map, int dma_flags)
{
	struct mbuf *m;

	m = map->vnm_mbuf;
	KASSERT(m != NULL);
	map->vnm_mbuf = NULL;

	bus_dmamap_sync(virtio_dmat(vsc), map->vnm_hdr_map,
	    0, map->vnm_hdr_map->dm_mapsize, dma_flags);
	bus_dmamap_sync(virtio_dmat(vsc), map->vnm_mbuf_map,
	    0, map->vnm_mbuf_map->dm_mapsize, dma_flags);

	bus_dmamap_unload(virtio_dmat(vsc), map->vnm_mbuf_map);
	virtio_dequeue_commit(vsc, vq, slot);

	return m;
}
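/*
 * Editor's note: each transfer built by vioif_net_enqueue() occupies
 * dm_nsegs + 1 descriptors, one for the virtio_net_hdr and one per mbuf
 * DMA segment, which is why it reserves vnm_mbuf_map->dm_nsegs + 1
 * slots before enqueueing the two dmamaps.
 */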

static void
vioif_net_intr_enable(struct vioif_softc *sc, struct virtio_softc *vsc)
{
	struct vioif_netqueue *netq;
	size_t i, act_qnum;
	int enqueued;

	act_qnum = sc->sc_act_nvq_pairs * 2;
	for (i = 0; i < act_qnum; i++) {
		netq = &sc->sc_netqs[i];

		KASSERT(!netq->netq_stopping);
		KASSERT(!netq->netq_running_handle);

		enqueued = virtio_start_vq_intr(vsc, netq->netq_vq);
		if (enqueued != 0) {
			virtio_stop_vq_intr(vsc, netq->netq_vq);

			mutex_enter(&netq->netq_lock);
			netq->netq_running_handle = true;
			vioif_net_sched_handle(sc, netq);
			mutex_exit(&netq->netq_lock);
		}
	}
}

static void
vioif_net_intr_disable(struct vioif_softc *sc, struct virtio_softc *vsc)
{
	struct vioif_netqueue *netq;
	size_t i, act_qnum;

	act_qnum = sc->sc_act_nvq_pairs * 2;
	for (i = 0; i < act_qnum; i++) {
		netq = &sc->sc_netqs[i];

		virtio_stop_vq_intr(vsc, netq->netq_vq);
	}
}

/*
 * Receive implementation
 */
/* enqueue mbufs to receive slots */
static void
vioif_populate_rx_mbufs_locked(struct vioif_softc *sc, struct vioif_netqueue *netq)
{
	struct virtqueue *vq = netq->netq_vq;
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_rx_context *rxc;
	struct vioif_net_map *map;
	struct mbuf *m;
	int i, r, ndone = 0;

	KASSERT(mutex_owned(&netq->netq_lock));

	rxc = netq->netq_ctx;

	for (i = 0; i < vq->vq_num; i++) {
		int slot;
		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r == EAGAIN)
			break;
		if (__predict_false(r != 0))
			panic("enqueue_prep for rx buffers");

		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			virtio_enqueue_abort(vsc, vq, slot);
			rxc->rxc_mbuf_enobufs.ev_count++;
			break;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			virtio_enqueue_abort(vsc, vq, slot);
			m_freem(m);
			rxc->rxc_mbuf_enobufs.ev_count++;
			break;
		}

		m->m_len = m->m_pkthdr.len = MCLBYTES;
		m_adj(m, ETHER_ALIGN);

		map = &netq->netq_maps[slot];
		r = vioif_net_load_mbuf(vsc, map, m, BUS_DMA_READ);
		if (r != 0) {
			virtio_enqueue_abort(vsc, vq, slot);
			m_freem(m);
			netq->netq_mbuf_load_failed.ev_count++;
			break;
		}

		r = vioif_net_enqueue_rx(vsc, vq, slot, map);
		if (r != 0) {
			vioif_net_unload_mbuf(vsc, map);
			netq->netq_enqueue_failed.ev_count++;
			m_freem(m);
			/* slot already freed by vioif_net_enqueue_rx */
			break;
		}

		ndone++;
	}

	if (ndone > 0)
		vioif_notify(vsc, vq);
}

/* dequeue received packets */
static bool
vioif_rx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc,
    struct vioif_netqueue *netq, u_int limit, size_t *ndeqp)
{
	struct virtqueue *vq = netq->netq_vq;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct vioif_net_map *map;
	struct mbuf *m;
	int slot, len;
	bool more;
	size_t ndeq;

	KASSERT(mutex_owned(&netq->netq_lock));

	more = false;
	ndeq = 0;

	if (virtio_vq_is_enqueued(vsc, vq) == false)
		goto done;

	for (;; ndeq++) {
		if (ndeq >= limit) {
			more = true;
			break;
		}

		if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
			break;

		map = &netq->netq_maps[slot];
		KASSERT(map->vnm_mbuf != NULL);
		m = vioif_net_dequeue_commit(vsc, vq, slot,
		    map, BUS_DMASYNC_POSTREAD);
		KASSERT(m != NULL);

		m->m_len = m->m_pkthdr.len = len - sc->sc_hdr_size;
		m_set_rcvif(m, ifp);
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

done:
	if (ndeqp != NULL)
		*ndeqp = ndeq;

	return more;
}

static void
vioif_rx_queue_clear(struct vioif_softc *sc, struct virtio_softc *vsc,
    struct vioif_netqueue *netq)
{
	struct vioif_net_map *map;
	struct mbuf *m;
	unsigned int i, vq_num;
	bool more;

	mutex_enter(&netq->netq_lock);

	vq_num = netq->netq_vq->vq_num;
	for (;;) {
		more = vioif_rx_deq_locked(sc, vsc, netq, vq_num, NULL);
		if (more == false)
			break;
	}

	for (i = 0; i < vq_num; i++) {
		map = &netq->netq_maps[i];

		m = map->vnm_mbuf;
		if (m == NULL)
			continue;

		vioif_net_unload_mbuf(vsc, map);
		m_freem(m);
	}
	mutex_exit(&netq->netq_lock);
}

static void
vioif_rx_handle_locked(void *xnetq, u_int limit)
{
	struct vioif_netqueue *netq = xnetq;
	struct virtqueue *vq = netq->netq_vq;
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(virtio_child(vsc));
	bool more;
	int enqueued;
	size_t ndeq;

	KASSERT(mutex_owned(&netq->netq_lock));
	KASSERT(!netq->netq_stopping);

	more = vioif_rx_deq_locked(sc, vsc, netq, limit, &ndeq);
	if (ndeq > 0)
		vioif_populate_rx_mbufs_locked(sc, netq);

	if (more) {
		vioif_net_sched_handle(sc, netq);
		return;
	}

	enqueued = virtio_start_vq_intr(vsc, netq->netq_vq);
	if (enqueued != 0) {
		virtio_stop_vq_intr(vsc, netq->netq_vq);
		vioif_net_sched_handle(sc, netq);
		return;
	}

	netq->netq_running_handle = false;
}
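/*
 * Editor's sketch of the interrupt/softint handoff used below: the hard
 * interrupt handler sets netq_running_handle, disables the virtqueue
 * interrupt and processes up to the intr limit; remaining work
 * continues in softint/workqueue context, and the interrupt is only
 * re-enabled once virtio_start_vq_intr() reports no newly pending
 * entries, closing the race with concurrently arriving packets.
 */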

static int
vioif_rx_intr(void *arg)
{
	struct vioif_netqueue *netq = arg;
	struct virtqueue *vq = netq->netq_vq;
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(virtio_child(vsc));
	u_int limit;

	mutex_enter(&netq->netq_lock);

	/* handler is already running in softint/workqueue */
	if (netq->netq_running_handle)
		goto done;

	netq->netq_running_handle = true;

	limit = sc->sc_rx_intr_process_limit;
	virtio_stop_vq_intr(vsc, vq);
	vioif_rx_handle_locked(netq, limit);

done:
	mutex_exit(&netq->netq_lock);
	return 1;
}

static void
vioif_rx_handle(void *xnetq)
{
	struct vioif_netqueue *netq = xnetq;
	struct virtqueue *vq = netq->netq_vq;
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(virtio_child(vsc));
	u_int limit;

	mutex_enter(&netq->netq_lock);

	KASSERT(netq->netq_running_handle);

	if (netq->netq_stopping) {
		netq->netq_running_handle = false;
		goto done;
	}

	limit = sc->sc_rx_process_limit;
	vioif_rx_handle_locked(netq, limit);

done:
	mutex_exit(&netq->netq_lock);
}

/*
 * Transmission implementation
 */
/* enqueue mbufs to send */
static void
vioif_send_common_locked(struct ifnet *ifp, struct vioif_netqueue *netq,
    bool is_transmit)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = netq->netq_vq;
	struct vioif_tx_context *txc;
	struct vioif_net_map *map;
	struct mbuf *m;
	int queued = 0;

	KASSERT(mutex_owned(&netq->netq_lock));

	if (netq->netq_stopping ||
	    !ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	txc = netq->netq_ctx;

	if (!txc->txc_link_active)
		return;

	if (!is_transmit &&
	    ISSET(ifp->if_flags, IFF_OACTIVE))
		return;

	for (;;) {
		int slot, r;
		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r == EAGAIN)
			break;
		if (__predict_false(r != 0))
			panic("enqueue_prep for tx buffers");

		if (is_transmit)
			m = pcq_get(txc->txc_intrq);
		else
			IFQ_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL) {
			virtio_enqueue_abort(vsc, vq, slot);
			break;
		}

		map = &netq->netq_maps[slot];
		KASSERT(map->vnm_mbuf == NULL);

		r = vioif_net_load_mbuf(vsc, map, m, BUS_DMA_WRITE);
		if (r != 0) {
			/* maybe just too fragmented */
			struct mbuf *newm;

			newm = m_defrag(m, M_NOWAIT);
			if (newm != NULL) {
				m = newm;
				r = vioif_net_load_mbuf(vsc, map, m,
				    BUS_DMA_WRITE);
			} else {
				txc->txc_defrag_failed.ev_count++;
				r = -1;
			}

			if (r != 0) {
				netq->netq_mbuf_load_failed.ev_count++;
				m_freem(m);
				if_statinc(ifp, if_oerrors);
				virtio_enqueue_abort(vsc, vq, slot);
				continue;
			}
		}

		memset(map->vnm_hdr, 0, sc->sc_hdr_size);

		r = vioif_net_enqueue_tx(vsc, vq, slot, map);
		if (r != 0) {
			netq->netq_enqueue_failed.ev_count++;
			vioif_net_unload_mbuf(vsc, map);
			m_freem(m);
			/* slot already freed by vioif_net_enqueue_tx */

			if_statinc(ifp, if_oerrors);
			continue;
		}

		queued++;
		bpf_mtap(ifp, m, BPF_D_OUT);
	}

	if (queued > 0) {
		vioif_notify(vsc, vq);
		ifp->if_timer = 5;
	}
}
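/*
 * Editor's note: if bus_dmamap_load_mbuf() fails above, typically
 * because the chain has more than VIRTIO_NET_TX_MAXNSEGS segments, the
 * packet is linearized with m_defrag() and loaded once more before it
 * is dropped and counted in if_oerrors.
 */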
/* enqueue mbufs to send */
static void
vioif_send_common_locked(struct ifnet *ifp, struct vioif_netqueue *netq,
    bool is_transmit)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = netq->netq_vq;
	struct vioif_tx_context *txc;
	struct vioif_net_map *map;
	struct mbuf *m;
	int queued = 0;

	KASSERT(mutex_owned(&netq->netq_lock));

	if (netq->netq_stopping ||
	    !ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	txc = netq->netq_ctx;

	if (!txc->txc_link_active)
		return;

	if (!is_transmit &&
	    ISSET(ifp->if_flags, IFF_OACTIVE))
		return;

	for (;;) {
		int slot, r;

		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r == EAGAIN)
			break;
		if (__predict_false(r != 0))
			panic("enqueue_prep for tx buffers");

		if (is_transmit)
			m = pcq_get(txc->txc_intrq);
		else
			IFQ_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL) {
			virtio_enqueue_abort(vsc, vq, slot);
			break;
		}

		map = &netq->netq_maps[slot];
		KASSERT(map->vnm_mbuf == NULL);

		r = vioif_net_load_mbuf(vsc, map, m, BUS_DMA_WRITE);
		if (r != 0) {
			/* maybe just too fragmented */
			struct mbuf *newm;

			newm = m_defrag(m, M_NOWAIT);
			if (newm != NULL) {
				m = newm;
				r = vioif_net_load_mbuf(vsc, map, m,
				    BUS_DMA_WRITE);
			} else {
				txc->txc_defrag_failed.ev_count++;
				r = -1;
			}

			if (r != 0) {
				netq->netq_mbuf_load_failed.ev_count++;
				m_freem(m);
				if_statinc(ifp, if_oerrors);
				virtio_enqueue_abort(vsc, vq, slot);
				continue;
			}
		}

		memset(map->vnm_hdr, 0, sc->sc_hdr_size);

		r = vioif_net_enqueue_tx(vsc, vq, slot, map);
		if (r != 0) {
			netq->netq_enqueue_failed.ev_count++;
			vioif_net_unload_mbuf(vsc, map);
			m_freem(m);
			/* slot already freed by vioif_net_enqueue_tx */

			if_statinc(ifp, if_oerrors);
			continue;
		}

		queued++;
		bpf_mtap(ifp, m, BPF_D_OUT);
	}

	if (queued > 0) {
		vioif_notify(vsc, vq);
		ifp->if_timer = 5;
	}
}

/* dequeue sent mbufs */
static bool
vioif_tx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc,
    struct vioif_netqueue *netq, u_int limit)
{
	struct virtqueue *vq = netq->netq_vq;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct vioif_net_map *map;
	struct mbuf *m;
	int slot, len;
	bool more = false;

	KASSERT(mutex_owned(&netq->netq_lock));

	if (virtio_vq_is_enqueued(vsc, vq) == false)
		return false;

	for (;;) {
		if (limit-- == 0) {
			more = true;
			break;
		}

		if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
			break;

		map = &netq->netq_maps[slot];
		KASSERT(map->vnm_mbuf != NULL);
		m = vioif_net_dequeue_commit(vsc, vq, slot,
		    map, BUS_DMASYNC_POSTWRITE);
		KASSERT(m != NULL);

		if_statinc(ifp, if_opackets);
		m_freem(m);
	}

	return more;
}

static void
vioif_tx_queue_clear(struct vioif_softc *sc, struct virtio_softc *vsc,
    struct vioif_netqueue *netq)
{
	struct vioif_net_map *map;
	struct mbuf *m;
	unsigned int i, vq_num;
	bool more;

	mutex_enter(&netq->netq_lock);

	vq_num = netq->netq_vq->vq_num;
	for (;;) {
		more = vioif_tx_deq_locked(sc, vsc, netq, vq_num);
		if (more == false)
			break;
	}

	for (i = 0; i < vq_num; i++) {
		map = &netq->netq_maps[i];

		m = map->vnm_mbuf;
		if (m == NULL)
			continue;

		vioif_net_unload_mbuf(vsc, map);
		m_freem(m);
	}
	mutex_exit(&netq->netq_lock);
}

static void
vioif_start_locked(struct ifnet *ifp, struct vioif_netqueue *netq)
{

	/*
	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
	 */
	vioif_send_common_locked(ifp, netq, false);
}

static void
vioif_transmit_locked(struct ifnet *ifp, struct vioif_netqueue *netq)
{

	vioif_send_common_locked(ifp, netq, true);
}

static void
vioif_deferred_transmit(void *arg)
{
	struct vioif_netqueue *netq = arg;
	struct virtio_softc *vsc = netq->netq_vq->vq_owner;
	struct vioif_softc *sc = device_private(virtio_child(vsc));
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	mutex_enter(&netq->netq_lock);
	vioif_send_common_locked(ifp, netq, true);
	mutex_exit(&netq->netq_lock);
}

static void
vioif_tx_handle_locked(struct vioif_netqueue *netq, u_int limit)
{
	struct virtqueue *vq = netq->netq_vq;
	struct vioif_tx_context *txc = netq->netq_ctx;
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(virtio_child(vsc));
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bool more;
	int enqueued;

	KASSERT(mutex_owned(&netq->netq_lock));
	KASSERT(!netq->netq_stopping);

	more = vioif_tx_deq_locked(sc, vsc, netq, limit);
	if (more) {
		vioif_net_sched_handle(sc, netq);
		return;
	}
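	/*
	 * Re-arm the tx interrupt.  With VIRTIO_F_RING_EVENT_IDX the
	 * next interrupt is postponed rather than taken immediately;
	 * otherwise the interrupt is simply re-enabled.  A non-zero
	 * return means the ring already holds more completed buffers,
	 * so keep interrupts off and reschedule the handler instead.
	 */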
	enqueued = (virtio_features(vsc) & VIRTIO_F_RING_EVENT_IDX) ?
	    virtio_postpone_intr_smart(vsc, vq) :
	    virtio_start_vq_intr(vsc, vq);
	if (enqueued != 0) {
		virtio_stop_vq_intr(vsc, vq);
		vioif_net_sched_handle(sc, netq);
		return;
	}

	netq->netq_running_handle = false;

	/* for ALTQ */
	if (netq == &sc->sc_netqs[VIOIF_NETQ_TXQID(0)]) {
		if_schedule_deferred_start(ifp);
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	softint_schedule(txc->txc_deferred_transmit);
}

static int
vioif_tx_intr(void *arg)
{
	struct vioif_netqueue *netq = arg;
	struct virtqueue *vq = netq->netq_vq;
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(virtio_child(vsc));
	u_int limit;

	mutex_enter(&netq->netq_lock);

	/* tx handler is already running in softint/workqueue */
	if (netq->netq_running_handle)
		goto done;

	if (netq->netq_stopping)
		goto done;

	netq->netq_running_handle = true;

	virtio_stop_vq_intr(vsc, vq);
	netq->netq_workqueue = sc->sc_txrx_workqueue_sysctl;
	limit = sc->sc_tx_intr_process_limit;
	vioif_tx_handle_locked(netq, limit);

done:
	mutex_exit(&netq->netq_lock);
	return 1;
}

static void
vioif_tx_handle(void *xnetq)
{
	struct vioif_netqueue *netq = xnetq;
	struct virtqueue *vq = netq->netq_vq;
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(virtio_child(vsc));
	u_int limit;

	mutex_enter(&netq->netq_lock);

	KASSERT(netq->netq_running_handle);

	if (netq->netq_stopping) {
		netq->netq_running_handle = false;
		goto done;
	}

	limit = sc->sc_tx_process_limit;
	vioif_tx_handle_locked(netq, limit);

done:
	mutex_exit(&netq->netq_lock);
}

/*
 * Control vq
 */
/* issue a control-queue command and wait for completion */
static void
vioif_ctrl_acquire(struct vioif_softc *sc)
{
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;

	mutex_enter(&ctrlq->ctrlq_wait_lock);
	while (ctrlq->ctrlq_inuse != FREE)
		cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock);
	ctrlq->ctrlq_inuse = INUSE;
	ctrlq->ctrlq_owner = curlwp;
	mutex_exit(&ctrlq->ctrlq_wait_lock);
}

static void
vioif_ctrl_release(struct vioif_softc *sc)
{
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;

	KASSERT(ctrlq->ctrlq_inuse != FREE);
	KASSERT(ctrlq->ctrlq_owner == curlwp);

	mutex_enter(&ctrlq->ctrlq_wait_lock);
	ctrlq->ctrlq_inuse = FREE;
	ctrlq->ctrlq_owner = NULL;
	cv_signal(&ctrlq->ctrlq_wait);
	mutex_exit(&ctrlq->ctrlq_wait_lock);
}

static int
vioif_ctrl_load_cmdspec(struct vioif_softc *sc,
    struct vioif_ctrl_cmdspec *specs, int nspecs)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	int i, r, loaded;

	loaded = 0;
	for (i = 0; i < nspecs; i++) {
		r = bus_dmamap_load(virtio_dmat(vsc),
		    specs[i].dmamap, specs[i].buf, specs[i].bufsize,
		    NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (r) {
			sc->sc_ctrlq.ctrlq_cmd_load_failed.ev_count++;
			goto err;
		}
		loaded++;
	}

	return 0;

err:
	for (i = 0; i < loaded; i++) {
		bus_dmamap_unload(virtio_dmat(vsc), specs[i].dmamap);
	}

	return r;
}

static void
vioif_ctrl_unload_cmdspec(struct vioif_softc *sc,
    struct vioif_ctrl_cmdspec *specs, int nspecs)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	int i;

	for (i = 0; i < nspecs; i++) {
		bus_dmamap_unload(virtio_dmat(vsc), specs[i].dmamap);
	}
}
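/*
 * Issue one command on the control virtqueue and sleep until the
 * device acknowledges it.  The caller must already own the queue via
 * vioif_ctrl_acquire(); completion is signalled by vioif_ctrl_intr()
 * flipping ctrlq_inuse from INUSE to DONE.
 */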
static int
vioif_ctrl_send_command(struct vioif_softc *sc, uint8_t class, uint8_t cmd,
    struct vioif_ctrl_cmdspec *specs, int nspecs)
{
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
	struct virtqueue *vq = ctrlq->ctrlq_vq;
	struct virtio_softc *vsc = sc->sc_virtio;
	int i, r, slot;

	ctrlq->ctrlq_cmd->class = class;
	ctrlq->ctrlq_cmd->command = cmd;

	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap,
	    0, sizeof(struct virtio_net_ctrl_cmd), BUS_DMASYNC_PREWRITE);
	for (i = 0; i < nspecs; i++) {
		bus_dmamap_sync(virtio_dmat(vsc), specs[i].dmamap,
		    0, specs[i].bufsize, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap,
	    0, sizeof(struct virtio_net_ctrl_status), BUS_DMASYNC_PREREAD);

	/* we need to explicitly (re)start vq intr when using RING EVENT IDX */
	if (virtio_features(vsc) & VIRTIO_F_RING_EVENT_IDX)
		virtio_start_vq_intr(vsc, ctrlq->ctrlq_vq);

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	r = virtio_enqueue_reserve(vsc, vq, slot, nspecs + 2);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_cmd_dmamap, true);
	for (i = 0; i < nspecs; i++) {
		virtio_enqueue(vsc, vq, slot, specs[i].dmamap, true);
	}
	virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_status_dmamap, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* wait for done */
	mutex_enter(&ctrlq->ctrlq_wait_lock);
	while (ctrlq->ctrlq_inuse != DONE)
		cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock);
	mutex_exit(&ctrlq->ctrlq_wait_lock);
	/* already dequeued */

	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap, 0,
	    sizeof(struct virtio_net_ctrl_cmd), BUS_DMASYNC_POSTWRITE);
	for (i = 0; i < nspecs; i++) {
		bus_dmamap_sync(virtio_dmat(vsc), specs[i].dmamap, 0,
		    specs[i].bufsize, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap, 0,
	    sizeof(struct virtio_net_ctrl_status), BUS_DMASYNC_POSTREAD);

	if (ctrlq->ctrlq_status->ack == VIRTIO_NET_OK)
		r = 0;
	else {
		device_printf(sc->sc_dev, "control command failed\n");
		sc->sc_ctrlq.ctrlq_cmd_failed.ev_count++;
		r = EIO;
	}

	return r;
}

/* ctrl vq interrupt; wake up the command issuer */
static int
vioif_ctrl_intr(void *arg)
{
	struct vioif_ctrlqueue *ctrlq = arg;
	struct virtqueue *vq = ctrlq->ctrlq_vq;
	struct virtio_softc *vsc = vq->vq_owner;
	int r, slot;

	if (virtio_vq_is_enqueued(vsc, vq) == false)
		return 0;

	r = virtio_dequeue(vsc, vq, &slot, NULL);
	if (r == ENOENT)
		return 0;
	virtio_dequeue_commit(vsc, vq, slot);

	mutex_enter(&ctrlq->ctrlq_wait_lock);
	ctrlq->ctrlq_inuse = DONE;
	cv_signal(&ctrlq->ctrlq_wait);
	mutex_exit(&ctrlq->ctrlq_wait_lock);

	return 1;
}
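/*
 * VIRTIO_NET_CTRL_RX class commands toggle simple receive-mode flags;
 * vioif_set_promisc() and vioif_set_allmulti() below map onto the
 * PROMISC and ALLMULTI sub-commands respectively.
 */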
static int
vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff)
{
	struct virtio_net_ctrl_rx *rx = sc->sc_ctrlq.ctrlq_rx;
	struct vioif_ctrl_cmdspec specs[1];
	int r;

	if (!sc->sc_has_ctrl)
		return ENOTSUP;

	vioif_ctrl_acquire(sc);

	rx->onoff = onoff;
	specs[0].dmamap = sc->sc_ctrlq.ctrlq_rx_dmamap;
	specs[0].buf = rx;
	specs[0].bufsize = sizeof(*rx);

	r = vioif_ctrl_send_command(sc, VIRTIO_NET_CTRL_RX, cmd,
	    specs, __arraycount(specs));

	vioif_ctrl_release(sc);
	return r;
}

static int
vioif_set_promisc(struct vioif_softc *sc, bool onoff)
{
	return vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_PROMISC, onoff);
}

static int
vioif_set_allmulti(struct vioif_softc *sc, bool onoff)
{
	return vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, onoff);
}

static int
vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *sc, int nvq_pairs)
{
	struct virtio_net_ctrl_mq *mq = sc->sc_ctrlq.ctrlq_mq;
	struct vioif_ctrl_cmdspec specs[1];
	int r;

	if (!sc->sc_has_ctrl)
		return ENOTSUP;

	if (nvq_pairs <= 1)
		return EINVAL;

	vioif_ctrl_acquire(sc);

	mq->virtqueue_pairs = virtio_rw16(sc->sc_virtio, nvq_pairs);
	specs[0].dmamap = sc->sc_ctrlq.ctrlq_mq_dmamap;
	specs[0].buf = mq;
	specs[0].bufsize = sizeof(*mq);

	r = vioif_ctrl_send_command(sc,
	    VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
	    specs, __arraycount(specs));

	vioif_ctrl_release(sc);

	return r;
}

static int
vioif_set_mac_addr(struct vioif_softc *sc)
{
	struct virtio_net_ctrl_mac_addr *ma =
	    sc->sc_ctrlq.ctrlq_mac_addr;
	struct vioif_ctrl_cmdspec specs[1];
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int nspecs = __arraycount(specs);
	uint64_t features;
	int r;
	size_t i;

	if (!sc->sc_has_ctrl)
		return ENOTSUP;

	if (memcmp(CLLADDR(ifp->if_sadl), sc->sc_mac,
	    ETHER_ADDR_LEN) == 0) {
		return 0;
	}

	memcpy(sc->sc_mac, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);

	features = virtio_features(sc->sc_virtio);
	if (features & VIRTIO_NET_F_CTRL_MAC_ADDR) {
		vioif_ctrl_acquire(sc);

		memcpy(ma->mac, sc->sc_mac, ETHER_ADDR_LEN);
		specs[0].dmamap = sc->sc_ctrlq.ctrlq_mac_addr_dmamap;
		specs[0].buf = ma;
		specs[0].bufsize = sizeof(*ma);

		r = vioif_ctrl_send_command(sc,
		    VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_ADDR_SET,
		    specs, nspecs);

		vioif_ctrl_release(sc);
	} else {
		for (i = 0; i < __arraycount(sc->sc_mac); i++) {
			virtio_write_device_config_1(sc->sc_virtio,
			    VIRTIO_NET_CONFIG_MAC + i, sc->sc_mac[i]);
		}
		r = 0;
	}

	return r;
}

static int
vioif_set_rx_filter(struct vioif_softc *sc)
{
	/* filter already set in ctrlq->ctrlq_mac_tbl */
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtio_net_ctrl_mac_tbl *mac_tbl_uc, *mac_tbl_mc;
	struct vioif_ctrl_cmdspec specs[2];
	int nspecs = __arraycount(specs);
	int r;

	mac_tbl_uc = sc->sc_ctrlq.ctrlq_mac_tbl_uc;
	mac_tbl_mc = sc->sc_ctrlq.ctrlq_mac_tbl_mc;

	if (!sc->sc_has_ctrl)
		return ENOTSUP;

	vioif_ctrl_acquire(sc);

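	/*
	 * Per the virtio spec, VIRTIO_NET_CTRL_MAC_TABLE_SET carries two
	 * variable-length MAC tables in one command: the unicast table
	 * first, then the multicast table, each prefixed by its entry
	 * count.
	 */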
	specs[0].dmamap = sc->sc_ctrlq.ctrlq_tbl_uc_dmamap;
	specs[0].buf = mac_tbl_uc;
	specs[0].bufsize = sizeof(*mac_tbl_uc)
	    + (ETHER_ADDR_LEN * virtio_rw32(vsc, mac_tbl_uc->nentries));

	specs[1].dmamap = sc->sc_ctrlq.ctrlq_tbl_mc_dmamap;
	specs[1].buf = mac_tbl_mc;
	specs[1].bufsize = sizeof(*mac_tbl_mc)
	    + (ETHER_ADDR_LEN * virtio_rw32(vsc, mac_tbl_mc->nentries));

	r = vioif_ctrl_load_cmdspec(sc, specs, nspecs);
	if (r != 0)
		goto out;

	r = vioif_ctrl_send_command(sc,
	    VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET,
	    specs, nspecs);

	vioif_ctrl_unload_cmdspec(sc, specs, nspecs);

out:
	vioif_ctrl_release(sc);

	return r;
}

/*
 * If the multicast filter is small enough (<= MAXENTRIES), program the
 * rx filter with the exact addresses.  If it is too large, or if
 * programming the filter fails, fall back to ALLMULTI.
 */
static int
vioif_rx_filter(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
	int nentries;
	bool allmulti = false;
	int r;

	if (!sc->sc_has_ctrl) {
		goto set_ifflags;
	}

	memcpy(ctrlq->ctrlq_mac_tbl_uc->macs[0],
	    CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);

	nentries = 0;
	allmulti = false;

	ETHER_LOCK(ec);
	for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
	    ETHER_NEXT_MULTI(step, enm)) {
		if (nentries >= VIRTIO_NET_CTRL_MAC_MAXENTRIES) {
			allmulti = true;
			break;
		}
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			allmulti = true;
			break;
		}

		memcpy(ctrlq->ctrlq_mac_tbl_mc->macs[nentries],
		    enm->enm_addrlo, ETHER_ADDR_LEN);
		nentries++;
	}
	ETHER_UNLOCK(ec);

	r = vioif_set_mac_addr(sc);
	if (r != 0) {
		log(LOG_WARNING, "%s: couldn't set MAC address\n",
		    ifp->if_xname);
	}

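	/*
	 * Try the exact-match tables first; if the device rejects them,
	 * clear both tables (nentries == 0) and fall back to ALLMULTI so
	 * no wanted multicast traffic is dropped.
	 */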
	if (!allmulti) {
		ctrlq->ctrlq_mac_tbl_uc->nentries = virtio_rw32(vsc, 1);
		ctrlq->ctrlq_mac_tbl_mc->nentries = virtio_rw32(vsc, nentries);
		r = vioif_set_rx_filter(sc);
		if (r != 0) {
			allmulti = true; /* fallback */
		}
	}

	if (allmulti) {
		ctrlq->ctrlq_mac_tbl_uc->nentries = virtio_rw32(vsc, 0);
		ctrlq->ctrlq_mac_tbl_mc->nentries = virtio_rw32(vsc, 0);
		r = vioif_set_rx_filter(sc);
		if (r != 0) {
			log(LOG_DEBUG, "%s: couldn't clear RX filter\n",
			    ifp->if_xname);
			/* what to do on failure? */
		}

		ifp->if_flags |= IFF_ALLMULTI;
	}

set_ifflags:
	r = vioif_ifflags(sc);

	return r;
}

/*
 * VM configuration changes
 */
static int
vioif_config_change(struct virtio_softc *vsc)
{
	struct vioif_softc *sc = device_private(virtio_child(vsc));

	softint_schedule(sc->sc_cfg_softint);
	return 0;
}

static void
vioif_cfg_softint(void *arg)
{
	struct vioif_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	vioif_update_link_status(sc);
	vioif_start(ifp);
}

static int
vioif_get_link_status(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	uint16_t status;

	if (virtio_features(vsc) & VIRTIO_NET_F_STATUS)
		status = virtio_read_device_config_2(vsc,
		    VIRTIO_NET_CONFIG_STATUS);
	else
		status = VIRTIO_NET_S_LINK_UP;

	if ((status & VIRTIO_NET_S_LINK_UP) != 0)
		return LINK_STATE_UP;

	return LINK_STATE_DOWN;
}

static void
vioif_update_link_status(struct vioif_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct vioif_netqueue *netq;
	struct vioif_tx_context *txc;
	bool active;
	int link, i;

	mutex_enter(&sc->sc_lock);

	link = vioif_get_link_status(sc);

	if (link == sc->sc_link_state)
		goto done;

	sc->sc_link_state = link;

	active = VIOIF_IS_LINK_ACTIVE(sc);
	for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
		netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];

		mutex_enter(&netq->netq_lock);
		txc = netq->netq_ctx;
		txc->txc_link_active = active;
		mutex_exit(&netq->netq_lock);
	}

	if_link_state_change(ifp, sc->sc_link_state);

done:
	mutex_exit(&sc->sc_lock);
}
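/*
 * Deferred-processing plumbing: when the txrx-workqueue knob
 * (sc_txrx_workqueue_sysctl) selects workqueues over softints, the
 * rx/tx handlers run through these wrappers.  The atomic "added" flag
 * in vioif_work_add() ensures a work item is enqueued at most once
 * until it actually runs.
 */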
static void
vioif_workq_work(struct work *wk, void *context)
{
	struct vioif_work *work;

	work = container_of(wk, struct vioif_work, cookie);

	atomic_store_relaxed(&work->added, 0);
	work->func(work->arg);
}

static struct workqueue *
vioif_workq_create(const char *name, pri_t prio, int ipl, int flags)
{
	struct workqueue *wq;
	int error;

	error = workqueue_create(&wq, name, vioif_workq_work, NULL,
	    prio, ipl, flags);
	if (error)
		return NULL;

	return wq;
}

static void
vioif_workq_destroy(struct workqueue *wq)
{

	workqueue_destroy(wq);
}

static void
vioif_work_set(struct vioif_work *work, void (*func)(void *), void *arg)
{

	memset(work, 0, sizeof(*work));
	work->func = func;
	work->arg = arg;
}

static void
vioif_work_add(struct workqueue *wq, struct vioif_work *work)
{

	if (atomic_load_relaxed(&work->added) != 0)
		return;

	atomic_store_relaxed(&work->added, 1);
	kpreempt_disable();
	workqueue_enqueue(wq, &work->cookie, NULL);
	kpreempt_enable();
}

static void
vioif_work_wait(struct workqueue *wq, struct vioif_work *work)
{

	workqueue_wait(wq, &work->cookie);
}

MODULE(MODULE_CLASS_DRIVER, if_vioif, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
if_vioif_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_if_vioif,
		    cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_if_vioif,
		    cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}