/* pci_virtio_net.c revision 221828 */
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
25 * 26 * $FreeBSD$ 27 */ 28 29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD$"); 31 32#include <sys/param.h> 33#include <sys/linker_set.h> 34#include <sys/select.h> 35#include <sys/uio.h> 36#include <sys/ioctl.h> 37 38#include <errno.h> 39#include <fcntl.h> 40#include <stdio.h> 41#include <stdlib.h> 42#include <stdint.h> 43#include <string.h> 44#include <strings.h> 45#include <unistd.h> 46#include <assert.h> 47#include <md5.h> 48#include <pthread.h> 49 50#include "fbsdrun.h" 51#include "pci_emul.h" 52#include "mevent.h" 53#include "virtio.h" 54 55#define VTNET_RINGSZ 256 56 57#define VTNET_MAXSEGS 32 58 59/* 60 * PCI config-space register offsets 61 */ 62#define VTNET_R_CFG0 20 63#define VTNET_R_CFG1 21 64#define VTNET_R_CFG2 22 65#define VTNET_R_CFG3 23 66#define VTNET_R_CFG4 24 67#define VTNET_R_CFG5 25 68#define VTNET_R_CFG6 26 69#define VTNET_R_CFG7 27 70#define VTNET_R_MAX 27 71 72#define VTNET_REGSZ VTNET_R_MAX+1 73 74/* 75 * Host capabilities 76 */ 77#define VTNET_S_HOSTCAPS \ 78 ( 0x00000020 | /* host supplies MAC */ \ 79 0x00008000 | /* host can merge Rx buffers */ \ 80 0x00010000 ) /* config status available */ 81 82/* 83 * Queue definitions. 
84 */ 85#define VTNET_RXQ 0 86#define VTNET_TXQ 1 87#define VTNET_CTLQ 2 88 89#define VTNET_MAXQ 3 90 91struct vring_hqueue { 92 /* Internal state */ 93 uint16_t hq_size; 94 uint16_t hq_cur_aidx; /* trails behind 'avail_idx' */ 95 96 /* Host-context pointers to the queue */ 97 struct virtio_desc *hq_dtable; 98 uint16_t *hq_avail_flags; 99 uint16_t *hq_avail_idx; /* monotonically increasing */ 100 uint16_t *hq_avail_ring; 101 102 uint16_t *hq_used_flags; 103 uint16_t *hq_used_idx; /* monotonically increasing */ 104 struct virtio_used *hq_used_ring; 105}; 106 107/* 108 * Fixed network header size 109 */ 110struct virtio_net_rxhdr { 111 uint8_t vrh_flags; 112 uint8_t vrh_gso_type; 113 uint16_t vrh_hdr_len; 114 uint16_t vrh_gso_size; 115 uint16_t vrh_csum_start; 116 uint16_t vrh_csum_offset; 117 uint16_t vrh_bufs; 118} __packed; 119 120/* 121 * Debug printf 122 */ 123static int pci_vtnet_debug; 124#define DPRINTF(params) if (pci_vtnet_debug) printf params 125#define WPRINTF(params) printf params 126 127/* 128 * Per-device softc 129 */ 130struct pci_vtnet_softc { 131 struct pci_devinst *vsc_pi; 132 pthread_mutex_t vsc_mtx; 133 struct mevent *vsc_mevp; 134 135 int vsc_curq; 136 int vsc_status; 137 int vsc_isr; 138 int vsc_tapfd; 139 int vsc_rx_ready; 140 int vsc_rxpend; 141 142 uint32_t vsc_features; 143 uint8_t vsc_macaddr[6]; 144 145 uint64_t vsc_pfn[VTNET_MAXQ]; 146 struct vring_hqueue vsc_hq[VTNET_MAXQ]; 147}; 148 149/* 150 * Return the number of available descriptors in the vring taking care 151 * of the 16-bit index wraparound. 
152 */ 153static int 154hq_num_avail(struct vring_hqueue *hq) 155{ 156 int ndesc; 157 158 if (*hq->hq_avail_idx >= hq->hq_cur_aidx) 159 ndesc = *hq->hq_avail_idx - hq->hq_cur_aidx; 160 else 161 ndesc = UINT16_MAX - hq->hq_cur_aidx + *hq->hq_avail_idx + 1; 162 163 assert(ndesc >= 0 && ndesc <= hq->hq_size); 164 165 return (ndesc); 166} 167 168static uint16_t 169pci_vtnet_qsize(int qnum) 170{ 171 /* XXX no ctl queue currently */ 172 if (qnum == VTNET_CTLQ) { 173 return (0); 174 } 175 176 /* XXX fixed currently. Maybe different for tx/rx/ctl */ 177 return (VTNET_RINGSZ); 178} 179 180static void 181pci_vtnet_update_status(struct pci_vtnet_softc *sc, uint32_t value) 182{ 183 if (value == 0) { 184 DPRINTF(("vtnet: device reset requested !\n")); 185 } 186 187 sc->vsc_status = value; 188} 189 190/* 191 * Called to send a buffer chain out to the tap device 192 */ 193static void 194pci_vtnet_tap_tx(struct pci_vtnet_softc *sc, struct iovec *iov, int iovcnt, 195 int len) 196{ 197 char pad[60]; 198 199 if (sc->vsc_tapfd == -1) 200 return; 201 202 /* 203 * If the length is < 60, pad out to that and add the 204 * extra zero'd segment to the iov. It is guaranteed that 205 * there is always an extra iov available by the caller. 206 */ 207 if (len < 60) { 208 memset(pad, 0, 60 - len); 209 iov[iovcnt].iov_base = pad; 210 iov[iovcnt].iov_len = 60 - len; 211 iovcnt++; 212 } 213 (void) writev(sc->vsc_tapfd, iov, iovcnt); 214} 215 216/* 217 * Called when there is read activity on the tap file descriptor. 218 * Each buffer posted by the guest is assumed to be able to contain 219 * an entire ethernet frame + rx header. 220 * MP note: the dummybuf is only used for discarding frames, so there 221 * is no need for it to be per-vtnet or locked. 
222 */ 223static uint8_t dummybuf[2048]; 224 225static void 226pci_vtnet_tap_rx(struct pci_vtnet_softc *sc) 227{ 228 struct virtio_desc *vd; 229 struct virtio_used *vu; 230 struct vring_hqueue *hq; 231 struct virtio_net_rxhdr *vrx; 232 uint8_t *buf; 233 int i; 234 int len; 235 int ndescs; 236 int didx, uidx, aidx; /* descriptor, avail and used index */ 237 238 /* 239 * Should never be called without a valid tap fd 240 */ 241 assert(sc->vsc_tapfd != -1); 242 243 /* 244 * But, will be called when the rx ring hasn't yet 245 * been set up. 246 */ 247 if (sc->vsc_rx_ready == 0) { 248 /* 249 * Drop the packet and try later. 250 */ 251 (void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf)); 252 return; 253 } 254 255 /* 256 * Calculate the number of available rx buffers 257 */ 258 hq = &sc->vsc_hq[VTNET_RXQ]; 259 260 ndescs = hq_num_avail(hq); 261 262 if (ndescs == 0) { 263 /* 264 * Need to wait for host notification to read 265 */ 266 if (sc->vsc_rxpend == 0) { 267 WPRINTF(("vtnet: no rx descriptors !\n")); 268 sc->vsc_rxpend = 1; 269 } 270 271 /* 272 * Drop the packet and try later 273 */ 274 (void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf)); 275 return; 276 } 277 278 aidx = hq->hq_cur_aidx; 279 uidx = *hq->hq_used_idx; 280 for (i = 0; i < ndescs; i++) { 281 /* 282 * 'aidx' indexes into the an array of descriptor indexes 283 */ 284 didx = hq->hq_avail_ring[aidx % hq->hq_size]; 285 assert(didx >= 0 && didx < hq->hq_size); 286 287 vd = &hq->hq_dtable[didx]; 288 289 /* 290 * Get a pointer to the rx header, and use the 291 * data immediately following it for the packet buffer. 292 */ 293 vrx = (struct virtio_net_rxhdr *)paddr_guest2host(vd->vd_addr); 294 buf = (uint8_t *)(vrx + 1); 295 296 len = read(sc->vsc_tapfd, buf, 297 vd->vd_len - sizeof(struct virtio_net_rxhdr)); 298 299 if (len < 0 && errno == EWOULDBLOCK) { 300 break; 301 } 302 303 /* 304 * The only valid field in the rx packet header is the 305 * number of buffers, which is always 1 without TSO 306 * support. 
307 */ 308 memset(vrx, 0, sizeof(struct virtio_net_rxhdr)); 309 vrx->vrh_bufs = 1; 310 311 /* 312 * Write this descriptor into the used ring 313 */ 314 vu = &hq->hq_used_ring[uidx % hq->hq_size]; 315 vu->vu_idx = didx; 316 vu->vu_tlen = len + sizeof(struct virtio_net_rxhdr); 317 uidx++; 318 aidx++; 319 } 320 321 /* 322 * Update the used pointer, and signal an interrupt if allowed 323 */ 324 *hq->hq_used_idx = uidx; 325 hq->hq_cur_aidx = aidx; 326 327 if ((*hq->hq_avail_flags & VRING_AVAIL_F_NO_INTERRUPT) == 0) { 328 sc->vsc_isr |= 1; 329 pci_generate_msi(sc->vsc_pi, 0); 330 } 331} 332 333static void 334pci_vtnet_tap_callback(int fd, enum ev_type type, void *param) 335{ 336 struct pci_vtnet_softc *sc = param; 337 338 pthread_mutex_lock(&sc->vsc_mtx); 339 pci_vtnet_tap_rx(sc); 340 pthread_mutex_unlock(&sc->vsc_mtx); 341 342} 343 344static void 345pci_vtnet_ping_rxq(struct pci_vtnet_softc *sc) 346{ 347 /* 348 * A qnotify means that the rx process can now begin 349 */ 350 if (sc->vsc_rx_ready == 0) { 351 sc->vsc_rx_ready = 1; 352 } 353 354 /* 355 * If the rx queue was empty, attempt to receive a 356 * packet that was previously blocked due to no rx bufs 357 * available 358 */ 359 if (sc->vsc_rxpend) { 360 WPRINTF(("vtnet: rx resumed\n\r")); 361 sc->vsc_rxpend = 0; 362 pci_vtnet_tap_rx(sc); 363 } 364} 365 366static void 367pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vring_hqueue *hq) 368{ 369 struct iovec iov[VTNET_MAXSEGS + 1]; 370 struct virtio_desc *vd; 371 struct virtio_used *vu; 372 int i; 373 int plen; 374 int tlen; 375 int uidx, aidx, didx; 376 377 uidx = *hq->hq_used_idx; 378 aidx = hq->hq_cur_aidx; 379 didx = hq->hq_avail_ring[aidx % hq->hq_size]; 380 assert(didx >= 0 && didx < hq->hq_size); 381 382 vd = &hq->hq_dtable[didx]; 383 384 /* 385 * Run through the chain of descriptors, ignoring the 386 * first header descriptor. However, include the header 387 * length in the total length that will be put into the 388 * used queue. 
389 */ 390 tlen = vd->vd_len; 391 vd = &hq->hq_dtable[vd->vd_next]; 392 393 for (i = 0, plen = 0; 394 i < VTNET_MAXSEGS; 395 i++, vd = &hq->hq_dtable[vd->vd_next]) { 396 iov[i].iov_base = paddr_guest2host(vd->vd_addr); 397 iov[i].iov_len = vd->vd_len; 398 plen += vd->vd_len; 399 tlen += vd->vd_len; 400 401 if ((vd->vd_flags & VRING_DESC_F_NEXT) == 0) 402 break; 403 } 404 assert(i < VTNET_MAXSEGS); 405 406 DPRINTF(("virtio: packet send, %d bytes, %d segs\n\r", plen, i + 1)); 407 pci_vtnet_tap_tx(sc, iov, i + 1, plen); 408 409 /* 410 * Return this chain back to the host 411 */ 412 vu = &hq->hq_used_ring[uidx % hq->hq_size]; 413 vu->vu_idx = didx; 414 vu->vu_tlen = tlen; 415 hq->hq_cur_aidx = aidx + 1; 416 *hq->hq_used_idx = uidx + 1; 417 418 /* 419 * Generate an interrupt if able 420 */ 421 if ((*hq->hq_avail_flags & VRING_AVAIL_F_NO_INTERRUPT) == 0) { 422 sc->vsc_isr |= 1; 423 pci_generate_msi(sc->vsc_pi, 0); 424 } 425} 426 427static void 428pci_vtnet_ping_txq(struct pci_vtnet_softc *sc) 429{ 430 struct vring_hqueue *hq = &sc->vsc_hq[VTNET_TXQ]; 431 int i; 432 int ndescs; 433 434 /* 435 * Calculate number of ring entries to process 436 */ 437 ndescs = hq_num_avail(hq); 438 439 if (ndescs == 0) 440 return; 441 442 /* 443 * Run through all the entries, placing them into iovecs and 444 * sending when an end-of-packet is found 445 */ 446 for (i = 0; i < ndescs; i++) 447 pci_vtnet_proctx(sc, hq); 448} 449 450static void 451pci_vtnet_ping_ctlq(struct pci_vtnet_softc *sc) 452{ 453 454 DPRINTF(("vtnet: control qnotify!\n\r")); 455} 456 457static void 458pci_vtnet_ring_init(struct pci_vtnet_softc *sc, uint64_t pfn) 459{ 460 struct vring_hqueue *hq; 461 int qnum = sc->vsc_curq; 462 463 assert(qnum < VTNET_MAXQ); 464 465 sc->vsc_pfn[qnum] = pfn << VRING_PFN; 466 467 /* 468 * Set up host pointers to the various parts of the 469 * queue 470 */ 471 hq = &sc->vsc_hq[qnum]; 472 hq->hq_size = pci_vtnet_qsize(qnum); 473 474 hq->hq_dtable = paddr_guest2host(pfn << VRING_PFN); 475 
hq->hq_avail_flags = (uint16_t *)(hq->hq_dtable + hq->hq_size); 476 hq->hq_avail_idx = hq->hq_avail_flags + 1; 477 hq->hq_avail_ring = hq->hq_avail_flags + 2; 478 hq->hq_used_flags = (uint16_t *)roundup2((uintptr_t)hq->hq_avail_ring, 479 VRING_ALIGN); 480 hq->hq_used_idx = hq->hq_used_flags + 1; 481 hq->hq_used_ring = (struct virtio_used *)(hq->hq_used_flags + 2); 482 483 /* 484 * Initialize queue indexes 485 */ 486 hq->hq_cur_aidx = 0; 487} 488 489static int 490pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts) 491{ 492 MD5_CTX mdctx; 493 unsigned char digest[16]; 494 char nstr[80]; 495 struct pci_vtnet_softc *sc; 496 497 /* 498 * Access to guest memory is required. Fail if 499 * memory not mapped 500 */ 501 if (paddr_guest2host(0) == NULL) 502 return (1); 503 504 sc = malloc(sizeof(struct pci_vtnet_softc)); 505 memset(sc, 0, sizeof(struct pci_vtnet_softc)); 506 507 pi->pi_arg = sc; 508 sc->vsc_pi = pi; 509 510 pthread_mutex_init(&sc->vsc_mtx, NULL); 511 512 /* 513 * Attempt to open the tap device 514 */ 515 sc->vsc_tapfd = -1; 516 if (opts != NULL) { 517 char tbuf[80]; 518 519 strcpy(tbuf, "/dev/"); 520 strncat(tbuf, opts, sizeof(tbuf) - strlen(tbuf)); 521 522 sc->vsc_tapfd = open(tbuf, O_RDWR); 523 if (sc->vsc_tapfd == -1) { 524 WPRINTF(("open of tap device %s failed\n", tbuf)); 525 } else { 526 /* 527 * Set non-blocking and register for read 528 * notifications with the event loop 529 */ 530 int opt = 1; 531 if (ioctl(sc->vsc_tapfd, FIONBIO, &opt) < 0) { 532 WPRINTF(("tap device O_NONBLOCK failed\n")); 533 close(sc->vsc_tapfd); 534 sc->vsc_tapfd = -1; 535 } 536 537 sc->vsc_mevp = mevent_add(sc->vsc_tapfd, 538 EVF_READ, 539 pci_vtnet_tap_callback, 540 sc); 541 if (sc->vsc_mevp == NULL) { 542 WPRINTF(("Could not register event\n")); 543 close(sc->vsc_tapfd); 544 sc->vsc_tapfd = -1; 545 } 546 } 547 } 548 549 /* 550 * The MAC address is the standard NetApp OUI of 00-a0-98, 551 * followed by an MD5 of the vm name. 
The slot number is 552 * prepended to this for slots other than 1, so that 553 * CFE can netboot from the equivalent of slot 1. 554 */ 555 if (pi->pi_slot == 1) { 556 strncpy(nstr, vmname, sizeof(nstr)); 557 } else { 558 snprintf(nstr, sizeof(nstr), "%d-%s", pi->pi_slot, vmname); 559 } 560 561 MD5Init(&mdctx); 562 MD5Update(&mdctx, nstr, strlen(nstr)); 563 MD5Final(digest, &mdctx); 564 565 sc->vsc_macaddr[0] = 0x00; 566 sc->vsc_macaddr[1] = 0xa0; 567 sc->vsc_macaddr[2] = 0x98; 568 sc->vsc_macaddr[3] = digest[0]; 569 sc->vsc_macaddr[4] = digest[1]; 570 sc->vsc_macaddr[5] = digest[2]; 571 572 /* initialize config space */ 573 pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET); 574 pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR); 575 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK); 576 pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_NET); 577 pci_emul_alloc_bar(pi, 0, 0, PCIBAR_IO, VTNET_REGSZ); 578 pci_emul_add_msicap(pi, 1); 579 580 return (0); 581} 582 583/* 584 * Function pointer array to handle queue notifications 585 */ 586static void (*pci_vtnet_qnotify[VTNET_MAXQ])(struct pci_vtnet_softc *) = { 587 pci_vtnet_ping_rxq, 588 pci_vtnet_ping_txq, 589 pci_vtnet_ping_ctlq 590}; 591 592static void 593pci_vtnet_write(struct pci_devinst *pi, int baridx, int offset, int size, 594 uint32_t value) 595{ 596 struct pci_vtnet_softc *sc = pi->pi_arg; 597 598 if (offset + size > VTNET_REGSZ) { 599 DPRINTF(("vtnet_write: 2big, offset %d size %d\n", 600 offset, size)); 601 return; 602 } 603 604 pthread_mutex_lock(&sc->vsc_mtx); 605 606 switch (offset) { 607 case VTCFG_R_GUESTCAP: 608 assert(size == 4); 609 sc->vsc_features = value & VTNET_S_HOSTCAPS; 610 break; 611 case VTCFG_R_PFN: 612 assert(size == 4); 613 pci_vtnet_ring_init(sc, value); 614 break; 615 case VTCFG_R_QSEL: 616 assert(size == 2); 617 assert(value < VTNET_MAXQ); 618 sc->vsc_curq = value; 619 break; 620 case VTCFG_R_QNOTIFY: 621 assert(size == 2); 622 assert(value < VTNET_MAXQ); 623 
(*pci_vtnet_qnotify[value])(sc); 624 break; 625 case VTCFG_R_STATUS: 626 assert(size == 1); 627 pci_vtnet_update_status(sc, value); 628 break; 629 case VTNET_R_CFG0: 630 case VTNET_R_CFG1: 631 case VTNET_R_CFG2: 632 case VTNET_R_CFG3: 633 case VTNET_R_CFG4: 634 case VTNET_R_CFG5: 635 /* 636 * The driver is allowed to change the MAC address 637 */ 638 assert(size == 1); 639 sc->vsc_macaddr[offset - VTNET_R_CFG0] = value; 640 break; 641 case VTCFG_R_HOSTCAP: 642 case VTCFG_R_QNUM: 643 case VTCFG_R_ISR: 644 case VTNET_R_CFG6: 645 case VTNET_R_CFG7: 646 DPRINTF(("vtnet: write to readonly reg %d\n\r", offset)); 647 break; 648 default: 649 DPRINTF(("vtnet: unknown i/o write offset %d\n\r", offset)); 650 value = 0; 651 break; 652 } 653 654 pthread_mutex_unlock(&sc->vsc_mtx); 655} 656 657uint32_t 658pci_vtnet_read(struct pci_devinst *pi, int baridx, int offset, int size) 659{ 660 struct pci_vtnet_softc *sc = pi->pi_arg; 661 uint32_t value; 662 663 if (offset + size > VTNET_REGSZ) { 664 DPRINTF(("vtnet_read: 2big, offset %d size %d\n", 665 offset, size)); 666 return (0); 667 } 668 669 pthread_mutex_lock(&sc->vsc_mtx); 670 671 switch (offset) { 672 case VTCFG_R_HOSTCAP: 673 assert(size == 4); 674 value = VTNET_S_HOSTCAPS; 675 break; 676 case VTCFG_R_GUESTCAP: 677 assert(size == 4); 678 value = sc->vsc_features; /* XXX never read ? */ 679 break; 680 case VTCFG_R_PFN: 681 assert(size == 4); 682 value = sc->vsc_pfn[sc->vsc_curq] >> VRING_PFN; 683 break; 684 case VTCFG_R_QNUM: 685 assert(size == 2); 686 value = pci_vtnet_qsize(sc->vsc_curq); 687 break; 688 case VTCFG_R_QSEL: 689 assert(size == 2); 690 value = sc->vsc_curq; /* XXX never read ? */ 691 break; 692 case VTCFG_R_QNOTIFY: 693 assert(size == 2); 694 value = sc->vsc_curq; /* XXX never read ? 
*/ 695 break; 696 case VTCFG_R_STATUS: 697 assert(size == 1); 698 value = sc->vsc_status; 699 break; 700 case VTCFG_R_ISR: 701 assert(size == 1); 702 value = sc->vsc_isr; 703 sc->vsc_isr = 0; /* a read clears this flag */ 704 break; 705 case VTNET_R_CFG0: 706 case VTNET_R_CFG1: 707 case VTNET_R_CFG2: 708 case VTNET_R_CFG3: 709 case VTNET_R_CFG4: 710 case VTNET_R_CFG5: 711 assert(size == 1); 712 value = sc->vsc_macaddr[offset - VTNET_R_CFG0]; 713 break; 714 case VTNET_R_CFG6: 715 assert(size == 1); 716 value = 0x01; /* XXX link always up */ 717 break; 718 case VTNET_R_CFG7: 719 assert(size == 1); 720 value = 0; /* link status is in the LSB */ 721 break; 722 default: 723 DPRINTF(("vtnet: unknown i/o read offset %d\n\r", offset)); 724 value = 0; 725 break; 726 } 727 728 pthread_mutex_unlock(&sc->vsc_mtx); 729 730 return (value); 731} 732 733struct pci_devemu pci_de_vnet = { 734 .pe_emu = "virtio-net", 735 .pe_init = pci_vtnet_init, 736 .pe_iow = pci_vtnet_write, 737 .pe_ior = pci_vtnet_read, 738}; 739PCI_EMUL_SET(pci_de_vnet); 740