/*	$NetBSD: virtio.c,v 1.56 2022/08/09 12:42:05 riastradh Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.56 2022/08/09 12:42:05 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/module.h>

#define VIRTIO_PRIVATE

#include <dev/pci/virtioreg.h>	/* XXX: move to non-pci */
#include <dev/pci/virtiovar.h>	/* XXX: move to non-pci */

#define MINSEG_INDIRECT	2	/* use indirect if nsegs >= this value */

/* incomplete list */
static const char *virtio_device_name[] = {
        "unknown (0)",			/*  0 */
        "network",			/*  1 */
        "block",			/*  2 */
        "console",			/*  3 */
        "entropy",			/*  4 */
        "memory balloon",		/*  5 */
        "I/O memory",			/*  6 */
        "remote processor messaging",	/*  7 */
        "SCSI",				/*  8 */
        "9P transport",			/*  9 */
};
#define NDEVNAMES	__arraycount(virtio_device_name)

static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);

void
virtio_set_status(struct virtio_softc *sc, int status)
{
        sc->sc_ops->set_status(sc, status);
}

/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() can still be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, feature negotiation is only allowed after virtio_reset.
 */
void
virtio_reset(struct virtio_softc *sc)
{
        virtio_device_reset(sc);
}

int
virtio_reinit_start(struct virtio_softc *sc)
{
        int i, r;

        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
        for (i = 0; i < sc->sc_nvqs; i++) {
                int n;
                struct virtqueue *vq = &sc->sc_vqs[i];
                n = sc->sc_ops->read_queue_size(sc, vq->vq_index);
                if (n == 0)	/* vq disappeared */
                        continue;
                if (n != vq->vq_num) {
                        panic("%s: virtqueue size changed, vq index %d\n",
                            device_xname(sc->sc_dev),
                            vq->vq_index);
                }
                virtio_init_vq(sc, vq, true);
                sc->sc_ops->setup_queue(sc, vq->vq_index,
                    vq->vq_dmamap->dm_segs[0].ds_addr);
        }

        r = sc->sc_ops->setup_interrupts(sc, 1);
        if (r != 0)
                goto fail;

        return 0;

fail:
        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);

        return 1;
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}

/*
 * Feature negotiation.
 */
void
virtio_negotiate_features(struct virtio_softc *sc, uint64_t guest_features)
{
        if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
            !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
                guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
        sc->sc_ops->neg_features(sc, guest_features);
        if (sc->sc_active_features & VIRTIO_F_RING_INDIRECT_DESC)
                sc->sc_indirect = true;
        else
                sc->sc_indirect = false;
}
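
/*
 * Example (illustrative sketch only; "vsc" and FEATURES_WANTED are
 * hypothetical names): a child driver requests features and then checks
 * what was actually accepted via virtio_features():
 *
 *	virtio_negotiate_features(vsc, FEATURES_WANTED);
 *	if (virtio_features(vsc) & VIRTIO_F_RING_EVENT_IDX)
 *		// interrupt moderation is available
 *
 * Note that most drivers go through virtio_child_attach_start() below,
 * which calls virtio_negotiate_features() on their behalf.
 */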

/*
 * Device configuration registers readers/writers
 */
#if 0
#define DPRINTFR(n, fmt, val, index, num) \
        printf("\n%s (", n); \
        for (int i = 0; i < num; i++) \
                printf("%02x ", bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index+i)); \
        printf(") -> "); printf(fmt, val); printf("\n");
#define DPRINTFR2(n, fmt, val_s, val_n) \
        printf("%s ", n); \
        printf("\n stream "); printf(fmt, val_s); printf(" norm "); printf(fmt, val_n); printf("\n");
#else
#define DPRINTFR(n, fmt, val, index, num)
#define DPRINTFR2(n, fmt, val_s, val_n)
#endif


uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index)
{
        bus_space_tag_t iot = sc->sc_devcfg_iot;
        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
        uint8_t val;

        val = bus_space_read_1(iot, ioh, index);

        DPRINTFR("read_1", "%02x", val, index, 1);
        return val;
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index)
{
        bus_space_tag_t iot = sc->sc_devcfg_iot;
        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
        uint16_t val;

        val = bus_space_read_2(iot, ioh, index);
        if (BYTE_ORDER != sc->sc_bus_endian)
                val = bswap16(val);

        DPRINTFR("read_2", "%04x", val, index, 2);
        DPRINTFR2("read_2", "%04x",
            bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
                index),
            bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
        return val;
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index)
{
        bus_space_tag_t iot = sc->sc_devcfg_iot;
        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
        uint32_t val;

        val = bus_space_read_4(iot, ioh, index);
        if (BYTE_ORDER != sc->sc_bus_endian)
                val = bswap32(val);

        DPRINTFR("read_4", "%08x", val, index, 4);
        DPRINTFR2("read_4", "%08x",
            bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
                index),
            bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
        return val;
}

/*
 * The virtio spec explicitly says that reading and writing 8 bytes is not
 * considered atomic and that no triggers may be connected to reading or
 * writing the value. We access it using two 32-bit reads. See virtio spec
 * 4.1.3.1.
 */
uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index)
{
        bus_space_tag_t iot = sc->sc_devcfg_iot;
        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
        union {
                uint64_t u64;
                uint32_t l[2];
        } v;
        uint64_t val;

        v.l[0] = bus_space_read_4(iot, ioh, index);
        v.l[1] = bus_space_read_4(iot, ioh, index + 4);
        if (sc->sc_bus_endian != sc->sc_struct_endian) {
                v.l[0] = bswap32(v.l[0]);
                v.l[1] = bswap32(v.l[1]);
        }
        val = v.u64;

        if (BYTE_ORDER != sc->sc_struct_endian)
                val = bswap64(val);

        DPRINTFR("read_8", "%08lx", val, index, 8);
        DPRINTFR2("read_8 low ", "%08x",
            bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
                index),
            bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
        DPRINTFR2("read_8 high ", "%08x",
            bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
                index + 4),
            bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
                index + 4));
        return val;
}

/*
 * In the older virtio spec, device config registers are host endian; in the
 * newer one they are little endian. Some newer devices, however, explicitly
 * specify that a given register is always little endian. These functions
 * cater for those.
 */
uint16_t
virtio_read_device_config_le_2(struct virtio_softc *sc, int index)
{
        bus_space_tag_t iot = sc->sc_devcfg_iot;
        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
        uint16_t val;

        val = bus_space_read_2(iot, ioh, index);
        if (sc->sc_bus_endian != LITTLE_ENDIAN)
                val = bswap16(val);

        DPRINTFR("read_le_2", "%04x", val, index, 2);
        DPRINTFR2("read_le_2", "%04x",
            bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
                index),
            bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
        return val;
}

uint32_t
virtio_read_device_config_le_4(struct virtio_softc *sc, int index)
{
        bus_space_tag_t iot = sc->sc_devcfg_iot;
        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
        uint32_t val;

        val = bus_space_read_4(iot, ioh, index);
        if (sc->sc_bus_endian != LITTLE_ENDIAN)
                val = bswap32(val);

        DPRINTFR("read_le_4", "%08x", val, index, 4);
        DPRINTFR2("read_le_4", "%08x",
            bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
                index),
            bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
        return val;
}

void
virtio_write_device_config_1(struct virtio_softc *sc, int index, uint8_t value)
{
        bus_space_tag_t iot = sc->sc_devcfg_iot;
        bus_space_handle_t ioh = sc->sc_devcfg_ioh;

        bus_space_write_1(iot, ioh, index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc, int index,
    uint16_t value)
{
        bus_space_tag_t iot = sc->sc_devcfg_iot;
        bus_space_handle_t ioh = sc->sc_devcfg_ioh;

        if (BYTE_ORDER != sc->sc_bus_endian)
                value = bswap16(value);
        bus_space_write_2(iot, ioh, index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc, int index,
    uint32_t value)
{
        bus_space_tag_t iot = sc->sc_devcfg_iot;
        bus_space_handle_t ioh = sc->sc_devcfg_ioh;

        if (BYTE_ORDER != sc->sc_bus_endian)
                value = bswap32(value);
        bus_space_write_4(iot, ioh, index, value);
}

/*
 * The virtio spec explicitly says that reading and writing 8 bytes is not
 * considered atomic and that no triggers may be connected to reading or
 * writing the value. We access it using two 32-bit writes. For good measure
 * the spec also says to write the word containing the LSB first, just in
 * case of a hypervisor bug. See virtio spec 4.1.3.1.
 */
void
virtio_write_device_config_8(struct virtio_softc *sc, int index,
    uint64_t value)
{
        bus_space_tag_t iot = sc->sc_devcfg_iot;
        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
        union {
                uint64_t u64;
                uint32_t l[2];
        } v;

        if (BYTE_ORDER != sc->sc_struct_endian)
                value = bswap64(value);

        v.u64 = value;
        if (sc->sc_bus_endian != sc->sc_struct_endian) {
                v.l[0] = bswap32(v.l[0]);
                v.l[1] = bswap32(v.l[1]);
        }

        if (sc->sc_struct_endian == LITTLE_ENDIAN) {
                bus_space_write_4(iot, ioh, index, v.l[0]);
                bus_space_write_4(iot, ioh, index + 4, v.l[1]);
        } else {
                bus_space_write_4(iot, ioh, index + 4, v.l[1]);
                bus_space_write_4(iot, ioh, index, v.l[0]);
        }
}

/*
 * In the older virtio spec, device config registers are host endian; in the
 * newer one they are little endian. Some newer devices, however, explicitly
 * specify that a given register is always little endian. These functions
 * cater for those.
 */
void
virtio_write_device_config_le_2(struct virtio_softc *sc, int index,
    uint16_t value)
{
        bus_space_tag_t iot = sc->sc_devcfg_iot;
        bus_space_handle_t ioh = sc->sc_devcfg_ioh;

        if (sc->sc_bus_endian != LITTLE_ENDIAN)
                value = bswap16(value);
        bus_space_write_2(iot, ioh, index, value);
}

void
virtio_write_device_config_le_4(struct virtio_softc *sc, int index,
    uint32_t value)
{
        bus_space_tag_t iot = sc->sc_devcfg_iot;
        bus_space_handle_t ioh = sc->sc_devcfg_ioh;

        if (sc->sc_bus_endian != LITTLE_ENDIAN)
                value = bswap32(value);
        bus_space_write_4(iot, ioh, index, value);
}


/*
 * data structure endianness helpers
 */
uint16_t
virtio_rw16(struct virtio_softc *sc, uint16_t val)
{
        KASSERT(sc);
        return BYTE_ORDER != sc->sc_struct_endian ? bswap16(val) : val;
}

uint32_t
virtio_rw32(struct virtio_softc *sc, uint32_t val)
{
        KASSERT(sc);
        return BYTE_ORDER != sc->sc_struct_endian ? bswap32(val) : val;
}

uint64_t
virtio_rw64(struct virtio_softc *sc, uint64_t val)
{
        KASSERT(sc);
        return BYTE_ORDER != sc->sc_struct_endian ? bswap64(val) : val;
}
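
/*
 * Example (illustrative only; the 8-byte field at offset 0 stands in for a
 * real device config layout such as virtio-blk's capacity field): a child
 * driver reads device config through the helpers above, so that
 * legacy/modern and bus/struct endianness differences are handled in one
 * place:
 *
 *	uint64_t capacity = virtio_read_device_config_8(vsc, 0);
 *
 * Ring structures, in contrast, are accessed directly in memory, so their
 * fields must be converted with the virtio_rw*() helpers, e.g.:
 *
 *	uint16_t used_idx = virtio_rw16(vsc, vq->vq_used->idx);
 */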

/*
 * Interrupt handler.
 */
static void
virtio_soft_intr(void *arg)
{
        struct virtio_softc *sc = arg;

        KASSERT(sc->sc_intrhand != NULL);

        (*sc->sc_intrhand)(sc);
}

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
        /* availoffset == sizeof(vring_desc) * vq_num */
        bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
            ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
        uint16_t hdrlen = offsetof(struct vring_avail, ring);
        if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
                hdrlen += sizeof(uint16_t);

        /* the avail ring holds vq_num entries */
        bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
            vq->vq_availoffset,
            hdrlen + vq->vq_num * sizeof(uint16_t),
            ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
        uint16_t hdrlen = offsetof(struct vring_used, ring);
        if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
                hdrlen += sizeof(uint16_t);

        /* the used ring holds vq_num entries */
        bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
            vq->vq_usedoffset,
            hdrlen + vq->vq_num * sizeof(struct vring_used_elem),
            ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
        int offset = vq->vq_indirectoffset
            + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

        bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
            offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
            ops);
}

bool
virtio_vq_is_enqueued(struct virtio_softc *sc, struct virtqueue *vq)
{

        if (vq->vq_queued) {
                vq->vq_queued = 0;
                vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
        }
        vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
        membar_consumer();

        return (vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx)) ? 1 : 0;
}

/*
 * Scan each vq, doing bus_dmamap_sync for the vq rings (not for the
 * payload), and call (*vq_done)() if some entries have been consumed.
 *
 * Can be used as sc_intrhand.
 */
int
virtio_vq_intr(struct virtio_softc *sc)
{
        struct virtqueue *vq;
        int i, r = 0;

        for (i = 0; i < sc->sc_nvqs; i++) {
                vq = &sc->sc_vqs[i];
                if (virtio_vq_is_enqueued(sc, vq) == 1) {
                        if (vq->vq_done)
                                r |= (*vq->vq_done)(vq);
                }
        }

        return r;
}

int
virtio_vq_intrhand(struct virtio_softc *sc)
{
        struct virtqueue *vq;
        int i, r = 0;

        for (i = 0; i < sc->sc_nvqs; i++) {
                vq = &sc->sc_vqs[i];
                r |= (*vq->vq_intrhand)(vq->vq_intrhand_arg);
        }

        return r;
}
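
/*
 * Example (illustrative sketch; mydev_vq_done is a hypothetical child
 * driver function): a child that passes virtio_vq_intr as intr_hand to
 * virtio_child_attach_start() gets per-virtqueue dispatch by filling in
 * vq_done after virtio_alloc_vq() (see the attach example further down):
 *
 *	static int
 *	mydev_vq_done(struct virtqueue *vq)
 *	{
 *		// drain the used ring; see the virtio_dequeue() example
 *		return 1;	// nonzero if any work was done
 *	}
 */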


/*
 * Increase the event index in order to delay interrupts.
 */
int
virtio_postpone_intr(struct virtio_softc *sc, struct virtqueue *vq,
    uint16_t nslots)
{
        uint16_t idx, nused;

        idx = vq->vq_used_idx + nslots;

        /* set the new event index: avail_ring->used_event = idx */
        *vq->vq_used_event = virtio_rw16(sc, idx);
        membar_producer();

        vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
        vq->vq_queued++;

        nused = (uint16_t)
            (virtio_rw16(sc, vq->vq_used->idx) - vq->vq_used_idx);
        KASSERT(nused <= vq->vq_num);

        return nslots < nused;
}

/*
 * Postpone interrupt until 3/4 of the available descriptors have been
 * consumed.
 */
int
virtio_postpone_intr_smart(struct virtio_softc *sc, struct virtqueue *vq)
{
        uint16_t nslots;

        nslots = (uint16_t)
            (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx) * 3 / 4;

        return virtio_postpone_intr(sc, vq, nslots);
}

/*
 * Postpone interrupt until all of the available descriptors have been
 * consumed.
 */
int
virtio_postpone_intr_far(struct virtio_softc *sc, struct virtqueue *vq)
{
        uint16_t nslots;

        nslots = (uint16_t)
            (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx);

        return virtio_postpone_intr(sc, vq, nslots);
}

/*
 * Start/stop vq interrupt. No guarantee.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
        if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
                /*
                 * No way to disable the interrupt completely with
                 * RingEventIdx. Instead advance used_event by half the
                 * possible value. This won't happen soon and is far enough
                 * in the past to not trigger a spurious interrupt.
                 */
                *vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx + 0x8000);
        } else {
                vq->vq_avail->flags |=
                    virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
        }
        vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
        vq->vq_queued++;
}

int
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
        if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
                /*
                 * If the event index feature has been negotiated,
                 * interrupts are enabled by setting the latest consumed
                 * index in the used_event field.
                 */
                *vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx);
        } else {
                vq->vq_avail->flags &=
                    ~virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
        }
        vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
        vq->vq_queued++;

        return vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx);
}
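
/*
 * Example (illustrative sketch, assuming VIRTIO_F_RING_EVENT_IDX was
 * negotiated): a done handler can reduce the interrupt rate under load
 * by postponing the next interrupt instead of re-arming it after every
 * batch:
 *
 *	do {
 *		// ... dequeue and process finished requests ...
 *	} while (virtio_postpone_intr_smart(vsc, vq));
 *
 * A nonzero return means further entries were consumed in the meantime,
 * so the postponed interrupt may already lie in the past and the handler
 * must loop rather than rely on another interrupt.
 */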

/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq,
    const bool reinit)
{
        int i, j;
        int vq_size = vq->vq_num;

        memset(vq->vq_vaddr, 0, vq->vq_bytesize);

        /* build the indirect descriptor chain */
        if (vq->vq_indirect != NULL) {
                struct vring_desc *vd;

                for (i = 0; i < vq_size; i++) {
                        vd = vq->vq_indirect;
                        vd += vq->vq_maxnsegs * i;
                        for (j = 0; j < vq->vq_maxnsegs - 1; j++) {
                                vd[j].next = virtio_rw16(sc, j + 1);
                        }
                }
        }

        /* free slot management */
        SIMPLEQ_INIT(&vq->vq_freelist);
        for (i = 0; i < vq_size; i++) {
                SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
                    &vq->vq_entries[i], qe_list);
                vq->vq_entries[i].qe_index = i;
        }
        if (!reinit)
                mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

        /* enqueue/dequeue status */
        vq->vq_avail_idx = 0;
        vq->vq_used_idx = 0;
        vq->vq_queued = 0;
        if (!reinit) {
                mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
                mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
        }
        vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
        vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
        vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
    int maxsegsize, int maxnsegs, const char *name)
{
        int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
        int rsegs, r, hdrlen;
#define VIRTQUEUE_ALIGN(n)	(((n) + (VIRTIO_PAGE_SIZE - 1)) &	\
				 ~(VIRTIO_PAGE_SIZE - 1))

        /* Make sure callers allocate vqs in order */
        KASSERT(sc->sc_nvqs == index);

        memset(vq, 0, sizeof(*vq));

        vq_size = sc->sc_ops->read_queue_size(sc, index);
        if (vq_size == 0) {
                aprint_error_dev(sc->sc_dev,
                    "virtqueue %d for %s does not exist\n",
                    index, name);
                goto err;
        }

        hdrlen = sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX ? 3 : 2;

        /* allocsize1: descriptor table + avail ring + pad */
        allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc) * vq_size
            + sizeof(uint16_t) * (hdrlen + vq_size));
        /* allocsize2: used ring + pad */
        allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t) * hdrlen
            + sizeof(struct vring_used_elem) * vq_size);
        /* allocsize3: indirect table */
        if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
                allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
        else
                allocsize3 = 0;
        allocsize = allocsize1 + allocsize2 + allocsize3;

        /* alloc and map the memory */
        r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
            &vq->vq_segs[0], 1, &rsegs, BUS_DMA_WAITOK);
        if (r != 0) {
                aprint_error_dev(sc->sc_dev,
                    "virtqueue %d for %s allocation failed, "
                    "error code %d\n", index, name, r);
                goto err;
        }
        r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], rsegs, allocsize,
            &vq->vq_vaddr, BUS_DMA_WAITOK);
        if (r != 0) {
                aprint_error_dev(sc->sc_dev,
                    "virtqueue %d for %s map failed, "
                    "error code %d\n", index, name, r);
                goto err;
        }
        r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
            BUS_DMA_WAITOK, &vq->vq_dmamap);
        if (r != 0) {
                aprint_error_dev(sc->sc_dev,
                    "virtqueue %d for %s dmamap creation failed, "
                    "error code %d\n", index, name, r);
                goto err;
        }
        r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
            vq->vq_vaddr, allocsize, NULL, BUS_DMA_WAITOK);
        if (r != 0) {
                aprint_error_dev(sc->sc_dev,
                    "virtqueue %d for %s dmamap load failed, "
                    "error code %d\n", index, name, r);
                goto err;
        }

        /* remember addresses and offsets for later use */
        vq->vq_owner = sc;
        vq->vq_num = vq_size;
        vq->vq_index = index;
        vq->vq_desc = vq->vq_vaddr;
        vq->vq_availoffset = sizeof(struct vring_desc) * vq_size;
        vq->vq_avail = (void *)(((char *)vq->vq_desc) + vq->vq_availoffset);
        vq->vq_used_event = (uint16_t *)((char *)vq->vq_avail +
            offsetof(struct vring_avail, ring[vq->vq_num]));
        vq->vq_usedoffset = allocsize1;
        vq->vq_used = (void *)(((char *)vq->vq_desc) + vq->vq_usedoffset);
        vq->vq_avail_event = (uint16_t *)((char *)vq->vq_used +
            offsetof(struct vring_used, ring[vq->vq_num]));

        if (allocsize3 > 0) {
                vq->vq_indirectoffset = allocsize1 + allocsize2;
                vq->vq_indirect = (void *)(((char *)vq->vq_desc)
                    + vq->vq_indirectoffset);
        }
        vq->vq_bytesize = allocsize;
        vq->vq_maxsegsize = maxsegsize;
        vq->vq_maxnsegs = maxnsegs;

        /* free slot management */
        vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry) * vq_size,
            KM_SLEEP);
        virtio_init_vq(sc, vq, false);

        /* set the vq address */
        sc->sc_ops->setup_queue(sc, index,
            vq->vq_dmamap->dm_segs[0].ds_addr);

        aprint_verbose_dev(sc->sc_dev,
            "allocated %u bytes for virtqueue %d for %s, "
            "size %d\n", allocsize, index, name, vq_size);
        if (allocsize3 > 0)
                aprint_verbose_dev(sc->sc_dev,
                    "using %d bytes (%d entries) "
                    "indirect descriptors\n",
                    allocsize3, maxnsegs * vq_size);

        sc->sc_nvqs++;

        return 0;

err:
        sc->sc_ops->setup_queue(sc, index, 0);
        if (vq->vq_dmamap)
                bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
        if (vq->vq_vaddr)
                bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
        if (vq->vq_segs[0].ds_addr)
                bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
        memset(vq, 0, sizeof(*vq));

        return -1;
}
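
/*
 * Example (illustrative sketch; the names are hypothetical): a child
 * driver allocates its virtqueues in index order during attach, after
 * feature negotiation:
 *
 *	r = virtio_alloc_vq(vsc, &sc->sc_vq, 0, MAXPHYS,
 *	    MAXPHYS / NBPG + 2, "requests");
 *	if (r != 0)
 *		goto err;
 *	sc->sc_vq.vq_done = mydev_vq_done;
 *
 * maxnsegs should cover the payload segments plus any command/status
 * descriptors. When indirect descriptors are in use (sc_indirect set and
 * maxnsegs >= MINSEG_INDIRECT), every slot additionally gets its own
 * maxnsegs-sized indirect descriptor table.
 */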

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
        struct vq_entry *qe;
        int i = 0;

        /* device must be already deactivated */
        /* confirm the vq is empty */
        SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
                i++;
        }
        if (i != vq->vq_num) {
                printf("%s: freeing non-empty vq, index %d\n",
                    device_xname(sc->sc_dev), vq->vq_index);
                return EBUSY;
        }

        /* tell device that there's no virtqueue any longer */
        sc->sc_ops->setup_queue(sc, vq->vq_index, 0);

        kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
        bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
        bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
        bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
        bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
        mutex_destroy(&vq->vq_freelist_lock);
        mutex_destroy(&vq->vq_uring_lock);
        mutex_destroy(&vq->vq_aring_lock);
        memset(vq, 0, sizeof(*vq));

        sc->sc_nvqs--;

        return 0;
}

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
        struct vq_entry *qe;

        mutex_enter(&vq->vq_freelist_lock);
        if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
                mutex_exit(&vq->vq_freelist_lock);
                return NULL;
        }
        qe = SIMPLEQ_FIRST(&vq->vq_freelist);
        SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
        mutex_exit(&vq->vq_freelist_lock);

        return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
        mutex_enter(&vq->vq_freelist_lock);
        SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
        mutex_exit(&vq->vq_freelist_lock);
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage: arrays of <queue size> elements hold the per-slot state:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *	r = virtio_enqueue_prep(sc, vq, &slot);	// allocate a slot
 *	if (r)		// currently 0 or EAGAIN
 *		return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *		virtio_enqueue_abort(sc, vq, slot);
 *		return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *	    dmamap_payload[slot]->dm_nsegs + 1);
 *					// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		return r;	// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot], ... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot], ...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
        struct vq_entry *qe1;

        KASSERT(slotp != NULL);

        qe1 = vq_alloc_entry(vq);
        if (qe1 == NULL)
                return EAGAIN;
        /* next slot is not allocated yet */
        qe1->qe_next = -1;
        *slotp = qe1->qe_index;

        return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
    int slot, int nsegs)
{
        int indirect;
        struct vq_entry *qe1 = &vq->vq_entries[slot];

        KASSERT(qe1->qe_next == -1);
        KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

        if ((vq->vq_indirect != NULL) &&
            (nsegs >= MINSEG_INDIRECT) &&
            (nsegs <= vq->vq_maxnsegs))
                indirect = 1;
        else
                indirect = 0;
        qe1->qe_indirect = indirect;

        if (indirect) {
                struct vring_desc *vd;
                uint64_t addr;
                int i;

                vd = &vq->vq_desc[qe1->qe_index];
                addr = vq->vq_dmamap->dm_segs[0].ds_addr
                    + vq->vq_indirectoffset;
                addr += sizeof(struct vring_desc)
                    * vq->vq_maxnsegs * qe1->qe_index;
                vd->addr = virtio_rw64(sc, addr);
                vd->len = virtio_rw32(sc, sizeof(struct vring_desc) * nsegs);
                vd->flags = virtio_rw16(sc, VRING_DESC_F_INDIRECT);

                vd = vq->vq_indirect;
                vd += vq->vq_maxnsegs * qe1->qe_index;
                qe1->qe_desc_base = vd;

                for (i = 0; i < nsegs - 1; i++) {
                        vd[i].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
                }
                vd[i].flags = virtio_rw16(sc, 0);
                qe1->qe_next = 0;

                return 0;
        } else {
                struct vring_desc *vd;
                struct vq_entry *qe;
                int i, s;

                vd = &vq->vq_desc[0];
                qe1->qe_desc_base = vd;
                qe1->qe_next = qe1->qe_index;
                s = slot;
                for (i = 0; i < nsegs - 1; i++) {
                        qe = vq_alloc_entry(vq);
                        if (qe == NULL) {
                                vd[s].flags = virtio_rw16(sc, 0);
                                virtio_enqueue_abort(sc, vq, slot);
                                return EAGAIN;
                        }
                        vd[s].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
                        vd[s].next = virtio_rw16(sc, qe->qe_index);
                        s = qe->qe_index;
                }
                vd[s].flags = virtio_rw16(sc, 0);

                return 0;
        }
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bool write)
{
        struct vq_entry *qe1 = &vq->vq_entries[slot];
        struct vring_desc *vd = qe1->qe_desc_base;
        int i;
        int s = qe1->qe_next;

        KASSERT(s >= 0);
        KASSERT(dmamap->dm_nsegs > 0);

        for (i = 0; i < dmamap->dm_nsegs; i++) {
                vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[i].ds_addr);
                vd[s].len = virtio_rw32(sc, dmamap->dm_segs[i].ds_len);
                if (!write)
                        vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
                s = virtio_rw16(sc, vd[s].next);
        }
        qe1->qe_next = s;

        return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
    bool write)
{
        struct vq_entry *qe1 = &vq->vq_entries[slot];
        struct vring_desc *vd = qe1->qe_desc_base;
        int s = qe1->qe_next;

        KASSERT(s >= 0);
        KASSERT(dmamap->dm_nsegs == 1); /* XXX */
        KASSERT((dmamap->dm_segs[0].ds_len > start) &&
            (dmamap->dm_segs[0].ds_len >= start + len));

        vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[0].ds_addr + start);
        vd[s].len = virtio_rw32(sc, len);
        if (!write)
                vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
        qe1->qe_next = virtio_rw16(sc, vd[s].next);

        return 0;
}

/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bool notifynow)
{
        struct vq_entry *qe1;

        if (slot < 0) {
                mutex_enter(&vq->vq_aring_lock);
                goto notify;
        }
        vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
        qe1 = &vq->vq_entries[slot];
        if (qe1->qe_indirect)
                vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
        mutex_enter(&vq->vq_aring_lock);
        vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] =
            virtio_rw16(sc, slot);

notify:
        if (notifynow) {
                uint16_t o, n, t;
                uint16_t flags;

                o = virtio_rw16(sc, vq->vq_avail->idx);
                n = vq->vq_avail_idx;

                /* publish avail idx */
                membar_producer();
                vq->vq_avail->idx = virtio_rw16(sc, vq->vq_avail_idx);
                vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
                vq->vq_queued++;

                membar_consumer();
                vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
                if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
                        t = virtio_rw16(sc, *vq->vq_avail_event) + 1;
                        if ((uint16_t)(n - t) < (uint16_t)(n - o))
                                sc->sc_ops->kick(sc, vq->vq_index);
                } else {
                        flags = virtio_rw16(sc, vq->vq_used->flags);
                        if (!(flags & VRING_USED_F_NO_NOTIFY))
                                sc->sc_ops->kick(sc, vq->vq_index);
                }
                vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
                vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
        }
        mutex_exit(&vq->vq_aring_lock);

        return 0;
}
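
/*
 * Example (illustrative sketch): multiple requests can be batched by
 * committing each slot without notification and then issuing a single
 * notify pass; enqueue_commit accepts a negative slot to mean "just
 * notify":
 *
 *	for (i = 0; i < n; i++) {
 *		// ... prep/reserve/enqueue request i ...
 *		virtio_enqueue_commit(sc, vq, slot[i], false);
 *	}
 *	virtio_enqueue_commit(sc, vq, -1, true);	// publish and kick
 */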

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
        struct vq_entry *qe = &vq->vq_entries[slot];
        struct vring_desc *vd;
        int s;

        if (qe->qe_next < 0) {
                vq_free_entry(vq, qe);
                return 0;
        }

        s = slot;
        vd = &vq->vq_desc[0];
        while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
                s = virtio_rw16(sc, vd[s].next);
                vq_free_entry(vq, qe);
                qe = &vq->vq_entries[s];
        }
        vq_free_entry(vq, qe);
        return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from the used ring; the dmamap_sync for the
 * used ring has already been done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
        uint16_t slot, usedidx;
        struct vq_entry *qe;

        if (vq->vq_used_idx == virtio_rw16(sc, vq->vq_used->idx))
                return ENOENT;
        mutex_enter(&vq->vq_uring_lock);
        usedidx = vq->vq_used_idx++;
        mutex_exit(&vq->vq_uring_lock);
        usedidx %= vq->vq_num;
        slot = virtio_rw32(sc, vq->vq_used->ring[usedidx].id);
        qe = &vq->vq_entries[slot];

        if (qe->qe_indirect)
                vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

        if (slotp)
                *slotp = slot;
        if (lenp)
                *lenp = virtio_rw32(sc, vq->vq_used->ring[usedidx].len);

        return 0;
}

/*
 * dequeue_commit: complete the dequeue; the slot is recycled for future use.
 * If you forget to call this the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
        struct vq_entry *qe = &vq->vq_entries[slot];
        struct vring_desc *vd = &vq->vq_desc[0];
        int s = slot;

        while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
                s = virtio_rw16(sc, vd[s].next);
                vq_free_entry(vq, qe);
                qe = &vq->vq_entries[s];
        }
        vq_free_entry(vq, qe);

        return 0;
}
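
/*
 * Example (illustrative sketch): a vq_done handler typically drains the
 * used ring like this, unloading payload dmamaps and recycling slots:
 *
 *	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 *		bus_dmamap_sync(dmat, dmamap_payload[slot], 0, len,
 *		    BUS_DMASYNC_POSTREAD);
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		// ... hand the completed request back to the upper layer ...
 *		virtio_dequeue_commit(vsc, vq, slot);
 *	}
 */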

/*
 * Attach a child; fill in all the members.
 */
void
virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
    struct virtqueue *vqs,
    virtio_callback config_change,
    virtio_callback intr_hand,
    int req_flags, int req_features, const char *feat_bits)
{
        char buf[1024];

        sc->sc_child = child;
        sc->sc_ipl = ipl;
        sc->sc_vqs = vqs;
        sc->sc_config_change = config_change;
        sc->sc_intrhand = intr_hand;
        sc->sc_flags = req_flags;

        virtio_negotiate_features(sc, req_features);
        snprintb(buf, sizeof(buf), feat_bits, sc->sc_active_features);
        aprint_normal(": features: %s\n", buf);
        aprint_naive("\n");
}

void
virtio_child_attach_set_vqs(struct virtio_softc *sc,
    struct virtqueue *vqs, int nvq_pairs)
{

        KASSERT(nvq_pairs == 1 ||
            (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) == 0);
        if (nvq_pairs > 1)
                sc->sc_child_mq = true;

        sc->sc_vqs = vqs;
}

int
virtio_child_attach_finish(struct virtio_softc *sc)
{
        int r;

        sc->sc_finished_called = true;
        r = sc->sc_ops->alloc_interrupts(sc);
        if (r != 0) {
                aprint_error_dev(sc->sc_dev,
                    "failed to allocate interrupts\n");
                goto fail;
        }

        r = sc->sc_ops->setup_interrupts(sc, 0);
        if (r != 0) {
                aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
                goto fail;
        }

        KASSERT(sc->sc_soft_ih == NULL);
        if (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) {
                u_int flags = SOFTINT_NET;
                if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
                        flags |= SOFTINT_MPSAFE;

                sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr,
                    sc);
                if (sc->sc_soft_ih == NULL) {
                        sc->sc_ops->free_interrupts(sc);
                        aprint_error_dev(sc->sc_dev,
                            "failed to establish soft interrupt\n");
                        goto fail;
                }
        }

        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
        return 0;

fail:
        if (sc->sc_soft_ih) {
                softint_disestablish(sc->sc_soft_ih);
                sc->sc_soft_ih = NULL;
        }

        sc->sc_ops->free_interrupts(sc);

        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
        return 1;
}
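
/*
 * Example (illustrative sketch; MYDEV_FEATURES and MYDEV_FLAG_BITS are
 * hypothetical): the usual child attach sequence is attach_start ->
 * allocate vqs -> attach_finish:
 *
 *	virtio_child_attach_start(vsc, self, IPL_NET, NULL,
 *	    mydev_config_change, virtio_vq_intr, 0,
 *	    MYDEV_FEATURES, MYDEV_FLAG_BITS);
 *	if (virtio_alloc_vq(vsc, &sc->sc_vq, 0, MAXPHYS, 2, "mydev") != 0)
 *		goto err;
 *	sc->sc_vq.vq_done = mydev_vq_done;
 *	virtio_child_attach_set_vqs(vsc, &sc->sc_vq, 1);
 *	if (virtio_child_attach_finish(vsc) != 0)
 *		goto err;
 *	return;
 * err:
 *	virtio_child_attach_failed(vsc);
 */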

void
virtio_child_detach(struct virtio_softc *sc)
{
        sc->sc_child = NULL;
        sc->sc_vqs = NULL;

        virtio_device_reset(sc);

        sc->sc_ops->free_interrupts(sc);

        if (sc->sc_soft_ih) {
                softint_disestablish(sc->sc_soft_ih);
                sc->sc_soft_ih = NULL;
        }
}

void
virtio_child_attach_failed(struct virtio_softc *sc)
{
        virtio_child_detach(sc);

        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);

        sc->sc_child = VIRTIO_CHILD_FAILED;
}

bus_dma_tag_t
virtio_dmat(struct virtio_softc *sc)
{
        return sc->sc_dmat;
}

device_t
virtio_child(struct virtio_softc *sc)
{
        return sc->sc_child;
}

int
virtio_intrhand(struct virtio_softc *sc)
{
        return (*sc->sc_intrhand)(sc);
}

uint64_t
virtio_features(struct virtio_softc *sc)
{
        return sc->sc_active_features;
}

int
virtio_attach_failed(struct virtio_softc *sc)
{
        device_t self = sc->sc_dev;

        /* no error message if nothing was connected, but it still failed */
        if (sc->sc_childdevid == 0)
                return 1;

        if (sc->sc_child == NULL) {
                aprint_error_dev(self,
                    "no matching child driver; not configured\n");
                return 1;
        }

        if (sc->sc_child == VIRTIO_CHILD_FAILED) {
                aprint_error_dev(self, "virtio configuration failed\n");
                return 1;
        }

        /* sanity check */
        if (!sc->sc_finished_called) {
                aprint_error_dev(self, "virtio internal error, child driver "
                    "signaled OK but didn't initialize interrupts\n");
                return 1;
        }

        return 0;
}

void
virtio_print_device_type(device_t self, int id, int revision)
{
        aprint_normal_dev(self, "%s device (rev. 0x%02x)\n",
            (id < NDEVNAMES ? virtio_device_name[id] : "Unknown"),
            revision);
}


MODULE(MODULE_CLASS_DRIVER, virtio, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_modcmd(modcmd_t cmd, void *opaque)
{
        int error = 0;

#ifdef _MODULE
        switch (cmd) {
        case MODULE_CMD_INIT:
                error = config_init_component(cfdriver_ioconf_virtio,
                    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
                break;
        case MODULE_CMD_FINI:
                error = config_fini_component(cfdriver_ioconf_virtio,
                    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
                break;
        default:
                error = ENOTTY;
                break;
        }
#endif

        return error;
}