blkfront.c revision 231743
/*
 * XenBSD block device driver
 *
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/xen/blkfront/blkfront.c 231743 2012-02-15 06:45:49Z gibbs $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/sysctl.h>

#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <machine/intr_machdep.h>
#include <machine/vmparam.h>
#include <sys/bus_dma.h>

#include <machine/_inttypes.h>
#include <machine/xen/xen-os.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>

#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn.h>
#include <xen/gnttab.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/protocols.h>
#include <xen/xenbus/xenbusvar.h>

#include <geom/geom_disk.h>

#include <dev/xen/blkfront/block.h>

#include "xenbus_if.h"

/* prototypes */
static void xb_free_command(struct xb_command *cm);
static void xb_startio(struct xb_softc *sc);
static void blkfront_connect(struct xb_softc *);
static void blkfront_closing(device_t);
static int blkfront_detach(device_t);
static int setup_blkring(struct xb_softc *);
static void blkif_int(void *);
static void blkfront_initialize(struct xb_softc *);
static int blkif_completion(struct xb_command *);
static void blkif_free(struct xb_softc *);
static void blkif_queue_cb(void *, bus_dma_segment_t *, int, int);

static MALLOC_DEFINE(M_XENBLOCKFRONT, "xbd", "Xen Block Front driver data");

#define GRANT_INVALID_REF 0
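/*
 * Grant reference 0 serves as this driver's "unallocated" sentinel:
 * the ring_ref[] slots are initialized to GRANT_INVALID_REF in
 * blkfront_attach() and reset to it in blkif_free() once foreign
 * access has been revoked.
 */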
/* Control whether runtime update of vbds is enabled. */
#define ENABLE_VBD_UPDATE 0

#if ENABLE_VBD_UPDATE
static void vbd_update(void);
#endif

#define BLKIF_STATE_DISCONNECTED 0
#define BLKIF_STATE_CONNECTED    1
#define BLKIF_STATE_SUSPENDED    2

#ifdef notyet
static char *blkif_state_name[] = {
	[BLKIF_STATE_DISCONNECTED] = "disconnected",
	[BLKIF_STATE_CONNECTED]    = "connected",
	[BLKIF_STATE_SUSPENDED]    = "closed",
};

static char *blkif_status_name[] = {
	[BLKIF_INTERFACE_STATUS_CLOSED]       = "closed",
	[BLKIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
	[BLKIF_INTERFACE_STATUS_CONNECTED]    = "connected",
	[BLKIF_INTERFACE_STATUS_CHANGED]      = "changed",
};
#endif

#if 0
#define DPRINTK(fmt, args...) printf("[XEN] %s:%d: " fmt ".\n", __func__, __LINE__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

static int blkif_open(struct disk *dp);
static int blkif_close(struct disk *dp);
static int blkif_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td);
static int blkif_queue_request(struct xb_softc *sc, struct xb_command *cm);
static void xb_strategy(struct bio *bp);

// In order to quiesce the device during kernel dumps, outstanding requests to
// DOM0 for disk reads/writes need to be accounted for.
static int xb_dump(void *, void *, vm_offset_t, off_t, size_t);

/* XXX move to xb_vbd.c when VBD update support is added */
#define MAX_VBDS 64

#define XBD_SECTOR_SIZE	512	/* XXX: assume for now */
#define XBD_SECTOR_SHFT	9

/*
 * Translate Linux major/minor to an appropriate name and unit
 * number. For HVM guests, this allows us to use the same drive names
 * with blkfront as the emulated drives, easing transition slightly.
 */
static void
blkfront_vdevice_to_unit(uint32_t vdevice, int *unit, const char **name)
{
	static struct vdev_info {
		int major;
		int shift;
		int base;
		const char *name;
	} info[] = {
		{3,	6,	0,	"ad"},	/* ide0 */
		{22,	6,	2,	"ad"},	/* ide1 */
		{33,	6,	4,	"ad"},	/* ide2 */
		{34,	6,	6,	"ad"},	/* ide3 */
		{56,	6,	8,	"ad"},	/* ide4 */
		{57,	6,	10,	"ad"},	/* ide5 */
		{88,	6,	12,	"ad"},	/* ide6 */
		{89,	6,	14,	"ad"},	/* ide7 */
		{90,	6,	16,	"ad"},	/* ide8 */
		{91,	6,	18,	"ad"},	/* ide9 */

		{8,	4,	0,	"da"},	/* scsi disk0 */
		{65,	4,	16,	"da"},	/* scsi disk1 */
		{66,	4,	32,	"da"},	/* scsi disk2 */
		{67,	4,	48,	"da"},	/* scsi disk3 */
		{68,	4,	64,	"da"},	/* scsi disk4 */
		{69,	4,	80,	"da"},	/* scsi disk5 */
		{70,	4,	96,	"da"},	/* scsi disk6 */
		{71,	4,	112,	"da"},	/* scsi disk7 */
		{128,	4,	128,	"da"},	/* scsi disk8 */
		{129,	4,	144,	"da"},	/* scsi disk9 */
		{130,	4,	160,	"da"},	/* scsi disk10 */
		{131,	4,	176,	"da"},	/* scsi disk11 */
		{132,	4,	192,	"da"},	/* scsi disk12 */
		{133,	4,	208,	"da"},	/* scsi disk13 */
		{134,	4,	224,	"da"},	/* scsi disk14 */
		{135,	4,	240,	"da"},	/* scsi disk15 */

		{202,	4,	0,	"xbd"},	/* xbd */

		{0,	0,	0,	NULL},
	};
	int major = vdevice >> 8;
	int minor = vdevice & 0xff;
	int i;

	if (vdevice & (1 << 28)) {
		*unit = (vdevice & ((1 << 28) - 1)) >> 8;
		*name = "xbd";
		return;
	}

	for (i = 0; info[i].major; i++) {
		if (info[i].major == major) {
			*unit = info[i].base + (minor >> info[i].shift);
			*name = info[i].name;
			return;
		}
	}

	*unit = minor >> 4;
	*name = "xbd";
}
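/*
 * Example: vdevice 0x301 decodes to major 3 ("ad", ide0, shift 6,
 * base 0) and minor 1, so the unit is 0 + (1 >> 6) = 0 and the drive
 * attaches as "ad0".  Devices using the extended ID scheme (bit 28
 * set) always map onto the "xbd" namespace.
 */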
int
xlvbd_add(struct xb_softc *sc, blkif_sector_t sectors, int vdevice,
	  uint16_t vdisk_info, unsigned long sector_size)
{
	int unit, error = 0;
	const char *name;

	blkfront_vdevice_to_unit(vdevice, &unit, &name);

	sc->xb_unit = unit;

	if (strcmp(name, "xbd"))
		device_printf(sc->xb_dev, "attaching as %s%d\n", name, unit);

	sc->xb_disk = disk_alloc();
	sc->xb_disk->d_unit = sc->xb_unit;
	sc->xb_disk->d_open = blkif_open;
	sc->xb_disk->d_close = blkif_close;
	sc->xb_disk->d_ioctl = blkif_ioctl;
	sc->xb_disk->d_strategy = xb_strategy;
	sc->xb_disk->d_dump = xb_dump;
	sc->xb_disk->d_name = name;
	sc->xb_disk->d_drv1 = sc;
	sc->xb_disk->d_sectorsize = sector_size;

	sc->xb_disk->d_mediasize = sectors * sector_size;
	sc->xb_disk->d_maxsize = sc->max_request_size - PAGE_SIZE;
	sc->xb_disk->d_flags = 0;
	disk_create(sc->xb_disk, DISK_VERSION_00);

	return error;
}

/************************ end VBD support *****************/

/*
 * Read/write routine for a buffer.  Finds the proper unit, places it on
 * the sortq and kicks the controller.
 */
static void
xb_strategy(struct bio *bp)
{
	struct xb_softc *sc = (struct xb_softc *)bp->bio_disk->d_drv1;

	/* bogus disk? */
	if (sc == NULL) {
		bp->bio_error = EINVAL;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
		biodone(bp);
		return;
	}

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	mtx_lock(&sc->xb_io_lock);

	xb_enqueue_bio(sc, bp);
	xb_startio(sc);

	mtx_unlock(&sc->xb_io_lock);
	return;
}

static void
xb_bio_complete(struct xb_softc *sc, struct xb_command *cm)
{
	struct bio *bp;

	bp = cm->bp;

	if (unlikely(cm->status != BLKIF_RSP_OKAY)) {
		disk_err(bp, "disk error" , -1, 0);
		printf(" status: %x\n", cm->status);
		bp->bio_flags |= BIO_ERROR;
	}

	if (bp->bio_flags & BIO_ERROR)
		bp->bio_error = EIO;
	else
		bp->bio_resid = 0;

	xb_free_command(cm);
	biodone(bp);
}

// Quiesce the disk writes for a dump file before allowing the next buffer.
static void
xb_quiesce(struct xb_softc *sc)
{
	int mtd;

	// While there are outstanding requests
	while (!TAILQ_EMPTY(&sc->cm_busy)) {
		RING_FINAL_CHECK_FOR_RESPONSES(&sc->ring, mtd);
		if (mtd) {
			/* Received request completions, update queue. */
			blkif_int(sc);
		}
		if (!TAILQ_EMPTY(&sc->cm_busy)) {
			/*
			 * Still pending requests, wait for the disk i/o
			 * to complete.
			 */
			HYPERVISOR_yield();
		}
	}
}

/* Kernel dump function for a paravirtualized disk device */
static void
xb_dump_complete(struct xb_command *cm)
{

	xb_enqueue_complete(cm);
}
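/*
 * Dump path overview: xb_quiesce() drains all in-flight commands, the
 * dump buffer is carved into max_request_size chunks and queued as
 * BLKIF_OP_WRITE requests, xb_startio() posts them to the backend, and
 * a second quiesce polls for completion before the statuses are
 * checked below.
 */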
static int
xb_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
	size_t length)
{
	struct disk *dp = arg;
	struct xb_softc *sc = (struct xb_softc *) dp->d_drv1;
	struct xb_command *cm;
	size_t chunk;
	int sbp;
	int rc = 0;

	if (length <= 0)
		return (rc);

	xb_quiesce(sc);	/* All quiet on the western front. */

	/*
	 * If this lock is held, then this module is failing, and a
	 * successful kernel dump is highly unlikely anyway.
	 */
	mtx_lock(&sc->xb_io_lock);

	/* Split the 64KB block as needed */
	for (sbp=0; length > 0; sbp++) {
		cm = xb_dequeue_free(sc);
		if (cm == NULL) {
			mtx_unlock(&sc->xb_io_lock);
			device_printf(sc->xb_dev, "dump: no more commands?\n");
			return (EBUSY);
		}

		if (gnttab_alloc_grant_references(sc->max_request_segments,
						  &cm->gref_head) != 0) {
			xb_free_command(cm);
			mtx_unlock(&sc->xb_io_lock);
			device_printf(sc->xb_dev, "no more grant allocs?\n");
			return (EBUSY);
		}

		chunk = length > sc->max_request_size
		      ? sc->max_request_size : length;
		cm->data = virtual;
		cm->datalen = chunk;
		cm->operation = BLKIF_OP_WRITE;
		cm->sector_number = offset / dp->d_sectorsize;
		cm->cm_complete = xb_dump_complete;

		xb_enqueue_ready(cm);

		length -= chunk;
		offset += chunk;
		virtual = (char *) virtual + chunk;
	}

	/* Tell DOM0 to do the I/O */
	xb_startio(sc);
	mtx_unlock(&sc->xb_io_lock);

	/* Poll for the completion. */
	xb_quiesce(sc);	/* All quiet on the eastern front */

	/* If there were any errors, bail out... */
	while ((cm = xb_dequeue_complete(sc)) != NULL) {
		if (cm->status != BLKIF_RSP_OKAY) {
			device_printf(sc->xb_dev,
			    "Dump I/O failed at sector %jd\n",
			    cm->sector_number);
			rc = EIO;
		}
		xb_free_command(cm);
	}

	return (rc);
}

static int
blkfront_probe(device_t dev)
{

	if (!strcmp(xenbus_get_type(dev), "vbd")) {
		device_set_desc(dev, "Virtual Block Device");
		device_quiet(dev);
		return (0);
	}

	return (ENXIO);
}
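/*
 * Export the negotiated transport parameters as read-only sysctl
 * nodes.  These land under the device's sysctl tree (normally
 * dev.xbd.<unit> for this driver), which makes it easy to verify what
 * limits were actually agreed with the backend.
 */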
static void
xb_setup_sysctl(struct xb_softc *xb)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;

	sysctl_ctx = device_get_sysctl_ctx(xb->xb_dev);
	if (sysctl_ctx == NULL)
		return;

	sysctl_tree = device_get_sysctl_tree(xb->xb_dev);
	if (sysctl_tree == NULL)
		return;

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "max_requests", CTLFLAG_RD, &xb->max_requests, -1,
	    "maximum outstanding requests (negotiated)");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "max_request_segments", CTLFLAG_RD,
	    &xb->max_request_segments, 0,
	    "maximum number of pages per request (negotiated)");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "max_request_size", CTLFLAG_RD,
	    &xb->max_request_size, 0,
	    "maximum size in bytes of a request (negotiated)");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "ring_pages", CTLFLAG_RD,
	    &xb->ring_pages, 0,
	    "communication channel pages (negotiated)");
}

/*
 * Setup supplies the backend directory and virtual device number.  We
 * place an event channel and shared frame entries, and watch the
 * backend state to wait until it is ready.
 */
static int
blkfront_attach(device_t dev)
{
	struct xb_softc *sc;
	const char *name;
	uint32_t vdevice;
	int error;
	int i;
	int unit;

	/* FIXME: Use dynamic device id if this is not set. */
	error = xs_scanf(XST_NIL, xenbus_get_node(dev),
	    "virtual-device", NULL, "%" PRIu32, &vdevice);
	if (error) {
		xenbus_dev_fatal(dev, error, "reading virtual-device");
		device_printf(dev, "Couldn't determine virtual device.\n");
		return (error);
	}

	blkfront_vdevice_to_unit(vdevice, &unit, &name);
	if (!strcmp(name, "xbd"))
		device_set_unit(dev, unit);

	sc = device_get_softc(dev);
	mtx_init(&sc->xb_io_lock, "blkfront i/o lock", NULL, MTX_DEF);
	xb_initq_free(sc);
	xb_initq_busy(sc);
	xb_initq_ready(sc);
	xb_initq_complete(sc);
	xb_initq_bio(sc);
	for (i = 0; i < XBF_MAX_RING_PAGES; i++)
		sc->ring_ref[i] = GRANT_INVALID_REF;

	sc->xb_dev = dev;
	sc->vdevice = vdevice;
	sc->connected = BLKIF_STATE_DISCONNECTED;

	xb_setup_sysctl(sc);

	/* Wait for backend device to publish its protocol capabilities. */
	xenbus_set_state(dev, XenbusStateInitialising);

	return (0);
}

static int
blkfront_suspend(device_t dev)
{
	struct xb_softc *sc = device_get_softc(dev);
	int retval;
	int saved_state;

	/* Prevent new requests being issued until we fix things up. */
	mtx_lock(&sc->xb_io_lock);
	saved_state = sc->connected;
	sc->connected = BLKIF_STATE_SUSPENDED;

	/* Wait for outstanding I/O to drain. */
	retval = 0;
	while (TAILQ_EMPTY(&sc->cm_busy) == 0) {
		if (msleep(&sc->cm_busy, &sc->xb_io_lock,
			   PRIBIO, "blkf_susp", 30 * hz) == EWOULDBLOCK) {
			retval = EBUSY;
			break;
		}
	}
	mtx_unlock(&sc->xb_io_lock);

	if (retval != 0)
		sc->connected = saved_state;

	return (retval);
}

static int
blkfront_resume(device_t dev)
{
	struct xb_softc *sc = device_get_softc(dev);

	DPRINTK("blkfront_resume: %s\n", xenbus_get_node(dev));

	blkif_free(sc);
	blkfront_initialize(sc);
	return (0);
}
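/*
 * blkfront_initialize() performs the front/back parameter negotiation:
 * defaults are set first, the backend's advertised limits are read
 * from the XenStore (each with an independent xs_scanf() so a missing
 * key leaves the default intact), the results are clamped to this
 * driver's compile-time maxima, and the chosen values are then written
 * back for the backend to consume.
 */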
static void
blkfront_initialize(struct xb_softc *sc)
{
	const char *otherend_path;
	const char *node_path;
	uint32_t max_ring_page_order;
	int error;
	int i;

	if (xenbus_get_state(sc->xb_dev) != XenbusStateInitialising) {
		/* Initialization has already been performed. */
		return;
	}

	/*
	 * Protocol defaults valid even if negotiation for a
	 * setting fails.
	 */
	max_ring_page_order = 0;
	sc->ring_pages = 1;
	sc->max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
	sc->max_request_size = (sc->max_request_segments - 1) * PAGE_SIZE;
	sc->max_request_blocks = BLKIF_SEGS_TO_BLOCKS(sc->max_request_segments);

	/*
	 * Protocol negotiation.
	 *
	 * \note xs_gather() returns on the first encountered error, so
	 *       we must use independent calls in order to guarantee
	 *       we don't miss information in a sparsely populated back-end
	 *       tree.
	 *
	 * \note xs_scanf() does not update variables for unmatched
	 *       fields.
	 */
	otherend_path = xenbus_get_otherend_path(sc->xb_dev);
	node_path = xenbus_get_node(sc->xb_dev);

	/* Support both backend schemes for relaying ring page limits. */
	(void)xs_scanf(XST_NIL, otherend_path,
		       "max-ring-page-order", NULL, "%" PRIu32,
		       &max_ring_page_order);
	sc->ring_pages = 1 << max_ring_page_order;
	(void)xs_scanf(XST_NIL, otherend_path,
		       "max-ring-pages", NULL, "%" PRIu32,
		       &sc->ring_pages);
	if (sc->ring_pages < 1)
		sc->ring_pages = 1;

	sc->max_requests = BLKIF_MAX_RING_REQUESTS(sc->ring_pages * PAGE_SIZE);
	(void)xs_scanf(XST_NIL, otherend_path,
		       "max-requests", NULL, "%" PRIu32,
		       &sc->max_requests);

	(void)xs_scanf(XST_NIL, otherend_path,
		       "max-request-segments", NULL, "%" PRIu32,
		       &sc->max_request_segments);

	(void)xs_scanf(XST_NIL, otherend_path,
		       "max-request-size", NULL, "%" PRIu32,
		       &sc->max_request_size);

	if (sc->ring_pages > XBF_MAX_RING_PAGES) {
		device_printf(sc->xb_dev, "Back-end specified ring-pages of "
			      "%u limited to front-end limit of %zu.\n",
			      sc->ring_pages, XBF_MAX_RING_PAGES);
		sc->ring_pages = XBF_MAX_RING_PAGES;
	}

	if (powerof2(sc->ring_pages) == 0) {
		uint32_t new_page_limit;

		new_page_limit = 0x01 << (fls(sc->ring_pages) - 1);
		device_printf(sc->xb_dev, "Back-end specified ring-pages of "
			      "%u is not a power of 2. Limited to %u.\n",
			      sc->ring_pages, new_page_limit);
		sc->ring_pages = new_page_limit;
	}

	if (sc->max_requests > XBF_MAX_REQUESTS) {
		device_printf(sc->xb_dev, "Back-end specified max_requests of "
			      "%u limited to front-end limit of %u.\n",
			      sc->max_requests, XBF_MAX_REQUESTS);
		sc->max_requests = XBF_MAX_REQUESTS;
	}

	if (sc->max_request_segments > XBF_MAX_SEGMENTS_PER_REQUEST) {
		device_printf(sc->xb_dev, "Back-end specified "
			      "max_request_segments of %u limited to "
			      "front-end limit of %u.\n",
			      sc->max_request_segments,
			      XBF_MAX_SEGMENTS_PER_REQUEST);
		sc->max_request_segments = XBF_MAX_SEGMENTS_PER_REQUEST;
	}

	if (sc->max_request_size > XBF_MAX_REQUEST_SIZE) {
		device_printf(sc->xb_dev, "Back-end specified "
			      "max_request_size of %u limited to front-end "
			      "limit of %u.\n", sc->max_request_size,
			      XBF_MAX_REQUEST_SIZE);
		sc->max_request_size = XBF_MAX_REQUEST_SIZE;
	}
	sc->max_request_blocks = BLKIF_SEGS_TO_BLOCKS(sc->max_request_segments);

	/* Allocate datastructures based on negotiated values. */
	error = bus_dma_tag_create(NULL,		/* parent */
				   512, PAGE_SIZE,	/* algnmnt, boundary */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,		/* filter, filterarg */
				   sc->max_request_size,
				   sc->max_request_segments,
				   PAGE_SIZE,		/* maxsegsize */
				   BUS_DMA_ALLOCNOW,	/* flags */
				   busdma_lock_mutex,	/* lockfunc */
				   &sc->xb_io_lock,	/* lockarg */
				   &sc->xb_io_dmat);
	if (error != 0) {
		xenbus_dev_fatal(sc->xb_dev, error,
				 "Cannot allocate parent DMA tag\n");
		return;
	}
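	/*
	 * Note: the tag above bounds each transfer by the negotiated
	 * max_request_size/max_request_segments pair and imposes a
	 * PAGE_SIZE boundary on every segment, so no S/G entry handed
	 * to blkif_queue_cb() can cross a page -- the invariant its
	 * "lsect <= 7" assertion relies on.
	 */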
	/* Per-transaction data allocation. */
	sc->shadow = malloc(sizeof(*sc->shadow) * sc->max_requests,
			    M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
	if (sc->shadow == NULL) {
		bus_dma_tag_destroy(sc->xb_io_dmat);
		xenbus_dev_fatal(sc->xb_dev, error,
				 "Cannot allocate request structures\n");
		return;
	}

	for (i = 0; i < sc->max_requests; i++) {
		struct xb_command *cm;

		cm = &sc->shadow[i];
		cm->sg_refs = malloc(sizeof(grant_ref_t)
				   * sc->max_request_segments,
				     M_XENBLOCKFRONT, M_NOWAIT);
		if (cm->sg_refs == NULL)
			break;
		cm->id = i;
		cm->cm_sc = sc;
		if (bus_dmamap_create(sc->xb_io_dmat, 0, &cm->map) != 0)
			break;
		xb_free_command(cm);
	}

	if (setup_blkring(sc) != 0)
		return;

	/* Support both backend schemes for relaying ring page limits. */
	error = xs_printf(XST_NIL, node_path,
			  "num-ring-pages","%u", sc->ring_pages);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
				 "writing %s/num-ring-pages",
				 node_path);
		return;
	}
	error = xs_printf(XST_NIL, node_path,
			  "ring-page-order","%u", fls(sc->ring_pages) - 1);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
				 "writing %s/ring-page-order",
				 node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
			  "max-requests","%u", sc->max_requests);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
				 "writing %s/max-requests",
				 node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
			  "max-request-segments","%u", sc->max_request_segments);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
				 "writing %s/max-request-segments",
				 node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
			  "max-request-size","%u", sc->max_request_size);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
				 "writing %s/max-request-size",
				 node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path, "event-channel",
			  "%u", irq_to_evtchn_port(sc->irq));
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
				 "writing %s/event-channel",
				 node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
			  "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
				 "writing %s/protocol",
				 node_path);
		return;
	}

	xenbus_set_state(sc->xb_dev, XenbusStateInitialised);
}

static int
setup_blkring(struct xb_softc *sc)
{
	blkif_sring_t *sring;
	uintptr_t sring_page_addr;
	int error;
	int i;

	sring = malloc(sc->ring_pages * PAGE_SIZE, M_XENBLOCKFRONT,
		       M_NOWAIT|M_ZERO);
	if (sring == NULL) {
		xenbus_dev_fatal(sc->xb_dev, ENOMEM, "allocating shared ring");
		return (ENOMEM);
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&sc->ring, sring, sc->ring_pages * PAGE_SIZE);

	for (i = 0, sring_page_addr = (uintptr_t)sring;
	     i < sc->ring_pages;
	     i++, sring_page_addr += PAGE_SIZE) {

		error = xenbus_grant_ring(sc->xb_dev,
		    (vtomach(sring_page_addr) >> PAGE_SHIFT), &sc->ring_ref[i]);
		if (error) {
			xenbus_dev_fatal(sc->xb_dev, error,
					 "granting ring_ref(%d)", i);
			return (error);
		}
	}
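	/*
	 * Publish the ring references.  A single-page ring uses the
	 * historical "ring-ref" key; multi-page rings write one
	 * "ring-refN" key per page so either style of backend can pick
	 * them up.
	 */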
	if (sc->ring_pages == 1) {
		error = xs_printf(XST_NIL, xenbus_get_node(sc->xb_dev),
				  "ring-ref", "%u", sc->ring_ref[0]);
		if (error) {
			xenbus_dev_fatal(sc->xb_dev, error,
					 "writing %s/ring-ref",
					 xenbus_get_node(sc->xb_dev));
			return (error);
		}
	} else {
		for (i = 0; i < sc->ring_pages; i++) {
			char ring_ref_name[] = "ring_refXX";

			snprintf(ring_ref_name, sizeof(ring_ref_name),
				 "ring-ref%u", i);
			error = xs_printf(XST_NIL, xenbus_get_node(sc->xb_dev),
					  ring_ref_name, "%u", sc->ring_ref[i]);
			if (error) {
				xenbus_dev_fatal(sc->xb_dev, error,
						 "writing %s/%s",
						 xenbus_get_node(sc->xb_dev),
						 ring_ref_name);
				return (error);
			}
		}
	}

	error = bind_listening_port_to_irqhandler(
	    xenbus_get_otherend_id(sc->xb_dev),
	    "xbd", (driver_intr_t *)blkif_int, sc,
	    INTR_TYPE_BIO | INTR_MPSAFE, &sc->irq);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "bind_listening_port_to_irqhandler failed");
		return (error);
	}

	return (0);
}

/**
 * Callback received when the backend's state changes.
 */
static void
blkfront_backend_changed(device_t dev, XenbusState backend_state)
{
	struct xb_softc *sc = device_get_softc(dev);

	DPRINTK("backend_state=%d\n", backend_state);

	switch (backend_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
	case XenbusStateInitialised:
		blkfront_initialize(sc);
		break;

	case XenbusStateConnected:
		blkfront_initialize(sc);
		blkfront_connect(sc);
		break;

	case XenbusStateClosing:
		if (sc->users > 0)
			xenbus_dev_error(dev, -EBUSY,
					 "Device in use; refusing to close");
		else
			blkfront_closing(dev);
		break;
	}
}

/*
 * Invoked when the backend is finally 'ready' (and has published
 * the details about the physical device - #sectors, size, etc).
 */
static void
blkfront_connect(struct xb_softc *sc)
{
	device_t dev = sc->xb_dev;
	unsigned long sectors, sector_size;
	unsigned int binfo;
	int err, feature_barrier;

	if ((sc->connected == BLKIF_STATE_CONNECTED) ||
	    (sc->connected == BLKIF_STATE_SUSPENDED))
		return;

	DPRINTK("blkfront.c:connect:%s.\n", xenbus_get_otherend_path(dev));

	err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
			"sectors", "%lu", &sectors,
			"info", "%u", &binfo,
			"sector-size", "%lu", &sector_size,
			NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
		    "reading backend fields at %s",
		    xenbus_get_otherend_path(dev));
		return;
	}
	err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
			"feature-barrier", "%lu", &feature_barrier,
			NULL);
	if (!err || feature_barrier)
		sc->xb_flags |= XB_BARRIER;
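	/*
	 * Report the media size in MB.  Dividing the megabyte by the
	 * sector size first (1048576 / sector_size) is equivalent to
	 * sectors * sector_size / 1048576 but keeps the arithmetic
	 * from overflowing for large sector counts.
	 */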
	if (sc->xb_disk == NULL) {
		device_printf(dev, "%juMB <%s> at %s",
		    (uintmax_t) sectors / (1048576 / sector_size),
		    device_get_desc(dev),
		    xenbus_get_node(dev));
		bus_print_child_footer(device_get_parent(dev), dev);

		xlvbd_add(sc, sectors, sc->vdevice, binfo, sector_size);
	}

	(void)xenbus_set_state(dev, XenbusStateConnected);

	/* Kick pending requests. */
	mtx_lock(&sc->xb_io_lock);
	sc->connected = BLKIF_STATE_CONNECTED;
	xb_startio(sc);
	sc->xb_flags |= XB_READY;
	mtx_unlock(&sc->xb_io_lock);
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void
blkfront_closing(device_t dev)
{
	struct xb_softc *sc = device_get_softc(dev);

	xenbus_set_state(dev, XenbusStateClosing);

	DPRINTK("blkfront_closing: %s removed\n", xenbus_get_node(dev));

	if (sc->xb_disk != NULL) {
		disk_destroy(sc->xb_disk);
		sc->xb_disk = NULL;
	}

	xenbus_set_state(dev, XenbusStateClosed);
}

static int
blkfront_detach(device_t dev)
{
	struct xb_softc *sc = device_get_softc(dev);

	DPRINTK("blkfront_remove: %s removed\n", xenbus_get_node(dev));

	blkif_free(sc);
	mtx_destroy(&sc->xb_io_lock);

	return 0;
}

static inline void
flush_requests(struct xb_softc *sc)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->ring, notify);

	if (notify)
		notify_remote_via_irq(sc->irq);
}

static void
blkif_restart_queue_callback(void *arg)
{
	struct xb_softc *sc = arg;

	mtx_lock(&sc->xb_io_lock);

	xb_startio(sc);

	mtx_unlock(&sc->xb_io_lock);
}

static int
blkif_open(struct disk *dp)
{
	struct xb_softc *sc = (struct xb_softc *)dp->d_drv1;

	if (sc == NULL) {
		/* Must not dereference the NULL softc here. */
		printf("xb%d: not found\n", dp->d_unit);
		return (ENXIO);
	}

	sc->xb_flags |= XB_OPEN;
	sc->users++;
	return (0);
}

static int
blkif_close(struct disk *dp)
{
	struct xb_softc *sc = (struct xb_softc *)dp->d_drv1;

	if (sc == NULL)
		return (ENXIO);
	sc->xb_flags &= ~XB_OPEN;
	if (--(sc->users) == 0) {
		/*
		 * Check whether we have been instructed to close.  We will
		 * have ignored this request initially, as the device was
		 * still mounted.
		 */
		if (xenbus_get_otherend_state(sc->xb_dev) == XenbusStateClosing)
			blkfront_closing(sc->xb_dev);
	}
	return (0);
}

static int
blkif_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td)
{
	struct xb_softc *sc = (struct xb_softc *)dp->d_drv1;

	if (sc == NULL)
		return (ENXIO);

	return (ENOTTY);
}

static void
xb_free_command(struct xb_command *cm)
{

	KASSERT((cm->cm_flags & XB_ON_XBQ_MASK) == 0,
	    ("Freeing command that is still on a queue\n"));

	cm->cm_flags = 0;
	cm->bp = NULL;
	cm->cm_complete = NULL;
	xb_enqueue_free(cm);
}
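/*
 * Command lifecycle: commands start on the free queue, move to ready
 * (bio or dump requests awaiting ring space), then to busy once posted
 * on the shared ring, and finally complete either directly through
 * cm_complete or via xb_bio_complete() when the backend responds.
 */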
/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
static struct xb_command *
xb_bio_command(struct xb_softc *sc)
{
	struct xb_command *cm;
	struct bio *bp;

	if (unlikely(sc->connected != BLKIF_STATE_CONNECTED))
		return (NULL);

	bp = xb_dequeue_bio(sc);
	if (bp == NULL)
		return (NULL);

	if ((cm = xb_dequeue_free(sc)) == NULL) {
		xb_requeue_bio(sc, bp);
		return (NULL);
	}

	if (gnttab_alloc_grant_references(sc->max_request_segments,
	    &cm->gref_head) != 0) {
		gnttab_request_free_callback(&sc->callback,
			blkif_restart_queue_callback, sc,
			sc->max_request_segments);
		xb_requeue_bio(sc, bp);
		xb_enqueue_free(cm);
		sc->xb_flags |= XB_FROZEN;
		return (NULL);
	}

	cm->bp = bp;
	cm->data = bp->bio_data;
	cm->datalen = bp->bio_bcount;
	cm->operation = (bp->bio_cmd == BIO_READ) ? BLKIF_OP_READ :
	    BLKIF_OP_WRITE;
	cm->sector_number = (blkif_sector_t)bp->bio_pblkno;

	return (cm);
}

static int
blkif_queue_request(struct xb_softc *sc, struct xb_command *cm)
{
	int error;

	error = bus_dmamap_load(sc->xb_io_dmat, cm->map, cm->data, cm->datalen,
	    blkif_queue_cb, cm, 0);
	if (error == EINPROGRESS) {
		printf("EINPROGRESS\n");
		sc->xb_flags |= XB_FROZEN;
		cm->cm_flags |= XB_CMD_FROZEN;
		return (0);
	}

	return (error);
}
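/*
 * bus_dmamap_load() may return EINPROGRESS when it has to defer the
 * mapping; in that case blkif_queue_cb() runs later from the busdma
 * software interrupt, which is why the command is flagged
 * XB_CMD_FROZEN above and the callback performs its own ring flush.
 */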
static void
blkif_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct xb_softc *sc;
	struct xb_command *cm;
	blkif_request_t *ring_req;
	struct blkif_request_segment *sg;
	struct blkif_request_segment *last_block_sg;
	grant_ref_t *sg_ref;
	vm_paddr_t buffer_ma;
	uint64_t fsect, lsect;
	int ref;
	int op;
	int block_segs;

	cm = arg;
	sc = cm->cm_sc;

	if (error) {
		printf("error %d in blkif_queue_cb\n", error);
		cm->bp->bio_error = EIO;
		biodone(cm->bp);
		xb_free_command(cm);
		return;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&sc->ring, sc->ring.req_prod_pvt);
	sc->ring.req_prod_pvt++;
	ring_req->id = cm->id;
	ring_req->operation = cm->operation;
	ring_req->sector_number = cm->sector_number;
	ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xb_disk;
	ring_req->nr_segments = nsegs;
	cm->nseg = nsegs;

	block_segs    = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK);
	sg            = ring_req->seg;
	last_block_sg = sg + block_segs;
	sg_ref        = cm->sg_refs;

	while (1) {

		while (sg < last_block_sg) {
			buffer_ma = segs->ds_addr;
			fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
			lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;

			KASSERT(lsect <= 7, ("XEN disk driver data cannot "
				"cross a page boundary"));

			/* install a grant reference. */
			ref = gnttab_claim_grant_reference(&cm->gref_head);

			/*
			 * GNTTAB_LIST_END == 0xffffffff, but it is private
			 * to gnttab.c.
			 */
			KASSERT(ref != ~0, ("grant_reference failed"));

			gnttab_grant_foreign_access_ref(
				ref,
				xenbus_get_otherend_id(sc->xb_dev),
				buffer_ma >> PAGE_SHIFT,
				ring_req->operation == BLKIF_OP_WRITE);

			*sg_ref = ref;
			*sg = (struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect };
			sg++;
			sg_ref++;
			segs++;
			nsegs--;
		}
		block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK);
		if (block_segs == 0)
			break;

		sg = BLKRING_GET_SEG_BLOCK(&sc->ring, sc->ring.req_prod_pvt);
		sc->ring.req_prod_pvt++;
		last_block_sg = sg + block_segs;
	}

	if (cm->operation == BLKIF_OP_READ)
		op = BUS_DMASYNC_PREREAD;
	else if (cm->operation == BLKIF_OP_WRITE)
		op = BUS_DMASYNC_PREWRITE;
	else
		op = 0;
	bus_dmamap_sync(sc->xb_io_dmat, cm->map, op);

	gnttab_free_grant_references(cm->gref_head);

	xb_enqueue_busy(cm);

	/*
	 * This flag means that we're probably executing in the busdma swi
	 * instead of in the startio context, so an explicit flush is needed.
	 */
	if (cm->cm_flags & XB_CMD_FROZEN)
		flush_requests(sc);

	return;
}

/*
 * Dequeue buffers and place them in the shared communication ring.
 * Return when no more requests can be accepted or all buffers have
 * been queued.
 *
 * Signal XEN once the ring has been filled out.
 */
static void
xb_startio(struct xb_softc *sc)
{
	struct xb_command *cm;
	int error, queued = 0;

	mtx_assert(&sc->xb_io_lock, MA_OWNED);

	if (sc->connected != BLKIF_STATE_CONNECTED)
		return;

	while (RING_FREE_REQUESTS(&sc->ring) >= sc->max_request_blocks) {
		if (sc->xb_flags & XB_FROZEN)
			break;

		cm = xb_dequeue_ready(sc);

		if (cm == NULL)
			cm = xb_bio_command(sc);

		if (cm == NULL)
			break;

		if ((error = blkif_queue_request(sc, cm)) != 0) {
			printf("blkif_queue_request returned %d\n", error);
			break;
		}
		queued++;
	}

	if (queued != 0)
		flush_requests(sc);
}

static void
blkif_int(void *xsc)
{
	struct xb_softc *sc = xsc;
	struct xb_command *cm;
	blkif_response_t *bret;
	RING_IDX i, rp;
	int op;

	mtx_lock(&sc->xb_io_lock);

	if (unlikely(sc->connected == BLKIF_STATE_DISCONNECTED)) {
		mtx_unlock(&sc->xb_io_lock);
		return;
	}

 again:
	rp = sc->ring.sring->rsp_prod;
	rmb();	/* Ensure we see queued responses up to 'rp'. */

	for (i = sc->ring.rsp_cons; i != rp;) {
		bret = RING_GET_RESPONSE(&sc->ring, i);
		cm   = &sc->shadow[bret->id];

		xb_remove_busy(cm);
		i += blkif_completion(cm);

		if (cm->operation == BLKIF_OP_READ)
			op = BUS_DMASYNC_POSTREAD;
		else if (cm->operation == BLKIF_OP_WRITE)
			op = BUS_DMASYNC_POSTWRITE;
		else
			op = 0;
		bus_dmamap_sync(sc->xb_io_dmat, cm->map, op);
		bus_dmamap_unload(sc->xb_io_dmat, cm->map);

		/*
		 * If commands are completing then resources are probably
		 * being freed as well.  It's a cheap assumption even when
		 * wrong.
		 */
		sc->xb_flags &= ~XB_FROZEN;

		/*
		 * Directly call the i/o complete routine to save an
		 * indirection in the common case.
		 */
		cm->status = bret->status;
		if (cm->bp)
			xb_bio_complete(sc, cm);
		else if (cm->cm_complete)
			(cm->cm_complete)(cm);
		else
			xb_free_command(cm);
	}

	sc->ring.rsp_cons = i;

	if (i != sc->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&sc->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else {
		sc->ring.sring->rsp_event = i + 1;
	}

	xb_startio(sc);

	if (unlikely(sc->connected == BLKIF_STATE_SUSPENDED))
		wakeup(&sc->cm_busy);

	mtx_unlock(&sc->xb_io_lock);
}
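/*
 * blkif_free() is used both on detach and on resume: after a
 * save/restore or migration the grant references and event channel
 * are stale, so blkfront_resume() tears the old channel down here
 * before blkfront_initialize() rebuilds it.
 */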
static void
blkif_free(struct xb_softc *sc)
{
	uint8_t *sring_page_ptr;
	int i;

	/* Prevent new requests being issued until we fix things up. */
	mtx_lock(&sc->xb_io_lock);
	sc->connected = BLKIF_STATE_DISCONNECTED;
	mtx_unlock(&sc->xb_io_lock);

	/* Free resources associated with old device channel. */
	if (sc->ring.sring != NULL) {
		sring_page_ptr = (uint8_t *)sc->ring.sring;
		for (i = 0; i < sc->ring_pages; i++) {
			if (sc->ring_ref[i] != GRANT_INVALID_REF) {
				gnttab_end_foreign_access_ref(sc->ring_ref[i]);
				sc->ring_ref[i] = GRANT_INVALID_REF;
			}
			sring_page_ptr += PAGE_SIZE;
		}
		free(sc->ring.sring, M_XENBLOCKFRONT);
		sc->ring.sring = NULL;
	}

	if (sc->shadow) {

		for (i = 0; i < sc->max_requests; i++) {
			struct xb_command *cm;

			cm = &sc->shadow[i];
			if (cm->sg_refs != NULL) {
				free(cm->sg_refs, M_XENBLOCKFRONT);
				cm->sg_refs = NULL;
			}

			bus_dmamap_destroy(sc->xb_io_dmat, cm->map);
		}
		free(sc->shadow, M_XENBLOCKFRONT);
		sc->shadow = NULL;

		bus_dma_tag_destroy(sc->xb_io_dmat);

		xb_initq_free(sc);
		xb_initq_ready(sc);
		xb_initq_complete(sc);
	}

	if (sc->irq) {
		unbind_from_irqhandler(sc->irq);
		sc->irq = 0;
	}
}

static int
blkif_completion(struct xb_command *s)
{
	gnttab_end_foreign_access_references(s->nseg, s->sg_refs);
	return (BLKIF_SEGS_TO_BLOCKS(s->nseg));
}

/* ** Driver registration ** */
static device_method_t blkfront_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		blkfront_probe),
	DEVMETHOD(device_attach,	blkfront_attach),
	DEVMETHOD(device_detach,	blkfront_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	blkfront_suspend),
	DEVMETHOD(device_resume,	blkfront_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, blkfront_backend_changed),

	{ 0, 0 }
};

static driver_t blkfront_driver = {
	"xbd",
	blkfront_methods,
	sizeof(struct xb_softc),
};
devclass_t blkfront_devclass;

DRIVER_MODULE(xbd, xenbusb_front, blkfront_driver, blkfront_devclass, 0, 0);