1181643Skmacy/* 2196661Skmacy * XenBSD block device driver 3196661Skmacy * 4199960Skmacy * Copyright (c) 2009 Scott Long, Yahoo! 5196661Skmacy * Copyright (c) 2009 Frank Suchomel, Citrix 6199959Skmacy * Copyright (c) 2009 Doug F. Rabson, Citrix 7199959Skmacy * Copyright (c) 2005 Kip Macy 8199959Skmacy * Copyright (c) 2003-2004, Keir Fraser & Steve Hand 9199959Skmacy * Modifications by Mark A. Williamson are (c) Intel Research Cambridge 10199959Skmacy * 11199959Skmacy * 12199959Skmacy * Permission is hereby granted, free of charge, to any person obtaining a copy 13199959Skmacy * of this software and associated documentation files (the "Software"), to 14199959Skmacy * deal in the Software without restriction, including without limitation the 15199959Skmacy * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 16199959Skmacy * sell copies of the Software, and to permit persons to whom the Software is 17199959Skmacy * furnished to do so, subject to the following conditions: 18199959Skmacy * 19199959Skmacy * The above copyright notice and this permission notice shall be included in 20199959Skmacy * all copies or substantial portions of the Software. 21199959Skmacy * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 22199959Skmacy * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 23199959Skmacy * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 24199959Skmacy * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 25199959Skmacy * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 26199959Skmacy * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 27199959Skmacy * DEALINGS IN THE SOFTWARE. 
28181643Skmacy */ 29181643Skmacy 30181643Skmacy#include <sys/cdefs.h> 31181643Skmacy__FBSDID("$FreeBSD$"); 32181643Skmacy 33181643Skmacy#include <sys/param.h> 34181643Skmacy#include <sys/systm.h> 35181643Skmacy#include <sys/malloc.h> 36181643Skmacy#include <sys/kernel.h> 37181643Skmacy#include <vm/vm.h> 38181643Skmacy#include <vm/pmap.h> 39181643Skmacy 40181643Skmacy#include <sys/bio.h> 41181643Skmacy#include <sys/bus.h> 42181643Skmacy#include <sys/conf.h> 43181643Skmacy#include <sys/module.h> 44231851Sgibbs#include <sys/sysctl.h> 45181643Skmacy 46181643Skmacy#include <machine/bus.h> 47181643Skmacy#include <sys/rman.h> 48181643Skmacy#include <machine/resource.h> 49181643Skmacy#include <machine/intr_machdep.h> 50181643Skmacy#include <machine/vmparam.h> 51199960Skmacy#include <sys/bus_dma.h> 52181643Skmacy 53214077Sgibbs#include <machine/_inttypes.h> 54189699Sdfr#include <machine/xen/xen-os.h> 55216956Srwatson#include <machine/xen/xenvar.h> 56189699Sdfr#include <machine/xen/xenfunc.h> 57214077Sgibbs 58186557Skmacy#include <xen/hypervisor.h> 59186557Skmacy#include <xen/xen_intr.h> 60186557Skmacy#include <xen/evtchn.h> 61189699Sdfr#include <xen/gnttab.h> 62181643Skmacy#include <xen/interface/grant_table.h> 63185605Skmacy#include <xen/interface/io/protocols.h> 64185605Skmacy#include <xen/xenbus/xenbusvar.h> 65181643Skmacy 66181643Skmacy#include <geom/geom_disk.h> 67181643Skmacy 68181643Skmacy#include <dev/xen/blkfront/block.h> 69181643Skmacy 70185605Skmacy#include "xenbus_if.h" 71185605Skmacy 72181643Skmacy/* prototypes */ 73199960Skmacystatic void xb_free_command(struct xb_command *cm); 74181643Skmacystatic void xb_startio(struct xb_softc *sc); 75214077Sgibbsstatic void blkfront_connect(struct xb_softc *); 76185605Skmacystatic void blkfront_closing(device_t); 77185605Skmacystatic int blkfront_detach(device_t); 78199960Skmacystatic int setup_blkring(struct xb_softc *); 79181643Skmacystatic void blkif_int(void *); 80214077Sgibbsstatic void blkfront_initialize(struct 
xb_softc *); 81214077Sgibbsstatic int blkif_completion(struct xb_command *); 82225705Sgibbsstatic void blkif_free(struct xb_softc *); 83199960Skmacystatic void blkif_queue_cb(void *, bus_dma_segment_t *, int, int); 84181643Skmacy 85249132Smavstatic MALLOC_DEFINE(M_XENBLOCKFRONT, "xbd", "Xen Block Front driver data"); 86214077Sgibbs 87181643Skmacy#define GRANT_INVALID_REF 0 88181643Skmacy 89181643Skmacy/* Control whether runtime update of vbds is enabled. */ 90181643Skmacy#define ENABLE_VBD_UPDATE 0 91181643Skmacy 92181643Skmacy#if ENABLE_VBD_UPDATE 93181643Skmacystatic void vbd_update(void); 94181643Skmacy#endif 95181643Skmacy 96181643Skmacy#define BLKIF_STATE_DISCONNECTED 0 97181643Skmacy#define BLKIF_STATE_CONNECTED 1 98181643Skmacy#define BLKIF_STATE_SUSPENDED 2 99181643Skmacy 100181643Skmacy#ifdef notyet 101181643Skmacystatic char *blkif_state_name[] = { 102181643Skmacy [BLKIF_STATE_DISCONNECTED] = "disconnected", 103181643Skmacy [BLKIF_STATE_CONNECTED] = "connected", 104181643Skmacy [BLKIF_STATE_SUSPENDED] = "closed", 105181643Skmacy}; 106181643Skmacy 107181643Skmacystatic char * blkif_status_name[] = { 108181643Skmacy [BLKIF_INTERFACE_STATUS_CLOSED] = "closed", 109181643Skmacy [BLKIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected", 110181643Skmacy [BLKIF_INTERFACE_STATUS_CONNECTED] = "connected", 111181643Skmacy [BLKIF_INTERFACE_STATUS_CHANGED] = "changed", 112181643Skmacy}; 113181643Skmacy#endif 114199960Skmacy 115181643Skmacy#if 0 116189699Sdfr#define DPRINTK(fmt, args...) printf("[XEN] %s:%d: " fmt ".\n", __func__, __LINE__, ##args) 117181643Skmacy#else 118181643Skmacy#define DPRINTK(fmt, args...) 
119181643Skmacy#endif 120181643Skmacy 121181643Skmacystatic int blkif_open(struct disk *dp); 122181643Skmacystatic int blkif_close(struct disk *dp); 123181643Skmacystatic int blkif_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td); 124199960Skmacystatic int blkif_queue_request(struct xb_softc *sc, struct xb_command *cm); 125181643Skmacystatic void xb_strategy(struct bio *bp); 126181643Skmacy 127196661Skmacy// In order to quiesce the device during kernel dumps, outstanding requests to 128196661Skmacy// DOM0 for disk reads/writes need to be accounted for. 129196661Skmacystatic int xb_dump(void *, void *, vm_offset_t, off_t, size_t); 130181643Skmacy 131181643Skmacy/* XXX move to xb_vbd.c when VBD update support is added */ 132181643Skmacy#define MAX_VBDS 64 133181643Skmacy 134181643Skmacy#define XBD_SECTOR_SIZE 512 /* XXX: assume for now */ 135181643Skmacy#define XBD_SECTOR_SHFT 9 136181643Skmacy 137185605Skmacy/* 138185605Skmacy * Translate Linux major/minor to an appropriate name and unit 139185605Skmacy * number. For HVM guests, this allows us to use the same drive names 140185605Skmacy * with blkfront as the emulated drives, easing transition slightly. 
141185605Skmacy */ 142185605Skmacystatic void 143231851Sgibbsblkfront_vdevice_to_unit(uint32_t vdevice, int *unit, const char **name) 144185605Skmacy{ 145185605Skmacy static struct vdev_info { 146185605Skmacy int major; 147185605Skmacy int shift; 148185605Skmacy int base; 149185605Skmacy const char *name; 150185605Skmacy } info[] = { 151251973Sgibbs {3, 6, 0, "ada"}, /* ide0 */ 152251973Sgibbs {22, 6, 2, "ada"}, /* ide1 */ 153251973Sgibbs {33, 6, 4, "ada"}, /* ide2 */ 154251973Sgibbs {34, 6, 6, "ada"}, /* ide3 */ 155251973Sgibbs {56, 6, 8, "ada"}, /* ide4 */ 156251973Sgibbs {57, 6, 10, "ada"}, /* ide5 */ 157251973Sgibbs {88, 6, 12, "ada"}, /* ide6 */ 158251973Sgibbs {89, 6, 14, "ada"}, /* ide7 */ 159251973Sgibbs {90, 6, 16, "ada"}, /* ide8 */ 160251973Sgibbs {91, 6, 18, "ada"}, /* ide9 */ 161185605Skmacy 162185605Skmacy {8, 4, 0, "da"}, /* scsi disk0 */ 163185605Skmacy {65, 4, 16, "da"}, /* scsi disk1 */ 164185605Skmacy {66, 4, 32, "da"}, /* scsi disk2 */ 165185605Skmacy {67, 4, 48, "da"}, /* scsi disk3 */ 166185605Skmacy {68, 4, 64, "da"}, /* scsi disk4 */ 167185605Skmacy {69, 4, 80, "da"}, /* scsi disk5 */ 168185605Skmacy {70, 4, 96, "da"}, /* scsi disk6 */ 169185605Skmacy {71, 4, 112, "da"}, /* scsi disk7 */ 170185605Skmacy {128, 4, 128, "da"}, /* scsi disk8 */ 171185605Skmacy {129, 4, 144, "da"}, /* scsi disk9 */ 172185605Skmacy {130, 4, 160, "da"}, /* scsi disk10 */ 173185605Skmacy {131, 4, 176, "da"}, /* scsi disk11 */ 174185605Skmacy {132, 4, 192, "da"}, /* scsi disk12 */ 175185605Skmacy {133, 4, 208, "da"}, /* scsi disk13 */ 176185605Skmacy {134, 4, 224, "da"}, /* scsi disk14 */ 177185605Skmacy {135, 4, 240, "da"}, /* scsi disk15 */ 178185605Skmacy 179185605Skmacy {202, 4, 0, "xbd"}, /* xbd */ 180185605Skmacy 181185605Skmacy {0, 0, 0, NULL}, 182185605Skmacy }; 183185605Skmacy int major = vdevice >> 8; 184185605Skmacy int minor = vdevice & 0xff; 185185605Skmacy int i; 186185605Skmacy 187185605Skmacy if (vdevice & (1 << 28)) { 188185605Skmacy *unit = (vdevice 
& ((1 << 28) - 1)) >> 8; 189185605Skmacy *name = "xbd"; 190231851Sgibbs return; 191185605Skmacy } 192185605Skmacy 193185605Skmacy for (i = 0; info[i].major; i++) { 194185605Skmacy if (info[i].major == major) { 195185605Skmacy *unit = info[i].base + (minor >> info[i].shift); 196185605Skmacy *name = info[i].name; 197185605Skmacy return; 198185605Skmacy } 199185605Skmacy } 200185605Skmacy 201185605Skmacy *unit = minor >> 4; 202185605Skmacy *name = "xbd"; 203185605Skmacy} 204185605Skmacy 205181643Skmacyint 206214077Sgibbsxlvbd_add(struct xb_softc *sc, blkif_sector_t sectors, 207214077Sgibbs int vdevice, uint16_t vdisk_info, unsigned long sector_size) 208181643Skmacy{ 209185605Skmacy int unit, error = 0; 210185605Skmacy const char *name; 211186557Skmacy 212185605Skmacy blkfront_vdevice_to_unit(vdevice, &unit, &name); 213182082Skmacy 214185605Skmacy sc->xb_unit = unit; 215181643Skmacy 216185605Skmacy if (strcmp(name, "xbd")) 217199960Skmacy device_printf(sc->xb_dev, "attaching as %s%d\n", name, unit); 218185605Skmacy 219181643Skmacy sc->xb_disk = disk_alloc(); 220186557Skmacy sc->xb_disk->d_unit = sc->xb_unit; 221181643Skmacy sc->xb_disk->d_open = blkif_open; 222181643Skmacy sc->xb_disk->d_close = blkif_close; 223181643Skmacy sc->xb_disk->d_ioctl = blkif_ioctl; 224181643Skmacy sc->xb_disk->d_strategy = xb_strategy; 225196661Skmacy sc->xb_disk->d_dump = xb_dump; 226186557Skmacy sc->xb_disk->d_name = name; 227181643Skmacy sc->xb_disk->d_drv1 = sc; 228181643Skmacy sc->xb_disk->d_sectorsize = sector_size; 229181643Skmacy 230214077Sgibbs sc->xb_disk->d_mediasize = sectors * sector_size; 231214077Sgibbs sc->xb_disk->d_maxsize = sc->max_request_size; 232181643Skmacy sc->xb_disk->d_flags = 0; 233237873Sken disk_create(sc->xb_disk, DISK_VERSION); 234181643Skmacy 235181643Skmacy return error; 236181643Skmacy} 237181643Skmacy 238181643Skmacy/************************ end VBD support *****************/ 239181643Skmacy 240181643Skmacy/* 241181643Skmacy * Read/write routine for a 
buffer. Finds the proper unit, place it on 242181643Skmacy * the sortq and kick the controller. 243181643Skmacy */ 244181643Skmacystatic void 245181643Skmacyxb_strategy(struct bio *bp) 246181643Skmacy{ 247181643Skmacy struct xb_softc *sc = (struct xb_softc *)bp->bio_disk->d_drv1; 248181643Skmacy 249181643Skmacy /* bogus disk? */ 250181643Skmacy if (sc == NULL) { 251181643Skmacy bp->bio_error = EINVAL; 252181643Skmacy bp->bio_flags |= BIO_ERROR; 253199960Skmacy bp->bio_resid = bp->bio_bcount; 254199960Skmacy biodone(bp); 255199960Skmacy return; 256181643Skmacy } 257181643Skmacy 258181643Skmacy /* 259181643Skmacy * Place it in the queue of disk activities for this disk 260181643Skmacy */ 261199960Skmacy mtx_lock(&sc->xb_io_lock); 262196661Skmacy 263199960Skmacy xb_enqueue_bio(sc, bp); 264196661Skmacy xb_startio(sc); 265181643Skmacy 266199960Skmacy mtx_unlock(&sc->xb_io_lock); 267181643Skmacy return; 268199960Skmacy} 269181643Skmacy 270199960Skmacystatic void 271199960Skmacyxb_bio_complete(struct xb_softc *sc, struct xb_command *cm) 272199960Skmacy{ 273199960Skmacy struct bio *bp; 274199960Skmacy 275199960Skmacy bp = cm->bp; 276199960Skmacy 277199960Skmacy if ( unlikely(cm->status != BLKIF_RSP_OKAY) ) { 278199960Skmacy disk_err(bp, "disk error" , -1, 0); 279199960Skmacy printf(" status: %x\n", cm->status); 280199960Skmacy bp->bio_flags |= BIO_ERROR; 281199960Skmacy } 282199960Skmacy 283199960Skmacy if (bp->bio_flags & BIO_ERROR) 284199960Skmacy bp->bio_error = EIO; 285199960Skmacy else 286199960Skmacy bp->bio_resid = 0; 287199960Skmacy 288199960Skmacy xb_free_command(cm); 289181643Skmacy biodone(bp); 290181643Skmacy} 291181643Skmacy 292196661Skmacy// Quiesce the disk writes for a dump file before allowing the next buffer. 
293196661Skmacystatic void 294199960Skmacyxb_quiesce(struct xb_softc *sc) 295196661Skmacy{ 296196661Skmacy int mtd; 297196661Skmacy 298196661Skmacy // While there are outstanding requests 299199960Skmacy while (!TAILQ_EMPTY(&sc->cm_busy)) { 300199960Skmacy RING_FINAL_CHECK_FOR_RESPONSES(&sc->ring, mtd); 301196661Skmacy if (mtd) { 302199960Skmacy /* Recieved request completions, update queue. */ 303199960Skmacy blkif_int(sc); 304196661Skmacy } 305199960Skmacy if (!TAILQ_EMPTY(&sc->cm_busy)) { 306199960Skmacy /* 307199960Skmacy * Still pending requests, wait for the disk i/o 308199960Skmacy * to complete. 309199960Skmacy */ 310199734Skmacy HYPERVISOR_yield(); 311196661Skmacy } 312196661Skmacy } 313196661Skmacy} 314196661Skmacy 315199960Skmacy/* Kernel dump function for a paravirtualized disk device */ 316199960Skmacystatic void 317199960Skmacyxb_dump_complete(struct xb_command *cm) 318199960Skmacy{ 319196661Skmacy 320199960Skmacy xb_enqueue_complete(cm); 321199960Skmacy} 322199960Skmacy 323185605Skmacystatic int 324196661Skmacyxb_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset, 325196661Skmacy size_t length) 326196661Skmacy{ 327199960Skmacy struct disk *dp = arg; 328199960Skmacy struct xb_softc *sc = (struct xb_softc *) dp->d_drv1; 329199960Skmacy struct xb_command *cm; 330199960Skmacy size_t chunk; 331199960Skmacy int sbp; 332199960Skmacy int rc = 0; 333196661Skmacy 334199960Skmacy if (length <= 0) 335199960Skmacy return (rc); 336196661Skmacy 337199960Skmacy xb_quiesce(sc); /* All quiet on the western front. */ 338196661Skmacy 339199960Skmacy /* 340199960Skmacy * If this lock is held, then this module is failing, and a 341199960Skmacy * successful kernel dump is highly unlikely anyway. 
342199960Skmacy */ 343199960Skmacy mtx_lock(&sc->xb_io_lock); 344199960Skmacy 345199960Skmacy /* Split the 64KB block as needed */ 346199960Skmacy for (sbp=0; length > 0; sbp++) { 347199960Skmacy cm = xb_dequeue_free(sc); 348199960Skmacy if (cm == NULL) { 349199960Skmacy mtx_unlock(&sc->xb_io_lock); 350199960Skmacy device_printf(sc->xb_dev, "dump: no more commands?\n"); 351199960Skmacy return (EBUSY); 352196661Skmacy } 353196661Skmacy 354214077Sgibbs if (gnttab_alloc_grant_references(sc->max_request_segments, 355214077Sgibbs &cm->gref_head) != 0) { 356199960Skmacy xb_free_command(cm); 357199960Skmacy mtx_unlock(&sc->xb_io_lock); 358199960Skmacy device_printf(sc->xb_dev, "no more grant allocs?\n"); 359199960Skmacy return (EBUSY); 360196661Skmacy } 361199960Skmacy 362214077Sgibbs chunk = length > sc->max_request_size 363214077Sgibbs ? sc->max_request_size : length; 364199960Skmacy cm->data = virtual; 365199960Skmacy cm->datalen = chunk; 366199960Skmacy cm->operation = BLKIF_OP_WRITE; 367199960Skmacy cm->sector_number = offset / dp->d_sectorsize; 368199960Skmacy cm->cm_complete = xb_dump_complete; 369199960Skmacy 370199960Skmacy xb_enqueue_ready(cm); 371199960Skmacy 372199960Skmacy length -= chunk; 373199960Skmacy offset += chunk; 374199960Skmacy virtual = (char *) virtual + chunk; 375196661Skmacy } 376199960Skmacy 377199960Skmacy /* Tell DOM0 to do the I/O */ 378199960Skmacy xb_startio(sc); 379199960Skmacy mtx_unlock(&sc->xb_io_lock); 380199960Skmacy 381199960Skmacy /* Poll for the completion. */ 382199960Skmacy xb_quiesce(sc); /* All quite on the eastern front */ 383199960Skmacy 384199960Skmacy /* If there were any errors, bail out... 
*/ 385199960Skmacy while ((cm = xb_dequeue_complete(sc)) != NULL) { 386199960Skmacy if (cm->status != BLKIF_RSP_OKAY) { 387199960Skmacy device_printf(sc->xb_dev, 388199960Skmacy "Dump I/O failed at sector %jd\n", 389199960Skmacy cm->sector_number); 390199960Skmacy rc = EIO; 391199960Skmacy } 392199960Skmacy xb_free_command(cm); 393199960Skmacy } 394199960Skmacy 395196661Skmacy return (rc); 396196661Skmacy} 397196661Skmacy 398196661Skmacy 399196661Skmacystatic int 400185605Skmacyblkfront_probe(device_t dev) 401185605Skmacy{ 402181643Skmacy 403185605Skmacy if (!strcmp(xenbus_get_type(dev), "vbd")) { 404185605Skmacy device_set_desc(dev, "Virtual Block Device"); 405185605Skmacy device_quiet(dev); 406185605Skmacy return (0); 407185605Skmacy } 408181643Skmacy 409185605Skmacy return (ENXIO); 410185605Skmacy} 411185605Skmacy 412231851Sgibbsstatic void 413231851Sgibbsxb_setup_sysctl(struct xb_softc *xb) 414231851Sgibbs{ 415231851Sgibbs struct sysctl_ctx_list *sysctl_ctx = NULL; 416231851Sgibbs struct sysctl_oid *sysctl_tree = NULL; 417231851Sgibbs 418231851Sgibbs sysctl_ctx = device_get_sysctl_ctx(xb->xb_dev); 419231851Sgibbs if (sysctl_ctx == NULL) 420231851Sgibbs return; 421231851Sgibbs 422231851Sgibbs sysctl_tree = device_get_sysctl_tree(xb->xb_dev); 423231851Sgibbs if (sysctl_tree == NULL) 424231851Sgibbs return; 425231851Sgibbs 426231851Sgibbs SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 427231851Sgibbs "max_requests", CTLFLAG_RD, &xb->max_requests, -1, 428231851Sgibbs "maximum outstanding requests (negotiated)"); 429231851Sgibbs 430231851Sgibbs SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 431231851Sgibbs "max_request_segments", CTLFLAG_RD, 432231851Sgibbs &xb->max_request_segments, 0, 433231851Sgibbs "maximum number of pages per requests (negotiated)"); 434231851Sgibbs 435231851Sgibbs SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 436231851Sgibbs "max_request_size", CTLFLAG_RD, 437231851Sgibbs 
&xb->max_request_size, 0, 438231851Sgibbs "maximum size in bytes of a request (negotiated)"); 439231851Sgibbs 440231851Sgibbs SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 441231851Sgibbs "ring_pages", CTLFLAG_RD, 442231851Sgibbs &xb->ring_pages, 0, 443231851Sgibbs "communication channel pages (negotiated)"); 444231851Sgibbs} 445231851Sgibbs 446185605Skmacy/* 447185605Skmacy * Setup supplies the backend dir, virtual device. We place an event 448185605Skmacy * channel and shared frame entries. We watch backend to wait if it's 449185605Skmacy * ok. 450185605Skmacy */ 451185605Skmacystatic int 452185605Skmacyblkfront_attach(device_t dev) 453181643Skmacy{ 454199960Skmacy struct xb_softc *sc; 455199960Skmacy const char *name; 456231851Sgibbs uint32_t vdevice; 457214077Sgibbs int error; 458214077Sgibbs int i; 459214077Sgibbs int unit; 460181643Skmacy 461181643Skmacy /* FIXME: Use dynamic device id if this is not set. */ 462214077Sgibbs error = xs_scanf(XST_NIL, xenbus_get_node(dev), 463231851Sgibbs "virtual-device", NULL, "%" PRIu32, &vdevice); 464189699Sdfr if (error) { 465189699Sdfr xenbus_dev_fatal(dev, error, "reading virtual-device"); 466214077Sgibbs device_printf(dev, "Couldn't determine virtual device.\n"); 467189699Sdfr return (error); 468181643Skmacy } 469181643Skmacy 470185605Skmacy blkfront_vdevice_to_unit(vdevice, &unit, &name); 471185605Skmacy if (!strcmp(name, "xbd")) 472185605Skmacy device_set_unit(dev, unit); 473185605Skmacy 474199960Skmacy sc = device_get_softc(dev); 475199960Skmacy mtx_init(&sc->xb_io_lock, "blkfront i/o lock", NULL, MTX_DEF); 476199960Skmacy xb_initq_free(sc); 477199960Skmacy xb_initq_busy(sc); 478199960Skmacy xb_initq_ready(sc); 479199960Skmacy xb_initq_complete(sc); 480199960Skmacy xb_initq_bio(sc); 481214077Sgibbs for (i = 0; i < XBF_MAX_RING_PAGES; i++) 482214077Sgibbs sc->ring_ref[i] = GRANT_INVALID_REF; 483181643Skmacy 484199960Skmacy sc->xb_dev = dev; 485199960Skmacy sc->vdevice = vdevice; 486199960Skmacy 
sc->connected = BLKIF_STATE_DISCONNECTED; 487199960Skmacy 488231851Sgibbs xb_setup_sysctl(sc); 489231851Sgibbs 490214077Sgibbs /* Wait for backend device to publish its protocol capabilities. */ 491214077Sgibbs xenbus_set_state(dev, XenbusStateInitialising); 492181643Skmacy 493185605Skmacy return (0); 494181643Skmacy} 495181643Skmacy 496185605Skmacystatic int 497189699Sdfrblkfront_suspend(device_t dev) 498189699Sdfr{ 499199960Skmacy struct xb_softc *sc = device_get_softc(dev); 500225705Sgibbs int retval; 501225705Sgibbs int saved_state; 502189699Sdfr 503189699Sdfr /* Prevent new requests being issued until we fix things up. */ 504199960Skmacy mtx_lock(&sc->xb_io_lock); 505225705Sgibbs saved_state = sc->connected; 506199960Skmacy sc->connected = BLKIF_STATE_SUSPENDED; 507225705Sgibbs 508225705Sgibbs /* Wait for outstanding I/O to drain. */ 509225705Sgibbs retval = 0; 510225705Sgibbs while (TAILQ_EMPTY(&sc->cm_busy) == 0) { 511225705Sgibbs if (msleep(&sc->cm_busy, &sc->xb_io_lock, 512225705Sgibbs PRIBIO, "blkf_susp", 30 * hz) == EWOULDBLOCK) { 513225705Sgibbs retval = EBUSY; 514225705Sgibbs break; 515225705Sgibbs } 516225705Sgibbs } 517199960Skmacy mtx_unlock(&sc->xb_io_lock); 518189699Sdfr 519225705Sgibbs if (retval != 0) 520225705Sgibbs sc->connected = saved_state; 521225705Sgibbs 522225705Sgibbs return (retval); 523189699Sdfr} 524189699Sdfr 525189699Sdfrstatic int 526185605Skmacyblkfront_resume(device_t dev) 527181643Skmacy{ 528199960Skmacy struct xb_softc *sc = device_get_softc(dev); 529181643Skmacy 530189699Sdfr DPRINTK("blkfront_resume: %s\n", xenbus_get_node(dev)); 531181643Skmacy 532225705Sgibbs blkif_free(sc); 533214077Sgibbs blkfront_initialize(sc); 534214077Sgibbs return (0); 535181643Skmacy} 536181643Skmacy 537214077Sgibbsstatic void 538214077Sgibbsblkfront_initialize(struct xb_softc *sc) 539181643Skmacy{ 540214077Sgibbs const char *otherend_path; 541214077Sgibbs const char *node_path; 542231851Sgibbs uint32_t max_ring_page_order; 543214077Sgibbs int 
error; 544214077Sgibbs int i; 545181643Skmacy 546225705Sgibbs if (xenbus_get_state(sc->xb_dev) != XenbusStateInitialising) { 547225705Sgibbs /* Initialization has already been performed. */ 548225705Sgibbs return; 549225705Sgibbs } 550181643Skmacy 551214077Sgibbs /* 552214077Sgibbs * Protocol defaults valid even if negotiation for a 553214077Sgibbs * setting fails. 554214077Sgibbs */ 555231851Sgibbs max_ring_page_order = 0; 556214077Sgibbs sc->ring_pages = 1; 557214077Sgibbs sc->max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK; 558231851Sgibbs sc->max_request_size = XBF_SEGS_TO_SIZE(sc->max_request_segments); 559214077Sgibbs sc->max_request_blocks = BLKIF_SEGS_TO_BLOCKS(sc->max_request_segments); 560214077Sgibbs 561214077Sgibbs /* 562214077Sgibbs * Protocol negotiation. 563214077Sgibbs * 564214077Sgibbs * \note xs_gather() returns on the first encountered error, so 565214077Sgibbs * we must use independant calls in order to guarantee 566214077Sgibbs * we don't miss information in a sparsly populated back-end 567214077Sgibbs * tree. 568231851Sgibbs * 569231851Sgibbs * \note xs_scanf() does not update variables for unmatched 570231851Sgibbs * fields. 571214077Sgibbs */ 572214077Sgibbs otherend_path = xenbus_get_otherend_path(sc->xb_dev); 573214077Sgibbs node_path = xenbus_get_node(sc->xb_dev); 574231851Sgibbs 575231851Sgibbs /* Support both backend schemes for relaying ring page limits. 
*/ 576214077Sgibbs (void)xs_scanf(XST_NIL, otherend_path, 577231851Sgibbs "max-ring-page-order", NULL, "%" PRIu32, 578231851Sgibbs &max_ring_page_order); 579231851Sgibbs sc->ring_pages = 1 << max_ring_page_order; 580231851Sgibbs (void)xs_scanf(XST_NIL, otherend_path, 581214077Sgibbs "max-ring-pages", NULL, "%" PRIu32, 582214077Sgibbs &sc->ring_pages); 583231851Sgibbs if (sc->ring_pages < 1) 584231851Sgibbs sc->ring_pages = 1; 585214077Sgibbs 586231851Sgibbs sc->max_requests = BLKIF_MAX_RING_REQUESTS(sc->ring_pages * PAGE_SIZE); 587214077Sgibbs (void)xs_scanf(XST_NIL, otherend_path, 588214077Sgibbs "max-requests", NULL, "%" PRIu32, 589214077Sgibbs &sc->max_requests); 590214077Sgibbs 591214077Sgibbs (void)xs_scanf(XST_NIL, otherend_path, 592214077Sgibbs "max-request-segments", NULL, "%" PRIu32, 593214077Sgibbs &sc->max_request_segments); 594214077Sgibbs 595214077Sgibbs (void)xs_scanf(XST_NIL, otherend_path, 596214077Sgibbs "max-request-size", NULL, "%" PRIu32, 597214077Sgibbs &sc->max_request_size); 598214077Sgibbs 599214077Sgibbs if (sc->ring_pages > XBF_MAX_RING_PAGES) { 600214077Sgibbs device_printf(sc->xb_dev, "Back-end specified ring-pages of " 601214077Sgibbs "%u limited to front-end limit of %zu.\n", 602214077Sgibbs sc->ring_pages, XBF_MAX_RING_PAGES); 603214077Sgibbs sc->ring_pages = XBF_MAX_RING_PAGES; 604181643Skmacy } 605181643Skmacy 606231851Sgibbs if (powerof2(sc->ring_pages) == 0) { 607231851Sgibbs uint32_t new_page_limit; 608231851Sgibbs 609231851Sgibbs new_page_limit = 0x01 << (fls(sc->ring_pages) - 1); 610231851Sgibbs device_printf(sc->xb_dev, "Back-end specified ring-pages of " 611231851Sgibbs "%u is not a power of 2. 
Limited to %u.\n", 612231851Sgibbs sc->ring_pages, new_page_limit); 613231851Sgibbs sc->ring_pages = new_page_limit; 614231851Sgibbs } 615231851Sgibbs 616214077Sgibbs if (sc->max_requests > XBF_MAX_REQUESTS) { 617214077Sgibbs device_printf(sc->xb_dev, "Back-end specified max_requests of " 618214077Sgibbs "%u limited to front-end limit of %u.\n", 619214077Sgibbs sc->max_requests, XBF_MAX_REQUESTS); 620214077Sgibbs sc->max_requests = XBF_MAX_REQUESTS; 621181643Skmacy } 622214077Sgibbs 623214077Sgibbs if (sc->max_request_segments > XBF_MAX_SEGMENTS_PER_REQUEST) { 624231851Sgibbs device_printf(sc->xb_dev, "Back-end specified " 625231851Sgibbs "max_request_segments of %u limited to " 626214077Sgibbs "front-end limit of %u.\n", 627214077Sgibbs sc->max_request_segments, 628214077Sgibbs XBF_MAX_SEGMENTS_PER_REQUEST); 629214077Sgibbs sc->max_request_segments = XBF_MAX_SEGMENTS_PER_REQUEST; 630181643Skmacy } 631214077Sgibbs 632214077Sgibbs if (sc->max_request_size > XBF_MAX_REQUEST_SIZE) { 633231851Sgibbs device_printf(sc->xb_dev, "Back-end specified " 634214077Sgibbs "max_request_size of %u limited to front-end " 635214077Sgibbs "limit of %u.\n", sc->max_request_size, 636214077Sgibbs XBF_MAX_REQUEST_SIZE); 637214077Sgibbs sc->max_request_size = XBF_MAX_REQUEST_SIZE; 638185605Skmacy } 639231851Sgibbs 640231851Sgibbs if (sc->max_request_size > XBF_SEGS_TO_SIZE(sc->max_request_segments)) { 641231851Sgibbs device_printf(sc->xb_dev, "Back-end specified " 642231851Sgibbs "max_request_size of %u limited to front-end " 643231851Sgibbs "limit of %u. (Too few segments.)\n", 644231851Sgibbs sc->max_request_size, 645231851Sgibbs XBF_SEGS_TO_SIZE(sc->max_request_segments)); 646231851Sgibbs sc->max_request_size = 647231851Sgibbs XBF_SEGS_TO_SIZE(sc->max_request_segments); 648231851Sgibbs } 649231851Sgibbs 650214077Sgibbs sc->max_request_blocks = BLKIF_SEGS_TO_BLOCKS(sc->max_request_segments); 651189699Sdfr 652214077Sgibbs /* Allocate datastructures based on negotiated values. 
*/ 653233024Sscottl error = bus_dma_tag_create(bus_get_dma_tag(sc->xb_dev), /* parent */ 654214077Sgibbs 512, PAGE_SIZE, /* algnmnt, boundary */ 655214077Sgibbs BUS_SPACE_MAXADDR, /* lowaddr */ 656214077Sgibbs BUS_SPACE_MAXADDR, /* highaddr */ 657214077Sgibbs NULL, NULL, /* filter, filterarg */ 658214077Sgibbs sc->max_request_size, 659214077Sgibbs sc->max_request_segments, 660214077Sgibbs PAGE_SIZE, /* maxsegsize */ 661214077Sgibbs BUS_DMA_ALLOCNOW, /* flags */ 662214077Sgibbs busdma_lock_mutex, /* lockfunc */ 663214077Sgibbs &sc->xb_io_lock, /* lockarg */ 664214077Sgibbs &sc->xb_io_dmat); 665214077Sgibbs if (error != 0) { 666214077Sgibbs xenbus_dev_fatal(sc->xb_dev, error, 667214077Sgibbs "Cannot allocate parent DMA tag\n"); 668214077Sgibbs return; 669181643Skmacy } 670181643Skmacy 671214077Sgibbs /* Per-transaction data allocation. */ 672214077Sgibbs sc->shadow = malloc(sizeof(*sc->shadow) * sc->max_requests, 673214077Sgibbs M_XENBLOCKFRONT, M_NOWAIT|M_ZERO); 674214077Sgibbs if (sc->shadow == NULL) { 675225705Sgibbs bus_dma_tag_destroy(sc->xb_io_dmat); 676214077Sgibbs xenbus_dev_fatal(sc->xb_dev, error, 677214077Sgibbs "Cannot allocate request structures\n"); 678225705Sgibbs return; 679214077Sgibbs } 680214077Sgibbs 681214077Sgibbs for (i = 0; i < sc->max_requests; i++) { 682214077Sgibbs struct xb_command *cm; 683214077Sgibbs 684214077Sgibbs cm = &sc->shadow[i]; 685214077Sgibbs cm->sg_refs = malloc(sizeof(grant_ref_t) 686214077Sgibbs * sc->max_request_segments, 687214077Sgibbs M_XENBLOCKFRONT, M_NOWAIT); 688214077Sgibbs if (cm->sg_refs == NULL) 689214077Sgibbs break; 690214077Sgibbs cm->id = i; 691214077Sgibbs cm->cm_sc = sc; 692214077Sgibbs if (bus_dmamap_create(sc->xb_io_dmat, 0, &cm->map) != 0) 693214077Sgibbs break; 694214077Sgibbs xb_free_command(cm); 695214077Sgibbs } 696214077Sgibbs 697214077Sgibbs if (setup_blkring(sc) != 0) 698214077Sgibbs return; 699214077Sgibbs 700231851Sgibbs /* Support both backend schemes for relaying ring page limits. 
 */
	/*
	 * NOTE(review): this is the tail of a function whose header lies
	 * above this chunk; "node_path" is presumably our device's own
	 * xenstore directory -- confirm against the full file.
	 *
	 * For multi-page rings, advertise the page count both as
	 * "num-ring-pages" and as the equivalent power-of-two
	 * "ring-page-order" so backends speaking either variant of the
	 * extension can size the ring.
	 */
	if (sc->ring_pages > 1) {
		error = xs_printf(XST_NIL, node_path,
		    "num-ring-pages","%u", sc->ring_pages);
		if (error) {
			xenbus_dev_fatal(sc->xb_dev, error,
			    "writing %s/num-ring-pages",
			    node_path);
			return;
		}

		/* fls(x) - 1 == log2(x) when ring_pages is a power of 2. */
		error = xs_printf(XST_NIL, node_path,
		    "ring-page-order", "%u",
		    fls(sc->ring_pages) - 1);
		if (error) {
			xenbus_dev_fatal(sc->xb_dev, error,
			    "writing %s/ring-page-order",
			    node_path);
			return;
		}
	}

	/* Publish our request count, segment, and size limits. */
	error = xs_printf(XST_NIL, node_path,
	    "max-requests","%u", sc->max_requests);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "writing %s/max-requests",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
	    "max-request-segments","%u", sc->max_request_segments);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "writing %s/max-request-segments",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
	    "max-request-size","%u", sc->max_request_size);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "writing %s/max-request-size",
		    node_path);
		return;
	}

	/* Tell the backend which event channel to use for this ring. */
	error = xs_printf(XST_NIL, node_path, "event-channel",
	    "%u", irq_to_evtchn_port(sc->irq));
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "writing %s/event-channel",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
	    "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "writing %s/protocol",
		    node_path);
		return;
	}

	xenbus_set_state(sc->xb_dev, XenbusStateInitialised);
}

/*
 * Allocate the shared request ring, grant the backend access to each of
 * its pages, publish the grant reference(s) in the xenstore, and bind
 * blkif_int() to a fresh event channel for response processing.
 *
 * Returns 0 on success or an errno.  On failure after allocation the
 * ring is not freed here: FRONT_RING_INIT() has stored the pointer in
 * sc->ring.sring, so the teardown path (blkif_free()) reclaims it.
 */
static int
setup_blkring(struct xb_softc *sc)
{
	blkif_sring_t *sring;
	uintptr_t sring_page_addr;
	int error;
	int i;

	sring = malloc(sc->ring_pages * PAGE_SIZE, M_XENBLOCKFRONT,
	    M_NOWAIT|M_ZERO);
	if (sring == NULL) {
		xenbus_dev_fatal(sc->xb_dev, ENOMEM, "allocating shared ring");
		return (ENOMEM);
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&sc->ring, sring, sc->ring_pages * PAGE_SIZE);

	/* Grant the backend read/write access to every page of the ring. */
	for (i = 0, sring_page_addr = (uintptr_t)sring;
	     i < sc->ring_pages;
	     i++, sring_page_addr += PAGE_SIZE) {

		error = xenbus_grant_ring(sc->xb_dev,
		    (vtomach(sring_page_addr) >> PAGE_SHIFT), &sc->ring_ref[i]);
		if (error) {
			xenbus_dev_fatal(sc->xb_dev, error,
			    "granting ring_ref(%d)", i);
			return (error);
		}
	}
	if (sc->ring_pages == 1) {
		/* Single-page rings use the original "ring-ref" node. */
		error = xs_printf(XST_NIL, xenbus_get_node(sc->xb_dev),
		    "ring-ref", "%u", sc->ring_ref[0]);
		if (error) {
			xenbus_dev_fatal(sc->xb_dev, error,
			    "writing %s/ring-ref",
			    xenbus_get_node(sc->xb_dev));
			return (error);
		}
	} else {
		/* Multi-page rings publish one "ring-refNN" node per page. */
		for (i = 0; i < sc->ring_pages; i++) {
			/*
			 * The initializer only sizes the buffer ("XX" leaves
			 * room for two digits); snprintf() below writes the
			 * real, dash-separated node name.
			 */
			char ring_ref_name[]= "ring_refXX";

			snprintf(ring_ref_name, sizeof(ring_ref_name),
			    "ring-ref%u", i);
			error = xs_printf(XST_NIL, xenbus_get_node(sc->xb_dev),
			    ring_ref_name, "%u", sc->ring_ref[i]);
			if (error) {
				xenbus_dev_fatal(sc->xb_dev, error,
				    "writing %s/%s",
				    xenbus_get_node(sc->xb_dev),
				    ring_ref_name);
				return (error);
			}
		}
	}

	/* Bind an interrupt handler to a new listening event channel. */
	error = bind_listening_port_to_irqhandler(
	    xenbus_get_otherend_id(sc->xb_dev),
	    "xbd", (driver_intr_t *)blkif_int, sc,
	    INTR_TYPE_BIO | INTR_MPSAFE, &sc->irq);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "bind_evtchn_to_irqhandler failed");
		return (error);
	}

	return (0);
}

/**
 * Callback received when the backend's state changes.
 */
static void
blkfront_backend_changed(device_t dev, XenbusState backend_state)
{
	struct xb_softc *sc = device_get_softc(dev);

	DPRINTK("backend_state=%d\n", backend_state);

	switch (backend_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
	case XenbusStateClosed:
		/* No action required for these states. */
		break;

	case XenbusStateInitWait:
	case XenbusStateInitialised:
		/* Backend is (re)negotiating: (re)publish our parameters. */
		blkfront_initialize(sc);
		break;

	case XenbusStateConnected:
		/*
		 * Make sure our side of the ring is set up before
		 * completing the connection and kicking pending I/O.
		 */
		blkfront_initialize(sc);
		blkfront_connect(sc);
		break;

	case XenbusStateClosing:
		/*
		 * Refuse to tear down while the disk is still open;
		 * blkif_close() re-checks the backend state on last close
		 * and finishes the shutdown then.
		 *
		 * NOTE(review): -EBUSY is a negated, Linux-style errno;
		 * FreeBSD xenbus calls normally take a positive errno --
		 * verify against xenbus_dev_error()'s contract.
		 */
		if (sc->users > 0)
			xenbus_dev_error(dev, -EBUSY,
			    "Device in use; refusing to close");
		else
			blkfront_closing(dev);
		break;
	}
}

/*
** Invoked when the backend is finally 'ready' (and has published
** the details about the physical device - #sectors, size, etc).
880181643Skmacy*/ 881181643Skmacystatic void 882214077Sgibbsblkfront_connect(struct xb_softc *sc) 883181643Skmacy{ 884199960Skmacy device_t dev = sc->xb_dev; 885181643Skmacy unsigned long sectors, sector_size; 886181643Skmacy unsigned int binfo; 887199960Skmacy int err, feature_barrier; 888181643Skmacy 889231851Sgibbs if( (sc->connected == BLKIF_STATE_CONNECTED) || 890199960Skmacy (sc->connected == BLKIF_STATE_SUSPENDED) ) 891181643Skmacy return; 892181643Skmacy 893185605Skmacy DPRINTK("blkfront.c:connect:%s.\n", xenbus_get_otherend_path(dev)); 894181643Skmacy 895214077Sgibbs err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev), 896214077Sgibbs "sectors", "%lu", §ors, 897214077Sgibbs "info", "%u", &binfo, 898214077Sgibbs "sector-size", "%lu", §or_size, 899214077Sgibbs NULL); 900181643Skmacy if (err) { 901185605Skmacy xenbus_dev_fatal(dev, err, 902185605Skmacy "reading backend fields at %s", 903185605Skmacy xenbus_get_otherend_path(dev)); 904181643Skmacy return; 905181643Skmacy } 906214077Sgibbs err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev), 907214077Sgibbs "feature-barrier", "%lu", &feature_barrier, 908214077Sgibbs NULL); 909199960Skmacy if (!err || feature_barrier) 910199960Skmacy sc->xb_flags |= XB_BARRIER; 911181643Skmacy 912225705Sgibbs if (sc->xb_disk == NULL) { 913225705Sgibbs device_printf(dev, "%juMB <%s> at %s", 914225705Sgibbs (uintmax_t) sectors / (1048576 / sector_size), 915225705Sgibbs device_get_desc(dev), 916225705Sgibbs xenbus_get_node(dev)); 917225705Sgibbs bus_print_child_footer(device_get_parent(dev), dev); 918181643Skmacy 919225705Sgibbs xlvbd_add(sc, sectors, sc->vdevice, binfo, sector_size); 920225705Sgibbs } 921181643Skmacy 922185605Skmacy (void)xenbus_set_state(dev, XenbusStateConnected); 923185605Skmacy 924181643Skmacy /* Kick pending requests. 
*/ 925199960Skmacy mtx_lock(&sc->xb_io_lock); 926199960Skmacy sc->connected = BLKIF_STATE_CONNECTED; 927199960Skmacy xb_startio(sc); 928199960Skmacy sc->xb_flags |= XB_READY; 929199960Skmacy mtx_unlock(&sc->xb_io_lock); 930181643Skmacy} 931181643Skmacy 932181643Skmacy/** 933181643Skmacy * Handle the change of state of the backend to Closing. We must delete our 934181643Skmacy * device-layer structures now, to ensure that writes are flushed through to 935201138Sgibbs * the backend. Once this is done, we can switch to Closed in 936181643Skmacy * acknowledgement. 937181643Skmacy */ 938185605Skmacystatic void 939185605Skmacyblkfront_closing(device_t dev) 940181643Skmacy{ 941199960Skmacy struct xb_softc *sc = device_get_softc(dev); 942181643Skmacy 943214077Sgibbs xenbus_set_state(dev, XenbusStateClosing); 944214077Sgibbs 945185605Skmacy DPRINTK("blkfront_closing: %s removed\n", xenbus_get_node(dev)); 946181643Skmacy 947214077Sgibbs if (sc->xb_disk != NULL) { 948214077Sgibbs disk_destroy(sc->xb_disk); 949214077Sgibbs sc->xb_disk = NULL; 950181643Skmacy } 951181643Skmacy 952214077Sgibbs xenbus_set_state(dev, XenbusStateClosed); 953181643Skmacy} 954181643Skmacy 955181643Skmacy 956185605Skmacystatic int 957185605Skmacyblkfront_detach(device_t dev) 958181643Skmacy{ 959199960Skmacy struct xb_softc *sc = device_get_softc(dev); 960181643Skmacy 961185605Skmacy DPRINTK("blkfront_remove: %s removed\n", xenbus_get_node(dev)); 962181643Skmacy 963225705Sgibbs blkif_free(sc); 964199960Skmacy mtx_destroy(&sc->xb_io_lock); 965181643Skmacy 966181643Skmacy return 0; 967181643Skmacy} 968181643Skmacy 969181643Skmacy 970181643Skmacystatic inline void 971199960Skmacyflush_requests(struct xb_softc *sc) 972181643Skmacy{ 973181643Skmacy int notify; 974181643Skmacy 975199960Skmacy RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->ring, notify); 976181643Skmacy 977181643Skmacy if (notify) 978199960Skmacy notify_remote_via_irq(sc->irq); 979181643Skmacy} 980181643Skmacy 981214077Sgibbsstatic void 
982214077Sgibbsblkif_restart_queue_callback(void *arg) 983181643Skmacy{ 984199960Skmacy struct xb_softc *sc = arg; 985181643Skmacy 986214077Sgibbs mtx_lock(&sc->xb_io_lock); 987214077Sgibbs 988199960Skmacy xb_startio(sc); 989214077Sgibbs 990214077Sgibbs mtx_unlock(&sc->xb_io_lock); 991181643Skmacy} 992181643Skmacy 993181643Skmacystatic int 994181643Skmacyblkif_open(struct disk *dp) 995181643Skmacy{ 996181643Skmacy struct xb_softc *sc = (struct xb_softc *)dp->d_drv1; 997181643Skmacy 998181643Skmacy if (sc == NULL) { 999189699Sdfr printf("xb%d: not found", sc->xb_unit); 1000181643Skmacy return (ENXIO); 1001181643Skmacy } 1002181643Skmacy 1003181643Skmacy sc->xb_flags |= XB_OPEN; 1004199960Skmacy sc->users++; 1005181643Skmacy return (0); 1006181643Skmacy} 1007181643Skmacy 1008181643Skmacystatic int 1009181643Skmacyblkif_close(struct disk *dp) 1010181643Skmacy{ 1011181643Skmacy struct xb_softc *sc = (struct xb_softc *)dp->d_drv1; 1012181643Skmacy 1013181643Skmacy if (sc == NULL) 1014181643Skmacy return (ENXIO); 1015181643Skmacy sc->xb_flags &= ~XB_OPEN; 1016199960Skmacy if (--(sc->users) == 0) { 1017231851Sgibbs /* 1018231851Sgibbs * Check whether we have been instructed to close. We will 1019231851Sgibbs * have ignored this request initially, as the device was 1020231851Sgibbs * still mounted. 
1021231851Sgibbs */ 1022231851Sgibbs if (xenbus_get_otherend_state(sc->xb_dev) == XenbusStateClosing) 1023231851Sgibbs blkfront_closing(sc->xb_dev); 1024181643Skmacy } 1025181643Skmacy return (0); 1026181643Skmacy} 1027181643Skmacy 1028181643Skmacystatic int 1029181643Skmacyblkif_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td) 1030181643Skmacy{ 1031181643Skmacy struct xb_softc *sc = (struct xb_softc *)dp->d_drv1; 1032181643Skmacy 1033181643Skmacy if (sc == NULL) 1034181643Skmacy return (ENXIO); 1035181643Skmacy 1036181643Skmacy return (ENOTTY); 1037181643Skmacy} 1038181643Skmacy 1039199960Skmacystatic void 1040199960Skmacyxb_free_command(struct xb_command *cm) 1041199960Skmacy{ 1042181643Skmacy 1043199960Skmacy KASSERT((cm->cm_flags & XB_ON_XBQ_MASK) == 0, 1044199960Skmacy ("Freeing command that is still on a queue\n")); 1045199960Skmacy 1046199960Skmacy cm->cm_flags = 0; 1047199960Skmacy cm->bp = NULL; 1048199960Skmacy cm->cm_complete = NULL; 1049199960Skmacy xb_enqueue_free(cm); 1050199960Skmacy} 1051199960Skmacy 1052181643Skmacy/* 1053181643Skmacy * blkif_queue_request 1054181643Skmacy * 1055181643Skmacy * request block io 1056181643Skmacy * 1057181643Skmacy * id: for guest use only. 1058181643Skmacy * operation: BLKIF_OP_{READ,WRITE,PROBE} 1059181643Skmacy * buffer: buffer to read/write into. this should be a 1060181643Skmacy * virtual address in the guest os. 
 */
/*
 * Pull the next bio off the pending queue and pair it with a free
 * command structure plus enough grant references for a maximal request.
 * Returns NULL (re-queueing the bio) if not connected, no command is
 * free, or grants are exhausted; in the grant case the queue is frozen
 * and blkif_restart_queue_callback() is registered to thaw it.
 */
static struct xb_command *
xb_bio_command(struct xb_softc *sc)
{
	struct xb_command *cm;
	struct bio *bp;

	if (unlikely(sc->connected != BLKIF_STATE_CONNECTED))
		return (NULL);

	bp = xb_dequeue_bio(sc);
	if (bp == NULL)
		return (NULL);

	if ((cm = xb_dequeue_free(sc)) == NULL) {
		xb_requeue_bio(sc, bp);
		return (NULL);
	}

	if (gnttab_alloc_grant_references(sc->max_request_segments,
	    &cm->gref_head) != 0) {
		/* Ask to be called back once grants are available again. */
		gnttab_request_free_callback(&sc->callback,
		    blkif_restart_queue_callback, sc,
		    sc->max_request_segments);
		xb_requeue_bio(sc, bp);
		xb_enqueue_free(cm);
		sc->xb_flags |= XB_FROZEN;
		return (NULL);
	}

	cm->bp = bp;
	cm->data = bp->bio_data;
	cm->datalen = bp->bio_bcount;
	/* Only READ and WRITE bios reach this queue. */
	cm->operation = (bp->bio_cmd == BIO_READ) ?
	    BLKIF_OP_READ : BLKIF_OP_WRITE;
	cm->sector_number = (blkif_sector_t)bp->bio_pblkno;

	return (cm);
}

/*
 * Start the busdma mapping of a command's data buffer.  The actual ring
 * insertion happens in blkif_queue_cb() once segments are known; if the
 * mapping is deferred (EINPROGRESS) the queue is frozen until the
 * callback runs, and 0 is returned since the request is still in flight.
 */
static int
blkif_queue_request(struct xb_softc *sc, struct xb_command *cm)
{
	int error;

	error = bus_dmamap_load(sc->xb_io_dmat, cm->map, cm->data, cm->datalen,
	    blkif_queue_cb, cm, 0);
	if (error == EINPROGRESS) {
		printf("EINPROGRESS\n");
		sc->xb_flags |= XB_FROZEN;
		cm->cm_flags |= XB_CMD_FROZEN;
		return (0);
	}

	return (error);
}

/*
 * busdma callback: translate the DMA segment list into one blkif ring
 * request (header block plus as many segment blocks as needed), grant
 * the backend access to every data page, and mark the command busy.
 * Runs with segment data valid only for the duration of the call.
 */
static void
blkif_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct xb_softc *sc;
	struct xb_command *cm;
	blkif_request_t *ring_req;
	struct blkif_request_segment *sg;
	struct blkif_request_segment *last_block_sg;
	grant_ref_t *sg_ref;
	vm_paddr_t buffer_ma;
	uint64_t fsect, lsect;
	int ref;
	int op;
	int block_segs;

	cm = arg;
	sc = cm->cm_sc;

//printf("%s: Start\n", __func__);
	if (error) {
		/* Mapping failed: fail the bio rather than stalling it. */
		printf("error %d in blkif_queue_cb\n", error);
		cm->bp->bio_error = EIO;
		biodone(cm->bp);
		xb_free_command(cm);
		return;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&sc->ring, sc->ring.req_prod_pvt);
	sc->ring.req_prod_pvt++;
	ring_req->id = cm->id;
	ring_req->operation = cm->operation;
	ring_req->sector_number = cm->sector_number;
	ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xb_disk;
	/* assumes nsegs <= sc->max_request_segments, enforced by the DMA
	 * tag configured elsewhere -- TODO confirm */
	ring_req->nr_segments = nsegs;
	cm->nseg = nsegs;

	/*
	 * The first BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK segments live in
	 * the request itself; any overflow goes into dedicated segment
	 * blocks that occupy subsequent ring slots.
	 */
	block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK);
	sg = ring_req->seg;
	last_block_sg = sg + block_segs;
	sg_ref = cm->sg_refs;

	while (1) {

		while (sg < last_block_sg) {
			buffer_ma = segs->ds_addr;
			/* First/last 512-byte sector touched in this page. */
			fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
			lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;

			KASSERT(lsect <= 7, ("XEN disk driver data cannot "
			    "cross a page boundary"));

			/* install a grant reference. */
			ref = gnttab_claim_grant_reference(&cm->gref_head);

			/*
			 * GNTTAB_LIST_END == 0xffffffff, but it is private
			 * to gnttab.c.
			 */
			KASSERT(ref != ~0, ("grant_reference failed"));

			/* Read-only grant for writes to the device. */
			gnttab_grant_foreign_access_ref(
			    ref,
			    xenbus_get_otherend_id(sc->xb_dev),
			    buffer_ma >> PAGE_SHIFT,
			    ring_req->operation == BLKIF_OP_WRITE);

			*sg_ref = ref;
			*sg = (struct blkif_request_segment) {
			    .gref       = ref,
			    .first_sect = fsect,
			    .last_sect  = lsect };
			sg++;
			sg_ref++;
			segs++;
			nsegs--;
		}
		block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK);
		if (block_segs == 0)
			break;

		/* Claim the next ring slot as an extra segment block. */
		sg = BLKRING_GET_SEG_BLOCK(&sc->ring, sc->ring.req_prod_pvt);
		sc->ring.req_prod_pvt++;
		last_block_sg = sg + block_segs;
	}

	if (cm->operation == BLKIF_OP_READ)
		op = BUS_DMASYNC_PREREAD;
	else if (cm->operation == BLKIF_OP_WRITE)
		op = BUS_DMASYNC_PREWRITE;
	else
		op = 0;
	bus_dmamap_sync(sc->xb_io_dmat, cm->map, op);

	/* Return any grant references we claimed but did not use. */
	gnttab_free_grant_references(cm->gref_head);

	xb_enqueue_busy(cm);

	/*
	 * This flag means that we're probably executing in the busdma swi
	 * instead of in the startio context, so an explicit flush is needed.
	 */
	if (cm->cm_flags & XB_CMD_FROZEN)
		flush_requests(sc);

//printf("%s: Done\n", __func__);
	return;
}

/*
 * Dequeue buffers and place them in the shared communication ring.
 * Return when no more requests can be accepted or all buffers have
 * been queued.
 *
 * Signal XEN once the ring has been filled out.
 */
static void
xb_startio(struct xb_softc *sc)
{
	struct xb_command *cm;
	int error, queued = 0;

	mtx_assert(&sc->xb_io_lock, MA_OWNED);

	if (sc->connected != BLKIF_STATE_CONNECTED)
		return;

	/* Stop while a worst-case request would no longer fit. */
	while (RING_FREE_REQUESTS(&sc->ring) >= sc->max_request_blocks) {
		if (sc->xb_flags & XB_FROZEN)
			break;

		/* Prefer previously deferred commands over new bios. */
		cm = xb_dequeue_ready(sc);

		if (cm == NULL)
			cm = xb_bio_command(sc);

		if (cm == NULL)
			break;

		if ((error = blkif_queue_request(sc, cm)) != 0) {
			printf("blkif_queue_request returned %d\n", error);
			break;
		}
		queued++;
	}

	if (queued != 0)
		flush_requests(sc);
}

/*
 * Event-channel interrupt handler: drain completed responses from the
 * ring, sync/unload their DMA maps, complete the associated bios, and
 * restart queued I/O.
 */
static void
blkif_int(void *xsc)
{
	struct xb_softc *sc = xsc;
	struct xb_command *cm;
	blkif_response_t *bret;
	RING_IDX i, rp;
	int op;

	mtx_lock(&sc->xb_io_lock);
	/* Spurious interrupt after teardown: nothing to process. */
	if (unlikely(sc->connected == BLKIF_STATE_DISCONNECTED)) {
		mtx_unlock(&sc->xb_io_lock);
		return;
	}

 again:
	rp = sc->ring.sring->rsp_prod;
	rmb();	/* Ensure we see queued responses up to 'rp'. */

	for (i = sc->ring.rsp_cons; i != rp;) {
		bret = RING_GET_RESPONSE(&sc->ring, i);
		cm = &sc->shadow[bret->id];

		xb_remove_busy(cm);
		/*
		 * blkif_completion() returns how many ring slots this
		 * request consumed (header plus extra segment blocks), so
		 * the consumer index can skip past all of them.
		 */
		i += blkif_completion(cm);

		if (cm->operation == BLKIF_OP_READ)
			op = BUS_DMASYNC_POSTREAD;
		else if (cm->operation == BLKIF_OP_WRITE)
			op = BUS_DMASYNC_POSTWRITE;
		else
			op = 0;
		bus_dmamap_sync(sc->xb_io_dmat, cm->map, op);
		bus_dmamap_unload(sc->xb_io_dmat, cm->map);

		/*
		 * If commands are completing then resources are probably
		 * being freed as well.  It's a cheap assumption even when
		 * wrong.
		 */
		sc->xb_flags &= ~XB_FROZEN;

		/*
		 * Directly call the i/o complete routine to save an
		 * an indirection in the common case.
		 */
		cm->status = bret->status;
		if (cm->bp)
			xb_bio_complete(sc, cm);
		else if (cm->cm_complete)
			(cm->cm_complete)(cm);
		else
			xb_free_command(cm);
	}

	sc->ring.rsp_cons = i;

	/*
	 * Re-check for responses that raced in while we were draining;
	 * otherwise arm rsp_event so the backend notifies us of the next
	 * response.
	 */
	if (i != sc->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&sc->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else {
		sc->ring.sring->rsp_event = i + 1;
	}

	xb_startio(sc);

	/* A suspend may be waiting for the busy queue to drain. */
	if (unlikely(sc->connected == BLKIF_STATE_SUSPENDED))
		wakeup(&sc->cm_busy);

	mtx_unlock(&sc->xb_io_lock);
}

/*
 * Tear down the connection to the backend: revoke ring grants, free the
 * shared ring and the shadow command array, and unbind the interrupt.
 * Safe to call on a partially initialized softc (all steps are guarded).
 */
static void
blkif_free(struct xb_softc *sc)
{
	uint8_t *sring_page_ptr;
	int i;

	/* Prevent new requests being issued until we fix things up. */
	mtx_lock(&sc->xb_io_lock);
	sc->connected = BLKIF_STATE_DISCONNECTED;
	mtx_unlock(&sc->xb_io_lock);

	/* Free resources associated with old device channel.
 */
	if (sc->ring.sring != NULL) {
		/*
		 * NOTE(review): sring_page_ptr is computed and advanced but
		 * never read -- dead code; the grants are revoked by index.
		 */
		sring_page_ptr = (uint8_t *)sc->ring.sring;
		for (i = 0; i < sc->ring_pages; i++) {
			if (sc->ring_ref[i] != GRANT_INVALID_REF) {
				gnttab_end_foreign_access_ref(sc->ring_ref[i]);
				sc->ring_ref[i] = GRANT_INVALID_REF;
			}
			sring_page_ptr += PAGE_SIZE;
		}
		free(sc->ring.sring, M_XENBLOCKFRONT);
		sc->ring.sring = NULL;
	}

	if (sc->shadow) {

		/* Release per-command grant-ref arrays and DMA maps. */
		for (i = 0; i < sc->max_requests; i++) {
			struct xb_command *cm;

			cm = &sc->shadow[i];
			if (cm->sg_refs != NULL) {
				free(cm->sg_refs, M_XENBLOCKFRONT);
				cm->sg_refs = NULL;
			}

			bus_dmamap_destroy(sc->xb_io_dmat, cm->map);
		}
		free(sc->shadow, M_XENBLOCKFRONT);
		sc->shadow = NULL;

		bus_dma_tag_destroy(sc->xb_io_dmat);

		/* Reset the command queues now that the commands are gone. */
		xb_initq_free(sc);
		xb_initq_ready(sc);
		xb_initq_complete(sc);
	}

	if (sc->irq) {
		unbind_from_irqhandler(sc->irq);
		sc->irq = 0;
	}
}

/*
 * Revoke the data-page grants of a completed request and report how
 * many ring slots (header + segment blocks) it occupied.
 */
static int
blkif_completion(struct xb_command *s)
{
//printf("%s: Req %p(%d)\n", __func__, s, s->nseg);
	gnttab_end_foreign_access_references(s->nseg, s->sg_refs);
	return (BLKIF_SEGS_TO_BLOCKS(s->nseg));
}

/* ** Driver registration ** */
static device_method_t blkfront_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		blkfront_probe),
	DEVMETHOD(device_attach,	blkfront_attach),
	DEVMETHOD(device_detach,	blkfront_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	blkfront_suspend),
	DEVMETHOD(device_resume,	blkfront_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, blkfront_backend_changed),

	{ 0, 0 }
};

static driver_t blkfront_driver = {
	"xbd",
	blkfront_methods,
	sizeof(struct xb_softc),
};
devclass_t blkfront_devclass;

DRIVER_MODULE(xbd, xenbusb_front, blkfront_driver, blkfront_devclass, 0, 0);