/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/scatterlist.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct blk_shadow {
	struct blkif_request req;
	unsigned long request;
	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

static const struct block_device_operations xlvbd_block_fops;

#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
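/*
 * Ring geometry note (not from the original source): the shared ring
 * lives in a single page, and __CONST_RING_SIZE rounds the slot count
 * down to a power of two.  Assuming 4 KiB pages and the standard blkif
 * request layout, this works out to 32 slots, each describing one
 * request of up to BLKIF_MAX_SEGMENTS_PER_REQUEST page-sized segments.
 */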
/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure.  We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref;
	struct blkif_front_ring ring;
	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_RING_SIZE];
	unsigned long shadow_free;
	int feature_barrier;
	int is_ready;
};

static DEFINE_SPINLOCK(blkif_io_lock);

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))

#define DEV_NAME	"xvd"	/* name in /dev */

static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free >= BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.id;
	info->shadow[free].req.id = 0x0fffffee; /* debug */
	return free;
}

static void add_id_to_freelist(struct blkfront_info *info,
			       unsigned long id)
{
	info->shadow[id].req.id  = info->shadow_free;
	info->shadow[id].request = 0;
	info->shadow_free = id;
}

static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		for (; minor < end; ++minor)
			__set_bit(minor, minors);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	for (; minor < end; ++minor)
		__clear_bit(minor, minors);
	spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}
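/*
 * Worked example for the fake geometry above: a 16 GiB disk is
 * 33554432 sectors, so with 255 heads and 63 sectors per track we
 * report 33554432 / (255 * 63) = 2088 cylinders.  If the true
 * cylinder count overflows the 16-bit hd_geometry field, the final
 * check catches the truncation and pins the value at 0xffff.
 */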
static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	unsigned long buffer_mfn;
	struct blkif_request *ring_req;
	unsigned long id;
	unsigned int fsect, lsect;
	int i, ref;
	grant_ref_t gref_head;
	struct scatterlist *sg;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (gnttab_alloc_grant_references(
		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		gnttab_request_free_callback(
			&info->callback,
			blkif_restart_queue_callback,
			info,
			BLKIF_MAX_SEGMENTS_PER_REQUEST);
		return 1;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = (unsigned long)req;

	ring_req->id = id;
	ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
	ring_req->handle = info->handle;

	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;
	if (req->cmd_flags & REQ_HARDBARRIER)
		ring_req->operation = BLKIF_OP_WRITE_BARRIER;

	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);

	for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
		buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
		fsect = sg->offset >> 9;
		lsect = fsect + (sg->length >> 9) - 1;
		/* install a grant reference. */
		ref = gnttab_claim_grant_reference(&gref_head);
		BUG_ON(ref == -ENOSPC);

		gnttab_grant_foreign_access_ref(
				ref,
				info->xbdev->otherend_id,
				buffer_mfn,
				rq_data_dir(req));

		info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
		ring_req->seg[i] =
				(struct blkif_request_segment) {
					.gref       = ref,
					.first_sect = fsect,
					.last_sect  = lsect };
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	gnttab_free_grant_references(gref_head);

	return 0;
}
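/*
 * Grant lifecycle for the request built above: one grant reference per
 * segment lets the backend map the guest page for the duration of the
 * I/O, and the references are revoked in blkif_completion() when the
 * response arrives.  If no references are available, we register
 * blkif_restart_queue_callback() and return 1, so do_blkif_request()
 * requeues the request and stops the queue until grants free up.
 */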
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}

/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if (req->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}

static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
	struct request_queue *rq;

	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
	if (rq == NULL)
		return -1;

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, sector_size);
	blk_queue_max_hw_sectors(rq, 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

	gd->queue = rq;

	return 0;
}
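/*
 * Sizing note for the limits above: the segment limits are the binding
 * constraint, not the 512-sector cap.  Assuming 4 KiB pages and the
 * usual BLKIF_MAX_SEGMENTS_PER_REQUEST of 11, a merged request tops
 * out at 11 * 4 KiB = 44 KiB (88 sectors), which is guaranteed to fit
 * in a single ring slot.
 */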
static int xlvbd_barrier(struct blkfront_info *info)
{
	int err;
	const char *barrier;

	switch (info->feature_barrier) {
	case QUEUE_ORDERED_DRAIN:	barrier = "enabled (drain)"; break;
	case QUEUE_ORDERED_TAG:		barrier = "enabled (tag)"; break;
	case QUEUE_ORDERED_NONE:	barrier = "disabled"; break;
	default:			return -EINVAL;
	}

	err = blk_queue_ordered(info->rq, info->feature_barrier);

	if (err)
		return err;

	printk(KERN_INFO "blkfront: %s: barriers %s\n",
	       info->gd->disk_name, barrier);
	return 0;
}


static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err = -ENODEV;
	unsigned int offset;
	int minor;
	int nr_parts;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		minor = BLKIF_MINOR(info->vdevice);
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	offset = minor / nr_parts;

	if (nr_minors > 1) {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
		else
			sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
				'a' + ((offset / 26)-1), 'a' + (offset % 26));
	} else {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
				'a' + offset,
				minor & (nr_parts - 1));
		else
			sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
				'a' + ((offset / 26) - 1),
				'a' + (offset % 26),
				minor & (nr_parts - 1));
	}

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size)) {
		del_gendisk(gd);
		goto release;
	}

	info->rq = gd->queue;
	info->gd = gd;

	xlvbd_barrier(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 release:
	xlbd_release_minors(minor, nr_minors);
 out:
	return err;
}

static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors;
	unsigned long flags;

	if (info->rq == NULL)
		return;

	spin_lock_irqsave(&blkif_io_lock, flags);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&blkif_io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}
static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&blkif_io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);
}

static void blkif_free(struct blkfront_info *info, int suspend)
{
	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_stop_queue(info->rq);
	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&blkif_io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;

}

static void blkif_completion(struct blk_shadow *s)
{
	int i;
	for (i = 0; i < s->req.nr_segments; i++)
		gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
}

static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	int error;

	spin_lock_irqsave(&blkif_io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&blkif_io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id = bret->id;
		req = (struct request *)info->shadow[id].request;

		blkif_completion(&info->shadow[id]);

		add_id_to_freelist(info, id);

		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
				       info->gd->disk_name);
				error = -EOPNOTSUPP;
				info->feature_barrier = QUEUE_ORDERED_NONE;
				xlvbd_barrier(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			__blk_end_request_all(req, error);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&blkif_io_lock, flags);

	return IRQ_HANDLED;
}
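/*
 * The 'again' loop above follows the standard Xen ring protocol:
 * RING_FINAL_CHECK_FOR_RESPONSES() re-arms rsp_event before looking
 * for stragglers, so a response that races with the end of the scan is
 * either picked up by the re-check or raises a fresh event
 * notification - it cannot be lost.
 */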
static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	struct blkif_sring *sring;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
	if (err < 0) {
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn,
					blkif_interrupt,
					IRQF_SAMPLE_RANDOM, "blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}


/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref", "%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}
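/*
 * Illustration of what the transaction above publishes in xenstore
 * under the frontend's nodename (paths and values are examples only):
 *
 *   device/vbd/51712/ring-ref      = "8"
 *   device/vbd/51712/event-channel = "9"
 *   device/vbd/51712/protocol      = "x86_64-abi"  (ABI-dependent)
 *
 * The backend reads these keys to map the shared ring and bind the
 * event channel from its side.
 */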
/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	if (xen_hvm_domain()) {
		char *type;
		int len;
		/* no unplug has been done: do not hook devices != xen vbds */
		if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
			int major;

			if (!VDEV_IS_EXTENDED(vdevice))
				major = BLKIF_MAJOR(vdevice);
			else
				major = XENVBD_MAJOR;

			if (major != XENVBD_MAJOR) {
				printk(KERN_INFO
					"%s: HVM does not support vbd %d as xen block device\n",
					__FUNCTION__, vdevice);
				return -ENODEV;
			}
		}
		/* do not create a PV cdrom device if we are an HVM guest */
		type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
		if (IS_ERR(type))
			return -ENODEV;
		if (strncmp(type, "cdrom", 5) == 0) {
			kfree(type);
			return -ENODEV;
		}
		kfree(type);
	}
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	mutex_init(&info->mutex);
	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue);

	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	err = talk_to_blkback(dev, info);
	if (err) {
		kfree(info);
		dev_set_drvdata(&dev->dev, NULL);
		return err;
	}

	return 0;
}
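/*
 * The initialisation loop above threads the shadow free list through
 * the otherwise-unused req.id fields: entry i points to i+1 and the
 * last entry holds the 0x0fffffff terminator, which is what lets
 * get_id_from_freelist()/add_id_to_freelist() run in O(1) with no
 * separate list structure.
 */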
static int blkif_recover(struct blkfront_info *info)
{
	int i;
	struct blkif_request *req;
	struct blk_shadow *copy;
	int j;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmalloc(sizeof(info->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (copy[i].request == 0)
			continue;

		/* Grab a request slot and copy shadow state into it. */
		req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
		*req = copy[i].req;

		/* We get a new request id, and must reset the shadow state. */
		req->id = get_id_from_freelist(info);
		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

		/* Rewrite any grant references invalidated by susp/resume. */
		for (j = 0; j < req->nr_segments; j++)
			gnttab_grant_foreign_access_ref(
				req->seg[j].gref,
				info->xbdev->otherend_id,
				pfn_to_mfn(info->shadow[req->id].frame[j]),
				rq_data_dir(
					(struct request *)
					info->shadow[req->id].request));
		info->shadow[req->id].req = *req;

		info->ring.req_prod_pvt++;
	}

	kfree(copy);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&blkif_io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Send off requeued requests */
	flush_requests(info);

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);

	spin_unlock_irq(&blkif_io_lock);

	return 0;
}
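/*
 * The re-granting in stage 3 above is needed because a save/restore
 * cycle gives the domain new machine frames: the MFNs cached in the
 * old grant entries are stale, so each segment's grant is rebuilt from
 * the PFNs preserved in the shadow before the request is reissued.
 */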
/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_blkback(dev, info);
	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
		err = blkif_recover(info);

	return err;
}

static void
blkfront_closing(struct blkfront_info *info)
{
	struct xenbus_device *xbdev = info->xbdev;
	struct block_device *bdev = NULL;

	mutex_lock(&info->mutex);

	if (xbdev->state == XenbusStateClosing) {
		mutex_unlock(&info->mutex);
		return;
	}

	if (info->gd)
		bdev = bdget_disk(info->gd, 0);

	mutex_unlock(&info->mutex);

	if (!bdev) {
		xenbus_frontend_closed(xbdev);
		return;
	}

	mutex_lock(&bdev->bd_mutex);

	if (bdev->bd_openers) {
		xenbus_dev_error(xbdev, -EBUSY,
				 "Device in use; refusing to close");
		xenbus_switch_state(xbdev, XenbusStateClosing);
	} else {
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(xbdev);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
}

/*
 * Invoked when the backend is finally 'ready' (and has produced the
 * details about the physical device - #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int binfo;
	int err;
	int barrier;

	switch (info->connected) {
	case BLKIF_STATE_CONNECTED:
		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
		if (XENBUS_EXIST_ERR(err))
			return;
		printk(KERN_INFO "Setting capacity to %Lu\n",
		       sectors);
		set_capacity(info->gd, sectors);
		revalidate_disk(info->gd);

		/* fall through */
	case BLKIF_STATE_SUSPENDED:
		return;

	default:
		break;
	}

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%d", &barrier,
			    NULL);

	/*
	 * If there's no "feature-barrier" defined, then it means
	 * we're dealing with a very old backend which writes
	 * synchronously; draining will do what needs to get done.
	 *
	 * If there are barriers, then we can do full queued writes
	 * with tagged barriers.
	 *
	 * If barriers are not supported, then there's not much we can
	 * do, so just set ordering to NONE.
	 */
	if (err)
		info->feature_barrier = QUEUE_ORDERED_DRAIN;
	else if (barrier)
		info->feature_barrier = QUEUE_ORDERED_TAG;
	else
		info->feature_barrier = QUEUE_ORDERED_NONE;

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);

	add_disk(info->gd);

	info->is_ready = 1;
}
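/*
 * Rough map of the backend states handled below:
 *
 *   XenbusStateConnected -> blkfront_connect(): read the disk
 *       parameters, create the gendisk and mark ourselves connected.
 *   XenbusStateClosing   -> blkfront_closing(): tear down, or defer
 *       with -EBUSY while the block device is still open.
 *
 * The remaining states require no frontend action.
 */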
/**
 * Callback received when the backend's state changes.
 */
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		blkfront_connect(info);
		break;

	case XenbusStateClosing:
		blkfront_closing(info);
		break;
	}
}

static int blkfront_remove(struct xenbus_device *xbdev)
{
	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
	struct block_device *bdev = NULL;
	struct gendisk *disk;

	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

	blkif_free(info, 0);

	mutex_lock(&info->mutex);

	disk = info->gd;
	if (disk)
		bdev = bdget_disk(disk, 0);

	info->xbdev = NULL;
	mutex_unlock(&info->mutex);

	if (!bdev) {
		kfree(info);
		return 0;
	}

	/*
	 * The xbdev was removed before we reached the Closed
	 * state. See if it's safe to remove the disk. If the bdev
	 * isn't closed yet, we let release take care of it.
	 */

	mutex_lock(&bdev->bd_mutex);
	info = disk->private_data;

	dev_warn(disk_to_dev(disk),
		 "%s was hot-unplugged, %d stale handles\n",
		 xbdev->nodename, bdev->bd_openers);

	if (info && !bdev->bd_openers) {
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);

	return 0;
}
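/*
 * Note the split ownership after a hot-unplug: if the bdev was still
 * open when blkfront_remove() ran, info->xbdev is cleared but 'info'
 * itself survives, and the final blkif_release() performs the deferred
 * xlvbd_release_gendisk()/kfree() instead.
 */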
1169 */ 1170 1171 mutex_lock(&bdev->bd_mutex); 1172 info = disk->private_data; 1173 1174 dev_warn(disk_to_dev(disk), 1175 "%s was hot-unplugged, %d stale handles\n", 1176 xbdev->nodename, bdev->bd_openers); 1177 1178 if (info && !bdev->bd_openers) { 1179 xlvbd_release_gendisk(info); 1180 disk->private_data = NULL; 1181 kfree(info); 1182 } 1183 1184 mutex_unlock(&bdev->bd_mutex); 1185 bdput(bdev); 1186 1187 return 0; 1188} 1189 1190static int blkfront_is_ready(struct xenbus_device *dev) 1191{ 1192 struct blkfront_info *info = dev_get_drvdata(&dev->dev); 1193 1194 return info->is_ready && info->xbdev; 1195} 1196 1197static int blkif_open(struct block_device *bdev, fmode_t mode) 1198{ 1199 struct gendisk *disk = bdev->bd_disk; 1200 struct blkfront_info *info; 1201 int err = 0; 1202 1203 lock_kernel(); 1204 1205 info = disk->private_data; 1206 if (!info) { 1207 /* xbdev gone */ 1208 err = -ERESTARTSYS; 1209 goto out; 1210 } 1211 1212 mutex_lock(&info->mutex); 1213 1214 if (!info->gd) 1215 /* xbdev is closed */ 1216 err = -ERESTARTSYS; 1217 1218 mutex_unlock(&info->mutex); 1219 1220out: 1221 unlock_kernel(); 1222 return err; 1223} 1224 1225static int blkif_release(struct gendisk *disk, fmode_t mode) 1226{ 1227 struct blkfront_info *info = disk->private_data; 1228 struct block_device *bdev; 1229 struct xenbus_device *xbdev; 1230 1231 lock_kernel(); 1232 1233 bdev = bdget_disk(disk, 0); 1234 bdput(bdev); 1235 1236 if (bdev->bd_openers) 1237 goto out; 1238 1239 /* 1240 * Check if we have been instructed to close. We will have 1241 * deferred this request, because the bdev was still open. 1242 */ 1243 1244 mutex_lock(&info->mutex); 1245 xbdev = info->xbdev; 1246 1247 if (xbdev && xbdev->state == XenbusStateClosing) { 1248 /* pending switch to state closed */ 1249 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); 1250 xlvbd_release_gendisk(info); 1251 xenbus_frontend_closed(info->xbdev); 1252 } 1253 1254 mutex_unlock(&info->mutex); 1255 1256 if (!xbdev) { 1257 /* sudden device removal */ 1258 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); 1259 xlvbd_release_gendisk(info); 1260 disk->private_data = NULL; 1261 kfree(info); 1262 } 1263 1264out: 1265 unlock_kernel(); 1266 return 0; 1267} 1268 1269static const struct block_device_operations xlvbd_block_fops = 1270{ 1271 .owner = THIS_MODULE, 1272 .open = blkif_open, 1273 .release = blkif_release, 1274 .getgeo = blkif_getgeo, 1275 .ioctl = blkif_ioctl, 1276}; 1277 1278 1279static const struct xenbus_device_id blkfront_ids[] = { 1280 { "vbd" }, 1281 { "" } 1282}; 1283 1284static struct xenbus_driver blkfront = { 1285 .name = "vbd", 1286 .owner = THIS_MODULE, 1287 .ids = blkfront_ids, 1288 .probe = blkfront_probe, 1289 .remove = blkfront_remove, 1290 .resume = blkfront_resume, 1291 .otherend_changed = blkback_changed, 1292 .is_ready = blkfront_is_ready, 1293}; 1294 1295static int __init xlblk_init(void) 1296{ 1297 if (!xen_domain()) 1298 return -ENODEV; 1299 1300 if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { 1301 printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n", 1302 XENVBD_MAJOR, DEV_NAME); 1303 return -ENODEV; 1304 } 1305 1306 return xenbus_register_frontend(&blkfront); 1307} 1308module_init(xlblk_init); 1309 1310 1311static void __exit xlblk_exit(void) 1312{ 1313 return xenbus_unregister_driver(&blkfront); 1314} 1315module_exit(xlblk_exit); 1316 1317MODULE_DESCRIPTION("Xen virtual block device frontend"); 1318MODULE_LICENSE("GPL"); 1319MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR); 1320MODULE_ALIAS("xen:vbd"); 
MODULE_ALIAS("xenblk");