blkfront.c: r227293 (old/deleted lines) vs. r231743 (new/added lines)
1/*
2 * XenBSD block device driver
3 *
4 * Copyright (c) 2009 Scott Long, Yahoo!
5 * Copyright (c) 2009 Frank Suchomel, Citrix
6 * Copyright (c) 2009 Doug F. Rabson, Citrix
7 * Copyright (c) 2005 Kip Macy
8 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand

--- 14 unchanged lines hidden ---

23 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
24 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
26 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
27 * DEALINGS IN THE SOFTWARE.
28 */
29
30#include <sys/cdefs.h>
1/*
2 * XenBSD block device driver
3 *
4 * Copyright (c) 2009 Scott Long, Yahoo!
5 * Copyright (c) 2009 Frank Suchomel, Citrix
6 * Copyright (c) 2009 Doug F. Rabson, Citrix
7 * Copyright (c) 2005 Kip Macy
8 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand

--- 14 unchanged lines hidden ---

23 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
24 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
26 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
27 * DEALINGS IN THE SOFTWARE.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/xen/blkfront/blkfront.c 227293 2011-11-07 06:44:47Z ed $");
31__FBSDID("$FreeBSD: head/sys/dev/xen/blkfront/blkfront.c 231743 2012-02-15 06:45:49Z gibbs $");
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/malloc.h>
36#include <sys/kernel.h>
37#include <vm/vm.h>
38#include <vm/pmap.h>
39
40#include <sys/bio.h>
41#include <sys/bus.h>
42#include <sys/conf.h>
43#include <sys/module.h>
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/malloc.h>
36#include <sys/kernel.h>
37#include <vm/vm.h>
38#include <vm/pmap.h>
39
40#include <sys/bio.h>
41#include <sys/bus.h>
42#include <sys/conf.h>
43#include <sys/module.h>
44#include <sys/sysctl.h>
44
45#include <machine/bus.h>
46#include <sys/rman.h>
47#include <machine/resource.h>
48#include <machine/intr_machdep.h>
49#include <machine/vmparam.h>
50#include <sys/bus_dma.h>
51

--- 82 unchanged lines hidden ---

134#define XBD_SECTOR_SHFT 9
135
136/*
137 * Translate Linux major/minor to an appropriate name and unit
138 * number. For HVM guests, this allows us to use the same drive names
139 * with blkfront as the emulated drives, easing transition slightly.
140 */
141static void
45
46#include <machine/bus.h>
47#include <sys/rman.h>
48#include <machine/resource.h>
49#include <machine/intr_machdep.h>
50#include <machine/vmparam.h>
51#include <sys/bus_dma.h>
52

--- 82 unchanged lines hidden ---

135#define XBD_SECTOR_SHFT 9
136
137/*
138 * Translate Linux major/minor to an appropriate name and unit
139 * number. For HVM guests, this allows us to use the same drive names
140 * with blkfront as the emulated drives, easing transition slightly.
141 */
142static void
142blkfront_vdevice_to_unit(int vdevice, int *unit, const char **name)
143blkfront_vdevice_to_unit(uint32_t vdevice, int *unit, const char **name)
143{
144 static struct vdev_info {
145 int major;
146 int shift;
147 int base;
148 const char *name;
149 } info[] = {
150 {3, 6, 0, "ad"}, /* ide0 */

--- 30 unchanged lines hidden ---

181 };
182 int major = vdevice >> 8;
183 int minor = vdevice & 0xff;
184 int i;
185
186 if (vdevice & (1 << 28)) {
187 *unit = (vdevice & ((1 << 28) - 1)) >> 8;
188 *name = "xbd";
144{
145 static struct vdev_info {
146 int major;
147 int shift;
148 int base;
149 const char *name;
150 } info[] = {
151 {3, 6, 0, "ad"}, /* ide0 */

--- 30 unchanged lines hidden ---

182 };
183 int major = vdevice >> 8;
184 int minor = vdevice & 0xff;
185 int i;
186
187 if (vdevice & (1 << 28)) {
188 *unit = (vdevice & ((1 << 28) - 1)) >> 8;
189 *name = "xbd";
190 return;
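The decode above follows the Xen virtual block device numbering: bit 28 selects the extended "xbd" namespace, while legacy values carry a Linux major number in the upper bits and the minor in the low byte. A standalone sketch of the same arithmetic, using made-up sample values (illustrative only, not driver code):

/*
 * Illustrative only: decode two sample vdevice values the way
 * blkfront_vdevice_to_unit() does.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t ext = (1u << 28) | (2 << 8);	/* extended namespace, unit 2 */
	uint32_t legacy = (3 << 8) | 65;	/* major 3 (ide0), minor 65 */

	if (ext & (1 << 28))
		printf("xbd%u\n", (ext & ((1 << 28) - 1)) >> 8);	/* xbd2 */

	/* ide0 table entry {3, 6, 0, "ad"}: unit = base + (minor >> shift). */
	printf("ad%u\n", 0 + ((legacy & 0xff) >> 6));			/* ad1 */
	return (0);
}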
189 }
190
191 for (i = 0; info[i].major; i++) {
192 if (info[i].major == major) {
193 *unit = info[i].base + (minor >> info[i].shift);
194 *name = info[i].name;
195 return;
196 }

--- 24 unchanged lines hidden (view full) ---

221 sc->xb_disk->d_ioctl = blkif_ioctl;
222 sc->xb_disk->d_strategy = xb_strategy;
223 sc->xb_disk->d_dump = xb_dump;
224 sc->xb_disk->d_name = name;
225 sc->xb_disk->d_drv1 = sc;
226 sc->xb_disk->d_sectorsize = sector_size;
227
228 sc->xb_disk->d_mediasize = sectors * sector_size;
191 }
192
193 for (i = 0; info[i].major; i++) {
194 if (info[i].major == major) {
195 *unit = info[i].base + (minor >> info[i].shift);
196 *name = info[i].name;
197 return;
198 }

--- 24 unchanged lines hidden (view full) ---

223 sc->xb_disk->d_ioctl = blkif_ioctl;
224 sc->xb_disk->d_strategy = xb_strategy;
225 sc->xb_disk->d_dump = xb_dump;
226 sc->xb_disk->d_name = name;
227 sc->xb_disk->d_drv1 = sc;
228 sc->xb_disk->d_sectorsize = sector_size;
229
230 sc->xb_disk->d_mediasize = sectors * sector_size;
229 sc->xb_disk->d_maxsize = sc->max_request_size;
231 sc->xb_disk->d_maxsize = sc->max_request_size - PAGE_SIZE;
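A likely reading of the one-page reduction above: max_request_size is derived as (max_request_segments - 1) * PAGE_SIZE further down, and a buffer of that size that does not begin on a page boundary spans max_request_segments pages, so trimming d_maxsize by one page keeps any single bio within the negotiated segment count. For example, with 4 KiB pages and a hypothetical limit of 11 segments per request, max_request_size would be 40 KiB, and an unaligned 40 KiB buffer touches 11 pages.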
230 sc->xb_disk->d_flags = 0;
231 disk_create(sc->xb_disk, DISK_VERSION_00);
232
233 return error;
234}
235
236/************************ end VBD support *****************/
237

--- 164 unchanged lines hidden ---

402 device_set_desc(dev, "Virtual Block Device");
403 device_quiet(dev);
404 return (0);
405 }
406
407 return (ENXIO);
408}
409
232 sc->xb_disk->d_flags = 0;
233 disk_create(sc->xb_disk, DISK_VERSION_00);
234
235 return error;
236}
237
238/************************ end VBD support *****************/
239

--- 164 unchanged lines hidden (view full) ---

404 device_set_desc(dev, "Virtual Block Device");
405 device_quiet(dev);
406 return (0);
407 }
408
409 return (ENXIO);
410}
411
412static void
413xb_setup_sysctl(struct xb_softc *xb)
414{
415 struct sysctl_ctx_list *sysctl_ctx = NULL;
416 struct sysctl_oid *sysctl_tree = NULL;
417
418 sysctl_ctx = device_get_sysctl_ctx(xb->xb_dev);
419 if (sysctl_ctx == NULL)
420 return;
421
422 sysctl_tree = device_get_sysctl_tree(xb->xb_dev);
423 if (sysctl_tree == NULL)
424 return;
425
426 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
427 "max_requests", CTLFLAG_RD, &xb->max_requests, -1,
428 "maximum outstanding requests (negotiated)");
429
430 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
431 "max_request_segments", CTLFLAG_RD,
432 &xb->max_request_segments, 0,
433 "maximum number of pages per requests (negotiated)");
434
435 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
436 "max_request_size", CTLFLAG_RD,
437 &xb->max_request_size, 0,
438 "maximum size in bytes of a request (negotiated)");
439
440 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
441 "ring_pages", CTLFLAG_RD,
442 &xb->ring_pages, 0,
443 "communication channel pages (negotiated)");
444}
445
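The limits exported by xb_setup_sysctl() can be read back from userland once the device attaches. A minimal sketch, assuming the instance appears as dev.xbd.0 (the OID prefix and unit number are assumptions, not taken from this diff):

/* Sketch: query one of the new read-only sysctls from userland. */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	unsigned int max_request_size;
	size_t len = sizeof(max_request_size);

	/* "dev.xbd.0" is assumed; substitute the real device name and unit. */
	if (sysctlbyname("dev.xbd.0.max_request_size",
	    &max_request_size, &len, NULL, 0) == 0)
		printf("max_request_size: %u bytes\n", max_request_size);
	else
		perror("sysctlbyname");
	return (0);
}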
410/*
411 * Setup supplies the backend dir and the virtual device number. We
412 * allocate an event channel and grant references for the shared frames,
413 * then watch the backend to learn when it is ready.
414 */
415static int
416blkfront_attach(device_t dev)
417{
418 struct xb_softc *sc;
419 const char *name;
446/*
447 * Setup supplies the backend dir and the virtual device number. We
448 * allocate an event channel and grant references for the shared frames,
449 * then watch the backend to learn when it is ready.
450 */
451static int
452blkfront_attach(device_t dev)
453{
454 struct xb_softc *sc;
455 const char *name;
456 uint32_t vdevice;
420 int error;
457 int error;
421 int vdevice;
422 int i;
423 int unit;
424
425 /* FIXME: Use dynamic device id if this is not set. */
426 error = xs_scanf(XST_NIL, xenbus_get_node(dev),
458 int i;
459 int unit;
460
461 /* FIXME: Use dynamic device id if this is not set. */
462 error = xs_scanf(XST_NIL, xenbus_get_node(dev),
427 "virtual-device", NULL, "%i", &vdevice);
463 "virtual-device", NULL, "%" PRIu32, &vdevice);
428 if (error) {
429 xenbus_dev_fatal(dev, error, "reading virtual-device");
430 device_printf(dev, "Couldn't determine virtual device.\n");
431 return (error);
432 }
433
434 blkfront_vdevice_to_unit(vdevice, &unit, &name);
435 if (!strcmp(name, "xbd"))

--- 8 unchanged lines hidden ---

444 xb_initq_bio(sc);
445 for (i = 0; i < XBF_MAX_RING_PAGES; i++)
446 sc->ring_ref[i] = GRANT_INVALID_REF;
447
448 sc->xb_dev = dev;
449 sc->vdevice = vdevice;
450 sc->connected = BLKIF_STATE_DISCONNECTED;
451
464 if (error) {
465 xenbus_dev_fatal(dev, error, "reading virtual-device");
466 device_printf(dev, "Couldn't determine virtual device.\n");
467 return (error);
468 }
469
470 blkfront_vdevice_to_unit(vdevice, &unit, &name);
471 if (!strcmp(name, "xbd"))

--- 8 unchanged lines hidden (view full) ---

480 xb_initq_bio(sc);
481 for (i = 0; i < XBF_MAX_RING_PAGES; i++)
482 sc->ring_ref[i] = GRANT_INVALID_REF;
483
484 sc->xb_dev = dev;
485 sc->vdevice = vdevice;
486 sc->connected = BLKIF_STATE_DISCONNECTED;
487
488 xb_setup_sysctl(sc);
489
452 /* Wait for backend device to publish its protocol capabilities. */
453 xenbus_set_state(dev, XenbusStateInitialising);
454
455 return (0);
456}
457
458static int
459blkfront_suspend(device_t dev)

--- 36 unchanged lines hidden ---

496 return (0);
497}
498
499static void
500blkfront_initialize(struct xb_softc *sc)
501{
502 const char *otherend_path;
503 const char *node_path;
490 /* Wait for backend device to publish its protocol capabilities. */
491 xenbus_set_state(dev, XenbusStateInitialising);
492
493 return (0);
494}
495
496static int
497blkfront_suspend(device_t dev)

--- 36 unchanged lines hidden ---

534 return (0);
535}
536
537static void
538blkfront_initialize(struct xb_softc *sc)
539{
540 const char *otherend_path;
541 const char *node_path;
542 uint32_t max_ring_page_order;
504 int error;
505 int i;
506
507 if (xenbus_get_state(sc->xb_dev) != XenbusStateInitialising) {
508 /* Initialization has already been performed. */
509 return;
510 }
511
512 /*
513 * Protocol defaults valid even if negotiation for a
514 * setting fails.
515 */
543 int error;
544 int i;
545
546 if (xenbus_get_state(sc->xb_dev) != XenbusStateInitialising) {
547 /* Initialization has already been performed. */
548 return;
549 }
550
551 /*
552 * Protocol defaults valid even if negotiation for a
553 * setting fails.
554 */
555 max_ring_page_order = 0;
516 sc->ring_pages = 1;
556 sc->ring_pages = 1;
517 sc->max_requests = BLKIF_MAX_RING_REQUESTS(PAGE_SIZE);
518 sc->max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
519 sc->max_request_size = (sc->max_request_segments - 1) * PAGE_SIZE;
520 sc->max_request_blocks = BLKIF_SEGS_TO_BLOCKS(sc->max_request_segments);
521
522 /*
523 * Protocol negotiation.
524 *
525 * \note xs_gather() returns on the first encountered error, so
526 * we must use independent calls in order to guarantee
527 * we don't miss information in a sparsely populated back-end
528 * tree.
557 sc->max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
558 sc->max_request_size = (sc->max_request_segments - 1) * PAGE_SIZE;
559 sc->max_request_blocks = BLKIF_SEGS_TO_BLOCKS(sc->max_request_segments);
560
561 /*
562 * Protocol negotiation.
563 *
564 * \note xs_gather() returns on the first encountered error, so
565 * we must use independent calls in order to guarantee
566 * we don't miss information in a sparsely populated back-end
567 * tree.
568 *
569 * \note xs_scanf() does not update variables for unmatched
570 * fields.
529 */
530 otherend_path = xenbus_get_otherend_path(sc->xb_dev);
531 node_path = xenbus_get_node(sc->xb_dev);
571 */
572 otherend_path = xenbus_get_otherend_path(sc->xb_dev);
573 node_path = xenbus_get_node(sc->xb_dev);
574
575 /* Support both backend schemes for relaying ring page limits. */
532 (void)xs_scanf(XST_NIL, otherend_path,
576 (void)xs_scanf(XST_NIL, otherend_path,
577 "max-ring-page-order", NULL, "%" PRIu32,
578 &max_ring_page_order);
579 sc->ring_pages = 1 << max_ring_page_order;
580 (void)xs_scanf(XST_NIL, otherend_path,
533 "max-ring-pages", NULL, "%" PRIu32,
534 &sc->ring_pages);
581 "max-ring-pages", NULL, "%" PRIu32,
582 &sc->ring_pages);
583 if (sc->ring_pages < 1)
584 sc->ring_pages = 1;
535
585
586 sc->max_requests = BLKIF_MAX_RING_REQUESTS(sc->ring_pages * PAGE_SIZE);
536 (void)xs_scanf(XST_NIL, otherend_path,
537 "max-requests", NULL, "%" PRIu32,
538 &sc->max_requests);
539
540 (void)xs_scanf(XST_NIL, otherend_path,
541 "max-request-segments", NULL, "%" PRIu32,
542 &sc->max_request_segments);
543
544 (void)xs_scanf(XST_NIL, otherend_path,
545 "max-request-size", NULL, "%" PRIu32,
546 &sc->max_request_size);
547
548 if (sc->ring_pages > XBF_MAX_RING_PAGES) {
549 device_printf(sc->xb_dev, "Back-end specified ring-pages of "
550 "%u limited to front-end limit of %zu.\n",
551 sc->ring_pages, XBF_MAX_RING_PAGES);
552 sc->ring_pages = XBF_MAX_RING_PAGES;
553 }
554
587 (void)xs_scanf(XST_NIL, otherend_path,
588 "max-requests", NULL, "%" PRIu32,
589 &sc->max_requests);
590
591 (void)xs_scanf(XST_NIL, otherend_path,
592 "max-request-segments", NULL, "%" PRIu32,
593 &sc->max_request_segments);
594
595 (void)xs_scanf(XST_NIL, otherend_path,
596 "max-request-size", NULL, "%" PRIu32,
597 &sc->max_request_size);
598
599 if (sc->ring_pages > XBF_MAX_RING_PAGES) {
600 device_printf(sc->xb_dev, "Back-end specified ring-pages of "
601 "%u limited to front-end limit of %zu.\n",
602 sc->ring_pages, XBF_MAX_RING_PAGES);
603 sc->ring_pages = XBF_MAX_RING_PAGES;
604 }
605
606 if (powerof2(sc->ring_pages) == 0) {
607 uint32_t new_page_limit;
608
609 new_page_limit = 0x01 << (fls(sc->ring_pages) - 1);
610 device_printf(sc->xb_dev, "Back-end specified ring-pages of "
611 "%u is not a power of 2. Limited to %u.\n",
612 sc->ring_pages, new_page_limit);
613 sc->ring_pages = new_page_limit;
614 }
615
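Whichever key the backend published, the negotiation above ends with a power-of-two page count clamped to the front-end limit. A self-contained sketch of that arithmetic (the helper name and limit are illustrative, not driver symbols; fls() is the FreeBSD libc routine):

#include <stdio.h>
#include <strings.h>

#define EXAMPLE_FRONTEND_LIMIT	32	/* stand-in for XBF_MAX_RING_PAGES */

static unsigned int
example_ring_pages(unsigned int page_order, unsigned int page_count)
{
	unsigned int ring_pages;

	ring_pages = 1u << page_order;		/* "max-ring-page-order" scheme */
	if (page_count != 0)
		ring_pages = page_count;	/* "max-ring-pages" scheme, if present */
	if (ring_pages < 1)
		ring_pages = 1;
	if (ring_pages > EXAMPLE_FRONTEND_LIMIT)
		ring_pages = EXAMPLE_FRONTEND_LIMIT;
	if ((ring_pages & (ring_pages - 1)) != 0)
		ring_pages = 1u << (fls(ring_pages) - 1);	/* round down to a power of 2 */
	return (ring_pages);
}

int
main(void)
{
	/* e.g. a backend advertising max-ring-pages = 6 is limited to 4 pages. */
	printf("%u\n", example_ring_pages(0, 6));
	return (0);
}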
555 if (sc->max_requests > XBF_MAX_REQUESTS) {
556 device_printf(sc->xb_dev, "Back-end specified max_requests of "
557 "%u limited to front-end limit of %u.\n",
558 sc->max_requests, XBF_MAX_REQUESTS);
559 sc->max_requests = XBF_MAX_REQUESTS;
560 }
561
562 if (sc->max_request_segments > XBF_MAX_SEGMENTS_PER_REQUEST) {

--- 57 unchanged lines hidden (view full) ---

620 if (bus_dmamap_create(sc->xb_io_dmat, 0, &cm->map) != 0)
621 break;
622 xb_free_command(cm);
623 }
624
625 if (setup_blkring(sc) != 0)
626 return;
627
616 if (sc->max_requests > XBF_MAX_REQUESTS) {
617 device_printf(sc->xb_dev, "Back-end specified max_requests of "
618 "%u limited to front-end limit of %u.\n",
619 sc->max_requests, XBF_MAX_REQUESTS);
620 sc->max_requests = XBF_MAX_REQUESTS;
621 }
622
623 if (sc->max_request_segments > XBF_MAX_SEGMENTS_PER_REQUEST) {

--- 57 unchanged lines hidden (view full) ---

681 if (bus_dmamap_create(sc->xb_io_dmat, 0, &cm->map) != 0)
682 break;
683 xb_free_command(cm);
684 }
685
686 if (setup_blkring(sc) != 0)
687 return;
688
689 /* Support both backend schemes for relaying ring page limits. */
628 error = xs_printf(XST_NIL, node_path,
690 error = xs_printf(XST_NIL, node_path,
629 "ring-pages","%u", sc->ring_pages);
691 "num-ring-pages","%u", sc->ring_pages);
630 if (error) {
631 xenbus_dev_fatal(sc->xb_dev, error,
692 if (error) {
693 xenbus_dev_fatal(sc->xb_dev, error,
632 "writing %s/ring-pages",
694 "writing %s/num-ring-pages",
633 node_path);
634 return;
635 }
695 node_path);
696 return;
697 }
698 error = xs_printf(XST_NIL, node_path,
699 "ring-page-order","%u", fls(sc->ring_pages) - 1);
700 if (error) {
701 xenbus_dev_fatal(sc->xb_dev, error,
702 "writing %s/ring-page-order",
703 node_path);
704 return;
705 }
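The front end, in turn, advertises the negotiated ring size under both key names ("num-ring-pages" and "ring-page-order", as written just above) so either style of backend can read it. A small sketch of the pair of values for an example 4-page ring (values illustrative):

#include <stdio.h>
#include <strings.h>

int
main(void)
{
	unsigned int ring_pages = 4;	/* example negotiated value */

	printf("num-ring-pages = %u\n", ring_pages);
	printf("ring-page-order = %u\n", fls(ring_pages) - 1);	/* 2 */
	return (0);
}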
636
637 error = xs_printf(XST_NIL, node_path,
638 "max-requests","%u", sc->max_requests);
639 if (error) {
640 xenbus_dev_fatal(sc->xb_dev, error,
641 "writing %s/max-requests",
642 node_path);
643 return;

--- 62 unchanged lines hidden ---

706 error = xenbus_grant_ring(sc->xb_dev,
707 (vtomach(sring_page_addr) >> PAGE_SHIFT), &sc->ring_ref[i]);
708 if (error) {
709 xenbus_dev_fatal(sc->xb_dev, error,
710 "granting ring_ref(%d)", i);
711 return (error);
712 }
713 }
706
707 error = xs_printf(XST_NIL, node_path,
708 "max-requests","%u", sc->max_requests);
709 if (error) {
710 xenbus_dev_fatal(sc->xb_dev, error,
711 "writing %s/max-requests",
712 node_path);
713 return;

--- 62 unchanged lines hidden ---

776 error = xenbus_grant_ring(sc->xb_dev,
777 (vtomach(sring_page_addr) >> PAGE_SHIFT), &sc->ring_ref[i]);
778 if (error) {
779 xenbus_dev_fatal(sc->xb_dev, error,
780 "granting ring_ref(%d)", i);
781 return (error);
782 }
783 }
714 error = xs_printf(XST_NIL, xenbus_get_node(sc->xb_dev),
715 "ring-ref","%u", sc->ring_ref[0]);
716 if (error) {
717 xenbus_dev_fatal(sc->xb_dev, error, "writing %s/ring-ref",
718 xenbus_get_node(sc->xb_dev));
719 return (error);
720 }
721 for (i = 1; i < sc->ring_pages; i++) {
722 char ring_ref_name[]= "ring_refXX";
723
724 snprintf(ring_ref_name, sizeof(ring_ref_name), "ring-ref%u", i);
784 if (sc->ring_pages == 1) {
725 error = xs_printf(XST_NIL, xenbus_get_node(sc->xb_dev),
785 error = xs_printf(XST_NIL, xenbus_get_node(sc->xb_dev),
726 ring_ref_name, "%u", sc->ring_ref[i]);
786 "ring-ref", "%u", sc->ring_ref[0]);
727 if (error) {
787 if (error) {
728 xenbus_dev_fatal(sc->xb_dev, error, "writing %s/%s",
729 xenbus_get_node(sc->xb_dev),
730 ring_ref_name);
788 xenbus_dev_fatal(sc->xb_dev, error,
789 "writing %s/ring-ref",
790 xenbus_get_node(sc->xb_dev));
731 return (error);
732 }
791 return (error);
792 }
793 } else {
794 for (i = 0; i < sc->ring_pages; i++) {
795 char ring_ref_name[]= "ring_refXX";
796
797 snprintf(ring_ref_name, sizeof(ring_ref_name),
798 "ring-ref%u", i);
799 error = xs_printf(XST_NIL, xenbus_get_node(sc->xb_dev),
800 ring_ref_name, "%u", sc->ring_ref[i]);
801 if (error) {
802 xenbus_dev_fatal(sc->xb_dev, error,
803 "writing %s/%s",
804 xenbus_get_node(sc->xb_dev),
805 ring_ref_name);
806 return (error);
807 }
808 }
733 }
734
735 error = bind_listening_port_to_irqhandler(
736 xenbus_get_otherend_id(sc->xb_dev),
737 "xbd", (driver_intr_t *)blkif_int, sc,
738 INTR_TYPE_BIO | INTR_MPSAFE, &sc->irq);
739 if (error) {
740 xenbus_dev_fatal(sc->xb_dev, error,

--- 49 unchanged lines hidden ---

790static void
791blkfront_connect(struct xb_softc *sc)
792{
793 device_t dev = sc->xb_dev;
794 unsigned long sectors, sector_size;
795 unsigned int binfo;
796 int err, feature_barrier;
797
809 }
810
811 error = bind_listening_port_to_irqhandler(
812 xenbus_get_otherend_id(sc->xb_dev),
813 "xbd", (driver_intr_t *)blkif_int, sc,
814 INTR_TYPE_BIO | INTR_MPSAFE, &sc->irq);
815 if (error) {
816 xenbus_dev_fatal(sc->xb_dev, error,

--- 49 unchanged lines hidden ---

866static void
867blkfront_connect(struct xb_softc *sc)
868{
869 device_t dev = sc->xb_dev;
870 unsigned long sectors, sector_size;
871 unsigned int binfo;
872 int err, feature_barrier;
873
798 if( (sc->connected == BLKIF_STATE_CONNECTED) ||
874 if( (sc->connected == BLKIF_STATE_CONNECTED) ||
799 (sc->connected == BLKIF_STATE_SUSPENDED) )
800 return;
801
802 DPRINTK("blkfront.c:connect:%s.\n", xenbus_get_otherend_path(dev));
803
804 err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
805 "sectors", "%lu", &sectors,
806 "info", "%u", &binfo,

--- 111 unchanged lines hidden ---

918blkif_close(struct disk *dp)
919{
920 struct xb_softc *sc = (struct xb_softc *)dp->d_drv1;
921
922 if (sc == NULL)
923 return (ENXIO);
924 sc->xb_flags &= ~XB_OPEN;
925 if (--(sc->users) == 0) {
875 (sc->connected == BLKIF_STATE_SUSPENDED) )
876 return;
877
878 DPRINTK("blkfront.c:connect:%s.\n", xenbus_get_otherend_path(dev));
879
880 err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
881 "sectors", "%lu", &sectors,
882 "info", "%u", &binfo,

--- 111 unchanged lines hidden ---

994blkif_close(struct disk *dp)
995{
996 struct xb_softc *sc = (struct xb_softc *)dp->d_drv1;
997
998 if (sc == NULL)
999 return (ENXIO);
1000 sc->xb_flags &= ~XB_OPEN;
1001 if (--(sc->users) == 0) {
926 /* Check whether we have been instructed to close. We will
927 have ignored this request initially, as the device was
928 still mounted. */
929 device_t dev = sc->xb_dev;
930 XenbusState state =
931 xenbus_read_driver_state(xenbus_get_otherend_path(dev));
932
933 if (state == XenbusStateClosing)
934 blkfront_closing(dev);
1002 /*
1003 * Check whether we have been instructed to close. We will
1004 * have ignored this request initially, as the device was
1005 * still mounted.
1006 */
1007 if (xenbus_get_otherend_state(sc->xb_dev) == XenbusStateClosing)
1008 blkfront_closing(sc->xb_dev);
935 }
936 return (0);
937}
938
939static int
940blkif_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td)
941{
942 struct xb_softc *sc = (struct xb_softc *)dp->d_drv1;

--- 85 unchanged lines hidden ---

1028
1029static void
1030blkif_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1031{
1032 struct xb_softc *sc;
1033 struct xb_command *cm;
1034 blkif_request_t *ring_req;
1035 struct blkif_request_segment *sg;
1009 }
1010 return (0);
1011}
1012
1013static int
1014blkif_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td)
1015{
1016 struct xb_softc *sc = (struct xb_softc *)dp->d_drv1;

--- 85 unchanged lines hidden ---

1102
1103static void
1104blkif_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1105{
1106 struct xb_softc *sc;
1107 struct xb_command *cm;
1108 blkif_request_t *ring_req;
1109 struct blkif_request_segment *sg;
1036 struct blkif_request_segment *last_block_sg;
1110 struct blkif_request_segment *last_block_sg;
1037 grant_ref_t *sg_ref;
1038 vm_paddr_t buffer_ma;
1039 uint64_t fsect, lsect;
1040 int ref;
1041 int op;
1042 int block_segs;
1043
1044 cm = arg;

--- 54 unchanged lines hidden ---

1099 .first_sect = fsect,
1100 .last_sect = lsect };
1101 sg++;
1102 sg_ref++;
1103 segs++;
1104 nsegs--;
1105 }
1106 block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK);
1111 grant_ref_t *sg_ref;
1112 vm_paddr_t buffer_ma;
1113 uint64_t fsect, lsect;
1114 int ref;
1115 int op;
1116 int block_segs;
1117
1118 cm = arg;

--- 54 unchanged lines hidden ---

1173 .first_sect = fsect,
1174 .last_sect = lsect };
1175 sg++;
1176 sg_ref++;
1177 segs++;
1178 nsegs--;
1179 }
1180 block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK);
1107 if (block_segs == 0)
1108 break;
1181 if (block_segs == 0)
1182 break;
1109
1183
1110 sg = BLKRING_GET_SG_REQUEST(&sc->ring, sc->ring.req_prod_pvt);
1184 sg = BLKRING_GET_SEG_BLOCK(&sc->ring, sc->ring.req_prod_pvt);
1111 sc->ring.req_prod_pvt++;
1185 sc->ring.req_prod_pvt++;
1112 last_block_sg = sg + block_segs;
1186 last_block_sg = sg + block_segs;
1113 }
1114
1115 if (cm->operation == BLKIF_OP_READ)
1116 op = BUS_DMASYNC_PREREAD;
1117 else if (cm->operation == BLKIF_OP_WRITE)
1118 op = BUS_DMASYNC_PREWRITE;
1119 else
1120 op = 0;

--- 219 unchanged lines hidden ---
1187 }
1188
1189 if (cm->operation == BLKIF_OP_READ)
1190 op = BUS_DMASYNC_PREREAD;
1191 else if (cm->operation == BLKIF_OP_WRITE)
1192 op = BUS_DMASYNC_PREWRITE;
1193 else
1194 op = 0;

--- 219 unchanged lines hidden ---