blkfront.c revision 181805
/*-
 * All rights reserved.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * XenoBSD block device driver
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/xen/blkfront/blkfront.c 181805 2008-08-17 23:33:33Z kmacy $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>

#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <machine/intr_machdep.h>
#include <machine/vmparam.h>

#include <machine/xen/hypervisor.h>
#include <machine/xen/xen-os.h>
#include <machine/xen/xen_intr.h>
#include <machine/xen/xenbus.h>
#include <machine/xen/evtchn.h>
#include <xen/interface/grant_table.h>

#include <geom/geom_disk.h>
#include <machine/xen/xenfunc.h>
#include <xen/gnttab.h>

#include <dev/xen/blkfront/block.h>

#define	ASSERT(S)	KASSERT(S, (#S))
/* prototypes */
struct xb_softc;
static void xb_startio(struct xb_softc *sc);
static void connect(struct blkfront_info *);
static void blkfront_closing(struct xenbus_device *);
static int blkfront_remove(struct xenbus_device *);
static int talk_to_backend(struct xenbus_device *, struct blkfront_info *);
static int setup_blkring(struct xenbus_device *, struct blkfront_info *);
static void blkif_int(void *);
#if 0
static void blkif_restart_queue(void *arg);
#endif
static void blkif_recover(struct blkfront_info *);
static void blkif_completion(struct blk_shadow *);
static void blkif_free(struct blkfront_info *, int);

#define GRANT_INVALID_REF 0
#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
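/*
 * __RING_SIZE yields the largest power-of-two number of request/response
 * unions that fits in one page after the ring header; with the usual
 * 4 KB page this works out to 32 ring entries.
 */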

LIST_HEAD(xb_softc_list_head, xb_softc) xbsl_head;

/* Control whether runtime update of vbds is enabled. */
#define ENABLE_VBD_UPDATE 0

#if ENABLE_VBD_UPDATE
static void vbd_update(void);
#endif


#define BLKIF_STATE_DISCONNECTED 0
#define BLKIF_STATE_CONNECTED    1
#define BLKIF_STATE_SUSPENDED    2

#ifdef notyet
static char *blkif_state_name[] = {
	[BLKIF_STATE_DISCONNECTED] = "disconnected",
	[BLKIF_STATE_CONNECTED]    = "connected",
	[BLKIF_STATE_SUSPENDED]    = "closed",
};

static char * blkif_status_name[] = {
	[BLKIF_INTERFACE_STATUS_CLOSED]       = "closed",
	[BLKIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
	[BLKIF_INTERFACE_STATUS_CONNECTED]    = "connected",
	[BLKIF_INTERFACE_STATUS_CHANGED]      = "changed",
};
#endif
#define WPRINTK(fmt, args...) printf("[XEN] " fmt, ##args)
#if 0
#define DPRINTK(fmt, args...) printf("[XEN] %s:%d: " fmt ".\n", __FUNCTION__, __LINE__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

static grant_ref_t gref_head;
#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
    (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
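/*
 * Worst-case number of grant references in flight: one per segment of
 * every ring slot (BLKIF_MAX_SEGMENTS_PER_REQUEST is typically 11 in
 * the Xen interface headers).  This driver currently issues
 * single-segment requests only (see blkif_queue_request()), so actual
 * usage is far lower.
 */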

static void kick_pending_request_queues(struct blkfront_info *);
static int blkif_open(struct disk *dp);
static int blkif_close(struct disk *dp);
static int blkif_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td);
static int blkif_queue_request(struct bio *bp);
static void xb_strategy(struct bio *bp);



/* XXX move to xb_vbd.c when VBD update support is added */
#define MAX_VBDS 64

#define XBD_SECTOR_SIZE		512	/* XXX: assume for now */
#define XBD_SECTOR_SHFT		9

static struct mtx blkif_io_lock;

static vm_paddr_t
pfn_to_mfn(vm_paddr_t pfn)
{
	return (phystomach(pfn << PAGE_SHIFT) >> PAGE_SHIFT);
}
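/*
 * Under paravirtualized Xen the guest's pseudo-physical frame numbers
 * (PFNs) differ from the hypervisor's machine frame numbers (MFNs).
 * Grant-table entries and shared-ring addresses must be expressed as
 * MFNs, hence the phystomach() translation above.
 */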


int
xlvbd_add(blkif_sector_t capacity, int unit, uint16_t vdisk_info, uint16_t sector_size,
	  struct blkfront_info *info)
{
	struct xb_softc	*sc;
	int		error = 0;

	sc = (struct xb_softc *)malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	sc->xb_unit = unit;
	sc->xb_info = info;
	info->sc = sc;

	sc->xb_disk = disk_alloc();
	sc->xb_disk->d_unit = unit;
	sc->xb_disk->d_open = blkif_open;
	sc->xb_disk->d_close = blkif_close;
	sc->xb_disk->d_ioctl = blkif_ioctl;
	sc->xb_disk->d_strategy = xb_strategy;
	sc->xb_disk->d_name = "xbd";
	sc->xb_disk->d_drv1 = sc;
	sc->xb_disk->d_sectorsize = sector_size;

	/* XXX */
	sc->xb_disk->d_mediasize = capacity << XBD_SECTOR_SHFT;
#if 0
	sc->xb_disk->d_maxsize = DFLTPHYS;
#else /* XXX: xen can't handle large single i/o requests */
	sc->xb_disk->d_maxsize = 4096;
#endif
#ifdef notyet
	XENPRINTF("attaching device 0x%x unit %d capacity %llu\n",
		  xb_diskinfo[sc->xb_unit].device, sc->xb_unit,
		  sc->xb_disk->d_mediasize);
#endif
	sc->xb_disk->d_flags = 0;
	disk_create(sc->xb_disk, DISK_VERSION_00);
	bioq_init(&sc->xb_bioq);

	return error;
}

void
xlvbd_del(struct blkfront_info *info)
{
	struct xb_softc	*sc;

	sc = info->sc;
	disk_destroy(sc->xb_disk);
}
/************************ end VBD support *****************/

/*
 * Read/write routine for a buffer.  Finds the proper unit, places the
 * bio on the disk's sort queue, and kicks the controller.
 */
static void
xb_strategy(struct bio *bp)
{
	struct xb_softc	*sc = (struct xb_softc *)bp->bio_disk->d_drv1;

	/* bogus disk? */
	if (sc == NULL) {
		bp->bio_error = EINVAL;
		bp->bio_flags |= BIO_ERROR;
		goto bad;
	}

	DPRINTK("");

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	mtx_lock(&blkif_io_lock);
	bioq_disksort(&sc->xb_bioq, bp);

	xb_startio(sc);
	mtx_unlock(&blkif_io_lock);
	return;

 bad:
	/*
	 * Correctly set the bio to indicate a failed transfer.
	 */
	bp->bio_resid = bp->bio_bcount;
	biodone(bp);
	return;
}


/*
 * Setup supplies the backend dir and virtual device.  We allocate an
 * event channel and shared ring entries, then watch the backend to see
 * when it is ready.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		xenbus_dev_fatal(dev, err, "reading virtual-device");
		printf("couldn't find virtual device");
		return (err);
	}

	info = malloc(sizeof(*info), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (info == NULL) {
		xenbus_dev_fatal(dev, ENOMEM, "allocating info structure");
		return ENOMEM;
	}

	/*
	 * XXX debug only
	 */
	for (i = 0; i < sizeof(*info); i++)
		if (((uint8_t *)info)[i] != 0)
			panic("non-zero memory");

	info->shadow_free = 0;
	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;

	/* work queue needed ? */
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = strtoul(strrchr(dev->nodename,'/')+1, NULL, 0);
	dev->dev_driver_data = info;

	err = talk_to_backend(dev, info);
	if (err) {
		free(info, M_DEVBUF);
		dev->dev_driver_data = NULL;
		return err;
	}

	return 0;
}


static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev_driver_data;
	int err;

	DPRINTK("blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, 1);

	err = talk_to_backend(dev, info);
	if (!err)
		blkif_recover(info);

	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

 again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref","%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
		"event-channel", "%u", irq_to_evtchn_port(info->irq));
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}
	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}

static int
setup_blkring(struct xenbus_device *dev, struct blkfront_info *info)
{
	blkif_sring_t *sring;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (blkif_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (sring == NULL) {
		xenbus_dev_fatal(dev, ENOMEM, "allocating shared ring");
		return ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, (vtomach(info->ring.sring) >> PAGE_SHIFT));
	if (err < 0) {
		free(sring, M_DEVBUF);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = err;

	err = bind_listening_port_to_irqhandler(dev->otherend_id,
		"xbd", (driver_intr_t *)blkif_int, info,
					INTR_TYPE_BIO | INTR_MPSAFE, NULL);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
 fail:
	blkif_free(info, 0);
	return err;
}
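/*
 * On success, info->ring_ref holds the grant reference through which the
 * backend maps the shared page and info->irq identifies the handler
 * bound to the new event channel; talk_to_backend() then advertises
 * both to the backend via the xenstore "ring-ref" and "event-channel"
 * keys.
 */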


/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
			    XenbusState backend_state)
{
	struct blkfront_info *info = dev->dev_driver_data;

	DPRINTK("blkfront:backend_changed.\n");

	switch (backend_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		connect(info);
		break;

	case XenbusStateClosing:
		if (info->users > 0)
			xenbus_dev_error(dev, -EBUSY,
					 "Device in use; refusing to close");
		else
			blkfront_closing(dev);
#ifdef notyet
		bd = bdget(info->dev);
		if (bd == NULL)
			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");

		down(&bd->bd_sem);
		if (info->users > 0)
			xenbus_dev_error(dev, -EBUSY,
					 "Device in use; refusing to close");
		else
			blkfront_closing(dev);
		up(&bd->bd_sem);
		bdput(bd);
#endif
	}
}

/*
 * Invoked when the backend is finally 'ready' (and has produced the
 * details about the physical device - #sectors, size, etc).
 */
static void
connect(struct blkfront_info *info)
{
	unsigned long sectors, sector_size;
	unsigned int binfo;
	int err;

	if ((info->connected == BLKIF_STATE_CONNECTED) ||
	    (info->connected == BLKIF_STATE_SUSPENDED))
		return;

	DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%lu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%lu", &info->feature_barrier,
			    NULL);
	if (err)
		info->feature_barrier = 0;

	xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);

	(void)xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	mtx_lock(&blkif_io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	mtx_unlock(&blkif_io_lock);

#if 0
	add_disk(info->gd);
#endif
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void blkfront_closing(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev_driver_data;

	DPRINTK("blkfront_closing: %s removed\n", dev->nodename);

	if (info->mi) {
		DPRINTK("Calling xlvbd_del\n");
		xlvbd_del(info);
		info->mi = NULL;
	}

	xenbus_switch_state(dev, XenbusStateClosed);
}


static int blkfront_remove(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev_driver_data;

	DPRINTK("blkfront_remove: %s removed\n", dev->nodename);

	blkif_free(info, 0);

	free(info, M_DEVBUF);

	return 0;
}


static inline int
GET_ID_FROM_FREELIST(struct blkfront_info *info)
{
	unsigned long nfree = info->shadow_free;

	KASSERT(nfree <= BLK_RING_SIZE, ("free %lu > RING_SIZE", nfree));
	info->shadow_free = info->shadow[nfree].req.id;
	info->shadow[nfree].req.id = 0x0fffffee; /* debug */
	return nfree;
}

static inline void
ADD_ID_TO_FREELIST(struct blkfront_info *info, unsigned long id)
{
	info->shadow[id].req.id  = info->shadow_free;
	info->shadow[id].request = 0;
	info->shadow_free = id;
}
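
/*
 * Note: the shadow free list is threaded through the req.id fields of
 * unused shadow entries.  info->shadow_free indexes the first free slot
 * and each free slot's req.id holds the index of the next, so the two
 * helpers above pop and push slots without any separate list storage.
 */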

static inline void
flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}
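/*
 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY publishes req_prod_pvt to the
 * shared ring and sets 'notify' only when the backend has asked for an
 * event within the range just pushed, so the event channel is signalled
 * only when the backend may actually be idle.
 */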

static void
kick_pending_request_queues(struct blkfront_info *info)
{
	/* XXX check if we can't simplify */
#if 0
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
#endif
	if (!RING_FULL(&info->ring)) {
#if 0
		sc = LIST_FIRST(&xbsl_head);
		LIST_REMOVE(sc, entry);
		/* Re-enable calldowns. */
		blk_start_queue(di->rq);
#endif
		/* Kick things off immediately. */
		xb_startio(info->sc);
	}
}

#if 0
/* XXX */
static void blkif_restart_queue(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;

	mtx_lock(&blkif_io_lock);
	kick_pending_request_queues(info);
	mtx_unlock(&blkif_io_lock);
}
#endif

static void blkif_restart_queue_callback(void *arg)
{
#if 0
	struct blkfront_info *info = (struct blkfront_info *)arg;
	/* XXX BSD equiv ? */

	schedule_work(&info->work);
#endif
}

static int
blkif_open(struct disk *dp)
{
	struct xb_softc	*sc = (struct xb_softc *)dp->d_drv1;

	if (sc == NULL) {
		/* Don't dereference sc here: it is NULL. */
		printf("xbd: open: softc not found\n");
		return (ENXIO);
	}

	sc->xb_flags |= XB_OPEN;
	sc->xb_info->users++;
	return (0);
}

static int
blkif_close(struct disk *dp)
{
	struct xb_softc	*sc = (struct xb_softc *)dp->d_drv1;

	if (sc == NULL)
		return (ENXIO);
	sc->xb_flags &= ~XB_OPEN;
	if (--(sc->xb_info->users) == 0) {
		/* Check whether we have been instructed to close.  We will
		   have ignored this request initially, as the device was
		   still mounted. */
		struct xenbus_device * dev = sc->xb_info->xbdev;
		XenbusState state = xenbus_read_driver_state(dev->otherend);

		if (state == XenbusStateClosing)
			blkfront_closing(dev);
	}
	return (0);
}

static int
blkif_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td)
{
	struct xb_softc	*sc = (struct xb_softc *)dp->d_drv1;

	if (sc == NULL)
		return (ENXIO);

	return (ENOTTY);
}


/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
static int blkif_queue_request(struct bio *bp)
{
	caddr_t alignbuf;
	vm_paddr_t buffer_ma;
	blkif_request_t     *ring_req;
	unsigned long id;
	uint64_t fsect, lsect;
	struct xb_softc *sc = (struct xb_softc *)bp->bio_disk->d_drv1;
	struct blkfront_info *info = sc->xb_info;
	int ref;

	if (unlikely(sc->xb_info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (gnttab_alloc_grant_references(
		    BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		gnttab_request_free_callback(
			&info->callback,
			blkif_restart_queue_callback,
			info,
			BLKIF_MAX_SEGMENTS_PER_REQUEST);
		return 1;
	}

	/* Check if the buffer is properly aligned */
	if ((vm_offset_t)bp->bio_data & PAGE_MASK) {
		int align = (bp->bio_bcount < PAGE_SIZE/2) ? XBD_SECTOR_SIZE :
			PAGE_SIZE;
		caddr_t newbuf = malloc(bp->bio_bcount + align, M_DEVBUF,
					M_NOWAIT);

		if (newbuf == NULL) {
			/* Out of memory: release the grants and have the
			 * caller requeue the bio for a later retry. */
			gnttab_free_grant_references(gref_head);
			return 1;
		}
		alignbuf = (char *)roundup2((u_long)newbuf, align);

		/* save a copy of the current buffer */
		bp->bio_driver1 = newbuf;
		bp->bio_driver2 = alignbuf;

		/* Copy the data for a write */
		if (bp->bio_cmd == BIO_WRITE)
			bcopy(bp->bio_data, alignbuf, bp->bio_bcount);
	} else
		alignbuf = bp->bio_data;

	/* Fill out a communications ring structure. */
	ring_req	         = RING_GET_REQUEST(&info->ring,
						    info->ring.req_prod_pvt);
	id		         = GET_ID_FROM_FREELIST(info);
	info->shadow[id].request = (unsigned long)bp;

	ring_req->id	         = id;
	ring_req->operation	 = (bp->bio_cmd == BIO_READ) ? BLKIF_OP_READ :
		BLKIF_OP_WRITE;

	ring_req->sector_number= (blkif_sector_t)bp->bio_pblkno;
	ring_req->handle	  = (blkif_vdev_t)(uintptr_t)sc->xb_disk;

	ring_req->nr_segments  = 0;	/* XXX not doing scatter/gather since buffer
					 * chaining is not supported.
					 */

	/* First and last 512-byte sectors of the transfer within its page. */
	buffer_ma = vtomach(alignbuf);
	fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
	lsect = fsect + (bp->bio_bcount >> XBD_SECTOR_SHFT) - 1;
	/* install a grant reference. */
	ref = gnttab_claim_grant_reference(&gref_head);
	KASSERT( ref != -ENOSPC, ("grant_reference failed") );

	gnttab_grant_foreign_access_ref(
		ref,
		info->xbdev->otherend_id,
		buffer_ma >> PAGE_SHIFT,
		ring_req->operation & 1 ); /* read-only grant for writes:
					    * the backend only reads our
					    * buffer (BLKIF_OP_WRITE == 1). */
	info->shadow[id].frame[ring_req->nr_segments] =
		buffer_ma >> PAGE_SHIFT;

	ring_req->seg[ring_req->nr_segments] =
		(struct blkif_request_segment) {
			.gref       = ref,
			.first_sect = fsect,
			.last_sect  = lsect };

	ring_req->nr_segments++;
	KASSERT((buffer_ma & (XBD_SECTOR_SIZE-1)) == 0,
		("XEN buffer must be sector aligned"));
	KASSERT(lsect <= 7,
		("XEN disk driver data cannot cross a page boundary"));

	buffer_ma &= ~PAGE_MASK;

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	gnttab_free_grant_references(gref_head);

	return 0;
}


/*
 * Dequeue buffers and place them in the shared communication ring.
 * Return when no more requests can be accepted or all buffers have
 * been queued.
 *
 * Signal XEN once the ring has been filled out.
 */
static void
xb_startio(struct xb_softc *sc)
{
	struct bio		*bp;
	int			queued = 0;
	struct blkfront_info *info = sc->xb_info;
	DPRINTK("");

	mtx_assert(&blkif_io_lock, MA_OWNED);

	while ((bp = bioq_takefirst(&sc->xb_bioq)) != NULL) {

		if (RING_FULL(&info->ring))
			goto wait;

		if (blkif_queue_request(bp)) {
		wait:
			bioq_insert_head(&sc->xb_bioq, bp);
			break;
		}
		queued++;
	}

	if (queued != 0)
		flush_requests(sc->xb_info);
}

static void
blkif_int(void *xsc)
{
	struct xb_softc *sc = NULL;
	struct bio *bp;
	blkif_response_t *bret;
	RING_IDX i, rp;
	struct blkfront_info *info = xsc;
	DPRINTK("");

	TRACE_ENTER;

	mtx_lock(&blkif_io_lock);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		mtx_unlock(&blkif_io_lock);
		return;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id   = bret->id;
		bp   = (struct bio *)info->shadow[id].request;

		blkif_completion(&info->shadow[id]);

		ADD_ID_TO_FREELIST(info, id);

		switch (bret->operation) {
		case BLKIF_OP_READ:
			/* had an unaligned buffer that needs to be copied */
			if (bp->bio_driver1)
				bcopy(bp->bio_driver2, bp->bio_data, bp->bio_bcount);
			/* FALLTHROUGH */
		case BLKIF_OP_WRITE:

			/* free the copy buffer */
			if (bp->bio_driver1) {
				free(bp->bio_driver1, M_DEVBUF);
				bp->bio_driver1 = NULL;
			}

			if (unlikely(bret->status != BLKIF_RSP_OKAY)) {
				printf("Bad return from blkdev data request: %x\n",
				       bret->status);
				bp->bio_flags |= BIO_ERROR;
			}

			sc = (struct xb_softc *)bp->bio_disk->d_drv1;

			if (bp->bio_flags & BIO_ERROR)
				bp->bio_error = EIO;
			else
				bp->bio_resid = 0;

			biodone(bp);
			break;
		default:
			panic("received invalid operation");
			break;
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else {
		info->ring.sring->rsp_event = i + 1;
	}

	kick_pending_request_queues(info);

	mtx_unlock(&blkif_io_lock);
}

static void
blkif_free(struct blkfront_info *info, int suspend)
{
	/* Prevent new requests being issued until we fix things up. */
	mtx_lock(&blkif_io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	mtx_unlock(&blkif_io_lock);

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->irq = 0;
}

static void
blkif_completion(struct blk_shadow *s)
{
	int i;

	for (i = 0; i < s->req.nr_segments; i++)
		gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
}
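/*
 * Completion only revokes the foreign-access grants that were handed to
 * the backend for each segment of the request; the shadow entry itself
 * is recycled by the caller through ADD_ID_TO_FREELIST().
 */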

static void
blkif_recover(struct blkfront_info *info)
{
	int i, j;
	blkif_request_t *req;
	struct blk_shadow *copy;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = (struct blk_shadow *)malloc(sizeof(info->shadow), M_DEVBUF, M_NOWAIT|M_ZERO);
	PANIC_IF(copy == NULL);
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (copy[i].request == 0)
			continue;

		/* Grab a request slot and copy shadow state into it. */
		req = RING_GET_REQUEST(
			&info->ring, info->ring.req_prod_pvt);
		*req = copy[i].req;

		/* We get a new request id, and must reset the shadow state. */
		req->id = GET_ID_FROM_FREELIST(info);
		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

		/* Rewrite any grant references invalidated by suspend/resume. */
		for (j = 0; j < req->nr_segments; j++)
			gnttab_grant_foreign_access_ref(
				req->seg[j].gref,
				info->xbdev->otherend_id,
				pfn_to_mfn(info->shadow[req->id].frame[j]),
				0 /* assume not readonly */);

		info->shadow[req->id].req = *req;

		info->ring.req_prod_pvt++;
	}

	free(copy, M_DEVBUF);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Now safe for us to use the shared ring */
	mtx_lock(&blkif_io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	mtx_unlock(&blkif_io_lock);

	/* Send off requeued requests */
	mtx_lock(&blkif_io_lock);
	flush_requests(info);

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);
	mtx_unlock(&blkif_io_lock);
}

static int
blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev_driver_data;

	return info->is_ready;
}

static struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};


static struct xenbus_driver blkfront = {
	.name             = "vbd",
	.ids              = blkfront_ids,
	.probe            = blkfront_probe,
	.remove           = blkfront_remove,
	.resume           = blkfront_resume,
	.otherend_changed = backend_changed,
	.is_ready         = blkfront_is_ready,
};


static void
xenbus_init(void)
{
	xenbus_register_frontend(&blkfront);
}

MTX_SYSINIT(ioreq, &blkif_io_lock, "BIO LOCK", MTX_NOWITNESS); /* XXX how does one enroll a lock? */
SYSINIT(xbdev, SI_SUB_PSEUDO, SI_ORDER_SECOND, xenbus_init, NULL);


/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 8
 * tab-width: 4
 * indent-tabs-mode: t
 * End:
 */