// SPDX-License-Identifier: GPL-2.0-or-later
/*  Xenbus code for blkif backend
    Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
    Copyright (C) 2005 XenSource Ltd


*/

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"

/* The maximum length of the 'ring-ref%u' node name on the XenBus. */
#define RINGREF_NAME_LEN (20)

struct backend_info {
	struct xenbus_device	*dev;
	struct xen_blkif	*blkif;
	struct xenbus_watch	backend_watch;
	unsigned		major;
	unsigned		minor;
	char			*mode;
};

static struct kmem_cache *xen_blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char *,
			    const char *);
static void xen_blkif_free(struct xen_blkif *blkif);
static void xen_vbd_free(struct xen_vbd *vbd);

struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
	return be->dev;
}

/*
 * The last request could free the device from softirq context and
 * xen_blkif_free() can sleep.
 */
static void xen_blkif_deferred_free(struct work_struct *work)
{
	struct xen_blkif *blkif;

	blkif = container_of(work, struct xen_blkif, free_work);
	xen_blkif_free(blkif);
}

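/*
 * Build the name used for this backend's xenblkd threads, in the form
 * "<domid>.<devname>".  The buffer must be at least TASK_COMM_LEN bytes,
 * since the result ends up as a kernel thread name.
 */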
static int blkback_name(struct xen_blkif *blkif, char *buf)
{
	char *devpath, *devname;
	struct xenbus_device *dev = blkif->be->dev;

	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
	if (IS_ERR(devpath))
		return PTR_ERR(devpath);

	devname = strstr(devpath, "/dev/");
	if (devname != NULL)
		devname += strlen("/dev/");
	else
		devname  = devpath;

	snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname);
	kfree(devpath);

	return 0;
}

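/*
 * Try to bring the backend fully online: once all rings are mapped and the
 * backing device is open, switch the xenbus state to Connected, flush and
 * invalidate the backing device's page cache, and spawn one xenblkd thread
 * per ring.  Safe to call repeatedly; it returns early if anything is not
 * yet ready or if we are already connected.
 */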
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
	int err;
	char name[TASK_COMM_LEN];
	struct xen_blkif_ring *ring;
	int i;

	/* Not ready to connect? */
	if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_file)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail to. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	err = blkback_name(blkif, name);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
		return;
	}

	err = sync_blockdev(file_bdev(blkif->vbd.bdev_file));
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "block flush");
		return;
	}
	invalidate_inode_pages2(blkif->vbd.bdev_file->f_mapping);

	for (i = 0; i < blkif->nr_rings; i++) {
		ring = &blkif->rings[i];
		ring->xenblkd = kthread_run(xen_blkif_schedule, ring, "%s-%d", name, i);
		if (IS_ERR(ring->xenblkd)) {
			err = PTR_ERR(ring->xenblkd);
			ring->xenblkd = NULL;
			xenbus_dev_fatal(blkif->be->dev, err,
					"start %s-%d xenblkd", name, i);
			goto out;
		}
	}
	return;

out:
	while (--i >= 0) {
		ring = &blkif->rings[i];
		kthread_stop(ring->xenblkd);
	}
	return;
}

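/*
 * Allocate and initialise per-ring state (locks, wait queues, the free
 * page cache and the persistent-grant purge work) for blkif->nr_rings
 * rings.  The rings are actually mapped later, in xen_blkif_map().
 */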
static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
{
	unsigned int r;

	blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),
			       GFP_KERNEL);
	if (!blkif->rings)
		return -ENOMEM;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];

		spin_lock_init(&ring->blk_ring_lock);
		init_waitqueue_head(&ring->wq);
		INIT_LIST_HEAD(&ring->pending_free);
		INIT_LIST_HEAD(&ring->persistent_purge_list);
		INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
		gnttab_page_cache_init(&ring->free_pages);

		spin_lock_init(&ring->pending_free_lock);
		init_waitqueue_head(&ring->pending_free_wq);
		init_waitqueue_head(&ring->shutdown_wq);
		ring->blkif = blkif;
		ring->st_print = jiffies;
		ring->active = true;
	}

	return 0;
}

/* Enable the persistent grants feature. */
static bool feature_persistent = true;
module_param(feature_persistent, bool, 0644);
MODULE_PARM_DESC(feature_persistent, "Enables the persistent grants feature");

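/*
 * Allocate a zeroed xen_blkif for the given frontend domain.  Ring and VBD
 * setup happen later, driven by the xenbus callbacks below.
 */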
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
	struct xen_blkif *blkif;

	BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
	if (!blkif)
		return ERR_PTR(-ENOMEM);

	blkif->domid = domid;
	atomic_set(&blkif->refcnt, 1);
	init_completion(&blkif->drain_complete);

	/*
	 * Because freeing back to the cache may be deferred, it is not
	 * safe to unload the module (and hence destroy the cache) until
	 * this has completed. To prevent premature unloading, take an
	 * extra module reference here and release only when the object
	 * has been freed back to the cache.
	 */
	__module_get(THIS_MODULE);
	INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);

	return blkif;
}

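/*
 * Map the shared ring pages granted by the frontend (nr_grefs grants) into
 * the backend, attach the protocol-specific back ring, sanity-check the
 * producer/consumer indices and bind the interdomain event channel to a
 * lateeoi IRQ handler.
 */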
static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
			 unsigned int nr_grefs, unsigned int evtchn)
{
	int err;
	struct xen_blkif *blkif = ring->blkif;
	const struct blkif_common_sring *sring_common;
	RING_IDX rsp_prod, req_prod;
	unsigned int size;

	/* Already connected through? */
	if (ring->irq)
		return 0;

	err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
				     &ring->blk_ring);
	if (err < 0)
		return err;

	sring_common = (struct blkif_common_sring *)ring->blk_ring;
	rsp_prod = READ_ONCE(sring_common->rsp_prod);
	req_prod = READ_ONCE(sring_common->req_prod);

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring_native =
			(struct blkif_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.native, sring_native,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32 =
			(struct blkif_x86_32_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.x86_32, sring_x86_32,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64 =
			(struct blkif_x86_64_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.x86_64, sring_x86_64,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	default:
		BUG();
	}

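	/*
	 * Sanity check: a buggy or malicious frontend could claim more
	 * outstanding requests than fit in the ring.  Refuse to connect
	 * rather than risk indexing outside it.
	 */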
	err = -EIO;
	if (req_prod - rsp_prod > size)
		goto fail;

	err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->be->dev,
			evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
	if (err < 0)
		goto fail;
	ring->irq = err;

	return 0;

fail:
	xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
	ring->blk_rings.common.sring = NULL;
	return err;
}

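/*
 * Tear down all rings: stop the xenblkd threads, unbind the event
 * channels, unmap the shared rings and free cached grants and pending
 * requests.  Returns -EBUSY (leaving the affected rings active) if any
 * ring still has I/O in flight, so the caller can retry once it drains.
 */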
static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
	struct pending_req *req, *n;
	unsigned int j, r;
	bool busy = false;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];
		unsigned int i = 0;

		if (!ring->active)
			continue;

		if (ring->xenblkd) {
			kthread_stop(ring->xenblkd);
			ring->xenblkd = NULL;
			wake_up(&ring->shutdown_wq);
		}

		/* The above kthread_stop() guarantees that at this point we
		 * don't have any discard_io or other_io requests. So, checking
		 * for inflight IO is enough.
		 */
		if (atomic_read(&ring->inflight) > 0) {
			busy = true;
			continue;
		}

		if (ring->irq) {
			unbind_from_irqhandler(ring->irq, ring);
			ring->irq = 0;
		}

		if (ring->blk_rings.common.sring) {
			xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
			ring->blk_rings.common.sring = NULL;
		}

		/* Remove all persistent grants and the cache of ballooned pages. */
		xen_blkbk_free_caches(ring);

		/* Check that there is no request in use */
		list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
			list_del(&req->free_list);

			for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
				kfree(req->segments[j]);

			for (j = 0; j < MAX_INDIRECT_PAGES; j++)
				kfree(req->indirect_pages[j]);

			kfree(req);
			i++;
		}

		BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
		BUG_ON(!list_empty(&ring->persistent_purge_list));
		BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
		BUG_ON(ring->free_pages.num_pages != 0);
		BUG_ON(ring->persistent_gnt_c != 0);
		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
		ring->active = false;
	}
	if (busy)
		return -EBUSY;

	blkif->nr_ring_pages = 0;
	/*
	 * blkif->rings was allocated in connect_ring, so free it here.
	 */
	kfree(blkif->rings);
	blkif->rings = NULL;
	blkif->nr_rings = 0;

	return 0;
}

static void xen_blkif_free(struct xen_blkif *blkif)
{
	WARN_ON(xen_blkif_disconnect(blkif));
	xen_vbd_free(&blkif->vbd);
	kfree(blkif->be->mode);
	kfree(blkif->be);

	/* Make sure everything is drained before shutting down */
	kmem_cache_free(xen_blkif_cachep, blkif);
	module_put(THIS_MODULE);
}

int __init xen_blkif_interface_init(void)
{
	xen_blkif_cachep = kmem_cache_create("blkif_cache",
					     sizeof(struct xen_blkif),
					     0, 0, NULL);
	if (!xen_blkif_cachep)
		return -ENOMEM;

	return 0;
}

void xen_blkif_interface_fini(void)
{
	kmem_cache_destroy(xen_blkif_cachep);
	xen_blkif_cachep = NULL;
}

/*
 *  sysfs interface for VBD I/O requests
 */

#define VBD_SHOW_ALLRING(name, format)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
		struct xen_blkif *blkif = be->blkif;			\
		unsigned int i;						\
		unsigned long long result = 0;				\
									\
		if (!blkif->rings)					\
			goto out;					\
									\
		for (i = 0; i < blkif->nr_rings; i++) {			\
			struct xen_blkif_ring *ring = &blkif->rings[i];	\
									\
			result += ring->st_##name;			\
		}							\
									\
out:									\
		return sprintf(buf, format, result);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

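/*
 * Request statistics aggregated across all rings, exposed through the
 * "statistics" sysfs group: oo_req counts requests that had to wait
 * because no pending_req was free, rd/wr/f/ds_req count read, write,
 * flush and discard requests, and rd/wr_sect count sectors transferred.
 */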
VBD_SHOW_ALLRING(oo_req,  "%llu\n");
VBD_SHOW_ALLRING(rd_req,  "%llu\n");
VBD_SHOW_ALLRING(wr_req,  "%llu\n");
VBD_SHOW_ALLRING(f_req,  "%llu\n");
VBD_SHOW_ALLRING(ds_req,  "%llu\n");
VBD_SHOW_ALLRING(rd_sect, "%llu\n");
VBD_SHOW_ALLRING(wr_sect, "%llu\n");

static struct attribute *xen_vbdstat_attrs[] = {
	&dev_attr_oo_req.attr,
	&dev_attr_rd_req.attr,
	&dev_attr_wr_req.attr,
	&dev_attr_f_req.attr,
	&dev_attr_ds_req.attr,
	&dev_attr_rd_sect.attr,
	&dev_attr_wr_sect.attr,
	NULL
};

static const struct attribute_group xen_vbdstat_group = {
	.name = "statistics",
	.attrs = xen_vbdstat_attrs,
};

#define VBD_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
									\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);

static int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
	int error;

	error = device_create_file(&dev->dev, &dev_attr_physical_device);
	if (error)
		goto fail1;

	error = device_create_file(&dev->dev, &dev_attr_mode);
	if (error)
		goto fail2;

	error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
	if (error)
		goto fail3;

	return 0;

fail3:	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
	return error;
}

static void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
	device_remove_file(&dev->dev, &dev_attr_mode);
	device_remove_file(&dev->dev, &dev_attr_physical_device);
}

static void xen_vbd_free(struct xen_vbd *vbd)
{
	if (vbd->bdev_file)
		fput(vbd->bdev_file);
	vbd->bdev_file = NULL;
}

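/*
 * Open the physical device backing this virtual block device (read-only
 * when the mode demands it), record its size and type (cdrom/removable),
 * and note whether it supports cache flushes and secure discard so those
 * features can be advertised to the frontend.
 */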
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
			  unsigned major, unsigned minor, int readonly,
			  int cdrom)
{
	struct xen_vbd *vbd;
	struct file *bdev_file;

	vbd = &blkif->vbd;
	vbd->handle   = handle;
	vbd->readonly = readonly;
	vbd->type     = 0;

	vbd->pdevice  = MKDEV(major, minor);

	bdev_file = bdev_file_open_by_dev(vbd->pdevice, vbd->readonly ?
				 BLK_OPEN_READ : BLK_OPEN_WRITE, NULL, NULL);

	if (IS_ERR(bdev_file)) {
		pr_warn("xen_vbd_create: device %08x could not be opened\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev_file = bdev_file;
	if (file_bdev(vbd->bdev_file)->bd_disk == NULL) {
		pr_warn("xen_vbd_create: device %08x doesn't exist\n",
			vbd->pdevice);
		xen_vbd_free(vbd);
		return -ENOENT;
	}
	vbd->size = vbd_sz(vbd);

	if (cdrom || disk_to_cdi(file_bdev(vbd->bdev_file)->bd_disk))
		vbd->type |= VDISK_CDROM;
	if (file_bdev(vbd->bdev_file)->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

	if (bdev_write_cache(file_bdev(bdev_file)))
		vbd->flush_support = true;
	if (bdev_max_secure_erase_sectors(file_bdev(bdev_file)))
		vbd->discard_secure = true;

	pr_debug("Successful creation of handle=%04x (dom=%u)\n",
		handle, blkif->domid);
	return 0;
}

static void xen_blkbk_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (be->major || be->minor)
		xenvbd_sysfs_delif(dev);

	if (be->backend_watch.node) {
		unregister_xenbus_watch(&be->backend_watch);
		kfree(be->backend_watch.node);
		be->backend_watch.node = NULL;
	}

	dev_set_drvdata(&dev->dev, NULL);

	if (be->blkif) {
		xen_blkif_disconnect(be->blkif);

		/* Put the reference we set in xen_blkif_alloc(). */
		xen_blkif_put(be->blkif);
	}
}

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);

	return err;
}

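/*
 * Advertise discard support to the frontend unless the toolstack has
 * disabled it via the "discard-enable" node.  Granularity, alignment and
 * secure-discard capability are taken from the underlying block device.
 */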
static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	int err;
	int state = 0;
	struct block_device *bdev = file_bdev(be->blkif->vbd.bdev_file);

	if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
		return;

	if (bdev_max_discard_sectors(bdev)) {
		err = xenbus_printf(xbt, dev->nodename,
			"discard-granularity", "%u",
			bdev_discard_granularity(bdev));
		if (err) {
			dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
			return;
		}
		err = xenbus_printf(xbt, dev->nodename,
			"discard-alignment", "%u",
			bdev_discard_alignment(bdev));
		if (err) {
			dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
			return;
		}
		state = 1;
		/* Optional. */
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-secure", "%d",
				    blkif->vbd.discard_secure);
		if (err) {
			dev_warn(&dev->dev, "writing discard-secure (%d)", err);
			return;
		}
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-discard",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-barrier (%d)", err);

	return err;
}

/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures, and watch the store waiting for the hotplug scripts to tell us
 * the device's physical major and minor numbers.  Switch to InitWait.
 */
static int xen_blkbk_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);

	/* match the pr_debug in xen_blkbk_remove */
	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}
	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->blkif = xen_blkif_alloc(dev->otherend_id);
	if (IS_ERR(be->blkif)) {
		err = PTR_ERR(be->blkif);
		be->blkif = NULL;
		xenbus_dev_fatal(dev, err, "creating block interface");
		goto fail;
	}

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-max-indirect-segments", "%u",
			    MAX_INDIRECT_SEGMENTS);
	if (err)
		dev_warn(&dev->dev,
			 "writing %s/feature-max-indirect-segments (%d)",
			 dev->nodename, err);

	/* Multi-queue: advertise how many queues are supported by us. */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "multi-queue-max-queues", "%u", xenblk_max_queues);
	if (err)
		pr_warn("Error writing multi-queue-max-queues\n");

	/* setup back pointer */
	be->blkif->be = be;

	err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL,
				   backend_changed,
				   "%s/%s", dev->nodename, "physical-device");
	if (err)
		goto fail;

	err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order", "%u",
			    xen_blkif_max_ring_order);
	if (err)
		pr_warn("%s write out 'max-ring-page-order' failed\n", __func__);

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	pr_warn("%s failed\n", __func__);
	xen_blkbk_remove(dev);
	return err;
}

/*
 * Callback received when the hotplug scripts have placed the physical-device
 * node.  Read it and the mode node, and create a vbd.  If the frontend is
 * ready, connect.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char *path, const char *token)
{
	int err;
	unsigned major;
	unsigned minor;
	struct backend_info *be
		= container_of(watch, struct backend_info, backend_watch);
	struct xenbus_device *dev = be->dev;
	int cdrom = 0;
	unsigned long handle;
	char *device_type;

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
			   &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/*
		 * Since this watch will fire once immediately after it is
		 * registered, we expect this.  Ignore it, and wait for the
		 * hotplug scripts.
		 */
		return;
	}
	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}

	if (be->major | be->minor) {
		if (be->major != major || be->minor != minor)
			pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
				be->major, be->minor, major, minor);
		return;
	}

	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
	if (IS_ERR(be->mode)) {
		err = PTR_ERR(be->mode);
		be->mode = NULL;
		xenbus_dev_fatal(dev, err, "reading mode");
		return;
	}

	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
	if (!IS_ERR(device_type)) {
		cdrom = strcmp(device_type, "cdrom") == 0;
		kfree(device_type);
	}

	/* Front end dir is a number, which is used as the handle. */
	err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		return;
	}

	be->major = major;
	be->minor = minor;

	err = xen_vbd_create(be->blkif, handle, major, minor,
			     !strchr(be->mode, 'w'), cdrom);

	if (err)
		xenbus_dev_fatal(dev, err, "creating vbd structure");
	else {
		err = xenvbd_sysfs_addif(dev);
		if (err) {
			xen_vbd_free(&be->blkif->vbd);
			xenbus_dev_fatal(dev, err, "creating sysfs entries");
		}
	}

	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		be->major = 0;
		be->minor = 0;
	} else {
		/* We're potentially connected now */
		xen_update_blkif_status(be->blkif);
	}
}

/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state));

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (dev->state == XenbusStateClosed) {
			pr_info("%s: prepare for reconnect\n", dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/*
		 * Ensure we connect even when two watches fire in
		 * close succession and we miss the intermediate value
		 * of frontend_state.
		 */
		if (dev->state == XenbusStateConnected)
			break;

		/*
		 * Enforce precondition before potential leak point.
		 * xen_blkif_disconnect() is idempotent.
		 */
		err = xen_blkif_disconnect(be->blkif);
		if (err) {
			xenbus_dev_fatal(dev, err, "pending I/O");
			break;
		}

		err = connect_ring(be);
		if (err) {
			/*
			 * Clean up so that memory resources can be used by
			 * other devices.  connect_ring() has already reported
			 * the error.
			 */
			xen_blkif_disconnect(be->blkif);
			break;
		}
		xen_update_blkif_status(be->blkif);
		break;

	case XenbusStateClosing:
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xen_blkif_disconnect(be->blkif);
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		fallthrough;
		/* if not online */
	case XenbusStateUnknown:
		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

/* Once memory pressure is detected, squeeze free page pools for a while. */
static unsigned int buffer_squeeze_duration_ms = 10;
module_param_named(buffer_squeeze_duration_ms,
		buffer_squeeze_duration_ms, int, 0644);
MODULE_PARM_DESC(buffer_squeeze_duration_ms,
"Duration in ms to squeeze pages buffer when memory pressure is detected");

/*
 * Callback received when memory pressure is detected.
 */
static void reclaim_memory(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	if (!be)
		return;
	be->blkif->buffer_squeeze_end = jiffies +
		msecs_to_jiffies(buffer_squeeze_duration_ms);
}

/* ** Connection ** */

/*
 * Write the physical details regarding the block device to the store, and
 * switch to Connected state.
 */
static void connect(struct backend_info *be)
{
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = be->dev;

	pr_debug("%s %s\n", __func__, dev->otherend);

	/* Supply the information about the device the frontend needs */
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		return;
	}

	/* If we can't advertise it, that's OK. */
	xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);

	xen_blkbk_discard(xbt, be);

	xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);

	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
			be->blkif->vbd.feature_gnt_persistent_parm);
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
				 dev->nodename);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(&be->blkif->vbd));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sectors",
				 dev->nodename);
		goto abort;
	}

	/* FIXME: use a typename instead */
	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
			    be->blkif->vbd.type |
			    (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/info",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
			    (unsigned long)bdev_logical_block_size(
					file_bdev(be->blkif->vbd.bdev_file)));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
			    bdev_physical_block_size(
					file_bdev(be->blkif->vbd.bdev_file)));
	if (err)
		xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
				 dev->nodename);

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		xenbus_dev_fatal(dev, err, "ending transaction");

	err = xenbus_switch_state(dev, XenbusStateConnected);
	if (err)
		xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
				 dev->nodename);

	return;
 abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Each ring may span multiple pages, depending on "ring-page-order".
 */
static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
{
	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
	struct pending_req *req, *n;
	int err, i, j;
	struct xen_blkif *blkif = ring->blkif;
	struct xenbus_device *dev = blkif->be->dev;
	unsigned int nr_grefs, evtchn;

	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
			  &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/event-channel", dir);
		return err;
	}

	nr_grefs = blkif->nr_ring_pages;

	if (unlikely(!nr_grefs)) {
		WARN_ON(true);
		return -EINVAL;
	}

	for (i = 0; i < nr_grefs; i++) {
		char ring_ref_name[RINGREF_NAME_LEN];

		if (blkif->multi_ref)
			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
		else {
			WARN_ON(i != 0);
			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
		}

		err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
				   "%u", &ring_ref[i]);

		if (err != 1) {
			err = -EINVAL;
			xenbus_dev_fatal(dev, err, "reading %s/%s",
					 dir, ring_ref_name);
			return err;
		}
	}

	err = -ENOMEM;
	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			goto fail;
		list_add_tail(&req->free_list, &ring->pending_free);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL);
			if (!req->segments[j])
				goto fail;
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
							 GFP_KERNEL);
			if (!req->indirect_pages[j])
				goto fail;
		}
	}

	/* Map the shared frame, irq etc. */
	err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
		goto fail;
	}

	return 0;

fail:
	list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
		list_del(&req->free_list);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			if (!req->segments[j])
				break;
			kfree(req->segments[j]);
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			if (!req->indirect_pages[j])
				break;
			kfree(req->indirect_pages[j]);
		}
		kfree(req);
	}
	return err;
}

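/*
 * Negotiate ring parameters with the frontend (protocol, persistent
 * grants, number of queues and ring page order), then read and map the
 * ring references for every queue.  With multiple queues the per-ring
 * nodes live in "queue-N" subdirectories of the frontend area, e.g.
 * (illustrative layout):
 *
 *   <otherend>/queue-0/ring-ref0
 *   <otherend>/queue-0/event-channel
 *   <otherend>/queue-1/ring-ref0
 *   ...
 */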
static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	char protocol[64] = "";
	int err, i;
	char *xspath;
	size_t xspathsize;
	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
	unsigned int requested_num_queues = 0;
	unsigned int ring_page_order;

	pr_debug("%s %s\n", __func__, dev->otherend);

	blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
			   "%63s", protocol);
	if (err <= 0)
		strcpy(protocol, "unspecified, assuming default");
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
		blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
		blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
		blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
	else {
		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
		return -ENOSYS;
	}

	blkif->vbd.feature_gnt_persistent_parm = feature_persistent;
	blkif->vbd.feature_gnt_persistent =
		blkif->vbd.feature_gnt_persistent_parm &&
		xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);

	blkif->vbd.overflow_max_grants = 0;

	/*
	 * Read the number of hardware queues from the frontend.
	 */
	requested_num_queues = xenbus_read_unsigned(dev->otherend,
						    "multi-queue-num-queues",
						    1);
	if (requested_num_queues > xenblk_max_queues
	    || requested_num_queues == 0) {
		/* Buggy or malicious guest. */
		xenbus_dev_fatal(dev, err,
				"guest requested %u queues, exceeding the maximum of %u.",
				requested_num_queues, xenblk_max_queues);
		return -ENOSYS;
	}
	blkif->nr_rings = requested_num_queues;
	if (xen_blkif_alloc_rings(blkif))
		return -ENOMEM;

	pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
		 blkif->nr_rings, blkif->blk_protocol, protocol,
		 blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
			   &ring_page_order);
	if (err != 1) {
		blkif->nr_ring_pages = 1;
		blkif->multi_ref = false;
	} else if (ring_page_order <= xen_blkif_max_ring_order) {
		blkif->nr_ring_pages = 1 << ring_page_order;
		blkif->multi_ref = true;
	} else {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err,
				 "requested ring page order %d exceeds max: %d",
				 ring_page_order,
				 xen_blkif_max_ring_order);
		return err;
	}

	if (blkif->nr_rings == 1)
		return read_per_ring_refs(&blkif->rings[0], dev->otherend);
	else {
		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
		xspath = kmalloc(xspathsize, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM, "reading ring references");
			return -ENOMEM;
		}

		for (i = 0; i < blkif->nr_rings; i++) {
			memset(xspath, 0, xspathsize);
			snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
			err = read_per_ring_refs(&blkif->rings[i], xspath);
			if (err) {
				kfree(xspath);
				return err;
			}
		}
		kfree(xspath);
	}
	return 0;
}

static const struct xenbus_device_id xen_blkbk_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver xen_blkbk_driver = {
	.ids  = xen_blkbk_ids,
	.probe = xen_blkbk_probe,
	.remove = xen_blkbk_remove,
	.otherend_changed = frontend_changed,
	.allow_rebind = true,
	.reclaim_memory = reclaim_memory,
};

int xen_blkif_xenbus_init(void)
{
	return xenbus_register_backend(&xen_blkbk_driver);
}

void xen_blkif_xenbus_fini(void)
{
	xenbus_unregister_driver(&xen_blkbk_driver);
}