// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Ezequiel Garcia
 * Copyright (c) 2011 Free Electrons
 *
 * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
 *   Copyright (c) International Business Machines Corp., 2006
 *   Copyright (c) Nokia Corporation, 2007
 *   Authors: Artem Bityutskiy, Frank Haverkamp
 */

/*
 * Read-only block devices on top of UBI volumes
 *
 * A simple implementation to allow a block device to be layered on top of a
 * UBI volume. The implementation is provided by creating a static 1-to-1
 * mapping between the block device and the UBI volume.
 *
 * The addressed byte is obtained from the addressed block sector, which is
 * mapped linearly into the corresponding LEB:
 *
 *   LEB number = addressed byte / LEB size
 *
 * This feature is compiled into the UBI core, and adds a 'block' parameter
 * to allow early creation of block devices on top of UBI volumes. Runtime
 * block creation/removal for UBI volumes is provided through two UBI ioctls:
 * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
 */
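
/*
 * For example, with an illustrative usable LEB size of 126 KiB (129024
 * bytes), block sector 2048 addresses byte 2048 * 512 = 1048576, which
 * falls in LEB 8 at byte offset 16384 within that LEB.
 */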

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <linux/idr.h>
#include <asm/div64.h>

#include "ubi-media.h"
#include "ubi.h"

/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32

/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63

/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2

struct ubiblock_param {
	int ubi_num;
	int vol_id;
	char name[UBIBLOCK_PARAM_LEN+1];
};

struct ubiblock_pdu {
	struct ubi_sgl usgl;
};

/* Number of elements set in the @ubiblock_param array */
static int ubiblock_devs;

/* UBI block device specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES];

struct ubiblock {
	struct ubi_volume_desc *desc;
	int ubi_num;
	int vol_id;
	int refcnt;
	int leb_size;

	struct gendisk *gd;
	struct request_queue *rq;

	struct mutex dev_mutex;
	struct list_head list;
	struct blk_mq_tag_set tag_set;
};

/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_IDR(ubiblock_minor_idr);
/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
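/*
 * Lock ordering: devices_mutex is always taken before a device's dev_mutex
 * (see ubiblock_remove() and ubiblock_resize()).
 */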
static int ubiblock_major;

static int __init ubiblock_set_param(const char *val,
				     const struct kernel_param *kp)
{
	int i, ret;
	size_t len;
	struct ubiblock_param *param;
	char buf[UBIBLOCK_PARAM_LEN];
	char *pbuf = &buf[0];
	char *tokens[UBIBLOCK_PARAM_COUNT];

	if (!val)
		return -EINVAL;

	len = strnlen(val, UBIBLOCK_PARAM_LEN);
	if (len == 0) {
		pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
		return 0;
	}

	if (len == UBIBLOCK_PARAM_LEN) {
		pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
		       val, UBIBLOCK_PARAM_LEN);
		return -EINVAL;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	/* Don't overflow the parameter array if too many 'block=' are given */
	if (ubiblock_devs == UBIBLOCK_MAX_DEVICES) {
		pr_err("UBI: block: 'block=' parameter ignored, only %d devices are supported\n",
		       UBIBLOCK_MAX_DEVICES);
		return -EINVAL;
	}

	param = &ubiblock_param[ubiblock_devs];
	if (tokens[1]) {
		/* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
		ret = kstrtoint(tokens[0], 10, &param->ubi_num);
		if (ret < 0)
			return -EINVAL;

		/* Second param can be a number or a name */
		ret = kstrtoint(tokens[1], 10, &param->vol_id);
		if (ret < 0) {
			param->vol_id = -1;
			strcpy(param->name, tokens[1]);
		}

	} else {
		/* One parameter: must be device path */
		strcpy(param->name, tokens[0]);
		param->ubi_num = -1;
		param->vol_id = -1;
	}

	ubiblock_devs++;

	return 0;
}

static const struct kernel_param_ops ubiblock_param_ops = {
	.set    = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
			"Multiple \"block\" parameters may be specified.\n"
			"UBI volumes may be specified by their number, name, or path to the device node.\n"
			"Examples\n"
			"Using the UBI volume path:\n"
			"ubi.block=/dev/ubi0_0\n"
			"Using the UBI device, and the volume name:\n"
			"ubi.block=0,rootfs\n"
			"Using both UBI device number and UBI volume number:\n"
			"ubi.block=0,0\n");

static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
{
	struct ubiblock *dev;

	list_for_each_entry(dev, &ubiblock_devices, list)
		if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
			return dev;
	return NULL;
}

static blk_status_t ubiblock_read(struct request *req)
{
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
	struct ubiblock *dev = req->q->queuedata;
	u64 pos = blk_rq_pos(req) << 9;
	int to_read = blk_rq_bytes(req);
	int bytes_left = to_read;
	/* Get LEB:offset address to read from */
	int offset = do_div(pos, dev->leb_size);
	int leb = pos;
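	/*
	 * do_div() stores the quotient back in pos and returns the remainder,
	 * so leb now holds the LEB number and offset the byte offset within
	 * that LEB.
	 */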
	struct req_iterator iter;
	struct bio_vec bvec;
	int ret;

	blk_mq_start_request(req);

	/*
	 * It is safe to ignore the return value of blk_rq_map_sg() because
	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
	 * and ubi_read_sg() will check that limit.
	 */
	ubi_sgl_init(&pdu->usgl);
	blk_rq_map_sg(req->q, req, pdu->usgl.sg);

	while (bytes_left) {
		/*
		 * We can only read one LEB at a time. Therefore if the read
		 * length is larger than one LEB size, we split the operation.
		 */
		if (offset + to_read > dev->leb_size)
			to_read = dev->leb_size - offset;

		ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
		if (ret < 0)
			break;

		bytes_left -= to_read;
		to_read = bytes_left;
		leb += 1;
		offset = 0;
	}

	rq_for_each_segment(bvec, req, iter)
		flush_dcache_page(bvec.bv_page);

	blk_mq_end_request(req, errno_to_blk_status(ret));

	return BLK_STS_OK;
}

static int ubiblock_open(struct gendisk *disk, blk_mode_t mode)
{
	struct ubiblock *dev = disk->private_data;
	int ret;

	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		/*
		 * The volume is already open, just increase the reference
		 * counter.
		 */
		goto out_done;
	}

	/*
	 * We want users to be aware they should only mount us as read-only.
	 * It's just a paranoid check, as write requests will get rejected
	 * in any case.
	 */
	if (mode & BLK_OPEN_WRITE) {
		ret = -EROFS;
		goto out_unlock;
	}
	dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
	if (IS_ERR(dev->desc)) {
		dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
			dev->ubi_num, dev->vol_id);
		ret = PTR_ERR(dev->desc);
		dev->desc = NULL;
		goto out_unlock;
	}

out_done:
	dev->refcnt++;
	mutex_unlock(&dev->dev_mutex);
	return 0;

out_unlock:
	mutex_unlock(&dev->dev_mutex);
	return ret;
}

static void ubiblock_release(struct gendisk *gd)
{
	struct ubiblock *dev = gd->private_data;

	mutex_lock(&dev->dev_mutex);
	dev->refcnt--;
	if (dev->refcnt == 0) {
		ubi_close_volume(dev->desc);
		dev->desc = NULL;
	}
	mutex_unlock(&dev->dev_mutex);
}

static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* Some tools might require this information */
	geo->heads = 1;
	geo->cylinders = 1;
	geo->sectors = get_capacity(bdev->bd_disk);
	geo->start = 0;
	return 0;
}

static const struct block_device_operations ubiblock_ops = {
	.owner = THIS_MODULE,
	.open = ubiblock_open,
	.release = ubiblock_release,
	.getgeo	= ubiblock_getgeo,
};

static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
			     const struct blk_mq_queue_data *bd)
{
	switch (req_op(bd->rq)) {
	case REQ_OP_READ:
		return ubiblock_read(bd->rq);
	default:
		return BLK_STS_IOERR;
	}
}

static int ubiblock_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

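	/*
	 * Give this request's PDU a scatterlist with room for up to
	 * UBI_MAX_SG_COUNT entries; ubiblock_read() fills it via
	 * blk_rq_map_sg().
	 */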
	sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
	return 0;
}

static const struct blk_mq_ops ubiblock_mq_ops = {
	.queue_rq       = ubiblock_queue_rq,
	.init_request	= ubiblock_init_request,
};

static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
{
	u64 size = vi->used_bytes >> 9;

	if (vi->used_bytes % 512) {
		if (vi->vol_type == UBI_DYNAMIC_VOLUME)
			pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
				vi->used_bytes - (size << 9));
		else
			pr_info("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
				vi->used_bytes - (size << 9));
	}

	if ((sector_t)size != size)
		return -EFBIG;

	*disk_capacity = size;

	return 0;
}

int ubiblock_create(struct ubi_volume_info *vi)
{
	struct queue_limits lim = {
		.max_segments		= UBI_MAX_SG_COUNT,
	};
	struct ubiblock *dev;
	struct gendisk *gd;
	u64 disk_capacity;
	int ret;

	ret = calc_disk_capacity(vi, &disk_capacity);
	if (ret)
		return ret;

	/* Check that the volume isn't already handled */
	mutex_lock(&devices_mutex);
	if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	mutex_init(&dev->dev_mutex);

	dev->ubi_num = vi->ubi_num;
	dev->vol_id = vi->vol_id;
	dev->leb_size = vi->usable_leb_size;

	dev->tag_set.ops = &ubiblock_mq_ops;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
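	/*
	 * ubi_read_sg() may sleep, so reads are issued from blocking context;
	 * BLK_MQ_F_BLOCKING tells blk-mq that ->queue_rq() is allowed to
	 * block.
	 */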
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
	dev->tag_set.driver_data = dev;
	dev->tag_set.nr_hw_queues = 1;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret) {
		/* dev->gd is not allocated yet, so disk_to_dev() can't be used here */
		pr_err("UBI: block: ubi%d_%d: blk_mq_alloc_tag_set failed, err=%d\n",
		       dev->ubi_num, dev->vol_id, ret);
		goto out_free_dev;
	}

	/* Initialize the gendisk of this ubiblock device */
	gd = blk_mq_alloc_disk(&dev->tag_set, &lim, dev);
	if (IS_ERR(gd)) {
		ret = PTR_ERR(gd);
		goto out_free_tags;
	}

	gd->fops = &ubiblock_ops;
	gd->major = ubiblock_major;
	gd->minors = 1;
	gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
	if (gd->first_minor < 0) {
		dev_err(disk_to_dev(gd),
			"block: dynamic minor allocation failed");
		ret = -ENODEV;
		goto out_cleanup_disk;
	}
	gd->flags |= GENHD_FL_NO_PART;
	gd->private_data = dev;
	sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
	set_capacity(gd, disk_capacity);
	dev->gd = gd;

	dev->rq = gd->queue;

	list_add_tail(&dev->list, &ubiblock_devices);

	/* Must be the last step: anyone can call file ops from now on */
	ret = device_add_disk(vi->dev, dev->gd, NULL);
	if (ret)
		goto out_remove_minor;

	dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
		 dev->ubi_num, dev->vol_id, vi->name);
	mutex_unlock(&devices_mutex);
	return 0;

out_remove_minor:
	list_del(&dev->list);
	idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_cleanup_disk:
	put_disk(gd);
out_free_tags:
	blk_mq_free_tag_set(&dev->tag_set);
out_free_dev:
	kfree(dev);
out_unlock:
	mutex_unlock(&devices_mutex);

	return ret;
}

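/*
 * The caller must hold devices_mutex and is responsible for removing @dev
 * from ubiblock_devices and freeing it afterwards.
 */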
static void ubiblock_cleanup(struct ubiblock *dev)
{
	int id = dev->gd->first_minor;

	/* Stop new requests from arriving */
	del_gendisk(dev->gd);
	/* Finally destroy the blk queue */
	dev_info(disk_to_dev(dev->gd), "released");
	put_disk(dev->gd);
	blk_mq_free_tag_set(&dev->tag_set);
	idr_remove(&ubiblock_minor_idr, id);
}

int ubiblock_remove(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	int ret;

	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		ret = -ENODEV;
		goto out_unlock;
	}

	/* Found a device, let's lock it so we can check if it's busy */
	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		ret = -EBUSY;
		goto out_unlock_dev;
	}

	/* Remove from device list */
	list_del(&dev->list);
	ubiblock_cleanup(dev);
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);

	kfree(dev);
	return 0;

out_unlock_dev:
	mutex_unlock(&dev->dev_mutex);
out_unlock:
	mutex_unlock(&devices_mutex);
	return ret;
}

static int ubiblock_resize(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	u64 disk_capacity;
	int ret;

	/*
	 * Need to lock the device list until we stop using the device,
	 * otherwise the device struct might get released in
	 * 'ubiblock_remove()'.
	 */
	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}

	ret = calc_disk_capacity(vi, &disk_capacity);
	if (ret) {
		mutex_unlock(&devices_mutex);
		if (ret == -EFBIG) {
			dev_warn(disk_to_dev(dev->gd),
				 "the volume is too big (%d LEBs), cannot resize",
				 vi->size);
		}
		return ret;
	}

	mutex_lock(&dev->dev_mutex);

	if (get_capacity(dev->gd) != disk_capacity) {
		set_capacity(dev->gd, disk_capacity);
		dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
			 vi->used_bytes);
	}
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);
	return 0;
}

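/*
 * Check whether the volume @vi matches one 'block=' specification: a volume
 * device node path, a UBI device number plus a volume name, or a UBI device
 * number plus a volume ID.
 */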
static bool
match_volume_desc(struct ubi_volume_info *vi, const char *name, int ubi_num, int vol_id)
{
	int err, len, cur_ubi_num, cur_vol_id;

	if (ubi_num == -1) {
		/* No ubi num, name must be a vol device path */
		err = ubi_get_num_by_path(name, &cur_ubi_num, &cur_vol_id);
		if (err || vi->ubi_num != cur_ubi_num || vi->vol_id != cur_vol_id)
			return false;

		return true;
	}

	if (vol_id == -1) {
		/* Got ubi_num, but no vol_id, name must be volume name */
		if (vi->ubi_num != ubi_num)
			return false;

		len = strnlen(name, UBI_VOL_NAME_MAX + 1);
		if (len < 1 || vi->name_len != len)
			return false;

		if (strcmp(name, vi->name))
			return false;

		return true;
	}

	if (vi->ubi_num != ubi_num)
		return false;

	if (vi->vol_id != vol_id)
		return false;

	return true;
}

static void
ubiblock_create_from_param(struct ubi_volume_info *vi)
{
	int i, ret = 0;
	struct ubiblock_param *p;

	/*
	 * Iterate over ubiblock cmdline parameters. If a parameter matches the
	 * newly added volume, create the ubiblock device for it.
	 */
	for (i = 0; i < ubiblock_devs; i++) {
		p = &ubiblock_param[i];

		if (!match_volume_desc(vi, p->name, p->ubi_num, p->vol_id))
			continue;

		ret = ubiblock_create(vi);
		if (ret) {
			pr_err("UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
			       vi->name, p->ubi_num, p->vol_id, ret);
		}
		break;
	}
}

static int ubiblock_notify(struct notifier_block *nb,
			 unsigned long notification_type, void *ns_ptr)
{
	struct ubi_notification *nt = ns_ptr;

	switch (notification_type) {
	case UBI_VOLUME_ADDED:
		ubiblock_create_from_param(&nt->vi);
		break;
	case UBI_VOLUME_REMOVED:
		ubiblock_remove(&nt->vi);
		break;
	case UBI_VOLUME_RESIZED:
		ubiblock_resize(&nt->vi);
		break;
	case UBI_VOLUME_UPDATED:
		/*
		 * If the volume is static, a content update might mean the
		 * size (i.e. used_bytes) was also changed.
		 */
		if (nt->vi.vol_type == UBI_STATIC_VOLUME)
			ubiblock_resize(&nt->vi);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block ubiblock_notifier = {
	.notifier_call = ubiblock_notify,
};

static void ubiblock_remove_all(void)
{
	struct ubiblock *next;
	struct ubiblock *dev;

	mutex_lock(&devices_mutex);
	list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
		/* The module is being forcefully removed */
		WARN_ON(dev->desc);
		/* Remove from device list */
		list_del(&dev->list);
		ubiblock_cleanup(dev);
		kfree(dev);
	}
	mutex_unlock(&devices_mutex);
}

int __init ubiblock_init(void)
{
	int ret;

	ubiblock_major = register_blkdev(0, "ubiblock");
	if (ubiblock_major < 0)
		return ubiblock_major;

	ret = ubi_register_volume_notifier(&ubiblock_notifier, 0);
	if (ret)
		goto err_unreg;
	return 0;

err_unreg:
	unregister_blkdev(ubiblock_major, "ubiblock");
	ubiblock_remove_all();
	return ret;
}

void __exit ubiblock_exit(void)
{
	ubi_unregister_volume_notifier(&ubiblock_notifier);
	ubiblock_remove_all();
	unregister_blkdev(ubiblock_major, "ubiblock");
}