/*
 * Copyright (c) 2014 Ezequiel Garcia
 * Copyright (c) 2011 Free Electrons
 *
 * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
 *   Copyright (c) International Business Machines Corp., 2006
 *   Copyright (c) Nokia Corporation, 2007
 *   Authors: Artem Bityutskiy, Frank Haverkamp
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 */

/*
 * Read-only block devices on top of UBI volumes
 *
 * A simple implementation to allow a block device to be layered on top of a
 * UBI volume. The implementation is provided by creating a static 1-to-1
 * mapping between the block device and the UBI volume.
 *
 * The addressed byte is obtained from the addressed block sector, which is
 * mapped linearly into the corresponding LEB:
 *
 *   LEB number = addressed byte / LEB size
 *   Offset within the LEB = addressed byte mod LEB size
 *
 * This feature is compiled into the UBI core, and adds a 'block' parameter
 * to allow early creation of block devices on top of UBI volumes. Runtime
 * block creation/removal for UBI volumes is provided through two UBI ioctls:
 * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
 */
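
/*
 * Worked example (a sketch; the 126976-byte usable LEB size is an
 * assumption, typical of a 128 KiB PEB with 2 KiB pages): a request for
 * byte 300000 maps to LEB 300000 / 126976 = 2, at offset
 * 300000 mod 126976 = 46048 within that LEB.
 */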

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <asm/div64.h>

#include "ubi-media.h"
#include "ubi.h"

/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32

/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63

/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2

struct ubiblock_param {
	int ubi_num;
	int vol_id;
	char name[UBIBLOCK_PARAM_LEN+1];
};

/*
 * Per-request state: the work item that services the request and the
 * scatter/gather list that ubi_read_sg() fills in.
 */
struct ubiblock_pdu {
	struct work_struct work;
	struct ubi_sgl usgl;
};

/* Number of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;

/* UBI block device specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;

struct ubiblock {
	struct ubi_volume_desc *desc;
	int ubi_num;
	int vol_id;
	int refcnt;
	int leb_size;

	struct gendisk *gd;
	struct request_queue *rq;

	struct workqueue_struct *wq;

	struct mutex dev_mutex;
	struct list_head list;
	struct blk_mq_tag_set tag_set;
};

/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;

static int __init ubiblock_set_param(const char *val,
				     const struct kernel_param *kp)
{
	int i, ret;
	size_t len;
	struct ubiblock_param *param;
	char buf[UBIBLOCK_PARAM_LEN];
	char *pbuf = &buf[0];
	char *tokens[UBIBLOCK_PARAM_COUNT];

	if (!val)
		return -EINVAL;

	len = strnlen(val, UBIBLOCK_PARAM_LEN);
	if (len == 0) {
		pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
		return 0;
	}

	if (len == UBIBLOCK_PARAM_LEN) {
		pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
		       val, UBIBLOCK_PARAM_LEN);
		return -EINVAL;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	/* Don't overflow the fixed-size parameter array */
	if (ubiblock_devs == UBIBLOCK_MAX_DEVICES) {
		pr_err("UBI: block: more than %d ubiblock devices specified\n",
		       UBIBLOCK_MAX_DEVICES);
		return -EINVAL;
	}

	param = &ubiblock_param[ubiblock_devs];
	if (tokens[1]) {
		/* Two parameters: can be 'ubi,vol_id' or 'ubi,vol_name' */
		ret = kstrtoint(tokens[0], 10, &param->ubi_num);
		if (ret < 0)
			return -EINVAL;

		/* Second param can be a number or a name */
		ret = kstrtoint(tokens[1], 10, &param->vol_id);
		if (ret < 0) {
			param->vol_id = -1;
			strcpy(param->name, tokens[1]);
		}

	} else {
		/* One parameter: must be device path */
		strcpy(param->name, tokens[0]);
		param->ubi_num = -1;
		param->vol_id = -1;
	}

	ubiblock_devs++;

	return 0;
}

static const struct kernel_param_ops ubiblock_param_ops = {
	.set    = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
			"Multiple \"block\" parameters may be specified.\n"
			"UBI volumes may be specified by their number, name, or path to the device node.\n"
			"Examples:\n"
			"Using the UBI volume path:\n"
			"ubi.block=/dev/ubi0_0\n"
			"Using the UBI device, and the volume name:\n"
			"ubi.block=0,rootfs\n"
			"Using both UBI device number and UBI volume number:\n"
			"ubi.block=0,0\n");
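
/*
 * Runtime creation/removal from userspace, sketched below (this is what
 * the 'ubiblock' tool from mtd-utils does; the /dev/ubi0_0 path is only
 * an example):
 *
 *   int fd = open("/dev/ubi0_0", O_RDONLY);
 *   struct ubi_blkcreate_req req = { };
 *
 *   ioctl(fd, UBI_IOCVOLCRBLK, &req);   creates /dev/ubiblock0_0
 *   ioctl(fd, UBI_IOCVOLRMBLK);         removes it again
 *
 * For early (boot-time) creation, e.g. a read-only squashfs root:
 *
 *   ubi.mtd=0 ubi.block=0,rootfs root=/dev/ubiblock0_0
 */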

static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
{
	struct ubiblock *dev;

	list_for_each_entry(dev, &ubiblock_devices, list)
		if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
			return dev;
	return NULL;
}

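/*
 * A read that crosses an LEB boundary is split into per-LEB chunks by the
 * loop below. For example (a sketch, assuming a 124 KiB LEB size): a
 * 64 KiB read starting 4 KiB before the end of LEB 3 is issued as a
 * 4 KiB read from the tail of LEB 3 followed by a 60 KiB read from the
 * start of LEB 4.
 */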
static int ubiblock_read(struct ubiblock_pdu *pdu)
{
	int ret, leb, offset, bytes_left, to_read;
	u64 pos;
	struct request *req = blk_mq_rq_from_pdu(pdu);
	struct ubiblock *dev = req->q->queuedata;

	to_read = blk_rq_bytes(req);
	pos = blk_rq_pos(req) << 9; /* blk_rq_pos() counts 512-byte sectors */

	/* Get LEB:offset address to read from */
	offset = do_div(pos, dev->leb_size);
	leb = pos;
	bytes_left = to_read;

	while (bytes_left) {
		/*
		 * We can only read one LEB at a time. Therefore if the read
		 * length is larger than one LEB size, we split the operation.
		 */
		if (offset + to_read > dev->leb_size)
			to_read = dev->leb_size - offset;

		ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
		if (ret < 0)
			return ret;

		bytes_left -= to_read;
		to_read = bytes_left;
		leb += 1;
		offset = 0;
	}
	return 0;
}

static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
	struct ubiblock *dev = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		/*
		 * The volume is already open, just increase the reference
		 * counter.
		 */
		goto out_done;
	}

	/*
	 * We want users to be aware they should only mount us as read-only.
	 * It's just a paranoid check, as write requests will get rejected
	 * in any case.
	 */
	if (mode & FMODE_WRITE) {
		ret = -EPERM;
		goto out_unlock;
	}

	dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
	if (IS_ERR(dev->desc)) {
		dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d\n",
			dev->ubi_num, dev->vol_id);
		ret = PTR_ERR(dev->desc);
		dev->desc = NULL;
		goto out_unlock;
	}

out_done:
	dev->refcnt++;
	mutex_unlock(&dev->dev_mutex);
	return 0;

out_unlock:
	mutex_unlock(&dev->dev_mutex);
	return ret;
}

static void ubiblock_release(struct gendisk *gd, fmode_t mode)
{
	struct ubiblock *dev = gd->private_data;

	mutex_lock(&dev->dev_mutex);
	dev->refcnt--;
	if (dev->refcnt == 0) {
		ubi_close_volume(dev->desc);
		dev->desc = NULL;
	}
	mutex_unlock(&dev->dev_mutex);
}

static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* Some tools might require this information */
	geo->heads = 1;
	geo->cylinders = 1;
	geo->sectors = get_capacity(bdev->bd_disk);
	geo->start = 0;
	return 0;
}

static const struct block_device_operations ubiblock_ops = {
	.owner = THIS_MODULE,
	.open = ubiblock_open,
	.release = ubiblock_release,
	.getgeo	= ubiblock_getgeo,
};

static void ubiblock_do_work(struct work_struct *work)
{
	int ret;
	struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
	struct request *req = blk_mq_rq_from_pdu(pdu);

	blk_mq_start_request(req);

	/*
	 * It is safe to ignore the return value of blk_rq_map_sg() because
	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
	 * and ubi_read_sg() will check that limit.
	 */
	blk_rq_map_sg(req->q, req, pdu->usgl.sg);

	ret = ubiblock_read(pdu);
	rq_flush_dcache_pages(req);

	blk_mq_end_request(req, ret);
}

/*
 * The block layer calls ->queue_rq() for each request; since ubi_read_sg()
 * does flash I/O and may sleep, the actual read is deferred to the
 * per-volume workqueue.
 */
static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
			     const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct ubiblock *dev = hctx->queue->queuedata;
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	if (req->cmd_type != REQ_TYPE_FS)
		return BLK_MQ_RQ_QUEUE_ERROR;

	if (rq_data_dir(req) != READ)
		return BLK_MQ_RQ_QUEUE_ERROR; /* Write not implemented */

	ubi_sgl_init(&pdu->usgl);
	queue_work(dev->wq, &pdu->work);

	return BLK_MQ_RQ_QUEUE_OK;
}

static int ubiblock_init_request(void *data, struct request *req,
				 unsigned int hctx_idx,
				 unsigned int request_idx,
				 unsigned int numa_node)
{
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
	INIT_WORK(&pdu->work, ubiblock_do_work);

	return 0;
}

static struct blk_mq_ops ubiblock_mq_ops = {
	.queue_rq       = ubiblock_queue_rq,
	.init_request	= ubiblock_init_request,
	.map_queue      = blk_mq_map_queue,
};
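
/*
 * blk-mq wiring in a nutshell: the tag set below is allocated with
 * cmd_size = sizeof(struct ubiblock_pdu), so each preallocated request
 * carries its own pdu, and ubiblock_init_request() sets up that pdu's
 * scatterlist and work item once, at tag set allocation time.
 */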

int ubiblock_create(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	struct gendisk *gd;
	u64 disk_capacity = vi->used_bytes >> 9;
	int ret;

	if ((sector_t)disk_capacity != disk_capacity)
		return -EFBIG;
	/* Check that the volume isn't already handled */
	mutex_lock(&devices_mutex);
	if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
		mutex_unlock(&devices_mutex);
		return -EEXIST;
	}
	mutex_unlock(&devices_mutex);

	dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	mutex_init(&dev->dev_mutex);

	dev->ubi_num = vi->ubi_num;
	dev->vol_id = vi->vol_id;
	dev->leb_size = vi->usable_leb_size;

	/* Initialize the gendisk of this ubiblock device */
	gd = alloc_disk(1);
	if (!gd) {
		pr_err("UBI: block: alloc_disk failed\n");
		ret = -ENODEV;
		goto out_free_dev;
	}

	gd->fops = &ubiblock_ops;
	gd->major = ubiblock_major;
	/* One minor per possible volume: ubiX_Y gets X * UBI_MAX_VOLUMES + Y */
	gd->first_minor = dev->ubi_num * UBI_MAX_VOLUMES + dev->vol_id;
	gd->private_data = dev;
	sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
	set_capacity(gd, disk_capacity);
	dev->gd = gd;

	dev->tag_set.ops = &ubiblock_mq_ops;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
	dev->tag_set.driver_data = dev;
	dev->tag_set.nr_hw_queues = 1;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret) {
		dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed\n");
		goto out_put_disk;
	}

	dev->rq = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(dev->rq)) {
		dev_err(disk_to_dev(gd), "blk_mq_init_queue failed\n");
		ret = PTR_ERR(dev->rq);
		goto out_free_tags;
	}
	blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);

	dev->rq->queuedata = dev;
	dev->gd->queue = dev->rq;

	/*
	 * Create one workqueue per volume (per registered block device).
	 * Remember workqueues are cheap, they're not threads.
	 */
	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
	if (!dev->wq) {
		ret = -ENOMEM;
		goto out_free_queue;
	}

	mutex_lock(&devices_mutex);
	list_add_tail(&dev->list, &ubiblock_devices);
	mutex_unlock(&devices_mutex);

	/* Must be the last step: anyone can call file ops from now on */
	add_disk(dev->gd);
	dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)\n",
		 dev->ubi_num, dev->vol_id, vi->name);
	return 0;

out_free_queue:
	blk_cleanup_queue(dev->rq);
out_free_tags:
	blk_mq_free_tag_set(&dev->tag_set);
out_put_disk:
	put_disk(dev->gd);
out_free_dev:
	kfree(dev);

	return ret;
}

static void ubiblock_cleanup(struct ubiblock *dev)
{
	/* Stop new requests from arriving */
	del_gendisk(dev->gd);
	/* Flush pending work */
	destroy_workqueue(dev->wq);
	/* Finally destroy the blk queue */
	blk_cleanup_queue(dev->rq);
	blk_mq_free_tag_set(&dev->tag_set);
	dev_info(disk_to_dev(dev->gd), "released\n");
	put_disk(dev->gd);
}

int ubiblock_remove(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;

	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}

	/* Found a device, let's lock it so we can check if it's busy */
	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		mutex_unlock(&dev->dev_mutex);
		mutex_unlock(&devices_mutex);
		return -EBUSY;
	}

	/* Remove from device list */
	list_del(&dev->list);
	mutex_unlock(&devices_mutex);

	ubiblock_cleanup(dev);
	mutex_unlock(&dev->dev_mutex);
	kfree(dev);
	return 0;
}

static int ubiblock_resize(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	u64 disk_capacity = vi->used_bytes >> 9;

	/*
	 * Need to lock the device list until we stop using the device,
	 * otherwise the device struct might get released in
	 * 'ubiblock_remove()'.
	 */
	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}
	if ((sector_t)disk_capacity != disk_capacity) {
		mutex_unlock(&devices_mutex);
		dev_warn(disk_to_dev(dev->gd), "the volume is too big (%d LEBs), cannot resize\n",
			 vi->size);
		return -EFBIG;
	}

	mutex_lock(&dev->dev_mutex);

	if (get_capacity(dev->gd) != disk_capacity) {
		set_capacity(dev->gd, disk_capacity);
		dev_info(disk_to_dev(dev->gd), "resized to %lld bytes\n",
			 vi->used_bytes);
	}
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);
	return 0;
}

static int ubiblock_notify(struct notifier_block *nb,
			 unsigned long notification_type, void *ns_ptr)
{
	struct ubi_notification *nt = ns_ptr;

	switch (notification_type) {
	case UBI_VOLUME_ADDED:
		/*
		 * We want to enforce explicit block device creation for
		 * volumes, so when a volume is added we do nothing.
		 */
		break;
	case UBI_VOLUME_REMOVED:
		ubiblock_remove(&nt->vi);
		break;
	case UBI_VOLUME_RESIZED:
		ubiblock_resize(&nt->vi);
		break;
	case UBI_VOLUME_UPDATED:
		/*
		 * If the volume is static, a content update might mean the
		 * size (i.e. used_bytes) was also changed.
		 */
		if (nt->vi.vol_type == UBI_STATIC_VOLUME)
			ubiblock_resize(&nt->vi);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block ubiblock_notifier = {
	.notifier_call = ubiblock_notify,
};

static struct ubi_volume_desc * __init
open_volume_desc(const char *name, int ubi_num, int vol_id)
{
	if (ubi_num == -1)
		/* No ubi num, name must be a vol device path */
		return ubi_open_volume_path(name, UBI_READONLY);
	else if (vol_id == -1)
		/* No vol_id, must be vol_name */
		return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
	else
		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
}

static void __init ubiblock_create_from_param(void)
{
	int i, ret = 0;
	struct ubiblock_param *p;
	struct ubi_volume_desc *desc;
	struct ubi_volume_info vi;

	/*
	 * If there is an error creating one of the ubiblocks, continue on to
	 * create the following ubiblocks. This helps in a circumstance where
	 * the kernel command-line specifies multiple block devices and some
	 * may be broken, but we still want the working ones to come up.
	 */
	for (i = 0; i < ubiblock_devs; i++) {
		p = &ubiblock_param[i];

		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
		if (IS_ERR(desc)) {
			pr_err("UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
			       p->ubi_num, p->vol_id, PTR_ERR(desc));
			continue;
		}

		ubi_get_volume_info(desc, &vi);
		ubi_close_volume(desc);

		ret = ubiblock_create(&vi);
		if (ret) {
			pr_err("UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
			       vi.name, p->ubi_num, p->vol_id, ret);
			continue;
		}
	}
}

static void ubiblock_remove_all(void)
{
	struct ubiblock *next;
	struct ubiblock *dev;

	list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
		/* The module is being forcefully removed */
		WARN_ON(dev->desc);
		/* Remove from device list */
		list_del(&dev->list);
		ubiblock_cleanup(dev);
		kfree(dev);
	}
}

int __init ubiblock_init(void)
{
	int ret;

	ubiblock_major = register_blkdev(0, "ubiblock");
	if (ubiblock_major < 0)
		return ubiblock_major;

	/*
	 * Attach block devices from 'block=' module param.
	 * Even if one block device in the param list fails to come up,
	 * still allow the module to load and leave any others up.
	 */
	ubiblock_create_from_param();

	/*
	 * Block devices are only created upon user requests, so we ignore
	 * existing volumes.
	 */
	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
	if (ret)
		goto err_unreg;
	return 0;

err_unreg:
	unregister_blkdev(ubiblock_major, "ubiblock");
	ubiblock_remove_all();
	return ret;
}

void __exit ubiblock_exit(void)
{
	ubi_unregister_volume_notifier(&ubiblock_notifier);
	ubiblock_remove_all();
	unregister_blkdev(ubiblock_major, "ubiblock");
}