xref: /OK3568_Linux_fs/kernel/drivers/mtd/ubi/block.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Ezequiel Garcia
 * Copyright (c) 2011 Free Electrons
 *
 * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
 *   Copyright (c) International Business Machines Corp., 2006
 *   Copyright (c) Nokia Corporation, 2007
 *   Authors: Artem Bityutskiy, Frank Haverkamp
 */

/*
 * Read-only block devices on top of UBI volumes
 *
 * A simple implementation to allow a block device to be layered on top of a
 * UBI volume. The implementation is provided by creating a static 1-to-1
 * mapping between the block device and the UBI volume.
 *
 * The addressed byte is obtained from the addressed block sector, which is
 * mapped linearly into the corresponding LEB:
 *
 *   LEB number = addressed byte / LEB size
 *
 * This feature is compiled in the UBI core, and adds a 'block' parameter
 * to allow early creation of block devices on top of UBI volumes. Runtime
 * block creation/removal for UBI volumes is provided through two UBI ioctls:
 * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
 */
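
/*
 * Worked example (illustrative numbers): with a 128 KiB (131072-byte)
 * LEB size, addressed byte 300000 maps to LEB 300000 / 131072 = 2, at
 * offset 300000 - 2 * 131072 = 37856 within that LEB.
 */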

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <linux/idr.h>
#include <asm/div64.h>

#include "ubi-media.h"
#include "ubi.h"

/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32

/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63

/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2

struct ubiblock_param {
	int ubi_num;
	int vol_id;
	char name[UBIBLOCK_PARAM_LEN+1];
};

struct ubiblock_pdu {
	struct work_struct work;
	struct ubi_sgl usgl;
};

/* Number of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;

/* UBI block device specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;

struct ubiblock {
	struct ubi_volume_desc *desc;
	int ubi_num;
	int vol_id;
	int refcnt;
	int leb_size;

	struct gendisk *gd;
	struct request_queue *rq;

	struct workqueue_struct *wq;

	struct mutex dev_mutex;
	struct list_head list;
	struct blk_mq_tag_set tag_set;
};

/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_IDR(ubiblock_minor_idr);
/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;

static int __init ubiblock_set_param(const char *val,
				     const struct kernel_param *kp)
{
	int i, ret;
	size_t len;
	struct ubiblock_param *param;
	char buf[UBIBLOCK_PARAM_LEN];
	char *pbuf = &buf[0];
	char *tokens[UBIBLOCK_PARAM_COUNT];

	if (!val)
		return -EINVAL;

	len = strnlen(val, UBIBLOCK_PARAM_LEN);
	if (len == 0) {
		pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
		return 0;
	}

	if (len == UBIBLOCK_PARAM_LEN) {
		pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
		       val, UBIBLOCK_PARAM_LEN);
		return -EINVAL;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	/* Guard against overflowing the fixed-size parameter array */
	if (ubiblock_devs == UBIBLOCK_MAX_DEVICES) {
		pr_err("UBI: block: too many 'block=' parameters, max. is %d\n",
		       UBIBLOCK_MAX_DEVICES);
		return -EINVAL;
	}

	param = &ubiblock_param[ubiblock_devs];
	if (tokens[1]) {
		/* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
		ret = kstrtoint(tokens[0], 10, &param->ubi_num);
		if (ret < 0)
			return -EINVAL;

		/* Second param can be a number or a name */
		ret = kstrtoint(tokens[1], 10, &param->vol_id);
		if (ret < 0) {
			param->vol_id = -1;
			strcpy(param->name, tokens[1]);
		}

	} else {
		/* One parameter: must be device path */
		strcpy(param->name, tokens[0]);
		param->ubi_num = -1;
		param->vol_id = -1;
	}

	ubiblock_devs++;

	return 0;
}

static const struct kernel_param_ops ubiblock_param_ops = {
	.set    = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
			"Multiple \"block\" parameters may be specified.\n"
			"UBI volumes may be specified by their number, name, or path to the device node.\n"
			"Examples\n"
			"Using the UBI volume path:\n"
			"ubi.block=/dev/ubi0_0\n"
			"Using the UBI device, and the volume name:\n"
			"ubi.block=0,rootfs\n"
			"Using both UBI device number and UBI volume number:\n"
			"ubi.block=0,0\n");
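
/*
 * Runtime creation/removal sketch (userspace side). This is a minimal,
 * illustrative example assuming the UAPI definitions from <mtd/ubi-user.h>
 * and a hypothetical volume node /dev/ubi0_0; error checking is omitted
 * for brevity:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/ubi-user.h>
 *
 *	int fd = open("/dev/ubi0_0", O_RDONLY);
 *	struct ubi_blkcreate_req req = { };
 *
 *	ioctl(fd, UBI_IOCVOLCRBLK, &req);	// create /dev/ubiblock0_0
 *	ioctl(fd, UBI_IOCVOLRMBLK);		// remove it again
 *	close(fd);
 */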

static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
{
	struct ubiblock *dev;

	list_for_each_entry(dev, &ubiblock_devices, list)
		if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
			return dev;
	return NULL;
}

static int ubiblock_read(struct ubiblock_pdu *pdu)
{
	int ret, leb, offset, bytes_left, to_read;
	u64 pos;
	struct request *req = blk_mq_rq_from_pdu(pdu);
	struct ubiblock *dev = req->q->queuedata;

	to_read = blk_rq_bytes(req);
	pos = blk_rq_pos(req) << 9;

	/* Get LEB:offset address to read from */
	offset = do_div(pos, dev->leb_size);
	leb = pos;
	bytes_left = to_read;

	while (bytes_left) {
		/*
		 * We can only read one LEB at a time. Therefore if the read
		 * length is larger than one LEB size, we split the operation.
		 */
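		/*
		 * Illustrative numbers: with a 124 KiB LEB, a 4 KiB read
		 * starting 1 KiB before the end of LEB 5 is split into two
		 * calls: the last 1 KiB of LEB 5, then 3 KiB from offset 0
		 * of LEB 6.
		 */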
		if (offset + to_read > dev->leb_size)
			to_read = dev->leb_size - offset;

		ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
		if (ret < 0)
			return ret;

		bytes_left -= to_read;
		to_read = bytes_left;
		leb += 1;
		offset = 0;
	}
	return 0;
}

static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
	struct ubiblock *dev = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		/*
		 * The volume is already open, just increase the reference
		 * counter.
		 */
		goto out_done;
	}

	/*
	 * We want users to be aware they should only mount us as read-only.
	 * It's just a paranoid check, as write requests will get rejected
	 * in any case.
	 */
	if (mode & FMODE_WRITE) {
		ret = -EROFS;
		goto out_unlock;
	}

	dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
	if (IS_ERR(dev->desc)) {
		dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
			dev->ubi_num, dev->vol_id);
		ret = PTR_ERR(dev->desc);
		dev->desc = NULL;
		goto out_unlock;
	}

out_done:
	dev->refcnt++;
	mutex_unlock(&dev->dev_mutex);
	return 0;

out_unlock:
	mutex_unlock(&dev->dev_mutex);
	return ret;
}

static void ubiblock_release(struct gendisk *gd, fmode_t mode)
{
	struct ubiblock *dev = gd->private_data;

	mutex_lock(&dev->dev_mutex);
	dev->refcnt--;
	if (dev->refcnt == 0) {
		ubi_close_volume(dev->desc);
		dev->desc = NULL;
	}
	mutex_unlock(&dev->dev_mutex);
}

static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* Some tools might require this information */
	geo->heads = 1;
	geo->cylinders = 1;
	geo->sectors = get_capacity(bdev->bd_disk);
	geo->start = 0;
	return 0;
}

static const struct block_device_operations ubiblock_ops = {
	.owner = THIS_MODULE,
	.open = ubiblock_open,
	.release = ubiblock_release,
	.getgeo	= ubiblock_getgeo,
};

static void ubiblock_do_work(struct work_struct *work)
{
	int ret;
	struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
	struct request *req = blk_mq_rq_from_pdu(pdu);

	blk_mq_start_request(req);

	/*
	 * It is safe to ignore the return value of blk_rq_map_sg() because
	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
	 * and ubi_read_sg() will check that limit.
	 */
	blk_rq_map_sg(req->q, req, pdu->usgl.sg);

	ret = ubiblock_read(pdu);
	rq_flush_dcache_pages(req);

	blk_mq_end_request(req, errno_to_blk_status(ret));
}

static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
			     const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct ubiblock *dev = hctx->queue->queuedata;
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	switch (req_op(req)) {
	case REQ_OP_READ:
		ubi_sgl_init(&pdu->usgl);
		queue_work(dev->wq, &pdu->work);
		return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}

static int ubiblock_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
	INIT_WORK(&pdu->work, ubiblock_do_work);

	return 0;
}

static const struct blk_mq_ops ubiblock_mq_ops = {
	.queue_rq       = ubiblock_queue_rq,
	.init_request	= ubiblock_init_request,
};

static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
{
	u64 size = vi->used_bytes >> 9;

	if (vi->used_bytes % 512) {
		pr_warn("UBI: block: volume size is not a multiple of 512, "
			"last %llu bytes are ignored!\n",
			vi->used_bytes - (size << 9));
	}

	if ((sector_t)size != size)
		return -EFBIG;

	*disk_capacity = size;

	return 0;
}
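
/*
 * Worked example (illustrative numbers): used_bytes = 1048577 (1 MiB plus
 * one byte) yields size = 1048577 >> 9 = 2048 sectors; the trailing byte
 * triggers the "not a multiple of 512" warning above and is ignored.
 */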

int ubiblock_create(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	struct gendisk *gd;
	u64 disk_capacity;
	int ret;

	ret = calc_disk_capacity(vi, &disk_capacity);
	if (ret)
		return ret;

	/* Check that the volume isn't already handled */
	mutex_lock(&devices_mutex);
	if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	mutex_init(&dev->dev_mutex);

	dev->ubi_num = vi->ubi_num;
	dev->vol_id = vi->vol_id;
	dev->leb_size = vi->usable_leb_size;

	/* Initialize the gendisk of this ubiblock device */
	gd = alloc_disk(1);
	if (!gd) {
		pr_err("UBI: block: alloc_disk failed\n");
		ret = -ENODEV;
		goto out_free_dev;
	}

	gd->fops = &ubiblock_ops;
	gd->major = ubiblock_major;
	gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
	if (gd->first_minor < 0) {
		dev_err(disk_to_dev(gd),
			"block: dynamic minor allocation failed");
		ret = -ENODEV;
		goto out_put_disk;
	}
	gd->private_data = dev;
	sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
	set_capacity(gd, disk_capacity);
	dev->gd = gd;

	dev->tag_set.ops = &ubiblock_mq_ops;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
	dev->tag_set.driver_data = dev;
	dev->tag_set.nr_hw_queues = 1;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret) {
		dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
		goto out_remove_minor;
	}

	dev->rq = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(dev->rq)) {
		dev_err(disk_to_dev(gd), "blk_mq_init_queue failed");
		ret = PTR_ERR(dev->rq);
		goto out_free_tags;
	}
	blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);

	dev->rq->queuedata = dev;
	dev->gd->queue = dev->rq;

	/*
	 * Create one workqueue per volume (per registered block device).
	 * Remember workqueues are cheap, they're not threads.
	 */
	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
	if (!dev->wq) {
		ret = -ENOMEM;
		goto out_free_queue;
	}

	list_add_tail(&dev->list, &ubiblock_devices);

	/* Must be the last step: anyone can call file ops from now on */
	add_disk(dev->gd);
	dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
		 dev->ubi_num, dev->vol_id, vi->name);
	mutex_unlock(&devices_mutex);
	return 0;

out_free_queue:
	blk_cleanup_queue(dev->rq);
out_free_tags:
	blk_mq_free_tag_set(&dev->tag_set);
out_remove_minor:
	idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_put_disk:
	put_disk(dev->gd);
out_free_dev:
	kfree(dev);
out_unlock:
	mutex_unlock(&devices_mutex);

	return ret;
}

static void ubiblock_cleanup(struct ubiblock *dev)
{
	/* Stop new requests from arriving */
	del_gendisk(dev->gd);
	/* Flush pending work */
	destroy_workqueue(dev->wq);
	/* Finally destroy the blk queue */
	blk_cleanup_queue(dev->rq);
	blk_mq_free_tag_set(&dev->tag_set);
	dev_info(disk_to_dev(dev->gd), "released");
	idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
	put_disk(dev->gd);
}

int ubiblock_remove(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	int ret;

	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		ret = -ENODEV;
		goto out_unlock;
	}

	/* Found a device, let's lock it so we can check if it's busy */
	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		ret = -EBUSY;
		goto out_unlock_dev;
	}

	/* Remove from device list */
	list_del(&dev->list);
	ubiblock_cleanup(dev);
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);

	kfree(dev);
	return 0;

out_unlock_dev:
	mutex_unlock(&dev->dev_mutex);
out_unlock:
	mutex_unlock(&devices_mutex);
	return ret;
}

static int ubiblock_resize(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	u64 disk_capacity;
	int ret;

	/*
	 * Need to lock the device list until we stop using the device,
	 * otherwise the device struct might get released in
	 * 'ubiblock_remove()'.
	 */
	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}

	ret = calc_disk_capacity(vi, &disk_capacity);
	if (ret) {
		mutex_unlock(&devices_mutex);
		if (ret == -EFBIG) {
			dev_warn(disk_to_dev(dev->gd),
				 "the volume is too big (%d LEBs), cannot resize",
				 vi->size);
		}
		return ret;
	}

	mutex_lock(&dev->dev_mutex);

	if (get_capacity(dev->gd) != disk_capacity) {
		set_capacity(dev->gd, disk_capacity);
		dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
			 vi->used_bytes);
	}
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);
	return 0;
}

static int ubiblock_notify(struct notifier_block *nb,
			 unsigned long notification_type, void *ns_ptr)
{
	struct ubi_notification *nt = ns_ptr;

	switch (notification_type) {
	case UBI_VOLUME_ADDED:
		/*
		 * We want to enforce explicit block device creation for
		 * volumes, so when a volume is added we do nothing.
		 */
		break;
	case UBI_VOLUME_REMOVED:
		ubiblock_remove(&nt->vi);
		break;
	case UBI_VOLUME_RESIZED:
		ubiblock_resize(&nt->vi);
		break;
	case UBI_VOLUME_UPDATED:
		/*
		 * If the volume is static, a content update might mean the
		 * size (i.e. used_bytes) was also changed.
		 */
		if (nt->vi.vol_type == UBI_STATIC_VOLUME)
			ubiblock_resize(&nt->vi);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block ubiblock_notifier = {
	.notifier_call = ubiblock_notify,
};

static struct ubi_volume_desc * __init
open_volume_desc(const char *name, int ubi_num, int vol_id)
{
	if (ubi_num == -1)
		/* No ubi num, name must be a vol device path */
		return ubi_open_volume_path(name, UBI_READONLY);
	else if (vol_id == -1)
		/* No vol_id, must be vol_name */
		return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
	else
		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
}

static void __init ubiblock_create_from_param(void)
{
	int i, ret = 0;
	struct ubiblock_param *p;
	struct ubi_volume_desc *desc;
	struct ubi_volume_info vi;

	/*
	 * If there is an error creating one of the ubiblocks, continue on to
	 * create the following ubiblocks. This helps in a circumstance where
	 * the kernel command-line specifies multiple block devices and some
	 * may be broken, but we still want the working ones to come up.
	 */
	for (i = 0; i < ubiblock_devs; i++) {
		p = &ubiblock_param[i];

		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
		if (IS_ERR(desc)) {
			pr_err("UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
			       p->ubi_num, p->vol_id, PTR_ERR(desc));
			continue;
		}

		ubi_get_volume_info(desc, &vi);
		ubi_close_volume(desc);

		ret = ubiblock_create(&vi);
		if (ret) {
			pr_err("UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
			       vi.name, p->ubi_num, p->vol_id, ret);
			continue;
		}
	}
}

static void ubiblock_remove_all(void)
{
	struct ubiblock *next;
	struct ubiblock *dev;

	mutex_lock(&devices_mutex);
	list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
		/* The module is being forcefully removed */
		WARN_ON(dev->desc);
		/* Remove from device list */
		list_del(&dev->list);
		ubiblock_cleanup(dev);
		kfree(dev);
	}
	mutex_unlock(&devices_mutex);
}

int __init ubiblock_init(void)
{
	int ret;

	ubiblock_major = register_blkdev(0, "ubiblock");
	if (ubiblock_major < 0)
		return ubiblock_major;

	/*
	 * Attach block devices from 'block=' module param.
	 * Even if one block device in the param list fails to come up,
	 * still allow the module to load and leave any others up.
	 */
	ubiblock_create_from_param();

	/*
	 * Block devices are only created upon user requests, so we ignore
	 * existing volumes.
	 */
	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
	if (ret)
		goto err_unreg;
	return 0;

err_unreg:
	unregister_blkdev(ubiblock_major, "ubiblock");
	ubiblock_remove_all();
	return ret;
}

void __exit ubiblock_exit(void)
{
	ubi_unregister_volume_notifier(&ubiblock_notifier);
	ubiblock_remove_all();
	unregister_blkdev(ubiblock_major, "ubiblock");
}