// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2007
 *
 * Author: Artem Bityutskiy (Битюцкий Артём),
 *         Frank Haverkamp
 */

/*
 * This file includes UBI initialization and building of UBI devices.
 *
 * When UBI is initialized, it attaches all the MTD devices specified as the
 * module load parameters or the kernel boot parameters. If MTD devices were
 * not specified, UBI does not attach any MTD device, but it is possible to
 * do it later using the "UBI control device".
 */
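
/*
 * For example (a sketch; the exact format is described by the "mtd="
 * module parameter documentation further down in this file), a device can
 * be attached from the kernel command line with:
 *
 *	ubi.mtd=0,2048,20
 *
 * which attaches mtd0 with a VID header offset of 2048 bytes and a bad-PEB
 * limit of 20 per 1024 PEBs. The values here are only illustrative.
 */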

#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/mtd/partitions.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/major.h>
#include "ubi.h"

/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64

/* Maximum number of comma-separated items in the 'mtd=' parameter */
#define MTD_PARAM_MAX_COUNT 4

/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768

#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
#define ubi_is_module() 0
#endif

/**
 * struct mtd_dev_param - MTD device parameter description data structure.
 * @name: MTD character device node path, MTD device name, or MTD device number
 *        string
 * @ubi_num: UBI number which should be assigned to the MTD device
 * @vid_hdr_offs: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
	int ubi_num;
	int vid_hdr_offs;
	int max_beb_per1024;
};

/* Number of elements set in the @mtd_dev_param array */
static int mtd_devs;

/* MTD devices specification parameters */
static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
#ifdef CONFIG_MTD_UBI_FASTMAP
/* UBI module parameter to enable fastmap automatically on non-fastmap images */
static bool fm_autoconvert;
static bool fm_debug;
#endif

/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;

/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ubi_ctrl",
	.fops = &ubi_ctrl_cdev_operations,
};

/* All UBI devices in system */
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];

/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);

/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);

/* "Show" method for files in '/<sysfs>/class/ubi/' */
/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static ssize_t version_show(struct class *class, struct class_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", UBI_VERSION);
}
static CLASS_ATTR_RO(version);

static struct attribute *ubi_class_attrs[] = {
	&class_attr_version.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ubi_class);

/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class ubi_class = {
	.name		= UBI_NAME_STR,
	.owner		= THIS_MODULE,
	.class_groups	= ubi_class_groups,
};

static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf);

/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_ro_mode =
	__ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL);

/**
 * ubi_volume_notify - send a volume change notification.
 * @ubi: UBI device description object
 * @vol: volume description object of the changed volume
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 *
 * This is a helper function which notifies all subscribers about a volume
 * change event (creation, removal, re-sizing, re-naming, updating). Returns
 * zero in case of success and a negative error code in case of failure.
 */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
	int ret;
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);

	switch (ntype) {
	case UBI_VOLUME_ADDED:
	case UBI_VOLUME_REMOVED:
	case UBI_VOLUME_RESIZED:
	case UBI_VOLUME_RENAMED:
		ret = ubi_update_fastmap(ubi);
		if (ret)
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
	}

	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}

/**
 * ubi_notify_all - send a notification to all volumes.
 * @ubi: UBI device description object
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 * @nb: the notifier to call
 *
 * This function walks all volumes of UBI device @ubi and sends the @ntype
 * notification for each volume. If @nb is %NULL, then all registered notifiers
 * are called, otherwise only the @nb notifier is called. Returns the number of
 * sent notifications.
 */
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
	struct ubi_notification nt;
	int i, count = 0;

	ubi_do_get_device_info(ubi, &nt.di);

	mutex_lock(&ubi->device_mutex);
	for (i = 0; i < ubi->vtbl_slots; i++) {
		/*
		 * Since @ubi->device_mutex is locked, and we are not going to
		 * change @ubi->volumes, we do not have to lock
		 * @ubi->volumes_lock.
		 */
		if (!ubi->volumes[i])
			continue;

		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
		if (nb)
			nb->notifier_call(nb, ntype, &nt);
		else
			blocking_notifier_call_chain(&ubi_notifiers, ntype,
						     &nt);
		count += 1;
	}
	mutex_unlock(&ubi->device_mutex);

	return count;
}

/**
 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
 * @nb: the notifier to call
 *
 * This function walks all UBI devices and volumes and sends the
 * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
 * registered notifiers are called, otherwise only the @nb notifier is called.
 * Returns the number of sent notifications.
 */
int ubi_enumerate_volumes(struct notifier_block *nb)
{
	int i, count = 0;

	/*
	 * Since the @ubi_devices_mutex is locked, and we are not going to
	 * change @ubi_devices, we do not have to lock @ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (!ubi)
			continue;
		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
	}

	return count;
}

/**
 * ubi_get_device - get UBI device.
 * @ubi_num: UBI device number
 *
 * This function returns UBI device description object for UBI device number
 * @ubi_num, or %NULL if the device does not exist. This function increases the
 * device reference count to prevent removal of the device. In other words, the
 * device cannot be removed if its reference count is not zero.
 */
struct ubi_device *ubi_get_device(int ubi_num)
{
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	ubi = ubi_devices[ubi_num];
	if (ubi) {
		ubi_assert(ubi->ref_count >= 0);
		ubi->ref_count += 1;
		get_device(&ubi->dev);
	}
	spin_unlock(&ubi_devices_lock);

	return ubi;
}

/**
 * ubi_put_device - drop an UBI device reference.
 * @ubi: UBI device description object
 */
void ubi_put_device(struct ubi_device *ubi)
{
	spin_lock(&ubi_devices_lock);
	ubi->ref_count -= 1;
	put_device(&ubi->dev);
	spin_unlock(&ubi_devices_lock);
}
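
/*
 * Typical usage of the pair above (a sketch, not code taken from this
 * file): take a reference, work with the device, then drop the reference:
 *
 *	struct ubi_device *ubi = ubi_get_device(ubi_num);
 *
 *	if (ubi) {
 *		... use the device ...
 *		ubi_put_device(ubi);
 *	}
 */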

/**
 * ubi_get_by_major - get UBI device by character device major number.
 * @major: major number
 *
 * This function is similar to 'ubi_get_device()', but it searches the device
 * by its major number.
 */
struct ubi_device *ubi_get_by_major(int major)
{
	int i;
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_assert(ubi->ref_count >= 0);
			ubi->ref_count += 1;
			get_device(&ubi->dev);
			spin_unlock(&ubi_devices_lock);
			return ubi;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return NULL;
}

/**
 * ubi_major2num - get UBI device number by character device major number.
 * @major: major number
 *
 * This function searches for the UBI device number by its character device
 * major number. If the UBI device was not found, this function returns
 * %-ENODEV, otherwise the UBI device number is returned.
 */
int ubi_major2num(int major)
{
	int i, ubi_num = -ENODEV;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_num = ubi->ubi_num;
			break;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return ubi_num;
}

/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t ret;
	struct ubi_device *ubi;

	/*
	 * The below code looks weird, but it actually makes sense. We get the
	 * UBI device reference from the contained 'struct ubi_device'. But it
	 * is unclear if the device was removed or not yet. Indeed, if the
	 * device was removed before we increased its reference count,
	 * 'ubi_get_device()' will return -ENODEV and we fail.
	 *
	 * Remember, 'struct ubi_device' is freed in the release function, so
	 * we still can use 'ubi->ubi_num'.
	 */
	ubi = container_of(dev, struct ubi_device, dev);

	if (attr == &dev_eraseblock_size)
		ret = sprintf(buf, "%d\n", ubi->leb_size);
	else if (attr == &dev_avail_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
	else if (attr == &dev_total_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
	else if (attr == &dev_volumes_count)
		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
	else if (attr == &dev_max_ec)
		ret = sprintf(buf, "%d\n", ubi->max_ec);
	else if (attr == &dev_reserved_for_bad)
		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
	else if (attr == &dev_bad_peb_count)
		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
	else if (attr == &dev_max_vol_count)
		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
	else if (attr == &dev_min_io_size)
		ret = sprintf(buf, "%d\n", ubi->min_io_size);
	else if (attr == &dev_bgt_enabled)
		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
	else if (attr == &dev_mtd_num)
		ret = sprintf(buf, "%d\n", ubi->mtd->index);
	else if (attr == &dev_ro_mode)
		ret = sprintf(buf, "%d\n", ubi->ro_mode);
	else
		ret = -EINVAL;

	return ret;
}

static struct attribute *ubi_dev_attrs[] = {
	&dev_eraseblock_size.attr,
	&dev_avail_eraseblocks.attr,
	&dev_total_eraseblocks.attr,
	&dev_volumes_count.attr,
	&dev_max_ec.attr,
	&dev_reserved_for_bad.attr,
	&dev_bad_peb_count.attr,
	&dev_max_vol_count.attr,
	&dev_min_io_size.attr,
	&dev_bgt_enabled.attr,
	&dev_mtd_num.attr,
	&dev_ro_mode.attr,
	NULL
};
ATTRIBUTE_GROUPS(ubi_dev);

static void dev_release(struct device *dev)
{
	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);

	kfree(ubi);
}

/**
 * kill_volumes - destroy all user volumes.
 * @ubi: UBI device description object
 */
static void kill_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i])
			ubi_free_volume(ubi, ubi->volumes[i]);
}

/**
 * uif_init - initialize user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * This function initializes various user interfaces for an UBI device. If the
 * initialization fails at an early stage, this function frees all the
 * resources it allocated and returns an error.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int uif_init(struct ubi_device *ubi)
{
	int i, err;
	dev_t dev;

	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Major numbers for the UBI character devices are allocated
	 * dynamically. Major numbers of volume character devices are
	 * equivalent to ones of the corresponding UBI character device. Minor
	 * numbers of UBI character devices are 0, while minor numbers of
	 * volume character devices start from 1. Thus, we allocate one major
	 * number and ubi->vtbl_slots + 1 minor numbers.
	 */
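	/*
	 * For illustration only (the major number is dynamic; 250 is a
	 * made-up value): ubi0 would then own /dev/ubi0 at 250:0, and its
	 * volume nodes /dev/ubi0_0, /dev/ubi0_1, ... at 250:1, 250:2, ...
	 */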
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err(ubi, "cannot register UBI character devices");
		return err;
	}

	ubi->dev.devt = dev;

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	dev_set_name(&ubi->dev, UBI_NAME_STR "%d", ubi->ubi_num);
	err = cdev_device_add(&ubi->cdev, &ubi->dev);
	if (err)
		goto out_unreg;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err(ubi, "cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
	cdev_device_del(&ubi->cdev, &ubi->dev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err(ubi, "cannot initialize UBI %s, error %d",
		ubi->ubi_name, err);
	return err;
}

/**
 * uif_close - close user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * Note, since this function un-registers UBI volume device objects (@vol->dev),
 * the memory allocated for the volumes is freed as well (in the release
 * function).
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	cdev_device_del(&ubi->cdev, &ubi->dev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}

/**
 * ubi_free_volumes_from - free volumes from specific index.
 * @ubi: UBI device description object
 * @from: the start index used for volume free.
 */
static void ubi_free_volumes_from(struct ubi_device *ubi, int from)
{
	int i;

	for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		if (!ubi->volumes[i])
			continue;
		ubi_eba_replace_table(ubi->volumes[i], NULL);
		ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
		kfree(ubi->volumes[i]);
		ubi->volumes[i] = NULL;
	}
}

/**
 * ubi_free_all_volumes - free all volumes.
 * @ubi: UBI device description object
 */
void ubi_free_all_volumes(struct ubi_device *ubi)
{
	ubi_free_volumes_from(ubi, 0);
}

/**
 * ubi_free_internal_volumes - free internal volumes.
 * @ubi: UBI device description object
 */
void ubi_free_internal_volumes(struct ubi_device *ubi)
{
	ubi_free_volumes_from(ubi, ubi->vtbl_slots);
}

static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
	int limit, device_pebs;
	uint64_t device_size;

	if (!max_beb_per1024) {
		/*
		 * Since max_beb_per1024 has not been set by the user in either
		 * the cmdline or Kconfig, use mtd_max_bad_blocks to set the
		 * limit if it is supported by the device.
		 */
		limit = mtd_max_bad_blocks(ubi->mtd, 0, ubi->mtd->size);
		if (limit < 0)
			return 0;
		return limit;
	}

	/*
	 * Here we are using size of the entire flash chip and
	 * not just the MTD partition size because the maximum
	 * number of bad eraseblocks is a percentage of the
	 * whole device and bad eraseblocks are not fairly
	 * distributed over the flash chip. So the worst case
	 * is that all the bad eraseblocks of the chip are in
	 * the MTD partition we are attaching (ubi->mtd).
	 */
	device_size = mtd_get_device_size(ubi->mtd);
	device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
	limit = mult_frac(device_pebs, max_beb_per1024, 1024);

	/* Round it up */
	if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
		limit += 1;

	return limit;
}
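
/*
 * A worked example of the computation above (illustrative numbers, not
 * from any particular board): a 1 GiB chip with 128 KiB eraseblocks has
 * 8192 PEBs. With max_beb_per1024 = 20, limit = 8192 * 20 / 1024 = 160
 * PEBs, and no round-up is needed since 160 * 1024 / 20 == 8192.
 */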

/**
 * io_init - initialize I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
 *
 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
 * assumed:
 *   o EC header is always at offset zero - this cannot be changed;
 *   o VID header starts just after the EC header at the closest address
 *     aligned to @io->hdrs_min_io_size;
 *   o data starts just after the VID header at the closest address aligned to
 *     @io->min_io_size
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
	dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
	dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));

	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions. Different regions
		 * may have different eraseblock size and other
		 * characteristics. It looks like mostly multi-region flashes
		 * have one "main" region and one or more small regions to
		 * store boot loader code or boot parameters or whatever. I
		 * guess we should just pick the largest region. But this is
		 * not implemented.
		 */
		ubi_err(ubi, "multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/*
	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
	 * physical eraseblocks maximum.
	 */

	ubi->peb_size   = ubi->mtd->erasesize;
	ubi->peb_count  = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	if (mtd_can_have_bb(ubi->mtd)) {
		ubi->bad_allowed = 1;
		ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
	}

	if (ubi->mtd->type == MTD_NORFLASH) {
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * Make sure minimal I/O unit is power of 2. Note, there is no
	 * fundamental reason for this assumption. It is just an optimization
	 * which allows us to avoid costly division operations.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	ubi->max_write_size = ubi->mtd->writebufsize;
	/*
	 * Maximum write size has to be greater or equivalent to min. I/O
	 * size, and be multiple of min. I/O size.
	 */
	if (ubi->max_write_size < ubi->min_io_size ||
	    ubi->max_write_size % ubi->min_io_size ||
	    !is_power_of_2(ubi->max_write_size)) {
		ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
			ubi->max_write_size, ubi->min_io_size);
		return -EINVAL;
	}

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_gen("min_io_size      %d", ubi->min_io_size);
	dbg_gen("max_write_size   %d", ubi->max_write_size);
	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_gen("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
	dbg_gen("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
						~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
						ubi->vid_hdr_aloffset;
	}

	/* Similar for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_gen("vid_hdr_offset   %d", ubi->vid_hdr_offset);
	dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_gen("vid_hdr_shift    %d", ubi->vid_hdr_shift);
	dbg_gen("leb_start        %d", ubi->leb_start);
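
	/*
	 * Illustrative layout (made-up but typical SLC NAND numbers): with a
	 * 128 KiB PEB, 2048-byte pages and 512-byte sub-pages,
	 * hdrs_min_io_size is 512, so the 64-byte EC header is padded to
	 * ec_hdr_alsize = 512 and the default vid_hdr_offset is 512. Then
	 * leb_start = ALIGN(512 + 64, 2048) = 2048, leaving a 126 KiB LEB.
	 */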

	/* The shift must be aligned to 32-bit boundary */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err(ubi, "unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set maximum amount of physical erroneous eraseblocks to be 10%.
	 * Erroneous PEB are those which have read errors.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_gen("max_erroneous    %d", ubi->max_erroneous);

	/*
	 * It may happen that EC and VID headers are situated in one minimal
	 * I/O unit. In this case we can only accept this UBI image in
	 * read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
			ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	/*
	 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
	 * unfortunately, MTD does not provide this information. We should loop
	 * over all physical eraseblocks and invoke mtd->block_is_bad() for
	 * each physical eraseblock. So, we leave @ubi->bad_peb_count
	 * uninitialized so far.
	 */

	return 0;
}

/**
 * autoresize - re-size the volume which has the "auto-resize" flag set.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to re-size
 *
 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
 * the volume table to the largest possible size. See comments in ubi-header.h
 * for more description of the flag. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume_desc desc;
	struct ubi_volume *vol = ubi->volumes[vol_id];
	int err, old_reserved_pebs = vol->reserved_pebs;

	if (ubi->ro_mode) {
		ubi_warn(ubi, "skip auto-resize because of R/O mode");
		return 0;
	}

	/*
	 * Clear the auto-resize flag in the volume in-memory copy of the
	 * volume table, and 'ubi_resize_volume()' will propagate this change
	 * to the flash.
	 */
	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;

	if (ubi->avail_pebs == 0) {
		struct ubi_vtbl_record vtbl_rec;

		/*
		 * No available PEBs to re-size the volume, clear the flag on
		 * flash and exit.
		 */
		vtbl_rec = ubi->vtbl[vol_id];
		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
		if (err)
			ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
				vol_id);
	} else {
		desc.vol = vol;
		err = ubi_resize_volume(&desc,
					old_reserved_pebs + ubi->avail_pebs);
		if (err)
			ubi_err(ubi, "cannot auto-resize volume %d",
				vol_id);
	}

	if (err)
		return err;

	ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
		vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs);
	return 0;
}

/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
 *
 * This function attaches MTD device @mtd_dev to UBI and assigns @ubi_num to
 * the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
 * which case this function finds a vacant device number and assigns it
 * automatically. Returns the new UBI device number in case of success and a
 * negative error code in case of failure.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
		       int vid_hdr_offset, int max_beb_per1024)
{
	struct ubi_device *ubi;
	int i, err;

	if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
		return -EINVAL;

	if (!max_beb_per1024)
		max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;

	/*
	 * Check if we already have the same MTD device attached.
	 *
	 * Note, this function assumes that UBI devices creations and deletions
	 * are serialized, so it does not take the &ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd->index == ubi->mtd->index) {
			pr_err("ubi: mtd%d is already attached to ubi%d\n",
				mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Make sure this MTD device is not emulated on top of an UBI volume
	 * already. Well, generally this recursion works fine, but there are
	 * different problems like the UBI module takes a reference to itself
	 * by attaching (and thus, opening) the emulated MTD device. This
	 * results in inability to unload the module. And in general it makes
	 * no sense to attach emulated MTD devices, so we prohibit this.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI\n",
			mtd->index);
		return -EINVAL;
	}

	/*
	 * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
	 * MLC NAND is different and needs special care, otherwise UBI or UBIFS
	 * will die soon and you will lose all your data.
	 * Relax this rule if the partition we're attaching to operates in SLC
	 * mode.
	 */
	if (mtd->type == MTD_MLCNANDFLASH &&
	    !(mtd->flags & MTD_SLC_ON_MLC_EMULATION)) {
		pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
			mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			pr_err("ubi: only %d UBI devices may be created\n",
				UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure ubi_num is not busy */
		if (ubi_devices[ubi_num]) {
			pr_err("ubi: ubi%i already exists\n", ubi_num);
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	device_initialize(&ubi->dev);
	ubi->dev.release = dev_release;
	ubi->dev.class = &ubi_class;
	ubi->dev.groups = ubi_dev_groups;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_pool.used = ubi->fm_pool.size = 0;
	ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;

	/*
	 * fm_pool.max_size is 5% of the total number of PEBs but it's also
	 * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
	 */
	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
		ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
	ubi->fm_pool.max_size = max(ubi->fm_pool.max_size,
		UBI_FM_MIN_POOL_SIZE);
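
	/*
	 * For instance (illustrative numbers), a device with 4096 PEBs gets
	 * (4096 / 100) * 5 = 200 PEBs in fm_pool, which already lies between
	 * UBI_FM_MIN_POOL_SIZE and UBI_FM_MAX_POOL_SIZE (8 and 256 as
	 * defined in ubi.h upstream), so the clamping changes nothing here.
	 */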

	ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
	ubi->fm_disabled = !fm_autoconvert;
	if (fm_debug)
		ubi_enable_dbg_chk_fastmap(ubi);

	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
	    <= UBI_FM_MAX_START) {
		ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
			UBI_FM_MAX_START);
		ubi->fm_disabled = 1;
	}

	ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "default fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
#else
	ubi->fm_disabled = 1;
#endif
	mutex_init(&ubi->buf_mutex);
	mutex_init(&ubi->ckvol_mutex);
	mutex_init(&ubi->device_mutex);
	spin_lock_init(&ubi->volumes_lock);
	init_rwsem(&ubi->fm_protect);
	init_rwsem(&ubi->fm_eba_sem);

	ubi_msg(ubi, "attaching mtd%d", mtd->index);

	err = io_init(ubi, max_beb_per1024);
	if (err)
		goto out_free;

	err = -ENOMEM;
	ubi->peb_buf = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf)
		goto out_free;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_size = ubi_calc_fm_size(ubi);
	ubi->fm_buf = vzalloc(ubi->fm_size);
	if (!ubi->fm_buf)
		goto out_free;
#endif
	err = ubi_attach(ubi, 0);
	if (err) {
		ubi_err(ubi, "failed to attach mtd%d, error %d",
			mtd->index, err);
		goto out_free;
	}

	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	err = uif_init(ubi);
	if (err)
		goto out_detach;

	err = ubi_debugfs_init_dev(ubi);
	if (err)
		goto out_uif;

	ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		err = PTR_ERR(ubi->bgt_thread);
		ubi_err(ubi, "cannot spawn \"%s\", error %d",
			ubi->bgt_name, err);
		goto out_debugfs;
	}

	ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
		mtd->index, mtd->name, ubi->flash_size >> 20);
	ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
		ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
	ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
		ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
	ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
	ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
		ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
	ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d",
		ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
		ubi->vtbl_slots);
	ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
		ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
		ubi->image_seq);
	ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
		ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);

	/*
	 * The below lock makes sure we do not race with 'ubi_thread()' which
	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
	 */
	spin_lock(&ubi->wl_lock);
	ubi->thread_enabled = 1;
	wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);

	ubi_devices[ubi_num] = ubi;
	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
	return ubi_num;

out_debugfs:
	ubi_debugfs_exit_dev(ubi);
out_uif:
	uif_close(ubi);
out_detach:
	ubi_wl_close(ubi);
	ubi_free_all_volumes(ubi);
	vfree(ubi->vtbl);
out_free:
	vfree(ubi->peb_buf);
	vfree(ubi->fm_buf);
	put_device(&ubi->dev);
	return err;
}
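
/*
 * A sketch of how 'ubi_attach_mtd_dev()' is meant to be called (modeled on
 * the UBI control device ioctl path; error handling elided):
 *
 *	mutex_lock(&ubi_devices_mutex);
 *	err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0);
 *	mutex_unlock(&ubi_devices_mutex);
 *	if (err < 0)
 *		put_mtd_device(mtd);
 */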

/**
 * ubi_detach_mtd_dev - detach an MTD device.
 * @ubi_num: UBI device number to detach from
 * @anyway: detach MTD even if device reference count is not zero
 *
 * This function destroys an UBI device number @ubi_num and detaches the
 * underlying MTD device. Returns zero in case of success and %-EBUSY if the
 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
 * exist.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
ubi_detach_mtd_dev(int ubi_num,int anyway)1055*4882a593Smuzhiyun int ubi_detach_mtd_dev(int ubi_num, int anyway)
1056*4882a593Smuzhiyun {
1057*4882a593Smuzhiyun 	struct ubi_device *ubi;
1058*4882a593Smuzhiyun 
1059*4882a593Smuzhiyun 	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
1060*4882a593Smuzhiyun 		return -EINVAL;
1061*4882a593Smuzhiyun 
1062*4882a593Smuzhiyun 	ubi = ubi_get_device(ubi_num);
1063*4882a593Smuzhiyun 	if (!ubi)
1064*4882a593Smuzhiyun 		return -EINVAL;
1065*4882a593Smuzhiyun 
1066*4882a593Smuzhiyun 	spin_lock(&ubi_devices_lock);
1067*4882a593Smuzhiyun 	put_device(&ubi->dev);
1068*4882a593Smuzhiyun 	ubi->ref_count -= 1;
1069*4882a593Smuzhiyun 	if (ubi->ref_count) {
1070*4882a593Smuzhiyun 		if (!anyway) {
1071*4882a593Smuzhiyun 			spin_unlock(&ubi_devices_lock);
1072*4882a593Smuzhiyun 			return -EBUSY;
1073*4882a593Smuzhiyun 		}
1074*4882a593Smuzhiyun 		/* This may only happen if there is a bug */
1075*4882a593Smuzhiyun 		ubi_err(ubi, "%s reference count %d, destroy anyway",
1076*4882a593Smuzhiyun 			ubi->ubi_name, ubi->ref_count);
1077*4882a593Smuzhiyun 	}
1078*4882a593Smuzhiyun 	ubi_devices[ubi_num] = NULL;
1079*4882a593Smuzhiyun 	spin_unlock(&ubi_devices_lock);
1080*4882a593Smuzhiyun 
1081*4882a593Smuzhiyun 	ubi_assert(ubi_num == ubi->ubi_num);
1082*4882a593Smuzhiyun 	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
1083*4882a593Smuzhiyun 	ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
1084*4882a593Smuzhiyun #ifdef CONFIG_MTD_UBI_FASTMAP
1085*4882a593Smuzhiyun 	/* If we don't write a new fastmap at detach time we lose all
1086*4882a593Smuzhiyun 	 * EC updates that have been made since the last written fastmap.
1087*4882a593Smuzhiyun 	 * In case of fastmap debugging we omit the update to simulate an
1088*4882a593Smuzhiyun 	 * unclean shutdown. */
1089*4882a593Smuzhiyun 	if (!ubi_dbg_chk_fastmap(ubi))
1090*4882a593Smuzhiyun 		ubi_update_fastmap(ubi);
1091*4882a593Smuzhiyun #endif
1092*4882a593Smuzhiyun 	/*
1093*4882a593Smuzhiyun 	 * Before freeing anything, we have to stop the background thread to
1094*4882a593Smuzhiyun 	 * prevent it from doing anything on this device while we are freeing.
1095*4882a593Smuzhiyun 	 */
1096*4882a593Smuzhiyun 	if (ubi->bgt_thread)
1097*4882a593Smuzhiyun 		kthread_stop(ubi->bgt_thread);
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun #ifdef CONFIG_MTD_UBI_FASTMAP
1100*4882a593Smuzhiyun 	cancel_work_sync(&ubi->fm_work);
1101*4882a593Smuzhiyun #endif
1102*4882a593Smuzhiyun 	ubi_debugfs_exit_dev(ubi);
1103*4882a593Smuzhiyun 	uif_close(ubi);
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 	ubi_wl_close(ubi);
1106*4882a593Smuzhiyun 	ubi_free_internal_volumes(ubi);
1107*4882a593Smuzhiyun 	vfree(ubi->vtbl);
1108*4882a593Smuzhiyun 	vfree(ubi->peb_buf);
1109*4882a593Smuzhiyun 	vfree(ubi->fm_buf);
1110*4882a593Smuzhiyun 	ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
1111*4882a593Smuzhiyun 	put_mtd_device(ubi->mtd);
1112*4882a593Smuzhiyun 	put_device(&ubi->dev);
1113*4882a593Smuzhiyun 	return 0;
1114*4882a593Smuzhiyun }
1115*4882a593Smuzhiyun 
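/*
 * Illustrative caller pattern (drawn from the kernel-doc note above, not a
 * new API): ubi_detach_mtd_dev() must be called with @ubi_devices_mutex
 * held, exactly as ubi_init() and ubi_exit() below do on their error and
 * exit paths.
 */
#if 0
static int example_detach(int ubi_num)
{
	int err;

	mutex_lock(&ubi_devices_mutex);
	err = ubi_detach_mtd_dev(ubi_num, 0); /* -EBUSY if still referenced */
	mutex_unlock(&ubi_devices_mutex);
	return err;
}
#endif
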
1116*4882a593Smuzhiyun /**
1117*4882a593Smuzhiyun  * open_mtd_by_chdev - open an MTD device by its character device node path.
1118*4882a593Smuzhiyun  * @mtd_dev: MTD character device node path
1119*4882a593Smuzhiyun  *
1120*4882a593Smuzhiyun  * This helper function opens an MTD device by its character device node path.
1121*4882a593Smuzhiyun  * Returns the MTD device description object in case of success and a negative
1122*4882a593Smuzhiyun  * error code in case of failure.
1123*4882a593Smuzhiyun  */
1124*4882a593Smuzhiyun static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
1125*4882a593Smuzhiyun {
1126*4882a593Smuzhiyun 	int err, minor;
1127*4882a593Smuzhiyun 	struct path path;
1128*4882a593Smuzhiyun 	struct kstat stat;
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 	/* Probably this is an MTD character device node path */
1131*4882a593Smuzhiyun 	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
1132*4882a593Smuzhiyun 	if (err)
1133*4882a593Smuzhiyun 		return ERR_PTR(err);
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun 	err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
1136*4882a593Smuzhiyun 	path_put(&path);
1137*4882a593Smuzhiyun 	if (err)
1138*4882a593Smuzhiyun 		return ERR_PTR(err);
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 	/* MTD device number is defined by the major / minor numbers */
1141*4882a593Smuzhiyun 	if (MAJOR(stat.rdev) != MTD_CHAR_MAJOR || !S_ISCHR(stat.mode))
1142*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
1143*4882a593Smuzhiyun 
1144*4882a593Smuzhiyun 	minor = MINOR(stat.rdev);
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 	if (minor & 1)
1147*4882a593Smuzhiyun 		/*
1148*4882a593Smuzhiyun 		 * We do not think the "/dev/mtdrX" devices need to be
1149*4882a593Smuzhiyun 		 * supported, so we do not support them, to avoid extra work.
1150*4882a593Smuzhiyun 		 */
1151*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun 	return get_mtd_device(NULL, minor / 2);
1154*4882a593Smuzhiyun }
1155*4882a593Smuzhiyun 
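/*
 * Worked example for the minor-number decoding above (an illustrative
 * helper, not part of the driver): MTD character devices come in pairs,
 * even minors for read-write "/dev/mtdX" and odd minors for read-only
 * "/dev/mtdrX", hence the oddness check and the division by two.
 */
#if 0
static int example_minor_to_mtd_num(unsigned int minor)
{
	if (minor & 1)
		return -EINVAL;	/* /dev/mtdrX (read-only) - rejected */
	return minor / 2;	/* e.g. minor 6 -> /dev/mtd3 */
}
#endif
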
1156*4882a593Smuzhiyun /**
1157*4882a593Smuzhiyun  * open_mtd_device - open MTD device by name, character device path, or number.
1158*4882a593Smuzhiyun  * @mtd_dev: name, character device node path, or MTD device number
1159*4882a593Smuzhiyun  *
1160*4882a593Smuzhiyun  * This function tries to open the MTD device described by the @mtd_dev string,
1161*4882a593Smuzhiyun  * which is first treated as an ASCII MTD device number; if that fails, it is
1162*4882a593Smuzhiyun  * treated as an MTD device name, and if that also fails, it is treated as an
1163*4882a593Smuzhiyun  * MTD character device node path. Returns the MTD device description object
1164*4882a593Smuzhiyun  * in case of success and a negative error code in case of failure.
1165*4882a593Smuzhiyun  */
1166*4882a593Smuzhiyun static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
1167*4882a593Smuzhiyun {
1168*4882a593Smuzhiyun 	struct mtd_info *mtd;
1169*4882a593Smuzhiyun 	int mtd_num;
1170*4882a593Smuzhiyun 	char *endp;
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	mtd_num = simple_strtoul(mtd_dev, &endp, 0);
1173*4882a593Smuzhiyun 	if (*endp != '\0' || mtd_dev == endp) {
1174*4882a593Smuzhiyun 		/*
1175*4882a593Smuzhiyun 		 * This does not look like an ASCII integer, so probably this
1176*4882a593Smuzhiyun 		 * is an MTD device name.
1177*4882a593Smuzhiyun 		 */
1178*4882a593Smuzhiyun 		mtd = get_mtd_device_nm(mtd_dev);
1179*4882a593Smuzhiyun 		if (PTR_ERR(mtd) == -ENODEV)
1180*4882a593Smuzhiyun 			/* Probably this is an MTD character device node path */
1181*4882a593Smuzhiyun 			mtd = open_mtd_by_chdev(mtd_dev);
1182*4882a593Smuzhiyun 	} else
1183*4882a593Smuzhiyun 		mtd = get_mtd_device(NULL, mtd_num);
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	return mtd;
1186*4882a593Smuzhiyun }
1187*4882a593Smuzhiyun 
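/*
 * Example forms accepted by open_mtd_device() (assumed typical values, shown
 * only to make the lookup order concrete): an ASCII number is tried first,
 * then a device name, then a character device node path.
 */
#if 0
static void __init example_open_mtd_forms(void)
{
	struct mtd_info *by_num  = open_mtd_device("0");         /* MTD number */
	struct mtd_info *by_name = open_mtd_device("rootfs");    /* MTD name */
	struct mtd_info *by_path = open_mtd_device("/dev/mtd0"); /* chdev path */

	/* Each successfully opened device must later be released. */
	if (!IS_ERR(by_num))
		put_mtd_device(by_num);
	if (!IS_ERR(by_name))
		put_mtd_device(by_name);
	if (!IS_ERR(by_path))
		put_mtd_device(by_path);
}
#endif
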
1188*4882a593Smuzhiyun static int __init ubi_init(void)
1189*4882a593Smuzhiyun {
1190*4882a593Smuzhiyun 	int err, i, k;
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 	/* Ensure that EC and VID headers have correct size */
1193*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
1194*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	if (mtd_devs > UBI_MAX_DEVICES) {
1197*4882a593Smuzhiyun 		pr_err("UBI error: too many MTD devices, maximum is %d\n",
1198*4882a593Smuzhiyun 		       UBI_MAX_DEVICES);
1199*4882a593Smuzhiyun 		return -EINVAL;
1200*4882a593Smuzhiyun 	}
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	/* Create base sysfs directory and sysfs files */
1203*4882a593Smuzhiyun 	err = class_register(&ubi_class);
1204*4882a593Smuzhiyun 	if (err < 0)
1205*4882a593Smuzhiyun 		return err;
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	err = misc_register(&ubi_ctrl_cdev);
1208*4882a593Smuzhiyun 	if (err) {
1209*4882a593Smuzhiyun 		pr_err("UBI error: cannot register device\n");
1210*4882a593Smuzhiyun 		goto out;
1211*4882a593Smuzhiyun 	}
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
1214*4882a593Smuzhiyun 					      sizeof(struct ubi_wl_entry),
1215*4882a593Smuzhiyun 					      0, 0, NULL);
1216*4882a593Smuzhiyun 	if (!ubi_wl_entry_slab) {
1217*4882a593Smuzhiyun 		err = -ENOMEM;
1218*4882a593Smuzhiyun 		goto out_dev_unreg;
1219*4882a593Smuzhiyun 	}
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	err = ubi_debugfs_init();
1222*4882a593Smuzhiyun 	if (err)
1223*4882a593Smuzhiyun 		goto out_slab;
1224*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	/* Attach MTD devices */
1227*4882a593Smuzhiyun 	for (i = 0; i < mtd_devs; i++) {
1228*4882a593Smuzhiyun 		struct mtd_dev_param *p = &mtd_dev_param[i];
1229*4882a593Smuzhiyun 		struct mtd_info *mtd;
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 		cond_resched();
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun 		mtd = open_mtd_device(p->name);
1234*4882a593Smuzhiyun 		if (IS_ERR(mtd)) {
1235*4882a593Smuzhiyun 			err = PTR_ERR(mtd);
1236*4882a593Smuzhiyun 			pr_err("UBI error: cannot open mtd %s, error %d\n",
1237*4882a593Smuzhiyun 			       p->name, err);
1238*4882a593Smuzhiyun 			/* See the comment below regarding ubi_is_module(). */
1239*4882a593Smuzhiyun 			if (ubi_is_module())
1240*4882a593Smuzhiyun 				goto out_detach;
1241*4882a593Smuzhiyun 			continue;
1242*4882a593Smuzhiyun 		}
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 		mutex_lock(&ubi_devices_mutex);
1245*4882a593Smuzhiyun 		err = ubi_attach_mtd_dev(mtd, p->ubi_num,
1246*4882a593Smuzhiyun 					 p->vid_hdr_offs, p->max_beb_per1024);
1247*4882a593Smuzhiyun 		mutex_unlock(&ubi_devices_mutex);
1248*4882a593Smuzhiyun 		if (err < 0) {
1249*4882a593Smuzhiyun 			pr_err("UBI error: cannot attach mtd%d\n",
1250*4882a593Smuzhiyun 			       mtd->index);
1251*4882a593Smuzhiyun 			put_mtd_device(mtd);
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 			/*
1254*4882a593Smuzhiyun 			 * Originally UBI stopped initializing on any error.
1255*4882a593Smuzhiyun 			 * However, it was later found that this behavior
1256*4882a593Smuzhiyun 			 * is not very good when UBI is compiled into the
1257*4882a593Smuzhiyun 			 * kernel and the MTD devices to attach are passed
1258*4882a593Smuzhiyun 			 * through the command line: a UBI failure stopped
1259*4882a593Smuzhiyun 			 * the whole boot sequence.
1260*4882a593Smuzhiyun 			 *
1261*4882a593Smuzhiyun 			 * To fix this, we changed the behavior for the
1262*4882a593Smuzhiyun 			 * non-module case, but preserved the old behavior for
1263*4882a593Smuzhiyun 			 * the module case, just for compatibility. This is a
1264*4882a593Smuzhiyun 			 * little inconsistent, though.
1265*4882a593Smuzhiyun 			 */
1266*4882a593Smuzhiyun 			if (ubi_is_module())
1267*4882a593Smuzhiyun 				goto out_detach;
1268*4882a593Smuzhiyun 		}
1269*4882a593Smuzhiyun 	}
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 	err = ubiblock_init();
1272*4882a593Smuzhiyun 	if (err) {
1273*4882a593Smuzhiyun 		pr_err("UBI error: block: cannot initialize, error %d\n", err);
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 		/* See the comment above regarding ubi_is_module(). */
1276*4882a593Smuzhiyun 		if (ubi_is_module())
1277*4882a593Smuzhiyun 			goto out_detach;
1278*4882a593Smuzhiyun 	}
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun 	return 0;
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun out_detach:
1283*4882a593Smuzhiyun 	for (k = 0; k < i; k++)
1284*4882a593Smuzhiyun 		if (ubi_devices[k]) {
1285*4882a593Smuzhiyun 			mutex_lock(&ubi_devices_mutex);
1286*4882a593Smuzhiyun 			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
1287*4882a593Smuzhiyun 			mutex_unlock(&ubi_devices_mutex);
1288*4882a593Smuzhiyun 		}
1289*4882a593Smuzhiyun 	ubi_debugfs_exit();
1290*4882a593Smuzhiyun out_slab:
1291*4882a593Smuzhiyun 	kmem_cache_destroy(ubi_wl_entry_slab);
1292*4882a593Smuzhiyun out_dev_unreg:
1293*4882a593Smuzhiyun 	misc_deregister(&ubi_ctrl_cdev);
1294*4882a593Smuzhiyun out:
1295*4882a593Smuzhiyun 	class_unregister(&ubi_class);
1296*4882a593Smuzhiyun 	pr_err("UBI error: cannot initialize UBI, error %d\n", err);
1297*4882a593Smuzhiyun 	return err;
1298*4882a593Smuzhiyun }
1299*4882a593Smuzhiyun late_initcall(ubi_init);
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun static void __exit ubi_exit(void)
1302*4882a593Smuzhiyun {
1303*4882a593Smuzhiyun 	int i;
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun 	ubiblock_exit();
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 	for (i = 0; i < UBI_MAX_DEVICES; i++)
1308*4882a593Smuzhiyun 		if (ubi_devices[i]) {
1309*4882a593Smuzhiyun 			mutex_lock(&ubi_devices_mutex);
1310*4882a593Smuzhiyun 			ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
1311*4882a593Smuzhiyun 			mutex_unlock(&ubi_devices_mutex);
1312*4882a593Smuzhiyun 		}
1313*4882a593Smuzhiyun 	ubi_debugfs_exit();
1314*4882a593Smuzhiyun 	kmem_cache_destroy(ubi_wl_entry_slab);
1315*4882a593Smuzhiyun 	misc_deregister(&ubi_ctrl_cdev);
1316*4882a593Smuzhiyun 	class_unregister(&ubi_class);
1317*4882a593Smuzhiyun }
1318*4882a593Smuzhiyun module_exit(ubi_exit);
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun /**
1321*4882a593Smuzhiyun  * bytes_str_to_int - convert a number of bytes string into an integer.
1322*4882a593Smuzhiyun  * @str: the string to convert
1323*4882a593Smuzhiyun  *
1324*4882a593Smuzhiyun  * This function returns the resulting positive integer in case of success and
1325*4882a593Smuzhiyun  * a negative error code in case of failure.
1326*4882a593Smuzhiyun  */
1327*4882a593Smuzhiyun static int bytes_str_to_int(const char *str)
1328*4882a593Smuzhiyun {
1329*4882a593Smuzhiyun 	char *endp;
1330*4882a593Smuzhiyun 	unsigned long result;
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun 	result = simple_strtoul(str, &endp, 0);
1333*4882a593Smuzhiyun 	if (str == endp || result >= INT_MAX) {
1334*4882a593Smuzhiyun 		pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
1335*4882a593Smuzhiyun 		return -EINVAL;
1336*4882a593Smuzhiyun 	}
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 	switch (*endp) {
1339*4882a593Smuzhiyun 	case 'G':
1340*4882a593Smuzhiyun 		result *= 1024;
1341*4882a593Smuzhiyun 		fallthrough;
1342*4882a593Smuzhiyun 	case 'M':
1343*4882a593Smuzhiyun 		result *= 1024;
1344*4882a593Smuzhiyun 		fallthrough;
1345*4882a593Smuzhiyun 	case 'K':
1346*4882a593Smuzhiyun 		result *= 1024;
1347*4882a593Smuzhiyun 		if (endp[1] == 'i' && endp[2] == 'B')
1348*4882a593Smuzhiyun 			endp += 2;
1349*4882a593Smuzhiyun 	case '\0':
1350*4882a593Smuzhiyun 		break;
1351*4882a593Smuzhiyun 	default:
1352*4882a593Smuzhiyun 		pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
1353*4882a593Smuzhiyun 		return -EINVAL;
1354*4882a593Smuzhiyun 	}
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	return result;
1357*4882a593Smuzhiyun }
1358*4882a593Smuzhiyun 
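/*
 * Worked examples for the suffix handling above (a hypothetical self-check,
 * not a test that exists in-tree): "K", "M" and "G" multiply by 1024 once,
 * twice and three times respectively via the fall-throughs, and a trailing
 * "iB" after the suffix is accepted.
 */
#if 0
static void example_bytes_str_to_int(void)
{
	WARN_ON(bytes_str_to_int("2048") != 2048);
	WARN_ON(bytes_str_to_int("4K") != 4096);
	WARN_ON(bytes_str_to_int("4KiB") != 4096);
	WARN_ON(bytes_str_to_int("1M") != 1024 * 1024);
	WARN_ON(bytes_str_to_int("1G") != 1024 * 1024 * 1024);
	WARN_ON(bytes_str_to_int("bogus") != -EINVAL);
}
#endif
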
1359*4882a593Smuzhiyun /**
1360*4882a593Smuzhiyun  * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
1361*4882a593Smuzhiyun  * @val: the parameter value to parse
1362*4882a593Smuzhiyun  * @kp: not used
1363*4882a593Smuzhiyun  *
1364*4882a593Smuzhiyun  * This function returns zero in case of success and a negative error code in
1365*4882a593Smuzhiyun  * case of error.
1366*4882a593Smuzhiyun  */
1367*4882a593Smuzhiyun static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
1368*4882a593Smuzhiyun {
1369*4882a593Smuzhiyun 	int i, len;
1370*4882a593Smuzhiyun 	struct mtd_dev_param *p;
1371*4882a593Smuzhiyun 	char buf[MTD_PARAM_LEN_MAX];
1372*4882a593Smuzhiyun 	char *pbuf = &buf[0];
1373*4882a593Smuzhiyun 	char *tokens[MTD_PARAM_MAX_COUNT], *token;
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun 	if (!val)
1376*4882a593Smuzhiyun 		return -EINVAL;
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun 	if (mtd_devs == UBI_MAX_DEVICES) {
1379*4882a593Smuzhiyun 		pr_err("UBI error: too many parameters, max. is %d\n",
1380*4882a593Smuzhiyun 		       UBI_MAX_DEVICES);
1381*4882a593Smuzhiyun 		return -EINVAL;
1382*4882a593Smuzhiyun 	}
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun 	len = strnlen(val, MTD_PARAM_LEN_MAX);
1385*4882a593Smuzhiyun 	if (len == MTD_PARAM_LEN_MAX) {
1386*4882a593Smuzhiyun 		pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
1387*4882a593Smuzhiyun 		       val, MTD_PARAM_LEN_MAX);
1388*4882a593Smuzhiyun 		return -EINVAL;
1389*4882a593Smuzhiyun 	}
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 	if (len == 0) {
1392*4882a593Smuzhiyun 		pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
1393*4882a593Smuzhiyun 		return 0;
1394*4882a593Smuzhiyun 	}
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 	strcpy(buf, val);
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun 	/* Get rid of the final newline */
1399*4882a593Smuzhiyun 	if (buf[len - 1] == '\n')
1400*4882a593Smuzhiyun 		buf[len - 1] = '\0';
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
1403*4882a593Smuzhiyun 		tokens[i] = strsep(&pbuf, ",");
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 	if (pbuf) {
1406*4882a593Smuzhiyun 		pr_err("UBI error: too many arguments at \"%s\"\n", val);
1407*4882a593Smuzhiyun 		return -EINVAL;
1408*4882a593Smuzhiyun 	}
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 	p = &mtd_dev_param[mtd_devs];
1411*4882a593Smuzhiyun 	strcpy(&p->name[0], tokens[0]);
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	token = tokens[1];
1414*4882a593Smuzhiyun 	if (token) {
1415*4882a593Smuzhiyun 		p->vid_hdr_offs = bytes_str_to_int(token);
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun 		if (p->vid_hdr_offs < 0)
1418*4882a593Smuzhiyun 			return p->vid_hdr_offs;
1419*4882a593Smuzhiyun 	}
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun 	token = tokens[2];
1422*4882a593Smuzhiyun 	if (token) {
1423*4882a593Smuzhiyun 		int err = kstrtoint(token, 10, &p->max_beb_per1024);
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 		if (err) {
1426*4882a593Smuzhiyun 			pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
1427*4882a593Smuzhiyun 			       token);
1428*4882a593Smuzhiyun 			return -EINVAL;
1429*4882a593Smuzhiyun 		}
1430*4882a593Smuzhiyun 	}
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun 	token = tokens[3];
1433*4882a593Smuzhiyun 	if (token) {
1434*4882a593Smuzhiyun 		int err = kstrtoint(token, 10, &p->ubi_num);
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 		if (err) {
1437*4882a593Smuzhiyun 			pr_err("UBI error: bad value for ubi_num parameter: %s",
1438*4882a593Smuzhiyun 			       token);
1439*4882a593Smuzhiyun 			return -EINVAL;
1440*4882a593Smuzhiyun 		}
1441*4882a593Smuzhiyun 	} else
1442*4882a593Smuzhiyun 		p->ubi_num = UBI_DEV_NUM_AUTO;
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun 	mtd_devs += 1;
1445*4882a593Smuzhiyun 	return 0;
1446*4882a593Smuzhiyun }
1447*4882a593Smuzhiyun 
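/*
 * Illustrative result of the parser above for a hypothetical parameter
 * "mtd=/dev/mtd1,2048,20,3" (assumed input, not a real configuration):
 * strsep() splits the value at the commas into at most four tokens, missing
 * trailing tokens stay NULL, and @ubi_num then defaults to UBI_DEV_NUM_AUTO.
 */
#if 0
static const struct mtd_dev_param example_parsed = {
	.name		 = "/dev/mtd1",	/* tokens[0] */
	.vid_hdr_offs	 = 2048,	/* tokens[1], via bytes_str_to_int() */
	.max_beb_per1024 = 20,		/* tokens[2], via kstrtoint() */
	.ubi_num	 = 3,		/* tokens[3], via kstrtoint() */
};
#endif
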
1448*4882a593Smuzhiyun module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 0400);
1449*4882a593Smuzhiyun MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num]]].\n"
1450*4882a593Smuzhiyun 		      "Multiple \"mtd\" parameters may be specified.\n"
1451*4882a593Smuzhiyun 		      "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
1452*4882a593Smuzhiyun 		      "Optional \"vid_hdr_offs\" parameter specifies the VID header position to be used by UBI (the default is used if it is 0).\n"
1453*4882a593Smuzhiyun 		      "Optional \"max_beb_per1024\" parameter specifies the maximum expected number of bad eraseblocks per 1024 eraseblocks (the default ("
1454*4882a593Smuzhiyun 		      __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") is used if it is 0).\n"
1455*4882a593Smuzhiyun 		      "Optional \"ubi_num\" parameter specifies the UBI device number to assign to the newly created UBI device (assigned automatically by default).\n"
1456*4882a593Smuzhiyun 		      "\n"
1457*4882a593Smuzhiyun 		      "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
1458*4882a593Smuzhiyun 		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
1459*4882a593Smuzhiyun 		      "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using the default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling\n"
1460*4882a593Smuzhiyun 		      "\t(e.g. if the NAND *chipset* has 4096 PEBs, 100 will be reserved for this UBI device).\n"
1461*4882a593Smuzhiyun 		      "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI device 5, using default values for the other fields.");
1462*4882a593Smuzhiyun #ifdef CONFIG_MTD_UBI_FASTMAP
1463*4882a593Smuzhiyun module_param(fm_autoconvert, bool, 0644);
1464*4882a593Smuzhiyun MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
1465*4882a593Smuzhiyun module_param(fm_debug, bool, 0);
1466*4882a593Smuzhiyun MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
1467*4882a593Smuzhiyun #endif
1468*4882a593Smuzhiyun MODULE_VERSION(__stringify(UBI_VERSION));
1469*4882a593Smuzhiyun MODULE_DESCRIPTION("UBI - Unsorted Block Images");
1470*4882a593Smuzhiyun MODULE_AUTHOR("Artem Bityutskiy");
1471*4882a593Smuzhiyun MODULE_LICENSE("GPL");
1472*4882a593Smuzhiyun MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
1473