/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
 * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 */

#ifndef __UBOOT__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>
#include <asm/div64.h>
#else
#include <div64.h>
#include <linux/compat.h>
#endif

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <ubi_uboot.h>

/*
 * Our storage structure:
 * Subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 *
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))

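/*
 * Layout sketch (illustrative): a single allocation holds the mtd_concat
 * header immediately followed by the num_subdev pointer slots, so the
 * constructor below can simply set
 *	concat->subdev = (struct mtd_info **)(concat + 1);
 */
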
/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */

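/*
 * Illustrative example of the translation (hypothetical sizes): with two
 * 1 MiB subdevices, a request at offset 0x150000 of the concatenated device
 * walks past subdev[0] (offset -= 0x100000) and is issued to subdev[1] at
 * offset 0x50000.  Requests that span a boundary are split, and the loop
 * continues with the next subdevice at offset 0.
 */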
static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

#ifdef __UBOOT__
	*retlen = 0;
#endif

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

#ifdef __UBOOT__
	*retlen = 0;
#endif

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		err = mtd_write(subdev, to, size, &retsize, buf);
		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

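/*
 * concat_writev() below (Linux build only) splits an iovec-style write
 * across subdevice boundaries: it duplicates the kvec array, trims the
 * entry that straddles a boundary, hands the partial vector to the current
 * subdevice, and resumes from the remainder of that entry on the next
 * subdevice.
 */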
#ifndef __UBOOT__
static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t * retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size; /* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		err = mtd_writev(subdev, &vecs_copy[entry_low],
				 entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}
#endif

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = mtd_write_oob(subdev, to, &devops);
		ops->retlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
	/* Nothing to do here in U-Boot */
#ifndef __UBOOT__
	wake_up((wait_queue_head_t *) instr->priv);
#endif
}

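/*
 * Issue an erase on a single subdevice and wait for it to complete.  In the
 * Linux build the callback above wakes the waitqueue; under U-Boot the
 * wait-queue helpers are effectively no-ops and mtd_erase() completes
 * synchronously, so the wait falls through immediately.
 */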
static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd_erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
			      (erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_lock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_unlock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_sync(subdev);
	}
}

#ifndef __UBOOT__
static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = mtd_suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_resume(subdev);
	}
}
#endif

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * try to support NOMMU mmaps on concatenated devices
 * - we don't support subdev spanning as we can't guarantee it'll work
 */
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
					      unsigned long len,
					      unsigned long offset,
					      unsigned long flags)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (offset >= subdev->size) {
			offset -= subdev->size;
			continue;
		}

		return mtd_get_unmapped_area(subdev, len, offset, flags);
	}

	return (unsigned long) -ENOSYS;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned on success. This function does _not_
 * register any devices: this is the caller's responsibility.
 */
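/*
 * Usage sketch (illustrative only; the device names, the count and the call
 * to add_mtd_device() are assumptions about the caller, not part of this
 * file):
 *
 *	struct mtd_info *parts[2] = { nor0, nor1 };
 *	struct mtd_info *merged;
 *
 *	merged = mtd_concat_create(parts, 2, "nor-concat");
 *	if (merged)
 *		add_mtd_device(merged);
 */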
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices      */
#ifndef __UBOOT__
				   const char *name)
#else
				   char *name)
#endif
{				/* name for the new device   */
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	int max_writebufsize = 0;

	debug("Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	debug("into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;

	for (i = 0; i < num_devs; i++)
		if (max_writebufsize < subdev[i]->writebufsize)
			max_writebufsize = subdev[i]->writebufsize;
	concat->mtd.writebufsize = max_writebufsize;

	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
#ifndef __UBOOT__
	if (subdev[0]->_writev)
		concat->mtd._writev = concat_writev;
#endif
	if (subdev[0]->_read_oob)
		concat->mtd._read_oob = concat_read_oob;
	if (subdev[0]->_write_oob)
		concat->mtd._write_oob = concat_write_oob;
	if (subdev[0]->_block_isbad)
		concat->mtd._block_isbad = concat_block_isbad;
	if (subdev[0]->_block_markbad)
		concat->mtd._block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

#ifndef __UBOOT__
	concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;
#endif

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->
			     flags) & ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

#ifndef __UBOOT__
		/* only permit direct mapping if the BDIs are all the same
		 * - copy-mapping is still permitted
		 */
		if (concat->mtd.backing_dev_info !=
		    subdev[i]->backing_dev_info)
			concat->mtd.backing_dev_info =
				&default_backing_dev_info;
#endif

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
			subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd._read_oob != !subdev[i]->_read_oob ||
		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];

	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd._erase = concat_erase;
	concat->mtd._read = concat_read;
	concat->mtd._write = concat_write;
	concat->mtd._sync = concat_sync;
	concat->mtd._lock = concat_lock;
	concat->mtd._unlock = concat_unlock;
#ifndef __UBOOT__
	concat->mtd._suspend = concat_suspend;
	concat->mtd._resume = concat_resume;
#endif
	concat->mtd._get_unmapped_area = concat_get_unmapped_area;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
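	/*
	 * Worked example (hypothetical sizes): concatenating one chip with a
	 * uniform 64 KiB erase size and one with a uniform 128 KiB erase size
	 * gives num_erase_region == 2, max_erasesize == 128 KiB, and two
	 * entries in concat->mtd.eraseregions, one describing each span.
	 */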
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {

				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * the erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");