// SPDX-License-Identifier: GPL-2.0-only
/*
 * rfd_ftl.c -- resident flash disk (flash translation layer)
 *
 * Copyright © 2005  Sean Young <sean@mess.org>
 *
 * This type of flash translation layer (FTL) is used by the Embedded BIOS
 * by General Software. It is known as the Resident Flash Disk (RFD), see:
 *
 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 *
 * based on ftl.c
 */

#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/module.h>

#include <asm/types.h>

static int block_size = 0;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");

#define PREFIX "rfd_ftl: "

/* This major has been assigned by device@lanana.org */
#ifndef RFD_FTL_MAJOR
#define RFD_FTL_MAJOR		256
#endif

/* Maximum number of partitions in an FTL region */
#define PART_BITS		4

/* An erase unit should start with this value */
#define RFD_MAGIC		0x9193

/* the second value is 0xffff or 0xffc8; function unknown */

/* the third value is always 0xffff, ignored */

/* next is an array of mapping for each corresponding sector */
#define HEADER_MAP_OFFSET	3
#define SECTOR_DELETED		0x0000
#define SECTOR_ZERO		0xfffe
#define SECTOR_FREE		0xffff

#define SECTOR_SIZE		512

#define SECTORS_PER_TRACK	63

struct block {
	enum {
		BLOCK_OK,
		BLOCK_ERASING,
		BLOCK_ERASED,
		BLOCK_UNUSED,
		BLOCK_FAILED
	} state;
	int free_sectors;
	int used_sectors;
	int erases;
	u_long offset;
};

struct partition {
	struct mtd_blktrans_dev mbd;

	u_int block_size;		/* size of erase unit */
	u_int total_blocks;		/* number of erase units */
	u_int header_sectors_per_block;	/* header sectors in erase unit */
	u_int data_sectors_per_block;	/* data sectors in erase unit */
	u_int sector_count;		/* sectors in translated disk */
	u_int header_size;		/* bytes in header sector */
	int reserved_block;		/* block next up for reclaim */
	int current_block;		/* block to write to */
	u16 *header_cache;		/* cached header */

	int is_reclaiming;
	int cylinders;
	int errors;
	u_long *sector_map;
	struct block *blocks;
};

static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);

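/*
 * Parse the header of one erase unit: check the RFD magic and, for every
 * mapped entry, record where that logical sector lives on flash in
 * part->sector_map. Also counts free and used sectors and remembers a
 * completely free unit as the reserved (reclaim spare) block.
 */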
static int build_block_map(struct partition *part, int block_no)
{
	struct block *block = &part->blocks[block_no];
	int i;

	block->offset = part->block_size * block_no;

	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
		block->state = BLOCK_UNUSED;
		return -ENOENT;
	}

	block->state = BLOCK_OK;

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry;

		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);

		if (entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_FREE) {
			block->free_sectors++;
			continue;
		}

		if (entry == SECTOR_ZERO)
			entry = 0;

		if (entry >= part->sector_count) {
			printk(KERN_WARNING PREFIX
				"'%s': unit #%d: entry %d corrupt, "
				"sector %d out of range\n",
				part->mbd.mtd->name, block_no, i, entry);
			continue;
		}

		if (part->sector_map[entry] != -1) {
			printk(KERN_WARNING PREFIX
				"'%s': more than one entry for sector %d\n",
				part->mbd.mtd->name, entry);
			part->errors = 1;
			continue;
		}

		part->sector_map[entry] = block->offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		block->used_sectors++;
	}

	if (block->free_sectors == part->data_sectors_per_block)
		part->reserved_block = block_no;

	return 0;
}

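/*
 * Work out the on-flash layout (header vs. data sectors per erase unit, disk
 * geometry) from the MTD size and block size, allocate the header cache,
 * block array and sector map, then read every unit header and build the map.
 * Returns -ENOENT if no unit carries the RFD magic.
 */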
static int scan_header(struct partition *part)
{
	int sectors_per_block;
	int i, rc = -ENOMEM;
	int blocks_found;
	size_t retlen;

	sectors_per_block = part->block_size / SECTOR_SIZE;
	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;

	if (part->total_blocks < 2)
		return -ENOENT;

	/* each erase unit starts with a three-word (u16) header, followed by the map */
	part->header_sectors_per_block =
			((HEADER_MAP_OFFSET + sectors_per_block) *
			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;

	part->data_sectors_per_block = sectors_per_block -
			part->header_sectors_per_block;

	part->header_size = (HEADER_MAP_OFFSET +
			part->data_sectors_per_block) * sizeof(u16);

	part->cylinders = (part->data_sectors_per_block *
			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;

	part->sector_count = part->cylinders * SECTORS_PER_TRACK;

	part->current_block = -1;
	part->reserved_block = -1;
	part->is_reclaiming = 0;

	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
	if (!part->header_cache)
		goto err;

	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
			GFP_KERNEL);
	if (!part->blocks)
		goto err;

	part->sector_map = vmalloc(array_size(sizeof(u_long),
					      part->sector_count));
	if (!part->sector_map) {
		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
			"sector map", part->mbd.mtd->name);
		goto err;
	}

	for (i=0; i<part->sector_count; i++)
		part->sector_map[i] = -1;

	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
		rc = mtd_read(part->mbd.mtd, i * part->block_size,
			      part->header_size, &retlen,
			      (u_char *)part->header_cache);

		if (!rc && retlen != part->header_size)
			rc = -EIO;

		if (rc)
			goto err;

		if (!build_block_map(part, i))
			blocks_found++;
	}

	if (blocks_found == 0) {
		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
				part->mbd.mtd->name);
		rc = -ENOENT;
		goto err;
	}

	if (part->reserved_block == -1) {
		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
				part->mbd.mtd->name);

		part->errors = 1;
	}

	return 0;

err:
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);

	return rc;
}

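/*
 * Block-layer read: look the logical sector up in sector_map and read it
 * from flash; a sector that has never been written reads back as zeroes.
 */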
static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition*)dev;
	u_long addr;
	size_t retlen;
	int rc;

	if (sector >= part->sector_count)
		return -EIO;

	addr = part->sector_map[sector];
	if (addr != -1) {
		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			      (u_char *)buf);
		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_WARNING PREFIX "error reading '%s' at "
				"0x%lx\n", part->mbd.mtd->name, addr);
			return rc;
		}
	} else
		memset(buf, 0, SECTOR_SIZE);

	return 0;
}

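/*
 * Erase one unit and, on success, rewrite the RFD magic at its start so the
 * unit can be reused immediately; on any failure the block is marked
 * BLOCK_FAILED and taken out of service.
 */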
static int erase_block(struct partition *part, int block)
{
	struct erase_info *erase;
	int rc;

	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
	if (!erase)
		return -ENOMEM;

	erase->addr = part->blocks[block].offset;
	erase->len = part->block_size;

	part->blocks[block].state = BLOCK_ERASING;
	part->blocks[block].free_sectors = 0;

	rc = mtd_erase(part->mbd.mtd, erase);
	if (rc) {
		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
				"failed\n", (unsigned long long)erase->addr,
				(unsigned long long)erase->len, part->mbd.mtd->name);
		part->blocks[block].state = BLOCK_FAILED;
		part->blocks[block].free_sectors = 0;
		part->blocks[block].used_sectors = 0;
	} else {
		u16 magic = cpu_to_le16(RFD_MAGIC);
		size_t retlen;

		part->blocks[block].state = BLOCK_ERASED;
		part->blocks[block].free_sectors = part->data_sectors_per_block;
		part->blocks[block].used_sectors = 0;
		part->blocks[block].erases++;

		rc = mtd_write(part->mbd.mtd, part->blocks[block].offset,
			       sizeof(magic), &retlen, (u_char *)&magic);
		if (!rc && retlen != sizeof(magic))
			rc = -EIO;

		if (rc) {
			pr_err(PREFIX "'%s': unable to write RFD header at 0x%lx\n",
			       part->mbd.mtd->name, part->blocks[block].offset);
			part->blocks[block].state = BLOCK_FAILED;
		} else {
			part->blocks[block].state = BLOCK_OK;
		}
	}

	kfree(erase);

	return rc;
}

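/*
 * Relocate every live sector out of the unit that is about to be reclaimed
 * by reading it back and pushing it through the normal write path, which
 * stores it in the current writable unit. The sector at *old_sector (the one
 * being superseded by the caller) is skipped rather than copied.
 */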
static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
{
	void *sector_data;
	u16 *map;
	size_t retlen;
	int i, rc = -ENOMEM;

	part->is_reclaiming = 1;

	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
	if (!sector_data)
		goto err3;

	map = kmalloc(part->header_size, GFP_KERNEL);
	if (!map)
		goto err2;

	rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
		      part->header_size, &retlen, (u_char *)map);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error reading '%s' at "
			"0x%lx\n", part->mbd.mtd->name,
			part->blocks[block_no].offset);

		goto err;
	}

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
		u_long addr;

		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_ZERO)
			entry = 0;

		/* already warned about and ignored in build_block_map() */
		if (entry >= part->sector_count)
			continue;

		addr = part->blocks[block_no].offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		if (*old_sector == addr) {
			*old_sector = -1;
			if (!part->blocks[block_no].used_sectors--) {
				rc = erase_block(part, block_no);
				break;
			}
			continue;
		}
		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			      sector_data);

		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_ERR PREFIX "'%s': Unable to "
				"read sector for relocation\n",
				part->mbd.mtd->name);

			goto err;
		}

		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
				entry, sector_data);

		if (rc)
			goto err;
	}

err:
	kfree(map);
err2:
	kfree(sector_data);
err3:
	part->is_reclaiming = 0;

	return rc;
}

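/*
 * Pick the best unit to reclaim: prefer units with few live sectors and a
 * low erase count (a simple wear-levelling term), make it the new reserved
 * block, and either move its contents or erase it outright. Returns 0
 * without reclaiming while any unit still has free sectors.
 */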
static int reclaim_block(struct partition *part, u_long *old_sector)
{
	int block, best_block, score, old_sector_block;
	int rc;

	/* we have a race if sync doesn't exist */
	mtd_sync(part->mbd.mtd);

	score = 0x7fffffff; /* MAX_INT */
	best_block = -1;
	if (*old_sector != -1)
		old_sector_block = *old_sector / part->block_size;
	else
		old_sector_block = -1;

	for (block=0; block<part->total_blocks; block++) {
		int this_score;

		if (block == part->reserved_block)
			continue;

		/*
		 * Postpone reclaiming while any unit still has free
		 * sectors: the more sectors get deleted in the
		 * meantime, the less data we have to move.
		 */
		if (part->blocks[block].free_sectors)
			return 0;

		this_score = part->blocks[block].used_sectors;

		if (block == old_sector_block)
			this_score--;
		else {
			/* no point in moving a full block */
			if (part->blocks[block].used_sectors ==
					part->data_sectors_per_block)
				continue;
		}

		this_score += part->blocks[block].erases;

		if (this_score < score) {
			best_block = block;
			score = this_score;
		}
	}

	if (best_block == -1)
		return -ENOSPC;

	part->current_block = -1;
	part->reserved_block = best_block;

	pr_debug("reclaim_block: reclaiming block #%d with %d used "
		 "%d free sectors\n", best_block,
		 part->blocks[best_block].used_sectors,
		 part->blocks[best_block].free_sectors);

	if (part->blocks[best_block].used_sectors)
		rc = move_block_contents(part, best_block, old_sector);
	else
		rc = erase_block(part, best_block);

	return rc;
}

/*
 * IMPROVE: It would be best to choose the block with the most deleted sectors,
 * because if we fill that one up first it'll have the most chance of having
 * the least live sectors at reclaim.
 */
static int find_free_block(struct partition *part)
{
	int block, stop;

	block = part->current_block == -1 ?
			jiffies % part->total_blocks : part->current_block;
	stop = block;

	do {
		if (part->blocks[block].free_sectors &&
				block != part->reserved_block)
			return block;

		if (part->blocks[block].state == BLOCK_UNUSED)
			erase_block(part, block);

		if (++block >= part->total_blocks)
			block = 0;

	} while (block != stop);

	return -1;
}

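/*
 * Choose the unit that new writes should go to: take any unit with free
 * sectors, triggering a reclaim first if none is available, then load its
 * header into the header cache and make it the current block.
 */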
static int find_writable_block(struct partition *part, u_long *old_sector)
{
	int rc, block;
	size_t retlen;

	block = find_free_block(part);

	if (block == -1) {
		if (!part->is_reclaiming) {
			rc = reclaim_block(part, old_sector);
			if (rc)
				goto err;

			block = find_free_block(part);
		}

		if (block == -1) {
			rc = -ENOSPC;
			goto err;
		}
	}

	rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
		      part->header_size, &retlen,
		      (u_char *)part->header_cache);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "'%s': unable to read header at "
				"0x%lx\n", part->mbd.mtd->name,
				part->blocks[block].offset);
		goto err;
	}

	part->current_block = block;

err:
	return rc;
}

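/*
 * Overwrite the map entry of a superseded sector with SECTOR_DELETED and
 * drop its used count; once a unit holds neither live nor free sectors it
 * is erased and made reusable.
 */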
static int mark_sector_deleted(struct partition *part, u_long old_addr)
{
	int block, offset, rc;
	u_long addr;
	size_t retlen;
	u16 del = cpu_to_le16(SECTOR_DELETED);

	block = old_addr / part->block_size;
	offset = (old_addr % part->block_size) / SECTOR_SIZE -
		part->header_sectors_per_block;

	addr = part->blocks[block].offset +
			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
		       (u_char *)&del);

	if (!rc && retlen != sizeof(del))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at "
			"0x%lx\n", part->mbd.mtd->name, addr);
		goto err;
	}
	if (block == part->current_block)
		part->header_cache[offset + HEADER_MAP_OFFSET] = del;

	part->blocks[block].used_sectors--;

	if (!part->blocks[block].used_sectors &&
	    !part->blocks[block].free_sectors)
		rc = erase_block(part, block);

err:
	return rc;
}

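/*
 * Find a free map slot in the current block's cached header. The scan starts
 * at the first slot that can still be free given the free-sector count and
 * wraps around once before giving up.
 */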
static int find_free_sector(const struct partition *part, const struct block *block)
{
	int i, stop;

	i = stop = part->data_sectors_per_block - block->free_sectors;

	do {
		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
				== SECTOR_FREE)
			return i;

		if (++i == part->data_sectors_per_block)
			i = 0;
	}
	while(i != stop);

	return -1;
}

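/*
 * Write one sector's data into a free slot of the current writable block,
 * then record the new location both in the on-flash map and in sector_map,
 * updating the cached header and the block's sector counts.
 */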
static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
{
	struct partition *part = (struct partition*)dev;
	struct block *block;
	u_long addr;
	int i;
	int rc;
	size_t retlen;
	u16 entry;

	if (part->current_block == -1 ||
		!part->blocks[part->current_block].free_sectors) {

		rc = find_writable_block(part, old_addr);
		if (rc)
			goto err;
	}

	block = &part->blocks[part->current_block];

	i = find_free_sector(part, block);

	if (i < 0) {
		rc = -ENOSPC;
		goto err;
	}

	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
		block->offset;
	rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
		       (u_char *)buf);

	if (!rc && retlen != SECTOR_SIZE)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}

	part->sector_map[sector] = addr;

	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);

	part->header_cache[i + HEADER_MAP_OFFSET] = entry;

	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
		       (u_char *)&entry);

	if (!rc && retlen != sizeof(entry))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}
	block->used_sectors++;
	block->free_sectors--;

err:
	return rc;
}

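/*
 * Block-layer write: an all-zero sector is not stored at all (unmapped
 * sectors read back as zeroes anyway); otherwise the data goes into a fresh
 * slot via do_writesect(). Any previous copy of the sector is then marked
 * deleted.
 */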
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition*)dev;
	u_long old_addr;
	int i;
	int rc = 0;

	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);

	if (part->reserved_block == -1) {
		rc = -EACCES;
		goto err;
	}

	if (sector >= part->sector_count) {
		rc = -EIO;
		goto err;
	}

	old_addr = part->sector_map[sector];

	for (i=0; i<SECTOR_SIZE; i++) {
		if (!buf[i])
			continue;

		rc = do_writesect(dev, sector, buf, &old_addr);
		if (rc)
			goto err;
		break;
	}

	if (i == SECTOR_SIZE)
		part->sector_map[sector] = -1;

	if (old_addr != -1)
		rc = mark_sector_deleted(part, old_addr);

err:
	return rc;
}

static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct partition *part = (struct partition*)dev;

	geo->heads = 1;
	geo->sectors = SECTORS_PER_TRACK;
	geo->cylinders = part->cylinders;

	return 0;
}

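/*
 * Probe callback from the blktrans core: accept NOR devices whose size fits
 * in 32 bits, scan them for an existing RFD layout and, if one is found,
 * register a translated block device (read-only when the MTD is not
 * writeable or the scan reported errors).
 */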
static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct partition *part;

	if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
		return;

	part = kzalloc(sizeof(struct partition), GFP_KERNEL);
	if (!part)
		return;

	part->mbd.mtd = mtd;

	if (block_size)
		part->block_size = block_size;
	else {
		if (!mtd->erasesize) {
			printk(KERN_WARNING PREFIX "please provide block_size");
			goto out;
		} else
			part->block_size = mtd->erasesize;
	}

	if (scan_header(part) == 0) {
		part->mbd.size = part->sector_count;
		part->mbd.tr = tr;
		part->mbd.devnum = -1;
		if (!(mtd->flags & MTD_WRITEABLE))
			part->mbd.readonly = 1;
		else if (part->errors) {
			printk(KERN_WARNING PREFIX "'%s': errors found, "
					"setting read-only\n", mtd->name);
			part->mbd.readonly = 1;
		}

		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
				mtd->name, mtd->type, mtd->flags);

		if (!add_mtd_blktrans_dev((void*)part))
			return;
	}
out:
	kfree(part);
}

static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct partition *part = (struct partition*)dev;
	int i;

	for (i=0; i<part->total_blocks; i++) {
		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
			part->mbd.mtd->name, i, part->blocks[i].erases);
	}

	del_mtd_blktrans_dev(dev);
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);
}

static struct mtd_blktrans_ops rfd_ftl_tr = {
	.name		= "rfd",
	.major		= RFD_FTL_MAJOR,
	.part_bits	= PART_BITS,
	.blksize	= SECTOR_SIZE,

	.readsect	= rfd_ftl_readsect,
	.writesect	= rfd_ftl_writesect,
	.getgeo		= rfd_ftl_getgeo,
	.add_mtd	= rfd_ftl_add_mtd,
	.remove_dev	= rfd_ftl_remove_dev,
	.owner		= THIS_MODULE,
};

static int __init init_rfd_ftl(void)
{
	return register_mtd_blktrans(&rfd_ftl_tr);
}

static void __exit cleanup_rfd_ftl(void)
{
	deregister_mtd_blktrans(&rfd_ftl_tr);
}

module_init(init_rfd_ftl);
module_exit(cleanup_rfd_ftl);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
		"used by General Software's Embedded BIOS");