/* kernel/drivers/mtd/chips/cfi_cmdset_0002.c */
/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

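/*
 * AMD_BOOTLOC_BUG enables the boot-block location fixup below for chips
 * whose CFI tables misreport it.  Setting FORCE_WORD_WRITE to 1 compiles
 * out buffered programming and falls back to word-at-a-time writes
 * (useful when debugging write-buffer problems).
 */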
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

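/* How many times a failed program/erase operation is retried before giving up */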
#define MAX_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6

/*
 * Status Register bit description. Used by flash devices that don't
 * support DQ polling (e.g. HyperFlash)
 */
#define CFI_SR_DRB		BIT(7)
#define CFI_SR_ESB		BIT(5)
#define CFI_SR_PSB		BIT(4)
#define CFI_SR_WBASB		BIT(3)
#define CFI_SR_SLSB		BIT(1)

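/*
 * When CFI_QUIRK_DQ_TRUE_DATA is set (see fixup_quirks(), which applies
 * it to the S29GL064N), chip_good() skips the compare against the
 * expected value and relies on the DQ toggle ready check alone.
 */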
enum cfi_quirks {
	CFI_QUIRK_DQ_TRUE_DATA = BIT(0),
};

static int cfi_amdstd_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#if !FORCE_WORD_WRITE
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#endif
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync(struct mtd_info *);
static int cfi_amdstd_suspend(struct mtd_info *);
static void cfi_amdstd_resume(struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_secsi_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
					  size_t *, u_char *);
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
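/* fwh_lock.h uses get_chip()/put_chip(), hence the forward declarations above */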
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};

/*
 * Use the status register to poll for erase/write completion when DQ is not
 * supported. This is indicated by Bit[1:0] of the SoftwareFeatures field in
 * CFI Primary Vendor-Specific Extended Query table 1.5.
 */
static int cfi_use_status_reg(struct cfi_private *cfi)
{
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;

	return extp && extp->MinorVersion >= '5' &&
		(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}

static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;

	if (!cfi_use_status_reg(cfi))
		return 0;

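	/* 0x70 is the Status Register Read command in this command set */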
	cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	status = map_read(map, adr);

	/* The error bits are invalid while the chip is busy */
	if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
		return 0;

	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		if (chipstatus & CFI_SR_ESB)
			pr_err("%s erase operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_PSB)
			pr_err("%s program operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_WBASB)
			pr_err("%s buffer program command aborted, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_SLSB)
			pr_err("%s sector write protected, status %lx\n",
			       map->name, chipstatus);

		/* Erase/Program status bits are set on operation failure */
		if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
			return 1;
	}
	return 0;
}

/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char *erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char *top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);


	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif

#if !FORCE_WORD_WRITE
static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		pr_debug("Using buffer write method\n");
		mtd->_write = cfi_amdstd_write_buffers;
	}
}
#endif /* !FORCE_WORD_WRITE */

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
	/* Setup for chips with a secsi area */
	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->_erase = cfi_amdstd_erase_chip;
	}
}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
	mtd->_lock = cfi_atmel_lock;
	mtd->_unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;

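	/* Rev B parts use 0x50 for sector erase rather than the usual 0x30 */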
	cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
	 * it should report a size of 8KBytes (0x0020*256).
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
		mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
			mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
			mtd->name);
	}
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * S29NS512P flash uses more than 8 bits to report the number of
	 * sectors, which is not permitted by CFI.
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
		mtd->name);
}

static void fixup_quirks(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x0c01)
		cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA;
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
	{ 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_quirks },
	{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be common,
	 * and it looks like the device IDs are as well.  This table picks
	 * all cases where we know that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};
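
/*
 * For reference: cfi_fixup() (implemented in cfi_util.c) walks tables like
 * the ones above and runs every entry whose mfr/id match the probed chip,
 * with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards.  A minimal sketch of
 * that loop (not the verbatim implementation):
 *
 *	for (f = fixups; f->fixup; f++)
 *		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *		    (f->id == CFI_ID_ANY || f->id == cfi->id))
 *			f->fixup(mtd);
 */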

static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}

static int is_m29ew(struct cfi_private *cfi)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
		return 1;
	return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 * that writes an F0 command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
					  unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay.  The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive.  As a result, it is recommended
 * that a patch be applied.  Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur.  The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500 µs.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500 µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume issue; see Micron TN-13-07.
	 * Worst case delay must be 500 µs but 30-50 µs should be OK as well.
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_amdstd_erase_varsize;
	mtd->_write   = cfi_amdstd_write_words;
	mtd->_read    = cfi_amdstd_read;
	mtd->_sync    = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume  = cfi_amdstd_resume;
	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
			mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd *)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i = 0; i < cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions - 1) - i;

					swap(cfi->cfiq->EraseRegionInfo[i],
					     cfi->cfiq->EraseRegionInfo[j]);
				}
			}
			/* Set the default CFI lock/unlock addresses */
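			/* (The AMD unlock sequence writes 0xAA to 0x555 and 0x55 to 0x2aa.) */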
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i = 0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1 << cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1 << cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1 << cfi->cfiq->BlockEraseTimeoutTyp;
		/*
		 * First calculate the maximum timeout from the timeout fields
		 * of struct cfi_ident as probed from the chip's CFI area, if
		 * available. Specify a minimum of 2000us, in case the CFI data
		 * is wrong.
		 */
		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1 << (cfi->cfiq->BufWriteTimeoutTyp +
				      cfi->cfiq->BufWriteTimeoutMax);
		else
			cfi->chips[i].buffer_write_time_max = 0;

		cfi->chips[i].buffer_write_time_max =
			max(cfi->chips[i].buffer_write_time_max, 2000);

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i, j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI) ? "CFI" : "JEDEC", cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
					  sizeof(struct mtd_erase_region_info),
					  GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j = 0; j < cfi->numchips; j++) {
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].offset = (j * devsize) + offset;
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].erasesize = ersize;
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Errors are indicated by bits held at the wrong value or by bits that
 * keep toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
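 *
 * The two back-to-back reads below rely on the DQ6 toggle bit: while an
 * embedded operation is in progress, DQ6 toggles on every read, so two
 * identical reads indicate completion.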
 */
static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
			       unsigned long addr, map_word *expected)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word d, t;
	int ret;

	if (cfi_use_status_reg(cfi)) {
		map_word ready = CMD(CFI_SR_DRB);
		/*
		 * For chips that support the status register, check the
		 * device ready bit
		 */
		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
				 cfi->device_type, NULL);
		t = map_read(map, addr);

		return map_word_andequal(map, t, ready, ready);
	}

	d = map_read(map, addr);
	t = map_read(map, addr);

	ret = map_word_equal(map, d, t);

	if (!ret || !expected)
		return ret;

	return map_word_equal(map, t, *expected);
}

static int __xipram chip_good(struct map_info *map, struct flchip *chip,
			      unsigned long addr, map_word *expected)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word *datum = expected;

	if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA)
		datum = NULL;

	return chip_ready(map, chip, addr, datum);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, chip, adr, NULL))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1 | 0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* Do not allow suspend if the read/write targets the
		 * erase-block that is being erased */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, chip, adr, NULL))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are trying
				 * to use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend & 2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}

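/*
 * put_chip() is the counterpart of get_chip(): it resumes whatever
 * operation get_chip() suspended and wakes up anyone sleeping on the
 * chip's wait queue.  Both are called with chip->mutex held.
 */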
put_chip(struct map_info * map,struct flchip * chip,unsigned long adr)976*4882a593Smuzhiyun static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
977*4882a593Smuzhiyun {
978*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun 	switch(chip->oldstate) {
981*4882a593Smuzhiyun 	case FL_ERASING:
982*4882a593Smuzhiyun 		cfi_fixup_m29ew_erase_suspend(map,
983*4882a593Smuzhiyun 			chip->in_progress_block_addr);
984*4882a593Smuzhiyun 		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
985*4882a593Smuzhiyun 		cfi_fixup_m29ew_delay_after_resume(cfi);
986*4882a593Smuzhiyun 		chip->oldstate = FL_READY;
987*4882a593Smuzhiyun 		chip->state = FL_ERASING;
988*4882a593Smuzhiyun 		break;
989*4882a593Smuzhiyun 
990*4882a593Smuzhiyun 	case FL_XIP_WHILE_ERASING:
991*4882a593Smuzhiyun 		chip->state = chip->oldstate;
992*4882a593Smuzhiyun 		chip->oldstate = FL_READY;
993*4882a593Smuzhiyun 		break;
994*4882a593Smuzhiyun 
995*4882a593Smuzhiyun 	case FL_READY:
996*4882a593Smuzhiyun 	case FL_STATUS:
997*4882a593Smuzhiyun 		break;
998*4882a593Smuzhiyun 	default:
999*4882a593Smuzhiyun 		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
1000*4882a593Smuzhiyun 	}
1001*4882a593Smuzhiyun 	wake_up(&chip->wq);
1002*4882a593Smuzhiyun }
1003*4882a593Smuzhiyun 
1004*4882a593Smuzhiyun #ifdef CONFIG_MTD_XIP
1005*4882a593Smuzhiyun 
1006*4882a593Smuzhiyun /*
1007*4882a593Smuzhiyun  * No interrupt what so ever can be serviced while the flash isn't in array
1008*4882a593Smuzhiyun  * mode.  This is ensured by the xip_disable() and xip_enable() functions
1009*4882a593Smuzhiyun  * enclosing any code path where the flash is known not to be in array mode.
1010*4882a593Smuzhiyun  * And within a XIP disabled code path, only functions marked with __xipram
1011*4882a593Smuzhiyun  * may be called and nothing else (it's a good thing to inspect generated
1012*4882a593Smuzhiyun  * assembly to make sure inline functions were actually inlined and that gcc
1013*4882a593Smuzhiyun  * didn't emit calls to its own support functions). Also configuring MTD CFI
1014*4882a593Smuzhiyun  * support to a single buswidth and a single interleave is also recommended.
1015*4882a593Smuzhiyun  */
1016*4882a593Smuzhiyun 
xip_disable(struct map_info * map,struct flchip * chip,unsigned long adr)1017*4882a593Smuzhiyun static void xip_disable(struct map_info *map, struct flchip *chip,
1018*4882a593Smuzhiyun 			unsigned long adr)
1019*4882a593Smuzhiyun {
1020*4882a593Smuzhiyun 	/* TODO: chips with no XIP use should ignore and return */
1021*4882a593Smuzhiyun 	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
1022*4882a593Smuzhiyun 	local_irq_disable();
1023*4882a593Smuzhiyun }
1024*4882a593Smuzhiyun 
1025*4882a593Smuzhiyun static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
1026*4882a593Smuzhiyun 				unsigned long adr)
1027*4882a593Smuzhiyun {
1028*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
1029*4882a593Smuzhiyun 
1030*4882a593Smuzhiyun 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1031*4882a593Smuzhiyun 		map_write(map, CMD(0xf0), adr);
1032*4882a593Smuzhiyun 		chip->state = FL_READY;
1033*4882a593Smuzhiyun 	}
1034*4882a593Smuzhiyun 	(void) map_read(map, adr);
1035*4882a593Smuzhiyun 	xip_iprefetch();
1036*4882a593Smuzhiyun 	local_irq_enable();
1037*4882a593Smuzhiyun }
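/*
 * Usage sketch (illustrative only): code that takes the flash out of array
 * mode brackets the critical section like this, as do_write_oneword_retry()
 * and do_write_buffer() do below:
 *
 *	xip_disable(map, chip, adr);	 IRQs off, flash may leave array mode
 *	... issue command cycles, poll with xip_udelay() ...
 *	xip_enable(map, chip, adr);	 reset to array mode, IRQs back on
 */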
1038*4882a593Smuzhiyun 
1039*4882a593Smuzhiyun /*
1040*4882a593Smuzhiyun  * When a delay is required for the flash operation to complete, the
1041*4882a593Smuzhiyun  * xip_udelay() function polls both for expiry of the given timeout and for
1042*4882a593Smuzhiyun  * pending (but still masked) hardware interrupts.  Whenever an interrupt is
1043*4882a593Smuzhiyun  * pending, the flash erase operation is suspended, array mode is restored
1044*4882a593Smuzhiyun  * and interrupts are unmasked.  Task scheduling might also happen at that
1045*4882a593Smuzhiyun  * point.  The CPU eventually returns from the interrupt or the call to
1046*4882a593Smuzhiyun  * schedule() and the suspended flash operation is resumed for the remainder
1047*4882a593Smuzhiyun  * of the delay period.
1048*4882a593Smuzhiyun  *
1049*4882a593Smuzhiyun  * Warning: this function _will_ fool interrupt latency tracing tools.
1050*4882a593Smuzhiyun  */
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
1053*4882a593Smuzhiyun 				unsigned long adr, int usec)
1054*4882a593Smuzhiyun {
1055*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
1056*4882a593Smuzhiyun 	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
1057*4882a593Smuzhiyun 	map_word status, OK = CMD(0x80);
1058*4882a593Smuzhiyun 	unsigned long suspended, start = xip_currtime();
1059*4882a593Smuzhiyun 	flstate_t oldstate;
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun 	do {
1062*4882a593Smuzhiyun 		cpu_relax();
1063*4882a593Smuzhiyun 		if (xip_irqpending() && extp &&
1064*4882a593Smuzhiyun 		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
1065*4882a593Smuzhiyun 		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1066*4882a593Smuzhiyun 			/*
1067*4882a593Smuzhiyun 			 * Let's suspend the erase operation when supported.
1068*4882a593Smuzhiyun 			 * Note that we currently don't try to suspend
1069*4882a593Smuzhiyun 			 * interleaved chips if there is already another
1070*4882a593Smuzhiyun 			 * operation suspended (imagine what happens
1071*4882a593Smuzhiyun 			 * when one chip was already done with the current
1072*4882a593Smuzhiyun 			 * operation while another chip suspended it, then
1073*4882a593Smuzhiyun 			 * we resume the whole thing at once).  Yes, it
1074*4882a593Smuzhiyun 			 * can happen!
1075*4882a593Smuzhiyun 			 */
1076*4882a593Smuzhiyun 			map_write(map, CMD(0xb0), adr);
1077*4882a593Smuzhiyun 			usec -= xip_elapsed_since(start);
1078*4882a593Smuzhiyun 			suspended = xip_currtime();
1079*4882a593Smuzhiyun 			do {
1080*4882a593Smuzhiyun 				if (xip_elapsed_since(suspended) > 100000) {
1081*4882a593Smuzhiyun 					/*
1082*4882a593Smuzhiyun 					 * The chip doesn't want to suspend
1083*4882a593Smuzhiyun 					 * after waiting for 100 msecs.
1084*4882a593Smuzhiyun 					 * This is a critical error but there
1085*4882a593Smuzhiyun 					 * is not much we can do here.
1086*4882a593Smuzhiyun 					 */
1087*4882a593Smuzhiyun 					return;
1088*4882a593Smuzhiyun 				}
1089*4882a593Smuzhiyun 				status = map_read(map, adr);
1090*4882a593Smuzhiyun 			} while (!map_word_andequal(map, status, OK, OK));
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun 			/* Suspend succeeded */
1093*4882a593Smuzhiyun 			oldstate = chip->state;
1094*4882a593Smuzhiyun 			if (!map_word_bitsset(map, status, CMD(0x40)))
1095*4882a593Smuzhiyun 				break;
1096*4882a593Smuzhiyun 			chip->state = FL_XIP_WHILE_ERASING;
1097*4882a593Smuzhiyun 			chip->erase_suspended = 1;
1098*4882a593Smuzhiyun 			map_write(map, CMD(0xf0), adr);
1099*4882a593Smuzhiyun 			(void) map_read(map, adr);
1100*4882a593Smuzhiyun 			xip_iprefetch();
1101*4882a593Smuzhiyun 			local_irq_enable();
1102*4882a593Smuzhiyun 			mutex_unlock(&chip->mutex);
1103*4882a593Smuzhiyun 			xip_iprefetch();
1104*4882a593Smuzhiyun 			cond_resched();
1105*4882a593Smuzhiyun 
1106*4882a593Smuzhiyun 			/*
1107*4882a593Smuzhiyun 			 * We're back.  However someone else might have
1108*4882a593Smuzhiyun 			 * decided to go write to the chip if we are in
1109*4882a593Smuzhiyun 			 * a suspended erase state.  If so let's wait
1110*4882a593Smuzhiyun 			 * until it's done.
1111*4882a593Smuzhiyun 			 */
1112*4882a593Smuzhiyun 			mutex_lock(&chip->mutex);
1113*4882a593Smuzhiyun 			while (chip->state != FL_XIP_WHILE_ERASING) {
1114*4882a593Smuzhiyun 				DECLARE_WAITQUEUE(wait, current);
1115*4882a593Smuzhiyun 				set_current_state(TASK_UNINTERRUPTIBLE);
1116*4882a593Smuzhiyun 				add_wait_queue(&chip->wq, &wait);
1117*4882a593Smuzhiyun 				mutex_unlock(&chip->mutex);
1118*4882a593Smuzhiyun 				schedule();
1119*4882a593Smuzhiyun 				remove_wait_queue(&chip->wq, &wait);
1120*4882a593Smuzhiyun 				mutex_lock(&chip->mutex);
1121*4882a593Smuzhiyun 			}
1122*4882a593Smuzhiyun 			/* Disallow XIP again */
1123*4882a593Smuzhiyun 			local_irq_disable();
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 			/* Correct Erase Suspend Hangups for M29EW */
1126*4882a593Smuzhiyun 			cfi_fixup_m29ew_erase_suspend(map, adr);
1127*4882a593Smuzhiyun 			/* Resume the write or erase operation */
1128*4882a593Smuzhiyun 			map_write(map, cfi->sector_erase_cmd, adr);
1129*4882a593Smuzhiyun 			chip->state = oldstate;
1130*4882a593Smuzhiyun 			start = xip_currtime();
1131*4882a593Smuzhiyun 		} else if (usec >= 1000000/HZ) {
1132*4882a593Smuzhiyun 			/*
1133*4882a593Smuzhiyun 			 * Try to save on CPU power when waiting delay
1134*4882a593Smuzhiyun 			 * is at least a system timer tick period.
1135*4882a593Smuzhiyun 			 * No need to be extremely accurate here.
1136*4882a593Smuzhiyun 			 */
1137*4882a593Smuzhiyun 			xip_cpu_idle();
1138*4882a593Smuzhiyun 		}
1139*4882a593Smuzhiyun 		status = map_read(map, adr);
1140*4882a593Smuzhiyun 	} while (!map_word_andequal(map, status, OK, OK)
1141*4882a593Smuzhiyun 		 && xip_elapsed_since(start) < usec);
1142*4882a593Smuzhiyun }
1143*4882a593Smuzhiyun 
1144*4882a593Smuzhiyun #define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun /*
1147*4882a593Smuzhiyun  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1148*4882a593Smuzhiyun  * the flash is actively programming or erasing since we have to poll for
1149*4882a593Smuzhiyun  * the operation to complete anyway.  We can't do that in a generic way with
1150*4882a593Smuzhiyun  * an XIP setup, so do it before the actual flash operation in this case
1151*4882a593Smuzhiyun  * and stub it out from INVALIDATE_CACHE_UDELAY.
1152*4882a593Smuzhiyun  */
1153*4882a593Smuzhiyun #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1154*4882a593Smuzhiyun 	INVALIDATE_CACHED_RANGE(map, from, size)
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
1157*4882a593Smuzhiyun 	UDELAY(map, chip, adr, usec)
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun /*
1160*4882a593Smuzhiyun  * Extra notes:
1161*4882a593Smuzhiyun  *
1162*4882a593Smuzhiyun  * Activating this XIP support changes the way the code works a bit.  For
1163*4882a593Smuzhiyun  * example the code to suspend the current process when concurrent access
1164*4882a593Smuzhiyun  * happens is never executed because xip_udelay() will always return with the
1165*4882a593Smuzhiyun  * same chip state as it was entered with.  This is why we don't worry about
1166*4882a593Smuzhiyun  * the presence of add_wait_queue() or schedule() calls from within a couple
1167*4882a593Smuzhiyun  * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
1168*4882a593Smuzhiyun  * The queueing and scheduling always happen within xip_udelay().
1169*4882a593Smuzhiyun  *
1170*4882a593Smuzhiyun  * Similarly, get_chip() and put_chip() just happen to always be executed
1171*4882a593Smuzhiyun  * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. with the flash
1172*4882a593Smuzhiyun  * in array mode, so many of the cases therein are never executed and cause
1173*4882a593Smuzhiyun  * no problem with XIP.
1174*4882a593Smuzhiyun  */
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun #else
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun #define xip_disable(map, chip, adr)
1179*4882a593Smuzhiyun #define xip_enable(map, chip, adr)
1180*4882a593Smuzhiyun #define XIP_INVAL_CACHED_RANGE(x...)
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun #define UDELAY(map, chip, adr, usec)  \
1183*4882a593Smuzhiyun do {  \
1184*4882a593Smuzhiyun 	mutex_unlock(&chip->mutex);  \
1185*4882a593Smuzhiyun 	cfi_udelay(usec);  \
1186*4882a593Smuzhiyun 	mutex_lock(&chip->mutex);  \
1187*4882a593Smuzhiyun } while (0)
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
1190*4882a593Smuzhiyun do {  \
1191*4882a593Smuzhiyun 	mutex_unlock(&chip->mutex);  \
1192*4882a593Smuzhiyun 	INVALIDATE_CACHED_RANGE(map, adr, len);  \
1193*4882a593Smuzhiyun 	cfi_udelay(usec);  \
1194*4882a593Smuzhiyun 	mutex_lock(&chip->mutex);  \
1195*4882a593Smuzhiyun } while (0)
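/*
 * What UDELAY() amounts to in the two configurations (sketch):
 *
 *	UDELAY(map, chip, adr, 1);
 *
 * With CONFIG_MTD_XIP it busy-polls in xip_udelay(), suspending the erase
 * and briefly re-enabling IRQs if an interrupt is pending.  Without it,
 * the macro drops chip->mutex, calls cfi_udelay(1) so other tasks may run,
 * then retakes the mutex before the caller re-examines the chip state.
 */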
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun #endif
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1200*4882a593Smuzhiyun {
1201*4882a593Smuzhiyun 	unsigned long cmd_addr;
1202*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
1203*4882a593Smuzhiyun 	int ret;
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	adr += chip->start;
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	/* Ensure cmd read/writes are aligned. */
1208*4882a593Smuzhiyun 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun 	mutex_lock(&chip->mutex);
1211*4882a593Smuzhiyun 	ret = get_chip(map, chip, cmd_addr, FL_READY);
1212*4882a593Smuzhiyun 	if (ret) {
1213*4882a593Smuzhiyun 		mutex_unlock(&chip->mutex);
1214*4882a593Smuzhiyun 		return ret;
1215*4882a593Smuzhiyun 	}
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1218*4882a593Smuzhiyun 		map_write(map, CMD(0xf0), cmd_addr);
1219*4882a593Smuzhiyun 		chip->state = FL_READY;
1220*4882a593Smuzhiyun 	}
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 	map_copy_from(map, buf, adr, len);
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 	put_chip(map, chip, cmd_addr);
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	mutex_unlock(&chip->mutex);
1227*4882a593Smuzhiyun 	return 0;
1228*4882a593Smuzhiyun }
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun static int cfi_amdstd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1232*4882a593Smuzhiyun {
1233*4882a593Smuzhiyun 	struct map_info *map = mtd->priv;
1234*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
1235*4882a593Smuzhiyun 	unsigned long ofs;
1236*4882a593Smuzhiyun 	int chipnum;
1237*4882a593Smuzhiyun 	int ret = 0;
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun 	/* ofs: offset within the first chip that the first read should start */
1240*4882a593Smuzhiyun 	chipnum = (from >> cfi->chipshift);
1241*4882a593Smuzhiyun 	ofs = from - (chipnum <<  cfi->chipshift);
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	while (len) {
1244*4882a593Smuzhiyun 		unsigned long thislen;
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 		if (chipnum >= cfi->numchips)
1247*4882a593Smuzhiyun 			break;
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun 		if ((len + ofs - 1) >> cfi->chipshift)
1250*4882a593Smuzhiyun 			thislen = (1<<cfi->chipshift) - ofs;
1251*4882a593Smuzhiyun 		else
1252*4882a593Smuzhiyun 			thislen = len;
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun 		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1255*4882a593Smuzhiyun 		if (ret)
1256*4882a593Smuzhiyun 			break;
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 		*retlen += thislen;
1259*4882a593Smuzhiyun 		len -= thislen;
1260*4882a593Smuzhiyun 		buf += thislen;
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 		ofs = 0;
1263*4882a593Smuzhiyun 		chipnum++;
1264*4882a593Smuzhiyun 	}
1265*4882a593Smuzhiyun 	return ret;
1266*4882a593Smuzhiyun }
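/*
 * Worked example of the chip/offset split above (hypothetical geometry):
 * with two 8 MiB chips, cfi->chipshift = 23.  A read at from = 0x900000
 * gives chipnum = 0x900000 >> 23 = 1 and ofs = 0x900000 - (1 << 23) =
 * 0x100000, i.e. 1 MiB into the second chip.  A request crossing the 8 MiB
 * boundary is split: thislen = (1 << 23) - ofs on the first chip, then the
 * remainder continues at ofs = 0 on the next one.
 */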
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1269*4882a593Smuzhiyun 			loff_t adr, size_t len, u_char *buf, size_t grouplen);
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun static inline void otp_enter(struct map_info *map, struct flchip *chip,
1272*4882a593Smuzhiyun 			     loff_t adr, size_t len)
1273*4882a593Smuzhiyun {
1274*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1277*4882a593Smuzhiyun 			 cfi->device_type, NULL);
1278*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1279*4882a593Smuzhiyun 			 cfi->device_type, NULL);
1280*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
1281*4882a593Smuzhiyun 			 cfi->device_type, NULL);
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
1284*4882a593Smuzhiyun }
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun static inline void otp_exit(struct map_info *map, struct flchip *chip,
1287*4882a593Smuzhiyun 			    loff_t adr, size_t len)
1288*4882a593Smuzhiyun {
1289*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1292*4882a593Smuzhiyun 			 cfi->device_type, NULL);
1293*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1294*4882a593Smuzhiyun 			 cfi->device_type, NULL);
1295*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
1296*4882a593Smuzhiyun 			 cfi->device_type, NULL);
1297*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
1298*4882a593Smuzhiyun 			 cfi->device_type, NULL);
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
1301*4882a593Smuzhiyun }
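/*
 * Both helpers above use the standard AMD three-cycle unlock sequence
 * (0xAA @ addr_unlock1, 0x55 @ addr_unlock2, command @ addr_unlock1).
 * otp_enter() sends 0x88 to switch the device into its SecSi (OTP)
 * sector; otp_exit() sends 0x90 then 0x00 to return to the normal array.
 * The cached range is invalidated on both transitions because the same
 * addresses map to different content before and after the switch.
 */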
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun static inline int do_read_secsi_onechip(struct map_info *map,
1304*4882a593Smuzhiyun 					struct flchip *chip, loff_t adr,
1305*4882a593Smuzhiyun 					size_t len, u_char *buf,
1306*4882a593Smuzhiyun 					size_t grouplen)
1307*4882a593Smuzhiyun {
1308*4882a593Smuzhiyun 	DECLARE_WAITQUEUE(wait, current);
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun  retry:
1311*4882a593Smuzhiyun 	mutex_lock(&chip->mutex);
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	if (chip->state != FL_READY) {
1314*4882a593Smuzhiyun 		set_current_state(TASK_UNINTERRUPTIBLE);
1315*4882a593Smuzhiyun 		add_wait_queue(&chip->wq, &wait);
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 		mutex_unlock(&chip->mutex);
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun 		schedule();
1320*4882a593Smuzhiyun 		remove_wait_queue(&chip->wq, &wait);
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 		goto retry;
1323*4882a593Smuzhiyun 	}
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	adr += chip->start;
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 	chip->state = FL_READY;
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 	otp_enter(map, chip, adr, len);
1330*4882a593Smuzhiyun 	map_copy_from(map, buf, adr, len);
1331*4882a593Smuzhiyun 	otp_exit(map, chip, adr, len);
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 	wake_up(&chip->wq);
1334*4882a593Smuzhiyun 	mutex_unlock(&chip->mutex);
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun 	return 0;
1337*4882a593Smuzhiyun }
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun static int cfi_amdstd_secsi_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1340*4882a593Smuzhiyun {
1341*4882a593Smuzhiyun 	struct map_info *map = mtd->priv;
1342*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
1343*4882a593Smuzhiyun 	unsigned long ofs;
1344*4882a593Smuzhiyun 	int chipnum;
1345*4882a593Smuzhiyun 	int ret = 0;
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun 	/* ofs: offset within the first chip that the first read should start */
1348*4882a593Smuzhiyun 	/* 8 secsi bytes per chip */
1349*4882a593Smuzhiyun 	chipnum = from >> 3;
1350*4882a593Smuzhiyun 	ofs = from & 7;
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	while (len) {
1353*4882a593Smuzhiyun 		unsigned long thislen;
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun 		if (chipnum >= cfi->numchips)
1356*4882a593Smuzhiyun 			break;
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 		if ((len + ofs - 1) >> 3)
1359*4882a593Smuzhiyun 			thislen = (1<<3) - ofs;
1360*4882a593Smuzhiyun 		else
1361*4882a593Smuzhiyun 			thislen = len;
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
1364*4882a593Smuzhiyun 					    thislen, buf, 0);
1365*4882a593Smuzhiyun 		if (ret)
1366*4882a593Smuzhiyun 			break;
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 		*retlen += thislen;
1369*4882a593Smuzhiyun 		len -= thislen;
1370*4882a593Smuzhiyun 		buf += thislen;
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 		ofs = 0;
1373*4882a593Smuzhiyun 		chipnum++;
1374*4882a593Smuzhiyun 	}
1375*4882a593Smuzhiyun 	return ret;
1376*4882a593Smuzhiyun }
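/*
 * Worked example of the 8-bytes-per-chip mapping: a read with from = 5 and
 * len = 12 starts at chipnum = 0, ofs = 5, takes thislen = 8 - 5 = 3 bytes
 * from the first chip, 8 from the second and the final byte from the
 * third, with ofs reset to 0 after the first chip.
 */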
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1379*4882a593Smuzhiyun 				     unsigned long adr, map_word datum,
1380*4882a593Smuzhiyun 				     int mode);
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
1383*4882a593Smuzhiyun 			size_t len, u_char *buf, size_t grouplen)
1384*4882a593Smuzhiyun {
1385*4882a593Smuzhiyun 	int ret;
1386*4882a593Smuzhiyun 	while (len) {
1387*4882a593Smuzhiyun 		unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
1388*4882a593Smuzhiyun 		int gap = adr - bus_ofs;
1389*4882a593Smuzhiyun 		int n = min_t(int, len, map_bankwidth(map) - gap);
1390*4882a593Smuzhiyun 		map_word datum = map_word_ff(map);
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun 		if (n != map_bankwidth(map)) {
1393*4882a593Smuzhiyun 			/* partial write of a word, load old contents */
1394*4882a593Smuzhiyun 			otp_enter(map, chip, bus_ofs, map_bankwidth(map));
1395*4882a593Smuzhiyun 			datum = map_read(map, bus_ofs);
1396*4882a593Smuzhiyun 			otp_exit(map, chip, bus_ofs, map_bankwidth(map));
1397*4882a593Smuzhiyun 		}
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 		datum = map_word_load_partial(map, datum, buf, gap, n);
1400*4882a593Smuzhiyun 		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
1401*4882a593Smuzhiyun 		if (ret)
1402*4882a593Smuzhiyun 			return ret;
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun 		adr += n;
1405*4882a593Smuzhiyun 		buf += n;
1406*4882a593Smuzhiyun 		len -= n;
1407*4882a593Smuzhiyun 	}
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	return 0;
1410*4882a593Smuzhiyun }
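/*
 * Example of the read-modify-write path above (hypothetical numbers): on a
 * 16-bit bus (map_bankwidth(map) == 2), writing a single byte at adr =
 * 0x103 yields bus_ofs = 0x102, gap = 1 and n = 1.  The old OTP word is
 * read back, the new byte is merged in via map_word_load_partial(), and
 * the full word is programmed with do_write_oneword() in FL_OTP_WRITE
 * mode so the unwritten byte lanes keep their previous contents.
 */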
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
1413*4882a593Smuzhiyun 		       size_t len, u_char *buf, size_t grouplen)
1414*4882a593Smuzhiyun {
1415*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
1416*4882a593Smuzhiyun 	uint8_t lockreg;
1417*4882a593Smuzhiyun 	unsigned long timeo;
1418*4882a593Smuzhiyun 	int ret;
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 	/* make sure area matches group boundaries */
1421*4882a593Smuzhiyun 	if ((adr != 0) || (len != grouplen))
1422*4882a593Smuzhiyun 		return -EINVAL;
1423*4882a593Smuzhiyun 
1424*4882a593Smuzhiyun 	mutex_lock(&chip->mutex);
1425*4882a593Smuzhiyun 	ret = get_chip(map, chip, chip->start, FL_LOCKING);
1426*4882a593Smuzhiyun 	if (ret) {
1427*4882a593Smuzhiyun 		mutex_unlock(&chip->mutex);
1428*4882a593Smuzhiyun 		return ret;
1429*4882a593Smuzhiyun 	}
1430*4882a593Smuzhiyun 	chip->state = FL_LOCKING;
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun 	/* Enter lock register command */
1433*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1434*4882a593Smuzhiyun 			 cfi->device_type, NULL);
1435*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1436*4882a593Smuzhiyun 			 cfi->device_type, NULL);
1437*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
1438*4882a593Smuzhiyun 			 cfi->device_type, NULL);
1439*4882a593Smuzhiyun 
1440*4882a593Smuzhiyun 	/* read lock register */
1441*4882a593Smuzhiyun 	lockreg = cfi_read_query(map, 0);
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun 	/* clear bit 0 to protect the extended memory block */
1444*4882a593Smuzhiyun 	lockreg &= ~0x01;
1445*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 	/* write lock register */
1448*4882a593Smuzhiyun 	map_write(map, CMD(0xA0), chip->start);
1449*4882a593Smuzhiyun 	map_write(map, CMD(lockreg), chip->start);
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	/* wait for chip to become ready */
1452*4882a593Smuzhiyun 	timeo = jiffies + msecs_to_jiffies(2);
1453*4882a593Smuzhiyun 	for (;;) {
1454*4882a593Smuzhiyun 		if (chip_ready(map, chip, adr, NULL))
1455*4882a593Smuzhiyun 			break;
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun 		if (time_after(jiffies, timeo)) {
1458*4882a593Smuzhiyun 			pr_err("Waiting for chip to be ready timed out.\n");
1459*4882a593Smuzhiyun 			ret = -EIO;
1460*4882a593Smuzhiyun 			break;
1461*4882a593Smuzhiyun 		}
1462*4882a593Smuzhiyun 		UDELAY(map, chip, 0, 1);
1463*4882a593Smuzhiyun 	}
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun 	/* exit protection commands */
1466*4882a593Smuzhiyun 	map_write(map, CMD(0x90), chip->start);
1467*4882a593Smuzhiyun 	map_write(map, CMD(0x00), chip->start);
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun 	chip->state = FL_READY;
1470*4882a593Smuzhiyun 	put_chip(map, chip, chip->start);
1471*4882a593Smuzhiyun 	mutex_unlock(&chip->mutex);
1472*4882a593Smuzhiyun 
1473*4882a593Smuzhiyun 	return ret;
1474*4882a593Smuzhiyun }
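/*
 * Note: on parts of this family the lock register is itself one-time
 * programmable, so once bit 0 has been cleared here the extended memory
 * block is protected permanently; there is no corresponding unlock
 * operation in this driver.
 */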
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
1477*4882a593Smuzhiyun 			       size_t *retlen, u_char *buf,
1478*4882a593Smuzhiyun 			       otp_op_t action, int user_regs)
1479*4882a593Smuzhiyun {
1480*4882a593Smuzhiyun 	struct map_info *map = mtd->priv;
1481*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
1482*4882a593Smuzhiyun 	int ofs_factor = cfi->interleave * cfi->device_type;
1483*4882a593Smuzhiyun 	unsigned long base;
1484*4882a593Smuzhiyun 	int chipnum;
1485*4882a593Smuzhiyun 	struct flchip *chip;
1486*4882a593Smuzhiyun 	uint8_t otp, lockreg;
1487*4882a593Smuzhiyun 	int ret;
1488*4882a593Smuzhiyun 
1489*4882a593Smuzhiyun 	size_t user_size, factory_size, otpsize;
1490*4882a593Smuzhiyun 	loff_t user_offset, factory_offset, otpoffset;
1491*4882a593Smuzhiyun 	int user_locked = 0, otplocked;
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	*retlen = 0;
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun 	for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
1496*4882a593Smuzhiyun 		chip = &cfi->chips[chipnum];
1497*4882a593Smuzhiyun 		factory_size = 0;
1498*4882a593Smuzhiyun 		user_size = 0;
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 		/* Micron M29EW family */
1501*4882a593Smuzhiyun 		if (is_m29ew(cfi)) {
1502*4882a593Smuzhiyun 			base = chip->start;
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 			/* check whether secsi area is factory locked
1505*4882a593Smuzhiyun 			   or user lockable */
1506*4882a593Smuzhiyun 			mutex_lock(&chip->mutex);
1507*4882a593Smuzhiyun 			ret = get_chip(map, chip, base, FL_CFI_QUERY);
1508*4882a593Smuzhiyun 			if (ret) {
1509*4882a593Smuzhiyun 				mutex_unlock(&chip->mutex);
1510*4882a593Smuzhiyun 				return ret;
1511*4882a593Smuzhiyun 			}
1512*4882a593Smuzhiyun 			cfi_qry_mode_on(base, map, cfi);
1513*4882a593Smuzhiyun 			otp = cfi_read_query(map, base + 0x3 * ofs_factor);
1514*4882a593Smuzhiyun 			cfi_qry_mode_off(base, map, cfi);
1515*4882a593Smuzhiyun 			put_chip(map, chip, base);
1516*4882a593Smuzhiyun 			mutex_unlock(&chip->mutex);
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 			if (otp & 0x80) {
1519*4882a593Smuzhiyun 				/* factory locked */
1520*4882a593Smuzhiyun 				factory_offset = 0;
1521*4882a593Smuzhiyun 				factory_size = 0x100;
1522*4882a593Smuzhiyun 			} else {
1523*4882a593Smuzhiyun 				/* customer lockable */
1524*4882a593Smuzhiyun 				user_offset = 0;
1525*4882a593Smuzhiyun 				user_size = 0x100;
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun 				mutex_lock(&chip->mutex);
1528*4882a593Smuzhiyun 				ret = get_chip(map, chip, base, FL_LOCKING);
1529*4882a593Smuzhiyun 				if (ret) {
1530*4882a593Smuzhiyun 					mutex_unlock(&chip->mutex);
1531*4882a593Smuzhiyun 					return ret;
1532*4882a593Smuzhiyun 				}
1533*4882a593Smuzhiyun 
1534*4882a593Smuzhiyun 				/* Enter lock register command */
1535*4882a593Smuzhiyun 				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
1536*4882a593Smuzhiyun 						 chip->start, map, cfi,
1537*4882a593Smuzhiyun 						 cfi->device_type, NULL);
1538*4882a593Smuzhiyun 				cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
1539*4882a593Smuzhiyun 						 chip->start, map, cfi,
1540*4882a593Smuzhiyun 						 cfi->device_type, NULL);
1541*4882a593Smuzhiyun 				cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
1542*4882a593Smuzhiyun 						 chip->start, map, cfi,
1543*4882a593Smuzhiyun 						 cfi->device_type, NULL);
1544*4882a593Smuzhiyun 				/* read lock register */
1545*4882a593Smuzhiyun 				lockreg = cfi_read_query(map, 0);
1546*4882a593Smuzhiyun 				/* exit protection commands */
1547*4882a593Smuzhiyun 				map_write(map, CMD(0x90), chip->start);
1548*4882a593Smuzhiyun 				map_write(map, CMD(0x00), chip->start);
1549*4882a593Smuzhiyun 				put_chip(map, chip, chip->start);
1550*4882a593Smuzhiyun 				mutex_unlock(&chip->mutex);
1551*4882a593Smuzhiyun 
1552*4882a593Smuzhiyun 				user_locked = ((lockreg & 0x01) == 0x00);
1553*4882a593Smuzhiyun 			}
1554*4882a593Smuzhiyun 		}
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 		otpsize = user_regs ? user_size : factory_size;
1557*4882a593Smuzhiyun 		if (!otpsize)
1558*4882a593Smuzhiyun 			continue;
1559*4882a593Smuzhiyun 		otpoffset = user_regs ? user_offset : factory_offset;
1560*4882a593Smuzhiyun 		otplocked = user_regs ? user_locked : 1;
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun 		if (!action) {
1563*4882a593Smuzhiyun 			/* return otpinfo */
1564*4882a593Smuzhiyun 			struct otp_info *otpinfo;
1565*4882a593Smuzhiyun 			if (len <= sizeof(*otpinfo))
1566*4882a593Smuzhiyun 				return -ENOSPC;
1567*4882a593Smuzhiyun 			len -= sizeof(*otpinfo);
1568*4882a593Smuzhiyun 			otpinfo = (struct otp_info *)buf;
1569*4882a593Smuzhiyun 			otpinfo->start = from;
1570*4882a593Smuzhiyun 			otpinfo->length = otpsize;
1571*4882a593Smuzhiyun 			otpinfo->locked = otplocked;
1572*4882a593Smuzhiyun 			buf += sizeof(*otpinfo);
1573*4882a593Smuzhiyun 			*retlen += sizeof(*otpinfo);
1574*4882a593Smuzhiyun 			from += otpsize;
1575*4882a593Smuzhiyun 		} else if ((from < otpsize) && (len > 0)) {
1576*4882a593Smuzhiyun 			size_t size;
1577*4882a593Smuzhiyun 			size = (len < otpsize - from) ? len : otpsize - from;
1578*4882a593Smuzhiyun 			ret = action(map, chip, otpoffset + from, size, buf,
1579*4882a593Smuzhiyun 				     otpsize);
1580*4882a593Smuzhiyun 			if (ret < 0)
1581*4882a593Smuzhiyun 				return ret;
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun 			buf += size;
1584*4882a593Smuzhiyun 			len -= size;
1585*4882a593Smuzhiyun 			*retlen += size;
1586*4882a593Smuzhiyun 			from = 0;
1587*4882a593Smuzhiyun 		} else {
1588*4882a593Smuzhiyun 			from -= otpsize;
1589*4882a593Smuzhiyun 		}
1590*4882a593Smuzhiyun 	}
1591*4882a593Smuzhiyun 	return 0;
1592*4882a593Smuzhiyun }
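/*
 * Sketch of the walk above: with action == NULL the loop emits one
 * struct otp_info per chip that exposes an OTP region.  With an action
 * callback, `from' is a linear offset across the concatenated per-chip
 * regions: each region either serves part of the request (from < otpsize)
 * or is skipped entirely via from -= otpsize.
 */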
1593*4882a593Smuzhiyun 
1594*4882a593Smuzhiyun static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
1595*4882a593Smuzhiyun 					 size_t *retlen, struct otp_info *buf)
1596*4882a593Smuzhiyun {
1597*4882a593Smuzhiyun 	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
1598*4882a593Smuzhiyun 				   NULL, 0);
1599*4882a593Smuzhiyun }
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
1602*4882a593Smuzhiyun 					 size_t *retlen, struct otp_info *buf)
1603*4882a593Smuzhiyun {
1604*4882a593Smuzhiyun 	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
1605*4882a593Smuzhiyun 				   NULL, 1);
1606*4882a593Smuzhiyun }
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
1609*4882a593Smuzhiyun 					 size_t len, size_t *retlen,
1610*4882a593Smuzhiyun 					 u_char *buf)
1611*4882a593Smuzhiyun {
1612*4882a593Smuzhiyun 	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1613*4882a593Smuzhiyun 				   buf, do_read_secsi_onechip, 0);
1614*4882a593Smuzhiyun }
1615*4882a593Smuzhiyun 
1616*4882a593Smuzhiyun static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
1617*4882a593Smuzhiyun 					 size_t len, size_t *retlen,
1618*4882a593Smuzhiyun 					 u_char *buf)
1619*4882a593Smuzhiyun {
1620*4882a593Smuzhiyun 	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1621*4882a593Smuzhiyun 				   buf, do_read_secsi_onechip, 1);
1622*4882a593Smuzhiyun }
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
1625*4882a593Smuzhiyun 					  size_t len, size_t *retlen,
1626*4882a593Smuzhiyun 					  u_char *buf)
1627*4882a593Smuzhiyun {
1628*4882a593Smuzhiyun 	return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf,
1629*4882a593Smuzhiyun 				   do_otp_write, 1);
1630*4882a593Smuzhiyun }
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
1633*4882a593Smuzhiyun 					 size_t len)
1634*4882a593Smuzhiyun {
1635*4882a593Smuzhiyun 	size_t retlen;
1636*4882a593Smuzhiyun 	return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
1637*4882a593Smuzhiyun 				   do_otp_lock, 1);
1638*4882a593Smuzhiyun }
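/*
 * These thin wrappers are what the MTD core calls; elsewhere in this
 * driver they are wired into struct mtd_info roughly as follows (sketch,
 * using the mainline MTD operation field names):
 *
 *	mtd->_get_fact_prot_info  = cfi_amdstd_get_fact_prot_info;
 *	mtd->_read_fact_prot_reg  = cfi_amdstd_read_fact_prot_reg;
 *	mtd->_get_user_prot_info  = cfi_amdstd_get_user_prot_info;
 *	mtd->_read_user_prot_reg  = cfi_amdstd_read_user_prot_reg;
 *	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
 *	mtd->_lock_user_prot_reg  = cfi_amdstd_lock_user_prot_reg;
 */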
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun static int __xipram do_write_oneword_once(struct map_info *map,
1641*4882a593Smuzhiyun 					  struct flchip *chip,
1642*4882a593Smuzhiyun 					  unsigned long adr, map_word datum,
1643*4882a593Smuzhiyun 					  int mode, struct cfi_private *cfi)
1644*4882a593Smuzhiyun {
1645*4882a593Smuzhiyun 	unsigned long timeo = jiffies + HZ;
1646*4882a593Smuzhiyun 	/*
1647*4882a593Smuzhiyun 	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
1648*4882a593Smuzhiyun 	 * have a max write time of a few hundred usec). However, we should
1649*4882a593Smuzhiyun 	 * use the maximum timeout value given by the chip at probe time
1650*4882a593Smuzhiyun 	 * instead.  Unfortunately, struct flchip doesn't have a field for the
1651*4882a593Smuzhiyun 	 * maximum timeout, only for the typical one, which can be far too
1652*4882a593Smuzhiyun 	 * short depending on the conditions.  The ' + 1' is to avoid having a
1653*4882a593Smuzhiyun 	 * timeout of 0 jiffies if HZ is smaller than 1000.
1654*4882a593Smuzhiyun 	 */
1655*4882a593Smuzhiyun 	unsigned long uWriteTimeout = (HZ / 1000) + 1;
1656*4882a593Smuzhiyun 	int ret = 0;
1657*4882a593Smuzhiyun 
1658*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1659*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1660*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1661*4882a593Smuzhiyun 	map_write(map, datum, adr);
1662*4882a593Smuzhiyun 	chip->state = mode;
1663*4882a593Smuzhiyun 
1664*4882a593Smuzhiyun 	INVALIDATE_CACHE_UDELAY(map, chip,
1665*4882a593Smuzhiyun 				adr, map_bankwidth(map),
1666*4882a593Smuzhiyun 				chip->word_write_time);
1667*4882a593Smuzhiyun 
1668*4882a593Smuzhiyun 	/* See comment above for timeout value. */
1669*4882a593Smuzhiyun 	timeo = jiffies + uWriteTimeout;
1670*4882a593Smuzhiyun 	for (;;) {
1671*4882a593Smuzhiyun 		if (chip->state != mode) {
1672*4882a593Smuzhiyun 			/* Someone's suspended the write. Sleep */
1673*4882a593Smuzhiyun 			DECLARE_WAITQUEUE(wait, current);
1674*4882a593Smuzhiyun 
1675*4882a593Smuzhiyun 			set_current_state(TASK_UNINTERRUPTIBLE);
1676*4882a593Smuzhiyun 			add_wait_queue(&chip->wq, &wait);
1677*4882a593Smuzhiyun 			mutex_unlock(&chip->mutex);
1678*4882a593Smuzhiyun 			schedule();
1679*4882a593Smuzhiyun 			remove_wait_queue(&chip->wq, &wait);
1680*4882a593Smuzhiyun 			timeo = jiffies + (HZ / 2); /* FIXME */
1681*4882a593Smuzhiyun 			mutex_lock(&chip->mutex);
1682*4882a593Smuzhiyun 			continue;
1683*4882a593Smuzhiyun 		}
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun 		/*
1686*4882a593Smuzhiyun 		 * We check "time_after" && "!chip_good" before bare "chip_good" so a
1687*4882a593Smuzhiyun 		 * write completing while we were scheduled away isn't seen as a timeout.
1688*4882a593Smuzhiyun 		 */
1689*4882a593Smuzhiyun 		if (time_after(jiffies, timeo) &&
1690*4882a593Smuzhiyun 		    !chip_good(map, chip, adr, &datum)) {
1691*4882a593Smuzhiyun 			xip_enable(map, chip, adr);
1692*4882a593Smuzhiyun 			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1693*4882a593Smuzhiyun 			xip_disable(map, chip, adr);
1694*4882a593Smuzhiyun 			ret = -EIO;
1695*4882a593Smuzhiyun 			break;
1696*4882a593Smuzhiyun 		}
1697*4882a593Smuzhiyun 
1698*4882a593Smuzhiyun 		if (chip_good(map, chip, adr, &datum)) {
1699*4882a593Smuzhiyun 			if (cfi_check_err_status(map, chip, adr))
1700*4882a593Smuzhiyun 				ret = -EIO;
1701*4882a593Smuzhiyun 			break;
1702*4882a593Smuzhiyun 		}
1703*4882a593Smuzhiyun 
1704*4882a593Smuzhiyun 		/* Latency issues. Drop the lock, wait a while and retry */
1705*4882a593Smuzhiyun 		UDELAY(map, chip, adr, 1);
1706*4882a593Smuzhiyun 	}
1707*4882a593Smuzhiyun 
1708*4882a593Smuzhiyun 	return ret;
1709*4882a593Smuzhiyun }
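/*
 * Timeout arithmetic worked through (hypothetical HZ values): with
 * HZ = 1000, uWriteTimeout = (1000 / 1000) + 1 = 2 jiffies (2 ms); with
 * HZ = 100, integer division gives 0 + 1 = 1 jiffy (10 ms).  The ' + 1'
 * thus guarantees a non-zero timeout however coarse the tick.
 */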
1710*4882a593Smuzhiyun 
1711*4882a593Smuzhiyun static int __xipram do_write_oneword_start(struct map_info *map,
1712*4882a593Smuzhiyun 					   struct flchip *chip,
1713*4882a593Smuzhiyun 					   unsigned long adr, int mode)
1714*4882a593Smuzhiyun {
1715*4882a593Smuzhiyun 	int ret;
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun 	mutex_lock(&chip->mutex);
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun 	ret = get_chip(map, chip, adr, mode);
1720*4882a593Smuzhiyun 	if (ret) {
1721*4882a593Smuzhiyun 		mutex_unlock(&chip->mutex);
1722*4882a593Smuzhiyun 		return ret;
1723*4882a593Smuzhiyun 	}
1724*4882a593Smuzhiyun 
1725*4882a593Smuzhiyun 	if (mode == FL_OTP_WRITE)
1726*4882a593Smuzhiyun 		otp_enter(map, chip, adr, map_bankwidth(map));
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun 	return ret;
1729*4882a593Smuzhiyun }
1730*4882a593Smuzhiyun 
1731*4882a593Smuzhiyun static void __xipram do_write_oneword_done(struct map_info *map,
1732*4882a593Smuzhiyun 					   struct flchip *chip,
1733*4882a593Smuzhiyun 					   unsigned long adr, int mode)
1734*4882a593Smuzhiyun {
1735*4882a593Smuzhiyun 	if (mode == FL_OTP_WRITE)
1736*4882a593Smuzhiyun 		otp_exit(map, chip, adr, map_bankwidth(map));
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun 	chip->state = FL_READY;
1739*4882a593Smuzhiyun 	DISABLE_VPP(map);
1740*4882a593Smuzhiyun 	put_chip(map, chip, adr);
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun 	mutex_unlock(&chip->mutex);
1743*4882a593Smuzhiyun }
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun static int __xipram do_write_oneword_retry(struct map_info *map,
1746*4882a593Smuzhiyun 					   struct flchip *chip,
1747*4882a593Smuzhiyun 					   unsigned long adr, map_word datum,
1748*4882a593Smuzhiyun 					   int mode)
1749*4882a593Smuzhiyun {
1750*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
1751*4882a593Smuzhiyun 	int ret = 0;
1752*4882a593Smuzhiyun 	map_word oldd;
1753*4882a593Smuzhiyun 	int retry_cnt = 0;
1754*4882a593Smuzhiyun 
1755*4882a593Smuzhiyun 	/*
1756*4882a593Smuzhiyun 	 * Check for a NOP for the case when the datum to write is already
1757*4882a593Smuzhiyun 	 * present - it saves time and works around buggy chips that corrupt
1758*4882a593Smuzhiyun 	 * data at other locations when 0xff is written to a location that
1759*4882a593Smuzhiyun 	 * already contains 0xff.
1760*4882a593Smuzhiyun 	 */
1761*4882a593Smuzhiyun 	oldd = map_read(map, adr);
1762*4882a593Smuzhiyun 	if (map_word_equal(map, oldd, datum)) {
1763*4882a593Smuzhiyun 		pr_debug("MTD %s(): NOP\n", __func__);
1764*4882a593Smuzhiyun 		return ret;
1765*4882a593Smuzhiyun 	}
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun 	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1768*4882a593Smuzhiyun 	ENABLE_VPP(map);
1769*4882a593Smuzhiyun 	xip_disable(map, chip, adr);
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun  retry:
1772*4882a593Smuzhiyun 	ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
1773*4882a593Smuzhiyun 	if (ret) {
1774*4882a593Smuzhiyun 		/* reset on all failures. */
1775*4882a593Smuzhiyun 		map_write(map, CMD(0xF0), chip->start);
1776*4882a593Smuzhiyun 		/* FIXME - should have reset delay before continuing */
1777*4882a593Smuzhiyun 
1778*4882a593Smuzhiyun 		if (++retry_cnt <= MAX_RETRIES) {
1779*4882a593Smuzhiyun 			ret = 0;
1780*4882a593Smuzhiyun 			goto retry;
1781*4882a593Smuzhiyun 		}
1782*4882a593Smuzhiyun 	}
1783*4882a593Smuzhiyun 	xip_enable(map, chip, adr);
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 	return ret;
1786*4882a593Smuzhiyun }
1787*4882a593Smuzhiyun 
1788*4882a593Smuzhiyun static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1789*4882a593Smuzhiyun 				     unsigned long adr, map_word datum,
1790*4882a593Smuzhiyun 				     int mode)
1791*4882a593Smuzhiyun {
1792*4882a593Smuzhiyun 	int ret;
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun 	adr += chip->start;
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr,
1797*4882a593Smuzhiyun 		 datum.x[0]);
1798*4882a593Smuzhiyun 
1799*4882a593Smuzhiyun 	ret = do_write_oneword_start(map, chip, adr, mode);
1800*4882a593Smuzhiyun 	if (ret)
1801*4882a593Smuzhiyun 		return ret;
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun 	ret = do_write_oneword_retry(map, chip, adr, datum, mode);
1804*4882a593Smuzhiyun 
1805*4882a593Smuzhiyun 	do_write_oneword_done(map, chip, adr, mode);
1806*4882a593Smuzhiyun 
1807*4882a593Smuzhiyun 	return ret;
1808*4882a593Smuzhiyun }
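/*
 * do_write_oneword() is deliberately split into start/retry/done phases:
 * the retry phase may re-issue the unlock-and-program cycle up to
 * MAX_RETRIES times after resetting the chip, while claiming the chip and
 * the OTP enter/exit bracketing happen exactly once per word.
 */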
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1812*4882a593Smuzhiyun 				  size_t *retlen, const u_char *buf)
1813*4882a593Smuzhiyun {
1814*4882a593Smuzhiyun 	struct map_info *map = mtd->priv;
1815*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
1816*4882a593Smuzhiyun 	int ret;
1817*4882a593Smuzhiyun 	int chipnum;
1818*4882a593Smuzhiyun 	unsigned long ofs, chipstart;
1819*4882a593Smuzhiyun 	DECLARE_WAITQUEUE(wait, current);
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 	chipnum = to >> cfi->chipshift;
1822*4882a593Smuzhiyun 	ofs = to  - (chipnum << cfi->chipshift);
1823*4882a593Smuzhiyun 	chipstart = cfi->chips[chipnum].start;
1824*4882a593Smuzhiyun 
1825*4882a593Smuzhiyun 	/* If it's not bus-aligned, do the first byte write */
1826*4882a593Smuzhiyun 	if (ofs & (map_bankwidth(map)-1)) {
1827*4882a593Smuzhiyun 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1828*4882a593Smuzhiyun 		int i = ofs - bus_ofs;
1829*4882a593Smuzhiyun 		int n = 0;
1830*4882a593Smuzhiyun 		map_word tmp_buf;
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun  retry:
1833*4882a593Smuzhiyun 		mutex_lock(&cfi->chips[chipnum].mutex);
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun 		if (cfi->chips[chipnum].state != FL_READY) {
1836*4882a593Smuzhiyun 			set_current_state(TASK_UNINTERRUPTIBLE);
1837*4882a593Smuzhiyun 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1838*4882a593Smuzhiyun 
1839*4882a593Smuzhiyun 			mutex_unlock(&cfi->chips[chipnum].mutex);
1840*4882a593Smuzhiyun 
1841*4882a593Smuzhiyun 			schedule();
1842*4882a593Smuzhiyun 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1843*4882a593Smuzhiyun 			goto retry;
1844*4882a593Smuzhiyun 		}
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun 		/* Load 'tmp_buf' with old contents of flash */
1847*4882a593Smuzhiyun 		tmp_buf = map_read(map, bus_ofs+chipstart);
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun 		mutex_unlock(&cfi->chips[chipnum].mutex);
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun 		/* Number of bytes to copy from buffer */
1852*4882a593Smuzhiyun 		n = min_t(int, len, map_bankwidth(map)-i);
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1855*4882a593Smuzhiyun 
1856*4882a593Smuzhiyun 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1857*4882a593Smuzhiyun 				       bus_ofs, tmp_buf, FL_WRITING);
1858*4882a593Smuzhiyun 		if (ret)
1859*4882a593Smuzhiyun 			return ret;
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun 		ofs += n;
1862*4882a593Smuzhiyun 		buf += n;
1863*4882a593Smuzhiyun 		(*retlen) += n;
1864*4882a593Smuzhiyun 		len -= n;
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun 		if (ofs >> cfi->chipshift) {
1867*4882a593Smuzhiyun 			chipnum++;
1868*4882a593Smuzhiyun 			ofs = 0;
1869*4882a593Smuzhiyun 			if (chipnum == cfi->numchips)
1870*4882a593Smuzhiyun 				return 0;
1871*4882a593Smuzhiyun 		}
1872*4882a593Smuzhiyun 	}
1873*4882a593Smuzhiyun 
1874*4882a593Smuzhiyun 	/* We are now aligned, write as much as possible */
1875*4882a593Smuzhiyun 	while (len >= map_bankwidth(map)) {
1876*4882a593Smuzhiyun 		map_word datum;
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun 		datum = map_word_load(map, buf);
1879*4882a593Smuzhiyun 
1880*4882a593Smuzhiyun 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1881*4882a593Smuzhiyun 				       ofs, datum, FL_WRITING);
1882*4882a593Smuzhiyun 		if (ret)
1883*4882a593Smuzhiyun 			return ret;
1884*4882a593Smuzhiyun 
1885*4882a593Smuzhiyun 		ofs += map_bankwidth(map);
1886*4882a593Smuzhiyun 		buf += map_bankwidth(map);
1887*4882a593Smuzhiyun 		(*retlen) += map_bankwidth(map);
1888*4882a593Smuzhiyun 		len -= map_bankwidth(map);
1889*4882a593Smuzhiyun 
1890*4882a593Smuzhiyun 		if (ofs >> cfi->chipshift) {
1891*4882a593Smuzhiyun 			chipnum++;
1892*4882a593Smuzhiyun 			ofs = 0;
1893*4882a593Smuzhiyun 			if (chipnum == cfi->numchips)
1894*4882a593Smuzhiyun 				return 0;
1895*4882a593Smuzhiyun 			chipstart = cfi->chips[chipnum].start;
1896*4882a593Smuzhiyun 		}
1897*4882a593Smuzhiyun 	}
1898*4882a593Smuzhiyun 
1899*4882a593Smuzhiyun 	/* Write the trailing bytes if any */
1900*4882a593Smuzhiyun 	if (len & (map_bankwidth(map)-1)) {
1901*4882a593Smuzhiyun 		map_word tmp_buf;
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun  retry1:
1904*4882a593Smuzhiyun 		mutex_lock(&cfi->chips[chipnum].mutex);
1905*4882a593Smuzhiyun 
1906*4882a593Smuzhiyun 		if (cfi->chips[chipnum].state != FL_READY) {
1907*4882a593Smuzhiyun 			set_current_state(TASK_UNINTERRUPTIBLE);
1908*4882a593Smuzhiyun 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1909*4882a593Smuzhiyun 
1910*4882a593Smuzhiyun 			mutex_unlock(&cfi->chips[chipnum].mutex);
1911*4882a593Smuzhiyun 
1912*4882a593Smuzhiyun 			schedule();
1913*4882a593Smuzhiyun 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1914*4882a593Smuzhiyun 			goto retry1;
1915*4882a593Smuzhiyun 		}
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun 		tmp_buf = map_read(map, ofs + chipstart);
1918*4882a593Smuzhiyun 
1919*4882a593Smuzhiyun 		mutex_unlock(&cfi->chips[chipnum].mutex);
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1922*4882a593Smuzhiyun 
1923*4882a593Smuzhiyun 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1924*4882a593Smuzhiyun 				       ofs, tmp_buf, FL_WRITING);
1925*4882a593Smuzhiyun 		if (ret)
1926*4882a593Smuzhiyun 			return ret;
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun 		(*retlen) += len;
1929*4882a593Smuzhiyun 	}
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun 	return 0;
1932*4882a593Smuzhiyun }
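/*
 * Worked example of the alignment handling above (hypothetical request):
 * on a 16-bit bus, a 6-byte write at to = 0x1001 becomes one
 * read-modify-write of the word at 0x1000 (merging the leading byte), two
 * aligned word writes at 0x1002 and 0x1004, and one read-modify-write of
 * the word at 0x1006 for the trailing byte.
 */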
1933*4882a593Smuzhiyun 
1934*4882a593Smuzhiyun #if !FORCE_WORD_WRITE
1935*4882a593Smuzhiyun static int __xipram do_write_buffer_wait(struct map_info *map,
1936*4882a593Smuzhiyun 					 struct flchip *chip, unsigned long adr,
1937*4882a593Smuzhiyun 					 map_word datum)
1938*4882a593Smuzhiyun {
1939*4882a593Smuzhiyun 	unsigned long timeo;
1940*4882a593Smuzhiyun 	unsigned long u_write_timeout;
1941*4882a593Smuzhiyun 	int ret = 0;
1942*4882a593Smuzhiyun 
1943*4882a593Smuzhiyun 	/*
1944*4882a593Smuzhiyun 	 * Timeout is calculated according to CFI data, if available.
1945*4882a593Smuzhiyun 	 * See more comments in cfi_cmdset_0002().
1946*4882a593Smuzhiyun 	 */
1947*4882a593Smuzhiyun 	u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max);
1948*4882a593Smuzhiyun 	timeo = jiffies + u_write_timeout;
1949*4882a593Smuzhiyun 
1950*4882a593Smuzhiyun 	for (;;) {
1951*4882a593Smuzhiyun 		if (chip->state != FL_WRITING) {
1952*4882a593Smuzhiyun 			/* Someone's suspended the write. Sleep */
1953*4882a593Smuzhiyun 			DECLARE_WAITQUEUE(wait, current);
1954*4882a593Smuzhiyun 
1955*4882a593Smuzhiyun 			set_current_state(TASK_UNINTERRUPTIBLE);
1956*4882a593Smuzhiyun 			add_wait_queue(&chip->wq, &wait);
1957*4882a593Smuzhiyun 			mutex_unlock(&chip->mutex);
1958*4882a593Smuzhiyun 			schedule();
1959*4882a593Smuzhiyun 			remove_wait_queue(&chip->wq, &wait);
1960*4882a593Smuzhiyun 			timeo = jiffies + (HZ / 2); /* FIXME */
1961*4882a593Smuzhiyun 			mutex_lock(&chip->mutex);
1962*4882a593Smuzhiyun 			continue;
1963*4882a593Smuzhiyun 		}
1964*4882a593Smuzhiyun 
1965*4882a593Smuzhiyun 		/*
1966*4882a593Smuzhiyun 		 * We check "time_after" && "!chip_good" before bare "chip_good" so a
1967*4882a593Smuzhiyun 		 * write completing while we were scheduled away isn't seen as a timeout.
1968*4882a593Smuzhiyun 		 */
1969*4882a593Smuzhiyun 		if (time_after(jiffies, timeo) &&
1970*4882a593Smuzhiyun 		    !chip_good(map, chip, adr, &datum)) {
1971*4882a593Smuzhiyun 			pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
1972*4882a593Smuzhiyun 			       __func__, adr);
1973*4882a593Smuzhiyun 			ret = -EIO;
1974*4882a593Smuzhiyun 			break;
1975*4882a593Smuzhiyun 		}
1976*4882a593Smuzhiyun 
1977*4882a593Smuzhiyun 		if (chip_good(map, chip, adr, &datum)) {
1978*4882a593Smuzhiyun 			if (cfi_check_err_status(map, chip, adr))
1979*4882a593Smuzhiyun 				ret = -EIO;
1980*4882a593Smuzhiyun 			break;
1981*4882a593Smuzhiyun 		}
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 		/* Latency issues. Drop the lock, wait a while and retry */
1984*4882a593Smuzhiyun 		UDELAY(map, chip, adr, 1);
1985*4882a593Smuzhiyun 	}
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun 	return ret;
1988*4882a593Smuzhiyun }
1989*4882a593Smuzhiyun 
1990*4882a593Smuzhiyun static void __xipram do_write_buffer_reset(struct map_info *map,
1991*4882a593Smuzhiyun 					   struct flchip *chip,
1992*4882a593Smuzhiyun 					   struct cfi_private *cfi)
1993*4882a593Smuzhiyun {
1994*4882a593Smuzhiyun 	/*
1995*4882a593Smuzhiyun 	 * Recovery from write-buffer programming failures requires
1996*4882a593Smuzhiyun 	 * the write-to-buffer-reset sequence.  Since the last part
1997*4882a593Smuzhiyun 	 * of the sequence also works as a normal reset, we can run
1998*4882a593Smuzhiyun 	 * the same commands regardless of why we are here.
1999*4882a593Smuzhiyun 	 * See e.g.
2000*4882a593Smuzhiyun 	 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
2001*4882a593Smuzhiyun 	 */
2002*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2003*4882a593Smuzhiyun 			 cfi->device_type, NULL);
2004*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2005*4882a593Smuzhiyun 			 cfi->device_type, NULL);
2006*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
2007*4882a593Smuzhiyun 			 cfi->device_type, NULL);
2008*4882a593Smuzhiyun 
2009*4882a593Smuzhiyun 	/* FIXME - should have reset delay before continuing */
2010*4882a593Smuzhiyun }
2011*4882a593Smuzhiyun 
2012*4882a593Smuzhiyun /*
2013*4882a593Smuzhiyun  * FIXME: interleaved mode not tested, and probably not supported!
2014*4882a593Smuzhiyun  */
2015*4882a593Smuzhiyun static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
2016*4882a593Smuzhiyun 				    unsigned long adr, const u_char *buf,
2017*4882a593Smuzhiyun 				    int len)
2018*4882a593Smuzhiyun {
2019*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
2020*4882a593Smuzhiyun 	int ret;
2021*4882a593Smuzhiyun 	unsigned long cmd_adr;
2022*4882a593Smuzhiyun 	int z, words;
2023*4882a593Smuzhiyun 	map_word datum;
2024*4882a593Smuzhiyun 
2025*4882a593Smuzhiyun 	adr += chip->start;
2026*4882a593Smuzhiyun 	cmd_adr = adr;
2027*4882a593Smuzhiyun 
2028*4882a593Smuzhiyun 	mutex_lock(&chip->mutex);
2029*4882a593Smuzhiyun 	ret = get_chip(map, chip, adr, FL_WRITING);
2030*4882a593Smuzhiyun 	if (ret) {
2031*4882a593Smuzhiyun 		mutex_unlock(&chip->mutex);
2032*4882a593Smuzhiyun 		return ret;
2033*4882a593Smuzhiyun 	}
2034*4882a593Smuzhiyun 
2035*4882a593Smuzhiyun 	datum = map_word_load(map, buf);
2036*4882a593Smuzhiyun 
2037*4882a593Smuzhiyun 	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
2038*4882a593Smuzhiyun 		 __func__, adr, datum.x[0]);
2039*4882a593Smuzhiyun 
2040*4882a593Smuzhiyun 	XIP_INVAL_CACHED_RANGE(map, adr, len);
2041*4882a593Smuzhiyun 	ENABLE_VPP(map);
2042*4882a593Smuzhiyun 	xip_disable(map, chip, cmd_adr);
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2045*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2046*4882a593Smuzhiyun 
2047*4882a593Smuzhiyun 	/* Write Buffer Load */
2048*4882a593Smuzhiyun 	map_write(map, CMD(0x25), cmd_adr);
2049*4882a593Smuzhiyun 
2050*4882a593Smuzhiyun 	chip->state = FL_WRITING_TO_BUFFER;
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	/* Write length of data to come */
2053*4882a593Smuzhiyun 	words = len / map_bankwidth(map);
2054*4882a593Smuzhiyun 	map_write(map, CMD(words - 1), cmd_adr);
2055*4882a593Smuzhiyun 	/* Write data */
2056*4882a593Smuzhiyun 	z = 0;
2057*4882a593Smuzhiyun 	while (z < words * map_bankwidth(map)) {
2058*4882a593Smuzhiyun 		datum = map_word_load(map, buf);
2059*4882a593Smuzhiyun 		map_write(map, datum, adr + z);
2060*4882a593Smuzhiyun 
2061*4882a593Smuzhiyun 		z += map_bankwidth(map);
2062*4882a593Smuzhiyun 		buf += map_bankwidth(map);
2063*4882a593Smuzhiyun 	}
2064*4882a593Smuzhiyun 	z -= map_bankwidth(map);
2065*4882a593Smuzhiyun 
2066*4882a593Smuzhiyun 	adr += z;
2067*4882a593Smuzhiyun 
2068*4882a593Smuzhiyun 	/* Write Buffer Program Confirm: GO GO GO */
2069*4882a593Smuzhiyun 	map_write(map, CMD(0x29), cmd_adr);
2070*4882a593Smuzhiyun 	chip->state = FL_WRITING;
2071*4882a593Smuzhiyun 
2072*4882a593Smuzhiyun 	INVALIDATE_CACHE_UDELAY(map, chip,
2073*4882a593Smuzhiyun 				adr, map_bankwidth(map),
2074*4882a593Smuzhiyun 				chip->word_write_time);
2075*4882a593Smuzhiyun 
2076*4882a593Smuzhiyun 	ret = do_write_buffer_wait(map, chip, adr, datum);
2077*4882a593Smuzhiyun 	if (ret)
2078*4882a593Smuzhiyun 		do_write_buffer_reset(map, chip, cfi);
2079*4882a593Smuzhiyun 
2080*4882a593Smuzhiyun 	xip_enable(map, chip, adr);
2081*4882a593Smuzhiyun 
2082*4882a593Smuzhiyun 	chip->state = FL_READY;
2083*4882a593Smuzhiyun 	DISABLE_VPP(map);
2084*4882a593Smuzhiyun 	put_chip(map, chip, adr);
2085*4882a593Smuzhiyun 	mutex_unlock(&chip->mutex);
2086*4882a593Smuzhiyun 
2087*4882a593Smuzhiyun 	return ret;
2088*4882a593Smuzhiyun }
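/*
 * The buffered program sequence issued above, in order: unlock cycles
 * (0xAA, 0x55), Write Buffer Load (0x25) at the sector address, the word
 * count minus one, `words' data cycles, and finally the Write Buffer
 * Program Confirm (0x29).  The count and confirm cycles go to cmd_adr
 * while the data cycles advance through adr.
 */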
2089*4882a593Smuzhiyun 
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
2092*4882a593Smuzhiyun 				    size_t *retlen, const u_char *buf)
2093*4882a593Smuzhiyun {
2094*4882a593Smuzhiyun 	struct map_info *map = mtd->priv;
2095*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
2096*4882a593Smuzhiyun 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
2097*4882a593Smuzhiyun 	int ret;
2098*4882a593Smuzhiyun 	int chipnum;
2099*4882a593Smuzhiyun 	unsigned long ofs;
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun 	chipnum = to >> cfi->chipshift;
2102*4882a593Smuzhiyun 	ofs = to  - (chipnum << cfi->chipshift);
2103*4882a593Smuzhiyun 
2104*4882a593Smuzhiyun 	/* If it's not bus-aligned, do the first word write */
2105*4882a593Smuzhiyun 	if (ofs & (map_bankwidth(map)-1)) {
2106*4882a593Smuzhiyun 		size_t local_len = (-ofs) & (map_bankwidth(map)-1);
2107*4882a593Smuzhiyun 		if (local_len > len)
2108*4882a593Smuzhiyun 			local_len = len;
2109*4882a593Smuzhiyun 		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2110*4882a593Smuzhiyun 					     local_len, retlen, buf);
2111*4882a593Smuzhiyun 		if (ret)
2112*4882a593Smuzhiyun 			return ret;
2113*4882a593Smuzhiyun 		ofs += local_len;
2114*4882a593Smuzhiyun 		buf += local_len;
2115*4882a593Smuzhiyun 		len -= local_len;
2116*4882a593Smuzhiyun 
2117*4882a593Smuzhiyun 		if (ofs >> cfi->chipshift) {
2118*4882a593Smuzhiyun 			chipnum++;
2119*4882a593Smuzhiyun 			ofs = 0;
2120*4882a593Smuzhiyun 			if (chipnum == cfi->numchips)
2121*4882a593Smuzhiyun 				return 0;
2122*4882a593Smuzhiyun 		}
2123*4882a593Smuzhiyun 	}
2124*4882a593Smuzhiyun 
2125*4882a593Smuzhiyun 	/* Write buffer is worth it only if more than one word to write... */
2126*4882a593Smuzhiyun 	while (len >= map_bankwidth(map) * 2) {
2127*4882a593Smuzhiyun 		/* We must not cross write block boundaries */
2128*4882a593Smuzhiyun 		int size = wbufsize - (ofs & (wbufsize-1));
2129*4882a593Smuzhiyun 
2130*4882a593Smuzhiyun 		if (size > len)
2131*4882a593Smuzhiyun 			size = len;
2132*4882a593Smuzhiyun 		if (size % map_bankwidth(map))
2133*4882a593Smuzhiyun 			size -= size % map_bankwidth(map);
2134*4882a593Smuzhiyun 
2135*4882a593Smuzhiyun 		ret = do_write_buffer(map, &cfi->chips[chipnum],
2136*4882a593Smuzhiyun 				      ofs, buf, size);
2137*4882a593Smuzhiyun 		if (ret)
2138*4882a593Smuzhiyun 			return ret;
2139*4882a593Smuzhiyun 
2140*4882a593Smuzhiyun 		ofs += size;
2141*4882a593Smuzhiyun 		buf += size;
2142*4882a593Smuzhiyun 		(*retlen) += size;
2143*4882a593Smuzhiyun 		len -= size;
2144*4882a593Smuzhiyun 
2145*4882a593Smuzhiyun 		if (ofs >> cfi->chipshift) {
2146*4882a593Smuzhiyun 			chipnum ++;
2147*4882a593Smuzhiyun 			ofs = 0;
2148*4882a593Smuzhiyun 			if (chipnum == cfi->numchips)
2149*4882a593Smuzhiyun 				return 0;
2150*4882a593Smuzhiyun 		}
2151*4882a593Smuzhiyun 	}
2152*4882a593Smuzhiyun 
2153*4882a593Smuzhiyun 	if (len) {
2154*4882a593Smuzhiyun 		size_t retlen_dregs = 0;
2155*4882a593Smuzhiyun 
2156*4882a593Smuzhiyun 		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2157*4882a593Smuzhiyun 					     len, &retlen_dregs, buf);
2158*4882a593Smuzhiyun 
2159*4882a593Smuzhiyun 		*retlen += retlen_dregs;
2160*4882a593Smuzhiyun 		return ret;
2161*4882a593Smuzhiyun 	}
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun 	return 0;
2164*4882a593Smuzhiyun }
2165*4882a593Smuzhiyun #endif /* !FORCE_WORD_WRITE */
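/*
 * Editor's note: a minimal, hedged user-space sketch of the chunking
 * arithmetic used by cfi_amdstd_write_buffers() above. The names ofs,
 * len and wbufsize mirror the driver's locals but are assumptions for
 * illustration only; the loop shows how a write is split so that no
 * single buffered program operation crosses a write-buffer boundary.
 */
#include <stdio.h>

static void show_chunks(unsigned long ofs, unsigned long len,
			unsigned long wbufsize)
{
	while (len) {
		/* bytes left before the next wbufsize-aligned boundary */
		unsigned long size = wbufsize - (ofs & (wbufsize - 1));

		if (size > len)
			size = len;
		printf("program %lu bytes at 0x%lx\n", size, ofs);
		ofs += size;
		len -= size;
	}
}

int main(void)
{
	/* e.g. a 32-byte write buffer; 100-byte write at offset 0x1c */
	show_chunks(0x1c, 100, 32);
	return 0;
}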
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun /*
2168*4882a593Smuzhiyun  * Wait for the flash chip to become ready to write data
2169*4882a593Smuzhiyun  *
2170*4882a593Smuzhiyun  * This is only called during the panic_write() path. When panic_write()
2171*4882a593Smuzhiyun  * is called, the kernel is in the process of a panic, and will soon be
2172*4882a593Smuzhiyun  * dead. Therefore we don't take any locks, and attempt to get access
2173*4882a593Smuzhiyun  * to the chip as soon as possible.
2174*4882a593Smuzhiyun  */
2175*4882a593Smuzhiyun static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
2176*4882a593Smuzhiyun 				 unsigned long adr)
2177*4882a593Smuzhiyun {
2178*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
2179*4882a593Smuzhiyun 	int retries = 10;
2180*4882a593Smuzhiyun 	int i;
2181*4882a593Smuzhiyun 
2182*4882a593Smuzhiyun 	/*
2183*4882a593Smuzhiyun 	 * If the driver thinks the chip is idle, and no toggle bits
2184*4882a593Smuzhiyun 	 * are changing, then the chip is actually idle for sure.
2185*4882a593Smuzhiyun 	 */
2186*4882a593Smuzhiyun 	if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL))
2187*4882a593Smuzhiyun 		return 0;
2188*4882a593Smuzhiyun 
2189*4882a593Smuzhiyun 	/*
2190*4882a593Smuzhiyun 	 * Try several times to reset the chip and then wait for it
2191*4882a593Smuzhiyun 	 * to become idle. The upper limit of a few milliseconds of
2192*4882a593Smuzhiyun 	 * delay isn't a big problem: the kernel is dying anyway. It
2193*4882a593Smuzhiyun 	 * is more important to save the messages.
2194*4882a593Smuzhiyun 	 */
2195*4882a593Smuzhiyun 	while (retries > 0) {
2196*4882a593Smuzhiyun 		const unsigned long timeo = (HZ / 1000) + 1;
2197*4882a593Smuzhiyun 
2198*4882a593Smuzhiyun 		/* send the reset command */
2199*4882a593Smuzhiyun 		map_write(map, CMD(0xF0), chip->start);
2200*4882a593Smuzhiyun 
2201*4882a593Smuzhiyun 		/* wait for the chip to become ready */
2202*4882a593Smuzhiyun 		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
2203*4882a593Smuzhiyun 			if (chip_ready(map, chip, adr, NULL))
2204*4882a593Smuzhiyun 				return 0;
2205*4882a593Smuzhiyun 
2206*4882a593Smuzhiyun 			udelay(1);
2207*4882a593Smuzhiyun 		}
2208*4882a593Smuzhiyun 
2209*4882a593Smuzhiyun 		retries--;
2210*4882a593Smuzhiyun 	}
2211*4882a593Smuzhiyun 
2212*4882a593Smuzhiyun 	/* the chip never became ready */
2213*4882a593Smuzhiyun 	return -EBUSY;
2214*4882a593Smuzhiyun }
2215*4882a593Smuzhiyun 
2216*4882a593Smuzhiyun /*
2217*4882a593Smuzhiyun  * Write out one word of data to a single flash chip during a kernel panic
2218*4882a593Smuzhiyun  *
2219*4882a593Smuzhiyun  * This is only called during the panic_write() path. When panic_write()
2220*4882a593Smuzhiyun  * is called, the kernel is in the process of a panic, and will soon be
2221*4882a593Smuzhiyun  * dead. Therefore we don't take any locks, and attempt to get access
2222*4882a593Smuzhiyun  * to the chip as soon as possible.
2223*4882a593Smuzhiyun  *
2224*4882a593Smuzhiyun  * The implementation of this routine is intentionally similar to
2225*4882a593Smuzhiyun  * do_write_oneword(), in order to ease code maintenance.
2226*4882a593Smuzhiyun  */
2227*4882a593Smuzhiyun static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
2228*4882a593Smuzhiyun 				  unsigned long adr, map_word datum)
2229*4882a593Smuzhiyun {
2230*4882a593Smuzhiyun 	const unsigned long uWriteTimeout = (HZ / 1000) + 1;
2231*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
2232*4882a593Smuzhiyun 	int retry_cnt = 0;
2233*4882a593Smuzhiyun 	map_word oldd;
2234*4882a593Smuzhiyun 	int ret;
2235*4882a593Smuzhiyun 	int i;
2236*4882a593Smuzhiyun 
2237*4882a593Smuzhiyun 	adr += chip->start;
2238*4882a593Smuzhiyun 
2239*4882a593Smuzhiyun 	ret = cfi_amdstd_panic_wait(map, chip, adr);
2240*4882a593Smuzhiyun 	if (ret)
2241*4882a593Smuzhiyun 		return ret;
2242*4882a593Smuzhiyun 
2243*4882a593Smuzhiyun 	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
2244*4882a593Smuzhiyun 			__func__, adr, datum.x[0]);
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun 	/*
2247*4882a593Smuzhiyun 	 * Check for a NOP for the case when the datum to write is already
2248*4882a593Smuzhiyun 	 * present - it saves time and works around buggy chips that corrupt
2249*4882a593Smuzhiyun 	 * data at other locations when 0xff is written to a location that
2250*4882a593Smuzhiyun 	 * already contains 0xff.
2251*4882a593Smuzhiyun 	 */
2252*4882a593Smuzhiyun 	oldd = map_read(map, adr);
2253*4882a593Smuzhiyun 	if (map_word_equal(map, oldd, datum)) {
2254*4882a593Smuzhiyun 		pr_debug("MTD %s(): NOP\n", __func__);
2255*4882a593Smuzhiyun 		goto op_done;
2256*4882a593Smuzhiyun 	}
2257*4882a593Smuzhiyun 
2258*4882a593Smuzhiyun 	ENABLE_VPP(map);
2259*4882a593Smuzhiyun 
2260*4882a593Smuzhiyun retry:
2261*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2262*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2263*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2264*4882a593Smuzhiyun 	map_write(map, datum, adr);
2265*4882a593Smuzhiyun 
2266*4882a593Smuzhiyun 	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
2267*4882a593Smuzhiyun 		if (chip_ready(map, chip, adr, NULL))
2268*4882a593Smuzhiyun 			break;
2269*4882a593Smuzhiyun 
2270*4882a593Smuzhiyun 		udelay(1);
2271*4882a593Smuzhiyun 	}
2272*4882a593Smuzhiyun 
2273*4882a593Smuzhiyun 	if (!chip_ready(map, chip, adr, &datum) ||
2274*4882a593Smuzhiyun 	    cfi_check_err_status(map, chip, adr)) {
2275*4882a593Smuzhiyun 		/* reset on all failures. */
2276*4882a593Smuzhiyun 		map_write(map, CMD(0xF0), chip->start);
2277*4882a593Smuzhiyun 		/* FIXME - should have reset delay before continuing */
2278*4882a593Smuzhiyun 
2279*4882a593Smuzhiyun 		if (++retry_cnt <= MAX_RETRIES)
2280*4882a593Smuzhiyun 			goto retry;
2281*4882a593Smuzhiyun 
2282*4882a593Smuzhiyun 		ret = -EIO;
2283*4882a593Smuzhiyun 	}
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun op_done:
2286*4882a593Smuzhiyun 	DISABLE_VPP(map);
2287*4882a593Smuzhiyun 	return ret;
2288*4882a593Smuzhiyun }
2289*4882a593Smuzhiyun 
2290*4882a593Smuzhiyun /*
2291*4882a593Smuzhiyun  * Write out some data during a kernel panic
2292*4882a593Smuzhiyun  *
2293*4882a593Smuzhiyun  * This is used by the mtdoops driver to save the dying messages from a
2294*4882a593Smuzhiyun  * kernel which has panic'd.
2295*4882a593Smuzhiyun  *
2296*4882a593Smuzhiyun  * This routine ignores all of the locking used throughout the rest of the
2297*4882a593Smuzhiyun  * driver, in order to ensure that the data gets written out no matter what
2298*4882a593Smuzhiyun  * state this driver (and the flash chip itself) was in when the kernel crashed.
2299*4882a593Smuzhiyun  *
2300*4882a593Smuzhiyun  * The implementation of this routine is intentionally similar to
2301*4882a593Smuzhiyun  * cfi_amdstd_write_words(), in order to ease code maintenance.
2302*4882a593Smuzhiyun  */
2303*4882a593Smuzhiyun static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
2304*4882a593Smuzhiyun 				  size_t *retlen, const u_char *buf)
2305*4882a593Smuzhiyun {
2306*4882a593Smuzhiyun 	struct map_info *map = mtd->priv;
2307*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
2308*4882a593Smuzhiyun 	unsigned long ofs, chipstart;
2309*4882a593Smuzhiyun 	int ret;
2310*4882a593Smuzhiyun 	int chipnum;
2311*4882a593Smuzhiyun 
2312*4882a593Smuzhiyun 	chipnum = to >> cfi->chipshift;
2313*4882a593Smuzhiyun 	ofs = to - (chipnum << cfi->chipshift);
2314*4882a593Smuzhiyun 	chipstart = cfi->chips[chipnum].start;
2315*4882a593Smuzhiyun 
2316*4882a593Smuzhiyun 	/* If it's not bus aligned, do the first byte write */
2317*4882a593Smuzhiyun 	if (ofs & (map_bankwidth(map) - 1)) {
2318*4882a593Smuzhiyun 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
2319*4882a593Smuzhiyun 		int i = ofs - bus_ofs;
2320*4882a593Smuzhiyun 		int n = 0;
2321*4882a593Smuzhiyun 		map_word tmp_buf;
2322*4882a593Smuzhiyun 
2323*4882a593Smuzhiyun 		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
2324*4882a593Smuzhiyun 		if (ret)
2325*4882a593Smuzhiyun 			return ret;
2326*4882a593Smuzhiyun 
2327*4882a593Smuzhiyun 		/* Load 'tmp_buf' with old contents of flash */
2328*4882a593Smuzhiyun 		tmp_buf = map_read(map, bus_ofs + chipstart);
2329*4882a593Smuzhiyun 
2330*4882a593Smuzhiyun 		/* Number of bytes to copy from buffer */
2331*4882a593Smuzhiyun 		n = min_t(int, len, map_bankwidth(map) - i);
2332*4882a593Smuzhiyun 
2333*4882a593Smuzhiyun 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
2334*4882a593Smuzhiyun 
2335*4882a593Smuzhiyun 		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2336*4882a593Smuzhiyun 					     bus_ofs, tmp_buf);
2337*4882a593Smuzhiyun 		if (ret)
2338*4882a593Smuzhiyun 			return ret;
2339*4882a593Smuzhiyun 
2340*4882a593Smuzhiyun 		ofs += n;
2341*4882a593Smuzhiyun 		buf += n;
2342*4882a593Smuzhiyun 		(*retlen) += n;
2343*4882a593Smuzhiyun 		len -= n;
2344*4882a593Smuzhiyun 
2345*4882a593Smuzhiyun 		if (ofs >> cfi->chipshift) {
2346*4882a593Smuzhiyun 			chipnum++;
2347*4882a593Smuzhiyun 			ofs = 0;
2348*4882a593Smuzhiyun 			if (chipnum == cfi->numchips)
2349*4882a593Smuzhiyun 				return 0;
2350*4882a593Smuzhiyun 		}
2351*4882a593Smuzhiyun 	}
2352*4882a593Smuzhiyun 
2353*4882a593Smuzhiyun 	/* We are now aligned, write as much as possible */
2354*4882a593Smuzhiyun 	while (len >= map_bankwidth(map)) {
2355*4882a593Smuzhiyun 		map_word datum;
2356*4882a593Smuzhiyun 
2357*4882a593Smuzhiyun 		datum = map_word_load(map, buf);
2358*4882a593Smuzhiyun 
2359*4882a593Smuzhiyun 		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2360*4882a593Smuzhiyun 					     ofs, datum);
2361*4882a593Smuzhiyun 		if (ret)
2362*4882a593Smuzhiyun 			return ret;
2363*4882a593Smuzhiyun 
2364*4882a593Smuzhiyun 		ofs += map_bankwidth(map);
2365*4882a593Smuzhiyun 		buf += map_bankwidth(map);
2366*4882a593Smuzhiyun 		(*retlen) += map_bankwidth(map);
2367*4882a593Smuzhiyun 		len -= map_bankwidth(map);
2368*4882a593Smuzhiyun 
2369*4882a593Smuzhiyun 		if (ofs >> cfi->chipshift) {
2370*4882a593Smuzhiyun 			chipnum++;
2371*4882a593Smuzhiyun 			ofs = 0;
2372*4882a593Smuzhiyun 			if (chipnum == cfi->numchips)
2373*4882a593Smuzhiyun 				return 0;
2374*4882a593Smuzhiyun 
2375*4882a593Smuzhiyun 			chipstart = cfi->chips[chipnum].start;
2376*4882a593Smuzhiyun 		}
2377*4882a593Smuzhiyun 	}
2378*4882a593Smuzhiyun 
2379*4882a593Smuzhiyun 	/* Write the trailing bytes if any */
2380*4882a593Smuzhiyun 	if (len & (map_bankwidth(map) - 1)) {
2381*4882a593Smuzhiyun 		map_word tmp_buf;
2382*4882a593Smuzhiyun 
2383*4882a593Smuzhiyun 		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
2384*4882a593Smuzhiyun 		if (ret)
2385*4882a593Smuzhiyun 			return ret;
2386*4882a593Smuzhiyun 
2387*4882a593Smuzhiyun 		tmp_buf = map_read(map, ofs + chipstart);
2388*4882a593Smuzhiyun 
2389*4882a593Smuzhiyun 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
2390*4882a593Smuzhiyun 
2391*4882a593Smuzhiyun 		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2392*4882a593Smuzhiyun 					     ofs, tmp_buf);
2393*4882a593Smuzhiyun 		if (ret)
2394*4882a593Smuzhiyun 			return ret;
2395*4882a593Smuzhiyun 
2396*4882a593Smuzhiyun 		(*retlen) += len;
2397*4882a593Smuzhiyun 	}
2398*4882a593Smuzhiyun 
2399*4882a593Smuzhiyun 	return 0;
2400*4882a593Smuzhiyun }
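/*
 * Editor's note: a hedged sketch of how an mtdoops-style caller reaches
 * the panic path above. It goes through the generic mtd_panic_write()
 * wrapper, which dispatches to cfi_amdstd_panic_write() when this driver
 * is bound. The helper name and offsets are illustrative assumptions.
 */
#include <linux/mtd/mtd.h>
#include <linux/printk.h>

static void example_save_oops(struct mtd_info *mtd, loff_t record_ofs,
			      const u_char *log, size_t log_len)
{
	size_t retlen = 0;
	int ret;

	/* no locking, no sleeping: we may already be in panic context */
	ret = mtd_panic_write(mtd, record_ofs, log_len, &retlen, log);
	if (ret || retlen != log_len)
		pr_emerg("panic write failed: %d (%zu/%zu bytes)\n",
			 ret, retlen, log_len);
}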
2401*4882a593Smuzhiyun 
2402*4882a593Smuzhiyun 
2403*4882a593Smuzhiyun /*
2404*4882a593Smuzhiyun  * Handle devices with one erase region, that only implement
2405*4882a593Smuzhiyun  * the chip erase command.
2406*4882a593Smuzhiyun  */
2407*4882a593Smuzhiyun static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2408*4882a593Smuzhiyun {
2409*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
2410*4882a593Smuzhiyun 	unsigned long timeo = jiffies + HZ;
2411*4882a593Smuzhiyun 	unsigned long int adr;
2412*4882a593Smuzhiyun 	DECLARE_WAITQUEUE(wait, current);
2413*4882a593Smuzhiyun 	int ret;
2414*4882a593Smuzhiyun 	int retry_cnt = 0;
2415*4882a593Smuzhiyun 	map_word datum = map_word_ff(map);
2416*4882a593Smuzhiyun 
2417*4882a593Smuzhiyun 	adr = cfi->addr_unlock1;
2418*4882a593Smuzhiyun 
2419*4882a593Smuzhiyun 	mutex_lock(&chip->mutex);
2420*4882a593Smuzhiyun 	ret = get_chip(map, chip, adr, FL_ERASING);
2421*4882a593Smuzhiyun 	if (ret) {
2422*4882a593Smuzhiyun 		mutex_unlock(&chip->mutex);
2423*4882a593Smuzhiyun 		return ret;
2424*4882a593Smuzhiyun 	}
2425*4882a593Smuzhiyun 
2426*4882a593Smuzhiyun 	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2427*4882a593Smuzhiyun 	       __func__, chip->start);
2428*4882a593Smuzhiyun 
2429*4882a593Smuzhiyun 	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
2430*4882a593Smuzhiyun 	ENABLE_VPP(map);
2431*4882a593Smuzhiyun 	xip_disable(map, chip, adr);
2432*4882a593Smuzhiyun 
2433*4882a593Smuzhiyun  retry:
2434*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2435*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2436*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2437*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2438*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2439*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun 	chip->state = FL_ERASING;
2442*4882a593Smuzhiyun 	chip->erase_suspended = 0;
2443*4882a593Smuzhiyun 	chip->in_progress_block_addr = adr;
2444*4882a593Smuzhiyun 	chip->in_progress_block_mask = ~(map->size - 1);
2445*4882a593Smuzhiyun 
2446*4882a593Smuzhiyun 	INVALIDATE_CACHE_UDELAY(map, chip,
2447*4882a593Smuzhiyun 				adr, map->size,
2448*4882a593Smuzhiyun 				chip->erase_time*500);
2449*4882a593Smuzhiyun 
2450*4882a593Smuzhiyun 	timeo = jiffies + (HZ*20);
2451*4882a593Smuzhiyun 
2452*4882a593Smuzhiyun 	for (;;) {
2453*4882a593Smuzhiyun 		if (chip->state != FL_ERASING) {
2454*4882a593Smuzhiyun 			/* Someone's suspended the erase. Sleep */
2455*4882a593Smuzhiyun 			set_current_state(TASK_UNINTERRUPTIBLE);
2456*4882a593Smuzhiyun 			add_wait_queue(&chip->wq, &wait);
2457*4882a593Smuzhiyun 			mutex_unlock(&chip->mutex);
2458*4882a593Smuzhiyun 			schedule();
2459*4882a593Smuzhiyun 			remove_wait_queue(&chip->wq, &wait);
2460*4882a593Smuzhiyun 			mutex_lock(&chip->mutex);
2461*4882a593Smuzhiyun 			continue;
2462*4882a593Smuzhiyun 		}
2463*4882a593Smuzhiyun 		if (chip->erase_suspended) {
2464*4882a593Smuzhiyun 			/* This erase was suspended and resumed.
2465*4882a593Smuzhiyun 			   Adjust the timeout */
2466*4882a593Smuzhiyun 			timeo = jiffies + (HZ*20); /* FIXME */
2467*4882a593Smuzhiyun 			chip->erase_suspended = 0;
2468*4882a593Smuzhiyun 		}
2469*4882a593Smuzhiyun 
2470*4882a593Smuzhiyun 		if (chip_ready(map, chip, adr, &datum)) {
2471*4882a593Smuzhiyun 			if (cfi_check_err_status(map, chip, adr))
2472*4882a593Smuzhiyun 				ret = -EIO;
2473*4882a593Smuzhiyun 			break;
2474*4882a593Smuzhiyun 		}
2475*4882a593Smuzhiyun 
2476*4882a593Smuzhiyun 		if (time_after(jiffies, timeo)) {
2477*4882a593Smuzhiyun 			printk(KERN_WARNING "MTD %s(): software timeout\n",
2478*4882a593Smuzhiyun 			       __func__);
2479*4882a593Smuzhiyun 			ret = -EIO;
2480*4882a593Smuzhiyun 			break;
2481*4882a593Smuzhiyun 		}
2482*4882a593Smuzhiyun 
2483*4882a593Smuzhiyun 		/* Latency issues. Drop the lock, wait a while and retry */
2484*4882a593Smuzhiyun 		UDELAY(map, chip, adr, 1000000/HZ);
2485*4882a593Smuzhiyun 	}
2486*4882a593Smuzhiyun 	/* Did we succeed? */
2487*4882a593Smuzhiyun 	if (ret) {
2488*4882a593Smuzhiyun 		/* reset on all failures. */
2489*4882a593Smuzhiyun 		map_write(map, CMD(0xF0), chip->start);
2490*4882a593Smuzhiyun 		/* FIXME - should have reset delay before continuing */
2491*4882a593Smuzhiyun 
2492*4882a593Smuzhiyun 		if (++retry_cnt <= MAX_RETRIES) {
2493*4882a593Smuzhiyun 			ret = 0;
2494*4882a593Smuzhiyun 			goto retry;
2495*4882a593Smuzhiyun 		}
2496*4882a593Smuzhiyun 	}
2497*4882a593Smuzhiyun 
2498*4882a593Smuzhiyun 	chip->state = FL_READY;
2499*4882a593Smuzhiyun 	xip_enable(map, chip, adr);
2500*4882a593Smuzhiyun 	DISABLE_VPP(map);
2501*4882a593Smuzhiyun 	put_chip(map, chip, adr);
2502*4882a593Smuzhiyun 	mutex_unlock(&chip->mutex);
2503*4882a593Smuzhiyun 
2504*4882a593Smuzhiyun 	return ret;
2505*4882a593Smuzhiyun }
2506*4882a593Smuzhiyun 
2507*4882a593Smuzhiyun 
2508*4882a593Smuzhiyun static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
2509*4882a593Smuzhiyun {
2510*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
2511*4882a593Smuzhiyun 	unsigned long timeo = jiffies + HZ;
2512*4882a593Smuzhiyun 	DECLARE_WAITQUEUE(wait, current);
2513*4882a593Smuzhiyun 	int ret;
2514*4882a593Smuzhiyun 	int retry_cnt = 0;
2515*4882a593Smuzhiyun 	map_word datum = map_word_ff(map);
2516*4882a593Smuzhiyun 
2517*4882a593Smuzhiyun 	adr += chip->start;
2518*4882a593Smuzhiyun 
2519*4882a593Smuzhiyun 	mutex_lock(&chip->mutex);
2520*4882a593Smuzhiyun 	ret = get_chip(map, chip, adr, FL_ERASING);
2521*4882a593Smuzhiyun 	if (ret) {
2522*4882a593Smuzhiyun 		mutex_unlock(&chip->mutex);
2523*4882a593Smuzhiyun 		return ret;
2524*4882a593Smuzhiyun 	}
2525*4882a593Smuzhiyun 
2526*4882a593Smuzhiyun 	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2527*4882a593Smuzhiyun 		 __func__, adr);
2528*4882a593Smuzhiyun 
2529*4882a593Smuzhiyun 	XIP_INVAL_CACHED_RANGE(map, adr, len);
2530*4882a593Smuzhiyun 	ENABLE_VPP(map);
2531*4882a593Smuzhiyun 	xip_disable(map, chip, adr);
2532*4882a593Smuzhiyun 
2533*4882a593Smuzhiyun  retry:
2534*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2535*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2536*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2537*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2538*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2539*4882a593Smuzhiyun 	map_write(map, cfi->sector_erase_cmd, adr);
2540*4882a593Smuzhiyun 
2541*4882a593Smuzhiyun 	chip->state = FL_ERASING;
2542*4882a593Smuzhiyun 	chip->erase_suspended = 0;
2543*4882a593Smuzhiyun 	chip->in_progress_block_addr = adr;
2544*4882a593Smuzhiyun 	chip->in_progress_block_mask = ~(len - 1);
2545*4882a593Smuzhiyun 
2546*4882a593Smuzhiyun 	INVALIDATE_CACHE_UDELAY(map, chip,
2547*4882a593Smuzhiyun 				adr, len,
2548*4882a593Smuzhiyun 				chip->erase_time*500);
2549*4882a593Smuzhiyun 
2550*4882a593Smuzhiyun 	timeo = jiffies + (HZ*20);
2551*4882a593Smuzhiyun 
2552*4882a593Smuzhiyun 	for (;;) {
2553*4882a593Smuzhiyun 		if (chip->state != FL_ERASING) {
2554*4882a593Smuzhiyun 			/* Someone's suspended the erase. Sleep */
2555*4882a593Smuzhiyun 			set_current_state(TASK_UNINTERRUPTIBLE);
2556*4882a593Smuzhiyun 			add_wait_queue(&chip->wq, &wait);
2557*4882a593Smuzhiyun 			mutex_unlock(&chip->mutex);
2558*4882a593Smuzhiyun 			schedule();
2559*4882a593Smuzhiyun 			remove_wait_queue(&chip->wq, &wait);
2560*4882a593Smuzhiyun 			mutex_lock(&chip->mutex);
2561*4882a593Smuzhiyun 			continue;
2562*4882a593Smuzhiyun 		}
2563*4882a593Smuzhiyun 		if (chip->erase_suspended) {
2564*4882a593Smuzhiyun 			/* This erase was suspended and resumed.
2565*4882a593Smuzhiyun 			   Adjust the timeout */
2566*4882a593Smuzhiyun 			timeo = jiffies + (HZ*20); /* FIXME */
2567*4882a593Smuzhiyun 			chip->erase_suspended = 0;
2568*4882a593Smuzhiyun 		}
2569*4882a593Smuzhiyun 
2570*4882a593Smuzhiyun 		if (chip_ready(map, chip, adr, &datum)) {
2571*4882a593Smuzhiyun 			if (cfi_check_err_status(map, chip, adr))
2572*4882a593Smuzhiyun 				ret = -EIO;
2573*4882a593Smuzhiyun 			break;
2574*4882a593Smuzhiyun 		}
2575*4882a593Smuzhiyun 
2576*4882a593Smuzhiyun 		if (time_after(jiffies, timeo)) {
2577*4882a593Smuzhiyun 			printk(KERN_WARNING "MTD %s(): software timeout\n",
2578*4882a593Smuzhiyun 			       __func__);
2579*4882a593Smuzhiyun 			ret = -EIO;
2580*4882a593Smuzhiyun 			break;
2581*4882a593Smuzhiyun 		}
2582*4882a593Smuzhiyun 
2583*4882a593Smuzhiyun 		/* Latency issues. Drop the lock, wait a while and retry */
2584*4882a593Smuzhiyun 		UDELAY(map, chip, adr, 1000000/HZ);
2585*4882a593Smuzhiyun 	}
2586*4882a593Smuzhiyun 	/* Did we succeed? */
2587*4882a593Smuzhiyun 	if (ret) {
2588*4882a593Smuzhiyun 		/* reset on all failures. */
2589*4882a593Smuzhiyun 		map_write(map, CMD(0xF0), chip->start);
2590*4882a593Smuzhiyun 		/* FIXME - should have reset delay before continuing */
2591*4882a593Smuzhiyun 
2592*4882a593Smuzhiyun 		if (++retry_cnt <= MAX_RETRIES) {
2593*4882a593Smuzhiyun 			ret = 0;
2594*4882a593Smuzhiyun 			goto retry;
2595*4882a593Smuzhiyun 		}
2596*4882a593Smuzhiyun 	}
2597*4882a593Smuzhiyun 
2598*4882a593Smuzhiyun 	chip->state = FL_READY;
2599*4882a593Smuzhiyun 	xip_enable(map, chip, adr);
2600*4882a593Smuzhiyun 	DISABLE_VPP(map);
2601*4882a593Smuzhiyun 	put_chip(map, chip, adr);
2602*4882a593Smuzhiyun 	mutex_unlock(&chip->mutex);
2603*4882a593Smuzhiyun 	return ret;
2604*4882a593Smuzhiyun }
2605*4882a593Smuzhiyun 
2606*4882a593Smuzhiyun 
2607*4882a593Smuzhiyun static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2608*4882a593Smuzhiyun {
2609*4882a593Smuzhiyun 	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2610*4882a593Smuzhiyun 				instr->len, NULL);
2611*4882a593Smuzhiyun }
2612*4882a593Smuzhiyun 
2613*4882a593Smuzhiyun 
2614*4882a593Smuzhiyun static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2615*4882a593Smuzhiyun {
2616*4882a593Smuzhiyun 	struct map_info *map = mtd->priv;
2617*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
2618*4882a593Smuzhiyun 
2619*4882a593Smuzhiyun 	if (instr->addr != 0)
2620*4882a593Smuzhiyun 		return -EINVAL;
2621*4882a593Smuzhiyun 
2622*4882a593Smuzhiyun 	if (instr->len != mtd->size)
2623*4882a593Smuzhiyun 		return -EINVAL;
2624*4882a593Smuzhiyun 
2625*4882a593Smuzhiyun 	return do_erase_chip(map, &cfi->chips[0]);
2626*4882a593Smuzhiyun }
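/*
 * Editor's note: a hedged sketch of driving the erase paths above from the
 * generic MTD API. cfi_amdstd_erase_chip() is only reached for an erase
 * covering the entire device (addr == 0, len == mtd->size); anything else
 * on a varsize device goes through do_erase_oneblock() per sector and must
 * be erase-block aligned. The helper name is an illustrative assumption.
 */
#include <linux/mtd/mtd.h>

static int example_erase_whole_chip(struct mtd_info *mtd)
{
	struct erase_info instr = {
		.addr = 0,		/* must start at 0 ... */
		.len  = mtd->size,	/* ... and cover the full device */
	};

	return mtd_erase(mtd, &instr);
}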
2627*4882a593Smuzhiyun 
2628*4882a593Smuzhiyun static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2629*4882a593Smuzhiyun 			 unsigned long adr, int len, void *thunk)
2630*4882a593Smuzhiyun {
2631*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
2632*4882a593Smuzhiyun 	int ret;
2633*4882a593Smuzhiyun 
2634*4882a593Smuzhiyun 	mutex_lock(&chip->mutex);
2635*4882a593Smuzhiyun 	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2636*4882a593Smuzhiyun 	if (ret)
2637*4882a593Smuzhiyun 		goto out_unlock;
2638*4882a593Smuzhiyun 	chip->state = FL_LOCKING;
2639*4882a593Smuzhiyun 
2640*4882a593Smuzhiyun 	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2641*4882a593Smuzhiyun 
2642*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2643*4882a593Smuzhiyun 			 cfi->device_type, NULL);
2644*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2645*4882a593Smuzhiyun 			 cfi->device_type, NULL);
2646*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2647*4882a593Smuzhiyun 			 cfi->device_type, NULL);
2648*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2649*4882a593Smuzhiyun 			 cfi->device_type, NULL);
2650*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2651*4882a593Smuzhiyun 			 cfi->device_type, NULL);
2652*4882a593Smuzhiyun 	map_write(map, CMD(0x40), chip->start + adr);
2653*4882a593Smuzhiyun 
2654*4882a593Smuzhiyun 	chip->state = FL_READY;
2655*4882a593Smuzhiyun 	put_chip(map, chip, adr + chip->start);
2656*4882a593Smuzhiyun 	ret = 0;
2657*4882a593Smuzhiyun 
2658*4882a593Smuzhiyun out_unlock:
2659*4882a593Smuzhiyun 	mutex_unlock(&chip->mutex);
2660*4882a593Smuzhiyun 	return ret;
2661*4882a593Smuzhiyun }
2662*4882a593Smuzhiyun 
2663*4882a593Smuzhiyun static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2664*4882a593Smuzhiyun 			   unsigned long adr, int len, void *thunk)
2665*4882a593Smuzhiyun {
2666*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
2667*4882a593Smuzhiyun 	int ret;
2668*4882a593Smuzhiyun 
2669*4882a593Smuzhiyun 	mutex_lock(&chip->mutex);
2670*4882a593Smuzhiyun 	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2671*4882a593Smuzhiyun 	if (ret)
2672*4882a593Smuzhiyun 		goto out_unlock;
2673*4882a593Smuzhiyun 	chip->state = FL_UNLOCKING;
2674*4882a593Smuzhiyun 
2675*4882a593Smuzhiyun 	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);
2676*4882a593Smuzhiyun 
2677*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2678*4882a593Smuzhiyun 			 cfi->device_type, NULL);
2679*4882a593Smuzhiyun 	map_write(map, CMD(0x70), adr);
2680*4882a593Smuzhiyun 
2681*4882a593Smuzhiyun 	chip->state = FL_READY;
2682*4882a593Smuzhiyun 	put_chip(map, chip, adr + chip->start);
2683*4882a593Smuzhiyun 	ret = 0;
2684*4882a593Smuzhiyun 
2685*4882a593Smuzhiyun out_unlock:
2686*4882a593Smuzhiyun 	mutex_unlock(&chip->mutex);
2687*4882a593Smuzhiyun 	return ret;
2688*4882a593Smuzhiyun }
2689*4882a593Smuzhiyun 
2690*4882a593Smuzhiyun static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2691*4882a593Smuzhiyun {
2692*4882a593Smuzhiyun 	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2693*4882a593Smuzhiyun }
2694*4882a593Smuzhiyun 
2695*4882a593Smuzhiyun static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2696*4882a593Smuzhiyun {
2697*4882a593Smuzhiyun 	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2698*4882a593Smuzhiyun }
2699*4882a593Smuzhiyun 
2700*4882a593Smuzhiyun /*
2701*4882a593Smuzhiyun  * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
2702*4882a593Smuzhiyun  */
2703*4882a593Smuzhiyun 
2704*4882a593Smuzhiyun struct ppb_lock {
2705*4882a593Smuzhiyun 	struct flchip *chip;
2706*4882a593Smuzhiyun 	unsigned long adr;
2707*4882a593Smuzhiyun 	int locked;
2708*4882a593Smuzhiyun };
2709*4882a593Smuzhiyun 
2710*4882a593Smuzhiyun #define DO_XXLOCK_ONEBLOCK_LOCK		((void *)1)
2711*4882a593Smuzhiyun #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *)2)
2712*4882a593Smuzhiyun #define DO_XXLOCK_ONEBLOCK_GETLOCK	((void *)3)
2713*4882a593Smuzhiyun 
2714*4882a593Smuzhiyun static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2715*4882a593Smuzhiyun 					struct flchip *chip,
2716*4882a593Smuzhiyun 					unsigned long adr, int len, void *thunk)
2717*4882a593Smuzhiyun {
2718*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
2719*4882a593Smuzhiyun 	unsigned long timeo;
2720*4882a593Smuzhiyun 	int ret;
2721*4882a593Smuzhiyun 
2722*4882a593Smuzhiyun 	adr += chip->start;
2723*4882a593Smuzhiyun 	mutex_lock(&chip->mutex);
2724*4882a593Smuzhiyun 	ret = get_chip(map, chip, adr, FL_LOCKING);
2725*4882a593Smuzhiyun 	if (ret) {
2726*4882a593Smuzhiyun 		mutex_unlock(&chip->mutex);
2727*4882a593Smuzhiyun 		return ret;
2728*4882a593Smuzhiyun 	}
2729*4882a593Smuzhiyun 
2730*4882a593Smuzhiyun 	pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
2731*4882a593Smuzhiyun 
2732*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2733*4882a593Smuzhiyun 			 cfi->device_type, NULL);
2734*4882a593Smuzhiyun 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2735*4882a593Smuzhiyun 			 cfi->device_type, NULL);
2736*4882a593Smuzhiyun 	/* PPB entry command */
2737*4882a593Smuzhiyun 	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
2738*4882a593Smuzhiyun 			 cfi->device_type, NULL);
2739*4882a593Smuzhiyun 
2740*4882a593Smuzhiyun 	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2741*4882a593Smuzhiyun 		chip->state = FL_LOCKING;
2742*4882a593Smuzhiyun 		map_write(map, CMD(0xA0), adr);
2743*4882a593Smuzhiyun 		map_write(map, CMD(0x00), adr);
2744*4882a593Smuzhiyun 	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2745*4882a593Smuzhiyun 		/*
2746*4882a593Smuzhiyun 		 * Unlocking of one specific sector is not supported, so we
2747*4882a593Smuzhiyun 		 * have to unlock all sectors of this device instead
2748*4882a593Smuzhiyun 		 */
2749*4882a593Smuzhiyun 		chip->state = FL_UNLOCKING;
2750*4882a593Smuzhiyun 		map_write(map, CMD(0x80), chip->start);
2751*4882a593Smuzhiyun 		map_write(map, CMD(0x30), chip->start);
2752*4882a593Smuzhiyun 	} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
2753*4882a593Smuzhiyun 		chip->state = FL_JEDEC_QUERY;
2754*4882a593Smuzhiyun 		/* Return locked status: 0->locked, 1->unlocked */
2755*4882a593Smuzhiyun 		ret = !cfi_read_query(map, adr);
2756*4882a593Smuzhiyun 	} else
2757*4882a593Smuzhiyun 		BUG();
2758*4882a593Smuzhiyun 
2759*4882a593Smuzhiyun 	/*
2760*4882a593Smuzhiyun 	 * Wait a while, as unlocking all sectors can take quite long
2761*4882a593Smuzhiyun 	 */
2762*4882a593Smuzhiyun 	timeo = jiffies + msecs_to_jiffies(2000);	/* 2s max (un)locking */
2763*4882a593Smuzhiyun 	for (;;) {
2764*4882a593Smuzhiyun 		if (chip_ready(map, chip, adr, NULL))
2765*4882a593Smuzhiyun 			break;
2766*4882a593Smuzhiyun 
2767*4882a593Smuzhiyun 		if (time_after(jiffies, timeo)) {
2768*4882a593Smuzhiyun 			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
2769*4882a593Smuzhiyun 			ret = -EIO;
2770*4882a593Smuzhiyun 			break;
2771*4882a593Smuzhiyun 		}
2772*4882a593Smuzhiyun 
2773*4882a593Smuzhiyun 		UDELAY(map, chip, adr, 1);
2774*4882a593Smuzhiyun 	}
2775*4882a593Smuzhiyun 
2776*4882a593Smuzhiyun 	/* Exit BC commands */
2777*4882a593Smuzhiyun 	map_write(map, CMD(0x90), chip->start);
2778*4882a593Smuzhiyun 	map_write(map, CMD(0x00), chip->start);
2779*4882a593Smuzhiyun 
2780*4882a593Smuzhiyun 	chip->state = FL_READY;
2781*4882a593Smuzhiyun 	put_chip(map, chip, adr);
2782*4882a593Smuzhiyun 	mutex_unlock(&chip->mutex);
2783*4882a593Smuzhiyun 
2784*4882a593Smuzhiyun 	return ret;
2785*4882a593Smuzhiyun }
2786*4882a593Smuzhiyun 
2787*4882a593Smuzhiyun static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
2788*4882a593Smuzhiyun 				       uint64_t len)
2789*4882a593Smuzhiyun {
2790*4882a593Smuzhiyun 	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2791*4882a593Smuzhiyun 				DO_XXLOCK_ONEBLOCK_LOCK);
2792*4882a593Smuzhiyun }
2793*4882a593Smuzhiyun 
2794*4882a593Smuzhiyun static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2795*4882a593Smuzhiyun 					 uint64_t len)
2796*4882a593Smuzhiyun {
2797*4882a593Smuzhiyun 	struct mtd_erase_region_info *regions = mtd->eraseregions;
2798*4882a593Smuzhiyun 	struct map_info *map = mtd->priv;
2799*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
2800*4882a593Smuzhiyun 	struct ppb_lock *sect;
2801*4882a593Smuzhiyun 	unsigned long adr;
2802*4882a593Smuzhiyun 	loff_t offset;
2803*4882a593Smuzhiyun 	uint64_t length;
2804*4882a593Smuzhiyun 	int chipnum;
2805*4882a593Smuzhiyun 	int i;
2806*4882a593Smuzhiyun 	int sectors;
2807*4882a593Smuzhiyun 	int ret;
2808*4882a593Smuzhiyun 	int max_sectors;
2809*4882a593Smuzhiyun 
2810*4882a593Smuzhiyun 	/*
2811*4882a593Smuzhiyun 	 * PPB unlocking always unlocks all sectors of the flash chip.
2812*4882a593Smuzhiyun 	 * We need to re-lock all previously locked sectors. So let's
2813*4882a593Smuzhiyun 	 * first check the locking status of all sectors and save
2814*4882a593Smuzhiyun 	 * it for future use.
2815*4882a593Smuzhiyun 	 */
2816*4882a593Smuzhiyun 	max_sectors = 0;
2817*4882a593Smuzhiyun 	for (i = 0; i < mtd->numeraseregions; i++)
2818*4882a593Smuzhiyun 		max_sectors += regions[i].numblocks;
2819*4882a593Smuzhiyun 
2820*4882a593Smuzhiyun 	sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
2821*4882a593Smuzhiyun 	if (!sect)
2822*4882a593Smuzhiyun 		return -ENOMEM;
2823*4882a593Smuzhiyun 
2824*4882a593Smuzhiyun 	/*
2825*4882a593Smuzhiyun 	 * This code to walk all sectors is a slightly modified version
2826*4882a593Smuzhiyun 	 * of the cfi_varsize_frob() code.
2827*4882a593Smuzhiyun 	 */
2828*4882a593Smuzhiyun 	i = 0;
2829*4882a593Smuzhiyun 	chipnum = 0;
2830*4882a593Smuzhiyun 	adr = 0;
2831*4882a593Smuzhiyun 	sectors = 0;
2832*4882a593Smuzhiyun 	offset = 0;
2833*4882a593Smuzhiyun 	length = mtd->size;
2834*4882a593Smuzhiyun 
2835*4882a593Smuzhiyun 	while (length) {
2836*4882a593Smuzhiyun 		int size = regions[i].erasesize;
2837*4882a593Smuzhiyun 
2838*4882a593Smuzhiyun 		/*
2839*4882a593Smuzhiyun 		 * Only query sectors outside the range to be unlocked. The
2840*4882a593Smuzhiyun 		 * sectors inside it will be unlocked anyway, so keep their
2841*4882a593Smuzhiyun 		 * status at "unlocked" (locked=0) for the final re-locking.
2842*4882a593Smuzhiyun 		 */
2843*4882a593Smuzhiyun 		if ((offset < ofs) || (offset >= (ofs + len))) {
2844*4882a593Smuzhiyun 			sect[sectors].chip = &cfi->chips[chipnum];
2845*4882a593Smuzhiyun 			sect[sectors].adr = adr;
2846*4882a593Smuzhiyun 			sect[sectors].locked = do_ppb_xxlock(
2847*4882a593Smuzhiyun 				map, &cfi->chips[chipnum], adr, 0,
2848*4882a593Smuzhiyun 				DO_XXLOCK_ONEBLOCK_GETLOCK);
2849*4882a593Smuzhiyun 		}
2850*4882a593Smuzhiyun 
2851*4882a593Smuzhiyun 		adr += size;
2852*4882a593Smuzhiyun 		offset += size;
2853*4882a593Smuzhiyun 		length -= size;
2854*4882a593Smuzhiyun 
2855*4882a593Smuzhiyun 		if (offset == regions[i].offset + size * regions[i].numblocks)
2856*4882a593Smuzhiyun 			i++;
2857*4882a593Smuzhiyun 
2858*4882a593Smuzhiyun 		if (adr >> cfi->chipshift) {
2859*4882a593Smuzhiyun 			if (offset >= (ofs + len))
2860*4882a593Smuzhiyun 				break;
2861*4882a593Smuzhiyun 			adr = 0;
2862*4882a593Smuzhiyun 			chipnum++;
2863*4882a593Smuzhiyun 
2864*4882a593Smuzhiyun 			if (chipnum >= cfi->numchips)
2865*4882a593Smuzhiyun 				break;
2866*4882a593Smuzhiyun 		}
2867*4882a593Smuzhiyun 
2868*4882a593Smuzhiyun 		sectors++;
2869*4882a593Smuzhiyun 		if (sectors >= max_sectors) {
2870*4882a593Smuzhiyun 			printk(KERN_ERR "Only %d sectors supported for PPB locking!\n",
2871*4882a593Smuzhiyun 			       max_sectors);
2872*4882a593Smuzhiyun 			kfree(sect);
2873*4882a593Smuzhiyun 			return -EINVAL;
2874*4882a593Smuzhiyun 		}
2875*4882a593Smuzhiyun 	}
2876*4882a593Smuzhiyun 
2877*4882a593Smuzhiyun 	/* Now unlock the whole chip */
2878*4882a593Smuzhiyun 	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2879*4882a593Smuzhiyun 			       DO_XXLOCK_ONEBLOCK_UNLOCK);
2880*4882a593Smuzhiyun 	if (ret) {
2881*4882a593Smuzhiyun 		kfree(sect);
2882*4882a593Smuzhiyun 		return ret;
2883*4882a593Smuzhiyun 	}
2884*4882a593Smuzhiyun 
2885*4882a593Smuzhiyun 	/*
2886*4882a593Smuzhiyun 	 * PPB unlocking always unlocks all sectors of the flash chip.
2887*4882a593Smuzhiyun 	 * We need to re-lock all previously locked sectors.
2888*4882a593Smuzhiyun 	 */
2889*4882a593Smuzhiyun 	for (i = 0; i < sectors; i++) {
2890*4882a593Smuzhiyun 		if (sect[i].locked)
2891*4882a593Smuzhiyun 			do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
2892*4882a593Smuzhiyun 				      DO_XXLOCK_ONEBLOCK_LOCK);
2893*4882a593Smuzhiyun 	}
2894*4882a593Smuzhiyun 
2895*4882a593Smuzhiyun 	kfree(sect);
2896*4882a593Smuzhiyun 	return ret;
2897*4882a593Smuzhiyun }
2898*4882a593Smuzhiyun 
2899*4882a593Smuzhiyun static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
2900*4882a593Smuzhiyun 					    uint64_t len)
2901*4882a593Smuzhiyun {
2902*4882a593Smuzhiyun 	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2903*4882a593Smuzhiyun 				DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
2904*4882a593Smuzhiyun }
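/*
 * Editor's note: a hedged sketch of the PPB operations above as seen from
 * the generic MTD interface. Note the asymmetry handled by cfi_ppb_unlock():
 * the hardware can only unlock the whole chip, so the driver unlocks
 * everything and then re-locks whatever was locked outside the range.
 * The helper name and range are illustrative assumptions.
 */
#include <linux/mtd/mtd.h>

static int example_ppb_cycle(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

	ret = mtd_is_locked(mtd, ofs, len);	/* 1: locked, 0: unlocked */
	if (ret > 0)
		ret = mtd_unlock(mtd, ofs, len); /* unlock-all + re-lock */
	if (ret < 0)
		return ret;

	return mtd_lock(mtd, ofs, len);		/* persistently lock again */
}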
2905*4882a593Smuzhiyun 
2906*4882a593Smuzhiyun static void cfi_amdstd_sync(struct mtd_info *mtd)
2907*4882a593Smuzhiyun {
2908*4882a593Smuzhiyun 	struct map_info *map = mtd->priv;
2909*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
2910*4882a593Smuzhiyun 	int i;
2911*4882a593Smuzhiyun 	struct flchip *chip;
2912*4882a593Smuzhiyun 	int ret = 0;
2913*4882a593Smuzhiyun 	DECLARE_WAITQUEUE(wait, current);
2914*4882a593Smuzhiyun 
2915*4882a593Smuzhiyun 	for (i=0; !ret && i<cfi->numchips; i++) {
2916*4882a593Smuzhiyun 		chip = &cfi->chips[i];
2917*4882a593Smuzhiyun 
2918*4882a593Smuzhiyun 	retry:
2919*4882a593Smuzhiyun 		mutex_lock(&chip->mutex);
2920*4882a593Smuzhiyun 
2921*4882a593Smuzhiyun 		switch(chip->state) {
2922*4882a593Smuzhiyun 		case FL_READY:
2923*4882a593Smuzhiyun 		case FL_STATUS:
2924*4882a593Smuzhiyun 		case FL_CFI_QUERY:
2925*4882a593Smuzhiyun 		case FL_JEDEC_QUERY:
2926*4882a593Smuzhiyun 			chip->oldstate = chip->state;
2927*4882a593Smuzhiyun 			chip->state = FL_SYNCING;
2928*4882a593Smuzhiyun 			/* No need to wake_up() on this state change -
2929*4882a593Smuzhiyun 			 * as the whole point is that nobody can do anything
2930*4882a593Smuzhiyun 			 * with the chip now anyway.
2931*4882a593Smuzhiyun 			 */
2932*4882a593Smuzhiyun 			fallthrough;
2933*4882a593Smuzhiyun 		case FL_SYNCING:
2934*4882a593Smuzhiyun 			mutex_unlock(&chip->mutex);
2935*4882a593Smuzhiyun 			break;
2936*4882a593Smuzhiyun 
2937*4882a593Smuzhiyun 		default:
2938*4882a593Smuzhiyun 			/* Not an idle state */
2939*4882a593Smuzhiyun 			set_current_state(TASK_UNINTERRUPTIBLE);
2940*4882a593Smuzhiyun 			add_wait_queue(&chip->wq, &wait);
2941*4882a593Smuzhiyun 
2942*4882a593Smuzhiyun 			mutex_unlock(&chip->mutex);
2943*4882a593Smuzhiyun 
2944*4882a593Smuzhiyun 			schedule();
2945*4882a593Smuzhiyun 
2946*4882a593Smuzhiyun 			remove_wait_queue(&chip->wq, &wait);
2947*4882a593Smuzhiyun 
2948*4882a593Smuzhiyun 			goto retry;
2949*4882a593Smuzhiyun 		}
2950*4882a593Smuzhiyun 	}
2951*4882a593Smuzhiyun 
2952*4882a593Smuzhiyun 	/* Unlock the chips again */
2953*4882a593Smuzhiyun 
2954*4882a593Smuzhiyun 	for (i--; i >=0; i--) {
2955*4882a593Smuzhiyun 		chip = &cfi->chips[i];
2956*4882a593Smuzhiyun 
2957*4882a593Smuzhiyun 		mutex_lock(&chip->mutex);
2958*4882a593Smuzhiyun 
2959*4882a593Smuzhiyun 		if (chip->state == FL_SYNCING) {
2960*4882a593Smuzhiyun 			chip->state = chip->oldstate;
2961*4882a593Smuzhiyun 			wake_up(&chip->wq);
2962*4882a593Smuzhiyun 		}
2963*4882a593Smuzhiyun 		mutex_unlock(&chip->mutex);
2964*4882a593Smuzhiyun 	}
2965*4882a593Smuzhiyun }
2966*4882a593Smuzhiyun 
2967*4882a593Smuzhiyun 
2968*4882a593Smuzhiyun static int cfi_amdstd_suspend(struct mtd_info *mtd)
2969*4882a593Smuzhiyun {
2970*4882a593Smuzhiyun 	struct map_info *map = mtd->priv;
2971*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
2972*4882a593Smuzhiyun 	int i;
2973*4882a593Smuzhiyun 	struct flchip *chip;
2974*4882a593Smuzhiyun 	int ret = 0;
2975*4882a593Smuzhiyun 
2976*4882a593Smuzhiyun 	for (i=0; !ret && i<cfi->numchips; i++) {
2977*4882a593Smuzhiyun 		chip = &cfi->chips[i];
2978*4882a593Smuzhiyun 
2979*4882a593Smuzhiyun 		mutex_lock(&chip->mutex);
2980*4882a593Smuzhiyun 
2981*4882a593Smuzhiyun 		switch(chip->state) {
2982*4882a593Smuzhiyun 		case FL_READY:
2983*4882a593Smuzhiyun 		case FL_STATUS:
2984*4882a593Smuzhiyun 		case FL_CFI_QUERY:
2985*4882a593Smuzhiyun 		case FL_JEDEC_QUERY:
2986*4882a593Smuzhiyun 			chip->oldstate = chip->state;
2987*4882a593Smuzhiyun 			chip->state = FL_PM_SUSPENDED;
2988*4882a593Smuzhiyun 			/* No need to wake_up() on this state change -
2989*4882a593Smuzhiyun 			 * as the whole point is that nobody can do anything
2990*4882a593Smuzhiyun 			 * with the chip now anyway.
2991*4882a593Smuzhiyun 			 */
			fallthrough;
2992*4882a593Smuzhiyun 		case FL_PM_SUSPENDED:
2993*4882a593Smuzhiyun 			break;
2994*4882a593Smuzhiyun 
2995*4882a593Smuzhiyun 		default:
2996*4882a593Smuzhiyun 			ret = -EAGAIN;
2997*4882a593Smuzhiyun 			break;
2998*4882a593Smuzhiyun 		}
2999*4882a593Smuzhiyun 		mutex_unlock(&chip->mutex);
3000*4882a593Smuzhiyun 	}
3001*4882a593Smuzhiyun 
3002*4882a593Smuzhiyun 	/* Unlock the chips again */
3003*4882a593Smuzhiyun 
3004*4882a593Smuzhiyun 	if (ret) {
3005*4882a593Smuzhiyun 		for (i--; i >=0; i--) {
3006*4882a593Smuzhiyun 			chip = &cfi->chips[i];
3007*4882a593Smuzhiyun 
3008*4882a593Smuzhiyun 			mutex_lock(&chip->mutex);
3009*4882a593Smuzhiyun 
3010*4882a593Smuzhiyun 			if (chip->state == FL_PM_SUSPENDED) {
3011*4882a593Smuzhiyun 				chip->state = chip->oldstate;
3012*4882a593Smuzhiyun 				wake_up(&chip->wq);
3013*4882a593Smuzhiyun 			}
3014*4882a593Smuzhiyun 			mutex_unlock(&chip->mutex);
3015*4882a593Smuzhiyun 		}
3016*4882a593Smuzhiyun 	}
3017*4882a593Smuzhiyun 
3018*4882a593Smuzhiyun 	return ret;
3019*4882a593Smuzhiyun }
3020*4882a593Smuzhiyun 
3021*4882a593Smuzhiyun 
3022*4882a593Smuzhiyun static void cfi_amdstd_resume(struct mtd_info *mtd)
3023*4882a593Smuzhiyun {
3024*4882a593Smuzhiyun 	struct map_info *map = mtd->priv;
3025*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
3026*4882a593Smuzhiyun 	int i;
3027*4882a593Smuzhiyun 	struct flchip *chip;
3028*4882a593Smuzhiyun 
3029*4882a593Smuzhiyun 	for (i=0; i<cfi->numchips; i++) {
3030*4882a593Smuzhiyun 
3031*4882a593Smuzhiyun 		chip = &cfi->chips[i];
3032*4882a593Smuzhiyun 
3033*4882a593Smuzhiyun 		mutex_lock(&chip->mutex);
3034*4882a593Smuzhiyun 
3035*4882a593Smuzhiyun 		if (chip->state == FL_PM_SUSPENDED) {
3036*4882a593Smuzhiyun 			chip->state = FL_READY;
3037*4882a593Smuzhiyun 			map_write(map, CMD(0xF0), chip->start);
3038*4882a593Smuzhiyun 			wake_up(&chip->wq);
3039*4882a593Smuzhiyun 		}
3040*4882a593Smuzhiyun 		else
3041*4882a593Smuzhiyun 			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
3042*4882a593Smuzhiyun 
3043*4882a593Smuzhiyun 		mutex_unlock(&chip->mutex);
3044*4882a593Smuzhiyun 	}
3045*4882a593Smuzhiyun }
3046*4882a593Smuzhiyun 
3047*4882a593Smuzhiyun 
3048*4882a593Smuzhiyun /*
3049*4882a593Smuzhiyun  * Ensure that the flash device is put back into read array mode before
3050*4882a593Smuzhiyun  * unloading the driver or rebooting.  On some systems, rebooting while
3051*4882a593Smuzhiyun  * the flash is in query/program/erase mode will prevent the CPU from
3052*4882a593Smuzhiyun  * fetching the bootloader code, requiring a hard reset or power cycle.
3053*4882a593Smuzhiyun  */
3054*4882a593Smuzhiyun static int cfi_amdstd_reset(struct mtd_info *mtd)
3055*4882a593Smuzhiyun {
3056*4882a593Smuzhiyun 	struct map_info *map = mtd->priv;
3057*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
3058*4882a593Smuzhiyun 	int i, ret;
3059*4882a593Smuzhiyun 	struct flchip *chip;
3060*4882a593Smuzhiyun 
3061*4882a593Smuzhiyun 	for (i = 0; i < cfi->numchips; i++) {
3062*4882a593Smuzhiyun 
3063*4882a593Smuzhiyun 		chip = &cfi->chips[i];
3064*4882a593Smuzhiyun 
3065*4882a593Smuzhiyun 		mutex_lock(&chip->mutex);
3066*4882a593Smuzhiyun 
3067*4882a593Smuzhiyun 		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
3068*4882a593Smuzhiyun 		if (!ret) {
3069*4882a593Smuzhiyun 			map_write(map, CMD(0xF0), chip->start);
3070*4882a593Smuzhiyun 			chip->state = FL_SHUTDOWN;
3071*4882a593Smuzhiyun 			put_chip(map, chip, chip->start);
3072*4882a593Smuzhiyun 		}
3073*4882a593Smuzhiyun 
3074*4882a593Smuzhiyun 		mutex_unlock(&chip->mutex);
3075*4882a593Smuzhiyun 	}
3076*4882a593Smuzhiyun 
3077*4882a593Smuzhiyun 	return 0;
3078*4882a593Smuzhiyun }
3079*4882a593Smuzhiyun 
3080*4882a593Smuzhiyun 
3081*4882a593Smuzhiyun static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
3082*4882a593Smuzhiyun 			       void *v)
3083*4882a593Smuzhiyun {
3084*4882a593Smuzhiyun 	struct mtd_info *mtd;
3085*4882a593Smuzhiyun 
3086*4882a593Smuzhiyun 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
3087*4882a593Smuzhiyun 	cfi_amdstd_reset(mtd);
3088*4882a593Smuzhiyun 	return NOTIFY_DONE;
3089*4882a593Smuzhiyun }
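/*
 * Editor's note: a hedged sketch of how the notifier above is wired up.
 * In this driver the registration lives in the setup path (outside this
 * excerpt); the matching unregister_reboot_notifier() call appears in
 * cfi_amdstd_destroy() below. The helper name is an illustrative assumption.
 */
#include <linux/reboot.h>
#include <linux/mtd/mtd.h>

static void example_register_reset_hook(struct mtd_info *mtd)
{
	/* on reboot/shutdown, put the flash back into read array mode */
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
	register_reboot_notifier(&mtd->reboot_notifier);
}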
3090*4882a593Smuzhiyun 
3091*4882a593Smuzhiyun 
3092*4882a593Smuzhiyun static void cfi_amdstd_destroy(struct mtd_info *mtd)
3093*4882a593Smuzhiyun {
3094*4882a593Smuzhiyun 	struct map_info *map = mtd->priv;
3095*4882a593Smuzhiyun 	struct cfi_private *cfi = map->fldrv_priv;
3096*4882a593Smuzhiyun 
3097*4882a593Smuzhiyun 	cfi_amdstd_reset(mtd);
3098*4882a593Smuzhiyun 	unregister_reboot_notifier(&mtd->reboot_notifier);
3099*4882a593Smuzhiyun 	kfree(cfi->cmdset_priv);
3100*4882a593Smuzhiyun 	kfree(cfi->cfiq);
3101*4882a593Smuzhiyun 	kfree(cfi);
3102*4882a593Smuzhiyun 	kfree(mtd->eraseregions);
3103*4882a593Smuzhiyun }
3104*4882a593Smuzhiyun 
3105*4882a593Smuzhiyun MODULE_LICENSE("GPL");
3106*4882a593Smuzhiyun MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
3107*4882a593Smuzhiyun MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
3108*4882a593Smuzhiyun MODULE_ALIAS("cfi_cmdset_0006");
3109*4882a593Smuzhiyun MODULE_ALIAS("cfi_cmdset_0701");
3110