// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 2005, 2006 Red Hat Inc.
 *
 * Author: David Woodhouse <dwmw2@infradead.org>
 *	   Tom Sylla <tom.sylla@amd.com>
 *
 *  Overview:
 *   This is a device driver for the NAND flash controller found on
 *   the AMD CS5535/CS5536 companion chipsets for the Geode processor.
 *   mtd-id for command line partitioning is cs553x_nand_cs[0-3]
 *   where 0-3 reflects the chip select for NAND.
 */
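
/*
 * Command line partitioning example (hypothetical layout, standard
 * cmdlinepart syntax; adjust names and sizes to the actual flash):
 *
 *   mtdparts=cs553x_nand_cs0:1M(kernel),-(rootfs)
 */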

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/iopoll.h>

#include <asm/msr.h>

#define NR_CS553X_CONTROLLERS	4

#define MSR_DIVIL_GLD_CAP	0x51400000	/* DIVIL capabilities */
#define CAP_CS5535		0x2df000ULL
#define CAP_CS5536		0x5df500ULL

/* NAND Timing MSRs */
#define MSR_NANDF_DATA		0x5140001b	/* NAND Flash Data Timing MSR */
#define MSR_NANDF_CTL		0x5140001c	/* NAND Flash Control Timing */
#define MSR_NANDF_RSVD		0x5140001d	/* Reserved */

/* NAND BAR MSRs */
#define MSR_DIVIL_LBAR_FLSH0	0x51400010	/* Flash Chip Select 0 */
#define MSR_DIVIL_LBAR_FLSH1	0x51400011	/* Flash Chip Select 1 */
#define MSR_DIVIL_LBAR_FLSH2	0x51400012	/* Flash Chip Select 2 */
#define MSR_DIVIL_LBAR_FLSH3	0x51400013	/* Flash Chip Select 3 */
	/* Each made up of... */
#define FLSH_LBAR_EN		(1ULL<<32)
#define FLSH_NOR_NAND		(1ULL<<33)	/* 1 for NAND */
#define FLSH_MEM_IO		(1ULL<<34)	/* 1 for MMIO */
	/* I/O BARs have BASE_ADDR in bits 15:4, IO_MASK in 47:36 */
	/* MMIO BARs have BASE_ADDR in bits 31:12, MEM_MASK in 63:44 */

/* Pin function selection MSR (IDE vs. flash on the IDE pins) */
#define MSR_DIVIL_BALL_OPTS	0x51400015
#define PIN_OPT_IDE		(1<<0)	/* 0 for flash, 1 for IDE */

/* Registers within the NAND flash controller BAR -- memory mapped */
#define MM_NAND_DATA		0x00	/* 0 to 0x7ff, in fact */
#define MM_NAND_CTL		0x800	/* Any even address 0x800-0x80e */
#define MM_NAND_IO		0x801	/* Any odd address 0x801-0x80f */
#define MM_NAND_STS		0x810
#define MM_NAND_ECC_LSB		0x811
#define MM_NAND_ECC_MSB		0x812
#define MM_NAND_ECC_COL		0x813
#define MM_NAND_LAC		0x814
#define MM_NAND_ECC_CTL		0x815

/* Registers within the NAND flash controller BAR -- I/O mapped */
#define IO_NAND_DATA		0x00	/* 0 to 3, in fact */
#define IO_NAND_CTL		0x04
#define IO_NAND_IO		0x05
#define IO_NAND_STS		0x06
#define IO_NAND_ECC_CTL		0x08
#define IO_NAND_ECC_LSB		0x09
#define IO_NAND_ECC_MSB		0x0a
#define IO_NAND_ECC_COL		0x0b
#define IO_NAND_LAC		0x0c

#define CS_NAND_CTL_DIST_EN	(1<<4)	/* Enable NAND Distract interrupt */
#define CS_NAND_CTL_RDY_INT_MASK	(1<<3)	/* Enable RDY/BUSY# interrupt */
#define CS_NAND_CTL_ALE		(1<<2)
#define CS_NAND_CTL_CLE		(1<<1)
#define CS_NAND_CTL_CE		(1<<0)	/* Keep low; 1 to reset */

#define CS_NAND_STS_FLASH_RDY	(1<<3)
#define CS_NAND_CTLR_BUSY	(1<<2)
#define CS_NAND_CMD_COMP	(1<<1)
#define CS_NAND_DIST_ST		(1<<0)

#define CS_NAND_ECC_PARITY	(1<<2)
#define CS_NAND_ECC_CLRECC	(1<<1)
#define CS_NAND_ECC_ENECC	(1<<0)

struct cs553x_nand_controller {
	struct nand_controller base;
	struct nand_chip chip;
	void __iomem *mmio;
};

static struct cs553x_nand_controller *
to_cs553x(struct nand_controller *controller)
{
	return container_of(controller, struct cs553x_nand_controller, base);
}

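/*
 * Latch one command or address byte: drive CLE or ALE via the control
 * register, write the byte through the I/O register, then poll the status
 * register until the controller has finished the cycle.
 */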
static int cs553x_write_ctrl_byte(struct cs553x_nand_controller *cs553x,
				  u32 ctl, u8 data)
{
	u8 status;
	int ret;

	writeb(ctl, cs553x->mmio + MM_NAND_CTL);
	writeb(data, cs553x->mmio + MM_NAND_IO);
	ret = readb_poll_timeout_atomic(cs553x->mmio + MM_NAND_STS, status,
					!(status & CS_NAND_CTLR_BUSY), 1,
					100000);
	if (ret)
		return ret;

	return 0;
}

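/*
 * The memory-mapped data window (MM_NAND_DATA) is only 0x800 bytes wide, so
 * transfers larger than 2 KiB are split into 0x800-byte chunks.
 */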
static void cs553x_data_in(struct cs553x_nand_controller *cs553x, void *buf,
			   unsigned int len)
{
	writeb(0, cs553x->mmio + MM_NAND_CTL);
	while (unlikely(len > 0x800)) {
		memcpy_fromio(buf, cs553x->mmio, 0x800);
		buf += 0x800;
		len -= 0x800;
	}
	memcpy_fromio(buf, cs553x->mmio, len);
}

static void cs553x_data_out(struct cs553x_nand_controller *cs553x,
			    const void *buf, unsigned int len)
{
	writeb(0, cs553x->mmio + MM_NAND_CTL);
	while (unlikely(len > 0x800)) {
		memcpy_toio(cs553x->mmio, buf, 0x800);
		buf += 0x800;
		len -= 0x800;
	}
	memcpy_toio(cs553x->mmio, buf, len);
}

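/*
 * Wait (up to timeout_ms) for the controller to go idle and the flash
 * RDY/BUSY# line to report ready.
 */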
static int cs553x_wait_ready(struct cs553x_nand_controller *cs553x,
			     unsigned int timeout_ms)
{
	u8 mask = CS_NAND_CTLR_BUSY | CS_NAND_STS_FLASH_RDY;
	u8 status;

	return readb_poll_timeout(cs553x->mmio + MM_NAND_STS, status,
				  (status & mask) == CS_NAND_STS_FLASH_RDY, 100,
				  timeout_ms * 1000);
}

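/* Execute a single instruction of a generic ->exec_op() operation. */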
static int cs553x_exec_instr(struct cs553x_nand_controller *cs553x,
			     const struct nand_op_instr *instr)
{
	unsigned int i;
	int ret = 0;

	switch (instr->type) {
	case NAND_OP_CMD_INSTR:
		ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_CLE,
					     instr->ctx.cmd.opcode);
		break;

	case NAND_OP_ADDR_INSTR:
		for (i = 0; i < instr->ctx.addr.naddrs; i++) {
			ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_ALE,
						     instr->ctx.addr.addrs[i]);
			if (ret)
				break;
		}
		break;

	case NAND_OP_DATA_IN_INSTR:
		cs553x_data_in(cs553x, instr->ctx.data.buf.in,
			       instr->ctx.data.len);
		break;

	case NAND_OP_DATA_OUT_INSTR:
		cs553x_data_out(cs553x, instr->ctx.data.buf.out,
				instr->ctx.data.len);
		break;

	case NAND_OP_WAITRDY_INSTR:
		ret = cs553x_wait_ready(cs553x, instr->ctx.waitrdy.timeout_ms);
		break;
	}

	if (instr->delay_ns)
		ndelay(instr->delay_ns);

	return ret;
}

static int cs553x_exec_op(struct nand_chip *this,
			  const struct nand_operation *op,
			  bool check_only)
{
	struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
	unsigned int i;
	int ret;

	if (check_only)
		return true;

	/* De-assert the CE pin */
	writeb(0, cs553x->mmio + MM_NAND_CTL);
	for (i = 0; i < op->ninstrs; i++) {
		ret = cs553x_exec_instr(cs553x, &op->instrs[i]);
		if (ret)
			break;
	}

	/* Re-assert the CE pin. */
	writeb(CS_NAND_CTL_CE, cs553x->mmio + MM_NAND_CTL);

	return ret;
}

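/*
 * 0x07 == CS_NAND_ECC_PARITY | CS_NAND_ECC_CLRECC | CS_NAND_ECC_ENECC:
 * clear any accumulated syndrome and (re)enable the hardware ECC engine
 * before the data transfer.
 */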
static void cs_enable_hwecc(struct nand_chip *this, int mode)
{
	struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);

	writeb(0x07, cs553x->mmio + MM_NAND_ECC_CTL);
}

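/*
 * The three ECC bytes live at MM_NAND_ECC_LSB/MSB/COL, i.e. at offsets
 * MM_NAND_STS + 1..3, so a single 32-bit read at MM_NAND_STS picks up the
 * status byte plus all three ECC bytes in bits 8..31.
 */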
static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
			    u_char *ecc_code)
{
	struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
	uint32_t ecc;

	ecc = readl(cs553x->mmio + MM_NAND_STS);

	ecc_code[1] = ecc >> 8;
	ecc_code[0] = ecc >> 16;
	ecc_code[2] = ecc >> 24;
	return 0;
}

static struct cs553x_nand_controller *controllers[NR_CS553X_CONTROLLERS];

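/*
 * The controller computes a 3-byte Hamming code over every 256-byte step
 * (single-bit correction per step); correction itself is done in software
 * via nand_correct_data().
 */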
static int cs553x_attach_chip(struct nand_chip *chip)
{
	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
		return 0;

	chip->ecc.size = 256;
	chip->ecc.bytes = 3;
	chip->ecc.hwctl  = cs_enable_hwecc;
	chip->ecc.calculate = cs_calculate_ecc;
	chip->ecc.correct  = nand_correct_data;
	chip->ecc.strength = 1;

	return 0;
}

static const struct nand_controller_ops cs553x_nand_controller_ops = {
	.exec_op = cs553x_exec_op,
	.attach_chip = cs553x_attach_chip,
};

static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
{
	struct cs553x_nand_controller *controller;
	int err = 0;
	struct nand_chip *this;
	struct mtd_info *new_mtd;

	pr_notice("Probing CS553x NAND controller CS#%d at %sIO 0x%08lx\n",
		  cs, mmio ? "MM" : "P", adr);

	if (!mmio) {
		pr_notice("PIO mode not yet implemented for CS553X NAND controller\n");
		return -ENXIO;
	}

	/* Allocate memory for MTD device structure and private data */
	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller) {
		err = -ENOMEM;
		goto out;
	}

	this = &controller->chip;
	nand_controller_init(&controller->base);
	controller->base.ops = &cs553x_nand_controller_ops;
	this->controller = &controller->base;
	new_mtd = nand_to_mtd(this);

	/* Link the private data with the MTD structure */
	new_mtd->owner = THIS_MODULE;

	/* map physical address */
	controller->mmio = ioremap(adr, 4096);
	if (!controller->mmio) {
		pr_warn("ioremap cs553x NAND @0x%08lx failed\n", adr);
		err = -EIO;
		goto out_mtd;
	}

	/* Enable the following for a flash based bad block table */
	this->bbt_options = NAND_BBT_USE_FLASH;

	new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
	if (!new_mtd->name) {
		err = -ENOMEM;
		goto out_ior;
	}

	/* Scan to find existence of the device */
	err = nand_scan(this, 1);
	if (err)
		goto out_free;

	controllers[cs] = controller;
	goto out;

out_free:
	kfree(new_mtd->name);
out_ior:
	iounmap(controller->mmio);
out_mtd:
	kfree(controller);
out:
	return err;
}

static int is_geode(void)
{
	/* These are the CPUs which will have a CS553[56] companion chip */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    boot_cpu_data.x86 == 5 &&
	    boot_cpu_data.x86_model == 10)
		return 1; /* Geode LX */

	if ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC ||
	     boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX) &&
	    boot_cpu_data.x86 == 5 &&
	    boot_cpu_data.x86_model == 5)
		return 1; /* Geode GX (née GX2) */

	return 0;
}

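/*
 * Probe all four flash chip selects: a chip select is instantiated when its
 * LBAR MSR has both FLSH_LBAR_EN and FLSH_NOR_NAND set.  The low 32 bits of
 * the MSR value are used as the BAR base address, and FLSH_MEM_IO selects
 * MMIO vs. port I/O.  The low byte of the capabilities MSR is masked off
 * before comparing against the CAP_CS553x identifiers.
 */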
static int __init cs553x_init(void)
{
	int err = -ENXIO;
	int i;
	uint64_t val;

	/* If the CPU isn't a Geode GX or LX, abort */
	if (!is_geode())
		return -ENXIO;

	/* If it doesn't have the CS553[56], abort */
	rdmsrl(MSR_DIVIL_GLD_CAP, val);
	val &= ~0xFFULL;
	if (val != CAP_CS5535 && val != CAP_CS5536)
		return -ENXIO;

	/* If it doesn't have the NAND controller enabled, abort */
	rdmsrl(MSR_DIVIL_BALL_OPTS, val);
	if (val & PIN_OPT_IDE) {
		pr_info("CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n");
		return -ENXIO;
	}

	for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
		rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val);

		if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND))
			err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF);
	}

	/*
	 * Register all devices together here. This means we can easily hack
	 * it to do mtdconcat etc. if we want to.
	 */
	for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
		if (controllers[i]) {
			/* If any devices registered, return success. Else the last error. */
			mtd_device_register(nand_to_mtd(&controllers[i]->chip),
					    NULL, 0);
			err = 0;
		}
	}

	return err;
}

module_init(cs553x_init);

static void __exit cs553x_cleanup(void)
{
	int i;

	for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
		struct cs553x_nand_controller *controller = controllers[i];
		struct nand_chip *this;
		struct mtd_info *mtd;
		int ret;

		/* Skip chip selects that were never probed successfully */
		if (!controller)
			continue;

		this = &controller->chip;
		mtd = nand_to_mtd(this);

		/* Release resources, unregister device */
		ret = mtd_device_unregister(mtd);
		WARN_ON(ret);
		nand_cleanup(this);
		kfree(mtd->name);
		controllers[i] = NULL;

		/* unmap physical address */
		iounmap(controller->mmio);

		/* Free the MTD device structure */
		kfree(controller);
	}
}

module_exit(cs553x_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("NAND controller driver for AMD CS5535/CS5536 companion chip");