/*
 * dfu_nand.c -- DFU for NAND routines.
 *
 * Copyright (C) 2012-2013 Texas Instruments, Inc.
 *
 * Based on dfu_mmc.c which is:
 * Copyright (C) 2012 Samsung Electronics
 * author: Lukasz Majewski <l.majewski@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <malloc.h>
#include <errno.h>
#include <div64.h>
#include <dfu.h>
#include <linux/mtd/mtd.h>
#include <jffs2/load_kernel.h>
#include <nand.h>

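/*
 * Perform a raw read or write on the NAND area described by @dfu.
 * A NULL @buf is a size query: *len is set to the total size of the area.
 * Writes erase the affected range first and are verified; any extra offset
 * introduced by skipping bad blocks is accumulated in dfu->bad_skip so
 * subsequent transfers stay contiguous.
 */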
static int nand_block_op(enum dfu_op op, struct dfu_entity *dfu,
			 u64 offset, void *buf, long *len)
{
	loff_t start, lim;
	size_t count, actual;
	int ret;
	struct mtd_info *mtd;

	/* if buf == NULL return total size of the area */
	if (buf == NULL) {
		*len = dfu->data.nand.size;
		return 0;
	}

	start = dfu->data.nand.start + offset + dfu->bad_skip;
	lim = dfu->data.nand.start + dfu->data.nand.size - start;
	count = *len;

	mtd = get_nand_dev_by_index(nand_curr_device);

	if (nand_curr_device < 0 ||
	    nand_curr_device >= CONFIG_SYS_MAX_NAND_DEVICE ||
	    !mtd) {
		printf("%s: invalid nand device\n", __func__);
		return -1;
	}

	if (op == DFU_OP_READ) {
		ret = nand_read_skip_bad(mtd, start, &count, &actual,
					 lim, buf);
	} else {
		nand_erase_options_t opts;

		memset(&opts, 0, sizeof(opts));
		opts.offset = start;
		opts.length = count;
		opts.spread = 1;
		opts.quiet = 1;
		opts.lim = lim;
		/* first erase */
		ret = nand_erase_opts(mtd, &opts);
		if (ret)
			return ret;
		/* then write */
		ret = nand_write_skip_bad(mtd, start, &count, &actual,
					  lim, buf, WITH_WR_VERIFY);
	}

	if (ret != 0) {
		printf("%s: nand_%s_skip_bad call failed at %llx!\n",
		       __func__, op == DFU_OP_READ ? "read" : "write",
		       start);
		return ret;
	}

	/*
	 * Find out where we stopped writing data. This can be deeper into
	 * the NAND than we expected due to having to skip bad blocks. So
	 * we must take this into account for the next write, if any.
	 */
	if (actual > count)
		dfu->bad_skip += actual - count;

	return ret;
}

static inline int nand_block_write(struct dfu_entity *dfu,
				   u64 offset, void *buf, long *len)
{
	return nand_block_op(DFU_OP_WRITE, dfu, offset, buf, len);
}

static inline int nand_block_read(struct dfu_entity *dfu,
				  u64 offset, void *buf, long *len)
{
	return nand_block_op(DFU_OP_READ, dfu, offset, buf, len);
}

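/*
 * Layout dispatcher for writes: only the raw address layout (DFU_RAW_ADDR)
 * is implemented for NAND; any other layout is reported as unsupported.
 */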
static int dfu_write_medium_nand(struct dfu_entity *dfu,
				 u64 offset, void *buf, long *len)
{
	int ret = -1;

	switch (dfu->layout) {
	case DFU_RAW_ADDR:
		ret = nand_block_write(dfu, offset, buf, len);
		break;
	default:
		printf("%s: Layout (%s) not (yet) supported!\n", __func__,
		       dfu_get_layout(dfu->layout));
	}

	return ret;
}

int dfu_get_medium_size_nand(struct dfu_entity *dfu, u64 *size)
{
	*size = dfu->data.nand.size;

	return 0;
}

static int dfu_read_medium_nand(struct dfu_entity *dfu, u64 offset, void *buf,
				long *len)
{
	int ret = -1;

	switch (dfu->layout) {
	case DFU_RAW_ADDR:
		ret = nand_block_read(dfu, offset, buf, len);
		break;
	default:
		printf("%s: Layout (%s) not (yet) supported!\n", __func__,
		       dfu_get_layout(dfu->layout));
	}

	return ret;
}

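/*
 * Called after the last DFU chunk has been written. For UBI partitions the
 * remainder of the partition (past the last erase block touched by the
 * download) is erased so UBI does not find stale data behind the new image.
 */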
static int dfu_flush_medium_nand(struct dfu_entity *dfu)
{
	int ret = 0;
	u64 off;

	/* in case of ubi partition, erase rest of the partition */
	if (dfu->data.nand.ubi) {
		struct mtd_info *mtd = get_nand_dev_by_index(nand_curr_device);
		nand_erase_options_t opts;

		if (nand_curr_device < 0 ||
		    nand_curr_device >= CONFIG_SYS_MAX_NAND_DEVICE ||
		    !mtd) {
			printf("%s: invalid nand device\n", __func__);
			return -1;
		}

		memset(&opts, 0, sizeof(opts));
		off = dfu->offset;
		if ((off & (mtd->erasesize - 1)) != 0) {
			/*
			 * last write ended with unaligned length
			 * sector is erased, jump to next
			 */
			off = off & ~((mtd->erasesize - 1));
			off += mtd->erasesize;
		}
		opts.offset = dfu->data.nand.start + off +
				dfu->bad_skip;
		opts.length = dfu->data.nand.start +
				dfu->data.nand.size - opts.offset;
		ret = nand_erase_opts(mtd, &opts);
		if (ret != 0)
			printf("Failure erase: %d\n", ret);
	}

	return ret;
}

unsigned int dfu_polltimeout_nand(struct dfu_entity *dfu)
{
	/*
	 * Currently, Poll Timeout != 0 is only needed on nand
	 * ubi partition, as the not used sectors need an erase
	 */
	if (dfu->data.nand.ubi)
		return DFU_MANIFEST_POLL_TIMEOUT;

	return DFU_DEFAULT_POLL_TIMEOUT;
}

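/*
 * Parse the dfu_alt_info entry for a NAND entity. Supported forms:
 *   "raw <offset> <size>"   - hexadecimal offset and size within the device
 *   "part <dev> <part>"     - mtdparts partition number (1-based)
 *   "partubi <dev> <part>"  - as "part", but marked as a UBI volume so the
 *                             remaining blocks are erased on flush
 */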
int dfu_fill_entity_nand(struct dfu_entity *dfu, char *devstr, char *s)
{
	char *st;
#ifndef CONFIG_SPL_BUILD
	int ret, dev, part;
#endif
	dfu->data.nand.ubi = 0;
	dfu->dev_type = DFU_DEV_NAND;
	st = strsep(&s, " ");
	if (!strcmp(st, "raw")) {
		dfu->layout = DFU_RAW_ADDR;
		dfu->data.nand.start = simple_strtoul(s, &s, 16);
		s++;
		dfu->data.nand.size = simple_strtoul(s, &s, 16);
	} else if ((!strcmp(st, "part")) || (!strcmp(st, "partubi"))) {
#ifndef CONFIG_SPL_BUILD
		char mtd_id[32];
		struct mtd_device *mtd_dev;
		u8 part_num;
		struct part_info *pi;

		dfu->layout = DFU_RAW_ADDR;

		dev = simple_strtoul(s, &s, 10);
		s++;
		part = simple_strtoul(s, &s, 10);

		sprintf(mtd_id, "%s%d,%d", "nand", dev, part - 1);
		printf("using id '%s'\n", mtd_id);

		mtdparts_init();

		ret = find_dev_and_part(mtd_id, &mtd_dev, &part_num, &pi);
		if (ret != 0) {
			printf("Could not locate '%s'\n", mtd_id);
			return -1;
		}

		dfu->data.nand.start = pi->offset;
		dfu->data.nand.size = pi->size;
		if (!strcmp(st, "partubi"))
			dfu->data.nand.ubi = 1;
#endif
	} else {
		printf("%s: Memory layout (%s) not supported!\n", __func__, st);
		return -1;
	}

	dfu->get_medium_size = dfu_get_medium_size_nand;
	dfu->read_medium = dfu_read_medium_nand;
	dfu->write_medium = dfu_write_medium_nand;
	dfu->flush_medium = dfu_flush_medium_nand;
	dfu->poll_timeout = dfu_polltimeout_nand;

	/* initial state */
	dfu->inited = 0;

	return 0;
}