int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst)
{
	unsigned int block, lastblock;
	unsigned int page, page_offset;

	/* offs need not be page aligned; any in-page offset is handled below */
	block = offs / CONFIG_SYS_NAND_BLOCK_SIZE;
	lastblock = (offs + size - 1) / CONFIG_SYS_NAND_BLOCK_SIZE;
	page = (offs % CONFIG_SYS_NAND_BLOCK_SIZE) / CONFIG_SYS_NAND_PAGE_SIZE;
	page_offset = offs % CONFIG_SYS_NAND_PAGE_SIZE;

	while (block <= lastblock) {
		if (!nand_is_bad_block(block)) {
			while (page < CONFIG_SYS_NAND_PAGE_COUNT) {
				nand_read_page(block, page, dst);
				/*
				 * When offs is not aligned to a page address,
				 * the extra offset is copied to dst as well.
				 * Move the image so that its first byte ends
				 * up at dst.
				 */
				if (unlikely(page_offset)) {
					memmove(dst, dst + page_offset,
						CONFIG_SYS_NAND_PAGE_SIZE);
					dst = (void *)((uintptr_t)dst -
						       page_offset);
					page_offset = 0;
				}
				dst += CONFIG_SYS_NAND_PAGE_SIZE;
				page++;
			}

			page = 0;
		} else {
			/*
			 * Skip the bad block and read one block further to
			 * compensate for the skipped data.
			 */
			lastblock++;
		}

		block++;
	}

	return 0;
}
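
/*
 * Usage sketch (comment only, not compiled): a raw NAND boot path would
 * typically call nand_spl_load_image() to copy U-Boot proper from flash
 * into RAM and then jump to it. The CONFIG_SYS_NAND_U_BOOT_* symbols and
 * the nand_init()/nand_deselect() helpers referenced here are assumptions
 * about the board configuration and the SPL NAND driver, not definitions
 * made in this file:
 *
 *	nand_init();
 *	nand_spl_load_image(CONFIG_SYS_NAND_U_BOOT_OFFS,
 *			    CONFIG_SYS_NAND_U_BOOT_SIZE,
 *			    (void *)CONFIG_SYS_NAND_U_BOOT_DST);
 *	nand_deselect();
 *	((void (*)(void))CONFIG_SYS_NAND_U_BOOT_START)();
 */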

#ifdef CONFIG_SPL_UBI
/*
 * Temporary storage for reads which are not NAND page aligned or not a
 * multiple of the NAND page size. Note: This does not support runtime
 * detected FLASH yet, but that should be reasonably easy to fix by making
 * the buffer large enough :)
 */
static u8 scratch_buf[CONFIG_SYS_NAND_PAGE_SIZE];

/**
 * nand_spl_read_block - Read data from a physical eraseblock into a buffer
 * @block: Number of the physical eraseblock
 * @offset: Data offset from the start of @block
 * @len: Data size to read
 * @dst: Address of the destination buffer
 *
 * This could be further optimized if we had a subpage read function in the
 * simple code. On NAND which allows subpage reads this would save quite
 * some time when reading out e.g. the VID header of UBI.
 *
 * Notes:
 * @offset + @len must not be larger than a physical eraseblock. No sanity
 * check is done, for simplicity reasons.
 *
 * To support runtime detected flash this needs to be extended with
 * information about the actual flash geometry, but that's beyond the scope
 * of this effort and for most applications where fast boot is required it
 * is not an issue anyway.
 */
int nand_spl_read_block(int block, int offset, int len, void *dst)
{
	int page, read;

	/* Calculate the page number */
	page = offset / CONFIG_SYS_NAND_PAGE_SIZE;

	/* Offset to the start of a flash page */
	offset = offset % CONFIG_SYS_NAND_PAGE_SIZE;

	while (len) {
		/*
		 * Non page aligned reads go to the scratch buffer.
		 * Page aligned reads go directly to the destination.
		 */
		if (offset || len < CONFIG_SYS_NAND_PAGE_SIZE) {
			nand_read_page(block, page, scratch_buf);
			read = min(len, CONFIG_SYS_NAND_PAGE_SIZE - offset);
			memcpy(dst, scratch_buf + offset, read);
			offset = 0;
		} else {
			nand_read_page(block, page, dst);
			read = CONFIG_SYS_NAND_PAGE_SIZE;
		}
		page++;
		len -= read;
		dst += read;
	}
	return 0;
}
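
/*
 * Usage sketch (comment only, not compiled): the SPL UBI scanner reads
 * small, non page aligned structures such as the UBI EC header through
 * this helper. Assuming struct ubi_ec_hdr from ubi-media.h is available
 * and "peb" is an eraseblock number chosen by the caller:
 *
 *	struct ubi_ec_hdr ec_hdr;
 *
 *	nand_spl_read_block(peb, 0, sizeof(ec_hdr), &ec_hdr);
 *
 * Because the read is smaller than a NAND page it is bounced through
 * scratch_buf; a full, page aligned read of the same eraseblock would go
 * straight to the destination buffer. @offset + @len must stay within one
 * eraseblock.
 */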
#endif