1b114abb6SLionel Debieve /*
2*f29c0702SLionel Debieve * Copyright (c) 2019-2022, STMicroelectronics - All Rights Reserved
3b114abb6SLionel Debieve *
4b114abb6SLionel Debieve * SPDX-License-Identifier: BSD-3-Clause
5b114abb6SLionel Debieve */
6b114abb6SLionel Debieve
7b114abb6SLionel Debieve #include <assert.h>
8b114abb6SLionel Debieve #include <errno.h>
9b114abb6SLionel Debieve #include <stddef.h>
10b114abb6SLionel Debieve
11b114abb6SLionel Debieve #include <common/debug.h>
12b114abb6SLionel Debieve #include <drivers/delay_timer.h>
13b114abb6SLionel Debieve #include <drivers/nand.h>
14b114abb6SLionel Debieve #include <lib/utils.h>
15b114abb6SLionel Debieve
16*f29c0702SLionel Debieve #include <platform_def.h>
17*f29c0702SLionel Debieve
18b114abb6SLionel Debieve /*
19b114abb6SLionel Debieve * Define a single nand_device used by specific NAND frameworks.
20b114abb6SLionel Debieve */
21b114abb6SLionel Debieve static struct nand_device nand_dev;
22*f29c0702SLionel Debieve
23*f29c0702SLionel Debieve #pragma weak plat_get_scratch_buffer
plat_get_scratch_buffer(void ** buffer_addr,size_t * buf_size)24*f29c0702SLionel Debieve void plat_get_scratch_buffer(void **buffer_addr, size_t *buf_size)
25*f29c0702SLionel Debieve {
26b114abb6SLionel Debieve static uint8_t scratch_buff[PLATFORM_MTD_MAX_PAGE_SIZE];
27b114abb6SLionel Debieve
28*f29c0702SLionel Debieve assert(buffer_addr != NULL);
29*f29c0702SLionel Debieve assert(buf_size != NULL);
30*f29c0702SLionel Debieve
31*f29c0702SLionel Debieve *buffer_addr = (void *)scratch_buff;
32*f29c0702SLionel Debieve *buf_size = sizeof(scratch_buff);
33*f29c0702SLionel Debieve }
34*f29c0702SLionel Debieve
/*
 * Read @length bytes from the NAND device, starting at byte @offset,
 * into @buffer, transparently skipping blocks marked bad.
 *
 * offset:      Byte offset into the device where the read starts.
 * buffer:      Destination address for the data.
 * length:      Number of bytes to read.
 * length_read: Output, number of bytes actually copied into @buffer.
 *
 * Return 0 on success, a negative errno value otherwise.
 */
int nand_read(unsigned int offset, uintptr_t buffer, size_t length,
	      size_t *length_read)
{
	unsigned int block = offset / nand_dev.block_size;
	unsigned int end_block = (offset + length - 1U) / nand_dev.block_size;
	unsigned int page_start =
		(offset % nand_dev.block_size) / nand_dev.page_size;
	unsigned int nb_pages = nand_dev.block_size / nand_dev.page_size;
	unsigned int start_offset = offset % nand_dev.page_size;
	unsigned int page;
	unsigned int bytes_read;
	int is_bad;
	int ret;
	uint8_t *scratch_buff;
	size_t scratch_buff_size;

	plat_get_scratch_buffer((void **)&scratch_buff, &scratch_buff_size);

	assert(scratch_buff != NULL);

	VERBOSE("Block %u - %u, page_start %u, nb %u, length %zu, offset %u\n",
		block, end_block, page_start, nb_pages, length, offset);

	*length_read = 0UL;

	/*
	 * Reads that are not page-aligned (head or tail) bounce through the
	 * scratch buffer, which must therefore hold at least one full page.
	 */
	if (((start_offset != 0U) || (length % nand_dev.page_size) != 0U) &&
	    (scratch_buff_size < nand_dev.page_size)) {
		return -EINVAL;
	}

	while (block <= end_block) {
		is_bad = nand_dev.mtd_block_is_bad(block);
		if (is_bad < 0) {
			return is_bad;
		}

		if (is_bad == 1) {
			/* Skip the block */
			uint32_t max_block =
				nand_dev.size / nand_dev.block_size;

			/*
			 * Shift the whole read window by one block: the data
			 * expected here lives in the next good block.
			 */
			block++;
			end_block++;
			if ((block < max_block) && (end_block < max_block)) {
				continue;
			}

			/* Skipping ran past the end of the device. */
			return -EIO;
		}

		for (page = page_start; page < nb_pages; page++) {
			if ((start_offset != 0U) ||
			    (length < nand_dev.page_size)) {
				/*
				 * Partial page: read the full page into the
				 * scratch buffer, then copy out the slice the
				 * caller asked for.
				 */
				ret = nand_dev.mtd_read_page(
					&nand_dev,
					(block * nb_pages) + page,
					(uintptr_t)scratch_buff);
				if (ret != 0) {
					return ret;
				}

				bytes_read = MIN((size_t)(nand_dev.page_size -
							  start_offset),
						 length);

				memcpy((uint8_t *)buffer,
				       scratch_buff + start_offset,
				       bytes_read);

				/* Only the first page can start misaligned. */
				start_offset = 0U;
			} else {
				/*
				 * Aligned full page: read straight into the
				 * caller's buffer, no intermediate copy.
				 */
				ret = nand_dev.mtd_read_page(&nand_dev,
						(block * nb_pages) + page,
						buffer);
				if (ret != 0) {
					return ret;
				}

				bytes_read = nand_dev.page_size;
			}

			length -= bytes_read;
			buffer += bytes_read;
			*length_read += bytes_read;

			if (length == 0U) {
				break;
			}
		}

		/* Every block after the first is read from its first page. */
		page_start = 0U;
		block++;
	}

	return 0;
}
131b114abb6SLionel Debieve
nand_seek_bb(uintptr_t base,unsigned int offset,size_t * extra_offset)132bc3eebb2SYann Gautier int nand_seek_bb(uintptr_t base, unsigned int offset, size_t *extra_offset)
133bc3eebb2SYann Gautier {
134bc3eebb2SYann Gautier unsigned int block;
135bc3eebb2SYann Gautier unsigned int offset_block;
136bc3eebb2SYann Gautier unsigned int max_block;
137bc3eebb2SYann Gautier int is_bad;
138bc3eebb2SYann Gautier size_t count_bb = 0U;
139bc3eebb2SYann Gautier
140bc3eebb2SYann Gautier block = base / nand_dev.block_size;
141bc3eebb2SYann Gautier
142bc3eebb2SYann Gautier if (offset != 0U) {
143bc3eebb2SYann Gautier offset_block = (base + offset - 1U) / nand_dev.block_size;
144bc3eebb2SYann Gautier } else {
145bc3eebb2SYann Gautier offset_block = block;
146bc3eebb2SYann Gautier }
147bc3eebb2SYann Gautier
148bc3eebb2SYann Gautier max_block = nand_dev.size / nand_dev.block_size;
149bc3eebb2SYann Gautier
150bc3eebb2SYann Gautier while (block <= offset_block) {
151bc3eebb2SYann Gautier if (offset_block >= max_block) {
152bc3eebb2SYann Gautier return -EIO;
153bc3eebb2SYann Gautier }
154bc3eebb2SYann Gautier
155bc3eebb2SYann Gautier is_bad = nand_dev.mtd_block_is_bad(block);
156bc3eebb2SYann Gautier if (is_bad < 0) {
157bc3eebb2SYann Gautier return is_bad;
158bc3eebb2SYann Gautier }
159bc3eebb2SYann Gautier
160bc3eebb2SYann Gautier if (is_bad == 1) {
161bc3eebb2SYann Gautier count_bb++;
162bc3eebb2SYann Gautier offset_block++;
163bc3eebb2SYann Gautier }
164bc3eebb2SYann Gautier
165bc3eebb2SYann Gautier block++;
166bc3eebb2SYann Gautier }
167bc3eebb2SYann Gautier
168bc3eebb2SYann Gautier *extra_offset = count_bb * nand_dev.block_size;
169bc3eebb2SYann Gautier
170bc3eebb2SYann Gautier return 0;
171bc3eebb2SYann Gautier }
172bc3eebb2SYann Gautier
/*
 * Return the singleton NAND device descriptor shared by the specific
 * NAND frameworks (raw NAND, SPI-NAND) layered on this core driver.
 */
struct nand_device *get_nand_device(void)
{
	return &nand_dev;
}
177