/*
 * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
 * Copyright (c) 2025, Altera Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/cadence/cdns_nand.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/utils.h>
#include <platform_def.h>

/* NAND flash device information struct */
static cnf_dev_info_t dev_info;

/*
 * Scratch buffer for read and write operations.
 * The Cadence NAND DMA engine expects the buffer address written to
 * its register to be 8-byte aligned.
 */
static uint8_t *scratch_buff = (uint8_t *)PLAT_NAND_SCRATCH_BUFF;

/* Wait for controller to be in idle state */
static inline void cdns_nand_wait_idle(void)
{
	uint32_t reg = 0U;

	do {
		udelay(CNF_DEF_DELAY_US);
		reg = mmio_read_32(CNF_CMDREG(CTRL_STATUS));
	} while (CNF_GET_CTRL_BUSY(reg) != 0U);
}

/* Wait for given thread to be in ready state */
static inline void cdns_nand_wait_thread_ready(uint8_t thread_id)
{
	uint32_t reg = 0U;

	do {
		udelay(CNF_DEF_DELAY_US);
		reg = mmio_read_32(CNF_CMDREG(TRD_STATUS));
		reg &= (1U << (uint32_t)thread_id);
	} while (reg != 0U);
}

static inline uint32_t cdns_nand_get_thread_status(uint8_t thread_id)
{
	uint32_t status = 0U;

	/* Select thread */
	mmio_write_32(CNF_CMDREG(CMD_STAT_PTR), (uint32_t)thread_id);

	/* Get last command status. */
	status = mmio_read_32(CNF_CMDREG(CMD_STAT));

	return status;
}

/* Check if the last operation/command in selected thread is completed */
static int cdns_nand_last_opr_status(uint8_t thread_id)
{
	uint8_t nthreads = 0U;
	uint32_t reg = 0U;

	/* Get number of threads */
	reg = mmio_read_32(CNF_CTRLPARAM(FEATURE));
	nthreads = CNF_GET_NTHREADS(reg);

	/* Thread IDs are zero-based, so valid IDs are 0 to (nthreads - 1). */
	if (thread_id >= nthreads) {
		ERROR("%s: Invalid thread ID\n", __func__);
		return -EINVAL;
	}

	/* Select thread */
	mmio_write_32(CNF_CMDREG(CMD_STAT_PTR), (uint32_t)thread_id);

	uint32_t err_mask = CNF_ECMD | CNF_EECC | CNF_EDEV | CNF_EDQS | CNF_EFAIL |
				CNF_EBUS | CNF_EDI | CNF_EPAR | CNF_ECTX | CNF_EPRO;

	do {
		udelay(CNF_DEF_DELAY_US * 2);
		reg = mmio_read_32(CNF_CMDREG(CMD_STAT));
	} while ((reg & CNF_CMPLT) == 0U);

	/* The last operation completed; make sure no error bits are set. */
	if ((reg & err_mask) != 0U) {
		ERROR("%s, CMD_STATUS:0x%x\n", __func__, reg);
		return -EIO;
	}

	return 0;
}

/* Set feature command */
int cdns_nand_set_feature(uint8_t feat_addr, uint8_t feat_val, uint8_t thread_id)
{
	/* Wait for thread to be ready */
	cdns_nand_wait_thread_ready(thread_id);

	/* Set feature address */
	mmio_write_32(CNF_CMDREG(CMD_REG1), (uint32_t)feat_addr);
	/* Set feature value */
	mmio_write_32(CNF_CMDREG(CMD_REG2), (uint32_t)feat_val);

	/* Set feature command */
	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);

	reg |= (thread_id << CNF_CMDREG0_TRD);
	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
	reg |= (CNF_CT_SET_FEATURE << CNF_CMDREG0_CMD);
	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);

	return cdns_nand_last_opr_status(thread_id);
}

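/*
 * Usage sketch for cdns_nand_set_feature() (illustrative only; not called
 * by this driver): on ONFI-compliant devices, SET FEATURES address 0x01
 * selects the timing mode, and value 0x00 requests timing mode 0, which
 * every ONFI device must support. A platform could apply it on the
 * default thread:
 *
 *	if (cdns_nand_set_feature(0x01U, 0x00U, CNF_DEF_TRD) != 0) {
 *		WARN("CNF: failed to select ONFI timing mode 0\n");
 *	}
 */
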
/* Reset command to the selected device */
int cdns_nand_reset(uint8_t thread_id)
{
	/* Operation is executed in selected thread */
	cdns_nand_wait_thread_ready(thread_id);

	/* Select memory */
	mmio_write_32(CNF_CMDREG(CMD_REG4),
			(CNF_DEF_DEVICE << CNF_CMDREG4_MEM));

	/* Issue reset command */
	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);

	reg |= (thread_id << CNF_CMDREG0_TRD);
	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
	reg |= (CNF_CT_RESET_ASYNC << CNF_CMDREG0_CMD);
	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);

	return cdns_nand_last_opr_status(thread_id);
}

/* Set operation work mode */
static void cdns_nand_set_opr_mode(void)
{
	/* Wait for controller to be in idle state */
	cdns_nand_wait_idle();

	/* NAND mini controller settings for SDR mode and Combo PHY settings. */
	mmio_write_32(CNF_MINICTRL(ASYNC_TOGGLE_TIMINGS), CNF_ASYNC_TOGGLE_TIMINGS_VAL);
	mmio_write_32(CNF_MINICTRL(TIMINGS0), CNF_MINICTRL_TIMINGS0_VAL);
	mmio_write_32(CNF_MINICTRL(TIMINGS1), CNF_MINICTRL_TIMINGS1_VAL);
	mmio_write_32(CNF_MINICTRL(TIMINGS2), CNF_MINICTRL_TIMINGS2_VAL);
	mmio_write_32(CNF_MINICTRL(DLL_PHY_CTRL), CNF_DLL_PHY_CTRL_VAL);
	mmio_write_32(CP_CTB(CTRL_REG), CP_CTRL_REG_SDR);
	mmio_write_32(CP_CTB(TSEL_REG), CP_TSEL_REG_SDR);
	mmio_write_32(CP_DLL(DQ_TIMING_REG), CP_DQ_TIMING_REG_SDR);
	mmio_write_32(CP_DLL(DQS_TIMING_REG), CP_DQS_TIMING_REG_SDR);
	mmio_write_32(CP_DLL(GATE_LPBK_CTRL_REG), CP_GATE_LPBK_CTRL_REG_SDR);
	mmio_write_32(CP_DLL(MASTER_CTRL_REG), CP_DLL_MASTER_CTRL_REG_SDR);
	mmio_write_32(CP_DLL(SLAVE_CTRL_REG), CP_DLL_SLAVE_CTRL_REG_SDR);

	/* Wait for controller to be in idle state */
	cdns_nand_wait_idle();

	/* Set operation work mode in common settings to SDR. */
	mmio_clrbits_32(CNF_MINICTRL(CMN_SETTINGS), (BIT(1) | BIT(0)));
}

/* Data transfer configuration */
static void cdns_nand_transfer_config(void)
{
	/* Wait for controller to be in idle state */
	cdns_nand_wait_idle();

	/* DMA burst select */
	mmio_write_32(CNF_CTRLCFG(DMA_SETTINGS),
			(CNF_DMA_BURST_SIZE_MAX << CNF_DMA_SETTINGS_BURST) |
			(1 << CNF_DMA_SETTINGS_OTE));

	/* Enable pre-fetching for 1K */
	mmio_write_32(CNF_CTRLCFG(FIFO_TLEVEL),
			(CNF_DMA_PREFETCH_SIZE << CNF_FIFO_TLEVEL_POS) |
			(CNF_DMA_PREFETCH_SIZE << CNF_FIFO_TLEVEL_DMA_SIZE));

	/* Disable cache and multi-plane operations. */
	mmio_write_32(CNF_CTRLCFG(MULTIPLANE_CFG), 0);
	mmio_write_32(CNF_CTRLCFG(CACHE_CFG), 0);

	/* ECC engine configuration. */
	mmio_write_32(CNF_CTRLCFG(ECC_CFG0), CNF_ECC_CFG0_VAL);

	/* Skip bytes details update - bytes, marker and offset. */
	mmio_write_32(CNF_MINICTRL(SKIP_BYTES_CFG), CNF_SKIP_BYTES_CFG_VAL);
	mmio_write_32(CNF_MINICTRL(SKIP_BYTES_OFFSET), CNF_SKIP_BYTES_OFFSET_VAL);

	/* Transfer config - sector count, sector size, last sector size. */
	mmio_write_32(CNF_CTRLCFG(TRANS_CFG0), CNF_TRANS_CFG0_VAL);
	mmio_write_32(CNF_CTRLCFG(TRANS_CFG1), CNF_TRANS_CFG1_VAL);

	/* Disable pre-fetching. */
	cdns_nand_wait_idle();
	mmio_write_32(CNF_CTRLCFG(FIFO_TLEVEL), 0);
}

/* Update the NAND flash device info */
static int cdns_nand_update_dev_info(void)
{
	uint32_t reg = 0U;
	static const char *const device_type[] = {
		"Unknown",
		"ONFI",
		"JEDEC/Toggle",
		"Legacy"
	};

	NOTICE("CNF: NAND Flash Device details\n");

	/* Get Manufacturer ID and Device ID. */
	reg = mmio_read_32(CNF_CTRLPARAM(MFR_ID));
	dev_info.mfr_id = FIELD_GET(CNF_MFR_ID_MASK, reg);
	dev_info.dev_id = FIELD_GET(CNF_DEV_ID_MASK, reg);
	INFO(" -- Manufacturer ID: 0x%02x\n", dev_info.mfr_id);
	INFO(" -- Device ID: 0x%02x\n", dev_info.dev_id);

	/* Read the device type and number of LUNs. */
	reg = mmio_read_32(CNF_CTRLPARAM(DEV_PARAMS0));
	dev_info.type = CNF_GET_DEV_TYPE(reg);
	NOTICE(" -- Device type '%s' detected\n", device_type[dev_info.type]);
	if (dev_info.type == CNF_DT_UNKNOWN) {
		ERROR("CNF: Device type is 'Unknown', exit\n");
		return -ENXIO;
	}
	dev_info.nluns = CNF_GET_NLUNS(reg);

	/* Pages per block - number of pages in a block. */
	reg = mmio_read_32(CNF_CTRLCFG(DEV_LAYOUT));
	dev_info.npages_per_block = CNF_GET_NPAGES_PER_BLOCK(reg);
	INFO(" -- Pages per block: %d\n", dev_info.npages_per_block);

	/* Sector size and last sector size */
	reg = mmio_read_32(CNF_CTRLCFG(TRANS_CFG1));
	dev_info.sector_size = CNF_GET_SCTR_SIZE(reg);
	dev_info.last_sector_size = CNF_GET_LAST_SCTR_SIZE(reg);

	/* Page size and spare size */
	reg = mmio_read_32(CNF_CTRLPARAM(DEV_AREA));
	dev_info.page_size = CNF_GET_PAGE_SIZE(reg);
	dev_info.spare_size = CNF_GET_SPARE_SIZE(reg);
	INFO(" -- Page main area size: %d bytes\n", dev_info.page_size);
	INFO(" -- Page spare area size: %d bytes\n", dev_info.spare_size);

	/* Device blocks per LUN */
	dev_info.nblocks_per_lun = mmio_read_32(CNF_CTRLPARAM(DEV_BLOCKS_PLUN));
	INFO(" -- Blocks per LUN: %d\n", dev_info.nblocks_per_lun);

	/* Calculate block size and total device size */
	dev_info.block_size = (dev_info.npages_per_block * dev_info.page_size);
	INFO(" -- Block size: %d bytes\n", dev_info.block_size);

	dev_info.total_size = ((unsigned long long)dev_info.block_size *
				(unsigned long long)dev_info.nblocks_per_lun *
				dev_info.nluns);
	NOTICE(" -- Total device size: %llu bytes\n", dev_info.total_size);

	return 0;
}

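/*
 * Worked example for cdns_nand_update_dev_info() (illustrative numbers,
 * not read from hardware): a device reporting a 2048-byte page, 64 pages
 * per block, 4096 blocks per LUN and 1 LUN yields:
 *
 *	block_size = 64 * 2048         = 131072 bytes (128 KiB)
 *	total_size = 131072 * 4096 * 1 = 536870912 bytes (512 MiB)
 */
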
/* NAND Flash Controller/Host initialization */
int cdns_nand_host_init(void)
{
	uint32_t reg = 0U;
	int ret = 0;
	uint32_t timeout_count = (CNF_DD_INIT_COMP_US / CNF_DEF_DELAY_US);

	INFO("CNF: Starting Device Discovery Process\n");
	do {
		/* Read controller status register for init complete */
		reg = mmio_read_32(CNF_CMDREG(CTRL_STATUS));

		/* Verify the device INIT state, break if complete. */
		if (CNF_GET_INIT_COMP(reg)) {
			break;
		}

		udelay(CNF_DEF_DELAY_US);
	} while (--timeout_count != 0);

	if (timeout_count == 0) {
		ERROR("CNF: Device Discovery Process timed out\n");
		return -ETIMEDOUT;
	}

	INFO("CNF: Device Discovery Process is completed\n");
	ret = cdns_nand_update_dev_info();
	if (ret != 0) {
		return ret;
	}

	/* Status polling mode, device control and status register. */
	cdns_nand_wait_idle();
	mmio_clrbits_32(CNF_CTRLCFG(RDST_CTRL_0), BIT(0));

	/* Write protect. */
	cdns_nand_wait_idle();
	mmio_setbits_32(CNF_MINICTRL(WP_SETTINGS), BIT(0));

	/* Set operation work mode */
	cdns_nand_set_opr_mode();

	/* Set data transfer configuration parameters */
	cdns_nand_transfer_config();

	return 0;
}

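/*
 * Bring-up sketch (illustrative): a platform port would typically
 * initialize the host first, then reset the device on the default thread
 * before issuing any read or erase:
 *
 *	if (cdns_nand_host_init() != 0) {
 *		return -ENODEV;
 *	}
 *	if (cdns_nand_reset(CNF_DEF_TRD) != 0) {
 *		return -ENODEV;
 *	}
 */
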
/* erase: Block erase command */
int cdns_nand_erase(uint32_t offset, uint32_t size)
{
	/* Determine the starting block offset, i.e. the row address */
	uint32_t row_address = dev_info.npages_per_block * offset;

	/* Wait for thread to be in ready state */
	cdns_nand_wait_thread_ready(CNF_DEF_TRD);

	/* Set row address */
	mmio_write_32(CNF_CMDREG(CMD_REG1), row_address);

	/* Operation bank number */
	mmio_write_32(CNF_CMDREG(CMD_REG4), (CNF_DEF_DEVICE << CNF_CMDREG4_MEM));

	/* Block erase command */
	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);

	reg |= (CNF_DEF_TRD << CNF_CMDREG0_TRD);
	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
	reg |= (CNF_CT_ERASE << CNF_CMDREG0_CMD);
	reg |= (((size - 1) & 0xFF) << CNF_CMDREG0_CMD);
	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);

	/* Wait for erase operation to complete */
	return cdns_nand_last_opr_status(CNF_DEF_TRD);
}

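/*
 * Usage sketch for cdns_nand_erase() (illustrative; the parameter
 * semantics are inferred from the row-address computation above, where
 * `offset` is a block index and `size` a block count): erase two
 * consecutive blocks starting at block 10.
 *
 *	if (cdns_nand_erase(10U, 2U) != 0) {
 *		ERROR("CNF: block erase failed\n");
 *	}
 */
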
/* io mtd functions */
int cdns_nand_init_mtd(unsigned long long *size, unsigned int *erase_size)
{
	*size = dev_info.total_size;
	*erase_size = dev_info.block_size;

	return 0;
}

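/*
 * Registration sketch (assumes TF-A's io_mtd layer; exact field names may
 * differ between TF-A versions): a platform can plug these callbacks into
 * an io_mtd device specification so the IO framework drives the NAND
 * through this driver.
 *
 *	static io_mtd_dev_spec_t nand_dev_spec = {
 *		.ops = {
 *			.init = cdns_nand_init_mtd,
 *			.read = cdns_nand_read,
 *		},
 *	};
 */
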
static uint32_t cdns_nand_get_row_address(uint32_t page, uint32_t block)
{
	uint32_t row_address = 0U;
	uint32_t req_bits = 0U;

	/* The device info is not populated yet. */
	if (dev_info.npages_per_block == 0U) {
		return 0;
	}

	/* Find the number of bits needed to address a page within a block. */
	for (uint32_t i = 0U; i < sizeof(uint32_t) * 8; i++) {
		if (((1U << i) & dev_info.npages_per_block) != 0U) {
			req_bits = i;
		}
	}

	row_address = ((page & GENMASK_32((req_bits - 1), 0)) |
			(block << req_bits));

	return row_address;
}

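/*
 * Worked example for cdns_nand_get_row_address() (illustrative): with
 * 64 pages per block, the loop above finds req_bits = 6, so the row
 * address packs the page index into the low six bits and the block index
 * above them:
 *
 *	row = (block << 6) | (page & 0x3F);
 */
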
/* NAND Flash page read */
static int cdns_nand_read_page(uint32_t block, uint32_t page, uintptr_t buffer)
{
	/* Wait for thread to be ready */
	cdns_nand_wait_thread_ready(CNF_DEF_TRD);

	/* Select device */
	mmio_write_32(CNF_CMDREG(CMD_REG4),
			(CNF_DEF_DEVICE << CNF_CMDREG4_MEM));

	/* Set host memory address for DMA transfers */
	mmio_write_32(CNF_CMDREG(CMD_REG2), (buffer & UINT32_MAX));
	mmio_write_32(CNF_CMDREG(CMD_REG3), ((buffer >> 32) & UINT32_MAX));

	/* Set row address */
	mmio_write_32(CNF_CMDREG(CMD_REG1),
			cdns_nand_get_row_address(page, block));

	/* Page read command */
	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);

	reg |= (CNF_DEF_TRD << CNF_CMDREG0_TRD);
	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
	reg |= (CNF_DMA_MASTER_SEL << CNF_CMDREG0_DMA);
	reg |= (CNF_CT_PAGE_READ << CNF_CMDREG0_CMD);
	reg |= (((CNF_READ_SINGLE_PAGE - 1) & 0xFF) << CNF_CMDREG0_CMD);
	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);

	/* Wait for read operation to complete */
	if (cdns_nand_last_opr_status(CNF_DEF_TRD) != 0) {
		ERROR("%s: Page read failed\n", __func__);
		return -EIO;
	}

	return 0;
}

int cdns_nand_read(unsigned int offset, uintptr_t buffer, size_t length,
					size_t *out_length)
{
	uint32_t block = offset / dev_info.block_size;
	uint32_t end_block = (offset + length - 1U) / dev_info.block_size;
	uint32_t page_start = (offset % dev_info.block_size) / dev_info.page_size;
	uint32_t start_offset = offset % dev_info.page_size;
	uint32_t nb_pages = dev_info.block_size / dev_info.page_size;
	uint32_t bytes_read = 0U;
	uint32_t page = 0U;
	int result = 0;

	INFO("CNF: %s: block %u-%u, page_start %u, len %zu, offset %u\n",
		__func__, block, end_block, page_start, length, offset);

	if ((length == 0U) ||
		(offset >= dev_info.total_size) ||
		((offset + length - 1U) >= dev_info.total_size)) {
		ERROR("CNF: Invalid read parameters\n");
		return -EINVAL;
	}

	*out_length = 0UL;

	while (block <= end_block) {
		for (page = page_start; page < nb_pages; page++) {
			if ((start_offset != 0U) || (length < dev_info.page_size)) {
				/* Partial page read via the aligned scratch buffer */
				result = cdns_nand_read_page(block, page,
							(uintptr_t)scratch_buff);
				if (result != 0) {
					return result;
				}

				bytes_read = MIN((size_t)(dev_info.page_size - start_offset),
								length);

				memcpy((uint8_t *)buffer, scratch_buff + start_offset,
						bytes_read);
				start_offset = 0U;
			} else {
				/* Full page read via the aligned scratch buffer */
				result = cdns_nand_read_page(block, page,
							(uintptr_t)scratch_buff);
				if (result != 0) {
					return result;
				}

				bytes_read = dev_info.page_size;
				memcpy((uint8_t *)buffer, scratch_buff, bytes_read);
			}

			length -= bytes_read;
			buffer += bytes_read;
			*out_length += bytes_read;

			/* All requested bytes have been read */
			if (length == 0U) {
				break;
			}

			udelay(CNF_READ_INT_DELAY_US);
		} /* for */

		page_start = 0U;
		block++;
	} /* while */

	return 0;
}
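
/*
 * Usage sketch for cdns_nand_read() (illustrative): read 4 KiB starting
 * at byte offset 1 MiB into a caller-supplied buffer; `read_len` reports
 * the number of bytes actually copied out.
 *
 *	size_t read_len;
 *	static uint8_t buf[4096U];
 *
 *	if (cdns_nand_read(0x100000U, (uintptr_t)buf, sizeof(buf),
 *			   &read_len) != 0) {
 *		ERROR("CNF: NAND read failed\n");
 *	}
 */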