/*
 * Copyright (c) 2019, STMicroelectronics - All Rights Reserved
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>

#include <platform_def.h>

#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/nand.h>
#include <lib/utils.h>

/*
 * Single nand_device instance shared by the specific NAND framework drivers.
 */
static struct nand_device nand_dev;
static uint8_t scratch_buff[PLATFORM_MTD_MAX_PAGE_SIZE];

/*
 * Read 'length' bytes starting at NAND byte 'offset' into 'buffer'.
 * Bad blocks are skipped and the read window shifted accordingly; reads
 * that are not page-aligned go through the scratch buffer. On return,
 * '*length_read' holds the number of bytes actually copied out.
 */
int nand_read(unsigned int offset, uintptr_t buffer, size_t length,
	      size_t *length_read)
{
	unsigned int block = offset / nand_dev.block_size;
	unsigned int end_block = (offset + length - 1U) / nand_dev.block_size;
	unsigned int page_start =
		(offset % nand_dev.block_size) / nand_dev.page_size;
	unsigned int nb_pages = nand_dev.block_size / nand_dev.page_size;
	unsigned int start_offset = offset % nand_dev.page_size;
	unsigned int page;
	unsigned int bytes_read;
	int is_bad;
	int ret;

	VERBOSE("Block %u - %u, page_start %u, nb %u, length %zu, offset %u\n",
		block, end_block, page_start, nb_pages, length, offset);

	*length_read = 0UL;

	/* Unaligned accesses rely on the scratch buffer holding a full page */
	if (((start_offset != 0U) || (length % nand_dev.page_size) != 0U) &&
	    (sizeof(scratch_buff) < nand_dev.page_size)) {
		return -EINVAL;
	}

	while (block <= end_block) {
		is_bad = nand_dev.mtd_block_is_bad(block);
		if (is_bad < 0) {
			return is_bad;
		}

		if (is_bad == 1) {
			/* Skip the bad block and shift the read window */
			uint32_t max_block =
				nand_dev.size / nand_dev.block_size;

			block++;
			end_block++;
			if ((block < max_block) && (end_block < max_block)) {
				continue;
			}

			return -EIO;
		}

		for (page = page_start; page < nb_pages; page++) {
			if ((start_offset != 0U) ||
			    (length < nand_dev.page_size)) {
				/*
				 * Partial page: read it into the scratch
				 * buffer, then copy out the requested slice.
				 */
				ret = nand_dev.mtd_read_page(
						&nand_dev,
						(block * nb_pages) + page,
						(uintptr_t)scratch_buff);
				if (ret != 0) {
					return ret;
				}

				bytes_read = MIN((size_t)(nand_dev.page_size -
							  start_offset),
						 length);

				memcpy((uint8_t *)buffer,
				       scratch_buff + start_offset,
				       bytes_read);

				/* Only the first page can start mid-page */
				start_offset = 0U;
			} else {
				/* Full page: read straight into 'buffer' */
				ret = nand_dev.mtd_read_page(&nand_dev,
						(block * nb_pages) + page,
						buffer);
				if (ret != 0) {
					return ret;
				}

				bytes_read = nand_dev.page_size;
			}

			length -= bytes_read;
			buffer += bytes_read;
			*length_read += bytes_read;

			if (length == 0U) {
				break;
			}
		}

		page_start = 0U;
		block++;
	}

	return 0;
}

struct nand_device *get_nand_device(void)
{
	return &nand_dev;
}
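
/*
 * Illustrative usage sketch (an assumption, not part of this driver): a
 * framework-specific NAND driver is expected to populate the shared
 * nand_device before nand_read() is called. The field names match the ones
 * used above; the geometry values and the my_block_is_bad()/my_read_page()
 * hooks are hypothetical placeholders.
 *
 *	struct nand_device *dev = get_nand_device();
 *	size_t len_read;
 *
 *	dev->page_size = 2048U;
 *	dev->block_size = 64U * 2048U;
 *	dev->size = 1024ULL * 64U * 2048U;
 *	dev->mtd_block_is_bad = my_block_is_bad;
 *	dev->mtd_read_page = my_read_page;
 *
 *	if (nand_read(0U, (uintptr_t)dest, dev->page_size, &len_read) != 0) {
 *		ERROR("NAND read failed\n");
 *	}
 */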