/*
 * Copyright (C) 2016-2017 Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/arch/vendor.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/root.h>
#include "rknand.h"

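/*
 * Helper: return the blk_desc of the block device found as the first
 * child of the udevice recorded in @ndev, or NULL if none is bound.
 */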
struct blk_desc *rknand_get_blk_desc(struct rknand_dev *ndev)
{
	struct blk_desc *desc;
	struct udevice *dev;

	device_find_first_child(ndev->dev, &dev);
	if (!dev)
		return NULL;
	desc = dev_get_uclass_platdata(dev);

	return desc;
}

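/*
 * Block read op: reject zero-length or out-of-range requests, then hand
 * the transfer to the FTL read hook installed at probe time. Returns the
 * number of blocks read, 0 for rejected requests, or the FTL error code.
 */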
ulong rknand_bread(struct udevice *udev, lbaint_t start,
		   lbaint_t blkcnt, void *dst)
{
	struct blk_desc *block_dev = dev_get_uclass_platdata(udev);
	struct rknand_dev *ndev = dev_get_priv(udev->parent);
	int err;

	if (blkcnt == 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba)
		return 0;

	if (ndev->read == NULL)
		return 0;

	err = ndev->read(0, (u32)start, (u32)blkcnt, dst);
	if (err)
		return err;

	return blkcnt;
}

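/*
 * Block write op: same bounds checking as the read path, forwarded to
 * the FTL write hook.
 */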
ulong rknand_bwrite(struct udevice *udev, lbaint_t start,
		    lbaint_t blkcnt, const void *src)
{
	struct blk_desc *block_dev = dev_get_uclass_platdata(udev);
	struct rknand_dev *ndev = dev_get_priv(udev->parent);
	int err;

	if (blkcnt == 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba)
		return 0;

	if (ndev->write == NULL)
		return 0;

	err = ndev->write(0, (u32)start, (u32)blkcnt, src);
	if (err)
		return err;

	return blkcnt;
}

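/* Block erase op: forwarded to the FTL discard hook installed at probe time. */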
ulong rknand_berase(struct udevice *udev, lbaint_t start,
		    lbaint_t blkcnt)
{
	struct blk_desc *block_dev = dev_get_uclass_platdata(udev);
	struct rknand_dev *ndev = dev_get_priv(udev->parent);
	int err;

	if (blkcnt == 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba)
		return 0;

	if (ndev->erase == NULL)
		return 0;

	err = ndev->erase(0, (u32)start, (u32)blkcnt);
	if (err)
		return err;

	return blkcnt;
}

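/*
 * Vendor storage read callback, registered with the vendor partition
 * layer in rockchip_nand_probe(). ftl_vendor_read() is assumed to return
 * 0 on success; the callback then reports the number of sectors read.
 */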
int rkftl_nand_vendor_read(struct blk_desc *dev_desc,
			   u32 index,
			   u32 n_sec,
			   void *p_data)
{
	int ret;

	ret = ftl_vendor_read(index, n_sec, p_data);
	if (!ret)
		return n_sec;
	else
		return -EIO;
}

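/* Vendor storage write callback; mirrors rkftl_nand_vendor_read(). */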
int rkftl_nand_vendor_write(struct blk_desc *dev_desc,
			    u32 index,
			    u32 n_sec,
			    void *p_data)
{
	int ret;

	ret = ftl_vendor_write(index, n_sec, p_data);
	if (!ret)
		return n_sec;
	else
		return -EIO;
}

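/*
 * Probe every device in the RKNAND uclass so their block children are
 * created and the FTL is brought up.
 */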
int rknand_scan_namespace(void)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	ret = uclass_get(UCLASS_RKNAND, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		debug("%s %d %p\n", __func__, __LINE__, dev);
		ret = device_probe(dev);
		if (ret)
			return ret;
	}

	return 0;
}

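/* Bind a child block device ("rknand_blk") to the NAND controller. */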
static int rknand_blk_bind(struct udevice *udev)
{
	struct udevice *bdev;
	int ret;

	ret = blk_create_devicef(udev, "rknand_blk", "blk",
				 IF_TYPE_RKNAND,
				 0, 512, 0, &bdev);
	if (ret) {
		debug("Cannot create block device\n");
		return ret;
	}

	return 0;
}

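/*
 * Probe of the child block device: fill in the block descriptor
 * (capacity in 512-byte blocks, vendor/product/revision strings) and
 * scan it for a partition table.
 */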
static int rknand_blk_probe(struct udevice *udev)
{
	struct rknand_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);

	debug("%s %d %p ndev = %p %p\n", __func__, __LINE__,
	      udev, ndev, udev->parent);
	ndev->dev = udev;
	desc->if_type = IF_TYPE_RKNAND;
	desc->lba = ndev->density;
	desc->log2blksz = 9;
	desc->blksz = 512;
	desc->bdev = udev;
	desc->devnum = 0;
	sprintf(desc->vendor, "0x%.4x", 0x2207);
	memcpy(desc->product, "rknand", sizeof("rknand"));
	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
	part_init(desc);
	return 0;
}

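/*
 * Controller probe: initialise the FTL and, on success, record the
 * device capacity and hook up the FTL read/write/discard callbacks.
 * Vendor storage ops are only registered in non-SPL builds with
 * CONFIG_ROCKCHIP_VENDOR_PARTITION enabled.
 */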
static int rockchip_nand_probe(struct udevice *udev)
{
	int ret;
	struct rknand_dev *ndev = dev_get_priv(udev);

	ndev->ioaddr = dev_read_addr_ptr(udev);
	ret = rk_ftl_init(ndev->ioaddr);
	if (!ret) {
		ndev->density = ftl_get_density(0);
		ndev->read = ftl_read;
		ndev->write = ftl_write;
		ndev->erase = ftl_discard;
#ifndef CONFIG_SPL_BUILD
#ifdef CONFIG_ROCKCHIP_VENDOR_PARTITION
		flash_vendor_dev_ops_register(rkftl_nand_vendor_read,
					      rkftl_nand_vendor_write);
#endif
#endif
	}

	return ret;
}

static const struct blk_ops rknand_blk_ops = {
	.read = rknand_bread,
	.write = rknand_bwrite,
	.erase = rknand_berase,
};

static const struct udevice_id rockchip_nand_ids[] = {
	{ .compatible = "rockchip,rk-nandc" },
	{ }
};

U_BOOT_DRIVER(rknand_blk) = {
	.name = "rknand_blk",
	.id = UCLASS_BLK,
	.ops = &rknand_blk_ops,
	.probe = rknand_blk_probe,
};

UCLASS_DRIVER(rknand) = {
	.id = UCLASS_RKNAND,
	.name = "rknand",
	.flags = DM_UC_FLAG_SEQ_ALIAS,
	.per_device_auto_alloc_size = sizeof(struct rknand_uclass_priv),
};

U_BOOT_DRIVER(rknand) = {
	.name = "rknand",
	.id = UCLASS_RKNAND,
	.of_match = rockchip_nand_ids,
	.bind = rknand_blk_bind,
	.probe = rockchip_nand_probe,
	.priv_auto_alloc_size = sizeof(struct rknand_dev),
};