// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * mtd vendor storage
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/rockchip/rk_vendor_storage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <misc/rkflash_vendor_storage.h>

#define MTD_VENDOR_PART_START		0
#define MTD_VENDOR_PART_SIZE		FLASH_VENDOR_PART_SIZE
#define MTD_VENDOR_NOR_BLOCK_SIZE	128
#define MTD_VENDOR_PART_NUM		1
#define MTD_VENDOR_TAG			VENDOR_HEAD_TAG

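/*
 * Bookkeeping for the currently active vendor block on flash:
 * @blk_offset is the byte offset of the erase block holding the newest
 * copy, @page_offset is the next free page-aligned slot inside it,
 * @version mirrors the highest sequence number found on flash, and
 * @ops_size is sizeof(*g_vendor) rounded up to a whole write page.
 */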
struct mtd_nand_info {
	u32 blk_offset;
	u32 page_offset;
	u32 version;
	u32 ops_size;
};

#ifdef CONFIG_ROCKCHIP_VENDOR_STORAGE_UPDATE_LOADER
#define READ_SECTOR_IO		_IOW('r', 0x04, unsigned int)
#define WRITE_SECTOR_IO		_IOW('r', 0x05, unsigned int)
#define END_WRITE_SECTOR_IO	_IOW('r', 0x52, unsigned int)
#define GET_FLASH_INFO_IO	_IOW('r', 0x1A, unsigned int)
#define GET_BAD_BLOCK_IO	_IOW('r', 0x03, unsigned int)
#define GET_LOCK_FLAG_IO	_IOW('r', 0x53, unsigned int)
#endif

static u8 *g_idb_buffer;
static struct flash_vendor_info *g_vendor;
static DEFINE_MUTEX(vendor_ops_mutex);
static struct mtd_info *mtd;
static u32 mtd_erase_size;
static const char *vendor_mtd_name = "vnvm";
static struct mtd_nand_info nand_info;
static struct platform_device *g_pdev;

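/*
 * Flush the in-memory vendor image to flash. Two consecutive copies are
 * programmed for reliability; when the current erase block is full, or a
 * write or erase fails, the code skips ahead to the next good block,
 * erases it, and retries from the start of that block.
 */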
static int mtd_vendor_nand_write(void)
{
	size_t bytes_write;
	int err, count = 0;
	struct erase_info ei;

re_write:
	if (nand_info.page_offset >= mtd_erase_size) {
		nand_info.blk_offset += mtd_erase_size;
		if (nand_info.blk_offset >= mtd->size)
			nand_info.blk_offset = 0;
		if (mtd_block_isbad(mtd, nand_info.blk_offset))
			goto re_write;

		memset(&ei, 0, sizeof(struct erase_info));
		ei.addr = nand_info.blk_offset;
		ei.len = mtd_erase_size;
		if (mtd_erase(mtd, &ei))
			goto re_write;

		nand_info.page_offset = 0;
	}

	err = mtd_write(mtd, nand_info.blk_offset + nand_info.page_offset,
			nand_info.ops_size, &bytes_write, (u8 *)g_vendor);
	nand_info.page_offset += nand_info.ops_size;
	if (err)
		goto re_write;

	count++;
	/* write 2 copies for reliability */
	if (count < 2)
		goto re_write;

	return 0;
}

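/*
 * Scan the "vnvm" MTD partition for the newest valid vendor image: pick
 * the good erase block whose header carries the highest version, then
 * walk that block backwards to find the last programmed copy. If no
 * valid image exists, format the partition and write a fresh, empty one.
 */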
static int mtd_vendor_storage_init(void)
{
	int err, offset;
	size_t bytes_read;
	struct erase_info ei;

	mtd = get_mtd_device_nm(vendor_mtd_name);
	if (IS_ERR(mtd))
		return -EIO;

	nand_info.page_offset = 0;
	nand_info.blk_offset = 0;
	nand_info.version = 0;
	nand_info.ops_size = DIV_ROUND_UP(sizeof(*g_vendor), mtd->writesize);
	nand_info.ops_size *= mtd->writesize;

	/*
	 * The NOR flash erase size may be configured as small as 4 KiB;
	 * enforce a larger minimum here and keep it consistent with U-Boot.
	 */
	mtd_erase_size = mtd->erasesize;
	if (mtd_erase_size <= MTD_VENDOR_NOR_BLOCK_SIZE * 512)
		mtd_erase_size = MTD_VENDOR_NOR_BLOCK_SIZE * 512;

	for (offset = 0; offset < mtd->size; offset += mtd_erase_size) {
		if (!mtd_block_isbad(mtd, offset)) {
			err = mtd_read(mtd, offset, sizeof(*g_vendor),
				       &bytes_read, (u8 *)g_vendor);
			if (err && err != -EUCLEAN)
				continue;
			if (bytes_read == sizeof(*g_vendor) &&
			    g_vendor->tag == MTD_VENDOR_TAG &&
			    g_vendor->version == g_vendor->version2) {
				if (g_vendor->version > nand_info.version) {
					nand_info.version = g_vendor->version;
					nand_info.blk_offset = offset;
				}
			}
		} else if (nand_info.blk_offset == offset) {
			nand_info.blk_offset += mtd_erase_size;
		}
	}

	if (nand_info.version) {
		for (offset = mtd_erase_size - nand_info.ops_size;
		     offset >= 0;
		     offset -= nand_info.ops_size) {
			err = mtd_read(mtd, nand_info.blk_offset + offset,
				       sizeof(*g_vendor),
				       &bytes_read,
				       (u8 *)g_vendor);

			/* the page is not programmed */
			if (!err && bytes_read == sizeof(*g_vendor) &&
			    g_vendor->tag == 0xFFFFFFFF &&
			    g_vendor->version == 0xFFFFFFFF &&
			    g_vendor->version2 == 0xFFFFFFFF)
				continue;

			/* point to the next free page */
			if (nand_info.page_offset < offset)
				nand_info.page_offset = offset + nand_info.ops_size;

			/* ecc error or io error */
			if (err && err != -EUCLEAN)
				continue;

			if (bytes_read == sizeof(*g_vendor) &&
			    g_vendor->tag == MTD_VENDOR_TAG &&
			    g_vendor->version == g_vendor->version2) {
				if (nand_info.version > g_vendor->version)
					g_vendor->version = nand_info.version;
				else
					nand_info.version = g_vendor->version;
				break;
			}
		}
	} else {
		memset((u8 *)g_vendor, 0, sizeof(*g_vendor));
		g_vendor->version = 1;
		g_vendor->tag = MTD_VENDOR_TAG;
		g_vendor->free_size = sizeof(g_vendor->data);
		g_vendor->version2 = g_vendor->version;
		for (offset = 0; offset < mtd->size; offset += mtd_erase_size) {
			if (!mtd_block_isbad(mtd, offset)) {
				memset(&ei, 0, sizeof(struct erase_info));
				ei.addr = nand_info.blk_offset + offset;
				ei.len = mtd_erase_size;
				mtd_erase(mtd, &ei);
			}
		}
		mtd_vendor_nand_write();
	}

	return 0;
}

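/*
 * Look up item @id in the in-memory vendor image and copy at most @size
 * bytes into @pbuf. Returns the number of bytes copied, or -1 if the id
 * is not present.
 */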
static int mtd_vendor_read(u32 id, void *pbuf, u32 size)
{
	u32 i;

	if (!g_vendor)
		return -ENOMEM;

	for (i = 0; i < g_vendor->item_num; i++) {
		if (g_vendor->item[i].id == id) {
			if (size > g_vendor->item[i].size)
				size = g_vendor->item[i].size;
			memcpy(pbuf,
			       &g_vendor->data[g_vendor->item[i].offset],
			       size);
			return size;
		}
	}
	return -1;
}

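/*
 * Create or update item @id with @size bytes from @pbuf. Payloads are
 * packed at 64-byte granularity; if an existing item grows beyond its
 * current allocation, all later items are compacted downwards to make
 * room at the end of the data area. Every successful update bumps the
 * version pair and rewrites the image via mtd_vendor_nand_write().
 */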
static int mtd_vendor_write(u32 id, void *pbuf, u32 size)
{
	u32 i, j, align_size, alloc_size, item_num;
	u32 offset, next_size;
	u8 *p_data;
	struct vendor_item *item;
	struct vendor_item *next_item;

	if (!g_vendor)
		return -ENOMEM;

	p_data = g_vendor->data;
	item_num = g_vendor->item_num;
	align_size = ALIGN(size, 0x40); /* align to 64 bytes */
	for (i = 0; i < item_num; i++) {
		item = &g_vendor->item[i];
		if (item->id == id) {
			alloc_size = ALIGN(item->size, 0x40);
			if (size > alloc_size) {
				if (g_vendor->free_size < align_size)
					return -1;
				offset = item->offset;
				for (j = i; j < item_num - 1; j++) {
					item = &g_vendor->item[j];
					next_item = &g_vendor->item[j + 1];
					item->id = next_item->id;
					item->size = next_item->size;
					item->offset = offset;
					next_size = ALIGN(next_item->size,
							  0x40);
					memcpy(&p_data[offset],
					       &p_data[next_item->offset],
					       next_size);
					offset += next_size;
				}
				item = &g_vendor->item[j];
				item->id = id;
				item->offset = offset;
				item->size = size;
				memcpy(&p_data[item->offset], pbuf, size);
				g_vendor->free_offset = offset + align_size;
				g_vendor->free_size = sizeof(g_vendor->data) - g_vendor->free_offset;
			} else {
				memcpy(&p_data[item->offset],
				       pbuf,
				       size);
				g_vendor->item[i].size = size;
			}
			g_vendor->version++;
			g_vendor->version2 = g_vendor->version;
			mtd_vendor_nand_write();
			return 0;
		}
	}

	if (g_vendor->free_size >= align_size) {
		item = &g_vendor->item[g_vendor->item_num];
		item->id = id;
		item->offset = g_vendor->free_offset;
		item->size = size;
		g_vendor->free_offset += align_size;
		g_vendor->free_size -= align_size;
		memcpy(&g_vendor->data[item->offset], pbuf, size);
		g_vendor->item_num++;
		g_vendor->version++;
		g_vendor->version2 = g_vendor->version;
		mtd_vendor_nand_write();
		return 0;
	}
	return -1;
}

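/* No per-open state is needed; open and release are stubs. */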
static int vendor_storage_open(struct inode *inode, struct file *file)
{
	return 0;
}

static int vendor_storage_release(struct inode *inode, struct file *file)
{
	return 0;
}

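/*
 * ioctl interface for /dev/vendor_storage. Userspace passes a
 * struct RK_VENDOR_REQ; the 8-byte header (tag, id, len) is copied in
 * first to validate the request, then the payload is read from or
 * written back to the user buffer. A single 4 KiB bounce buffer bounds
 * the request size.
 */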
static long vendor_storage_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	long ret = -1;
	int size;
	struct RK_VENDOR_REQ *v_req;
	u32 *page_buf;

	page_buf = kmalloc(4096, GFP_KERNEL);
	if (!page_buf)
		return -ENOMEM;

	mutex_lock(&vendor_ops_mutex);

	v_req = (struct RK_VENDOR_REQ *)page_buf;

	switch (cmd) {
	case VENDOR_READ_IO:
	{
		if (copy_from_user(page_buf, (void __user *)arg, 8)) {
			ret = -EFAULT;
			break;
		}
		if (v_req->tag == VENDOR_REQ_TAG) {
			size = mtd_vendor_read(v_req->id, v_req->data,
					       v_req->len);
			if (size != -1) {
				v_req->len = size;
				ret = 0;
				if (copy_to_user((void __user *)arg,
						 page_buf,
						 v_req->len + 8))
					ret = -EFAULT;
			}
		}
	} break;

	case VENDOR_WRITE_IO:
	{
		if (copy_from_user(page_buf, (void __user *)arg, 8)) {
			ret = -EFAULT;
			break;
		}
		if (v_req->tag == VENDOR_REQ_TAG && (v_req->len < 4096 - 8)) {
			if (copy_from_user(page_buf, (void __user *)arg,
					   v_req->len + 8)) {
				ret = -EFAULT;
				break;
			}
			ret = mtd_vendor_write(v_req->id,
					       v_req->data,
					       v_req->len);
		}
	} break;

	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&vendor_ops_mutex);
	kfree(page_buf);
	return ret;
}


static const struct file_operations vendor_storage_fops = {
	.open = vendor_storage_open,
	.compat_ioctl = vendor_storage_ioctl,
	.unlocked_ioctl = vendor_storage_ioctl,
	.release = vendor_storage_release,
};

static struct miscdevice vendor_storage_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vendor_storage",
	.fops = &vendor_storage_fops,
};

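/*
 * Probe defers until the "vnvm" MTD partition has been registered, then
 * allocates the image buffer, loads (or formats) the vendor storage, and
 * exposes it through the misc device and the rk_vendor_register() hooks.
 */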
static int vendor_storage_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	mtd = get_mtd_device_nm(vendor_mtd_name);
	if (IS_ERR(mtd))
		return -EPROBE_DEFER;

	g_vendor = devm_kmalloc(dev, sizeof(*g_vendor), GFP_KERNEL | GFP_DMA);
	if (!g_vendor)
		return -ENOMEM;

	ret = mtd_vendor_storage_init();
	if (ret) {
		g_vendor = NULL;
		return ret;
	}

	ret = misc_register(&vendor_storage_dev);
	rk_vendor_register(mtd_vendor_read, mtd_vendor_write);

	pr_info("mtd vendor storage:20200313 ret = %d\n", ret);

	return ret;
}

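/* Unregister the misc device; g_vendor itself is devm-managed. */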
static int vendor_storage_remove(struct platform_device *pdev)
{
	if (g_vendor) {
		misc_deregister(&vendor_storage_dev);
		g_vendor = NULL;
	}

	return 0;
}


static const struct platform_device_id vendor_storage_ids[] = {
	{ "mtd_vendor_storage", },
	{ }
};

static struct platform_driver vendor_storage_driver = {
	.probe = vendor_storage_probe,
	.remove = vendor_storage_remove,
	.driver = {
		.name = "mtd_vendor_storage",
	},
	.id_table = vendor_storage_ids,
};

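/*
 * There is no device-tree binding for this driver, so module init
 * registers both the platform driver and a matching platform device
 * by name.
 */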
static int __init vendor_storage_init(void)
{
	struct platform_device *pdev;
	int ret;

	g_idb_buffer = NULL;
	ret = platform_driver_register(&vendor_storage_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple("mtd_vendor_storage",
					       -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&vendor_storage_driver);
		return PTR_ERR(pdev);
	}
	g_pdev = pdev;

	return ret;
}

static void __exit vendor_storage_deinit(void)
{
	platform_device_unregister(g_pdev);
	platform_driver_unregister(&vendor_storage_driver);
}

device_initcall_sync(vendor_storage_init);
module_exit(vendor_storage_deinit);
MODULE_LICENSE("GPL");