// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Rockchip Electronics Co., Ltd
 *
 * Parts derived from drivers/block/brd.c, copyright
 * of their respective owners.
 */

#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/pfn_t.h>
#include <linux/platform_device.h>
#include <linux/uio.h>

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)

struct rd_device {
	struct request_queue *rd_queue;
	struct gendisk *rd_disk;

	struct device *dev;
	phys_addr_t mem_addr;		/* physical base of the reserved region */
	size_t mem_size;		/* size of the region in bytes */
	size_t mem_pages;		/* size of the region in pages */
	void *mem_kaddr;		/* kernel virtual address of the region */
	struct dax_device *dax_dev;
};

static int rd_major;

/*
 * Look up and return the rd page backing a given sector.
 */
static struct page *rd_lookup_page(struct rd_device *rd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	idx = sector >> PAGE_SECTORS_SHIFT;	/* sector to page index */
	page = phys_to_page(rd->mem_addr + (idx << PAGE_SHIFT));
	BUG_ON(!page);

	return page;
}

/*
 * Copy n bytes from src to the rd starting at sector. Does not sleep.
 */
static void copy_to_rd(struct rd_device *rd, const void *src,
		       sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = rd_lookup_page(rd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);
	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = rd_lookup_page(rd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the rd starting at sector. Does not sleep.
 */
static void copy_from_rd(void *dst, struct rd_device *rd,
			 sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = rd_lookup_page(rd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else {
		memset(dst, 0, copy);
	}

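	/* The read straddles a page boundary; copy the remainder from the next page */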
	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = rd_lookup_page(rd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else {
			memset(dst, 0, copy);
		}
	}
}

/*
 * Process a single bvec of a bio.
 */
static int rd_do_bvec(struct rd_device *rd, struct page *page,
		      unsigned int len, unsigned int off, unsigned int op,
		      sector_t sector)
{
	void *mem;

	mem = kmap_atomic(page);
	if (!op_is_write(op)) {
		copy_from_rd(mem + off, rd, sector, len);
		/* Make the newly read data visible to user mappings */
		flush_dcache_page(page);
	} else {
		/* Pick up data written through user mappings before reading the page */
		flush_dcache_page(page);
		copy_to_rd(rd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

	return 0;
}

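/*
 * Handle a submitted bio by copying each segment to or from the
 * reserved memory region. Fails the bio if it reaches past the end
 * of the device.
 */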
static blk_qc_t rd_submit_bio(struct bio *bio)
{
	struct rd_device *rd = bio->bi_disk->private_data;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;
	if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
		goto io_error;

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		/* Buffers that are not sector-aligned are not supported */
		WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) ||
			     (len & (SECTOR_SIZE - 1)));

		err = rd_do_bvec(rd, bvec.bv_page, len, bvec.bv_offset,
				 bio_op(bio), sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
	}

	bio_endio(bio);
	return BLK_QC_T_NONE;
io_error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

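/*
 * Synchronous single-page read/write, used by the page cache for things
 * like swap. Transparent huge pages are not supported.
 */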
static int rd_rw_page(struct block_device *bdev, sector_t sector,
		      struct page *page, unsigned int op)
{
	struct rd_device *rd = bdev->bd_disk->private_data;
	int err;

	if (PageTransHuge(page))
		return -ENOTSUPP;
	err = rd_do_bvec(rd, page, PAGE_SIZE, 0, op, sector);
	page_endio(page, op_is_write(op), err);
	return err;
}

static const struct block_device_operations rd_fops = {
	.owner = THIS_MODULE,
	.submit_bio = rd_submit_bio,
	.rw_page = rd_rw_page,
};

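/*
 * Translate a page offset within the device to a kernel virtual address
 * and pfn, and return how many pages are addressable from that offset.
 * The region is mapped linearly, so this never fails for in-range offsets.
 */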
static long rd_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct rd_device *rd = dax_get_private(dax_dev);
	phys_addr_t offset = PFN_PHYS(pgoff);
	size_t max_nr_pages = rd->mem_pages - pgoff;

	if (kaddr)
		*kaddr = rd->mem_kaddr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(rd->mem_addr + offset, PFN_DEV | PFN_MAP);

	return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}

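/*
 * The backing memory is ordinary kernel-addressable RAM, so DAX works
 * for any block size and range.
 */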
static bool rd_dax_supported(struct dax_device *dax_dev,
			     struct block_device *bdev, int blocksize,
			     sector_t start, sector_t sectors)
{
	return true;
}

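/*
 * The region is plain cacheable memory, so the generic iov_iter copy
 * helpers are sufficient; no special flushing is required.
 */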
static size_t rd_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_from_iter(addr, bytes, i);
}

static size_t rd_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				  void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_to_iter(addr, bytes, i);
}

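/*
 * Zero nr_pages pages starting at pgoff through the direct mapping.
 */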
static int rd_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	long rc;
	void *kaddr;

	rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
	if (rc < 0)
		return rc;
	memset(kaddr, 0, nr_pages << PAGE_SHIFT);

	return 0;
}

static const struct dax_operations rd_dax_ops = {
	.direct_access = rd_dax_direct_access,
	.dax_supported = rd_dax_supported,
	.copy_from_iter = rd_dax_copy_from_iter,
	.copy_to_iter = rd_dax_copy_to_iter,
	.zero_page_range = rd_dax_zero_page_range,
};

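/*
 * Create the request queue, gendisk and DAX device on top of the
 * reserved memory region, then register the disk.
 */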
static int rd_init(struct rd_device *rd, int major, int minor)
{
	int ret;
	struct gendisk *disk;

	rd->rd_queue = blk_alloc_queue(NUMA_NO_NODE);
	if (!rd->rd_queue)
		return -ENOMEM;

	/*
	 * This is so fdisk will align partitions on 4k, because of
	 * direct_access API needing 4k alignment, returning a PFN
	 * (This is only a problem on very small devices <= 4M,
	 * otherwise fdisk will align on 1M. Regardless this call
	 * is harmless)
	 */
	blk_queue_physical_block_size(rd->rd_queue, PAGE_SIZE);
	disk = alloc_disk(1);
	if (!disk) {
		ret = -ENOMEM;
		goto out_free_queue;
	}
	disk->major = major;
	disk->first_minor = 0;
	disk->fops = &rd_fops;
	disk->private_data = rd;
	disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "rd%d", minor);
	set_capacity(disk, rd->mem_size >> SECTOR_SHIFT);
	rd->rd_disk = disk;

	rd->mem_kaddr = phys_to_virt(rd->mem_addr);
	rd->mem_pages = PHYS_PFN(rd->mem_size);
	rd->dax_dev = alloc_dax(rd, disk->disk_name, &rd_dax_ops, DAXDEV_F_SYNC);
	if (IS_ERR(rd->dax_dev)) {
		ret = PTR_ERR(rd->dax_dev);
		dev_err(rd->dev, "alloc_dax failed %d\n", ret);
		rd->dax_dev = NULL;
		goto out_put_disk;
	}

	/* Tell the block layer that this is not a rotational device */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, rd->rd_queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rd->rd_queue);
	if (rd->dax_dev)
		blk_queue_flag_set(QUEUE_FLAG_DAX, rd->rd_queue);

	rd->rd_disk->queue = rd->rd_queue;
	add_disk(rd->rd_disk);

	return 0;

out_put_disk:
	put_disk(disk);
out_free_queue:
	blk_cleanup_queue(rd->rd_queue);
	return ret;
}

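/*
 * Look up the "memory-region" phandle in the device tree, translate it
 * to a physical address range, and bring up a ramdisk on top of it.
 */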
static int rd_probe(struct platform_device *pdev)
{
	struct rd_device *rd;
	struct device *dev = &pdev->dev;
	struct device_node *node;
	struct resource reg;
	int ret;

	rd = devm_kzalloc(dev, sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	rd->dev = dev;
	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!node) {
		dev_err(dev, "missing \"memory-region\" property\n");
		return -ENODEV;
	}

	ret = of_address_to_resource(node, 0, &reg);
	of_node_put(node);
	if (ret) {
		dev_err(dev, "missing \"reg\" property\n");
		return -ENODEV;
	}

	rd->mem_addr = reg.start;
	rd->mem_size = resource_size(&reg);

	ret = rd_init(rd, rd_major, 0);
	dev_info(dev, "0x%zx@%pa -> 0x%px dax:%d ret:%d\n",
		 rd->mem_size, &rd->mem_addr, rd->mem_kaddr, (bool)rd->dax_dev, ret);

	return ret;
}

static const struct of_device_id rd_dt_match[] = {
	{ .compatible = "rockchip,ramdisk" },
	{},
};

static struct platform_driver rd_driver = {
	.driver = {
		.name = "rd",
		.of_match_table = rd_dt_match,
	},
	.probe = rd_probe,
};

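/*
 * Register a dynamically allocated block major for "rd" devices, then
 * register the platform driver.
 */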
static int __init rd_driver_init(void)
{
	int ret;

	ret = register_blkdev(0, "rd");
	if (ret < 0)
		return ret;
	rd_major = ret;

	return platform_driver_register(&rd_driver);
}
subsys_initcall_sync(rd_driver_init);

MODULE_LICENSE("GPL");