// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Rockchip Electronics Co., Ltd
 *
 * Parts derived from drivers/block/brd.c, copyright
 * of their respective owners.
 */

#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>

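/* Number of 512-byte sectors per page, e.g. eight with 4 KiB pages. */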
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)

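/*
 * A rd_device is backed by one physically contiguous reserved-memory
 * region; mem_addr and mem_size describe that region.
 */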
struct rd_device {
	struct request_queue *rd_queue;
	struct gendisk *rd_disk;

	struct device *dev;
	phys_addr_t mem_addr;
	size_t mem_size;
};

static int rd_major;

/*
 * Look up and return a rd's page for a given sector.
 */
static struct page *rd_lookup_page(struct rd_device *rd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
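	/*
	 * The backing store is a single flat physical region, so the
	 * page is found by plain offset arithmetic. phys_to_page()
	 * requires the region to be covered by the kernel's memmap,
	 * i.e. the reserved-memory node must not be "no-map".
	 */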
	page = phys_to_page(rd->mem_addr + (idx << PAGE_SHIFT));
	BUG_ON(!page);

	return page;
}

/*
 * Copy n bytes from src to the rd starting at sector. Does not sleep.
 */
static void copy_to_rd(struct rd_device *rd, const void *src, sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = rd_lookup_page(rd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

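	/* The copy crossed a page boundary; finish in the next page. */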
	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = rd_lookup_page(rd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the rd starting at sector. Does not sleep.
 */
static void copy_from_rd(void *dst, struct rd_device *rd, sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = rd_lookup_page(rd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else {
		memset(dst, 0, copy);
	}

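	/* The copy crossed a page boundary; finish from the next page. */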
	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = rd_lookup_page(rd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else {
			memset(dst, 0, copy);
		}
	}
}

/*
 * Process a single bvec of a bio.
 */
static int rd_do_bvec(struct rd_device *rd, struct page *page, unsigned int len,
		      unsigned int off, unsigned int op, sector_t sector)
{
	void *mem;

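	/*
	 * Map the bio page and bounce data through the backing store;
	 * flush the dcache so user mappings of the page stay coherent.
	 */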
	mem = kmap_atomic(page);
	if (!op_is_write(op)) {
		copy_from_rd(mem + off, rd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_rd(rd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

	return 0;
}

static blk_qc_t rd_make_request(struct request_queue *q, struct bio *bio)
{
	struct rd_device *rd = bio->bi_disk->private_data;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;

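	/*
	 * Fail bios that run past the end of the device, then bounce
	 * each segment through the backing store.
	 */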
	sector = bio->bi_iter.bi_sector;
	if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) {
		goto io_error;
	}

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		err = rd_do_bvec(rd, bvec.bv_page, len, bvec.bv_offset, bio_op(bio), sector);
		if (err) {
			goto io_error;
		}
		sector += len >> SECTOR_SHIFT;
	}

	bio_endio(bio);
	return BLK_QC_T_NONE;
io_error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

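/*
 * Synchronous ->rw_page() fast path (used by swap and similar callers);
 * transparent huge pages are not supported here.
 */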
static int rd_rw_page(struct block_device *bdev, sector_t sector, struct page *page,
		      unsigned int op)
{
	struct rd_device *rd = bdev->bd_disk->private_data;
	int err;

	if (PageTransHuge(page)) {
		return -ENOTSUPP;
	}
	err = rd_do_bvec(rd, page, PAGE_SIZE, 0, op, sector);
	page_endio(page, op_is_write(op), err);
	return err;
}

static const struct block_device_operations rd_fops = {
	.owner = THIS_MODULE,
	.rw_page = rd_rw_page,
};

static int rd_init(struct rd_device *rd, int major, int minor)
{
	struct gendisk *disk;

	rd->rd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!rd->rd_queue) {
		return -ENOMEM;
	}

	blk_queue_make_request(rd->rd_queue, rd_make_request);
	blk_queue_max_hw_sectors(rd->rd_queue, 0x400);

	/*
	 * Use a 4k physical block size so that fdisk aligns partitions
	 * on 4k, which the direct_access API (returning a PFN) needs.
	 * This only matters on very small devices (<= 4M); otherwise
	 * fdisk aligns on 1M anyway, so the call is harmless.
	 */
	blk_queue_physical_block_size(rd->rd_queue, PAGE_SIZE);
	disk = alloc_disk(1);
	if (!disk) {
		goto out_free_queue;
	}
	disk->major = major;
	disk->first_minor = 0;
	disk->fops = &rd_fops;
	disk->private_data = rd;
	disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "rd%d", minor);
	set_capacity(disk, rd->mem_size >> SECTOR_SHIFT);
	rd->rd_disk = disk;
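	/* I/O completes synchronously in the submitter's context. */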
	rd->rd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;

	/* Tell the block layer that this is not a rotational device */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, rd->rd_queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rd->rd_queue);

	rd->rd_disk->queue = rd->rd_queue;
	add_disk(rd->rd_disk);

	return 0;

out_free_queue:
	blk_cleanup_queue(rd->rd_queue);
	return -ENOMEM;
}

static int rd_probe(struct platform_device *pdev)
{
	struct rd_device *rd;
	struct device *dev = &pdev->dev;
	struct device_node *node;
	struct resource reg;
	int ret;

	rd = devm_kzalloc(dev, sizeof(*rd), GFP_KERNEL);
	if (!rd) {
		return -ENOMEM;
	}

	rd->dev = dev;
	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!node) {
		dev_err(dev, "missing \"memory-region\" property\n");
		return -ENODEV;
	}

	ret = of_address_to_resource(node, 0, &reg);
	of_node_put(node);
	if (ret) {
		dev_err(dev, "missing \"reg\" property\n");
		return -ENODEV;
	}

	rd->mem_addr = reg.start;
	rd->mem_size = resource_size(&reg);

	return rd_init(rd, rd_major, 0);
}

static const struct of_device_id rd_dt_match[] = {
	{ .compatible = "rockchip,ramdisk" },
	{},
};
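
/*
 * A minimal, hypothetical device-tree fragment (node names, labels and
 * addresses are examples only). The reserved region must remain part
 * of the kernel's linear mapping (no "no-map"), since rd_lookup_page()
 * relies on phys_to_page():
 *
 *	reserved-memory {
 *		ramdisk_reserved: ramdisk@10000000 {
 *			reg = <0x10000000 0x800000>;
 *		};
 *	};
 *
 *	ramdisk {
 *		compatible = "rockchip,ramdisk";
 *		memory-region = <&ramdisk_reserved>;
 *	};
 */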

static struct platform_driver rd_driver = {
	.driver = {
		.name = "rd",
		.of_match_table = rd_dt_match,
	},
	.probe = rd_probe,
};

static int __init rd_driver_init(void)
{
	int ret;

	ret = register_blkdev(0, "rd");
	if (ret < 0) {
		return ret;
	}
	rd_major = ret;

	ret = platform_driver_register(&rd_driver);
	if (ret) {
		/* Don't leak the major number if registration fails. */
		unregister_blkdev(rd_major, "rd");
	}

	return ret;
}
subsys_initcall_sync(rd_driver_init);

MODULE_LICENSE("GPL");