/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "linear"

/*
 * Linear: maps a linear range of a device.
 */
struct linear_c {
	struct dm_dev *dev;	/* underlying device backing the mapping */
	sector_t start;		/* offset into that device, in 512-byte sectors */
};

/*
 * Construct a linear mapping: <dev_path> <offset>
 */
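/*
 * Illustrative dmsetup table line (device and numbers are hypothetical):
 *
 *	0 409600 linear /dev/sdb 2048
 *
 * creates a 409600-sector target backed by /dev/sdb starting at sector
 * 2048, i.e. the constructor below sees argv[0] = "/dev/sdb" and
 * argv[1] = "2048".
 */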
static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct linear_c *lc;
	unsigned long long tmp;
	char dummy;
	int ret;

	if (argc != 2) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	lc = kmalloc(sizeof(*lc), GFP_KERNEL);
	if (lc == NULL) {
		ti->error = "Cannot allocate linear context";
		return -ENOMEM;
	}

	ret = -EINVAL;
	/* Reject trailing characters and values that do not fit in sector_t. */
	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	lc->start = tmp;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_secure_erase_bios = 1;
	ti->num_write_same_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->private = lc;
	return 0;

      bad:
	kfree(lc);
	return ret;
}

static void linear_dtr(struct dm_target *ti)
{
	struct linear_c *lc = (struct linear_c *) ti->private;

	dm_put_device(ti, lc->dev);
	kfree(lc);
}

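/*
 * Remap a target-relative sector onto the underlying device:
 * dm_target_offset() gives the offset within this target
 * (bi_sector - ti->begin), which is then shifted by lc->start.
 */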
static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct linear_c *lc = ti->private;

	return lc->start + dm_target_offset(ti, bi_sector);
}

static void linear_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct linear_c *lc = ti->private;

	bio_set_dev(bio, lc->dev->bdev);
	/*
	 * Empty flushes have no data sectors and need no remapping; zone
	 * management ops also carry no payload but still address a
	 * specific zone, so their sector must be remapped.
	 */
	if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
		bio->bi_iter.bi_sector =
			linear_map_sector(ti, bio->bi_iter.bi_sector);
}

static int linear_map(struct dm_target *ti, struct bio *bio)
{
	linear_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}

static void linear_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct linear_c *lc = (struct linear_c *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s %llu", lc->dev->name,
			 (unsigned long long)lc->start);
		break;
	}
}

static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct linear_c *lc = (struct linear_c *) ti->private;
	struct dm_dev *dev = lc->dev;

	*bdev = dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly:
	 * a forwarded ioctl acts on the whole underlying device, so this
	 * is only safe when the target maps all of it from offset 0.
	 */
	if (lc->start ||
	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int linear_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct linear_c *lc = ti->private;
	sector_t sector = linear_map_sector(ti, args->next_sector);

	args->start = lc->start;
	return blkdev_report_zones(lc->dev->bdev, sector, nr_zones,
				   dm_report_zones_cb, args);
}
#endif

static int linear_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct linear_c *lc = ti->private;

	return fn(ti, lc->dev, lc->start, ti->len, data);
}

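/*
 * DAX support: each helper below translates the target-relative page
 * offset to a sector on the underlying device, resolves it back to a
 * page offset within that device's dax_device, and then calls the
 * corresponding generic DAX routine.
 */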
#if IS_ENABLED(CONFIG_DAX_DRIVER)
static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	long ret;
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
	if (ret)
		return ret;
	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}

static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}

static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}

static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
				      size_t nr_pages)
{
	int ret;
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
	if (ret)
		return ret;
	return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}

#else
#define linear_dax_direct_access NULL
#define linear_dax_copy_from_iter NULL
#define linear_dax_copy_to_iter NULL
#define linear_dax_zero_page_range NULL
#endif

static struct target_type linear_target = {
	.name   = "linear",
	.version = {1, 4, 0},
#ifdef CONFIG_BLK_DEV_ZONED
	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
		    DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
	.report_zones = linear_report_zones,
#else
	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
		    DM_TARGET_PASSES_CRYPTO,
#endif
	.module = THIS_MODULE,
	.ctr    = linear_ctr,
	.dtr    = linear_dtr,
	.map    = linear_map,
	.status = linear_status,
	.prepare_ioctl = linear_prepare_ioctl,
	.iterate_devices = linear_iterate_devices,
	.direct_access = linear_dax_direct_access,
	.dax_copy_from_iter = linear_dax_copy_from_iter,
	.dax_copy_to_iter = linear_dax_copy_to_iter,
	.dax_zero_page_range = linear_dax_zero_page_range,
};

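/*
 * Note: the linear target is built into dm-mod rather than being a
 * standalone module, so these init/exit hooks are invoked from the
 * device-mapper core's own init/exit paths instead of via
 * module_init()/module_exit().
 */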
int __init dm_linear_init(void)
{
	int r = dm_register_target(&linear_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

void dm_linear_exit(void)
{
	dm_unregister_target(&linear_target);
}