/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);

int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);

#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);

#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	if (!blk_queue_dax(bdev->bd_queue))
		return NULL;
	return fs_dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif

/**
 * __bdev_dax_supported() - Check if the device supports dax for a filesystem
 * @bdev: block device to check
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with the dax option.
 *
 * Return: true if supported, false if unsupported
 */
bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	struct dax_device *dax_dev;
	struct request_queue *q;
	pgoff_t pgoff;
	int err, id;
	void *kaddr;
	pfn_t pfn;
	long len;
	char buf[BDEVNAME_SIZE];

	if (blocksize != PAGE_SIZE) {
		pr_debug("%s: error: unsupported blocksize for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	q = bdev_get_queue(bdev);
	if (!q || !blk_queue_dax(q)) {
		pr_debug("%s: error: request queue doesn't support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
	if (err) {
		pr_debug("%s: error: unaligned partition for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev) {
		pr_debug("%s: error: device does not support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	dax_read_unlock(id);

	put_dax(dax_dev);

	if (len < 1) {
		pr_debug("%s: error: dax access failed (%ld)\n",
				bdevname(bdev, buf), len);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
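
/*
 * Illustrative sketch (not part of this driver): a filesystem mount path
 * could gate its "-o dax" handling on the helper above roughly as below.
 * The helper name and the use of sb->s_bdev / sb->s_blocksize are
 * assumptions for the example, not code from this file.
 */
static inline bool example_sb_can_use_dax(struct super_block *sb)
{
	/* both the filesystem block size and the backing device must qualify */
	return __bdev_dax_supported(sb->s_bdev, sb->s_blocksize);
}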
#endif

enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
};

/**
 * struct dax_device - anchor object for dax services
 * @list: entry in the dax_host_list hash used for lookups by @host
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 * @ops: driver-provided operations for accessing the device's memory
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};

static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	ssize_t rc;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	rc = sprintf(buf, "%d\n", !!test_bit(DAXDEV_WRITE_CACHE,
				&dax_dev->flags));
	put_dax(dax_dev);
	return rc;
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool write_cache;
	int rc = strtobool(buf, &write_cache);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	if (rc)
		len = rc;
	else if (write_cache)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);

	put_dax(dax_dev);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);

/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device-relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	/*
	 * The device driver is allowed to sleep, in order to make the
	 * memory directly accessible.
	 */
	might_sleep();

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return nr_pages;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
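
/*
 * Illustrative sketch (not part of this driver): callers are expected to
 * bracket dax_direct_access() with dax_read_lock()/dax_read_unlock() so the
 * device cannot be torn down while its memory is being resolved. The helper
 * name and the single-page request below are assumptions for the example.
 */
static inline long example_resolve_one_page(struct dax_device *dax_dev,
		pgoff_t pgoff, void **kaddr, pfn_t *pfn)
{
	long nr_pages;
	int id;

	id = dax_read_lock();
	/* ask for one page; a positive return is the usable run length */
	nr_pages = dax_direct_access(dax_dev, pgoff, 1, kaddr, pfn);
	dax_read_unlock(id);

	return nr_pages;
}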

size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!dax_alive(dax_dev)))
		return;

	if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
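
/*
 * Illustrative sketch (not part of this driver): a provider whose media
 * needs explicit CPU cache flushing would set the write-cache flag at setup,
 * and a consumer would then flush after writing through a dax mapping. The
 * function names below are assumptions for the example only.
 */
static inline void example_enable_flush(struct dax_device *dax_dev)
{
	/* record that dax_flush() must call the low level flush routine */
	dax_write_cache(dax_dev, true);
}

static inline void example_persist_range(struct dax_device *dax_dev,
		void *kaddr, size_t len)
{
	/* a nop unless DAXDEV_WRITE_CACHE has been set as above */
	dax_flush(dax_dev, kaddr, len);
}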

bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}

/*
 * Note, rcu is not protecting the liveness of dax_dev; rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive() have completed.  Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);

	dax_dev->private = NULL;
}
EXPORT_SYMBOL_GPL(kill_dax);

static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}

static void dax_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct dax_device *dax_dev = to_dax_dev(inode);

	kfree(dax_dev->host);
	dax_dev->host = NULL;
	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
	call_rcu(&inode->i_rcu, dax_i_callback);
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.drop_inode = generic_delete_inode,
};

static struct dentry *dax_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}

static struct file_system_type dax_fs_type = {
	.name = "dax",
	.mount = dax_mount,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);

	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}

static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}

struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return NULL;

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_dax);
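
/*
 * Illustrative sketch (not part of this driver): a provider typically pairs
 * alloc_dax() at probe time with kill_dax() + put_dax() at removal. The
 * "example" host name, the ops argument, and the function names here are
 * assumptions for the sketch, not symbols defined by this file.
 */
static inline struct dax_device *example_register(void *drv_data,
		const struct dax_operations *example_ops)
{
	/* hashes the host name so dax_get_by_host("example") can find it */
	return alloc_dax(drv_data, "example", example_ops);
}

static inline void example_unregister(struct dax_device *dax_dev)
{
	kill_dax(dax_dev);	/* block new operations, wait for in-flight ones */
	put_dax(dax_dev);	/* drop our reference */
}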

void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);

/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev)
				|| strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);
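
/*
 * Illustrative sketch (not part of this driver): filesystem-dax users pin
 * the dax_device for the lifetime of the mount by looking it up once and
 * dropping it at unmount. The function name below is an assumption for the
 * example only.
 */
static inline void example_mount_and_release(const char *disk_name)
{
	struct dax_device *dax_dev;

	dax_dev = dax_get_by_host(disk_name);	/* takes a reference (igrab) */
	if (!dax_dev)
		return;

	/* ... use dax_direct_access() under dax_read_lock() here ... */

	put_dax(dax_dev);			/* balance the lookup */
}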

/**
 * inode_dax: convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}

static int __dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	rc = register_filesystem(&dax_fs_type);
	if (rc)
		goto err_register_fs;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	unregister_filesystem(&dax_fs_type);
 err_register_fs:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void __dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	unregister_filesystem(&dax_fs_type);
	kmem_cache_destroy(dax_cache);
}

static int __init dax_fs_init(void)
{
	int rc;

	rc = __dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		__dax_fs_exit();
	return rc;
}

static void __exit dax_fs_exit(void)
{
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	__dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_fs_init);
module_exit(dax_fs_exit);