// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include "dax-private.h"

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;
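
/*
 * Fixed-size table hashing a dax host name, as registered via alloc_dax(),
 * to its dax_device; see dax_get_by_host().
 */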
#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);
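
/*
 * dax_read_lock() / dax_read_unlock() bracket all dax_device operations:
 * kill_dax() marks a device dead and then uses synchronize_srcu() on the
 * same srcu_struct to wait out any reader that may still have observed
 * dax_alive() == true.
 */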
int dax_read_lock(void)
{
        return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
        srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);

#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
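
/*
 * Worked example (numbers are illustrative): with 4K pages, a partition
 * starting at sector 2048 (1 MiB) queried with sector == 0 yields
 * phys_off == 1 MiB and *pgoff == 256; a partition starting at sector
 * 2047 is not page aligned, so the call fails with -EINVAL.
 */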
int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
                pgoff_t *pgoff)
{
        sector_t start_sect = bdev ? get_start_sect(bdev) : 0;
        phys_addr_t phys_off = (start_sect + sector) * 512;

        if (pgoff)
                *pgoff = PHYS_PFN(phys_off);
        if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
                return -EINVAL;
        return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);

#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
        if (!blk_queue_dax(bdev->bd_disk->queue))
                return NULL;
        return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif
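
/*
 * Summary (derived from the function body below): validate that the range
 * [@start, @start + @sectors) of @bdev can host dax, i.e. @blocksize
 * equals PAGE_SIZE, both ends of the range are page aligned,
 * dax_direct_access() succeeds for the first and last page, and the
 * resulting pfns belong to a single MEMORY_DEVICE_FS_DAX pagemap.
 */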
bool __generic_fsdax_supported(struct dax_device *dax_dev,
                struct block_device *bdev, int blocksize, sector_t start,
                sector_t sectors)
{
        bool dax_enabled = false;
        pgoff_t pgoff, pgoff_end;
        char buf[BDEVNAME_SIZE];
        void *kaddr, *end_kaddr;
        pfn_t pfn, end_pfn;
        sector_t last_page;
        long len, len2;
        int err, id;

        if (blocksize != PAGE_SIZE) {
                pr_info("%s: error: unsupported blocksize for dax\n",
                                bdevname(bdev, buf));
                return false;
        }

        if (!dax_dev) {
                pr_debug("%s: error: dax unsupported by block device\n",
                                bdevname(bdev, buf));
                return false;
        }

        err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
        if (err) {
                pr_info("%s: error: unaligned partition for dax\n",
                                bdevname(bdev, buf));
                return false;
        }

        last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
        err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
        if (err) {
                pr_info("%s: error: unaligned partition for dax\n",
                                bdevname(bdev, buf));
                return false;
        }

        id = dax_read_lock();
        len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
        len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);

        if (len < 1 || len2 < 1) {
                pr_info("%s: error: dax access failed (%ld)\n",
                                bdevname(bdev, buf), len < 1 ? len : len2);
                dax_read_unlock(id);
                return false;
        }

        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
                /*
                 * An arch that has enabled the pmem api should also
                 * have its drivers support pfn_t_devmap()
                 *
                 * This is a developer warning and should not trigger in
                 * production. dax_flush() will crash since it depends
                 * on being able to do (page_address(pfn_to_page())).
                 */
                WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
                dax_enabled = true;
        } else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
                struct dev_pagemap *pgmap, *end_pgmap;

                pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
                end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
                if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
                                && pfn_t_to_page(pfn)->pgmap == pgmap
                                && pfn_t_to_page(end_pfn)->pgmap == pgmap
                                && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
                                && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
                        dax_enabled = true;
                put_dev_pagemap(pgmap);
                put_dev_pagemap(end_pgmap);
        }
        dax_read_unlock(id);

        if (!dax_enabled) {
                pr_info("%s: error: dax support not enabled\n",
                                bdevname(bdev, buf));
                return false;
        }
        return true;
}
EXPORT_SYMBOL_GPL(__generic_fsdax_supported);

/**
 * __bdev_dax_supported() - Check if the device supports dax for a filesystem
 * @bdev: block device to check
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with the dax option.
 *
 * Return: true if supported, false if unsupported
 */
bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
        struct dax_device *dax_dev;
        struct request_queue *q;
        char buf[BDEVNAME_SIZE];
        bool ret;
        int id;

        q = bdev_get_queue(bdev);
        if (!q || !blk_queue_dax(q)) {
                pr_debug("%s: error: request queue doesn't support dax\n",
                                bdevname(bdev, buf));
                return false;
        }

        dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
        if (!dax_dev) {
                pr_debug("%s: error: device does not support dax\n",
                                bdevname(bdev, buf));
                return false;
        }

        id = dax_read_lock();
        ret = dax_supported(dax_dev, bdev, blocksize, 0,
                        i_size_read(bdev->bd_inode) / 512);
        dax_read_unlock(id);

        put_dax(dax_dev);

        return ret;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
#endif

enum dax_device_flags {
        /* !alive + rcu grace period == no new operations / mappings */
        DAXDEV_ALIVE,
        /* gate whether dax_flush() calls the low level flush routine */
        DAXDEV_WRITE_CACHE,
        /* flag to check if device supports synchronous flush */
        DAXDEV_SYNC,
};

/**
 * struct dax_device - anchor object for dax services
 * @list: node linking this device into the dax_host_list hash
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 * @ops: operations vector supplied by the dax driver
 */
struct dax_device {
        struct hlist_node list;
        struct inode inode;
        struct cdev cdev;
        const char *host;
        void *private;
        unsigned long flags;
        const struct dax_operations *ops;
};

static ssize_t write_cache_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
        ssize_t rc;

        WARN_ON_ONCE(!dax_dev);
        if (!dax_dev)
                return -ENXIO;

        rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
        put_dax(dax_dev);
        return rc;
}

static ssize_t write_cache_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool write_cache;
        int rc = strtobool(buf, &write_cache);
        struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

        WARN_ON_ONCE(!dax_dev);
        if (!dax_dev)
                return -ENXIO;

        if (rc)
                len = rc;
        else
                dax_write_cache(dax_dev, write_cache);

        put_dax(dax_dev);
        return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

        WARN_ON_ONCE(!dax_dev);
        if (!dax_dev)
                return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
        if (a == &dev_attr_write_cache.attr)
                return 0;
#endif
        return a->mode;
}

static struct attribute *dax_attributes[] = {
        &dev_attr_write_cache.attr,
        NULL,
};

struct attribute_group dax_attribute_group = {
        .name = "dax",
        .attrs = dax_attributes,
        .is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);
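
/*
 * Providers that register this group get a "dax/" sysfs subdirectory on
 * their device; e.g. the pmem driver exposes (path illustrative)
 * /sys/block/pmem0/dax/write_cache for toggling DAXDEV_WRITE_CACHE.
 */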

/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative to @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
                void **kaddr, pfn_t *pfn)
{
        long avail;

        if (!dax_dev)
                return -EOPNOTSUPP;

        if (!dax_alive(dax_dev))
                return -ENXIO;

        if (nr_pages < 0)
                return nr_pages;

        avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
                        kaddr, pfn);
        if (!avail)
                return -ERANGE;
        return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
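
/*
 * Calling-convention sketch (the lock requirement follows from the
 * dax_alive() check above): hold dax_read_lock() across the call and for
 * as long as @kaddr / @pfn are used:
 *
 *        id = dax_read_lock();
 *        avail = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, &pfn);
 *        if (avail > 0)
 *                ... access up to avail * PAGE_SIZE bytes at kaddr ...
 *        dax_read_unlock(id);
 */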

bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
                int blocksize, sector_t start, sector_t len)
{
        if (!dax_dev)
                return false;

        if (!dax_alive(dax_dev))
                return false;

        return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
}
EXPORT_SYMBOL_GPL(dax_supported);

size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
                size_t bytes, struct iov_iter *i)
{
        if (!dax_alive(dax_dev))
                return 0;

        return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
                size_t bytes, struct iov_iter *i)
{
        if (!dax_alive(dax_dev))
                return 0;

        return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);

int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
                size_t nr_pages)
{
        if (!dax_alive(dax_dev))
                return -ENXIO;
        /*
         * There are no callers that want to zero more than one page as of now.
         * Once users are there, this check can be removed after the
         * device mapper code has been updated to split ranges across targets.
         */
        if (nr_pages != 1)
                return -EIO;

        return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
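
/*
 * dax_flush() only does real work when the architecture provides the pmem
 * api (arch_wb_cache_pmem()) and the device has DAXDEV_WRITE_CACHE set;
 * otherwise it compiles down to a nop.
 */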
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
        if (unlikely(!dax_write_cache_enabled(dax_dev)))
                return;

        arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
        if (wc)
                set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
        else
                clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
        return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);

bool __dax_synchronous(struct dax_device *dax_dev)
{
        return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__dax_synchronous);

void __set_dax_synchronous(struct dax_device *dax_dev)
{
        set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__set_dax_synchronous);

bool dax_alive(struct dax_device *dax_dev)
{
        lockdep_assert_held(&dax_srcu);
        return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

static int dax_host_hash(const char *host)
{
        return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}

/*
 * Note: rcu is not protecting the liveness of dax_dev; rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive() have completed. Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
        if (!dax_dev)
                return;

        clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

        synchronize_srcu(&dax_srcu);

        spin_lock(&dax_host_lock);
        hlist_del_init(&dax_dev->list);
        spin_unlock(&dax_host_lock);
}
EXPORT_SYMBOL_GPL(kill_dax);
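
/*
 * Flip a device back to the alive state so new operations can proceed,
 * pairing with an earlier kill_dax() when a device is re-enabled rather
 * than torn down.
 */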
void run_dax(struct dax_device *dax_dev)
{
        set_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(run_dax);

static struct inode *dax_alloc_inode(struct super_block *sb)
{
        struct dax_device *dax_dev;
        struct inode *inode;

        dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
        if (!dax_dev)
                return NULL;

        inode = &dax_dev->inode;
        inode->i_rdev = 0;
        return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
        return container_of(inode, struct dax_device, inode);
}

static void dax_free_inode(struct inode *inode)
{
        struct dax_device *dax_dev = to_dax_dev(inode);

        kfree(dax_dev->host);
        dax_dev->host = NULL;
        if (inode->i_rdev)
                ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
        kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
        struct dax_device *dax_dev = to_dax_dev(inode);

        WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
                        "kill_dax() must be called before final iput()\n");
}

static const struct super_operations dax_sops = {
        .statfs = simple_statfs,
        .alloc_inode = dax_alloc_inode,
        .destroy_inode = dax_destroy_inode,
        .free_inode = dax_free_inode,
        .drop_inode = generic_delete_inode,
};

static int dax_init_fs_context(struct fs_context *fc)
{
        struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);

        if (!ctx)
                return -ENOMEM;
        ctx->ops = &dax_sops;
        return 0;
}

static struct file_system_type dax_fs_type = {
        .name = "dax",
        .init_fs_context = dax_init_fs_context,
        .kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
        dev_t devt = *(dev_t *) data;

        return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
        dev_t devt = *(dev_t *) data;

        inode->i_rdev = devt;
        return 0;
}
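
/*
 * Find or create the anonymous inode for @devt on the dax superblock; a
 * newly created inode starts out DAXDEV_ALIVE with a character-device
 * identity.
 */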
static struct dax_device *dax_dev_get(dev_t devt)
{
        struct dax_device *dax_dev;
        struct inode *inode;

        inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
                        dax_test, dax_set, &devt);
        if (!inode)
                return NULL;

        dax_dev = to_dax_dev(inode);
        if (inode->i_state & I_NEW) {
                set_bit(DAXDEV_ALIVE, &dax_dev->flags);
                inode->i_cdev = &dax_dev->cdev;
                inode->i_mode = S_IFCHR;
                inode->i_flags = S_DAX;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
                unlock_new_inode(inode);
        }

        return dax_dev;
}

static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
        int hash;

        /*
         * Unconditionally init dax_dev since it's coming from a
         * non-zeroed slab cache
         */
        INIT_HLIST_NODE(&dax_dev->list);
        dax_dev->host = host;
        if (!host)
                return;

        hash = dax_host_hash(host);
        spin_lock(&dax_host_lock);
        hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
        spin_unlock(&dax_host_lock);
}

struct dax_device *alloc_dax(void *private, const char *__host,
                const struct dax_operations *ops, unsigned long flags)
{
        struct dax_device *dax_dev;
        const char *host;
        dev_t devt;
        int minor;

        if (ops && !ops->zero_page_range) {
                pr_debug("%s: error: device does not provide dax"
                         " operation zero_page_range()\n",
                         __host ? __host : "Unknown");
                return ERR_PTR(-EINVAL);
        }

        host = kstrdup(__host, GFP_KERNEL);
        if (__host && !host)
                return ERR_PTR(-ENOMEM);

        minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
        if (minor < 0)
                goto err_minor;

        devt = MKDEV(MAJOR(dax_devt), minor);
        dax_dev = dax_dev_get(devt);
        if (!dax_dev)
                goto err_dev;

        dax_add_host(dax_dev, host);
        dax_dev->ops = ops;
        dax_dev->private = private;
        if (flags & DAXDEV_F_SYNC)
                set_dax_synchronous(dax_dev);

        return dax_dev;

 err_dev:
        ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
        kfree(host);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);
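
/*
 * Typical provider lifecycle (a sketch; driver_data, "myhost" and
 * my_dax_ops are placeholders, not symbols defined in this file):
 *
 *        dax_dev = alloc_dax(driver_data, "myhost", &my_dax_ops, DAXDEV_F_SYNC);
 *        if (IS_ERR(dax_dev))
 *                return PTR_ERR(dax_dev);
 *        ...
 *        kill_dax(dax_dev);        // stop new operations, wait out old ones
 *        put_dax(dax_dev);         // drop the inode reference
 */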

void put_dax(struct dax_device *dax_dev)
{
        if (!dax_dev)
                return;
        iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);

/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 *
 * Return: the live dax_device registered under @host with its reference
 * count elevated, or NULL. The caller must drop the reference with
 * put_dax().
 */
struct dax_device *dax_get_by_host(const char *host)
{
        struct dax_device *dax_dev, *found = NULL;
        int hash, id;

        if (!host)
                return NULL;

        hash = dax_host_hash(host);

        id = dax_read_lock();
        spin_lock(&dax_host_lock);
        hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
                if (!dax_alive(dax_dev)
                                || strcmp(host, dax_dev->host) != 0)
                        continue;

                if (igrab(&dax_dev->inode))
                        found = dax_dev;
                break;
        }
        spin_unlock(&dax_host_lock);
        dax_read_unlock(id);

        return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);

/**
 * inode_dax() - convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
        struct cdev *cdev = inode->i_cdev;

        return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
        return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
        if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
                return NULL;
        return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
        struct dax_device *dax_dev = _dax_dev;
        struct inode *inode = &dax_dev->inode;

        memset(dax_dev, 0, sizeof(*dax_dev));
        inode_init_once(inode);
}

static int dax_fs_init(void)
{
        int rc;

        dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
                        (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                         SLAB_MEM_SPREAD|SLAB_ACCOUNT),
                        init_once);
        if (!dax_cache)
                return -ENOMEM;

        dax_mnt = kern_mount(&dax_fs_type);
        if (IS_ERR(dax_mnt)) {
                rc = PTR_ERR(dax_mnt);
                goto err_mount;
        }
        dax_superblock = dax_mnt->mnt_sb;

        return 0;

 err_mount:
        kmem_cache_destroy(dax_cache);

        return rc;
}

static void dax_fs_exit(void)
{
        kern_unmount(dax_mnt);
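        /*
         * Inode teardown is rcu-deferred (free_inode -> dax_free_inode);
         * wait for outstanding callbacks before destroying the backing
         * slab cache.
         */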
        rcu_barrier();
        kmem_cache_destroy(dax_cache);
}

static int __init dax_core_init(void)
{
        int rc;

        rc = dax_fs_init();
        if (rc)
                return rc;

        rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
        if (rc)
                goto err_chrdev;

        rc = dax_bus_init();
        if (rc)
                goto err_bus;
        return 0;

err_bus:
        unregister_chrdev_region(dax_devt, MINORMASK+1);
err_chrdev:
        dax_fs_exit();
        return rc;
}

static void __exit dax_core_exit(void)
{
        dax_bus_exit();
        unregister_chrdev_region(dax_devt, MINORMASK+1);
        ida_destroy(&dax_minor_ida);
        dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);