/*
 * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
#include "dax.h"

static struct class *dax_class;

/*
 * Rely on the fact that drvdata is set before the attributes are
 * registered, and that the attributes are unregistered before drvdata
 * is cleared to assume that drvdata is always valid.
 */
static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dax_region->id);
}
static DEVICE_ATTR_RO(id);

static ssize_t region_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%llu\n", (unsigned long long)
			resource_size(&dax_region->res));
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
		region_size_show, NULL);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", dax_region->align);
}
static DEVICE_ATTR_RO(align);

static struct attribute *dax_region_attributes[] = {
	&dev_attr_region_size.attr,
	&dev_attr_align.attr,
	&dev_attr_id.attr,
	NULL,
};

static const struct attribute_group dax_region_attribute_group = {
	.name = "dax_region",
	.attrs = dax_region_attributes,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
	&dax_region_attribute_group,
	NULL,
};

static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);

static void dax_region_unregister(void *region)
{
	struct dax_region *dax_region = region;

	sysfs_remove_groups(&dax_region->dev->kobj,
			dax_region_attribute_groups);
	dax_region_put(dax_region);
}

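/*
 * Allocate a dax_region and publish its attributes under @parent's
 * sysfs directory. kref_init() establishes the reference returned to
 * the caller, and the extra kref_get() is owned by the devm action
 * that unregisters the region when @parent is unbound.
 */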
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct resource *res, unsigned int align, void *addr,
		unsigned long pfn_flags)
{
	struct dax_region *dax_region;

	/*
	 * The DAX core assumes that it can store its private data in
	 * parent->driver_data. This WARN is a reminder / safeguard for
	 * developers of device-dax drivers.
	 */
	if (dev_get_drvdata(parent)) {
		dev_WARN(parent, "dax core failed to setup private data\n");
		return NULL;
	}

	if (!IS_ALIGNED(res->start, align)
			|| !IS_ALIGNED(resource_size(res), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;

	dev_set_drvdata(parent, dax_region);
	memcpy(&dax_region->res, res, sizeof(*res));
	dax_region->pfn_flags = pfn_flags;
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	ida_init(&dax_region->ida);
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->base = addr;
	if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
		kfree(dax_region);
		return NULL;
	}

	kref_get(&dax_region->kref);
	if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
		return NULL;
	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

static struct dev_dax *to_dev_dax(struct device *dev)
{
	return container_of(dev, struct dev_dax, dev);
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	unsigned long long size = 0;
	int i;

	for (i = 0; i < dev_dax->num_resources; i++)
		size += resource_size(&dev_dax->res[i]);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static struct attribute *dev_dax_attributes[] = {
	&dev_attr_size.attr,
	NULL,
};

static const struct attribute_group dev_dax_attribute_group = {
	.attrs = dev_dax_attributes,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dev_dax_attribute_group,
	NULL,
};

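/*
 * Validate that a vma is suitable for device-dax: the mapping must be
 * shared, aligned to the region alignment, marked VM_DONTCOPY when the
 * region lacks struct pages (PFN_DEV without PFN_MAP), and DAX capable.
 * Called at mmap() time and re-checked at fault time.
 */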
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	unsigned long mask;

	if (!dax_alive(dev_dax->dax_dev))
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}

/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
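/*
 * Treat the device's resource ranges as one linear pgoff space and
 * translate @pgoff to a physical address, or -1 if @pgoff + @size does
 * not fit entirely within a single resource range.
 */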
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
		unsigned long size)
{
	struct resource *res;
	/* gcc-4.6.3-nolibc for i386 complains that this is uninitialized */
	phys_addr_t uninitialized_var(phys);
	int i;

	for (i = 0; i < dev_dax->num_resources; i++) {
		res = &dev_dax->res[i];
		phys = pgoff * PAGE_SIZE + res->start;
		if (phys >= res->start && phys <= res->end)
			break;
		pgoff -= PHYS_PFN(resource_size(res));
	}

	if (i < dev_dax->num_resources) {
		res = &dev_dax->res[i];
		if (phys + size - 1 <= res->end)
			return phys;
	}

	return -1;
}

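/* map a single PAGE_SIZE pfn in response to a PTE fault */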
static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PAGE_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dax_region->align)
		return VM_FAULT_SIGBUS;

	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
}

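/*
 * Map a PMD_SIZE extent: SIGBUS if the region alignment is larger than
 * PMD_SIZE, fall back to PTE faults if it is smaller.
 */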
static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PMD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pmd mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vmf->vma->vm_start ||
			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pud_addr = vmf->address & PUD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PUD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PUD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pud mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pud_addr < vmf->vma->vm_start ||
			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

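/*
 * Common fault handler: dispatch on the requested page entry size and,
 * on success, associate the mapped pages with the file's address_space
 * (page->mapping / page->index) so the pages can be found later, e.g.
 * by the memory-failure path.
 */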
static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct file *filp = vmf->vma->vm_file;
	unsigned long fault_size;
	vm_fault_t rc = VM_FAULT_SIGBUS;
	int id;
	pfn_t pfn;
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

	id = dax_read_lock();
	switch (pe_size) {
	case PE_SIZE_PTE:
		fault_size = PAGE_SIZE;
		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PMD:
		fault_size = PMD_SIZE;
		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PUD:
		fault_size = PUD_SIZE;
		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
		break;
	default:
		rc = VM_FAULT_SIGBUS;
	}

	if (rc == VM_FAULT_NOPAGE) {
		unsigned long i;
		pgoff_t pgoff;

		/*
		 * In the device-dax case the only possibility for a
		 * VM_FAULT_NOPAGE result is when device-dax capacity is
		 * mapped. No need to consider the zero page, or racing
		 * conflicting mappings.
		 */
		pgoff = linear_page_index(vmf->vma, vmf->address
				& ~(fault_size - 1));
		for (i = 0; i < fault_size / PAGE_SIZE; i++) {
			struct page *page;

			page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
			if (page->mapping)
				continue;
			page->mapping = filp->f_mapping;
			page->index = pgoff + i;
		}
	}
	dax_read_unlock(id);

	return rc;
}

static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
{
	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}

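/* refuse vma splits that would create an unaligned device-dax mapping */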
static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	if (!IS_ALIGNED(addr, dax_region->align))
		return -EINVAL;
	return 0;
}

static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	return dax_region->align;
}

static const struct vm_operations_struct dax_vm_ops = {
	.fault = dev_dax_fault,
	.huge_fault = dev_dax_huge_fault,
	.split = dev_dax_split,
	.pagesize = dev_dax_pagesize,
};

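/*
 * mmap() entry point: validate the vma under dax_read_lock() since the
 * device may be unregistered at any time, then install dax_vm_ops.
 */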
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dev_dax *dev_dax = filp->private_data;
	int rc, id;

	dev_dbg(&dev_dax->dev, "trace\n");

	/*
	 * We lock to check dax_dev liveness and will re-check at
	 * fault time.
	 */
	id = dax_read_lock();
	rc = check_vma(dev_dax, vma, __func__);
	dax_read_unlock(id);
	if (rc)
		return rc;

	vma->vm_ops = &dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

/* return an unmapped area aligned to the dax region specified alignment */
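/*
 * Over-allocate the address space search by one alignment unit and then
 * shift the result so that the virtual address and the file offset are
 * congruent modulo the region alignment. For example, a 4MB request
 * against a 2MB-aligned region searches for 4MB + 2MB of address space
 * and rounds the start up, so faults can use PMD mappings.
 */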
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dev_dax *dev_dax = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dev_dax || addr)
		goto out;

	dax_region = dev_dax->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

static const struct address_space_operations dev_dax_aops = {
	.set_page_dirty = noop_set_page_dirty,
	.invalidatepage = noop_invalidatepage,
};

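/*
 * Redirect the open of /dev/daxX.Y to the dax_device inode's mapping so
 * that all opens of a given device share one address_space, and flag
 * the inode S_DAX so vma_is_dax() checks pass.
 */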
static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);
	struct inode *__dax_inode = dax_inode(dax_dev);
	struct dev_dax *dev_dax = dax_get_private(dax_dev);

	dev_dbg(&dev_dax->dev, "trace\n");
	inode->i_mapping = __dax_inode->i_mapping;
	inode->i_mapping->host = __dax_inode;
	inode->i_mapping->a_ops = &dev_dax_aops;
	filp->f_mapping = inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->private_data = dev_dax;
	inode->i_flags = S_DAX;

	return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "trace\n");
	return 0;
}

static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
	.mmap_supported_flags = MAP_SYNC,
};

static void dev_dax_release(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	struct dax_device *dax_dev = dev_dax->dax_dev;

	if (dev_dax->id >= 0)
		ida_simple_remove(&dax_region->ida, dev_dax->id);
	dax_region_put(dax_region);
	put_dax(dax_dev);
	kfree(dev_dax);
}

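/*
 * Disable the dax_device so new dax_alive() checks fail, then zap any
 * established mappings; in-flight faults synchronize via dax_read_lock().
 */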
static void kill_dev_dax(struct dev_dax *dev_dax)
{
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);

	kill_dax(dax_dev);
	unmap_mapping_range(inode->i_mapping, 0, 0, 1);
}

static void unregister_dev_dax(void *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);
	struct cdev *cdev = inode->i_cdev;

	dev_dbg(dev, "trace\n");

	kill_dev_dax(dev_dax);
	cdev_device_del(cdev, dev);
	put_device(dev);
}

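/*
 * Create a dev_dax instance and its character device for a set of
 * region-aligned resource ranges. A negative @id requests allocation
 * from the region's ida; otherwise the region provider owns the @id
 * lifetime. Unregistration is hooked to @dax_region->dev via devm.
 */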
struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
		int id, struct resource *res, int count)
{
	struct device *parent = dax_region->dev;
	struct dax_device *dax_dev;
	struct dev_dax *dev_dax;
	struct inode *inode;
	struct device *dev;
	struct cdev *cdev;
	int rc, i;

	if (!count)
		return ERR_PTR(-EINVAL);

	dev_dax = kzalloc(struct_size(dev_dax, res, count), GFP_KERNEL);
	if (!dev_dax)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		if (!IS_ALIGNED(res[i].start, dax_region->align)
				|| !IS_ALIGNED(resource_size(&res[i]),
					dax_region->align)) {
			rc = -EINVAL;
			break;
		}
		dev_dax->res[i].start = res[i].start;
		dev_dax->res[i].end = res[i].end;
	}

	if (i < count)
		goto err_id;

	if (id < 0) {
		id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
		dev_dax->id = id;
		if (id < 0) {
			rc = id;
			goto err_id;
		}
	} else {
		/* region provider owns @id lifetime */
		dev_dax->id = -1;
	}

	/*
	 * No 'host' or dax_operations since there is no access to this
	 * device outside of mmap of the resulting character device.
	 */
	dax_dev = alloc_dax(dev_dax, NULL, NULL);
	if (!dax_dev) {
		rc = -ENOMEM;
		goto err_dax;
	}

	/* from here on we're committed to teardown via dev_dax_release() */
	dev = &dev_dax->dev;
	device_initialize(dev);

	inode = dax_inode(dax_dev);
	cdev = inode->i_cdev;
	cdev_init(cdev, &dax_fops);
	cdev->owner = parent->driver->owner;

	dev_dax->num_resources = count;
	dev_dax->dax_dev = dax_dev;
	dev_dax->region = dax_region;
	kref_get(&dax_region->kref);

	dev->devt = inode->i_rdev;
	dev->class = dax_class;
	dev->parent = parent;
	dev->groups = dax_attribute_groups;
	dev->release = dev_dax_release;
	dev_set_name(dev, "dax%d.%d", dax_region->id, id);

	rc = cdev_device_add(cdev, dev);
	if (rc) {
		kill_dev_dax(dev_dax);
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
	if (rc)
		return ERR_PTR(rc);

	return dev_dax;

 err_dax:
	if (dev_dax->id >= 0)
		ida_simple_remove(&dax_region->ida, dev_dax->id);
 err_id:
	kfree(dev_dax);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);

static int __init dax_init(void)
{
	dax_class = class_create(THIS_MODULE, "dax");
	return PTR_ERR_OR_ZERO(dax_class);
}

static void __exit dax_exit(void)
{
	class_destroy(dax_class);
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_init);
module_exit(dax_exit);