// SPDX-License-Identifier: GPL-2.0
/*
 * Framework for userspace DMA-BUF allocations
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019 Linaro Ltd.
 */

#include <linux/dma-heap.h>
#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/xarray.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>

#include <uapi/linux/dma-heap.h>

#define DEVNAME "dma_heap"

#define NUM_HEAP_MINORS 128

/**
 * struct dma_heap - represents a dmabuf heap in the system
 * @name:      used for debugging/device-node name
 * @ops:       ops struct for this heap
 * @priv:      private per-subdriver data for this heap
 * @heap_devt: heap device node
 * @list:      list head connecting to list of heaps
 * @heap_cdev: heap char device
 * @refcount:  reference counter for this heap device
 * @heap_dev:  heap device struct
 *
 * Represents a heap of memory from which buffers can be made.
 */
struct dma_heap {
        const char *name;
        const struct dma_heap_ops *ops;
        void *priv;
        dev_t heap_devt;
        struct list_head list;
        struct cdev heap_cdev;
        struct kref refcount;
        struct device *heap_dev;
};

static LIST_HEAD(heap_list);
static DEFINE_MUTEX(heap_list_lock);
static dev_t dma_heap_devt;
static struct class *dma_heap_class;
static DEFINE_XARRAY_ALLOC(dma_heap_minors);

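/**
 * dma_heap_find() - get the heap registered with the specified name
 * @name: name of the heap to find
 *
 * Returns the heap with a matching name, with its reference count
 * incremented, or NULL if no such heap is registered. The caller must
 * drop the reference with dma_heap_put() when done with it.
 */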
struct dma_heap *dma_heap_find(const char *name)
{
        struct dma_heap *h;

        mutex_lock(&heap_list_lock);
        list_for_each_entry(h, &heap_list, list) {
                if (!strcmp(h->name, name)) {
                        kref_get(&h->refcount);
                        mutex_unlock(&heap_list_lock);
                        return h;
                }
        }
        mutex_unlock(&heap_list_lock);
        return NULL;
}
EXPORT_SYMBOL_GPL(dma_heap_find);

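/**
 * dma_heap_buffer_free() - free a buffer allocated from a heap
 * @dmabuf: the buffer to free
 *
 * Drops the reference taken at allocation time; the buffer is actually
 * released once all outstanding references are gone.
 */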
void dma_heap_buffer_free(struct dma_buf *dmabuf)
{
        dma_buf_put(dmabuf);
}
EXPORT_SYMBOL_GPL(dma_heap_buffer_free);

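/**
 * dma_heap_buffer_alloc() - allocate a buffer from a registered heap
 * @heap: heap to allocate from
 * @len: requested size of the allocation, in bytes
 * @fd_flags: flags for the dma-buf fd (must be within DMA_HEAP_VALID_FD_FLAGS)
 * @heap_flags: heap-specific flags (must be within DMA_HEAP_VALID_HEAP_FLAGS)
 *
 * The length is rounded up to a multiple of PAGE_SIZE before the heap's
 * allocate() op is called. Returns a dma_buf pointer on success, or an
 * ERR_PTR() on failure.
 */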
struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
                                      unsigned int fd_flags,
                                      unsigned int heap_flags)
{
        if (fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
                return ERR_PTR(-EINVAL);

        if (heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
                return ERR_PTR(-EINVAL);

        /*
         * Allocations from all heaps have to begin
         * and end on page boundaries.
         */
        len = PAGE_ALIGN(len);
        if (!len)
                return ERR_PTR(-EINVAL);

        return heap->ops->allocate(heap, len, fd_flags, heap_flags);
}
EXPORT_SYMBOL_GPL(dma_heap_buffer_alloc);

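/**
 * dma_heap_bufferfd_alloc() - allocate a buffer and install it as a dma-buf fd
 * @heap: heap to allocate from
 * @len: requested size of the allocation, in bytes
 * @fd_flags: flags for the new dma-buf fd
 * @heap_flags: heap-specific allocation flags
 *
 * Returns the new fd on success, or a negative errno on failure.
 */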
int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len,
                            unsigned int fd_flags,
                            unsigned int heap_flags)
{
        struct dma_buf *dmabuf;
        int fd;

        dmabuf = dma_heap_buffer_alloc(heap, len, fd_flags, heap_flags);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        fd = dma_buf_fd(dmabuf, fd_flags);
        if (fd < 0) {
                dma_buf_put(dmabuf);
                /* just return, as put will call release and that will free */
        }
        return fd;
}
EXPORT_SYMBOL_GPL(dma_heap_bufferfd_alloc);

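/*
 * All heap character devices share the fops below; open() resolves the
 * device's minor number back to its heap, so later ioctls know which
 * heap to allocate from.
 */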
static int dma_heap_open(struct inode *inode, struct file *file)
{
        struct dma_heap *heap;

        heap = xa_load(&dma_heap_minors, iminor(inode));
        if (!heap) {
                pr_err("dma_heap: minor %d unknown.\n", iminor(inode));
                return -ENODEV;
        }

        /* instance data as context */
        file->private_data = heap;
        nonseekable_open(inode, file);

        return 0;
}

static long dma_heap_ioctl_allocate(struct file *file, void *data)
{
        struct dma_heap_allocation_data *heap_allocation = data;
        struct dma_heap *heap = file->private_data;
        int fd;

        if (heap_allocation->fd)
                return -EINVAL;

        fd = dma_heap_bufferfd_alloc(heap, heap_allocation->len,
                                     heap_allocation->fd_flags,
                                     heap_allocation->heap_flags);
        if (fd < 0)
                return fd;

        heap_allocation->fd = fd;

        return 0;
}

static unsigned int dma_heap_ioctl_cmds[] = {
        DMA_HEAP_IOCTL_ALLOC,
};

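/*
 * Generic ioctl dispatcher, similar in spirit to DRM's: it tolerates user
 * structures that are older (smaller) or newer (larger) than the kernel's
 * definition, copying in only what both sides agree on and zeroing the
 * remainder, so the uAPI structs can grow without breaking old binaries.
 */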
static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
                           unsigned long arg)
{
        char stack_kdata[128];
        char *kdata = stack_kdata;
        unsigned int kcmd;
        unsigned int in_size, out_size, drv_size, ksize;
        int nr = _IOC_NR(ucmd);
        int ret = 0;

        if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
                return -EINVAL;

        /* Get the kernel ioctl cmd that matches */
        kcmd = dma_heap_ioctl_cmds[nr];

        /* Figure out the delta between user cmd size and kernel cmd size */
        drv_size = _IOC_SIZE(kcmd);
        out_size = _IOC_SIZE(ucmd);
        in_size = out_size;
        if ((ucmd & kcmd & IOC_IN) == 0)
                in_size = 0;
        if ((ucmd & kcmd & IOC_OUT) == 0)
                out_size = 0;
        ksize = max(max(in_size, out_size), drv_size);

        /* If necessary, allocate buffer for ioctl argument */
        if (ksize > sizeof(stack_kdata)) {
                kdata = kmalloc(ksize, GFP_KERNEL);
                if (!kdata)
                        return -ENOMEM;
        }

        if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
                ret = -EFAULT;
                goto err;
        }

        /* zero out any difference between the kernel/user structure size */
        if (ksize > in_size)
                memset(kdata + in_size, 0, ksize - in_size);

        switch (kcmd) {
        case DMA_HEAP_IOCTL_ALLOC:
                ret = dma_heap_ioctl_allocate(file, kdata);
                break;
        default:
                ret = -ENOTTY;
                goto err;
        }

        if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
                ret = -EFAULT;
err:
        if (kdata != stack_kdata)
                kfree(kdata);
        return ret;
}

static const struct file_operations dma_heap_fops = {
        .owner = THIS_MODULE,
        .open = dma_heap_open,
        .unlocked_ioctl = dma_heap_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = dma_heap_ioctl,
#endif
};

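/*
 * Illustrative userspace sketch (not part of this file): assuming a heap
 * named "system" has been registered, an allocation through the fops above
 * looks roughly like the following. The device path comes from
 * dma_heap_devnode() below; error handling is omitted for brevity.
 *
 *      int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
 *      struct dma_heap_allocation_data data = {
 *              .len = 4096,
 *              .fd_flags = O_RDWR | O_CLOEXEC,
 *      };
 *      int ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
 *      // on success, data.fd is a new dma-buf fd for the page-sized buffer
 */
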
/**
 * dma_heap_get_drvdata() - get per-subdriver data for the heap
 * @heap: DMA-Heap to retrieve private data for
 *
 * Returns:
 * The per-subdriver data for the heap.
 */
void *dma_heap_get_drvdata(struct dma_heap *heap)
{
        return heap->priv;
}
EXPORT_SYMBOL_GPL(dma_heap_get_drvdata);

static void dma_heap_release(struct kref *ref)
{
        struct dma_heap *heap = container_of(ref, struct dma_heap, refcount);
        int minor = MINOR(heap->heap_devt);

        /* Note, we are already holding the heap_list_lock here */
        list_del(&heap->list);

        device_destroy(dma_heap_class, heap->heap_devt);
        cdev_del(&heap->heap_cdev);
        xa_erase(&dma_heap_minors, minor);

        kfree(heap);
}

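/**
 * dma_heap_put() - drop a reference to a heap, freeing it if it was the last
 * @h: the heap to release
 */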
void dma_heap_put(struct dma_heap *h)
{
        /*
         * Take the heap_list_lock now to avoid racing with code
         * scanning the list and then taking a kref.
         */
        mutex_lock(&heap_list_lock);
        kref_put(&h->refcount, dma_heap_release);
        mutex_unlock(&heap_list_lock);
}
EXPORT_SYMBOL_GPL(dma_heap_put);

/**
 * dma_heap_get_dev() - get device struct for the heap
 * @heap: DMA-Heap to retrieve device struct from
 *
 * Returns:
 * The device struct for the heap.
 */
struct device *dma_heap_get_dev(struct dma_heap *heap)
{
        return heap->heap_dev;
}
EXPORT_SYMBOL_GPL(dma_heap_get_dev);

/**
 * dma_heap_get_name() - get heap name
 * @heap: DMA-Heap to retrieve the name of
 *
 * Returns:
 * The char* for the heap name.
 */
const char *dma_heap_get_name(struct dma_heap *heap)
{
        return heap->name;
}
EXPORT_SYMBOL_GPL(dma_heap_get_name);

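/**
 * dma_heap_add() - register a new dma-buf heap with the framework
 * @exp_info: name, ops and private data for the new heap
 *
 * Allocates a minor number, creates the character device that udev will
 * expose under /dev/dma_heap/<name>, and adds the heap to the global list.
 * Returns the new heap on success, or an ERR_PTR() on failure.
 */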
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
{
        struct dma_heap *heap, *err_ret;
        unsigned int minor;
        int ret;

        if (!exp_info->name || !strcmp(exp_info->name, "")) {
                pr_err("dma_heap: Cannot add heap without a name\n");
                return ERR_PTR(-EINVAL);
        }

        if (!exp_info->ops || !exp_info->ops->allocate) {
                pr_err("dma_heap: Cannot add heap with invalid ops struct\n");
                return ERR_PTR(-EINVAL);
        }

        /* check the name is unique */
        heap = dma_heap_find(exp_info->name);
        if (heap) {
                pr_err("dma_heap: Already registered heap named %s\n",
                       exp_info->name);
                dma_heap_put(heap);
                return ERR_PTR(-EINVAL);
        }

        heap = kzalloc(sizeof(*heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);

        kref_init(&heap->refcount);
        heap->name = exp_info->name;
        heap->ops = exp_info->ops;
        heap->priv = exp_info->priv;

        /* Find unused minor number */
        ret = xa_alloc(&dma_heap_minors, &minor, heap,
                       XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL);
        if (ret < 0) {
                pr_err("dma_heap: Unable to get minor number for heap\n");
                err_ret = ERR_PTR(ret);
                goto err0;
        }

        /* Create device */
        heap->heap_devt = MKDEV(MAJOR(dma_heap_devt), minor);

        cdev_init(&heap->heap_cdev, &dma_heap_fops);
        ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1);
        if (ret < 0) {
                pr_err("dma_heap: Unable to add char device\n");
                err_ret = ERR_PTR(ret);
                goto err1;
        }

        heap->heap_dev = device_create(dma_heap_class, NULL,
                                       heap->heap_devt, NULL,
                                       heap->name);
        if (IS_ERR(heap->heap_dev)) {
                pr_err("dma_heap: Unable to create device\n");
                err_ret = ERR_CAST(heap->heap_dev);
                goto err2;
        }

        /* Make sure it doesn't disappear on us */
        heap->heap_dev = get_device(heap->heap_dev);

        /* Add heap to the list */
        mutex_lock(&heap_list_lock);
        list_add(&heap->list, &heap_list);
        mutex_unlock(&heap_list_lock);

        return heap;

err2:
        cdev_del(&heap->heap_cdev);
err1:
        xa_erase(&dma_heap_minors, minor);
err0:
        kfree(heap);
        return err_ret;
}
EXPORT_SYMBOL_GPL(dma_heap_add);

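/*
 * Illustrative sketch of how an exporter registers a heap; my_heap_allocate()
 * and my_heap_data are hypothetical names, not part of this file:
 *
 *      static const struct dma_heap_ops my_heap_ops = {
 *              .allocate = my_heap_allocate,
 *      };
 *
 *      struct dma_heap_export_info exp_info = {
 *              .name = "my_heap",
 *              .ops = &my_heap_ops,
 *              .priv = my_heap_data,
 *      };
 *      struct dma_heap *heap = dma_heap_add(&exp_info);
 *      if (IS_ERR(heap))
 *              return PTR_ERR(heap);
 */
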
static char *dma_heap_devnode(struct device *dev, umode_t *mode)
{
        return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
}

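/*
 * Total size, in kB, reported by heaps that implement the optional
 * get_pool_size() op; exported as /sys/kernel/dma_heap/total_pools_kb.
 */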
static ssize_t total_pools_kb_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        struct dma_heap *heap;
        u64 total_pool_size = 0;

        mutex_lock(&heap_list_lock);
        list_for_each_entry(heap, &heap_list, list) {
                if (heap->ops->get_pool_size)
                        total_pool_size += heap->ops->get_pool_size(heap);
        }
        mutex_unlock(&heap_list_lock);

        return sysfs_emit(buf, "%llu\n", total_pool_size / 1024);
}

static struct kobj_attribute total_pools_kb_attr = __ATTR_RO(total_pools_kb);

static struct attribute *dma_heap_sysfs_attrs[] = {
        &total_pools_kb_attr.attr,
        NULL,
};

ATTRIBUTE_GROUPS(dma_heap_sysfs);

static struct kobject *dma_heap_kobject;

static int dma_heap_sysfs_setup(void)
{
        int ret;

        dma_heap_kobject = kobject_create_and_add("dma_heap", kernel_kobj);
        if (!dma_heap_kobject)
                return -ENOMEM;

        ret = sysfs_create_groups(dma_heap_kobject, dma_heap_sysfs_groups);
        if (ret) {
                kobject_put(dma_heap_kobject);
                return ret;
        }

        return 0;
}

static void dma_heap_sysfs_teardown(void)
{
        kobject_put(dma_heap_kobject);
}

static int dma_heap_init(void)
{
        int ret;

        ret = dma_heap_sysfs_setup();
        if (ret)
                return ret;

        ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
        if (ret)
                goto err_chrdev;

        dma_heap_class = class_create(THIS_MODULE, DEVNAME);
        if (IS_ERR(dma_heap_class)) {
                ret = PTR_ERR(dma_heap_class);
                goto err_class;
        }
        dma_heap_class->devnode = dma_heap_devnode;

        return 0;

err_class:
        unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
err_chrdev:
        dma_heap_sysfs_teardown();
        return ret;
}
subsys_initcall(dma_heap_init);