1From c31b4def60d6d4b0669aea357113f7eb87c84e67 Mon Sep 17 00:00:00 2001
2From: ygopenhm <ygopenhm@vyagoo.com>
3Date: Wed, 6 Jul 2022 11:13:34 +0800
4Subject: [PATCH] kernel: rework dma-buf heaps and hook bsp/ into the build
5
Rewrite the CMA and system heaps on top of new page-pool and
deferred-free helper libraries, export the dma-heap API to in-kernel
users, and drop the DMABUF sysfs-stats and process-info code. Also wire
the new bsp/ directory into Kconfig and the top-level Makefile, and
carry small drm and scheduler tweaks.

6Signed-off-by: ygopenhm <ygopenhm@vyagoo.com>
7---
8 Kconfig                                      |   2 +
9 Makefile                                     |   2 +
10 drivers/dma-buf/Kconfig                      |  24 -
11 drivers/dma-buf/Makefile                     |   3 -
12 drivers/dma-buf/dma-buf.c                    |  53 +-
13 drivers/dma-buf/dma-heap.c                   | 223 +++++--
14 drivers/dma-buf/heaps/Kconfig                |  16 +-
15 drivers/dma-buf/heaps/Makefile               |   3 +-
16 drivers/dma-buf/heaps/cma_heap.c             | 338 +++++++++--
17 drivers/dma-buf/heaps/deferred-free-helper.c | 139 +++++
18 drivers/dma-buf/heaps/deferred-free-helper.h |  57 ++
19 drivers/dma-buf/heaps/heap-helpers.c         |   1 -
20 drivers/dma-buf/heaps/page_pool.c            | 247 ++++++++
21 drivers/dma-buf/heaps/page_pool.h            |  55 ++
22 drivers/dma-buf/heaps/system_heap.c          | 576 +++++++++++++++++--
23 drivers/gpu/drm/drm_gem_framebuffer_helper.c |   7 +-
24 drivers/gpu/drm/drm_vblank.c                 |   2 +-
25 include/linux/dma-buf.h                      |  29 +-
26 include/linux/dma-heap.h                     |  62 +-
27 kernel/sched/core.c                          |   3 +
28 20 files changed, 1580 insertions(+), 262 deletions(-)
29 create mode 100755 drivers/dma-buf/heaps/deferred-free-helper.c
30 create mode 100755 drivers/dma-buf/heaps/deferred-free-helper.h
31 create mode 100755 drivers/dma-buf/heaps/page_pool.c
32 create mode 100755 drivers/dma-buf/heaps/page_pool.h
33
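Note for reviewers: below is a minimal sketch (not part of the patch itself) of
how an in-kernel driver might use the heap interfaces this patch exports
(dma_heap_find(), dma_heap_buffer_alloc(), dma_heap_buffer_free(),
dma_heap_put()). The heap name "system" and the probe/remove structure are
illustrative assumptions.

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/errno.h>
#include <linux/fcntl.h>

/* Heap handle looked up once, e.g. at driver probe time */
static struct dma_heap *example_heap;

static int example_probe(void)
{
	/* dma_heap_find() takes a reference on the named heap */
	example_heap = dma_heap_find("system");
	if (!example_heap)
		return -ENODEV;
	return 0;
}

static struct dma_buf *example_alloc(size_t len)
{
	/* Kernel-internal path: returns a dma_buf, no fd is installed */
	return dma_heap_buffer_alloc(example_heap, len,
				     O_RDWR | O_CLOEXEC, 0);
}

static void example_remove(void)
{
	/* Drop the reference taken by dma_heap_find() */
	dma_heap_put(example_heap);
}

Buffers returned by example_alloc() are ordinary dma-bufs; callers check them
with IS_ERR() and release them with dma_heap_buffer_free(), which simply drops
the dma_buf reference.
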
34diff --git a/Kconfig b/Kconfig
35index 745bc773f..e6c33a96d 100644
36--- a/Kconfig
37+++ b/Kconfig
38@@ -7,6 +7,8 @@ mainmenu "Linux/$(ARCH) $(KERNELVERSION) Kernel Configuration"
39
40 source "scripts/Kconfig.include"
41
42+source "bsp/Kconfig"
43+
44 source "init/Kconfig"
45
46 source "kernel/Kconfig.freezer"
47diff --git a/Makefile b/Makefile
48index dce02fbb7..a60ed51b8 100644
49--- a/Makefile
50+++ b/Makefile
51@@ -491,6 +491,7 @@ LINUXINCLUDE    := \
52 		-I$(objtree)/arch/$(SRCARCH)/include/generated \
53 		$(if $(building_out_of_srctree),-I$(srctree)/include) \
54 		-I$(objtree)/include \
55+		-I$(srctree)/bsp/include \
56 		$(USERINCLUDE)
57
58 KBUILD_AFLAGS   := -D__ASSEMBLY__ -fno-PIE
59@@ -650,6 +651,7 @@ ifeq ($(KBUILD_EXTMOD),)
60 # Objects we will link into vmlinux / subdirs we need to visit
61 core-y		:= init/ usr/
62 drivers-y	:= drivers/ sound/
63+drivers-y	+= bsp/
64 drivers-$(CONFIG_SAMPLES) += samples/
65 drivers-y	+= net/ virt/
66 libs-y		:= lib/
67diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
68index 594b77d89..3ca7de37d 100644
69--- a/drivers/dma-buf/Kconfig
70+++ b/drivers/dma-buf/Kconfig
71@@ -56,19 +56,6 @@ config DMABUF_SELFTESTS
72 	default n
73 	depends on DMA_SHARED_BUFFER
74
75-config DMABUF_PROCESS_INFO
76-	bool "Show dmabuf usage of all processes"
77-	default n
78-	depends on DMA_SHARED_BUFFER
79-	depends on PROC_FS || DEBUG_FS
80-	help
81-	  Choose this option to show dmabuf objects usage of all processes.
82-	  Firstly, with this option, when a process creates a dmabuf object,
83-	  its pid and task_comm will be recorded in the dmabuf.
84-	  Secondly, this option creates dma_buf/process_bufinfo file in
85-	  debugfs (if DEBUG_FS enabled) and process_dmabuf_info file in procfs
86-	  (if PROC_FS enabled) to show dmabuf objects usage of all processes.
87-
88 menuconfig DMABUF_HEAPS
89 	bool "DMA-BUF Userland Memory Heaps"
90 	select DMA_SHARED_BUFFER
91@@ -78,17 +65,6 @@ menuconfig DMABUF_HEAPS
92 	  allows userspace to allocate dma-bufs that can be shared
93 	  between drivers.
94
95-menuconfig DMABUF_SYSFS_STATS
96-	bool "DMA-BUF sysfs statistics"
97-	depends on DMA_SHARED_BUFFER
98-	help
99-	   Choose this option to enable DMA-BUF sysfs statistics
100-	   in location /sys/kernel/dmabuf/buffers.
101-
102-	   /sys/kernel/dmabuf/buffers/<inode_number> will contain
103-	   statistics for the DMA-BUF with the unique inode number
104-	   <inode_number>.
105-
106 source "drivers/dma-buf/heaps/Kconfig"
107
108 endmenu
109diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
110index cfbc5e3da..995e05f60 100644
111--- a/drivers/dma-buf/Makefile
112+++ b/drivers/dma-buf/Makefile
113@@ -6,7 +6,6 @@ obj-$(CONFIG_DMABUF_HEAPS)	+= heaps/
114 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
115 obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
116 obj-$(CONFIG_UDMABUF)		+= udmabuf.o
117-obj-$(CONFIG_DMABUF_SYSFS_STATS) += dma-buf-sysfs-stats.o
118
119 dmabuf_selftests-y := \
120 	selftest.o \
121@@ -14,5 +13,3 @@ dmabuf_selftests-y := \
122 	st-dma-fence-chain.o
123
124 obj-$(CONFIG_DMABUF_SELFTESTS)	+= dmabuf_selftests.o
125-
126-obj-$(CONFIG_DMABUF_PROCESS_INFO)	+= dma-buf-process-info.o
127diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
128index b6ae42d59..922416b3a 100644
129--- a/drivers/dma-buf/dma-buf.c
130+++ b/drivers/dma-buf/dma-buf.c
131@@ -29,9 +29,6 @@
132 #include <uapi/linux/dma-buf.h>
133 #include <uapi/linux/magic.h>
134
135-#include "dma-buf-sysfs-stats.h"
136-#include "dma-buf-process-info.h"
137-
138 static inline int is_dma_buf_file(struct file *);
139
140 struct dma_buf_list {
141@@ -82,7 +79,6 @@ static void dma_buf_release(struct dentry *dentry)
142 	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
143 		dma_resv_fini(dmabuf->resv);
144
145-	dma_buf_stats_teardown(dmabuf);
146 	module_put(dmabuf->owner);
147 	kfree(dmabuf->name);
148 	kfree(dmabuf);
149@@ -583,10 +579,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
150 	file->f_mode |= FMODE_LSEEK;
151 	dmabuf->file = file;
152
153-	ret = dma_buf_stats_setup(dmabuf);
154-	if (ret)
155-		goto err_sysfs;
156-
157 	mutex_init(&dmabuf->lock);
158 	INIT_LIST_HEAD(&dmabuf->attachments);
159
160@@ -594,17 +586,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
161 	list_add(&dmabuf->list_node, &db_list.head);
162 	mutex_unlock(&db_list.lock);
163
164-	init_dma_buf_task_info(dmabuf);
165 	return dmabuf;
166
167-err_sysfs:
168-	/*
169-	 * Set file->f_path.dentry->d_fsdata to NULL so that when
170-	 * dma_buf_release() gets invoked by dentry_ops, it exits
171-	 * early before calling the release() dma_buf op.
172-	 */
173-	file->f_path.dentry->d_fsdata = NULL;
174-	fput(file);
175 err_dmabuf:
176 	kfree(dmabuf);
177 err_module:
178@@ -1304,10 +1287,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
179 		return ret;
180
181 	seq_puts(s, "\nDma-buf Objects:\n");
182-	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\t"
183-		   "%-16s\t%-16s\t%-16s\n",
184-		   "size", "flags", "mode", "count", "ino",
185-		   "buf_name", "exp_pid",  "exp_task_comm");
186+	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
187+		   "size", "flags", "mode", "count", "ino");
188
189 	list_for_each_entry(buf_obj, &db_list.head, list_node) {
190
191@@ -1315,16 +1296,13 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
192 		if (ret)
193 			goto error_unlock;
194
195-		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\t"
196-			   "%-16d\t%-16s\n",
197+		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
198 				buf_obj->size,
199 				buf_obj->file->f_flags, buf_obj->file->f_mode,
200 				file_count(buf_obj->file),
201 				buf_obj->exp_name,
202 				file_inode(buf_obj->file)->i_ino,
203-				buf_obj->name ?: "NULL",
204-				dma_buf_exp_pid(buf_obj),
205-				dma_buf_exp_task_comm(buf_obj) ?: "NULL");
206+				buf_obj->name ?: "");
207
208 		robj = buf_obj->resv;
209 		while (true) {
210@@ -1405,7 +1383,6 @@ static int dma_buf_init_debugfs(void)
211 		err = PTR_ERR(d);
212 	}
213
214-	dma_buf_process_info_init_debugfs(dma_buf_debugfs_dir);
215 	return err;
216 }
217
218@@ -1423,27 +1400,8 @@ static inline void dma_buf_uninit_debugfs(void)
219 }
220 #endif
221
222-#ifdef CONFIG_DMABUF_PROCESS_INFO
223-struct dma_buf *get_dma_buf_from_file(struct file *f)
224-{
225-	if (IS_ERR_OR_NULL(f))
226-		return NULL;
227-
228-	if (!is_dma_buf_file(f))
229-		return NULL;
230-
231-	return f->private_data;
232-}
233-#endif /* CONFIG_DMABUF_PROCESS_INFO */
234-
235 static int __init dma_buf_init(void)
236 {
237-	int ret;
238-
239-	ret = dma_buf_init_sysfs_statistics();
240-	if (ret)
241-		return ret;
242-
243 	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
244 	if (IS_ERR(dma_buf_mnt))
245 		return PTR_ERR(dma_buf_mnt);
246@@ -1451,7 +1409,6 @@ static int __init dma_buf_init(void)
247 	mutex_init(&db_list.lock);
248 	INIT_LIST_HEAD(&db_list.head);
249 	dma_buf_init_debugfs();
250-	dma_buf_process_info_init_procfs();
251 	return 0;
252 }
253 subsys_initcall(dma_buf_init);
254@@ -1460,7 +1417,5 @@ static void __exit dma_buf_deinit(void)
255 {
256 	dma_buf_uninit_debugfs();
257 	kern_unmount(dma_buf_mnt);
258-	dma_buf_uninit_sysfs_statistics();
259-	dma_buf_process_info_uninit_procfs();
260 }
261 __exitcall(dma_buf_deinit);
262diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
263index 70e410c64..26d59b48f 100644
264--- a/drivers/dma-buf/dma-heap.c
265+++ b/drivers/dma-buf/dma-heap.c
266@@ -14,6 +14,7 @@
267 #include <linux/xarray.h>
268 #include <linux/list.h>
269 #include <linux/slab.h>
270+#include <linux/nospec.h>
271 #include <linux/uaccess.h>
272 #include <linux/syscalls.h>
273 #include <linux/dma-heap.h>
274@@ -30,6 +31,7 @@
275  * @heap_devt		heap device node
276  * @list		list head connecting to list of heaps
277  * @heap_cdev		heap char device
278+ * @heap_dev		heap device struct
279  *
280  * Represents a heap of memory from which buffers can be made.
281  */
282@@ -40,6 +42,8 @@ struct dma_heap {
283 	dev_t heap_devt;
284 	struct list_head list;
285 	struct cdev heap_cdev;
286+	struct kref refcount;
287+	struct device *heap_dev;
288 };
289
290 static LIST_HEAD(heap_list);
291@@ -48,20 +52,72 @@ static dev_t dma_heap_devt;
292 static struct class *dma_heap_class;
293 static DEFINE_XARRAY_ALLOC(dma_heap_minors);
294
295-static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
296-				 unsigned int fd_flags,
297-				 unsigned int heap_flags)
298+struct dma_heap *dma_heap_find(const char *name)
299 {
300+	struct dma_heap *h;
301+
302+	mutex_lock(&heap_list_lock);
303+	list_for_each_entry(h, &heap_list, list) {
304+		if (!strcmp(h->name, name)) {
305+			kref_get(&h->refcount);
306+			mutex_unlock(&heap_list_lock);
307+			return h;
308+		}
309+	}
310+	mutex_unlock(&heap_list_lock);
311+	return NULL;
312+}
313+EXPORT_SYMBOL_GPL(dma_heap_find);
314+
315+
316+void dma_heap_buffer_free(struct dma_buf *dmabuf)
317+{
318+	dma_buf_put(dmabuf);
319+}
320+EXPORT_SYMBOL_GPL(dma_heap_buffer_free);
321+
322+struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
323+				      unsigned int fd_flags,
324+				      unsigned int heap_flags)
325+{
326+	if (fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
327+		return ERR_PTR(-EINVAL);
328+
329+	if (heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
330+		return ERR_PTR(-EINVAL);
331 	/*
332 	 * Allocations from all heaps have to begin
333 	 * and end on page boundaries.
334 	 */
335 	len = PAGE_ALIGN(len);
336 	if (!len)
337-		return -EINVAL;
338+		return ERR_PTR(-EINVAL);
339
340 	return heap->ops->allocate(heap, len, fd_flags, heap_flags);
341 }
342+EXPORT_SYMBOL_GPL(dma_heap_buffer_alloc);
343+
344+int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len,
345+			    unsigned int fd_flags,
346+			    unsigned int heap_flags)
347+{
348+	struct dma_buf *dmabuf;
349+	int fd;
350+
351+	dmabuf = dma_heap_buffer_alloc(heap, len, fd_flags, heap_flags);
352+
353+	if (IS_ERR(dmabuf))
354+		return PTR_ERR(dmabuf);
355+
356+	fd = dma_buf_fd(dmabuf, fd_flags);
357+	if (fd < 0) {
358+		dma_buf_put(dmabuf);
359+		/* just return, as put will call release and that will free */
360+	}
361+	return fd;
362+
363+}
364+EXPORT_SYMBOL_GPL(dma_heap_bufferfd_alloc);
365
366 static int dma_heap_open(struct inode *inode, struct file *file)
367 {
368@@ -89,15 +145,9 @@ static long dma_heap_ioctl_allocate(struct file *file, void *data)
369 	if (heap_allocation->fd)
370 		return -EINVAL;
371
372-	if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
373-		return -EINVAL;
374-
375-	if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
376-		return -EINVAL;
377-
378-	fd = dma_heap_buffer_alloc(heap, heap_allocation->len,
379-				   heap_allocation->fd_flags,
380-				   heap_allocation->heap_flags);
381+	fd = dma_heap_bufferfd_alloc(heap, heap_allocation->len,
382+				     heap_allocation->fd_flags,
383+				     heap_allocation->heap_flags);
384 	if (fd < 0)
385 		return fd;
386
387@@ -123,6 +173,7 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
388 	if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
389 		return -EINVAL;
390
391+	nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds));
392 	/* Get the kernel ioctl cmd that matches */
393 	kcmd = dma_heap_ioctl_cmds[nr];
394
395@@ -189,6 +240,47 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
396 {
397 	return heap->priv;
398 }
399+EXPORT_SYMBOL_GPL(dma_heap_get_drvdata);
400+
401+static void dma_heap_release(struct kref *ref)
402+{
403+	struct dma_heap *heap = container_of(ref, struct dma_heap, refcount);
404+	int minor = MINOR(heap->heap_devt);
405+
406+	/* Note: we are already holding the heap_list_lock here */
407+	list_del(&heap->list);
408+
409+	device_destroy(dma_heap_class, heap->heap_devt);
410+	cdev_del(&heap->heap_cdev);
411+	xa_erase(&dma_heap_minors, minor);
412+
413+	kfree(heap);
414+}
415+
416+void dma_heap_put(struct dma_heap *h)
417+{
418+	/*
419+	 * Take the heap_list_lock now to avoid racing with code
420+	 * scanning the list and then taking a kref.
421+	 */
422+	mutex_lock(&heap_list_lock);
423+	kref_put(&h->refcount, dma_heap_release);
424+	mutex_unlock(&heap_list_lock);
425+}
426+EXPORT_SYMBOL_GPL(dma_heap_put);
427+
428+/**
429+ * dma_heap_get_dev() - get device struct for the heap
430+ * @heap: DMA-Heap to retrieve device struct from
431+ *
432+ * Returns:
433+ * The device struct for the heap.
434+ */
435+struct device *dma_heap_get_dev(struct dma_heap *heap)
436+{
437+	return heap->heap_dev;
438+}
439+EXPORT_SYMBOL_GPL(dma_heap_get_dev);
440
441 /**
442  * dma_heap_get_name() - get heap name
443@@ -201,11 +293,11 @@ const char *dma_heap_get_name(struct dma_heap *heap)
444 {
445 	return heap->name;
446 }
447+EXPORT_SYMBOL_GPL(dma_heap_get_name);
448
449 struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
450 {
451-	struct dma_heap *heap, *h, *err_ret;
452-	struct device *dev_ret;
453+	struct dma_heap *heap, *err_ret;
454 	unsigned int minor;
455 	int ret;
456
457@@ -220,21 +312,19 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
458 	}
459
460 	/* check the name is unique */
461-	mutex_lock(&heap_list_lock);
462-	list_for_each_entry(h, &heap_list, list) {
463-		if (!strcmp(h->name, exp_info->name)) {
464-			mutex_unlock(&heap_list_lock);
465-			pr_err("dma_heap: Already registered heap named %s\n",
466-			       exp_info->name);
467-			return ERR_PTR(-EINVAL);
468-		}
469+	heap = dma_heap_find(exp_info->name);
470+	if (heap) {
471+		pr_err("dma_heap: Already registered heap named %s\n",
472+		       exp_info->name);
473+		dma_heap_put(heap);
474+		return ERR_PTR(-EINVAL);
475 	}
476-	mutex_unlock(&heap_list_lock);
477
478 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
479 	if (!heap)
480 		return ERR_PTR(-ENOMEM);
481
482+	kref_init(&heap->refcount);
483 	heap->name = exp_info->name;
484 	heap->ops = exp_info->ops;
485 	heap->priv = exp_info->priv;
486@@ -259,16 +349,20 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
487 		goto err1;
488 	}
489
490-	dev_ret = device_create(dma_heap_class,
491-				NULL,
492-				heap->heap_devt,
493-				NULL,
494-				heap->name);
495-	if (IS_ERR(dev_ret)) {
496+	heap->heap_dev = device_create(dma_heap_class,
497+				       NULL,
498+				       heap->heap_devt,
499+				       NULL,
500+				       heap->name);
501+	if (IS_ERR(heap->heap_dev)) {
502 		pr_err("dma_heap: Unable to create device\n");
503-		err_ret = ERR_CAST(dev_ret);
504+		err_ret = ERR_CAST(heap->heap_dev);
505 		goto err2;
506 	}
507+
508+	/* Make sure it doesn't disappear on us */
509+	heap->heap_dev = get_device(heap->heap_dev);
510+
511 	/* Add heap to the list */
512 	mutex_lock(&heap_list_lock);
513 	list_add(&heap->list, &heap_list);
514@@ -284,27 +378,88 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
515 	kfree(heap);
516 	return err_ret;
517 }
518+EXPORT_SYMBOL_GPL(dma_heap_add);
519
520 static char *dma_heap_devnode(struct device *dev, umode_t *mode)
521 {
522 	return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
523 }
524
525+static ssize_t total_pools_kb_show(struct kobject *kobj,
526+				   struct kobj_attribute *attr, char *buf)
527+{
528+	struct dma_heap *heap;
529+	u64 total_pool_size = 0;
530+
531+	mutex_lock(&heap_list_lock);
532+	list_for_each_entry(heap, &heap_list, list) {
533+		if (heap->ops->get_pool_size)
534+			total_pool_size += heap->ops->get_pool_size(heap);
535+	}
536+	mutex_unlock(&heap_list_lock);
537+
538+	return sysfs_emit(buf, "%llu\n", total_pool_size / 1024);
539+}
540+
541+static struct kobj_attribute total_pools_kb_attr =
542+	__ATTR_RO(total_pools_kb);
543+
544+static struct attribute *dma_heap_sysfs_attrs[] = {
545+	&total_pools_kb_attr.attr,
546+	NULL,
547+};
548+
549+ATTRIBUTE_GROUPS(dma_heap_sysfs);
550+
551+static struct kobject *dma_heap_kobject;
552+
553+static int dma_heap_sysfs_setup(void)
554+{
555+	int ret;
556+
557+	dma_heap_kobject = kobject_create_and_add("dma_heap", kernel_kobj);
558+	if (!dma_heap_kobject)
559+		return -ENOMEM;
560+
561+	ret = sysfs_create_groups(dma_heap_kobject, dma_heap_sysfs_groups);
562+	if (ret) {
563+		kobject_put(dma_heap_kobject);
564+		return ret;
565+	}
566+
567+	return 0;
568+}
569+
570+static void dma_heap_sysfs_teardown(void)
571+{
572+	kobject_put(dma_heap_kobject);
573+}
574+
575 static int dma_heap_init(void)
576 {
577 	int ret;
578
579-	ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
580+	ret = dma_heap_sysfs_setup();
581 	if (ret)
582 		return ret;
583
584+	ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
585+	if (ret)
586+		goto err_chrdev;
587+
588 	dma_heap_class = class_create(THIS_MODULE, DEVNAME);
589 	if (IS_ERR(dma_heap_class)) {
590-		unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
591-		return PTR_ERR(dma_heap_class);
592+		ret = PTR_ERR(dma_heap_class);
593+		goto err_class;
594 	}
595 	dma_heap_class->devnode = dma_heap_devnode;
596
597 	return 0;
598+
599+err_class:
600+	unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
601+err_chrdev:
602+	dma_heap_sysfs_teardown();
603+	return ret;
604 }
605 subsys_initcall(dma_heap_init);
606diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
607index a5eef06c4..ff52efa83 100644
608--- a/drivers/dma-buf/heaps/Kconfig
609+++ b/drivers/dma-buf/heaps/Kconfig
610@@ -1,12 +1,22 @@
611+menuconfig DMABUF_HEAPS_DEFERRED_FREE
612+	bool "DMA-BUF heaps deferred-free library"
613+	help
614+	  Choose this option to enable the DMA-BUF heaps deferred-free library.
615+
616+menuconfig DMABUF_HEAPS_PAGE_POOL
617+	bool "DMA-BUF heaps page-pool library"
618+	help
619+	  Choose this option to enable the DMA-BUF heaps page-pool library.
620+
621 config DMABUF_HEAPS_SYSTEM
622-	bool "DMA-BUF System Heap"
623-	depends on DMABUF_HEAPS
624+	tristate "DMA-BUF System Heap"
625+	depends on DMABUF_HEAPS && DMABUF_HEAPS_DEFERRED_FREE && DMABUF_HEAPS_PAGE_POOL
626 	help
627 	  Choose this option to enable the system dmabuf heap. The system heap
628 	  is backed by pages from the buddy allocator. If in doubt, say Y.
629
630 config DMABUF_HEAPS_CMA
631-	bool "DMA-BUF CMA Heap"
632+	tristate "DMA-BUF CMA Heap"
633 	depends on DMABUF_HEAPS && DMA_CMA
634 	help
635 	  Choose this option to enable dma-buf CMA heap. This heap is backed
636diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
637index 6e54cdec3..4d4cd94a3 100644
638--- a/drivers/dma-buf/heaps/Makefile
639+++ b/drivers/dma-buf/heaps/Makefile
640@@ -1,4 +1,5 @@
641 # SPDX-License-Identifier: GPL-2.0
642-obj-y					+= heap-helpers.o
643+obj-$(CONFIG_DMABUF_HEAPS_DEFERRED_FREE) += deferred-free-helper.o
644+obj-$(CONFIG_DMABUF_HEAPS_PAGE_POOL)	+= page_pool.o
645 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM)	+= system_heap.o
646 obj-$(CONFIG_DMABUF_HEAPS_CMA)		+= cma_heap.o
647diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
648index e55384dc1..fd564aa70 100644
649--- a/drivers/dma-buf/heaps/cma_heap.c
650+++ b/drivers/dma-buf/heaps/cma_heap.c
651@@ -2,76 +2,306 @@
652 /*
653  * DMABUF CMA heap exporter
654  *
655- * Copyright (C) 2012, 2019 Linaro Ltd.
656+ * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
657  * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
658+ *
659+ * Also utilizing parts of Andrew Davis' SRAM heap:
660+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
661+ *	Andrew F. Davis <afd@ti.com>
662  */
663-
664 #include <linux/cma.h>
665-#include <linux/device.h>
666 #include <linux/dma-buf.h>
667 #include <linux/dma-heap.h>
668 #include <linux/dma-map-ops.h>
669 #include <linux/err.h>
670-#include <linux/errno.h>
671 #include <linux/highmem.h>
672+#include <linux/io.h>
673+#include <linux/mm.h>
674 #include <linux/module.h>
675-#include <linux/slab.h>
676 #include <linux/scatterlist.h>
677-#include <linux/sched/signal.h>
678+#include <linux/slab.h>
679+#include <linux/vmalloc.h>
680
681-#include "heap-helpers.h"
682
683 struct cma_heap {
684 	struct dma_heap *heap;
685 	struct cma *cma;
686 };
687
688-static void cma_heap_free(struct heap_helper_buffer *buffer)
689+struct cma_heap_buffer {
690+	struct cma_heap *heap;
691+	struct list_head attachments;
692+	struct mutex lock;
693+	unsigned long len;
694+	struct page *cma_pages;
695+	struct page **pages;
696+	pgoff_t pagecount;
697+	int vmap_cnt;
698+	void *vaddr;
699+};
700+
701+struct dma_heap_attachment {
702+	struct device *dev;
703+	struct sg_table table;
704+	struct list_head list;
705+	bool mapped;
706+};
707+
708+static int cma_heap_attach(struct dma_buf *dmabuf,
709+			   struct dma_buf_attachment *attachment)
710+{
711+	struct cma_heap_buffer *buffer = dmabuf->priv;
712+	struct dma_heap_attachment *a;
713+	int ret;
714+
715+	a = kzalloc(sizeof(*a), GFP_KERNEL);
716+	if (!a)
717+		return -ENOMEM;
718+
719+	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
720+					buffer->pagecount, 0,
721+					buffer->pagecount << PAGE_SHIFT,
722+					GFP_KERNEL);
723+	if (ret) {
724+		kfree(a);
725+		return ret;
726+	}
727+
728+	a->dev = attachment->dev;
729+	INIT_LIST_HEAD(&a->list);
730+	a->mapped = false;
731+
732+	attachment->priv = a;
733+
734+	mutex_lock(&buffer->lock);
735+	list_add(&a->list, &buffer->attachments);
736+	mutex_unlock(&buffer->lock);
737+
738+	return 0;
739+}
740+
741+static void cma_heap_detach(struct dma_buf *dmabuf,
742+			    struct dma_buf_attachment *attachment)
743+{
744+	struct cma_heap_buffer *buffer = dmabuf->priv;
745+	struct dma_heap_attachment *a = attachment->priv;
746+
747+	mutex_lock(&buffer->lock);
748+	list_del(&a->list);
749+	mutex_unlock(&buffer->lock);
750+
751+	sg_free_table(&a->table);
752+	kfree(a);
753+}
754+
755+static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
756+					     enum dma_data_direction direction)
757 {
758-	struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
759-	unsigned long nr_pages = buffer->pagecount;
760-	struct page *cma_pages = buffer->priv_virt;
761+	struct dma_heap_attachment *a = attachment->priv;
762+	struct sg_table *table = &a->table;
763+	int attrs = attachment->dma_map_attrs;
764+	int ret;
765+
766+	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
767+	if (ret)
768+		return ERR_PTR(-ENOMEM);
769+	a->mapped = true;
770+	return table;
771+}
772+
773+static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
774+				   struct sg_table *table,
775+				   enum dma_data_direction direction)
776+{
777+	struct dma_heap_attachment *a = attachment->priv;
778+	int attrs = attachment->dma_map_attrs;
779+
780+	a->mapped = false;
781+	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
782+}
783+
784+static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
785+					     enum dma_data_direction direction)
786+{
787+	struct cma_heap_buffer *buffer = dmabuf->priv;
788+	struct dma_heap_attachment *a;
789+
790+	if (buffer->vmap_cnt)
791+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
792+
793+	mutex_lock(&buffer->lock);
794+	list_for_each_entry(a, &buffer->attachments, list) {
795+		if (!a->mapped)
796+			continue;
797+		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
798+	}
799+	mutex_unlock(&buffer->lock);
800+
801+	return 0;
802+}
803+
804+static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
805+					   enum dma_data_direction direction)
806+{
807+	struct cma_heap_buffer *buffer = dmabuf->priv;
808+	struct dma_heap_attachment *a;
809+
810+	if (buffer->vmap_cnt)
811+		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
812+
813+	mutex_lock(&buffer->lock);
814+	list_for_each_entry(a, &buffer->attachments, list) {
815+		if (!a->mapped)
816+			continue;
817+		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
818+	}
819+	mutex_unlock(&buffer->lock);
820+
821+	return 0;
822+}
823+
824+static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
825+{
826+	struct vm_area_struct *vma = vmf->vma;
827+	struct cma_heap_buffer *buffer = vma->vm_private_data;
828+
829+	if (vmf->pgoff >= buffer->pagecount)
830+		return VM_FAULT_SIGBUS;
831+
832+	vmf->page = buffer->pages[vmf->pgoff];
833+	get_page(vmf->page);
834+
835+	return 0;
836+}
837+
838+static const struct vm_operations_struct dma_heap_vm_ops = {
839+	.fault = cma_heap_vm_fault,
840+};
841+
842+static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
843+{
844+	struct cma_heap_buffer *buffer = dmabuf->priv;
845+
846+	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
847+		return -EINVAL;
848+
849+	vma->vm_ops = &dma_heap_vm_ops;
850+	vma->vm_private_data = buffer;
851+
852+	return 0;
853+}
854+
855+static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
856+{
857+	void *vaddr;
858+
859+	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
860+	if (!vaddr)
861+		return ERR_PTR(-ENOMEM);
862+
863+	return vaddr;
864+}
865+
866+static void *cma_heap_vmap(struct dma_buf *dmabuf)
867+{
868+	struct cma_heap_buffer *buffer = dmabuf->priv;
869+	void *vaddr;
870+
871+	mutex_lock(&buffer->lock);
872+	if (buffer->vmap_cnt) {
873+		buffer->vmap_cnt++;
874+		vaddr = buffer->vaddr;
875+		goto out;
876+	}
877+
878+	vaddr = cma_heap_do_vmap(buffer);
879+	if (IS_ERR(vaddr))
880+		goto out;
881+
882+	buffer->vaddr = vaddr;
883+	buffer->vmap_cnt++;
884+out:
885+	mutex_unlock(&buffer->lock);
886+
887+	return vaddr;
888+}
889+
890+static void cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
891+{
892+	struct cma_heap_buffer *buffer = dmabuf->priv;
893+
894+	mutex_lock(&buffer->lock);
895+	if (!--buffer->vmap_cnt) {
896+		vunmap(buffer->vaddr);
897+		buffer->vaddr = NULL;
898+	}
899+	mutex_unlock(&buffer->lock);
900+}
901+
902+static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
903+{
904+	struct cma_heap_buffer *buffer = dmabuf->priv;
905+	struct cma_heap *cma_heap = buffer->heap;
906+
907+	if (buffer->vmap_cnt > 0) {
908+		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
909+		vunmap(buffer->vaddr);
910+	}
911
912 	/* free page list */
913 	kfree(buffer->pages);
914 	/* release memory */
915-	cma_release(cma_heap->cma, cma_pages, nr_pages);
916+	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
917 	kfree(buffer);
918 }
919
920-/* dmabuf heap CMA operations functions */
921-static int cma_heap_allocate(struct dma_heap *heap,
922-			     unsigned long len,
923-			     unsigned long fd_flags,
924-			     unsigned long heap_flags)
925+static const struct dma_buf_ops cma_heap_buf_ops = {
926+	.attach = cma_heap_attach,
927+	.detach = cma_heap_detach,
928+	.map_dma_buf = cma_heap_map_dma_buf,
929+	.unmap_dma_buf = cma_heap_unmap_dma_buf,
930+	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
931+	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
932+	.mmap = cma_heap_mmap,
933+	.vmap = cma_heap_vmap,
934+	.vunmap = cma_heap_vunmap,
935+	.release = cma_heap_dma_buf_release,
936+};
937+
938+static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
939+					 unsigned long len,
940+					 unsigned long fd_flags,
941+					 unsigned long heap_flags)
942 {
943 	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
944-	struct heap_helper_buffer *helper_buffer;
945-	struct page *cma_pages;
946+	struct cma_heap_buffer *buffer;
947+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
948 	size_t size = PAGE_ALIGN(len);
949-	unsigned long nr_pages = size >> PAGE_SHIFT;
950+	pgoff_t pagecount = size >> PAGE_SHIFT;
951 	unsigned long align = get_order(size);
952+	struct page *cma_pages;
953 	struct dma_buf *dmabuf;
954 	int ret = -ENOMEM;
955 	pgoff_t pg;
956
957-	if (align > CONFIG_CMA_ALIGNMENT)
958-		align = CONFIG_CMA_ALIGNMENT;
959+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
960+	if (!buffer)
961+		return ERR_PTR(-ENOMEM);
962
963-	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
964-	if (!helper_buffer)
965-		return -ENOMEM;
966+	INIT_LIST_HEAD(&buffer->attachments);
967+	mutex_init(&buffer->lock);
968+	buffer->len = size;
969
970-	init_heap_helper_buffer(helper_buffer, cma_heap_free);
971-	helper_buffer->heap = heap;
972-	helper_buffer->size = len;
973+	if (align > CONFIG_CMA_ALIGNMENT)
974+		align = CONFIG_CMA_ALIGNMENT;
975
976-	cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
977+	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
978 	if (!cma_pages)
979-		goto free_buf;
980+		goto free_buffer;
981
982+	/* Clear the cma pages */
983 	if (PageHighMem(cma_pages)) {
984-		unsigned long nr_clear_pages = nr_pages;
985+		unsigned long nr_clear_pages = pagecount;
986 		struct page *page = cma_pages;
987
988 		while (nr_clear_pages > 0) {
989@@ -85,7 +315,6 @@ static int cma_heap_allocate(struct dma_heap *heap,
990 			 */
991 			if (fatal_signal_pending(current))
992 				goto free_cma;
993-
994 			page++;
995 			nr_clear_pages--;
996 		}
997@@ -93,44 +322,41 @@ static int cma_heap_allocate(struct dma_heap *heap,
998 		memset(page_address(cma_pages), 0, size);
999 	}
1000
1001-	helper_buffer->pagecount = nr_pages;
1002-	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
1003-					     sizeof(*helper_buffer->pages),
1004-					     GFP_KERNEL);
1005-	if (!helper_buffer->pages) {
1006+	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
1007+	if (!buffer->pages) {
1008 		ret = -ENOMEM;
1009 		goto free_cma;
1010 	}
1011
1012-	for (pg = 0; pg < helper_buffer->pagecount; pg++)
1013-		helper_buffer->pages[pg] = &cma_pages[pg];
1014+	for (pg = 0; pg < pagecount; pg++)
1015+		buffer->pages[pg] = &cma_pages[pg];
1016+
1017+	buffer->cma_pages = cma_pages;
1018+	buffer->heap = cma_heap;
1019+	buffer->pagecount = pagecount;
1020
1021 	/* create the dmabuf */
1022-	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
1023+	exp_info.exp_name = dma_heap_get_name(heap);
1024+	exp_info.ops = &cma_heap_buf_ops;
1025+	exp_info.size = buffer->len;
1026+	exp_info.flags = fd_flags;
1027+	exp_info.priv = buffer;
1028+	dmabuf = dma_buf_export(&exp_info);
1029 	if (IS_ERR(dmabuf)) {
1030 		ret = PTR_ERR(dmabuf);
1031 		goto free_pages;
1032 	}
1033
1034-	helper_buffer->dmabuf = dmabuf;
1035-	helper_buffer->priv_virt = cma_pages;
1036-
1037-	ret = dma_buf_fd(dmabuf, fd_flags);
1038-	if (ret < 0) {
1039-		dma_buf_put(dmabuf);
1040-		/* just return, as put will call release and that will free */
1041-		return ret;
1042-	}
1043-
1044-	return ret;
1045+	return dmabuf;
1046
1047 free_pages:
1048-	kfree(helper_buffer->pages);
1049+	kfree(buffer->pages);
1050 free_cma:
1051-	cma_release(cma_heap->cma, cma_pages, nr_pages);
1052-free_buf:
1053-	kfree(helper_buffer);
1054-	return ret;
1055+	cma_release(cma_heap->cma, cma_pages, pagecount);
1056+free_buffer:
1057+	kfree(buffer);
1058+
1059+	return ERR_PTR(ret);
1060 }
1061
1062 static const struct dma_heap_ops cma_heap_ops = {
1063diff --git a/drivers/dma-buf/heaps/deferred-free-helper.c b/drivers/dma-buf/heaps/deferred-free-helper.c
1064new file mode 100755
1065index 000000000..1330d279f
1066--- /dev/null
1067+++ b/drivers/dma-buf/heaps/deferred-free-helper.c
1068@@ -0,0 +1,139 @@
1069+// SPDX-License-Identifier: GPL-2.0
1070+/*
1071+ * Deferred dmabuf freeing helper
1072+ *
1073+ * Copyright (C) 2020 Linaro, Ltd.
1074+ *
1075+ * Based on the ION page pool code
1076+ * Copyright (C) 2011 Google, Inc.
1077+ */
1078+
1079+#include <linux/freezer.h>
1080+#include <linux/list.h>
1081+#include <linux/slab.h>
1082+#include <linux/swap.h>
1083+#include <linux/sched/signal.h>
1084+
1085+#include "deferred-free-helper.h"
1086+
1087+static LIST_HEAD(free_list);
1088+static size_t list_nr_pages;
1089+wait_queue_head_t freelist_waitqueue;
1090+struct task_struct *freelist_task;
1091+static DEFINE_SPINLOCK(free_list_lock);
1092+
1093+void deferred_free(struct deferred_freelist_item *item,
1094+		   void (*free)(struct deferred_freelist_item*,
1095+				enum df_reason),
1096+		   size_t nr_pages)
1097+{
1098+	unsigned long flags;
1099+
1100+	INIT_LIST_HEAD(&item->list);
1101+	item->nr_pages = nr_pages;
1102+	item->free = free;
1103+
1104+	spin_lock_irqsave(&free_list_lock, flags);
1105+	list_add(&item->list, &free_list);
1106+	list_nr_pages += nr_pages;
1107+	spin_unlock_irqrestore(&free_list_lock, flags);
1108+	wake_up(&freelist_waitqueue);
1109+}
1110+EXPORT_SYMBOL_GPL(deferred_free);
1111+
1112+static size_t free_one_item(enum df_reason reason)
1113+{
1114+	unsigned long flags;
1115+	size_t nr_pages;
1116+	struct deferred_freelist_item *item;
1117+
1118+	spin_lock_irqsave(&free_list_lock, flags);
1119+	if (list_empty(&free_list)) {
1120+		spin_unlock_irqrestore(&free_list_lock, flags);
1121+		return 0;
1122+	}
1123+	item = list_first_entry(&free_list, struct deferred_freelist_item, list);
1124+	list_del(&item->list);
1125+	nr_pages = item->nr_pages;
1126+	list_nr_pages -= nr_pages;
1127+	spin_unlock_irqrestore(&free_list_lock, flags);
1128+
1129+	item->free(item, reason);
1130+	return nr_pages;
1131+}
1132+
1133+unsigned long get_freelist_nr_pages(void)
1134+{
1135+	unsigned long nr_pages;
1136+	unsigned long flags;
1137+
1138+	spin_lock_irqsave(&free_list_lock, flags);
1139+	nr_pages = list_nr_pages;
1140+	spin_unlock_irqrestore(&free_list_lock, flags);
1141+	return nr_pages;
1142+}
1143+EXPORT_SYMBOL_GPL(get_freelist_nr_pages);
1144+
1145+static unsigned long freelist_shrink_count(struct shrinker *shrinker,
1146+					   struct shrink_control *sc)
1147+{
1148+	return get_freelist_nr_pages();
1149+}
1150+
1151+static unsigned long freelist_shrink_scan(struct shrinker *shrinker,
1152+					  struct shrink_control *sc)
1153+{
1154+	unsigned long total_freed = 0;
1155+
1156+	if (sc->nr_to_scan == 0)
1157+		return 0;
1158+
1159+	while (total_freed < sc->nr_to_scan) {
1160+		size_t pages_freed = free_one_item(DF_UNDER_PRESSURE);
1161+
1162+		if (!pages_freed)
1163+			break;
1164+
1165+		total_freed += pages_freed;
1166+	}
1167+
1168+	return total_freed;
1169+}
1170+
1171+static struct shrinker freelist_shrinker = {
1172+	.count_objects = freelist_shrink_count,
1173+	.scan_objects = freelist_shrink_scan,
1174+	.seeks = DEFAULT_SEEKS,
1175+	.batch = 0,
1176+};
1177+
1178+static int deferred_free_thread(void *data)
1179+{
1180+	while (true) {
1181+		wait_event_freezable(freelist_waitqueue,
1182+				     get_freelist_nr_pages() > 0);
1183+
1184+		free_one_item(DF_NORMAL);
1185+	}
1186+
1187+	return 0;
1188+}
1189+
1190+static int deferred_freelist_init(void)
1191+{
1192+	list_nr_pages = 0;
1193+
1194+	init_waitqueue_head(&freelist_waitqueue);
1195+	freelist_task = kthread_run(deferred_free_thread, NULL,
1196+				    "%s", "dmabuf-deferred-free-worker");
1197+	if (IS_ERR(freelist_task)) {
1198+		pr_err("Creating thread for deferred free failed\n");
1199+		return -1;
1200+	}
1201+	sched_set_normal(freelist_task, 19);
1202+
1203+	return register_shrinker(&freelist_shrinker);
1204+}
1205+module_init(deferred_freelist_init);
1206+MODULE_LICENSE("GPL v2");
1207+
1208diff --git a/drivers/dma-buf/heaps/deferred-free-helper.h b/drivers/dma-buf/heaps/deferred-free-helper.h
1209new file mode 100755
1210index 000000000..415440314
1211--- /dev/null
1212+++ b/drivers/dma-buf/heaps/deferred-free-helper.h
1213@@ -0,0 +1,57 @@
1214+/* SPDX-License-Identifier: GPL-2.0 */
1215+
1216+#ifndef DEFERRED_FREE_HELPER_H
1217+#define DEFERRED_FREE_HELPER_H
1218+
1219+/**
1220+ * df_reason - enum for reason why item was freed
1221+ *
1222+ * This provides a reason for why the free function was called
1223+ * on the item. This is useful when deferred_free is used in
1224+ * combination with a pagepool, so under pressure the page can
1225+ * be immediately freed.
1226+ *
1227+ * DF_NORMAL:         Normal deferred free
1228+ *
1229+ * DF_UNDER_PRESSURE: Free was called because the system
1230+ *                    is under memory pressure. Usually
1231+ *                    from a shrinker. Avoid allocating
1232+ *                    memory in the free call, as it may
1233+ *                    fail.
1234+ */
1235+enum df_reason {
1236+	DF_NORMAL,
1237+	DF_UNDER_PRESSURE,
1238+};
1239+
1240+/**
1241+ * deferred_freelist_item - item structure for deferred freelist
1242+ *
1243+ * This is to be added to the structure for whatever you want to
1244+ * defer freeing on.
1245+ *
1246+ * @nr_pages: number of pages used by item to be freed
1247+ * @free: function pointer to be called when freeing the item
1248+ * @list: list entry for the deferred list
1249+ */
1250+struct deferred_freelist_item {
1251+	size_t nr_pages;
1252+	void (*free)(struct deferred_freelist_item *i,
1253+		     enum df_reason reason);
1254+	struct list_head list;
1255+};
1256+
1257+/**
1258+ * deferred_free - call to add item to the deferred free list
1259+ *
1260+ * @item: Pointer to deferred_freelist_item field of a structure
1261+ * @free: Function pointer to the free call
1262+ * @nr_pages: number of pages to be freed
1263+ */
1264+void deferred_free(struct deferred_freelist_item *item,
1265+		   void (*free)(struct deferred_freelist_item *i,
1266+				enum df_reason reason),
1267+		   size_t nr_pages);
1268+
1269+unsigned long get_freelist_nr_pages(void);
1270+#endif
1271diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c
1272index 35aa65bbf..d0696cf93 100644
1273--- a/drivers/dma-buf/heaps/heap-helpers.c
1274+++ b/drivers/dma-buf/heaps/heap-helpers.c
1275@@ -30,7 +30,6 @@ struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
1276 {
1277 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1278
1279-	exp_info.exp_name = dma_heap_get_name(buffer->heap);
1280 	exp_info.ops = &heap_helper_ops;
1281 	exp_info.size = buffer->size;
1282 	exp_info.flags = fd_flags;
1283diff --git a/drivers/dma-buf/heaps/page_pool.c b/drivers/dma-buf/heaps/page_pool.c
1284new file mode 100755
1285index 000000000..b79e737ba
1286--- /dev/null
1287+++ b/drivers/dma-buf/heaps/page_pool.c
1288@@ -0,0 +1,247 @@
1289+// SPDX-License-Identifier: GPL-2.0
1290+/*
1291+ * DMA BUF page pool system
1292+ *
1293+ * Copyright (C) 2020 Linaro Ltd.
1294+ *
1295+ * Based on the ION page pool code
1296+ * Copyright (C) 2011 Google, Inc.
1297+ */
1298+
1299+#include <linux/freezer.h>
1300+#include <linux/list.h>
1301+#include <linux/slab.h>
1302+#include <linux/swap.h>
1303+#include <linux/sched/signal.h>
1304+#include "page_pool.h"
1305+
1306+static LIST_HEAD(pool_list);
1307+static DEFINE_MUTEX(pool_list_lock);
1308+
1309+static inline
1310+struct page *dmabuf_page_pool_alloc_pages(struct dmabuf_page_pool *pool)
1311+{
1312+	if (fatal_signal_pending(current))
1313+		return NULL;
1314+	return alloc_pages(pool->gfp_mask, pool->order);
1315+}
1316+
1317+static inline void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool,
1318+					       struct page *page)
1319+{
1320+	__free_pages(page, pool->order);
1321+}
1322+
1323+static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page)
1324+{
1325+	int index;
1326+
1327+	if (PageHighMem(page))
1328+		index = POOL_HIGHPAGE;
1329+	else
1330+		index = POOL_LOWPAGE;
1331+
1332+	mutex_lock(&pool->mutex);
1333+	list_add_tail(&page->lru, &pool->items[index]);
1334+	pool->count[index]++;
1335+	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
1336+			    1 << pool->order);
1337+	mutex_unlock(&pool->mutex);
1338+}
1339+
1340+static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index)
1341+{
1342+	struct page *page;
1343+
1344+	mutex_lock(&pool->mutex);
1345+	page = list_first_entry_or_null(&pool->items[index], struct page, lru);
1346+	if (page) {
1347+		pool->count[index]--;
1348+		list_del(&page->lru);
1349+		mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
1350+				    -(1 << pool->order));
1351+	}
1352+	mutex_unlock(&pool->mutex);
1353+
1354+	return page;
1355+}
1356+
1357+static struct page *dmabuf_page_pool_fetch(struct dmabuf_page_pool *pool)
1358+{
1359+	struct page *page = NULL;
1360+
1361+	page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
1362+	if (!page)
1363+		page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
1364+
1365+	return page;
1366+}
1367+
1368+struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool)
1369+{
1370+	struct page *page = NULL;
1371+
1372+	if (WARN_ON(!pool))
1373+		return NULL;
1374+
1375+	page = dmabuf_page_pool_fetch(pool);
1376+
1377+	if (!page)
1378+		page = dmabuf_page_pool_alloc_pages(pool);
1379+	return page;
1380+}
1381+EXPORT_SYMBOL_GPL(dmabuf_page_pool_alloc);
1382+
1383+void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page)
1384+{
1385+	if (WARN_ON(pool->order != compound_order(page)))
1386+		return;
1387+
1388+	dmabuf_page_pool_add(pool, page);
1389+}
1390+EXPORT_SYMBOL_GPL(dmabuf_page_pool_free);
1391+
1392+static int dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high)
1393+{
1394+	int count = pool->count[POOL_LOWPAGE];
1395+
1396+	if (high)
1397+		count += pool->count[POOL_HIGHPAGE];
1398+
1399+	return count << pool->order;
1400+}
1401+
1402+struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order)
1403+{
1404+	struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1405+	int i;
1406+
1407+	if (!pool)
1408+		return NULL;
1409+
1410+	for (i = 0; i < POOL_TYPE_SIZE; i++) {
1411+		pool->count[i] = 0;
1412+		INIT_LIST_HEAD(&pool->items[i]);
1413+	}
1414+	pool->gfp_mask = gfp_mask | __GFP_COMP;
1415+	pool->order = order;
1416+	mutex_init(&pool->mutex);
1417+
1418+	mutex_lock(&pool_list_lock);
1419+	list_add(&pool->list, &pool_list);
1420+	mutex_unlock(&pool_list_lock);
1421+
1422+	return pool;
1423+}
1424+EXPORT_SYMBOL_GPL(dmabuf_page_pool_create);
1425+
1426+void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
1427+{
1428+	struct page *page;
1429+	int i;
1430+
1431+	/* Remove us from the pool list */
1432+	mutex_lock(&pool_list_lock);
1433+	list_del(&pool->list);
1434+	mutex_unlock(&pool_list_lock);
1435+
1436+	/* Free any remaining pages in the pool */
1437+	for (i = 0; i < POOL_TYPE_SIZE; i++) {
1438+		while ((page = dmabuf_page_pool_remove(pool, i)))
1439+			dmabuf_page_pool_free_pages(pool, page);
1440+	}
1441+
1442+	kfree(pool);
1443+}
1444+EXPORT_SYMBOL_GPL(dmabuf_page_pool_destroy);
1445+
1446+static int dmabuf_page_pool_do_shrink(struct dmabuf_page_pool *pool, gfp_t gfp_mask,
1447+				      int nr_to_scan)
1448+{
1449+	int freed = 0;
1450+	bool high;
1451+
1452+	if (current_is_kswapd())
1453+		high = true;
1454+	else
1455+		high = !!(gfp_mask & __GFP_HIGHMEM);
1456+
1457+	if (nr_to_scan == 0)
1458+		return dmabuf_page_pool_total(pool, high);
1459+
1460+	while (freed < nr_to_scan) {
1461+		struct page *page;
1462+
1463+		/* Try to free low pages first */
1464+		page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
1465+		if (!page)
1466+			page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
1467+
1468+		if (!page)
1469+			break;
1470+
1471+		dmabuf_page_pool_free_pages(pool, page);
1472+		freed += (1 << pool->order);
1473+	}
1474+
1475+	return freed;
1476+}
1477+
1478+static int dmabuf_page_pool_shrink(gfp_t gfp_mask, int nr_to_scan)
1479+{
1480+	struct dmabuf_page_pool *pool;
1481+	int nr_total = 0;
1482+	int nr_freed;
1483+	int only_scan = 0;
1484+
1485+	if (!nr_to_scan)
1486+		only_scan = 1;
1487+
1488+	mutex_lock(&pool_list_lock);
1489+	list_for_each_entry(pool, &pool_list, list) {
1490+		if (only_scan) {
1491+			nr_total += dmabuf_page_pool_do_shrink(pool,
1492+							       gfp_mask,
1493+							       nr_to_scan);
1494+		} else {
1495+			nr_freed = dmabuf_page_pool_do_shrink(pool,
1496+							      gfp_mask,
1497+							      nr_to_scan);
1498+			nr_to_scan -= nr_freed;
1499+			nr_total += nr_freed;
1500+			if (nr_to_scan <= 0)
1501+				break;
1502+		}
1503+	}
1504+	mutex_unlock(&pool_list_lock);
1505+
1506+	return nr_total;
1507+}
1508+
1509+static unsigned long dmabuf_page_pool_shrink_count(struct shrinker *shrinker,
1510+						   struct shrink_control *sc)
1511+{
1512+	return dmabuf_page_pool_shrink(sc->gfp_mask, 0);
1513+}
1514+
1515+static unsigned long dmabuf_page_pool_shrink_scan(struct shrinker *shrinker,
1516+						  struct shrink_control *sc)
1517+{
1518+	if (sc->nr_to_scan == 0)
1519+		return 0;
1520+	return dmabuf_page_pool_shrink(sc->gfp_mask, sc->nr_to_scan);
1521+}
1522+
1523+struct shrinker pool_shrinker = {
1524+	.count_objects = dmabuf_page_pool_shrink_count,
1525+	.scan_objects = dmabuf_page_pool_shrink_scan,
1526+	.seeks = DEFAULT_SEEKS,
1527+	.batch = 0,
1528+};
1529+
1530+static int dmabuf_page_pool_init_shrinker(void)
1531+{
1532+	return register_shrinker(&pool_shrinker);
1533+}
1534+module_init(dmabuf_page_pool_init_shrinker);
1535+MODULE_LICENSE("GPL v2");
1536diff --git a/drivers/dma-buf/heaps/page_pool.h b/drivers/dma-buf/heaps/page_pool.h
1537new file mode 100755
1538index 000000000..6b083b04f
1539--- /dev/null
1540+++ b/drivers/dma-buf/heaps/page_pool.h
1541@@ -0,0 +1,55 @@
1542+/* SPDX-License-Identifier: GPL-2.0 */
1543+/*
1544+ * DMA BUF PagePool implementation
1545+ * Based on earlier ION code by Google
1546+ *
1547+ * Copyright (C) 2011 Google, Inc.
1548+ * Copyright (C) 2020 Linaro Ltd.
1549+ */
1550+
1551+#ifndef _DMABUF_PAGE_POOL_H
1552+#define _DMABUF_PAGE_POOL_H
1553+
1554+#include <linux/device.h>
1555+#include <linux/kref.h>
1556+#include <linux/mm_types.h>
1557+#include <linux/mutex.h>
1558+#include <linux/shrinker.h>
1559+#include <linux/types.h>
1560+
1561+/* page types we track in the pool */
1562+enum {
1563+	POOL_LOWPAGE,      /* Clean lowmem pages */
1564+	POOL_HIGHPAGE,     /* Clean highmem pages */
1565+
1566+	POOL_TYPE_SIZE,
1567+};
1568+
1569+/**
1570+ * struct dmabuf_page_pool - pagepool struct
1571+ * @count[]:		array of number of pages of that type in the pool
1572+ * @items[]:		array of list of pages of the specific type
1573+ * @mutex:		lock protecting this struct and especially the count
1574+ *			item list
1575+ * @gfp_mask:		gfp_mask to use from alloc
1576+ * @order:		order of pages in the pool
1577+ * @list:		list node for list of pools
1578+ *
1579+ * Allows you to keep a pool of pre allocated pages to use
1580+ */
1581+struct dmabuf_page_pool {
1582+	int count[POOL_TYPE_SIZE];
1583+	struct list_head items[POOL_TYPE_SIZE];
1584+	struct mutex mutex;
1585+	gfp_t gfp_mask;
1586+	unsigned int order;
1587+	struct list_head list;
1588+};
1589+
1590+struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask,
1591+						 unsigned int order);
1592+void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool);
1593+struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool);
1594+void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page);
1595+
1596+#endif /* _DMABUF_PAGE_POOL_H */
1597diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
1598index 0bf688e3c..bbca2e195 100644
1599--- a/drivers/dma-buf/heaps/system_heap.c
1600+++ b/drivers/dma-buf/heaps/system_heap.c
1601@@ -3,7 +3,11 @@
1602  * DMABUF System heap exporter
1603  *
1604  * Copyright (C) 2011 Google, Inc.
1605- * Copyright (C) 2019 Linaro Ltd.
1606+ * Copyright (C) 2019, 2020 Linaro Ltd.
1607+ *
1608+ * Portions based off of Andrew Davis' SRAM heap:
1609+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
1610+ *	Andrew F. Davis <afd@ti.com>
1611  */
1612
1613 #include <linux/dma-buf.h>
1614@@ -15,99 +19,547 @@
1615 #include <linux/module.h>
1616 #include <linux/scatterlist.h>
1617 #include <linux/slab.h>
1618-#include <linux/sched/signal.h>
1619-#include <asm/page.h>
1620+#include <linux/vmalloc.h>
1621+
1622+#include "page_pool.h"
1623+#include "deferred-free-helper.h"
1624+
1625+static struct dma_heap *sys_heap;
1626+static struct dma_heap *sys_uncached_heap;
1627+
1628+struct system_heap_buffer {
1629+	struct dma_heap *heap;
1630+	struct list_head attachments;
1631+	struct mutex lock;
1632+	unsigned long len;
1633+	struct sg_table sg_table;
1634+	int vmap_cnt;
1635+	void *vaddr;
1636+	struct deferred_freelist_item deferred_free;
1637+
1638+	bool uncached;
1639+};
1640+
1641+struct dma_heap_attachment {
1642+	struct device *dev;
1643+	struct sg_table *table;
1644+	struct list_head list;
1645+	bool mapped;
1646
1647-#include "heap-helpers.h"
1648+	bool uncached;
1649+};
1650
1651-struct dma_heap *sys_heap;
1652+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
1653+#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
1654+#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
1655+				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
1656+				| __GFP_COMP)
1657+static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
1658+/*
1659+ * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
1660+ * to match with the sizes often found in IOMMUs. Using order 4 pages instead
1661+ * of order 0 pages can significantly improve the performance of many IOMMUs
1662+ * by reducing TLB pressure and time spent updating page tables.
1663+ */
1664+static const unsigned int orders[] = {8, 4, 0};
1665+#define NUM_ORDERS ARRAY_SIZE(orders)
1666+struct dmabuf_page_pool *pools[NUM_ORDERS];
1667
1668-static void system_heap_free(struct heap_helper_buffer *buffer)
1669+static struct sg_table *dup_sg_table(struct sg_table *table)
1670 {
1671-	pgoff_t pg;
1672+	struct sg_table *new_table;
1673+	int ret, i;
1674+	struct scatterlist *sg, *new_sg;
1675
1676-	for (pg = 0; pg < buffer->pagecount; pg++)
1677-		__free_page(buffer->pages[pg]);
1678-	kfree(buffer->pages);
1679-	kfree(buffer);
1680+	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
1681+	if (!new_table)
1682+		return ERR_PTR(-ENOMEM);
1683+
1684+	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
1685+	if (ret) {
1686+		kfree(new_table);
1687+		return ERR_PTR(-ENOMEM);
1688+	}
1689+
1690+	new_sg = new_table->sgl;
1691+	for_each_sgtable_sg(table, sg, i) {
1692+		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
1693+		new_sg = sg_next(new_sg);
1694+	}
1695+
1696+	return new_table;
1697 }
1698
1699-static int system_heap_allocate(struct dma_heap *heap,
1700-				unsigned long len,
1701-				unsigned long fd_flags,
1702-				unsigned long heap_flags)
1703+static int system_heap_attach(struct dma_buf *dmabuf,
1704+			      struct dma_buf_attachment *attachment)
1705 {
1706-	struct heap_helper_buffer *helper_buffer;
1707-	struct dma_buf *dmabuf;
1708-	int ret = -ENOMEM;
1709-	pgoff_t pg;
1710+	struct system_heap_buffer *buffer = dmabuf->priv;
1711+	struct dma_heap_attachment *a;
1712+	struct sg_table *table;
1713+
1714+	a = kzalloc(sizeof(*a), GFP_KERNEL);
1715+	if (!a)
1716+		return -ENOMEM;
1717
1718-	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
1719-	if (!helper_buffer)
1720+	table = dup_sg_table(&buffer->sg_table);
1721+	if (IS_ERR(table)) {
1722+		kfree(a);
1723 		return -ENOMEM;
1724+	}
1725+
1726+	a->table = table;
1727+	a->dev = attachment->dev;
1728+	INIT_LIST_HEAD(&a->list);
1729+	a->mapped = false;
1730+	a->uncached = buffer->uncached;
1731+	attachment->priv = a;
1732+
1733+	mutex_lock(&buffer->lock);
1734+	list_add(&a->list, &buffer->attachments);
1735+	mutex_unlock(&buffer->lock);
1736+
1737+	return 0;
1738+}
1739+
1740+static void system_heap_detach(struct dma_buf *dmabuf,
1741+			       struct dma_buf_attachment *attachment)
1742+{
1743+	struct system_heap_buffer *buffer = dmabuf->priv;
1744+	struct dma_heap_attachment *a = attachment->priv;
1745+
1746+	mutex_lock(&buffer->lock);
1747+	list_del(&a->list);
1748+	mutex_unlock(&buffer->lock);
1749+
1750+	sg_free_table(a->table);
1751+	kfree(a->table);
1752+	kfree(a);
1753+}
1754
1755-	init_heap_helper_buffer(helper_buffer, system_heap_free);
1756-	helper_buffer->heap = heap;
1757-	helper_buffer->size = len;
1758+static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
1759+						enum dma_data_direction direction)
1760+{
1761+	struct dma_heap_attachment *a = attachment->priv;
1762+	struct sg_table *table = a->table;
1763+	int attr = attachment->dma_map_attrs;
1764+	int ret;
1765+
1766+	if (a->uncached)
1767+		attr |= DMA_ATTR_SKIP_CPU_SYNC;
1768+
1769+	ret = dma_map_sgtable(attachment->dev, table, direction, attr);
1770+	if (ret)
1771+		return ERR_PTR(ret);
1772+
1773+	a->mapped = true;
1774+	return table;
1775+}
1776+
1777+static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
1778+				      struct sg_table *table,
1779+				      enum dma_data_direction direction)
1780+{
1781+	struct dma_heap_attachment *a = attachment->priv;
1782+	int attr = attachment->dma_map_attrs;
1783+
1784+	if (a->uncached)
1785+		attr |= DMA_ATTR_SKIP_CPU_SYNC;
1786+	a->mapped = false;
1787+	dma_unmap_sgtable(attachment->dev, table, direction, attr);
1788+}
1789
1790-	helper_buffer->pagecount = len / PAGE_SIZE;
1791-	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
1792-					     sizeof(*helper_buffer->pages),
1793-					     GFP_KERNEL);
1794-	if (!helper_buffer->pages) {
1795-		ret = -ENOMEM;
1796-		goto err0;
1797+static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1798+						enum dma_data_direction direction)
1799+{
1800+	struct system_heap_buffer *buffer = dmabuf->priv;
1801+	struct dma_heap_attachment *a;
1802+
1803+	mutex_lock(&buffer->lock);
1804+
1805+	if (buffer->vmap_cnt)
1806+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
1807+
1808+	if (!buffer->uncached) {
1809+		list_for_each_entry(a, &buffer->attachments, list) {
1810+			if (!a->mapped)
1811+				continue;
1812+			dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
1813+		}
1814 	}
1815+	mutex_unlock(&buffer->lock);
1816+
1817+	return 0;
1818+}
1819+
1820+static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1821+					      enum dma_data_direction direction)
1822+{
1823+	struct system_heap_buffer *buffer = dmabuf->priv;
1824+	struct dma_heap_attachment *a;
1825+
1826+	mutex_lock(&buffer->lock);
1827+
1828+	if (buffer->vmap_cnt)
1829+		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
1830+
1831+	if (!buffer->uncached) {
1832+		list_for_each_entry(a, &buffer->attachments, list) {
1833+			if (!a->mapped)
1834+				continue;
1835+			dma_sync_sgtable_for_device(a->dev, a->table, direction);
1836+		}
1837+	}
1838+	mutex_unlock(&buffer->lock);
1839+
1840+	return 0;
1841+}
1842+
1843+static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1844+{
1845+	struct system_heap_buffer *buffer = dmabuf->priv;
1846+	struct sg_table *table = &buffer->sg_table;
1847+	unsigned long addr = vma->vm_start;
1848+	struct sg_page_iter piter;
1849+	int ret;
1850+
1851+	if (buffer->uncached)
1852+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1853+
1854+	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
1855+		struct page *page = sg_page_iter_page(&piter);
1856+
1857+		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
1858+				      vma->vm_page_prot);
1859+		if (ret)
1860+			return ret;
1861+		addr += PAGE_SIZE;
1862+		if (addr >= vma->vm_end)
1863+			return 0;
1864+	}
1865+	return 0;
1866+}
1867+
1868+static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
1869+{
1870+	struct sg_table *table = &buffer->sg_table;
1871+	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
1872+	struct page **pages = vmalloc(sizeof(struct page *) * npages);
1873+	struct page **tmp = pages;
1874+	struct sg_page_iter piter;
1875+	pgprot_t pgprot = PAGE_KERNEL;
1876+	void *vaddr;
1877+
1878+	if (!pages)
1879+		return ERR_PTR(-ENOMEM);
1880+
1881+	if (buffer->uncached)
1882+		pgprot = pgprot_writecombine(PAGE_KERNEL);
1883+
1884+	for_each_sgtable_page(table, &piter, 0) {
1885+		WARN_ON(tmp - pages >= npages);
1886+		*tmp++ = sg_page_iter_page(&piter);
1887+	}
1888+
1889+	vaddr = vmap(pages, npages, VM_MAP, pgprot);
1890+	vfree(pages);
1891+
1892+	if (!vaddr)
1893+		return ERR_PTR(-ENOMEM);
1894+
1895+	return vaddr;
1896+}
1897+
1898+static void *system_heap_vmap(struct dma_buf *dmabuf)
1899+{
1900+	struct system_heap_buffer *buffer = dmabuf->priv;
1901+	void *vaddr;
1902+
1903+	mutex_lock(&buffer->lock);
1904+	if (buffer->vmap_cnt) {
1905+		buffer->vmap_cnt++;
1906+		vaddr = buffer->vaddr;
1907+		goto out;
1908+	}
1909+
1910+	vaddr = system_heap_do_vmap(buffer);
1911+	if (IS_ERR(vaddr))
1912+		goto out;
1913+
1914+	buffer->vaddr = vaddr;
1915+	buffer->vmap_cnt++;
1916+out:
1917+	mutex_unlock(&buffer->lock);
1918
1919-	for (pg = 0; pg < helper_buffer->pagecount; pg++) {
1920+	return vaddr;
1921+}
1922+
1923+static void system_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
1924+{
1925+	struct system_heap_buffer *buffer = dmabuf->priv;
1926+
1927+	mutex_lock(&buffer->lock);
1928+	if (!--buffer->vmap_cnt) {
1929+		vunmap(buffer->vaddr);
1930+		buffer->vaddr = NULL;
1931+	}
1932+	mutex_unlock(&buffer->lock);
1933+}
1934+
1935+static int system_heap_zero_buffer(struct system_heap_buffer *buffer)
1936+{
1937+	struct sg_table *sgt = &buffer->sg_table;
1938+	struct sg_page_iter piter;
1939+	struct page *p;
1940+	void *vaddr;
1941+	int ret = 0;
1942+
1943+	for_each_sgtable_page(sgt, &piter, 0) {
1944+		p = sg_page_iter_page(&piter);
1945+		vaddr = kmap_atomic(p);
1946+		memset(vaddr, 0, PAGE_SIZE);
1947+		kunmap_atomic(vaddr);
1948+	}
1949+
1950+	return ret;
1951+}
1952+
1953+static void system_heap_buf_free(struct deferred_freelist_item *item,
1954+				 enum df_reason reason)
1955+{
1956+	struct system_heap_buffer *buffer;
1957+	struct sg_table *table;
1958+	struct scatterlist *sg;
1959+	int i, j;
1960+
1961+	buffer = container_of(item, struct system_heap_buffer, deferred_free);
1962+	/* Zero the buffer pages before adding back to the pool */
1963+	if (reason == DF_NORMAL)
1964+		if (system_heap_zero_buffer(buffer))
1965+			reason = DF_UNDER_PRESSURE; // On failure, just free
1966+
1967+	table = &buffer->sg_table;
1968+	for_each_sgtable_sg(table, sg, i) {
1969+		struct page *page = sg_page(sg);
1970+
1971+		if (reason == DF_UNDER_PRESSURE) {
1972+			__free_pages(page, compound_order(page));
1973+		} else {
1974+			for (j = 0; j < NUM_ORDERS; j++) {
1975+				if (compound_order(page) == orders[j])
1976+					break;
1977+			}
1978+			dmabuf_page_pool_free(pools[j], page);
1979+		}
1980+	}
1981+	sg_free_table(table);
1982+	kfree(buffer);
1983+}
1984+
1985+static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
1986+{
1987+	struct system_heap_buffer *buffer = dmabuf->priv;
1988+	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
1989+
1990+	deferred_free(&buffer->deferred_free, system_heap_buf_free, npages);
1991+}
1992+
1993+static const struct dma_buf_ops system_heap_buf_ops = {
1994+	.attach = system_heap_attach,
1995+	.detach = system_heap_detach,
1996+	.map_dma_buf = system_heap_map_dma_buf,
1997+	.unmap_dma_buf = system_heap_unmap_dma_buf,
1998+	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
1999+	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
2000+	.mmap = system_heap_mmap,
2001+	.vmap = system_heap_vmap,
2002+	.vunmap = system_heap_vunmap,
2003+	.release = system_heap_dma_buf_release,
2004+};
2005+
2006+static struct page *alloc_largest_available(unsigned long size,
2007+					    unsigned int max_order)
2008+{
2009+	struct page *page;
2010+	int i;
2011+
2012+	for (i = 0; i < NUM_ORDERS; i++) {
2013+		if (size <  (PAGE_SIZE << orders[i]))
2014+			continue;
2015+		if (max_order < orders[i])
2016+			continue;
2017+		page = dmabuf_page_pool_alloc(pools[i]);
2018+		if (!page)
2019+			continue;
2020+		return page;
2021+	}
2022+	return NULL;
2023+}
2024+
2025+static struct dma_buf *system_heap_do_allocate(struct dma_heap *heap,
2026+					       unsigned long len,
2027+					       unsigned long fd_flags,
2028+					       unsigned long heap_flags,
2029+					       bool uncached)
2030+{
2031+	struct system_heap_buffer *buffer;
2032+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
2033+	unsigned long size_remaining = len;
2034+	unsigned int max_order = orders[0];
2035+	struct dma_buf *dmabuf;
2036+	struct sg_table *table;
2037+	struct scatterlist *sg;
2038+	struct list_head pages;
2039+	struct page *page, *tmp_page;
2040+	int i, ret = -ENOMEM;
2041+
2042+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2043+	if (!buffer)
2044+		return ERR_PTR(-ENOMEM);
2045+
2046+	INIT_LIST_HEAD(&buffer->attachments);
2047+	mutex_init(&buffer->lock);
2048+	buffer->heap = heap;
2049+	buffer->len = len;
2050+	buffer->uncached = uncached;
2051+
2052+	INIT_LIST_HEAD(&pages);
2053+	i = 0;
2054+	while (size_remaining > 0) {
2055 		/*
2056 		 * Avoid trying to allocate memory if the process
2057-		 * has been killed by by SIGKILL
2058+		 * has been killed by SIGKILL
2059 		 */
2060 		if (fatal_signal_pending(current))
2061-			goto err1;
2062+			goto free_buffer;
2063+
2064+		page = alloc_largest_available(size_remaining, max_order);
2065+		if (!page)
2066+			goto free_buffer;
2067+
2068+		list_add_tail(&page->lru, &pages);
2069+		size_remaining -= page_size(page);
2070+		max_order = compound_order(page);
2071+		i++;
2072+	}
2073
2074-		helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
2075-		if (!helper_buffer->pages[pg])
2076-			goto err1;
2077+	table = &buffer->sg_table;
2078+	if (sg_alloc_table(table, i, GFP_KERNEL))
2079+		goto free_buffer;
2080+
2081+	sg = table->sgl;
2082+	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
2083+		sg_set_page(sg, page, page_size(page), 0);
2084+		sg = sg_next(sg);
2085+		list_del(&page->lru);
2086 	}
2087
2088 	/* create the dmabuf */
2089-	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
2090+	exp_info.exp_name = dma_heap_get_name(heap);
2091+	exp_info.ops = &system_heap_buf_ops;
2092+	exp_info.size = buffer->len;
2093+	exp_info.flags = fd_flags;
2094+	exp_info.priv = buffer;
2095+	dmabuf = dma_buf_export(&exp_info);
2096 	if (IS_ERR(dmabuf)) {
2097 		ret = PTR_ERR(dmabuf);
2098-		goto err1;
2099+		goto free_pages;
2100+	}
2101+
2102+	/*
2103+	 * For uncached buffers, we need to initially flush cpu cache, since
2104+	 * the __GFP_ZERO on the allocation means the zeroing was done by the
2105+	 * cpu and thus it is likely cached. Map (and implicitly flush) and
2106+	 * unmap it now so we don't get corruption later on.
2107+	 */
2108+	if (buffer->uncached) {
2109+		dma_map_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
2110+		dma_unmap_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
2111 	}
2112
2113-	helper_buffer->dmabuf = dmabuf;
2114+	return dmabuf;
2115
2116-	ret = dma_buf_fd(dmabuf, fd_flags);
2117-	if (ret < 0) {
2118-		dma_buf_put(dmabuf);
2119-		/* just return, as put will call release and that will free */
2120-		return ret;
2121+free_pages:
2122+	for_each_sgtable_sg(table, sg, i) {
2123+		struct page *p = sg_page(sg);
2124+
2125+		__free_pages(p, compound_order(p));
2126 	}
2127+	sg_free_table(table);
2128+free_buffer:
2129+	list_for_each_entry_safe(page, tmp_page, &pages, lru)
2130+		__free_pages(page, compound_order(page));
2131+	kfree(buffer);
2132
2133-	return ret;
2134+	return ERR_PTR(ret);
2135+}
2136
2137-err1:
2138-	while (pg > 0)
2139-		__free_page(helper_buffer->pages[--pg]);
2140-	kfree(helper_buffer->pages);
2141-err0:
2142-	kfree(helper_buffer);
2143+static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
2144+					    unsigned long len,
2145+					    unsigned long fd_flags,
2146+					    unsigned long heap_flags)
2147+{
2148+	return system_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
2149+}
2150
2151-	return ret;
2152+static long system_get_pool_size(struct dma_heap *heap)
2153+{
2154+	int i;
2155+	long num_pages = 0;
2156+	struct dmabuf_page_pool **pool;
2157+
2158+	pool = pools;
2159+	for (i = 0; i < NUM_ORDERS; i++, pool++) {
2160+		num_pages += ((*pool)->count[POOL_LOWPAGE] +
2161+			      (*pool)->count[POOL_HIGHPAGE]) << (*pool)->order;
2162+	}
2163+
2164+	return num_pages << PAGE_SHIFT;
2165 }
2166
2167 static const struct dma_heap_ops system_heap_ops = {
2168 	.allocate = system_heap_allocate,
2169+	.get_pool_size = system_get_pool_size,
2170+};
2171+
2172+static struct dma_buf *system_uncached_heap_allocate(struct dma_heap *heap,
2173+						     unsigned long len,
2174+						     unsigned long fd_flags,
2175+						     unsigned long heap_flags)
2176+{
2177+	return system_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
2178+}
2179+
2180+/* Dummy function to be used until we can call coerce_mask_and_coherent */
2181+static struct dma_buf *system_uncached_heap_not_initialized(struct dma_heap *heap,
2182+							    unsigned long len,
2183+							    unsigned long fd_flags,
2184+							    unsigned long heap_flags)
2185+{
2186+	return ERR_PTR(-EBUSY);
2187+}
2188+
2189+static struct dma_heap_ops system_uncached_heap_ops = {
2190+	/* After system_heap_create is complete, we will swap this */
2191+	.allocate = system_uncached_heap_not_initialized,
2192 };
2193
2194 static int system_heap_create(void)
2195 {
2196 	struct dma_heap_export_info exp_info;
2197-	int ret = 0;
2198+	int i;
2199+
2200+	for (i = 0; i < NUM_ORDERS; i++) {
2201+		pools[i] = dmabuf_page_pool_create(order_flags[i], orders[i]);
2202+
2203+		if (!pools[i]) {
2204+			int j;
2205+
2206+			pr_err("%s: page pool creation failed!\n", __func__);
2207+			for (j = 0; j < i; j++)
2208+				dmabuf_page_pool_destroy(pools[j]);
2209+			return -ENOMEM;
2210+		}
2211+	}
2212
2213 	exp_info.name = "system";
2214 	exp_info.ops = &system_heap_ops;
2215@@ -115,9 +567,21 @@ static int system_heap_create(void)
2216
2217 	sys_heap = dma_heap_add(&exp_info);
2218 	if (IS_ERR(sys_heap))
2219-		ret = PTR_ERR(sys_heap);
2220+		return PTR_ERR(sys_heap);
2221
2222-	return ret;
2223+	exp_info.name = "system-uncached";
2224+	exp_info.ops = &system_uncached_heap_ops;
2225+	exp_info.priv = NULL;
2226+
2227+	sys_uncached_heap = dma_heap_add(&exp_info);
2228+	if (IS_ERR(sys_uncached_heap))
2229+		return PTR_ERR(sys_uncached_heap);
2230+
2231+	dma_coerce_mask_and_coherent(dma_heap_get_dev(sys_uncached_heap), DMA_BIT_MASK(64));
2232+	mb(); /* make sure we only set allocate after dma_mask is set */
2233+	system_uncached_heap_ops.allocate = system_uncached_heap_allocate;
2234+
2235+	return 0;
2236 }
2237 module_init(system_heap_create);
2238 MODULE_LICENSE("GPL v2");
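
For context, a minimal userspace sketch of allocating from the "system-uncached" heap registered above, using the standard dma-heap uapi from <linux/dma-heap.h>. The device path, flags and error handling are illustrative only and not part of the patch:

	#include <stddef.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/dma-heap.h>

	/* Returns a dma-buf fd backed by uncached system-heap pages, or -1 on error. */
	static int alloc_uncached(size_t len)
	{
		struct dma_heap_allocation_data data = {
			.len = len,
			.fd_flags = O_RDWR | O_CLOEXEC,
		};
		int heap_fd, ret;

		heap_fd = open("/dev/dma_heap/system-uncached", O_RDONLY | O_CLOEXEC);
		if (heap_fd < 0)
			return -1;

		ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
		close(heap_fd);

		return ret < 0 ? -1 : (int)data.fd;
	}
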
2239diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
2240index 109d11fb4..4aac2ec86 100644
2241--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
2242+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
2243@@ -461,9 +461,10 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
2244 		return 0;
2245
2246 	obj = drm_gem_fb_get_obj(state->fb, 0);
2247-	fence = dma_resv_get_excl_rcu(obj->resv);
2248-	drm_atomic_set_fence_for_plane(state, fence);
2249-
2250+	if (obj) {
2251+		fence = dma_resv_get_excl_rcu(obj->resv);
2252+		drm_atomic_set_fence_for_plane(state, fence);
2253+	}
2254 	return 0;
2255 }
2256 EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
2257diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
2258index f135b7959..fc9c53e3e 100644
2259--- a/drivers/gpu/drm/drm_vblank.c
2260+++ b/drivers/gpu/drm/drm_vblank.c
2261@@ -1100,7 +1100,7 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
2262 		return dev->driver->enable_vblank(dev, pipe);
2263 	}
2264
2265-	return -EINVAL;
2266+	return 0;
2267 }
2268
2269 static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
2270diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
2271index 0c5706abb..528cd83d3 100644
2272--- a/include/linux/dma-buf.h
2273+++ b/include/linux/dma-buf.h
2274@@ -289,14 +289,9 @@ struct dma_buf_ops {
2275  * @list_node: node for dma_buf accounting and debugging.
2276  * @priv: exporter specific private data for this buffer object.
2277  * @resv: reservation object linked to this dma-buf
2278- * @exp_pid: pid of exporter task which created this obj
2279- * @exp_task_comm: process name of exporter task which created this obj
2280  * @poll: for userspace poll support
2281  * @cb_excl: for userspace poll support
2282  * @cb_shared: for userspace poll support
2283- * @sysfs_entry: for exposing information about this buffer in sysfs.
2284- * The attachment_uid member of @sysfs_entry is protected by dma_resv lock
2285- * and is incremented on each attach.
2286  *
2287  * This represents a shared buffer, created by calling dma_buf_export(). The
2288  * userspace representation is a normal file descriptor, which can be created by
2289@@ -322,10 +317,6 @@ struct dma_buf {
2290 	struct list_head list_node;
2291 	void *priv;
2292 	struct dma_resv *resv;
2293-#ifdef CONFIG_DMABUF_PROCESS_INFO
2294-	pid_t exp_pid;
2295-	char exp_task_comm[TASK_COMM_LEN];
2296-#endif
2297
2298 	/* poll support */
2299 	wait_queue_head_t poll;
2300@@ -336,13 +327,6 @@ struct dma_buf {
2301
2302 		__poll_t active;
2303 	} cb_excl, cb_shared;
2304-#ifdef CONFIG_DMABUF_SYSFS_STATS
2305-	/* for sysfs stats */
2306-	struct dma_buf_sysfs_entry {
2307-		struct kobject kobj;
2308-		struct dma_buf *dmabuf;
2309-	} *sysfs_entry;
2310-#endif
2311 };
2312
2313 /**
2314@@ -412,6 +396,7 @@ struct dma_buf_attachment {
2315 	const struct dma_buf_attach_ops *importer_ops;
2316 	void *importer_priv;
2317 	void *priv;
2318+	unsigned long dma_map_attrs;
2319 };
2320
2321 /**
2322@@ -520,16 +505,4 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
2323 		 unsigned long);
2324 void *dma_buf_vmap(struct dma_buf *);
2325 void dma_buf_vunmap(struct dma_buf *, void *vaddr);
2326-
2327-#ifdef CONFIG_DMABUF_PROCESS_INFO
2328-/**
2329- * get_dma_buf_from_file - Get struct dma_buf* from struct file*
2330- * @f:	[in]	pointer to struct file, which is associated with a
2331- *		dma_buf object.
2332- *
2333- * If @f IS_ERR_OR_NULL, return NULL.
2334- * If @f is not a file associated with dma_buf, return NULL.
2335- */
2336-struct dma_buf *get_dma_buf_from_file(struct file *f);
2337-#endif /* CONFIG_DMABUF_PROCESS_INFO */
2338 #endif /* __DMA_BUF_H__ */
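
The dma_map_attrs field added above lets an importer pass DMA attributes that the exporter honours in its map_dma_buf callback (the system heap above passes them through to dma_map_sgtable()). A hedged sketch of the importer side, assuming a valid dmabuf and the importer's struct device *dev already exist:

	struct dma_buf_attachment *att;
	struct sg_table *sgt;

	att = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(att))
		return PTR_ERR(att);

	/* Ask the exporter to skip CPU cache maintenance for this mapping. */
	att->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, att);
		return PTR_ERR(sgt);
	}
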
2339diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
2340index 83b8cfb2d..e8f0e92c2 100644
2341--- a/include/linux/dma-heap.h
2342+++ b/include/linux/dma-heap.h
2343@@ -16,15 +16,17 @@ struct dma_heap;
2344
2345 /**
2346  * struct dma_heap_ops - ops to operate on a given heap
2347- * @allocate:		allocate dmabuf and return fd
2348+ * @allocate:		allocate dmabuf and return struct dma_buf ptr
2349+ * @get_pool_size:	if heap maintains memory pools, get pool size in bytes
2350  *
2351- * allocate returns dmabuf fd  on success, -errno on error.
2352+ * allocate returns dmabuf on success, ERR_PTR(-errno) on error.
2353  */
2354 struct dma_heap_ops {
2355-	int (*allocate)(struct dma_heap *heap,
2356+	struct dma_buf *(*allocate)(struct dma_heap *heap,
2357 			unsigned long len,
2358 			unsigned long fd_flags,
2359 			unsigned long heap_flags);
2360+	long (*get_pool_size)(struct dma_heap *heap);
2361 };
2362
2363 /**
2364@@ -50,6 +52,15 @@ struct dma_heap_export_info {
2365  */
2366 void *dma_heap_get_drvdata(struct dma_heap *heap);
2367
2368+/**
2369+ * dma_heap_get_dev() - get device struct for the heap
2370+ * @heap: DMA-Heap to retrieve device struct from
2371+ *
2372+ * Returns:
2373+ * The device struct for the heap.
2374+ */
2375+struct device *dma_heap_get_dev(struct dma_heap *heap);
2376+
2377 /**
2378  * dma_heap_get_name() - get heap name
2379  * @heap: DMA-Heap to retrieve private data for
2380@@ -65,4 +76,49 @@ const char *dma_heap_get_name(struct dma_heap *heap);
2381  */
2382 struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
2383
2384+/**
2385+ * dma_heap_put - drops a reference to a dmabuf heaps, potentially freeing it
2386+ * @heap:		heap pointer
2387+ */
2388+void dma_heap_put(struct dma_heap *heap);
2389+
2390+/**
2391+ * dma_heap_find - Returns the registered dma_heap with the specified name
2392+ * @name: Name of the heap to find
2393+ *
2394+ * NOTE: dma_heaps returned from this function MUST be released
2395+ * using dma_heap_put() when the user is done.
2396+ */
2397+struct dma_heap *dma_heap_find(const char *name);
2398+
2399+/**
2400+ * dma_heap_buffer_alloc - Allocate dma-buf from a dma_heap
2401+ * @heap:	dma_heap to allocate from
2402+ * @len:	size to allocate
2403+ * @fd_flags:	flags to set on returned dma-buf fd
2404+ * @heap_flags:	flags to pass to the dma heap
2405+ *
2406+ * This is for internal dma-buf allocations only.
2407+ */
2408+struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
2409+				      unsigned int fd_flags,
2410+				      unsigned int heap_flags);
2411+
2412+/** dma_heap_buffer_free - Free dma_buf allocated by dma_heap_buffer_alloc
2413+ * @dma_buf:	dma_buf to free
2414+ *
2415+ * This is really only a simple wrapper to dma_buf_put()
2416+ */
2417+void dma_heap_buffer_free(struct dma_buf *);
2418+
2419+/**
2420+ * dma_heap_bufferfd_alloc - Allocate dma-buf fd from a dma_heap
2421+ * @heap:	dma_heap to allocate from
2422+ * @len:	size to allocate
2423+ * @fd_flags:	flags to set on returned dma-buf fd
2424+ * @heap_flags:	flags to pass to the dma heap
2425+ */
2426+int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len,
2427+			    unsigned int fd_flags,
2428+			    unsigned int heap_flags);
2429 #endif /* _DMA_HEAPS_H */
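
For in-kernel users, a hedged sketch of the alloc/free helpers declared above; the heap name, size and flags are illustrative:

	struct dma_heap *heap;
	struct dma_buf *buf;

	heap = dma_heap_find("system");
	if (!heap)
		return -ENODEV;

	buf = dma_heap_buffer_alloc(heap, SZ_1M, O_RDWR | O_CLOEXEC, 0);
	dma_heap_put(heap);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... attach/map the dma-buf as usual ... */

	dma_heap_buffer_free(buf);	/* thin wrapper around dma_buf_put() */
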
2430diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2431index 46a0df7d1..2909709f4 100644
2432--- a/kernel/sched/core.c
2433+++ b/kernel/sched/core.c
2434@@ -5636,16 +5636,19 @@ int sched_setscheduler(struct task_struct *p, int policy,
2435 {
2436 	return _sched_setscheduler(p, policy, param, true);
2437 }
2438+EXPORT_SYMBOL_GPL(sched_setscheduler);
2439
2440 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
2441 {
2442 	return __sched_setscheduler(p, attr, true, true);
2443 }
2444+EXPORT_SYMBOL_GPL(sched_setattr);
2445
2446 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
2447 {
2448 	return __sched_setscheduler(p, attr, false, true);
2449 }
2450+EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
2451
2452 /**
2453  * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
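
For completeness, a sketch of what these exports make possible for an out-of-tree (e.g. BSP) module; the thread function and priority here are hypothetical:

	/* Hypothetical: raise a driver kthread to RT priority from module code. */
	struct sched_param sp = { .sched_priority = 10 };
	struct task_struct *task = kthread_run(worker_fn, NULL, "bsp_rt_worker");

	if (!IS_ERR(task))
		sched_setscheduler(task, SCHED_FIFO, &sp);
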
2454--
24552.25.1
2456
2457