1From 0e7ae3e758cc4c6e0f49f1ffcc5d59df8f5cf7bb Mon Sep 17 00:00:00 2001
2From: ygopenhm <ygopenhm@vyagoo.com>
3Date: Thu, 29 Sep 2022 09:27:27 +0800
4Subject: [PATCH] kernel
5
6Signed-off-by: ygopenhm <ygopenhm@vyagoo.com>
7---
8 Kconfig                                      |   2 +
9 Makefile                                     |   2 +
10 drivers/dma-buf/dma-heap.c                   | 223 +++++--
11 drivers/dma-buf/heaps/Kconfig                |  16 +-
12 drivers/dma-buf/heaps/Makefile               |   3 +-
13 drivers/dma-buf/heaps/cma_heap.c             | 338 +++++++++--
14 drivers/dma-buf/heaps/deferred-free-helper.c | 139 +++++
15 drivers/dma-buf/heaps/deferred-free-helper.h |  57 ++
16 drivers/dma-buf/heaps/page_pool.c            | 247 ++++++++
17 drivers/dma-buf/heaps/page_pool.h            |  55 ++
18 drivers/dma-buf/heaps/system_heap.c          | 576 +++++++++++++++++--
19 drivers/gpu/drm/drm_gem_framebuffer_helper.c |   7 +-
20 drivers/gpu/drm/drm_vblank.c                 |   2 +-
21 include/linux/dma-buf.h                      |   1 +
22 include/linux/dma-heap.h                     |  62 +-
23 kernel/sched/core.c                          |   3 +
24 16 files changed, 1576 insertions(+), 157 deletions(-)
25 create mode 100644 drivers/dma-buf/heaps/deferred-free-helper.c
26 create mode 100644 drivers/dma-buf/heaps/deferred-free-helper.h
27 create mode 100644 drivers/dma-buf/heaps/page_pool.c
28 create mode 100644 drivers/dma-buf/heaps/page_pool.h
29
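Before the per-file hunks, a quick orientation: most of this patch exports the dma-heap allocation path to in-kernel users (dma_heap_find(), dma_heap_buffer_alloc(), dma_heap_bufferfd_alloc(), dma_heap_put()) and adds deferred-free and page-pool helper libraries that the reworked system heap builds on. As a rough sketch of how an in-kernel client could consume the newly exported API — the function name, heap name and error handling below are illustrative assumptions, not part of the patch:

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/fcntl.h>

/* Illustrative only: look up a named heap, allocate a buffer, drop the heap ref. */
static struct dma_buf *example_alloc_from_heap(const char *heap_name, size_t len)
{
	struct dma_heap *heap;
	struct dma_buf *buf;

	heap = dma_heap_find(heap_name);	/* takes a kref on the heap */
	if (!heap)
		return ERR_PTR(-ENODEV);

	/* fd_flags uses the usual O_* namespace; no heap-specific flags here */
	buf = dma_heap_buffer_alloc(heap, len, O_RDWR | O_CLOEXEC, 0);

	dma_heap_put(heap);			/* balances dma_heap_find() */
	return buf;
}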
30diff --git a/Kconfig b/Kconfig
31index 745bc773f..e6c33a96d 100644
32--- a/Kconfig
33+++ b/Kconfig
34@@ -7,6 +7,8 @@ mainmenu "Linux/$(ARCH) $(KERNELVERSION) Kernel Configuration"
35
36 source "scripts/Kconfig.include"
37
38+source "bsp/Kconfig"
39+
40 source "init/Kconfig"
41
42 source "kernel/Kconfig.freezer"
43diff --git a/Makefile b/Makefile
44index a45981b4f..037d0c79a 100644
45--- a/Makefile
46+++ b/Makefile
47@@ -491,6 +491,7 @@ LINUXINCLUDE    := \
48 		-I$(objtree)/arch/$(SRCARCH)/include/generated \
49 		$(if $(building_out_of_srctree),-I$(srctree)/include) \
50 		-I$(objtree)/include \
51+		-I$(srctree)/bsp/include \
52 		$(USERINCLUDE)
53
54 KBUILD_AFLAGS   := -D__ASSEMBLY__ -fno-PIE
55@@ -650,6 +651,7 @@ ifeq ($(KBUILD_EXTMOD),)
56 # Objects we will link into vmlinux / subdirs we need to visit
57 core-y		:= init/ usr/
58 drivers-y	:= drivers/ sound/
59+drivers-y	+= bsp/
60 drivers-$(CONFIG_SAMPLES) += samples/
61 drivers-y	+= net/ virt/
62 libs-y		:= lib/
63diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
64index 70e410c64..26d59b48f 100644
65--- a/drivers/dma-buf/dma-heap.c
66+++ b/drivers/dma-buf/dma-heap.c
67@@ -31,6 +31,7 @@
68  * @heap_devt		heap device node
69  * @list		list head connecting to list of heaps
70  * @heap_cdev		heap char device
71+ * @heap_dev		heap device struct
72  *
73  * Represents a heap of memory from which buffers can be made.
74  */
75@@ -41,6 +42,8 @@ struct dma_heap {
76 	dev_t heap_devt;
77 	struct list_head list;
78 	struct cdev heap_cdev;
79+	struct kref refcount;
80+	struct device *heap_dev;
81 };
82
83 static LIST_HEAD(heap_list);
84@@ -49,20 +52,72 @@ static dev_t dma_heap_devt;
85 static struct class *dma_heap_class;
86 static DEFINE_XARRAY_ALLOC(dma_heap_minors);
87
88-static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
89-				 unsigned int fd_flags,
90-				 unsigned int heap_flags)
91+struct dma_heap *dma_heap_find(const char *name)
92 {
93+	struct dma_heap *h;
94+
95+	mutex_lock(&heap_list_lock);
96+	list_for_each_entry(h, &heap_list, list) {
97+		if (!strcmp(h->name, name)) {
98+			kref_get(&h->refcount);
99+			mutex_unlock(&heap_list_lock);
100+			return h;
101+		}
102+	}
103+	mutex_unlock(&heap_list_lock);
104+	return NULL;
105+}
106+EXPORT_SYMBOL_GPL(dma_heap_find);
107+
108+
109+void dma_heap_buffer_free(struct dma_buf *dmabuf)
110+{
111+	dma_buf_put(dmabuf);
112+}
113+EXPORT_SYMBOL_GPL(dma_heap_buffer_free);
114+
115+struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
116+				      unsigned int fd_flags,
117+				      unsigned int heap_flags)
118+{
119+	if (fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
120+		return ERR_PTR(-EINVAL);
121+
122+	if (heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
123+		return ERR_PTR(-EINVAL);
124 	/*
125 	 * Allocations from all heaps have to begin
126 	 * and end on page boundaries.
127 	 */
128 	len = PAGE_ALIGN(len);
129 	if (!len)
130-		return -EINVAL;
131+		return ERR_PTR(-EINVAL);
132
133 	return heap->ops->allocate(heap, len, fd_flags, heap_flags);
134 }
135+EXPORT_SYMBOL_GPL(dma_heap_buffer_alloc);
136+
137+int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len,
138+			    unsigned int fd_flags,
139+			    unsigned int heap_flags)
140+{
141+	struct dma_buf *dmabuf;
142+	int fd;
143+
144+	dmabuf = dma_heap_buffer_alloc(heap, len, fd_flags, heap_flags);
145+
146+	if (IS_ERR(dmabuf))
147+		return PTR_ERR(dmabuf);
148+
149+	fd = dma_buf_fd(dmabuf, fd_flags);
150+	if (fd < 0) {
151+		dma_buf_put(dmabuf);
152+		/* just return, as put will call release and that will free */
153+	}
154+	return fd;
155+
156+}
157+EXPORT_SYMBOL_GPL(dma_heap_bufferfd_alloc);
158
159 static int dma_heap_open(struct inode *inode, struct file *file)
160 {
161@@ -90,15 +145,9 @@ static long dma_heap_ioctl_allocate(struct file *file, void *data)
162 	if (heap_allocation->fd)
163 		return -EINVAL;
164
165-	if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
166-		return -EINVAL;
167-
168-	if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
169-		return -EINVAL;
170-
171-	fd = dma_heap_buffer_alloc(heap, heap_allocation->len,
172-				   heap_allocation->fd_flags,
173-				   heap_allocation->heap_flags);
174+	fd = dma_heap_bufferfd_alloc(heap, heap_allocation->len,
175+				     heap_allocation->fd_flags,
176+				     heap_allocation->heap_flags);
177 	if (fd < 0)
178 		return fd;
179
180@@ -190,6 +240,47 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
181 {
182 	return heap->priv;
183 }
184+EXPORT_SYMBOL_GPL(dma_heap_get_drvdata);
185+
186+static void dma_heap_release(struct kref *ref)
187+{
188+	struct dma_heap *heap = container_of(ref, struct dma_heap, refcount);
189+	int minor = MINOR(heap->heap_devt);
190+
191+	/* Note, we are already holding the heap_list_lock here */
192+	list_del(&heap->list);
193+
194+	device_destroy(dma_heap_class, heap->heap_devt);
195+	cdev_del(&heap->heap_cdev);
196+	xa_erase(&dma_heap_minors, minor);
197+
198+	kfree(heap);
199+}
200+
201+void dma_heap_put(struct dma_heap *h)
202+{
203+	/*
204+	 * Take the heap_list_lock now to avoid racing with code
205+	 * scanning the list and then taking a kref.
206+	 */
207+	mutex_lock(&heap_list_lock);
208+	kref_put(&h->refcount, dma_heap_release);
209+	mutex_unlock(&heap_list_lock);
210+}
211+EXPORT_SYMBOL_GPL(dma_heap_put);
212+
213+/**
214+ * dma_heap_get_dev() - get device struct for the heap
215+ * @heap: DMA-Heap to retrieve device struct from
216+ *
217+ * Returns:
218+ * The device struct for the heap.
219+ */
220+struct device *dma_heap_get_dev(struct dma_heap *heap)
221+{
222+	return heap->heap_dev;
223+}
224+EXPORT_SYMBOL_GPL(dma_heap_get_dev);
225
226 /**
227  * dma_heap_get_name() - get heap name
228@@ -202,11 +293,11 @@ const char *dma_heap_get_name(struct dma_heap *heap)
229 {
230 	return heap->name;
231 }
232+EXPORT_SYMBOL_GPL(dma_heap_get_name);
233
234 struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
235 {
236-	struct dma_heap *heap, *h, *err_ret;
237-	struct device *dev_ret;
238+	struct dma_heap *heap, *err_ret;
239 	unsigned int minor;
240 	int ret;
241
242@@ -221,21 +312,19 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
243 	}
244
245 	/* check the name is unique */
246-	mutex_lock(&heap_list_lock);
247-	list_for_each_entry(h, &heap_list, list) {
248-		if (!strcmp(h->name, exp_info->name)) {
249-			mutex_unlock(&heap_list_lock);
250-			pr_err("dma_heap: Already registered heap named %s\n",
251-			       exp_info->name);
252-			return ERR_PTR(-EINVAL);
253-		}
254+	heap = dma_heap_find(exp_info->name);
255+	if (heap) {
256+		pr_err("dma_heap: Already registered heap named %s\n",
257+		       exp_info->name);
258+		dma_heap_put(heap);
259+		return ERR_PTR(-EINVAL);
260 	}
261-	mutex_unlock(&heap_list_lock);
262
263 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
264 	if (!heap)
265 		return ERR_PTR(-ENOMEM);
266
267+	kref_init(&heap->refcount);
268 	heap->name = exp_info->name;
269 	heap->ops = exp_info->ops;
270 	heap->priv = exp_info->priv;
271@@ -260,16 +349,20 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
272 		goto err1;
273 	}
274
275-	dev_ret = device_create(dma_heap_class,
276-				NULL,
277-				heap->heap_devt,
278-				NULL,
279-				heap->name);
280-	if (IS_ERR(dev_ret)) {
281+	heap->heap_dev = device_create(dma_heap_class,
282+				       NULL,
283+				       heap->heap_devt,
284+				       NULL,
285+				       heap->name);
286+	if (IS_ERR(heap->heap_dev)) {
287 		pr_err("dma_heap: Unable to create device\n");
288-		err_ret = ERR_CAST(dev_ret);
289+		err_ret = ERR_CAST(heap->heap_dev);
290 		goto err2;
291 	}
292+
293+	/* Make sure it doesn't disappear on us */
294+	heap->heap_dev = get_device(heap->heap_dev);
295+
296 	/* Add heap to the list */
297 	mutex_lock(&heap_list_lock);
298 	list_add(&heap->list, &heap_list);
299@@ -285,27 +378,88 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
300 	kfree(heap);
301 	return err_ret;
302 }
303+EXPORT_SYMBOL_GPL(dma_heap_add);
304
305 static char *dma_heap_devnode(struct device *dev, umode_t *mode)
306 {
307 	return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
308 }
309
310+static ssize_t total_pools_kb_show(struct kobject *kobj,
311+				   struct kobj_attribute *attr, char *buf)
312+{
313+	struct dma_heap *heap;
314+	u64 total_pool_size = 0;
315+
316+	mutex_lock(&heap_list_lock);
317+	list_for_each_entry(heap, &heap_list, list) {
318+		if (heap->ops->get_pool_size)
319+			total_pool_size += heap->ops->get_pool_size(heap);
320+	}
321+	mutex_unlock(&heap_list_lock);
322+
323+	return sysfs_emit(buf, "%llu\n", total_pool_size / 1024);
324+}
325+
326+static struct kobj_attribute total_pools_kb_attr =
327+	__ATTR_RO(total_pools_kb);
328+
329+static struct attribute *dma_heap_sysfs_attrs[] = {
330+	&total_pools_kb_attr.attr,
331+	NULL,
332+};
333+
334+ATTRIBUTE_GROUPS(dma_heap_sysfs);
335+
336+static struct kobject *dma_heap_kobject;
337+
338+static int dma_heap_sysfs_setup(void)
339+{
340+	int ret;
341+
342+	dma_heap_kobject = kobject_create_and_add("dma_heap", kernel_kobj);
343+	if (!dma_heap_kobject)
344+		return -ENOMEM;
345+
346+	ret = sysfs_create_groups(dma_heap_kobject, dma_heap_sysfs_groups);
347+	if (ret) {
348+		kobject_put(dma_heap_kobject);
349+		return ret;
350+	}
351+
352+	return 0;
353+}
354+
355+static void dma_heap_sysfs_teardown(void)
356+{
357+	kobject_put(dma_heap_kobject);
358+}
359+
360 static int dma_heap_init(void)
361 {
362 	int ret;
363
364-	ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
365+	ret = dma_heap_sysfs_setup();
366 	if (ret)
367 		return ret;
368
369+	ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
370+	if (ret)
371+		goto err_chrdev;
372+
373 	dma_heap_class = class_create(THIS_MODULE, DEVNAME);
374 	if (IS_ERR(dma_heap_class)) {
375-		unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
376-		return PTR_ERR(dma_heap_class);
377+		ret = PTR_ERR(dma_heap_class);
378+		goto err_class;
379 	}
380 	dma_heap_class->devnode = dma_heap_devnode;
381
382 	return 0;
383+
384+err_class:
385+	unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
386+err_chrdev:
387+	dma_heap_sysfs_teardown();
388+	return ret;
389 }
390 subsys_initcall(dma_heap_init);
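The core changes above assume a widened struct dma_heap_ops: allocate() now hands back the struct dma_buf itself (the fd is created separately by dma_heap_bufferfd_alloc()), and an optional get_pool_size() callback feeds the new /sys/kernel/dma_heap/total_pools_kb attribute. The include/linux/dma-heap.h hunk that carries this falls past the end of this excerpt; the sketch below is inferred from the callers above, so treat the exact field layout as an assumption.

/* Inferred shape of the ops table used by the core code above (assumption). */
struct dma_heap_ops {
	/* returns the dma_buf; callers turn it into an fd if they need one */
	struct dma_buf *(*allocate)(struct dma_heap *heap,
				    unsigned long len,
				    unsigned long fd_flags,
				    unsigned long heap_flags);
	/* optional: bytes currently parked in the heap's page pools,
	 * summed by total_pools_kb_show() */
	long (*get_pool_size)(struct dma_heap *heap);
};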
391diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
392index a5eef06c4..ff52efa83 100644
393--- a/drivers/dma-buf/heaps/Kconfig
394+++ b/drivers/dma-buf/heaps/Kconfig
395@@ -1,12 +1,22 @@
396+menuconfig DMABUF_HEAPS_DEFERRED_FREE
397+	bool "DMA-BUF heaps deferred-free library"
398+	help
399+	  Choose this option to enable the DMA-BUF heaps deferred-free library.
400+
401+menuconfig DMABUF_HEAPS_PAGE_POOL
402+	bool "DMA-BUF heaps page-pool library"
403+	help
404+	  Choose this option to enable the DMA-BUF heaps page-pool library.
405+
406 config DMABUF_HEAPS_SYSTEM
407-	bool "DMA-BUF System Heap"
408-	depends on DMABUF_HEAPS
409+	tristate "DMA-BUF System Heap"
410+	depends on DMABUF_HEAPS && DMABUF_HEAPS_DEFERRED_FREE && DMABUF_HEAPS_PAGE_POOL
411 	help
412 	  Choose this option to enable the system dmabuf heap. The system heap
413 	  is backed by pages from the buddy allocator. If in doubt, say Y.
414
415 config DMABUF_HEAPS_CMA
416-	bool "DMA-BUF CMA Heap"
417+	tristate "DMA-BUF CMA Heap"
418 	depends on DMABUF_HEAPS && DMA_CMA
419 	help
420 	  Choose this option to enable dma-buf CMA heap. This heap is backed
421diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
422index 6e54cdec3..4d4cd94a3 100644
423--- a/drivers/dma-buf/heaps/Makefile
424+++ b/drivers/dma-buf/heaps/Makefile
425@@ -1,4 +1,5 @@
426 # SPDX-License-Identifier: GPL-2.0
427-obj-y					+= heap-helpers.o
428+obj-$(CONFIG_DMABUF_HEAPS_DEFERRED_FREE) += deferred-free-helper.o
429+obj-$(CONFIG_DMABUF_HEAPS_PAGE_POOL)	+= page_pool.o
430 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM)	+= system_heap.o
431 obj-$(CONFIG_DMABUF_HEAPS_CMA)		+= cma_heap.o
432diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
433index e55384dc1..fd564aa70 100644
434--- a/drivers/dma-buf/heaps/cma_heap.c
435+++ b/drivers/dma-buf/heaps/cma_heap.c
436@@ -2,76 +2,306 @@
437 /*
438  * DMABUF CMA heap exporter
439  *
440- * Copyright (C) 2012, 2019 Linaro Ltd.
441+ * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
442  * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
443+ *
444+ * Also utilizing parts of Andrew Davis' SRAM heap:
445+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
446+ *	Andrew F. Davis <afd@ti.com>
447  */
448-
449 #include <linux/cma.h>
450-#include <linux/device.h>
451 #include <linux/dma-buf.h>
452 #include <linux/dma-heap.h>
453 #include <linux/dma-map-ops.h>
454 #include <linux/err.h>
455-#include <linux/errno.h>
456 #include <linux/highmem.h>
457+#include <linux/io.h>
458+#include <linux/mm.h>
459 #include <linux/module.h>
460-#include <linux/slab.h>
461 #include <linux/scatterlist.h>
462-#include <linux/sched/signal.h>
463+#include <linux/slab.h>
464+#include <linux/vmalloc.h>
465
466-#include "heap-helpers.h"
467
468 struct cma_heap {
469 	struct dma_heap *heap;
470 	struct cma *cma;
471 };
472
473-static void cma_heap_free(struct heap_helper_buffer *buffer)
474+struct cma_heap_buffer {
475+	struct cma_heap *heap;
476+	struct list_head attachments;
477+	struct mutex lock;
478+	unsigned long len;
479+	struct page *cma_pages;
480+	struct page **pages;
481+	pgoff_t pagecount;
482+	int vmap_cnt;
483+	void *vaddr;
484+};
485+
486+struct dma_heap_attachment {
487+	struct device *dev;
488+	struct sg_table table;
489+	struct list_head list;
490+	bool mapped;
491+};
492+
493+static int cma_heap_attach(struct dma_buf *dmabuf,
494+			   struct dma_buf_attachment *attachment)
495+{
496+	struct cma_heap_buffer *buffer = dmabuf->priv;
497+	struct dma_heap_attachment *a;
498+	int ret;
499+
500+	a = kzalloc(sizeof(*a), GFP_KERNEL);
501+	if (!a)
502+		return -ENOMEM;
503+
504+	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
505+					buffer->pagecount, 0,
506+					buffer->pagecount << PAGE_SHIFT,
507+					GFP_KERNEL);
508+	if (ret) {
509+		kfree(a);
510+		return ret;
511+	}
512+
513+	a->dev = attachment->dev;
514+	INIT_LIST_HEAD(&a->list);
515+	a->mapped = false;
516+
517+	attachment->priv = a;
518+
519+	mutex_lock(&buffer->lock);
520+	list_add(&a->list, &buffer->attachments);
521+	mutex_unlock(&buffer->lock);
522+
523+	return 0;
524+}
525+
526+static void cma_heap_detach(struct dma_buf *dmabuf,
527+			    struct dma_buf_attachment *attachment)
528+{
529+	struct cma_heap_buffer *buffer = dmabuf->priv;
530+	struct dma_heap_attachment *a = attachment->priv;
531+
532+	mutex_lock(&buffer->lock);
533+	list_del(&a->list);
534+	mutex_unlock(&buffer->lock);
535+
536+	sg_free_table(&a->table);
537+	kfree(a);
538+}
539+
540+static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
541+					     enum dma_data_direction direction)
542 {
543-	struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
544-	unsigned long nr_pages = buffer->pagecount;
545-	struct page *cma_pages = buffer->priv_virt;
546+	struct dma_heap_attachment *a = attachment->priv;
547+	struct sg_table *table = &a->table;
548+	int attrs = attachment->dma_map_attrs;
549+	int ret;
550+
551+	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
552+	if (ret)
553+		return ERR_PTR(-ENOMEM);
554+	a->mapped = true;
555+	return table;
556+}
557+
558+static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
559+				   struct sg_table *table,
560+				   enum dma_data_direction direction)
561+{
562+	struct dma_heap_attachment *a = attachment->priv;
563+	int attrs = attachment->dma_map_attrs;
564+
565+	a->mapped = false;
566+	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
567+}
568+
569+static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
570+					     enum dma_data_direction direction)
571+{
572+	struct cma_heap_buffer *buffer = dmabuf->priv;
573+	struct dma_heap_attachment *a;
574+
575+	if (buffer->vmap_cnt)
576+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
577+
578+	mutex_lock(&buffer->lock);
579+	list_for_each_entry(a, &buffer->attachments, list) {
580+		if (!a->mapped)
581+			continue;
582+		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
583+	}
584+	mutex_unlock(&buffer->lock);
585+
586+	return 0;
587+}
588+
589+static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
590+					   enum dma_data_direction direction)
591+{
592+	struct cma_heap_buffer *buffer = dmabuf->priv;
593+	struct dma_heap_attachment *a;
594+
595+	if (buffer->vmap_cnt)
596+		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
597+
598+	mutex_lock(&buffer->lock);
599+	list_for_each_entry(a, &buffer->attachments, list) {
600+		if (!a->mapped)
601+			continue;
602+		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
603+	}
604+	mutex_unlock(&buffer->lock);
605+
606+	return 0;
607+}
608+
609+static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
610+{
611+	struct vm_area_struct *vma = vmf->vma;
612+	struct cma_heap_buffer *buffer = vma->vm_private_data;
613+
614+	if (vmf->pgoff > buffer->pagecount)
615+		return VM_FAULT_SIGBUS;
616+
617+	vmf->page = buffer->pages[vmf->pgoff];
618+	get_page(vmf->page);
619+
620+	return 0;
621+}
622+
623+static const struct vm_operations_struct dma_heap_vm_ops = {
624+	.fault = cma_heap_vm_fault,
625+};
626+
627+static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
628+{
629+	struct cma_heap_buffer *buffer = dmabuf->priv;
630+
631+	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
632+		return -EINVAL;
633+
634+	vma->vm_ops = &dma_heap_vm_ops;
635+	vma->vm_private_data = buffer;
636+
637+	return 0;
638+}
639+
640+static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
641+{
642+	void *vaddr;
643+
644+	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
645+	if (!vaddr)
646+		return ERR_PTR(-ENOMEM);
647+
648+	return vaddr;
649+}
650+
651+static void *cma_heap_vmap(struct dma_buf *dmabuf)
652+{
653+	struct cma_heap_buffer *buffer = dmabuf->priv;
654+	void *vaddr;
655+
656+	mutex_lock(&buffer->lock);
657+	if (buffer->vmap_cnt) {
658+		buffer->vmap_cnt++;
659+		vaddr = buffer->vaddr;
660+		goto out;
661+	}
662+
663+	vaddr = cma_heap_do_vmap(buffer);
664+	if (IS_ERR(vaddr))
665+		goto out;
666+
667+	buffer->vaddr = vaddr;
668+	buffer->vmap_cnt++;
669+out:
670+	mutex_unlock(&buffer->lock);
671+
672+	return vaddr;
673+}
674+
675+static void cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
676+{
677+	struct cma_heap_buffer *buffer = dmabuf->priv;
678+
679+	mutex_lock(&buffer->lock);
680+	if (!--buffer->vmap_cnt) {
681+		vunmap(buffer->vaddr);
682+		buffer->vaddr = NULL;
683+	}
684+	mutex_unlock(&buffer->lock);
685+}
686+
687+static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
688+{
689+	struct cma_heap_buffer *buffer = dmabuf->priv;
690+	struct cma_heap *cma_heap = buffer->heap;
691+
692+	if (buffer->vmap_cnt > 0) {
693+		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
694+		vunmap(buffer->vaddr);
695+	}
696
697 	/* free page list */
698 	kfree(buffer->pages);
699 	/* release memory */
700-	cma_release(cma_heap->cma, cma_pages, nr_pages);
701+	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
702 	kfree(buffer);
703 }
704
705-/* dmabuf heap CMA operations functions */
706-static int cma_heap_allocate(struct dma_heap *heap,
707-			     unsigned long len,
708-			     unsigned long fd_flags,
709-			     unsigned long heap_flags)
710+static const struct dma_buf_ops cma_heap_buf_ops = {
711+	.attach = cma_heap_attach,
712+	.detach = cma_heap_detach,
713+	.map_dma_buf = cma_heap_map_dma_buf,
714+	.unmap_dma_buf = cma_heap_unmap_dma_buf,
715+	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
716+	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
717+	.mmap = cma_heap_mmap,
718+	.vmap = cma_heap_vmap,
719+	.vunmap = cma_heap_vunmap,
720+	.release = cma_heap_dma_buf_release,
721+};
722+
723+static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
724+					 unsigned long len,
725+					 unsigned long fd_flags,
726+					 unsigned long heap_flags)
727 {
728 	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
729-	struct heap_helper_buffer *helper_buffer;
730-	struct page *cma_pages;
731+	struct cma_heap_buffer *buffer;
732+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
733 	size_t size = PAGE_ALIGN(len);
734-	unsigned long nr_pages = size >> PAGE_SHIFT;
735+	pgoff_t pagecount = size >> PAGE_SHIFT;
736 	unsigned long align = get_order(size);
737+	struct page *cma_pages;
738 	struct dma_buf *dmabuf;
739 	int ret = -ENOMEM;
740 	pgoff_t pg;
741
742-	if (align > CONFIG_CMA_ALIGNMENT)
743-		align = CONFIG_CMA_ALIGNMENT;
744+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
745+	if (!buffer)
746+		return ERR_PTR(-ENOMEM);
747
748-	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
749-	if (!helper_buffer)
750-		return -ENOMEM;
751+	INIT_LIST_HEAD(&buffer->attachments);
752+	mutex_init(&buffer->lock);
753+	buffer->len = size;
754
755-	init_heap_helper_buffer(helper_buffer, cma_heap_free);
756-	helper_buffer->heap = heap;
757-	helper_buffer->size = len;
758+	if (align > CONFIG_CMA_ALIGNMENT)
759+		align = CONFIG_CMA_ALIGNMENT;
760
761-	cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
762+	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
763 	if (!cma_pages)
764-		goto free_buf;
765+		goto free_buffer;
766
767+	/* Clear the cma pages */
768 	if (PageHighMem(cma_pages)) {
769-		unsigned long nr_clear_pages = nr_pages;
770+		unsigned long nr_clear_pages = pagecount;
771 		struct page *page = cma_pages;
772
773 		while (nr_clear_pages > 0) {
774@@ -85,7 +315,6 @@ static int cma_heap_allocate(struct dma_heap *heap,
775 			 */
776 			if (fatal_signal_pending(current))
777 				goto free_cma;
778-
779 			page++;
780 			nr_clear_pages--;
781 		}
782@@ -93,44 +322,41 @@ static int cma_heap_allocate(struct dma_heap *heap,
783 		memset(page_address(cma_pages), 0, size);
784 	}
785
786-	helper_buffer->pagecount = nr_pages;
787-	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
788-					     sizeof(*helper_buffer->pages),
789-					     GFP_KERNEL);
790-	if (!helper_buffer->pages) {
791+	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
792+	if (!buffer->pages) {
793 		ret = -ENOMEM;
794 		goto free_cma;
795 	}
796
797-	for (pg = 0; pg < helper_buffer->pagecount; pg++)
798-		helper_buffer->pages[pg] = &cma_pages[pg];
799+	for (pg = 0; pg < pagecount; pg++)
800+		buffer->pages[pg] = &cma_pages[pg];
801+
802+	buffer->cma_pages = cma_pages;
803+	buffer->heap = cma_heap;
804+	buffer->pagecount = pagecount;
805
806 	/* create the dmabuf */
807-	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
808+	exp_info.exp_name = dma_heap_get_name(heap);
809+	exp_info.ops = &cma_heap_buf_ops;
810+	exp_info.size = buffer->len;
811+	exp_info.flags = fd_flags;
812+	exp_info.priv = buffer;
813+	dmabuf = dma_buf_export(&exp_info);
814 	if (IS_ERR(dmabuf)) {
815 		ret = PTR_ERR(dmabuf);
816 		goto free_pages;
817 	}
818
819-	helper_buffer->dmabuf = dmabuf;
820-	helper_buffer->priv_virt = cma_pages;
821-
822-	ret = dma_buf_fd(dmabuf, fd_flags);
823-	if (ret < 0) {
824-		dma_buf_put(dmabuf);
825-		/* just return, as put will call release and that will free */
826-		return ret;
827-	}
828-
829-	return ret;
830+	return dmabuf;
831
832 free_pages:
833-	kfree(helper_buffer->pages);
834+	kfree(buffer->pages);
835 free_cma:
836-	cma_release(cma_heap->cma, cma_pages, nr_pages);
837-free_buf:
838-	kfree(helper_buffer);
839-	return ret;
840+	cma_release(cma_heap->cma, cma_pages, pagecount);
841+free_buffer:
842+	kfree(buffer);
843+
844+	return ERR_PTR(ret);
845 }
846
847 static const struct dma_heap_ops cma_heap_ops = {
848diff --git a/drivers/dma-buf/heaps/deferred-free-helper.c b/drivers/dma-buf/heaps/deferred-free-helper.c
849new file mode 100644
850index 000000000..1330d279f
851--- /dev/null
852+++ b/drivers/dma-buf/heaps/deferred-free-helper.c
853@@ -0,0 +1,139 @@
854+// SPDX-License-Identifier: GPL-2.0
855+/*
856+ * Deferred dmabuf freeing helper
857+ *
858+ * Copyright (C) 2020 Linaro, Ltd.
859+ *
860+ * Based on the ION page pool code
861+ * Copyright (C) 2011 Google, Inc.
862+ */
863+
864+#include <linux/freezer.h>
865+#include <linux/list.h>
866+#include <linux/slab.h>
867+#include <linux/swap.h>
868+#include <linux/sched/signal.h>
869+
870+#include "deferred-free-helper.h"
871+
872+static LIST_HEAD(free_list);
873+static size_t list_nr_pages;
874+wait_queue_head_t freelist_waitqueue;
875+struct task_struct *freelist_task;
876+static DEFINE_SPINLOCK(free_list_lock);
877+
878+void deferred_free(struct deferred_freelist_item *item,
879+		   void (*free)(struct deferred_freelist_item*,
880+				enum df_reason),
881+		   size_t nr_pages)
882+{
883+	unsigned long flags;
884+
885+	INIT_LIST_HEAD(&item->list);
886+	item->nr_pages = nr_pages;
887+	item->free = free;
888+
889+	spin_lock_irqsave(&free_list_lock, flags);
890+	list_add(&item->list, &free_list);
891+	list_nr_pages += nr_pages;
892+	spin_unlock_irqrestore(&free_list_lock, flags);
893+	wake_up(&freelist_waitqueue);
894+}
895+EXPORT_SYMBOL_GPL(deferred_free);
896+
897+static size_t free_one_item(enum df_reason reason)
898+{
899+	unsigned long flags;
900+	size_t nr_pages;
901+	struct deferred_freelist_item *item;
902+
903+	spin_lock_irqsave(&free_list_lock, flags);
904+	if (list_empty(&free_list)) {
905+		spin_unlock_irqrestore(&free_list_lock, flags);
906+		return 0;
907+	}
908+	item = list_first_entry(&free_list, struct deferred_freelist_item, list);
909+	list_del(&item->list);
910+	nr_pages = item->nr_pages;
911+	list_nr_pages -= nr_pages;
912+	spin_unlock_irqrestore(&free_list_lock, flags);
913+
914+	item->free(item, reason);
915+	return nr_pages;
916+}
917+
918+unsigned long get_freelist_nr_pages(void)
919+{
920+	unsigned long nr_pages;
921+	unsigned long flags;
922+
923+	spin_lock_irqsave(&free_list_lock, flags);
924+	nr_pages = list_nr_pages;
925+	spin_unlock_irqrestore(&free_list_lock, flags);
926+	return nr_pages;
927+}
928+EXPORT_SYMBOL_GPL(get_freelist_nr_pages);
929+
930+static unsigned long freelist_shrink_count(struct shrinker *shrinker,
931+					   struct shrink_control *sc)
932+{
933+	return get_freelist_nr_pages();
934+}
935+
936+static unsigned long freelist_shrink_scan(struct shrinker *shrinker,
937+					  struct shrink_control *sc)
938+{
939+	unsigned long total_freed = 0;
940+
941+	if (sc->nr_to_scan == 0)
942+		return 0;
943+
944+	while (total_freed < sc->nr_to_scan) {
945+		size_t pages_freed = free_one_item(DF_UNDER_PRESSURE);
946+
947+		if (!pages_freed)
948+			break;
949+
950+		total_freed += pages_freed;
951+	}
952+
953+	return total_freed;
954+}
955+
956+static struct shrinker freelist_shrinker = {
957+	.count_objects = freelist_shrink_count,
958+	.scan_objects = freelist_shrink_scan,
959+	.seeks = DEFAULT_SEEKS,
960+	.batch = 0,
961+};
962+
963+static int deferred_free_thread(void *data)
964+{
965+	while (true) {
966+		wait_event_freezable(freelist_waitqueue,
967+				     get_freelist_nr_pages() > 0);
968+
969+		free_one_item(DF_NORMAL);
970+	}
971+
972+	return 0;
973+}
974+
975+static int deferred_freelist_init(void)
976+{
977+	list_nr_pages = 0;
978+
979+	init_waitqueue_head(&freelist_waitqueue);
980+	freelist_task = kthread_run(deferred_free_thread, NULL,
981+				    "%s", "dmabuf-deferred-free-worker");
982+	if (IS_ERR(freelist_task)) {
983+		pr_err("Creating thread for deferred free failed\n");
984+		return -1;
985+	}
986+
987+	sched_set_normal(freelist_task, 19);
988+
989+	return register_shrinker(&freelist_shrinker);
990+}
991+module_init(deferred_freelist_init);
992+MODULE_LICENSE("GPL v2");
993+
994diff --git a/drivers/dma-buf/heaps/deferred-free-helper.h b/drivers/dma-buf/heaps/deferred-free-helper.h
995new file mode 100644
996index 000000000..415440314
997--- /dev/null
998+++ b/drivers/dma-buf/heaps/deferred-free-helper.h
999@@ -0,0 +1,57 @@
1000+/* SPDX-License-Identifier: GPL-2.0 */
1001+
1002+#ifndef DEFERRED_FREE_HELPER_H
1003+#define DEFERRED_FREE_HELPER_H
1004+
1005+/**
1006+ * df_reason - enum for reason why item was freed
1007+ *
1008+ * This provides a reason for why the free function was called
1009+ * on the item. This is useful when deferred_free is used in
1010+ * combination with a pagepool, so under pressure the page can
1011+ * be immediately freed.
1012+ *
1013+ * DF_NORMAL:         Normal deferred free
1014+ *
1015+ * DF_UNDER_PRESSURE: Free was called because the system
1016+ *                    is under memory pressure. Usually
1017+ *                    from a shrinker. Avoid allocating
1018+ *                    memory in the free call, as it may
1019+ *                    fail.
1020+ */
1021+enum df_reason {
1022+	DF_NORMAL,
1023+	DF_UNDER_PRESSURE,
1024+};
1025+
1026+/**
1027+ * deferred_freelist_item - item structure for deferred freelist
1028+ *
1029+ * This is to be added to the structure for whatever you want to
1030+ * defer freeing on.
1031+ *
1032+ * @nr_pages: number of pages used by item to be freed
1033+ * @free: function pointer to be called when freeing the item
1034+ * @list: list entry for the deferred list
1035+ */
1036+struct deferred_freelist_item {
1037+	size_t nr_pages;
1038+	void (*free)(struct deferred_freelist_item *i,
1039+		     enum df_reason reason);
1040+	struct list_head list;
1041+};
1042+
1043+/**
1044+ * deferred_free - call to add item to the deferred free list
1045+ *
1046+ * @item: Pointer to deferred_freelist_item field of a structure
1047+ * @free: Function pointer to the free call
1048+ * @nr_pages: number of pages to be freed
1049+ */
1050+void deferred_free(struct deferred_freelist_item *item,
1051+		   void (*free)(struct deferred_freelist_item *i,
1052+				enum df_reason reason),
1053+		   size_t nr_pages);
1054+
1055+unsigned long get_freelist_nr_pages(void);
1056+#endif
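To make the deferred-free API above concrete, here is a minimal usage sketch. The "toy_buffer" type and its free path are hypothetical (the real consumer is the reworked system_heap.c further down); the point is only the embed-item / queue / callback pattern.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "deferred-free-helper.h"

struct toy_buffer {
	struct page *pages;		/* one higher-order allocation */
	unsigned int order;
	struct deferred_freelist_item deferred_free;
};

/* Called later by the dmabuf-deferred-free-worker thread, or by the shrinker. */
static void toy_buffer_free(struct deferred_freelist_item *item, enum df_reason reason)
{
	struct toy_buffer *buf = container_of(item, struct toy_buffer, deferred_free);

	/*
	 * On DF_UNDER_PRESSURE a real heap would skip any pooling or zeroing;
	 * this toy always just hands the pages back to the buddy allocator.
	 */
	__free_pages(buf->pages, buf->order);
	kfree(buf);
}

static void toy_buffer_release(struct toy_buffer *buf)
{
	/* Queue the item; the actual free happens asynchronously. */
	deferred_free(&buf->deferred_free, toy_buffer_free, 1 << buf->order);
}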
1057diff --git a/drivers/dma-buf/heaps/page_pool.c b/drivers/dma-buf/heaps/page_pool.c
1058new file mode 100644
1059index 000000000..b79e737ba
1060--- /dev/null
1061+++ b/drivers/dma-buf/heaps/page_pool.c
1062@@ -0,0 +1,247 @@
1063+// SPDX-License-Identifier: GPL-2.0
1064+/*
1065+ * DMA BUF page pool system
1066+ *
1067+ * Copyright (C) 2020 Linaro Ltd.
1068+ *
1069+ * Based on the ION page pool code
1070+ * Copyright (C) 2011 Google, Inc.
1071+ */
1072+
1073+#include <linux/freezer.h>
1074+#include <linux/list.h>
1075+#include <linux/slab.h>
1076+#include <linux/swap.h>
1077+#include <linux/sched/signal.h>
1078+#include "page_pool.h"
1079+
1080+static LIST_HEAD(pool_list);
1081+static DEFINE_MUTEX(pool_list_lock);
1082+
1083+static inline
1084+struct page *dmabuf_page_pool_alloc_pages(struct dmabuf_page_pool *pool)
1085+{
1086+	if (fatal_signal_pending(current))
1087+		return NULL;
1088+	return alloc_pages(pool->gfp_mask, pool->order);
1089+}
1090+
1091+static inline void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool,
1092+					       struct page *page)
1093+{
1094+	__free_pages(page, pool->order);
1095+}
1096+
1097+static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page)
1098+{
1099+	int index;
1100+
1101+	if (PageHighMem(page))
1102+		index = POOL_HIGHPAGE;
1103+	else
1104+		index = POOL_LOWPAGE;
1105+
1106+	mutex_lock(&pool->mutex);
1107+	list_add_tail(&page->lru, &pool->items[index]);
1108+	pool->count[index]++;
1109+	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
1110+			    1 << pool->order);
1111+	mutex_unlock(&pool->mutex);
1112+}
1113+
1114+static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index)
1115+{
1116+	struct page *page;
1117+
1118+	mutex_lock(&pool->mutex);
1119+	page = list_first_entry_or_null(&pool->items[index], struct page, lru);
1120+	if (page) {
1121+		pool->count[index]--;
1122+		list_del(&page->lru);
1123+		mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
1124+				    -(1 << pool->order));
1125+	}
1126+	mutex_unlock(&pool->mutex);
1127+
1128+	return page;
1129+}
1130+
1131+static struct page *dmabuf_page_pool_fetch(struct dmabuf_page_pool *pool)
1132+{
1133+	struct page *page = NULL;
1134+
1135+	page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
1136+	if (!page)
1137+		page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
1138+
1139+	return page;
1140+}
1141+
1142+struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool)
1143+{
1144+	struct page *page = NULL;
1145+
1146+	if (WARN_ON(!pool))
1147+		return NULL;
1148+
1149+	page = dmabuf_page_pool_fetch(pool);
1150+
1151+	if (!page)
1152+		page = dmabuf_page_pool_alloc_pages(pool);
1153+	return page;
1154+}
1155+EXPORT_SYMBOL_GPL(dmabuf_page_pool_alloc);
1156+
1157+void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page)
1158+{
1159+	if (WARN_ON(pool->order != compound_order(page)))
1160+		return;
1161+
1162+	dmabuf_page_pool_add(pool, page);
1163+}
1164+EXPORT_SYMBOL_GPL(dmabuf_page_pool_free);
1165+
1166+static int dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high)
1167+{
1168+	int count = pool->count[POOL_LOWPAGE];
1169+
1170+	if (high)
1171+		count += pool->count[POOL_HIGHPAGE];
1172+
1173+	return count << pool->order;
1174+}
1175+
1176+struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order)
1177+{
1178+	struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1179+	int i;
1180+
1181+	if (!pool)
1182+		return NULL;
1183+
1184+	for (i = 0; i < POOL_TYPE_SIZE; i++) {
1185+		pool->count[i] = 0;
1186+		INIT_LIST_HEAD(&pool->items[i]);
1187+	}
1188+	pool->gfp_mask = gfp_mask | __GFP_COMP;
1189+	pool->order = order;
1190+	mutex_init(&pool->mutex);
1191+
1192+	mutex_lock(&pool_list_lock);
1193+	list_add(&pool->list, &pool_list);
1194+	mutex_unlock(&pool_list_lock);
1195+
1196+	return pool;
1197+}
1198+EXPORT_SYMBOL_GPL(dmabuf_page_pool_create);
1199+
1200+void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
1201+{
1202+	struct page *page;
1203+	int i;
1204+
1205+	/* Remove us from the pool list */
1206+	mutex_lock(&pool_list_lock);
1207+	list_del(&pool->list);
1208+	mutex_unlock(&pool_list_lock);
1209+
1210+	/* Free any remaining pages in the pool */
1211+	for (i = 0; i < POOL_TYPE_SIZE; i++) {
1212+		while ((page = dmabuf_page_pool_remove(pool, i)))
1213+			dmabuf_page_pool_free_pages(pool, page);
1214+	}
1215+
1216+	kfree(pool);
1217+}
1218+EXPORT_SYMBOL_GPL(dmabuf_page_pool_destroy);
1219+
1220+static int dmabuf_page_pool_do_shrink(struct dmabuf_page_pool *pool, gfp_t gfp_mask,
1221+				      int nr_to_scan)
1222+{
1223+	int freed = 0;
1224+	bool high;
1225+
1226+	if (current_is_kswapd())
1227+		high = true;
1228+	else
1229+		high = !!(gfp_mask & __GFP_HIGHMEM);
1230+
1231+	if (nr_to_scan == 0)
1232+		return dmabuf_page_pool_total(pool, high);
1233+
1234+	while (freed < nr_to_scan) {
1235+		struct page *page;
1236+
1237+		/* Try to free low pages first */
1238+		page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
1239+		if (!page)
1240+			page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
1241+
1242+		if (!page)
1243+			break;
1244+
1245+		dmabuf_page_pool_free_pages(pool, page);
1246+		freed += (1 << pool->order);
1247+	}
1248+
1249+	return freed;
1250+}
1251+
1252+static int dmabuf_page_pool_shrink(gfp_t gfp_mask, int nr_to_scan)
1253+{
1254+	struct dmabuf_page_pool *pool;
1255+	int nr_total = 0;
1256+	int nr_freed;
1257+	int only_scan = 0;
1258+
1259+	if (!nr_to_scan)
1260+		only_scan = 1;
1261+
1262+	mutex_lock(&pool_list_lock);
1263+	list_for_each_entry(pool, &pool_list, list) {
1264+		if (only_scan) {
1265+			nr_total += dmabuf_page_pool_do_shrink(pool,
1266+							       gfp_mask,
1267+							       nr_to_scan);
1268+		} else {
1269+			nr_freed = dmabuf_page_pool_do_shrink(pool,
1270+							      gfp_mask,
1271+							      nr_to_scan);
1272+			nr_to_scan -= nr_freed;
1273+			nr_total += nr_freed;
1274+			if (nr_to_scan <= 0)
1275+				break;
1276+		}
1277+	}
1278+	mutex_unlock(&pool_list_lock);
1279+
1280+	return nr_total;
1281+}
1282+
1283+static unsigned long dmabuf_page_pool_shrink_count(struct shrinker *shrinker,
1284+						   struct shrink_control *sc)
1285+{
1286+	return dmabuf_page_pool_shrink(sc->gfp_mask, 0);
1287+}
1288+
1289+static unsigned long dmabuf_page_pool_shrink_scan(struct shrinker *shrinker,
1290+						  struct shrink_control *sc)
1291+{
1292+	if (sc->nr_to_scan == 0)
1293+		return 0;
1294+	return dmabuf_page_pool_shrink(sc->gfp_mask, sc->nr_to_scan);
1295+}
1296+
1297+struct shrinker pool_shrinker = {
1298+	.count_objects = dmabuf_page_pool_shrink_count,
1299+	.scan_objects = dmabuf_page_pool_shrink_scan,
1300+	.seeks = DEFAULT_SEEKS,
1301+	.batch = 0,
1302+};
1303+
1304+static int dmabuf_page_pool_init_shrinker(void)
1305+{
1306+	return register_shrinker(&pool_shrinker);
1307+}
1308+module_init(dmabuf_page_pool_init_shrinker);
1309+MODULE_LICENSE("GPL v2");
1310diff --git a/drivers/dma-buf/heaps/page_pool.h b/drivers/dma-buf/heaps/page_pool.h
1311new file mode 100644
1312index 000000000..6b083b04f
1313--- /dev/null
1314+++ b/drivers/dma-buf/heaps/page_pool.h
1315@@ -0,0 +1,55 @@
1316+/* SPDX-License-Identifier: GPL-2.0 */
1317+/*
1318+ * DMA BUF PagePool implementation
1319+ * Based on earlier ION code by Google
1320+ *
1321+ * Copyright (C) 2011 Google, Inc.
1322+ * Copyright (C) 2020 Linaro Ltd.
1323+ */
1324+
1325+#ifndef _DMABUF_PAGE_POOL_H
1326+#define _DMABUF_PAGE_POOL_H
1327+
1328+#include <linux/device.h>
1329+#include <linux/kref.h>
1330+#include <linux/mm_types.h>
1331+#include <linux/mutex.h>
1332+#include <linux/shrinker.h>
1333+#include <linux/types.h>
1334+
1335+/* page types we track in the pool */
1336+enum {
1337+	POOL_LOWPAGE,      /* Clean lowmem pages */
1338+	POOL_HIGHPAGE,     /* Clean highmem pages */
1339+
1340+	POOL_TYPE_SIZE,
1341+};
1342+
1343+/**
1344+ * struct dmabuf_page_pool - pagepool struct
1345+ * @count[]:		array of number of pages of that type in the pool
1346+ * @items[]:		array of list of pages of the specific type
1347+ * @mutex:		lock protecting this struct and especially the count
1348+ *			item list
1349+ * @gfp_mask:		gfp_mask to use from alloc
1350+ * @order:		order of pages in the pool
1351+ * @list:		list node for list of pools
1352+ *
1353+ * Allows you to keep a pool of pre-allocated pages to use
1354+ */
1355+struct dmabuf_page_pool {
1356+	int count[POOL_TYPE_SIZE];
1357+	struct list_head items[POOL_TYPE_SIZE];
1358+	struct mutex mutex;
1359+	gfp_t gfp_mask;
1360+	unsigned int order;
1361+	struct list_head list;
1362+};
1363+
1364+struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask,
1365+						 unsigned int order);
1366+void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool);
1367+struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool);
1368+void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page);
1369+
1370+#endif /* _DMABUF_PAGE_POOL_H */
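A short usage sketch for the page-pool API above (illustrative only; the real user is the system heap below, which keeps one pool per order in its pools[] array): create a pool for a given order, recycle pages through it, and tear it down when the heap goes away.

#include <linux/errno.h>
#include <linux/gfp.h>

#include "page_pool.h"

static struct dmabuf_page_pool *example_pool;

static int example_pool_setup(void)
{
	/* order-4 pool: 64 KiB chunks with 4 KiB pages */
	example_pool = dmabuf_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 4);
	return example_pool ? 0 : -ENOMEM;
}

static struct page *example_get_page(void)
{
	/* Reuses a pooled page if available, otherwise falls back to alloc_pages(). */
	return dmabuf_page_pool_alloc(example_pool);
}

static void example_put_page(struct page *page)
{
	/* Pages parked here stay reclaimable via the pool shrinker registered above. */
	dmabuf_page_pool_free(example_pool, page);
}

static void example_pool_teardown(void)
{
	/* Drains and frees anything still sitting in the pool. */
	dmabuf_page_pool_destroy(example_pool);
}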
1371diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
1372index 0bf688e3c..bbca2e195 100644
1373--- a/drivers/dma-buf/heaps/system_heap.c
1374+++ b/drivers/dma-buf/heaps/system_heap.c
1375@@ -3,7 +3,11 @@
1376  * DMABUF System heap exporter
1377  *
1378  * Copyright (C) 2011 Google, Inc.
1379- * Copyright (C) 2019 Linaro Ltd.
1380+ * Copyright (C) 2019, 2020 Linaro Ltd.
1381+ *
1382+ * Portions based off of Andrew Davis' SRAM heap:
1383+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
1384+ *	Andrew F. Davis <afd@ti.com>
1385  */
1386
1387 #include <linux/dma-buf.h>
1388@@ -15,99 +19,547 @@
1389 #include <linux/module.h>
1390 #include <linux/scatterlist.h>
1391 #include <linux/slab.h>
1392-#include <linux/sched/signal.h>
1393-#include <asm/page.h>
1394+#include <linux/vmalloc.h>
1395+
1396+#include "page_pool.h"
1397+#include "deferred-free-helper.h"
1398+
1399+static struct dma_heap *sys_heap;
1400+static struct dma_heap *sys_uncached_heap;
1401+
1402+struct system_heap_buffer {
1403+	struct dma_heap *heap;
1404+	struct list_head attachments;
1405+	struct mutex lock;
1406+	unsigned long len;
1407+	struct sg_table sg_table;
1408+	int vmap_cnt;
1409+	void *vaddr;
1410+	struct deferred_freelist_item deferred_free;
1411+
1412+	bool uncached;
1413+};
1414+
1415+struct dma_heap_attachment {
1416+	struct device *dev;
1417+	struct sg_table *table;
1418+	struct list_head list;
1419+	bool mapped;
1420
1421-#include "heap-helpers.h"
1422+	bool uncached;
1423+};
1424
1425-struct dma_heap *sys_heap;
1426+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
1427+#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
1428+#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
1429+				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
1430+				| __GFP_COMP)
1431+static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
1432+/*
1433+ * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
1434+ * to match with the sizes often found in IOMMUs. Using order 4 pages instead
1435+ * of order 0 pages can significantly improve the performance of many IOMMUs
1436+ * by reducing TLB pressure and time spent updating page tables.
1437+ */
1438+static const unsigned int orders[] = {8, 4, 0};
1439+#define NUM_ORDERS ARRAY_SIZE(orders)
1440+struct dmabuf_page_pool *pools[NUM_ORDERS];
1441
1442-static void system_heap_free(struct heap_helper_buffer *buffer)
1443+static struct sg_table *dup_sg_table(struct sg_table *table)
1444 {
1445-	pgoff_t pg;
1446+	struct sg_table *new_table;
1447+	int ret, i;
1448+	struct scatterlist *sg, *new_sg;
1449
1450-	for (pg = 0; pg < buffer->pagecount; pg++)
1451-		__free_page(buffer->pages[pg]);
1452-	kfree(buffer->pages);
1453-	kfree(buffer);
1454+	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
1455+	if (!new_table)
1456+		return ERR_PTR(-ENOMEM);
1457+
1458+	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
1459+	if (ret) {
1460+		kfree(new_table);
1461+		return ERR_PTR(-ENOMEM);
1462+	}
1463+
1464+	new_sg = new_table->sgl;
1465+	for_each_sgtable_sg(table, sg, i) {
1466+		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
1467+		new_sg = sg_next(new_sg);
1468+	}
1469+
1470+	return new_table;
1471 }
1472
1473-static int system_heap_allocate(struct dma_heap *heap,
1474-				unsigned long len,
1475-				unsigned long fd_flags,
1476-				unsigned long heap_flags)
1477+static int system_heap_attach(struct dma_buf *dmabuf,
1478+			      struct dma_buf_attachment *attachment)
1479 {
1480-	struct heap_helper_buffer *helper_buffer;
1481-	struct dma_buf *dmabuf;
1482-	int ret = -ENOMEM;
1483-	pgoff_t pg;
1484+	struct system_heap_buffer *buffer = dmabuf->priv;
1485+	struct dma_heap_attachment *a;
1486+	struct sg_table *table;
1487+
1488+	a = kzalloc(sizeof(*a), GFP_KERNEL);
1489+	if (!a)
1490+		return -ENOMEM;
1491
1492-	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
1493-	if (!helper_buffer)
1494+	table = dup_sg_table(&buffer->sg_table);
1495+	if (IS_ERR(table)) {
1496+		kfree(a);
1497 		return -ENOMEM;
1498+	}
1499+
1500+	a->table = table;
1501+	a->dev = attachment->dev;
1502+	INIT_LIST_HEAD(&a->list);
1503+	a->mapped = false;
1504+	a->uncached = buffer->uncached;
1505+	attachment->priv = a;
1506+
1507+	mutex_lock(&buffer->lock);
1508+	list_add(&a->list, &buffer->attachments);
1509+	mutex_unlock(&buffer->lock);
1510+
1511+	return 0;
1512+}
1513+
1514+static void system_heap_detach(struct dma_buf *dmabuf,
1515+			       struct dma_buf_attachment *attachment)
1516+{
1517+	struct system_heap_buffer *buffer = dmabuf->priv;
1518+	struct dma_heap_attachment *a = attachment->priv;
1519+
1520+	mutex_lock(&buffer->lock);
1521+	list_del(&a->list);
1522+	mutex_unlock(&buffer->lock);
1523+
1524+	sg_free_table(a->table);
1525+	kfree(a->table);
1526+	kfree(a);
1527+}
1528
1529-	init_heap_helper_buffer(helper_buffer, system_heap_free);
1530-	helper_buffer->heap = heap;
1531-	helper_buffer->size = len;
1532+static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
1533+						enum dma_data_direction direction)
1534+{
1535+	struct dma_heap_attachment *a = attachment->priv;
1536+	struct sg_table *table = a->table;
1537+	int attr = attachment->dma_map_attrs;
1538+	int ret;
1539+
1540+	if (a->uncached)
1541+		attr |= DMA_ATTR_SKIP_CPU_SYNC;
1542+
1543+	ret = dma_map_sgtable(attachment->dev, table, direction, attr);
1544+	if (ret)
1545+		return ERR_PTR(ret);
1546+
1547+	a->mapped = true;
1548+	return table;
1549+}
1550+
1551+static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
1552+				      struct sg_table *table,
1553+				      enum dma_data_direction direction)
1554+{
1555+	struct dma_heap_attachment *a = attachment->priv;
1556+	int attr = attachment->dma_map_attrs;
1557+
1558+	if (a->uncached)
1559+		attr |= DMA_ATTR_SKIP_CPU_SYNC;
1560+	a->mapped = false;
1561+	dma_unmap_sgtable(attachment->dev, table, direction, attr);
1562+}
1563
1564-	helper_buffer->pagecount = len / PAGE_SIZE;
1565-	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
1566-					     sizeof(*helper_buffer->pages),
1567-					     GFP_KERNEL);
1568-	if (!helper_buffer->pages) {
1569-		ret = -ENOMEM;
1570-		goto err0;
1571+static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1572+						enum dma_data_direction direction)
1573+{
1574+	struct system_heap_buffer *buffer = dmabuf->priv;
1575+	struct dma_heap_attachment *a;
1576+
1577+	mutex_lock(&buffer->lock);
1578+
1579+	if (buffer->vmap_cnt)
1580+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
1581+
1582+	if (!buffer->uncached) {
1583+		list_for_each_entry(a, &buffer->attachments, list) {
1584+			if (!a->mapped)
1585+				continue;
1586+			dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
1587+		}
1588 	}
1589+	mutex_unlock(&buffer->lock);
1590+
1591+	return 0;
1592+}
1593+
1594+static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1595+					      enum dma_data_direction direction)
1596+{
1597+	struct system_heap_buffer *buffer = dmabuf->priv;
1598+	struct dma_heap_attachment *a;
1599+
1600+	mutex_lock(&buffer->lock);
1601+
1602+	if (buffer->vmap_cnt)
1603+		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
1604+
1605+	if (!buffer->uncached) {
1606+		list_for_each_entry(a, &buffer->attachments, list) {
1607+			if (!a->mapped)
1608+				continue;
1609+			dma_sync_sgtable_for_device(a->dev, a->table, direction);
1610+		}
1611+	}
1612+	mutex_unlock(&buffer->lock);
1613+
1614+	return 0;
1615+}
1616+
1617+static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1618+{
1619+	struct system_heap_buffer *buffer = dmabuf->priv;
1620+	struct sg_table *table = &buffer->sg_table;
1621+	unsigned long addr = vma->vm_start;
1622+	struct sg_page_iter piter;
1623+	int ret;
1624+
1625+	if (buffer->uncached)
1626+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1627+
1628+	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
1629+		struct page *page = sg_page_iter_page(&piter);
1630+
1631+		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
1632+				      vma->vm_page_prot);
1633+		if (ret)
1634+			return ret;
1635+		addr += PAGE_SIZE;
1636+		if (addr >= vma->vm_end)
1637+			return 0;
1638+	}
1639+	return 0;
1640+}
1641+
1642+static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
1643+{
1644+	struct sg_table *table = &buffer->sg_table;
1645+	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
1646+	struct page **pages = vmalloc(sizeof(struct page *) * npages);
1647+	struct page **tmp = pages;
1648+	struct sg_page_iter piter;
1649+	pgprot_t pgprot = PAGE_KERNEL;
1650+	void *vaddr;
1651+
1652+	if (!pages)
1653+		return ERR_PTR(-ENOMEM);
1654+
1655+	if (buffer->uncached)
1656+		pgprot = pgprot_writecombine(PAGE_KERNEL);
1657+
1658+	for_each_sgtable_page(table, &piter, 0) {
1659+		WARN_ON(tmp - pages >= npages);
1660+		*tmp++ = sg_page_iter_page(&piter);
1661+	}
1662+
1663+	vaddr = vmap(pages, npages, VM_MAP, pgprot);
1664+	vfree(pages);
1665+
1666+	if (!vaddr)
1667+		return ERR_PTR(-ENOMEM);
1668+
1669+	return vaddr;
1670+}
1671+
1672+static void *system_heap_vmap(struct dma_buf *dmabuf)
1673+{
1674+	struct system_heap_buffer *buffer = dmabuf->priv;
1675+	void *vaddr;
1676+
1677+	mutex_lock(&buffer->lock);
1678+	if (buffer->vmap_cnt) {
1679+		buffer->vmap_cnt++;
1680+		vaddr = buffer->vaddr;
1681+		goto out;
1682+	}
1683+
1684+	vaddr = system_heap_do_vmap(buffer);
1685+	if (IS_ERR(vaddr))
1686+		goto out;
1687+
1688+	buffer->vaddr = vaddr;
1689+	buffer->vmap_cnt++;
1690+out:
1691+	mutex_unlock(&buffer->lock);
1692
1693-	for (pg = 0; pg < helper_buffer->pagecount; pg++) {
1694+	return vaddr;
1695+}
1696+
1697+static void system_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
1698+{
1699+	struct system_heap_buffer *buffer = dmabuf->priv;
1700+
1701+	mutex_lock(&buffer->lock);
1702+	if (!--buffer->vmap_cnt) {
1703+		vunmap(buffer->vaddr);
1704+		buffer->vaddr = NULL;
1705+	}
1706+	mutex_unlock(&buffer->lock);
1707+}
1708+
1709+static int system_heap_zero_buffer(struct system_heap_buffer *buffer)
1710+{
1711+	struct sg_table *sgt = &buffer->sg_table;
1712+	struct sg_page_iter piter;
1713+	struct page *p;
1714+	void *vaddr;
1715+	int ret = 0;
1716+
1717+	for_each_sgtable_page(sgt, &piter, 0) {
1718+		p = sg_page_iter_page(&piter);
1719+		vaddr = kmap_atomic(p);
1720+		memset(vaddr, 0, PAGE_SIZE);
1721+		kunmap_atomic(vaddr);
1722+	}
1723+
1724+	return ret;
1725+}
1726+
1727+static void system_heap_buf_free(struct deferred_freelist_item *item,
1728+				 enum df_reason reason)
1729+{
1730+	struct system_heap_buffer *buffer;
1731+	struct sg_table *table;
1732+	struct scatterlist *sg;
1733+	int i, j;
1734+
1735+	buffer = container_of(item, struct system_heap_buffer, deferred_free);
1736+	/* Zero the buffer pages before adding back to the pool */
1737+	if (reason == DF_NORMAL)
1738+		if (system_heap_zero_buffer(buffer))
1739+			reason = DF_UNDER_PRESSURE; // On failure, just free
1740+
1741+	table = &buffer->sg_table;
1742+	for_each_sgtable_sg(table, sg, i) {
1743+		struct page *page = sg_page(sg);
1744+
1745+		if (reason == DF_UNDER_PRESSURE) {
1746+			__free_pages(page, compound_order(page));
1747+		} else {
1748+			for (j = 0; j < NUM_ORDERS; j++) {
1749+				if (compound_order(page) == orders[j])
1750+					break;
1751+			}
1752+			dmabuf_page_pool_free(pools[j], page);
1753+		}
1754+	}
1755+	sg_free_table(table);
1756+	kfree(buffer);
1757+}
1758+
1759+static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
1760+{
1761+	struct system_heap_buffer *buffer = dmabuf->priv;
1762+	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
1763+
1764+	deferred_free(&buffer->deferred_free, system_heap_buf_free, npages);
1765+}
1766+
1767+static const struct dma_buf_ops system_heap_buf_ops = {
1768+	.attach = system_heap_attach,
1769+	.detach = system_heap_detach,
1770+	.map_dma_buf = system_heap_map_dma_buf,
1771+	.unmap_dma_buf = system_heap_unmap_dma_buf,
1772+	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
1773+	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
1774+	.mmap = system_heap_mmap,
1775+	.vmap = system_heap_vmap,
1776+	.vunmap = system_heap_vunmap,
1777+	.release = system_heap_dma_buf_release,
1778+};
1779+
1780+static struct page *alloc_largest_available(unsigned long size,
1781+					    unsigned int max_order)
1782+{
1783+	struct page *page;
1784+	int i;
1785+
1786+	for (i = 0; i < NUM_ORDERS; i++) {
1787+		if (size <  (PAGE_SIZE << orders[i]))
1788+			continue;
1789+		if (max_order < orders[i])
1790+			continue;
1791+		page = dmabuf_page_pool_alloc(pools[i]);
1792+		if (!page)
+			continue;
+		return page;
+	}
+	return NULL;
+}
+
+static struct dma_buf *system_heap_do_allocate(struct dma_heap *heap,
+					       unsigned long len,
+					       unsigned long fd_flags,
+					       unsigned long heap_flags,
+					       bool uncached)
+{
+	struct system_heap_buffer *buffer;
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	unsigned long size_remaining = len;
+	unsigned int max_order = orders[0];
+	struct dma_buf *dmabuf;
+	struct sg_table *table;
+	struct scatterlist *sg;
+	struct list_head pages;
+	struct page *page, *tmp_page;
+	int i, ret = -ENOMEM;
+
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&buffer->attachments);
+	mutex_init(&buffer->lock);
+	buffer->heap = heap;
+	buffer->len = len;
+	buffer->uncached = uncached;
+
+	INIT_LIST_HEAD(&pages);
+	i = 0;
+	while (size_remaining > 0) {
 		/*
 		 * Avoid trying to allocate memory if the process
-		 * has been killed by by SIGKILL
+		 * has been killed by SIGKILL
 		 */
 		if (fatal_signal_pending(current))
-			goto err1;
+			goto free_buffer;
+
+		page = alloc_largest_available(size_remaining, max_order);
+		if (!page)
+			goto free_buffer;
+
+		list_add_tail(&page->lru, &pages);
+		size_remaining -= page_size(page);
+		max_order = compound_order(page);
+		i++;
+	}

-		helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
-		if (!helper_buffer->pages[pg])
-			goto err1;
+	table = &buffer->sg_table;
+	if (sg_alloc_table(table, i, GFP_KERNEL))
+		goto free_buffer;
+
+	sg = table->sgl;
+	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
+		sg_set_page(sg, page, page_size(page), 0);
+		sg = sg_next(sg);
+		list_del(&page->lru);
 	}

 	/* create the dmabuf */
-	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+	exp_info.exp_name = dma_heap_get_name(heap);
+	exp_info.ops = &system_heap_buf_ops;
+	exp_info.size = buffer->len;
+	exp_info.flags = fd_flags;
+	exp_info.priv = buffer;
+	dmabuf = dma_buf_export(&exp_info);
 	if (IS_ERR(dmabuf)) {
 		ret = PTR_ERR(dmabuf);
-		goto err1;
+		goto free_pages;
+	}
+
+	/*
+	 * For uncached buffers, we need to initially flush cpu cache, since
+	 * the __GFP_ZERO on the allocation means the zeroing was done by the
+	 * cpu and thus it is likely cached. Map (and implicitly flush) and
+	 * unmap it now so we don't get corruption later on.
+	 */
+	if (buffer->uncached) {
+		dma_map_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
+		dma_unmap_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
 	}

-	helper_buffer->dmabuf = dmabuf;
+	return dmabuf;

-	ret = dma_buf_fd(dmabuf, fd_flags);
-	if (ret < 0) {
-		dma_buf_put(dmabuf);
-		/* just return, as put will call release and that will free */
-		return ret;
+free_pages:
+	for_each_sgtable_sg(table, sg, i) {
+		struct page *p = sg_page(sg);
+
+		__free_pages(p, compound_order(p));
 	}
+	sg_free_table(table);
+free_buffer:
+	list_for_each_entry_safe(page, tmp_page, &pages, lru)
+		__free_pages(page, compound_order(page));
+	kfree(buffer);

-	return ret;
+	return ERR_PTR(ret);
+}

-err1:
-	while (pg > 0)
-		__free_page(helper_buffer->pages[--pg]);
-	kfree(helper_buffer->pages);
-err0:
-	kfree(helper_buffer);
+static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
+					    unsigned long len,
+					    unsigned long fd_flags,
+					    unsigned long heap_flags)
+{
+	return system_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
+}

-	return ret;
+static long system_get_pool_size(struct dma_heap *heap)
+{
+	int i;
+	long num_pages = 0;
+	struct dmabuf_page_pool **pool;
+
+	pool = pools;
+	for (i = 0; i < NUM_ORDERS; i++, pool++) {
+		num_pages += ((*pool)->count[POOL_LOWPAGE] +
+			      (*pool)->count[POOL_HIGHPAGE]) << (*pool)->order;
+	}
+
+	return num_pages << PAGE_SHIFT;
 }

 static const struct dma_heap_ops system_heap_ops = {
 	.allocate = system_heap_allocate,
+	.get_pool_size = system_get_pool_size,
+};
+
+static struct dma_buf *system_uncached_heap_allocate(struct dma_heap *heap,
+						     unsigned long len,
+						     unsigned long fd_flags,
+						     unsigned long heap_flags)
+{
+	return system_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
+}
+
+/* Dummy function to be used until we can call coerce_mask_and_coherent */
+static struct dma_buf *system_uncached_heap_not_initialized(struct dma_heap *heap,
+							    unsigned long len,
+							    unsigned long fd_flags,
+							    unsigned long heap_flags)
+{
+	return ERR_PTR(-EBUSY);
+}
+
+static struct dma_heap_ops system_uncached_heap_ops = {
+	/* After system_heap_create is complete, we will swap this */
+	.allocate = system_uncached_heap_not_initialized,
 };

 static int system_heap_create(void)
 {
 	struct dma_heap_export_info exp_info;
-	int ret = 0;
+	int i;
+
+	for (i = 0; i < NUM_ORDERS; i++) {
+		pools[i] = dmabuf_page_pool_create(order_flags[i], orders[i]);
+
+		if (!pools[i]) {
+			int j;
+
+			pr_err("%s: page pool creation failed!\n", __func__);
+			for (j = 0; j < i; j++)
+				dmabuf_page_pool_destroy(pools[j]);
+			return -ENOMEM;
+		}
+	}

 	exp_info.name = "system";
 	exp_info.ops = &system_heap_ops;
@@ -115,9 +567,21 @@ static int system_heap_create(void)

 	sys_heap = dma_heap_add(&exp_info);
 	if (IS_ERR(sys_heap))
-		ret = PTR_ERR(sys_heap);
+		return PTR_ERR(sys_heap);

-	return ret;
+	exp_info.name = "system-uncached";
+	exp_info.ops = &system_uncached_heap_ops;
+	exp_info.priv = NULL;
+
+	sys_uncached_heap = dma_heap_add(&exp_info);
+	if (IS_ERR(sys_uncached_heap))
+		return PTR_ERR(sys_uncached_heap);
+
+	dma_coerce_mask_and_coherent(dma_heap_get_dev(sys_uncached_heap), DMA_BIT_MASK(64));
+	mb(); /* make sure we only set allocate after dma_mask is set */
+	system_uncached_heap_ops.allocate = system_uncached_heap_allocate;
+
+	return 0;
 }
 module_init(system_heap_create);
 MODULE_LICENSE("GPL v2");
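
For illustration only (not part of the patch): once the "system-uncached" heap registered above is created, the dma-heap core exposes it to user space as /dev/dma_heap/system-uncached, and any allocation attempted before system_heap_create() swaps in the real allocate op simply fails with -EBUSY. A minimal user-space sketch using the standard DMA_HEAP_IOCTL_ALLOC ioctl could look like the following; the 1 MiB size and the error handling are illustrative assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int main(void)
{
	struct dma_heap_allocation_data data;
	int heap_fd;

	heap_fd = open("/dev/dma_heap/system-uncached", O_RDWR | O_CLOEXEC);
	if (heap_fd < 0) {
		perror("open heap");
		return 1;
	}

	memset(&data, 0, sizeof(data));
	data.len = 1 * 1024 * 1024;		/* arbitrary example size */
	data.fd_flags = O_RDWR | O_CLOEXEC;	/* flags for the returned dma-buf fd */
	data.heap_flags = 0;			/* no heap-specific flags defined */

	if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
		perror("DMA_HEAP_IOCTL_ALLOC");
		close(heap_fd);
		return 1;
	}

	printf("allocated dma-buf fd %u\n", data.fd);
	close(data.fd);		/* drops the buffer once all importers are done */
	close(heap_fd);
	return 0;
}

The returned data.fd is an ordinary dma-buf file descriptor that can be mmap()ed or handed to other drivers like any other dma-buf.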
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 109d11fb4..4aac2ec86 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -461,9 +461,10 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
 		return 0;

 	obj = drm_gem_fb_get_obj(state->fb, 0);
-	fence = dma_resv_get_excl_rcu(obj->resv);
-	drm_atomic_set_fence_for_plane(state, fence);
-
+	if (obj) {
+		fence = dma_resv_get_excl_rcu(obj->resv);
+		drm_atomic_set_fence_for_plane(state, fence);
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index f135b7959..fc9c53e3e 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -1100,7 +1100,7 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
 		return dev->driver->enable_vblank(dev, pipe);
 	}

-	return -EINVAL;
+	return 0;
 }

 static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 0c5706abb..380c19070 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -412,6 +412,7 @@ struct dma_buf_attachment {
 	const struct dma_buf_attach_ops *importer_ops;
 	void *importer_priv;
 	void *priv;
+	unsigned long dma_map_attrs;
 };

 /**
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 83b8cfb2d..e8f0e92c2 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -16,15 +16,17 @@ struct dma_heap;

 /**
  * struct dma_heap_ops - ops to operate on a given heap
- * @allocate:		allocate dmabuf and return fd
+ * @allocate:		allocate dmabuf and return struct dma_buf ptr
+ * @get_pool_size:	if heap maintains memory pools, get pool size in bytes
  *
- * allocate returns dmabuf fd  on success, -errno on error.
+ * allocate returns dmabuf on success, ERR_PTR(-errno) on error.
  */
 struct dma_heap_ops {
-	int (*allocate)(struct dma_heap *heap,
+	struct dma_buf *(*allocate)(struct dma_heap *heap,
 			unsigned long len,
 			unsigned long fd_flags,
 			unsigned long heap_flags);
+	long (*get_pool_size)(struct dma_heap *heap);
 };

 /**
@@ -50,6 +52,15 @@ struct dma_heap_export_info {
  */
 void *dma_heap_get_drvdata(struct dma_heap *heap);

+/**
+ * dma_heap_get_dev() - get device struct for the heap
+ * @heap: DMA-Heap to retrieve device struct from
+ *
+ * Returns:
+ * The device struct for the heap.
+ */
+struct device *dma_heap_get_dev(struct dma_heap *heap);
+
 /**
  * dma_heap_get_name() - get heap name
  * @heap: DMA-Heap to retrieve private data for
@@ -65,4 +76,49 @@ const char *dma_heap_get_name(struct dma_heap *heap);
  */
 struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);

+/**
+ * dma_heap_put - drops a reference to a dmabuf heap, potentially freeing it
+ * @heap:		heap pointer
+ */
+void dma_heap_put(struct dma_heap *heap);
+
+/**
+ * dma_heap_find - Returns the registered dma_heap with the specified name
+ * @name: Name of the heap to find
+ *
+ * NOTE: dma_heaps returned from this function MUST be released
+ * using dma_heap_put() when the user is done.
+ */
+struct dma_heap *dma_heap_find(const char *name);
+
+/**
+ * dma_heap_buffer_alloc - Allocate dma-buf from a dma_heap
+ * @heap:	dma_heap to allocate from
+ * @len:	size to allocate
+ * @fd_flags:	flags to set on returned dma-buf fd
+ * @heap_flags:	flags to pass to the dma heap
+ *
+ * This is for internal dma-buf allocations only.
+ */
+struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
+				      unsigned int fd_flags,
+				      unsigned int heap_flags);
+
+/** dma_heap_buffer_free - Free dma_buf allocated by dma_heap_buffer_alloc
+ * @dma_buf:	dma_buf to free
+ *
+ * This is really only a simple wrapper to dma_buf_put()
+ */
+void dma_heap_buffer_free(struct dma_buf *);
+
+/**
+ * dma_heap_bufferfd_alloc - Allocate dma-buf fd from a dma_heap
+ * @heap:	dma_heap to allocate from
+ * @len:	size to allocate
+ * @fd_flags:	flags to set on returned dma-buf fd
+ * @heap_flags:	flags to pass to the dma heap
+ */
+int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len,
+			    unsigned int fd_flags,
+			    unsigned int heap_flags);
 #endif /* _DMA_HEAPS_H */
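
As an illustration of the kernel-internal API declared above (dma_heap_find(), dma_heap_buffer_alloc(), dma_heap_put(), dma_heap_buffer_free()), a driver could allocate from the "system" heap roughly as follows; the helper name and error handling are hypothetical, not part of this patch.

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/fcntl.h>

/* Hypothetical in-kernel helper, not part of this patch. */
static struct dma_buf *example_system_heap_alloc(size_t len)
{
	struct dma_heap *heap;
	struct dma_buf *buf;

	/* Lookup takes a reference that must be released with dma_heap_put(). */
	heap = dma_heap_find("system");
	if (!heap)
		return ERR_PTR(-ENODEV);

	buf = dma_heap_buffer_alloc(heap, len, O_RDWR | O_CLOEXEC, 0);

	/* Done with the heap lookup, per the NOTE on dma_heap_find(). */
	dma_heap_put(heap);

	return buf;	/* caller releases it with dma_heap_buffer_free(buf) */
}

dma_heap_buffer_alloc() hands back a struct dma_buf pointer rather than a file descriptor, which is what makes it usable from other kernel drivers; dma_heap_bufferfd_alloc() is the variant that also installs an fd for passing the buffer to user space.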
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e2f00be4b..dfc23d434 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5713,16 +5713,19 @@ int sched_setscheduler(struct task_struct *p, int policy,
 {
 	return _sched_setscheduler(p, policy, param, true);
 }
+EXPORT_SYMBOL_GPL(sched_setscheduler);

 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
 {
 	return __sched_setscheduler(p, attr, true, true);
 }
+EXPORT_SYMBOL_GPL(sched_setattr);

 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
 {
 	return __sched_setscheduler(p, attr, false, true);
 }
+EXPORT_SYMBOL_GPL(sched_setattr_nocheck);

 /**
  * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
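
For illustration only (not part of the patch): these exports let out-of-tree BSP modules adjust task scheduling from kernel space. A minimal, hypothetical sketch follows; the helper name and the priority value are arbitrary.

#include <linux/sched.h>
#include <uapi/linux/sched/types.h>

/* Hypothetical module-side helper, not part of this patch. */
static int example_make_task_fifo(struct task_struct *task)
{
	struct sched_param param = {
		.sched_priority = 10,	/* arbitrary RT priority for illustration */
	};

	/* Only possible for modules because sched_setscheduler() is exported above. */
	return sched_setscheduler(task, SCHED_FIFO, &param);
}

Mainline otherwise steers modules toward helpers such as sched_set_fifo(); the explicit exports added here let BSP code pick its own policy and priority.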
--
2.25.1