From 0e7ae3e758cc4c6e0f49f1ffcc5d59df8f5cf7bb Mon Sep 17 00:00:00 2001 From: ygopenhm Date: Thu, 29 Sep 2022 09:27:27 +0800 Subject: [PATCH] kernel Signed-off-by: ygopenhm --- Kconfig | 2 + Makefile | 2 + drivers/dma-buf/dma-heap.c | 223 +++++-- drivers/dma-buf/heaps/Kconfig | 16 +- drivers/dma-buf/heaps/Makefile | 3 +- drivers/dma-buf/heaps/cma_heap.c | 338 +++++++++-- drivers/dma-buf/heaps/deferred-free-helper.c | 139 +++++ drivers/dma-buf/heaps/deferred-free-helper.h | 57 ++ drivers/dma-buf/heaps/page_pool.c | 247 ++++++++ drivers/dma-buf/heaps/page_pool.h | 55 ++ drivers/dma-buf/heaps/system_heap.c | 576 +++++++++++++++++-- drivers/gpu/drm/drm_gem_framebuffer_helper.c | 7 +- drivers/gpu/drm/drm_vblank.c | 2 +- include/linux/dma-buf.h | 1 + include/linux/dma-heap.h | 62 +- kernel/sched/core.c | 3 + 16 files changed, 1576 insertions(+), 157 deletions(-) create mode 100644 drivers/dma-buf/heaps/deferred-free-helper.c create mode 100644 drivers/dma-buf/heaps/deferred-free-helper.h create mode 100644 drivers/dma-buf/heaps/page_pool.c create mode 100644 drivers/dma-buf/heaps/page_pool.h diff --git a/Kconfig b/Kconfig index 745bc773f..e6c33a96d 100644 --- a/Kconfig +++ b/Kconfig @@ -7,6 +7,8 @@ mainmenu "Linux/$(ARCH) $(KERNELVERSION) Kernel Configuration" source "scripts/Kconfig.include" +source "bsp/Kconfig" + source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/Makefile b/Makefile index a45981b4f..037d0c79a 100644 --- a/Makefile +++ b/Makefile @@ -491,6 +491,7 @@ LINUXINCLUDE := \ -I$(objtree)/arch/$(SRCARCH)/include/generated \ $(if $(building_out_of_srctree),-I$(srctree)/include) \ -I$(objtree)/include \ + -I$(srctree)/bsp/include \ $(USERINCLUDE) KBUILD_AFLAGS := -D__ASSEMBLY__ -fno-PIE @@ -650,6 +651,7 @@ ifeq ($(KBUILD_EXTMOD),) # Objects we will link into vmlinux / subdirs we need to visit core-y := init/ usr/ drivers-y := drivers/ sound/ +drivers-y += bsp/ drivers-$(CONFIG_SAMPLES) += samples/ drivers-y += net/ virt/ libs-y := lib/ diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c index 70e410c64..26d59b48f 100644 --- a/drivers/dma-buf/dma-heap.c +++ b/drivers/dma-buf/dma-heap.c @@ -31,6 +31,7 @@ * @heap_devt heap device node * @list list head connecting to list of heaps * @heap_cdev heap char device + * @heap_dev heap device struct * * Represents a heap of memory from which buffers can be made. 
*/ @@ -41,6 +42,8 @@ struct dma_heap { dev_t heap_devt; struct list_head list; struct cdev heap_cdev; + struct kref refcount; + struct device *heap_dev; }; static LIST_HEAD(heap_list); @@ -49,20 +52,72 @@ static dev_t dma_heap_devt; static struct class *dma_heap_class; static DEFINE_XARRAY_ALLOC(dma_heap_minors); -static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, - unsigned int fd_flags, - unsigned int heap_flags) +struct dma_heap *dma_heap_find(const char *name) { + struct dma_heap *h; + + mutex_lock(&heap_list_lock); + list_for_each_entry(h, &heap_list, list) { + if (!strcmp(h->name, name)) { + kref_get(&h->refcount); + mutex_unlock(&heap_list_lock); + return h; + } + } + mutex_unlock(&heap_list_lock); + return NULL; +} +EXPORT_SYMBOL_GPL(dma_heap_find); + + +void dma_heap_buffer_free(struct dma_buf *dmabuf) +{ + dma_buf_put(dmabuf); +} +EXPORT_SYMBOL_GPL(dma_heap_buffer_free); + +struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, + unsigned int fd_flags, + unsigned int heap_flags) +{ + if (fd_flags & ~DMA_HEAP_VALID_FD_FLAGS) + return ERR_PTR(-EINVAL); + + if (heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS) + return ERR_PTR(-EINVAL); /* * Allocations from all heaps have to begin * and end on page boundaries. */ len = PAGE_ALIGN(len); if (!len) - return -EINVAL; + return ERR_PTR(-EINVAL); return heap->ops->allocate(heap, len, fd_flags, heap_flags); } +EXPORT_SYMBOL_GPL(dma_heap_buffer_alloc); + +int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len, + unsigned int fd_flags, + unsigned int heap_flags) +{ + struct dma_buf *dmabuf; + int fd; + + dmabuf = dma_heap_buffer_alloc(heap, len, fd_flags, heap_flags); + + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + fd = dma_buf_fd(dmabuf, fd_flags); + if (fd < 0) { + dma_buf_put(dmabuf); + /* just return, as put will call release and that will free */ + } + return fd; + +} +EXPORT_SYMBOL_GPL(dma_heap_bufferfd_alloc); static int dma_heap_open(struct inode *inode, struct file *file) { @@ -90,15 +145,9 @@ static long dma_heap_ioctl_allocate(struct file *file, void *data) if (heap_allocation->fd) return -EINVAL; - if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS) - return -EINVAL; - - if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS) - return -EINVAL; - - fd = dma_heap_buffer_alloc(heap, heap_allocation->len, - heap_allocation->fd_flags, - heap_allocation->heap_flags); + fd = dma_heap_bufferfd_alloc(heap, heap_allocation->len, + heap_allocation->fd_flags, + heap_allocation->heap_flags); if (fd < 0) return fd; @@ -190,6 +240,47 @@ void *dma_heap_get_drvdata(struct dma_heap *heap) { return heap->priv; } +EXPORT_SYMBOL_GPL(dma_heap_get_drvdata); + +static void dma_heap_release(struct kref *ref) +{ + struct dma_heap *heap = container_of(ref, struct dma_heap, refcount); + int minor = MINOR(heap->heap_devt); + + /* Note, we already holding the heap_list_lock here */ + list_del(&heap->list); + + device_destroy(dma_heap_class, heap->heap_devt); + cdev_del(&heap->heap_cdev); + xa_erase(&dma_heap_minors, minor); + + kfree(heap); +} + +void dma_heap_put(struct dma_heap *h) +{ + /* + * Take the heap_list_lock now to avoid racing with code + * scanning the list and then taking a kref. + */ + mutex_lock(&heap_list_lock); + kref_put(&h->refcount, dma_heap_release); + mutex_unlock(&heap_list_lock); +} +EXPORT_SYMBOL_GPL(dma_heap_put); + +/** + * dma_heap_get_dev() - get device struct for the heap + * @heap: DMA-Heap to retrieve device struct from + * + * Returns: + * The device struct for the heap. 
+ */ +struct device *dma_heap_get_dev(struct dma_heap *heap) +{ + return heap->heap_dev; +} +EXPORT_SYMBOL_GPL(dma_heap_get_dev); /** * dma_heap_get_name() - get heap name @@ -202,11 +293,11 @@ const char *dma_heap_get_name(struct dma_heap *heap) { return heap->name; } +EXPORT_SYMBOL_GPL(dma_heap_get_name); struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) { - struct dma_heap *heap, *h, *err_ret; - struct device *dev_ret; + struct dma_heap *heap, *err_ret; unsigned int minor; int ret; @@ -221,21 +312,19 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) } /* check the name is unique */ - mutex_lock(&heap_list_lock); - list_for_each_entry(h, &heap_list, list) { - if (!strcmp(h->name, exp_info->name)) { - mutex_unlock(&heap_list_lock); - pr_err("dma_heap: Already registered heap named %s\n", - exp_info->name); - return ERR_PTR(-EINVAL); - } + heap = dma_heap_find(exp_info->name); + if (heap) { + pr_err("dma_heap: Already registered heap named %s\n", + exp_info->name); + dma_heap_put(heap); + return ERR_PTR(-EINVAL); } - mutex_unlock(&heap_list_lock); heap = kzalloc(sizeof(*heap), GFP_KERNEL); if (!heap) return ERR_PTR(-ENOMEM); + kref_init(&heap->refcount); heap->name = exp_info->name; heap->ops = exp_info->ops; heap->priv = exp_info->priv; @@ -260,16 +349,20 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) goto err1; } - dev_ret = device_create(dma_heap_class, - NULL, - heap->heap_devt, - NULL, - heap->name); - if (IS_ERR(dev_ret)) { + heap->heap_dev = device_create(dma_heap_class, + NULL, + heap->heap_devt, + NULL, + heap->name); + if (IS_ERR(heap->heap_dev)) { pr_err("dma_heap: Unable to create device\n"); - err_ret = ERR_CAST(dev_ret); + err_ret = ERR_CAST(heap->heap_dev); goto err2; } + + /* Make sure it doesn't disappear on us */ + heap->heap_dev = get_device(heap->heap_dev); + /* Add heap to the list */ mutex_lock(&heap_list_lock); list_add(&heap->list, &heap_list); @@ -285,27 +378,88 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) kfree(heap); return err_ret; } +EXPORT_SYMBOL_GPL(dma_heap_add); static char *dma_heap_devnode(struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev)); } +static ssize_t total_pools_kb_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct dma_heap *heap; + u64 total_pool_size = 0; + + mutex_lock(&heap_list_lock); + list_for_each_entry(heap, &heap_list, list) { + if (heap->ops->get_pool_size) + total_pool_size += heap->ops->get_pool_size(heap); + } + mutex_unlock(&heap_list_lock); + + return sysfs_emit(buf, "%llu\n", total_pool_size / 1024); +} + +static struct kobj_attribute total_pools_kb_attr = + __ATTR_RO(total_pools_kb); + +static struct attribute *dma_heap_sysfs_attrs[] = { + &total_pools_kb_attr.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(dma_heap_sysfs); + +static struct kobject *dma_heap_kobject; + +static int dma_heap_sysfs_setup(void) +{ + int ret; + + dma_heap_kobject = kobject_create_and_add("dma_heap", kernel_kobj); + if (!dma_heap_kobject) + return -ENOMEM; + + ret = sysfs_create_groups(dma_heap_kobject, dma_heap_sysfs_groups); + if (ret) { + kobject_put(dma_heap_kobject); + return ret; + } + + return 0; +} + +static void dma_heap_sysfs_teardown(void) +{ + kobject_put(dma_heap_kobject); +} + static int dma_heap_init(void) { int ret; - ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME); + ret = dma_heap_sysfs_setup(); if (ret) return ret; + ret = 
alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME); + if (ret) + goto err_chrdev; + dma_heap_class = class_create(THIS_MODULE, DEVNAME); if (IS_ERR(dma_heap_class)) { - unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS); - return PTR_ERR(dma_heap_class); + ret = PTR_ERR(dma_heap_class); + goto err_class; } dma_heap_class->devnode = dma_heap_devnode; return 0; + +err_class: + unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS); +err_chrdev: + dma_heap_sysfs_teardown(); + return ret; } subsys_initcall(dma_heap_init); diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig index a5eef06c4..ff52efa83 100644 --- a/drivers/dma-buf/heaps/Kconfig +++ b/drivers/dma-buf/heaps/Kconfig @@ -1,12 +1,22 @@ +menuconfig DMABUF_HEAPS_DEFERRED_FREE + bool "DMA-BUF heaps deferred-free library" + help + Choose this option to enable the DMA-BUF heaps deferred-free library. + +menuconfig DMABUF_HEAPS_PAGE_POOL + bool "DMA-BUF heaps page-pool library" + help + Choose this option to enable the DMA-BUF heaps page-pool library. + config DMABUF_HEAPS_SYSTEM - bool "DMA-BUF System Heap" - depends on DMABUF_HEAPS + tristate "DMA-BUF System Heap" + depends on DMABUF_HEAPS && DMABUF_HEAPS_DEFERRED_FREE && DMABUF_HEAPS_PAGE_POOL help Choose this option to enable the system dmabuf heap. The system heap is backed by pages from the buddy allocator. If in doubt, say Y. config DMABUF_HEAPS_CMA - bool "DMA-BUF CMA Heap" + tristate "DMA-BUF CMA Heap" depends on DMABUF_HEAPS && DMA_CMA help Choose this option to enable dma-buf CMA heap. This heap is backed diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile index 6e54cdec3..4d4cd94a3 100644 --- a/drivers/dma-buf/heaps/Makefile +++ b/drivers/dma-buf/heaps/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += heap-helpers.o +obj-$(CONFIG_DMABUF_HEAPS_DEFERRED_FREE) += deferred-free-helper.o +obj-$(CONFIG_DMABUF_HEAPS_PAGE_POOL) += page_pool.o obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index e55384dc1..fd564aa70 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -2,76 +2,306 @@ /* * DMABUF CMA heap exporter * - * Copyright (C) 2012, 2019 Linaro Ltd. + * Copyright (C) 2012, 2019, 2020 Linaro Ltd. * Author: for ST-Ericsson. + * + * Also utilizing parts of Andrew Davis' SRAM heap: + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. 
Davis */ - #include -#include #include #include #include #include -#include #include +#include +#include #include -#include #include -#include +#include +#include -#include "heap-helpers.h" struct cma_heap { struct dma_heap *heap; struct cma *cma; }; -static void cma_heap_free(struct heap_helper_buffer *buffer) +struct cma_heap_buffer { + struct cma_heap *heap; + struct list_head attachments; + struct mutex lock; + unsigned long len; + struct page *cma_pages; + struct page **pages; + pgoff_t pagecount; + int vmap_cnt; + void *vaddr; +}; + +struct dma_heap_attachment { + struct device *dev; + struct sg_table table; + struct list_head list; + bool mapped; +}; + +static int cma_heap_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + int ret; + + a = kzalloc(sizeof(*a), GFP_KERNEL); + if (!a) + return -ENOMEM; + + ret = sg_alloc_table_from_pages(&a->table, buffer->pages, + buffer->pagecount, 0, + buffer->pagecount << PAGE_SHIFT, + GFP_KERNEL); + if (ret) { + kfree(a); + return ret; + } + + a->dev = attachment->dev; + INIT_LIST_HEAD(&a->list); + a->mapped = false; + + attachment->priv = a; + + mutex_lock(&buffer->lock); + list_add(&a->list, &buffer->attachments); + mutex_unlock(&buffer->lock); + + return 0; +} + +static void cma_heap_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a = attachment->priv; + + mutex_lock(&buffer->lock); + list_del(&a->list); + mutex_unlock(&buffer->lock); + + sg_free_table(&a->table); + kfree(a); +} + +static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) { - struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap); - unsigned long nr_pages = buffer->pagecount; - struct page *cma_pages = buffer->priv_virt; + struct dma_heap_attachment *a = attachment->priv; + struct sg_table *table = &a->table; + int attrs = attachment->dma_map_attrs; + int ret; + + ret = dma_map_sgtable(attachment->dev, table, direction, attrs); + if (ret) + return ERR_PTR(-ENOMEM); + a->mapped = true; + return table; +} + +static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + int attrs = attachment->dma_map_attrs; + + a->mapped = false; + dma_unmap_sgtable(attachment->dev, table, direction, attrs); +} + +static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + if (buffer->vmap_cnt) + invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); + + mutex_lock(&buffer->lock); + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_cpu(a->dev, &a->table, direction); + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + if (buffer->vmap_cnt) + flush_kernel_vmap_range(buffer->vaddr, buffer->len); + + mutex_lock(&buffer->lock); + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_device(a->dev, &a->table, direction); + } + mutex_unlock(&buffer->lock); + + 
return 0; +} + +static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct cma_heap_buffer *buffer = vma->vm_private_data; + + if (vmf->pgoff > buffer->pagecount) + return VM_FAULT_SIGBUS; + + vmf->page = buffer->pages[vmf->pgoff]; + get_page(vmf->page); + + return 0; +} + +static const struct vm_operations_struct dma_heap_vm_ops = { + .fault = cma_heap_vm_fault, +}; + +static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + + if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) + return -EINVAL; + + vma->vm_ops = &dma_heap_vm_ops; + vma->vm_private_data = buffer; + + return 0; +} + +static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer) +{ + void *vaddr; + + vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL); + if (!vaddr) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +static void *cma_heap_vmap(struct dma_buf *dmabuf) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + void *vaddr; + + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) { + buffer->vmap_cnt++; + vaddr = buffer->vaddr; + goto out; + } + + vaddr = cma_heap_do_vmap(buffer); + if (IS_ERR(vaddr)) + goto out; + + buffer->vaddr = vaddr; + buffer->vmap_cnt++; +out: + mutex_unlock(&buffer->lock); + + return vaddr; +} + +static void cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + if (!--buffer->vmap_cnt) { + vunmap(buffer->vaddr); + buffer->vaddr = NULL; + } + mutex_unlock(&buffer->lock); +} + +static void cma_heap_dma_buf_release(struct dma_buf *dmabuf) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct cma_heap *cma_heap = buffer->heap; + + if (buffer->vmap_cnt > 0) { + WARN(1, "%s: buffer still mapped in the kernel\n", __func__); + vunmap(buffer->vaddr); + } /* free page list */ kfree(buffer->pages); /* release memory */ - cma_release(cma_heap->cma, cma_pages, nr_pages); + cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount); kfree(buffer); } -/* dmabuf heap CMA operations functions */ -static int cma_heap_allocate(struct dma_heap *heap, - unsigned long len, - unsigned long fd_flags, - unsigned long heap_flags) +static const struct dma_buf_ops cma_heap_buf_ops = { + .attach = cma_heap_attach, + .detach = cma_heap_detach, + .map_dma_buf = cma_heap_map_dma_buf, + .unmap_dma_buf = cma_heap_unmap_dma_buf, + .begin_cpu_access = cma_heap_dma_buf_begin_cpu_access, + .end_cpu_access = cma_heap_dma_buf_end_cpu_access, + .mmap = cma_heap_mmap, + .vmap = cma_heap_vmap, + .vunmap = cma_heap_vunmap, + .release = cma_heap_dma_buf_release, +}; + +static struct dma_buf *cma_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) { struct cma_heap *cma_heap = dma_heap_get_drvdata(heap); - struct heap_helper_buffer *helper_buffer; - struct page *cma_pages; + struct cma_heap_buffer *buffer; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); size_t size = PAGE_ALIGN(len); - unsigned long nr_pages = size >> PAGE_SHIFT; + pgoff_t pagecount = size >> PAGE_SHIFT; unsigned long align = get_order(size); + struct page *cma_pages; struct dma_buf *dmabuf; int ret = -ENOMEM; pgoff_t pg; - if (align > CONFIG_CMA_ALIGNMENT) - align = CONFIG_CMA_ALIGNMENT; + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); - helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); - if (!helper_buffer) - return -ENOMEM; + 
INIT_LIST_HEAD(&buffer->attachments); + mutex_init(&buffer->lock); + buffer->len = size; - init_heap_helper_buffer(helper_buffer, cma_heap_free); - helper_buffer->heap = heap; - helper_buffer->size = len; + if (align > CONFIG_CMA_ALIGNMENT) + align = CONFIG_CMA_ALIGNMENT; - cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false); + cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL); if (!cma_pages) - goto free_buf; + goto free_buffer; + /* Clear the cma pages */ if (PageHighMem(cma_pages)) { - unsigned long nr_clear_pages = nr_pages; + unsigned long nr_clear_pages = pagecount; struct page *page = cma_pages; while (nr_clear_pages > 0) { @@ -85,7 +315,6 @@ static int cma_heap_allocate(struct dma_heap *heap, */ if (fatal_signal_pending(current)) goto free_cma; - page++; nr_clear_pages--; } @@ -93,44 +322,41 @@ static int cma_heap_allocate(struct dma_heap *heap, memset(page_address(cma_pages), 0, size); } - helper_buffer->pagecount = nr_pages; - helper_buffer->pages = kmalloc_array(helper_buffer->pagecount, - sizeof(*helper_buffer->pages), - GFP_KERNEL); - if (!helper_buffer->pages) { + buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL); + if (!buffer->pages) { ret = -ENOMEM; goto free_cma; } - for (pg = 0; pg < helper_buffer->pagecount; pg++) - helper_buffer->pages[pg] = &cma_pages[pg]; + for (pg = 0; pg < pagecount; pg++) + buffer->pages[pg] = &cma_pages[pg]; + + buffer->cma_pages = cma_pages; + buffer->heap = cma_heap; + buffer->pagecount = pagecount; /* create the dmabuf */ - dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags); + exp_info.exp_name = dma_heap_get_name(heap); + exp_info.ops = &cma_heap_buf_ops; + exp_info.size = buffer->len; + exp_info.flags = fd_flags; + exp_info.priv = buffer; + dmabuf = dma_buf_export(&exp_info); if (IS_ERR(dmabuf)) { ret = PTR_ERR(dmabuf); goto free_pages; } - helper_buffer->dmabuf = dmabuf; - helper_buffer->priv_virt = cma_pages; - - ret = dma_buf_fd(dmabuf, fd_flags); - if (ret < 0) { - dma_buf_put(dmabuf); - /* just return, as put will call release and that will free */ - return ret; - } - - return ret; + return dmabuf; free_pages: - kfree(helper_buffer->pages); + kfree(buffer->pages); free_cma: - cma_release(cma_heap->cma, cma_pages, nr_pages); -free_buf: - kfree(helper_buffer); - return ret; + cma_release(cma_heap->cma, cma_pages, pagecount); +free_buffer: + kfree(buffer); + + return ERR_PTR(ret); } static const struct dma_heap_ops cma_heap_ops = { diff --git a/drivers/dma-buf/heaps/deferred-free-helper.c b/drivers/dma-buf/heaps/deferred-free-helper.c new file mode 100644 index 000000000..1330d279f --- /dev/null +++ b/drivers/dma-buf/heaps/deferred-free-helper.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Deferred dmabuf freeing helper + * + * Copyright (C) 2020 Linaro, Ltd. + * + * Based on the ION page pool code + * Copyright (C) 2011 Google, Inc. 
+ */ + +#include +#include +#include +#include +#include + +#include "deferred-free-helper.h" + +static LIST_HEAD(free_list); +static size_t list_nr_pages; +wait_queue_head_t freelist_waitqueue; +struct task_struct *freelist_task; +static DEFINE_SPINLOCK(free_list_lock); + +void deferred_free(struct deferred_freelist_item *item, + void (*free)(struct deferred_freelist_item*, + enum df_reason), + size_t nr_pages) +{ + unsigned long flags; + + INIT_LIST_HEAD(&item->list); + item->nr_pages = nr_pages; + item->free = free; + + spin_lock_irqsave(&free_list_lock, flags); + list_add(&item->list, &free_list); + list_nr_pages += nr_pages; + spin_unlock_irqrestore(&free_list_lock, flags); + wake_up(&freelist_waitqueue); +} +EXPORT_SYMBOL_GPL(deferred_free); + +static size_t free_one_item(enum df_reason reason) +{ + unsigned long flags; + size_t nr_pages; + struct deferred_freelist_item *item; + + spin_lock_irqsave(&free_list_lock, flags); + if (list_empty(&free_list)) { + spin_unlock_irqrestore(&free_list_lock, flags); + return 0; + } + item = list_first_entry(&free_list, struct deferred_freelist_item, list); + list_del(&item->list); + nr_pages = item->nr_pages; + list_nr_pages -= nr_pages; + spin_unlock_irqrestore(&free_list_lock, flags); + + item->free(item, reason); + return nr_pages; +} + +unsigned long get_freelist_nr_pages(void) +{ + unsigned long nr_pages; + unsigned long flags; + + spin_lock_irqsave(&free_list_lock, flags); + nr_pages = list_nr_pages; + spin_unlock_irqrestore(&free_list_lock, flags); + return nr_pages; +} +EXPORT_SYMBOL_GPL(get_freelist_nr_pages); + +static unsigned long freelist_shrink_count(struct shrinker *shrinker, + struct shrink_control *sc) +{ + return get_freelist_nr_pages(); +} + +static unsigned long freelist_shrink_scan(struct shrinker *shrinker, + struct shrink_control *sc) +{ + unsigned long total_freed = 0; + + if (sc->nr_to_scan == 0) + return 0; + + while (total_freed < sc->nr_to_scan) { + size_t pages_freed = free_one_item(DF_UNDER_PRESSURE); + + if (!pages_freed) + break; + + total_freed += pages_freed; + } + + return total_freed; +} + +static struct shrinker freelist_shrinker = { + .count_objects = freelist_shrink_count, + .scan_objects = freelist_shrink_scan, + .seeks = DEFAULT_SEEKS, + .batch = 0, +}; + +static int deferred_free_thread(void *data) +{ + while (true) { + wait_event_freezable(freelist_waitqueue, + get_freelist_nr_pages() > 0); + + free_one_item(DF_NORMAL); + } + + return 0; +} + +static int deferred_freelist_init(void) +{ + list_nr_pages = 0; + + init_waitqueue_head(&freelist_waitqueue); + freelist_task = kthread_run(deferred_free_thread, NULL, + "%s", "dmabuf-deferred-free-worker"); + if (IS_ERR(freelist_task)) { + pr_err("Creating thread for deferred free failed\n"); + return -1; + } + + sched_set_normal(freelist_task, 19); + + return register_shrinker(&freelist_shrinker); +} +module_init(deferred_freelist_init); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/dma-buf/heaps/deferred-free-helper.h b/drivers/dma-buf/heaps/deferred-free-helper.h new file mode 100644 index 000000000..415440314 --- /dev/null +++ b/drivers/dma-buf/heaps/deferred-free-helper.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef DEFERRED_FREE_HELPER_H +#define DEFERRED_FREE_HELPER_H + +/** + * df_reason - enum for reason why item was freed + * + * This provides a reason for why the free function was called + * on the item. 
This is useful when deferred_free is used in + * combination with a pagepool, so under pressure the page can + * be immediately freed. + * + * DF_NORMAL: Normal deferred free + * + * DF_UNDER_PRESSURE: Free was called because the system + * is under memory pressure. Usually + * from a shrinker. Avoid allocating + * memory in the free call, as it may + * fail. + */ +enum df_reason { + DF_NORMAL, + DF_UNDER_PRESSURE, +}; + +/** + * deferred_freelist_item - item structure for deferred freelist + * + * This is to be added to the structure for whatever you want to + * defer freeing on. + * + * @nr_pages: number of pages used by item to be freed + * @free: function pointer to be called when freeing the item + * @list: list entry for the deferred list + */ +struct deferred_freelist_item { + size_t nr_pages; + void (*free)(struct deferred_freelist_item *i, + enum df_reason reason); + struct list_head list; +}; + +/** + * deferred_free - call to add item to the deferred free list + * + * @item: Pointer to deferred_freelist_item field of a structure + * @free: Function pointer to the free call + * @nr_pages: number of pages to be freed + */ +void deferred_free(struct deferred_freelist_item *item, + void (*free)(struct deferred_freelist_item *i, + enum df_reason reason), + size_t nr_pages); + +unsigned long get_freelist_nr_pages(void); +#endif diff --git a/drivers/dma-buf/heaps/page_pool.c b/drivers/dma-buf/heaps/page_pool.c new file mode 100644 index 000000000..b79e737ba --- /dev/null +++ b/drivers/dma-buf/heaps/page_pool.c @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DMA BUF page pool system + * + * Copyright (C) 2020 Linaro Ltd. + * + * Based on the ION page pool code + * Copyright (C) 2011 Google, Inc. + */ + +#include +#include +#include +#include +#include +#include "page_pool.h" + +static LIST_HEAD(pool_list); +static DEFINE_MUTEX(pool_list_lock); + +static inline +struct page *dmabuf_page_pool_alloc_pages(struct dmabuf_page_pool *pool) +{ + if (fatal_signal_pending(current)) + return NULL; + return alloc_pages(pool->gfp_mask, pool->order); +} + +static inline void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool, + struct page *page) +{ + __free_pages(page, pool->order); +} + +static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page) +{ + int index; + + if (PageHighMem(page)) + index = POOL_HIGHPAGE; + else + index = POOL_LOWPAGE; + + mutex_lock(&pool->mutex); + list_add_tail(&page->lru, &pool->items[index]); + pool->count[index]++; + mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE, + 1 << pool->order); + mutex_unlock(&pool->mutex); +} + +static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index) +{ + struct page *page; + + mutex_lock(&pool->mutex); + page = list_first_entry_or_null(&pool->items[index], struct page, lru); + if (page) { + pool->count[index]--; + list_del(&page->lru); + mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE, + -(1 << pool->order)); + } + mutex_unlock(&pool->mutex); + + return page; +} + +static struct page *dmabuf_page_pool_fetch(struct dmabuf_page_pool *pool) +{ + struct page *page = NULL; + + page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE); + if (!page) + page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE); + + return page; +} + +struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool) +{ + struct page *page = NULL; + + if (WARN_ON(!pool)) + return NULL; + + page = dmabuf_page_pool_fetch(pool); + + if (!page) + page = 
dmabuf_page_pool_alloc_pages(pool); + return page; +} +EXPORT_SYMBOL_GPL(dmabuf_page_pool_alloc); + +void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page) +{ + if (WARN_ON(pool->order != compound_order(page))) + return; + + dmabuf_page_pool_add(pool, page); +} +EXPORT_SYMBOL_GPL(dmabuf_page_pool_free); + +static int dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high) +{ + int count = pool->count[POOL_LOWPAGE]; + + if (high) + count += pool->count[POOL_HIGHPAGE]; + + return count << pool->order; +} + +struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order) +{ + struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL); + int i; + + if (!pool) + return NULL; + + for (i = 0; i < POOL_TYPE_SIZE; i++) { + pool->count[i] = 0; + INIT_LIST_HEAD(&pool->items[i]); + } + pool->gfp_mask = gfp_mask | __GFP_COMP; + pool->order = order; + mutex_init(&pool->mutex); + + mutex_lock(&pool_list_lock); + list_add(&pool->list, &pool_list); + mutex_unlock(&pool_list_lock); + + return pool; +} +EXPORT_SYMBOL_GPL(dmabuf_page_pool_create); + +void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool) +{ + struct page *page; + int i; + + /* Remove us from the pool list */ + mutex_lock(&pool_list_lock); + list_del(&pool->list); + mutex_unlock(&pool_list_lock); + + /* Free any remaining pages in the pool */ + for (i = 0; i < POOL_TYPE_SIZE; i++) { + while ((page = dmabuf_page_pool_remove(pool, i))) + dmabuf_page_pool_free_pages(pool, page); + } + + kfree(pool); +} +EXPORT_SYMBOL_GPL(dmabuf_page_pool_destroy); + +static int dmabuf_page_pool_do_shrink(struct dmabuf_page_pool *pool, gfp_t gfp_mask, + int nr_to_scan) +{ + int freed = 0; + bool high; + + if (current_is_kswapd()) + high = true; + else + high = !!(gfp_mask & __GFP_HIGHMEM); + + if (nr_to_scan == 0) + return dmabuf_page_pool_total(pool, high); + + while (freed < nr_to_scan) { + struct page *page; + + /* Try to free low pages first */ + page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE); + if (!page) + page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE); + + if (!page) + break; + + dmabuf_page_pool_free_pages(pool, page); + freed += (1 << pool->order); + } + + return freed; +} + +static int dmabuf_page_pool_shrink(gfp_t gfp_mask, int nr_to_scan) +{ + struct dmabuf_page_pool *pool; + int nr_total = 0; + int nr_freed; + int only_scan = 0; + + if (!nr_to_scan) + only_scan = 1; + + mutex_lock(&pool_list_lock); + list_for_each_entry(pool, &pool_list, list) { + if (only_scan) { + nr_total += dmabuf_page_pool_do_shrink(pool, + gfp_mask, + nr_to_scan); + } else { + nr_freed = dmabuf_page_pool_do_shrink(pool, + gfp_mask, + nr_to_scan); + nr_to_scan -= nr_freed; + nr_total += nr_freed; + if (nr_to_scan <= 0) + break; + } + } + mutex_unlock(&pool_list_lock); + + return nr_total; +} + +static unsigned long dmabuf_page_pool_shrink_count(struct shrinker *shrinker, + struct shrink_control *sc) +{ + return dmabuf_page_pool_shrink(sc->gfp_mask, 0); +} + +static unsigned long dmabuf_page_pool_shrink_scan(struct shrinker *shrinker, + struct shrink_control *sc) +{ + if (sc->nr_to_scan == 0) + return 0; + return dmabuf_page_pool_shrink(sc->gfp_mask, sc->nr_to_scan); +} + +struct shrinker pool_shrinker = { + .count_objects = dmabuf_page_pool_shrink_count, + .scan_objects = dmabuf_page_pool_shrink_scan, + .seeks = DEFAULT_SEEKS, + .batch = 0, +}; + +static int dmabuf_page_pool_init_shrinker(void) +{ + return register_shrinker(&pool_shrinker); +} +module_init(dmabuf_page_pool_init_shrinker); 
+MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma-buf/heaps/page_pool.h b/drivers/dma-buf/heaps/page_pool.h new file mode 100644 index 000000000..6b083b04f --- /dev/null +++ b/drivers/dma-buf/heaps/page_pool.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DMA BUF PagePool implementation + * Based on earlier ION code by Google + * + * Copyright (C) 2011 Google, Inc. + * Copyright (C) 2020 Linaro Ltd. + */ + +#ifndef _DMABUF_PAGE_POOL_H +#define _DMABUF_PAGE_POOL_H + +#include +#include +#include +#include +#include +#include + +/* page types we track in the pool */ +enum { + POOL_LOWPAGE, /* Clean lowmem pages */ + POOL_HIGHPAGE, /* Clean highmem pages */ + + POOL_TYPE_SIZE, +}; + +/** + * struct dmabuf_page_pool - pagepool struct + * @count[]: array of number of pages of that type in the pool + * @items[]: array of list of pages of the specific type + * @mutex: lock protecting this struct and especially the count + * item list + * @gfp_mask: gfp_mask to use from alloc + * @order: order of pages in the pool + * @list: list node for list of pools + * + * Allows you to keep a pool of pre allocated pages to use + */ +struct dmabuf_page_pool { + int count[POOL_TYPE_SIZE]; + struct list_head items[POOL_TYPE_SIZE]; + struct mutex mutex; + gfp_t gfp_mask; + unsigned int order; + struct list_head list; +}; + +struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, + unsigned int order); +void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool); +struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool); +void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page); + +#endif /* _DMABUF_PAGE_POOL_H */ diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index 0bf688e3c..bbca2e195 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -3,7 +3,11 @@ * DMABUF System heap exporter * * Copyright (C) 2011 Google, Inc. - * Copyright (C) 2019 Linaro Ltd. + * Copyright (C) 2019, 2020 Linaro Ltd. + * + * Portions based off of Andrew Davis' SRAM heap: + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. Davis */ #include @@ -15,99 +19,547 @@ #include #include #include -#include -#include +#include + +#include "page_pool.h" +#include "deferred-free-helper.h" + +static struct dma_heap *sys_heap; +static struct dma_heap *sys_uncached_heap; + +struct system_heap_buffer { + struct dma_heap *heap; + struct list_head attachments; + struct mutex lock; + unsigned long len; + struct sg_table sg_table; + int vmap_cnt; + void *vaddr; + struct deferred_freelist_item deferred_free; + + bool uncached; +}; + +struct dma_heap_attachment { + struct device *dev; + struct sg_table *table; + struct list_head list; + bool mapped; -#include "heap-helpers.h" + bool uncached; +}; -struct dma_heap *sys_heap; +#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP) +#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN) +#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \ + | __GFP_NORETRY) & ~__GFP_RECLAIM) \ + | __GFP_COMP) +static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP}; +/* + * The selection of the orders used for allocation (1MB, 64K, 4K) is designed + * to match with the sizes often found in IOMMUs. Using order 4 pages instead + * of order 0 pages can significantly improve the performance of many IOMMUs + * by reducing TLB pressure and time spent updating page tables. 
+ */ +static const unsigned int orders[] = {8, 4, 0}; +#define NUM_ORDERS ARRAY_SIZE(orders) +struct dmabuf_page_pool *pools[NUM_ORDERS]; -static void system_heap_free(struct heap_helper_buffer *buffer) +static struct sg_table *dup_sg_table(struct sg_table *table) { - pgoff_t pg; + struct sg_table *new_table; + int ret, i; + struct scatterlist *sg, *new_sg; - for (pg = 0; pg < buffer->pagecount; pg++) - __free_page(buffer->pages[pg]); - kfree(buffer->pages); - kfree(buffer); + new_table = kzalloc(sizeof(*new_table), GFP_KERNEL); + if (!new_table) + return ERR_PTR(-ENOMEM); + + ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL); + if (ret) { + kfree(new_table); + return ERR_PTR(-ENOMEM); + } + + new_sg = new_table->sgl; + for_each_sgtable_sg(table, sg, i) { + sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset); + new_sg = sg_next(new_sg); + } + + return new_table; } -static int system_heap_allocate(struct dma_heap *heap, - unsigned long len, - unsigned long fd_flags, - unsigned long heap_flags) +static int system_heap_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) { - struct heap_helper_buffer *helper_buffer; - struct dma_buf *dmabuf; - int ret = -ENOMEM; - pgoff_t pg; + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + struct sg_table *table; + + a = kzalloc(sizeof(*a), GFP_KERNEL); + if (!a) + return -ENOMEM; - helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); - if (!helper_buffer) + table = dup_sg_table(&buffer->sg_table); + if (IS_ERR(table)) { + kfree(a); return -ENOMEM; + } + + a->table = table; + a->dev = attachment->dev; + INIT_LIST_HEAD(&a->list); + a->mapped = false; + a->uncached = buffer->uncached; + attachment->priv = a; + + mutex_lock(&buffer->lock); + list_add(&a->list, &buffer->attachments); + mutex_unlock(&buffer->lock); + + return 0; +} + +static void system_heap_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a = attachment->priv; + + mutex_lock(&buffer->lock); + list_del(&a->list); + mutex_unlock(&buffer->lock); + + sg_free_table(a->table); + kfree(a->table); + kfree(a); +} - init_heap_helper_buffer(helper_buffer, system_heap_free); - helper_buffer->heap = heap; - helper_buffer->size = len; +static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + struct sg_table *table = a->table; + int attr = attachment->dma_map_attrs; + int ret; + + if (a->uncached) + attr |= DMA_ATTR_SKIP_CPU_SYNC; + + ret = dma_map_sgtable(attachment->dev, table, direction, attr); + if (ret) + return ERR_PTR(ret); + + a->mapped = true; + return table; +} + +static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + int attr = attachment->dma_map_attrs; + + if (a->uncached) + attr |= DMA_ATTR_SKIP_CPU_SYNC; + a->mapped = false; + dma_unmap_sgtable(attachment->dev, table, direction, attr); +} - helper_buffer->pagecount = len / PAGE_SIZE; - helper_buffer->pages = kmalloc_array(helper_buffer->pagecount, - sizeof(*helper_buffer->pages), - GFP_KERNEL); - if (!helper_buffer->pages) { - ret = -ENOMEM; - goto err0; +static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct system_heap_buffer 
*buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); + + if (!buffer->uncached) { + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_cpu(a->dev, a->table, direction); + } } + mutex_unlock(&buffer->lock); + + return 0; +} + +static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + flush_kernel_vmap_range(buffer->vaddr, buffer->len); + + if (!buffer->uncached) { + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_device(a->dev, a->table, direction); + } + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct sg_table *table = &buffer->sg_table; + unsigned long addr = vma->vm_start; + struct sg_page_iter piter; + int ret; + + if (buffer->uncached) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + for_each_sgtable_page(table, &piter, vma->vm_pgoff) { + struct page *page = sg_page_iter_page(&piter); + + ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, + vma->vm_page_prot); + if (ret) + return ret; + addr += PAGE_SIZE; + if (addr >= vma->vm_end) + return 0; + } + return 0; +} + +static void *system_heap_do_vmap(struct system_heap_buffer *buffer) +{ + struct sg_table *table = &buffer->sg_table; + int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE; + struct page **pages = vmalloc(sizeof(struct page *) * npages); + struct page **tmp = pages; + struct sg_page_iter piter; + pgprot_t pgprot = PAGE_KERNEL; + void *vaddr; + + if (!pages) + return ERR_PTR(-ENOMEM); + + if (buffer->uncached) + pgprot = pgprot_writecombine(PAGE_KERNEL); + + for_each_sgtable_page(table, &piter, 0) { + WARN_ON(tmp - pages >= npages); + *tmp++ = sg_page_iter_page(&piter); + } + + vaddr = vmap(pages, npages, VM_MAP, pgprot); + vfree(pages); + + if (!vaddr) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +static void *system_heap_vmap(struct dma_buf *dmabuf) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + void *vaddr; + + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) { + buffer->vmap_cnt++; + vaddr = buffer->vaddr; + goto out; + } + + vaddr = system_heap_do_vmap(buffer); + if (IS_ERR(vaddr)) + goto out; + + buffer->vaddr = vaddr; + buffer->vmap_cnt++; +out: + mutex_unlock(&buffer->lock); - for (pg = 0; pg < helper_buffer->pagecount; pg++) { + return vaddr; +} + +static void system_heap_vunmap(struct dma_buf *dmabuf, void *vaddr) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + if (!--buffer->vmap_cnt) { + vunmap(buffer->vaddr); + buffer->vaddr = NULL; + } + mutex_unlock(&buffer->lock); +} + +static int system_heap_zero_buffer(struct system_heap_buffer *buffer) +{ + struct sg_table *sgt = &buffer->sg_table; + struct sg_page_iter piter; + struct page *p; + void *vaddr; + int ret = 0; + + for_each_sgtable_page(sgt, &piter, 0) { + p = sg_page_iter_page(&piter); + vaddr = kmap_atomic(p); + memset(vaddr, 0, PAGE_SIZE); + kunmap_atomic(vaddr); + } + + return ret; +} + +static void system_heap_buf_free(struct deferred_freelist_item *item, + enum df_reason reason) +{ + struct 
system_heap_buffer *buffer; + struct sg_table *table; + struct scatterlist *sg; + int i, j; + + buffer = container_of(item, struct system_heap_buffer, deferred_free); + /* Zero the buffer pages before adding back to the pool */ + if (reason == DF_NORMAL) + if (system_heap_zero_buffer(buffer)) + reason = DF_UNDER_PRESSURE; // On failure, just free + + table = &buffer->sg_table; + for_each_sgtable_sg(table, sg, i) { + struct page *page = sg_page(sg); + + if (reason == DF_UNDER_PRESSURE) { + __free_pages(page, compound_order(page)); + } else { + for (j = 0; j < NUM_ORDERS; j++) { + if (compound_order(page) == orders[j]) + break; + } + dmabuf_page_pool_free(pools[j], page); + } + } + sg_free_table(table); + kfree(buffer); +} + +static void system_heap_dma_buf_release(struct dma_buf *dmabuf) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE; + + deferred_free(&buffer->deferred_free, system_heap_buf_free, npages); +} + +static const struct dma_buf_ops system_heap_buf_ops = { + .attach = system_heap_attach, + .detach = system_heap_detach, + .map_dma_buf = system_heap_map_dma_buf, + .unmap_dma_buf = system_heap_unmap_dma_buf, + .begin_cpu_access = system_heap_dma_buf_begin_cpu_access, + .end_cpu_access = system_heap_dma_buf_end_cpu_access, + .mmap = system_heap_mmap, + .vmap = system_heap_vmap, + .vunmap = system_heap_vunmap, + .release = system_heap_dma_buf_release, +}; + +static struct page *alloc_largest_available(unsigned long size, + unsigned int max_order) +{ + struct page *page; + int i; + + for (i = 0; i < NUM_ORDERS; i++) { + if (size < (PAGE_SIZE << orders[i])) + continue; + if (max_order < orders[i]) + continue; + page = dmabuf_page_pool_alloc(pools[i]); + if (!page) + continue; + return page; + } + return NULL; +} + +static struct dma_buf *system_heap_do_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags, + bool uncached) +{ + struct system_heap_buffer *buffer; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + unsigned long size_remaining = len; + unsigned int max_order = orders[0]; + struct dma_buf *dmabuf; + struct sg_table *table; + struct scatterlist *sg; + struct list_head pages; + struct page *page, *tmp_page; + int i, ret = -ENOMEM; + + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&buffer->attachments); + mutex_init(&buffer->lock); + buffer->heap = heap; + buffer->len = len; + buffer->uncached = uncached; + + INIT_LIST_HEAD(&pages); + i = 0; + while (size_remaining > 0) { /* * Avoid trying to allocate memory if the process - * has been killed by by SIGKILL + * has been killed by SIGKILL */ if (fatal_signal_pending(current)) - goto err1; + goto free_buffer; + + page = alloc_largest_available(size_remaining, max_order); + if (!page) + goto free_buffer; + + list_add_tail(&page->lru, &pages); + size_remaining -= page_size(page); + max_order = compound_order(page); + i++; + } - helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!helper_buffer->pages[pg]) - goto err1; + table = &buffer->sg_table; + if (sg_alloc_table(table, i, GFP_KERNEL)) + goto free_buffer; + + sg = table->sgl; + list_for_each_entry_safe(page, tmp_page, &pages, lru) { + sg_set_page(sg, page, page_size(page), 0); + sg = sg_next(sg); + list_del(&page->lru); } /* create the dmabuf */ - dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags); + exp_info.exp_name = dma_heap_get_name(heap); + exp_info.ops = &system_heap_buf_ops; + 
exp_info.size = buffer->len; + exp_info.flags = fd_flags; + exp_info.priv = buffer; + dmabuf = dma_buf_export(&exp_info); if (IS_ERR(dmabuf)) { ret = PTR_ERR(dmabuf); - goto err1; + goto free_pages; + } + + /* + * For uncached buffers, we need to initially flush cpu cache, since + * the __GFP_ZERO on the allocation means the zeroing was done by the + * cpu and thus it is likely cached. Map (and implicitly flush) and + * unmap it now so we don't get corruption later on. + */ + if (buffer->uncached) { + dma_map_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0); + dma_unmap_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0); } - helper_buffer->dmabuf = dmabuf; + return dmabuf; - ret = dma_buf_fd(dmabuf, fd_flags); - if (ret < 0) { - dma_buf_put(dmabuf); - /* just return, as put will call release and that will free */ - return ret; +free_pages: + for_each_sgtable_sg(table, sg, i) { + struct page *p = sg_page(sg); + + __free_pages(p, compound_order(p)); } + sg_free_table(table); +free_buffer: + list_for_each_entry_safe(page, tmp_page, &pages, lru) + __free_pages(page, compound_order(page)); + kfree(buffer); - return ret; + return ERR_PTR(ret); +} -err1: - while (pg > 0) - __free_page(helper_buffer->pages[--pg]); - kfree(helper_buffer->pages); -err0: - kfree(helper_buffer); +static struct dma_buf *system_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) +{ + return system_heap_do_allocate(heap, len, fd_flags, heap_flags, false); +} - return ret; +static long system_get_pool_size(struct dma_heap *heap) +{ + int i; + long num_pages = 0; + struct dmabuf_page_pool **pool; + + pool = pools; + for (i = 0; i < NUM_ORDERS; i++, pool++) { + num_pages += ((*pool)->count[POOL_LOWPAGE] + + (*pool)->count[POOL_HIGHPAGE]) << (*pool)->order; + } + + return num_pages << PAGE_SHIFT; } static const struct dma_heap_ops system_heap_ops = { .allocate = system_heap_allocate, + .get_pool_size = system_get_pool_size, +}; + +static struct dma_buf *system_uncached_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) +{ + return system_heap_do_allocate(heap, len, fd_flags, heap_flags, true); +} + +/* Dummy function to be used until we can call coerce_mask_and_coherent */ +static struct dma_buf *system_uncached_heap_not_initialized(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) +{ + return ERR_PTR(-EBUSY); +} + +static struct dma_heap_ops system_uncached_heap_ops = { + /* After system_heap_create is complete, we will swap this */ + .allocate = system_uncached_heap_not_initialized, }; static int system_heap_create(void) { struct dma_heap_export_info exp_info; - int ret = 0; + int i; + + for (i = 0; i < NUM_ORDERS; i++) { + pools[i] = dmabuf_page_pool_create(order_flags[i], orders[i]); + + if (!pools[i]) { + int j; + + pr_err("%s: page pool creation failed!\n", __func__); + for (j = 0; j < i; j++) + dmabuf_page_pool_destroy(pools[j]); + return -ENOMEM; + } + } exp_info.name = "system"; exp_info.ops = &system_heap_ops; @@ -115,9 +567,21 @@ static int system_heap_create(void) sys_heap = dma_heap_add(&exp_info); if (IS_ERR(sys_heap)) - ret = PTR_ERR(sys_heap); + return PTR_ERR(sys_heap); - return ret; + exp_info.name = "system-uncached"; + exp_info.ops = &system_uncached_heap_ops; + exp_info.priv = NULL; + + sys_uncached_heap = dma_heap_add(&exp_info); + if (IS_ERR(sys_uncached_heap)) + return PTR_ERR(sys_uncached_heap); + + 
dma_coerce_mask_and_coherent(dma_heap_get_dev(sys_uncached_heap), DMA_BIT_MASK(64)); + mb(); /* make sure we only set allocate after dma_mask is set */ + system_uncached_heap_ops.allocate = system_uncached_heap_allocate; + + return 0; } module_init(system_heap_create); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 109d11fb4..4aac2ec86 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -461,9 +461,10 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane, return 0; obj = drm_gem_fb_get_obj(state->fb, 0); - fence = dma_resv_get_excl_rcu(obj->resv); - drm_atomic_set_fence_for_plane(state, fence); - + if (obj) { + fence = dma_resv_get_excl_rcu(obj->resv); + drm_atomic_set_fence_for_plane(state, fence); + } return 0; } EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb); diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index f135b7959..fc9c53e3e 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -1100,7 +1100,7 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe) return dev->driver->enable_vblank(dev, pipe); } - return -EINVAL; + return 0; } static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe) diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 0c5706abb..380c19070 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -412,6 +412,7 @@ struct dma_buf_attachment { const struct dma_buf_attach_ops *importer_ops; void *importer_priv; void *priv; + unsigned long dma_map_attrs; }; /** diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h index 83b8cfb2d..e8f0e92c2 100644 --- a/include/linux/dma-heap.h +++ b/include/linux/dma-heap.h @@ -16,15 +16,17 @@ struct dma_heap; /** * struct dma_heap_ops - ops to operate on a given heap - * @allocate: allocate dmabuf and return fd + * @allocate: allocate dmabuf and return struct dma_buf ptr + * @get_pool_size: if heap maintains memory pools, get pool size in bytes * - * allocate returns dmabuf fd on success, -errno on error. + * allocate returns dmabuf on success, ERR_PTR(-errno) on error. */ struct dma_heap_ops { - int (*allocate)(struct dma_heap *heap, + struct dma_buf *(*allocate)(struct dma_heap *heap, unsigned long len, unsigned long fd_flags, unsigned long heap_flags); + long (*get_pool_size)(struct dma_heap *heap); }; /** @@ -50,6 +52,15 @@ struct dma_heap_export_info { */ void *dma_heap_get_drvdata(struct dma_heap *heap); +/** + * dma_heap_get_dev() - get device struct for the heap + * @heap: DMA-Heap to retrieve device struct from + * + * Returns: + * The device struct for the heap. + */ +struct device *dma_heap_get_dev(struct dma_heap *heap); + /** * dma_heap_get_name() - get heap name * @heap: DMA-Heap to retrieve private data for @@ -65,4 +76,49 @@ const char *dma_heap_get_name(struct dma_heap *heap); */ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info); +/** + * dma_heap_put - drops a reference to a dmabuf heaps, potentially freeing it + * @heap: heap pointer + */ +void dma_heap_put(struct dma_heap *heap); + +/** + * dma_heap_find - Returns the registered dma_heap with the specified name + * @name: Name of the heap to find + * + * NOTE: dma_heaps returned from this function MUST be released + * using dma_heap_put() when the user is done. 
+ */ +struct dma_heap *dma_heap_find(const char *name); + +/** + * dma_heap_buffer_alloc - Allocate dma-buf from a dma_heap + * @heap: dma_heap to allocate from + * @len: size to allocate + * @fd_flags: flags to set on returned dma-buf fd + * @heap_flags: flags to pass to the dma heap + * + * This is for internal dma-buf allocations only. + */ +struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, + unsigned int fd_flags, + unsigned int heap_flags); + +/** dma_heap_buffer_free - Free dma_buf allocated by dma_heap_buffer_alloc + * @dma_buf: dma_buf to free + * + * This is really only a simple wrapper to dma_buf_put() + */ +void dma_heap_buffer_free(struct dma_buf *); + +/** + * dma_heap_bufferfd_alloc - Allocate dma-buf fd from a dma_heap + * @heap: dma_heap to allocate from + * @len: size to allocate + * @fd_flags: flags to set on returned dma-buf fd + * @heap_flags: flags to pass to the dma heap + */ +int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len, + unsigned int fd_flags, + unsigned int heap_flags); #endif /* _DMA_HEAPS_H */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e2f00be4b..dfc23d434 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5713,16 +5713,19 @@ int sched_setscheduler(struct task_struct *p, int policy, { return _sched_setscheduler(p, policy, param, true); } +EXPORT_SYMBOL_GPL(sched_setscheduler); int sched_setattr(struct task_struct *p, const struct sched_attr *attr) { return __sched_setscheduler(p, attr, true, true); } +EXPORT_SYMBOL_GPL(sched_setattr); int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) { return __sched_setscheduler(p, attr, false, true); } +EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); /** * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. -- 2.25.1
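For reference, the in-kernel allocation interface this patch adds to include/linux/dma-heap.h (dma_heap_find(), dma_heap_buffer_alloc(), dma_heap_buffer_free(), dma_heap_put()) would be used from a client driver roughly as sketched below. This is a minimal illustration only, not part of the patch: the function name example_use_system_heap(), the choice of the "system" heap, and the O_RDWR | O_CLOEXEC fd_flags are assumptions for the example.

/*
 * Sketch of a kernel-side client of the dma-heap API exported by this
 * patch. Names marked "example_" are illustrative, not from the patch.
 */
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/fcntl.h>

static int example_use_system_heap(size_t len)
{
	struct dma_heap *heap;
	struct dma_buf *dmabuf;

	/* dma_heap_find() takes a reference; balance it with dma_heap_put() */
	heap = dma_heap_find("system");
	if (!heap)
		return -ENODEV;

	/* In-kernel allocation: returns a dma_buf, no fd is installed */
	dmabuf = dma_heap_buffer_alloc(heap, len, O_RDWR | O_CLOEXEC, 0);
	if (IS_ERR(dmabuf)) {
		dma_heap_put(heap);
		return PTR_ERR(dmabuf);
	}

	/* ... attach and map the dma_buf as usual ... */

	dma_heap_buffer_free(dmabuf);	/* thin wrapper around dma_buf_put() */
	dma_heap_put(heap);
	return 0;
}

If only a file descriptor is needed (for example to hand the buffer to userspace), dma_heap_bufferfd_alloc() can be called instead of dma_heap_buffer_alloc(); it installs the fd and drops the dma_buf reference itself on failure.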