// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 */

#include <linux/dma-mapping.h>

#include "dfl-afu.h"
/* in afu_dma_region_init() */
afu->dma_regions = RB_ROOT;
/**
 * afu_dma_adjust_locked_vm - adjust locked memory
 * @incr: increase or decrease locked memory
 *
 * Increase or decrease the locked memory size with npages input.
 *
 * Return -ENOMEM if locked memory size is over the limit and no CAP_IPC_LOCK.
 */
static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr)
{
	unsigned long locked, lock_limit;
	int ret = 0;

	/* the task is exiting */
	if (!current->mm)
		return 0;

	down_write(&current->mm->mmap_sem);

	if (incr) {
		locked = current->mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			current->mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > current->mm->locked_vm))
			npages = current->mm->locked_vm;
		current->mm->locked_vm -= npages;
	}

	dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid,
		incr ? '+' : '-', npages << PAGE_SHIFT,
		current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK),
		ret ? "- exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}
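/*
 * Editor's example: a minimal user-space sketch of the same RLIMIT_MEMLOCK
 * page accounting the function above performs. Everything below is local
 * to this illustration and is not part of the driver.
 */
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl;
	long page_size = sysconf(_SC_PAGESIZE);
	unsigned long npages = 16;	/* pages we would like to lock */

	if (getrlimit(RLIMIT_MEMLOCK, &rl))
		return 1;

	/* mirrors: locked > lock_limit && !capable(CAP_IPC_LOCK) -> -ENOMEM */
	if (npages > rl.rlim_cur / (unsigned long)page_size)
		printf("would exceed RLIMIT_MEMLOCK (%llu bytes)\n",
		       (unsigned long long)rl.rlim_cur);
	else
		printf("within RLIMIT_MEMLOCK (%llu bytes)\n",
		       (unsigned long long)rl.rlim_cur);
	return 0;
}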
/**
 * afu_dma_pin_pages - pin pages of given dma memory region
 * @region: dma memory region to be pinned
 *
 * Pin all the pages of given dfl_afu_dma_region.
 * Return 0 for success or negative error code.
 */
static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
			     struct dfl_afu_dma_region *region)
{
	int npages = region->length >> PAGE_SHIFT;
	struct device *dev = &pdata->dev->dev;
	int ret, pinned;

	ret = afu_dma_adjust_locked_vm(dev, npages, true);
	if (ret)
		return ret;

	region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
	if (!region->pages) {
		ret = -ENOMEM;
		goto unlock_vm;
	}

	pinned = get_user_pages_fast(region->user_addr, npages, 1,
				     region->pages);
	if (pinned < 0) {
		ret = pinned;
		goto free_pages;
	} else if (pinned != npages) {
		ret = -EFAULT;
		goto put_pages;
	}

	dev_dbg(dev, "%d pages pinned\n", npages);

	return 0;

put_pages:
	put_all_pages(region->pages, pinned);
free_pages:
	kfree(region->pages);
unlock_vm:
	afu_dma_adjust_locked_vm(dev, npages, false);
	return ret;
}
/**
 * afu_dma_unpin_pages - unpin pages of given dma memory region
 * @region: dma memory region to be unpinned
 */
static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
				struct dfl_afu_dma_region *region)
{
	long npages = region->length >> PAGE_SHIFT;
	struct device *dev = &pdata->dev->dev;

	put_all_pages(region->pages, npages);
	kfree(region->pages);
	afu_dma_adjust_locked_vm(dev, npages, false);

	dev_dbg(dev, "%ld pages unpinned\n", npages);
}
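/*
 * For reference: the put_all_pages() helper used by the two functions above
 * is defined near the top of this file (reproduced here for readability).
 */
static void put_all_pages(struct page **pages, int npages)
{
	int i;

	for (i = 0; i < npages; i++)
		if (pages[i])
			put_page(pages[i]);
}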
/**
 * afu_dma_check_continuous_pages - check if pages are continuous
 * @region: dma memory region
 *
 * Return true if pages of given dma memory region have continuous physical
 * address, otherwise return false.
 */
static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region)
{
	int npages = region->length >> PAGE_SHIFT;
	int i;

	for (i = 0; i < npages - 1; i++)
		if (page_to_pfn(region->pages[i]) + 1 !=
		    page_to_pfn(region->pages[i + 1]))
			return false;

	return true;
}
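/*
 * Editor's example: the same adjacency test over a plain array of
 * page-frame numbers, runnable in user space; all names are local to
 * this illustration.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

static bool pfns_are_contiguous(const unsigned long *pfn, size_t npages)
{
	size_t i;

	for (i = 0; i + 1 < npages; i++)
		if (pfn[i] + 1 != pfn[i + 1])
			return false;
	return true;
}

int main(void)
{
	unsigned long ok[] = { 100, 101, 102 };
	unsigned long gap[] = { 100, 102, 103 };

	assert(pfns_are_contiguous(ok, 3));
	assert(!pfns_are_contiguous(gap, 3));
	return 0;
}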
/**
 * dma_region_check_iova - check if memory area is fully contained in the region
 * @region: dma memory region
 * @iova: address of the dma memory area
 * @size: size of the dma memory area
 *
 * Compare the dma memory area defined by @iova and @size with given dma region.
 * Return true if memory area is fully contained in the region, otherwise false.
 */
static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
				  u64 iova, u64 size)
{
	if (!size && region->iova != iova)
		return false;

	return (region->iova <= iova) &&
		(region->length + region->iova >= iova + size);
}
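/*
 * Editor's example: the same containment predicate with fixed-width types
 * and a few worked inputs for a region covering [0x1000, 0x3000).
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool check_iova(uint64_t r_iova, uint64_t r_len,
		       uint64_t iova, uint64_t size)
{
	/* size == 0 means "must start exactly at iova" in this convention */
	if (!size && r_iova != iova)
		return false;

	return r_iova <= iova && r_iova + r_len >= iova + size;
}

int main(void)
{
	assert(check_iova(0x1000, 0x2000, 0x1000, 0x2000));  /* whole region */
	assert(check_iova(0x1000, 0x2000, 0x1800, 0x100));   /* inner slice */
	assert(!check_iova(0x1000, 0x2000, 0x2800, 0x1000)); /* past the end */
	assert(check_iova(0x1000, 0x2000, 0x1000, 0));       /* start lookup */
	return 0;
}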
/**
 * afu_dma_region_add - add given dma region to rbtree
 * @region: dma region to be added
 *
 * Return 0 for success, -EEXIST if dma region has already been added.
 *
 * Needs to be called with pdata->lock held.
 */
static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
			      struct dfl_afu_dma_region *region)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
	struct rb_node **new, *parent = NULL;

	dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
		(unsigned long long)region->iova);

	new = &afu->dma_regions.rb_node;

	while (*new) {
		struct dfl_afu_dma_region *this;

		this = container_of(*new, struct dfl_afu_dma_region, node);

		parent = *new;

		if (dma_region_check_iova(this, region->iova, region->length))
			return -EEXIST;

		if (region->iova < this->iova)
			new = &((*new)->rb_left);
		else if (region->iova > this->iova)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&region->node, parent, new);
	rb_insert_color(&region->node, &afu->dma_regions);

	return 0;
}
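/*
 * Editor's example: the same descending-pointer insert on a plain,
 * unbalanced binary search tree keyed by iova. The driver rejects a new
 * region contained in an existing one; for brevity this sketch uses a
 * general interval-overlap test instead. All names are local here.
 */
#include <stddef.h>
#include <stdint.h>

struct node {
	uint64_t iova, length;
	struct node *left, *right;
};

/* returns 0 on success, -1 if the new range overlaps an existing one */
static int tree_insert(struct node **root, struct node *n)
{
	while (*root) {
		struct node *this = *root;

		if (n->iova < this->iova + this->length &&
		    this->iova < n->iova + n->length)
			return -1;

		root = (n->iova < this->iova) ? &this->left : &this->right;
	}
	*root = n;
	return 0;
}

int main(void)
{
	struct node a = { 0x1000, 0x1000, NULL, NULL };
	struct node b = { 0x3000, 0x1000, NULL, NULL };
	struct node c = { 0x1800, 0x100,  NULL, NULL }; /* overlaps a */
	struct node *root = NULL;

	return (tree_insert(&root, &a) || tree_insert(&root, &b) ||
		!tree_insert(&root, &c)) ? 1 : 0;
}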
/**
 * afu_dma_region_remove - remove given dma region from rbtree
 * @region: dma region to be removed
 *
 * Needs to be called with pdata->lock held.
 */
static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
				  struct dfl_afu_dma_region *region)
{
	struct dfl_afu *afu;

	dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
		(unsigned long long)region->iova);

	afu = dfl_fpga_pdata_get_private(pdata);
	rb_erase(&region->node, &afu->dma_regions);
}
/**
 * afu_dma_region_destroy - destroy all regions in rbtree
 * @pdata: feature device platform data
 *
 * Needs to be called with pdata->lock held.
 */
void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
	struct rb_node *node = rb_first(&afu->dma_regions);
	struct dfl_afu_dma_region *region;

	while (node) {
		region = container_of(node, struct dfl_afu_dma_region, node);

		dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
			(unsigned long long)region->iova);

		rb_erase(node, &afu->dma_regions);

		if (region->iova)
			dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
				       region->iova, region->length,
				       DMA_BIDIRECTIONAL);

		if (region->pages)
			afu_dma_unpin_pages(pdata, region);

		node = rb_next(node);
		kfree(region);
	}
}
/**
 * afu_dma_region_find - find the dma region from rbtree based on iova and size
 * @iova: address of the dma memory area
 * @size: size of the dma memory area
 *
 * It finds the dma region from the rbtree based on @iova and @size:
 * - if @size == 0, it finds the dma region which starts from @iova
 * - otherwise, it finds the dma region which fully contains
 *   [@iova, @iova + @size)
 * If nothing is matched returns NULL.
 *
 * Needs to be called with pdata->lock held.
 */
struct dfl_afu_dma_region *
afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
	struct rb_node *node = afu->dma_regions.rb_node;
	struct device *dev = &pdata->dev->dev;

	while (node) {
		struct dfl_afu_dma_region *region;

		region = container_of(node, struct dfl_afu_dma_region, node);

		if (dma_region_check_iova(region, iova, size)) {
			dev_dbg(dev, "find region (iova = %llx)\n",
				(unsigned long long)region->iova);
			return region;
		}

		if (iova < region->iova)
			node = node->rb_left;
		else if (iova > region->iova)
			node = node->rb_right;
		else
			/* the iova region is not fully covered. */
			break;
	}

	dev_dbg(dev, "region with iova %llx and size %llx is not found\n",
		(unsigned long long)iova, (unsigned long long)size);

	return NULL;
}
/**
 * afu_dma_region_find_iova - find the dma region from rbtree by iova
 * @iova: address of the dma region
 *
 * Needs to be called with pdata->lock held.
 */
static struct dfl_afu_dma_region *
afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
{
	return afu_dma_region_find(pdata, iova, 0);
}
/**
 * afu_dma_map_region - map memory region for dma
 * @user_addr: address of the memory region
 * @length: size of the memory region
 * @iova: pointer of iova of mapped dma memory region
 *
 * Map memory region defined by @user_addr and @length, and return dma address
 * of the memory region via @iova.
 * Return 0 for success, otherwise error code.
 */
int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
		       u64 user_addr, u64 length, u64 *iova)
{
	struct dfl_afu_dma_region *region;
	int ret;

	/*
	 * Check Inputs, only accept page-aligned user memory region with
	 * valid length.
	 */
	if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length) || !length)
		return -EINVAL;

	/* Check overflow */
	if (user_addr + length < user_addr)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, (void __user *)(unsigned long)user_addr,
		       length))
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->user_addr = user_addr;
	region->length = length;

	/* Pin the user memory region. */
	ret = afu_dma_pin_pages(pdata, region);
	if (ret) {
		dev_err(&pdata->dev->dev, "failed to pin memory region\n");
		goto free_region;
	}

	/* Only accept continuous pages, return error otherwise. */
	if (!afu_dma_check_continuous_pages(region)) {
		dev_err(&pdata->dev->dev, "pages are not continuous\n");
		ret = -EINVAL;
		goto unpin_pages;
	}

	/* As pages are continuous, start to map it for dma. */
	region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
				    region->pages[0], 0,
				    region->length,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
		dev_err(&pdata->dev->dev, "failed to map for dma\n");
		ret = -EFAULT;
		goto unpin_pages;
	}

	*iova = region->iova;

	mutex_lock(&pdata->lock);
	ret = afu_dma_region_add(pdata, region);
	mutex_unlock(&pdata->lock);
	if (ret) {
		dev_err(&pdata->dev->dev, "failed to add dma region\n");
		goto unmap_dma;
	}

	return 0;

unmap_dma:
	dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
		       region->iova, region->length, DMA_BIDIRECTIONAL);
unpin_pages:
	afu_dma_unpin_pages(pdata, region);
free_region:
	kfree(region);
	return ret;
}
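/*
 * Editor's example: exercising this path from user space through the DFL
 * port device, assuming the UAPI in <linux/fpga-dfl.h>
 * (DFL_FPGA_PORT_DMA_MAP, struct dfl_fpga_port_dma_map). The device path
 * is a hypothetical example; hugepage backing is used because the driver
 * only accepts physically continuous pages.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/fpga-dfl.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;			/* one 2 MiB huge page */
	int fd = open("/dev/dfl-port.0", O_RDWR);	/* example path */
	struct dfl_fpga_port_dma_map map;
	void *buf;

	if (fd < 0)
		return 1;

	/* mmap() output is page aligned, as afu_dma_map_region() requires */
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memset(&map, 0, sizeof(map));
	map.argsz = sizeof(map);
	map.user_addr = (unsigned long)buf;
	map.length = len;

	if (ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map))
		return 1;

	printf("mapped at iova 0x%llx\n", (unsigned long long)map.iova);
	return 0;
}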
/**
 * afu_dma_unmap_region - unmap dma memory region
 * @iova: dma address of the region
 *
 * Unmap dma memory region based on @iova.
 * Return 0 for success, otherwise error code.
 */
int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
{
	struct dfl_afu_dma_region *region;

	mutex_lock(&pdata->lock);
	region = afu_dma_region_find_iova(pdata, iova);
	if (!region) {
		mutex_unlock(&pdata->lock);
		return -EINVAL;
	}

	if (region->in_use) {
		mutex_unlock(&pdata->lock);
		return -EBUSY;
	}

	afu_dma_region_remove(pdata, region);
	mutex_unlock(&pdata->lock);

	dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
		       region->iova, region->length, DMA_BIDIRECTIONAL);
	afu_dma_unpin_pages(pdata, region);
	kfree(region);

	return 0;
}
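/*
 * Editor's example: the matching user-space teardown, again assuming the
 * UAPI in <linux/fpga-dfl.h> (DFL_FPGA_PORT_DMA_UNMAP). The fd and iova
 * would come from the mapping step shown above; dma_unmap_buffer() is a
 * name local to this illustration.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fpga-dfl.h>

static int dma_unmap_buffer(int fd, unsigned long long iova)
{
	struct dfl_fpga_port_dma_unmap unmap;

	memset(&unmap, 0, sizeof(unmap));
	unmap.argsz = sizeof(unmap);
	unmap.iova = iova;

	/* fails with -EINVAL for an unknown iova, -EBUSY if still in use */
	return ioctl(fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap);
}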