
Lines matching "region" in the FPGA Device Feature List (DFL) AFU DMA region driver

 * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management
 * afu_dma_pin_pages - pin pages of given dma memory region
 * @region: dma memory region to be pinned

static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
                             struct dfl_afu_dma_region *region)
        int npages = region->length >> PAGE_SHIFT;
        ...
        region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
        if (!region->pages) {
        ...
        pinned = get_user_pages_fast(region->user_addr, npages, 1,
                                     region->pages);
        ...
        put_all_pages(region->pages, pinned);
        ...
        kfree(region->pages);
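The matcher only surfaced scattered lines of this function. As a reading aid, here is a minimal kernel-context sketch of the pin-and-unwind pattern those lines trace, assuming the older get_user_pages_fast(addr, nr_pages, write, pages) signature that the shown call matches and a file-local put_all_pages() helper that drops already-taken page references; it is a sketch, not the verbatim upstream body.

/*
 * Minimal sketch of the pin/error-unwind flow; put_all_pages() is
 * assumed to put_page() every non-NULL entry, as in this file.
 */
static int pin_region_sketch(struct dfl_afu_dma_region *region)
{
        int npages = region->length >> PAGE_SHIFT;
        int pinned;

        region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
        if (!region->pages)
                return -ENOMEM;

        pinned = get_user_pages_fast(region->user_addr, npages, 1,
                                     region->pages);
        if (pinned < 0) {
                kfree(region->pages);
                return pinned;
        }
        if (pinned != npages) {
                /* Partially pinned: release what was taken, then fail. */
                put_all_pages(region->pages, pinned);
                kfree(region->pages);
                return -EFAULT;
        }

        return 0;
}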
 * afu_dma_unpin_pages - unpin pages of given dma memory region
 * @region: dma memory region to be unpinned

static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
                                struct dfl_afu_dma_region *region)
        long npages = region->length >> PAGE_SHIFT;
        ...
        put_all_pages(region->pages, npages);
        kfree(region->pages);
 * @region: dma memory region
 *
 * Return true if the pages of the given dma memory region have contiguous
 * physical addresses, otherwise false.

static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region)
        int npages = region->length >> PAGE_SHIFT;
        ...
        if (page_to_pfn(region->pages[i]) + 1 !=
            page_to_pfn(region->pages[i + 1]))
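The test is a linear scan of neighbouring page frame numbers; a sketch of the complete loop, of which the match shows only the comparison:

/*
 * Sketch of the contiguity test: the pinned region is physically
 * contiguous iff every page frame number is exactly one more than
 * its predecessor's.
 */
static bool check_continuous_sketch(struct dfl_afu_dma_region *region)
{
        int npages = region->length >> PAGE_SHIFT;
        int i;

        for (i = 0; i < npages - 1; i++)
                if (page_to_pfn(region->pages[i]) + 1 !=
                    page_to_pfn(region->pages[i + 1]))
                        return false;

        return true;
}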
 * dma_region_check_iova - check if memory area is fully contained in the region
 * @region: dma memory region
 *
 * Compare the dma memory area defined by @iova and @size with the given dma
 * region. Return true if the memory area is fully contained in the region,
 * otherwise false.

static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
                                  u64 iova, u64 size)
        if (!size && region->iova != iova)
                return false;

        return (region->iova <= iova) &&
               (region->length + region->iova >= iova + size);
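The same containment test restated as self-contained userspace C, with the size == 0 special case (exact-start lookup) called out; r_iova and r_len stand in for region->iova and region->length:

#include <stdbool.h>
#include <stdint.h>

static bool range_contained(uint64_t r_iova, uint64_t r_len,
                            uint64_t iova, uint64_t size)
{
        /* size == 0: caller asks for the region starting exactly at iova. */
        if (!size && r_iova != iova)
                return false;

        /* Otherwise [iova, iova + size) must lie inside the region. */
        return r_iova <= iova && r_iova + r_len >= iova + size;
}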
 * afu_dma_region_add - add given dma region to rbtree
 * @region: dma region to be added
 *
 * Return 0 for success, -EEXIST if dma region has already been added.

static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
                              struct dfl_afu_dma_region *region)
        ...
        dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
                (unsigned long long)region->iova);
        ...
                if (dma_region_check_iova(this, region->iova, region->length))
                        return -EEXIST;

                if (region->iova < this->iova)
                        new = &((*new)->rb_left);
                else if (region->iova > this->iova)
                        new = &((*new)->rb_right);
        ...
        rb_link_node(&region->node, parent, new);
        rb_insert_color(&region->node, &afu->dma_regions);
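The matched lines come from the standard Linux rbtree insertion idiom (descend to a leaf, then rb_link_node() plus rb_insert_color()). A kernel-context sketch of the whole walk, assuming struct dfl_afu keeps the regions in an rb_root named dma_regions, as the rb_insert_color() call above indicates:

static int region_add_sketch(struct dfl_afu *afu,
                             struct dfl_afu_dma_region *region)
{
        struct rb_node **new = &afu->dma_regions.rb_node, *parent = NULL;

        while (*new) {
                struct dfl_afu_dma_region *this;

                this = container_of(*new, struct dfl_afu_dma_region, node);
                parent = *new;

                /* Reject a range that falls inside an existing region. */
                if (dma_region_check_iova(this, region->iova, region->length))
                        return -EEXIST;

                if (region->iova < this->iova)
                        new = &((*new)->rb_left);
                else if (region->iova > this->iova)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        rb_link_node(&region->node, parent, new);
        rb_insert_color(&region->node, &afu->dma_regions);

        return 0;
}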
 * afu_dma_region_remove - remove given dma region from rbtree
 * @region: dma region to be removed

static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
                                  struct dfl_afu_dma_region *region)
        ...
        dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
                (unsigned long long)region->iova);
        ...
        rb_erase(&region->node, &afu->dma_regions);
In afu_dma_region_destroy():

        struct dfl_afu_dma_region *region;
        ...
        region = container_of(node, struct dfl_afu_dma_region, node);
        ...
        dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
                (unsigned long long)region->iova);
        ...
        if (region->iova)
                dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
                               region->iova, region->length,
                               DMA_BIDIRECTIONAL);

        if (region->pages)
                afu_dma_unpin_pages(pdata, region);
        ...
        kfree(region);
 * afu_dma_region_find - find the dma region from rbtree based on iova and size
 *
 * It finds the dma region from the rbtree based on @iova and @size:
 * - if @size == 0, it finds the dma region which starts from @iova
 * - otherwise, it finds the dma region which fully contains
 *   [@iova, @iova + @size)

        struct dfl_afu_dma_region *region;
        ...
        region = container_of(node, struct dfl_afu_dma_region, node);
        ...
        if (dma_region_check_iova(region, iova, size)) {
                dev_dbg(dev, "find region (iova = %llx)\n",
                        (unsigned long long)region->iova);
                return region;
        }
        ...
        if (iova < region->iova)
                node = node->rb_left;
        else if (iova > region->iova)
                node = node->rb_right;
        else
                /* the iova region is not fully covered */
                break;
        ...
        dev_dbg(dev, "region with iova %llx and size %llx is not found\n",
                (unsigned long long)iova, (unsigned long long)size);
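A matching kernel-context sketch of the lookup walk: a containment hit returns the region; otherwise the walk descends by iova, and an equal start address that fails the containment test means the requested span is not fully covered, so the search stops:

static struct dfl_afu_dma_region *
region_find_sketch(struct dfl_afu *afu, u64 iova, u64 size)
{
        struct rb_node *node = afu->dma_regions.rb_node;

        while (node) {
                struct dfl_afu_dma_region *region;

                region = container_of(node, struct dfl_afu_dma_region, node);

                if (dma_region_check_iova(region, iova, size))
                        return region;

                if (iova < region->iova)
                        node = node->rb_left;
                else if (iova > region->iova)
                        node = node->rb_right;
                else
                        break;  /* same start, span not fully covered */
        }

        return NULL;
}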
 * afu_dma_region_find_iova - find the dma region from rbtree by iova
 * @iova: address of the dma region
 * afu_dma_map_region - map memory region for dma
 * @user_addr: address of the memory region
 * @length: size of the memory region
 *
 * Map memory region defined by @user_addr and @length, and return dma address
 * of the memory region via @iova.

        struct dfl_afu_dma_region *region;
        ...
        /*
         * Check inputs: only accept a page-aligned user memory region with
         * valid length.
         */
        ...
        region = kzalloc(sizeof(*region), GFP_KERNEL);
        if (!region)
                return -ENOMEM;

        region->user_addr = user_addr;
        region->length = length;

        /* Pin the user memory region. */
        ret = afu_dma_pin_pages(pdata, region);
        if (ret) {
                dev_err(&pdata->dev->dev, "failed to pin memory region\n");
        ...
        if (!afu_dma_check_continuous_pages(region)) {
        ...
        region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
                                    region->pages[0], 0,
                                    region->length,
                                    DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
        ...
        *iova = region->iova;
        ...
        ret = afu_dma_region_add(pdata, region);
        if (ret) {
                dev_err(&pdata->dev->dev, "failed to add dma region\n");
        ...
        /* error unwind */
        dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
                       region->iova, region->length, DMA_BIDIRECTIONAL);
        afu_dma_unpin_pages(pdata, region);
        kfree(region);
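The "check inputs" comment above corresponds to screening the user range before anything is allocated or pinned. A hedged sketch of such checks, with the caveat that the exact upstream set may differ:

/*
 * Only a page-aligned, non-empty, non-wrapping user range should reach
 * the allocation and pinning steps; everything else is -EINVAL.
 */
static int map_input_check_sketch(u64 user_addr, u64 length)
{
        if (!length)
                return -EINVAL;

        if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length))
                return -EINVAL;

        if (user_addr + length < user_addr)     /* wrap-around overflow */
                return -EINVAL;

        return 0;
}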
 * afu_dma_unmap_region - unmap dma memory region
 * @iova: dma address of the region
 *
 * Unmap dma memory region based on @iova.

        struct dfl_afu_dma_region *region;
        ...
        region = afu_dma_region_find_iova(pdata, iova);
        if (!region) {
        ...
        if (region->in_use) {
        ...
        afu_dma_region_remove(pdata, region);
        ...
        dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
                       region->iova, region->length, DMA_BIDIRECTIONAL);
        afu_dma_unpin_pages(pdata, region);
        kfree(region);
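For context, a hedged userspace sketch of how the map and unmap paths above are typically reached, via the DFL port ioctls declared in <linux/fpga-dfl.h>. The /dev/dfl-port.0 node name is an assumption about the local setup, and error handling is abbreviated:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fpga-dfl.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        struct dfl_fpga_port_dma_map map;
        struct dfl_fpga_port_dma_unmap unmap;
        void *buf = NULL;
        int fd;

        fd = open("/dev/dfl-port.0", O_RDWR);   /* assumed device node */
        if (fd < 0 || posix_memalign(&buf, page, page))
                return 1;

        memset(&map, 0, sizeof(map));
        map.argsz = sizeof(map);
        map.user_addr = (uintptr_t)buf; /* page-aligned, as the driver requires */
        map.length = page;              /* page-aligned, non-zero */
        if (ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map))
                return 1;
        printf("mapped at iova 0x%llx\n", (unsigned long long)map.iova);

        memset(&unmap, 0, sizeof(unmap));
        unmap.argsz = sizeof(unmap);
        unmap.iova = map.iova;
        ioctl(fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap);

        free(buf);
        close(fd);
        return 0;
}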