/drivers/gpu/drm/ttm/

ttm_pool.c
    167  unsigned int num_pages = last - first;  in ttm_pool_apply_caching() local
    169  if (!num_pages)  in ttm_pool_apply_caching()
    176  return set_pages_array_wc(first, num_pages);  in ttm_pool_apply_caching()
    178  return set_pages_array_uc(first, num_pages);  in ttm_pool_apply_caching()
    213  unsigned int num_pages)  in ttm_pool_unmap() argument
    219  dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,  in ttm_pool_unmap()
    226  unsigned int i, num_pages = 1 << pt->order;  in ttm_pool_type_give() local
    228  for (i = 0; i < num_pages; ++i) {  in ttm_pool_type_give()
    323  unsigned int num_pages;  in ttm_pool_shrink() local
    334  num_pages = 1 << pt->order;  in ttm_pool_shrink()
    [all …]

ttm_tt.c
    113  ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);  in ttm_tt_alloc_page_directory()
    122  ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +  in ttm_dma_tt_alloc_page_directory()
    127  ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);  in ttm_dma_tt_alloc_page_directory()
    133  ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),  in ttm_sg_tt_alloc_page_directory()
    152  ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;  in ttm_tt_init_fields()
    225  for (i = 0; i < ttm->num_pages; ++i) {  in ttm_tt_swapin()
    265  loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;  in ttm_tt_swapout()
    281  for (i = 0; i < ttm->num_pages; ++i) {  in ttm_tt_swapout()
    301  return ttm->num_pages;  in ttm_tt_swapout()
    321  atomic_long_add(ttm->num_pages, &ttm_pages_allocated);  in ttm_tt_populate()
    [all …]

ttm_device.c
    69  unsigned long num_pages, num_dma32;  in ttm_global_init() local
    87  num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;  in ttm_global_init()
    88  num_pages /= 2;  in ttm_global_init()
    95  ttm_pool_mgr_init(num_pages);  in ttm_global_init()
    96  ttm_tt_mgr_init(num_pages, num_dma32);  in ttm_global_init()
    158  uint32_t num_pages;  in ttm_device_swapout() local
    163  num_pages = PFN_UP(bo->base.size);  in ttm_device_swapout()
    167  return num_pages;  in ttm_device_swapout()

ttm_bo_util.c
    85  u32 num_pages,  in ttm_move_memcpy() argument
    100  for (i = 0; i < num_pages; ++i) {  in ttm_move_memcpy()
    112  for (i = 0; i < num_pages; ++i) {  in ttm_move_memcpy()
    337  unsigned long num_pages,  in ttm_bo_kmap_ttm() argument
    357  if (num_pages == 1 && ttm->caching == ttm_cached &&  in ttm_bo_kmap_ttm()
    374  map->virtual = vmap(ttm->pages + start_page, num_pages,  in ttm_bo_kmap_ttm()
    397  unsigned long start_page, unsigned long num_pages,  in ttm_bo_kmap() argument
    405  if (num_pages > PFN_UP(bo->resource->size))  in ttm_bo_kmap()
    407  if ((start_page + num_pages) > PFN_UP(bo->resource->size))  in ttm_bo_kmap()
    414  return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);  in ttm_bo_kmap()
    [all …]

ttm_range_manager.c
    122  u32 num_pages = PFN_UP(size);  in ttm_range_man_intersects() local
    125  if (place->fpfn >= (node->start + num_pages) ||  in ttm_range_man_intersects()
    138  u32 num_pages = PFN_UP(size);  in ttm_range_man_compatible() local
    141  (place->lpfn && (node->start + num_pages) > place->lpfn))  in ttm_range_man_compatible()
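The ttm_tt.c and ttm_device.c hits above show the two conversions TTM uses from a byte size to a page count: PAGE_ALIGN(size) >> PAGE_SHIFT plus optional extra pages, and PFN_UP(size). A minimal standalone sketch of that arithmetic, with the kernel macros redefined locally and a 4 KiB page assumed (the real values are per-architecture):

```c
#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the kernel macros; 4 KiB pages assumed. */
#define PAGE_SHIFT    12
#define PAGE_SIZE     (1ULL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define PFN_UP(x)     (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	uint64_t bo_size = 10000;  /* buffer-object size in bytes, deliberately unaligned */
	uint64_t extra_pages = 1;  /* e.g. room for a shared page, as in ttm_tt_init_fields() */

	/* ttm_tt.c style: align the size up to a page, then convert to pages */
	uint64_t tt_pages = (PAGE_ALIGN(bo_size) >> PAGE_SHIFT) + extra_pages;

	/* ttm_device.c style: round the byte size up to whole pages directly */
	uint64_t swapout_pages = PFN_UP(bo_size);

	printf("%llu bytes -> %llu tt pages (incl. %llu extra), %llu pages for swapout\n",
	       (unsigned long long)bo_size, (unsigned long long)tt_pages,
	       (unsigned long long)extra_pages, (unsigned long long)swapout_pages);
	return 0;
}
```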
/drivers/infiniband/hw/qib/

qib_user_pages.c
    40  static void __qib_release_user_pages(struct page **p, size_t num_pages,  in __qib_release_user_pages() argument
    43  unpin_user_pages_dirty_lock(p, num_pages, dirty);  in __qib_release_user_pages()
    94  int qib_get_user_pages(unsigned long start_page, size_t num_pages,  in qib_get_user_pages() argument
    102  locked = atomic64_add_return(num_pages, &current->mm->pinned_vm);  in qib_get_user_pages()
    110  for (got = 0; got < num_pages; got += ret) {  in qib_get_user_pages()
    112  num_pages - got,  in qib_get_user_pages()
    126  atomic64_sub(num_pages, &current->mm->pinned_vm);  in qib_get_user_pages()
    130  void qib_release_user_pages(struct page **p, size_t num_pages)  in qib_release_user_pages() argument
    132  __qib_release_user_pages(p, num_pages, 1);  in qib_release_user_pages()
    136  atomic64_sub(num_pages, &current->mm->pinned_vm);  in qib_release_user_pages()
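qib_get_user_pages() charges all of num_pages to pinned_vm up front, pins in a loop that asks for whatever is still missing, and subtracts the whole amount again if it has to bail out. A schematic sketch of that control flow only; pin_chunk() is a hypothetical stand-in for pin_user_pages() (which may return fewer pages than requested), and the real code also releases the pages it already pinned before undoing the accounting:

```c
#include <stdio.h>
#include <stddef.h>

static long pinned_vm;  /* stands in for current->mm->pinned_vm */

/* Hypothetical stand-in for pin_user_pages(): pins at most 3 pages per call. */
static long pin_chunk(size_t start_page, size_t want)
{
	(void)start_page;
	return want < 3 ? (long)want : 3;
}

static int get_user_pages_chunked(size_t start_page, size_t num_pages)
{
	size_t got;
	long ret;

	pinned_vm += num_pages;  /* account for everything before pinning */

	for (got = 0; got < num_pages; got += ret) {
		ret = pin_chunk(start_page + got, num_pages - got);
		if (ret <= 0) {
			pinned_vm -= num_pages;  /* roll the accounting back on failure */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	if (get_user_pages_chunked(0, 10) == 0)
		printf("pinned 10 pages, pinned_vm = %ld\n", pinned_vm);
	return 0;
}
```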
/drivers/gpu/drm/vmwgfx/

vmwgfx_gmr.c
    39  unsigned long num_pages,  in vmw_gmr2_bind() argument
    47  uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);  in vmw_gmr2_bind()
    48  uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;  in vmw_gmr2_bind()
    58  define_cmd.numPages = num_pages;  in vmw_gmr2_bind()
    73  while (num_pages > 0) {  in vmw_gmr2_bind()
    74  unsigned long nr = min_t(unsigned long, num_pages, VMW_PPN_PER_REMAP);  in vmw_gmr2_bind()
    94  num_pages -= nr;  in vmw_gmr2_bind()
    128  unsigned long num_pages,  in vmw_gmr_bind() argument
    141  return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);  in vmw_gmr_bind()

vmwgfx_ttm_buffer.c
    102  return ++(viter->i) < viter->num_pages;  in __vmw_piter_non_sg_next()
    139  viter->num_pages = vsgt->num_pages;  in vmw_piter_start()
    216  vsgt->num_pages = vmw_tt->dma_ttm.num_pages;  in vmw_ttm_map_dma()
    228  vsgt->pages, vsgt->num_pages, 0,  in vmw_ttm_map_dma()
    229  (unsigned long)vsgt->num_pages << PAGE_SHIFT,  in vmw_ttm_map_dma()
    328  ttm->num_pages, vmw_be->gmr_id);  in vmw_ttm_bind()
    333  vmw_mob_create(ttm->num_pages);  in vmw_ttm_bind()
    339  &vmw_be->vsgt, ttm->num_pages,  in vmw_ttm_bind()
    405  ttm->num_pages);  in vmw_ttm_populate()
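The two lines from vmw_gmr2_bind() at 47-48 size the FIFO command space from num_pages: one remap header per chunk of VMW_PPN_PER_REMAP page numbers, plus VMW_PPN_SIZE bytes per page, and the bind loop then walks the pages in the same chunk size. A standalone sketch of that sizing; the constants and header sizes here are placeholders, not the driver's real values:

```c
#include <stdio.h>
#include <stdint.h>

/* Placeholder values; the driver derives these from its command layout. */
#define VMW_PPN_SIZE       sizeof(uint32_t)
#define VMW_PPN_PER_REMAP  64

int main(void)
{
	unsigned long num_pages = 150;

	/* one remap header per chunk of up to VMW_PPN_PER_REMAP page numbers */
	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP +
			     ((num_pages % VMW_PPN_PER_REMAP) > 0);

	/* pretend header sizes, standing in for sizeof(remap_cmd) + sizeof(*cmd) */
	size_t header_size = 16 + 8;
	size_t remap_size = VMW_PPN_SIZE * num_pages + header_size * remap_num;

	printf("%lu pages -> %u remap commands, %zu bytes of command space\n",
	       num_pages, (unsigned)remap_num, remap_size);

	/* the bind loop then emits the page numbers chunk by chunk */
	while (num_pages > 0) {
		unsigned long nr = num_pages < VMW_PPN_PER_REMAP ?
				   num_pages : VMW_PPN_PER_REMAP;
		printf("  chunk of %lu pages\n", nr);
		num_pages -= nr;
	}
	return 0;
}
```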
/drivers/infiniband/sw/siw/

siw_mem.c
    63  static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,  in siw_free_plist() argument
    66  unpin_user_pages_dirty_lock(chunk->plist, num_pages, dirty);  in siw_free_plist()
    72  int i, num_pages = umem->num_pages;  in siw_umem_release() local
    74  for (i = 0; num_pages; i++) {  in siw_umem_release()
    75  int to_free = min_t(int, PAGES_PER_CHUNK, num_pages);  in siw_umem_release()
    80  num_pages -= to_free;  in siw_umem_release()
    82  atomic64_sub(umem->num_pages, &mm_s->pinned_vm);  in siw_umem_release()
    372  int num_pages, num_chunks, i, rv = 0;  in siw_umem_get() local
    381  num_pages = PAGE_ALIGN(start + len - first_page_va) >> PAGE_SHIFT;  in siw_umem_get()
    382  num_chunks = (num_pages >> CHUNK_SHIFT) + 1;  in siw_umem_get()
    [all …]
/drivers/xen/

xen-front-pgdir-shbuf.c
    160  return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);  in get_num_pages_dir()
    187  buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;  in guest_calc_num_grefs()
    208  unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),  in backend_unmap()
    213  for (i = 0; i < buf->num_pages; i++) {  in backend_unmap()
    222  buf->num_pages);  in backend_unmap()
    224  for (i = 0; i < buf->num_pages; i++) {  in backend_unmap()
    253  map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);  in backend_map()
    257  buf->backend_map_handles = kcalloc(buf->num_pages,  in backend_map()
    271  grefs_left = buf->num_pages;  in backend_map()
    295  ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);  in backend_map()
    [all …]
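xen-front-pgdir-shbuf grants one reference per data page plus enough directory pages to hold all of those references, XEN_NUM_GREFS_PER_PAGE per directory page, which is what get_num_pages_dir() and guest_calc_num_grefs() compute above. A small sketch of that count, with the per-directory-page capacity as a placeholder value:

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

/* Placeholder: how many grant refs fit in one directory page. */
#define XEN_NUM_GREFS_PER_PAGE  256

int main(void)
{
	unsigned int num_pages = 1000;  /* data pages to share */

	/* directory pages needed to index all the data-page grants */
	unsigned int num_pages_dir = DIV_ROUND_UP(num_pages, XEN_NUM_GREFS_PER_PAGE);

	/* the guest grants both the directory pages and the data pages */
	unsigned int num_grefs = num_pages_dir + num_pages;

	printf("%u data pages -> %u directory pages, %u grant refs total\n",
	       num_pages, num_pages_dir, num_grefs);
	return 0;
}
```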
/drivers/gpu/drm/xen/

xen_drm_front_gem.c
    28  size_t num_pages;  member
    47  xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);  in gem_alloc_pages_array()
    48  xen_obj->pages = kvmalloc_array(xen_obj->num_pages,  in gem_alloc_pages_array()
    92  ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);  in xen_drm_front_gem_object_mmap()
    158  ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,  in gem_create()
    162  xen_obj->num_pages, ret);  in gem_create()
    174  xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);  in gem_create()
    211  xen_free_unpopulated_pages(xen_obj->num_pages,  in xen_drm_front_gem_free_object_unlocked()
    239  xen_obj->pages, xen_obj->num_pages);  in xen_drm_front_gem_get_sg_table()
    264  xen_obj->num_pages);  in xen_drm_front_gem_import_sg_table()
    [all …]
/drivers/media/common/videobuf2/

videobuf2-dma-sg.c
    49  unsigned int num_pages;  member
    107  int num_pages;  in vb2_dma_sg_alloc() local
    121  buf->num_pages = size >> PAGE_SHIFT;  in vb2_dma_sg_alloc()
    129  buf->pages = kvcalloc(buf->num_pages, sizeof(struct page *), GFP_KERNEL);  in vb2_dma_sg_alloc()
    138  buf->num_pages, 0, size, GFP_KERNEL);  in vb2_dma_sg_alloc()
    162  __func__, buf->num_pages);  in vb2_dma_sg_alloc()
    169  num_pages = buf->num_pages;  in vb2_dma_sg_alloc()
    170  while (num_pages--)  in vb2_dma_sg_alloc()
    171  __free_page(buf->pages[num_pages]);  in vb2_dma_sg_alloc()
    183  int i = buf->num_pages;  in vb2_dma_sg_put()
    [all …]
/drivers/tee/

tee_shm.c
    48  unpin_user_pages(shm->pages, shm->num_pages);  in release_registered_pages()
    50  shm_put_kernel_pages(shm->pages, shm->num_pages);  in release_registered_pages()
    223  size_t num_pages;  in register_shm_helper() local
    252  num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;  in register_shm_helper()
    253  shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);  in register_shm_helper()
    260  rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,  in register_shm_helper()
    263  rc = shm_get_kernel_pages(start, num_pages, shm->pages);  in register_shm_helper()
    265  shm->num_pages = rc;  in register_shm_helper()
    266  if (rc != num_pages) {  in register_shm_helper()
    274  shm->num_pages, start);  in register_shm_helper()
    [all …]
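Line 252 of tee_shm.c counts how many pages an arbitrary (addr, length) user range touches: round the end of the range up to a page boundary, subtract the page-aligned start, divide by PAGE_SIZE. A sketch of that calculation, assuming start is addr rounded down to a page boundary (that assignment is in the elided lines) and a 4 KiB page:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL   /* assumed; the kernel value is per-arch */

static unsigned long rounddown(unsigned long x, unsigned long a) { return x - (x % a); }
static unsigned long roundup(unsigned long x, unsigned long a)   { return rounddown(x + a - 1, a); }

int main(void)
{
	unsigned long addr = 0x1000f00;   /* unaligned start of the user buffer */
	unsigned long length = 0x2300;    /* unaligned length */

	unsigned long start = rounddown(addr, PAGE_SIZE);   /* assumed, see above */
	size_t num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;

	printf("range [%#lx, %#lx) touches %zu pages\n", addr, addr + length, num_pages);
	return 0;
}
```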
/drivers/gpu/drm/i915/gem/

i915_gem_userptr.c
    122  const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;  in i915_gem_object_userptr_drop_ref() local
    124  unpin_user_pages(pvec, num_pages);  in i915_gem_object_userptr_drop_ref()
    134  unsigned int num_pages; /* limited by sg_alloc_table_from_pages_segment */  in i915_gem_userptr_get_pages() local
    137  if (overflows_type(obj->base.size >> PAGE_SHIFT, num_pages))  in i915_gem_userptr_get_pages()
    140  num_pages = obj->base.size >> PAGE_SHIFT;  in i915_gem_userptr_get_pages()
    154  ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,  in i915_gem_userptr_get_pages()
    155  num_pages << PAGE_SHIFT,  in i915_gem_userptr_get_pages()
    265  const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;  in i915_gem_object_userptr_submit_init() local
    290  pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);  in i915_gem_object_userptr_submit_init()
    298  while (pinned < num_pages) {  in i915_gem_object_userptr_submit_init()
    [all …]
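i915_gem_userptr_get_pages() keeps num_pages in an unsigned int because the sg-table helper takes an unsigned int count, so it first checks that size >> PAGE_SHIFT actually fits. A sketch of that guard with the check written out by hand instead of the kernel's overflows_type() macro:

```c
#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */

/* Returns 0 on success, -1 if the page count does not fit in unsigned int. */
static int size_to_page_count(uint64_t size, unsigned int *num_pages)
{
	uint64_t pages = size >> PAGE_SHIFT;

	if (pages > UINT_MAX)   /* the condition overflows_type() checks generically */
		return -1;

	*num_pages = (unsigned int)pages;
	return 0;
}

int main(void)
{
	unsigned int n;

	if (size_to_page_count(1ULL << 40, &n) == 0)   /* 1 TiB: fits */
		printf("1 TiB -> %u pages\n", n);

	if (size_to_page_count(1ULL << 63, &n) != 0)   /* far too large */
		printf("8 EiB -> page count overflows unsigned int\n");
	return 0;
}
```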
/drivers/gpu/drm/gma500/

mmu.c
    478  uint32_t num_pages, uint32_t desired_tile_stride,  in psb_mmu_flush_ptes() argument
    496  rows = num_pages / desired_tile_stride;  in psb_mmu_flush_ptes()
    498  desired_tile_stride = num_pages;  in psb_mmu_flush_ptes()
    526  unsigned long address, uint32_t num_pages)  in psb_mmu_remove_pfn_sequence() argument
    537  end = addr + (num_pages << PAGE_SHIFT);  in psb_mmu_remove_pfn_sequence()
    554  psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);  in psb_mmu_remove_pfn_sequence()
    565  uint32_t num_pages, uint32_t desired_tile_stride,  in psb_mmu_remove_pages() argument
    579  rows = num_pages / desired_tile_stride;  in psb_mmu_remove_pages()
    581  desired_tile_stride = num_pages;  in psb_mmu_remove_pages()
    611  psb_mmu_flush_ptes(pd, f_address, num_pages,  in psb_mmu_remove_pages()
    [all …]

mmu.h
    69  uint32_t num_pages);
    73  uint32_t num_pages, int type);
    78  unsigned long address, uint32_t num_pages,
    82  unsigned long address, uint32_t num_pages,
/drivers/staging/vc04_services/interface/vchiq_arm/

vchiq_arm.c
    128  unsigned int num_pages;  member
    181  pagelistinfo->num_pages, pagelistinfo->dma_dir);  in cleanup_pagelistinfo()
    185  unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);  in cleanup_pagelistinfo()
    221  unsigned int num_pages, offset, i, k;  in create_pagelist() local
    235  num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);  in create_pagelist()
    237  if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -  in create_pagelist()
    244  (num_pages * sizeof(u32)) +  in create_pagelist()
    245  (num_pages * sizeof(pages[0]) +  in create_pagelist()
    246  (num_pages * sizeof(struct scatterlist))) +  in create_pagelist()
    261  pages = (struct page **)(addrs + num_pages);  in create_pagelist()
    [all …]
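create_pagelist() makes a single allocation holding the pagelist header, a u32 per page (the addrs array), a struct page pointer per page and a scatterlist entry per page, with the SIZE_MAX comparison up front so the per-page multiplications cannot overflow. A sketch of that sizing, restructured slightly and using stand-in types whose sizes only matter for the arithmetic:

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Stand-ins for the driver's structures; only their sizes matter here. */
struct pagelist_hdr { uint32_t type, length; };
struct sg_entry     { void *page; unsigned int len, offset; };

int main(void)
{
	size_t count = 100000;   /* bytes in the user buffer */
	size_t offset = 123;     /* offset of the buffer within its first page */

	size_t num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	/* reject counts whose per-page bookkeeping would overflow size_t */
	size_t per_page = sizeof(uint32_t) + sizeof(void *) + sizeof(struct sg_entry);
	if (num_pages > (SIZE_MAX - sizeof(struct pagelist_hdr)) / per_page) {
		fprintf(stderr, "buffer too large\n");
		return 1;
	}

	size_t pagelist_size = sizeof(struct pagelist_hdr) + num_pages * per_page;
	printf("%zu bytes at offset %zu -> %zu pages, %zu byte pagelist\n",
	       count, offset, num_pages, pagelist_size);
	return 0;
}
```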
/drivers/gpu/drm/radeon/

radeon_ttm.c
    146  unsigned num_pages;  in radeon_move_blit() local
    183  num_pages = PFN_UP(new_mem->size) * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);  in radeon_move_blit()
    184  fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);  in radeon_move_blit()
    348  unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE;  in radeon_ttm_tt_pin_userptr()
    356  unsigned num_pages = ttm->num_pages - pinned;  in radeon_ttm_tt_pin_userptr() local
    360  r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,  in radeon_ttm_tt_pin_userptr()
    367  } while (pinned < ttm->num_pages);  in radeon_ttm_tt_pin_userptr()
    369  r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,  in radeon_ttm_tt_pin_userptr()
    370  (u64)ttm->num_pages << PAGE_SHIFT,  in radeon_ttm_tt_pin_userptr()
    380  ttm->num_pages);  in radeon_ttm_tt_pin_userptr()
    [all …]
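radeon_move_blit() converts the resource size into GPU pages: count CPU pages with PFN_UP, then multiply by how many GPU pages fit in one CPU page. A sketch of the conversion, taking the GPU page as 4 KiB and a 64 KiB kernel page so the factor is visible (with 4 KiB kernel pages the factor is simply 1):

```c
#include <stdio.h>
#include <stdint.h>

#define RADEON_GPU_PAGE_SIZE 4096UL    /* GPU page size used in this sketch */
#define PAGE_SIZE            65536UL   /* e.g. a 64 KiB kernel page size */
#define PFN_UP(x)            (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

int main(void)
{
	uint64_t size = 1 << 20;   /* 1 MiB buffer object */

	/* CPU pages first, then GPU pages per CPU page */
	unsigned long num_pages = PFN_UP(size) * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

	printf("%llu bytes -> %lu CPU pages -> %lu GPU pages for the copy\n",
	       (unsigned long long)size, (unsigned long)PFN_UP(size), num_pages);
	return 0;
}
```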
/drivers/gpu/drm/

drm_memory.c
    61  unsigned long i, num_pages =  in agp_remap() local
    88  page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));  in agp_remap()
    93  for (i = 0; i < num_pages; ++i)  in agp_remap()
    95  addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);  in agp_remap()

drm_cache.c
    67  unsigned long num_pages)  in drm_cache_flush_clflush() argument
    72  for (i = 0; i < num_pages; i++)  in drm_cache_flush_clflush()
    87  drm_clflush_pages(struct page *pages[], unsigned long num_pages)  in drm_clflush_pages() argument
    92  drm_cache_flush_clflush(pages, num_pages);  in drm_clflush_pages()
    102  for (i = 0; i < num_pages; i++) {  in drm_clflush_pages()
/drivers/hv/

hv_balloon.c
    285  __u32 num_pages;  member
    455  __u32 num_pages;  member
    1220  int num_pages = range_array->finfo.page_cnt;  in free_balloon_pages() local
    1225  for (i = 0; i < num_pages; i++) {  in free_balloon_pages()
    1237  unsigned int num_pages,  in alloc_balloon_pages() argument
    1244  for (i = 0; i < num_pages / alloc_unit; i++) {  in alloc_balloon_pages()
    1289  unsigned int num_pages = dm_device.balloon_wrk.num_pages;  in balloon_up() local
    1309  if (avail_pages < num_pages || avail_pages - num_pages < floor) {  in balloon_up()
    1311  avail_pages < num_pages ? "Not enough memory." :  in balloon_up()
    1314  num_pages = avail_pages > floor ? (avail_pages - floor) : 0;  in balloon_up()
    [all …]
/drivers/firmware/efi/

memattr.c
    59  u64 in_size = in->num_pages << EFI_PAGE_SHIFT;  in entry_is_valid()
    72  !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {  in entry_is_valid()
    86  u64 md_size = md->num_pages << EFI_PAGE_SHIFT;  in entry_is_valid()
    169  size = md.num_pages << EFI_PAGE_SHIFT;  in efi_memattr_apply_permissions()
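memattr.c converts a descriptor's num_pages to bytes with EFI_PAGE_SHIFT, which is 12 (4 KiB EFI pages) regardless of the kernel's own PAGE_SHIFT; entry_is_valid() additionally rejects regions that are not a whole number of kernel pages. A short demonstration, using a 16 KiB kernel page to show how a 7-EFI-page region fails that alignment check:

```c
#include <stdio.h>
#include <stdint.h>

#define EFI_PAGE_SHIFT 12        /* EFI pages are always 4 KiB */
#define PAGE_SIZE      16384UL   /* example: a 16 KiB kernel page size */

int main(void)
{
	uint64_t num_pages = 7;      /* from an EFI memory descriptor */
	uint64_t size = num_pages << EFI_PAGE_SHIFT;

	/* a 7-EFI-page (28 KiB) region is not a whole number of 16 KiB kernel pages */
	printf("EFI region: %llu bytes, kernel-page aligned: %s\n",
	       (unsigned long long)size,
	       (size % PAGE_SIZE) == 0 ? "yes" : "no");
	return 0;
}
```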
/drivers/gpu/drm/i915/gt/

selftest_reset.c
    24  resource_size_t num_pages, page;  in __igt_reset_stolen() local
    37  num_pages = resource_size(dsm) >> PAGE_SHIFT;  in __igt_reset_stolen()
    38  if (!num_pages)  in __igt_reset_stolen()
    41  crc = kmalloc_array(num_pages, sizeof(u32), GFP_KERNEL);  in __igt_reset_stolen()
    82  for (page = 0; page < num_pages; page++) {  in __igt_reset_stolen()
    124  for (page = 0; page < num_pages; page++) {  in __igt_reset_stolen()
/drivers/virtio/

virtio_balloon.c
    94  unsigned int num_pages;  member
    242  vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;  in fill_balloon()
    284  num = min(num, (size_t)vb->num_pages);  in leak_balloon()
    292  vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;  in leak_balloon()
    396  u32 num_pages;  in towards_target() local
    399  virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,  in towards_target()
    400  &num_pages);  in towards_target()
    406  target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);  in towards_target()
    407  return target - vb->num_pages;  in towards_target()
    460  u32 actual = vb->num_pages;  in update_balloon_size()
    [all …]
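towards_target() reads the device's requested num_pages (counted in 4 KiB balloon pages), aligns it up to whole kernel pages, and returns the signed difference from the current balloon size, positive meaning inflate. A sketch of that arithmetic, assuming a 16 KiB kernel page so VIRTIO_BALLOON_PAGES_PER_PAGE is 4:

```c
#include <stdio.h>
#include <stdint.h>

/* Balloon pages are fixed 4 KiB units; a 16 KiB kernel page is assumed here. */
#define VIRTIO_BALLOON_PAGES_PER_PAGE 4
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	uint32_t requested = 1001;     /* num_pages read from the device config */
	uint32_t current_pages = 800;  /* balloon pages currently inflated */

	/* round the request to whole kernel pages, then diff against the balloon */
	int64_t target = ALIGN(requested, VIRTIO_BALLOON_PAGES_PER_PAGE);
	int64_t delta = target - current_pages;

	printf("target %lld balloon pages, delta %+lld (inflate if positive)\n",
	       (long long)target, (long long)delta);
	return 0;
}
```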
/drivers/scsi/bfa/

bfa_fcbuild.c
    647  int num_pages = 0;  in fc_logout_params_pages() local
    653  num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16;  in fc_logout_params_pages()
    656  num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;  in fc_logout_params_pages()
    658  return num_pages;  in fc_logout_params_pages()
    663  u32 d_id, u32 s_id, __be16 ox_id, int num_pages)  in fc_tprlo_acc_build() argument
    669  memset(tprlo_acc, 0, (num_pages * 16) + 4);  in fc_tprlo_acc_build()
    673  tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);  in fc_tprlo_acc_build()
    675  for (page = 0; page < num_pages; page++) {  in fc_tprlo_acc_build()
    687  u32 s_id, __be16 ox_id, int num_pages)  in fc_prlo_acc_build() argument
    693  memset(prlo_acc, 0, (num_pages * 16) + 4);  in fc_prlo_acc_build()
    [all …]
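fc_logout_params_pages() derives the number of PRLO/TPRLO parameter pages from the FC payload length, 4 bytes of header plus 16 bytes per page, and the accept builders size their payload with the inverse formula. A quick demonstration of both directions (the be16 wire-order conversion is left out):

```c
#include <stdio.h>

int main(void)
{
	/* payload length as carried in the PRLO header, already in host order */
	int payload_len = 4 + 3 * 16;             /* header + three 16-byte pages */

	int num_pages = (payload_len - 4) / 16;   /* pages encoded in the request */
	int acc_len = (num_pages * 16) + 4;       /* size of the accept payload */

	printf("payload %d bytes -> %d pages -> accept payload %d bytes\n",
	       payload_len, num_pages, acc_len);
	return 0;
}
```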