Searched refs:num_pages (Results 1 – 25 of 172) sorted by relevance

/drivers/staging/vc04_services/interface/vchiq_arm/
vchiq_2835_arm.c
42 unsigned int num_pages; member
288 pagelistinfo->num_pages, pagelistinfo->dma_dir); in cleanup_pagelistinfo()
292 unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages); in cleanup_pagelistinfo()
314 unsigned int num_pages, offset, i, k; in create_pagelist() local
328 num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE); in create_pagelist()
330 if (num_pages > (SIZE_MAX - sizeof(struct pagelist) - in create_pagelist()
337 (num_pages * sizeof(u32)) + in create_pagelist()
338 (num_pages * sizeof(pages[0]) + in create_pagelist()
339 (num_pages * sizeof(struct scatterlist))) + in create_pagelist()
354 pages = (struct page **)(addrs + num_pages); in create_pagelist()
[all …]
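
Note: the create_pagelist() hits above show the common idiom for turning a possibly unaligned user buffer into a page count, plus an overflow guard before sizing one combined allocation. A minimal userspace sketch of that arithmetic (4 KiB pages assumed; struct pagelist here is a simplified stand-in, not the real vchiq layout):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL
    /* same rounding as the kernel's DIV_ROUND_UP() */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* simplified stand-in for the driver's struct pagelist */
    struct pagelist { uint32_t length; uint32_t type; uint32_t offset; };

    int main(void)
    {
        unsigned long buf = 0x1000234UL; /* unaligned user address (example) */
        size_t count = 10000;            /* bytes to transfer (example) */

        /* pages spanned = byte count plus the leading in-page offset, rounded up */
        unsigned long offset = buf & (PAGE_SIZE - 1);
        unsigned long num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

        /* guard the later "header + per-page records" allocation against
         * overflow, in the spirit of the SIZE_MAX check in create_pagelist() */
        if (num_pages > (SIZE_MAX - sizeof(struct pagelist)) /
                        (sizeof(uint32_t) + sizeof(void *))) {
            fprintf(stderr, "buffer too large\n");
            return EXIT_FAILURE;
        }
        printf("offset=%lu num_pages=%lu\n", offset, num_pages);
        return 0;
    }
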
/drivers/staging/gasket/
gasket_page_table.c
468 uint num_pages, int is_simple_mapping) in gasket_perform_mapping() argument
477 for (i = 0; i < num_pages; i++) { in gasket_perform_mapping()
576 ulong dev_addr, uint num_pages) in gasket_alloc_simple_entries() argument
580 num_pages)) in gasket_alloc_simple_entries()
592 u64 __iomem *slots, uint num_pages, in gasket_perform_unmapping() argument
600 for (i = 0; i < num_pages; i++) { in gasket_perform_unmapping()
630 ulong dev_addr, uint num_pages) in gasket_unmap_simple_pages() argument
635 pg_tbl->base_slot + slot, num_pages, 1); in gasket_unmap_simple_pages()
643 ulong dev_addr, uint num_pages) in gasket_unmap_extended_pages() argument
649 remain = num_pages; in gasket_unmap_extended_pages()
[all …]
/drivers/infiniband/hw/qib/
qib_user_pages.c
40 static void __qib_release_user_pages(struct page **p, size_t num_pages, in __qib_release_user_pages() argument
43 unpin_user_pages_dirty_lock(p, num_pages, dirty); in __qib_release_user_pages()
94 int qib_get_user_pages(unsigned long start_page, size_t num_pages, in qib_get_user_pages() argument
102 locked = atomic64_add_return(num_pages, &current->mm->pinned_vm); in qib_get_user_pages()
110 for (got = 0; got < num_pages; got += ret) { in qib_get_user_pages()
112 num_pages - got, in qib_get_user_pages()
126 atomic64_sub(num_pages, &current->mm->pinned_vm); in qib_get_user_pages()
130 void qib_release_user_pages(struct page **p, size_t num_pages) in qib_release_user_pages() argument
132 __qib_release_user_pages(p, num_pages, 1); in qib_release_user_pages()
136 atomic64_sub(num_pages, &current->mm->pinned_vm); in qib_release_user_pages()
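
Note: qib_get_user_pages() charges num_pages to current->mm->pinned_vm up front, then pins in a loop because each pin call may return fewer pages than requested; on failure it rolls the accounting back. A schematic, compile-and-run sketch of just the loop shape (__pin_some() is a hypothetical stand-in for pin_user_pages(), not a kernel API):

    #include <stddef.h>
    #include <stdio.h>

    /* hypothetical stand-in for pin_user_pages(): pins up to 'want'
     * pages and returns how many it actually pinned, or < 0 on error */
    static long __pin_some(size_t want)
    {
        return want > 4 ? 4 : (long)want; /* pretend: at most 4 per call */
    }

    int main(void)
    {
        size_t num_pages = 10, got;
        long ret;

        for (got = 0; got < num_pages; got += ret) {
            ret = __pin_some(num_pages - got);
            if (ret <= 0) {
                /* real code: unpin the 'got' pages already pinned and
                 * atomic64_sub(num_pages, &current->mm->pinned_vm) */
                return 1;
            }
            printf("pinned %ld (total %zu of %zu)\n", ret, got + ret, num_pages);
        }
        return 0;
    }
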
/drivers/gpu/drm/vmwgfx/
vmwgfx_gmr.c
40 unsigned long num_pages, in vmw_gmr2_bind() argument
48 uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0); in vmw_gmr2_bind()
49 uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num; in vmw_gmr2_bind()
59 define_cmd.numPages = num_pages; in vmw_gmr2_bind()
74 while (num_pages > 0) { in vmw_gmr2_bind()
75 unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP); in vmw_gmr2_bind()
95 num_pages -= nr; in vmw_gmr2_bind()
129 unsigned long num_pages, in vmw_gmr_bind() argument
142 return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); in vmw_gmr_bind()
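
Note: vmw_gmr2_bind() open-codes a ceiling division to size its command buffer (one REMAP command per full chunk, plus one for any remainder), then consumes num_pages chunk by chunk. The shape of that loop, with an illustrative chunk size (the real VMW_PPN_PER_REMAP is derived from SVGA command sizes, so this value is an assumption):

    #include <stdio.h>

    #define VMW_PPN_PER_REMAP 1024 /* illustrative value, assumption */

    int main(void)
    {
        unsigned long num_pages = 2500;

        /* open-coded DIV_ROUND_UP, as in vmw_gmr2_bind() */
        unsigned int remap_num = num_pages / VMW_PPN_PER_REMAP +
                                 ((num_pages % VMW_PPN_PER_REMAP) > 0);
        printf("%u remap commands\n", remap_num);

        /* consume the pages chunk by chunk */
        while (num_pages > 0) {
            unsigned long nr = num_pages < VMW_PPN_PER_REMAP ?
                               num_pages : VMW_PPN_PER_REMAP;
            printf("remap %lu pages\n", nr);
            num_pages -= nr;
        }
        return 0;
    }
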
vmwgfx_ttm_buffer.c
286 return ++(viter->i) < viter->num_pages; in __vmw_piter_non_sg_next()
350 viter->num_pages = vsgt->num_pages; in vmw_piter_start()
442 vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages; in vmw_ttm_map_dma()
453 vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages; in vmw_ttm_map_dma()
459 vsgt->num_pages, 0, in vmw_ttm_map_dma()
460 (unsigned long) vsgt->num_pages << PAGE_SHIFT, in vmw_ttm_map_dma()
468 if (vsgt->num_pages > vmw_tt->sgt.orig_nents) { in vmw_ttm_map_dma()
470 sgl_size * (vsgt->num_pages - in vmw_ttm_map_dma()
581 ttm->num_pages, vmw_be->gmr_id); in vmw_ttm_bind()
586 vmw_mob_create(ttm->num_pages); in vmw_ttm_bind()
[all …]
vmwgfx_gmrid_manager.c
68 gman->used_gmr_pages += bo->num_pages; in vmw_gmrid_man_get_node()
75 mem->num_pages = bo->num_pages; in vmw_gmrid_man_get_node()
81 gman->used_gmr_pages -= bo->num_pages; in vmw_gmrid_man_get_node()
95 gman->used_gmr_pages -= mem->num_pages; in vmw_gmrid_man_put_node()
vmwgfx_page_dirty.c
235 pgoff_t num_pages = vbo->base.num_pages; in vmw_bo_dirty_add() local
248 size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long); in vmw_bo_dirty_add()
263 dirty->bitmap_size = num_pages; in vmw_bo_dirty_add()
267 if (num_pages < PAGE_SIZE / sizeof(pte_t)) { in vmw_bo_dirty_add()
276 wp_shared_mapping_range(mapping, offset, num_pages); in vmw_bo_dirty_add()
277 clean_record_shared_mapping_range(mapping, offset, num_pages, in vmw_bo_dirty_add()
416 if (unlikely(page_offset >= bo->num_pages)) { in vmw_bo_vm_mkwrite()
459 if (page_offset >= bo->num_pages || in vmw_bo_vm_fault()
534 if (page_offset >= bo->num_pages || in vmw_bo_vm_huge_fault()
/drivers/xen/
xen-front-pgdir-shbuf.c
169 return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE); in get_num_pages_dir()
196 buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages; in guest_calc_num_grefs()
217 unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops), in backend_unmap()
222 for (i = 0; i < buf->num_pages; i++) { in backend_unmap()
231 buf->num_pages); in backend_unmap()
233 for (i = 0; i < buf->num_pages; i++) { in backend_unmap()
262 map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL); in backend_map()
266 buf->backend_map_handles = kcalloc(buf->num_pages, in backend_map()
280 grefs_left = buf->num_pages; in backend_map()
304 ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages); in backend_map()
[all …]
/drivers/infiniband/sw/siw/
siw_mem.c
63 static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages, in siw_free_plist() argument
66 unpin_user_pages_dirty_lock(chunk->plist, num_pages, dirty); in siw_free_plist()
72 int i, num_pages = umem->num_pages; in siw_umem_release() local
74 for (i = 0; num_pages; i++) { in siw_umem_release()
75 int to_free = min_t(int, PAGES_PER_CHUNK, num_pages); in siw_umem_release()
80 num_pages -= to_free; in siw_umem_release()
82 atomic64_sub(umem->num_pages, &mm_s->pinned_vm); in siw_umem_release()
372 int num_pages, num_chunks, i, rv = 0; in siw_umem_get() local
381 num_pages = PAGE_ALIGN(start + len - first_page_va) >> PAGE_SHIFT; in siw_umem_get()
382 num_chunks = (num_pages >> CHUNK_SHIFT) + 1; in siw_umem_get()
[all …]
/drivers/gpu/drm/xen/
xen_drm_front_gem.c
29 size_t num_pages; member
48 xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE); in gem_alloc_pages_array()
49 xen_obj->pages = kvmalloc_array(xen_obj->num_pages, in gem_alloc_pages_array()
103 ret = xen_alloc_unpopulated_pages(xen_obj->num_pages, in gem_create()
107 xen_obj->num_pages, ret); in gem_create()
119 xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE); in gem_create()
156 xen_free_unpopulated_pages(xen_obj->num_pages, in xen_drm_front_gem_free_object_unlocked()
184 xen_obj->pages, xen_obj->num_pages); in xen_drm_front_gem_get_sg_table()
209 NULL, xen_obj->num_pages); in xen_drm_front_gem_import_sg_table()
256 ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages); in gem_mmap_obj()
[all …]
/drivers/media/common/videobuf2/
videobuf2-dma-sg.c
49 unsigned int num_pages; member
106 int num_pages; in vb2_dma_sg_alloc() local
120 buf->num_pages = size >> PAGE_SHIFT; in vb2_dma_sg_alloc()
128 buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *), in vb2_dma_sg_alloc()
138 buf->num_pages, 0, size, GFP_KERNEL); in vb2_dma_sg_alloc()
161 __func__, buf->num_pages); in vb2_dma_sg_alloc()
168 num_pages = buf->num_pages; in vb2_dma_sg_alloc()
169 while (num_pages--) in vb2_dma_sg_alloc()
170 __free_page(buf->pages[num_pages]); in vb2_dma_sg_alloc()
182 int i = buf->num_pages; in vb2_dma_sg_put()
[all …]
/drivers/gpu/drm/ttm/
ttm_tt.c
89 ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*), in ttm_tt_alloc_page_directory()
98 ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages, in ttm_dma_tt_alloc_page_directory()
104 ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages); in ttm_dma_tt_alloc_page_directory()
110 ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages, in ttm_sg_tt_alloc_page_directory()
166 drm_clflush_pages(ttm->pages, ttm->num_pages); in ttm_tt_set_caching()
168 for (i = 0; i < ttm->num_pages; ++i) { in ttm_tt_set_caching()
231 ttm->num_pages = bo->num_pages; in ttm_tt_init_fields()
323 for (i = 0; i < ttm->num_pages; ++i) { in ttm_tt_swapin()
365 ttm->num_pages << PAGE_SHIFT, in ttm_tt_swapout()
377 for (i = 0; i < ttm->num_pages; ++i) { in ttm_tt_swapout()
[all …]
ttm_bo_util.c
136 size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; in ttm_resource_ioremap()
257 memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); in ttm_bo_move_memcpy()
276 add = new_mem->num_pages - 1; in ttm_bo_move_memcpy()
279 for (i = 0; i < new_mem->num_pages; ++i) { in ttm_bo_move_memcpy()
437 unsigned long num_pages, in ttm_bo_kmap_ttm() argument
455 if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) { in ttm_bo_kmap_ttm()
471 map->virtual = vmap(ttm->pages + start_page, num_pages, in ttm_bo_kmap_ttm()
478 unsigned long start_page, unsigned long num_pages, in ttm_bo_kmap() argument
486 if (num_pages > bo->num_pages) in ttm_bo_kmap()
488 if (start_page > bo->num_pages) in ttm_bo_kmap()
[all …]
ttm_page_alloc.c
1045 ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, in ttm_pool_unpopulate_helper()
1059 if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx)) in ttm_pool_populate()
1062 ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags, in ttm_pool_populate()
1069 for (i = 0; i < ttm->num_pages; ++i) { in ttm_pool_populate()
1093 ttm_pool_unpopulate_helper(ttm, ttm->num_pages); in ttm_pool_unpopulate()
1107 for (i = 0; i < tt->ttm.num_pages; ++i) { in ttm_populate_and_map_pages()
1109 size_t num_pages = 1; in ttm_populate_and_map_pages() local
1111 for (j = i + 1; j < tt->ttm.num_pages; ++j) { in ttm_populate_and_map_pages()
1115 ++num_pages; in ttm_populate_and_map_pages()
1119 0, num_pages * PAGE_SIZE, in ttm_populate_and_map_pages()
[all …]
/drivers/tee/
tee_shm.c
20 unpin_user_pages(shm->pages, shm->num_pages); in release_registered_pages()
24 for (n = 0; n < shm->num_pages; n++) in release_registered_pages()
158 int num_pages; in tee_shm_register() local
189 num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE; in tee_shm_register()
190 shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL); in tee_shm_register()
197 rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE, in tee_shm_register()
203 kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL); in tee_shm_register()
209 for (i = 0; i < num_pages; i++) { in tee_shm_register()
214 rc = get_kernel_pages(kiov, num_pages, 0, shm->pages); in tee_shm_register()
218 shm->num_pages = rc; in tee_shm_register()
[all …]
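
Note: tee_shm_register() must cover a byte range aligned at neither end, so it rounds the end up and measures from the rounded-down start: num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE. A small sketch of that computation (4 KiB pages assumed; the rounding macros are local stand-ins for the kernel's roundup()/rounddown()):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define ROUND_UP(x, a)   ((((x) + (a) - 1) / (a)) * (a))
    #define ROUND_DOWN(x, a) (((x) / (a)) * (a))

    int main(void)
    {
        unsigned long addr = 0x2000f00UL; /* example user address */
        unsigned long length = 300;       /* example length in bytes */

        unsigned long start = ROUND_DOWN(addr, PAGE_SIZE);
        /* [addr, addr+length) straddles a page boundary here, so two
         * pages are needed even though length < PAGE_SIZE */
        int num_pages = (ROUND_UP(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;

        printf("num_pages=%d\n", num_pages); /* prints num_pages=2 */
        return 0;
    }
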
/drivers/gpu/drm/
drm_cache.c
61 unsigned long num_pages) in drm_cache_flush_clflush() argument
66 for (i = 0; i < num_pages; i++) in drm_cache_flush_clflush()
81 drm_clflush_pages(struct page *pages[], unsigned long num_pages) in drm_clflush_pages() argument
86 drm_cache_flush_clflush(pages, num_pages); in drm_clflush_pages()
96 for (i = 0; i < num_pages; i++) { in drm_clflush_pages()
drm_memory.c
63 unsigned long i, num_pages = in agp_remap() local
90 page_map = vmalloc(array_size(num_pages, sizeof(struct page *))); in agp_remap()
95 for (i = 0; i < num_pages; ++i) in agp_remap()
97 addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); in agp_remap()
/drivers/gpu/drm/gma500/
mmu.c
500 uint32_t num_pages, uint32_t desired_tile_stride, in psb_mmu_flush_ptes() argument
518 rows = num_pages / desired_tile_stride; in psb_mmu_flush_ptes()
520 desired_tile_stride = num_pages; in psb_mmu_flush_ptes()
548 uint32_t num_pages, uint32_t desired_tile_stride, in psb_mmu_flush_ptes() argument
556 unsigned long address, uint32_t num_pages) in psb_mmu_remove_pfn_sequence() argument
567 end = addr + (num_pages << PAGE_SHIFT); in psb_mmu_remove_pfn_sequence()
584 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); in psb_mmu_remove_pfn_sequence()
595 uint32_t num_pages, uint32_t desired_tile_stride, in psb_mmu_remove_pages() argument
609 rows = num_pages / desired_tile_stride; in psb_mmu_remove_pages()
611 desired_tile_stride = num_pages; in psb_mmu_remove_pages()
[all …]
mmu.h
69 uint32_t num_pages);
73 uint32_t num_pages, int type);
78 unsigned long address, uint32_t num_pages,
82 unsigned long address, uint32_t num_pages,
/drivers/firmware/efi/
memmap.c
257 end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1; in efi_memmap_split_count()
322 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1; in efi_memmap_insert()
331 md->num_pages = (m_end - md->phys_addr + 1) >> in efi_memmap_insert()
338 md->num_pages = (end - md->phys_addr + 1) >> in efi_memmap_insert()
344 md->num_pages = (m_start - md->phys_addr) >> in efi_memmap_insert()
352 md->num_pages = (m_end - m_start + 1) >> in efi_memmap_insert()
359 md->num_pages = (end - m_end) >> in efi_memmap_insert()
366 md->num_pages = (m_start - md->phys_addr) >> in efi_memmap_insert()
373 md->num_pages = (end - md->phys_addr + 1) >> in efi_memmap_insert()
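
Note: EFI memory descriptors count in fixed 4 KiB EFI pages (EFI_PAGE_SHIFT) regardless of the kernel's PAGE_SIZE, so byte sizes and inclusive range ends are computed as num_pages << EFI_PAGE_SHIFT, as in efi_memmap_insert() above. A sketch of that range arithmetic (struct efi_md is a trimmed-down stand-in for efi_memory_desc_t):

    #include <stdint.h>
    #include <stdio.h>

    #define EFI_PAGE_SHIFT 12 /* EFI pages are always 4 KiB */

    struct efi_md { uint64_t phys_addr; uint64_t num_pages; };

    int main(void)
    {
        struct efi_md md = { .phys_addr = 0x80000000ULL, .num_pages = 256 };

        /* inclusive end of the descriptor, as in efi_memmap_insert() */
        uint64_t end = md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT) - 1;

        /* splitting at m_start keeps the low part and recomputes its
         * page count from the byte span, mirroring the code above */
        uint64_t m_start = 0x80040000ULL;
        uint64_t low_pages = (m_start - md.phys_addr) >> EFI_PAGE_SHIFT;

        printf("end=%#llx low_pages=%llu\n",
               (unsigned long long)end, (unsigned long long)low_pages);
        return 0;
    }
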
memattr.c
59 u64 in_size = in->num_pages << EFI_PAGE_SHIFT; in entry_is_valid()
72 !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) { in entry_is_valid()
86 u64 md_size = md->num_pages << EFI_PAGE_SHIFT; in entry_is_valid()
164 size = md.num_pages << EFI_PAGE_SHIFT; in efi_memattr_apply_permissions()
/drivers/gpu/drm/amd/amdgpu/
amdgpu_ttm.c
241 unsigned num_pages, uint64_t offset, in amdgpu_ttm_map_buffer() argument
270 num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE; in amdgpu_ttm_map_buffer()
300 r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags, in amdgpu_ttm_map_buffer()
310 for (i = 0; i < num_pages; ++i) { in amdgpu_ttm_map_buffer()
480 new_mem->num_pages << PAGE_SHIFT, in amdgpu_move_blit()
642 if (nodes->size != mem->num_pages) in amdgpu_mem_visible()
733 atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); in amdgpu_bo_move()
746 size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; in amdgpu_ttm_io_mem_reserve()
764 (mm_node->size == mem->num_pages)) in amdgpu_ttm_io_mem_reserve()
871 range->hmm_pfns = kvmalloc_array(ttm->num_pages, in amdgpu_ttm_tt_get_user_pages()
[all …]
/drivers/gpu/drm/radeon/
radeon_ttm.c
161 unsigned num_pages; in radeon_move_blit() local
198 num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); in radeon_move_blit()
199 fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv); in radeon_move_blit()
357 atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved); in radeon_bo_move()
364 size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; in radeon_ttm_io_mem_reserve()
448 unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE; in radeon_ttm_tt_pin_userptr()
456 unsigned num_pages = ttm->num_pages - pinned; in radeon_ttm_tt_pin_userptr() local
460 r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0, in radeon_ttm_tt_pin_userptr()
467 } while (pinned < ttm->num_pages); in radeon_ttm_tt_pin_userptr()
469 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, in radeon_ttm_tt_pin_userptr()
[all …]
/drivers/tee/optee/
call.c
494 void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages, in optee_fill_pages_list() argument
539 if (!--num_pages) in optee_fill_pages_list()
591 static int check_mem_type(unsigned long start, size_t num_pages) in check_mem_type() argument
605 start + num_pages * PAGE_SIZE); in check_mem_type()
612 struct page **pages, size_t num_pages, in optee_shm_register() argument
621 if (!num_pages) in optee_shm_register()
624 rc = check_mem_type(start, num_pages); in optee_shm_register()
628 pages_list = optee_allocate_pages_list(num_pages); in optee_shm_register()
638 optee_fill_pages_list(pages_list, pages, num_pages, in optee_shm_register()
659 optee_free_pages_list(pages_list, num_pages); in optee_shm_register()
[all …]
/drivers/hv/
hv_balloon.c
281 __u32 num_pages; member
451 __u32 num_pages; member
1191 int num_pages = range_array->finfo.page_cnt; in free_balloon_pages() local
1196 for (i = 0; i < num_pages; i++) { in free_balloon_pages()
1207 unsigned int num_pages, in alloc_balloon_pages() argument
1214 for (i = 0; i < num_pages / alloc_unit; i++) { in alloc_balloon_pages()
1257 unsigned int num_pages = dm_device.balloon_wrk.num_pages; in balloon_up() local
1277 if (avail_pages < num_pages || avail_pages - num_pages < floor) { in balloon_up()
1279 avail_pages < num_pages ? "Not enough memory." : in balloon_up()
1282 num_pages = avail_pages > floor ? (avail_pages - floor) : 0; in balloon_up()
[all …]
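
Note: balloon_up() clamps the host's request so that ballooning never drives free memory below a floor: if fewer than num_pages are available, or taking them all would breach the floor, the request is reduced. The clamp in isolation (the values are examples, not Hyper-V defaults):

    #include <stdio.h>

    int main(void)
    {
        unsigned int num_pages = 5000;   /* pages the host asked for */
        unsigned int avail_pages = 6000; /* pages currently free */
        unsigned int floor = 2048;       /* pages that must stay free */

        /* same condition and clamp as balloon_up() */
        if (avail_pages < num_pages || avail_pages - num_pages < floor)
            num_pages = avail_pages > floor ? (avail_pages - floor) : 0;

        printf("ballooning %u pages\n", num_pages); /* 3952 here */
        return 0;
    }
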
