Searched refs:npages (Results 1 – 25 of 133) sorted by relevance

/drivers/gpu/drm/i915/selftests/
scatterlist.c
53 unsigned int npages = npages_fn(n, pt->st.nents, rnd); in expect_pfn_sg() local
61 if (sg->length != npages * PAGE_SIZE) { in expect_pfn_sg()
63 __func__, who, npages * PAGE_SIZE, sg->length); in expect_pfn_sg()
70 pfn += npages; in expect_pfn_sg()
209 unsigned long npages) in page_contiguous() argument
211 return first + npages == last; in page_contiguous()
238 unsigned long npages = npages_fn(n, count, rnd); in alloc_table() local
242 pfn_to_page(pfn + npages), in alloc_table()
243 npages)) { in alloc_table()
250 sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0); in alloc_table()
[all …]
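
The selftest above exercises the case where one scatterlist entry describes a whole run of physically contiguous pages, so sg->length must equal npages * PAGE_SIZE. A minimal sketch of that idea, with illustrative names rather than the selftest's own helpers:

#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * A run of npages contiguous pfns can be covered by a single
 * scatterlist entry of npages * PAGE_SIZE bytes.  The pointer
 * arithmetic mirrors the selftest's page_contiguous() shortcut.
 */
static bool pages_contiguous(struct page *first, struct page *last,
			     unsigned long npages)
{
	return first + npages == last;
}

static void sg_set_page_run(struct scatterlist *sg, unsigned long pfn,
			    unsigned long npages)
{
	/* one entry covering the whole run, starting at offset 0 */
	sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);
}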
/drivers/net/ethernet/mellanox/mlx5/core/
pagealloc.c
52 s32 npages; member
180 s32 *npages, int boot) in mlx5_cmd_query_pages() argument
196 *npages = MLX5_GET(query_pages_out, out, num_pages); in mlx5_cmd_query_pages()
334 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, in give_pages() argument
345 inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]); in give_pages()
353 for (i = 0; i < npages; i++) { in give_pages()
370 MLX5_SET(manage_pages_in, in, input_num_entries, npages); in give_pages()
376 func_id, npages, err); in give_pages()
380 dev->priv.fw_pages += npages; in give_pages()
382 dev->priv.vfs_pages += npages; in give_pages()
[all …]
alloc.c
80 buf->npages = 1; in mlx5_buf_alloc_node()
96 buf->npages *= 2; in mlx5_buf_alloc_node()
127 buf->npages = DIV_ROUND_UP(size, PAGE_SIZE); in mlx5_frag_buf_alloc_node()
129 buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list), in mlx5_frag_buf_alloc_node()
134 for (i = 0; i < buf->npages; i++) { in mlx5_frag_buf_alloc_node()
169 for (i = 0; i < buf->npages; i++) { in mlx5_frag_buf_free()
294 for (i = 0; i < buf->npages; i++) { in mlx5_fill_page_array()
307 for (i = 0; i < buf->npages; i++) in mlx5_fill_page_frag_array_perm()
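
In alloc.c the page count is derived from the byte size with DIV_ROUND_UP(), and each of the npages fragments is then backed by its own DMA allocation. A sketch of that sizing step under those assumptions (hypothetical struct and function names; the real mlx5 code is node-aware and tracks fragment strides):

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct buf_frag {
	void		*buf;
	dma_addr_t	map;
};

/* size a fragmented buffer: one PAGE_SIZE coherent allocation per page */
static struct buf_frag *frag_buf_alloc(struct device *dev, size_t size,
				       int *npages)
{
	struct buf_frag *frags;
	int i;

	*npages = DIV_ROUND_UP(size, PAGE_SIZE);
	frags = kcalloc(*npages, sizeof(*frags), GFP_KERNEL);
	if (!frags)
		return NULL;

	for (i = 0; i < *npages; i++) {
		frags[i].buf = dma_alloc_coherent(dev, PAGE_SIZE,
						  &frags[i].map, GFP_KERNEL);
		if (!frags[i].buf)
			goto err_free;
	}
	return frags;

err_free:
	while (i--)
		dma_free_coherent(dev, PAGE_SIZE, frags[i].buf, frags[i].map);
	kfree(frags);
	return NULL;
}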
/drivers/infiniband/hw/hfi1/
user_pages.c
30 u32 nlocked, u32 npages) in hfi1_can_pin_pages() argument
47 if (atomic64_read(&mm->pinned_vm) + npages > ulimit_pages) in hfi1_can_pin_pages()
66 if (nlocked + npages > (ulimit_pages / usr_ctxts / 4)) in hfi1_can_pin_pages()
74 if (nlocked + npages > cache_limit_pages) in hfi1_can_pin_pages()
80 int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages, in hfi1_acquire_user_pages() argument
86 ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages); in hfi1_acquire_user_pages()
96 size_t npages, bool dirty) in hfi1_release_user_pages() argument
98 unpin_user_pages_dirty_lock(p, npages, dirty); in hfi1_release_user_pages()
101 atomic64_sub(npages, &mm->pinned_vm); in hfi1_release_user_pages()
user_exp_rcv.c
16 static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
20 u16 pageidx, unsigned int npages);
137 unsigned int npages, in unpin_rcv_pages() argument
146 node->npages * PAGE_SIZE, DMA_FROM_DEVICE); in unpin_rcv_pages()
153 hfi1_release_user_pages(mm, pages, npages, mapped); in unpin_rcv_pages()
154 fd->tid_n_pinned -= npages; in unpin_rcv_pages()
163 unsigned int npages = tidbuf->npages; in pin_rcv_pages() local
168 if (npages > fd->uctxt->expected_count) { in pin_rcv_pages()
174 pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); in pin_rcv_pages()
183 if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) { in pin_rcv_pages()
[all …]
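
Both hfi1 files above follow the same pinning discipline: check that adding npages stays inside the memlock budget, pin with pin_user_pages_fast(), and keep mm->pinned_vm in step when the pages are released. A condensed sketch of that pattern with hypothetical names (the real hfi1_can_pin_pages() also applies the per-context and cache limits visible in the excerpt):

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>

static int pin_user_buffer(struct mm_struct *mm, unsigned long vaddr,
			   unsigned long npages, struct page **pages)
{
	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	int pinned;

	/* refuse to pin past RLIMIT_MEMLOCK unless privileged */
	if (atomic64_read(&mm->pinned_vm) + npages > limit &&
	    !capable(CAP_IPC_LOCK))
		return -ENOMEM;

	pinned = pin_user_pages_fast(vaddr, npages,
				     FOLL_WRITE | FOLL_LONGTERM, pages);
	if (pinned > 0)
		atomic64_add(pinned, &mm->pinned_vm);
	return pinned;
}

static void unpin_user_buffer(struct mm_struct *mm, struct page **pages,
			      unsigned long npages, bool dirty)
{
	unpin_user_pages_dirty_lock(pages, npages, dirty);
	atomic64_sub(npages, &mm->pinned_vm);
}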
/drivers/gpu/drm/i915/gem/selftests/
mock_dmabuf.c
21 err = sg_alloc_table(st, mock->npages, GFP_KERNEL); in mock_map_dma_buf()
26 for (i = 0; i < mock->npages; i++) { in mock_map_dma_buf()
58 for (i = 0; i < mock->npages; i++) in mock_dmabuf_release()
69 vaddr = vm_map_ram(mock->pages, mock->npages, 0); in mock_dmabuf_vmap()
81 vm_unmap_ram(map->vaddr, mock->npages); in mock_dmabuf_vunmap()
98 static struct dma_buf *mock_dmabuf(int npages) in mock_dmabuf() argument
105 mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *), in mock_dmabuf()
110 mock->npages = npages; in mock_dmabuf()
111 for (i = 0; i < npages; i++) { in mock_dmabuf()
118 exp_info.size = npages * PAGE_SIZE; in mock_dmabuf()
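
mock_dmabuf() above sizes a single kmalloc() to hold the header plus a trailing array of npages page pointers, which vm_map_ram() can later turn into one contiguous kernel mapping. A sketch of that allocation with a hypothetical type:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct mock_buf {
	int		npages;
	struct page	*pages[];	/* trailing array, one entry per page */
};

static struct mock_buf *mock_buf_alloc(int npages)
{
	struct mock_buf *buf;
	int i;

	/* header + npages pointers in one allocation, overflow-checked */
	buf = kmalloc(struct_size(buf, pages, npages), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->npages = npages;
	for (i = 0; i < npages; i++) {
		buf->pages[i] = alloc_page(GFP_KERNEL);
		if (!buf->pages[i])
			goto err_free;
	}
	return buf;

err_free:
	while (i--)
		__free_page(buf->pages[i]);
	kfree(buf);
	return NULL;
}

struct_size() stands in for the excerpt's open-coded sizeof(*mock) + npages * sizeof(struct page *); it performs the same arithmetic with an overflow check.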
/drivers/gpu/drm/amd/amdkfd/
kfd_migrate.c
44 svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, in svm_migrate_gart_map() argument
60 num_bytes = npages * 8; in svm_migrate_gart_map()
85 r = amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr); in svm_migrate_gart_map()
128 uint64_t *vram, uint64_t npages, in svm_migrate_copy_memory_gart() argument
141 while (npages) { in svm_migrate_copy_memory_gart()
142 size = min(GTT_MAX_PAGES, npages); in svm_migrate_copy_memory_gart()
167 npages -= size; in svm_migrate_copy_memory_gart()
168 if (npages) { in svm_migrate_copy_memory_gart()
272 uint64_t npages = migrate->cpages; in svm_migrate_copy_to_vram() local
284 dst = (uint64_t *)(scratch + npages); in svm_migrate_copy_to_vram()
[all …]
/drivers/infiniband/hw/mthca/
mthca_memfree.c
69 dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages, in mthca_free_icm_pages()
72 for (i = 0; i < chunk->npages; ++i) in mthca_free_icm_pages()
81 for (i = 0; i < chunk->npages; ++i) { in mthca_free_icm_coherent()
137 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, in mthca_alloc_icm() argument
157 while (npages > 0) { in mthca_alloc_icm()
165 chunk->npages = 0; in mthca_alloc_icm()
170 while (1 << cur_order > npages) in mthca_alloc_icm()
175 &chunk->mem[chunk->npages], in mthca_alloc_icm()
178 ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages], in mthca_alloc_icm()
182 ++chunk->npages; in mthca_alloc_icm()
[all …]
mthca_allocator.c
200 int npages, shift; in mthca_buf_alloc() local
207 npages = 1; in mthca_buf_alloc()
219 npages *= 2; in mthca_buf_alloc()
222 dma_list = kmalloc_array(npages, sizeof(*dma_list), in mthca_buf_alloc()
227 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
231 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; in mthca_buf_alloc()
234 dma_list = kmalloc_array(npages, sizeof(*dma_list), in mthca_buf_alloc()
239 buf->page_list = kmalloc_array(npages, in mthca_buf_alloc()
245 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
248 for (i = 0; i < npages; ++i) { in mthca_buf_alloc()
[all …]
/drivers/fpga/
dfl-afu-dma-region.c
37 int npages = region->length >> PAGE_SHIFT; in afu_dma_pin_pages() local
41 ret = account_locked_vm(current->mm, npages, true); in afu_dma_pin_pages()
45 region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL); in afu_dma_pin_pages()
51 pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE, in afu_dma_pin_pages()
56 } else if (pinned != npages) { in afu_dma_pin_pages()
70 account_locked_vm(current->mm, npages, false); in afu_dma_pin_pages()
85 long npages = region->length >> PAGE_SHIFT; in afu_dma_unpin_pages() local
88 unpin_user_pages(region->pages, npages); in afu_dma_unpin_pages()
90 account_locked_vm(current->mm, npages, false); in afu_dma_unpin_pages()
92 dev_dbg(dev, "%ld pages unpinned\n", npages); in afu_dma_unpin_pages()
[all …]
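
afu_dma_pin_pages() above charges the region against the locked-memory limit with account_locked_vm() before pinning, and has to unwind both the charge and any partially pinned pages on failure. A sketch of that sequence with a hypothetical region type, error paths condensed:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>

struct dma_region {
	u64		user_addr;
	u64		length;
	struct page	**pages;
};

static int dma_region_pin(struct dma_region *region)
{
	int npages = region->length >> PAGE_SHIFT;
	int pinned, ret;

	/* charge the pages against RLIMIT_MEMLOCK up front */
	ret = account_locked_vm(current->mm, npages, true);
	if (ret)
		return ret;

	region->pages = kcalloc(npages, sizeof(*region->pages), GFP_KERNEL);
	if (!region->pages) {
		ret = -ENOMEM;
		goto unlock_vm;
	}

	pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
				     region->pages);
	if (pinned < 0) {
		ret = pinned;
		goto free_pages;
	}
	if (pinned != npages) {
		/* partial pin: release what was pinned and fail */
		unpin_user_pages(region->pages, pinned);
		ret = -EFAULT;
		goto free_pages;
	}
	return 0;

free_pages:
	kfree(region->pages);
unlock_vm:
	account_locked_vm(current->mm, npages, false);
	return ret;
}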
/drivers/infiniband/core/
umem.c
152 unsigned long npages; in ib_umem_get() local
191 npages = ib_umem_num_pages(umem); in ib_umem_get()
192 if (npages == 0 || npages > UINT_MAX) { in ib_umem_get()
199 new_pinned = atomic64_add_return(npages, &mm->pinned_vm); in ib_umem_get()
201 atomic64_sub(npages, &mm->pinned_vm); in ib_umem_get()
211 while (npages) { in ib_umem_get()
214 min_t(unsigned long, npages, in ib_umem_get()
224 npages -= pinned; in ib_umem_get()
228 npages, GFP_KERNEL); in ib_umem_get()
ib_core_uverbs.c
141 pgoff, entry->npages); in rdma_user_mmap_entry_get_pgoff()
171 if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) { in rdma_user_mmap_entry_get()
191 for (i = 0; i < entry->npages; i++) in rdma_user_mmap_entry_free()
196 entry->start_pgoff, entry->npages); in rdma_user_mmap_entry_free()
269 u32 xa_first, xa_last, npages; in rdma_user_mmap_entry_insert_range() local
290 npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE); in rdma_user_mmap_entry_insert_range()
291 entry->npages = npages; in rdma_user_mmap_entry_insert_range()
301 if (check_add_overflow(xa_first, npages, &xa_last)) in rdma_user_mmap_entry_insert_range()
328 entry->start_pgoff, npages); in rdma_user_mmap_entry_insert_range()
/drivers/net/ethernet/mellanox/mlx4/
icm.c
60 dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages, in mlx4_free_icm_pages()
63 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_pages()
72 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_coherent()
132 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, in mlx4_alloc_icm() argument
159 while (npages > 0) { in mlx4_alloc_icm()
179 while (1 << cur_order > npages) in mlx4_alloc_icm()
188 &chunk->buf[chunk->npages], in mlx4_alloc_icm()
191 ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages], in mlx4_alloc_icm()
202 ++chunk->npages; in mlx4_alloc_icm()
206 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { in mlx4_alloc_icm()
[all …]
mr.c
194 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, in mlx4_mtt_init() argument
199 if (!npages) { in mlx4_mtt_init()
206 for (mtt->order = 0, i = 1; i < npages; i <<= 1) in mlx4_mtt_init()
418 u64 iova, u64 size, u32 access, int npages, in mlx4_mr_alloc_reserved() argument
428 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_alloc_reserved()
528 int npages, int page_shift, struct mlx4_mr *mr) in mlx4_mr_alloc() argument
538 access, npages, page_shift, mr); in mlx4_mr_alloc()
590 u64 iova, u64 size, int npages, in mlx4_mr_rereg_mem_write() argument
595 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_rereg_mem_write()
693 int start_index, int npages, u64 *page_list) in mlx4_write_mtt_chunk() argument
[all …]
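
mlx4_mtt_init() above derives the MTT order by counting doublings until the counter reaches npages, i.e. taking the ceiling of log2(npages). Under that reading, the loop collapses to a single helper from <linux/log2.h>; the npages == 0 case, which the excerpt handles separately, is represented here as -1:

#include <linux/log2.h>

/* ceil(log2(npages)); order_base_2(1) == 0, matching the loop above */
static int mtt_order(int npages)
{
	return npages ? order_base_2(npages) : -1;	/* -1: no MTT needed */
}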
/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_misc.c
53 u64 npages, bool alloc_pages) in pvrdma_page_dir_init() argument
57 if (npages > PVRDMA_PAGE_DIR_MAX_PAGES) in pvrdma_page_dir_init()
67 pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1; in pvrdma_page_dir_init()
81 pdir->npages = npages; in pvrdma_page_dir_init()
84 pdir->pages = kcalloc(npages, sizeof(*pdir->pages), in pvrdma_page_dir_init()
89 for (i = 0; i < pdir->npages; i++) { in pvrdma_page_dir_init()
127 for (i = 0; i < pdir->npages && pdir->pages[i]; i++) { in pvrdma_page_dir_cleanup_pages()
173 if (idx >= pdir->npages) in pvrdma_page_dir_insert_dma()
189 if (offset >= pdir->npages) in pvrdma_page_dir_insert_umem()
212 if (num_pages > pdir->npages) in pvrdma_page_dir_insert_page_list()
pvrdma_mr.c
122 int ret, npages; in pvrdma_reg_user_mr() local
136 npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE); in pvrdma_reg_user_mr()
137 if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) { in pvrdma_reg_user_mr()
139 npages); in pvrdma_reg_user_mr()
154 ret = pvrdma_page_dir_init(dev, &mr->pdir, npages, false); in pvrdma_reg_user_mr()
171 cmd->nchunks = npages; in pvrdma_reg_user_mr()
306 if (mr->npages == mr->max_pages) in pvrdma_set_page()
309 mr->pages[mr->npages++] = addr; in pvrdma_set_page()
320 mr->npages = 0; in pvrdma_map_mr_sg()
/drivers/gpu/drm/etnaviv/
etnaviv_gem_prime.c
17 int npages = obj->size >> PAGE_SHIFT; in etnaviv_gem_prime_get_sg_table() local
22 return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages); in etnaviv_gem_prime_get_sg_table()
114 int ret, npages; in etnaviv_gem_prime_import_sg_table() local
123 npages = size / PAGE_SIZE; in etnaviv_gem_prime_import_sg_table()
126 etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); in etnaviv_gem_prime_import_sg_table()
132 ret = drm_prime_sg_to_page_array(sgt, etnaviv_obj->pages, npages); in etnaviv_gem_prime_import_sg_table()
/drivers/infiniband/hw/usnic/
usnic_uiom.c
95 unsigned long npages; in usnic_uiom_get_pages() local
123 npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT; in usnic_uiom_get_pages()
128 locked = atomic64_add_return(npages, &current->mm->pinned_vm); in usnic_uiom_get_pages()
143 while (npages) { in usnic_uiom_get_pages()
145 min_t(unsigned long, npages, in usnic_uiom_get_pages()
153 npages -= ret; in usnic_uiom_get_pages()
186 atomic64_sub(npages, &current->mm->pinned_vm); in usnic_uiom_get_pages()
218 int npages; in __usnic_uiom_reg_release() local
224 npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT; in __usnic_uiom_reg_release()
226 vpn_last = vpn_start + npages - 1; in __usnic_uiom_reg_release()
[all …]
/drivers/infiniband/hw/mlx4/
mr.c
98 int *start_index, int *npages) in mlx4_ib_umem_write_mtt_block() argument
127 pages[*npages] = cur_start_addr + (mtt_size * k); in mlx4_ib_umem_write_mtt_block()
128 (*npages)++; in mlx4_ib_umem_write_mtt_block()
133 if (*npages == PAGE_SIZE / sizeof(u64)) { in mlx4_ib_umem_write_mtt_block()
135 *npages, pages); in mlx4_ib_umem_write_mtt_block()
139 (*start_index) += *npages; in mlx4_ib_umem_write_mtt_block()
140 *npages = 0; in mlx4_ib_umem_write_mtt_block()
192 int npages = 0; in mlx4_ib_umem_write_mtt() local
218 &npages); in mlx4_ib_umem_write_mtt()
235 &start_index, &npages); in mlx4_ib_umem_write_mtt()
[all …]
/drivers/gpu/drm/i915/gem/
i915_gem_internal.c
39 unsigned int npages; in i915_gem_object_get_pages_internal() local
69 npages = obj->base.size / PAGE_SIZE; in i915_gem_object_get_pages_internal()
70 if (sg_alloc_table(st, npages, GFP_KERNEL)) { in i915_gem_object_get_pages_internal()
80 int order = min(fls(npages) - 1, max_order); in i915_gem_object_get_pages_internal()
99 npages -= 1 << order; in i915_gem_object_get_pages_internal()
100 if (!npages) { in i915_gem_object_get_pages_internal()
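
i915_gem_object_get_pages_internal() above fills its sg_table with the biggest page orders available: order = min(fls(npages) - 1, max_order) picks the largest power of two not exceeding the remainder, and npages -= 1 << order shrinks it after each block. A sketch of that loop with illustrative names and the sg-table bookkeeping left out:

#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/mm.h>

/*
 * Carve npages out of the page allocator in the largest power-of-two
 * chunks that fit, dropping to smaller orders when a high-order
 * allocation fails.  Blocks are chained on @blocks via page->lru; on
 * error the caller walks the list and frees what was already added.
 */
static int alloc_high_order_blocks(unsigned int npages, unsigned int max_order,
				   struct list_head *blocks)
{
	while (npages) {
		/* fls(npages) - 1 == largest order not exceeding the remainder */
		int order = min_t(int, fls(npages) - 1, max_order);
		struct page *page;

		for (;;) {
			page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);
			if (page)
				break;
			if (!order--)
				return -ENOMEM;	/* even a single page failed */
		}

		set_page_private(page, order);	/* remember order for freeing */
		list_add_tail(&page->lru, blocks);
		npages -= 1u << order;
	}
	return 0;
}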
/drivers/gpu/drm/amd/amdgpu/
amdgpu_mn.c
161 uint64_t start, uint64_t npages, in amdgpu_hmm_range_get_pages() argument
175 pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); in amdgpu_hmm_range_get_pages()
187 hmm_range->end = start + npages * PAGE_SIZE; in amdgpu_hmm_range_get_pages()
191 timeout = max(npages >> 17, 1ULL) * HMM_RANGE_DEFAULT_TIMEOUT; in amdgpu_hmm_range_get_pages()
219 for (i = 0; pages && i < npages; i++) in amdgpu_hmm_range_get_pages()
/drivers/misc/habanalabs/common/
memory.c
94 phys_pg_pack->npages = num_pgs; in alloc_device_memory()
293 for (i = 0; i < phys_pg_pack->npages ; i++) in free_phys_pg_pack()
297 for (i = 0 ; i < phys_pg_pack->npages ; i++) { in free_phys_pg_pack()
820 u32 npages, page_size = PAGE_SIZE, in init_phys_pg_pack_from_userptr() local
849 npages = hl_get_sg_info(sg, &dma_addr); in init_phys_pg_pack_from_userptr()
851 total_npages += npages; in init_phys_pg_pack_from_userptr()
853 if ((npages % pgs_in_huge_page) || in init_phys_pg_pack_from_userptr()
872 phys_pg_pack->npages = total_npages; in init_phys_pg_pack_from_userptr()
878 npages = hl_get_sg_info(sg, &dma_addr); in init_phys_pg_pack_from_userptr()
887 while (npages) { in init_phys_pg_pack_from_userptr()
[all …]
/drivers/infiniband/hw/bnxt_re/
qplib_res.c
119 pages = sginfo->npages; in __alloc_pbl()
186 u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0; in bnxt_qplib_alloc_init_hwq() local
211 npages = (depth * stride) / pg_size + aux_pages; in bnxt_qplib_alloc_init_hwq()
213 npages++; in bnxt_qplib_alloc_init_hwq()
214 if (!npages) in bnxt_qplib_alloc_init_hwq()
216 hwq_attr->sginfo->npages = npages; in bnxt_qplib_alloc_init_hwq()
218 npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem, in bnxt_qplib_alloc_init_hwq()
223 if (npages == MAX_PBL_LVL_0_PGS) { in bnxt_qplib_alloc_init_hwq()
231 if (npages > MAX_PBL_LVL_0_PGS) { in bnxt_qplib_alloc_init_hwq()
232 if (npages > MAX_PBL_LVL_1_PGS) { in bnxt_qplib_alloc_init_hwq()
[all …]
/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
gk20a.c
383 gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align, in gk20a_instobj_ctor_dma() argument
397 node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, in gk20a_instobj_ctor_dma()
414 node->r.length = (npages << PAGE_SHIFT) >> 12; in gk20a_instobj_ctor_dma()
421 gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align, in gk20a_instobj_ctor_iommu() argument
436 sizeof(*node->dma_addrs)) * npages), GFP_KERNEL))) in gk20a_instobj_ctor_iommu()
439 node->dma_addrs = (void *)(node->pages + npages); in gk20a_instobj_ctor_iommu()
445 for (i = 0; i < npages; i++) { in gk20a_instobj_ctor_iommu()
465 ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages, in gk20a_instobj_ctor_iommu()
474 for (i = 0; i < npages; i++) { in gk20a_instobj_ctor_iommu()
502 for (i = 0; i < npages && node->pages[i] != NULL; i++) { in gk20a_instobj_ctor_iommu()
/drivers/gpu/drm/omapdrm/
omap_gem.c
226 int npages = obj->size >> PAGE_SHIFT; in omap_gem_attach_pages() local
249 addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL); in omap_gem_attach_pages()
255 for (i = 0; i < npages; i++) { in omap_gem_attach_pages()
273 addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL); in omap_gem_attach_pages()
297 unsigned int npages = obj->size >> PAGE_SHIFT; in omap_gem_detach_pages() local
302 for (i = 0; i < npages; i++) { in omap_gem_detach_pages()
647 u32 npages = obj->size >> PAGE_SHIFT; in omap_gem_roll() local
650 if (roll > npages) { in omap_gem_roll()
665 ret = tiler_pin(omap_obj->block, omap_obj->pages, npages, in omap_gem_roll()
723 int i, npages = obj->size >> PAGE_SHIFT; in omap_gem_dma_sync_buffer() local
[all …]
