
Searched refs:npages (Results 1 – 25 of 114) sorted by relevance


/drivers/infiniband/hw/hfi1/
trace_rx.h 134 u32 npages, unsigned long va, unsigned long pa,
136 TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
141 __field(u32, npages)
150 __entry->npages = npages;
159 __entry->npages,
167 TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
169 TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
174 __field(u32, npages)
183 __entry->npages = npages;
192 __entry->npages,
[all …]
user_pages.c 72 u32 nlocked, u32 npages) in hfi1_can_pin_pages() argument
98 if (pinned + npages >= ulimit && !can_lock) in hfi1_can_pin_pages()
101 return ((nlocked + npages) <= size) || can_lock; in hfi1_can_pin_pages()
104 int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages, in hfi1_acquire_user_pages() argument
109 ret = get_user_pages_fast(vaddr, npages, writable, pages); in hfi1_acquire_user_pages()
121 size_t npages, bool dirty) in hfi1_release_user_pages() argument
125 for (i = 0; i < npages; i++) { in hfi1_release_user_pages()
133 mm->pinned_vm -= npages; in hfi1_release_user_pages()
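
Note: the user_pages.c hits above show the usual pinning pattern: check the requested npages against the RLIMIT_MEMLOCK budget, pin with get_user_pages_fast(), and account the result in mm->pinned_vm (undone page by page in hfi1_release_user_pages()). A minimal sketch of that pattern, not the hfi1 code itself; my_pin_user_buf() is a hypothetical helper and the exact get_user_pages_fast() signature varies across kernel versions:

static int my_pin_user_buf(struct mm_struct *mm, unsigned long vaddr,
			   size_t npages, bool writable, struct page **pages)
{
	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	int pinned;

	/* refuse to grow past the memlock budget unless privileged */
	if (mm->pinned_vm + npages > limit && !capable(CAP_IPC_LOCK))
		return -ENOMEM;

	pinned = get_user_pages_fast(vaddr, npages, writable, pages);
	if (pinned > 0)
		mm->pinned_vm += pinned;	/* hfi1 serializes this update */

	return pinned;
}
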
user_exp_rcv.c 68 unsigned npages; member
362 unsigned npages, ngroups, pageidx = 0, pageset_count, npagesets, in hfi1_user_exp_rcv_setup() local
370 npages = num_user_pages(vaddr, tinfo->length); in hfi1_user_exp_rcv_setup()
371 if (!npages) in hfi1_user_exp_rcv_setup()
374 if (npages > uctxt->expected_count) { in hfi1_user_exp_rcv_setup()
381 npages * PAGE_SIZE)) { in hfi1_user_exp_rcv_setup()
383 (void *)vaddr, npages); in hfi1_user_exp_rcv_setup()
393 pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); in hfi1_user_exp_rcv_setup()
404 if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) { in hfi1_user_exp_rcv_setup()
409 pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages); in hfi1_user_exp_rcv_setup()
[all …]
user_sdma.c 177 unsigned npages; member
193 unsigned npages; member
1063 pageidx == iovec->npages && in user_sdma_send_pkts()
1125 static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages) in sdma_cache_evict() argument
1130 evict_data.target = npages; in sdma_cache_evict()
1138 int ret = 0, pinned, npages, cleared; in pin_vector_pages() local
1162 npages = num_user_pages(&iovec->iov); in pin_vector_pages()
1163 if (node->npages < npages) { in pin_vector_pages()
1164 pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); in pin_vector_pages()
1170 memcpy(pages, node->pages, node->npages * sizeof(*pages)); in pin_vector_pages()
[all …]
/drivers/gpu/drm/ttm/
ttm_page_alloc.c 77 unsigned npages; member
276 static void ttm_pages_put(struct page *pages[], unsigned npages) in ttm_pages_put() argument
279 if (set_pages_array_wb(pages, npages)) in ttm_pages_put()
280 pr_err("Failed to set %d pages to wb!\n", npages); in ttm_pages_put()
281 for (i = 0; i < npages; ++i) in ttm_pages_put()
288 pool->npages -= freed_pages; in ttm_pool_update_free_locked()
430 count += _manager->pools[i].npages; in ttm_pool_shrink_count()
593 && count > pool->npages) { in ttm_page_pool_fill_locked()
611 pool->npages += alloc_size; in ttm_page_pool_fill_locked()
619 pool->npages += cpages; in ttm_page_pool_fill_locked()
[all …]
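
Note: ttm_pool_shrink_count() above simply sums the npages counter of every pool; that counter is kept consistent with each fill (pool->npages += alloc_size / cpages) and each free (pool->npages -= freed_pages) under the pool lock. A minimal count_objects-style sketch under that assumption, with MY_NUM_POOLS and my_manager standing in for the driver's real names:

static unsigned long my_pool_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	unsigned long count = 0;
	unsigned int i;

	for (i = 0; i < MY_NUM_POOLS; i++)
		count += my_manager->pools[i].npages;

	return count;
}
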
/drivers/net/ethernet/mellanox/mlx5/core/
pagealloc.c 50 s32 npages; member
135 s32 *npages, int boot) in mlx5_cmd_query_pages() argument
150 *npages = MLX5_GET(query_pages_out, out, num_pages); in mlx5_cmd_query_pages()
271 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, in give_pages() argument
281 inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]); in give_pages()
289 for (i = 0; i < npages; i++) { in give_pages()
306 MLX5_SET(manage_pages_in, in, input_num_entries, npages); in give_pages()
311 func_id, npages, err); in give_pages()
315 dev->priv.fw_pages += npages; in give_pages()
317 dev->priv.vfs_pages += npages; in give_pages()
[all …]
/drivers/infiniband/core/
umem.c 57 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { in __ib_umem_release()
91 unsigned long npages; in ib_umem_get() local
166 npages = ib_umem_num_pages(umem); in ib_umem_get()
170 locked = npages + current->mm->pinned_vm; in ib_umem_get()
180 if (npages == 0 || npages > UINT_MAX) { in ib_umem_get()
185 ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL); in ib_umem_get()
195 while (npages) { in ib_umem_get()
197 min_t(unsigned long, npages, in ib_umem_get()
204 umem->npages += ret; in ib_umem_get()
206 npages -= ret; in ib_umem_get()
[all …]
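
Note: ib_umem_get() above charges the whole region against the memlock limit up front (locked = npages + current->mm->pinned_vm) and then pins it in bounded chunks, never asking get_user_pages() for more pages than fit in one page worth of struct page pointers. A rough sketch of that consume loop; the exact get_user_pages() signature differs across kernel versions:

	while (npages) {
		unsigned long chunk = min_t(unsigned long, npages,
					    PAGE_SIZE / sizeof(struct page *));

		ret = get_user_pages(cur_base, chunk, gup_flags, page_list, NULL);
		if (ret < 0)
			goto out;

		umem->npages += ret;
		cur_base     += ret * PAGE_SIZE;
		npages       -= ret;
		/* ...the ret pages are then appended to umem->sg_head... */
	}
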
umem_odp.c 528 int j, k, ret = 0, start_idx, npages = 0; in ib_umem_odp_map_dma_pages() local
579 npages = get_user_pages_remote(owning_process, owning_mm, in ib_umem_odp_map_dma_pages()
584 if (npages < 0) in ib_umem_odp_map_dma_pages()
587 bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt); in ib_umem_odp_map_dma_pages()
588 user_virt += npages << PAGE_SHIFT; in ib_umem_odp_map_dma_pages()
590 for (j = 0; j < npages; ++j) { in ib_umem_odp_map_dma_pages()
602 for (++j; j < npages; ++j) in ib_umem_odp_map_dma_pages()
609 if (npages < 0 && k == start_idx) in ib_umem_odp_map_dma_pages()
610 ret = npages; in ib_umem_odp_map_dma_pages()
/drivers/infiniband/hw/mthca/
mthca_memfree.c 69 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, in mthca_free_icm_pages()
72 for (i = 0; i < chunk->npages; ++i) in mthca_free_icm_pages()
81 for (i = 0; i < chunk->npages; ++i) { in mthca_free_icm_coherent()
137 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, in mthca_alloc_icm() argument
157 while (npages > 0) { in mthca_alloc_icm()
165 chunk->npages = 0; in mthca_alloc_icm()
170 while (1 << cur_order > npages) in mthca_alloc_icm()
175 &chunk->mem[chunk->npages], in mthca_alloc_icm()
178 ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages], in mthca_alloc_icm()
182 ++chunk->npages; in mthca_alloc_icm()
[all …]
mthca_allocator.c 199 int npages, shift; in mthca_buf_alloc() local
206 npages = 1; in mthca_buf_alloc()
220 npages *= 2; in mthca_buf_alloc()
223 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mthca_buf_alloc()
227 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
231 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; in mthca_buf_alloc()
234 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mthca_buf_alloc()
238 buf->page_list = kmalloc(npages * sizeof *buf->page_list, in mthca_buf_alloc()
243 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
246 for (i = 0; i < npages; ++i) { in mthca_buf_alloc()
[all …]
/drivers/net/ethernet/mellanox/mlx4/
mr.c 197 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, in mlx4_mtt_init() argument
202 if (!npages) { in mlx4_mtt_init()
209 for (mtt->order = 0, i = 1; i < npages; i <<= 1) in mlx4_mtt_init()
420 u64 iova, u64 size, u32 access, int npages, in mlx4_mr_alloc_reserved() argument
430 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_alloc_reserved()
530 int npages, int page_shift, struct mlx4_mr *mr) in mlx4_mr_alloc() argument
540 access, npages, page_shift, mr); in mlx4_mr_alloc()
592 u64 iova, u64 size, int npages, in mlx4_mr_rereg_mem_write() argument
597 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_rereg_mem_write()
695 int start_index, int npages, u64 *page_list) in mlx4_write_mtt_chunk() argument
[all …]
icm.c 59 pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages, in mlx4_free_icm_pages()
62 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_pages()
71 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_coherent()
132 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, in mlx4_alloc_icm() argument
158 while (npages > 0) { in mlx4_alloc_icm()
173 chunk->npages = 0; in mlx4_alloc_icm()
178 while (1 << cur_order > npages) in mlx4_alloc_icm()
183 &chunk->mem[chunk->npages], in mlx4_alloc_icm()
186 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], in mlx4_alloc_icm()
197 ++chunk->npages; in mlx4_alloc_icm()
[all …]
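
Note: mthca_alloc_icm() above, mlx4_alloc_icm() here, and hns_roce_alloc_hem() further down all consume npages with the same descending-order strategy: allocate the largest power-of-two block that still fits, fall back to a smaller order when allocation fails, and subtract whatever was obtained. A stripped-down sketch of that loop, with the chunk/scatterlist bookkeeping omitted and MY_ICM_ALLOC_SIZE standing in for the driver's preferred chunk size:

	int cur_order = get_order(MY_ICM_ALLOC_SIZE);

	while (npages > 0) {
		struct page *page;

		/* never over-allocate: drop to an order that still fits */
		while (1 << cur_order > npages)
			--cur_order;

		page = alloc_pages(gfp_mask | __GFP_NOWARN, cur_order);
		if (!page) {
			if (--cur_order < 0)
				return -ENOMEM;	/* even order-0 failed */
			continue;		/* retry with a smaller order */
		}

		/* ...add the 1 << cur_order pages to the current chunk... */
		npages -= 1 << cur_order;
	}
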
/drivers/infiniband/hw/hns/
hns_roce_mr.c 192 int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift, in hns_roce_mtt_init() argument
199 if (!npages) { in hns_roce_mtt_init()
209 for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages; in hns_roce_mtt_init()
234 u64 size, u32 access, int npages, in hns_roce_mr_alloc() argument
259 mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, in hns_roce_mr_alloc()
273 int npages = 0; in hns_roce_mr_free() local
284 npages = ib_umem_page_count(mr->umem); in hns_roce_mr_free()
285 dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf, in hns_roce_mr_free()
342 u32 npages, u64 *page_list) in hns_roce_write_mtt_chunk() argument
351 (start_index + npages - 1) / (PAGE_SIZE / sizeof(u64))) in hns_roce_write_mtt_chunk()
[all …]
hns_roce_hem.c 45 struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages, in hns_roce_alloc_hem() argument
66 while (npages > 0) { in hns_roce_alloc_hem()
74 chunk->npages = 0; in hns_roce_alloc_hem()
79 while (1 << order > npages) in hns_roce_alloc_hem()
86 mem = &chunk->mem[chunk->npages]; in hns_roce_alloc_hem()
96 ++chunk->npages; in hns_roce_alloc_hem()
98 npages -= 1 << order; in hns_roce_alloc_hem()
117 for (i = 0; i < chunk->npages; ++i) in hns_roce_free_hem()
296 for (i = 0; i < chunk->npages; ++i) { in hns_roce_table_find()
/drivers/infiniband/hw/mlx5/
mr.c 164 int npages = 1 << ent->order; in add_keys() local
195 MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2); in add_keys()
753 int npages; in get_octo_len() local
756 npages = ALIGN(len + offset, page_size) >> ilog2(page_size); in get_octo_len()
757 return (npages + 1) / 2; in get_octo_len()
766 int npages, int page_shift, int *size, in dma_map_mr_pas() argument
777 *size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT); in dma_map_mr_pas()
785 memset(pas + npages, 0, *size - npages * sizeof(u64)); in dma_map_mr_pas()
816 umrwr->npages = n; in prep_umr_wqe_common()
849 int access_flags, int *npages, in mr_umem_get() argument
[all …]
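
Note: get_octo_len() above turns a byte length into the 16-byte "octoword" units mlx5 uses to size its translation table: the region needs npages 8-byte entries, so it occupies (npages + 1) / 2 octowords, the +1 rounding up when npages is odd. For example, a 5-page region needs 5 * 8 = 40 bytes of translations, i.e. (5 + 1) / 2 = 3 octowords, the same (npages + 1) / 2 expression add_keys() stores in translations_octword_size.
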
odp.c 189 int npages = 0, ret = 0; in pagefault_single_data_segment() local
239 npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt, in pagefault_single_data_segment()
241 if (npages < 0) { in pagefault_single_data_segment()
242 ret = npages; in pagefault_single_data_segment()
246 if (npages > 0) { in pagefault_single_data_segment()
254 ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0); in pagefault_single_data_segment()
266 u32 new_mappings = npages * PAGE_SIZE - in pagefault_single_data_segment()
291 return ret ? ret : npages; in pagefault_single_data_segment()
317 int ret = 0, npages = 0; in pagefault_data_segments() local
375 npages += ret; in pagefault_data_segments()
[all …]
/drivers/infiniband/hw/cxgb3/
iwch_mem.c 78 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) in iwch_alloc_pbl() argument
81 npages << 3); in iwch_alloc_pbl()
86 mhp->attr.pbl_size = npages; in iwch_alloc_pbl()
97 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset) in iwch_write_pbl() argument
100 mhp->attr.pbl_addr + (offset << 3), npages); in iwch_write_pbl()
/drivers/infiniband/hw/usnic/
usnic_uiom.c 108 unsigned long npages; in usnic_uiom_get_pages() local
125 npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT; in usnic_uiom_get_pages()
129 locked = npages + current->mm->locked_vm; in usnic_uiom_get_pages()
144 while (npages) { in usnic_uiom_get_pages()
146 min_t(unsigned long, npages, in usnic_uiom_get_pages()
153 npages -= ret; in usnic_uiom_get_pages()
218 int npages; in __usnic_uiom_reg_release() local
224 npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT; in __usnic_uiom_reg_release()
226 vpn_last = vpn_start + npages - 1; in __usnic_uiom_reg_release()
340 unsigned long npages; in usnic_uiom_reg_get() local
[all …]
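
Note: usnic_uiom_get_pages() derives npages the way most drivers in this list do: add the start address's offset within its page to the length, round up to a page boundary, then shift. A worked example assuming 4 KiB pages:

	/* addr = 0x12345ff8, size = 100 */
	offset = addr & ~PAGE_MASK;				/* 0xff8 = 4088 */
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;	/* PAGE_ALIGN(4188) = 8192 -> 2 */

A 100-byte buffer that starts 8 bytes before a page boundary really does touch two pages, which a plain size >> PAGE_SHIFT would miss.
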
/drivers/gpu/drm/etnaviv/
etnaviv_gem_prime.c 105 int ret, npages; in etnaviv_gem_prime_import_sg_table() local
113 npages = size / PAGE_SIZE; in etnaviv_gem_prime_import_sg_table()
116 etnaviv_obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); in etnaviv_gem_prime_import_sg_table()
123 NULL, npages); in etnaviv_gem_prime_import_sg_table()
/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
gk20a.c 374 gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align, in gk20a_instobj_ctor_dma() argument
387 node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, in gk20a_instobj_ctor_dma()
404 node->r.length = (npages << PAGE_SHIFT) >> 12; in gk20a_instobj_ctor_dma()
415 gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align, in gk20a_instobj_ctor_iommu() argument
430 sizeof(*node->dma_addrs)) * npages), GFP_KERNEL))) in gk20a_instobj_ctor_iommu()
433 node->dma_addrs = (void *)(node->pages + npages); in gk20a_instobj_ctor_iommu()
438 for (i = 0; i < npages; i++) { in gk20a_instobj_ctor_iommu()
458 ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages, in gk20a_instobj_ctor_iommu()
467 for (i = 0; i < npages; i++) { in gk20a_instobj_ctor_iommu()
499 for (i = 0; i < npages && node->pages[i] != NULL; i++) { in gk20a_instobj_ctor_iommu()
/drivers/gpu/drm/udl/
udl_dmabuf.c 221 int npages; in udl_prime_create() local
223 npages = size / PAGE_SIZE; in udl_prime_create()
226 obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE); in udl_prime_create()
231 obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); in udl_prime_create()
233 DRM_ERROR("obj pages is NULL %d\n", npages); in udl_prime_create()
237 drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); in udl_prime_create()
/drivers/gpu/drm/omapdrm/
omap_gem.c 246 int npages = obj->size >> PAGE_SHIFT; in omap_gem_attach_pages() local
262 addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL); in omap_gem_attach_pages()
268 for (i = 0; i < npages; i++) { in omap_gem_attach_pages()
286 addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL); in omap_gem_attach_pages()
337 int i, npages = obj->size >> PAGE_SHIFT; in omap_gem_detach_pages() local
338 for (i = 0; i < npages; i++) { in omap_gem_detach_pages()
703 uint32_t npages = obj->size >> PAGE_SHIFT; in omap_gem_roll() local
706 if (roll > npages) { in omap_gem_roll()
721 ret = tiler_pin(omap_obj->block, pages, npages, roll, true); in omap_gem_roll()
772 int i, npages = obj->size >> PAGE_SHIFT; in omap_gem_dma_sync() local
[all …]
/drivers/staging/lustre/lustre/ptlrpc/
sec_bulk.c 168 static void enc_pools_release_free_pages(long npages) in enc_pools_release_free_pages() argument
173 LASSERT(npages > 0); in enc_pools_release_free_pages()
174 LASSERT(npages <= page_pools.epp_free_pages); in enc_pools_release_free_pages()
180 page_pools.epp_free_pages -= npages; in enc_pools_release_free_pages()
181 page_pools.epp_total_pages -= npages; in enc_pools_release_free_pages()
191 while (npages--) { in enc_pools_release_free_pages()
270 int npages_to_npools(unsigned long npages) in npages_to_npools() argument
272 return (int)((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL); in npages_to_npools()
/drivers/staging/lustre/lustre/obdecho/
echo_client.c 158 struct page **pages, int npages, int async);
1003 struct page **pages, int npages, int async) in cl_echo_object_brw() argument
1037 offset + npages * PAGE_SIZE - 1, in cl_echo_object_brw()
1043 for (i = 0; i < npages; i++) { in cl_echo_object_brw()
1243 u32 npages; in echo_client_kbrw() local
1267 npages = count >> PAGE_SHIFT; in echo_client_kbrw()
1272 pga = kcalloc(npages, sizeof(*pga), GFP_NOFS); in echo_client_kbrw()
1276 pages = kcalloc(npages, sizeof(*pages), GFP_NOFS); in echo_client_kbrw()
1283 i < npages; in echo_client_kbrw()
1305 rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async); in echo_client_kbrw()
[all …]
/drivers/firmware/efi/
arm-init.c 181 u64 paddr, npages, size; in reserve_regions() local
196 npages = md->num_pages; in reserve_regions()
202 paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, in reserve_regions()
206 memrange_efi_to_native(&paddr, &npages); in reserve_regions()
207 size = npages << PAGE_SHIFT; in reserve_regions()
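
Note: reserve_regions() is a reminder that npages is not always counted in the kernel's own page size: EFI memory descriptors count EFI_PAGE_SIZE (4 KiB) pages, which is why the debug print uses npages << EFI_PAGE_SHIFT, and only after memrange_efi_to_native() rounds the range out to native page boundaries is size = npages << PAGE_SHIFT meaningful. Roughly, under that assumption about the helper:

	u64 paddr  = md->phys_addr;
	u64 npages = md->num_pages;		/* units: EFI_PAGE_SIZE (4 KiB) */

	memrange_efi_to_native(&paddr, &npages);	/* now in units of PAGE_SIZE */
	size = npages << PAGE_SHIFT;		/* correct even with 64 KiB pages */
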
