
Searched refs:npages (Results 1 – 25 of 25) sorted by relevance

/drivers/infiniband/hw/cxgb3/
iwch_mem.c 77 int npages) in iwch_reregister_mem() argument
82 if (npages > mhp->attr.pbl_size) in iwch_reregister_mem()
101 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) in iwch_alloc_pbl() argument
104 npages << 3); in iwch_alloc_pbl()
109 mhp->attr.pbl_size = npages; in iwch_alloc_pbl()
120 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset) in iwch_write_pbl() argument
123 mhp->attr.pbl_addr + (offset << 3), npages); in iwch_write_pbl()
130 int *npages, in build_phys_page_list() argument
168 *npages = 0; in build_phys_page_list()
170 *npages += (buffer_list[i].size + in build_phys_page_list()
[all …]
cxio_dbg.c 77 int size, npages; in cxio_dump_pbl() local
80 npages = (len + (1ULL << shift) - 1) >> shift; in cxio_dump_pbl()
81 size = npages * sizeof(u64); in cxio_dump_pbl()
92 __func__, m->addr, m->len, npages); in cxio_dump_pbl()
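
The cxio_dump_pbl() lines above round a byte length up to a page count with shift arithmetic and then size the dump buffer as one u64 per page. A minimal, self-contained illustration of that arithmetic in plain userspace C (the length and shift values below are hypothetical, not taken from the driver):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t len   = 1ULL << 20;   /* hypothetical region length: 1 MiB    */
        int      shift = 12;           /* hypothetical page shift: 4 KiB pages */

        /* Round the length up to whole pages, as in cxio_dump_pbl(). */
        uint64_t npages = (len + (1ULL << shift) - 1) >> shift;

        /* The PBL holds one 64-bit address per page. */
        uint64_t size = npages * sizeof(uint64_t);

        printf("npages=%llu, pbl bytes=%llu\n",
               (unsigned long long)npages, (unsigned long long)size);
        return 0;
    }
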
iwch_provider.h 346 int npages);
347 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
349 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
354 int *npages,
iwch_provider.c 466 int npages; in iwch_register_phys_mem() local
495 &total_size, &npages, &shift, &page_list); in iwch_register_phys_mem()
499 ret = iwch_alloc_pbl(mhp, npages); in iwch_register_phys_mem()
505 ret = iwch_write_pbl(mhp, page_list, npages, 0); in iwch_register_phys_mem()
518 mhp->attr.pbl_size = npages; in iwch_register_phys_mem()
548 int npages; in iwch_reregister_phys_mem() local
574 &total_size, &npages, in iwch_reregister_phys_mem()
580 ret = iwch_reregister_mem(rhp, php, &mh, shift, npages); in iwch_reregister_phys_mem()
594 mhp->attr.pbl_size = npages; in iwch_reregister_phys_mem()
/drivers/net/mlx4/
mr.c 200 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, in mlx4_mtt_init() argument
205 if (!npages) { in mlx4_mtt_init()
212 for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1) in mlx4_mtt_init()
267 int npages, int page_shift, struct mlx4_mr *mr) in mlx4_mr_alloc() argument
284 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_alloc()
381 int start_index, int npages, u64 *page_list) in mlx4_write_mtt_chunk() argument
391 (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64))) in mlx4_write_mtt_chunk()
402 for (i = 0; i < npages; ++i) in mlx4_write_mtt_chunk()
405 dma_sync_single(&dev->pdev->dev, dma_handle, npages * sizeof (u64), DMA_TO_DEVICE); in mlx4_write_mtt_chunk()
411 int start_index, int npages, u64 *page_list) in mlx4_write_mtt() argument
[all …]
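
The mlx4_mtt_init() snippet shows the buddy-order computation: starting from MLX4_MTT_ENTRY_PER_SEG entries, keep doubling until npages entries fit, counting the doublings as the allocation order. A standalone sketch of that loop; the segment size of 8 and the npages value are assumed example numbers, not values from these results:

    #include <stdio.h>

    int main(void)
    {
        int entry_per_seg = 8;   /* assumed example for MLX4_MTT_ENTRY_PER_SEG */
        int npages = 1000;       /* hypothetical number of MTT entries needed  */
        int order, i;

        /* Count how many doublings of one segment are needed to cover
         * npages entries, mirroring the loop in mlx4_mtt_init(). */
        for (order = 0, i = entry_per_seg; i < npages; i <<= 1)
            ++order;

        printf("npages=%d -> buddy order %d (%d entries)\n", npages, order, i);
        return 0;
    }
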
icm.c 59 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, in mlx4_free_icm_pages()
62 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_pages()
71 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_coherent()
122 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, in mlx4_alloc_icm() argument
142 while (npages > 0) { in mlx4_alloc_icm()
150 chunk->npages = 0; in mlx4_alloc_icm()
155 while (1 << cur_order > npages) in mlx4_alloc_icm()
160 &chunk->mem[chunk->npages], in mlx4_alloc_icm()
163 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], in mlx4_alloc_icm()
167 ++chunk->npages; in mlx4_alloc_icm()
[all …]
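
mlx4_alloc_icm() works npages down chunk by chunk, lowering the allocation order whenever 1 << cur_order would overshoot the pages still needed. The sketch below imitates just that order-reduction loop in plain C; malloc stands in for the kernel page allocator, the starting order is an assumed value, and the driver's fallback-on-failure path is omitted:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ORDER 8   /* assumed starting allocation order for this sketch */

    int main(void)
    {
        int npages = 37;              /* hypothetical total pages to allocate */
        int cur_order = MAX_ORDER;

        while (npages > 0) {
            /* Never allocate a block larger than what is still needed,
             * as in mlx4_alloc_icm(). */
            while (1 << cur_order > npages)
                --cur_order;

            void *block = malloc((size_t)(1 << cur_order) * 4096);
            if (!block) {
                perror("malloc");
                return 1;
            }
            printf("allocated 2^%d pages, %d left\n",
                   cur_order, npages - (1 << cur_order));
            npages -= 1 << cur_order;
            free(block);
        }
        return 0;
    }
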
eq.c 342 int npages; in mlx4_create_eq() local
351 npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE; in mlx4_create_eq()
353 eq->page_list = kmalloc(npages * sizeof *eq->page_list, in mlx4_create_eq()
358 for (i = 0; i < npages; ++i) in mlx4_create_eq()
361 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mlx4_create_eq()
370 for (i = 0; i < npages; ++i) { in mlx4_create_eq()
392 err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); in mlx4_create_eq()
396 err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); in mlx4_create_eq()
431 for (i = 0; i < npages; ++i) in mlx4_create_eq()
453 int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE; in mlx4_free_eq() local
[all …]
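
Both mlx4_create_eq() above and mthca_create_eq() further down derive npages by rounding a queue of fixed-size entries up to whole pages. The fragment below shows the same computation with made-up entry counts; PAGE_ALIGN is open-coded because it is a kernel macro, and the 32-byte entry size is only an assumed example:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long nent = 512;        /* hypothetical number of EQ entries */
        unsigned long entry_size = 32;   /* assumed EQ entry size in bytes    */

        /* Round the queue size up to whole pages, as in mlx4_create_eq(). */
        unsigned long npages = PAGE_ALIGN(nent * entry_size) / PAGE_SIZE;

        printf("%lu entries of %lu bytes -> %lu page(s)\n",
               nent, entry_size, npages);
        return 0;
    }
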
icm.h 52 int npages; member
70 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
alloc.c 199 buf->npages = 1; in mlx4_buf_alloc()
210 buf->npages *= 2; in mlx4_buf_alloc()
218 buf->npages = buf->nbufs; in mlx4_buf_alloc()
407 err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift, in mlx4_alloc_hwq_res()
/drivers/infiniband/hw/mthca/
mthca_memfree.c 68 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, in mthca_free_icm_pages()
71 for (i = 0; i < chunk->npages; ++i) in mthca_free_icm_pages()
80 for (i = 0; i < chunk->npages; ++i) { in mthca_free_icm_coherent()
136 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, in mthca_alloc_icm() argument
156 while (npages > 0) { in mthca_alloc_icm()
164 chunk->npages = 0; in mthca_alloc_icm()
169 while (1 << cur_order > npages) in mthca_alloc_icm()
174 &chunk->mem[chunk->npages], in mthca_alloc_icm()
177 ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages], in mthca_alloc_icm()
181 ++chunk->npages; in mthca_alloc_icm()
[all …]
mthca_allocator.c 199 int npages, shift; in mthca_buf_alloc() local
206 npages = 1; in mthca_buf_alloc()
220 npages *= 2; in mthca_buf_alloc()
223 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mthca_buf_alloc()
227 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
231 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; in mthca_buf_alloc()
234 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mthca_buf_alloc()
238 buf->page_list = kmalloc(npages * sizeof *buf->page_list, in mthca_buf_alloc()
243 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
246 for (i = 0; i < npages; ++i) { in mthca_buf_alloc()
[all …]
mthca_eq.c 469 int npages; in mthca_create_eq() local
480 npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE; in mthca_create_eq()
482 eq->page_list = kmalloc(npages * sizeof *eq->page_list, in mthca_create_eq()
487 for (i = 0; i < npages; ++i) in mthca_create_eq()
490 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mthca_create_eq()
499 for (i = 0; i < npages; ++i) { in mthca_create_eq()
519 dma_list, PAGE_SHIFT, npages, in mthca_create_eq()
520 0, npages * PAGE_SIZE, in mthca_create_eq()
577 for (i = 0; i < npages; ++i) in mthca_create_eq()
600 int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / in mthca_free_eq() local
[all …]
mthca_memfree.h 53 int npages; member
82 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
145 int npages; member
mthca_provider.c 933 int npages; in mthca_reg_phys_mr() local
960 npages = 0; in mthca_reg_phys_mr()
962 npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift; in mthca_reg_phys_mr()
964 if (!npages) in mthca_reg_phys_mr()
967 page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL); in mthca_reg_phys_mr()
985 shift, npages); in mthca_reg_phys_mr()
989 page_list, shift, npages, in mthca_reg_phys_mr()
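
mthca_reg_phys_mr(), like build_phys_page_list() in the cxgb3 results above, sums the number of pages each buffer spans at the chosen shift, rejects an empty list, and then allocates one page-list slot per page. A compact re-creation of that accounting with invented buffer sizes and a stand-in struct in place of struct ib_phys_buf:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct buf { uint64_t size; };   /* stand-in for struct ib_phys_buf */

    int main(void)
    {
        struct buf buffer_list[] = { { 4096 }, { 10000 }, { 1 } };  /* hypothetical */
        int num_phys_buf = 3;
        int shift = 12;              /* 4 KiB pages */
        int i, npages = 0;

        /* One rounded-up page count per buffer, summed, as in mthca_reg_phys_mr(). */
        for (i = 0; i < num_phys_buf; ++i)
            npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;

        if (!npages)
            return 1;

        uint64_t *page_list = malloc(npages * sizeof *page_list);
        if (!page_list)
            return 1;
        printf("npages=%d, page_list bytes=%zu\n", npages, npages * sizeof *page_list);
        free(page_list);
        return 0;
    }
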
/drivers/infiniband/core/
umem.c 86 unsigned long npages; in ib_umem_get() local
134 npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT; in ib_umem_get()
138 locked = npages + current->mm->locked_vm; in ib_umem_get()
149 while (npages) { in ib_umem_get()
151 min_t(unsigned long, npages, in ib_umem_get()
159 npages -= ret; in ib_umem_get()
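
ib_umem_get() counts the pages to pin as PAGE_ALIGN(size + offset) >> PAGE_SHIFT; including the offset matters because a region that starts mid-page can spill onto one extra page. It then pins in chunks, subtracting each chunk's return value from npages. The toy program below reproduces only the page-count arithmetic, with invented size and offset values:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long size   = 8192;   /* hypothetical mapping length        */
        unsigned long offset = 100;    /* start address modulo the page size */

        /* Without the offset this looks like exactly 2 pages, but because
         * the region starts 100 bytes into a page it actually touches 3. */
        unsigned long npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;

        printf("size=%lu offset=%lu -> npages=%lu\n", size, offset, npages);
        return 0;
    }
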
/drivers/infiniband/hw/ipath/
ipath_user_sdma.c 276 unsigned long addr, int tlen, int npages) in ipath_user_sdma_pin_pages() argument
283 npages, 0, 1, pages, NULL); in ipath_user_sdma_pin_pages()
285 if (ret != npages) { in ipath_user_sdma_pin_pages()
295 for (j = 0; j < npages; j++) { in ipath_user_sdma_pin_pages()
332 const int npages = ipath_user_sdma_num_pages(iov + idx); in ipath_user_sdma_pin_pkt() local
337 npages); in ipath_user_sdma_pin_pkt()
356 unsigned long niov, int npages) in ipath_user_sdma_init_payload() argument
360 if (npages >= ARRAY_SIZE(pkt->addr)) in ipath_user_sdma_init_payload()
417 int npages = 0; in ipath_user_sdma_queue_pkts() local
493 npages++; in ipath_user_sdma_queue_pkts()
[all …]
/drivers/edac/
i5100_edac.c 749 const unsigned long npages = i5100_npages(mci, i); in i5100_init_csrows() local
753 if (!npages) in i5100_init_csrows()
761 mci->csrows[i].last_page = total_pages + npages - 1; in i5100_init_csrows()
764 mci->csrows[i].nr_pages = npages; in i5100_init_csrows()
782 total_pages += npages; in i5100_init_csrows()
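
i5100_init_csrows() lays the ranks out back to back in a flat page space: empty ranks are skipped, each populated csrow gets nr_pages = npages, its last_page follows from a running total, and the total then advances by npages. A small stand-alone version of that bookkeeping with invented per-rank page counts:

    #include <stdio.h>

    int main(void)
    {
        unsigned long rank_pages[] = { 262144, 0, 131072 };  /* hypothetical; 0 = unpopulated */
        unsigned long total_pages = 0;
        int i;

        for (i = 0; i < 3; ++i) {
            unsigned long npages = rank_pages[i];

            if (!npages)                /* skip empty ranks, as the driver does */
                continue;

            /* first page is the running total, last page follows from npages */
            unsigned long first_page = total_pages;
            unsigned long last_page  = total_pages + npages - 1;

            printf("rank %d: pages %lu..%lu (nr_pages=%lu)\n",
                   i, first_page, last_page, npages);
            total_pages += npages;
        }
        return 0;
    }
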
/drivers/staging/poch/
poch.c 304 static unsigned long npages(unsigned long bytes) in npages() function
322 group_pages = npages(channel->group_size); in show_mmap_size()
323 header_pages = npages(channel->header_size); in show_mmap_size()
361 group_pages = npages(channel->group_size); in poch_channel_alloc_groups()
362 header_pages = npages(channel->header_size); in poch_channel_alloc_groups()
898 group_pages = npages(channel->group_size); in poch_mmap()
899 header_pages = npages(channel->header_size); in poch_mmap()
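
poch.c wraps the bytes-to-pages conversion in a small local helper that is then reused for group, header and mmap sizing. Its body is not included in these results; the version below is purely a guess at what such a round-up helper conventionally looks like, not the driver's actual code:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Hypothetical reconstruction -- the real poch.c body is not shown above. */
    static unsigned long npages(unsigned long bytes)
    {
        return (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
    }

    int main(void)
    {
        printf("%lu\n", npages(10000));   /* -> 3 pages with 4 KiB pages */
        return 0;
    }
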
/drivers/usb/mon/
mon_bin.c 188 static int mon_alloc_buff(struct mon_pgmap *map, int npages);
189 static void mon_free_buff(struct mon_pgmap *map, int npages);
1161 static int mon_alloc_buff(struct mon_pgmap *map, int npages) in mon_alloc_buff() argument
1166 for (n = 0; n < npages; n++) { in mon_alloc_buff()
1179 static void mon_free_buff(struct mon_pgmap *map, int npages) in mon_free_buff() argument
1183 for (n = 0; n < npages; n++) in mon_free_buff()
/drivers/infiniband/hw/mlx4/
mr.c 295 int npages, u64 iova) in mlx4_ib_map_phys_fmr() argument
300 return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova, in mlx4_ib_map_phys_fmr()
mlx4_ib.h 310 int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
srq.c 154 err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift, in mlx4_ib_create_srq()
cq.c 107 err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift, in mlx4_ib_alloc_cq_buf()
qp.c 531 err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, in create_qp_common()
/drivers/scsi/cxgb3i/
cxgb3i_ddp.c 278 unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >> in cxgb3i_ddp_make_gl() local
289 npages * (sizeof(dma_addr_t) + sizeof(struct page *)), in cxgb3i_ddp_make_gl()
294 gl->pages = (struct page **)&gl->phys_addr[npages]; in cxgb3i_ddp_make_gl()
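
cxgb3i_ddp_make_gl() sizes a single allocation for both the DMA-address array and the page-pointer array, then points gl->pages just past the npages phys_addr entries. The self-contained sketch below reproduces that single-allocation layout with a simplified struct; the names, types, and transfer sizes here are stand-ins, not the driver's:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* Simplified stand-in for the driver's gather-list structure. */
    struct gl {
        unsigned int nelem;
        void **pages;              /* will point into the same allocation    */
        uint64_t phys_addr[];      /* npages addresses, then npages pointers */
    };

    int main(void)
    {
        unsigned long xferlen = 10000, sgoffset = 512, page_size = 4096;
        unsigned int npages = (xferlen + sgoffset + page_size - 1) / page_size;

        /* One allocation holds the header plus both per-page arrays. */
        struct gl *gl = calloc(1, sizeof(*gl) +
                                  npages * (sizeof(uint64_t) + sizeof(void *)));
        if (!gl)
            return 1;

        gl->nelem = npages;
        /* The pointer array lives immediately after the npages addresses. */
        gl->pages = (void **)&gl->phys_addr[npages];

        printf("npages=%u, pages[] starts %zu bytes into the allocation\n",
               npages, (size_t)((char *)gl->pages - (char *)gl));
        free(gl);
        return 0;
    }
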