Lines Matching refs:npages
192 int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift, in hns_roce_mtt_init() argument
199 if (!npages) { in hns_roce_mtt_init()
209 for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages; in hns_roce_mtt_init()
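The loop header at line 209 sizes the MTT: it starts i at HNS_ROCE_MTT_ENTRY_PER_SEG and raises mtt->order until the covered capacity reaches npages. The loop body is not among the matches above, so the following is only a sketch of that sizing logic, assuming i doubles on each pass; the helper name is hypothetical.

/* Sketch of the order calculation implied by line 209; the doubling of i
 * per iteration is an assumption, since the loop body is not shown above.
 */
static int mtt_order_for(int npages)
{
	int order = 0;
	int i;

	/* grow capacity until it covers npages MTT entries */
	for (i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
		++order;

	return order;
}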
234 u64 size, u32 access, int npages, in hns_roce_mr_alloc() argument
259 mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, in hns_roce_mr_alloc()
273 int npages = 0; in hns_roce_mr_free() local
284 npages = ib_umem_page_count(mr->umem); in hns_roce_mr_free()
285 dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf, in hns_roce_mr_free()
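The PBL buffer holds one 64-bit physical address per page, so its size is npages * 8 bytes on both the allocation (line 259) and the free (lines 284-285) paths; the free path recomputes npages from the umem via ib_umem_page_count(). A minimal sketch of that pairing, with sizeof(u64) written out in place of the literal 8; the dma handle field name (pbl_dma_addr) is an assumption, and error handling is omitted.

/* Allocation side, as at line 259. */
mr->pbl_buf = dma_alloc_coherent(dev, npages * sizeof(u64),
				 &mr->pbl_dma_addr, GFP_KERNEL);

/* Free side, as at lines 284-285: npages is recomputed from the umem so
 * the size passed to dma_free_coherent() matches the allocation.
 */
npages = ib_umem_page_count(mr->umem);
dma_free_coherent(dev, npages * sizeof(u64), mr->pbl_buf,
		  mr->pbl_dma_addr);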
342 u32 npages, u64 *page_list) in hns_roce_write_mtt_chunk() argument
351 (start_index + npages - 1) / (PAGE_SIZE / sizeof(u64))) in hns_roce_write_mtt_chunk()
364 for (i = 0; i < npages; ++i) in hns_roce_write_mtt_chunk()
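The guard at line 351 ensures a single chunk never spans two pages of the MTT table: with PAGE_SIZE / sizeof(u64) entries per page, the first and last entries of the chunk must land on the same page. A minimal restatement of that condition; the error code returned is an assumption.

/* Restatement of the guard at line 351: reject a chunk whose first and
 * last entries fall on different pages of the MTT table.
 */
u32 per_page = PAGE_SIZE / sizeof(u64);

if (start_index / per_page !=
    (start_index + npages - 1) / per_page)
	return -EINVAL;	/* exact return value assumed */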
372 u32 npages, u64 *page_list) in hns_roce_write_mtt() argument
380 while (npages > 0) { in hns_roce_write_mtt()
381 chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages); in hns_roce_write_mtt()
388 npages -= chunk; in hns_roce_write_mtt()
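hns_roce_write_mtt() splits the caller's page_list into chunks of at most PAGE_SIZE / sizeof(u64) entries, so each chunk satisfies the single-page constraint checked in hns_roce_write_mtt_chunk(). A sketch of the chunking loop around lines 380-388; the advancing of start_index and page_list between iterations is assumed, since those lines are not among the matches above.

/* Sketch of the chunking loop (lines 380-388). */
while (npages > 0) {
	chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);

	ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index,
				       chunk, page_list);
	if (ret)
		return ret;

	/* advance to the next chunk (assumed bookkeeping) */
	npages -= chunk;
	start_index += chunk;
	page_list += chunk;
}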
403 page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL); in hns_roce_buf_write_mtt()
407 for (i = 0; i < buf->npages; ++i) { in hns_roce_buf_write_mtt()
414 ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list); in hns_roce_buf_write_mtt()
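hns_roce_buf_write_mtt() gathers the DMA address of every page of a kernel buffer into a temporary array of buf->npages entries and writes them through hns_roce_write_mtt() in one call. The per-page address lookup inside the loop at line 407 is not among the matches, so the sketch below stands in a hypothetical hns_roce_buf_page_addr() helper for however the driver derives each page's address from struct hns_roce_buf.

/* Sketch of the flow implied by lines 403-414. */
page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
if (!page_list)
	return -ENOMEM;

for (i = 0; i < buf->npages; ++i)
	page_list[i] = hns_roce_buf_page_addr(buf, i);	/* assumed helper */

ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);

kfree(page_list);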