/kernel/linux/linux-5.10/include/linux/

vfio.h
     98  int npage, int prot,
    101  unsigned long *user_pfn, int npage);
    132  int npage, int prot, unsigned long *phys_pfn);
    134  int npage);
    137  unsigned long *user_iova_pfn, int npage,
    140  unsigned long *user_iova_pfn, int npage);

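The hits at lines 98 and 101 are the pin_pages/unpin_pages callbacks in struct vfio_iommu_driver_ops; the hits at 132-140 are the exported helpers that route to those callbacks. As a hedged reconstruction of how the exported declarations read in v5.10 (verify against include/linux/vfio.h before relying on exact signatures), npage is the number of entries in the user_pfn/user_iova_pfn and phys_pfn arrays:

    /* Hedged reconstruction of the v5.10 declarations in include/linux/vfio.h. */
    extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
                              int npage, int prot, unsigned long *phys_pfn);
    extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
                                int npage);
    extern int vfio_group_pin_pages(struct vfio_group *group,
                                    unsigned long *user_iova_pfn, int npage,
                                    int prot, unsigned long *phys_pfn);
    extern int vfio_group_unpin_pages(struct vfio_group *group,
                                      unsigned long *user_iova_pfn, int npage);
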
/kernel/linux/linux-5.10/drivers/gpu/drm/gma500/

gtt.c
     95  set_pages_array_wc(pages, r->npage);   in psb_gtt_insert()
     99  for (i = r->roll; i < r->npage; i++) {   in psb_gtt_insert()
    137  for (i = 0; i < r->npage; i++)   in psb_gtt_remove()
    140  set_pages_array_wb(r->pages, r->npage);   in psb_gtt_remove()
    159  if (roll >= r->npage) {   in psb_gtt_roll()
    173  for (i = r->roll; i < r->npage; i++) {   in psb_gtt_roll()
    204  gt->npage = gt->gem.size / PAGE_SIZE;   in psb_gtt_attach_pages()
    255  gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);   in psb_gtt_pin()
    296  (gpu_base + gt->offset), gt->npage, 0, 0);   in psb_gtt_unpin()

gtt.h
     39  int npage;  /* Number of backing pages */   member

gma_display.c
    395  if (gt->npage > 4)   in gma_crtc_cursor_set()
    398  cursor_pages = gt->npage;   in gma_crtc_cursor_set()

framebuffer.c
     90  if (gtt->npage) {   in psbfb_pan()

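Across the gma500 hits, npage is simply the number of pages backing a GTT range: it is derived once from the object size (gtt.c line 204) and then bounds every loop over the page array. A minimal sketch of that pattern, using a hypothetical stand-in for struct gtt_range (gma500 is x86-only, where set_pages_array_wc()/_wb() are available):

    #include <linux/mm.h>           /* PAGE_SIZE */
    #include <linux/set_memory.h>   /* set_pages_array_wc()/_wb() */

    /* Hypothetical, trimmed-down stand-in for struct gtt_range (gtt.h). */
    struct example_gtt_range {
            struct page **pages;   /* backing pages */
            size_t size;           /* object size, page aligned */
            int npage;             /* number of backing pages */
    };

    /* Sketch: compute the backing-page count and switch the whole array to
     * write-combining while it is mapped through the GTT (cf. gtt.c 95/204). */
    static void example_attach(struct example_gtt_range *r)
    {
            r->npage = r->size / PAGE_SIZE;
            set_pages_array_wc(r->pages, r->npage);
    }

    /* Sketch: restore write-back caching when the range is removed (cf. gtt.c 140). */
    static void example_detach(struct example_gtt_range *r)
    {
            set_pages_array_wb(r->pages, r->npage);
    }
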
/kernel/linux/linux-5.10/drivers/infiniband/hw/hns/

hns_roce_mr.c
    637  int npage;   in mtr_map_region() local
    648  npage = 0;   in mtr_map_region()
    657  addr = to_hr_hw_page_addr(pages[npage]);   in mtr_map_region()
    659  addr = pages[npage];   in mtr_map_region()
    662  npage++;   in mtr_map_region()
    820  int npage;   in mtr_get_pages() local
    824  npage = hns_roce_get_umem_bufs(hr_dev, pages, count, 0,   in mtr_get_pages()
    827  npage = hns_roce_get_kmem_bufs(hr_dev, pages, count, 0,   in mtr_get_pages()
    830  if (mtr->hem_cfg.is_direct && npage > 1) {   in mtr_get_pages()
    831  err = mtr_check_direct_pages(pages, npage, page_shift);   in mtr_get_pages()
    [all …]

/kernel/linux/linux-5.10/drivers/vfio/

vfio_iommu_type1.c
    386  static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)   in vfio_lock_acct() argument
    391  if (!npage)   in vfio_lock_acct()
    400  ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,   in vfio_lock_acct()
    561  long npage, unsigned long *pfn_base,   in vfio_pin_pages_remote() argument
    599  for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;   in vfio_pin_pages_remote()
    642  unsigned long pfn, long npage,   in vfio_unpin_pages_remote() argument
    648  for (i = 0; i < npage; i++, iova += PAGE_SIZE) {   in vfio_unpin_pages_remote()
    716  int npage, int prot,   in vfio_iommu_type1_pin_pages() argument
    748  for (i = 0; i < npage; i++) {   in vfio_iommu_type1_pin_pages()
    821  int npage)   in vfio_iommu_type1_unpin_pages() argument
    [all …]

vfio.c
   1895  int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,   in vfio_pin_pages() argument
   1903  if (!dev || !user_pfn || !phys_pfn || !npage)   in vfio_pin_pages()
   1906  if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)   in vfio_pin_pages()
   1927  npage, prot, phys_pfn);   in vfio_pin_pages()
   1948  int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)   in vfio_unpin_pages() argument
   1955  if (!dev || !user_pfn || !npage)   in vfio_unpin_pages()
   1958  if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)   in vfio_unpin_pages()
   1973  npage);   in vfio_unpin_pages()
   2007  unsigned long *user_iova_pfn, int npage,   in vfio_group_pin_pages() argument
   2014  if (!group || !user_iova_pfn || !phys_pfn || !npage)   in vfio_group_pin_pages()
   [all …]

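Two conventions are visible in the type1 hits: the pin/unpin paths treat npage as a plain element count, while vfio_lock_acct() takes a signed npage, where a positive count charges pages against the task's locked-memory accounting and a negative count releases them (hence abs(npage) and npage > 0 at line 400). A hedged sketch of that sign convention, with a hypothetical trimmed-down stand-in for struct vfio_dma:

    #include <linux/mm.h>      /* __account_locked_vm() */
    #include <linux/sched.h>   /* struct task_struct */

    /* Hypothetical stand-in for struct vfio_dma; the real one lives in
     * drivers/vfio/vfio_iommu_type1.c and carries much more state. */
    struct example_dma {
            struct task_struct *task;
            struct mm_struct *mm;
            bool lock_cap;
    };

    /* Sketch of the sign convention: abs(npage) pages are accounted, and the
     * sign of npage selects whether locked_vm goes up (pinning) or down
     * (unpinning). Error handling and the async path are omitted. */
    static int example_lock_acct(struct example_dma *dma, long npage)
    {
            if (!npage)
                    return 0;

            return __account_locked_vm(dma->mm, abs(npage), npage > 0,
                                       dma->task, dma->lock_cap);
    }
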
/kernel/linux/linux-5.10/fs/f2fs/

node.c
    725  struct page *npage[4];   in f2fs_get_dnode_of_data() local
    738  npage[0] = dn->inode_page;   in f2fs_get_dnode_of_data()
    740  if (!npage[0]) {   in f2fs_get_dnode_of_data()
    741  npage[0] = f2fs_get_node_page(sbi, nids[0]);   in f2fs_get_dnode_of_data()
    742  if (IS_ERR(npage[0]))   in f2fs_get_dnode_of_data()
    743  return PTR_ERR(npage[0]);   in f2fs_get_dnode_of_data()
    749  f2fs_put_page(npage[0], 1);   in f2fs_get_dnode_of_data()
    753  parent = npage[0];   in f2fs_get_dnode_of_data()
    756  dn->inode_page = npage[0];   in f2fs_get_dnode_of_data()
    771  npage[i] = f2fs_new_node_page(dn, noffset[i]);   in f2fs_get_dnode_of_data()
    [all …]

inline.c
    281  int f2fs_recover_inline_data(struct inode *inode, struct page *npage)   in f2fs_recover_inline_data() argument
    296  if (IS_INODE(npage))   in f2fs_recover_inline_data()
    297  ri = F2FS_INODE(npage);   in f2fs_recover_inline_data()
    308  src_addr = inline_data_addr(inode, npage);   in f2fs_recover_inline_data()

f2fs.h
    911  struct page *ipage, struct page *npage, nid_t nid)   in set_new_dnode() argument
    916  dn->node_page = npage;   in set_new_dnode()
   3816  int f2fs_recover_inline_data(struct inode *inode, struct page *npage);

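In f2fs, npage consistently means "a node page": f2fs_get_dnode_of_data() keeps the lookup path in the local npage[0..3] array (the inode page first, then up to three levels of indirect node pages), and set_new_dnode() records the node page in the dnode_of_data descriptor. A hedged reconstruction of that inline helper around f2fs.h line 911 (verify against the tree):

    /* Hedged reconstruction: reset the dnode descriptor and record the inode
     * page, the node page ("npage") and the node id it refers to. */
    static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
                                     struct page *ipage, struct page *npage,
                                     nid_t nid)
    {
            memset(dn, 0, sizeof(*dn));
            dn->inode = inode;
            dn->inode_page = ipage;
            dn->node_page = npage;
            dn->nid = nid;
    }
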
/kernel/linux/linux-5.10/drivers/infiniband/hw/mthca/

mthca_allocator.c
    162  int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;   in mthca_array_init() local
    165  array->page_list = kmalloc_array(npage, sizeof(*array->page_list),   in mthca_array_init()
    170  for (i = 0; i < npage; ++i) {   in mthca_array_init()

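The mthca hit at line 162 is the usual round-up division: how many PAGE_SIZE pages are needed to hold nent pointers. The open-coded expression is equivalent to DIV_ROUND_UP(); a minimal sketch:

    #include <linux/kernel.h>   /* DIV_ROUND_UP() */
    #include <linux/mm.h>       /* PAGE_SIZE */

    /* Sketch: (nent * sizeof(void *) + PAGE_SIZE - 1) / PAGE_SIZE, written
     * with the helper macro; both forms round up to whole pages. */
    static int example_npage_for_pointers(int nent)
    {
            return DIV_ROUND_UP(nent * sizeof(void *), PAGE_SIZE);
    }
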
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gvt/

kvmgt.c
    157  int npage;   in gvt_unpin_guest_page() local
    162  for (npage = 0; npage < total_pages; npage++) {   in gvt_unpin_guest_page()
    163  unsigned long cur_gfn = gfn + npage;   in gvt_unpin_guest_page()
    177  int npage;   in gvt_pin_guest_page() local
    185  for (npage = 0; npage < total_pages; npage++) {   in gvt_pin_guest_page()
    186  unsigned long cur_gfn = gfn + npage;   in gvt_pin_guest_page()
    199  npage++;   in gvt_pin_guest_page()
    204  if (npage == 0)   in gvt_pin_guest_page()
    206  else if (base_pfn + npage != pfn) {   in gvt_pin_guest_page()
    209  npage++;   in gvt_pin_guest_page()
    [all …]

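In kvmgt.c, npage counts how many guest frames have been pinned so far: gvt_pin_guest_page() pins the range one frame at a time through vfio_pin_pages() and additionally requires the returned host PFNs to be contiguous (the base_pfn + npage != pfn check at line 206). A hedged sketch of that loop shape; "dev" stands in for the mdev device used by the driver, and unwinding of already-pinned frames on failure is omitted:

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/iommu.h>   /* IOMMU_READ / IOMMU_WRITE */
    #include <linux/vfio.h>    /* vfio_pin_pages() */

    static int example_pin_contiguous(struct device *dev, unsigned long gfn,
                                      unsigned long total_pages,
                                      unsigned long *base_pfn)
    {
            unsigned long npage;

            for (npage = 0; npage < total_pages; npage++) {
                    unsigned long cur_gfn = gfn + npage;
                    unsigned long pfn;
                    int ret;

                    /* Pin exactly one guest frame; on success ret == 1. */
                    ret = vfio_pin_pages(dev, &cur_gfn, 1,
                                         IOMMU_READ | IOMMU_WRITE, &pfn);
                    if (ret != 1)
                            return ret < 0 ? ret : -EFAULT;

                    if (npage == 0)
                            *base_pfn = pfn;             /* start of the run */
                    else if (*base_pfn + npage != pfn)
                            return -EINVAL;              /* not contiguous */
            }

            return 0;
    }
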
/kernel/linux/linux-5.10/lib/

kfifo.c
    315  struct page *npage;   in setup_sgl_buf() local
    319  npage = virt_to_page(buf);   in setup_sgl_buf()
    320  if (page_to_phys(page) != page_to_phys(npage) - l) {   in setup_sgl_buf()
    325  page = npage;   in setup_sgl_buf()

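In kfifo's setup_sgl_buf(), npage is the page backing the next chunk of the buffer, and the test at line 320 decides whether it can extend the current scatterlist entry: after walking l bytes (a whole number of pages) from the start of the run, the buffer is still physically contiguous only if the new page sits exactly l bytes above the page that started the run. A small sketch of that predicate:

    #include <linux/io.h>      /* page_to_phys() */
    #include <linux/mm.h>      /* struct page */
    #include <linux/types.h>   /* bool */

    /* Sketch of the test at kfifo.c line 320; when it fails, setup_sgl_buf()
     * closes the current scatterlist entry and starts a new run at npage. */
    static bool example_run_is_contiguous(struct page *page, struct page *npage,
                                          unsigned long l)
    {
            return page_to_phys(page) == page_to_phys(npage) - l;
    }
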
/kernel/linux/linux-5.10/Documentation/driver-api/

vfio-mediated-device.rst
    297  int npage, int prot, unsigned long *phys_pfn);
    300  int npage);

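These are the same pin/unpin prototypes as in include/linux/vfio.h above, quoted in the mediated-device documentation. A hedged sketch of how a vendor driver might call them, batching at most VFIO_PIN_PAGES_MAX_ENTRIES frames per call (the limit checked in vfio.c at lines 1906 and 1958); the surrounding driver context and the function itself are hypothetical:

    #include <linux/errno.h>
    #include <linux/iommu.h>   /* IOMMU_READ / IOMMU_WRITE */
    #include <linux/mdev.h>    /* mdev_dev() */
    #include <linux/vfio.h>    /* vfio_pin_pages(), vfio_unpin_pages() */

    /* Sketch: pin a small batch of guest PFNs for DMA, use the host PFNs,
     * then unpin the same batch. Error handling is reduced to the minimum. */
    static int example_pin_and_unpin(struct mdev_device *mdev,
                                     unsigned long *gfns, int npage,
                                     unsigned long *host_pfns)
    {
            int ret;

            if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
                    return -E2BIG;   /* callers must batch below the limit */

            ret = vfio_pin_pages(mdev_dev(mdev), gfns, npage,
                                 IOMMU_READ | IOMMU_WRITE, host_pfns);
            if (ret != npage)
                    return ret < 0 ? ret : -EFAULT;

            /* ... program the device with host_pfns[] ... */

            ret = vfio_unpin_pages(mdev_dev(mdev), gfns, npage);
            return ret < 0 ? ret : 0;
    }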