/arch/powerpc/kvm/
  book3s_hv_builtin.c
    134: struct page *kvm_alloc_hpt(unsigned long nr_pages)    in kvm_alloc_hpt() (argument)
    138: VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);    in kvm_alloc_hpt()
    142: align_pages = nr_pages;    in kvm_alloc_hpt()
    143: return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));    in kvm_alloc_hpt()
    147: void kvm_release_hpt(struct page *page, unsigned long nr_pages)    in kvm_release_hpt() (argument)
    149: cma_release(kvm_cma, page, nr_pages);    in kvm_release_hpt()
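The kvm_alloc_hpt() hits above pair cma_alloc() with cma_release() and align the allocation to its own size via order_base_2(nr_pages). A minimal user-space sketch of that alignment math; order_base_2() is reimplemented here and the CMA calls appear only in comments, so treat it as an illustration, not the kernel API.

    #include <stdio.h>

    /* smallest order k such that (1UL << k) >= n, like the kernel's order_base_2() */
    static unsigned int order_base_2(unsigned long n)
    {
        unsigned int order = 0;
        while ((1UL << order) < n)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long nr_pages = 24;   /* e.g. a 24-page HPT request (made-up value) */
        unsigned int align_order = order_base_2(nr_pages);

        /* cma_alloc(kvm_cma, nr_pages, align_order) would return pages whose
         * start is aligned to (1 << align_order) pages. */
        printf("nr_pages=%lu -> align to %lu pages\n",
               nr_pages, 1UL << align_order);
        return 0;
    }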
/arch/arm/mach-rpc/include/mach/
  uncompress.h
    23: unsigned long nr_pages;    (member)
    119: unsigned int nr_pages = 0, page_size = PAGE_SIZE;    in arch_decomp_setup() (local)
    138: nr_pages += (t->u.mem.size / PAGE_SIZE);    in arch_decomp_setup()
    144: nr_pages = params->nr_pages;    in arch_decomp_setup()
    189: if (nr_pages * page_size < 4096*1024) error("<4M of mem\n");    in arch_decomp_setup()
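arch_decomp_setup() totals nr_pages from the boot-time memory tags and refuses to proceed with less than 4 MB. A stand-alone sketch of that accumulation and check, with a hypothetical bank list in place of the tag walk and fprintf() in place of error():

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_size = 4096;
        unsigned long nr_pages  = 0;
        unsigned long banks[]   = { 2 * 1024 * 1024, 4 * 1024 * 1024 }; /* made-up mem tags */

        for (unsigned i = 0; i < sizeof(banks) / sizeof(banks[0]); i++)
            nr_pages += banks[i] / page_size;        /* line 138 pattern */

        if (nr_pages * page_size < 4096 * 1024)      /* line 189 pattern */
            fprintf(stderr, "<4M of mem\n");
        else
            printf("%lu pages OK\n", nr_pages);
        return 0;
    }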
/arch/arm/xen/
  p2m.c
    23: unsigned long nr_pages;    (member)
    72: entry->pfn + entry->nr_pages > pfn) {    in __pfn_to_mfn()
    120: unsigned long mfn, unsigned long nr_pages)    in __set_phys_to_machine_multi() (argument)
    132: p2m_entry->pfn + p2m_entry->nr_pages > pfn) {    in __set_phys_to_machine_multi()
    153: p2m_entry->nr_pages = nr_pages;    in __set_phys_to_machine_multi()
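Lines 72 and 132 are the tail of a containment test: does pfn fall in the half-open range [entry->pfn, entry->pfn + entry->nr_pages)? The full test, reconstructed as a stand-alone function (the leading entry->pfn <= pfn clause is my assumption, since the listing only shows the second half):

    #include <stdbool.h>
    #include <stdio.h>

    struct p2m_entry {
        unsigned long pfn;
        unsigned long nr_pages;   /* the struct member hit at line 23 */
    };

    static bool entry_covers(const struct p2m_entry *e, unsigned long pfn)
    {
        /* half-open interval: start inclusive, end exclusive */
        return e->pfn <= pfn && e->pfn + e->nr_pages > pfn;
    }

    int main(void)
    {
        struct p2m_entry e = { .pfn = 100, .nr_pages = 16 };
        printf("%d %d\n", entry_covers(&e, 100), entry_covers(&e, 116)); /* 1 0 */
        return 0;
    }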
/arch/powerpc/mm/
  gup.c
    133: int __get_user_pages_fast(unsigned long start, int nr_pages, int write,    in __get_user_pages_fast() (argument)
    143: pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");    in __get_user_pages_fast()
    147: len = (unsigned long) nr_pages << PAGE_SHIFT;    in __get_user_pages_fast()
    201: int get_user_pages_fast(unsigned long start, int nr_pages, int write,    in get_user_pages_fast() (argument)
    208: nr = __get_user_pages_fast(start, nr_pages, write, pages);    in get_user_pages_fast()
    211: if (nr < nr_pages) {    in get_user_pages_fast()
    220: nr_pages - nr, write, 0, pages, NULL);    in get_user_pages_fast()
  mem.c
    122: unsigned long nr_pages = size >> PAGE_SHIFT;    in arch_add_memory() (local)
    134: return __add_pages(nid, zone, start_pfn, nr_pages);    in arch_add_memory()
    141: unsigned long nr_pages = size >> PAGE_SHIFT;    in arch_remove_memory() (local)
    146: ret = __remove_pages(zone, start_pfn, nr_pages);    in arch_remove_memory()
    162: walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,    in walk_system_ram_range() (argument)
    166: unsigned long end_pfn = start_pfn + nr_pages;    in walk_system_ram_range()
  init_64.c
    394: unsigned int nr_pages =    in vmemmap_free() (local)
    396: while (nr_pages--)    in vmemmap_free()
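The gup.c hits (lines 201-220) show the usual fast-GUP shape: try the lockless fast path, and if it pins fewer than nr_pages pages, fall back to the slow path for the remainder. A toy model of that control flow, with both paths stubbed out (the stubs and their pin counts are invented for illustration):

    #include <stdio.h>

    #define NR 8

    static int fast_path(int nr_pages, int *pages)   /* stand-in for __get_user_pages_fast() */
    {
        int nr = nr_pages < 5 ? nr_pages : 5;        /* pretend only 5 pin quickly */
        for (int i = 0; i < nr; i++)
            pages[i] = 1;
        return nr;
    }

    static int slow_path(int nr_pages, int *pages)   /* stand-in for get_user_pages() */
    {
        for (int i = 0; i < nr_pages; i++)
            pages[i] = 1;
        return nr_pages;
    }

    int main(void)
    {
        int pages[NR] = { 0 };
        int nr = fast_path(NR, pages);
        if (nr < NR)                                 /* line 211 pattern */
            nr += slow_path(NR - nr, pages + nr);    /* line 220: pin the remainder */
        printf("pinned %d of %d\n", nr, NR);
        return 0;
    }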
/arch/x86/mm/
  numa_32.c
    67: unsigned long nr_pages = end_pfn - start_pfn;    in node_memmap_size_bytes() (local)
    69: if (!nr_pages)    in node_memmap_size_bytes()
    72: return (nr_pages + 1) * sizeof(struct page);    in node_memmap_size_bytes()
  gup.c
    257: int __get_user_pages_fast(unsigned long start, int nr_pages, int write,    in __get_user_pages_fast() (argument)
    269: len = (unsigned long) nr_pages << PAGE_SHIFT;    in __get_user_pages_fast()
    325: int get_user_pages_fast(unsigned long start, int nr_pages, int write,    in get_user_pages_fast() (argument)
    336: len = (unsigned long) nr_pages << PAGE_SHIFT;    in get_user_pages_fast()
  init_64.c
    708: unsigned long nr_pages = size >> PAGE_SHIFT;    in arch_add_memory() (local)
    713: ret = __add_pages(nid, zone, start_pfn, nr_pages);    in arch_add_memory()
    728: unsigned int nr_pages = 1 << order;    in free_pagetable() (local)
    736: while (nr_pages--)    in free_pagetable()
    739: while (nr_pages--)    in free_pagetable()
    1032: unsigned long nr_pages = size >> PAGE_SHIFT;    in arch_remove_memory() (local)
    1038: ret = __remove_pages(zone, start_pfn, nr_pages);    in arch_remove_memory()
    1376: unsigned int nr_pages;    in register_page_bootmem_memmap() (local)
    1416: nr_pages = 1 << (get_order(PMD_SIZE));    in register_page_bootmem_memmap()
    1418: while (nr_pages--)    in register_page_bootmem_memmap()
  tlb.c
    119: unsigned long nr_pages =    in flush_tlb_func() (local)
    126: trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);    in flush_tlb_func()
  init_32.c
    832: unsigned long nr_pages = size >> PAGE_SHIFT;    in arch_add_memory() (local)
    834: return __add_pages(nid, zone, start_pfn, nr_pages);    in arch_add_memory()
    841: unsigned long nr_pages = size >> PAGE_SHIFT;    in arch_remove_memory() (local)
    845: return __remove_pages(zone, start_pfn, nr_pages);    in arch_remove_memory()
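Two conversions recur in the x86 hits: bytes to pages for hotplug (size >> PAGE_SHIFT, lines 708/832/841/1032) and numa_32.c's memmap estimate (nr_pages + 1) * sizeof(struct page) at line 72. A sketch with a toy struct page; the comment on the +1 is my reading of the rounding slack, not something the listing states:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct page { unsigned long flags; void *lru[2]; }; /* toy stand-in, not the real layout */

    int main(void)
    {
        unsigned long size = 128UL << 20;               /* 128 MiB hot-added (made-up) */
        unsigned long nr_pages = size >> PAGE_SHIFT;    /* 32768 pages */

        /* +1 presumably pads for rounding at the range edges */
        unsigned long memmap_bytes = (nr_pages + 1) * sizeof(struct page);

        printf("nr_pages=%lu memmap=%lu bytes\n", nr_pages, memmap_bytes);
        return 0;
    }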
/arch/s390/mm/
  init.c
    175: unsigned long zone_start_pfn, zone_end_pfn, nr_pages;    in arch_add_memory() (local)
    197: nr_pages = (start_pfn + size_pages > zone_end_pfn) ?    in arch_add_memory()
    199: rc = __add_pages(nid, zone, start_pfn, nr_pages);    in arch_add_memory()
    202: start_pfn += nr_pages;    in arch_add_memory()
    203: size_pages -= nr_pages;    in arch_add_memory()
  gup.c
    170: int __get_user_pages_fast(unsigned long start, int nr_pages, int write,    in __get_user_pages_fast() (argument)
    181: len = (unsigned long) nr_pages << PAGE_SHIFT;    in __get_user_pages_fast()
    224: int get_user_pages_fast(unsigned long start, int nr_pages, int write,    in get_user_pages_fast() (argument)
    231: nr = __get_user_pages_fast(start, nr_pages, write, pages);    in get_user_pages_fast()
    232: if (nr == nr_pages)    in get_user_pages_fast()
    240: nr_pages - nr, write, 0, pages, NULL);    in get_user_pages_fast()
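s390's arch_add_memory() (lines 197-203) adds memory one zone-sized chunk at a time: nr_pages is clamped to the current zone's end, then start_pfn advances and size_pages shrinks until nothing is left. A schematic version with the zone layout and __add_pages() faked:

    #include <stdio.h>

    int main(void)
    {
        unsigned long start_pfn = 1000, size_pages = 5000;
        unsigned long zone_end_pfn = 4000;               /* hypothetical zone boundary */

        while (size_pages) {
            unsigned long nr_pages =
                (start_pfn + size_pages > zone_end_pfn) ?
                    zone_end_pfn - start_pfn : size_pages;   /* line 197 pattern */

            printf("add [%lu, %lu)\n", start_pfn, start_pfn + nr_pages);
            start_pfn += nr_pages;                       /* line 202 */
            size_pages -= nr_pages;                      /* line 203 */
            zone_end_pfn += 4000;                        /* pretend the next zone follows */
        }
        return 0;
    }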
/arch/arm/kernel/
  atags_compat.c
    46: unsigned long nr_pages; /* 4 */    (member)
    108: if (params->u1.s.nr_pages != 0x02000 &&    in build_tag_list()
    109: params->u1.s.nr_pages != 0x04000 &&    in build_tag_list()
    110: params->u1.s.nr_pages != 0x08000 &&    in build_tag_list()
    111: params->u1.s.nr_pages != 0x10000) {    in build_tag_list()
    115: params->u1.s.nr_pages = 0x1000; /* 16MB */    in build_tag_list()
    163: tag = memtag(tag, PHYS_OFFSET, params->u1.s.nr_pages * PAGE_SIZE);    in build_tag_list()
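build_tag_list() trusts only four firmware-reported page counts and falls back to 0x1000 pages (16 MB with 4 KiB pages) for anything else. The same whitelist check, stand-alone (the bogus input value is made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned long nr_pages = 0x3000;   /* bogus firmware value */

        if (nr_pages != 0x02000 && nr_pages != 0x04000 &&
            nr_pages != 0x08000 && nr_pages != 0x10000) {
            nr_pages = 0x1000;             /* 16MB fallback, as at line 115 */
        }
        printf("using %lu pages (%lu MB)\n",
               nr_pages, nr_pages * 4096 / (1024 * 1024));
        return 0;
    }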
/arch/s390/pci/
  pci_dma.c
    145: unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;    in dma_update_trans() (local)
    151: if (!nr_pages)    in dma_update_trans()
    158: for (i = 0; i < nr_pages; i++) {    in dma_update_trans()
    177: nr_pages * PAGE_SIZE);    in dma_update_trans()
    281: unsigned long nr_pages, iommu_page_index;    in s390_dma_map_pages() (local)
    287: nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);    in s390_dma_map_pages()
    288: iommu_page_index = dma_alloc_iommu(zdev, nr_pages);    in s390_dma_map_pages()
    293: size = nr_pages * PAGE_SIZE;    in s390_dma_map_pages()
    303: atomic64_add(nr_pages, &zdev->mapped_pages);    in s390_dma_map_pages()
    308: dma_free_iommu(zdev, iommu_page_index, nr_pages);    in s390_dma_map_pages()
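Line 287 sizes the mapping with iommu_num_pages(pa, size, PAGE_SIZE): the number of whole pages touched by the byte range [pa, pa + size) once pa's sub-page offset is counted in. A stand-alone equivalent of that computation (my reimplementation, not the kernel helper):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static unsigned long num_pages(unsigned long pa, unsigned long size)
    {
        unsigned long span = (pa & (PAGE_SIZE - 1)) + size;   /* offset + length */
        return (span + PAGE_SIZE - 1) / PAGE_SIZE;            /* round up */
    }

    int main(void)
    {
        /* 8 KiB starting 100 bytes into a page spills into a third page */
        printf("%lu\n", num_pages(0x1000 + 100, 8192));       /* prints 3 */
        return 0;
    }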
/arch/x86/xen/
  setup.c
    206: unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,    in xen_set_identity_and_release_chunk() (argument)
    212: *released += xen_do_chunk(start_pfn, min(end_pfn, nr_pages), true);    in xen_set_identity_and_release_chunk()
    398: unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,    in xen_set_identity_and_remap_chunk() (argument)
    413: if (cur_pfn >= nr_pages) {    in xen_set_identity_and_remap_chunk()
    419: if (cur_pfn + size > nr_pages)    in xen_set_identity_and_remap_chunk()
    420: size = nr_pages - cur_pfn;    in xen_set_identity_and_remap_chunk()
    427: cur_pfn + left, nr_pages, identity, released);    in xen_set_identity_and_remap_chunk()
    438: cur_pfn + left, nr_pages, identity, released);    in xen_set_identity_and_remap_chunk()
    462: const struct e820entry *list, size_t map_size, unsigned long nr_pages,    in xen_set_identity_and_remap() (argument)
    468: unsigned long last_pfn = nr_pages;    in xen_set_identity_and_remap()
    [all …]
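Lines 413-420 walk a pfn range in chunks and clamp the final chunk so it never runs past nr_pages, the end of populated memory. The clamping loop in isolation, with an arbitrary chunk size:

    #include <stdio.h>

    int main(void)
    {
        unsigned long nr_pages = 1000;            /* end of populated RAM, in pfns */
        unsigned long chunk = 256;

        for (unsigned long cur_pfn = 0; cur_pfn < nr_pages; cur_pfn += chunk) {
            unsigned long size = chunk;
            if (cur_pfn + size > nr_pages)        /* line 419 pattern */
                size = nr_pages - cur_pfn;        /* line 420 */
            printf("chunk [%lu, %lu)\n", cur_pfn, cur_pfn + size);
        }
        return 0;
    }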
/arch/sh/mm/
  gup.c
    163: int __get_user_pages_fast(unsigned long start, int nr_pages, int write,    in __get_user_pages_fast() (argument)
    175: len = (unsigned long) nr_pages << PAGE_SHIFT;    in __get_user_pages_fast()
    217: int get_user_pages_fast(unsigned long start, int nr_pages, int write,    in get_user_pages_fast() (argument)
    228: len = (unsigned long) nr_pages << PAGE_SHIFT;    in get_user_pages_fast()
  init.c
    492: unsigned long nr_pages = size >> PAGE_SHIFT;    in arch_add_memory() (local)
    500: start_pfn, nr_pages);    in arch_add_memory()
    521: unsigned long nr_pages = size >> PAGE_SHIFT;    in arch_remove_memory() (local)
    526: ret = __remove_pages(zone, start_pfn, nr_pages);    in arch_remove_memory()
/arch/s390/kvm/
  gaccess.c
    546: unsigned long *pages, unsigned long nr_pages,    in guest_page_range() (argument)
    559: while (nr_pages) {    in guest_page_range()
    584: nr_pages--;    in guest_page_range()
    593: unsigned long _len, nr_pages, gpa, idx;    in access_guest() (local)
    605: nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;    in access_guest()
    607: if (nr_pages > ARRAY_SIZE(pages_array))    in access_guest()
    608: pages = vmalloc(nr_pages * sizeof(unsigned long));    in access_guest()
    615: rc = guest_page_range(vcpu, ga, pages, nr_pages, write);    in access_guest()
    616: for (idx = 0; idx < nr_pages && !rc; idx++) {    in access_guest()
    629: if (nr_pages > ARRAY_SIZE(pages_array))    in access_guest()
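Line 605 computes how many guest pages an unaligned len-byte access at ga touches, and lines 607-608 switch from a small on-stack array to a heap allocation when the count outgrows it. A compact model with vmalloc() swapped for malloc():

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))
    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        unsigned long ga = 0x1ff0, len = 0x20;    /* 32 bytes straddling a page edge */
        unsigned long pages_array[2], *pages = pages_array;

        unsigned long nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;

        if (nr_pages > ARRAY_SIZE(pages_array))   /* fall back to a heap buffer */
            pages = malloc(nr_pages * sizeof(unsigned long));

        printf("nr_pages=%lu (%s)\n", nr_pages,
               pages == pages_array ? "stack" : "heap");
        if (pages != pages_array)
            free(pages);
        return 0;
    }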
/arch/sparc/mm/
  gup.c
    163: int __get_user_pages_fast(unsigned long start, int nr_pages, int write,    in __get_user_pages_fast() (argument)
    174: len = (unsigned long) nr_pages << PAGE_SHIFT;    in __get_user_pages_fast()
    193: int get_user_pages_fast(unsigned long start, int nr_pages, int write,    in get_user_pages_fast() (argument)
    204: len = (unsigned long) nr_pages << PAGE_SHIFT;    in get_user_pages_fast()
/arch/mips/mm/
  gup.c
    196: int __get_user_pages_fast(unsigned long start, int nr_pages, int write,    in __get_user_pages_fast() (argument)
    208: len = (unsigned long) nr_pages << PAGE_SHIFT;    in __get_user_pages_fast()
    263: int get_user_pages_fast(unsigned long start, int nr_pages, int write,    in get_user_pages_fast() (argument)
    274: len = (unsigned long) nr_pages << PAGE_SHIFT;    in get_user_pages_fast()
/arch/arm64/mm/
  dma-mapping.c
    298: unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;    in __dma_common_mmap() (local)
    305: if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {    in __dma_common_mmap()
    372: unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;    in atomic_pool_init() (local)
    378: page = dma_alloc_from_contiguous(NULL, nr_pages,    in atomic_pool_init()
    422: if (!dma_release_from_contiguous(NULL, page, nr_pages))    in atomic_pool_init()
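Line 305 gates the mmap: the VMA's page offset must land inside the nr_pages buffer and its length must not overrun the remainder. The same bounds check as a stand-alone predicate:

    #include <stdbool.h>
    #include <stdio.h>

    static bool mmap_fits(unsigned long nr_pages, unsigned long off,
                          unsigned long nr_vma_pages)
    {
        /* offset must land inside the buffer, and the VMA must not overrun it */
        return off < nr_pages && nr_vma_pages <= (nr_pages - off);
    }

    int main(void)
    {
        printf("%d\n", mmap_fits(16, 4, 12));   /* 1: pages [4,16) exactly fit */
        printf("%d\n", mmap_fits(16, 4, 13));   /* 0: would run one page past */
        return 0;
    }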
/arch/alpha/mm/
  init.c
    192: unsigned long nr_pages = 0;    in callback_init() (local)
    198: nr_pages += crb->map[i].count;    in callback_init()
    202: console_remap_vm.size = nr_pages << PAGE_SHIFT;    in callback_init()
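callback_init() sums the console callback map's page counts and converts the total to bytes with << PAGE_SHIFT. The same arithmetic with made-up counts; PAGE_SHIFT of 13 (alpha's 8 KiB pages) is my assumption:

    #include <stdio.h>

    #define PAGE_SHIFT 13   /* assumed: alpha uses 8 KiB pages */

    int main(void)
    {
        unsigned long counts[] = { 3, 5, 2 };   /* hypothetical crb->map[i].count values */
        unsigned long nr_pages = 0;

        for (unsigned i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
            nr_pages += counts[i];              /* line 198 pattern */

        printf("%lu pages = %lu bytes\n", nr_pages, nr_pages << PAGE_SHIFT);
        return 0;
    }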
/arch/arm/include/asm/xen/
  page.h
    100: unsigned long nr_pages);
/arch/ia64/mm/
  init.c
    660: unsigned long nr_pages = size >> PAGE_SHIFT;    in arch_add_memory() (local)
    667: ret = __add_pages(nid, zone, start_pfn, nr_pages);    in arch_add_memory()
    680: unsigned long nr_pages = size >> PAGE_SHIFT;    in arch_remove_memory() (local)
    685: ret = __remove_pages(zone, start_pfn, nr_pages);    in arch_remove_memory()