/arch/ia64/xen/

grant-table.c
    37  unsigned long nr_pages;                        in xen_alloc_vm_area()  [local]
    44  nr_pages = 1 << order;                         in xen_alloc_vm_area()
    45  scrub_pages(virt, nr_pages);                   in xen_alloc_vm_area()
    55  area->nr_pages = nr_pages;                     in xen_alloc_vm_area()
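The grant-table.c hits show the usual page-order arithmetic: an order-N
allocation spans 1 << N pages, which xen_alloc_vm_area() then scrubs and
records.  A minimal sketch of the two directions of that conversion,
assuming only the standard PAGE_SHIFT / get_order() helpers
(pages_for_order() and order_for_size() are made-up names):

    #include <linux/mm.h>           /* PAGE_SIZE, PAGE_SHIFT, get_order() */

    /* Hypothetical helper: pages covered by an order-N allocation. */
    static unsigned long pages_for_order(unsigned int order)
    {
            return 1UL << order;
    }

    /* Reverse direction: smallest order whose pages cover 'size' bytes. */
    static unsigned int order_for_size(unsigned long size)
    {
            return get_order(size);  /* e.g. get_order(3 * PAGE_SIZE) == 2 */
    }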
xen_pv_ops.c
    98  xen_start_info->nr_pages, xen_start_info->flags);   in xen_banner()
/arch/arm/mach-rpc/include/mach/

uncompress.h
    23  unsigned long nr_pages;                                          [member]
   121  unsigned int nr_pages = 0, page_size = PAGE_SIZE;                in arch_decomp_setup()  [local]
   140  nr_pages += (t->u.mem.size / PAGE_SIZE);                         in arch_decomp_setup()
   146  nr_pages = params->nr_pages;                                     in arch_decomp_setup()
   191  if (nr_pages * page_size < 4096*1024) error("<4M of mem\n");     in arch_decomp_setup()
/arch/arm/kernel/

compat.c
    46  unsigned long nr_pages; /* 4 */                                       [member]
   108  if (params->u1.s.nr_pages != 0x02000 &&                               in build_tag_list()
   109  params->u1.s.nr_pages != 0x04000 &&                                   in build_tag_list()
   110  params->u1.s.nr_pages != 0x08000 &&                                   in build_tag_list()
   111  params->u1.s.nr_pages != 0x10000) {                                   in build_tag_list()
   115  params->u1.s.nr_pages = 0x1000; /* 16MB */                            in build_tag_list()
   163  tag = memtag(tag, PHYS_OFFSET, params->u1.s.nr_pages * PAGE_SIZE);    in build_tag_list()
/arch/powerpc/platforms/pseries/

phyp_dump.c
   266  unsigned long nr_pages)                        in release_memory_range()  [argument]
   272  end_pfn = start_pfn + nr_pages;                in release_memory_range()
   335  unsigned long start_pfn, nr_pages;             in store_release_region()  [local]
   356  nr_pages = PFN_DOWN(length);                   in store_release_region()
   357  release_memory_range(start_pfn, nr_pages);     in store_release_region()
/arch/powerpc/mm/

gup.c
   147  int get_user_pages_fast(unsigned long start, int nr_pages, int write,               in get_user_pages_fast()  [argument]
   157  pr_debug("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");   in get_user_pages_fast()
   161  len = (unsigned long) nr_pages << PAGE_SHIFT;                                        in get_user_pages_fast()
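Both gup.c entries (this one and the x86 copy under /arch/x86/mm/ below) are
per-arch implementations of the same fast-GUP interface: nr_pages is the
number of user pages to pin starting at 'start', and the return value is how
many were actually pinned.  A hedged sketch of a caller, using the
get_user_pages_fast(start, nr_pages, write, pages) signature shown in the
listing (error handling trimmed; pin_user_range() is a made-up name):

    #include <linux/mm.h>

    /* Pin nr_pages user pages at uaddr for reading, then drop them again. */
    static int pin_user_range(unsigned long uaddr, int nr_pages,
                              struct page **pages)
    {
            int i, got;

            got = get_user_pages_fast(uaddr, nr_pages, 0 /* read-only */, pages);
            if (got <= 0)
                    return got;

            /* ... access the pinned pages here ... */

            for (i = 0; i < got; i++)
                    put_page(pages[i]);
            return got;
    }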
init_64.c
   207  unsigned long nr_pages, int node)                             in vmemmap_populate()  [argument]
   210  unsigned long end = (unsigned long)(start_page + nr_pages);   in vmemmap_populate()
mem.c
   125  unsigned long nr_pages = size >> PAGE_SHIFT;                                in arch_add_memory()  [local]
   135  return __add_pages(nid, zone, start_pfn, nr_pages);                         in arch_add_memory()
   146  walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,   in walk_memory_resource()  [argument]
   155  res.size = (u64) nr_pages << PAGE_SHIFT;                                    in walk_memory_resource()
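The arch_add_memory() hits here and in the x86, sh, and ia64 entries below
all follow the same memory-hotplug pattern: convert the byte range into a
pfn range and hand it to __add_pages().  A condensed sketch of that shape,
assuming the four-argument __add_pages(nid, zone, start_pfn, nr_pages) form
shown in the listing and picking ZONE_NORMAL for simplicity:

    #include <linux/mm.h>
    #include <linux/memory_hotplug.h>

    /* Sketch of the common arch_add_memory() shape, not any one arch's copy. */
    int arch_add_memory(int nid, u64 start, u64 size)
    {
            pg_data_t *pgdata = NODE_DATA(nid);
            struct zone *zone = pgdata->node_zones + ZONE_NORMAL;
            unsigned long start_pfn = start >> PAGE_SHIFT;
            unsigned long nr_pages = size >> PAGE_SHIFT;

            return __add_pages(nid, zone, start_pfn, nr_pages);
    }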
/arch/x86/mm/

numa_32.c
    92  unsigned long nr_pages = end_pfn - start_pfn;    in node_memmap_size_bytes()  [local]
    94  if (!nr_pages)                                   in node_memmap_size_bytes()
    97  return (nr_pages + 1) * sizeof(struct page);     in node_memmap_size_bytes()
gup.c
   222  int get_user_pages_fast(unsigned long start, int nr_pages, int write,   in get_user_pages_fast()  [argument]
   233  len = (unsigned long) nr_pages << PAGE_SHIFT;                            in get_user_pages_fast()
init_32.c
  1124  unsigned long nr_pages = size >> PAGE_SHIFT;          in arch_add_memory()  [local]
  1126  return __add_pages(nid, zone, start_pfn, nr_pages);   in arch_add_memory()
init_64.c
   855  unsigned long nr_pages = size >> PAGE_SHIFT;          in arch_add_memory()  [local]
   862  ret = __add_pages(nid, zone, start_pfn, nr_pages);    in arch_add_memory()
/arch/x86/kernel/

alternative.c
   503  int nr_pages = 2;                                     in text_poke()  [local]
   517  nr_pages = 1;                                         in text_poke()
   518  vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);   in text_poke()
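text_poke() maps the patch site through vmap() so it can write to otherwise
read-only kernel text, using two pages only when the write crosses a page
boundary.  A minimal sketch of that vmap()/vunmap() pattern, assuming the
target is a directly mapped (lowmem) address so virt_to_page() is valid
(poke_via_vmap() is a made-up name):

    #include <linux/mm.h>
    #include <linux/vmalloc.h>
    #include <linux/string.h>

    static void *poke_via_vmap(void *addr, const void *src, size_t len)
    {
            struct page *pages[2];
            unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
            int nr_pages = 2;
            char *vaddr;

            pages[0] = virt_to_page(addr);
            pages[1] = virt_to_page(addr + PAGE_SIZE);
            if (offset + len <= PAGE_SIZE)
                    nr_pages = 1;            /* write fits in one page */

            vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
            if (!vaddr)
                    return NULL;
            memcpy(vaddr + offset, src, len);
            vunmap(vaddr);
            return addr;
    }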
/arch/sh/mm/

init.c
   308  unsigned long nr_pages = size >> PAGE_SHIFT;   in arch_add_memory()  [local]
   315  start_pfn, nr_pages);                          in arch_add_memory()
/arch/x86/xen/

setup.c
    42  unsigned long max_pfn = xen_start_info->nr_pages;   in xen_memory_setup()
mmu.c
   172  HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;           in xen_setup_mfn_list_list()
   179  unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);   in xen_build_dynamic_phys_to_machine()
enlighten.c
  1679  pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);   in xen_start_kernel()
/arch/sh/kernel/cpu/sh4/

sq.c
   374  unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;                        in sq_api_init()  [local]
   375  unsigned int size = (nr_pages + (BITS_PER_LONG - 1)) / BITS_PER_LONG;    in sq_api_init()
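The sq.c lines size a page bitmap for the 64 MB store-queue window: with
4 KB pages, 0x04000000 >> PAGE_SHIFT is 16384 pages, i.e. 16384 bits,
rounded up to whole longs (512 longs on a 32-bit sh build).  A small sketch
of that sizing arithmetic, with the allocation done in bytes
(alloc_page_bitmap() is a made-up name):

    #include <linux/bitops.h>
    #include <linux/slab.h>

    /* One bit per page, rounded up to whole longs, allocated zeroed. */
    static unsigned long *alloc_page_bitmap(unsigned int nr_pages)
    {
            unsigned int nr_longs = (nr_pages + BITS_PER_LONG - 1) / BITS_PER_LONG;

            return kzalloc(nr_longs * sizeof(unsigned long), GFP_KERNEL);
    }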
/arch/powerpc/platforms/ps3/

mm.c
   297  unsigned long nr_pages;                                   in ps3_mm_add_memory()  [local]
   306  nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;   in ps3_mm_add_memory()
   309  __func__, __LINE__, start_addr, start_pfn, nr_pages);     in ps3_mm_add_memory()
   322  result = online_pages(start_pfn, nr_pages);               in ps3_mm_add_memory()
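ps3_mm_add_memory() differs from the arch_add_memory() hits in one detail:
the region size is rounded up to whole pages before the range is brought
online.  A sketch of that pfn arithmetic feeding online_pages(), assuming
the two-argument online_pages(start_pfn, nr_pages) form shown above
(online_byte_range() is a made-up name):

    #include <linux/mm.h>
    #include <linux/memory_hotplug.h>

    static int online_byte_range(u64 start_addr, u64 size)
    {
            unsigned long start_pfn = start_addr >> PAGE_SHIFT;
            unsigned long nr_pages  = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

            return online_pages(start_pfn, nr_pages);
    }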
/arch/ia64/mm/

init.c
   689  unsigned long nr_pages = size >> PAGE_SHIFT;          in arch_add_memory()  [local]
   695  ret = __add_pages(nid, zone, start_pfn, nr_pages);    in arch_add_memory()
/arch/x86/kvm/

x86.c
  2346  int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1;              in pio_copy_data()  [local]
  2348  q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE,   in pio_copy_data()
  2503  int nr_pages = 1;                                                  in kvm_emulate_pio_string()  [local]
  2542  nr_pages = 2;                                                      in kvm_emulate_pio_string()
  2559  for (i = 0; i < nr_pages; ++i) {                                   in kvm_emulate_pio_string()
mmu.c
  2854  unsigned int nr_pages = 0;                                 in kvm_mmu_calculate_mmu_pages()  [local]
  2857  nr_pages += kvm->memslots[i].npages;                       in kvm_mmu_calculate_mmu_pages()
  2859  nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;   in kvm_mmu_calculate_mmu_pages()
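kvm_mmu_calculate_mmu_pages() sizes the shadow-MMU page pool as a per-mille
fraction of all memslot pages.  A worked sketch of that arithmetic; the
20/1000 ratio and the 64-page floor are assumed values for
KVM_PERMILLE_MMU_PAGES and KVM_MIN_ALLOC_MMU_PAGES, not taken from the
listing:

    /*
     * Example: a guest with 1 GB of memslots has 262144 4 KB pages,
     * so 262144 * 20 / 1000 = 5242 shadow pages.
     */
    static unsigned int mmu_pages_for(unsigned int nr_guest_pages,
                                      unsigned int permille,   /* assumed 20 */
                                      unsigned int floor)      /* assumed 64 */
    {
            unsigned int nr_mmu_pages = nr_guest_pages * permille / 1000;

            return nr_mmu_pages < floor ? floor : nr_mmu_pages;
    }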