/arch/powerpc/platforms/powernv/ |
D | memtrace.c |
    103 static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages) in memtrace_offline_pages() argument
    105 u64 end_pfn = start_pfn + nr_pages - 1; in memtrace_offline_pages()
    114 if (offline_pages(start_pfn, nr_pages)) { in memtrace_offline_pages()
    129 u64 start_pfn, end_pfn, nr_pages, pfn; in memtrace_alloc_node() local
    138 nr_pages = size >> PAGE_SHIFT; in memtrace_alloc_node()
    141 end_pfn = round_down(end_pfn - nr_pages, nr_pages); in memtrace_alloc_node()
    144 for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) { in memtrace_alloc_node()
    145 if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) { in memtrace_alloc_node()
    152 end_pfn = base_pfn + nr_pages; in memtrace_alloc_node()
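The memtrace_alloc_node() lines above outline a top-down search: the window is rounded down to an nr_pages-aligned chunk and scanned toward start_pfn until one chunk can be offlined. A minimal reconstruction of that loop from the quoted lines (error handling and locking omitted; the return convention is an assumption):

    /* Scan downward in nr_pages-sized, nr_pages-aligned chunks. */
    u64 base_pfn;

    nr_pages = size >> PAGE_SHIFT;
    end_pfn = round_down(end_pfn - nr_pages, nr_pages);

    for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
            if (memtrace_offline_pages(nid, base_pfn, nr_pages))
                    return base_pfn << PAGE_SHIFT;  /* first chunk that offlined */
    }
    return 0;  /* no chunk on this node could be offlined */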
|
/arch/arm/mach-rpc/include/mach/ |
D | uncompress.h |
    23 unsigned long nr_pages; member
    119 unsigned int nr_pages = 0, page_size = PAGE_SIZE; in arch_decomp_setup() local
    138 nr_pages += (t->u.mem.size / PAGE_SIZE); in arch_decomp_setup()
    144 nr_pages = params->nr_pages; in arch_decomp_setup()
    189 if (nr_pages * page_size < 4096*1024) error("<4M of mem\n"); in arch_decomp_setup()
|
/arch/arm/xen/ |
D | p2m.c |
    23 unsigned long nr_pages; member
    72 entry->pfn + entry->nr_pages > pfn) { in __pfn_to_mfn()
    120 unsigned long mfn, unsigned long nr_pages) in __set_phys_to_machine_multi() argument
    132 p2m_entry->pfn + p2m_entry->nr_pages > pfn) { in __set_phys_to_machine_multi()
    152 p2m_entry->nr_pages = nr_pages; in __set_phys_to_machine_multi()
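Both __pfn_to_mfn() and __set_phys_to_machine_multi() test whether a pfn falls inside a tracked entry; the quoted lines show only the upper-bound half of the comparison. A sketch of the full containment check, with the mfn offset computation an assumption based on the usual p2m layout:

    /* Hit iff pfn lies in [entry->pfn, entry->pfn + entry->nr_pages). */
    if (entry->pfn <= pfn &&
        entry->pfn + entry->nr_pages > pfn)
            return entry->mfn + (pfn - entry->pfn);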
|
/arch/x86/mm/ |
D | numa_32.c |
    67 unsigned long nr_pages = end_pfn - start_pfn; in node_memmap_size_bytes() local
    69 if (!nr_pages) in node_memmap_size_bytes()
    72 return (nr_pages + 1) * sizeof(struct page); in node_memmap_size_bytes()
|
D | init_64.c |
    770 unsigned long nr_pages, bool want_memblock) in add_pages() argument
    774 ret = __add_pages(nid, start_pfn, nr_pages, want_memblock); in add_pages()
    779 nr_pages << PAGE_SHIFT); in add_pages()
    787 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_add_memory() local
    791 return add_pages(nid, start_pfn, nr_pages, want_memblock); in arch_add_memory()
    800 unsigned int nr_pages = 1 << order; in free_pagetable() local
    804 vmem_altmap_free(altmap, nr_pages); in free_pagetable()
    814 while (nr_pages--) in free_pagetable()
    817 while (nr_pages--) in free_pagetable()
    1132 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_remove_memory() local
    [all …]
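The arch_add_memory()/add_pages() pair shows the usual hotplug entry point: convert the byte range to page frames, then hand off to the generic __add_pages(). A minimal sketch assembled from the quoted lines (the arch-specific page-table setup that precedes the call is elided):

    int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
    {
            unsigned long start_pfn = start >> PAGE_SHIFT;
            unsigned long nr_pages = size >> PAGE_SHIFT;

            /* ... establish the direct mapping for [start, start + size) ... */

            return add_pages(nid, start_pfn, nr_pages, want_memblock);
    }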
|
/arch/arm/kernel/ |
D | atags_compat.c |
    46 unsigned long nr_pages; /* 4 */ member
    107 if (params->u1.s.nr_pages != 0x02000 && in build_tag_list()
    108 params->u1.s.nr_pages != 0x04000 && in build_tag_list()
    109 params->u1.s.nr_pages != 0x08000 && in build_tag_list()
    110 params->u1.s.nr_pages != 0x10000) { in build_tag_list()
    113 params->u1.s.nr_pages = 0x1000; /* 16MB */ in build_tag_list()
    161 tag = memtag(tag, PHYS_OFFSET, params->u1.s.nr_pages * PAGE_SIZE); in build_tag_list()
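The accepted values are exactly the page counts of plausible DRAM sizes; assuming 4 KiB pages:

    /* 0x02000 pages * 4 KiB =  32 MB      0x08000 pages * 4 KiB = 128 MB
     * 0x04000 pages * 4 KiB =  64 MB      0x10000 pages * 4 KiB = 256 MB
     * fallback 0x1000 pages * 4 KiB = 16 MB, matching the comment at line 113.
     */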
|
/arch/x86/xen/ |
D | setup.c |
    254 unsigned long end_pfn, unsigned long nr_pages) in xen_set_identity_and_release_chunk() argument
    262 end = min(end_pfn, nr_pages); in xen_set_identity_and_release_chunk()
    387 unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, in xen_set_identity_and_remap_chunk() argument
    395 remap_pfn = nr_pages; in xen_set_identity_and_remap_chunk()
    404 if (cur_pfn >= nr_pages) { in xen_set_identity_and_remap_chunk()
    409 if (cur_pfn + size > nr_pages) in xen_set_identity_and_remap_chunk()
    410 size = nr_pages - cur_pfn; in xen_set_identity_and_remap_chunk()
    416 cur_pfn + left, nr_pages); in xen_set_identity_and_remap_chunk()
    443 unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, in xen_count_remap_pages() argument
    446 if (start_pfn >= nr_pages) in xen_count_remap_pages()
    [all …]
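xen_set_identity_and_remap_chunk() repeatedly clips the chunk being processed against nr_pages, the number of pages the domain is allowed to keep. A sketch of just that clamping skeleton, reconstructed from the quoted lines (the identity-mapping and remap calls themselves are elided):

    unsigned long cur_pfn, size;

    for (cur_pfn = start_pfn; cur_pfn < end_pfn; cur_pfn += size) {
            size = end_pfn - cur_pfn;

            if (cur_pfn >= nr_pages)
                    break;                      /* entirely past the populated range */
            if (cur_pfn + size > nr_pages)
                    size = nr_pages - cur_pfn;  /* clip to the populated part */

            /* ... set up the 1:1 mapping / remap this clipped chunk ... */
    }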
|
/arch/s390/pci/ |
D | pci_dma.c |
    137 unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in __dma_update_trans() local
    143 if (!nr_pages) in __dma_update_trans()
    152 for (i = 0; i < nr_pages; i++) { in __dma_update_trans()
    327 unsigned long nr_pages; in s390_dma_map_pages() local
    332 nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); in s390_dma_map_pages()
    333 dma_addr = dma_alloc_address(dev, nr_pages); in s390_dma_map_pages()
    340 size = nr_pages * PAGE_SIZE; in s390_dma_map_pages()
    349 atomic64_add(nr_pages, &zdev->mapped_pages); in s390_dma_map_pages()
    353 dma_free_address(dev, dma_addr, nr_pages); in s390_dma_map_pages()
    425 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in __s390_dma_map_sg() local
    [all …]
|
/arch/s390/mm/ |
D | gup.c |
    227 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, in __get_user_pages_fast() argument
    238 len = (unsigned long) nr_pages << PAGE_SHIFT; in __get_user_pages_fast()
    281 int get_user_pages_fast(unsigned long start, int nr_pages, int write, in get_user_pages_fast() argument
    288 nr = __get_user_pages_fast(start, nr_pages, write, pages); in get_user_pages_fast()
    289 if (nr == nr_pages) in get_user_pages_fast()
    295 ret = get_user_pages_unlocked(start, nr_pages - nr, pages, in get_user_pages_fast()
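get_user_pages_fast() here shows the fallback pattern the sh, sparc and mips copies below repeat: attempt the lockless walk, and only take the slow path for the pages it could not pin. A condensed sketch based on the quoted lines (the return-value fix-up is inferred from the usual convention and may differ in detail):

    int nr, ret;

    nr = __get_user_pages_fast(start, nr_pages, write, pages);
    if (nr == nr_pages)
            return nr;                      /* fast path pinned everything */

    /* Slow path for the remaining tail. */
    start += (unsigned long)nr << PAGE_SHIFT;
    ret = get_user_pages_unlocked(start, nr_pages - nr, pages + nr,
                                  write ? FOLL_WRITE : 0);
    return ret < 0 ? (nr ? nr : ret) : nr + ret;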
|
/arch/x86/kernel/ |
D | ldt.c |
    114 int i, nr_pages; in map_ldt_struct() local
    136 nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); in map_ldt_struct()
    138 for (i = 0; i < nr_pages; i++) { in map_ldt_struct()
    195 int i, nr_pages; in unmap_ldt_struct() local
    204 nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); in unmap_ldt_struct()
    206 for (i = 0; i < nr_pages; i++) { in unmap_ldt_struct()
    218 flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, 0); in unmap_ldt_struct()
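Both mapping and unmapping compute the page count with a round-up division so a partially filled last page is still covered; with LDT_ENTRY_SIZE of 8 bytes and 4 KiB pages, for example:

    /* DIV_ROUND_UP(n, d) expands to (n + d - 1) / d */
    nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

    /* e.g. 513 entries * 8 = 4104 bytes -> DIV_ROUND_UP(4104, 4096) = 2 pages */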
|
D | machine_kexec_64.c |
    571 unsigned int nr_pages; in kexec_mark_range() local
    581 nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; in kexec_mark_range()
    583 return set_pages_ro(page, nr_pages); in kexec_mark_range()
    585 return set_pages_rw(page, nr_pages); in kexec_mark_range()
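Note the +1 in the count: kexec_mark_range() converts both ends to page-frame numbers first, so the count is inclusive and a range that merely touches two pages is counted as two. With 4 KiB pages, for example:

    /* start = 0x1ff8, end = 0x2007: PFNs 1 and 2 are touched. */
    nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;  /* == 2 */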
|
/arch/sh/mm/ |
D | gup.c |
    164 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, in __get_user_pages_fast() argument
    176 len = (unsigned long) nr_pages << PAGE_SHIFT; in __get_user_pages_fast()
    218 int get_user_pages_fast(unsigned long start, int nr_pages, int write, in get_user_pages_fast() argument
    229 len = (unsigned long) nr_pages << PAGE_SHIFT; in get_user_pages_fast()
|
D | init.c |
    491 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_add_memory() local
    495 ret = __add_pages(nid, start_pfn, nr_pages, want_memblock); in arch_add_memory()
    516 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_remove_memory() local
    521 ret = __remove_pages(zone, start_pfn, nr_pages); in arch_remove_memory()
|
/arch/powerpc/mm/ |
D | mem.c |
    134 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_add_memory() local
    149 return __add_pages(nid, start_pfn, nr_pages, want_memblock); in arch_add_memory()
    156 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_remove_memory() local
    170 ret = __remove_pages(page_zone(page), start_pfn, nr_pages); in arch_remove_memory()
    198 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, in walk_system_ram_range() argument
    202 unsigned long end_pfn = start_pfn + nr_pages; in walk_system_ram_range()
|
D | init_64.c |
    270 unsigned long nr_pages, addr; in vmemmap_free() local
    289 nr_pages = 1 << page_order; in vmemmap_free()
    293 vmem_altmap_free(altmap, nr_pages); in vmemmap_free()
    303 while (nr_pages--) in vmemmap_free()
|
/arch/x86/events/intel/ |
D | pt.c |
    679 p = virt_to_page(buf->data_pages[buf->nr_pages]); in topa_insert_pages()
    701 buf->nr_pages += 1ul << order; in topa_insert_pages()
    779 ((buf->nr_pages << PAGE_SHIFT) - 1)); in pt_update_head()
    781 base += buf->nr_pages << PAGE_SHIFT; in pt_update_head()
    899 pg &= buf->nr_pages - 1; in pt_topa_next_entry()
    950 idx &= buf->nr_pages - 1; in pt_buffer_reset_markers()
    960 idx &= buf->nr_pages - 1; in pt_buffer_reset_markers()
    984 while (pg < buf->nr_pages) { in pt_buffer_setup_topa_index()
    1025 head &= (buf->nr_pages << PAGE_SHIFT) - 1; in pt_buffer_reset_offsets()
    1027 pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1); in pt_buffer_reset_offsets()
    [all …]
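Several of these lines depend on buf->nr_pages being a power of two, which lets the PT driver wrap ring-buffer indices with a mask instead of a modulo:

    /* Valid only while nr_pages is a power of two. */
    pg   &= buf->nr_pages - 1;                  /* wrap a page index  */
    head &= (buf->nr_pages << PAGE_SHIFT) - 1;  /* wrap a byte offset */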
|
D | bts.c |
    61 unsigned int nr_pages; member
    88 bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite) in bts_buffer_setup_aux() argument
    94 size_t size = nr_pages << PAGE_SHIFT; in bts_buffer_setup_aux()
    98 for (pg = 0, nbuf = 0; pg < nr_pages;) { in bts_buffer_setup_aux()
    114 buf->nr_pages = nr_pages; in bts_buffer_setup_aux()
    327 buf->nr_pages << PAGE_SHIFT); in bts_event_stop()
    385 head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); in bts_buffer_reset()
|
/arch/sparc/mm/ |
D | gup.c |
    195 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, in __get_user_pages_fast() argument
    206 len = (unsigned long) nr_pages << PAGE_SHIFT; in __get_user_pages_fast()
    225 int get_user_pages_fast(unsigned long start, int nr_pages, int write, in get_user_pages_fast() argument
    236 len = (unsigned long) nr_pages << PAGE_SHIFT; in get_user_pages_fast()
|
/arch/mips/mm/ |
D | gup.c |
    182 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, in __get_user_pages_fast() argument
    194 len = (unsigned long) nr_pages << PAGE_SHIFT; in __get_user_pages_fast()
    249 int get_user_pages_fast(unsigned long start, int nr_pages, int write, in get_user_pages_fast() argument
    260 len = (unsigned long) nr_pages << PAGE_SHIFT; in get_user_pages_fast()
|
D | ioremap.c |
    102 static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, in __ioremap_check_ram() argument
    107 for (i = 0; i < nr_pages; i++) { in __ioremap_check_ram()
|
/arch/alpha/mm/ |
D | init.c |
    193 unsigned long nr_pages = 0; in callback_init() local
    199 nr_pages += crb->map[i].count; in callback_init()
    203 console_remap_vm.size = nr_pages << PAGE_SHIFT; in callback_init()
|
/arch/powerpc/kvm/ |
D | book3s_hv_builtin.c |
    74 struct page *kvm_alloc_hpt_cma(unsigned long nr_pages) in kvm_alloc_hpt_cma() argument
    76 VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT); in kvm_alloc_hpt_cma()
    78 return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES), in kvm_alloc_hpt_cma()
    83 void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages) in kvm_free_hpt_cma() argument
    85 cma_release(kvm_cma, page, nr_pages); in kvm_free_hpt_cma()
|
/arch/s390/kvm/ |
D | gaccess.c |
    798 unsigned long *pages, unsigned long nr_pages, in guest_page_range() argument
    806 while (nr_pages) { in guest_page_range()
    825 nr_pages--; in guest_page_range()
    834 unsigned long _len, nr_pages, gpa, idx; in access_guest() local
    847 nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; in access_guest()
    849 if (nr_pages > ARRAY_SIZE(pages_array)) in access_guest()
    850 pages = vmalloc(nr_pages * sizeof(unsigned long)); in access_guest()
    856 rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode); in access_guest()
    857 for (idx = 0; idx < nr_pages && !rc; idx++) { in access_guest()
    870 if (nr_pages > ARRAY_SIZE(pages_array)) in access_guest()
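Line 847 sizes the scratch array from the guest address and length; the offset of ga within its first page is folded in, so an unaligned copy can span one page more than len / PAGE_SIZE suggests. A worked example with 4 KiB pages:

    /* ga offset = 0xff0, len = 0x20:
     * ((0xff0 + 0x1f) >> 12) + 1 == 2 pages spanned.
     */
    nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;

    /* Small ranges use the on-stack array; large ones fall back to vmalloc(). */
    if (nr_pages > ARRAY_SIZE(pages_array))
            pages = vmalloc(nr_pages * sizeof(unsigned long));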
|
/arch/powerpc/sysdev/ |
D | axonram.c |
    148 __axon_ram_direct_access(struct axon_ram_bank *bank, pgoff_t pgoff, long nr_pages, in __axon_ram_direct_access() argument
    159 axon_ram_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, in axon_ram_dax_direct_access() argument
    164 return __axon_ram_direct_access(bank, pgoff, nr_pages, kaddr, pfn); in axon_ram_dax_direct_access()
|
/arch/ia64/mm/ |
D | init.c |
    653 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_add_memory() local
    656 ret = __add_pages(nid, start_pfn, nr_pages, want_memblock); in arch_add_memory()
    668 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_remove_memory() local
    673 ret = __remove_pages(zone, start_pfn, nr_pages); in arch_remove_memory()
|