
Searched refs:nr_pages (Results 1 – 25 of 53) sorted by relevance

/arch/arm64/kvm/hyp/nvhe/
setup.c:41 unsigned long nr_pages; in divide_memory_pool() local
45 nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page)); in divide_memory_pool()
46 vmemmap_base = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
50 nr_pages = hyp_shadow_table_pages(sizeof(struct kvm_shadow_vm)); in divide_memory_pool()
51 shadow_table_base = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
55 nr_pages = hyp_s1_pgtable_pages(); in divide_memory_pool()
56 hyp_pgt_base = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
60 nr_pages = host_s2_pgtable_pages(); in divide_memory_pool()
61 host_s2_pgt_base = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
65 nr_pages = hyp_ffa_proxy_pages(); in divide_memory_pool()
[all …]
early_alloc.c:24 void *hyp_early_alloc_contig(unsigned int nr_pages) in hyp_early_alloc_contig() argument
26 unsigned long size = (nr_pages << PAGE_SHIFT); in hyp_early_alloc_contig()
29 if (!nr_pages) in hyp_early_alloc_contig()
mem_protect.c:116 unsigned long nr_pages, pfn; in prepare_s2_pool() local
120 nr_pages = host_s2_pgtable_pages(); in prepare_s2_pool()
121 ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0); in prepare_s2_pool()
249 unsigned long nr_pages; in kvm_guest_prepare_stage2() local
252 nr_pages = kvm_pgtable_stage2_pgd_size(vm->arch.vtcr) >> PAGE_SHIFT; in kvm_guest_prepare_stage2()
254 ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0); in kvm_guest_prepare_stage2()
709 u64 nr_pages; member
844 u64 size = tx->nr_pages * PAGE_SIZE; in host_request_owned_transition()
854 u64 size = tx->nr_pages * PAGE_SIZE; in host_request_unshare()
864 u64 size = tx->nr_pages * PAGE_SIZE; in host_initiate_share()
[all …]
page_alloc.c:222 int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages, in hyp_pool_init() argument
230 pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT)); in hyp_pool_init()
234 pool->range_end = phys + (nr_pages << PAGE_SHIFT); in hyp_pool_init()
238 for (i = 0; i < nr_pages; i++) in hyp_pool_init()
242 for (i = reserved_pages; i < nr_pages; i++) in hyp_pool_init()
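The hyp_pool_init() hits above show the two conversions that recur throughout these results: pages to bytes via nr_pages << PAGE_SHIFT, and a byte size back to a buddy order via get_order(). A minimal userland sketch of that arithmetic (the PAGE_SHIFT value and the toy_get_order() helper are illustrative assumptions, not the kernel's definitions):

    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assumed 4 KiB pages */

    /* Toy get_order(): smallest order such that (1 << order) pages
     * cover 'size' bytes. */
    static unsigned int toy_get_order(unsigned long size)
    {
        unsigned int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
            size >>= 1;
            order++;
        }
        return order;
    }

    int main(void)
    {
        unsigned long nr_pages = 1000;
        unsigned long phys = 0x80000000UL;

        /* pages -> bytes, as in pool->range_end = phys + (nr_pages << PAGE_SHIFT) */
        printf("range_end = %#lx\n", phys + (nr_pages << PAGE_SHIFT));
        /* bytes -> order, mirroring the max_order computation at line 230 */
        printf("order = %u\n", toy_get_order((nr_pages + 1) << PAGE_SHIFT));
        return 0;
    }
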
/arch/powerpc/platforms/powernv/
memtrace.c:92 unsigned long nr_pages) in memtrace_clear_range() argument
97 for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) { in memtrace_clear_range()
107 (unsigned long)pfn_to_kaddr(start_pfn + nr_pages), in memtrace_clear_range()
113 const unsigned long nr_pages = PHYS_PFN(size); in memtrace_alloc_node() local
121 page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE | in memtrace_alloc_node()
132 memtrace_clear_range(start_pfn, nr_pages); in memtrace_alloc_node()
138 for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) in memtrace_alloc_node()
216 const unsigned long nr_pages = PHYS_PFN(size); in memtrace_free() local
225 for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) in memtrace_free()
228 free_contig_range(start_pfn, nr_pages); in memtrace_free()
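The memtrace.c hits derive nr_pages from a byte size with PHYS_PFN() and then visit the range one page frame at a time. A small sketch of that shape (PAGE_SHIFT and the example values are assumed):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT)) /* bytes -> frames */

    int main(void)
    {
        unsigned long size = 64UL << 20;    /* a 64 MiB region */
        unsigned long start_pfn = 0x100000; /* arbitrary example frame */
        unsigned long nr_pages = PHYS_PFN(size);
        unsigned long pfn;

        /* Same loop shape as memtrace_clear_range()/memtrace_free(). */
        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
            ;                               /* per-page work goes here */

        printf("%lu pages in a %lu MiB region\n", nr_pages, size >> 20);
        return 0;
    }
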
/arch/arm/mach-rpc/include/mach/
uncompress.h:20 unsigned long nr_pages; member
116 unsigned int nr_pages = 0, page_size = PAGE_SIZE; in arch_decomp_setup() local
130 nr_pages += (t->u.mem.size / PAGE_SIZE); in arch_decomp_setup()
134 nr_pages = params->nr_pages; in arch_decomp_setup()
179 if (nr_pages * page_size < 4096*1024) error("<4M of mem\n"); in arch_decomp_setup()
/arch/arm/xen/
p2m.c:25 unsigned long nr_pages; member
75 entry->pfn + entry->nr_pages > pfn) { in __pfn_to_mfn()
151 unsigned long mfn, unsigned long nr_pages) in __set_phys_to_machine_multi() argument
164 p2m_entry->pfn + p2m_entry->nr_pages > pfn) { in __set_phys_to_machine_multi()
184 p2m_entry->nr_pages = nr_pages; in __set_phys_to_machine_multi()
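The p2m entries above pair a base pfn with nr_pages, so lookups test membership with entry->pfn + entry->nr_pages > pfn. A sketch of that half-open range check (the struct and helper names here are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative mirror of the {pfn, nr_pages} pair kept per entry. */
    struct pfn_range {
        unsigned long pfn;      /* first frame in the range */
        unsigned long nr_pages; /* number of frames covered */
    };

    /* True iff pfn lies in [r->pfn, r->pfn + r->nr_pages), the same
     * test used by __pfn_to_mfn() and __set_phys_to_machine_multi(). */
    static bool range_contains(const struct pfn_range *r, unsigned long pfn)
    {
        return r->pfn <= pfn && r->pfn + r->nr_pages > pfn;
    }

    int main(void)
    {
        struct pfn_range r = { .pfn = 100, .nr_pages = 16 };

        printf("%d %d\n", range_contains(&r, 100), range_contains(&r, 116)); /* 1 0 */
        return 0;
    }
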
/arch/arm64/include/asm/
kvm_pkvm.h:246 unsigned long nr_pages = reg->size >> PAGE_SHIFT; in hyp_vmemmap_memblock_size() local
250 end = start + nr_pages * vmemmap_entry_size; in hyp_vmemmap_memblock_size()
274 static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages) in __hyp_pgtable_max_pages() argument
280 nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE); in __hyp_pgtable_max_pages()
281 total += nr_pages; in __hyp_pgtable_max_pages()
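__hyp_pgtable_max_pages() sizes page-table memory by repeatedly dividing nr_pages by PTRS_PER_PTE and accumulating the per-level counts. A sketch of that recurrence (the four-level depth and the PTRS_PER_PTE value are assumptions for illustration):

    #include <stdio.h>

    #define PTRS_PER_PTE 512 /* assumed: 4 KiB pages, 8-byte entries */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Pages of table memory needed to map nr_pages: each level needs
     * one entry per page (or table) in the level below it. */
    static unsigned long pgtable_max_pages(unsigned long nr_pages)
    {
        unsigned long total = 0;
        int level;

        for (level = 0; level < 4; level++) { /* assumed 4-level walk */
            nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
            total += nr_pages;
        }
        return total;
    }

    int main(void)
    {
        /* Tables needed for 4 GiB of 4 KiB pages. */
        printf("%lu\n", pgtable_max_pages(1UL << 20)); /* -> 2054 */
        return 0;
    }
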
/arch/arm64/kvm/hyp/include/nvhe/
mem_protect.h:68 int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
69 int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
74 int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
75 int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
early_alloc.h:10 void *hyp_early_alloc_contig(unsigned int nr_pages);
gfp.h:32 int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
/arch/powerpc/mm/
mem.c:122 int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, in add_pages() argument
127 ret = __add_pages(nid, start_pfn, nr_pages, params); in add_pages()
133 nr_pages << PAGE_SHIFT); in add_pages()
142 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_add_memory() local
148 rc = add_pages(nid, start_pfn, nr_pages, params); in arch_add_memory()
157 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_remove_memory() local
159 __remove_pages(start_pfn, nr_pages, altmap); in arch_remove_memory()
init_64.c:322 unsigned long nr_pages, addr; in vmemmap_free() local
339 nr_pages = 1 << page_order; in vmemmap_free()
343 vmem_altmap_free(altmap, nr_pages); in vmemmap_free()
353 while (nr_pages--) in vmemmap_free()
/arch/x86/xen/
setup.c:255 unsigned long end_pfn, unsigned long nr_pages) in xen_set_identity_and_release_chunk() argument
263 end = min(end_pfn, nr_pages); in xen_set_identity_and_release_chunk()
388 unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, in xen_set_identity_and_remap_chunk() argument
396 remap_pfn = nr_pages; in xen_set_identity_and_remap_chunk()
405 if (cur_pfn >= nr_pages) { in xen_set_identity_and_remap_chunk()
410 if (cur_pfn + size > nr_pages) in xen_set_identity_and_remap_chunk()
411 size = nr_pages - cur_pfn; in xen_set_identity_and_remap_chunk()
417 cur_pfn + left, nr_pages); in xen_set_identity_and_remap_chunk()
444 unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, in xen_count_remap_pages() argument
447 if (start_pfn >= nr_pages) in xen_count_remap_pages()
[all …]
/arch/arm/kernel/
atags_compat.c:43 unsigned long nr_pages; /* 4 */ member
104 if (params->u1.s.nr_pages != 0x02000 && in build_tag_list()
105 params->u1.s.nr_pages != 0x04000 && in build_tag_list()
106 params->u1.s.nr_pages != 0x08000 && in build_tag_list()
107 params->u1.s.nr_pages != 0x10000) { in build_tag_list()
110 params->u1.s.nr_pages = 0x1000; /* 16MB */ in build_tag_list()
158 tag = memtag(tag, PHYS_OFFSET, params->u1.s.nr_pages * PAGE_SIZE); in build_tag_list()
/arch/s390/pci/
pci_dma.c:136 unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in __dma_update_trans() local
142 if (!nr_pages) in __dma_update_trans()
151 for (i = 0; i < nr_pages; i++) { in __dma_update_trans()
341 unsigned long nr_pages; in s390_dma_map_pages() local
346 nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); in s390_dma_map_pages()
347 dma_addr = dma_alloc_address(dev, nr_pages); in s390_dma_map_pages()
354 size = nr_pages * PAGE_SIZE; in s390_dma_map_pages()
363 atomic64_add(nr_pages, &zdev->mapped_pages); in s390_dma_map_pages()
367 dma_free_address(dev, dma_addr, nr_pages); in s390_dma_map_pages()
439 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in __s390_dma_map_sg() local
[all …]
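s390_dma_map_pages() sizes the mapping with iommu_num_pages() rather than a plain shift because the number of pages a buffer touches depends on its offset within the first page. A sketch of that calculation (buf_num_pages() is an illustrative stand-in for the kernel helper):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE (1UL << PAGE_SHIFT)
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Pages spanned by 'len' bytes at physical address 'pa': the offset
     * into the first page counts against the total. */
    static unsigned long buf_num_pages(unsigned long pa, unsigned long len)
    {
        return DIV_ROUND_UP((pa & (PAGE_SIZE - 1)) + len, PAGE_SIZE);
    }

    int main(void)
    {
        printf("%lu\n", buf_num_pages(0x1800, 0x1000)); /* straddles two pages -> 2 */
        printf("%lu\n", buf_num_pages(0x1000, 0x1000)); /* page-aligned -> 1 */
        return 0;
    }
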
/arch/x86/events/intel/
pt.c:615 mask = (buf->nr_pages * PAGE_SIZE - 1) >> 7; in pt_config_buffer()
736 p = virt_to_page(buf->data_pages[buf->nr_pages]); in topa_insert_pages()
764 buf->nr_pages += 1ul << order; in topa_insert_pages()
850 ((buf->nr_pages << PAGE_SHIFT) - 1)); in pt_update_head()
852 base += buf->nr_pages << PAGE_SHIFT; in pt_update_head()
968 if (WARN_ON_ONCE(pg >= buf->nr_pages)) in pt_topa_entry_for_page()
1102 idx &= buf->nr_pages - 1; in pt_buffer_reset_markers()
1117 idx &= buf->nr_pages - 1; in pt_buffer_reset_markers()
1153 head &= (buf->nr_pages << PAGE_SHIFT) - 1; in pt_buffer_reset_offsets()
1156 pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1); in pt_buffer_reset_offsets()
[all …]
bts.c:53 unsigned int nr_pages; member
81 int nr_pages, bool overwrite) in bts_buffer_setup_aux() argument
88 size_t size = nr_pages << PAGE_SHIFT; in bts_buffer_setup_aux()
92 for (pg = 0, nbuf = 0; pg < nr_pages;) { in bts_buffer_setup_aux()
108 buf->nr_pages = nr_pages; in bts_buffer_setup_aux()
321 buf->nr_pages << PAGE_SHIFT); in bts_event_stop()
379 head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); in bts_buffer_reset()
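pt.c masks page indexes with nr_pages - 1 and byte offsets with (nr_pages << PAGE_SHIFT) - 1, which only works because the AUX buffer's page count is a power of two. A small sketch of that wraparound (the values are illustrative):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long nr_pages = 8;          /* must be a power of two */
        unsigned long head = 9 * 4096 + 100; /* an offset past the buffer end */

        /* Wrap instead of overflowing, as in pt_update_head() and
         * pt_buffer_reset_offsets(). */
        printf("wrapped head = %lu\n", head & ((nr_pages << PAGE_SHIFT) - 1)); /* 4196 */
        printf("page index   = %lu\n", (head >> PAGE_SHIFT) & (nr_pages - 1)); /* 1 */
        return 0;
    }
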
/arch/mips/mm/
ioremap.c:25 static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, in __ioremap_check_ram() argument
30 for (i = 0; i < nr_pages; i++) { in __ioremap_check_ram()
/arch/x86/kernel/
ldt.c:293 int i, nr_pages; in map_ldt_struct() local
309 nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); in map_ldt_struct()
311 for (i = 0; i < nr_pages; i++) { in map_ldt_struct()
352 int i, nr_pages; in unmap_ldt_struct() local
361 nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); in unmap_ldt_struct()
363 for (i = 0; i < nr_pages; i++) { in unmap_ldt_struct()
375 flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false); in unmap_ldt_struct()
/arch/alpha/mm/
init.c:196 unsigned long nr_pages = 0; in callback_init() local
202 nr_pages += crb->map[i].count; in callback_init()
206 console_remap_vm.size = nr_pages << PAGE_SHIFT; in callback_init()
/arch/s390/mm/
init.c:268 mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT); in s390_cma_mem_notifier()
313 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_remove_memory() local
315 __remove_pages(start_pfn, nr_pages, altmap); in arch_remove_memory()
/arch/sh/mm/
init.c:403 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_add_memory() local
410 ret = __add_pages(nid, start_pfn, nr_pages, params); in arch_add_memory()
420 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_remove_memory() local
422 __remove_pages(start_pfn, nr_pages, altmap); in arch_remove_memory()
/arch/ia64/mm/
init.c:473 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_add_memory() local
479 ret = __add_pages(nid, start_pfn, nr_pages, params); in arch_add_memory()
490 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_remove_memory() local
492 __remove_pages(start_pfn, nr_pages, altmap); in arch_remove_memory()
/arch/powerpc/kvm/
book3s_hv_builtin.c:58 struct page *kvm_alloc_hpt_cma(unsigned long nr_pages) in kvm_alloc_hpt_cma() argument
60 VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT); in kvm_alloc_hpt_cma()
62 return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES), in kvm_alloc_hpt_cma()
67 void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages) in kvm_free_hpt_cma() argument
69 cma_release(kvm_cma, page, nr_pages); in kvm_free_hpt_cma()
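kvm_alloc_hpt_cma() hands cma_alloc() an alignment expressed as a power-of-two order. A sketch of the rounding that order_base_2() performs (toy_order_base_2() is a simplified stand-in, not the kernel macro):

    #include <stdio.h>

    /* Toy order_base_2(): smallest order with (1UL << order) >= n. */
    static unsigned int toy_order_base_2(unsigned long n)
    {
        unsigned int order = 0;

        while ((1UL << order) < n)
            order++;
        return order;
    }

    int main(void)
    {
        printf("%u\n", toy_order_base_2(1000)); /* -> 10 (1024 frames) */
        printf("%u\n", toy_order_base_2(1024)); /* -> 10 */
        return 0;
    }
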
