Searched refs: new_pgd (Results 1 – 7 of 7), sorted by relevance

/kernel/linux/linux-5.10/arch/nds32/mm/
mm-nds32.c 17 pgd_t *new_pgd, *init_pgd; in pgd_alloc() local
20 new_pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, 0); in pgd_alloc()
21 if (!new_pgd) in pgd_alloc()
24 (*new_pgd) = 1; in pgd_alloc()
25 new_pgd++; in pgd_alloc()
27 new_pgd -= PTRS_PER_PGD; in pgd_alloc()
31 memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR, in pgd_alloc()
34 cpu_dcache_wb_range((unsigned long)new_pgd, in pgd_alloc()
35 (unsigned long)new_pgd + in pgd_alloc()
37 inc_zone_page_state(virt_to_page((unsigned long *)new_pgd), in pgd_alloc()
[all …]
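
The nds32 excerpt shows the common shape of pgd_alloc(): get a fresh page, mark every user-space slot not-present, copy the kernel-space slots from the init page directory (so all address spaces share the kernel mappings), then write the range back through the data cache. Below is a minimal userspace model of that shape; pgd_t and both constants are illustrative stand-ins, not the real nds32 definitions.

    #include <stdlib.h>
    #include <string.h>

    typedef unsigned long pgd_t;          /* stand-in for the arch type */
    #define PTRS_PER_PGD        2048      /* illustrative value         */
    #define FIRST_KERNEL_PGD_NR 1024      /* first kernel-space slot    */

    static pgd_t *model_pgd_alloc(const pgd_t *init_pgd)
    {
        pgd_t *new_pgd = calloc(PTRS_PER_PGD, sizeof(pgd_t));
        if (!new_pgd)
            return NULL;

        /* User half: no valid translations yet. The excerpt stores 1 in
         * each slot; the exact not-present encoding is arch-defined. */
        for (int i = 0; i < FIRST_KERNEL_PGD_NR; i++)
            new_pgd[i] = 1;

        /* Kernel half: every address space shares the init mappings. */
        memcpy(new_pgd + FIRST_KERNEL_PGD_NR,
               init_pgd + FIRST_KERNEL_PGD_NR,
               (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

        /* The real code then calls cpu_dcache_wb_range() over this range
         * so the hardware page-table walker sees the entries in memory. */
        return new_pgd;
    }
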
/kernel/linux/linux-5.10/arch/arm/mm/
pgd.c 32 pgd_t *new_pgd, *init_pgd; in pgd_alloc() local
38 new_pgd = __pgd_alloc(); in pgd_alloc()
39 if (!new_pgd) in pgd_alloc()
42 memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); in pgd_alloc()
48 memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD, in pgd_alloc()
51 clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); in pgd_alloc()
57 new_p4d = p4d_alloc(mm, new_pgd + pgd_index(MODULES_VADDR), in pgd_alloc()
91 new_p4d = p4d_alloc(mm, new_pgd, 0); in pgd_alloc()
127 return new_pgd; in pgd_alloc()
137 __pgd_free(new_pgd); in pgd_alloc()
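
The ARM version follows the same user/kernel split (memset the user slots, memcpy the kernel slots, clean the dcache) but additionally pre-populates lower table levels, at the excerpt's line 57 for MODULES_VADDR. The slot an address lands in comes from pgd_index(), which is just the address's top bits; a sketch of that indexing, with an illustrative shift and table size rather than the real ARM values:

    #include <stdio.h>

    #define PGDIR_SHIFT  21u      /* illustrative shift */
    #define PTRS_PER_PGD 2048u    /* illustrative size  */

    static unsigned int pgd_index(unsigned long addr)
    {
        /* Top address bits select the first-level table entry. */
        return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
    }

    int main(void)
    {
        unsigned long modules_vaddr = 0xbf000000UL; /* stand-in address */
        printf("slot %u\n", pgd_index(modules_vaddr));
        return 0;
    }
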
/kernel/linux/linux-5.10/arch/m68k/include/asm/
sun3_pgalloc.h 45 pgd_t *new_pgd; in pgd_alloc() local
47 new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL); in pgd_alloc()
48 memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE); in pgd_alloc()
49 memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT)); in pgd_alloc()
50 return new_pgd; in pgd_alloc()
mcf_pgalloc.h 85 pgd_t *new_pgd; in pgd_alloc() local
87 new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN); in pgd_alloc()
88 if (!new_pgd) in pgd_alloc()
90 memcpy(new_pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t)); in pgd_alloc()
91 memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT); in pgd_alloc()
92 return new_pgd; in pgd_alloc()
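
Both m68k variants invert the order used above: they clone the entire kernel reference table (swapper_pg_dir) first, then blank the user portion, i.e. every slot below PAGE_OFFSET. A compact userspace model follows; the constants are illustrative stand-ins, and the memset length mirrors the excerpts' (PAGE_OFFSET >> PGDIR_SHIFT) expression.

    #include <stdlib.h>
    #include <string.h>

    typedef unsigned long pgd_t;
    #define PAGE_SIZE   4096u
    #define PGDIR_SHIFT 25u               /* illustrative shift       */
    #define PAGE_OFFSET 0xc0000000UL      /* illustrative kernel base */

    static pgd_t *model_pgd_alloc(const pgd_t *swapper_pg_dir)
    {
        pgd_t *new_pgd = malloc(PAGE_SIZE);
        if (!new_pgd)
            return NULL;
        /* Start from a full copy of the kernel's reference table... */
        memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
        /* ...then wipe the user-space slots below PAGE_OFFSET. */
        memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
        return new_pgd;
    }
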
/kernel/linux/linux-5.10/arch/x86/power/
hibernate_64.c 74 pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot)); in set_up_temporary_text_mapping() local
77 set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); in set_up_temporary_text_mapping()
80 pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot)); in set_up_temporary_text_mapping() local
81 set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); in set_up_temporary_text_mapping()
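
In the hibernation code, new_pgd is a single entry value rather than a table: __pgd(__pa(p4d) | pgprot_val(pgtable_prot)) packs the physical address of the next-level table together with protection bits into one 64-bit word, which set_pgd() stores at the slot chosen by pgd_index(restore_jump_address). A schematic of that packing; _PAGE_PRESENT and _PAGE_RW use the standard x86 bit values, but treat the whole sketch as illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_PRESENT 0x001ULL   /* entry is valid */
    #define _PAGE_RW      0x002ULL   /* writable       */

    /* What __pgd(__pa(p4d) | prot) boils down to: the page-aligned
     * physical address of the next table OR'ed with flag bits. */
    static uint64_t make_pgd_entry(uint64_t next_table_phys, uint64_t prot)
    {
        return next_table_phys | prot;
    }

    int main(void)
    {
        uint64_t e = make_pgd_entry(0x1234000ULL, _PAGE_PRESENT | _PAGE_RW);
        printf("entry=%#llx phys=%#llx flags=%#llx\n",
               (unsigned long long)e,
               (unsigned long long)(e & ~0xfffULL),
               (unsigned long long)(e & 0xfffULL));
        return 0;
    }
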
/kernel/linux/linux-5.10/arch/x86/kvm/mmu/
mmu.c 3834 static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd, in cached_root_available() argument
3844 if (is_root_usable(&root, new_pgd, new_role)) in cached_root_available()
3850 if (is_root_usable(&root, new_pgd, new_role)) in cached_root_available()
3860 static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd, in fast_pgd_switch() argument
3872 return cached_root_available(vcpu, new_pgd, new_role); in fast_pgd_switch()
3877 static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, in __kvm_mmu_new_pgd() argument
3881 if (!fast_pgd_switch(vcpu, new_pgd, new_role)) { in __kvm_mmu_new_pgd()
3916 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush, in kvm_mmu_new_pgd() argument
3919 __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu), in kvm_mmu_new_pgd()
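
The KVM hits are a different use of the name: here new_pgd is a guest-physical root, and fast_pgd_switch() asks cached_root_available() whether a root built earlier for the same pgd and role can be reused, avoiding a full rebuild on every guest page-table switch. A simplified model of that probe; the struct layout and root_usable() test are hypothetical stand-ins for KVM's kvm_mmu_root_info and is_root_usable().

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t gpa_t;

    #define NUM_PREV_ROOTS 3   /* KVM keeps a small cache of prior roots */

    struct cached_root {
        gpa_t    pgd;    /* guest root this tree was built for  */
        uint32_t role;   /* paging mode/flags it was built with */
    };

    struct mmu_model {
        struct cached_root root;                  /* current root */
        struct cached_root prev[NUM_PREV_ROOTS];  /* recent roots */
    };

    static bool root_usable(const struct cached_root *r,
                            gpa_t pgd, uint32_t role)
    {
        return r->pgd == pgd && r->role == role;
    }

    /* Mirrors the shape of cached_root_available(): try the current
     * root, then each previously used root; a hit means the switch is
     * "fast" and no rebuild is needed. */
    static bool fast_pgd_switch(struct mmu_model *mmu,
                                gpa_t new_pgd, uint32_t role)
    {
        if (root_usable(&mmu->root, new_pgd, role))
            return true;
        for (int i = 0; i < NUM_PREV_ROOTS; i++)
            if (root_usable(&mmu->prev[i], new_pgd, role))
                return true;
        return false;
    }
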
/kernel/linux/linux-5.10/arch/x86/include/asm/
kvm_host.h 1568 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,