Lines Matching refs:cpu

83 static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)  in spgd_addr()  argument
88 return &cpu->lg->pgdirs[i].pgdir[index]; in spgd_addr()
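
These hits appear to come from lguest's shadow page-table code (drivers/lguest/page_tables.c); the sketches below reconstruct the context around the matched lines, so any detail not shown in a hit is an assumption. Lines 83-88 are the shadow-toplevel lookup. A minimal sketch of the whole helper:

    static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
    {
            /* The top-level index is the high bits of the address. */
            unsigned int index = pgd_index(vaddr);

            /* Pointer to the index'th entry of the i'th shadow toplevel. */
            return &cpu->lg->pgdirs[i].pgdir[index];
    }

spmd_addr() (97) and spte_addr() (115) repeat the pattern one level down each; line 118 shows spte_addr() resolving the pmd first. gpgd_addr() (136) is different in kind: it returns a guest-physical address into the Guest's own table, for use with lgread().
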
97 static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) in spmd_addr() argument
115 static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) in spte_addr() argument
118 pmd_t *pmd = spmd_addr(cpu, spgd, vaddr); in spte_addr()
136 static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) in gpgd_addr() argument
139 return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t); in gpgd_addr()
152 static unsigned long gpte_addr(struct lg_cpu *cpu, in gpte_addr() argument
162 static unsigned long gpte_addr(struct lg_cpu *cpu, in gpte_addr() argument
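
gpte_addr() is matched twice (152 and 162) because, plausibly, the file defines it once per page-table layout under #ifdef CONFIG_X86_PAE. A sketch of the two-level (non-PAE) variant, reconstructed as an assumption:

    static unsigned long gpte_addr(struct lg_cpu *cpu,
                                   pgd_t gpgd, unsigned long vaddr)
    {
            /* The Guest's pte page lives in the frame the gpgd names. */
            unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;

            BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));

            /* Guest-physical address of the pte covering vaddr. */
            return gpage + pte_index(vaddr) * sizeof(pte_t);
    }
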
206 static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) in gpte_to_spte() argument
219 base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE; in gpte_to_spte()
229 kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte)); in gpte_to_spte()
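
gpte_to_spte() (206-229) is the heart of the shadowing scheme: the Guest's "physical" memory is really Launcher memory starting at mem_base (219), so a guest frame number becomes a host frame by offsetting and pinning. A sketch; get_pfn() and the flag handling are reconstructed from context:

    static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
    {
            unsigned long pfn, base, flags;

            /* The global bit only tells us kernel-vs-user mappings;
             * we don't pass it through to real hardware. */
            flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

            /* Guest frame 0 is the Launcher page at mem_base. */
            base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;

            /* Pin the host page under this guest frame (writably if
             * asked); -1 means the Launcher never mapped it. */
            pfn = get_pfn(base + pte_pfn(gpte), write);
            if (pfn == -1UL) {
                    kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
                    /* Zero flags = not present: harmless until the
                     * kill takes effect. */
                    flags = 0;
            }
            return pfn_pte(pfn, __pgprot(flags));
    }
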
253 static bool check_gpte(struct lg_cpu *cpu, pte_t gpte) in check_gpte() argument
256 pte_pfn(gpte) >= cpu->lg->pfn_limit) { in check_gpte()
257 kill_guest(cpu, "bad page table entry"); in check_gpte()
263 static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd) in check_gpgd() argument
266 (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) { in check_gpgd()
267 kill_guest(cpu, "bad page directory entry"); in check_gpgd()
274 static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd) in check_gpmd() argument
277 (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) { in check_gpmd()
278 kill_guest(cpu, "bad page middle directory entry"); in check_gpmd()
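
The three validators (253-278) share one job: kill a Guest that hands over an entry whose frame is at or beyond pfn_limit, or that asks for a feature the shadow scheme can't honour. A sketch of check_gpte(); the exact flag mask on the elided line is an assumption:

    static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
    {
            /* Huge-page ptes aren't shadowed; out-of-range frames
             * would index past the Guest's memory. */
            if ((pte_flags(gpte) & _PAGE_PSE) ||
                pte_pfn(gpte) >= cpu->lg->pfn_limit) {
                    kill_guest(cpu, "bad page table entry");
                    return false;
            }
            return true;
    }

check_gpgd() (263) and check_gpmd() (274) are the same shape with pgd_pfn()/pmd_pfn() and their own kill messages.
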
293 static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate, in find_spte() argument
303 spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr); in find_spte()
318 kill_guest(cpu, "out of memory allocating pte page"); in find_spte()
334 spmd = spmd_addr(cpu, *spgd, vaddr); in find_spte()
351 kill_guest(cpu, "out of memory allocating pmd page"); in find_spte()
364 return spte_addr(cpu, *spgd, vaddr); in find_spte()
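
find_spte() (293-364) walks the shadow table for vaddr, optionally allocating missing interior levels; an allocation failure kills the Guest (318, 351). A condensed two-level sketch. One subtlety: the int parameter named pgd_flags coexists with the function-like pgd_flags() macro, which only expands when followed by parentheses:

    static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr,
                            bool allocate, int pgd_flags, int pmd_flags)
    {
            pgd_t *spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);

            if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
                    unsigned long ptepage;

                    /* Probing callers stop at a missing level. */
                    if (!allocate)
                            return NULL;

                    ptepage = get_zeroed_page(GFP_KERNEL);
                    if (!ptepage) {
                            kill_guest(cpu, "out of memory allocating pte page");
                            return NULL;
                    }
                    /* Install the fresh pte page, seeded with the
                     * caller-supplied toplevel flags. */
                    set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags));
            }
            /* (The PAE build repeats this dance for the pmd level,
             * lines 334-351, before descending.) */
            return spte_addr(cpu, *spgd, vaddr);
    }
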
378 bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) in demand_page() argument
391 if (unlikely(cpu->linear_pages)) { in demand_page()
395 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); in demand_page()
404 if (!check_gpgd(cpu, gpgd)) in demand_page()
412 if (likely(!cpu->linear_pages)) { in demand_page()
413 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); in demand_page()
422 if (!check_gpmd(cpu, gpmd)) in demand_page()
430 gpte_ptr = gpte_addr(cpu, gpmd, vaddr); in demand_page()
436 gpte_ptr = gpte_addr(cpu, gpgd, vaddr); in demand_page()
439 if (unlikely(cpu->linear_pages)) { in demand_page()
444 gpte = lgread(cpu, gpte_ptr, pte_t); in demand_page()
466 if (!check_gpte(cpu, gpte)) in demand_page()
475 spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd)); in demand_page()
490 *spte = gpte_to_spte(cpu, gpte, 1); in demand_page()
498 set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0)); in demand_page()
504 if (likely(!cpu->linear_pages)) in demand_page()
505 lgwrite(cpu, gpte_ptr, pte_t, gpte); in demand_page()
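
demand_page() (378-505) is the page-fault slow path: walk the Guest's own table, decide whether the fault is the Guest's to handle, then install a shadow pte. A condensed two-level sketch; the linear-mode fakes, the errcode bit meanings (bit 1 write, bit 2 user, per the x86 fault code) and the flag masks are reconstructions:

    bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
    {
            pgd_t gpgd;
            unsigned long gpte_ptr;
            pte_t gpte, *spte;

            if (unlikely(cpu->linear_pages)) {
                    /* No real Guest table yet: fake a present toplevel. */
                    gpgd = __pgd(CHECK_GPGD_MASK);
            } else {
                    gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
                    if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
                            return false;
                    if (!check_gpgd(cpu, gpgd))
                            return false;
            }

            /* (PAE inserts the gpmd walk here, lines 412-430.) */
            gpte_ptr = gpte_addr(cpu, gpgd, vaddr);

            if (unlikely(cpu->linear_pages))
                    /* Linear mode: pretend vaddr maps to itself. */
                    gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
            else
                    gpte = lgread(cpu, gpte_ptr, pte_t);

            /* Faults the Guest must see itself: not present, a write
             * to a read-only pte, or user access to a kernel pte. */
            if (!(pte_flags(gpte) & _PAGE_PRESENT))
                    return false;
            if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
                    return false;
            if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
                    return false;

            if (!check_gpte(cpu, gpte))
                    return false;

            /* Set the bits real MMU hardware would have set. */
            gpte = pte_mkyoung(gpte);
            if (errcode & 2)
                    gpte = pte_mkdirty(gpte);

            spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), 0);
            if (!spte)
                    return false;
            release_pte(*spte);

            /* Map writably only if already dirty; otherwise
             * write-protect so the next write faults back here and we
             * can set _PAGE_DIRTY for the Guest (490, 498). */
            if (pte_dirty(gpte))
                    *spte = gpte_to_spte(cpu, gpte, 1);
            else
                    set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));

            /* Reflect accessed/dirty back into the Guest's table. */
            if (likely(!cpu->linear_pages))
                    lgwrite(cpu, gpte_ptr, pte_t, gpte);
            return true;
    }

In the PAE build the find_spte() call passes pmd_flags(gpmd) as the last argument, as line 475 shows.
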
527 static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) in page_writable() argument
537 spte = find_spte(cpu, vaddr, false, 0, 0); in page_writable()
554 void pin_page(struct lg_cpu *cpu, unsigned long vaddr) in pin_page() argument
556 if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2)) in pin_page()
557 kill_guest(cpu, "bad stack page %#lx", vaddr); in pin_page()
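
page_writable() (527-537) is the cheap probe behind pin_page(): note allocate=false on 537, so the walk never mutates the shadow. A sketch with the final test reconstructed as an assumption:

    static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
    {
            pte_t *spte;
            unsigned long flags;

            /* Probe only; never allocate on this path. */
            spte = find_spte(cpu, vaddr, false, 0, 0);
            if (!spte)
                    return false;

            /* Present and writable, or the caller must demand_page(). */
            flags = pte_flags(*spte);
            return (flags & (_PAGE_PRESENT|_PAGE_RW))
                    == (_PAGE_PRESENT|_PAGE_RW);
    }

pin_page() (554-557) then treats a demand_page() failure as fatal: it is used on the Guest's stack pages, which must always be mappable (errcode 2 forces a writable mapping).
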
642 void guest_pagetable_flush_user(struct lg_cpu *cpu) in guest_pagetable_flush_user() argument
645 flush_user_mappings(cpu->lg, cpu->cpu_pgd); in guest_pagetable_flush_user()
650 unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) in guest_pa() argument
659 if (unlikely(cpu->linear_pages)) in guest_pa()
663 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); in guest_pa()
666 kill_guest(cpu, "Bad address %#lx", vaddr); in guest_pa()
671 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); in guest_pa()
673 kill_guest(cpu, "Bad address %#lx", vaddr); in guest_pa()
674 gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t); in guest_pa()
676 gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t); in guest_pa()
679 kill_guest(cpu, "Bad address %#lx", vaddr); in guest_pa()
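
guest_pa() (650-679) answers "what guest-physical address does this guest-virtual address map to?" by walking the Guest's own table; unlike demand_page(), any absent level is fatal. A two-level sketch around the matched lines:

    unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
    {
            pgd_t gpgd;
            pte_t gpte;

            /* In boot-time linear mode, virtual == guest-physical. */
            if (unlikely(cpu->linear_pages))
                    return vaddr;

            gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
            if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
                    kill_guest(cpu, "Bad address %#lx", vaddr);

            /* (PAE reads the gpmd in between, lines 671-674.) */
            gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
            if (!(pte_flags(gpte) & _PAGE_PRESENT))
                    kill_guest(cpu, "Bad address %#lx", vaddr);

            /* Frame number plus the offset within the page. */
            return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
    }
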
703 static unsigned int new_pgdir(struct lg_cpu *cpu, in new_pgdir() argument
713 next = prandom_u32() % ARRAY_SIZE(cpu->lg->pgdirs); in new_pgdir()
715 if (!cpu->lg->pgdirs[next].pgdir) { in new_pgdir()
716 cpu->lg->pgdirs[next].pgdir = in new_pgdir()
719 if (!cpu->lg->pgdirs[next].pgdir) in new_pgdir()
720 next = cpu->cpu_pgd; in new_pgdir()
730 cpu->lg->pgdirs[next].gpgdir = gpgdir; in new_pgdir()
732 flush_user_mappings(cpu->lg, next); in new_pgdir()
735 cpu->lg->pgdirs[next].last_host_cpu = -1; in new_pgdir()
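
new_pgdir() (703-735) evicts a random slot (713) instead of tracking recency: with only a handful of shadow slots, random replacement is cheap and close enough to LRU. Sketch; the blank_pgdir out-parameter tells the caller it must re-pin stack pages:

    static unsigned int new_pgdir(struct lg_cpu *cpu,
                                  unsigned long gpgdir,
                                  int *blank_pgdir)
    {
            unsigned int next;

            /* Throw out a random slot. */
            next = prandom_u32() % ARRAY_SIZE(cpu->lg->pgdirs);

            /* First use of this slot: allocate its toplevel page. */
            if (!cpu->lg->pgdirs[next].pgdir) {
                    cpu->lg->pgdirs[next].pgdir =
                            (pgd_t *)get_zeroed_page(GFP_KERNEL);
                    /* On failure, just keep using the current slot. */
                    if (!cpu->lg->pgdirs[next].pgdir)
                            next = cpu->cpu_pgd;
                    else
                            /* Blank shadow: no kernel mappings yet. */
                            *blank_pgdir = 1;
            }
            /* Record which Guest toplevel this slot now shadows. */
            cpu->lg->pgdirs[next].gpgdir = gpgdir;
            /* Drop any stale non-kernel mappings. */
            flush_user_mappings(cpu->lg, next);

            /* This shadow hasn't run on any host cpu yet. */
            cpu->lg->pgdirs[next].last_host_cpu = -1;

            return next;
    }
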
750 static bool allocate_switcher_mapping(struct lg_cpu *cpu) in allocate_switcher_mapping() argument
755 pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true, in allocate_switcher_mapping()
774 cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true; in allocate_switcher_mapping()
806 void guest_pagetable_clear_all(struct lg_cpu *cpu) in guest_pagetable_clear_all() argument
808 release_all_pagetables(cpu->lg); in guest_pagetable_clear_all()
810 pin_stack_pages(cpu); in guest_pagetable_clear_all()
812 if (!allocate_switcher_mapping(cpu)) in guest_pagetable_clear_all()
813 kill_guest(cpu, "Cannot populate switcher mapping"); in guest_pagetable_clear_all()
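
guest_pagetable_clear_all() (806-813) is the Guest's full-flush analogue: dropping every shadow would strand the Guest, so the two mappings the next switch cannot fault in - its own stack and the Switcher - are rebuilt immediately. Reassembled from the matched lines:

    void guest_pagetable_clear_all(struct lg_cpu *cpu)
    {
            release_all_pagetables(cpu->lg);

            /* A trap pushes state onto the Guest stack with no way to
             * handle a fault there, so pin those pages up front. */
            pin_stack_pages(cpu);

            /* And we can never get back without the Switcher. */
            if (!allocate_switcher_mapping(cpu))
                    kill_guest(cpu, "Cannot populate switcher mapping");
    }
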
823 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) in guest_new_pagetable() argument
831 if (unlikely(cpu->linear_pages)) { in guest_new_pagetable()
832 release_all_pagetables(cpu->lg); in guest_new_pagetable()
833 cpu->linear_pages = false; in guest_new_pagetable()
835 newpgdir = ARRAY_SIZE(cpu->lg->pgdirs); in guest_new_pagetable()
838 newpgdir = find_pgdir(cpu->lg, pgtable); in guest_new_pagetable()
845 if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs)) in guest_new_pagetable()
846 newpgdir = new_pgdir(cpu, pgtable, &repin); in guest_new_pagetable()
848 cpu->cpu_pgd = newpgdir; in guest_new_pagetable()
854 pin_stack_pages(cpu); in guest_new_pagetable()
856 if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) { in guest_new_pagetable()
857 if (!allocate_switcher_mapping(cpu)) in guest_new_pagetable()
858 kill_guest(cpu, "Cannot populate switcher mapping"); in guest_new_pagetable()
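
guest_new_pagetable() (823-858) handles the Guest loading a new toplevel (its cr3 write). Sketch around the matched lines:

    void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
    {
            int newpgdir, repin = 0;

            /* First real toplevel?  Throw away the faked linear
             * mappings we've been running on since boot. */
            if (unlikely(cpu->linear_pages)) {
                    release_all_pagetables(cpu->lg);
                    cpu->linear_pages = false;
                    /* Force the "no shadow yet" path below. */
                    newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
            } else {
                    /* Do we already shadow this toplevel? */
                    newpgdir = find_pgdir(cpu->lg, pgtable);
            }

            /* No: evict a slot (repin set if it came up blank). */
            if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
                    newpgdir = new_pgdir(cpu, pgtable, &repin);

            cpu->cpu_pgd = newpgdir;

            /* A blank shadow needs the stack and Switcher back. */
            if (repin)
                    pin_stack_pages(cpu);
            if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) {
                    if (!allocate_switcher_mapping(cpu))
                            kill_guest(cpu, "Cannot populate switcher mapping");
            }
    }
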
888 static void do_set_pte(struct lg_cpu *cpu, int idx, in do_set_pte() argument
892 pgd_t *spgd = spgd_addr(cpu, idx, vaddr); in do_set_pte()
900 spmd = spmd_addr(cpu, *spgd, vaddr); in do_set_pte()
904 pte_t *spte = spte_addr(cpu, *spgd, vaddr); in do_set_pte()
914 if (!check_gpte(cpu, gpte)) in do_set_pte()
917 gpte_to_spte(cpu, gpte, in do_set_pte()
944 void guest_set_pte(struct lg_cpu *cpu, in guest_set_pte() argument
949 kill_guest(cpu, "attempt to set pte into Switcher pages"); in guest_set_pte()
957 if (vaddr >= cpu->lg->kernel_address) { in guest_set_pte()
959 for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++) in guest_set_pte()
960 if (cpu->lg->pgdirs[i].pgdir) in guest_set_pte()
961 do_set_pte(cpu, i, vaddr, gpte); in guest_set_pte()
964 int pgdir = find_pgdir(cpu->lg, gpgdir); in guest_set_pte()
965 if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs)) in guest_set_pte()
967 do_set_pte(cpu, pgdir, vaddr, gpte); in guest_set_pte()
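
guest_set_pte() (944-967) is the Guest announcing a pte write. Kernel mappings (at or above kernel_address) are identical in every toplevel, so every live shadow slot gets the update; a userspace mapping exists in at most one shadow. do_set_pte() (888-917) does the per-slot work, installing gpte_to_spte() eagerly when the entry is present. A sketch of the dispatcher; the Switcher-range test on 949's elided condition is an assumption:

    void guest_set_pte(struct lg_cpu *cpu,
                       unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
    {
            /* The Switcher region is ours; remapping it would leave
             * no way back to the Host. */
            if (vaddr >= switcher_addr) {
                    kill_guest(cpu, "attempt to set pte into Switcher pages");
                    return;
            }

            if (vaddr >= cpu->lg->kernel_address) {
                    unsigned int i;
                    /* Kernel mapping: mirror into every live shadow. */
                    for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
                            if (cpu->lg->pgdirs[i].pgdir)
                                    do_set_pte(cpu, i, vaddr, gpte);
            } else {
                    /* Userspace mapping: one shadow at most holds it. */
                    int pgdir = find_pgdir(cpu->lg, gpgdir);
                    if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
                            do_set_pte(cpu, pgdir, vaddr, gpte);
            }
    }
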
1030 struct lg_cpu *cpu = &lg->cpus[0]; in init_guest_pagetable() local
1034 cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated); in init_guest_pagetable()
1039 cpu->linear_pages = true; in init_guest_pagetable()
1042 if (!allocate_switcher_mapping(cpu)) { in init_guest_pagetable()
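
init_guest_pagetable() (1030-1042) sets up cpu 0 before the Guest has any page table at all: one shadow slot, linear mode on, Switcher mapped. Reassembled, with the error handling an assumption:

    int init_guest_pagetable(struct lguest *lg)
    {
            struct lg_cpu *cpu = &lg->cpus[0];
            int allocated = 0;

            /* Grab a shadow slot; gpgdir 0 is a placeholder since the
             * Guest hasn't loaded a real toplevel yet. */
            cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
            if (!allocated)
                    return -ENOMEM;

            /* Until the first guest_new_pagetable(), demand_page()
             * fakes linear, writable ptes. */
            cpu->linear_pages = true;

            if (!allocate_switcher_mapping(cpu)) {
                    release_all_pagetables(lg);
                    return -ENOMEM;
            }
            return 0;
    }
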
1051 void page_table_guest_data_init(struct lg_cpu *cpu) in page_table_guest_data_init() argument
1061 if (get_user(cpu->lg->kernel_address, in page_table_guest_data_init()
1062 &cpu->lg->lguest_data->kernel_address) in page_table_guest_data_init()
1067 || put_user(top, &cpu->lg->lguest_data->reserve_mem)) { in page_table_guest_data_init()
1068 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); in page_table_guest_data_init()
1077 if (cpu->lg->kernel_address >= switcher_addr) in page_table_guest_data_init()
1078 kill_guest(cpu, "bad kernel address %#lx", in page_table_guest_data_init()
1079 cpu->lg->kernel_address); in page_table_guest_data_init()
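
page_table_guest_data_init() (1051-1079) reads kernel_address from the shared lguest_data page and writes back the top of usable virtual memory; a faulting get_user/put_user means the shared page itself is bad (1068). The final check protects flush_user_mappings(), which loops up to pgd_index(kernel_address): a kernel_address at or above switcher_addr could let that loop reach the Switcher entries. A sketch; the value of top and the fields checked between 1062 and 1067 are assumptions/elided:

    void page_table_guest_data_init(struct lg_cpu *cpu)
    {
            /* Assumed: everything from the Switcher up is reserved. */
            u32 top = switcher_addr;

            if (get_user(cpu->lg->kernel_address,
                         &cpu->lg->lguest_data->kernel_address)
                /* (further shared-page fields are validated here) */
                || put_user(top, &cpu->lg->lguest_data->reserve_mem)) {
                    kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
                    return;
            }

            /* flush_user_mappings() must never reach the Switcher. */
            if (cpu->lg->kernel_address >= switcher_addr)
                    kill_guest(cpu, "bad kernel address %#lx",
                               cpu->lg->kernel_address);
    }
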
1097 static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i) in remove_switcher_percpu_map() argument
1103 pte = find_spte(cpu, base, false, 0, 0); in remove_switcher_percpu_map()
1107 pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0); in remove_switcher_percpu_map()
1122 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) in map_switcher_in_guest() argument
1127 struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd]; in map_switcher_in_guest()
1146 remove_switcher_percpu_map(cpu, i); in map_switcher_in_guest()
1149 remove_switcher_percpu_map(cpu, pgdir->last_host_cpu); in map_switcher_in_guest()
1163 pte = find_spte(cpu, base, false, 0, 0); in map_switcher_in_guest()
1164 regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT); in map_switcher_in_guest()
1173 pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0); in map_switcher_in_guest()
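
remove_switcher_percpu_map() (1097-1107) and map_switcher_in_guest() (1122-1173) manage the per-host-cpu tail of the Switcher region on every switch into the Guest. last_host_cpu records which cpu's pages this shadow currently maps: -1 means "possibly all of them", so 1146 loops over every cpu, while 1149 tears down just the one stale mapping. The mapping half, sketched around lines 1163-1173; the base computation and the page protections are assumptions:

    /* Assumed layout: the shared Switcher code page first, then two
     * pages per host cpu (a struct lguest_pages). */
    unsigned long base = switcher_addr + PAGE_SIZE
            + raw_smp_processor_id() * sizeof(struct lguest_pages);
    struct page *regs_page;
    pte_t *pte;

    /* First page: this Guest's register save area.  find_spte() can't
     * fail here, since allocate_switcher_mapping() built the levels. */
    pte = find_spte(cpu, base, false, 0, 0);
    regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT);
    get_page(regs_page);
    set_pte(pte, mk_pte(regs_page,
                        __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));

    /* Second page: this cpu's own struct lguest_pages (1173),
     * mapped the same way from the 'pages' argument. */
    pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
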