Lines Matching refs:hvc

Each hit below shows its line number in the source file, the matching line, and the containing function; a trailing "argument" or "local" marks the hit where hvc is declared. The function names are consistent with User-Mode Linux's TLB flushing code.

63 static int do_ops(struct host_vm_change *hvc, int end,  in do_ops()  argument
70 op = &hvc->ops[i]; in do_ops()
73 if (hvc->userspace) in do_ops()
74 ret = map(&hvc->mm->context.id, op->u.mmap.addr, in do_ops()
78 &hvc->data); in do_ops()
84 if (hvc->userspace) in do_ops()
85 ret = unmap(&hvc->mm->context.id, in do_ops()
88 &hvc->data); in do_ops()
96 if (hvc->userspace) in do_ops()
97 ret = protect(&hvc->mm->context.id, in do_ops()
101 finished, &hvc->data); in do_ops()
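The do_ops() hits are the replay side of the batch: each queued host_vm_op is dispatched to the host map()/unmap()/protect() helpers, with the hvc->userspace branches passing the target address space id (hvc->mm->context.id) and hvc->data, and the finished flag forwarded on each call. Below is a standalone C model of that loop; the op union, the queue capacity, and the print-only handlers are assumptions reconstructed from the hits, not the kernel's definitions (the real code picks user- or kernel-side helpers based on hvc->userspace, while this model prints one path).

    /* Standalone model of the dispatch loop matched in do_ops() above.
     * Struct layouts and the handler bodies are assumptions. */
    #include <stdio.h>

    enum op_type { NONE, MMAP, MUNMAP, MPROTECT };

    struct host_vm_op {
            enum op_type type;
            union {
                    struct { unsigned long addr, len; unsigned int prot; } mmap;
                    struct { unsigned long addr, len; } munmap;
                    struct { unsigned long addr, len; unsigned int prot; } mprotect;
            } u;
    };

    struct host_vm_change {
            struct host_vm_op ops[16];  /* assumed capacity */
            int index;                  /* next free slot in ops[] */
            int userspace;              /* target is a user mm, not the kernel */
            int force;                  /* replay even unchanged entries */
    };

    /* Replay the first 'end' queued ops.  As in the hits, 'finished' is
     * forwarded on every call so the host side can tell when the batch
     * is complete. */
    static int do_ops(struct host_vm_change *hvc, int end, int finished)
    {
            int i, ret = 0;

            for (i = 0; i < end && ret == 0; i++) {
                    struct host_vm_op *op = &hvc->ops[i];

                    switch (op->type) {
                    case MMAP:
                            printf("map      %#lx len %#lx prot %u fin %d\n",
                                   op->u.mmap.addr, op->u.mmap.len,
                                   op->u.mmap.prot, finished);
                            break;
                    case MUNMAP:
                            printf("unmap    %#lx len %#lx fin %d\n",
                                   op->u.munmap.addr, op->u.munmap.len,
                                   finished);
                            break;
                    case MPROTECT:
                            printf("mprotect %#lx len %#lx prot %u fin %d\n",
                                   op->u.mprotect.addr, op->u.mprotect.len,
                                   op->u.mprotect.prot, finished);
                            break;
                    default:
                            ret = -1;   /* unknown op type: stop early */
                    }
            }
            return ret;
    }

The real loop stops on the first error, which the model mirrors by checking ret in the loop condition.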
123 unsigned int prot, struct host_vm_change *hvc) in add_mmap() argument
132 if (hvc->userspace) in add_mmap()
136 if (hvc->index != 0) { in add_mmap()
137 last = &hvc->ops[hvc->index - 1]; in add_mmap()
147 if (hvc->index == ARRAY_SIZE(hvc->ops)) { in add_mmap()
148 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0); in add_mmap()
149 hvc->index = 0; in add_mmap()
152 hvc->ops[hvc->index++] = ((struct host_vm_op) in add_mmap()
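The add_mmap() hits show the queueing side, a pattern that add_munmap() and add_mprotect() repeat below: coalesce with the previous queued op when the new range simply extends it, drain a full queue with do_ops(hvc, ARRAY_SIZE(hvc->ops), 0), then append. A sketch of that pattern, reusing the model types and do_ops() above; the real add_mmap() also takes a physical address and, on the hvc->userspace branch at line 132, resolves the host backing file and offset, which this model drops.

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static int add_mmap(unsigned long virt, unsigned long len,
                        unsigned int prot, struct host_vm_change *hvc)
    {
            int ret = 0;

            /* Coalesce: if this range just extends the previous op with
             * the same protection, grow that op instead of queueing a
             * new one. */
            if (hvc->index != 0) {
                    struct host_vm_op *last = &hvc->ops[hvc->index - 1];

                    if (last->type == MMAP &&
                        last->u.mmap.addr + last->u.mmap.len == virt &&
                        last->u.mmap.prot == prot) {
                            last->u.mmap.len += len;
                            return 0;
                    }
            }

            /* Queue full: replay everything queued so far (finished == 0,
             * more ops follow) and start refilling from slot 0. */
            if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                    ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                    hvc->index = 0;
            }

            hvc->ops[hvc->index++] = (struct host_vm_op){
                    .type = MMAP,
                    .u.mmap = { .addr = virt, .len = len, .prot = prot },
            };
            return ret;
    }

Coalescing is what makes the per-page calls from the walk below cheap: a run of contiguous pages with the same protection collapses into a single host operation.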
164 struct host_vm_change *hvc) in add_munmap() argument
172 if (hvc->index != 0) { in add_munmap()
173 last = &hvc->ops[hvc->index - 1]; in add_munmap()
181 if (hvc->index == ARRAY_SIZE(hvc->ops)) { in add_munmap()
182 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0); in add_munmap()
183 hvc->index = 0; in add_munmap()
186 hvc->ops[hvc->index++] = ((struct host_vm_op) in add_munmap()
194 unsigned int prot, struct host_vm_change *hvc) in add_mprotect() argument
202 if (hvc->index != 0) { in add_mprotect()
203 last = &hvc->ops[hvc->index - 1]; in add_mprotect()
212 if (hvc->index == ARRAY_SIZE(hvc->ops)) { in add_mprotect()
213 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0); in add_mprotect()
214 hvc->index = 0; in add_mprotect()
217 hvc->ops[hvc->index++] = ((struct host_vm_op) in add_mprotect()
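The add_munmap() and add_mprotect() hits repeat the same three steps (coalesce, drain when hvc->index hits ARRAY_SIZE(hvc->ops), append); only the coalescing predicate and the op payload differ. Compact model versions under the same assumptions:

    static int add_munmap(unsigned long addr, unsigned long len,
                          struct host_vm_change *hvc)
    {
            int ret = 0;

            if (hvc->index != 0) {
                    struct host_vm_op *last = &hvc->ops[hvc->index - 1];

                    if (last->type == MUNMAP &&
                        last->u.munmap.addr + last->u.munmap.len == addr) {
                            last->u.munmap.len += len;  /* extend previous */
                            return 0;
                    }
            }
            if (hvc->index == ARRAY_SIZE(hvc->ops)) {   /* queue full: drain */
                    ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                    hvc->index = 0;
            }
            hvc->ops[hvc->index++] = (struct host_vm_op){
                    .type = MUNMAP,
                    .u.munmap = { .addr = addr, .len = len },
            };
            return ret;
    }

    static int add_mprotect(unsigned long addr, unsigned long len,
                            unsigned int prot, struct host_vm_change *hvc)
    {
            int ret = 0;

            if (hvc->index != 0) {
                    struct host_vm_op *last = &hvc->ops[hvc->index - 1];

                    if (last->type == MPROTECT &&
                        last->u.mprotect.addr + last->u.mprotect.len == addr &&
                        last->u.mprotect.prot == prot) {
                            last->u.mprotect.len += len;
                            return 0;
                    }
            }
            if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                    ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                    hvc->index = 0;
            }
            hvc->ops[hvc->index++] = (struct host_vm_op){
                    .type = MPROTECT,
                    .u.mprotect = { .addr = addr, .len = len, .prot = prot },
            };
            return ret;
    }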
229 struct host_vm_change *hvc) in update_pte_range() argument
250 if (hvc->force || pte_newpage(*pte)) { in update_pte_range()
254 PAGE_SIZE, prot, hvc); in update_pte_range()
256 ret = add_munmap(addr, PAGE_SIZE, hvc); in update_pte_range()
258 ret = add_mprotect(addr, PAGE_SIZE, prot, hvc); in update_pte_range()
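At the leaf, the update_pte_range() hits turn each PTE into at most one queued op: a changed page (hvc->force || pte_newpage()) is re-mapped if present and unmapped if not, while the add_mprotect() call on line 258 is presumably the protection-only-changed case. A model of that decision, reusing the helpers above; the model_pte flags are stand-ins for the kernel's pte_present()/pte_newpage() style tests, and PAGE_SIZE is assumed to be 4 KiB here.

    #define PAGE_SIZE 4096UL

    struct model_pte { int present, newpage, newprot; unsigned int prot; };

    static int update_one_pte(const struct model_pte *pte, unsigned long addr,
                              struct host_vm_change *hvc)
    {
            if (hvc->force || pte->newpage) {
                    if (pte->present)   /* page (re)appeared: map it */
                            return add_mmap(addr, PAGE_SIZE, pte->prot, hvc);
                    /* page went away: drop the host mapping */
                    return add_munmap(addr, PAGE_SIZE, hvc);
            }
            if (pte->newprot)           /* only the permissions changed */
                    return add_mprotect(addr, PAGE_SIZE, pte->prot, hvc);
            return 0;                   /* page unchanged: no op queued */
    }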
266 struct host_vm_change *hvc) in update_pmd_range() argument
276 if (hvc->force || pmd_newpage(*pmd)) { in update_pmd_range()
277 ret = add_munmap(addr, next - addr, hvc); in update_pmd_range()
281 else ret = update_pte_range(pmd, addr, next, hvc); in update_pmd_range()
288 struct host_vm_change *hvc) in update_pud_range() argument
298 if (hvc->force || pud_newpage(*pud)) { in update_pud_range()
299 ret = add_munmap(addr, next - addr, hvc); in update_pud_range()
303 else ret = update_pmd_range(pud, addr, next, hvc); in update_pud_range()
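update_pmd_range() and update_pud_range() are the same mid-level step at two heights of the walk: an absent entry whose range changed (hvc->force || pmd_newpage()/pud_newpage()) becomes a single munmap covering the whole span, and a present entry descends one level. A generic model of one level; the flat entries table and the span arithmetic (span assumed a power of two) are assumptions standing in for the kernel's per-level offset and addr_end iteration.

    struct model_entry { int present, newpage; };

    static int update_level(const struct model_entry *entries,
                            unsigned long addr, unsigned long end,
                            unsigned long span, struct host_vm_change *hvc,
                            int (*descend)(unsigned long, unsigned long,
                                           struct host_vm_change *))
    {
            int ret = 0;

            while (addr < end && ret == 0) {
                    unsigned long next = (addr | (span - 1)) + 1; /* span end */
                    const struct model_entry *e = &entries[addr / span];

                    if (next > end)
                            next = end;
                    if (!e->present) {
                            /* Entry gone: one munmap covers its whole span. */
                            if (hvc->force || e->newpage)
                                    ret = add_munmap(addr, next - addr, hvc);
                    } else {
                            ret = descend(addr, next, hvc); /* next level down */
                    }
                    addr = next;
            }
            return ret;
    }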
312 struct host_vm_change hvc; in fix_range_common() local
316 hvc = INIT_HVC(mm, force, userspace); in fix_range_common()
322 ret = add_munmap(addr, next - addr, &hvc); in fix_range_common()
326 else ret = update_pud_range(pgd, addr, next, &hvc); in fix_range_common()
330 ret = do_ops(&hvc, hvc.index, 1); in fix_range_common()
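fix_range_common() is the driver: it builds a host_vm_change on the stack with INIT_HVC(mm, force, userspace), walks the top level the same way (absent entries become one munmap per span, present ones descend through update_pud_range()), and drains whatever is still queued with do_ops(&hvc, hvc.index, 1); that final 1 is the finished flag the mid-batch drains above pass as 0. A compressed, runnable model of that shape, reusing the sketches above; the INIT_HVC() model only fills this model's fields (the real macro also records the target mm), and lookup_pte() plus the page-by-page loop are simplifications of the multi-level walk.

    #define INIT_HVC(force, userspace) \
            ((struct host_vm_change){ .force = (force), .userspace = (userspace) })

    /* Hypothetical stand-in for descending the page tables to 'addr';
     * pretends every page in the range was just torn down. */
    static struct model_pte lookup_pte(unsigned long addr)
    {
            (void)addr;
            return (struct model_pte){ .present = 0, .newpage = 1 };
    }

    static int fix_range(unsigned long start, unsigned long end, int force)
    {
            struct host_vm_change hvc = INIT_HVC(force, 1);
            unsigned long addr;
            int ret = 0;

            /* The real walk descends the levels so whole absent spans
             * become one add_munmap(); this model goes page by page and
             * relies on coalescing in the add_* helpers instead. */
            for (addr = start; addr < end && ret == 0; addr += PAGE_SIZE) {
                    struct model_pte pte = lookup_pte(addr);

                    ret = update_one_pte(&pte, addr, &hvc);
            }

            /* Drain the tail of the queue; finished == 1 closes the batch. */
            if (ret == 0)
                    ret = do_ops(&hvc, hvc.index, 1);
            return ret;
    }

    int main(void)
    {
            /* Tear down a hypothetical five-page range. */
            return fix_range(0x100000UL, 0x105000UL, 0);
    }

Run as-is, the five contiguous munmaps coalesce into one queued op, so the final drain prints a single unmap with fin 1.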
352 struct host_vm_change hvc; in flush_tlb_kernel_range_common() local
355 hvc = INIT_HVC(mm, force, userspace); in flush_tlb_kernel_range_common()
364 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
380 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
396 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
408 err = add_munmap(addr, PAGE_SIZE, &hvc); in flush_tlb_kernel_range_common()
414 PAGE_SIZE, 0, &hvc); in flush_tlb_kernel_range_common()
418 err = add_mprotect(addr, PAGE_SIZE, 0, &hvc); in flush_tlb_kernel_range_common()
423 err = do_ops(&hvc, hvc.index, 1); in flush_tlb_kernel_range_common()
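flush_tlb_kernel_range_common() drives the same machinery over the kernel's own page tables, presumably with userspace clear in its INIT_HVC() so the do_ops() hits take their non-userspace branches. The three add_munmap(addr, last - addr, &hvc) hits are the per-level "entry gone, unmap its whole span" cases; at the page level a replaced page becomes a munmap plus an mmap of the new page (lines 408 and 414), a protection change becomes an mprotect (line 418), and the tail of the queue is again drained with do_ops(&hvc, hvc.index, 1).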