
Lines Matching +full:i +full:- +full:tlb +full:- +full:sets

2  * TLB flush routines for radix kernels.
4 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
18 #include <asm/ppc-opcode.h>
19 #include <asm/tlb.h>
30 * i.e., r=1 and is=01 or is=10 or is=11
43 : : "r"(rb), "r"(rs), "i"(ric), "i"(prs) in tlbiel_radix_set_isa300()
54 * Flush the first set of the TLB, and the entire Page Walk Cache in tlbiel_all_isa300()
55 * and partition table entries. Then flush the remaining sets of the in tlbiel_all_isa300()
56 * TLB. in tlbiel_all_isa300()
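
The three comment lines above describe the core of tlbiel_all_isa300(). A hedged reconstruction of that loop, consistent with the matched lines; the argument order of tlbiel_radix_set_isa300() (set, is, rb_pgsize, ric, prs) and the RIC_* constant names are assumptions here, not taken from the matches:

    static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
    {
            unsigned int set;

            asm volatile("ptesync" : : : "memory");

            /* Set 0 flushes the TLB, Page Walk Cache and table entries
             * (RIC = ALL), first for partition scope (prs = 0)... */
            tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
            for (set = 1; set < num_sets; set++)
                    tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);

            /* ...then the same pattern for process scope (prs = 1). */
            tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
            for (set = 1; set < num_sets; set++)
                    tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);

            asm volatile("ptesync" : : : "memory");
    }
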
88 WARN(1, "%s called on pre-POWER9 CPU\n", __func__); in radix__tlbiel_all()
105 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); in __tlbiel_pid()
119 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); in __tlbie_pid()
135 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); in __tlbiel_lpid()
149 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); in __tlbie_lpid()
165 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); in __tlbiel_lpid_guest()
182 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); in __tlbiel_va()
198 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); in __tlbie_va()
214 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); in __tlbie_lpid_va()
253 unsigned long va = ((1UL << 52) - 1); in fixup_tlbie_pid()
287 unsigned long va = ((1UL << 52) - 1); in fixup_tlbie_lpid()
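
Both fixup helpers pick va = ((1UL << 52) - 1), the highest 52-bit effective address, as a harmless dummy target. A sketch of how fixup_tlbie_pid() plausibly uses it with the __tlbie_pid()/__tlbie_va() helpers matched above; the CPU feature-bit names are assumptions:

    static inline void fixup_tlbie_pid(unsigned long pid)
    {
            /* Any address works for the extra invalidation; pick one that
             * is almost certainly unused. */
            unsigned long va = ((1UL << 52) - 1);

            if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
                    asm volatile("ptesync" : : : "memory");
                    __tlbie_pid(0, RIC_FLUSH_TLB);  /* extra PID-0 flush */
            }

            if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                    asm volatile("ptesync" : : : "memory");
                    __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
            }
    }
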
310 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL, in _tlbiel_pid()
311 * also flush the entire Page Walk Cache. in _tlbiel_pid()
321 /* For the remaining sets, just flush the TLB */ in _tlbiel_pid()
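
Those two comments bracket the body of _tlbiel_pid(): set 0 is flushed with the caller's ric (which also hits the Page Walk Cache for RIC_FLUSH_ALL), and the remaining sets only need the TLB proper flushed. A sketch, with POWER9_TLB_SETS_RADIX (128) and the __tlbiel_pid(pid, set, ric) argument order assumed:

    static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
    {
            int set;

            asm volatile("ptesync" : : : "memory");

            /* Set 0: TLB, plus the PWC when ric == RIC_FLUSH_ALL. */
            __tlbiel_pid(pid, 0, ric);

            /* A PWC-only flush is not per-set, so one operation suffices. */
            if (ric != RIC_FLUSH_PWC) {
                    /* For the remaining sets, just flush the TLB. */
                    for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
                            __tlbiel_pid(pid, set, RIC_FLUSH_TLB);
            }

            asm volatile("ptesync" : : : "memory");
    }
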
335 * must be a compile-time constraint to match the "i" constraint in _tlbie_pid()
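
The "i" (immediate) constraint in the __tlbie_pid() asm only accepts compile-time constants, so a runtime ric value cannot be passed straight through. The standard workaround, sketched here, is a switch whose arms call the always-inlined helper with literal constants:

    static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
    {
            /* Each arm hands __tlbie_pid() a literal, so after inlining
             * the "i" constraint sees a compile-time constant. */
            switch (ric) {
            case RIC_FLUSH_TLB:
                    __tlbie_pid(pid, RIC_FLUSH_TLB);
                    break;
            case RIC_FLUSH_PWC:
                    __tlbie_pid(pid, RIC_FLUSH_PWC);
                    break;
            case RIC_FLUSH_ALL:
            default:
                    __tlbie_pid(pid, RIC_FLUSH_ALL);
                    break;
            }
    }
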
363 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL, in _tlbiel_lpid()
364 * also flush the entire Page Walk Cache. in _tlbiel_lpid()
374 /* For the remaining sets, just flush the TLB */ in _tlbiel_lpid()
388 * must be a compile-time constraint to match the "i" constraint in _tlbie_lpid()
416 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL, in _tlbiel_lpid_guest()
417 * also flush the entire Page Walk Cache. in _tlbiel_lpid_guest()
427 /* For the remaining sets, just flush the TLB */ in _tlbiel_lpid_guest()
478 fixup_tlbie_va_range(addr - page_size, pid, ap); in __tlbie_va_range()
515 * Base TLB flushing operations:
517 - flush_tlb_mm(mm) flushes the specified mm context TLBs
518 * - flush_tlb_page(vma, vmaddr) flushes one page
519 * - flush_tlb_range(vma, start, end) flushes a range of pages
520 * - flush_tlb_kernel_range(start, end) flushes kernel pages
522 - local_* variants of page and mm only apply to the current processor (see the usage sketch below)
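
A caller-side view of those operations; a hypothetical kernel-context sketch in which vma, vmaddr, start, end, kstart and kend are placeholders:

    /* One user page, e.g. after a PTE is changed in a fault path. */
    flush_tlb_page(vma, vmaddr);

    /* A range of pages within one VMA, e.g. on mprotect()/munmap(). */
    flush_tlb_range(vma, start, end);

    /* Every translation for an address space. */
    flush_tlb_mm(vma->vm_mm);

    /* Kernel mappings, e.g. after vunmap(). */
    flush_tlb_kernel_range(kstart, kend);
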
530 pid = mm->context.id; in radix__local_flush_tlb_mm()
543 pid = mm->context.id; in radix__local_flush_all_mm()
557 pid = mm->context.id; in radix__local_flush_tlb_page_psize()
570 radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize); in radix__local_flush_tlb_page()
576 if (atomic_read(&mm->context.copros) > 0) in mm_is_singlethreaded()
578 if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm) in mm_is_singlethreaded()
590 if (atomic_read(&mm->context.copros) > 0) in mm_needs_flush_escalation()
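
The two copros checks above come from a pair of small predicates. Reconstructed from the matched lines; the comments on why coprocessors matter are assumptions about the POWER9 nest MMU:

    static bool mm_is_singlethreaded(struct mm_struct *mm)
    {
            /* A coprocessor may use this context from outside the CPU. */
            if (atomic_read(&mm->context.copros) > 0)
                    return false;
            /* One user, and it is us: no other thread can race the flush. */
            if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm)
                    return true;
            return false;
    }

    static bool mm_needs_flush_escalation(struct mm_struct *mm)
    {
            /* With a coprocessor attached, escalate to a full-PID flush
             * (assumption: the nest MMU mishandles finer-grained ops). */
            if (atomic_read(&mm->context.copros) > 0)
                    return true;
            return false;
    }
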
599 unsigned long pid = mm->context.id; in do_exit_flush_lazy_tlb()
604 * kthread_use_mm when interrupted here. In that case, current->mm will in do_exit_flush_lazy_tlb()
605 * be set to mm, because kthread_use_mm() setting ->mm and switching to in do_exit_flush_lazy_tlb()
606 * the mm is done with interrupts off. in do_exit_flush_lazy_tlb()
608 if (current->mm == mm) in do_exit_flush_lazy_tlb()
611 if (current->active_mm == mm) { in do_exit_flush_lazy_tlb()
612 WARN_ON_ONCE(current->mm != NULL); in do_exit_flush_lazy_tlb()
613 /* Is a kernel thread and is using mm as the lazy tlb */ in do_exit_flush_lazy_tlb()
615 current->active_mm = &init_mm; in do_exit_flush_lazy_tlb()
620 atomic_dec(&mm->context.active_cpus); in do_exit_flush_lazy_tlb()
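
Read together, the do_exit_flush_lazy_tlb() fragments describe handing a lazily-held mm back before the final flush. A hedged reconstruction; the mmgrab()/mmdrop() refcounting and the switch_mm_irqs_off() call are assumptions about the unmatched lines:

    if (current->active_mm == mm) {
            WARN_ON_ONCE(current->mm != NULL);
            /* Is a kernel thread and is using mm as the lazy tlb. */
            mmgrab(&init_mm);
            current->active_mm = &init_mm;
            switch_mm_irqs_off(mm, &init_mm, current);
            mmdrop(mm);
    }

    /* This CPU no longer counts as an active user of the context. */
    atomic_dec(&mm->context.active_cpus);
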
644 pid = mm->context.id; in radix__flush_tlb_mm()
676 pid = mm->context.id; in __flush_all_mm()
702 void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr) in radix__flush_tlb_pwc() argument
704 tlb->need_flush_all = 1; in radix__flush_tlb_pwc()
713 pid = mm->context.id; in radix__flush_tlb_page_psize()
738 radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize); in radix__flush_tlb_page()
752 #define TLB_FLUSH_ALL -1UL
759 * It also does not iterate over sets like the local tlbiel variant when
760 * flushing a full PID, so it has a far lower threshold to change from
761 * individual page flushes to full-pid flushes.
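
Because a broadcast tlbie never iterates over sets, a full-PID flush is a single operation, so the crossover from per-page flushes happens much earlier than for local tlbiel. A sketch of the decision in the range-flush path; the ceiling variable names and values (33 global, twice the set count local) are assumptions:

    bool full;

    if (local)
            full = (end == TLB_FLUSH_ALL ||
                    nr_pages > tlb_local_single_page_flush_ceiling);
    else
            full = (end == TLB_FLUSH_ALL ||
                    nr_pages > tlb_single_page_flush_ceiling);

    if (full) {
            /* Cheaper past the ceiling: one flush of the whole PID. */
            if (local)
                    _tlbiel_pid(pid, RIC_FLUSH_TLB);
            else
                    _tlbie_pid(pid, RIC_FLUSH_TLB);
    } else {
            /* Below the ceiling: invalidate page by page. */
            __tlbie_va_range(start, end, pid, page_size, psize);
    }
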
774 unsigned long nr_pages = (end - start) >> page_shift; in __radix__flush_tlb_range()
777 pid = mm->context.id; in __radix__flush_tlb_range()
819 hstart = (start + PMD_SIZE - 1) & PMD_MASK; in __radix__flush_tlb_range()
826 gstart = (start + PUD_SIZE - 1) & PUD_MASK; in __radix__flush_tlb_range()
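
Both lines use the round-up-to-boundary idiom (addr + SIZE - 1) & MASK, so that 2M and 1G invalidations are only issued over the part of the range that could actually contain such mappings. A standalone demonstration of the arithmetic:

    #include <stdio.h>

    #define PMD_SIZE (1UL << 21)            /* 2M: radix PMD mapping size */
    #define PMD_MASK (~(PMD_SIZE - 1))

    int main(void)
    {
            unsigned long start = 0x230000; /* not 2M aligned */
            unsigned long hstart = (start + PMD_SIZE - 1) & PMD_MASK;

            /* Prints: 0x230000 rounds up to 0x400000 */
            printf("0x%lx rounds up to 0x%lx\n", start, hstart);
            return 0;
    }
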
866 __radix__flush_tlb_range(vma->vm_mm, start, end, false); in radix__flush_tlb_range()
881 return -1; in radix_get_mmu_psize()
931 void radix__tlb_flush(struct mmu_gather *tlb) in radix__tlb_flush() argument
934 struct mm_struct *mm = tlb->mm; in radix__tlb_flush()
935 int page_size = tlb->page_size; in radix__tlb_flush()
936 unsigned long start = tlb->start; in radix__tlb_flush()
937 unsigned long end = tlb->end; in radix__tlb_flush()
946 if (tlb->fullmm) { in radix__tlb_flush()
953 * cleared ptes and miss flushing the TLB. If this invalidate in radix__tlb_flush()
976 if (!tlb->need_flush_all) in radix__tlb_flush()
981 } else if ( (psize = radix_get_mmu_psize(page_size)) == -1) { in radix__tlb_flush()
982 if (!tlb->need_flush_all) in radix__tlb_flush()
987 if (!tlb->need_flush_all) in radix__tlb_flush()
992 tlb->need_flush_all = 0; in radix__tlb_flush()
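
Taken together, the matched lines of radix__tlb_flush() form a three-way dispatch on the gather state. A hedged reconstruction; helper names not present in the matches (radix__flush_all_mm, radix__flush_tlb_pwc_range_psize) are assumptions:

    if (tlb->fullmm) {
            /* Address space teardown: flush everything, PWC included. */
            __flush_all_mm(mm, true);
    } else if ((psize = radix_get_mmu_psize(page_size)) == -1) {
            /* Unrecognised page size: fall back to a whole-PID flush. */
            if (!tlb->need_flush_all)
                    radix__flush_tlb_mm(mm);
            else
                    radix__flush_all_mm(mm);
    } else {
            /* Known page size: flush only the gathered range. */
            if (!tlb->need_flush_all)
                    radix__flush_tlb_range_psize(mm, start, end, psize);
            else
                    radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
    }
    tlb->need_flush_all = 0;
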
1002 unsigned long nr_pages = (end - start) >> page_shift; in __radix__flush_tlb_range_psize()
1005 pid = mm->context.id; in __radix__flush_tlb_range_psize()
1063 pid = mm->context.id; in radix__flush_tlb_collapsed_pmd()
1096 radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M); in radix__flush_pmd_tlb_range()
1108 rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */ in radix__flush_tlb_all()
1115 : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory"); in radix__flush_tlb_all()
1120 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory"); in radix__flush_tlb_all()
1127 unsigned long pid = mm->context.id; in radix_kvm_prefetch_workaround()
1135 * CPU just brought in obsolete translation into the TLB of in radix_kvm_prefetch_workaround()
1157 if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu) in radix_kvm_prefetch_workaround()