Lines Matching full:pid (full-text search results: each entry gives the source line number, the matched line, the enclosing function, and, for declarations, whether pid is an argument or a local)
33 unsigned int pid, in tlbiel_radix_set_isa300() argument
40 rs = ((unsigned long)pid << PPC_BITLSHIFT(31)); in tlbiel_radix_set_isa300()
93 static inline void __tlbiel_pid(unsigned long pid, int set, in __tlbiel_pid() argument
100 rs = ((unsigned long)pid) << PPC_BITLSHIFT(31); in __tlbiel_pid()
109 static inline void __tlbie_pid(unsigned long pid, unsigned long ric) in __tlbie_pid() argument
114 rs = pid << PPC_BITLSHIFT(31); in __tlbie_pid()
170 static inline void __tlbiel_va(unsigned long va, unsigned long pid, in __tlbiel_va() argument
177 rs = pid << PPC_BITLSHIFT(31); in __tlbiel_va()
186 static inline void __tlbie_va(unsigned long va, unsigned long pid, in __tlbie_va() argument
193 rs = pid << PPC_BITLSHIFT(31); in __tlbie_va()
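
All of the matches above build the RS operand for tlbie/tlbiel the same way: rs = pid << PPC_BITLSHIFT(31). These appear to come from the powerpc radix TLB-flush code (arch/powerpc/mm/book3s64/radix_tlb.c in recent mainline trees). As a hedged illustration, assuming the usual powerpc definition PPC_BITLSHIFT(be) == BITS_PER_LONG - 1 - (be), the shift count is 63 - 31 = 32, which places the PID in the most-significant word of RS, i.e. IBM bits 0:31, where a process-scoped (PRS=1) tlbie expects it:

/* Sketch only; tlbie_rs_from_pid() is a hypothetical helper, not a kernel
 * function. It just shows where the PID lands in the RS operand.
 */
static inline unsigned long tlbie_rs_from_pid(unsigned long pid)
{
        /* PPC_BITLSHIFT(31) == 63 - 31 == 32 on 64-bit */
        return pid << PPC_BITLSHIFT(31);        /* PID in IBM bits 0:31 of RS */
}
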
219 static inline void fixup_tlbie_va(unsigned long va, unsigned long pid, in fixup_tlbie_va() argument
229 __tlbie_va(va, pid, ap, RIC_FLUSH_TLB); in fixup_tlbie_va()
233 static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid, in fixup_tlbie_va_range() argument
243 __tlbie_va(va, pid, ap, RIC_FLUSH_TLB); in fixup_tlbie_va_range()
247 static inline void fixup_tlbie_pid(unsigned long pid) in fixup_tlbie_pid() argument
262 __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB); in fixup_tlbie_pid()
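
fixup_tlbie_pid() follows a broadcast PID invalidation with extra dummy tlbie operations to work around POWER9 tlbie errata, which is why a 64K-page __tlbie_va() shows up inside a PID-flush helper. A hedged reconstruction of the surrounding function (the feature bits and dummy address follow mainline, but the details are version-dependent):

static inline void fixup_tlbie_pid(unsigned long pid)
{
        /* Any address works for the dummy invalidation; pick one that is
         * unlikely to be in use.
         */
        unsigned long va = ((1UL << 52) - 1);

        if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
                asm volatile("ptesync" : : : "memory");
                __tlbie_pid(0, RIC_FLUSH_TLB);
        }

        if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                asm volatile("ptesync" : : : "memory");
                __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
        }
}
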
303 static inline void _tlbiel_pid(unsigned long pid, unsigned long ric) in _tlbiel_pid() argument
313 __tlbiel_pid(pid, 0, ric); in _tlbiel_pid()
323 __tlbiel_pid(pid, set, RIC_FLUSH_TLB); in _tlbiel_pid()
329 static inline void _tlbie_pid(unsigned long pid, unsigned long ric) in _tlbie_pid() argument
340 __tlbie_pid(pid, RIC_FLUSH_TLB); in _tlbie_pid()
341 fixup_tlbie_pid(pid); in _tlbie_pid()
344 __tlbie_pid(pid, RIC_FLUSH_PWC); in _tlbie_pid()
348 __tlbie_pid(pid, RIC_FLUSH_ALL); in _tlbie_pid()
349 fixup_tlbie_pid(pid); in _tlbie_pid()
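
The _tlbie_pid() matches are the per-RIC cases of a switch: the RIC argument to __tlbie_pid() must be a compile-time constant (it feeds an "i" asm constraint), so each value gets its own call, and the errata fixup is only issued for invalidations that drop TLB entries (TLB and ALL), not for a pure page-walk-cache flush. A hedged sketch of that structure:

        asm volatile("ptesync" : : : "memory");
        switch (ric) {
        case RIC_FLUSH_TLB:
                __tlbie_pid(pid, RIC_FLUSH_TLB);
                fixup_tlbie_pid(pid);
                break;
        case RIC_FLUSH_PWC:
                __tlbie_pid(pid, RIC_FLUSH_PWC);
                break;
        case RIC_FLUSH_ALL:
        default:
                __tlbie_pid(pid, RIC_FLUSH_ALL);
                fixup_tlbie_pid(pid);
        }
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
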
437 unsigned long pid, unsigned long page_size, in __tlbiel_va_range() argument
444 __tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB); in __tlbiel_va_range()
447 static inline void _tlbiel_va(unsigned long va, unsigned long pid, in _tlbiel_va() argument
453 __tlbiel_va(va, pid, ap, ric); in _tlbiel_va()
458 unsigned long pid, unsigned long page_size, in _tlbiel_va_range() argument
463 __tlbiel_pid(pid, 0, RIC_FLUSH_PWC); in _tlbiel_va_range()
464 __tlbiel_va_range(start, end, pid, page_size, psize); in _tlbiel_va_range()
469 unsigned long pid, unsigned long page_size, in __tlbie_va_range() argument
476 __tlbie_va(addr, pid, ap, RIC_FLUSH_TLB); in __tlbie_va_range()
478 fixup_tlbie_va_range(addr - page_size, pid, ap); in __tlbie_va_range()
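
__tlbie_va_range() is a per-page loop over the range, and the errata fixup is applied once, to the last address invalidated, which is why "addr - page_size" appears in the match above. A hedged sketch of the loop body:

        unsigned long addr;
        unsigned long ap = mmu_get_ap(psize);

        for (addr = start; addr < end; addr += page_size)
                __tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);

        /* One fixup for the whole range, aimed at the last page flushed */
        fixup_tlbie_va_range(addr - page_size, pid, ap);
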
481 static inline void _tlbie_va(unsigned long va, unsigned long pid, in _tlbie_va() argument
487 __tlbie_va(va, pid, ap, ric); in _tlbie_va()
488 fixup_tlbie_va(va, pid, ap); in _tlbie_va()
504 unsigned long pid, unsigned long page_size, in _tlbie_va_range() argument
509 __tlbie_pid(pid, RIC_FLUSH_PWC); in _tlbie_va_range()
510 __tlbie_va_range(start, end, pid, page_size, psize); in _tlbie_va_range()
527 unsigned long pid; in radix__local_flush_tlb_mm() local
530 pid = mm->context.id; in radix__local_flush_tlb_mm()
531 if (pid != MMU_NO_CONTEXT) in radix__local_flush_tlb_mm()
532 _tlbiel_pid(pid, RIC_FLUSH_TLB); in radix__local_flush_tlb_mm()
540 unsigned long pid; in radix__local_flush_all_mm() local
543 pid = mm->context.id; in radix__local_flush_all_mm()
544 if (pid != MMU_NO_CONTEXT) in radix__local_flush_all_mm()
545 _tlbiel_pid(pid, RIC_FLUSH_ALL); in radix__local_flush_all_mm()
554 unsigned long pid; in radix__local_flush_tlb_page_psize() local
557 pid = mm->context.id; in radix__local_flush_tlb_page_psize()
558 if (pid != MMU_NO_CONTEXT) in radix__local_flush_tlb_page_psize()
559 _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB); in radix__local_flush_tlb_page_psize()
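
The three local flush entry points above share one pattern: disable preemption, read the PID from mm->context.id, and only issue tlbiel if a context has actually been allocated for this mm. A hedged reconstruction of radix__local_flush_tlb_mm() around its matched lines:

void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_pid(pid, RIC_FLUSH_TLB);        /* this CPU only, TLB entries only */
        preempt_enable();
}
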
588 * RIC = 0 for a PID/LPID invalidate in mm_needs_flush_escalation()
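
The single match in mm_needs_flush_escalation() is from its comment: the POWER9 nest MMU (used by coprocessors) does not reliably drop page-walk-cache entries on a RIC=0 PID/LPID invalidate, so the flush is escalated to RIC_FLUSH_ALL whenever a coprocessor context is attached. A hedged sketch, assuming the copros counter that mainline keeps in mm->context:

static inline bool mm_needs_flush_escalation(struct mm_struct *mm)
{
        /* Escalate TLB-only flushes to RIC_FLUSH_ALL when a coprocessor
         * (nest MMU) may be caching translations for this mm.
         */
        return atomic_read(&mm->context.copros) > 0;
}
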
599 unsigned long pid = mm->context.id; in do_exit_flush_lazy_tlb() local
624 _tlbiel_pid(pid, RIC_FLUSH_ALL); in do_exit_flush_lazy_tlb()
642 unsigned long pid; in radix__flush_tlb_mm() local
644 pid = mm->context.id; in radix__flush_tlb_mm()
645 if (unlikely(pid == MMU_NO_CONTEXT)) in radix__flush_tlb_mm()
661 _tlbie_pid(pid, RIC_FLUSH_ALL); in radix__flush_tlb_mm()
663 _tlbie_pid(pid, RIC_FLUSH_TLB); in radix__flush_tlb_mm()
666 _tlbiel_pid(pid, RIC_FLUSH_TLB); in radix__flush_tlb_mm()
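
radix__flush_tlb_mm() chooses between the broadcast and CPU-local forms: if the mm has been active on other CPUs it uses tlbie, escalating to RIC_FLUSH_ALL when mm_needs_flush_escalation() demands it; otherwise the cheaper local tlbiel suffices. A hedged sketch of that decision, omitting the single-threaded lazy-TLB exit path and the memory barriers of the real function:

void radix__flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long pid = mm->context.id;

        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        if (!mm_is_thread_local(mm)) {
                if (mm_needs_flush_escalation(mm))
                        _tlbie_pid(pid, RIC_FLUSH_ALL); /* broadcast, TLB + PWC */
                else
                        _tlbie_pid(pid, RIC_FLUSH_TLB); /* broadcast, TLB only */
        } else {
                _tlbiel_pid(pid, RIC_FLUSH_TLB);        /* this CPU only */
        }
        preempt_enable();
}
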
674 unsigned long pid; in __flush_all_mm() local
676 pid = mm->context.id; in __flush_all_mm()
677 if (unlikely(pid == MMU_NO_CONTEXT)) in __flush_all_mm()
689 _tlbie_pid(pid, RIC_FLUSH_ALL); in __flush_all_mm()
692 _tlbiel_pid(pid, RIC_FLUSH_ALL); in __flush_all_mm()
711 unsigned long pid; in radix__flush_tlb_page_psize() local
713 pid = mm->context.id; in radix__flush_tlb_page_psize()
714 if (unlikely(pid == MMU_NO_CONTEXT)) in radix__flush_tlb_page_psize()
724 _tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB); in radix__flush_tlb_page_psize()
727 _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB); in radix__flush_tlb_page_psize()
755 * Number of pages above which we invalidate the entire PID rather than
760 * invalidating a full PID, so it has a far lower threshold to change from
761 * individual page flushes to full-pid flushes.
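
The comment fragments above introduce the flush-size ceilings: up to a point it is worth invalidating page by page, beyond it the whole PID is thrown away. Because a broadcast tlbie of a full PID does not iterate over TLB sets the way the local tlbiel loop does, the global ceiling is much lower than the local one. A hedged sketch of the tunables as they appear in mainline (exact values are version-dependent):

static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly
        = POWER9_TLB_SETS_RADIX * 2;
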
771 unsigned long pid; in __radix__flush_tlb_range() local
777 pid = mm->context.id; in __radix__flush_tlb_range()
778 if (unlikely(pid == MMU_NO_CONTEXT)) in __radix__flush_tlb_range()
802 _tlbiel_pid(pid, RIC_FLUSH_TLB); in __radix__flush_tlb_range()
805 _tlbie_pid(pid, RIC_FLUSH_ALL); in __radix__flush_tlb_range()
807 _tlbie_pid(pid, RIC_FLUSH_TLB); in __radix__flush_tlb_range()
834 __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize); in __radix__flush_tlb_range()
836 __tlbiel_va_range(hstart, hend, pid, in __radix__flush_tlb_range()
839 __tlbiel_va_range(gstart, gend, pid, in __radix__flush_tlb_range()
843 __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize); in __radix__flush_tlb_range()
845 __tlbie_va_range(hstart, hend, pid, in __radix__flush_tlb_range()
848 __tlbie_va_range(gstart, gend, pid, in __radix__flush_tlb_range()
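
Inside __radix__flush_tlb_range(), the matches show the two outcomes of that ceiling check: a full-PID flush via _tlbiel_pid()/_tlbie_pid(), or a walk of the range at the base page size plus extra passes over hugepage-aligned sub-ranges (hstart/hend for 2M THP mappings, gstart/gend analogously for 1G). A hedged sketch of how the 2M sub-range is derived, assuming the PMD_SIZE alignment mainline uses:

        /* Portion of [start, end) that could be mapped by 2M THP entries */
        hstart = (start + PMD_SIZE - 1) & PMD_MASK;
        hend = end & PMD_MASK;
        if (hstart < hend)
                __tlbiel_va_range(hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M);
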
999 unsigned long pid; in __radix__flush_tlb_range_psize() local
1005 pid = mm->context.id; in __radix__flush_tlb_range_psize()
1006 if (unlikely(pid == MMU_NO_CONTEXT)) in __radix__flush_tlb_range_psize()
1030 _tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB); in __radix__flush_tlb_range_psize()
1035 _tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB); in __radix__flush_tlb_range_psize()
1039 _tlbiel_va_range(start, end, pid, page_size, psize, also_pwc); in __radix__flush_tlb_range_psize()
1041 _tlbie_va_range(start, end, pid, page_size, psize, also_pwc); in __radix__flush_tlb_range_psize()
1061 unsigned long pid, end; in radix__flush_tlb_collapsed_pmd() local
1063 pid = mm->context.id; in radix__flush_tlb_collapsed_pmd()
1064 if (unlikely(pid == MMU_NO_CONTEXT)) in radix__flush_tlb_collapsed_pmd()
1083 _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); in radix__flush_tlb_collapsed_pmd()
1086 _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); in radix__flush_tlb_collapsed_pmd()
1127 unsigned long pid = mm->context.id; in radix_kvm_prefetch_workaround() local
1129 if (unlikely(pid == MMU_NO_CONTEXT)) in radix_kvm_prefetch_workaround()
1136 * this CPU due to a bad prefetch using the guest PID on in radix_kvm_prefetch_workaround()
1141 * and thus we flush that PID from the core. in radix_kvm_prefetch_workaround()
1144 * have never been used on the system and avoid it if the PID in radix_kvm_prefetch_workaround()
1161 _tlbiel_pid(pid, RIC_FLUSH_ALL); in radix_kvm_prefetch_workaround()
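
The final group is radix_kvm_prefetch_workaround(): the comment fragments describe a POWER9 window in which a bad prefetch using the guest PID can bring obsolete translations into this CPU's TLB, so if any sibling thread is currently in KVM the new context's PID is flushed from the whole core. A hedged sketch of the sibling check (helper and field names follow mainline but vary by version):

        int cpu = smp_processor_id();
        int sib;
        bool flush = false;

        for (sib = cpu_first_thread_sibling(cpu);
             sib <= cpu_last_thread_sibling(cpu); sib++) {
                if (sib == cpu || !cpu_possible(sib))
                        continue;
                if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)        /* sibling is running a guest */
                        flush = true;
        }

        if (flush)
                _tlbiel_pid(pid, RIC_FLUSH_ALL);        /* flush this PID from the core */
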