Lines Matching +full:mm +full:- +full:0

1 // SPDX-License-Identifier: GPL-2.0
11 #include <linux/mm.h>
25 #include <asm/page-states.h>
47 static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr, in ptep_ipte_local() argument
53 opt = 0; in ptep_ipte_local()
54 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_local()
55 if (asce == 0UL || nodat) in ptep_ipte_local()
57 if (asce != -1UL) { in ptep_ipte_local()
58 asce = asce ? : mm->context.asce; in ptep_ipte_local()
63 __ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL); in ptep_ipte_local()
67 static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr, in ptep_ipte_global() argument
73 opt = 0; in ptep_ipte_global()
74 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_global()
75 if (asce == 0UL || nodat) in ptep_ipte_global()
77 if (asce != -1UL) { in ptep_ipte_global()
78 asce = asce ? : mm->context.asce; in ptep_ipte_global()
83 __ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL); in ptep_ipte_global()
87 static inline pte_t ptep_flush_direct(struct mm_struct *mm, in ptep_flush_direct() argument
96 atomic_inc(&mm->context.flush_count); in ptep_flush_direct()
98 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in ptep_flush_direct()
99 ptep_ipte_local(mm, addr, ptep, nodat); in ptep_flush_direct()
101 ptep_ipte_global(mm, addr, ptep, nodat); in ptep_flush_direct()
102 atomic_dec(&mm->context.flush_count); in ptep_flush_direct()
106 static inline pte_t ptep_flush_lazy(struct mm_struct *mm, in ptep_flush_lazy() argument
115 atomic_inc(&mm->context.flush_count); in ptep_flush_lazy()
116 if (cpumask_equal(&mm->context.cpu_attach_mask, in ptep_flush_lazy()
119 mm->context.flush_mm = 1; in ptep_flush_lazy()
121 ptep_ipte_global(mm, addr, ptep, nodat); in ptep_flush_lazy()
122 atomic_dec(&mm->context.flush_count); in ptep_flush_lazy()
128 unsigned long new = 0; in pgste_get_lock()
133 " lg %0,%2\n" in pgste_get_lock()
134 "0: lgr %1,%0\n" in pgste_get_lock()
135 " nihh %0,0xff7f\n" /* clear PCL bit in old */ in pgste_get_lock()
136 " oihh %1,0x0080\n" /* set PCL bit in new */ in pgste_get_lock()
137 " csg %0,%1,%2\n" in pgste_get_lock()
138 " jl 0b\n" in pgste_get_lock()
149 " nihh %1,0xff7f\n" /* clear PCL bit */ in pgste_set_unlock()
150 " stg %1,%0\n" in pgste_set_unlock()
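The two inline-assembly helpers above implement a small spinlock on the PGSTE's PCL bit: pgste_get_lock() keeps retrying a compare-and-swap with the bit cleared in the expected value and set in the new value, and pgste_set_unlock() stores the PGSTE back with the bit cleared. Below is a hedged C sketch of the same pattern, expressed with the kernel's cmpxchg()/READ_ONCE() primitives instead of the s390 assembly; the bit value mirrors the "oihh %1,0x0080" operand and is assumed to correspond to PGSTE_PCL_BIT. It is an illustration only, not the implementation shown above.

        #define PCL_BIT_SKETCH  0x0080000000000000UL    /* assumed PGSTE_PCL_BIT */

        static unsigned long pgste_get_lock_sketch(unsigned long *pgstep)
        {
                unsigned long old, new;

                do {
                        /* expected value has the lock bit clear ... */
                        old = READ_ONCE(*pgstep) & ~PCL_BIT_SKETCH;
                        /* ... the new value has it set */
                        new = old | PCL_BIT_SKETCH;
                        /* retry until the swap succeeds, i.e. the lock was free */
                } while (cmpxchg(pgstep, old, new) != old);
                return old;                     /* PGSTE contents without the lock bit */
        }

        static void pgste_set_unlock_sketch(unsigned long *pgstep, unsigned long pgste)
        {
                /* release: write the PGSTE back with the lock bit cleared */
                WRITE_ONCE(*pgstep, pgste & ~PCL_BIT_SKETCH);
        }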
159 unsigned long pgste = 0; in pgste_get()
174 struct mm_struct *mm) in pgste_update_all() argument
179 if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID) in pgste_update_all()
195 struct mm_struct *mm) in pgste_set_key() argument
201 if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID) in pgste_set_key()
208 * key C/R to 0. in pgste_set_key()
212 page_set_storage_key(address, nkey, 0); in pgste_set_key()
224 * Without enhanced suppression-on-protection force in pgste_set_pte()
231 /* This pte allows write access, set user-dirty */ in pgste_set_pte()
239 static inline pgste_t pgste_pte_notify(struct mm_struct *mm, in pgste_pte_notify() argument
249 ptep_notify(mm, addr, ptep, bits); in pgste_pte_notify()
255 static inline pgste_t ptep_xchg_start(struct mm_struct *mm, in ptep_xchg_start() argument
258 pgste_t pgste = __pgste(0); in ptep_xchg_start()
260 if (mm_has_pgste(mm)) { in ptep_xchg_start()
262 pgste = pgste_pte_notify(mm, addr, ptep, pgste); in ptep_xchg_start()
267 static inline pte_t ptep_xchg_commit(struct mm_struct *mm, in ptep_xchg_commit() argument
271 if (mm_has_pgste(mm)) { in ptep_xchg_commit()
273 pgste_set_key(ptep, pgste, new, mm); in ptep_xchg_commit()
275 pgste = pgste_update_all(old, pgste, mm); in ptep_xchg_commit()
288 pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr, in ptep_xchg_direct() argument
296 pgste = ptep_xchg_start(mm, addr, ptep); in ptep_xchg_direct()
298 old = ptep_flush_direct(mm, addr, ptep, nodat); in ptep_xchg_direct()
299 old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new); in ptep_xchg_direct()
305 pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr, in ptep_xchg_lazy() argument
313 pgste = ptep_xchg_start(mm, addr, ptep); in ptep_xchg_lazy()
315 old = ptep_flush_lazy(mm, addr, ptep, nodat); in ptep_xchg_lazy()
316 old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new); in ptep_xchg_lazy()
328 struct mm_struct *mm = vma->vm_mm; in ptep_modify_prot_start() local
331 pgste = ptep_xchg_start(mm, addr, ptep); in ptep_modify_prot_start()
333 old = ptep_flush_lazy(mm, addr, ptep, nodat); in ptep_modify_prot_start()
334 if (mm_has_pgste(mm)) { in ptep_modify_prot_start()
335 pgste = pgste_update_all(old, pgste, mm); in ptep_modify_prot_start()
345 struct mm_struct *mm = vma->vm_mm; in ptep_modify_prot_commit() local
349 if (mm_has_pgste(mm)) { in ptep_modify_prot_commit()
351 pgste_set_key(ptep, pgste, pte, mm); in ptep_modify_prot_commit()
360 static inline void pmdp_idte_local(struct mm_struct *mm, in pmdp_idte_local() argument
365 mm->context.asce, IDTE_LOCAL); in pmdp_idte_local()
367 __pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL); in pmdp_idte_local()
368 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_local()
369 gmap_pmdp_idte_local(mm, addr); in pmdp_idte_local()
372 static inline void pmdp_idte_global(struct mm_struct *mm, in pmdp_idte_global() argument
377 mm->context.asce, IDTE_GLOBAL); in pmdp_idte_global()
378 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_global()
379 gmap_pmdp_idte_global(mm, addr); in pmdp_idte_global()
381 __pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL); in pmdp_idte_global()
382 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_global()
383 gmap_pmdp_idte_global(mm, addr); in pmdp_idte_global()
386 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_global()
387 gmap_pmdp_csp(mm, addr); in pmdp_idte_global()
391 static inline pmd_t pmdp_flush_direct(struct mm_struct *mm, in pmdp_flush_direct() argument
399 atomic_inc(&mm->context.flush_count); in pmdp_flush_direct()
401 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in pmdp_flush_direct()
402 pmdp_idte_local(mm, addr, pmdp); in pmdp_flush_direct()
404 pmdp_idte_global(mm, addr, pmdp); in pmdp_flush_direct()
405 atomic_dec(&mm->context.flush_count); in pmdp_flush_direct()
409 static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm, in pmdp_flush_lazy() argument
417 atomic_inc(&mm->context.flush_count); in pmdp_flush_lazy()
418 if (cpumask_equal(&mm->context.cpu_attach_mask, in pmdp_flush_lazy()
421 mm->context.flush_mm = 1; in pmdp_flush_lazy()
422 if (mm_has_pgste(mm)) in pmdp_flush_lazy()
423 gmap_pmdp_invalidate(mm, addr); in pmdp_flush_lazy()
425 pmdp_idte_global(mm, addr, pmdp); in pmdp_flush_lazy()
427 atomic_dec(&mm->context.flush_count); in pmdp_flush_lazy()
432 static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr) in pmd_alloc_map() argument
439 pgd = pgd_offset(mm, addr); in pmd_alloc_map()
440 p4d = p4d_alloc(mm, pgd, addr); in pmd_alloc_map()
443 pud = pud_alloc(mm, p4d, addr); in pmd_alloc_map()
446 pmd = pmd_alloc(mm, pud, addr); in pmd_alloc_map()
451 pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr, in pmdp_xchg_direct() argument
457 old = pmdp_flush_direct(mm, addr, pmdp); in pmdp_xchg_direct()
464 pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr, in pmdp_xchg_lazy() argument
470 old = pmdp_flush_lazy(mm, addr, pmdp); in pmdp_xchg_lazy()
477 static inline void pudp_idte_local(struct mm_struct *mm, in pudp_idte_local() argument
482 mm->context.asce, IDTE_LOCAL); in pudp_idte_local()
484 __pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL); in pudp_idte_local()
487 static inline void pudp_idte_global(struct mm_struct *mm, in pudp_idte_global() argument
492 mm->context.asce, IDTE_GLOBAL); in pudp_idte_global()
494 __pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL); in pudp_idte_global()
498 * re-use _pmd_csp() here in pudp_idte_global()
503 static inline pud_t pudp_flush_direct(struct mm_struct *mm, in pudp_flush_direct() argument
511 atomic_inc(&mm->context.flush_count); in pudp_flush_direct()
513 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in pudp_flush_direct()
514 pudp_idte_local(mm, addr, pudp); in pudp_flush_direct()
516 pudp_idte_global(mm, addr, pudp); in pudp_flush_direct()
517 atomic_dec(&mm->context.flush_count); in pudp_flush_direct()
521 pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr, in pudp_xchg_direct() argument
527 old = pudp_flush_direct(mm, addr, pudp); in pudp_xchg_direct()
535 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, in pgtable_trans_huge_deposit() argument
540 assert_spin_locked(pmd_lockptr(mm, pmdp)); in pgtable_trans_huge_deposit()
543 if (!pmd_huge_pte(mm, pmdp)) in pgtable_trans_huge_deposit()
546 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); in pgtable_trans_huge_deposit()
547 pmd_huge_pte(mm, pmdp) = pgtable; in pgtable_trans_huge_deposit()
550 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) in pgtable_trans_huge_withdraw() argument
556 assert_spin_locked(pmd_lockptr(mm, pmdp)); in pgtable_trans_huge_withdraw()
559 pgtable = pmd_huge_pte(mm, pmdp); in pgtable_trans_huge_withdraw()
562 pmd_huge_pte(mm, pmdp) = NULL; in pgtable_trans_huge_withdraw()
564 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; in pgtable_trans_huge_withdraw()
576 void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr, in ptep_set_pte_at() argument
585 pgste_set_key(ptep, pgste, entry, mm); in ptep_set_pte_at()
591 void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in ptep_set_notify() argument
603 * ptep_force_prot - change access rights of a locked pte
604 * @mm: pointer to the process mm_struct
610 * Returns 0 if the access rights were changed and -EAGAIN if the current
613 int ptep_force_prot(struct mm_struct *mm, unsigned long addr, in ptep_force_prot() argument
628 return -EAGAIN; in ptep_force_prot()
633 ptep_flush_direct(mm, addr, ptep, nodat); in ptep_force_prot()
634 pgste = pgste_update_all(entry, pgste, mm); in ptep_force_prot()
638 ptep_flush_direct(mm, addr, ptep, nodat); in ptep_force_prot()
645 return 0; in ptep_force_prot()
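As the (truncated) comment above states, ptep_force_prot() returns 0 once the access rights have been changed and -EAGAIN otherwise, which suggests the caller resolves the blocking condition and retries. The sketch below shows only that retry shape; try_force_prot() stands in for a call to ptep_force_prot() with the protection arguments (not visible in this listing) already bound, and resolve_pte() is a hypothetical helper that re-establishes a usable PTE, e.g. by faulting the page in.

        static int force_prot_retry_sketch(struct mm_struct *mm, unsigned long addr)
        {
                int rc;

                do {
                        rc = try_force_prot(mm, addr);          /* hypothetical wrapper */
                        if (rc != -EAGAIN)
                                return rc;                      /* 0 or a hard error */
                        rc = resolve_pte(mm, addr);             /* hypothetical helper */
                } while (!rc);
                return rc;
        }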
648 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr, in ptep_shadow_pte() argument
653 int rc = -EAGAIN; in ptep_shadow_pte()
656 return 0; /* already shadowed */ in ptep_shadow_pte()
666 /* don't touch the storage key - it belongs to parent pgste */ in ptep_shadow_pte()
675 void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep) in ptep_unshadow_pte() argument
683 ptep_flush_direct(mm, saddr, ptep, nodat); in ptep_unshadow_pte()
684 /* don't touch the storage key - it belongs to parent pgste */ in ptep_unshadow_pte()
689 static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry) in ptep_zap_swap_entry() argument
692 dec_mm_counter(mm, MM_SWAPENTS); in ptep_zap_swap_entry()
696 dec_mm_counter(mm, mm_counter(page)); in ptep_zap_swap_entry()
701 void ptep_zap_unused(struct mm_struct *mm, unsigned long addr, in ptep_zap_unused() argument
708 /* Zap unused and logically-zero pages */ in ptep_zap_unused()
716 ptep_zap_swap_entry(mm, pte_to_swp_entry(pte)); in ptep_zap_unused()
717 pte_clear(mm, addr, ptep); in ptep_zap_unused()
725 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in ptep_zap_key() argument
737 page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0); in ptep_zap_key()
745 bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr, in ptep_test_and_clear_uc() argument
758 pgste = pgste_pte_notify(mm, addr, ptep, pgste); in ptep_test_and_clear_uc()
760 ptep_ipte_global(mm, addr, ptep, nodat); in ptep_test_and_clear_uc()
772 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, in set_guest_storage_key() argument
781 pmdp = pmd_alloc_map(mm, addr); in set_guest_storage_key()
783 return -EFAULT; in set_guest_storage_key()
785 ptl = pmd_lock(mm, pmdp); in set_guest_storage_key()
788 return -EFAULT; in set_guest_storage_key()
800 return 0; in set_guest_storage_key()
804 ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl); in set_guest_storage_key()
806 return -EFAULT; in set_guest_storage_key()
833 return 0; in set_guest_storage_key()
841 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
842 * storage key was updated and -EFAULT on access errors.
844 int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr, in cond_set_guest_storage_key() argument
853 rc = get_guest_storage_key(current->mm, addr, &tmp); in cond_set_guest_storage_key()
863 return 0; in cond_set_guest_storage_key()
865 rc = set_guest_storage_key(current->mm, addr, key, nq); in cond_set_guest_storage_key()
866 return rc < 0 ? rc : 1; in cond_set_guest_storage_key()
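The calls visible in cond_set_guest_storage_key() show the basic usage of the storage-key helpers: get_guest_storage_key(mm, addr, &key) reads the current key and set_guest_storage_key(mm, addr, key, nq) writes a new one, both returning 0 or a negative error, while cond_set_guest_storage_key() itself additionally returns 1 when the key was actually updated. A hedged caller sketch built only from those visible call forms; the unsigned char key type is assumed (it is not shown here), and report_key_change() is a hypothetical helper.

        static int update_guest_key_sketch(struct mm_struct *mm, unsigned long addr,
                                           unsigned char key, bool nq)
        {
                unsigned char old;
                int rc;

                rc = get_guest_storage_key(mm, addr, &old);
                if (rc)
                        return rc;                      /* e.g. -EFAULT on access errors */
                rc = set_guest_storage_key(mm, addr, key, nq);
                if (rc)
                        return rc;
                if (old != key)
                        report_key_change(addr, old, key);      /* hypothetical */
                return 0;
        }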
873 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
875 int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr) in reset_guest_reference_bit() argument
882 int cc = 0; in reset_guest_reference_bit()
884 pmdp = pmd_alloc_map(mm, addr); in reset_guest_reference_bit()
886 return -EFAULT; in reset_guest_reference_bit()
888 ptl = pmd_lock(mm, pmdp); in reset_guest_reference_bit()
891 return -EFAULT; in reset_guest_reference_bit()
903 ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl); in reset_guest_reference_bit()
905 return -EFAULT; in reset_guest_reference_bit()
914 /* Merge real referenced bit into host-set */ in reset_guest_reference_bit()
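Per the comment above, reset_guest_reference_bit() returns a negative errno on failure and otherwise the condition code to be reported to the guest. A minimal hedged sketch of a caller, where set_guest_cc() is a hypothetical stand-in for however the caller surfaces the cc.

        static int handle_rrbe_sketch(struct mm_struct *mm, unsigned long gaddr)
        {
                int cc = reset_guest_reference_bit(mm, gaddr);

                if (cc < 0)
                        return cc;              /* e.g. -EFAULT for a bad address */
                set_guest_cc(cc);               /* hypothetical helper */
                return 0;
        }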
929 int get_guest_storage_key(struct mm_struct *mm, unsigned long addr, in get_guest_storage_key() argument
938 pmdp = pmd_alloc_map(mm, addr); in get_guest_storage_key()
940 return -EFAULT; in get_guest_storage_key()
942 ptl = pmd_lock(mm, pmdp); in get_guest_storage_key()
946 *key = 0; in get_guest_storage_key()
947 return 0; in get_guest_storage_key()
955 return 0; in get_guest_storage_key()
959 ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl); in get_guest_storage_key()
961 return -EFAULT; in get_guest_storage_key()
972 return 0; in get_guest_storage_key()
977 * pgste_perform_essa - perform ESSA actions on the PGSTE.
978 * @mm: the memory context. It must have PGSTEs, no check is performed here!
984 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
985 * or < 0 in case of error. -EINVAL is returned for invalid values
986 * of orc, -EFAULT for invalid addresses.
988 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, in pgste_perform_essa() argument
996 int res = 0; in pgste_perform_essa()
1000 return -EINVAL; in pgste_perform_essa()
1002 vma = find_vma(mm, hva); in pgste_perform_essa()
1003 if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma)) in pgste_perform_essa()
1004 return -EFAULT; in pgste_perform_essa()
1005 ptep = get_locked_pte(mm, hva, &ptl); in pgste_perform_essa()
1007 return -EFAULT; in pgste_perform_essa()
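The kernel-doc above gives pgste_perform_essa() a three-way return: 1 when the page has to be added to the CBRL, 0 otherwise, and a negative error (-EINVAL for an invalid orc, -EFAULT for an invalid address). A hedged sketch of handling that return; do_essa() hides the actual call because the function's trailing parameters are not shown in this listing, and cbrl_add() is a hypothetical helper that records the page for the CBRL.

        static int essa_one_page_sketch(struct mm_struct *mm, unsigned long hva, int orc)
        {
                int res = do_essa(mm, hva, orc);        /* hypothetical wrapper */

                if (res < 0)
                        return res;     /* -EINVAL: bad orc, -EFAULT: bad hva */
                if (res == 1)
                        cbrl_add(hva);  /* hypothetical: report page in the CBRL */
                return 0;
        }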
1085 * set_pgste_bits - set specific PGSTE bits.
1086 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1092 * Return: 0 on success, < 0 in case of error.
1094 int set_pgste_bits(struct mm_struct *mm, unsigned long hva, in set_pgste_bits() argument
1102 vma = find_vma(mm, hva); in set_pgste_bits()
1103 if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma)) in set_pgste_bits()
1104 return -EFAULT; in set_pgste_bits()
1105 ptep = get_locked_pte(mm, hva, &ptl); in set_pgste_bits()
1107 return -EFAULT; in set_pgste_bits()
1115 return 0; in set_pgste_bits()
1120 * get_pgste - get the current PGSTE for the given address.
1121 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1125 * Return: 0 on success, < 0 in case of error.
1127 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep) in get_pgste() argument
1133 vma = find_vma(mm, hva); in get_pgste()
1134 if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma)) in get_pgste()
1135 return -EFAULT; in get_pgste()
1136 ptep = get_locked_pte(mm, hva, &ptl); in get_pgste()
1138 return -EFAULT; in get_pgste()
1141 return 0; in get_pgste()
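get_pgste() is the only PGSTE accessor whose full signature is visible in this listing, so the sketch below exercises just the read side; the write side via set_pgste_bits() is omitted because its trailing parameters are not shown. A minimal hedged example of reading and logging a PGSTE per the contract documented above.

        static int dump_pgste_sketch(struct mm_struct *mm, unsigned long hva)
        {
                unsigned long pgste;
                int rc = get_pgste(mm, hva, &pgste);

                if (rc)
                        return rc;      /* -EFAULT for an unmapped or hugetlb hva */
                pr_info("pgste at %lx: %lx\n", hva, pgste);
                return 0;
        }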