// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/security.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>
#include <asm/mman.h>
#include <asm/tlb.h>

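/*
 * Flush the TLB entry for a single hugetlb page on all CPUs, using the
 * page size from the VMA's hstate.
 */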
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}

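/*
 * As above, but only flush the local CPU's TLB.
 */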
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}

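/*
 * Flush a range of hugetlb mappings. Invalidates of PUD_SIZE or larger
 * also flush the page walk cache (PWC); see the comment below.
 */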
void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
				    unsigned long end)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	/*
	 * Flush the PWC even for a PUD_SIZE hugetlb invalidate, to keep this simple.
	 */
	if (end - start >= PUD_SIZE)
		radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize);
	else
		radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
}

/*
 * A variant of hugetlb_get_unmapped_area() that does a topdown search.
 * FIXME!! should we do as x86 does or as the non-hugetlb path does?
 * i.e. use topdown or not based on a mmap_is_legacy() check?
 */
unsigned long
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				 unsigned long len, unsigned long pgoff,
				 unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	int fixed = (flags & MAP_FIXED);
	unsigned long high_limit;
	struct vm_unmapped_area_info info;

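	/*
	 * Keep mappings below DEFAULT_MAP_WINDOW by default; only raise the
	 * limit to TASK_SIZE when the hint (or fixed request) is above it.
	 */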
	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > high_limit)
		return -ENOMEM;

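	/* For MAP_FIXED, just validate the requested range and return it. */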
	if (fixed) {
		if (addr > high_limit - len)
			return -ENOMEM;
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

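	/* If a hint address was given, try it first if it is valid and free. */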
	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (high_limit - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	/*
	 * We always do a topdown search here. The slice code does the same.
	 */
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;

	return vm_unmapped_area(&info);
}

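/*
 * Commit half of the huge_ptep_modify_prot_start()/commit() protection
 * update sequence for radix.
 */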
void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * To avoid an NMMU hang while relaxing access, we need to flush the
	 * TLB before we set the new value.
	 */
	if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
	    (atomic_read(&mm->context.copros) > 0))
		radix__flush_hugetlb_page(vma, addr);

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}