/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
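/*
 * Find a free area for a hugepage mapping: the length must be a
 * multiple of the hugepage size and fit in the address space; any hint
 * address is rounded up to a hugepage boundary before the generic,
 * cache-colour-aware arch_get_unmapped_area() does the search.
 */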
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;

	if (addr)
		addr = ALIGN(addr, huge_page_size(h));

	/* we need to make sure the colouring is OK */
	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}
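/*
 * Walk (and allocate, where missing) the page table down to the pte
 * level for a hugepage address; returns the first sub-pte of the
 * range, or NULL if an allocation failed.
 */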
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, addr);
	}
	return pte;
}
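/*
 * Lookup-only counterpart of huge_pte_alloc(): walk the existing page
 * table and return the pte for a hugepage address, or NULL if any
 * level is not present.
 */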
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}
/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}
/* __set_huge_pte_at() must be called holding the pa_tlb_lock.  A huge
 * mapping consists of (1 << HUGETLB_PAGE_ORDER) ordinary ptes, so we
 * write every sub-pte, stepping the physical address in the pte by
 * PAGE_SIZE each time, and then purge the stale TLB entries.
 */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}
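/*
 * Locked variant: purge_tlb_start()/purge_tlb_end() bracket the update
 * with the pa_tlb_lock that __set_huge_pte_at() requires.
 */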
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long flags;

	purge_tlb_start(flags);
	__set_huge_pte_at(mm, addr, ptep, entry);
	purge_tlb_end(flags);
}
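/*
 * Read the old pte and clear the whole hugepage mapping in one locked
 * section; the previous pte value is returned to the caller.
 */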
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long flags;
	pte_t entry;

	purge_tlb_start(flags);
	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));
	purge_tlb_end(flags);

	return entry;
}
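/*
 * Write-protect the hugepage mapping by rewriting all sub-ptes with
 * the write bit cleared (e.g. when a child is forked and pages go COW).
 */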
void huge_ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	unsigned long flags;
	pte_t old_pte;

	purge_tlb_start(flags);
	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
	purge_tlb_end(flags);
}
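/*
 * Update protection/dirty/accessed bits, but only rewrite the sub-ptes
 * when the new pte actually differs; returns whether a change was made.
 */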
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t pte, int dirty)
{
	unsigned long flags;
	int changed;

	purge_tlb_start(flags);
	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	}
	purge_tlb_end(flags);
	return changed;
}
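/*
 * Huge mappings on parisc are built from pte-level entries, so there
 * are no huge (leaf) pmds or puds to report.
 */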
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}