/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);

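/*
 * Allocate a context id in the range [min_id, max_id] from the shared IDA.
 * ida_get_new_above() returns -EAGAIN when it needs more memory, so retry
 * after preloading with ida_pre_get(). Returns the allocated id on success
 * or a negative errno on failure.
 */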
static int alloc_context_id(int min_id, int max_id)
{
	int index, err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > max_id) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}

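/*
 * Reserve a specific context id in the IDA, warning if the id we got back
 * is not the one that was requested.
 */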
void hash__reserve_context_id(int id)
{
	int rc, result = 0;

	do {
		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
			break;

		spin_lock(&mmu_context_lock);
		rc = ida_get_new_above(&mmu_context_ida, id, &result);
		spin_unlock(&mmu_context_lock);
	} while (rc == -EAGAIN);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

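/*
 * Allocate a context id for a hash MMU task. The maximum usable id depends
 * on whether the CPU supports 68-bit virtual addresses.
 */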
int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);

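/*
 * Set up the hash MMU context for a new mm: allocate a context id, apply
 * the default address space limit on exec, and initialise slice and
 * subpage protection state for a freshly allocated mm.
 */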
static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	index = hash__alloc_context_id();
	if (index < 0)
		return index;

	/*
	 * In the case of exec, use the default limit,
	 * otherwise inherit it from the mm we are duplicating.
	 */
	if (!mm->context.addr_limit)
		mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;

	/*
	 * The old code would re-promote on fork, we don't do that when using
	 * slices as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0)
		slice_set_user_psize(mm, mmu_virtual_psize);

	subpage_prot_init_new_context(mm);

	return index;
}

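/*
 * Set up the radix MMU context for a new mm: allocate a PID and point the
 * corresponding process table entry at the new page table.
 */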
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * Set the process table entry.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with subsequent update of the PID
	 * register (at which point HW can start loading/caching
	 * the entry) and the corresponding load by the MMU from
	 * the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

	mm->context.npu_context = NULL;

	return index;
}

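/*
 * Common entry point called by the core mm code for every new mm: pick the
 * radix or hash setup path, then initialise the remaining arch-specific
 * context state.
 */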
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;

#ifdef CONFIG_PPC_64K_PAGES
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	atomic_set(&mm->context.active_cpus, 0);

	return 0;
}

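/*
 * Return a context id to the IDA so it can be reused.
 */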
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

#ifdef CONFIG_PPC_64K_PAGES
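/*
 * Drop the references this mm still holds on its current PTE fragment page
 * and free the page if that was the last reference.
 */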
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#else
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif

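/*
 * Tear down the arch-specific context state and release the context id when
 * an mm is destroyed.
 */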
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However we know that at least the P9 implementation
		 * will avoid caching an entry with an invalid RTS field,
		 * and 0 is invalid. So this will do.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	} else
		subpage_prot_free(mm);
	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	mm->context.id = MMU_NO_CONTEXT;
}

#ifdef CONFIG_PPC_RADIX_MMU
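/*
 * Switch the hardware PID register to the next mm's context id. POWER9 DD1
 * needs extra synchronisation and an ERAT flush around the update.
 */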
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		isync();
		mtspr(SPRN_PID, next->context.id);
		isync();
		asm volatile(PPC_INVALIDATE_ERAT : : : "memory");
	} else {
		mtspr(SPRN_PID, next->context.id);
		isync();
	}
}
#endif