/*
 * PPC Huge TLB Page Support for Book3E MMU
 *
 * Copyright (C) 2009 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/mmu.h>

#ifdef CONFIG_PPC_FSL_BOOK3E
#ifdef CONFIG_PPC64
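/*
 * Pick the TLB1 entry slot (ESEL) to use for the next huge-page mapping.
 * On 64-bit, the per-core tlb_core_data in the PACA keeps a round-robin
 * cursor over the [esel_first, esel_max) window.
 */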
static inline int tlb1_next(void)
{
	struct paca_struct *paca = get_paca();
	struct tlb_core_data *tcd;
	int this, next;

	tcd = paca->tcd_ptr;
	this = tcd->esel_next;

	next = this + 1;
	if (next >= tcd->esel_max)
		next = tcd->esel_first;

	tcd->esel_next = next;
	return this;
}
#else
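/*
 * 32-bit variant: round-robin the per-CPU next_tlbcam_idx cursor over the
 * CAM (TLB1) entries, wrapping back to tlbcam_index once the last entry
 * (ncams - 1) has been handed out.
 */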
static inline int tlb1_next(void)
{
	int index, ncams;

	ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	index = this_cpu_read(next_tlbcam_idx);

	/* Just round-robin the entries and wrap when we hit the end */
	if (unlikely(index == ncams - 1))
		__this_cpu_write(next_tlbcam_idx, tlbcam_index);
	else
		__this_cpu_inc(next_tlbcam_idx);

	return index;
}
#endif /* !PPC64 */
#endif /* FSL */

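/* Translate a Linux page-size index into the hardware TSIZE encoding. */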
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}

#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_PPC64)
#include <asm/paca.h>

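/*
 * Take the per-core TLB lock (the lock byte in tlb_core_data) so the
 * tlbsx/tlbwe sequence below cannot race with the other hardware thread
 * sharing this TLB.
 */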
static inline void book3e_tlb_lock(void)
{
	struct paca_struct *paca = get_paca();
	unsigned long tmp;
	int token = smp_processor_id() + 1;

	/*
	 * Besides being unnecessary in the absence of SMT, this
	 * check prevents trying to do lbarx/stbcx. on e5500 which
	 * doesn't implement either feature.
	 */
	if (!cpu_has_feature(CPU_FTR_SMT))
		return;

	asm volatile("1: lbarx %0, 0, %1;"
		     "cmpwi %0, 0;"
		     "bne 2f;"
		     "stbcx. %2, 0, %1;"
		     "bne 1b;"
		     "b 3f;"
		     "2: lbzx %0, 0, %1;"
		     "cmpwi %0, 0;"
		     "bne 2b;"
		     "b 1b;"
		     "3:"
		     : "=&r" (tmp)
		     : "r" (&paca->tcd_ptr->lock), "r" (token)
		     : "memory");
}

static inline void book3e_tlb_unlock(void)
{
	struct paca_struct *paca = get_paca();

	if (!cpu_has_feature(CPU_FTR_SMT))
		return;

	isync();
	paca->tcd_ptr->lock = 0;
}
#else
static inline void book3e_tlb_lock(void)
{
}

static inline void book3e_tlb_unlock(void)
{
}
#endif

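/*
 * Search the TLB (via tlbsx) for a valid entry matching this effective
 * address and PID.  Returns 1 if one is already present, 0 otherwise.
 */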
static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
{
	int found = 0;

	mtspr(SPRN_MAS6, pid << 16);
	if (mmu_has_feature(MMU_FTR_USE_TLBRSRV)) {
		asm volatile(
			"li	%0,0\n"
			"tlbsx.	0,%1\n"
			"bne	1f\n"
			"li	%0,1\n"
			"1:\n"
			: "=&r"(found) : "r"(ea));
	} else {
		asm volatile(
			"tlbsx	0,%1\n"
			"mfspr	%0,0x271\n"
			"srwi	%0,%0,31\n"
			: "=&r"(found) : "r"(ea));
	}

	return found;
}

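/*
 * Preload a TLB entry for a user huge page at 'ea', built from 'pte'.
 * If the TLB already holds a matching entry, leave it alone.
 */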
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte)
{
	unsigned long mas1, mas2;
	u64 mas7_3;
	unsigned long psize, tsize, shift;
	unsigned long flags;
	struct mm_struct *mm;

#ifdef CONFIG_PPC_FSL_BOOK3E
	int index;
#endif

	if (unlikely(is_kernel_addr(ea)))
		return;

	mm = vma->vm_mm;

#ifdef CONFIG_PPC_MM_SLICES
	psize = get_slice_psize(mm, ea);
	tsize = mmu_get_tsize(psize);
	shift = mmu_psize_defs[psize].shift;
#else
	psize = vma_mmu_pagesize(vma);
	shift = __ilog2(psize);
	tsize = shift - 10;
#endif

	/*
	 * We can't be interrupted while we're setting up the MAS
	 * registers or after we've confirmed that no TLB entry exists.
	 */
	local_irq_save(flags);

	book3e_tlb_lock();

	if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
		book3e_tlb_unlock();
		local_irq_restore(flags);
		return;
	}

#ifdef CONFIG_PPC_FSL_BOOK3E
	/* We have to use the CAM(TLB1) on FSL parts for hugepages */
	index = tlb1_next();
	mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));
#endif

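	/*
	 * Build the entry: MAS1 carries the PID and page size, MAS2 the
	 * effective address plus WIMGE attributes, MAS7_3 the physical
	 * address and permission bits.  Write permission is dropped while
	 * the PTE is clean so the first store still faults and marks it
	 * dirty.
	 */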
	mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
	mas2 = ea & ~((1UL << shift) - 1);
	mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
	mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT;
	mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK;
	if (!pte_dirty(pte))
		mas7_3 &= ~(MAS3_SW|MAS3_UW);

	mtspr(SPRN_MAS1, mas1);
	mtspr(SPRN_MAS2, mas2);

	if (mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) {
		mtspr(SPRN_MAS7_MAS3, mas7_3);
	} else {
		if (mmu_has_feature(MMU_FTR_BIG_PHYS))
			mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
		mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
	}

	asm volatile ("tlbwe");

	book3e_tlb_unlock();
	local_irq_restore(flags);
}

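/* Flush the TLB entry covering a single huge page in this VMA. */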
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct hstate *hstate = hstate_file(vma->vm_file);
	unsigned long tsize = huge_page_shift(hstate) - 10;

	__flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
}