/*
 * Blackfin CPLB exception handling.
 * Copyright 2004-2007 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/module.h>
#include <linux/mm.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>

/*
 * WARNING
 *
 * This file is compiled with certain -ffixed-reg options.  We have to
 * make sure not to call any functions here that could clobber these
 * registers.
 */

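/*
 * current_rwx_mask[cpu] points at the page permission bitmaps (read,
 * write and execute, page_mask_nelts longs each) of the process that is
 * currently running on that CPU; see set_mask_dcplbs() below.  The nr_*
 * arrays count the CPLB events handled in this file.
 */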
int page_mask_nelts;
int page_mask_order;
unsigned long *current_rwx_mask[NR_CPUS];

int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];

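/*
 * CPLB entries are rewritten below with the relevant (data or
 * instruction) CPLB hardware temporarily disabled; these helpers toggle
 * the enable bits in the memory control registers around each update.
 */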
static inline void disable_dcplb(void)
{
	unsigned long ctrl;
	SSYNC();
	ctrl = bfin_read_DMEM_CONTROL();
	ctrl &= ~ENDCPLB;
	bfin_write_DMEM_CONTROL(ctrl);
	SSYNC();
}

static inline void enable_dcplb(void)
{
	unsigned long ctrl;
	SSYNC();
	ctrl = bfin_read_DMEM_CONTROL();
	ctrl |= ENDCPLB;
	bfin_write_DMEM_CONTROL(ctrl);
	SSYNC();
}

static inline void disable_icplb(void)
{
	unsigned long ctrl;
	SSYNC();
	ctrl = bfin_read_IMEM_CONTROL();
	ctrl &= ~ENICPLB;
	bfin_write_IMEM_CONTROL(ctrl);
	SSYNC();
}

static inline void enable_icplb(void)
{
	unsigned long ctrl;
	SSYNC();
	ctrl = bfin_read_IMEM_CONTROL();
	ctrl |= ENICPLB;
	bfin_write_IMEM_CONTROL(ctrl);
	SSYNC();
}

/*
 * Given the contents of the status register, return the index of the
 * CPLB that caused the fault.
 */
static inline int faulting_cplb_index(int status)
{
	int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
	return 30 - signbits;
}

/*
 * Given the contents of the status register and the DCPLB_DATA contents,
 * return true if a write access should be permitted.
 */
static inline int write_permitted(int status, unsigned long data)
{
	if (status & FAULT_USERSUPV)
		return !!(data & CPLB_SUPV_WR);
	else
		return !!(data & CPLB_USER_WR);
}

/* Counters to implement round-robin replacement. */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];

/*
 * Find an ICPLB entry to be evicted and return its index.
 */
static int evict_one_icplb(unsigned int cpu)
{
	int i;
	for (i = first_switched_icplb; i < MAX_CPLBS; i++)
		if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
			return i;
	i = first_switched_icplb + icplb_rr_index[cpu];
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_icplb;
		icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
	}
	icplb_rr_index[cpu]++;
	return i;
}

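/*
 * Likewise for DCPLBs: prefer an invalid switched entry, otherwise fall
 * back to round-robin replacement.
 */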
static int evict_one_dcplb(unsigned int cpu)
{
	int i;
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
		if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
			return i;
	i = first_switched_dcplb + dcplb_rr_index[cpu];
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_dcplb;
		dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
	}
	dcplb_rr_index[cpu]++;
	return i;
}

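/*
 * Handle a DCPLB miss: build a new entry covering the faulting address
 * (granting user read/write permission from the current process's
 * bitmaps where applicable), evict a switched slot and install the entry
 * into both the software table and the hardware registers.
 */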
static noinline int dcplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
	int status = bfin_read_DCPLB_STATUS();
	unsigned long *mask;
	int idx;
	unsigned long d_data;

	nr_dcplb_miss[cpu]++;

	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_DCACHE
	if (bfin_addr_dcachable(addr)) {
		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#ifdef CONFIG_BFIN_WT
		d_data |= CPLB_L1_AOW | CPLB_WT;
#endif
	}
#endif
	if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
		    && (status & FAULT_USERSUPV)) {
			addr &= ~0x3fffff;
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_4MB;
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
			addr &= ~(1 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		d_data |= CPLB_USER_RD | CPLB_USER_WR;
	} else {
		mask = current_rwx_mask[cpu];
		if (mask) {
			int page = addr >> PAGE_SHIFT;
			int idx = page >> 5;
			int bit = 1 << (page & 31);

			if (mask[idx] & bit)
				d_data |= CPLB_USER_RD;

			mask += page_mask_nelts;
			if (mask[idx] & bit)
				d_data |= CPLB_USER_WR;
		}
	}
	idx = evict_one_dcplb(cpu);

	addr &= PAGE_MASK;
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = d_data;

	disable_dcplb();
	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
	enable_dcplb();

	return 0;
}

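/*
 * Handle an ICPLB miss: as above, but permissions come from the execute
 * bitmap, and an instruction straddling a page boundary is handled by
 * mapping the following page as well.
 */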
static noinline int icplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
	int status = bfin_read_ICPLB_STATUS();
	int idx;
	unsigned long i_data;

	nr_icplb_miss[cpu]++;

	/* If inside the uncached DMA region, fault.  */
	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
		return CPLB_PROT_VIOL;

	if (status & FAULT_USERSUPV)
		nr_icplb_supv_miss[cpu]++;

	/*
	 * First, try to find a CPLB that matches this address.  If we
	 * find one, then the fact that we're in the miss handler means
	 * that the instruction crosses a page boundary.
	 */
	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
				addr += PAGE_SIZE;
				break;
			}
		}
	}

	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_ICACHE
	/*
	 * Normal RAM, and possibly the reserved memory area, are
	 * cacheable.
	 */
	if (addr < _ramend ||
	    (addr < physical_mem_end && reserved_mem_icache_on))
		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

	if (addr >= physical_mem_end) {
		if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & FAULT_USERSUPV)) {
			addr &= ~(1 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		i_data |= CPLB_USER_RD;
	} else {
		/*
		 * Two cases to distinguish - a supervisor access must
		 * necessarily be for a module page; we grant it
		 * unconditionally (could do better here in the future).
		 * Otherwise, check the x bitmap of the current process.
		 */
		if (!(status & FAULT_USERSUPV)) {
			unsigned long *mask = current_rwx_mask[cpu];

			if (mask) {
				int page = addr >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				mask += 2 * page_mask_nelts;
				if (mask[idx] & bit)
					i_data |= CPLB_USER_RD;
			}
		}
	}
	idx = evict_one_icplb(cpu);
	addr &= PAGE_MASK;
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = i_data;

	disable_icplb();
	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
	enable_icplb();

	return 0;
}

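/*
 * DCPLB protection violations are also used to emulate a dirty bit: the
 * first write to a clean, writable page faults here, at which point we
 * set CPLB_DIRTY in the entry and let the write proceed.
 */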
static noinline int dcplb_protection_fault(unsigned int cpu)
{
	int status = bfin_read_DCPLB_STATUS();

	nr_dcplb_prot[cpu]++;

	if (status & FAULT_RW) {
		int idx = faulting_cplb_index(status);
		unsigned long data = dcplb_tbl[cpu][idx].data;
		if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
		    write_permitted(status, data)) {
			data |= CPLB_DIRTY;
			dcplb_tbl[cpu][idx].data = data;
			bfin_write32(DCPLB_DATA0 + idx * 4, data);
			return 0;
		}
	}
	return CPLB_PROT_VIOL;
}

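/*
 * Main CPLB exception dispatcher, called with the low six bits of
 * SEQSTAT (the EXCAUSE field): 0x23 is a data access CPLB protection
 * violation, 0x26 a data access CPLB miss and 0x2C an instruction fetch
 * CPLB miss.  Returns nonzero if the fault could not be resolved.
 */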
int cplb_hdr(int seqstat, struct pt_regs *regs)
{
	int cause = seqstat & 0x3f;
	unsigned int cpu = smp_processor_id();
	switch (cause) {
	case 0x23:
		return dcplb_protection_fault(cpu);
	case 0x2C:
		return icplb_miss(cpu);
	case 0x26:
		return dcplb_miss(cpu);
	default:
		return 1;
	}
}

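/*
 * Invalidate all switched (per-process) CPLB entries on this CPU so that
 * they will be refaulted and rebuilt against the new context.
 */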
void flush_switched_cplbs(unsigned int cpu)
{
	int i;
	unsigned long flags;

	nr_cplb_flush[cpu]++;

	local_irq_save_hw(flags);
	disable_icplb();
	for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
		icplb_tbl[cpu][i].data = 0;
		bfin_write32(ICPLB_DATA0 + i * 4, 0);
	}
	enable_icplb();

	disable_dcplb();
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
		dcplb_tbl[cpu][i].data = 0;
		bfin_write32(DCPLB_DATA0 + i * 4, 0);
	}
	enable_dcplb();
	local_irq_restore_hw(flags);
}

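/*
 * Install the incoming process's permission bitmaps as the current masks
 * for this CPU and map the pages holding them with the dedicated DCPLB
 * slots (first_mask_dcplb .. first_switched_dcplb - 1), so that the miss
 * handlers above can consult the bitmaps without themselves faulting.
 */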
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
	int i;
	unsigned long addr = (unsigned long)masks;
	unsigned long d_data;
	unsigned long flags;

	if (!masks) {
		current_rwx_mask[cpu] = masks;
		return;
	}

	local_irq_save_hw(flags);
	current_rwx_mask[cpu] = masks;

	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_DCACHE
	d_data |= CPLB_L1_CHBL;
#ifdef CONFIG_BFIN_WT
	d_data |= CPLB_L1_AOW | CPLB_WT;
#endif
#endif

	disable_dcplb();
	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
		dcplb_tbl[cpu][i].addr = addr;
		dcplb_tbl[cpu][i].data = d_data;
		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
		addr += PAGE_SIZE;
	}
	enable_dcplb();
	local_irq_restore_hw(flags);
}