/*
 * ARC700 VIPT Cache Management
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
 *  -flush_cache_dup_mm (fork)
 *  -likewise for flush_cache_mm (exit/execve)
 *  -likewise for flush_cache_range, flush_cache_page (munmap, exit, COW-break)
 *
 * vineetg: Apr 2011
 *  -Now that MMU can support larger pg sz (16K), the determination of
 *   aliasing should not be based on the assumption of an 8k pg
 *
 * vineetg: Mar 2011
 *  -optimised version of flush_icache_range( ) for making I/D coherent
 *   when vaddr is available (agnostic of num of aliases)
 *
 * vineetg: Mar 2011
 *  -Added documentation about I-cache aliasing on ARC700 and the way it
 *   was handled up until MMU V2.
 *  -Spotted a three year old bug when killing the 4 aliases, which needs
 *   the bottom 2 bits, so we need to do paddr | {0x00, 0x01, 0x02, 0x03}
 *   instead of paddr | {0x00, 0x01, 0x10, 0x11}
 *   (Rajesh you owe me one now)
 *
 * vineetg: Dec 2010
 *  -Off-by-one error when computing num_of_lines to flush
 *   This broke signal handling with bionic which uses a synthetic sigret stub
 *
 * vineetg: Mar 2010
 *  -GCC can't generate ZOL for core cache flush loops.
 *   Convert them into iteration based loops, as opposed to
 *   while (start < end) types
 *
 * Vineetg: July 2009
 *  -In the I-cache flush routine we used to check for aliasing for every
 *   line INV. Instead we now set up routines per cache geometry and invoke
 *   them via function pointers.
 *
 * Vineetg: Jan 2009
 *  -Cache line flush routines used to flush an extra line beyond end addr
 *   because the check was while (end >= start) instead of (end > start)
 *   =Some call sites had to work around by doing -1, -4 etc to end param
 *   =Some callers didn't care. This was especially bad in case of INV
 *    routines, which would discard valid data (cause of the horrible ext2
 *    bug in the ARC IDE driver)
 *
 * vineetg: June 11th 2008: Fixed flush_icache_range( )
 *  -Since ARC700 caches are not coherent (I$ doesn't snoop D$) both need
 *   to be flushed, which it was not doing.
 *  -load_module( ) passes a vmalloc addr (Kernel Virtual Addr) to the API,
 *   however ARC cache maintenance OPs require a PHY addr. Thus the need for
 *   vmalloc_to_phy.
 *  -Also added an optimisation there: for a range > PAGE_SIZE we flush the
 *   entire cache in one shot rather than line by line. E.g. for a module
 *   with code sz 600k, the old code flushed 600k worth of cache
 *   (line-by-line), while the cache is only 16 or 32k.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;

#define PR_CACHE(p, cfg, str) \
	if (!(p)->ver) \
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
	else \
		n += scnprintf(buf + n, len - n, \
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", \
			(p)->sz_k, (p)->assoc, (p)->line_len, \
			(p)->vipt ? "VIPT" : "PIPT", \
			(p)->alias ? " aliasing" : "", \
			IS_ENABLED(cfg) ? "" : " (not used)");
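	/*
	 * e.g. a 32K, 2-way VIPT I-cache with 64B lines prints as:
	 * "I-Cache		: 32K, 2way/set, 64B Line, VIPT"
	 */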

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	return buf;
}

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation done here, simply read/convert the BCRs
 */
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	BUG_ON(ibcr.config != 3);
	p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
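	/*
	 * Aliasing <=> way-size exceeds page-size, i.e. some index bits come
	 * from vaddr. Worked example, assuming 8K pages: a 32K 2-way I$ has
	 * way-size 32/2 = 16K > 8K => 2 colours => aliasing; a 16K 2-way I$
	 * has way-size 8K == page-size => no aliasing.
	 */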
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		return;

	BUG_ON(dbcr.config != 2);
	p_dc->assoc = 4;		/* Fixed to 4w set assoc */
	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;
	p_dc->vipt = 1;
	p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
}

/*
 * 1. Validate the Cache Geometry (compile time config matches hardware)
 * 2. If I-cache suffers from aliasing, setup work arounds (different flush rtn)
 *    (aliasing D-cache configurations are not supported YET)
 * 3. Enable the Caches, setup default flush mode for D-Cache
 * 4. Calculate the SHMLBA used by user space
 */
void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	/*
	 * Only the master CPU needs to execute the rest of this function:
	 * - Assume symmetric SMP: all cores have the same cache config, so
	 *   any geometry checks hold for all of them
	 * - IOC setup / dma callbacks only need to be setup once
	 */
	if (cpu)
		return;

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
		int handled;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing */
		handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

		if (dc->alias && !handled)
			panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		else if (!dc->alias && handled)
			panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
	}
}

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4
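/*
 * Note the encoding: OP_FLUSH_N_INV == OP_INV | OP_FLUSH, so checks of the
 * form (op & OP_INV) or (op & OP_FLUSH) below match the combined op too.
 */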

/*
 * Common Helper for Line Operations on {I,D}-Cache
 */
static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
				     unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;
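
	/*
	 * When @sz is a compile time constant PAGE_SIZE (as in the page
	 * wrappers further down), @full_page_op const-propagates to 1 and
	 * the alignment fixup plus the per-line PTAG writes below drop out
	 * of the generated code entirely.
	 */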

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
#if (CONFIG_ARC_MMU_VER > 2)
		aux_tag = ARC_REG_IC_PTAG;
#endif
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
#if (CONFIG_ARC_MMU_VER > 2)
		aux_tag = ARC_REG_DC_PTAG;
#endif
	}

	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests
	 * so that @paddr is cache-line aligned and @num_lines is integral.
	 * This can however be avoided for page sized ops since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be an integral multiple of line size (being page sized)
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

#if (CONFIG_ARC_MMU_VER <= 2)
	/* MMUv2 and before: paddr carries the stuffed vaddr bits */
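	/*
	 * e.g. with 8K pages (PAGE_SHIFT == 13), (vaddr >> 13) & 0x1F picks
	 * vaddr bits [17:13] and drops them into paddr bits [4:0] - the
	 * intra-line offset bits which CDU line ops ignore anyway (see the
	 * MMU v1/v2 note in the aliasing write-up below).
	 */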
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#else
	/* if V-P const for loop, PTAG can be written once outside loop */
	if (full_page_op)
		write_aux_reg(aux_tag, paddr);
#endif

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/* MMUv3, cache ops require paddr separately */
		if (!full_page_op) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
#else
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
#endif
	}
}

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static unsigned int __before_dc_op(const int op)
{
	/* self-init quiets a bogus "maybe-uninitialized" warning: @reg is
	 * only read back (in __after_dc_op) when op == OP_FLUSH_N_INV,
	 * in which case it was set below */
	unsigned int reg = reg;

	if (op == OP_FLUSH_N_INV) {
		/*
		 * Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		reg = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH);
	}

	return reg;
}

static void __after_dc_op(const int op, unsigned int reg)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS);

	/* Switch back to default Invalidate mode */
	if (op == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
}

/*
 * Operation on Entire D-Cache
 * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int cacheop)
{
	unsigned int ctrl_reg;
	int aux;

	ctrl_reg = __before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop, ctrl_reg);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)
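/* e.g. the dma_cache_*() helpers below use it on kernel 1:1 mapped buffers */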

/*
 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int cacheop)
{
	unsigned long flags;
	unsigned int ctrl_reg;

	local_irq_save(flags);

	ctrl_reg = __before_dc_op(cacheop);

	__cache_line_loop(paddr, vaddr, sz, cacheop);

	__after_dc_op(cacheop, ctrl_reg);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, vaddr, sz, cacheop)
#define __dc_line_op_k(paddr, sz, cacheop)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

/*
 * I-Cache Aliasing in ARC700 VIPT caches
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The adv of using this "clumsy"
 * interface for additional info was that no new reg was needed in the CDU
 * programming model.
 *
 * 17:13 represented the max num of bits passable; actual bits needed were
 * fewer, based on the num-of-aliases possible:
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits are needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
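
/*
 * Illustration of the MMU v3 two-register scheme, for a hypothetical
 * 64K, 2-way I$ with 8K pages: way-size is 32K, so vaddr bits [14:13]
 * select 1 of 4 possible colours. A per-line invalidate then becomes
 * a two-step program:
 *
 *	write_aux_reg(ARC_REG_IC_PTAG, paddr);	// phy tag to match
 *	write_aux_reg(ARC_REG_IC_IVIL, vaddr);	// virt index to probe
 *
 * which is exactly what __cache_line_loop() above does per iteration.
 */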

/***********************************************************
 * Machine specific helper for per line I-Cache invalidate.
 */

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	__cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	unsigned long paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif /* CONFIG_SMP */

#else /* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */


/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of a page when
 * the kernel writes-to/reads-from it.
 *
 * The idea is to defer flushing of the kernel mapping after a WRITE,
 * possible if:
 * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 * -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs
 * flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		void *paddr = page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is the API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...).
 * It is called on insmod, with the kernel virtual address of the module's
 * CODE. ARC cache maintenance ops require a PHY address, thus the need to
 * convert the vmalloc addr to a PHY addr.
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/*
	 * Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled above),
	 *     the range may still straddle two virtual pages, hence the loop.
	 */
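	/*
	 * Worked example, assuming 8K pages: kstart = 0x7000_1FF0,
	 * tot_sz = 0x20. 1st iteration: off = 0x1FF0, sz = 0x10 (rest of
	 * 1st page); 2nd iteration: remaining 0x10 bytes of the next page,
	 * each with its own vmalloc_to_pfn() translation.
	 */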
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *	However in one instance, when called by kprobe (for a breakpoint in
 *	builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 *	will use a paddr to index the cache (despite VIPT). This is fine
 *	since a builtin kernel page will not have any virtual mappings.
 *	A kprobe on a loadable module will be a kernel vaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	___flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping? */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = page_address(from);
	void *kto = page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If the SRC page was already mapped in userspace AND its U-mapping
	 * is not congruent with the K-mapping, sync the former to the
	 * physical page so that the K-mapping in the copy below sees the
	 * right data.
	 *
	 * Note that while @u_vaddr refers to the DST page's userspace vaddr,
	 * it is equally valid for the SRC page as well.
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark the DST page K-mapping as dirty for later finalization by
	 * update_mmu_cache(). The finalization could have been done here as
	 * well (given that both vaddr/paddr are available), but
	 * update_mmu_cache() already has code to do that for other
	 * non-copied user pages (e.g. read faults which wire in the
	 * pagecache page directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to the physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}


/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}