#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Internal implementation */
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
void flush_data_cache(void); /* flushes data-cache only (all processors) */
void flush_instruction_cache(void); /* flushes i-cache only (all processors) */
#else
#define flush_data_cache() flush_data_cache_local(NULL)
#define flush_instruction_cache() flush_instruction_cache_local(NULL)
#endif
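
/* The otherwise-unused void * argument on the _local variants matches the
 * on_each_cpu() callback signature, so the SMP variants can run the _local
 * versions on every processor (see arch/parisc/kernel/cache.c). */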

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);

/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))
/* vmap range flushes and invalidates.  Architecturally, we don't need
 * the invalidate, because the CPU should refuse to speculate once an
 * area has been flushed.  The invalidate below is therefore really a
 * flush: it writes back any pages still marked dcache-dirty, then
 * flushes the vmap alias itself. */
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	flush_kernel_dcache_range_asm(start, start + size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	void *cursor = vaddr;

	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(cursor);

		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
			flush_kernel_dcache_page(page);
	}
	flush_kernel_dcache_range_asm(start, start + size);
}
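
/* Usage sketch (hypothetical caller, not part of this header): a driver
 * doing device I/O on a vmalloc'd buffer would bracket the transfer as
 *
 *	flush_kernel_vmap_range(buf, len);       write back before the device reads
 *	... device I/O into/out of buf ...
 *	invalidate_kernel_vmap_range(buf, len);  drop stale lines before the CPU reads
 *
 * where buf and len name an assumed vmapped buffer and its size. */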

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)
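
/* These serialize flush_dcache_page()'s walk of mapping->i_mmap when it
 * hunts down user aliases of a page-cache page; the mapping's existing
 * tree_lock is reused rather than adding a dedicated lock. */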

#define flush_icache_page(vma, page)	do {		\
	flush_kernel_dcache_page(page);			\
	flush_kernel_icache_page(page_address(page));	\
} while (0)

#define flush_icache_range(s, e)	do {		\
	flush_kernel_dcache_range_asm(s, e);		\
	flush_kernel_icache_range_asm(s, e);		\
} while (0)
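
/* Both macros above flush the D-cache before touching the I-cache:
 * PA-RISC has split instruction and data caches, so code stored through
 * the data side must reach memory before the instruction side refetches. */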

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)
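
/* copy_to_user_page() is what access_process_vm()/ptrace use to poke
 * another process's pages (e.g. breakpoints into text); the trailing
 * dcache flush pushes the new bytes out so later ifetches see them. */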

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

/* Defined in pacache.S, exported in cache.c, and used by flush_anon_page(). */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page)) {
		flush_tlb_page(vma, vmaddr);
		preempt_disable();
		flush_dcache_page_asm(page_to_phys(page), vmaddr);
		preempt_enable();
	}
}
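
/* Anonymous pages have no address_space, so flush_dcache_page() cannot
 * find their user mappings via the rmap; flush_anon_page() instead
 * flushes the user alias directly, by physical address, at vmaddr. */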

#include <asm/kmap_types.h>

#define ARCH_HAS_KMAP

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}
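
/* PA-RISC has no highmem, so kmap() is just page_address(); the real
 * work is in kunmap(), which writes back the kernel alias so that user
 * mappings of the same page don't read stale data from the VIPT cache. */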

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}

static inline void __kunmap_atomic(void *addr)
{
	flush_kernel_dcache_page_addr(addr);
	pagefault_enable();
	preempt_enable();
}
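
/* Usage sketch (assumed caller, not part of this header):
 *
 *	void *p = kmap_atomic(page);
 *	memcpy(p, src, PAGE_SIZE);	- no sleeping while mapped
 *	__kunmap_atomic(p);		- flushes the kernel alias
 */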

#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))

#endif /* _PARISC_CACHEFLUSH_H */