#ifndef __ASM_SH_CACHEFLUSH_H
#define __ASM_SH_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>

/*
 * Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(vma, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for the dcache
 *  - flush_icache_range(start, end) flushes (invalidates) a range for the icache
 *  - flush_icache_page(vma, pg) flushes (invalidates) a page for the icache
 *  - flush_cache_sigtramp(vaddr) flushes the signal trampoline
 */
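
/*
 * Usage sketch (illustrative only; 'code', 'insns' and 'len' below are
 * hypothetical): after new instructions have been written through the
 * d-cache, flush_icache_range() is the interface for making them
 * visible to the i-cache before they are executed:
 *
 *	memcpy(code, insns, len);
 *	flush_icache_range((unsigned long)code, (unsigned long)code + len);
 */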
extern void (*local_flush_cache_all)(void *args);
extern void (*local_flush_cache_mm)(void *args);
extern void (*local_flush_cache_dup_mm)(void *args);
extern void (*local_flush_cache_page)(void *args);
extern void (*local_flush_cache_range)(void *args);
extern void (*local_flush_dcache_page)(void *args);
extern void (*local_flush_icache_range)(void *args);
extern void (*local_flush_icache_page)(void *args);
extern void (*local_flush_cache_sigtramp)(void *args);

static inline void cache_noop(void *args) { }
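
/*
 * Sketch of how the hooks above are expected to be wired up (an
 * assumption about the cache init code, not something defined in this
 * header): CPUs with no cache, or with caching disabled, can point
 * each local_flush_*() hook at cache_noop so the generic entry points
 * stay cheap:
 *
 *	local_flush_cache_all   = cache_noop;
 *	local_flush_dcache_page = cache_noop;
 */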

extern void (*__flush_wback_region)(void *start, int size);
extern void (*__flush_purge_region)(void *start, int size);
extern void (*__flush_invalidate_region)(void *start, int size);

extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_dup_mm(struct mm_struct *mm);
extern void flush_cache_page(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn);
extern void flush_cache_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma,
				 struct page *page);
extern void flush_cache_sigtramp(unsigned long address);

struct flusher_data {
	struct vm_area_struct *vma;
	unsigned long addr1, addr2;
};
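
/*
 * Sketch of the intended calling convention (an assumption, modelled
 * on how the generic flush_cache_*() entry points dispatch to the
 * per-CPU hooks): the arguments are bundled into a flusher_data so
 * they can be handed to a void *args hook, e.g. for a range flush:
 *
 *	struct flusher_data data = {
 *		.vma   = vma,
 *		.addr1 = start,
 *		.addr2 = end,
 *	};
 *	local_flush_cache_range(&data);
 */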

#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *page, unsigned long);

static inline void flush_anon_page(struct vm_area_struct *vma,
				   struct page *page, unsigned long vmaddr)
{
	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	__flush_wback_region(addr, size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	__flush_invalidate_region(addr, size);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_dcache_page(page);
}

extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

#define flush_cache_vmap(start, end)		local_flush_cache_all(NULL)
#define flush_cache_vunmap(start, end)		local_flush_cache_all(NULL)

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

void kmap_coherent_init(void);
void *kmap_coherent(struct page *page, unsigned long addr);
void kunmap_coherent(void *kvaddr);
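
/*
 * Sketch of the assumed kmap_coherent() usage pattern (illustrative;
 * 'page', 'vaddr', 'src' and 'len' are hypothetical): the page is
 * mapped at a kernel address whose cache colour matches the user
 * mapping at vaddr, so copies through it do not leave d-cache aliases:
 *
 *	void *vto = kmap_coherent(page, vaddr);
 *	memcpy(vto, src, len);
 *	kunmap_coherent(vto);
 */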

#define PG_dcache_clean	PG_arch_1

void cpu_cache_init(void);

#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */