1 /*
2 * include/asm-xtensa/mmu_context.h
3 *
4 * Switch an MMU context.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 */
12
13 #ifndef _XTENSA_MMU_CONTEXT_H
14 #define _XTENSA_MMU_CONTEXT_H
15
16 #include <linux/stringify.h>
17 #include <linux/sched.h>
18
19 #include <asm/pgtable.h>
20 #include <asm/cacheflush.h>
21 #include <asm/tlbflush.h>
22 #include <asm-generic/mm_hooks.h>
23
24 #define XCHAL_MMU_ASID_BITS 8
25
26 #if (XCHAL_HAVE_TLBS != 1)
27 # error "Linux must have an MMU!"
28 #endif
29
30 extern unsigned long asid_cache;
31
32 /*
33 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
34 * any user or kernel context.
35 *
36 * 0 invalid
37 * 1 kernel
38 * 2 reserved
39 * 3 reserved
40 * 4...255 available
41 */
42
/* Invalid ASID: never assigned to any mm; forces reallocation on use. */
#define NO_CONTEXT      0
/* First ASID available to user contexts; 1..3 are kernel/reserved
 * (see the value table above). */
#define ASID_USER_FIRST 4
/* Low XCHAL_MMU_ASID_BITS of asid_cache/mm->context hold the ASID
 * proper; the bits above act as a generation counter. */
#define ASID_MASK       ((1 << XCHAL_MMU_ASID_BITS) - 1)
/* Build the RASID register image.  Byte layout appears to be one ASID
 * per ring: byte0 = 1 (kernel), byte1 = user ASID (x), bytes 2/3 = the
 * reserved values 2 and 3 — consistent with the table above.
 * NOTE(review): ring-to-byte mapping inferred from the constant; confirm
 * against the Xtensa ISA description of RASID. */
#define ASID_INSERT(x)  (0x03020001 | (((x) & ASID_MASK) << 8))
47
/*
 * Write @val (a full four-ring image, see ASID_INSERT) to the RASID
 * special register; the isync makes sure the new ASIDs are in effect
 * before any subsequent memory access is translated.
 */
static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
			      " isync\n" : : "a" (val));
}
53
/*
 * Read back the current RASID special register image
 * (the four per-ring ASIDs packed into one word).
 */
static inline unsigned long get_rasid_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp));
	return tmp;
}
60
61 static inline void
__get_new_mmu_context(struct mm_struct * mm)62 __get_new_mmu_context(struct mm_struct *mm)
63 {
64 extern void flush_tlb_all(void);
65 if (! (++asid_cache & ASID_MASK) ) {
66 flush_tlb_all(); /* start new asid cycle */
67 asid_cache += ASID_USER_FIRST;
68 }
69 mm->context = asid_cache;
70 }
71
72 static inline void
__load_mmu_context(struct mm_struct * mm)73 __load_mmu_context(struct mm_struct *mm)
74 {
75 set_rasid_register(ASID_INSERT(mm->context));
76 invalidate_page_directory();
77 }
78
79 /*
80 * Initialize the context related info for a new mm_struct
81 * instance.
82 */
83
84 static inline int
init_new_context(struct task_struct * tsk,struct mm_struct * mm)85 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
86 {
87 mm->context = NO_CONTEXT;
88 return 0;
89 }
90
/*
 * Called after current->mm has been pointed at a new mm: bring that
 * address space live.  A brand-new ASID is taken unconditionally, then
 * the context is loaded so the new mappings become visible.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	__get_new_mmu_context(next);
	__load_mmu_context(next);
}
103
104
switch_mm(struct mm_struct * prev,struct mm_struct * next,struct task_struct * tsk)105 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
106 struct task_struct *tsk)
107 {
108 unsigned long asid = asid_cache;
109
110 /* Check if our ASID is of an older version and thus invalid */
111
112 if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK))
113 __get_new_mmu_context(next);
114
115 __load_mmu_context(next);
116 }
117
/* Nothing to do on xtensa when an mm is deactivated. */
#define deactivate_mm(tsk, mm)	do { } while(0)
119
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.  Invalidates the cached page directory so no
 * stale translation of the dying mm survives its teardown.
 * NOTE(review): presumably called while the dying mm's directory may
 * still be the cached one — confirm against the generic mm code.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}
128
129
/*
 * Entering lazy-TLB mode (e.g. for a kernel thread) needs no arch work
 * here: the kernel runs under its own fixed ring-0 ASID (the table
 * above reserves ASID 1 for the kernel, see ASID_INSERT), so leaving
 * the previous user context loaded is harmless.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */

}
135
136 #endif /* _XTENSA_MMU_CONTEXT_H */
137