/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm_types.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>
#include <asm/sections.h>

/* Note: L1 stacks are CPU-private things, so we bluntly disable this
   feature in SMP mode, and use the per-CPU scratch SRAM bank only to
   store the PDA instead. */

extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
extern unsigned long l1_stack_len;

extern int l1sram_free(const void*);
extern void *l1sram_alloc_max(void*);

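/*
 * Drop one task's reference on the shared L1 stack region, and return
 * the SRAM to the allocator once the last user is gone.
 */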
static inline void free_l1stack(void)
{
	nr_l1stack_tasks--;
	if (nr_l1stack_tasks == 0) {
		l1sram_free(l1_stack_base);
		l1_stack_base = NULL;
		l1_stack_len = 0;
	}
}

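/*
 * Reserve L1 scratch SRAM for stack use.  The first task grabs as much
 * of the bank as possible; later tasks simply take another reference to
 * the same region.  Returns the usable length, or 0 if the region could
 * not be allocated or is smaller than @length.
 */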
static inline unsigned long
alloc_l1stack(unsigned long length, unsigned long *stack_base)
{
	if (nr_l1stack_tasks == 0) {
		l1_stack_base = l1sram_alloc_max(&l1_stack_len);
		if (!l1_stack_base)
			return 0;
	}

	if (l1_stack_len < length) {
		if (nr_l1stack_tasks == 0)
			l1sram_free(l1_stack_base);
		return 0;
	}
	*stack_base = (unsigned long)l1_stack_base;
	nr_l1stack_tasks++;
	return l1_stack_len;
}

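/*
 * Make @mm the owner of the L1 stack region: spill the previous owner's
 * stack contents back to its save area, then record @sp_base as the new
 * save area and copy its contents into L1.
 */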
static inline int
activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
{
	if (current_l1_stack_save)
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	mm->context.l1_stack_save = current_l1_stack_save = (void *)sp_base;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
	return 1;
}

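/* Nothing to do on deactivate; activating an mm is a full switch_mm().  */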
#define deactivate_mm(tsk,mm)	do { } while (0)

#define activate_mm(prev, next) switch_mm(prev, next, NULL)

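/*
 * The core of the context switch: install the next mm's DCPLB protection
 * masks if the outgoing mm owned the live ones (CONFIG_MPU), and swap the
 * L1-resident user stack (CONFIG_APP_STACK_L1).
 */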
static inline void __switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
			       struct task_struct *tsk)
{
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif
	if (prev_mm == next_mm)
		return;
#ifdef CONFIG_MPU
	if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
	}
#endif

#ifdef CONFIG_APP_STACK_L1
	/* L1 stack switching.  */
	if (!next_mm->context.l1_stack_save)
		return;
	if (next_mm->context.l1_stack_save == current_l1_stack_save)
		return;
	if (current_l1_stack_save)
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	current_l1_stack_save = next_mm->context.l1_stack_save;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
#endif
}

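/*
 * With the interrupt pipeline (CONFIG_IPIPE), the mm switch must run with
 * hard interrupts disabled so the head domain cannot preempt it half way
 * through; otherwise no extra protection is needed.
 */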
#ifdef CONFIG_IPIPE
#define lock_mm_switch(flags)	flags = hard_local_irq_save_cond()
#define unlock_mm_switch(flags)	hard_local_irq_restore_cond(flags)
#else
#define lock_mm_switch(flags)	do { (void)(flags); } while (0)
#define unlock_mm_switch(flags)	do { (void)(flags); } while (0)
#endif /* CONFIG_IPIPE */

#ifdef CONFIG_MPU
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;
	lock_mm_switch(flags);
	__switch_mm(prev, next, tsk);
	unlock_mm_switch(flags);
}

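/*
 * Record a page's VM_READ/VM_WRITE/VM_EXEC permissions in the mm's rwx
 * bitmaps: three arrays of page_mask_nelts words, one per permission,
 * with one bit per 4 KiB page.  Async memory bank addresses are folded
 * into the slots directly above the end of DRAM so the bitmaps stay
 * contiguous.
 */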
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
				unsigned long flags)
{
	unsigned long *mask = mm->context.page_rwx_mask;
	unsigned long page;
	unsigned long idx;
	unsigned long bit;

	if (unlikely(addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
		page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> 12;
	else
		page = addr >> 12;
	idx = page >> 5;
	bit = 1 << (page & 31);

	if (flags & VM_READ)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_WRITE)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_EXEC)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
}

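/*
 * If this mm's rwx bitmaps are the ones currently live on this CPU,
 * flush the switched CPLBs and reload the data CPLB masks so permission
 * changes take effect immediately.
 */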
static inline void update_protections(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
	}
}
#else /* !CONFIG_MPU */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	__switch_mm(prev, next, tsk);
}
#endif /* CONFIG_MPU */

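/* Nothing to do for lazy TLB: there are no per-mm hardware page tables
   on Blackfin to lazily switch away from.  */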
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* Called when creating a new context during fork() or execve().  */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_MPU
	unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);
	/* Bail out if the allocation failed rather than memset()ing a
	   NULL pointer below.  */
	if (!p)
		return -ENOMEM;
	mm->context.page_rwx_mask = (unsigned long *)p;
	memset(mm->context.page_rwx_mask, 0,
	       page_mask_nelts * 3 * sizeof(long));
#endif
	return 0;
}

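/*
 * Tear down a dying mm: release its L1 stack reference, free any SRAM
 * chunks tracked on its sram_list, and drop its MPU permission bitmaps.
 */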
static inline void destroy_context(struct mm_struct *mm)
{
	struct sram_list_struct *tmp;
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif

#ifdef CONFIG_APP_STACK_L1
	if (current_l1_stack_save == mm->context.l1_stack_save)
		current_l1_stack_save = NULL;
	if (mm->context.l1_stack_save)
		free_l1stack();
#endif

	while ((tmp = mm->context.sram_list)) {
		mm->context.sram_list = tmp->next;
		sram_free(tmp->addr);
		kfree(tmp);
	}
#ifdef CONFIG_MPU
	if (current_rwx_mask[cpu] == mm->context.page_rwx_mask)
		current_rwx_mask[cpu] = NULL;
	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}

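/*
 * I-pipe aware callers outside this file use these to protect an mm
 * switch, mirroring lock_mm_switch()/unlock_mm_switch() above.
 */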
#define ipipe_mm_switch_protect(flags)		\
	flags = hard_local_irq_save_cond()

#define ipipe_mm_switch_unprotect(flags)	\
	hard_local_irq_restore_cond(flags)

#endif /* __BLACKFIN_MMU_CONTEXT_H__ */