/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>
#include <asm/sections.h>

/* Note: L1 stacks are CPU-private things, so we bluntly disable this
   feature in SMP mode, and use the per-CPU scratch SRAM bank only to
   store the PDA instead. */

extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
extern unsigned long l1_stack_len;

extern int l1sram_free(const void *);
extern void *l1sram_alloc_max(void *);

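/* Drop one reference to the shared L1 stack region; the SRAM is only
   handed back to the allocator once the last user is gone. */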
static inline void free_l1stack(void)
{
	nr_l1stack_tasks--;
	if (nr_l1stack_tasks == 0) {
		l1sram_free(l1_stack_base);
		l1_stack_base = NULL;
		l1_stack_len = 0;
	}
}

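/*
 * Grab a reference to the shared L1 stack region for a new task.  The
 * first user allocates the largest free L1 SRAM block; everyone after
 * that just shares it.  Returns the usable stack length, or 0 if the
 * region cannot be allocated or is smaller than the requested length.
 */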
static inline unsigned long
alloc_l1stack(unsigned long length, unsigned long *stack_base)
{
	if (nr_l1stack_tasks == 0) {
		l1_stack_base = l1sram_alloc_max(&l1_stack_len);
		if (!l1_stack_base)
			return 0;
	}

	if (l1_stack_len < length) {
		if (nr_l1stack_tasks == 0)
			l1sram_free(l1_stack_base);
		return 0;
	}
	*stack_base = (unsigned long)l1_stack_base;
	nr_l1stack_tasks++;
	return l1_stack_len;
}

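/*
 * Make @sp_base the live L1 stack image: spill the previous owner's
 * contents back to its save area, record the new save area in the mm,
 * and copy the new image into L1 SRAM itself.
 */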
static inline int
activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
{
	if (current_l1_stack_save)
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	mm->context.l1_stack_save = current_l1_stack_save = (void *)sp_base;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
	return 1;
}

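/* Deactivation needs no work; activating an mm is just a context
   switch with no outgoing task. */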
#define deactivate_mm(tsk, mm)	do { } while (0)

#define activate_mm(prev, next)	switch_mm(prev, next, NULL)

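/*
 * The guts of the context switch: swap the per-CPU CPLB protection
 * masks when the MPU is in use, and swap L1 SRAM stack contents when
 * application stacks live in L1.
 */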
static inline void __switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
			       struct task_struct *tsk)
{
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif
	if (prev_mm == next_mm)
		return;
#ifdef CONFIG_MPU
	if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
	}
#endif

#ifdef CONFIG_APP_STACK_L1
	/* L1 stack switching.  */
	if (!next_mm->context.l1_stack_save)
		return;
	if (next_mm->context.l1_stack_save == current_l1_stack_save)
		return;
	if (current_l1_stack_save) {
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	}
	current_l1_stack_save = next_mm->context.l1_stack_save;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
#endif
}

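/* Under the interrupt pipeline (I-pipe), an mm switch must run with
   hardware interrupts off; otherwise no extra locking is needed. */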
#ifdef CONFIG_IPIPE
#define lock_mm_switch(flags)	flags = hard_local_irq_save_cond()
#define unlock_mm_switch(flags)	hard_local_irq_restore_cond(flags)
#else
#define lock_mm_switch(flags)	do { (void)(flags); } while (0)
#define unlock_mm_switch(flags)	do { (void)(flags); } while (0)
#endif /* CONFIG_IPIPE */

#ifdef CONFIG_MPU
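/* With the MPU enabled the switch touches CPLB state, so it is wrapped
   in the lock defined above. */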
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;
	lock_mm_switch(flags);
	__switch_mm(prev, next, tsk);
	unlock_mm_switch(flags);
}

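/*
 * Update the R/W/X permission bits for one 4KB page in this mm's
 * protection bitmaps.  The read, write and execute bitmaps are laid
 * out back to back, page_mask_nelts words apiece; addresses in the
 * async memory banks are folded into the slots just above the end of
 * DRAM.
 */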
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
				unsigned long flags)
{
	unsigned long *mask = mm->context.page_rwx_mask;
	unsigned long page;
	unsigned long idx;
	unsigned long bit;

	if (unlikely(addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
		page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> 12;
	else
		page = addr >> 12;
	idx = page >> 5;
	bit = 1 << (page & 31);

	if (flags & VM_READ)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_WRITE)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_EXEC)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
}

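/* If this mm's masks are the ones currently live on this CPU, flush
   the stale CPLB entries and reload the data CPLB masks. */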
static inline void update_protections(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
	}
}
#else /* !CONFIG_MPU */
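/* Without an MPU there is no CPLB state to juggle, so the switch needs
   no locking. */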
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	__switch_mm(prev, next, tsk);
}
#endif

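/* Blackfin has no TLB, so there is nothing to do for lazy TLB mode. */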
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* Called when creating a new context during fork() or execve().  */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_MPU
	unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);
	if (!p)
		return -ENOMEM;
	mm->context.page_rwx_mask = (unsigned long *)p;
	memset(mm->context.page_rwx_mask, 0,
	       page_mask_nelts * 3 * sizeof(long));
#endif
	return 0;
}

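/*
 * Tear down a dying context: drop its L1 stack reference, free any
 * SRAM chunks still queued on its sram_list, and release the MPU
 * protection bitmaps.
 */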
static inline void destroy_context(struct mm_struct *mm)
{
	struct sram_list_struct *tmp;
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif

#ifdef CONFIG_APP_STACK_L1
	if (current_l1_stack_save == mm->context.l1_stack_save)
		current_l1_stack_save = NULL;
	if (mm->context.l1_stack_save)
		free_l1stack();
#endif

	while ((tmp = mm->context.sram_list)) {
		mm->context.sram_list = tmp->next;
		sram_free(tmp->addr);
		kfree(tmp);
	}
#ifdef CONFIG_MPU
	if (current_rwx_mask[cpu] == mm->context.page_rwx_mask)
		current_rwx_mask[cpu] = NULL;
	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}

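/* Hooks used by the interrupt pipeline to protect an mm switch; these
   rely on the same conditional hard-irq helpers as lock_mm_switch(). */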
#define ipipe_mm_switch_protect(flags)		\
	flags = hard_local_irq_save_cond()

#define ipipe_mm_switch_unprotect(flags)	\
	hard_local_irq_restore_cond(flags)

#endif /* __BLACKFIN_MMU_CONTEXT_H__ */