1 /*
2 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6 #ifndef __UM_MMU_CONTEXT_H
7 #define __UM_MMU_CONTEXT_H
8
9 #include <linux/sched.h>
10 #include <asm/mmu.h>
11
12 extern void uml_setup_stubs(struct mm_struct *mm);
13 /*
14 * Needed since we do not use the asm-generic/mm_hooks.h:
15 */
/*
 * Called when an address space is duplicated (fork); map the UML stub
 * pages into the child's new mm.
 */
static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	uml_setup_stubs(mm);
}
20 extern void arch_exit_mmap(struct mm_struct *mm);
/*
 * Hook invoked when [start, end) is unmapped from @mm.
 * UML has no per-arch work to do here.
 */
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/* Intentionally empty. */
}
/*
 * Hook invoked while exec sets up the new mm for a binary.
 * UML has no per-arch work to do here.
 */
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	/* Intentionally empty. */
}
30
/*
 * Per-VMA access check hook.  UML imposes no extra restrictions, so
 * every access (read, write, execute, foreign) is permitted.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute,
					     bool foreign)
{
	return true;
}
37
/*
 * Per-PTE access check hook.  UML imposes no extra restrictions, so
 * any access to any PTE is permitted.
 */
static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
	return true;
}
43
44 /*
45 * end asm-generic/mm_hooks.h functions
46 */
47
/* No per-arch work is needed on UML when a task drops its mm. */
#define deactivate_mm(tsk,mm) do { } while (0)
49
50 extern void force_flush_all(void);
51
/*
 * Make @new the active address space for the current task.
 *
 * Switches the underlying host context first, then maps the UML stub
 * pages into the new mm; mmap_sem is taken around uml_setup_stubs()
 * because it modifies the address space.
 */
static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
	/*
	 * This is called by fs/exec.c and sys_unshare()
	 * when the new ->mm is used for the first time.
	 */
	__switch_mm(&new->context.id);
	down_write(&new->mmap_sem);
	uml_setup_stubs(new);
	up_write(&new->mmap_sem);
}
63
switch_mm(struct mm_struct * prev,struct mm_struct * next,struct task_struct * tsk)64 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
65 struct task_struct *tsk)
66 {
67 unsigned cpu = smp_processor_id();
68
69 if(prev != next){
70 cpumask_clear_cpu(cpu, mm_cpumask(prev));
71 cpumask_set_cpu(cpu, mm_cpumask(next));
72 if(next != &init_mm)
73 __switch_mm(&next->context.id);
74 }
75 }
76
/*
 * Lazy-TLB entry hook; UML needs no action when a kernel thread
 * borrows a user mm.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* Intentionally empty. */
}
81
82 extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
83
84 extern void destroy_context(struct mm_struct *mm);
85
86 #endif
87