/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_MMU_CONTEXT_H
#define __PARISC_MMU_CONTEXT_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <asm-generic/mm_hooks.h>

/* PA-RISC has no per-CPU lazy-TLB state to set up, so this is a no-op. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* On PA-RISC we actually have enough contexts (space ids) to justify
 * an allocator for them.  prumpf */

extern unsigned long alloc_sid(void);
extern void free_sid(unsigned long);
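
/*
 * Background sketch (explanatory, not part of the original header):
 * PA-RISC tags TLB entries with a space id (sid), so each mm carries one
 * in mm->context and a context switch just loads the new sid instead of
 * flushing the TLB. The intended lifecycle, roughly, is:
 *
 *	mm->context = alloc_sid();	// init_new_context(), at mm creation
 *	...				// mm lives; the sid tags its TLB entries
 *	free_sid(mm->context);		// destroy_context(), at mm teardown
 *	mm->context = 0;		// 0 means "no sid allocated"
 */
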
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	/* A brand-new mm must have exactly one user at this point. */
	BUG_ON(atomic_read(&mm->mm_users) != 1);

	mm->context = alloc_sid();
	return 0;
}

static inline void
destroy_context(struct mm_struct *mm)
{
	/* Return the space id; 0 marks the mm as having none. */
	free_sid(mm->context);
	mm->context = 0;
}

static inline unsigned long __space_to_prot(mm_context_t context)
{
#if SPACEID_SHIFT == 0
	return context << 1;
#else
	return context >> (SPACEID_SHIFT - 1);
#endif
}

static inline void load_context(mm_context_t context)
{
	mtsp(context, 3);
	mtctl(__space_to_prot(context), 8);
}

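/*
 * Worked example (illustrative, not from this file): load_context() puts
 * the space id itself into space register 3 (mtsp) and a derived
 * protection id into cr8 (mtctl). Bit 0 of a PA-RISC protection id
 * register is the write-disable bit, so the id proper starts at bit 1;
 * with SPACEID_SHIFT == 0 a space id of, say, 0x12 becomes the
 * protection id 0x12 << 1 == 0x24. On configurations where space ids
 * are stored pre-shifted by SPACEID_SHIFT, the right shift by
 * (SPACEID_SHIFT - 1) lands the id in the same bit position.
 */
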
static inline void switch_mm_irqs_off(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	if (prev != next) {
#ifdef CONFIG_TLB_PTLOCK
		/* put physical address of page_table_lock in cr28 (tr4)
		   for TLB faults */
		spinlock_t *pgd_lock = &next->page_table_lock;
		mtctl(__pa(__ldcw_align(&pgd_lock->rlock.raw_lock)), 28);
#endif
		/* cr25 holds the physical address of the current pgd. */
		mtctl(__pa(next->pgd), 25);
		load_context(next->context);
	}
}

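/*
 * Note (explanatory, not from this file): the values staged here are
 * consumed by the low-level TLB fault handlers, which read the pgd's
 * physical address from cr25 (and, with CONFIG_TLB_PTLOCK, the
 * page_table_lock's physical address from cr28) while walking the page
 * tables. Because TLB entries are tagged by space id, no TLB flush is
 * needed on a context switch.
 */
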
static inline void switch_mm(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	unsigned long flags;

	if (prev == next)
		return;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
/* Tell the generic code we supply our own irqs-off variant. */
#define switch_mm_irqs_off switch_mm_irqs_off

#define deactivate_mm(tsk, mm)	do { } while (0)

static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * activate_mm() is our one chance to allocate a space id
	 * for a new mm created in the exec path. There's also some
	 * lazy TLB stuff, which is currently dead code, but we only
	 * allocate a space id if one hasn't been allocated already,
	 * so we should be OK.
	 */

	BUG_ON(next == &init_mm); /* Should never happen */

	if (next->context == 0)
		next->context = alloc_sid();

	switch_mm(prev, next, current);
}
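
/*
 * Illustrative call order in the exec path (a sketch, not code from this
 * file): fs/exec.c's exec_mmap() installs the new mm via activate_mm(),
 * which ends up in the switch path above:
 *
 *	exec_mmap(new_mm)
 *	  -> activate_mm(tsk->active_mm, new_mm)  // allocates a sid if needed
 *	       -> switch_mm(prev, new_mm, current)
 *	            -> switch_mm_irqs_off(...)    // loads cr25, sr3 and cr8
 */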
#endif