#ifndef _LINUX_MMAP_LOCK_H
#define _LINUX_MMAP_LOCK_H

#include <linux/lockdep.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/rwsem.h>
#include <linux/tracepoint-defs.h>
#include <linux/types.h>
#include <linux/vm_event.h>

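/*
 * With CONFIG_SPECULATIVE_PAGE_FAULT, struct mm_struct carries mmap_seq,
 * a sequence count which mmap_lock writers bump on acquire and release
 * (seqcount style: odd while write locked, even otherwise).  Static
 * initializers must therefore start it out even (unlocked).
 */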
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
#define MMAP_LOCK_SEQ_INITIALIZER(name) \
	.mmap_seq = 0,
#else
#define MMAP_LOCK_SEQ_INITIALIZER(name)
#endif

#define MMAP_LOCK_INITIALIZER(name) \
	.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock), \
	MMAP_LOCK_SEQ_INITIALIZER(name)

DECLARE_TRACEPOINT(mmap_lock_start_locking);
DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
DECLARE_TRACEPOINT(mmap_lock_released);

#ifdef CONFIG_TRACING

void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
					   bool success);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);

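/*
 * These wrappers only call into the tracing code when the corresponding
 * tracepoint is enabled.  tracepoint_enabled() is a static-key based
 * test, so the checks cost next to nothing while tracing is off.
 */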
static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
	if (tracepoint_enabled(mmap_lock_start_locking))
		__mmap_lock_do_trace_start_locking(mm, write);
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						      bool write, bool success)
{
	if (tracepoint_enabled(mmap_lock_acquire_returned))
		__mmap_lock_do_trace_acquire_returned(mm, write, success);
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
	if (tracepoint_enabled(mmap_lock_released))
		__mmap_lock_do_trace_released(mm, write);
}

#else /* !CONFIG_TRACING */

static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						      bool write, bool success)
{
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
}

#endif /* CONFIG_TRACING */

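/*
 * Set up a newly allocated mm: the rwsem and, when configured, the
 * speculative fault sequence count both start out unlocked.
 */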
static inline void mmap_init_lock(struct mm_struct *mm)
{
	init_rwsem(&mm->mmap_lock);
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	mm->mmap_seq = 0;
#endif
}

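/*
 * Writer side of the mmap_seq protocol, called with mmap_lock held for
 * writing.  Like a seqcount writer, the count is moved to an odd value
 * when the write lock is taken and back to an even value when it is
 * released, so lockless readers can detect that a modification was (or
 * still is) in progress and retry or fall back to taking mmap_lock.
 */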
static inline void __mmap_seq_write_lock(struct mm_struct *mm)
{
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	/* Must not already be write locked (count must be even). */
	VM_BUG_ON_MM(mm->mmap_seq & 1, mm);
	mm->mmap_seq++;
	/* Publish the odd count before any of the stores it protects. */
	smp_wmb();
#endif
}

static inline void __mmap_seq_write_unlock(struct mm_struct *mm)
{
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	/* Order the protected stores before the count turns even again. */
	smp_wmb();
	mm->mmap_seq++;
	VM_BUG_ON_MM(mm->mmap_seq & 1, mm);
#endif
}

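/*
 * Reader side of the mmap_seq protocol: sample the count before a
 * speculative (mmap_lock-less) walk, then re-check it before committing
 * to the result.  A minimal sketch of the intended usage follows; the
 * SPF_ABORT event name is an illustrative placeholder, not something
 * defined by this header:
 *
 *	seq = mmap_seq_read_start(mm);
 *	if (seq & 1)
 *		goto fallback;		(writer active, take mmap_lock)
 *	... lockless vma lookup and fault handling ...
 *	if (!mmap_seq_read_check(mm, seq, SPF_ABORT))
 *		goto fallback;		(mm changed under us, discard result)
 */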
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
static inline unsigned long mmap_seq_read_start(struct mm_struct *mm)
{
	unsigned long seq;

	seq = READ_ONCE(mm->mmap_seq);
	/* Pairs with the smp_wmb() in __mmap_seq_write_unlock(). */
	smp_rmb();
	return seq;
}

static inline bool __mmap_seq_read_check(struct mm_struct *mm,
					 unsigned long seq)
{
	/* Pairs with the smp_wmb() in __mmap_seq_write_lock(). */
	smp_rmb();
	return seq == READ_ONCE(mm->mmap_seq);
}

#ifdef CONFIG_SPECULATIVE_PAGE_FAULT_STATS
static inline bool mmap_seq_read_check(struct mm_struct *mm, unsigned long seq,
				       enum vm_event_item fail_event)
{
	if (__mmap_seq_read_check(mm, seq))
		return true;
	count_vm_event(fail_event);
	return false;
}
#else
#define mmap_seq_read_check(mm, seq, fail) __mmap_seq_read_check(mm, seq)
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT_STATS */
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */

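/*
 * Write-lock acquisition, in all its flavors.  On success each variant
 * also enters the mmap_seq write-side critical section so that
 * speculative readers notice the modification.
 */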
static inline void mmap_write_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, true, true);
	__mmap_seq_write_lock(mm);
}

static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write_nested(&mm->mmap_lock, subclass);
	__mmap_lock_trace_acquire_returned(mm, true, true);
	__mmap_seq_write_lock(mm);
}

static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	int error;

	__mmap_lock_trace_start_locking(mm, true);
	error = down_write_killable(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, true, !error);
	if (likely(!error))
		__mmap_seq_write_lock(mm);
	return error;
}

static inline bool mmap_write_trylock(struct mm_struct *mm)
{
	bool ok;

	__mmap_lock_trace_start_locking(mm, true);
	ok = down_write_trylock(&mm->mmap_lock) != 0;
	__mmap_lock_trace_acquire_returned(mm, true, ok);
	if (likely(ok))
		__mmap_seq_write_lock(mm);
	return ok;
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, true);
	__mmap_seq_write_unlock(mm);
	up_write(&mm->mmap_lock);
}

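/*
 * Downgrading to a read lock ends the write-side mmap_seq critical
 * section first, and the trace reports the result as a successful
 * read-lock acquisition (write == false).
 */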
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
	__mmap_lock_trace_acquire_returned(mm, false, true);
	__mmap_seq_write_unlock(mm);
	downgrade_write(&mm->mmap_lock);
}

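/*
 * Read-side acquisitions leave mmap_seq alone: readers exclude writers
 * through the rwsem itself, so the count only changes on write-lock
 * transitions.
 */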
static inline void mmap_read_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, false);
	down_read(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, false, true);
}

static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
	int error;

	__mmap_lock_trace_start_locking(mm, false);
	error = down_read_killable(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, false, !error);
	return error;
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	bool ok;

	__mmap_lock_trace_start_locking(mm, false);
	ok = down_read_trylock(&mm->mmap_lock) != 0;
	__mmap_lock_trace_acquire_returned(mm, false, ok);
	return ok;
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read(&mm->mmap_lock);
}

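/*
 * For the rare case where the read lock is released by a context other
 * than the task that acquired it; bypasses lockdep's owner tracking.
 */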
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read_non_owner(&mm->mmap_lock);
}

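/*
 * Locking assertions: a lockdep check when lockdep is enabled, backed by
 * a cheaper rwsem_is_locked() check under CONFIG_DEBUG_VM.
 */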
static inline void mmap_assert_locked(struct mm_struct *mm)
{
	lockdep_assert_held(&mm->mmap_lock);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}

static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
	lockdep_assert_held_write(&mm->mmap_lock);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}

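/*
 * Lets long-running lock holders poll for waiters so they can drop and
 * reacquire mmap_lock instead of starving them.
 */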
static inline bool mmap_lock_is_contended(struct mm_struct *mm)
{
	return rwsem_is_contended(&mm->mmap_lock) != 0;
}

#endif /* _LINUX_MMAP_LOCK_H */