/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on, so mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The full memory barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
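
/*
 * Illustrative sketch (not part of this header): a caller that must keep
 * the mm_struct itself alive across an unbounded wait pairs mmgrab() with
 * mmdrop(). The surrounding struct my_ctx and the keep_mm()/put_mm()
 * helpers below are hypothetical.
 *
 *	void keep_mm(struct my_ctx *ctx, struct mm_struct *mm)
 *	{
 *		mmgrab(mm);		pins the mm_struct (mm_count)
 *		ctx->mm = mm;
 *	}
 *
 *	void put_mm(struct my_ctx *ctx)
 *	{
 *		mmdrop(ctx->mm);	may free the mm_struct on last ref
 *	}
 *
 * Note that this only pins the mm_struct; dereferencing the address space
 * still requires mmget_not_zero() as documented above.
 */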

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as above, but performs the slow path from async context.
 * Can also be called from atomic context.
 */
void mmput_async(struct mm_struct *);
#endif
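
/*
 * Illustrative sketch (not part of this header): code that wants to look
 * at another task's address space takes a temporary reference with
 * mmget_not_zero() and drops it with mmput() as soon as it is done. The
 * inspect_mm() helper is hypothetical.
 *
 *	int inspect_mm(struct mm_struct *mm)
 *	{
 *		if (!mmget_not_zero(mm))
 *			return -ESRCH;		address space already gone
 *
 *		... short, bounded access to the address space ...
 *
 *		mmput(mm);
 *		return 0;
 *	}
 */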

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the given mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);
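
/*
 * Illustrative sketch (not part of this header): get_task_mm() returns the
 * task's mm with mm_users elevated, or NULL for kernel threads and exiting
 * tasks, so the caller must drop the reference with mmput(). The
 * dump_task_vmas() helper is hypothetical.
 *
 *	void dump_task_vmas(struct task_struct *task)
 *	{
 *		struct mm_struct *mm = get_task_mm(task);
 *
 *		if (!mm)
 *			return;			kernel thread or exiting task
 *
 *		... inspect mm under the appropriate locks ...
 *
 *		mmput(mm);
 *	}
 */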

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
#ifndef arch_get_mmap_end
#define arch_get_mmap_end(addr)	(TASK_SIZE)
#endif

#ifndef arch_get_mmap_base
#define arch_get_mmap_base(addr, base) (base)
#endif

extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be a false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done &&
			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 * PF_MEMALLOC_PIN implies !__GFP_MOVABLE
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
		/*
		 * NOIO implies both NOIO and NOFS and it is the more
		 * restrictive context, so always make sure it takes
		 * precedence.
		 */
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;

		if (pflags & PF_MEMALLOC_PIN)
			flags &= ~__GFP_MOVABLE;
	}
	return flags;
}
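
/*
 * Illustrative sketch (not part of this header): a reclaim-sensitive path
 * masks its request through current_gfp_context() so that any active
 * NOIO/NOFS/PIN scope of the current task is honoured. The starting flags
 * below are only an example.
 *
 *	gfp_t gfp = current_gfp_context(GFP_KERNEL);
 *
 * Inside a memalloc_nofs_save() scope this yields GFP_KERNEL with __GFP_FS
 * cleared (i.e. GFP_NOFS); inside a memalloc_noio_save() scope both
 * __GFP_IO and __GFP_FS are cleared (i.e. GFP_NOIO).
 */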

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(unsigned long ip);
extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(unsigned long ip) { }
static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in
 * functions that might allocate, but often don't. The fs_reclaim
 * annotations compile to nothing without CONFIG_LOCKDEP. Includes a
 * conditional might_sleep() if @gfp_mask allows blocking.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
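
/*
 * Illustrative sketch (not part of this header): a function that only
 * allocates on a rare slow path can still be annotated so that lockdep and
 * might_sleep() catch misuse on every call. lookup_or_create() and the
 * cache it consults are hypothetical.
 *
 *	struct item *lookup_or_create(u64 key, gfp_t gfp)
 *	{
 *		struct item *it;
 *
 *		might_alloc(gfp);		checked even on the fast path
 *
 *		it = cache_lookup(key);
 *		if (it)
 *			return it;
 *		return kzalloc(sizeof(*it), gfp);
 *	}
 */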

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with the flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
 * Always make sure that @flags is the return value from the pairing
 * memalloc_noio_save call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
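
/*
 * Illustrative sketch (not part of this header): a block-layer path that
 * must not recurse into the IO layer brackets its work with the save/
 * restore pair; every allocation inside the scope then behaves as
 * GFP_NOIO. The do_io_completion() helper is hypothetical.
 *
 *	void do_io_completion(void)
 *	{
 *		unsigned int noio_flags = memalloc_noio_save();
 *
 *		... allocations here implicitly lose __GFP_IO ...
 *
 *		memalloc_noio_restore(noio_flags);
 *	}
 */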

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with the flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
 * Always make sure that @flags is the return value from the pairing
 * memalloc_nofs_save call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
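
/*
 * Illustrative sketch (not part of this header): a filesystem holding a
 * lock that reclaim could also take uses the GFP_NOFS scope so that any
 * allocation in the section, including ones in called helpers, cannot
 * recurse back into the filesystem. The transaction helper is hypothetical.
 *
 *	void fs_do_transaction(void)
 *	{
 *		unsigned int nofs_flags = memalloc_nofs_save();
 *
 *		... journal/transaction work; allocations lose __GFP_FS ...
 *
 *		memalloc_nofs_restore(nofs_flags);
 *	}
 */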

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
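
/*
 * Illustrative sketch (not part of this header): PF_MEMALLOC marks the
 * task as performing reclaim itself, so allocations inside the scope do
 * not recurse into direct reclaim and may dip into memory reserves. The
 * pairing mirrors the other memalloc_*_save()/restore() helpers.
 *
 *	unsigned int noreclaim_flags = memalloc_noreclaim_save();
 *	... allocation that must not recurse into reclaim ...
 *	memalloc_noreclaim_restore(noreclaim_flags);
 */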

static inline unsigned int memalloc_pin_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_PIN;

	current->flags |= PF_MEMALLOC_PIN;
	return flags;
}

static inline void memalloc_pin_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags;
}
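
/*
 * Illustrative sketch (not part of this header): PF_MEMALLOC_PIN makes
 * current_gfp_context() clear __GFP_MOVABLE, so pages allocated inside the
 * scope are not placed where a long-term pin would block memory offlining.
 * The pairing below only shows the intended usage.
 *
 *	unsigned int pin_flags = memalloc_pin_save();
 *	... allocate and long-term pin user pages ...
 *	memalloc_pin_restore(pin_flags);
 */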

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All
 * __GFP_ACCOUNT allocations until the end of the scope will be charged to the
 * given memcg.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (!in_task()) {
		old = this_cpu_read(int_active_memcg);
		this_cpu_write(int_active_memcg, memcg);
	} else {
		old = current->active_memcg;
		current->active_memcg = memcg;
	}

	return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif
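
/*
 * Illustrative sketch (not part of this header): because the scope nests,
 * callers restore the previous memcg rather than passing NULL. The memcg
 * pointer and the work done inside the scope are hypothetical.
 *
 *	struct mem_cgroup *old_memcg = set_active_memcg(memcg);
 *
 *	... __GFP_ACCOUNT allocations here are charged to memcg ...
 *
 *	set_active_memcg(old_memcg);
 */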

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = (1U << 6),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = (1U << 7),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE = (1U << 0),
	MEMBARRIER_FLAG_RSEQ = (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */