/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on; mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
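
/*
 * Illustrative sketch (not part of the original header): how the two
 * reference counts are typically combined. mmgrab()/mmdrop() keep the
 * mm_struct itself alive, while mmget_not_zero()/mmput() are needed
 * before the address space may be touched:
 *
 *	mmgrab(mm);			// pin the struct (mm_count)
 *	...
 *	if (mmget_not_zero(mm)) {	// pin the address space (mm_users)
 *		...access the address space...
 *		mmput(mm);
 *	}
 *	mmdrop(mm);			// release the struct pin
 */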
/*
 * This has to be called after a get_task_mm()/mmget_not_zero()
 * followed by taking the mmap_sem for writing before modifying the
 * vmas or anything the coredump pretends not to change from under it.
 *
 * It also has to be called when mmgrab() is used in the context of
 * the process, but then the mm_count refcount is transferred outside
 * the context of the process to run down_write() on that pinned mm.
 *
 * NOTE: find_extend_vma() called from GUP context is the only place
 * that can modify the "mm" (notably the vm_start/end) under mmap_sem
 * for reading and outside the context of the process, so it is also
 * the only case that holds the mmap_sem for reading that must call
 * this function. Generally if the mmap_sem is held for reading
 * there's no need for this check after get_task_mm()/mmget_not_zero().
 *
 * This function can be obsoleted and the check removed once the
 * coredump code holds the mmap_sem for writing before invoking the
 * ->core_dump methods.
 */
static inline bool mmget_still_valid(struct mm_struct *mm)
{
	return likely(!mm->core_state);
}
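
/*
 * Illustrative sketch (not in the original header) of the protocol the
 * comment above describes: re-validate against a concurrent coredump
 * after taking mmap_sem for writing on an mm obtained via get_task_mm():
 *
 *	mm = get_task_mm(task);
 *	if (mm) {
 *		down_write(&mm->mmap_sem);
 *		if (mmget_still_valid(mm)) {
 *			...modify vmas...
 *		}
 *		up_write(&mm->mmap_sem);
 *		mmput(mm);
 *	}
 */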

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as above, but performs the slow path from async context. Can
 * also be called from atomic context.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);
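
/*
 * Illustrative sketch (not in the original header): the common pattern
 * for safely accessing another task's address space:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		...operate on the address space...
 *		mmput(mm);
 *	}
 */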

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
#ifndef arch_get_mmap_end
#define arch_get_mmap_end(addr)	(TASK_SIZE)
#endif

#ifndef arch_get_mmap_base
#define arch_get_mmap_base(addr, base) (base)
#endif

extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along
	 * with CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be a false negative. But we do not care; if init or
	 * another oom-unkillable task does this, it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done &&
			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies the per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 * PF_MEMALLOC_NOCMA implies no allocation from the CMA region.
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	if (unlikely(current->flags &
		     (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) {
		/*
		 * NOIO implies both NOIO and NOFS and it is a weaker context
		 * so always make sure it takes precedence.
		 */
		if (current->flags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (current->flags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;
#ifdef CONFIG_CMA
		if (current->flags & PF_MEMALLOC_NOCMA)
			flags &= ~__GFP_MOVABLE;
#endif
	}
	return flags;
}
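
/*
 * Illustrative example (not in the original header): inside a
 * memalloc_nofs_save() scope, a GFP_KERNEL request is transparently
 * degraded by the function above:
 *
 *	gfp_t flags = current_gfp_context(GFP_KERNEL);
 *	// flags == GFP_KERNEL & ~__GFP_FS, i.e. equivalent to GFP_NOFS
 */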

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(void);
extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(void) { }
static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with the flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by the pairing
 * memalloc_noio_save function. Always make sure that the given flags
 * value is the return value from that memalloc_noio_save call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
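
/*
 * Illustrative usage sketch (not in the original header): wrap an
 * IO-critical section so that allocations inside it cannot recurse
 * into the IO path:
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	...every allocation here implicitly behaves as GFP_NOIO...
 *
 *	memalloc_noio_restore(noio_flags);
 */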

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with the flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by the pairing
 * memalloc_nofs_save function. Always make sure that the given flags
 * value is the return value from that memalloc_nofs_save call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
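
/*
 * Illustrative sketch (not in the original header): because the save
 * function returns the previous PF_MEMALLOC_NOFS state, these scopes
 * nest correctly:
 *
 *	unsigned int outer = memalloc_nofs_save();
 *	...
 *	unsigned int inner = memalloc_nofs_save();
 *	...
 *	memalloc_nofs_restore(inner);	// still NOFS: outer scope active
 *	...
 *	memalloc_nofs_restore(outer);	// NOFS scope ends here
 */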

/*
 * Marks an implicit PF_MEMALLOC scope: the task is acting as a memory
 * reclaimer itself, so its allocations may dip into reserves and will
 * not recurse into direct reclaim.
 */
static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

#ifdef CONFIG_CMA
static inline unsigned int memalloc_nocma_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;

	current->flags |= PF_MEMALLOC_NOCMA;
	return flags;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
}
#else
static inline unsigned int memalloc_nocma_save(void)
{
	return 0;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
}
#endif

#ifdef CONFIG_MEMCG
/**
 * memalloc_use_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope.
 * All __GFP_ACCOUNT allocations until the end of the scope will be
 * charged to the given memcg.
 *
 * NOTE: This function is not nesting safe.
 */
static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
{
	WARN_ON_ONCE(current->active_memcg);
	current->active_memcg = memcg;
}

/**
 * memalloc_unuse_memcg - Ends the remote memcg charging scope.
 *
 * This function marks the end of the remote memcg charging scope started by
 * memalloc_use_memcg().
 */
static inline void memalloc_unuse_memcg(void)
{
	current->active_memcg = NULL;
}
#else
static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
{
}

static inline void memalloc_unuse_memcg(void)
{
}
#endif
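
/*
 * Illustrative sketch (not in the original header): charge an
 * allocation made on behalf of another cgroup to that cgroup's memcg:
 *
 *	memalloc_use_memcg(memcg);
 *	ptr = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
 *	memalloc_unuse_memcg();
 */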

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */