/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on; mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The full memory barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
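
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * deferred consumer pinning the mm_struct itself with mmgrab()/mmdrop().
 * process_mm_later() is an invented placeholder. Note that mmgrab() only
 * keeps the mm_struct allocation alive; the address space may already be
 * torn down, so use mmget_not_zero() before touching it.
 *
 *	mmgrab(mm);		// pin the mm_struct across the async work
 *	process_mm_later(mm);	// hypothetical deferred consumer
 *	...
 *	mmdrop(mm);		// in the consumer: release the pin
 */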

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as mmput(), but performs the slow path from async context. It can
 * also be called from atomic context.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
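/*
 * Illustrative sketch (not part of the original header): inspecting another
 * task's address space. get_task_mm() returns NULL once the mm is going
 * away; the reference it takes must be dropped with mmput().
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		// mm_users is elevated: the address space is stable here
 *		mmput(mm);
 *	}
 */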
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the given mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check: it can be a false negative. But we do not care; if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done &&
			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
		/*
		 * NOIO implies both NOIO and NOFS and it is a weaker context
		 * so always make sure it takes precedence.
		 */
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;
	}
	return flags;
}
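
/*
 * Illustrative sketch (not part of the original header): an allocation path
 * honouring the caller task's scoped gfp restrictions. "size" is a
 * hypothetical local; kmalloc()/GFP_KERNEL are the usual primitives.
 *
 *	gfp_t gfp = current_gfp_context(GFP_KERNEL);
 *	void *buf = kmalloc(size, gfp);	// GFP_NOIO/GFP_NOFS if scoped
 */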

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(void);
extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(void) { }
static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
 * Always make sure that the given flags value is the return value from the
 * pairing memalloc_noio_save call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
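
/*
 * Illustrative sketch (not part of the original header): an IO critical
 * section, e.g. in a block driver, where an allocation recursing into the
 * IO layer could deadlock. Inside the scope every allocation implicitly
 * behaves as GFP_NOIO, whatever mask the call site passes.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	buf = kmalloc(size, GFP_KERNEL);	// effectively GFP_NOIO here
 *	memalloc_noio_restore(noio_flags);
 */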

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
 * Always make sure that the given flags value is the return value from the
 * pairing memalloc_nofs_save call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
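
/*
 * Illustrative sketch (not part of the original header): a filesystem
 * transaction where recursing into FS reclaim could deadlock. Allocations
 * inside the scope implicitly drop __GFP_FS.
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *	buf = kmalloc(size, GFP_KERNEL);	// effectively GFP_NOFS here
 *	memalloc_nofs_restore(nofs_flags);
 */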

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

#ifdef CONFIG_CMA
static inline unsigned int memalloc_nocma_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;

	current->flags |= PF_MEMALLOC_NOCMA;
	return flags;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
}
#else
static inline unsigned int memalloc_nocma_save(void)
{
	return 0;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
}
#endif

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All the
 * __GFP_ACCOUNT allocations until the end of the scope will be charged to the
 * given memcg.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (in_interrupt()) {
		old = this_cpu_read(int_active_memcg);
		this_cpu_write(int_active_memcg, memcg);
	} else {
		old = current->active_memcg;
		current->active_memcg = memcg;
	}

	return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif
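
/*
 * Illustrative sketch (not part of the original header): charging
 * __GFP_ACCOUNT allocations to a remote memcg. Because the scopes nest,
 * the previous value returned by set_active_memcg() must be restored.
 *
 *	struct mem_cgroup *old = set_active_memcg(memcg);
 *	buf = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT); // charged to memcg
 *	set_active_memcg(old);
 */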

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY		= (1U << 6),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ			= (1U << 7),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
	MEMBARRIER_FLAG_RSEQ		= (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */