/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is a preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

static inline void smp_mb__after_mmgrab(void)
{
	smp_mb__after_atomic();
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The full memory barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
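
/*
 * Usage sketch: pinning an mm for an unbounded time with mmgrab()/mmdrop().
 * The names below (example_watcher, watched_mm) are hypothetical. mm_count
 * only keeps the mm_struct allocation alive; the address space itself must
 * still be pinned with mmget_not_zero() before it is dereferenced.
 *
 *	struct example_watcher {
 *		struct mm_struct *watched_mm;
 *	};
 *
 *	static void example_watch_mm(struct example_watcher *w, struct mm_struct *mm)
 *	{
 *		mmgrab(mm);			// mm_struct may now outlive its task
 *		w->watched_mm = mm;
 *	}
 *
 *	static void example_unwatch_mm(struct example_watcher *w)
 *	{
 *		mmdrop(w->watched_mm);		// pairs with the mmgrab() above
 *		w->watched_mm = NULL;
 *	}
 */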

#ifdef CONFIG_PREEMPT_RT
/*
 * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
 * by far the least expensive way to do that.
 */
static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

	__mmdrop(mm);
}

/*
 * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
 * kernels via RCU.
 */
static inline void mmdrop_sched(struct mm_struct *mm)
{
	/* Provides a full memory barrier. See mmdrop() */
	if (atomic_dec_and_test(&mm->mm_count))
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#else
static inline void mmdrop_sched(struct mm_struct *mm)
{
	mmdrop(mm);
}
#endif

/* Helpers for lazy TLB mm refcounting */
static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
		mmgrab(mm);
}

static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) {
		mmdrop(mm);
	} else {
		/*
		 * mmdrop_lazy_tlb must provide a full memory barrier, see the
		 * membarrier comment in finish_task_switch() which relies on this.
		 */
		smp_mb();
	}
}

static inline void mmdrop_lazy_tlb_sched(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
		mmdrop_sched(mm);
	else
		smp_mb(); /* see mmdrop_lazy_tlb() above */
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as above, but performs the slow path from async context. Can be
 * called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif
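
/*
 * Usage sketch: safely accessing an address space from a context that only
 * holds an mm_count reference. The helper name (example_access_mm) is
 * hypothetical.
 *
 *	static void example_access_mm(struct mm_struct *mm)
 *	{
 *		if (!mmget_not_zero(mm))
 *			return;		// address space already torn down
 *
 *		// ... walk VMAs, access user memory, etc. ...
 *
 *		mmput(mm);		// drop the mm_users reference
 *	}
 */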

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);
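
/*
 * Usage sketch: taking a short-lived mm_users reference on another task's mm
 * via get_task_mm(). The helper name (example_inspect_task_mm) is hypothetical.
 *
 *	static void example_inspect_task_mm(struct task_struct *task)
 *	{
 *		struct mm_struct *mm = get_task_mm(task);
 *
 *		if (!mm)
 *			return;		// kernel thread, or task already exiting
 *
 *		// ... inspect mm (e.g. mm->total_vm) ...
 *
 *		mmput(mm);
 *	}
 */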

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
#ifndef arch_get_mmap_end
#define arch_get_mmap_end(addr, len, flags)	(TASK_SIZE)
#endif

#ifndef arch_get_mmap_base
#define arch_get_mmap_base(addr, base) (base)
#endif

extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);

unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be a false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done &&
	      rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 * PF_MEMALLOC_PIN  implies !GFP_MOVABLE
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
		/*
		 * NOIO implies both NOIO and NOFS and it is a weaker context
		 * so always make sure it takes precedence.
		 */
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;

		if (pflags & PF_MEMALLOC_PIN)
			flags &= ~__GFP_MOVABLE;
	}
	return flags;
}

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(unsigned long ip);
extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(unsigned long ip) { }
static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/* Any memory-allocation retry loop should use
 * memalloc_retry_wait(), and pass the flags for the most
 * constrained allocation attempt that might have failed.
 * This provides useful documentation of where loops are,
 * and a central place to fine tune the waiting as the MM
 * implementation changes.
 */
static inline void memalloc_retry_wait(gfp_t gfp_flags)
{
	/* We use io_schedule_timeout because waiting for memory
	 * typically includes waiting for dirty pages to be
	 * written out, which requires IO.
	 */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	gfp_flags = current_gfp_context(gfp_flags);
	if (gfpflags_allow_blocking(gfp_flags) &&
	    !(gfp_flags & __GFP_NORETRY))
		/* Probably waited already, no need for much more */
		io_schedule_timeout(1);
	else
		/* Probably didn't wait, and has now released a lock,
		 * so now is a good time to wait
		 */
		io_schedule_timeout(HZ/50);
}
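
/*
 * Usage sketch: the kind of retry loop the comment above refers to. The
 * helper (example_alloc_retry) and its gfp mask are hypothetical; the point
 * is to wait via memalloc_retry_wait() with the most constrained mask used.
 *
 *	static void *example_alloc_retry(size_t size)
 *	{
 *		void *p;
 *
 *		while (!(p = kmalloc(size, GFP_NOFS | __GFP_NOWARN)))
 *			memalloc_retry_wait(GFP_NOFS);
 *
 *		return p;
 *	}
 */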

/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in functions
 * that might allocate, but often don't. Compiles to nothing without
 * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp_mask allows blocking.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
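
/*
 * Usage sketch: annotating a function that only allocates on its slow path,
 * so lockdep still sees the potential reclaim dependency on every call. The
 * function and structure names (example_prepare, example_cache,
 * example_refill) are hypothetical.
 *
 *	static int example_prepare(struct example_cache *c, gfp_t gfp)
 *	{
 *		might_alloc(gfp);	// documents: "this may allocate and sleep"
 *
 *		if (c->preallocated)
 *			return 0;	// fast path, no allocation this time
 *
 *		return example_refill(c, gfp);
 *	}
 */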

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
 * Always make sure that the given flags value is the return value from the
 * pairing memalloc_noio_save call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
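
/*
 * Usage sketch: a block-layer style path that must not recurse into I/O
 * brackets its work with the NOIO scope. The helpers (example_resubmit,
 * do_the_work) are hypothetical.
 *
 *	static void example_resubmit(void)
 *	{
 *		unsigned int noio_flags = memalloc_noio_save();
 *
 *		do_the_work();		// allocations here behave as GFP_NOIO
 *
 *		memalloc_noio_restore(noio_flags);
 *	}
 */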

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
 * Always make sure that the given flags value is the return value from the
 * pairing memalloc_nofs_save call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
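
/*
 * Usage sketch: a filesystem holding transaction locks uses the NOFS scope so
 * that allocations done by callees cannot recurse back into the filesystem.
 * The helper (example_journal_write) is hypothetical.
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *	example_journal_write();	// allocations here behave as GFP_NOFS
 *	memalloc_nofs_restore(nofs_flags);
 */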

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

static inline unsigned int memalloc_pin_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_PIN;

	current->flags |= PF_MEMALLOC_PIN;
	return flags;
}

static inline void memalloc_pin_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags;
}

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All the
 * __GFP_ACCOUNT allocations till the end of the scope will be charged to the
 * given memcg.
 *
 * Please make sure that the caller has a reference to the passed memcg structure,
 * so its lifetime is guaranteed to exceed the scope between two
 * set_active_memcg() calls.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (!in_task()) {
		old = this_cpu_read(int_active_memcg);
		this_cpu_write(int_active_memcg, memcg);
	} else {
		old = current->active_memcg;
		current->active_memcg = memcg;
	}

	return old;
}
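
/*
 * Usage sketch: charging __GFP_ACCOUNT allocations to a remote memcg, saving
 * and restoring the previous value so scopes nest correctly. The target_memcg
 * pointer is hypothetical and assumed to be already referenced by the caller.
 *
 *	struct mem_cgroup *old_memcg = set_active_memcg(target_memcg);
 *
 *	p = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);	// charged to target_memcg
 *
 *	set_active_memcg(old_memcg);
 */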
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = (1U << 6),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = (1U << 7),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE = (1U << 0),
	MEMBARRIER_FLAG_RSEQ = (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */