1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _LINUX_MMU_CONTEXT_H |
3 | #define _LINUX_MMU_CONTEXT_H |
4 | |
5 | #include <asm/mmu_context.h> |
6 | #include <asm/mmu.h> |
7 | |
8 | /* Architectures that care about IRQ state in switch_mm can override this. */ |
9 | #ifndef switch_mm_irqs_off |
10 | # define switch_mm_irqs_off switch_mm |
11 | #endif |
12 | |
#ifndef leave_mm
/*
 * Fallback for architectures that do not provide their own leave_mm():
 * nothing to do. @cpu is accepted for interface compatibility and ignored.
 */
static inline void leave_mm(int cpu)
{
}
#endif
16 | |
17 | /* |
18 | * CPUs that are capable of running user task @p. Must contain at least one |
19 | * active CPU. It is assumed that the kernel can run on all CPUs, so calling |
20 | * this for a kernel thread is pointless. |
21 | * |
22 | * By default, we assume a sane, homogeneous system. |
23 | */ |
#ifndef task_cpu_possible_mask
/* No arch override: every possible CPU is assumed able to run any task. */
# define task_cpu_possible_mask(p) cpu_possible_mask
# define task_cpu_possible(cpu, p) true
#else
/* Arch supplies the mask; check @cpu for membership in it. */
# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
#endif
30 | |
#ifndef mm_untag_mask
/*
 * Default address untagging mask: all bits set, i.e. no address bits are
 * reserved for tags on this architecture. @mm is unused in the fallback.
 */
static inline unsigned long mm_untag_mask(struct mm_struct *mm)
{
	return ~0UL;
}
#endif
37 | |
#ifndef arch_pgtable_dma_compat
/*
 * Fallback when the architecture provides no override: report the mm's
 * page tables as DMA-compatible unconditionally. @mm is not examined.
 */
static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
{
	return true;
}
#endif
44 | |
#endif /* _LINUX_MMU_CONTEXT_H */
46 | |