// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common implementation of switch_mm_irqs_off
 *
 * Copyright IBM Corp. 2017
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

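/*
 * switch_mm_pgdir() stashes the new mm's PGD wherever this
 * sub-architecture's fault handlers expect to find it: in the thread
 * struct on 32-bit, in the PACA on 64-bit Book3E, and nowhere at all
 * elsewhere, where no cached copy is needed.
 */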
#if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 32-bit keeps track of the current PGDIR in the thread struct */
	tsk->thread.pgdir = mm->pgd;
#ifdef CONFIG_PPC_BOOK3S_32
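	/* Book3S 32 also caches the user segment register 0 value (KUEP/KUAP) */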
	tsk->thread.sr0 = mm->context.sr0;
#endif
#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
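	/*
	 * BookE/40x KUAP runs the kernel with SPRN_PID set to 0; cache the
	 * mm's PID so it can be restored on return to user space.
	 */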
	tsk->thread.pid = mm->context.id;
#endif
}
#elif defined(CONFIG_PPC_BOOK3E_64)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
	get_paca()->pgd = mm->pgd;
#ifdef CONFIG_PPC_KUAP
	tsk->thread.pid = mm->context.id;
#endif
}
#else
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm) { }
#endif

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	int cpu = smp_processor_id();
	bool new_on_cpu = false;

	/* Mark this context as having been used on the new CPU */
	if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
		VM_WARN_ON_ONCE(next == &init_mm);
		cpumask_set_cpu(cpu, mm_cpumask(next));
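		/* The active-CPU count lets TLB flushing tell local mms from global ones */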
		inc_mm_active_cpus(next);

		/*
		 * This full barrier orders the store to the cpumask above vs
		 * a subsequent load which allows this CPU/MMU to begin loading
		 * translations for 'next' from page table PTEs into the TLB.
		 *
		 * When using the radix MMU, that operation is the load of the
		 * MMU context id, which is then moved to SPRN_PID.
		 * For the hash MMU it is either the first load from slb_cache
		 * in switch_slb() to preload the SLBs, or, in the SLB fault
		 * handler, the load of get_user_context which loads the
		 * context for the VSID hash in order to insert a new SLB.
		 *
		 * On the other side, the barrier is in mm/tlb-radix.c for
		 * radix which orders earlier stores to clear the PTEs before
		 * the load of mm_cpumask to check which CPU TLBs should be
		 * flushed. For hash, pte_xchg to clear the PTE includes the
		 * barrier.
		 *
		 * This full barrier is also needed by membarrier when
		 * switching between processes after store to rq->curr, before
		 * user-space memory accesses.
		 */
		smp_mb();

		new_on_cpu = true;
	}

	/* Some subarchs need to track the PGD elsewhere */
	switch_mm_pgdir(tsk, next);

	/*
	 * Nothing else to do if we aren't actually switching, e.g. when a
	 * kernel thread has been running lazily on this mm (lazy TLB) and
	 * we now switch to a user thread of the same mm.
	 */
	if (prev == next)
		return;

	/*
	 * We must stop all AltiVec data streams (dssall) before changing
	 * the HW context.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile (PPC_DSSALL);

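	/*
	 * If 'next' was new on this CPU, the smp_mb() above already provided
	 * the full barrier membarrier requires between the store to rq->curr
	 * and the first user-space accesses, so the explicit call can be
	 * skipped.
	 */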
	if (!new_on_cpu)
		membarrier_arch_switch_mm(prev, next, tsk);

	/*
	 * The actual HW switching method differs between the various
	 * sub-architectures; it is kept out of line for now.
	 */
	switch_mmu_context(prev, next, tsk);

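	/*
	 * Switching away never clears this CPU from prev's cpumask; the bit
	 * only goes away once prev's translations have been flushed from
	 * this CPU, so it must still be set here.
	 */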
	VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(prev)));
}

#ifndef CONFIG_PPC_BOOK3S_64
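/*
 * Tear down the context's PTE-fragment page, if one is still attached,
 * when the address space goes away. (Book3S 64 provides its own
 * arch_exit_mmap().)
 */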
void arch_exit_mmap(struct mm_struct *mm)
{
	void *frag = pte_frag_get(&mm->context);

	if (frag)
		pte_frag_destroy(frag);
}
#endif