// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION
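
/*
 * The 64-bit mm->context.id combines the hardware ASID (the low ASID_BITS
 * bits) with a generation number in the bits above it. asid_generation is
 * advanced by ASID_FIRST_VERSION on every rollover, so an mm whose
 * generation no longer matches must be allocated a fresh ASID.
 */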

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

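/*
 * active_asids holds the ASID currently installed on each CPU and is
 * cleared atomically at rollover; reserved_asids remembers the last ASID
 * a CPU was running so that it survives into the new generation.
 * tlb_flush_pending marks CPUs that must flush their TLBs before running
 * with a new-generation ASID.
 */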
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there is
 * no need for a reserved set of tables (the active ASID tracking prevents
 * any issues across a rollover).
 */
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/*
	 * Copy TTBR1 into TTBR0.
	 * This points at swapper_pg_dir, which contains only global
	 * entries so any speculative walks are perfectly safe.
	 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread_task(thread)) << ASID_BITS;
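	/*
	 * Keep the existing ASID field (the low ASID_BITS bits) of
	 * CONTEXTIDR and install the task's PID in the PROCID field above
	 * it, so debuggers and trace logic can identify the process.
	 */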
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid & ~ASID_MASK, asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
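	/*
	 * cur_idx remembers where the previous search of asid_map finished,
	 * so allocation does not always rescan the bitmap from the start.
	 */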
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, as we reserve ASID #0 to switch
	 * via TTBR0 and to avoid speculative page table walks from hitting
	 * in any partial walk caches, which could be populated from
	 * overlapping level-1 descriptors used to map both the module
	 * area and the userspace stack.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid == NUM_USER_ASIDS) {
		generation = atomic64_add_return(ASID_FIRST_VERSION,
						 &asid_generation);
		flush_context(cpu);
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
	}

	__set_bit(asid, asid_map);
	cur_idx = asid;
	cpumask_clear(mm_cpumask(mm));
	return asid | generation;
}

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	check_vmalloc_seq(mm);

	/*
	 * We cannot update the pgd and the ASID atomically with classic
	 * MMU, so switch exclusively to global mappings to avoid
	 * speculative page table walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);
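	/*
	 * Fast path: if the ASID is from the current generation and the
	 * xchg returns a non-zero active_asids value (i.e. no rollover has
	 * cleared this CPU's slot in the meantime), the ASID is still
	 * valid and we can switch without taking cpu_asid_lock.
	 */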
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

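	/* Perform any TLB/branch predictor flush deferred by a rollover. */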
	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}