// SPDX-License-Identifier: GPL-2.0
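/*
 * MMU context handling for MIPS: classic per-CPU ASID management plus
 * the global MMID allocator used when the CPU supports MemoryMapIDs.
 */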
#include <linux/atomic.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);

static atomic64_t mmid_version;
static unsigned int num_mmids;
static unsigned long *mmid_map;

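/*
 * On version rollover, flush_context() parks each CPU's live MMID in
 * reserved_mmids so the task it is currently running keeps its MMID, and
 * sets every CPU's bit in tlb_flush_pending so that each CPU invalidates
 * its TLB at its next context switch.
 */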
static DEFINE_PER_CPU(u64, reserved_mmids);
static cpumask_t tlb_flush_pending;

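/*
 * Compare only the version (generation) bits of two context values,
 * ignoring the ASID/MMID bits themselves.
 */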
static bool asid_versions_eq(int cpu, u64 a, u64 b)
{
	return ((a ^ b) & asid_version_mask(cpu)) == 0;
}

void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu;
	u64 asid;

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	cpu = smp_processor_id();
	asid = asid_cache(cpu);

	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();	/* start new asid cycle */
	}

	set_cpu_context(cpu, mm, asid);
	asid_cache(cpu) = asid;
}
EXPORT_SYMBOL_GPL(get_new_mmu_context);

void check_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	/* Check if our ASID is of an older version and thus invalid */
	if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
		get_new_mmu_context(mm);
}
EXPORT_SYMBOL_GPL(check_mmu_context);

static void flush_context(void)
{
	u64 mmid;
	int cpu;

	/* Update the list of reserved MMIDs and the MMID bitmap */
	bitmap_zero(mmid_map, num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	for_each_possible_cpu(cpu) {
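		/*
		 * Zeroing asid_cache here forces any CPU racing on the
		 * check_switch_mmu_context() fast path into the slow path,
		 * where it must take cpu_mmid_lock and observe the new
		 * version.
		 */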
		mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);

		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * MMID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (mmid == 0)
			mmid = per_cpu(reserved_mmids, cpu);

		__set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
		per_cpu(reserved_mmids, cpu) = mmid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
{
	bool hit;
	int cpu;

	/*
	 * Iterate over the set of reserved MMIDs looking for a match.
	 * If we find one, then we can update our mm to use newmmid
	 * (i.e. the same MMID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old MMID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved MMID in a future
	 * generation.
	 */
	hit = false;
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_mmids, cpu) == mmid) {
			hit = true;
			per_cpu(reserved_mmids, cpu) = newmmid;
		}
	}

	return hit;
}

static u64 get_new_mmid(struct mm_struct *mm)
{
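	/*
	 * cur_idx records where the previous search left off, so that
	 * allocation continues scanning the bitmap from the most recently
	 * allocated MMID rather than restarting from the bottom each time.
	 */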
	static u32 cur_idx = MMID_KERNEL_WIRED + 1;
	u64 mmid, version, mmid_mask;

	mmid = cpu_context(0, mm);
	version = atomic64_read(&mmid_version);
	mmid_mask = cpu_asid_mask(&boot_cpu_data);

	if (!asid_versions_eq(0, mmid, 0)) {
		u64 newmmid = version | (mmid & mmid_mask);

		/*
		 * If our current MMID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_mmid(mmid, newmmid)) {
			mmid = newmmid;
			goto set_context;
		}

		/*
		 * We had a valid MMID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
			mmid = newmmid;
			goto set_context;
		}
	}

	/* Allocate a free MMID */
	mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
	if (mmid != num_mmids)
		goto reserve_mmid;

	/* We're out of MMIDs, so increment the global version */
	version = atomic64_add_return_relaxed(asid_first_version(0),
					      &mmid_version);

	/* Note currently active MMIDs & mark TLBs as requiring flushes */
	flush_context();

	/* We have more MMIDs than CPUs, so this will always succeed */
	mmid = find_first_zero_bit(mmid_map, num_mmids);

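	/*
	 * Mark the chosen MMID as in use and combine it with the current
	 * version to form the full context value.
	 */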
reserve_mmid:
	__set_bit(mmid, mmid_map);
	cur_idx = mmid;
	mmid |= version;
set_context:
	set_cpu_context(0, mm, mmid);
	return mmid;
}

void check_switch_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	u64 ctx, old_active_mmid;
	unsigned long flags;

	if (!cpu_has_mmid) {
		check_mmu_context(mm);
		write_c0_entryhi(cpu_asid(cpu, mm));
		goto setup_pgd;
	}

	/*
	 * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
	 * unnecessary.
	 *
	 * The memory ordering here is subtle. If our active_mmids is non-zero
	 * and the MMID matches the current version, then we update the CPU's
	 * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover
	 * means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on
	 *   cpu_mmid_lock in check_mmu_context(). Taking the lock synchronises
	 *   with the rollover and so we are forced to see the updated
	 *   generation.
	 *
	 * - We get a valid MMID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	ctx = cpu_context(cpu, mm);
	old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
	if (!old_active_mmid ||
	    !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
	    !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
		raw_spin_lock_irqsave(&cpu_mmid_lock, flags);

		ctx = cpu_context(cpu, mm);
		if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
			ctx = get_new_mmid(mm);

		WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
		raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
	}

	/*
	 * Invalidate the local TLB if needed. Note that we must only clear our
	 * bit in tlb_flush_pending after this is complete, so that the
	 * cpu_has_shared_ftlb_entries case below isn't misled.
	 */
	if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();
		cpumask_clear_cpu(cpu, &tlb_flush_pending);
	}

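	/* Make the new context live by writing the CP0 MemoryMapID register */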
	write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));

	/*
	 * If this CPU shares FTLB entries with its siblings and one or more of
	 * those siblings hasn't yet invalidated its TLB following a version
	 * increase then we need to invalidate any TLB entries for our MMID
	 * that we might otherwise pick up from a sibling.
	 *
	 * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in
	 * CONFIG_SMP=n kernels.
	 */
#ifdef CONFIG_SMP
	if (cpu_has_shared_ftlb_entries &&
	    cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
		/* Ensure we operate on the new MMID */
		mtc0_tlbw_hazard();

		/*
		 * Invalidate all TLB entries associated with the new
		 * MMID, and wait for the invalidation to complete.
		 */
		ginvt_mmid();
		sync_ginv();
	}
#endif

setup_pgd:
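	/* Point the TLB refill handler at the incoming task's page tables */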
	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
}
EXPORT_SYMBOL_GPL(check_switch_mmu_context);

static int mmid_init(void)
{
	if (!cpu_has_mmid)
		return 0;

	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more MMID than CPUs.
	 */
	num_mmids = asid_first_version(0);
	WARN_ON(num_mmids <= num_possible_cpus());

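	/*
	 * Start the version counter at the first generation so that a
	 * context value of 0 (an mm that has never been scheduled) can
	 * never compare equal to the live version.
	 */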
	atomic64_set(&mmid_version, asid_first_version(0));
	mmid_map = bitmap_zalloc(num_mmids, GFP_KERNEL);
	if (!mmid_map)
		panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	pr_info("MMID allocator initialised with %u entries\n", num_mmids);
	return 0;
}
early_initcall(mmid_init);