// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 */
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched/signal.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/user.h>
#include <linux/export.h>
#include <linux/perf_event.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/system_info.h>
#include <asm/thread_notify.h>
#include <asm/traps.h>
#include <asm/vfp.h>
#include <asm/neon.h>

#include "vfpinstr.h"
#include "vfp.h"
static bool have_vfp __ro_after_init;

/*
 * Dual-use variable.
 * Used during startup: set to non-zero if the VFP checks fail.
 * After startup, holds the VFP architecture version.
 */
static unsigned int VFP_arch;

#ifdef CONFIG_CPU_FEROCEON
extern unsigned int VFP_arch_feroceon __alias(VFP_arch);
#endif

/*
 * The pointer to the vfpstate structure of the thread which currently
 * owns the context held in the VFP hardware, or NULL if the hardware
 * context is invalid.
 *
 * For UP, this is sufficient to tell which thread owns the VFP context.
 * However, for SMP, we also need to check the CPU number stored in the
 * saved state to catch migrations.
 */
union vfp_state *vfp_current_hw_state[NR_CPUS];

/*
 * Is the most up-to-date state of 'thread' stored in this CPU's hardware?
 * Must be called from non-preemptible context.
 */
static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
{
#ifdef CONFIG_SMP
	if (thread->vfpstate.hard.cpu != cpu)
		return false;
#endif
	return vfp_current_hw_state[cpu] == &thread->vfpstate;
}

/*
 * Force a reload of the VFP context from the thread structure.  We do
 * this by ensuring that access to the VFP hardware is disabled and
 * that vfp_current_hw_state is cleared.  Must be called from
 * non-preemptible context.
 */
static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
{
	if (vfp_state_in_hw(cpu, thread)) {
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
		vfp_current_hw_state[cpu] = NULL;
	}
#ifdef CONFIG_SMP
	thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}

/*
 * Per-thread VFP initialization.
 */
static void vfp_thread_flush(struct thread_info *thread)
{
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu;

	/*
	 * Disable VFP to ensure we initialize it first.  We must ensure
	 * that the modification of vfp_current_hw_state[] and hardware
	 * disable are done for the same CPU and without preemption.
	 *
	 * Do this first to ensure that preemption won't overwrite our
	 * state saving should access to the VFP be enabled at this point.
	 */
	cpu = get_cpu();
	if (vfp_current_hw_state[cpu] == vfp)
		vfp_current_hw_state[cpu] = NULL;
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	put_cpu();

	memset(vfp, 0, sizeof(union vfp_state));

	vfp->hard.fpexc = FPEXC_EN;
	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
#ifdef CONFIG_SMP
	vfp->hard.cpu = NR_CPUS;
#endif
}

/*
 * Per-thread VFP cleanup at thread exit.
 */
static void vfp_thread_exit(struct thread_info *thread)
{
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu = get_cpu();

	if (vfp_current_hw_state[cpu] == vfp)
		vfp_current_hw_state[cpu] = NULL;
	put_cpu();
}

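/*
 * Per-thread VFP copy at fork: sync the parent's live hardware state to
 * memory first so the child inherits an up-to-date copy, then mark the
 * child's state as resident on no CPU so it is (re)loaded on first use.
 */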
static void vfp_thread_copy(struct thread_info *thread)
{
	struct thread_info *parent = current_thread_info();

	vfp_sync_hwstate(parent);
	thread->vfpstate = parent->vfpstate;
#ifdef CONFIG_SMP
	thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}

/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 *  THREAD_NOTIFY_SWITCH:
 *   - the previously running thread will not be scheduled onto another CPU.
 *   - the next thread to be run (v) will not be running on another CPU.
 *   - thread->cpu is the local CPU number
 *   - not preemptible as we're called in the middle of a thread switch
 *  THREAD_NOTIFY_FLUSH:
 *   - the thread (v) will be running on the local CPU, so
 *	v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *	but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 *  THREAD_NOTIFY_EXIT
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;
	u32 fpexc;
#ifdef CONFIG_SMP
	unsigned int cpu;
#endif

	switch (cmd) {
	case THREAD_NOTIFY_SWITCH:
		fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
		cpu = thread->cpu;

		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif

		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		break;

	case THREAD_NOTIFY_FLUSH:
		vfp_thread_flush(thread);
		break;

	case THREAD_NOTIFY_EXIT:
		vfp_thread_exit(thread);
		break;

	case THREAD_NOTIFY_COPY:
		vfp_thread_copy(thread);
		break;
	}

	return NOTIFY_DONE;
}
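
/*
 * Thread lifecycle notifier block: wired up from vfp_init() via
 * thread_register_notifier(), so the switch/flush/exit/copy handling
 * above runs on every thread event once VFP support is detected.
 */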
static struct notifier_block vfp_notifier_block = {
	.notifier_call = vfp_notifier,
};

/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
	/*
	 * This is the same as NWFPE, because it's not clear what
	 * this is used for
	 */
	current->thread.error_code = 0;
	current->thread.trap_no = 6;

	send_sig_fault(SIGFPE, sicode,
		       (void __user *)(instruction_pointer(regs) - 4),
		       current);
}

static void vfp_panic(char *reason, u32 inst)
{
	int i;

	pr_err("VFP: Error: %s\n", reason);
	pr_err("VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
		fmrx(FPEXC), fmrx(FPSCR), inst);
	for (i = 0; i < 32; i += 2)
		pr_err("VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
		       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}

/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
	int si_code = 0;

	pr_debug("VFP: raising exceptions %08x\n", exceptions);

	if (exceptions == VFP_EXCEPTION_ERROR) {
		vfp_panic("unhandled bounce", inst);
		vfp_raise_sigfpe(FPE_FLTINV, regs);
		return;
	}

	/*
	 * If any of the status flags are set, update the FPSCR.
	 * Comparison instructions always return at least one of
	 * these flags set.
	 */
	if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
		fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);

	fpscr |= exceptions;

	fmxr(FPSCR, fpscr);

#define RAISE(stat,en,sig)			\
	if (exceptions & stat && fpscr & en)	\
		si_code = sig;

	/*
	 * These are arranged in priority order, lowest to highest.
	 */
	RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
	RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
	RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);

	if (si_code)
		vfp_raise_sigfpe(si_code, regs);
}

/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
	u32 exceptions = VFP_EXCEPTION_ERROR;

	pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

	if (INST_CPRTDO(inst)) {
		if (!INST_CPRT(inst)) {
			/*
			 * CPDO
			 */
			if (vfp_single(inst)) {
				exceptions = vfp_single_cpdo(inst, fpscr);
			} else {
				exceptions = vfp_double_cpdo(inst, fpscr);
			}
		} else {
			/*
			 * A CPRT instruction can not appear in FPINST2, nor
			 * can it cause an exception.  Therefore, we do not
			 * have to emulate it.
			 */
		}
	} else {
		/*
		 * A CPDT instruction can not appear in FPINST2, nor can
		 * it cause an exception.  Therefore, we do not have to
		 * emulate it.
		 */
	}
	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
	return exceptions & ~VFP_NAN_FLAG;
}

/*
 * Package up a bounce condition.
 */
static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, fpsid, exceptions;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

	/*
	 * At this point, FPEXC can have the following configuration:
	 *
	 *  EX DEX IXE
	 *  0   1   x   - synchronous exception
	 *  1   x   0   - asynchronous exception
	 *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
	 *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
	 *                implementation), undefined otherwise
	 *
	 * Clear various bits and enable access to the VFP so we can
	 * handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));

	fpsid = fmrx(FPSID);
	orig_fpscr = fpscr = fmrx(FPSCR);

	/*
	 * Check for the special VFP subarch 1 and FPSCR.IXE bit case
	 */
	if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
	    && (fpscr & FPSCR_IXE)) {
		/*
		 * Synchronous exception, emulate the trigger instruction
		 */
		goto emulate;
	}

	if (fpexc & FPEXC_EX) {
		/*
		 * Asynchronous exception. The instruction is read from FPINST
		 * and the interrupted instruction has to be restarted.
		 */
		trigger = fmrx(FPINST);
		regs->ARM_pc -= 4;
	} else if (!(fpexc & FPEXC_DEX)) {
		/*
		 * Illegal combination of bits. It can be caused by an
		 * unallocated VFP instruction but with FPSCR.IXE set and not
		 * on VFP subarch 1.
		 */
		vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
		return;
	}

	/*
	 * Modify fpscr to indicate the number of iterations remaining.
	 * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
	 * whether FPEXC.VECITR or FPSCR.LEN is used.
	 */
	if (fpexc & (FPEXC_EX | FPEXC_VV)) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);

		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}

	/*
	 * Handle the first FP instruction.  We used to take note of the
	 * FPEXC bounce reason, but this appears to be unreliable.
	 * Emulate the bounced instruction instead.
	 */
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

	/*
	 * If there isn't a second FP instruction, exit now. Note that
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
	 */
	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
		return;

	/*
	 * The barrier() here prevents fpinst2 being read
	 * before the condition above.
	 */
	barrier();
	trigger = fmrx(FPINST2);

 emulate:
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
}

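/*
 * Grant full access to cp10/cp11 on the calling CPU.  Runs via
 * on_each_cpu() from vfp_init() and from the CPU hotplug "starting"
 * callback, so it must execute on the CPU being configured.
 */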
static void vfp_enable(void *unused)
{
	u32 access;

	BUG_ON(preemptible());
	access = get_copro_access();

	/*
	 * Enable full access to VFP (cp10 and cp11)
	 */
	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}

/* Called by platforms on which we want to disable VFP because it may not be
 * present on all CPUs within an SMP complex.  Needs to be called prior to
 * vfp_init().
 */
void __init vfp_disable(void)
{
	if (VFP_arch) {
		pr_debug("%s: should be called prior to vfp_init\n", __func__);
		return;
	}
	VFP_arch = 1;
}

#ifdef CONFIG_CPU_PM
static int vfp_pm_suspend(void)
{
	struct thread_info *ti = current_thread_info();
	u32 fpexc = fmrx(FPEXC);

	/* if vfp is on, then save state for resumption */
	if (fpexc & FPEXC_EN) {
		pr_debug("%s: saving vfp state\n", __func__);
		vfp_save_state(&ti->vfpstate, fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	} else if (vfp_current_hw_state[ti->cpu]) {
#ifndef CONFIG_SMP
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
		fmxr(FPEXC, fpexc);
#endif
	}

	/* clear any information we had about last context state */
	vfp_current_hw_state[ti->cpu] = NULL;

	return 0;
}

static void vfp_pm_resume(void)
{
	/* ensure we have access to the vfp */
	vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}

static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		vfp_pm_suspend();
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		vfp_pm_resume();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block vfp_cpu_pm_notifier_block = {
	.notifier_call = vfp_cpu_pm_notifier,
};

static void vfp_pm_init(void)
{
	cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
}

#else
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

/*
 * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
 * with the hardware state.
 */
void vfp_sync_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	local_bh_disable();

	if (vfp_state_in_hw(cpu, thread)) {
		u32 fpexc = fmrx(FPEXC);

		/*
		 * Save the last VFP state on this CPU.
		 */
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
		fmxr(FPEXC, fpexc);
	}

	local_bh_enable();
	put_cpu();
}

/* Ensure that the thread reloads the hardware VFP state on the next use. */
void vfp_flush_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	vfp_force_reload(cpu, thread);

	put_cpu();
}

/*
 * Save the current VFP state into the provided structures and prepare
 * for entry into a new function (signal handler).
 */
int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
				    struct user_vfp_exc *ufp_exc)
{
	struct thread_info *thread = current_thread_info();
	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;

	/* Ensure that the saved hwstate is up-to-date. */
	vfp_sync_hwstate(thread);

	/*
	 * Copy the floating point registers. There can be unused
	 * registers; see asm/hwcap.h for details.
	 */
	memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));

	/*
	 * Copy the status and control register.
	 */
	ufp->fpscr = hwstate->fpscr;

	/*
	 * Copy the exception registers.
	 */
	ufp_exc->fpexc = hwstate->fpexc;
	ufp_exc->fpinst = hwstate->fpinst;
	ufp_exc->fpinst2 = hwstate->fpinst2;

	/* Ensure that VFP is disabled. */
	vfp_flush_hwstate(thread);

	/*
	 * As per the PCS, clear the length and stride bits for function
	 * entry.
	 */
	hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
	return 0;
}
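
/*
 * The function above and vfp_restore_user_hwstate() below are used as a
 * pair by the signal code: one snapshots the VFP state into the user
 * signal frame before the handler runs, the other sanitises and
 * reinstates that state on return from the handler.
 */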

/* Sanitise and restore the current VFP state from the provided structures. */
int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
{
	struct thread_info *thread = current_thread_info();
	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
	unsigned long fpexc;

	/* Disable VFP to avoid corrupting the new thread state. */
	vfp_flush_hwstate(thread);

	/*
	 * Copy the floating point registers. There can be unused
	 * registers; see asm/hwcap.h for details.
	 */
	memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
	/*
	 * Copy the status and control register.
	 */
	hwstate->fpscr = ufp->fpscr;

	/*
	 * Sanitise and restore the exception registers.
	 */
	fpexc = ufp_exc->fpexc;

	/* Ensure the VFP is enabled. */
	fpexc |= FPEXC_EN;

	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
	hwstate->fpexc = fpexc;

	hwstate->fpinst = ufp_exc->fpinst;
	hwstate->fpinst2 = ufp_exc->fpinst2;

	return 0;
}

/*
 * VFP hardware can lose all context when a CPU goes offline.
 * As we will be running in SMP mode with CPU hotplug, we will save the
 * hardware state at every thread switch.  We clear our held state when
 * a CPU has been killed, indicating that the VFP hardware doesn't contain
 * a thread's VFP state.  When a CPU starts up, we re-enable access to the
 * VFP hardware. The callbacks below are called on the CPU which
 * is being offlined/onlined.
 */
static int vfp_dying_cpu(unsigned int cpu)
{
	vfp_current_hw_state[cpu] = NULL;
	return 0;
}

static int vfp_starting_cpu(unsigned int unused)
{
	vfp_enable(NULL);
	return 0;
}

static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr)
{
	/*
	 * If we reach this point, a floating point exception has been raised
	 * while running in kernel mode. If the NEON/VFP unit was enabled at the
	 * time, it means a VFP instruction has been issued that requires
	 * software assistance to complete, something which is not currently
	 * supported in kernel mode.
	 * If the NEON/VFP unit was disabled, and the location pointed to below
	 * is properly preceded by a call to kernel_neon_begin(), something has
	 * caused the task to be scheduled out and back in again. In this case,
	 * rebuilding and running with CONFIG_DEBUG_ATOMIC_SLEEP enabled should
	 * be helpful in localizing the problem.
	 */
	if (fmrx(FPEXC) & FPEXC_EN)
		pr_crit("BUG: unsupported FP instruction in kernel mode\n");
	else
		pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n");
	pr_crit("FPEXC == 0x%08x\n", fmrx(FPEXC));
	return 1;
}

/*
 * vfp_support_entry - Handle VFP exception
 *
 * @regs:	pt_regs structure holding the register state at exception entry
 * @trigger:	The opcode of the instruction that triggered the exception
 *
 * Returns 0 if the exception was handled, or an error code otherwise.
 */
static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
{
	struct thread_info *ti = current_thread_info();
	u32 fpexc;

	if (unlikely(!have_vfp))
		return -ENODEV;

	if (!user_mode(regs))
		return vfp_kmode_exception(regs, trigger);

	local_bh_disable();
	fpexc = fmrx(FPEXC);

	/*
	 * If the VFP unit was not enabled yet, we have to check whether the
	 * VFP state in the CPU's registers is the most recent VFP state
	 * associated with the process. On UP systems, we don't save the VFP
	 * state eagerly on a context switch, so we may need to save the
	 * VFP state to memory first, as it may belong to another process.
	 */
	if (!(fpexc & FPEXC_EN)) {
		/*
		 * Enable the VFP unit but mask the FP exception flag for the
		 * time being, so we can access all the registers.
		 */
		fpexc |= FPEXC_EN;
		fmxr(FPEXC, fpexc & ~FPEXC_EX);

		/*
		 * Check whether or not the VFP state in the CPU's registers is
		 * the most recent VFP state associated with this task. On SMP,
		 * migration may result in multiple CPUs holding VFP states
		 * that belong to the same task, but only the most recent one
		 * is valid.
		 */
		if (!vfp_state_in_hw(ti->cpu, ti)) {
			if (!IS_ENABLED(CONFIG_SMP) &&
			    vfp_current_hw_state[ti->cpu] != NULL) {
				/*
				 * This CPU is currently holding the most
				 * recent VFP state associated with another
				 * task, and we must save that to memory first.
				 */
				vfp_save_state(vfp_current_hw_state[ti->cpu],
					       fpexc);
			}

			/*
			 * We can now proceed with loading the task's VFP state
			 * from memory into the CPU registers.
			 */
			fpexc = vfp_load_state(&ti->vfpstate);
			vfp_current_hw_state[ti->cpu] = &ti->vfpstate;
#ifdef CONFIG_SMP
			/*
			 * Record that this CPU is now the one holding the most
			 * recent VFP state of the task.
			 */
			ti->vfpstate.hard.cpu = ti->cpu;
#endif
		}

		if (fpexc & FPEXC_EX)
			/*
			 * Handle the pending exception before retrying:
			 * branch out before setting an FPEXC value that
			 * would stop us from reading the FP registers.
			 */
			goto bounce;

		/*
		 * No FP exception is pending: just enable the VFP and
		 * replay the instruction that trapped.
		 */
		fmxr(FPEXC, fpexc);
	} else {
		/* Check for synchronous or asynchronous exceptions */
		if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
			u32 fpscr = fmrx(FPSCR);

			/*
			 * On some implementations of the VFP subarch 1,
			 * setting FPSCR.IXE causes all the CDP instructions to
			 * be bounced synchronously without setting the
			 * FPEXC.EX bit
			 */
			if (!(fpscr & FPSCR_IXE)) {
				if (!(fpscr & FPSCR_LENGTH_MASK)) {
					pr_debug("not VFP\n");
					local_bh_enable();
					return -ENOEXEC;
				}
				fpexc |= FPEXC_DEX;
			}
		}
bounce:		regs->ARM_pc += 4;
		VFP_bounce(trigger, fpexc, regs);
	}

	local_bh_enable();
	return 0;
}

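/*
 * Undef hooks matching the ARM and Thumb encodings of the Advanced SIMD
 * (NEON) instruction space, plus the unconditional coprocessor-space
 * encodings used by later FP/SIMD extensions; all of them route the
 * trapped instruction into vfp_support_entry().
 */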
static struct undef_hook neon_support_hook[] = {{
	.instr_mask = 0xfe000000,
	.instr_val = 0xf2000000,
	.cpsr_mask = PSR_T_BIT,
	.cpsr_val = 0,
	.fn = vfp_support_entry,
}, {
	.instr_mask = 0xff100000,
	.instr_val = 0xf4000000,
	.cpsr_mask = PSR_T_BIT,
	.cpsr_val = 0,
	.fn = vfp_support_entry,
}, {
	.instr_mask = 0xef000000,
	.instr_val = 0xef000000,
	.cpsr_mask = PSR_T_BIT,
	.cpsr_val = PSR_T_BIT,
	.fn = vfp_support_entry,
}, {
	.instr_mask = 0xff100000,
	.instr_val = 0xf9000000,
	.cpsr_mask = PSR_T_BIT,
	.cpsr_val = PSR_T_BIT,
	.fn = vfp_support_entry,
}, {
	.instr_mask = 0xff000800,
	.instr_val = 0xfc000800,
	.cpsr_mask = 0,
	.cpsr_val = 0,
	.fn = vfp_support_entry,
}, {
	.instr_mask = 0xff000800,
	.instr_val = 0xfd000800,
	.cpsr_mask = 0,
	.cpsr_val = 0,
	.fn = vfp_support_entry,
}, {
	.instr_mask = 0xff000800,
	.instr_val = 0xfe000800,
	.cpsr_mask = 0,
	.cpsr_val = 0,
	.fn = vfp_support_entry,
}};

static struct undef_hook vfp_support_hook = {
	.instr_mask = 0x0c000e00,
	.instr_val = 0x0c000a00,
	.fn = vfp_support_entry,
};

#ifdef CONFIG_KERNEL_MODE_NEON

/*
 * Kernel-side NEON support functions
 */
void kernel_neon_begin(void)
{
	struct thread_info *thread = current_thread_info();
	unsigned int cpu;
	u32 fpexc;

	local_bh_disable();

	/*
	 * Kernel mode NEON is only allowed outside of hardirq context with
	 * preemption and softirq processing disabled. This will make sure that
	 * the kernel mode NEON register contents never need to be preserved.
	 */
	BUG_ON(in_hardirq());
	cpu = __smp_processor_id();

	fpexc = fmrx(FPEXC) | FPEXC_EN;
	fmxr(FPEXC, fpexc);

	/*
	 * Save the userland NEON/VFP state. Under UP,
	 * the owner could be a task other than 'current'
	 */
	if (vfp_state_in_hw(cpu, thread))
		vfp_save_state(&thread->vfpstate, fpexc);
#ifndef CONFIG_SMP
	else if (vfp_current_hw_state[cpu] != NULL)
		vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif
	vfp_current_hw_state[cpu] = NULL;
}
EXPORT_SYMBOL(kernel_neon_begin);

void kernel_neon_end(void)
{
	/* Disable the NEON/VFP unit. */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	local_bh_enable();
}
EXPORT_SYMBOL(kernel_neon_end);
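
/*
 * Typical usage (a minimal sketch; the NEON body is illustrative and
 * would normally live in a separately compiled, NEON-enabled helper):
 *
 *	kernel_neon_begin();
 *	... issue NEON/VFP instructions ...
 *	kernel_neon_end();
 *
 * kernel_neon_begin() may not be called from hardirq context, and the
 * code between the two calls must not sleep.
 */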

#endif /* CONFIG_KERNEL_MODE_NEON */

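/*
 * Boot-time probe: if reading FPSID in vfp_init() below traps because no
 * VFP hardware is present, this hook marks VFP as absent and skips the
 * faulting instruction.
 */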
static int __init vfp_detect(struct pt_regs *regs, unsigned int instr)
{
	VFP_arch = UINT_MAX;	/* mark as not present */
	regs->ARM_pc += 4;
	return 0;
}

static struct undef_hook vfp_detect_hook __initdata = {
	.instr_mask = 0x0c000e00,
	.instr_val = 0x0c000a00,
	.cpsr_mask = MODE_MASK,
	.cpsr_val = SVC_MODE,
	.fn = vfp_detect,
};

/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();
	unsigned int isar6;

	/*
	 * Enable the access to the VFP on all online CPUs so the
	 * following test on FPSID will succeed.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6)
		on_each_cpu(vfp_enable, NULL, 1);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already setup to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	register_undef_hook(&vfp_detect_hook);
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	unregister_undef_hook(&vfp_detect_hook);

	pr_info("VFP support v0.3: ");
	if (VFP_arch) {
		pr_cont("not present\n");
		return 0;
	/* Extract the architecture on CPUID scheme */
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		VFP_arch = vfpsid & FPSID_CPUID_ARCH_MASK;
		VFP_arch >>= FPSID_ARCH_BIT;
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations. Only check
		 * for NEON if the hardware has the MVFR registers.
		 */
		if (IS_ENABLED(CONFIG_NEON) &&
		    (fmrx(MVFR1) & 0x000fff00) == 0x00011100) {
			elf_hwcap |= HWCAP_NEON;
			for (int i = 0; i < ARRAY_SIZE(neon_support_hook); i++)
				register_undef_hook(&neon_support_hook[i]);
		}

		if (IS_ENABLED(CONFIG_VFPv3)) {
			u32 mvfr0 = fmrx(MVFR0);
			if (((mvfr0 & MVFR0_DP_MASK) >> MVFR0_DP_BIT) == 0x2 ||
			    ((mvfr0 & MVFR0_SP_MASK) >> MVFR0_SP_BIT) == 0x2) {
				elf_hwcap |= HWCAP_VFPv3;
				/*
				 * Check for VFPv3 D16 and VFPv4 D16.  CPUs in
				 * this configuration only have 16 x 64bit
				 * registers.
				 */
				if ((mvfr0 & MVFR0_A_SIMD_MASK) == 1)
					/* also v4-D16 */
					elf_hwcap |= HWCAP_VFPv3D16;
				else
					elf_hwcap |= HWCAP_VFPD32;
			}

			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
				elf_hwcap |= HWCAP_VFPv4;
			if (((fmrx(MVFR1) & MVFR1_ASIMDHP_MASK) >> MVFR1_ASIMDHP_BIT) == 0x2)
				elf_hwcap |= HWCAP_ASIMDHP;
			if (((fmrx(MVFR1) & MVFR1_FPHP_MASK) >> MVFR1_FPHP_BIT) == 0x3)
				elf_hwcap |= HWCAP_FPHP;
		}

		/*
		 * Check for the presence of Advanced SIMD Dot Product
		 * instructions.
		 */
		isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);
		if (cpuid_feature_extract_field(isar6, 4) == 0x1)
			elf_hwcap |= HWCAP_ASIMDDP;
		/*
		 * Check for the presence of Advanced SIMD Floating point
		 * half-precision multiplication instructions.
		 */
		if (cpuid_feature_extract_field(isar6, 8) == 0x1)
			elf_hwcap |= HWCAP_ASIMDFHM;
		/*
		 * Check for the presence of Advanced SIMD Bfloat16
		 * floating point instructions.
		 */
		if (cpuid_feature_extract_field(isar6, 20) == 0x1)
			elf_hwcap |= HWCAP_ASIMDBF16;
		/*
		 * Check for the presence of Advanced SIMD and floating point
		 * Int8 matrix multiplication instructions.
		 */
		if (cpuid_feature_extract_field(isar6, 24) == 0x1)
			elf_hwcap |= HWCAP_I8MM;

	/* Extract the architecture version on pre-cpuid scheme */
	} else {
		if (vfpsid & FPSID_NODOUBLE) {
			pr_cont("no double precision support\n");
			return 0;
		}

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
	}

	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING,
				  "arm/vfp:starting", vfp_starting_cpu,
				  vfp_dying_cpu);

	have_vfp = true;

	register_undef_hook(&vfp_support_hook);
	thread_register_notifier(&vfp_notifier_block);
	vfp_pm_init();

	/*
	 * We detected VFP, and the support code is
	 * in place; report VFP support to userspace.
	 */
	elf_hwcap |= HWCAP_VFP;

	pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
		(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
		VFP_arch,
		(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
		(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
		(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

	return 0;
}

core_initcall(vfp_init);