/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <asm/page.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif

	.equ	NR_syscalls, __NR_syscalls

#include "entry-header.S"

saved_psr	.req	r8
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
saved_pc	.req	r9
#define TRACE(x...) x
#else
saved_pc	.req	lr
#define TRACE(x...)
#endif
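
/*
 * Note on the aliases above: when the tracing/context-tracking hooks are
 * built in, the C calls they make clobber lr, so the caller's PC is kept
 * in the callee-saved r9 for the duration; without those hooks lr itself
 * survives and no extra register is tied up. saved_psr lives in r8 either
 * way.
 */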

	.section .entry.text,"ax",%progbits
	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING_USER) || \
	IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
 * This is the fast syscall return path. We do as little as possible here,
 * such as avoiding writing r0 to the stack. We only use this path if we
 * have tracing, context tracking and rseq debug disabled - the overheads
 * from those features make this path too inefficient.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
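	/*
	 * All the work-pending and syscall-tracing flags live in the low
	 * 16 bits of TI_FLAGS (see asm/thread_info.h), so one MOVS with
	 * LSL #16 tests the lot: Z set means nothing to do.
	 */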
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	movs	r1, r1, lsl #16
	bne	fast_work_pending

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing, context tracking,
 * or rseq debug is enabled. As we will need to call out to some C functions,
 * we save r0 first to avoid needing to save registers around each C function
 * call.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	movs	r1, r1, lsl #16
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
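	/*
	 * do_work_pending() returns 0 once all pending work has been
	 * processed; a non-zero return requests a syscall restart, with
	 * a negative value selecting restart_syscall() (the movlt below)
	 * rather than replaying the original call.
	 */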
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	str	scno, [tsk, #TI_ABI_SYSCALL]	@ make sure tracers see update
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)

/*
 * "slow" syscall return path. "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them. Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	enable_irq_notrace		@ enable interrupts
	mov	r0, sp			@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace		@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	movs	r1, r1, lsl #16
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
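	/*
	 * copy_thread() parks a kernel thread's function in r5 (with its
	 * argument in r4) and leaves r5 zero for a user fork, so a
	 * non-zero r5 means "call fn(arg), then fall into the user
	 * return path via the label below".
	 */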
	cmp	r5, #0
	movne	r0, r4
	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
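/*
 * Spectre-BHB workaround: scrub the branch history buffer on kernel entry
 * with a loop of eight taken branches (or with BPIALL in the variant
 * below, on CPUs where that is sufficient). The user registers must be
 * saved first because the loop counter clobbers r8; both stubs then join
 * the common SWI path at local label 3 in vector_swi.
 */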
ENTRY(vector_bhb_loop8_swi)
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}
	mov	r8, #8
1:	b	2f
2:	subs	r8, r8, #1
	bne	1b
	dsb	nsh
	isb
	b	3f
ENDPROC(vector_bhb_loop8_swi)

	.align	5
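/* As above, but for CPUs where a branch predictor invalidate (BPIALL) suffices. */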
ENTRY(vector_bhb_bpiall_swi)
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}
	mcr	p15, 0, r8, c7, c5, 6	@ BPIALL
	isb
	b	3f
ENDPROC(vector_bhb_bpiall_swi)
#endif
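
/*
 * The SWI entry constructs a struct pt_regs on the SVC stack: r0-r12 via
 * stmia, the banked user sp/lr via the '^' form (ARM) or store_user_sp_lr
 * (Thumb), then the caller's pc, psr and original r0. S_OLD_R0 preserves
 * the pre-call r0 so that signal-driven syscall restart can rewind it
 * after r0 has been overwritten by a return value.
 */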
	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
3:
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	saved_psr, spsr			@ called from non-FIQ mode, so ok.
 TRACE(	mov	saved_pc, lr		)
	str	saved_pc, [sp, #S_PC]		@ Save calling PC
	str	saved_psr, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	reload_current r10, ip
	zero_fp
	alignment_trap r10, ip, cr_alignment
	asm_trace_hardirqs_on save=0
	enable_irq_notrace
	ct_user_exit save=0

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	saved_psr, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [saved_pc, #-4]	)	@ get SWI instruction
#else
 USER(	ldr	r10, [saved_pc, #-4]	)	@ get SWI instruction
#endif
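	/*
	 * The USER() loads above carry exception-table entries: if
	 * fetching the SWI instruction faults, control transfers to the
	 * local 9001 fixup near the end of this handler.
	 */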
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	saved_psr, #PSR_T_BIT		@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [saved_pc, #-4]	)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [saved_pc, #-4]	)	@ get SWI instruction
#endif

	/* saved_psr and saved_pc are now dead */

	uaccess_disable tbl
	get_thread_info tsk

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	strne	r10, [tsk, #TI_ABI_SYSCALL]
	streq	scno, [tsk, #TI_ABI_SYSCALL]
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	str	scno, [tsk, #TI_ABI_SYSCALL]
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#else
	str	scno, [tsk, #TI_ABI_SYSCALL]
#endif
	/*
	 * Reload the registers that may have been corrupted on entry to
	 * the syscall assembly (by tracing or context tracking).
	 */
 TRACE(	ldmia	sp, {r0 - r3}		)

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
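	/*
	 * Per the AAPCS, C syscall implementations take arguments five
	 * and six on the stack, so r4/r5 sit just below the pt_regs
	 * frame; S_OFF accounts for those 8 bytes whenever the frame is
	 * addressed from here on.
	 */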

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	invoke_syscall tbl, scno, r10, __ret_fast_syscall

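	/*
	 * Not a table syscall: numbers in the ARM-private range
	 * (__ARM_NR_BASE and up: cacheflush, set_tls and friends) are
	 * dispatched through arm_syscall(); anything else gets -ENOSYS
	 * from sys_ni_syscall(), with 'why' cleared so restart handling
	 * is skipped.
	 */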
	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT. Instead, return to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in. This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, saved_pc, #4
	str	lr, [sp, #S_PC]
	get_thread_info tsk
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)
	.ltorg

/*
 * This is the really slow path. We're going to be doing
 * context switches, and waiting for our parent to respond.
 */
__sys_trace:
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter
	mov	scno, r0
	invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack

__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.macro	syscall_table_start, sym
	.equ	__sys_nr, 0
	.type	\sym, #object
ENTRY(\sym)
	.endm

	.macro	syscall, nr, func
	.ifgt	__sys_nr - \nr
	.error	"Duplicated/unordered system call entry"
	.endif
	.rept	\nr - __sys_nr
	.long	sys_ni_syscall
	.endr
	.long	\func
	.equ	__sys_nr, \nr + 1
	.endm
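
/*
 * Illustrative use (sys_example_table is hypothetical; the numbers match
 * the EABI read/restart_syscall assignments):
 *
 *	syscall_table_start sys_example_table
 *	syscall 0, sys_restart_syscall
 *	syscall 3, sys_read		@ 1 and 2 padded with sys_ni_syscall
 *	syscall_table_end sys_example_table
 *
 * Gaps between entries are padded with sys_ni_syscall, and a duplicate
 * or decreasing number trips the .error above.
 */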

	.macro	syscall_table_end, sym
	.ifgt	__sys_nr - __NR_syscalls
	.error	"System call table too big"
	.endif
	.rept	__NR_syscalls - __sys_nr
	.long	sys_ni_syscall
	.endr
	.size	\sym, . - \sym
	.endm

#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
#define __SYSCALL(nr, func) syscall nr, func

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI, a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
syscall_table_start sys_call_table
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
syscall_table_end sys_call_table

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
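/*
 * sys_syscall is the OABI indirect syscall, i.e. syscall(nr, ...): the
 * real number arrives in r0, so the remaining arguments are shuffled
 * down one slot before indirecting through the table. The Spectre guard
 * clamps an out-of-range number to 0 and issues csdb so a mispredicted
 * bounds check cannot steer the speculative table load.
 */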
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
#ifdef CONFIG_CPU_SPECTRE
	movhs	scno, #0
	csdb
#endif
	stmialo	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0				@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0				@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

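/*
 * Long-standing ABI quirk: user space passes sizeof(struct statfs64) in
 * r1, and OABI-compiled code passes the padded 88-byte size where the
 * kernel's packed layout is 84 bytes, so 88 is quietly rewritten before
 * handing off.
 */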
sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always units of 4K. If we can't do the requested
 * offset, we return EINVAL.
 */
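/*
 * ARM uses 4K pages, so the 4K-unit offset is already a page offset and
 * needs no conversion; it only has to be (re)stored into the sixth
 * argument's stack slot before tail-calling sys_mmap_pgoff.
 */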
sys_mmap2:
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */
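
/*
 * Under EABI a 64-bit argument must occupy an even/odd register pair or
 * an 8-byte-aligned stack slot, while OABI packs it into whatever comes
 * next. For pread64(fd, buf, count, pos), OABI delivers pos in r3+r4 but
 * the EABI sys_pread64 expects it in the two stack slots at sp and sp+4,
 * hence the stmia below; truncate64 and friends instead shift the pair
 * up into r2/r3.
 */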

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
syscall_table_start sys_oabi_call_table
#undef __SYSCALL_WITH_COMPAT
#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
#include <calls-oabi.S>
syscall_table_end sys_oabi_call_table

#endif