1 | /* |
2 | * linux/arch/nios2/kernel/entry.S |
3 | * |
4 | * Copyright (C) 2013-2014 Altera Corporation |
5 | * Copyright (C) 2009, Wind River Systems Inc |
6 | * |
7 | * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com |
8 | * |
9 | * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com) |
10 | * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>, |
11 | * Kenneth Albanowski <kjahds@kjahds.com>, |
12 | * Copyright (C) 2000 Lineo Inc. (www.lineo.com) |
13 | * Copyright (C) 2004 Microtronix Datacom Ltd. |
14 | * |
15 | * This file is subject to the terms and conditions of the GNU General Public |
16 | * License. See the file "COPYING" in the main directory of this archive |
17 | * for more details. |
18 | * |
19 | * Linux/m68k support by Hamish Macdonald |
20 | * |
21 | * 68060 fixes by Jesper Skov |
22 | * ColdFire support by Greg Ungerer (gerg@snapgear.com) |
23 | * 5307 fixes by David W. Miller |
24 | * linux 2.4 support David McCullough <davidm@snapgear.com> |
25 | */ |
26 | |
27 | #include <linux/sys.h> |
28 | #include <linux/linkage.h> |
29 | #include <asm/asm-offsets.h> |
30 | #include <asm/asm-macros.h> |
31 | #include <asm/thread_info.h> |
32 | #include <asm/errno.h> |
33 | #include <asm/setup.h> |
34 | #include <asm/entry.h> |
35 | #include <asm/unistd.h> |
36 | #include <asm/processor.h> |
37 | |
/*
 * GET_THREAD_INFO reg
 * Compute the current thread_info pointer into \reg by masking the kernel
 * stack pointer down to the THREAD_SIZE-aligned base of the stack
 * (thread_info lives at the bottom of the kernel stack).
 * Two code shapes, picked at assembly time:
 *  - if the mask's low 16 bits are all zero, a single andhi suffices;
 *  - otherwise materialise the low half of the mask and AND it with sp.
 * Clobbers: \reg only.
 * NOTE(review): the .else path masks with only %lo(~(THREAD_SIZE-1)) —
 * correct as long as THREAD_SIZE-1 fits in 16 bits; confirm against the
 * configured THREAD_SIZE.
 */
.macro	GET_THREAD_INFO reg
.if THREAD_SIZE & 0xffff0000
	andhi	\reg, sp, %hi(~(THREAD_SIZE-1))
.else
	addi	\reg, r0, %lo(~(THREAD_SIZE-1))
	and	\reg, \reg, sp
.endif
.endm
46 | |
.macro	kuser_cmpxchg_check
	/*
	 * Make sure our user space atomic helper is restarted if it was
	 * interrupted in a critical region.
	 * ea-4 = address of interrupted insn (ea must be preserved).
	 * sp = saved regs.
	 * cmpxchg_ldw = first critical insn, cmpxchg_stw = last critical insn.
	 * If ea <= cmpxchg_stw and ea > cmpxchg_ldw then saved EA is set to
	 * cmpxchg_ldw + 4.
	 *
	 * This rewinds an interrupted __kuser_cmpxchg to just after its
	 * initial load, so the compare-and-store sequence re-executes
	 * atomically from the user's point of view.
	 * Clobbers: et; may rewrite ea and the saved PT_EA slot.
	 */
	/* et = cmpxchg_stw + 4 (one past the last critical insn,
	   relocated to its user-space address in the KUSER page) */
	movui	et, (KUSER_BASE + 4 + (cmpxchg_stw - __kuser_helper_start))
	bgtu	ea, et, 1f		/* past the critical region: nothing to do */

	subi	et, et, (cmpxchg_stw - cmpxchg_ldw)	/* et = cmpxchg_ldw + 4 */
	bltu	ea, et, 1f		/* before/at the initial load: nothing to do */
	stw	et, PT_EA(sp)		/* fix up EA so eret restarts after the load */
	mov	ea, et
1:
.endm
67 | |
.section .rodata
	.align 4
/*
 * Dispatch table for hardware exception causes, indexed by the CPU's
 * exception cause (see inthandler: the value read from the "exception"
 * control register is used directly as a byte offset, one .word per cause).
 * Entries must stay in cause-number order.
 */
exception_table:
	.word	unhandled_exception		/* 0 - Reset */
	.word	unhandled_exception		/* 1 - Processor-only Reset */
	.word	external_interrupt		/* 2 - Interrupt */
	.word	handle_trap			/* 3 - Trap Instruction */

	.word	instruction_trap		/* 4 - Unimplemented instruction */
	.word	handle_illegal			/* 5 - Illegal instruction */
	.word	handle_unaligned		/* 6 - Misaligned data access */
	.word	handle_unaligned		/* 7 - Misaligned destination address */

	.word	handle_diverror			/* 8 - Division error */
	.word	protection_exception_ba		/* 9 - Supervisor-only instr. address */
	.word	protection_exception_instr	/* 10 - Supervisor only instruction */
	.word	protection_exception_ba		/* 11 - Supervisor only data address */

	.word	unhandled_exception		/* 12 - Double TLB miss (data) */
	.word	protection_exception_pte	/* 13 - TLB permission violation (x) */
	.word	protection_exception_pte	/* 14 - TLB permission violation (r) */
	.word	protection_exception_pte	/* 15 - TLB permission violation (w) */

	.word	unhandled_exception		/* 16 - MPU region violation */
92 | |
/*
 * Dispatch table for the trap instruction's 5-bit immediate (0..31),
 * indexed by handle_trap below. Trap 0 is the system-call gate; traps
 * 1-3 have dedicated handlers; 30 is KGDB's breakpoint when enabled;
 * 31 is the debugger breakpoint. Everything else is reserved.
 */
trap_table:
	.word	handle_system_call	/* 0  */
	.word	handle_trap_1		/* 1  */
	.word	handle_trap_2		/* 2  */
	.word	handle_trap_3		/* 3  */
	.word	handle_trap_reserved	/* 4  */
	.word	handle_trap_reserved	/* 5  */
	.word	handle_trap_reserved	/* 6  */
	.word	handle_trap_reserved	/* 7  */
	.word	handle_trap_reserved	/* 8  */
	.word	handle_trap_reserved	/* 9  */
	.word	handle_trap_reserved	/* 10 */
	.word	handle_trap_reserved	/* 11 */
	.word	handle_trap_reserved	/* 12 */
	.word	handle_trap_reserved	/* 13 */
	.word	handle_trap_reserved	/* 14 */
	.word	handle_trap_reserved	/* 15 */
	.word	handle_trap_reserved	/* 16 */
	.word	handle_trap_reserved	/* 17 */
	.word	handle_trap_reserved	/* 18 */
	.word	handle_trap_reserved	/* 19 */
	.word	handle_trap_reserved	/* 20 */
	.word	handle_trap_reserved	/* 21 */
	.word	handle_trap_reserved	/* 22 */
	.word	handle_trap_reserved	/* 23 */
	.word	handle_trap_reserved	/* 24 */
	.word	handle_trap_reserved	/* 25 */
	.word	handle_trap_reserved	/* 26 */
	.word	handle_trap_reserved	/* 27 */
	.word	handle_trap_reserved	/* 28 */
	.word	handle_trap_reserved	/* 29 */
#ifdef CONFIG_KGDB
	.word	handle_kgdb_breakpoint	/* 30 KGDB breakpoint */
#else
	.word	instruction_trap	/* 30 */
#endif
	.word	handle_breakpoint	/* 31 */
130 | |
131 | .text |
132 | .set noat |
133 | .set nobreak |
134 | |
/*
 * Common entry point for all exceptions and interrupts.
 * Saves the full register frame (pt_regs) on the kernel stack, repairs a
 * possibly-interrupted kuser cmpxchg helper, clears STATUS.EH, then
 * dispatches through exception_table on the hardware cause.
 * On entry from C handlers' point of view: r4 = pt_regs, r5 = cause<<2.
 */
ENTRY(inthandler)
	SAVE_ALL

	kuser_cmpxchg_check

	/* Clear EH bit before we get a new exception in the kernel
	 * and after we have saved it to the exception frame. This is done
	 * whether it's trap, tlb-miss or interrupt. If we don't do this
	 * estatus is not updated the next exception.
	 */
	rdctl	r24, status
	movi	r9, %lo(~STATUS_EH)
	and	r24, r24, r9
	wrctl	status, r24

	/* Read cause and vector and branch to the associated handler.
	 * The "exception" control register value is used directly as a byte
	 * offset into the word table (i.e. it already encodes cause*4 —
	 * NOTE(review): per the Nios II cause-field layout; confirm against
	 * the processor handbook for this core). */
	mov	r4, sp			/* pt_regs pointer for the C handlers */
	rdctl	r5, exception
	movia	r9, exception_table
	add	r24, r9, r5
	ldw	r24, 0(r24)
	jmp	r24
157 | |
158 | |
159 | /*********************************************************************** |
160 | * Handle traps |
161 | *********************************************************************** |
162 | */ |
ENTRY(handle_trap)
	/* Re-fetch the trap instruction itself (ea points one past it) and
	 * extract its 5-bit immediate as a word offset into trap_table:
	 * srli 4 + andi 0x7c isolates the imm5 field pre-multiplied by 4. */
	ldwio	r24, -4(ea)	/* instruction that caused the exception */
	srli	r24, r24, 4
	andi	r24, r24, 0x7c
	movia	r9, trap_table
	add	r24, r24, r9
	ldw	r24, 0(r24)
	jmp	r24
171 | |
172 | |
173 | /*********************************************************************** |
174 | * Handle system calls |
175 | *********************************************************************** |
176 | */ |
/*
 * System-call entry (trap 0).
 * On entry: r2 = syscall number, r4-r9 = arguments (r4/r5 must be reloaded
 * from pt_regs because the common entry path clobbered them).
 * Validates the syscall number, dispatches through sys_call_table, and
 * translates the C return value into the (r2 = value-or-positive-errno,
 * r7 = error flag) userspace convention.
 */
ENTRY(handle_system_call)
	/* Enable interrupts: the syscall body may sleep */
	rdctl	r10, status
	ori	r10, r10, STATUS_PIE
	wrctl	status, r10

	/* Reload registers destroyed by common code. */
	ldw	r4, PT_R4(sp)
	ldw	r5, PT_R5(sp)

local_restart:	/* re-entered from Lsignal_return for syscall restart */
	/* Remember the original syscall number; consulted below to decide
	 * whether error translation applies, and needed for restart. */
	stw	r2, PT_ORIG_R2(sp)
	/* Check that the requested system call is within limits */
	movui	r1, __NR_syscalls
	bgeu	r2, r1, ret_invsyscall
	slli	r1, r2, 2			/* word index into the table */
	movhi	r11, %hiadj(sys_call_table)	/* %hiadj/%lo pair forms the address */
	add	r1, r1, r11
	ldw	r1, %lo(sys_call_table)(r1)

	/* Check if we are being traced (ptrace et al.) */
	GET_THREAD_INFO r11
	ldw	r11, TI_FLAGS(r11)
	BTBNZ	r11, r11, TIF_SYSCALL_TRACE, traced_system_call

	/* Execute the system call */
	callr	r1

	/* If the syscall returns a negative result:
	 *   Set r7 to 1 to indicate error,
	 *   Negate r2 to get a positive error code
	 * If the syscall returns zero or a positive value:
	 *   Set r7 to 0.
	 * The sigreturn system calls will skip the code below by
	 * adding to register ra. To avoid destroying registers
	 */
translate_rc_and_ret:
	movi	r1, 0
	bge	r2, zero, 3f		/* non-negative: success, r7 = 0 */
	/* NOTE(review): orig_r2 == -1 appears to mark "no error translation"
	 * (negative return passed through verbatim) — confirm against the
	 * callers that store -1 in PT_ORIG_R2. */
	ldw	r1, PT_ORIG_R2(sp)
	addi	r1, r1, 1
	beq	r1, zero, 3f
	sub	r2, zero, r2		/* r2 = positive errno */
	movi	r1, 1			/* r7 = 1: error */
3:
	stw	r2, PT_R2(sp)
	stw	r1, PT_R7(sp)
end_translate_rc_and_ret:	/* label used by sys_rt_sigreturn's ra fixup */
225 | |
/*
 * Common exception/syscall return path.
 * If we are returning to user mode (ESTATUS.EU set in the saved frame),
 * go handle pending work (reschedule/signals) first; otherwise restore
 * the frame and eret straight back to the kernel context.
 */
ret_from_exception:
	ldw	r1, PT_ESTATUS(sp)
	/* if so, skip resched, signals */
	TSTBNZ	r1, r1, ESTATUS_EU, Luser_return

restore_all:
	rdctl	r10, status			/* disable intrs so the frame
						   restore cannot be re-entered */
	andi	r10, r10, %lo(~STATUS_PIE)
	wrctl	status, r10
	RESTORE_ALL
	eret

	/* If the syscall number was invalid return ENOSYS */
ret_invsyscall:
	movi	r2, -ENOSYS
	br	translate_rc_and_ret
242 | |
243 | /* This implements the same as above, except it calls |
244 | * do_syscall_trace_enter and do_syscall_trace_exit before and after the |
245 | * syscall in order for utilities like strace and gdb to work. |
246 | */ |
/*
 * Traced variant of the syscall path: identical dispatch, but bracketed
 * by do_syscall_trace_enter/do_syscall_trace_exit so strace/gdb observe
 * entry and exit. The tracer may rewrite the saved registers, so the
 * syscall number and arguments are re-read from pt_regs afterwards.
 */
traced_system_call:
	SAVE_SWITCH_STACK
	call	do_syscall_trace_enter
	RESTORE_SWITCH_STACK

	/* Create system call register arguments. The 5th and 6th
	   arguments on stack are already in place at the beginning
	   of pt_regs. */
	ldw	r2, PT_R2(sp)
	ldw	r4, PT_R4(sp)
	ldw	r5, PT_R5(sp)
	ldw	r6, PT_R6(sp)
	ldw	r7, PT_R7(sp)

	/* Fetch the syscall function; same bounds check + table lookup
	 * as the untraced path in handle_system_call. */
	movui	r1, __NR_syscalls
	bgeu	r2, r1, traced_invsyscall
	slli	r1, r2, 2
	movhi	r11, %hiadj(sys_call_table)
	add	r1, r1, r11
	ldw	r1, %lo(sys_call_table)(r1)

	callr	r1

	/* If the syscall returns a negative result:
	 *   Set r7 to 1 to indicate error,
	 *   Negate r2 to get a positive error code
	 * If the syscall returns zero or a positive value:
	 *   Set r7 to 0.
	 * The sigreturn system calls will skip the code below by
	 * adding to register ra. To avoid destroying registers
	 */
translate_rc_and_ret2:
	movi	r1, 0
	bge	r2, zero, 4f		/* non-negative: success */
	/* NOTE(review): same orig_r2 == -1 pass-through as the untraced
	 * path — keep the two in sync. */
	ldw	r1, PT_ORIG_R2(sp)
	addi	r1, r1, 1
	beq	r1, zero, 4f
	sub	r2, zero, r2
	movi	r1, 1
4:
	stw	r2, PT_R2(sp)
	stw	r1, PT_R7(sp)
end_translate_rc_and_ret2:
	SAVE_SWITCH_STACK
	call	do_syscall_trace_exit
	RESTORE_SWITCH_STACK
	br	ret_from_exception

	/* If the syscall number was invalid return ENOSYS */
traced_invsyscall:
	movi	r2, -ENOSYS
	br	translate_rc_and_ret2
300 | |
/*
 * Work-pending loop for returns to user mode.
 * Checks thread_info->flags: nothing set -> restore_all;
 * TIF_NEED_RESCHED -> schedule() then re-check via ret_from_exception;
 * pending signal / notify-resume -> do_notify_resume(), possibly
 * restarting the interrupted syscall without leaving the kernel.
 */
Luser_return:
	GET_THREAD_INFO	r11			/* get thread_info pointer */
	ldw	r10, TI_FLAGS(r11)		/* get thread_info->flags */
	ANDI32	r11, r10, _TIF_WORK_MASK
	beq	r11, r0, restore_all		/* Nothing to do */
	BTBZ	r1, r10, TIF_NEED_RESCHED, Lsignal_return

	/* Reschedule work */
	call	schedule
	br	ret_from_exception		/* re-check flags after scheduling */

Lsignal_return:
	ANDI32	r1, r10, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
	beq	r1, r0, restore_all
	mov	r4, sp				/* pt_regs argument */
	SAVE_SWITCH_STACK
	call	do_notify_resume
	/* non-zero return from do_notify_resume means "restart syscall" */
	beq	r2, r0, no_work_pending
	RESTORE_SWITCH_STACK
	/* prepare restart syscall here without leaving kernel */
	ldw	r2, PT_R2(sp)	/* reload syscall number in r2 */
	ldw	r4, PT_R4(sp)	/* reload syscall arguments r4-r9 */
	ldw	r5, PT_R5(sp)
	ldw	r6, PT_R6(sp)
	ldw	r7, PT_R7(sp)
	ldw	r8, PT_R8(sp)
	ldw	r9, PT_R9(sp)
	br	local_restart	/* restart syscall */

no_work_pending:
	RESTORE_SWITCH_STACK
	br	ret_from_exception
333 | |
334 | /*********************************************************************** |
335 | * Handle external interrupts. |
336 | *********************************************************************** |
337 | */ |
338 | /* |
339 | * This is the generic interrupt handler (for all hardware interrupt |
340 | * sources). It figures out the vector number and calls the appropriate |
341 | * interrupt service routine directly. |
342 | */ |
/*
 * Generic hardware-interrupt handler. Scans the enabled pending-interrupt
 * bits from lowest bit (highest priority) upward, calling do_IRQ for each
 * in turn until nothing remains pending, then falls into the common
 * interrupt return path.
 */
external_interrupt:
	rdctl	r12, ipending
	rdctl	r9, ienable
	and	r12, r12, r9		/* only interrupts that are enabled */
	/* skip if no interrupt is pending */
	beq	r12, r0, ret_from_interrupt

	/*
	 * Process an external hardware interrupt.
	 */

	addi	ea, ea, -4		/* re-issue the interrupted instruction */
	stw	ea, PT_EA(sp)
2:	movi	r4, %lo(-1)		/* Start from bit position 0,
					   highest priority */
	/* This is the IRQ # for handler call; the loop below finds the
	 * index of the lowest set bit in r12 by shifting and counting. */
1:	andi	r10, r12, 1		/* Isolate bit we are interested in */
	srli	r12, r12, 1		/* shift count is costly without hardware
					   multiplier */
	addi	r4, r4, 1
	beq	r10, r0, 1b
	mov	r5, sp			/* Setup pt_regs pointer for handler call */
	call	do_IRQ
	rdctl	r12, ipending		/* check again if irq still pending */
	rdctl	r9, ienable		/* Isolate possible interrupts */
	and	r12, r12, r9
	bne	r12, r0, 2b		/* restart the scan from bit 0 */
	/* br	ret_from_interrupt */	/* fall through to ret_from_interrupt */

ENTRY(ret_from_interrupt)
	ldw	r1, PT_ESTATUS(sp)	/* check if returning to kernel */
	TSTBNZ	r1, r1, ESTATUS_EU, Luser_return

#ifdef CONFIG_PREEMPTION
	/* Kernel preemption: only reschedule if preempt_count is zero,
	 * NEED_RESCHED is set, and the interrupted context had IRQs on. */
	GET_THREAD_INFO	r1
	ldw	r4, TI_PREEMPT_COUNT(r1)
	bne	r4, r0, restore_all
	ldw	r4, TI_FLAGS(r1)		/* ? Need resched set */
	BTBZ	r10, r4, TIF_NEED_RESCHED, restore_all
	ldw	r4, PT_ESTATUS(sp)		/* ? Interrupts off */
	andi	r10, r4, ESTATUS_EPIE
	beq	r10, r0, restore_all
	call	preempt_schedule_irq
#endif
	br	restore_all
388 | |
389 | /*********************************************************************** |
390 | * A few syscall wrappers |
391 | *********************************************************************** |
392 | */ |
393 | /* |
394 | * int clone(unsigned long clone_flags, unsigned long newsp, |
395 | * int __user * parent_tidptr, int __user * child_tidptr, |
396 | * int tls_val) |
397 | */ |
ENTRY(sys_clone)
	/* Wrapper around nios2_clone: saves the callee-saved register frame
	 * (needed so the child can be scheduled) and passes the TLS pointer,
	 * which arrives in r8, as the 5th C argument via the stack. */
	SAVE_SWITCH_STACK
	subi	sp, sp, 4	/* make space for tls pointer */
	stw	r8, 0(sp)	/* pass tls pointer (r8) via stack (5th argument) */
	call	nios2_clone
	addi	sp, sp, 4
	RESTORE_SWITCH_STACK
	ret
406 | |
ENTRY(sys_rt_sigreturn)
	/* do_rt_sigreturn restores the pre-signal pt_regs; its r2 return
	 * value must NOT go through the errno translation in the syscall
	 * path, so ra is bumped past translate_rc_and_ret on return. */
	SAVE_SWITCH_STACK
	mov	r4, sp			/* pt_regs argument */
	call	do_rt_sigreturn
	RESTORE_SWITCH_STACK
	addi	ra, ra, (end_translate_rc_and_ret - translate_rc_and_ret)
	ret
414 | |
415 | /*********************************************************************** |
416 | * A few other wrappers and stubs |
417 | *********************************************************************** |
418 | */ |
/* TLB permission fault: reconstruct the faulting virtual address from
 * the pteaddr control register (page number shifted back up by 10 —
 * NOTE(review): matches the nios2 PTEADDR field layout; confirm against
 * the MMU documentation) and hand it to do_page_fault as the 3rd arg
 * (r4 = pt_regs, r5 = cause were set up by inthandler). */
protection_exception_pte:
	rdctl	r6, pteaddr
	slli	r6, r6, 10
	call	do_page_fault
	br	ret_from_exception

/* Supervisor-only address fault: faulting address comes from badaddr. */
protection_exception_ba:
	rdctl	r6, badaddr
	call	do_page_fault
	br	ret_from_exception

/* Supervisor-only instruction executed in user mode. */
protection_exception_instr:
	call	handle_supervisor_instr
	br	ret_from_exception

/* Trap 31: debugger breakpoint. */
handle_breakpoint:
	call	breakpoint_c
	br	ret_from_exception

#ifdef CONFIG_NIOS2_ALIGNMENT_TRAP
/* Misaligned access with in-kernel fixup: the C handler may need the
 * callee-saved registers, hence the switch-stack save around it. */
handle_unaligned:
	SAVE_SWITCH_STACK
	call	handle_unaligned_c
	RESTORE_SWITCH_STACK
	br	ret_from_exception
#else
/* Misaligned access without fixup support: just report it. */
handle_unaligned:
	call	handle_unaligned_c
	br	ret_from_exception
#endif

/* Illegal instruction. */
handle_illegal:
	call	handle_illegal_c
	br	ret_from_exception

/* Division error. */
handle_diverror:
	call	handle_diverror_c
	br	ret_from_exception

#ifdef CONFIG_KGDB
/* Trap 30: KGDB breakpoint. */
handle_kgdb_breakpoint:
	call	kgdb_breakpoint_c
	br	ret_from_exception
#endif

/* Traps 1 and 2 have dedicated C handlers; trap 3 doubles as the
 * handler for all reserved trap numbers (4-29). */
handle_trap_1:
	call	handle_trap_1_c
	br	ret_from_exception

handle_trap_2:
	call	handle_trap_2_c
	br	ret_from_exception

handle_trap_3:
handle_trap_reserved:
	call	handle_trap_3_c
	br	ret_from_exception
476 | |
477 | /* |
478 | * Beware - when entering resume, prev (the current task) is |
479 | * in r4, next (the new task) is in r5, don't change these |
480 | * registers. |
481 | */ |
/*
 * Beware - when entering resume, prev (the current task) is
 * in r4, next (the new task) is in r5, don't change these
 * registers.
 *
 * Context switch: save prev's status register and kernel stack (with the
 * callee-saved frame pushed by SAVE_SWITCH_STACK), switch sp to next's
 * kernel stack, update _current_thread, then restore next's frame and
 * status. Interrupts are disabled across the stack switch.
 */
ENTRY(resume)

	rdctl	r7, status			/* save thread status reg */
	stw	r7, TASK_THREAD + THREAD_KPSR(r4)

	andi	r7, r7, %lo(~STATUS_PIE)	/* disable interrupts */
	wrctl	status, r7

	SAVE_SWITCH_STACK
	stw	sp, TASK_THREAD + THREAD_KSP(r4)/* save kernel stack pointer */
	ldw	sp, TASK_THREAD + THREAD_KSP(r5)/* restore new thread stack */
	movia	r24, _current_thread		/* save thread */
	GET_THREAD_INFO	r1			/* next's thread_info, from new sp */
	stw	r1, 0(r24)
	RESTORE_SWITCH_STACK

	ldw	r7, TASK_THREAD + THREAD_KPSR(r5)/* restore thread status reg */
	wrctl	status, r7
	ret
501 | |
/* First code run in a newly forked user task: finish the scheduler
 * bookkeeping, then return to user space through the exception path. */
ENTRY(ret_from_fork)
	call	schedule_tail
	br	ret_from_exception

/* First code run in a new kernel thread: finish scheduler bookkeeping,
 * then invoke the thread function (r16) with its argument (r17) —
 * register roles set up by the fork/copy_thread machinery elsewhere. */
ENTRY(ret_from_kernel_thread)
	call	schedule_tail
	mov	r4, r17	/* arg */
	callr	r16	/* function */
	br	ret_from_exception
511 | |
512 | /* |
513 | * Kernel user helpers. |
514 | * |
515 | * Each segment is 64-byte aligned and will be mapped to the <User space>. |
516 | * New segments (if ever needed) must be added after the existing ones. |
517 | * This mechanism should be used only for things that are really small and |
518 | * justified, and not be abused freely. |
519 | * |
520 | */ |
521 | |
522 | /* Filling pads with undefined instructions. */ |
/*
 * kuser_pad sym size
 * Pad the helper starting at \sym out to exactly \size bytes:
 * first zero-fill to 4-byte alignment, then fill the remaining words
 * with 0xdeadbeef (an invalid instruction pattern, so a stray jump into
 * the pad traps rather than executing silently).
 */
.macro	kuser_pad sym size
	.if	((. - \sym) & 3)
	.rept	(4 - (. - \sym) & 3)
	.byte	0
	.endr
	.endif
	.rept	((\size - (. - \sym)) / 4)
	.word	0xdeadbeef
	.endr
.endm
533 | |
/*
 * Kernel user-helper page. These fragments are mapped into user space at
 * fixed offsets from KUSER_BASE (the "@ 0x1000" comments give the user
 * addresses); each helper occupies a 64-byte slot, padded by kuser_pad.
 * Layout is ABI: existing slots must never move or change size.
 */
	.align	6
	.globl	__kuser_helper_start
__kuser_helper_start:

/* Helper-page version: number of 64-byte slots present. */
__kuser_helper_version:			/* @ 0x1000 */
	.word	((__kuser_helper_end - __kuser_helper_start) >> 6)

/*
 * Userspace compare-and-exchange helper. Not truly atomic by itself:
 * atomicity is provided by kuser_cmpxchg_check in the kernel entry path,
 * which rewinds an interrupted sequence to cmpxchg_ldw + 4.
 */
__kuser_cmpxchg:			/* @ 0x1004 */
	/*
	 * r4 pointer to exchange variable
	 * r5 old value
	 * r6 new value
	 * returns r2 = 0 on successful exchange (old value matched)
	 */
cmpxchg_ldw:
	ldw	r2, 0(r4)		/* load current value */
	sub	r2, r2, r5		/* compare with old value */
	bne	r2, zero, cmpxchg_ret	/* mismatch: r2 != 0, no store */

	/* We had a match, store the new value */
cmpxchg_stw:
	stw	r6, 0(r4)
cmpxchg_ret:
	ret

	kuser_pad __kuser_cmpxchg, 64

/* Signal return trampoline: user signal frames point ra here so that
 * returning from a handler issues the rt_sigreturn syscall. */
	.globl	__kuser_sigtramp
__kuser_sigtramp:
	movi	r2, __NR_rt_sigreturn
	trap

	kuser_pad __kuser_sigtramp, 64

	.globl	__kuser_helper_end
__kuser_helper_end:
569 | |