1 | /* |
2 | * Low-level system-call handling, trap handlers and context-switching |
3 | * |
4 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> |
5 | * Copyright (C) 2008-2009 PetaLogix |
6 | * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au> |
7 | * Copyright (C) 2001,2002 NEC Corporation |
8 | * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org> |
9 | * |
10 | * This file is subject to the terms and conditions of the GNU General |
11 | * Public License. See the file COPYING in the main directory of this |
12 | * archive for more details. |
13 | * |
14 | * Written by Miles Bader <miles@gnu.org> |
15 | * Heavily modified by John Williams for Microblaze |
16 | */ |
17 | |
18 | #include <linux/sys.h> |
19 | #include <linux/linkage.h> |
20 | |
21 | #include <asm/entry.h> |
22 | #include <asm/current.h> |
23 | #include <asm/processor.h> |
24 | #include <asm/exceptions.h> |
25 | #include <asm/asm-offsets.h> |
26 | #include <asm/thread_info.h> |
27 | |
28 | #include <asm/page.h> |
29 | #include <asm/unistd.h> |
30 | #include <asm/xilinx_mb_manager.h> |
31 | |
32 | #include <linux/errno.h> |
33 | #include <asm/signal.h> |
34 | #include <asm/mmu.h> |
35 | |
36 | #undef DEBUG |
37 | |
38 | #ifdef DEBUG |
39 | /* Create space for syscalls counting. */ |
40 | .section .data |
41 | .global syscall_debug_table |
42 | .align 4 |
43 | syscall_debug_table: |
44 | .space (__NR_syscalls * 4) |
45 | #endif /* DEBUG */ |
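
/*
 * Table layout: word 0 holds the running total of all syscalls, and the
 * word at byte offset (nr * 4) holds the per-syscall count. With DEBUG
 * defined the counters can be inspected from a debugger, e.g. (a usage
 * sketch): (gdb) x/16wd &syscall_debug_table
 */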
46 | |
47 | #define C_ENTRY(name) .globl name; .align 4; name |
48 | |
49 | /* |
50 | * Various ways of setting and clearing BIP in flags reg. |
 * This is mucky, but necessary on MicroBlaze versions that
 * allow msr ops to write to BIP
53 | */ |
54 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR |
55 | .macro clear_bip |
56 | msrclr r0, MSR_BIP |
57 | .endm |
58 | |
59 | .macro set_bip |
60 | msrset r0, MSR_BIP |
61 | .endm |
62 | |
63 | .macro clear_eip |
64 | msrclr r0, MSR_EIP |
65 | .endm |
66 | |
67 | .macro set_ee |
68 | msrset r0, MSR_EE |
69 | .endm |
70 | |
71 | .macro disable_irq |
72 | msrclr r0, MSR_IE |
73 | .endm |
74 | |
75 | .macro enable_irq |
76 | msrset r0, MSR_IE |
77 | .endm |
78 | |
79 | .macro set_ums |
80 | msrset r0, MSR_UMS |
81 | msrclr r0, MSR_VMS |
82 | .endm |
83 | |
84 | .macro set_vms |
85 | msrclr r0, MSR_UMS |
86 | msrset r0, MSR_VMS |
87 | .endm |
88 | |
89 | .macro clear_ums |
90 | msrclr r0, MSR_UMS |
91 | .endm |
92 | |
93 | .macro clear_vms_ums |
94 | msrclr r0, MSR_VMS | MSR_UMS |
95 | .endm |
96 | #else |
97 | .macro clear_bip |
98 | mfs r11, rmsr |
99 | andi r11, r11, ~MSR_BIP |
100 | mts rmsr, r11 |
101 | .endm |
102 | |
103 | .macro set_bip |
104 | mfs r11, rmsr |
105 | ori r11, r11, MSR_BIP |
106 | mts rmsr, r11 |
107 | .endm |
108 | |
109 | .macro clear_eip |
110 | mfs r11, rmsr |
111 | andi r11, r11, ~MSR_EIP |
112 | mts rmsr, r11 |
113 | .endm |
114 | |
115 | .macro set_ee |
116 | mfs r11, rmsr |
117 | ori r11, r11, MSR_EE |
118 | mts rmsr, r11 |
119 | .endm |
120 | |
121 | .macro disable_irq |
122 | mfs r11, rmsr |
123 | andi r11, r11, ~MSR_IE |
124 | mts rmsr, r11 |
125 | .endm |
126 | |
127 | .macro enable_irq |
128 | mfs r11, rmsr |
129 | ori r11, r11, MSR_IE |
130 | mts rmsr, r11 |
131 | .endm |
132 | |
133 | .macro set_ums |
134 | mfs r11, rmsr |
135 | ori r11, r11, MSR_VMS |
136 | andni r11, r11, MSR_UMS |
137 | mts rmsr, r11 |
138 | .endm |
139 | |
140 | .macro set_vms |
141 | mfs r11, rmsr |
142 | ori r11, r11, MSR_VMS |
143 | andni r11, r11, MSR_UMS |
144 | mts rmsr, r11 |
145 | .endm |
146 | |
147 | .macro clear_ums |
148 | mfs r11, rmsr |
149 | andni r11, r11, MSR_UMS |
150 | mts rmsr,r11 |
151 | .endm |
152 | |
153 | .macro clear_vms_ums |
154 | mfs r11, rmsr |
155 | andni r11, r11, (MSR_VMS|MSR_UMS) |
156 | mts rmsr,r11 |
157 | .endm |
158 | #endif |
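
/*
 * Both variants have the same net effect; they differ in atomicity and
 * scratch-register use. A sketch of what disable_irq expands to:
 *
 *   with MSR instructions:	msrclr r0, MSR_IE	(one insn; the old
 *				MSR value is discarded through r0)
 *   without:			mfs   r11, rmsr
 *				andi  r11, r11, ~MSR_IE
 *				mts   rmsr, r11		(clobbers r11)
 */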
159 | |
/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF
 */
164 | |
165 | /* turn on virtual protected mode save */ |
166 | #define VM_ON \ |
167 | set_ums; \ |
168 | rted r0, 2f; \ |
169 | nop; \ |
170 | 2: |
171 | |
/* turn off virtual protected mode save and user mode save */
173 | #define VM_OFF \ |
174 | clear_vms_ums; \ |
175 | rted r0, TOPHYS(1f); \ |
176 | nop; \ |
177 | 1: |
178 | |
179 | #define SAVE_REGS \ |
180 | swi r2, r1, PT_R2; /* Save SDA */ \ |
181 | swi r3, r1, PT_R3; \ |
182 | swi r4, r1, PT_R4; \ |
183 | swi r5, r1, PT_R5; \ |
184 | swi r6, r1, PT_R6; \ |
185 | swi r7, r1, PT_R7; \ |
186 | swi r8, r1, PT_R8; \ |
187 | swi r9, r1, PT_R9; \ |
188 | swi r10, r1, PT_R10; \ |
189 | swi r11, r1, PT_R11; /* save clobbered regs after rval */\ |
190 | swi r12, r1, PT_R12; \ |
191 | swi r13, r1, PT_R13; /* Save SDA2 */ \ |
192 | swi r14, r1, PT_PC; /* PC, before IRQ/trap */ \ |
193 | swi r15, r1, PT_R15; /* Save LP */ \ |
194 | swi r16, r1, PT_R16; \ |
195 | swi r17, r1, PT_R17; \ |
196 | swi r18, r1, PT_R18; /* Save asm scratch reg */ \ |
197 | swi r19, r1, PT_R19; \ |
198 | swi r20, r1, PT_R20; \ |
199 | swi r21, r1, PT_R21; \ |
200 | swi r22, r1, PT_R22; \ |
201 | swi r23, r1, PT_R23; \ |
202 | swi r24, r1, PT_R24; \ |
203 | swi r25, r1, PT_R25; \ |
204 | swi r26, r1, PT_R26; \ |
205 | swi r27, r1, PT_R27; \ |
206 | swi r28, r1, PT_R28; \ |
207 | swi r29, r1, PT_R29; \ |
208 | swi r30, r1, PT_R30; \ |
209 | swi r31, r1, PT_R31; /* Save current task reg */ \ |
210 | mfs r11, rmsr; /* save MSR */ \ |
211 | swi r11, r1, PT_MSR; |
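
/*
 * After SAVE_REGS, r1 points at a pt_regs frame. The PT_* offsets come
 * from asm-offsets.c and correspond roughly to (a sketch; see
 * asm/ptrace.h for the authoritative definition):
 *
 *	struct pt_regs {
 *		microblaze_reg_t r0 ... r31;
 *		microblaze_reg_t pc, msr, ear, esr, fsr;
 *		int pt_mode;
 *	};
 */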
212 | |
213 | #define RESTORE_REGS_GP \ |
214 | lwi r2, r1, PT_R2; /* restore SDA */ \ |
215 | lwi r3, r1, PT_R3; \ |
216 | lwi r4, r1, PT_R4; \ |
217 | lwi r5, r1, PT_R5; \ |
218 | lwi r6, r1, PT_R6; \ |
219 | lwi r7, r1, PT_R7; \ |
220 | lwi r8, r1, PT_R8; \ |
221 | lwi r9, r1, PT_R9; \ |
222 | lwi r10, r1, PT_R10; \ |
223 | lwi r11, r1, PT_R11; /* restore clobbered regs after rval */\ |
224 | lwi r12, r1, PT_R12; \ |
225 | lwi r13, r1, PT_R13; /* restore SDA2 */ \ |
226 | lwi r14, r1, PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\ |
227 | lwi r15, r1, PT_R15; /* restore LP */ \ |
228 | lwi r16, r1, PT_R16; \ |
229 | lwi r17, r1, PT_R17; \ |
230 | lwi r18, r1, PT_R18; /* restore asm scratch reg */ \ |
231 | lwi r19, r1, PT_R19; \ |
232 | lwi r20, r1, PT_R20; \ |
233 | lwi r21, r1, PT_R21; \ |
234 | lwi r22, r1, PT_R22; \ |
235 | lwi r23, r1, PT_R23; \ |
236 | lwi r24, r1, PT_R24; \ |
237 | lwi r25, r1, PT_R25; \ |
238 | lwi r26, r1, PT_R26; \ |
239 | lwi r27, r1, PT_R27; \ |
240 | lwi r28, r1, PT_R28; \ |
241 | lwi r29, r1, PT_R29; \ |
242 | lwi r30, r1, PT_R30; \ |
243 | lwi r31, r1, PT_R31; /* Restore cur task reg */ |
244 | |
245 | #define RESTORE_REGS \ |
246 | lwi r11, r1, PT_MSR; \ |
247 | mts rmsr , r11; \ |
248 | RESTORE_REGS_GP |
249 | |
250 | #define RESTORE_REGS_RTBD \ |
251 | lwi r11, r1, PT_MSR; \ |
252 | andni r11, r11, MSR_EIP; /* clear EIP */ \ |
253 | ori r11, r11, MSR_EE | MSR_BIP; /* set EE and BIP */ \ |
254 | mts rmsr , r11; \ |
255 | RESTORE_REGS_GP |
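
/*
 * A note on the _RTBD variant: it restores the saved MSR with EIP
 * cleared and EE | BIP set, which is the state an rtbd-style return
 * expects (rtbd itself clears BIP again as part of the return).
 */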
256 | |
257 | #define SAVE_STATE \ |
258 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \ |
259 | /* See if already in kernel mode.*/ \ |
260 | mfs r1, rmsr; \ |
261 | andi r1, r1, MSR_UMS; \ |
262 | bnei r1, 1f; \ |
263 | /* Kernel-mode state save. */ \ |
264 | /* Reload kernel stack-ptr. */ \ |
265 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ |
	/* FIXME: these two lines could be merged into one */		\
267 | /* tophys(r1,r1); */ \ |
268 | /* addik r1, r1, -PT_SIZE; */ \ |
269 | addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \ |
270 | SAVE_REGS \ |
271 | brid 2f; \ |
272 | swi r1, r1, PT_MODE; \ |
273 | 1: /* User-mode state save. */ \ |
274 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ |
275 | tophys(r1,r1); \ |
276 | lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \ |
	/* MS: these three instructions could be merged into one */	\
278 | /* addik r1, r1, THREAD_SIZE; */ \ |
279 | /* tophys(r1,r1); */ \ |
280 | /* addik r1, r1, -PT_SIZE; */ \ |
281 | addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \ |
282 | SAVE_REGS \ |
283 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ |
284 | swi r11, r1, PT_R1; /* Store user SP. */ \ |
285 | swi r0, r1, PT_MODE; /* Was in user-mode. */ \ |
	/* MS: clear UMS even when coming from kernel space */		\
287 | clear_ums; \ |
288 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
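
/*
 * PT_MODE records where the trap came from: zero means user mode,
 * nonzero (the kernel stack pointer is stored there) means kernel
 * mode. The return paths test it with bnei.
 */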
289 | |
290 | .text |
291 | |
292 | .extern cpuinfo |
293 | |
294 | C_ENTRY(mb_flush_dcache): |
295 | addik r1, r1, -PT_SIZE |
296 | SAVE_REGS |
297 | |
298 | addik r3, r0, cpuinfo |
299 | lwi r7, r3, CI_DCS |
300 | lwi r8, r3, CI_DCL |
301 | sub r9, r7, r8 |
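	/* flush one line per iteration until the whole data cache has
	 * been covered; CI_DCS and CI_DCL are the cache size and line
	 * length taken from cpuinfo */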
302 | 1: |
303 | wdc.flush r9, r0 |
304 | bgtid r9, 1b |
305 | addk r9, r9, r8 |
306 | |
307 | RESTORE_REGS |
308 | addik r1, r1, PT_SIZE |
309 | rtsd r15, 8 |
310 | nop |
311 | |
312 | C_ENTRY(mb_invalidate_icache): |
313 | addik r1, r1, -PT_SIZE |
314 | SAVE_REGS |
315 | |
316 | addik r3, r0, cpuinfo |
317 | lwi r7, r3, CI_ICS |
318 | lwi r8, r3, CI_ICL |
319 | sub r9, r7, r8 |
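	/* invalidate one line per iteration until the whole i-cache has
	 * been covered; CI_ICS and CI_ICL are the cache size and line
	 * length taken from cpuinfo */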
320 | 1: |
321 | wic r9, r0 |
322 | bgtid r9, 1b |
323 | addk r9, r9, r8 |
324 | |
325 | RESTORE_REGS |
326 | addik r1, r1, PT_SIZE |
327 | rtsd r15, 8 |
328 | nop |
329 | |
330 | /* |
331 | * User trap. |
332 | * |
333 | * System calls are handled here. |
334 | * |
335 | * Syscall protocol: |
336 | * Syscall number in r12, args in r5-r10 |
337 | * Return value in r3 |
338 | * |
 * Trap entered via brki instruction, so the BIP bit is set and interrupts
 * are masked. This is nice; it means we don't have to CLI before the state save.
341 | */ |
342 | C_ENTRY(_user_exception): |
343 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ |
	addi	r14, r14, 4	/* return address is 4 bytes after call */
345 | |
346 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ |
347 | tophys(r1,r1); |
348 | lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */ |
	/* calculate kernel stack pointer: thread_info + THREAD_SIZE (8k) */
350 | addik r1, r1, THREAD_SIZE; |
351 | tophys(r1,r1); |
352 | |
353 | addik r1, r1, -PT_SIZE; /* Make room on the stack. */ |
354 | SAVE_REGS |
355 | swi r0, r1, PT_R3 |
356 | swi r0, r1, PT_R4 |
357 | |
358 | swi r0, r1, PT_MODE; /* Was in user-mode. */ |
359 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
360 | swi r11, r1, PT_R1; /* Store user SP. */ |
361 | clear_ums; |
362 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
363 | /* Save away the syscall number. */ |
364 | swi r12, r1, PT_R0; |
365 | tovirt(r1,r1) |
366 | |
	/* the return address needs -8 to adjust for rtsd r15, 8 */
	/* Jump to the appropriate function for the system call number in r12
	 * (r12 is not preserved), or return an error if r12 is not valid.
	 * The LP register should point to the location where the called
	 * function should return. */
372 | |
373 | /* Step into virtual mode */ |
374 | rtbd r0, 3f |
375 | nop |
376 | 3: |
377 | lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */ |
378 | lwi r11, r11, TI_FLAGS /* get flags in thread info */ |
379 | andi r11, r11, _TIF_WORK_SYSCALL_MASK |
380 | beqi r11, 4f |
381 | |
382 | addik r3, r0, -ENOSYS |
383 | swi r3, r1, PT_R3 |
384 | brlid r15, do_syscall_trace_enter |
385 | addik r5, r1, PT_R0 |
386 | |
387 | # do_syscall_trace_enter returns the new syscall nr. |
388 | addk r12, r0, r3 |
389 | lwi r5, r1, PT_R5; |
390 | lwi r6, r1, PT_R6; |
391 | lwi r7, r1, PT_R7; |
392 | lwi r8, r1, PT_R8; |
393 | lwi r9, r1, PT_R9; |
394 | lwi r10, r1, PT_R10; |
395 | 4: |
	/* Jump to the appropriate function for the system call number in r12
	 * (r12 is not preserved), or return an error if r12 is not valid.
	 * The LP register should point to the location where the called
	 * function should return. */
400 | /* See if the system call number is valid */ |
401 | blti r12, 5f |
402 | addi r11, r12, -__NR_syscalls; |
403 | bgei r11, 5f; |
404 | /* Figure out which function to use for this system call. */ |
405 | /* Note Microblaze barrel shift is optional, so don't rely on it */ |
406 | add r12, r12, r12; /* convert num -> ptr */ |
407 | add r12, r12, r12; |
408 | addi r30, r0, 1 /* restarts allowed */ |
409 | |
410 | #ifdef DEBUG |
	/* Track syscalls and store counts in syscall_debug_table */
	/* The first table entry stores the total syscall count */
413 | lwi r3, r0, syscall_debug_table |
414 | addi r3, r3, 1 |
415 | swi r3, r0, syscall_debug_table |
416 | lwi r3, r12, syscall_debug_table |
417 | addi r3, r3, 1 |
418 | swi r3, r12, syscall_debug_table |
419 | #endif |
420 | |
421 | # Find and jump into the syscall handler. |
422 | lwi r12, r12, sys_call_table |
	/* the return address needs -8 to adjust for rtsd r15, 8 */
424 | addi r15, r0, ret_from_trap-8 |
425 | bra r12 |
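	/* In C terms the dispatch above is roughly (a sketch):
	 *	r3 = sys_call_table[nr](r5, r6, r7, r8, r9, r10);
	 * with the return landing in ret_from_trap via the preloaded r15 */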
426 | |
427 | /* The syscall number is invalid, return an error. */ |
428 | 5: |
429 | braid ret_from_trap |
430 | addi r3, r0, -ENOSYS; |
431 | |
432 | /* Entry point used to return from a syscall/trap */ |
/* We re-enable the BIP bit before the state restore */
434 | C_ENTRY(ret_from_trap): |
435 | swi r3, r1, PT_R3 |
436 | swi r4, r1, PT_R4 |
437 | |
438 | lwi r11, r1, PT_MODE; |
439 | /* See if returning to kernel mode, if so, skip resched &c. */ |
440 | bnei r11, 2f; |
441 | /* We're returning to user mode, so check for various conditions that |
442 | * trigger rescheduling. */ |
443 | /* FIXME: Restructure all these flag checks. */ |
444 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
445 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
446 | andi r11, r11, _TIF_WORK_SYSCALL_MASK |
447 | beqi r11, 1f |
448 | |
449 | brlid r15, do_syscall_trace_leave |
450 | addik r5, r1, PT_R0 |
451 | 1: |
452 | /* We're returning to user mode, so check for various conditions that |
453 | * trigger rescheduling. */ |
454 | /* get thread info from current task */ |
455 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; |
456 | lwi r19, r11, TI_FLAGS; /* get flags in thread info */ |
457 | andi r11, r19, _TIF_NEED_RESCHED; |
458 | beqi r11, 5f; |
459 | |
460 | bralid r15, schedule; /* Call scheduler */ |
461 | nop; /* delay slot */ |
462 | bri 1b |
463 | |
464 | /* Maybe handle a signal */ |
465 | 5: |
466 | andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; |
	beqi	r11, 4f;		/* no signals pending, skip to state restore */
468 | |
469 | addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ |
470 | bralid r15, do_notify_resume; /* Handle any signals */ |
471 | add r6, r30, r0; /* Arg 2: int in_syscall */ |
472 | add r30, r0, r0 /* no more restarts */ |
473 | bri 1b |
474 | |
475 | /* Finally, return to user state. */ |
476 | 4: set_bip; /* Ints masked for state restore */ |
477 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ |
478 | VM_OFF; |
479 | tophys(r1,r1); |
480 | RESTORE_REGS_RTBD; |
481 | addik r1, r1, PT_SIZE /* Clean up stack space. */ |
482 | lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */ |
483 | bri 6f; |
484 | |
485 | /* Return to kernel state. */ |
486 | 2: set_bip; /* Ints masked for state restore */ |
487 | VM_OFF; |
488 | tophys(r1,r1); |
489 | RESTORE_REGS_RTBD; |
490 | addik r1, r1, PT_SIZE /* Clean up stack space. */ |
491 | tovirt(r1,r1); |
492 | 6: |
493 | TRAP_return: /* Make global symbol for debugging */ |
494 | rtbd r14, 0; /* Instructions to return from an IRQ */ |
495 | nop; |
496 | |
497 | |
/* This is the initial entry point for a new child thread, with an appropriate
499 | stack in place that makes it look like the child is in the middle of a |
500 | syscall. This function is actually `returned to' from switch_thread |
501 | (copy_thread makes ret_from_fork the return address in each new thread's |
502 | saved context). */ |
503 | C_ENTRY(ret_from_fork): |
504 | bralid r15, schedule_tail; /* ...which is schedule_tail's arg */ |
505 | add r5, r3, r0; /* switch_thread returns the prev task */ |
506 | /* ( in the delay slot ) */ |
507 | brid ret_from_trap; /* Do normal trap return */ |
508 | add r3, r0, r0; /* Child's fork call should return 0. */ |
509 | |
510 | C_ENTRY(ret_from_kernel_thread): |
511 | bralid r15, schedule_tail; /* ...which is schedule_tail's arg */ |
512 | add r5, r3, r0; /* switch_thread returns the prev task */ |
513 | /* ( in the delay slot ) */ |
514 | brald r15, r20 /* fn was left in r20 */ |
515 | addk r5, r0, r19 /* ... and argument - in r19 */ |
516 | brid ret_from_trap |
517 | add r3, r0, r0 |
518 | |
519 | C_ENTRY(sys_rt_sigreturn_wrapper): |
520 | addik r30, r0, 0 /* no restarts */ |
521 | brid sys_rt_sigreturn /* Do real work */ |
522 | addik r5, r1, 0; /* add user context as 1st arg */ |
523 | |
524 | /* |
 * HW EXCEPTION routines start
526 | */ |
527 | C_ENTRY(full_exception_trap): |
	/* adjust the exception address for privileged instructions
	 * so we can find where it occurred */
530 | addik r17, r17, -4 |
531 | SAVE_STATE /* Save registers */ |
532 | /* PC, before IRQ/trap - this is one instruction above */ |
533 | swi r17, r1, PT_PC; |
534 | tovirt(r1,r1) |
	/* FIXME: this could be stored directly in the PT_ESR slot,
	 * but doing so faulted when tested */
	/* the return address needs -8 to adjust for rtsd r15, 8 */
538 | addik r15, r0, ret_from_exc - 8 |
539 | mfs r6, resr |
540 | mfs r7, rfsr; /* save FSR */ |
541 | mts rfsr, r0; /* Clear sticky fsr */ |
542 | rted r0, full_exception |
543 | addik r5, r1, 0 /* parameter struct pt_regs * regs */ |
544 | |
545 | /* |
546 | * Unaligned data trap. |
547 | * |
 * An unaligned data trap on the last word of a 4k page is handled here.
549 | * |
 * Trap entered via exception, so the EE bit is set and interrupts
 * are masked. This is nice; it means we don't have to CLI before the state save.
552 | * |
553 | * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S" |
554 | */ |
555 | C_ENTRY(unaligned_data_trap): |
	/* MS: r11 has to be saved and restored because set_bip,
	 * clear_eip and set_ee use r11 as a temp register when MSR
	 * instructions are not available. This is unnecessary when MSR
	 * instructions are used, since they use r0 instead of r11.
	 * ENTRY_SP is used here although it should primarily be
	 * reserved for stack pointer saving. */
562 | swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
563 | set_bip; /* equalize initial state for all possible entries */ |
564 | clear_eip; |
565 | set_ee; |
566 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
567 | SAVE_STATE /* Save registers.*/ |
568 | /* PC, before IRQ/trap - this is one instruction above */ |
569 | swi r17, r1, PT_PC; |
570 | tovirt(r1,r1) |
	/* the return address needs -8 to adjust for rtsd r15, 8 */
572 | addik r15, r0, ret_from_exc-8 |
573 | mfs r3, resr /* ESR */ |
574 | mfs r4, rear /* EAR */ |
575 | rtbd r0, _unaligned_data_exception |
576 | addik r7, r1, 0 /* parameter struct pt_regs * regs */ |
577 | |
578 | /* |
579 | * Page fault traps. |
580 | * |
581 | * If the real exception handler (from hw_exception_handler.S) didn't find |
 * the mapping for the process, then we're thrown here to handle such a situation.
583 | * |
 * Trap entered via exception, so the EE bit is set and interrupts
 * are masked. This is nice; it means we don't have to CLI before the state save.
586 | * |
587 | * Build a standard exception frame for TLB Access errors. All TLB exceptions |
588 | * will bail out to this point if they can't resolve the lightweight TLB fault. |
589 | * |
590 | * The C function called is in "arch/microblaze/mm/fault.c", declared as: |
591 | * void do_page_fault(struct pt_regs *regs, |
592 | * unsigned long address, |
593 | * unsigned long error_code) |
594 | */ |
/* data and instruction traps - which one it was is resolved in fault.c */
596 | C_ENTRY(page_fault_data_trap): |
597 | SAVE_STATE /* Save registers.*/ |
598 | /* PC, before IRQ/trap - this is one instruction above */ |
599 | swi r17, r1, PT_PC; |
600 | tovirt(r1,r1) |
	/* the return address needs -8 to adjust for rtsd r15, 8 */
602 | addik r15, r0, ret_from_exc-8 |
603 | mfs r6, rear /* parameter unsigned long address */ |
604 | mfs r7, resr /* parameter unsigned long error_code */ |
605 | rted r0, do_page_fault |
606 | addik r5, r1, 0 /* parameter struct pt_regs * regs */ |
607 | |
608 | C_ENTRY(page_fault_instr_trap): |
609 | SAVE_STATE /* Save registers.*/ |
610 | /* PC, before IRQ/trap - this is one instruction above */ |
611 | swi r17, r1, PT_PC; |
612 | tovirt(r1,r1) |
	/* the return address needs -8 to adjust for rtsd r15, 8 */
614 | addik r15, r0, ret_from_exc-8 |
615 | mfs r6, rear /* parameter unsigned long address */ |
616 | ori r7, r0, 0 /* parameter unsigned long error_code */ |
617 | rted r0, do_page_fault |
618 | addik r5, r1, 0 /* parameter struct pt_regs * regs */ |
619 | |
620 | /* Entry point used to return from an exception. */ |
621 | C_ENTRY(ret_from_exc): |
622 | lwi r11, r1, PT_MODE; |
623 | bnei r11, 2f; /* See if returning to kernel mode, */ |
624 | /* ... if so, skip resched &c. */ |
625 | |
626 | /* We're returning to user mode, so check for various conditions that |
627 | trigger rescheduling. */ |
628 | 1: |
629 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
630 | lwi r19, r11, TI_FLAGS; /* get flags in thread info */ |
631 | andi r11, r19, _TIF_NEED_RESCHED; |
632 | beqi r11, 5f; |
633 | |
634 | /* Call the scheduler before returning from a syscall/trap. */ |
635 | bralid r15, schedule; /* Call scheduler */ |
636 | nop; /* delay slot */ |
637 | bri 1b |
638 | |
639 | /* Maybe handle a signal */ |
640 | 5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; |
	beqi	r11, 4f;		/* no signals pending, skip to state restore */
642 | |
643 | /* |
	 * Handle a signal return; pending signal flags are in r19.
645 | * |
646 | * Not all registers are saved by the normal trap/interrupt entry |
647 | * points (for instance, call-saved registers (because the normal |
648 | * C-compiler calling sequence in the kernel makes sure they're |
649 | * preserved), and call-clobbered registers in the case of |
650 | * traps), but signal handlers may want to examine or change the |
651 | * complete register state. Here we save anything not saved by |
652 | * the normal entry sequence, so that it may be safely restored |
653 | * (in a possibly modified form) after do_notify_resume returns. */ |
654 | addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ |
655 | bralid r15, do_notify_resume; /* Handle any signals */ |
656 | addi r6, r0, 0; /* Arg 2: int in_syscall */ |
657 | bri 1b |
658 | |
659 | /* Finally, return to user state. */ |
660 | 4: set_bip; /* Ints masked for state restore */ |
661 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ |
662 | VM_OFF; |
663 | tophys(r1,r1); |
664 | |
665 | RESTORE_REGS_RTBD; |
666 | addik r1, r1, PT_SIZE /* Clean up stack space. */ |
667 | |
668 | lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */ |
669 | bri 6f; |
670 | /* Return to kernel state. */ |
671 | 2: set_bip; /* Ints masked for state restore */ |
672 | VM_OFF; |
673 | tophys(r1,r1); |
674 | RESTORE_REGS_RTBD; |
675 | addik r1, r1, PT_SIZE /* Clean up stack space. */ |
676 | |
677 | tovirt(r1,r1); |
678 | 6: |
679 | EXC_return: /* Make global symbol for debugging */ |
680 | rtbd r14, 0; /* Instructions to return from an IRQ */ |
681 | nop; |
682 | |
683 | /* |
 * HW EXCEPTION routines end
685 | */ |
686 | |
687 | /* |
688 | * Hardware maskable interrupts. |
689 | * |
690 | * The stack-pointer (r1) should have already been saved to the memory |
691 | * location PER_CPU(ENTRY_SP). |
692 | */ |
693 | C_ENTRY(_interrupt): |
694 | /* MS: we are in physical address */ |
695 | /* Save registers, switch to proper stack, convert SP to virtual.*/ |
696 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) |
697 | /* MS: See if already in kernel mode. */ |
698 | mfs r1, rmsr |
699 | nop |
700 | andi r1, r1, MSR_UMS |
701 | bnei r1, 1f |
702 | |
703 | /* Kernel-mode state save. */ |
704 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) |
	tophys(r1,r1); /* MS: r1 now holds the physical address of the stack */
706 | /* save registers */ |
707 | /* MS: Make room on the stack -> activation record */ |
708 | addik r1, r1, -PT_SIZE; |
709 | SAVE_REGS |
710 | brid 2f; |
	swi	r1, r1, PT_MODE; /* 0 - user mode, nonzero - kernel mode */
712 | 1: |
713 | /* User-mode state save. */ |
714 | /* MS: get the saved current */ |
715 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
716 | tophys(r1,r1); |
717 | lwi r1, r1, TS_THREAD_INFO; |
718 | addik r1, r1, THREAD_SIZE; |
719 | tophys(r1,r1); |
720 | /* save registers */ |
721 | addik r1, r1, -PT_SIZE; |
722 | SAVE_REGS |
	/* store mode: came from user mode */
724 | swi r0, r1, PT_MODE; |
725 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
726 | swi r11, r1, PT_R1; |
727 | clear_ums; |
728 | 2: |
729 | lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
730 | tovirt(r1,r1) |
731 | addik r15, r0, irq_call; |
732 | irq_call:rtbd r0, do_IRQ; |
733 | addik r5, r1, 0; |
734 | |
735 | /* MS: we are in virtual mode */ |
736 | ret_from_irq: |
737 | lwi r11, r1, PT_MODE; |
738 | bnei r11, 2f; |
739 | |
740 | 1: |
741 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; |
742 | lwi r19, r11, TI_FLAGS; /* MS: get flags from thread info */ |
743 | andi r11, r19, _TIF_NEED_RESCHED; |
744 | beqi r11, 5f |
745 | bralid r15, schedule; |
746 | nop; /* delay slot */ |
747 | bri 1b |
748 | |
749 | /* Maybe handle a signal */ |
750 | 5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; |
751 | beqid r11, no_intr_resched |
	/* Handle a signal return; pending signal flags are in r19. */
753 | addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ |
754 | bralid r15, do_notify_resume; /* Handle any signals */ |
755 | addi r6, r0, 0; /* Arg 2: int in_syscall */ |
756 | bri 1b |
757 | |
758 | /* Finally, return to user state. */ |
759 | no_intr_resched: |
760 | /* Disable interrupts, we are now committed to the state restore */ |
761 | disable_irq |
762 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); |
763 | VM_OFF; |
764 | tophys(r1,r1); |
765 | RESTORE_REGS |
766 | addik r1, r1, PT_SIZE /* MS: Clean up stack space. */ |
767 | lwi r1, r1, PT_R1 - PT_SIZE; |
768 | bri 6f; |
769 | /* MS: Return to kernel state. */ |
770 | 2: |
771 | #ifdef CONFIG_PREEMPTION |
772 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; |
773 | /* MS: get preempt_count from thread info */ |
774 | lwi r5, r11, TI_PREEMPT_COUNT; |
775 | bgti r5, restore; |
776 | |
777 | lwi r5, r11, TI_FLAGS; /* get flags in thread info */ |
778 | andi r5, r5, _TIF_NEED_RESCHED; |
779 | beqi r5, restore /* if zero jump over */ |
780 | |
	/* interrupts are off, which is why preempt_schedule_irq is called */
782 | bralid r15, preempt_schedule_irq |
783 | nop |
784 | restore: |
785 | #endif |
786 | VM_OFF /* MS: turn off MMU */ |
787 | tophys(r1,r1) |
788 | RESTORE_REGS |
789 | addik r1, r1, PT_SIZE /* MS: Clean up stack space. */ |
790 | tovirt(r1,r1); |
791 | 6: |
792 | IRQ_return: /* MS: Make global symbol for debugging */ |
793 | rtid r14, 0 |
794 | nop |
795 | |
796 | #ifdef CONFIG_MB_MANAGER |
797 | |
798 | #define PT_PID PT_SIZE |
799 | #define PT_TLBI PT_SIZE + 4 |
800 | #define PT_ZPR PT_SIZE + 8 |
801 | #define PT_TLBL0 PT_SIZE + 12 |
802 | #define PT_TLBH0 PT_SIZE + 16 |
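
/*
 * These offsets extend the pt_regs frame allocated by _xmb_manager_break
 * (addik r1, r1, -PT_SIZE - 36): extra words for the PID, the TLB index,
 * ZPR and the saved TLB lo/hi pairs (at most two TLBs, per the comment
 * at the allocation site).
 */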
803 | |
804 | C_ENTRY(_xtmr_manager_reset): |
805 | lwi r1, r0, xmb_manager_stackpointer |
806 | |
807 | /* Restore MSR */ |
808 | lwi r2, r1, PT_MSR |
809 | mts rmsr, r2 |
810 | bri 4 |
811 | |
812 | /* restore Special purpose registers */ |
813 | lwi r2, r1, PT_PID |
814 | mts rpid, r2 |
815 | |
816 | lwi r2, r1, PT_TLBI |
817 | mts rtlbx, r2 |
818 | |
819 | lwi r2, r1, PT_ZPR |
820 | mts rzpr, r2 |
821 | |
822 | #if CONFIG_XILINX_MICROBLAZE0_USE_FPU |
823 | lwi r2, r1, PT_FSR |
824 | mts rfsr, r2 |
825 | #endif |
826 | |
827 | /* restore all the tlb's */ |
828 | addik r3, r0, TOPHYS(tlb_skip) |
829 | addik r6, r0, PT_TLBL0 |
830 | addik r7, r0, PT_TLBH0 |
831 | restore_tlb: |
832 | add r6, r6, r1 |
833 | add r7, r7, r1 |
834 | lwi r2, r6, 0 |
835 | mts rtlblo, r2 |
836 | lwi r2, r7, 0 |
837 | mts rtlbhi, r2 |
838 | addik r6, r6, 4 |
839 | addik r7, r7, 4 |
840 | bgtid r3, restore_tlb |
841 | addik r3, r3, -1 |
842 | |
843 | lwi r5, r0, TOPHYS(xmb_manager_dev) |
844 | lwi r8, r0, TOPHYS(xmb_manager_reset_callback) |
845 | set_vms |
	/* the return from reset needs -8 to adjust for rtsd r15, 8 */
847 | addik r15, r0, ret_from_reset - 8 |
848 | rtbd r8, 0 |
849 | nop |
850 | |
851 | ret_from_reset: |
852 | set_bip /* Ints masked for state restore */ |
853 | VM_OFF |
854 | /* MS: Restore all regs */ |
855 | RESTORE_REGS |
856 | lwi r14, r1, PT_R14 |
857 | lwi r16, r1, PT_PC |
858 | addik r1, r1, PT_SIZE + 36 |
859 | rtbd r16, 0 |
860 | nop |
861 | |
862 | /* |
 * Break handler for the MB Manager. _xmb_manager_break is entered by
 * injecting a fault into one of the TMR MicroBlaze cores.
 * FIXME: This break handler only supports being
 * called from kernel space.
867 | */ |
868 | C_ENTRY(_xmb_manager_break): |
869 | /* |
870 | * Reserve memory in the stack for context store/restore |
871 | * (which includes memory for storing tlbs (max two tlbs)) |
872 | */ |
873 | addik r1, r1, -PT_SIZE - 36 |
874 | swi r1, r0, xmb_manager_stackpointer |
875 | SAVE_REGS |
876 | swi r14, r1, PT_R14 /* rewrite saved R14 value */ |
877 | swi r16, r1, PT_PC; /* PC and r16 are the same */ |
878 | |
879 | lwi r6, r0, TOPHYS(xmb_manager_baseaddr) |
880 | lwi r7, r0, TOPHYS(xmb_manager_crval) |
881 | /* |
882 | * When the break vector gets asserted because of error injection, |
883 | * the break signal must be blocked before exiting from the |
	 * break handler; the code below configures the TMR manager
	 * control register to block the break signal.
886 | */ |
887 | swi r7, r6, 0 |
888 | |
889 | /* Save the special purpose registers */ |
890 | mfs r2, rpid |
891 | swi r2, r1, PT_PID |
892 | |
893 | mfs r2, rtlbx |
894 | swi r2, r1, PT_TLBI |
895 | |
896 | mfs r2, rzpr |
897 | swi r2, r1, PT_ZPR |
898 | |
899 | #if CONFIG_XILINX_MICROBLAZE0_USE_FPU |
900 | mfs r2, rfsr |
901 | swi r2, r1, PT_FSR |
902 | #endif |
903 | mfs r2, rmsr |
904 | swi r2, r1, PT_MSR |
905 | |
906 | /* Save all the tlb's */ |
907 | addik r3, r0, TOPHYS(tlb_skip) |
908 | addik r6, r0, PT_TLBL0 |
909 | addik r7, r0, PT_TLBH0 |
910 | save_tlb: |
911 | add r6, r6, r1 |
912 | add r7, r7, r1 |
913 | mfs r2, rtlblo |
914 | swi r2, r6, 0 |
915 | mfs r2, rtlbhi |
916 | swi r2, r7, 0 |
917 | addik r6, r6, 4 |
918 | addik r7, r7, 4 |
919 | bgtid r3, save_tlb |
920 | addik r3, r3, -1 |
921 | |
922 | lwi r5, r0, TOPHYS(xmb_manager_dev) |
923 | lwi r8, r0, TOPHYS(xmb_manager_callback) |
	/* the return from break needs -8 to adjust for rtsd r15, 8 */
925 | addik r15, r0, ret_from_break - 8 |
926 | rtbd r8, 0 |
927 | nop |
928 | |
929 | ret_from_break: |
930 | /* flush the d-cache */ |
931 | bralid r15, mb_flush_dcache |
932 | nop |
933 | |
934 | /* |
	 * Invalidate the i-cache to make sure the MicroBlaze
	 * i-cache is in a proper state.
937 | */ |
938 | bralid r15, mb_invalidate_icache |
939 | nop |
940 | |
941 | set_bip; /* Ints masked for state restore */ |
942 | VM_OFF; |
943 | mbar 1 |
944 | mbar 2 |
945 | bri 4 |
946 | suspend |
947 | nop |
948 | #endif |
949 | |
950 | /* |
 * Debug trap for KGDB. _debug_exception is entered via brki r16, 0x18
 * and calls the handling function with the saved pt_regs
953 | */ |
954 | C_ENTRY(_debug_exception): |
955 | /* BIP bit is set on entry, no interrupts can occur */ |
956 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) |
957 | |
958 | mfs r1, rmsr |
959 | nop |
960 | andi r1, r1, MSR_UMS |
961 | bnei r1, 1f |
962 | /* MS: Kernel-mode state save - kgdb */ |
963 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ |
964 | |
965 | /* BIP bit is set on entry, no interrupts can occur */ |
966 | addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; |
967 | SAVE_REGS; |
	/* save all regs to the pt_regs structure */
969 | swi r0, r1, PT_R0; /* R0 must be saved too */ |
970 | swi r14, r1, PT_R14 /* rewrite saved R14 value */ |
971 | swi r16, r1, PT_PC; /* PC and r16 are the same */ |
972 | /* save special purpose registers to pt_regs */ |
973 | mfs r11, rear; |
974 | swi r11, r1, PT_EAR; |
975 | mfs r11, resr; |
976 | swi r11, r1, PT_ESR; |
977 | mfs r11, rfsr; |
978 | swi r11, r1, PT_FSR; |
979 | |
	/* the stack pointer holds a physical address and has been
	 * decreased by PT_SIZE, but the correct R1 value must be saved */
982 | addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE; |
983 | swi r11, r1, PT_R1 |
984 | /* MS: r31 - current pointer isn't changed */ |
985 | tovirt(r1,r1) |
986 | #ifdef CONFIG_KGDB |
	addi	r5, r1, 0 /* pass the pt_regs address as the first arg */
988 | addik r15, r0, dbtrap_call; /* return address */ |
989 | rtbd r0, microblaze_kgdb_break |
990 | nop; |
991 | #endif |
	/* MS: handler for brki from kernel space when KGDB is off.
	 * It is very unlikely that another brki instruction is executed. */
994 | bri 0 |
995 | |
996 | /* MS: User-mode state save - gdb */ |
997 | 1: lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ |
998 | tophys(r1,r1); |
999 | lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ |
1000 | addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */ |
1001 | tophys(r1,r1); |
1002 | |
1003 | addik r1, r1, -PT_SIZE; /* Make room on the stack. */ |
1004 | SAVE_REGS; |
	swi	r16, r1, PT_PC;		/* Save PC (r16 holds it after brki) */
1006 | swi r0, r1, PT_MODE; /* Was in user-mode. */ |
1007 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
1008 | swi r11, r1, PT_R1; /* Store user SP. */ |
1009 | lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
1010 | tovirt(r1,r1) |
1011 | set_vms; |
1012 | addik r5, r1, 0; |
1013 | addik r15, r0, dbtrap_call; |
1014 | dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */ |
1015 | rtbd r0, sw_exception |
1016 | nop |
1017 | |
1018 | /* MS: The first instruction for the second part of the gdb/kgdb */ |
1019 | set_bip; /* Ints masked for state restore */ |
1020 | lwi r11, r1, PT_MODE; |
1021 | bnei r11, 2f; |
1022 | /* MS: Return to user space - gdb */ |
1023 | 1: |
1024 | /* Get current task ptr into r11 */ |
1025 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
1026 | lwi r19, r11, TI_FLAGS; /* get flags in thread info */ |
1027 | andi r11, r19, _TIF_NEED_RESCHED; |
1028 | beqi r11, 5f; |
1029 | |
1030 | /* Call the scheduler before returning from a syscall/trap. */ |
1031 | bralid r15, schedule; /* Call scheduler */ |
1032 | nop; /* delay slot */ |
1033 | bri 1b |
1034 | |
1035 | /* Maybe handle a signal */ |
1036 | 5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; |
	beqi	r11, 4f;		/* no signals pending, skip to state restore */
1038 | |
1039 | addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ |
1040 | bralid r15, do_notify_resume; /* Handle any signals */ |
1041 | addi r6, r0, 0; /* Arg 2: int in_syscall */ |
1042 | bri 1b |
1043 | |
1044 | /* Finally, return to user state. */ |
1045 | 4: swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ |
1046 | VM_OFF; |
1047 | tophys(r1,r1); |
1048 | /* MS: Restore all regs */ |
1049 | RESTORE_REGS_RTBD |
1050 | addik r1, r1, PT_SIZE /* Clean up stack space */ |
1051 | lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */ |
1052 | DBTRAP_return_user: /* MS: Make global symbol for debugging */ |
1053 | rtbd r16, 0; /* MS: Instructions to return from a debug trap */ |
1054 | nop; |
1055 | |
1056 | /* MS: Return to kernel state - kgdb */ |
1057 | 2: VM_OFF; |
1058 | tophys(r1,r1); |
1059 | /* MS: Restore all regs */ |
1060 | RESTORE_REGS_RTBD |
1061 | lwi r14, r1, PT_R14; |
1062 | lwi r16, r1, PT_PC; |
1063 | addik r1, r1, PT_SIZE; /* MS: Clean up stack space */ |
1064 | tovirt(r1,r1); |
1065 | DBTRAP_return_kernel: /* MS: Make global symbol for debugging */ |
1066 | rtbd r16, 0; /* MS: Instructions to return from a debug trap */ |
1067 | nop; |
1068 | |
1069 | |
1070 | ENTRY(_switch_to) |
1071 | /* prepare return value */ |
1072 | addk r3, r0, CURRENT_TASK |
1073 | |
1074 | /* save registers in cpu_context */ |
	/* use r11 and r12, volatile registers, as temp registers */
	/* point r11 at the previous process's cpu_context */
1077 | addik r11, r5, TI_CPU_CONTEXT |
1078 | swi r1, r11, CC_R1 |
1079 | swi r2, r11, CC_R2 |
1080 | /* skip volatile registers. |
1081 | * they are saved on stack when we jumped to _switch_to() */ |
1082 | /* dedicated registers */ |
1083 | swi r13, r11, CC_R13 |
1084 | swi r14, r11, CC_R14 |
1085 | swi r15, r11, CC_R15 |
1086 | swi r16, r11, CC_R16 |
1087 | swi r17, r11, CC_R17 |
1088 | swi r18, r11, CC_R18 |
1089 | /* save non-volatile registers */ |
1090 | swi r19, r11, CC_R19 |
1091 | swi r20, r11, CC_R20 |
1092 | swi r21, r11, CC_R21 |
1093 | swi r22, r11, CC_R22 |
1094 | swi r23, r11, CC_R23 |
1095 | swi r24, r11, CC_R24 |
1096 | swi r25, r11, CC_R25 |
1097 | swi r26, r11, CC_R26 |
1098 | swi r27, r11, CC_R27 |
1099 | swi r28, r11, CC_R28 |
1100 | swi r29, r11, CC_R29 |
1101 | swi r30, r11, CC_R30 |
1102 | /* special purpose registers */ |
1103 | mfs r12, rmsr |
1104 | swi r12, r11, CC_MSR |
1105 | mfs r12, rear |
1106 | swi r12, r11, CC_EAR |
1107 | mfs r12, resr |
1108 | swi r12, r11, CC_ESR |
1109 | mfs r12, rfsr |
1110 | swi r12, r11, CC_FSR |
1111 | |
	/* update r31 (CURRENT_TASK) to point to the task that runs next */
1113 | lwi CURRENT_TASK, r6, TI_TASK |
	/* store it to CURRENT_SAVE too */
1115 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE) |
1116 | |
	/* get the new process's cpu context and restore it */
	/* point r11 at the start of the next task's cpu_context */
1119 | addik r11, r6, TI_CPU_CONTEXT |
1120 | |
1121 | /* non-volatile registers */ |
1122 | lwi r30, r11, CC_R30 |
1123 | lwi r29, r11, CC_R29 |
1124 | lwi r28, r11, CC_R28 |
1125 | lwi r27, r11, CC_R27 |
1126 | lwi r26, r11, CC_R26 |
1127 | lwi r25, r11, CC_R25 |
1128 | lwi r24, r11, CC_R24 |
1129 | lwi r23, r11, CC_R23 |
1130 | lwi r22, r11, CC_R22 |
1131 | lwi r21, r11, CC_R21 |
1132 | lwi r20, r11, CC_R20 |
1133 | lwi r19, r11, CC_R19 |
1134 | /* dedicated registers */ |
1135 | lwi r18, r11, CC_R18 |
1136 | lwi r17, r11, CC_R17 |
1137 | lwi r16, r11, CC_R16 |
1138 | lwi r15, r11, CC_R15 |
1139 | lwi r14, r11, CC_R14 |
1140 | lwi r13, r11, CC_R13 |
1141 | /* skip volatile registers */ |
1142 | lwi r2, r11, CC_R2 |
1143 | lwi r1, r11, CC_R1 |
1144 | |
1145 | /* special purpose registers */ |
1146 | lwi r12, r11, CC_FSR |
1147 | mts rfsr, r12 |
1148 | lwi r12, r11, CC_MSR |
1149 | mts rmsr, r12 |
1150 | |
1151 | rtsd r15, 8 |
1152 | nop |
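
/*
 * In C terms (a sketch): _switch_to(prev_ti, next_ti) saves the
 * callee-saved and special-purpose registers into
 * prev_ti->cpu_context, loads next_ti->cpu_context, and returns the
 * previous task in r3, which ret_from_fork hands to schedule_tail.
 */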
1153 | |
1154 | #ifdef CONFIG_MB_MANAGER |
1155 | .global xmb_inject_err |
1156 | .section .text |
1157 | .align 2 |
1158 | .ent xmb_inject_err |
1159 | .type xmb_inject_err, @function |
1160 | xmb_inject_err: |
1161 | addik r1, r1, -PT_SIZE |
1162 | SAVE_REGS |
1163 | |
1164 | /* Switch to real mode */ |
1165 | VM_OFF; |
1166 | set_bip; |
1167 | mbar 1 |
1168 | mbar 2 |
1169 | bralid r15, XMB_INJECT_ERR_OFFSET |
1170 | nop; |
1171 | |
1172 | /* enable virtual mode */ |
1173 | set_vms; |
1174 | /* barrier for instructions and data accesses */ |
1175 | mbar 1 |
1176 | mbar 2 |
1177 | /* |
1178 | * Enable Interrupts, Virtual Protected Mode, equalize |
1179 | * initial state for all possible entries. |
1180 | */ |
1181 | rtbd r0, 1f |
1182 | nop; |
1183 | 1: |
1184 | RESTORE_REGS |
1185 | addik r1, r1, PT_SIZE |
1186 | rtsd r15, 8; |
1187 | nop; |
1188 | .end xmb_inject_err |
1189 | |
1190 | .section .data |
1191 | .global xmb_manager_dev |
1192 | .global xmb_manager_baseaddr |
1193 | .global xmb_manager_crval |
1194 | .global xmb_manager_callback |
1195 | .global xmb_manager_reset_callback |
1196 | .global xmb_manager_stackpointer |
1197 | .align 4 |
1198 | xmb_manager_dev: |
1199 | .long 0 |
1200 | xmb_manager_baseaddr: |
1201 | .long 0 |
1202 | xmb_manager_crval: |
1203 | .long 0 |
1204 | xmb_manager_callback: |
1205 | .long 0 |
1206 | xmb_manager_reset_callback: |
1207 | .long 0 |
1208 | xmb_manager_stackpointer: |
1209 | .long 0 |
1210 | |
1211 | /* |
1212 | * When the break vector gets asserted because of error injection, |
1213 | * the break signal must be blocked before exiting from the |
 * break handler. The function below records the manager address,
 * control register value and callback arguments,
 * which the break handler will use to block the
 * break and call the callback function.
1218 | */ |
1219 | .global xmb_manager_register |
1220 | .section .text |
1221 | .align 2 |
1222 | .ent xmb_manager_register |
1223 | .type xmb_manager_register, @function |
1224 | xmb_manager_register: |
1225 | swi r5, r0, xmb_manager_baseaddr |
1226 | swi r6, r0, xmb_manager_crval |
1227 | swi r7, r0, xmb_manager_callback |
1228 | swi r8, r0, xmb_manager_dev |
1229 | swi r9, r0, xmb_manager_reset_callback |
1230 | |
1231 | rtsd r15, 8; |
1232 | nop; |
1233 | .end xmb_manager_register |
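
/*
 * From the register usage above, the C-visible signature is roughly
 * (a sketch; the real prototype is expected to live in
 * asm/xilinx_mb_manager.h, included above):
 *
 *	void xmb_manager_register(uintptr_t baseaddr, u32 crval,
 *				  void (*callback)(void *), void *dev,
 *				  void (*reset_callback)(void *));
 */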
1234 | #endif |
1235 | |
1236 | ENTRY(_reset) |
1237 | VM_OFF |
1238 | brai 0; /* Jump to reset vector */ |
1239 | |
1240 | /* These are compiled and loaded into high memory, then |
1241 | * copied into place in mach_early_setup */ |
1242 | .section .init.ivt, "ax" |
1243 | #if CONFIG_MANUAL_RESET_VECTOR && !defined(CONFIG_MB_MANAGER) |
1244 | .org 0x0 |
1245 | brai CONFIG_MANUAL_RESET_VECTOR |
1246 | #elif defined(CONFIG_MB_MANAGER) |
1247 | .org 0x0 |
1248 | brai TOPHYS(_xtmr_manager_reset); |
1249 | #endif |
1250 | .org 0x8 |
1251 | brai TOPHYS(_user_exception); /* syscall handler */ |
1252 | .org 0x10 |
1253 | brai TOPHYS(_interrupt); /* Interrupt handler */ |
1254 | #ifdef CONFIG_MB_MANAGER |
1255 | .org 0x18 |
1256 | brai TOPHYS(_xmb_manager_break); /* microblaze manager break handler */ |
1257 | #else |
1258 | .org 0x18 |
1259 | brai TOPHYS(_debug_exception); /* debug trap handler */ |
1260 | #endif |
1261 | .org 0x20 |
1262 | brai TOPHYS(_hw_exception_handler); /* HW exception handler */ |
1263 | |
1264 | #ifdef CONFIG_MB_MANAGER |
1265 | /* |
 * The TMR Inject API, which injects the error, must be
 * executed from LMB.
 * TMR Inject is programmed with an address of 0x200 so that
 * the error is injected when the program counter matches this
 * address. 0x200 is expected to be the next available BRAM
 * offset, hence it is used for this API.
1272 | */ |
1273 | .org XMB_INJECT_ERR_OFFSET |
1274 | xmb_inject_error: |
1275 | nop |
1276 | rtsd r15, 8 |
1277 | nop |
1278 | #endif |
1279 | |
1280 | .section .rodata,"a" |
1281 | #include "syscall_table.S" |
1282 | |
1283 | syscall_table_size=(.-sys_call_table) |
1284 | |
1285 | type_SYSCALL: |
1286 | .ascii "SYSCALL\0" |
1287 | type_IRQ: |
1288 | .ascii "IRQ\0" |
1289 | type_IRQ_PREEMPT: |
1290 | .ascii "IRQ (PREEMPTED)\0" |
1291 | type_SYSCALL_PREEMPT: |
1292 | .ascii " SYSCALL (PREEMPTED)\0" |
1293 | |
1294 | /* |
1295 | * Trap decoding for stack unwinder |
1296 | * Tuples are (start addr, end addr, string) |
 * If the return address lies in [start addr, end addr],
 * the unwinder displays 'string'
1299 | */ |
1300 | |
1301 | .align 4 |
1302 | .global microblaze_trap_handlers |
1303 | microblaze_trap_handlers: |
1304 | /* Exact matches come first */ |
1305 | .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL |
1306 | .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ |
1307 | /* Fuzzy matches go here */ |
1308 | .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT |
1309 | .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT |
1310 | /* End of table */ |
1311 | .word 0 ; .word 0 ; .word 0 |
1312 | |