1 | /* Switch to context. |
2 | Copyright (C) 2002-2024 Free Software Foundation, Inc. |
3 | This file is part of the GNU C Library. |
4 | |
5 | The GNU C Library is free software; you can redistribute it and/or |
6 | modify it under the terms of the GNU Lesser General Public |
7 | License as published by the Free Software Foundation; either |
8 | version 2.1 of the License, or (at your option) any later version. |
9 | |
10 | The GNU C Library is distributed in the hope that it will be useful, |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | Lesser General Public License for more details. |
14 | |
15 | You should have received a copy of the GNU Lesser General Public |
16 | License along with the GNU C Library; if not, see |
17 | <https://www.gnu.org/licenses/>. */ |
18 | |
19 | #include <sysdep.h> |
20 | #include <rtld-global-offsets.h> |
21 | #include <shlib-compat.h> |
22 | |
23 | #define __ASSEMBLY__ |
24 | #include <asm/ptrace.h> |
25 | #include "ucontext_i.h" |
26 | #include <asm/errno.h> |
27 | |
	.section ".toc" ,"aw"
/* TOC entry through which both setcontext variants locate the hwcap
   word.  In SHARED builds it holds &_rtld_global_ro and the hwcap field
   is read at RTLD_GLOBAL_RO_DL_HWCAP_OFFSET; otherwise it holds the
   address of the extern _dl_hwcap variable directly.  */
.LC__dl_hwcap:
#ifdef SHARED
	.tc _rtld_global_ro[TC],_rtld_global_ro
#else
	.tc _dl_hwcap[TC],_dl_hwcap
#endif
35 | .section ".text" |
36 | |
37 | #if SHLIB_COMPAT (libc, GLIBC_2_3, GLIBC_2_3_4) |
/* int __novec_setcontext (const ucontext_t *ucp)

   Compat (GLIBC_2.3 .. GLIBC_2.3.4) setcontext for powerpc64 without
   Altivec support: restore the signal mask, the FPSCR, the FP registers
   and the general purpose registers from UCP, then branch to the saved
   NIP.  Returns to the caller only if __sigprocmask fails.  */
ENTRY(__novec_setcontext)
	CALL_MCOUNT 1
	mflr	r0
	/* r31 (callee-saved) holds UCP across the __sigprocmask call.  */
	std	r31,-8(1)
	cfi_offset(r31,-8)
	std	r0,FRAME_LR_SAVE(r1)
	cfi_offset (lr, FRAME_LR_SAVE)
	stdu	r1,-128(r1)
	cfi_adjust_cfa_offset (128)
	mr	r31,r3

	/* sigprocmask (SIG_SETMASK, &ucp->uc_sigmask, NULL).  */
	li	r5,0
	addi	r4,r3,UCONTEXT_SIGMASK
	li	r3,SIG_SETMASK
	bl	JUMPTARGET(__sigprocmask)
	nop
	cmpdi	r3,0
	bne	L(nv_error_exit)

	/* BUGFIX: the TOC load of the hwcap address was missing here, so
	   the dereferences below read through whatever __sigprocmask left
	   in r5.  Load the address first, exactly as __setcontext does.  */
	ld	r5,.LC__dl_hwcap@toc(r2)
# ifdef SHARED
	/* Load _rtld_global_ro._dl_hwcap.  */
	ld	r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
# else
	ld	r5,0(r5) /* Load extern _dl_hwcap.  */
# endif

	/* fp0 gets the saved FPSCR image (slot 32 of the FP save area).  */
	lfd	fp0,(SIGCONTEXT_FP_REGS+(32*8))(r31)
	lfd	fp31,(SIGCONTEXT_FP_REGS+(PT_R31*8))(r31)
	lfd	fp30,(SIGCONTEXT_FP_REGS+(PT_R30*8))(r31)

# ifdef _ARCH_PWR6
	/* Use the extended four-operand version of the mtfsf insn.  */
	.machine push
	.machine "power6"

	mtfsf	0xff,fp0,1,0

	.machine pop
# else
	/* Availability of DFP indicates a 64-bit FPSCR.  */
	andi.	r6,r5,PPC_FEATURE_HAS_DFP
	beq	5f
	/* Use the extended four-operand version of the mtfsf insn.  */
	.machine push
	.machine "power6"

	mtfsf	0xff,fp0,1,0

	.machine pop

	b	6f
	/* Continue to operate on the FPSCR as if it were 32-bits.  */
5:
	mtfsf	0xff,fp0
6:
# endif /* _ARCH_PWR6 */

	lfd	fp29,(SIGCONTEXT_FP_REGS+(PT_R29*8))(r31)
	lfd	fp28,(SIGCONTEXT_FP_REGS+(PT_R28*8))(r31)
	lfd	fp27,(SIGCONTEXT_FP_REGS+(PT_R27*8))(r31)
	lfd	fp26,(SIGCONTEXT_FP_REGS+(PT_R26*8))(r31)
	lfd	fp25,(SIGCONTEXT_FP_REGS+(PT_R25*8))(r31)
	lfd	fp24,(SIGCONTEXT_FP_REGS+(PT_R24*8))(r31)
	lfd	fp23,(SIGCONTEXT_FP_REGS+(PT_R23*8))(r31)
	lfd	fp22,(SIGCONTEXT_FP_REGS+(PT_R22*8))(r31)
	lfd	fp21,(SIGCONTEXT_FP_REGS+(PT_R21*8))(r31)
	lfd	fp20,(SIGCONTEXT_FP_REGS+(PT_R20*8))(r31)
	lfd	fp19,(SIGCONTEXT_FP_REGS+(PT_R19*8))(r31)
	lfd	fp18,(SIGCONTEXT_FP_REGS+(PT_R18*8))(r31)
	lfd	fp17,(SIGCONTEXT_FP_REGS+(PT_R17*8))(r31)
	lfd	fp16,(SIGCONTEXT_FP_REGS+(PT_R16*8))(r31)
	lfd	fp15,(SIGCONTEXT_FP_REGS+(PT_R15*8))(r31)
	lfd	fp14,(SIGCONTEXT_FP_REGS+(PT_R14*8))(r31)
	lfd	fp13,(SIGCONTEXT_FP_REGS+(PT_R13*8))(r31)
	lfd	fp12,(SIGCONTEXT_FP_REGS+(PT_R12*8))(r31)
	lfd	fp11,(SIGCONTEXT_FP_REGS+(PT_R11*8))(r31)
	lfd	fp10,(SIGCONTEXT_FP_REGS+(PT_R10*8))(r31)
	lfd	fp9,(SIGCONTEXT_FP_REGS+(PT_R9*8))(r31)
	lfd	fp8,(SIGCONTEXT_FP_REGS+(PT_R8*8))(r31)
	lfd	fp7,(SIGCONTEXT_FP_REGS+(PT_R7*8))(r31)
	lfd	fp6,(SIGCONTEXT_FP_REGS+(PT_R6*8))(r31)
	lfd	fp5,(SIGCONTEXT_FP_REGS+(PT_R5*8))(r31)
	lfd	fp4,(SIGCONTEXT_FP_REGS+(PT_R4*8))(r31)
	lfd	fp3,(SIGCONTEXT_FP_REGS+(PT_R3*8))(r31)
	lfd	fp2,(SIGCONTEXT_FP_REGS+(PT_R2*8))(r31)
	lfd	fp1,(SIGCONTEXT_FP_REGS+(PT_R1*8))(r31)
	lfd	fp0,(SIGCONTEXT_FP_REGS+(PT_R0*8))(r31)

	/* End FDE now, because the unwind info would be wrong while
	   we're reloading registers to switch to the new context.  */
	cfi_endproc

	/* Restore the GP state; r1 and r2 switch to the new context's
	   stack and TOC.  r0 is used as scratch for LR/XER/CCR.  */
	ld	r0,(SIGCONTEXT_GP_REGS+(PT_LNK*8))(r31)
	ld	r1,(SIGCONTEXT_GP_REGS+(PT_R1*8))(r31)
	mtlr	r0
	ld	r2,(SIGCONTEXT_GP_REGS+(PT_R2*8))(r31)
	ld	r0,(SIGCONTEXT_GP_REGS+(PT_XER*8))(r31)
	ld	r3,(SIGCONTEXT_GP_REGS+(PT_R3*8))(r31)
	mtxer	r0
	ld	r4,(SIGCONTEXT_GP_REGS+(PT_R4*8))(r31)
	ld	r0,(SIGCONTEXT_GP_REGS+(PT_CCR*8))(r31)
	ld	r5,(SIGCONTEXT_GP_REGS+(PT_R5*8))(r31)
	mtcr	r0
	ld	r6,(SIGCONTEXT_GP_REGS+(PT_R6*8))(r31)
	ld	r7,(SIGCONTEXT_GP_REGS+(PT_R7*8))(r31)
	ld	r8,(SIGCONTEXT_GP_REGS+(PT_R8*8))(r31)
	ld	r9,(SIGCONTEXT_GP_REGS+(PT_R9*8))(r31)
	ld	r10,(SIGCONTEXT_GP_REGS+(PT_R10*8))(r31)
	ld	r11,(SIGCONTEXT_GP_REGS+(PT_R11*8))(r31)
	ld	r12,(SIGCONTEXT_GP_REGS+(PT_R12*8))(r31)
	/* Don't reload the thread ID or TLS pointer (r13).  */
	ld	r14,(SIGCONTEXT_GP_REGS+(PT_R14*8))(r31)
	ld	r15,(SIGCONTEXT_GP_REGS+(PT_R15*8))(r31)
	ld	r16,(SIGCONTEXT_GP_REGS+(PT_R16*8))(r31)
	ld	r17,(SIGCONTEXT_GP_REGS+(PT_R17*8))(r31)
	ld	r18,(SIGCONTEXT_GP_REGS+(PT_R18*8))(r31)
	ld	r19,(SIGCONTEXT_GP_REGS+(PT_R19*8))(r31)
	ld	r20,(SIGCONTEXT_GP_REGS+(PT_R20*8))(r31)
	ld	r21,(SIGCONTEXT_GP_REGS+(PT_R21*8))(r31)
	ld	r22,(SIGCONTEXT_GP_REGS+(PT_R22*8))(r31)
	ld	r23,(SIGCONTEXT_GP_REGS+(PT_R23*8))(r31)
	ld	r24,(SIGCONTEXT_GP_REGS+(PT_R24*8))(r31)
	ld	r25,(SIGCONTEXT_GP_REGS+(PT_R25*8))(r31)
	ld	r26,(SIGCONTEXT_GP_REGS+(PT_R26*8))(r31)
	ld	r27,(SIGCONTEXT_GP_REGS+(PT_R27*8))(r31)
	ld	r28,(SIGCONTEXT_GP_REGS+(PT_R28*8))(r31)
	ld	r29,(SIGCONTEXT_GP_REGS+(PT_R29*8))(r31)
	ld	r30,(SIGCONTEXT_GP_REGS+(PT_R30*8))(r31)

	/* Now we branch to the "Next Instruction Pointer" from the saved
	   context.  With the powerpc64 instruction set there is no good way to
	   do this (from user state) without clobbering either the LR or CTR.
	   The makecontext and swapcontext functions depend on the callers
	   LR being preserved so we use the CTR.  */
	ld	r0,(SIGCONTEXT_GP_REGS+(PT_NIP*8))(r31)
	mtctr	r0
	ld	r0,(SIGCONTEXT_GP_REGS+(PT_R0*8))(r31)
	ld	r31,(SIGCONTEXT_GP_REGS+(PT_R31*8))(r31)
	bctr

	/* Re-establish FDE for the rest of the actual setcontext routine.  */
	cfi_startproc
	cfi_offset (lr, FRAME_LR_SAVE)
	cfi_adjust_cfa_offset (128)

L(nv_error_exit):
	/* __sigprocmask failed; r3 already holds its return value.
	   Pop our 128-byte frame and restore LR and r31.  */
	ld	r0,128+FRAME_LR_SAVE(r1)
	addi	r1,r1,128
	mtlr	r0
	ld	r31,-8(r1)
	blr
PSEUDO_END(__novec_setcontext)
190 | |
191 | compat_symbol (libc, __novec_setcontext, setcontext, GLIBC_2_3) |
192 | |
193 | #endif |
194 | |
195 | .section ".text" |
196 | .machine "altivec" |
/* int __setcontext (const ucontext_t *ucp)

   Restore the machine context from UCP: signal mask, Altivec state
   (when the hardware supports it and the context saved it), FPSCR,
   FP registers and general purpose registers, then branch to the
   saved NIP.  Returns to the caller only if __sigprocmask fails.  */
ENTRY(__setcontext)
	CALL_MCOUNT 1
	mflr	r0
	/* r31 (callee-saved) holds UCP across the __sigprocmask call.  */
	std	r31,-8(1)
	cfi_offset(r31,-8)
	std	r0,FRAME_LR_SAVE(r1)
	cfi_offset (lr, FRAME_LR_SAVE)
	stdu	r1,-128(r1)
	cfi_adjust_cfa_offset (128)
	mr	r31,r3

	/* sigprocmask (SIG_SETMASK, &ucp->uc_sigmask, NULL).  */
	li	r5,0
	addi	r4,r3,UCONTEXT_SIGMASK
	li	r3,SIG_SETMASK
	bl	JUMPTARGET(__sigprocmask)
	nop
	cmpdi	r3,0
	bne	L(error_exit)

	/* r5 = address of the hwcap word (via the TOC);
	   r10 = ucp's saved vector-register area pointer.  */
	ld	r5,.LC__dl_hwcap@toc(r2)
	ld	r10,(SIGCONTEXT_V_REGS_PTR)(r31)
# ifdef SHARED
	/* Load _rtld-global._dl_hwcap.  */
	ld	r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
# else
	ld	r5,0(r5) /* Load extern _dl_hwcap.  */
# endif
	/* Skip the vector restore unless the CPU has Altivec ...  */
	andis.	r6,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
	beq	L(has_no_vec)

	/* ... and the context actually carries saved vector state.  */
	cmpdi	r10,0
	beq	L(has_no_vec)
	/* r0 = saved VRSAVE word (slot 33 of the vector save area).  */
	lwz	r0,(33*16)(r10)

	/* r9 = 16*32, the byte offset of the saved VSCR image.  */
	li	r9,(16*32)
	mtspr	VRSAVE,r0
	/* VRSAVE == 0 means no vector registers were in use.  */
	cmpwi	r0,0
	beq	L(has_no_vec)

	/* v19 temporarily holds the saved VSCR image; it is reloaded
	   with its real value at the proper point in the walk below.  */
	lvx	v19,r9,r10
	la	r9,(16)(r10)

	/* Restore v0-v31 two at a time: r10 walks the even-numbered
	   16-byte slots, r9 the odd ones, each advancing 32 per pair.  */
	lvx	v0,0,r10
	lvx	v1,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	/* Install VSCR now that v19's temporary use is done with.  */
	mtvscr	v19
	lvx	v2,0,r10
	lvx	v3,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	lvx	v4,0,r10
	lvx	v5,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	lvx	v6,0,r10
	lvx	v7,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	lvx	v8,0,r10
	lvx	v9,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	lvx	v10,0,r10
	lvx	v11,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	lvx	v12,0,r10
	lvx	v13,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	lvx	v14,0,r10
	lvx	v15,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	lvx	v16,0,r10
	lvx	v17,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	/* v19 is overwritten here with its genuine saved value.  */
	lvx	v18,0,r10
	lvx	v19,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	lvx	v20,0,r10
	lvx	v21,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	lvx	v22,0,r10
	lvx	v23,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	lvx	v24,0,r10
	lvx	v25,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	lvx	v26,0,r10
	lvx	v27,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	lvx	v28,0,r10
	lvx	v29,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	lvx	v30,0,r10
	lvx	v31,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

	/* NOTE(review): at this point r10/r9 have walked past v31 and
	   address slots 32/33 (the VSCR and VRSAVE words), so these two
	   loads overwrite the v10/v11 values restored above.  Both are
	   volatile registers, so this appears harmless, but it looks like
	   leftover dead code — confirm against the kernel's vrregset
	   layout before touching it.  */
	lvx	v10,0,r10
	lvx	v11,0,r9
	addi	r10,r10,32
	addi	r9,r9,32

L(has_no_vec):
	/* fp0 gets the saved FPSCR image (slot 32 of the FP save area).  */
	lfd	fp0,(SIGCONTEXT_FP_REGS+(32*8))(r31)
	lfd	fp31,(SIGCONTEXT_FP_REGS+(PT_R31*8))(r31)
	lfd	fp30,(SIGCONTEXT_FP_REGS+(PT_R30*8))(r31)

# ifdef _ARCH_PWR6
	/* Use the extended four-operand version of the mtfsf insn.  */
	.machine push
	.machine "power6"

	mtfsf	0xff,fp0,1,0

	.machine pop
# else
	/* Availability of DFP indicates a 64-bit FPSCR.  */
	andi.	r6,r5,PPC_FEATURE_HAS_DFP
	beq	7f
	/* Use the extended four-operand version of the mtfsf insn.  */
	.machine push
	.machine "power6"

	mtfsf	0xff,fp0,1,0

	.machine pop

	b	8f
	/* Continue to operate on the FPSCR as if it were 32-bits.  */
7:
	mtfsf	0xff,fp0
8:
# endif /* _ARCH_PWR6 */

	lfd	fp29,(SIGCONTEXT_FP_REGS+(PT_R29*8))(r31)
	lfd	fp28,(SIGCONTEXT_FP_REGS+(PT_R28*8))(r31)
	lfd	fp27,(SIGCONTEXT_FP_REGS+(PT_R27*8))(r31)
	lfd	fp26,(SIGCONTEXT_FP_REGS+(PT_R26*8))(r31)
	lfd	fp25,(SIGCONTEXT_FP_REGS+(PT_R25*8))(r31)
	lfd	fp24,(SIGCONTEXT_FP_REGS+(PT_R24*8))(r31)
	lfd	fp23,(SIGCONTEXT_FP_REGS+(PT_R23*8))(r31)
	lfd	fp22,(SIGCONTEXT_FP_REGS+(PT_R22*8))(r31)
	lfd	fp21,(SIGCONTEXT_FP_REGS+(PT_R21*8))(r31)
	lfd	fp20,(SIGCONTEXT_FP_REGS+(PT_R20*8))(r31)
	lfd	fp19,(SIGCONTEXT_FP_REGS+(PT_R19*8))(r31)
	lfd	fp18,(SIGCONTEXT_FP_REGS+(PT_R18*8))(r31)
	lfd	fp17,(SIGCONTEXT_FP_REGS+(PT_R17*8))(r31)
	lfd	fp16,(SIGCONTEXT_FP_REGS+(PT_R16*8))(r31)
	lfd	fp15,(SIGCONTEXT_FP_REGS+(PT_R15*8))(r31)
	lfd	fp14,(SIGCONTEXT_FP_REGS+(PT_R14*8))(r31)
	lfd	fp13,(SIGCONTEXT_FP_REGS+(PT_R13*8))(r31)
	lfd	fp12,(SIGCONTEXT_FP_REGS+(PT_R12*8))(r31)
	lfd	fp11,(SIGCONTEXT_FP_REGS+(PT_R11*8))(r31)
	lfd	fp10,(SIGCONTEXT_FP_REGS+(PT_R10*8))(r31)
	lfd	fp9,(SIGCONTEXT_FP_REGS+(PT_R9*8))(r31)
	lfd	fp8,(SIGCONTEXT_FP_REGS+(PT_R8*8))(r31)
	lfd	fp7,(SIGCONTEXT_FP_REGS+(PT_R7*8))(r31)
	lfd	fp6,(SIGCONTEXT_FP_REGS+(PT_R6*8))(r31)
	lfd	fp5,(SIGCONTEXT_FP_REGS+(PT_R5*8))(r31)
	lfd	fp4,(SIGCONTEXT_FP_REGS+(PT_R4*8))(r31)
	lfd	fp3,(SIGCONTEXT_FP_REGS+(PT_R3*8))(r31)
	lfd	fp2,(SIGCONTEXT_FP_REGS+(PT_R2*8))(r31)
	lfd	fp1,(SIGCONTEXT_FP_REGS+(PT_R1*8))(r31)
	lfd	fp0,(SIGCONTEXT_FP_REGS+(PT_R0*8))(r31)

	/* End FDE now, because the unwind info would be wrong while
	   we're reloading registers to switch to the new context.  */
	cfi_endproc

	/* Restore the GP state; r1 and r2 switch to the new context's
	   stack and TOC.  r0 is used as scratch for LR/XER/CCR.  */
	ld	r0,(SIGCONTEXT_GP_REGS+(PT_LNK*8))(r31)
	ld	r1,(SIGCONTEXT_GP_REGS+(PT_R1*8))(r31)
	mtlr	r0
	ld	r2,(SIGCONTEXT_GP_REGS+(PT_R2*8))(r31)
	ld	r0,(SIGCONTEXT_GP_REGS+(PT_XER*8))(r31)
	ld	r3,(SIGCONTEXT_GP_REGS+(PT_R3*8))(r31)
	mtxer	r0
	ld	r4,(SIGCONTEXT_GP_REGS+(PT_R4*8))(r31)
	ld	r0,(SIGCONTEXT_GP_REGS+(PT_CCR*8))(r31)
	ld	r5,(SIGCONTEXT_GP_REGS+(PT_R5*8))(r31)
	ld	r6,(SIGCONTEXT_GP_REGS+(PT_R6*8))(r31)
	ld	r7,(SIGCONTEXT_GP_REGS+(PT_R7*8))(r31)
	ld	r8,(SIGCONTEXT_GP_REGS+(PT_R8*8))(r31)
	ld	r9,(SIGCONTEXT_GP_REGS+(PT_R9*8))(r31)
	mtcr	r0
	ld	r10,(SIGCONTEXT_GP_REGS+(PT_R10*8))(r31)
	ld	r11,(SIGCONTEXT_GP_REGS+(PT_R11*8))(r31)
	ld	r12,(SIGCONTEXT_GP_REGS+(PT_R12*8))(r31)
	/* Don't reload the thread ID or TLS pointer (r13).  */
	ld	r14,(SIGCONTEXT_GP_REGS+(PT_R14*8))(r31)
	ld	r15,(SIGCONTEXT_GP_REGS+(PT_R15*8))(r31)
	ld	r16,(SIGCONTEXT_GP_REGS+(PT_R16*8))(r31)
	ld	r17,(SIGCONTEXT_GP_REGS+(PT_R17*8))(r31)
	ld	r18,(SIGCONTEXT_GP_REGS+(PT_R18*8))(r31)
	ld	r19,(SIGCONTEXT_GP_REGS+(PT_R19*8))(r31)
	ld	r20,(SIGCONTEXT_GP_REGS+(PT_R20*8))(r31)
	ld	r21,(SIGCONTEXT_GP_REGS+(PT_R21*8))(r31)
	ld	r22,(SIGCONTEXT_GP_REGS+(PT_R22*8))(r31)
	ld	r23,(SIGCONTEXT_GP_REGS+(PT_R23*8))(r31)
	ld	r24,(SIGCONTEXT_GP_REGS+(PT_R24*8))(r31)
	ld	r25,(SIGCONTEXT_GP_REGS+(PT_R25*8))(r31)
	ld	r26,(SIGCONTEXT_GP_REGS+(PT_R26*8))(r31)
	ld	r27,(SIGCONTEXT_GP_REGS+(PT_R27*8))(r31)
	ld	r28,(SIGCONTEXT_GP_REGS+(PT_R28*8))(r31)
	ld	r29,(SIGCONTEXT_GP_REGS+(PT_R29*8))(r31)
	ld	r30,(SIGCONTEXT_GP_REGS+(PT_R30*8))(r31)

	/* Now we branch to the "Next Instruction Pointer" from the saved
	   context.  With the powerpc64 instruction set there is no good way to
	   do this (from user state) without clobbering either the LR or CTR.
	   The makecontext and swapcontext functions depend on the callers
	   LR being preserved so we use the CTR.  */
	ld	r0,(SIGCONTEXT_GP_REGS+(PT_NIP*8))(r31)
	mtctr	r0
	ld	r0,(SIGCONTEXT_GP_REGS+(PT_R0*8))(r31)
	/* r31 is loaded last since it is the base register for every
	   context access above.  */
	ld	r31,(SIGCONTEXT_GP_REGS+(PT_R31*8))(r31)
	bctr

	/* Re-establish FDE for the rest of the actual setcontext routine.  */
	cfi_startproc
	cfi_offset (lr, FRAME_LR_SAVE)
	cfi_adjust_cfa_offset (128)

L(error_exit):
	/* __sigprocmask failed; r3 already holds its return value.
	   Pop our 128-byte frame and restore LR and r31.  */
	ld	r0,128+FRAME_LR_SAVE(r1)
	addi	r1,r1,128
	mtlr	r0
	ld	r31,-8(r1)
	blr

PSEUDO_END(__setcontext)
453 | |
454 | versioned_symbol (libc, __setcontext, setcontext, GLIBC_2_3_4) |
455 | |