1/* setjmp for PowerPC.
2 Copyright (C) 1995-2024 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
18
19#include <sysdep.h>
20#include <pointer_guard.h>
21#include <stap-probe.h>
22#define _ASM
23#ifdef __NO_VMX__
24# include <novmxsetjmp.h>
25#else
26# include <jmpbuf-offsets.h>
27#endif
28
	.machine	"altivec"

/* int __sigsetjmp (jmp_buf env, int savemask)

   Save the callee-saved context into the jmp_buf pointed to by r3:
   the stack pointer (r1), LR, CR, r14-r31, fp14-fp31 and — when the
   hwcap word says AltiVec is present — VRSAVE and v20-v31.  Then
   tail-branch to __sigjmp_save to handle the signal mask; r3 (env)
   and r4 (savemask) are deliberately left untouched so that call sees
   the original arguments.  This is the 32-bit ABI, so every jmp_buf
   slot is addressed as (JB_xxx * 4)(r3).  */
ENTRY (__sigsetjmp_symbol)

#ifdef PTR_MANGLE
	/* Store the stack pointer mangled (pointer guard); r6 is scratch
	   for the mangling computation.  */
	mr   r5,r1
	PTR_MANGLE(r5, r6)
	stw  r5,(JB_GPR1*4)(3)
#else
	stw  r1,(JB_GPR1*4)(3)
#endif
	mflr r0
	/* setjmp probe expects longjmp first argument (4@3), second argument
	   (-4@4), and target address (4@0), respectively.  */
	LIBC_PROBE (setjmp, 3, 4@3, -4@4, 4@0)
	/* The GPR and FPR stores below are interleaved pairwise (stw then
	   stfd) rather than grouped, presumably to overlap the integer and
	   FP store pipelines.  */
	stw  r14,((JB_GPRS+0)*4)(3)
	stfd fp14,((JB_FPRS+0*2)*4)(3)
#ifdef PTR_MANGLE
	/* Mangle the saved return address too (same guard, r6 scratch).  */
	PTR_MANGLE2 (r0, r6)
#endif
	stw  r0,(JB_LR*4)(3)
	stw  r15,((JB_GPRS+1)*4)(3)
	stfd fp15,((JB_FPRS+1*2)*4)(3)
	/* r0 is free again now that LR has been stored; reuse it for CR.  */
	mfcr r0
	stw  r16,((JB_GPRS+2)*4)(3)
	stfd fp16,((JB_FPRS+2*2)*4)(3)
	stw  r0,(JB_CR*4)(3)
	stw  r17,((JB_GPRS+3)*4)(3)
	stfd fp17,((JB_FPRS+3*2)*4)(3)
	stw  r18,((JB_GPRS+4)*4)(3)
	stfd fp18,((JB_FPRS+4*2)*4)(3)
	stw  r19,((JB_GPRS+5)*4)(3)
	stfd fp19,((JB_FPRS+5*2)*4)(3)
	stw  r20,((JB_GPRS+6)*4)(3)
	stfd fp20,((JB_FPRS+6*2)*4)(3)
	stw  r21,((JB_GPRS+7)*4)(3)
	stfd fp21,((JB_FPRS+7*2)*4)(3)
	stw  r22,((JB_GPRS+8)*4)(3)
	stfd fp22,((JB_FPRS+8*2)*4)(3)
	stw  r23,((JB_GPRS+9)*4)(3)
	stfd fp23,((JB_FPRS+9*2)*4)(3)
	stw  r24,((JB_GPRS+10)*4)(3)
	stfd fp24,((JB_FPRS+10*2)*4)(3)
	stw  r25,((JB_GPRS+11)*4)(3)
	stfd fp25,((JB_FPRS+11*2)*4)(3)
	stw  r26,((JB_GPRS+12)*4)(3)
	stfd fp26,((JB_FPRS+12*2)*4)(3)
	stw  r27,((JB_GPRS+13)*4)(3)
	stfd fp27,((JB_FPRS+13*2)*4)(3)
	stw  r28,((JB_GPRS+14)*4)(3)
	stfd fp28,((JB_FPRS+14*2)*4)(3)
	stw  r29,((JB_GPRS+15)*4)(3)
	stfd fp29,((JB_FPRS+15*2)*4)(3)
	stw  r30,((JB_GPRS+16)*4)(3)
	stfd fp30,((JB_FPRS+16*2)*4)(3)
	stw  r31,((JB_GPRS+17)*4)(3)
	stfd fp31,((JB_FPRS+17*2)*4)(3)
#ifndef __NO_VMX__
	/* Load the hwcap word into r5 so we can test for AltiVec.  The
	   access pattern depends on how this object is linked.  */
# ifdef PIC
	/* PIC: materialize the GOT address in r5.  LR is clobbered by the
	   GOT-access sequence, so preserve it in r6 around it (the CFI
	   annotations keep unwinding correct meanwhile).  */
	mflr r6
	cfi_register(lr,r6)
	SETUP_GOT_ACCESS(r5,got_label)
	addis r5,r5,_GLOBAL_OFFSET_TABLE_-got_label@ha
	addi r5,r5,_GLOBAL_OFFSET_TABLE_-got_label@l
	mtlr r6
	cfi_same_value (lr)
#  ifdef SHARED
#   if IS_IN (rtld)
	/* Inside ld.so we use the local alias to avoid runtime GOT
	   relocations.  */
	lwz r5,_rtld_local_ro@got(r5)
#   else
	lwz r5,_rtld_global_ro@got(r5)
#   endif
	/* hwcap lives inside the rtld read-only globals; LOWORD selects
	   the 32-bit word of interest.  */
	lwz r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET+LOWORD(r5)
#  else
	lwz r5,_dl_hwcap@got(r5)
	lwz r5,LOWORD(r5)
#  endif
# else
	/* Non-PIC: absolute hi/lo addressing of _dl_hwcap.  */
	lis r6,(_dl_hwcap+LOWORD)@ha
	lwz r5,(_dl_hwcap+LOWORD)@l(r6)
# endif
	/* PPC_FEATURE_HAS_ALTIVEC sits in the upper halfword, hence the
	   shifted mask with andis.  */
	andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
	beq L(no_vmx)
	/* r5 = address of the vector-register save area in the jmp_buf.  */
	la r5,((JB_VRS)*4)(3)
	/* Check 16-byte alignment of the save area; stvx needs it.  */
	andi. r6,r5,0xf
	mfspr r0,VRSAVE
	stw r0,((JB_VRSAVE)*4)(3)
	addi r6,r5,16
	beq+ L(aligned_save_vmx)

	/* Misaligned save area: build permute controls from the low bits
	   of r5 (lvsr for the main store rotation, lvsl for its inverse)
	   and step r6 back to cover the first partial quadword.  */
	lvsr v0,0,r5
	lvsl v1,0,r5
	addi r6,r5,-16

/* Merge prevvr|savevr through shiftvr into tmpvr, store the aligned
   quadword at *savegpr, and advance addgpr by 32.  Callers alternate
   the roles of r5/r6 so the two pointers leapfrog each other.  */
# define save_misaligned_vmx(savevr,prevvr,shiftvr,tmpvr,savegpr,addgpr) \
	addi addgpr,addgpr,32; \
	vperm tmpvr,prevvr,savevr,shiftvr; \
	stvx tmpvr,0,savegpr

	/*
	 * We have to be careful not to corrupt the data below v20 and
	 * above v31. To keep things simple we just rotate both ends in
	 * the opposite direction to our main permute so we can use
	 * the common macro.
	 */

	/* load and rotate data below v20 */
	lvx v2,0,r5
	vperm v2,v2,v2,v1
	save_misaligned_vmx(v20,v2,v0,v3,r5,r6)
	save_misaligned_vmx(v21,v20,v0,v3,r6,r5)
	save_misaligned_vmx(v22,v21,v0,v3,r5,r6)
	save_misaligned_vmx(v23,v22,v0,v3,r6,r5)
	save_misaligned_vmx(v24,v23,v0,v3,r5,r6)
	save_misaligned_vmx(v25,v24,v0,v3,r6,r5)
	save_misaligned_vmx(v26,v25,v0,v3,r5,r6)
	save_misaligned_vmx(v27,v26,v0,v3,r6,r5)
	save_misaligned_vmx(v28,v27,v0,v3,r5,r6)
	save_misaligned_vmx(v29,v28,v0,v3,r6,r5)
	save_misaligned_vmx(v30,v29,v0,v3,r5,r6)
	save_misaligned_vmx(v31,v30,v0,v3,r6,r5)
	/* load and rotate data above v31 */
	lvx v2,0,r6
	vperm v2,v2,v2,v1
	save_misaligned_vmx(v2,v31,v0,v3,r5,r6)

	b L(no_vmx)

L(aligned_save_vmx):
	/* Aligned save area: straightforward stvx of v20-v31.  r5 and r6
	   start 16 bytes apart and each advances by 32, so the stores
	   alternate between the two pointers to cover consecutive slots.  */
	stvx 20,0,r5
	addi r5,r5,32
	stvx 21,0,r6
	addi r6,r6,32
	stvx 22,0,r5
	addi r5,r5,32
	stvx 23,0,r6
	addi r6,r6,32
	stvx 24,0,r5
	addi r5,r5,32
	stvx 25,0,r6
	addi r6,r6,32
	stvx 26,0,r5
	addi r5,r5,32
	stvx 27,0,r6
	addi r6,r6,32
	stvx 28,0,r5
	addi r5,r5,32
	stvx 29,0,r6
	addi r6,r6,32
	stvx 30,0,r5
	stvx 31,0,r6
L(no_vmx):
#endif
	/* Tail-branch: __sigjmp_save (env=r3, savemask=r4) returns the
	   final setjmp result to our caller.  */
	b __sigjmp_save_symbol@local
END (__sigsetjmp_symbol)
185

source code of glibc/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S