/* Function exp10f vectorized with SSE4.
   Copyright (C) 2021-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   https://www.gnu.org/licenses/.  */

/*
 * ALGORITHM DESCRIPTION:
 *
 *     exp10(x) = 2^(x/log10(2)) = 2^n * (1 + T[j]) * (1 + P(y))
 *     where
 *          x = m*log10(2)/K + y,  y in [-log10(2)/K..log10(2)/K]
 *          m = n*K + j,           m, n, j - signed integers, j in [-K/2..K/2]
 *
 *          values of 2^(j/K) are tabulated
 *
 *          P(y) is a minimax polynomial approximation of exp10(y)-1
 *          on the small interval [-log10(2)/K..log10(2)/K]
 *
 *  Special cases:
 *
 *   exp10(NaN)  = NaN
 *   exp10(+INF) = +INF
 *   exp10(-INF) = 0
 *   exp10(x)    = 1 for subnormal x
 *   For IEEE float:
 *    if x >  38.5318412780761720 then exp10f(x) overflows
 *    if x < -45.4555282592773440 then exp10f(x) underflows
 *
 */
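
/* A minimal scalar C sketch of this scheme with K = 32 (illustrative only:
   the constants are rounded, the helper below is not part of glibc, and the
   Shifter/bit-manipulation tricks used by the assembly are replaced by
   ordinary libm calls):

     #include <math.h>

     static float
     exp10f_sketch (float x)
     {
       float m = roundf (x * 32.0f * 3.3219280948f); // x*K/log10(2)
       int mi = (int) m;
       int j = mi & 31;                // table index (_iIndexMask)
       int n = (mi - j) / 32;          // final scale is 2^n
       // Reduced argument y = x - m*log10(2)/K; the assembly subtracts a
       // hi/lo split of this constant to keep the reduction accurate.
       float y = x - m * 0.0094071873f;
       // Degree-2 approximation of 10^y - 1 (cf. _sPC1/_sPC2; the tiny
       // _sPC0 correction term is omitted here).
       float poly = y * (2.302585f + y * 2.650949f);
       float t = exp2f ((float) j / 32.0f);   // tabulated in _sT
       return ldexpf (t + t * poly, n);       // 2^n * (Tj + Tj*poly)
     }
*/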

/* Offsets for data table __svml_sexp10_data_internal
 */
#define _sT                           0
#define _sLg2_10                      128
#define _sShifter                     144
#define _sInvLg2_10hi                 160
#define _sInvLg2_10lo                 176
#define _sPC0                         192
#define _sPC1                         208
#define _sPC2                         224
#define _iIndexMask                   240
#define _iAbsMask                     256
#define _iDomainRange                 272
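
/* Each field of the data table occupies one 16-byte slot (an xmm-wide vector
   of four identical 32-bit values); _sT is the 32-entry table of 2^(j/32)
   (128 bytes), hence the first broadcast constant starts at offset 128.  */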

#include <sysdep.h>

        .section .text.sse4, "ax", @progbits
ENTRY(_ZGVbN4v_exp10f_sse4)
        subq      $72, %rsp
        cfi_def_cfa_offset(80)
        movaps    %xmm0, %xmm4

        /* Load argument: xmm2 = x*(K/log10(2)) + Shifter, K = 32 */
        movups    _sLg2_10+__svml_sexp10_data_internal(%rip), %xmm2
        lea       __svml_sexp10_data_internal(%rip), %r8
        mulps     %xmm4, %xmm2
        movups    _sShifter+__svml_sexp10_data_internal(%rip), %xmm5

        /* R = x - m*log10(2)/K, using a hi/lo split of log10(2)/K */
        movups    _sInvLg2_10hi+__svml_sexp10_data_internal(%rip), %xmm14
        addps     %xmm5, %xmm2
        movaps    %xmm2, %xmm1
        movups    _sInvLg2_10lo+__svml_sexp10_data_internal(%rip), %xmm15
        subps     %xmm5, %xmm1
        mulps     %xmm1, %xmm14
        movaps    %xmm4, %xmm5
        mulps     %xmm1, %xmm15
        subps     %xmm14, %xmm5

        /*
         * Polynomial
         * exp10 = 2^N*(Tj+Tj*poly), poly(R) ~= 10^R-1
         * evaluated as a0+a1*R+a2*R^2; the leading 1 is applied later
         * via Tj+Tj*poly
         */
        movups    _sPC2+__svml_sexp10_data_internal(%rip), %xmm1
        subps     %xmm15, %xmm5
        mulps     %xmm5, %xmm1
        movdqu    _iIndexMask+__svml_sexp10_data_internal(%rip), %xmm3

        /* Index and lookup */
        movdqa    %xmm3, %xmm10

        /* remove index bits */
        pandn     %xmm2, %xmm3
        pand      %xmm2, %xmm10

        /* 2^N */
        pslld     $18, %xmm3

        /* iIndex *= sizeof(S); */
        pslld     $2, %xmm10
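
        /* How index and exponent are recovered: adding the Shifter
           (1.5*2^23) to x*K/log10(2) rounds it to an integer m held in the
           low mantissa bits of xmm2.  The low 5 bits (_iIndexMask = 0x1f)
           give the table index j; clearing them leaves 32*n plus the
           Shifter's fixed bits, and shifting left by 18 drops the Shifter
           bits while landing n in the float exponent field, ready for the
           integer add below that scales the result by 2^n.  */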
        addps     _sPC1+__svml_sexp10_data_internal(%rip), %xmm1
        movd      %xmm10, %edx
        pshufd    $1, %xmm10, %xmm7
        pshufd    $2, %xmm10, %xmm9
        pshufd    $3, %xmm10, %xmm11
        movd      %xmm7, %ecx
        movd      %xmm9, %esi
        movd      %xmm11, %edi

        /* Check for overflow/underflow */
        movdqu    _iAbsMask+__svml_sexp10_data_internal(%rip), %xmm6
        pand      %xmm4, %xmm6
        mulps     %xmm1, %xmm5
        movslq    %edx, %rdx
        addps     _sPC0+__svml_sexp10_data_internal(%rip), %xmm5
        movslq    %ecx, %rcx
        movslq    %esi, %rsi
        movslq    %edi, %rdi
        movd      (%r8, %rdx), %xmm0
        movd      (%r8, %rcx), %xmm8
        movd      (%r8, %rsi), %xmm13
        movd      (%r8, %rdi), %xmm12
        punpckldq %xmm8, %xmm0
        punpckldq %xmm12, %xmm13
        punpcklqdq %xmm13, %xmm0

        /* Tj + Tj*poly */
        mulps     %xmm0, %xmm5
        pcmpgtd   _iDomainRange+__svml_sexp10_data_internal(%rip), %xmm6
        addps     %xmm5, %xmm0
        movmskps  %xmm6, %eax

        /* quick mul 2^N */
        paddd     %xmm3, %xmm0
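
        /* Worked example (approximate), lane x = 1.0f: m = round(32*log2(10))
           = 106, so j = 106 & 31 = 10 and n = 3; the table gives 2^(10/32)
           ~= 1.2419, the reduced argument is y = 1 - 106*log10(2)/32
           ~= 0.002838 with 10^y ~= 1.0066, and the paddd above applies 2^3:
           8 * 1.2419 * 1.0066 ~= 10.0 = exp10f(1.0f).  */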

        /* Finish */
        testl     %eax, %eax

        /* Go to special inputs processing branch */
        jne       L(SPECIAL_VALUES_BRANCH)
        # LOE rbx rbp r12 r13 r14 r15 eax xmm0 xmm4

        /* Restore registers
         * and exit the function
         */

L(EXIT):
        addq      $72, %rsp
        cfi_def_cfa_offset(8)
        ret
        cfi_def_cfa_offset(80)

        /* Branch to process
         * special inputs
         */

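        /* In effect (illustrative C model; names are ad hoc):
         *   for (i = 0; i < 4; i++)
         *     if (mask & (1 << i))
         *       res[i] = exp10f (arg[i]);
         * where mask is the movmskps result and arg/res are the two vectors
         * spilled to 32(%rsp) and 48(%rsp) below.  */
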
L(SPECIAL_VALUES_BRANCH):
        movups    %xmm4, 32(%rsp)
        movups    %xmm0, 48(%rsp)
        # LOE rbx rbp r12 r13 r14 r15 eax

        xorl      %edx, %edx
        movq      %r12, 16(%rsp)
        cfi_offset(12, -64)
        movl      %edx, %r12d
        movq      %r13, 8(%rsp)
        cfi_offset(13, -72)
        movl      %eax, %r13d
        movq      %r14, (%rsp)
        cfi_offset(14, -80)
        # LOE rbx rbp r15 r12d r13d

        /* Range mask
         * bits check
         */

L(RANGEMASK_CHECK):
        btl       %r12d, %r13d

        /* Call scalar math function */
        jc        L(SCALAR_MATH_CALL)
        # LOE rbx rbp r15 r12d r13d

        /* Special inputs
         * processing loop
         */

L(SPECIAL_VALUES_LOOP):
        incl      %r12d
        cmpl      $4, %r12d

        /* Check bits in range mask */
        jl        L(RANGEMASK_CHECK)
        # LOE rbx rbp r15 r12d r13d

        movq      16(%rsp), %r12
        cfi_restore(12)
        movq      8(%rsp), %r13
        cfi_restore(13)
        movq      (%rsp), %r14
        cfi_restore(14)
        movups    48(%rsp), %xmm0

        /* Go to exit */
        jmp       L(EXIT)
        cfi_offset(12, -64)
        cfi_offset(13, -72)
        cfi_offset(14, -80)
        # LOE rbx rbp r12 r13 r14 r15 xmm0

        /* Scalar math function call
         * to process special input
         */

L(SCALAR_MATH_CALL):
        movl      %r12d, %r14d
        movss     32(%rsp, %r14, 4), %xmm0
        call      exp10f@PLT
        # LOE rbx rbp r14 r15 r12d r13d xmm0

        movss     %xmm0, 48(%rsp, %r14, 4)

        /* Process special inputs in loop */
        jmp       L(SPECIAL_VALUES_LOOP)
        # LOE rbx rbp r15 r12d r13d
END(_ZGVbN4v_exp10f_sse4)

        .section .rodata, "a"
        .align 16

#ifdef __svml_sexp10_data_internal_typedef
typedef unsigned int VUINT32;
typedef struct {
        __declspec(align(16)) VUINT32 _sT[(1<<5)][1];
        __declspec(align(16)) VUINT32 _sLg2_10[4][1];
        __declspec(align(16)) VUINT32 _sShifter[4][1];
        __declspec(align(16)) VUINT32 _sInvLg2_10hi[4][1];
        __declspec(align(16)) VUINT32 _sInvLg2_10lo[4][1];
        __declspec(align(16)) VUINT32 _sPC0[4][1];
        __declspec(align(16)) VUINT32 _sPC1[4][1];
        __declspec(align(16)) VUINT32 _sPC2[4][1];
        __declspec(align(16)) VUINT32 _iIndexMask[4][1];
        __declspec(align(16)) VUINT32 _iAbsMask[4][1];
        __declspec(align(16)) VUINT32 _iDomainRange[4][1];
} __svml_sexp10_data_internal;
#endif
__svml_sexp10_data_internal:
        /* _sT */
        .long 0x3f800000 // 2^( 0 /32 )
        .long 0x3f82cd87 // 2^( 1 /32 )
        .long 0x3f85aac3 // 2^( 2 /32 )
        .long 0x3f88980f // 2^( 3 /32 )
        .long 0x3f8b95c2 // 2^( 4 /32 )
        .long 0x3f8ea43a // 2^( 5 /32 )
        .long 0x3f91c3d3 // 2^( 6 /32 )
        .long 0x3f94f4f0 // 2^( 7 /32 )
        .long 0x3f9837f0 // 2^( 8 /32 )
        .long 0x3f9b8d3a // 2^( 9 /32 )
        .long 0x3f9ef532 // 2^( 10/32 )
        .long 0x3fa27043 // 2^( 11/32 )
        .long 0x3fa5fed7 // 2^( 12/32 )
        .long 0x3fa9a15b // 2^( 13/32 )
        .long 0x3fad583f // 2^( 14/32 )
        .long 0x3fb123f6 // 2^( 15/32 )
        .long 0x3fb504f3 // 2^( 16/32 )
        .long 0x3fb8fbaf // 2^( 17/32 )
        .long 0x3fbd08a4 // 2^( 18/32 )
        .long 0x3fc12c4d // 2^( 19/32 )
        .long 0x3fc5672a // 2^( 20/32 )
        .long 0x3fc9b9be // 2^( 21/32 )
        .long 0x3fce248c // 2^( 22/32 )
        .long 0x3fd2a81e // 2^( 23/32 )
        .long 0x3fd744fd // 2^( 24/32 )
        .long 0x3fdbfbb8 // 2^( 25/32 )
        .long 0x3fe0ccdf // 2^( 26/32 )
        .long 0x3fe5b907 // 2^( 27/32 )
        .long 0x3feac0c7 // 2^( 28/32 )
        .long 0x3fefe4ba // 2^( 29/32 )
        .long 0x3ff5257d // 2^( 30/32 )
        .long 0x3ffa83b3 // 2^( 31/32 )
        .align 16
        .long 0x42d49a78, 0x42d49a78, 0x42d49a78, 0x42d49a78 /* _sLg2_10*2^K */
        .align 16
        .long 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000 /* _sShifter = 1.5*2^23 */
        .align 16
        .long 0x3c1a2000, 0x3c1a2000, 0x3c1a2000, 0x3c1a2000 /* _sInvLg2_10hi/2^K hi (24-K-7) bits */
        .align 16
        .long 0x341a84fc, 0x341a84fc, 0x341a84fc, 0x341a84fc /* _sInvLg2_10lo/2^K lo bits */
        // otherwise exp10(0) won't produce exact 1.0
        .align 16
        .long 0x2fecc868, 0x2fecc868, 0x2fecc868, 0x2fecc868 /* _sPC0 */
        .align 16
        .long 0x40135e1b, 0x40135e1b, 0x40135e1b, 0x40135e1b /* _sPC1 */
        .align 16
        .long 0x4029a8d2, 0x4029a8d2, 0x4029a8d2, 0x4029a8d2 /* _sPC2 */
        .align 16
        .long 0x0000001f, 0x0000001f, 0x0000001f, 0x0000001f /* _iIndexMask = 2^K-1 */
        // common
        .align 16
        .long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff /* _iAbsMask */
        .align 16
        .long 0x4217b818, 0x4217b818, 0x4217b818, 0x4217b818 /* _iDomainRange=-log10(max_denormal=0x007fffff) RZ */
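        /* 0x4217b818 ~= 37.93; lanes whose |x| (masked with _iAbsMask)
           exceeds this bound, including NaN/Inf bit patterns, are flagged
           by the pcmpgtd above and handled by the scalar exp10f call.  */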
        .align 16
        .type __svml_sexp10_data_internal, @object
        .size __svml_sexp10_data_internal, .-__svml_sexp10_data_internal