1/* Function log1pf vectorized with SSE4.
2 Copyright (C) 2021-2024 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 https://www.gnu.org/licenses/. */
18
/*
 * ALGORITHM DESCRIPTION:
 *
 *    1+x = 2^k*(xh + xl) is computed in high-low parts; xh in [1, 2)
 *    Get short reciprocal approximation Rcp ~ 1/xh
 *    R = (Rcp*xh - 1.0) + Rcp*xl
 *    log1p(x) = k*log(2.0) - log(Rcp) + poly(R)
 *         log(Rcp) is tabulated
 *
 *  NOTE(review): the implementation below does not use a reciprocal
 *  table.  It reduces via iBrkValue = 2/3: the significand of 1+x is
 *  mapped into [2/3, 4/3), r = m - 1 feeds poly(r), and the result is
 *  reconstructed as k*ln2 + r + r^2*poly(r).  The description above
 *  appears inherited from a table-based variant — confirm upstream.
 */
30
31/* Offsets for data table __svml_slog1p_data_internal
32 */
33#define SgnMask 0
34#define sOne 16
35#define sPoly 32
36#define iHiDelta 160
37#define iLoRange 176
38#define iBrkValue 192
39#define iOffExpoMask 208
40#define sLn2 224
41
42#include <sysdep.h>
43
	.section .text.sse4, "ax", @progbits
/* SSE4 4-lane vector log1pf.
   ABI:  SysV AMD64, libmvec vector-call convention.
   In:   xmm0 = 4 packed single-precision x
   Out:  xmm0 = 4 packed single-precision log1p(x)
   Lanes flagged by the range check fall back to scalar log1pf@PLT.
   Register roles in the fast path:
     xmm7  = 1.0f broadcast        xmm1/xmm5 = high/low parts of 1+x
     xmm4  = integer rep of hi     xmm8      = reduced significand
     xmm10 = k (float)             xmm9      = polynomial accumulator
     xmm2/xmm6 = range-check scratch, edx = special-lane bitmask.  */
ENTRY(_ZGVbN4v_log1pf_sse4)
	/* 72 locals + 8 return addr = 80; keeps rsp 16-aligned for the
	   movups spills and the scalar libc call below.  */
	subq	$72, %rsp
	cfi_def_cfa_offset(80)
	movups	sOne+__svml_slog1p_data_internal(%rip), %xmm7

	/* compute 1+x as high, low parts: Fast2Sum needs |a| >= |b|,
	   so order the operands with max/min against 1.0 first.  */
	movaps	%xmm7, %xmm1
	movaps	%xmm7, %xmm5
	maxps	%xmm0, %xmm1            /* hi operand = max(1, x) */
	minps	%xmm0, %xmm5            /* lo operand = min(1, x) */
	movaps	%xmm1, %xmm4

	/* check argument value ranges: bias the integer representation
	   of 1+x so one unsigned compare catches x <= -1, inf and NaN.  */
	movdqu	iHiDelta+__svml_slog1p_data_internal(%rip), %xmm2
	addps	%xmm5, %xmm4            /* xmm4 = rounded sum = hi part */

	/* reduction: compute r, n */
	movdqu	iBrkValue+__svml_slog1p_data_internal(%rip), %xmm3
	paddd	%xmm4, %xmm2            /* biased int rep for range test */
	movdqu	iOffExpoMask+__svml_slog1p_data_internal(%rip), %xmm8
	subps	%xmm4, %xmm1            /* Fast2Sum error term... */
	psubd	%xmm3, %xmm4            /* int(hi) - int(2/3): exponent rel. break */
	addps	%xmm1, %xmm5            /* ...xmm5 = lo part of 1+x */
	pand	%xmm4, %xmm8            /* keep significand bits */
	psrad	$23, %xmm4              /* k = exponent of 1+x (signed) */
	cvtdq2ps %xmm4, %xmm10          /* k as float, for k*ln2 */
	pslld	$23, %xmm4              /* k back in exponent position */
	movaps	%xmm7, %xmm1
	paddd	%xmm3, %xmm8            /* significand mapped into [2/3, 4/3) */
	psubd	%xmm4, %xmm1            /* int(1.0) - (k<<23) == bits of 2^-k */
	mulps	%xmm5, %xmm1            /* scale lo part by 2^-k */

	/* polynomial evaluation */
	subps	%xmm7, %xmm8            /* r_hi = m - 1, in [-1/3, 1/3) */

	/* final reconstruction */
	mulps	sLn2+__svml_slog1p_data_internal(%rip), %xmm10
	addps	%xmm8, %xmm1            /* r = r_hi + 2^-k * lo */
	movups	sPoly+112+__svml_slog1p_data_internal(%rip), %xmm9
	mulps	%xmm1, %xmm9
	movdqu	iLoRange+__svml_slog1p_data_internal(%rip), %xmm6
	pcmpgtd	%xmm2, %xmm6            /* lane = -1 if biased rep < iLoRange */
	addps	sPoly+96+__svml_slog1p_data_internal(%rip), %xmm9

	/* combine and get argument value range mask */
	movmskps %xmm6, %edx            /* edx bit i set => lane i special */
	movups	SgnMask+__svml_slog1p_data_internal(%rip), %xmm11
	mulps	%xmm1, %xmm9
	andnps	%xmm0, %xmm11           /* xmm11 = sign bit of x (preserves -0) */
	/* Horner evaluation of degree-7 poly in r, P7 down to P0.  */
	addps	sPoly+80+__svml_slog1p_data_internal(%rip), %xmm9
	mulps	%xmm1, %xmm9
	addps	sPoly+64+__svml_slog1p_data_internal(%rip), %xmm9
	mulps	%xmm1, %xmm9
	addps	sPoly+48+__svml_slog1p_data_internal(%rip), %xmm9
	mulps	%xmm1, %xmm9
	addps	sPoly+32+__svml_slog1p_data_internal(%rip), %xmm9
	mulps	%xmm1, %xmm9
	addps	sPoly+16+__svml_slog1p_data_internal(%rip), %xmm9
	mulps	%xmm1, %xmm9
	addps	sPoly+__svml_slog1p_data_internal(%rip), %xmm9
	mulps	%xmm1, %xmm9
	mulps	%xmm1, %xmm9            /* r^2 * poly(r) */
	addps	%xmm9, %xmm1            /* r + r^2*poly(r) */
	addps	%xmm10, %xmm1           /* + k*ln2 */
	orps	%xmm11, %xmm1           /* re-attach input sign (for +-0) */
	testl	%edx, %edx

	/* Go to special inputs processing branch */
	jne	L(SPECIAL_VALUES_BRANCH)
	# LOE rbx rbp r12 r13 r14 r15 edx xmm0 xmm1

	/* Restore registers
	 * and exit the function
	 */

L(EXIT):
	movaps	%xmm1, %xmm0
	addq	$72, %rsp
	cfi_def_cfa_offset(8)
	ret
	cfi_def_cfa_offset(80)

	/* Branch to process
	 * special inputs
	 */

L(SPECIAL_VALUES_BRANCH):
	/* Spill original input and fast-path result; flagged lanes of the
	   result at 48(%rsp) are overwritten by scalar calls below.  */
	movups	%xmm0, 32(%rsp)
	movups	%xmm1, 48(%rsp)
	# LOE rbx rbp r12 r13 r14 r15 edx

	xorl	%eax, %eax
	movq	%r12, 16(%rsp)
	cfi_offset(12, -64)
	movl	%eax, %r12d             /* r12d = lane index, starts at 0 */
	movq	%r13, 8(%rsp)
	cfi_offset(13, -72)
	movl	%edx, %r13d             /* r13d = special-lane bitmask */
	movq	%r14, (%rsp)
	cfi_offset(14, -80)
	# LOE rbx rbp r15 r12d r13d

	/* Range mask
	 * bits check
	 */

L(RANGEMASK_CHECK):
	btl	%r12d, %r13d            /* CF = mask bit for lane r12d */

	/* Call scalar math function */
	jc	L(SCALAR_MATH_CALL)
	# LOE rbx rbp r15 r12d r13d

	/* Special inputs
	 * processing loop
	 */

L(SPECIAL_VALUES_LOOP):
	incl	%r12d
	cmpl	$4, %r12d               /* 4 lanes in an SSE vector */

	/* Check bits in range mask */
	jl	L(RANGEMASK_CHECK)
	# LOE rbx rbp r15 r12d r13d

	/* All lanes done: restore callee-saved regs and patched result.  */
	movq	16(%rsp), %r12
	cfi_restore(12)
	movq	8(%rsp), %r13
	cfi_restore(13)
	movq	(%rsp), %r14
	cfi_restore(14)
	movups	48(%rsp), %xmm1

	/* Go to exit */
	jmp	L(EXIT)
	cfi_offset(12, -64)
	cfi_offset(13, -72)
	cfi_offset(14, -80)
	# LOE rbx rbp r12 r13 r14 r15 xmm1

	/* Scalar math function call
	 * to process special input
	 */

L(SCALAR_MATH_CALL):
	movl	%r12d, %r14d            /* r14 survives the PLT call */
	movss	32(%rsp, %r14, 4), %xmm0  /* reload lane's original x */
	call	log1pf@PLT
	# LOE rbx rbp r14 r15 r12d r13d xmm0

	movss	%xmm0, 48(%rsp, %r14, 4)  /* patch lane in spilled result */

	/* Process special inputs in loop */
	jmp	L(SPECIAL_VALUES_LOOP)
	# LOE rbx rbp r15 r12d r13d
END(_ZGVbN4v_log1pf_sse4)
201
	.section .rodata, "a"
	.align	16

/* The typedef below is never compiled (the guard macro is not defined
   anywhere in glibc); it only documents the table layout that the
   offset #defines at the top of the file index into.  */
#ifdef __svml_slog1p_data_internal_typedef
typedef unsigned int VUINT32;
typedef struct {
	__declspec(align(16)) VUINT32 SgnMask[4][1];
	__declspec(align(16)) VUINT32 sOne[4][1];
	__declspec(align(16)) VUINT32 sPoly[8][4][1];
	__declspec(align(16)) VUINT32 iHiDelta[4][1];
	__declspec(align(16)) VUINT32 iLoRange[4][1];
	__declspec(align(16)) VUINT32 iBrkValue[4][1];
	__declspec(align(16)) VUINT32 iOffExpoMask[4][1];
	__declspec(align(16)) VUINT32 sLn2[4][1];
} __svml_slog1p_data_internal;
#endif
__svml_slog1p_data_internal:
	/* SgnMask: all bits but the sign bit; its complement (via andnps)
	   isolates the sign of x.  */
	.long	0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff
	/* sOne = SP 1.0 */
	.align	16
	.long	0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000
	/* sPoly[] = SP polynomial, Horner order P0..P7 */
	.align	16
	.long	0xbf000000, 0xbf000000, 0xbf000000, 0xbf000000	/* -5.0000000000000000000000000e-01 P0 */
	.long	0x3eaaaa94, 0x3eaaaa94, 0x3eaaaa94, 0x3eaaaa94	/* 3.3333265781402587890625000e-01 P1 */
	.long	0xbe80058e, 0xbe80058e, 0xbe80058e, 0xbe80058e	/* -2.5004237890243530273437500e-01 P2 */
	.long	0x3e4ce190, 0x3e4ce190, 0x3e4ce190, 0x3e4ce190	/* 2.0007920265197753906250000e-01 P3 */
	.long	0xbe28ad37, 0xbe28ad37, 0xbe28ad37, 0xbe28ad37	/* -1.6472326219081878662109375e-01 P4 */
	.long	0x3e0fcb12, 0x3e0fcb12, 0x3e0fcb12, 0x3e0fcb12	/* 1.4042308926582336425781250e-01 P5 */
	.long	0xbe1ad9e3, 0xbe1ad9e3, 0xbe1ad9e3, 0xbe1ad9e3	/* -1.5122179687023162841796875e-01 P6 */
	.long	0x3e0d84ed, 0x3e0d84ed, 0x3e0d84ed, 0x3e0d84ed	/* 1.3820238411426544189453125e-01 P7 */
	/* iHiDelta = SP 80000000-7f000000: bias added to the integer rep
	   of 1+x for the single-compare range check.  */
	.align	16
	.long	0x01000000, 0x01000000, 0x01000000, 0x01000000
	/* iLoRange = SP 00800000+iHiDelta: biased reps below this are
	   special (x <= -1, inf, NaN, denormal results).  */
	.align	16
	.long	0x01800000, 0x01800000, 0x01800000, 0x01800000
	/* iBrkValue = SP 2/3: break point of the significand reduction,
	   so the reduced argument lands in [-1/3, 1/3).  */
	.align	16
	.long	0x3f2aaaab, 0x3f2aaaab, 0x3f2aaaab, 0x3f2aaaab
	/* iOffExpoMask = SP significand mask (low 23 bits) */
	.align	16
	.long	0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff
	/* sLn2 = SP ln(2) */
	.align	16
	.long	0x3f317218, 0x3f317218, 0x3f317218, 0x3f317218
	.align	16
	.type	__svml_slog1p_data_internal, @object
	.size	__svml_slog1p_data_internal, .-__svml_slog1p_data_internal
252

source code of glibc/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S