/* Function powf vectorized with AVX2.
   Copyright (C) 2014-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include "svml_s_powf_data.h"

	.section .text.avx2, "ax", @progbits
ENTRY(_ZGVdN8vv_powf_avx2)
/*
   ALGORITHM DESCRIPTION:

   We use the following identity: pow(x,y) = 2^(y * log2(x)).

   1) log2(x) calculation
      Here we use the following formula.
      Let |x| = 2^k1 * X1, where k1 is an integer, 1 <= X1 < 2.
      Let C ~= 1/ln(2),
      Rcp1 ~= 1/X1, X2 = Rcp1*X1,
      Rcp2 ~= 1/X2, X3 = Rcp2*X2,
      Rcp3 ~= 1/X3, Rcp3C ~= C/X3.
      Then
        log2|x| = k1 + log2(1/Rcp1) + log2(1/Rcp2) + log2(C/Rcp3C) +
                  log2(X1*Rcp1*Rcp2*Rcp3C/C),
      where X1*Rcp1*Rcp2*Rcp3C = C*(1+q), and q is very small.

      The values of Rcp1, log2(1/Rcp1), Rcp2, log2(1/Rcp2),
      Rcp3C, log2(C/Rcp3C) are taken from tables.
      The values of Rcp1, Rcp2, Rcp3C are chosen so that
      RcpC = Rcp1*Rcp2*Rcp3C is exactly representable in the target
      precision.

        log2(X1*Rcp1*Rcp2*Rcp3C/C) = log2(1+q) = ln(1+q)/ln2 =
          = 1/(ln2)*q - 1/(2ln2)*q^2 + 1/(3ln2)*q^3 - ... =
          = 1/(C*ln2)*cq - 1/(2*C^2*ln2)*cq^2 + 1/(3*C^3*ln2)*cq^3 - ... =
          = (1 + a1)*cq + a2*cq^2 + a3*cq^3 + ...,
      where
        cq = X1*Rcp1*Rcp2*Rcp3C - C,
        a1 = 1/(C*ln(2)) - 1 is small,
        a2 = 1/(2*C^2*ln2),
        a3 = 1/(3*C^3*ln2),
        ...
      The log2 result is split into three parts: HH+HL+HLL.
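
      As illustration only, a minimal scalar C sketch of this step (not
      the vector code itself): the real implementation reads Rcp1, Rcp2,
      Rcp3C and their log2 values from tables, while this sketch fakes
      the tables by rounding live reciprocals to 10 mantissa bits, and
      keeps only the first polynomial term.  All names are hypothetical.

        #include <math.h>

        static double round_to_bits (double r, int bits)
        {
          // Keep only `bits` mantissa bits of r, as the tables do.
          int e;
          double m = frexp (r, &e);   // r = m * 2^e, 0.5 <= m < 1
          m = nearbyint (ldexp (m, bits)) * ldexp (1.0, -bits);
          return ldexp (m, e);
        }

        static double log2_via_reciprocals (double x)
        {
          // Assumes x is finite and nonzero.
          int k1;
          double x1 = 2.0 * frexp (fabs (x), &k1);
          k1 -= 1;                                  // |x| = 2^k1*x1, 1<=x1<2
          const double c = 1.0 / M_LN2;             // C ~= 1/ln(2)
          double rcp1 = round_to_bits (1.0 / x1, 10);
          double x2 = rcp1 * x1;
          double rcp2 = round_to_bits (1.0 / x2, 10);
          double x3 = rcp2 * x2;
          double rcp3c = round_to_bits (c / x3, 10);
          double cq = x1 * rcp1 * rcp2 * rcp3c - c; // cq = C*q, tiny
          double poly = cq / (c * M_LN2);           // 1/(C*ln2)*cq term
          return k1 - log2 (rcp1) - log2 (rcp2) - log2 (rcp3c / c) + poly;
        }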

   2) Calculation of y*log2(x)
      Split y into YHi+YLo.
      Get the high part PH and the medium part PL of y*log2|x|.
      Get the low part PLL of y*log2|x|.
      Now we have PH+PL+PLL ~= y*log2|x|.
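
      As a sketch of the multiword product, assume log2|x| is already
      available as a high part lh and a low part ll (illustrative names,
      not ones used below); fma() recovers the exact rounding error of
      the high product, standing in for the YHi+YLo splitting:

        #include <math.h>

        static void ylog2x_split (double y, double lh, double ll,
                                  double *ph, double *pl)
        {
          *ph = y * lh;                    // PH: high part of y*log2|x|
          double err = fma (y, lh, -*ph);  // exact error of y*lh
          *pl = err + y * ll;              // PL (with PLL folded in)
        }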

   3) Calculation of 2^(y*log2(x))
      Let us represent PH+PL+PLL in the form N + j/2^expK + Z,
      where expK = 7 in this implementation, N and j are integers,
      0 <= j <= 2^expK - 1, |Z| < 2^(-expK-1).  Hence
        2^(PH+PL+PLL) ~= 2^N * 2^(j/2^expK) * 2^Z,
      where 2^(j/2^expK) is stored in a table, and
        2^Z ~= 1 + B1*Z + B2*Z^2 + ... + B5*Z^5.
      We compute 2^(PH+PL+PLL) as follows:
        Break PH into PHH + PHL, where PHH = N + j/2^expK.
        Z = PHL + PL + PLL
        Exp2Poly = B1*Z + B2*Z^2 + ... + B5*Z^5
        Get 2^(j/2^expK) from the table in the form THI+TLO.
      Now we have 2^(PH+PL+PLL) ~= 2^N * (THI + TLO) * (1 + Exp2Poly).
      Get the significand of 2^(PH+PL+PLL) in the form ResHi+ResLo:
        ResHi := THI
        ResLo := THI * Exp2Poly + TLO
      Get the exponent ERes of the result:
        Res := ResHi + ResLo;
        Result := ex(Res) + N.  */
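
/* As illustration only, a scalar C sketch of step 3 with expK = 7; the
   THI+TLO table and the degree-5 polynomial are stood in for here by
   exp2() and expm1(), so only the N/j/Z decomposition mirrors the
   vector code below:

     #include <math.h>

     static double exp2_reconstruct (double p)       // p ~= y*log2|x|
     {
       const int expk = 7;
       double scaled = ldexp (p, expk);              // p * 2^expK
       double nj = nearbyint (scaled);               // N*2^expK + j
       double z = ldexp (scaled - nj, -expk);        // |Z| <= 2^(-expK-1)
       long long w = (long long) nj;
       int j = (int) (w & ((1 << expk) - 1));        // low expK bits: j
       int n = (int) (w >> expk);                    // signed N
       double thi = exp2 ((double) j / (1 << expk)); // stand-in for THI+TLO
       double two_z = 1.0 + expm1 (z * M_LN2);       // 2^Z, replaces Exp2Poly
       return ldexp (thi * two_z, n);                // 2^N * THI * 2^Z
     }
   */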

	pushq	%rbp
	cfi_adjust_cfa_offset (8)
	cfi_rel_offset (%rbp, 0)
	movq	%rsp, %rbp
	cfi_def_cfa_register (%rbp)
	andq	$-64, %rsp
	subq	$448, %rsp
	lea	__VPACK_ODD_ind.6357.0.1(%rip), %rcx
	vmovups	%ymm14, 320(%rsp)

/* hi bits */
	lea	__VPACK_ODD_ind.6358.0.1(%rip), %rax
	vmovups	%ymm12, 256(%rsp)
	vmovups	%ymm9, 96(%rsp)
	vmovups	%ymm13, 224(%rsp)
	vmovups	%ymm15, 352(%rsp)
	vmovups	%ymm11, 384(%rsp)
	vmovups	%ymm10, 288(%rsp)
	vmovups	(%rcx), %ymm10
	vmovups	%ymm8, 160(%rsp)
	vmovdqa	%ymm1, %ymm9
	movq	__svml_spow_data@GOTPCREL(%rip), %rdx
	vextractf128 $1, %ymm0, %xmm7
	vcvtps2pd %xmm0, %ymm14
	vcvtps2pd %xmm7, %ymm12
	vpsubd	_NMINNORM(%rdx), %ymm0, %ymm7

/* preserve mantissa, set input exponent to 2^(-10) */
	vandpd	_ExpMask(%rdx), %ymm14, %ymm3
	vandpd	_ExpMask(%rdx), %ymm12, %ymm13

/* exponent bits selection */
	vpsrlq	$20, %ymm12, %ymm12
	vpsrlq	$20, %ymm14, %ymm14
	vextractf128 $1, %ymm9, %xmm2
	vcvtps2pd %xmm9, %ymm1
	vpand	_ABSMASK(%rdx), %ymm9, %ymm8
	vcvtps2pd %xmm2, %ymm6
	vorpd	_Two10(%rdx), %ymm3, %ymm2
	vorpd	_Two10(%rdx), %ymm13, %ymm3

/* reciprocal approximation good to at least 11 bits */
	vcvtpd2ps %ymm2, %xmm5
	vcvtpd2ps %ymm3, %xmm15
	vrcpps	%xmm5, %xmm4
	vrcpps	%xmm15, %xmm11
	vcvtps2pd %xmm4, %ymm13
	vcvtps2pd %xmm11, %ymm4
	vpermps	%ymm12, %ymm10, %ymm11

/* round reciprocal to nearest integer, will have 1+9 mantissa bits */
	vroundpd $0, %ymm13, %ymm12
	vpermps	%ymm14, %ymm10, %ymm5
	vroundpd $0, %ymm4, %ymm14
	vmovupd	_One(%rdx), %ymm4

/* table lookup */
	vpsrlq	$40, %ymm12, %ymm10
	vfmsub213pd %ymm4, %ymm12, %ymm2
	vfmsub213pd %ymm4, %ymm14, %ymm3
	vcmpgt_oqpd _Threshold(%rdx), %ymm12, %ymm12
	vxorpd	%ymm4, %ymm4, %ymm4
	vandpd	_Bias(%rdx), %ymm12, %ymm12

/* biased exponent in DP format */
	vcvtdq2pd %xmm11, %ymm13
	vpcmpeqd %ymm11, %ymm11, %ymm11
	vgatherqpd %ymm11, _Log2Rcp_lookup(%rdx,%ymm10), %ymm4
	vpsrlq	$40, %ymm14, %ymm10
	vcmpgt_oqpd _Threshold(%rdx), %ymm14, %ymm14
	vpcmpeqd %ymm11, %ymm11, %ymm11
	vandpd	_Bias(%rdx), %ymm14, %ymm14
	vcvtdq2pd %xmm5, %ymm15
	vxorpd	%ymm5, %ymm5, %ymm5
	vgatherqpd %ymm11, _Log2Rcp_lookup(%rdx,%ymm10), %ymm5
	vorpd	_Bias1(%rdx), %ymm12, %ymm11
	vorpd	_Bias1(%rdx), %ymm14, %ymm10
	vsubpd	%ymm11, %ymm15, %ymm11
	vsubpd	%ymm10, %ymm13, %ymm14
	vmovupd	_poly_coeff_4(%rdx), %ymm15
	vmovupd	_poly_coeff_3(%rdx), %ymm13
	vmulpd	%ymm3, %ymm3, %ymm10
	vfmadd213pd %ymm15, %ymm3, %ymm13
	vmovdqa	%ymm15, %ymm12
	vfmadd231pd _poly_coeff_3(%rdx), %ymm2, %ymm12
	vmulpd	%ymm2, %ymm2, %ymm15

/* reconstruction */
	vfmadd213pd %ymm3, %ymm10, %ymm13
	vfmadd213pd %ymm2, %ymm15, %ymm12
	vaddpd	%ymm5, %ymm13, %ymm13
	vaddpd	%ymm4, %ymm12, %ymm2
	vfmadd231pd _L2(%rdx), %ymm14, %ymm13
	vfmadd132pd _L2(%rdx), %ymm2, %ymm11
	vmulpd	%ymm6, %ymm13, %ymm2
	vmulpd	%ymm1, %ymm11, %ymm10
	vmulpd	__dbInvLn2(%rdx), %ymm2, %ymm6
	vmulpd	__dbInvLn2(%rdx), %ymm10, %ymm15

/* to round down; if dR is an integer we will get R = 1, which is ok */
	vsubpd	__dbHALF(%rdx), %ymm6, %ymm3
	vsubpd	__dbHALF(%rdx), %ymm15, %ymm1
	vaddpd	__dbShifter(%rdx), %ymm3, %ymm13
	vaddpd	__dbShifter(%rdx), %ymm1, %ymm14
	vsubpd	__dbShifter(%rdx), %ymm13, %ymm12
	vmovups	(%rax), %ymm1
	vsubpd	__dbShifter(%rdx), %ymm14, %ymm11

/* [0..1) */
	vsubpd	%ymm12, %ymm6, %ymm6
	vpermps	%ymm10, %ymm1, %ymm3
	vpermps	%ymm2, %ymm1, %ymm10
	vpcmpgtd _NMAXVAL(%rdx), %ymm7, %ymm4
	vpcmpgtd _INF(%rdx), %ymm8, %ymm1
	vpcmpeqd _NMAXVAL(%rdx), %ymm7, %ymm7
	vpcmpeqd _INF(%rdx), %ymm8, %ymm8
	vpor	%ymm7, %ymm4, %ymm2
	vpor	%ymm8, %ymm1, %ymm1
	vsubpd	%ymm11, %ymm15, %ymm7
	vinsertf128 $1, %xmm10, %ymm3, %ymm10
	vpor	%ymm1, %ymm2, %ymm3

/* iAbsX = iAbsX&iAbsMask */
	vandps	__iAbsMask(%rdx), %ymm10, %ymm10

/* iRangeMask = (iAbsX>iDomainRange) */
	vpcmpgtd __iDomainRange(%rdx), %ymm10, %ymm4
	vpor	%ymm4, %ymm3, %ymm5
	vmulpd	__dbC1(%rdx), %ymm7, %ymm4
	vmovmskps %ymm5, %ecx
	vmulpd	__dbC1(%rdx), %ymm6, %ymm5

/* low K bits */
	vandps	__lbLOWKBITS(%rdx), %ymm14, %ymm6

/* dpP= _dbT+lJ*T_ITEM_GRAN */
	vxorpd	%ymm7, %ymm7, %ymm7
	vpcmpeqd %ymm1, %ymm1, %ymm1
	vandps	__lbLOWKBITS(%rdx), %ymm13, %ymm2
	vxorpd	%ymm10, %ymm10, %ymm10
	vpcmpeqd %ymm3, %ymm3, %ymm3
	vgatherqpd %ymm1, 13952(%rdx,%ymm6,8), %ymm7
	vgatherqpd %ymm3, 13952(%rdx,%ymm2,8), %ymm10
	vpsrlq	$11, %ymm14, %ymm14
	vpsrlq	$11, %ymm13, %ymm13
	vfmadd213pd %ymm7, %ymm4, %ymm7
	vfmadd213pd %ymm10, %ymm5, %ymm10

/* NB : including +/- sign for the exponent!! */
	vpsllq	$52, %ymm14, %ymm8
	vpsllq	$52, %ymm13, %ymm11
	vpaddq	%ymm8, %ymm7, %ymm12
	vpaddq	%ymm11, %ymm10, %ymm1
	vcvtpd2ps %ymm12, %xmm15
	vcvtpd2ps %ymm1, %xmm2
	vinsertf128 $1, %xmm2, %ymm15, %ymm1
	testl	%ecx, %ecx
	jne	.LBL_1_3

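/* Common exit: reload the spilled %ymm registers, move the packed
   result into %ymm0 and return.  */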
.LBL_1_2:
	cfi_remember_state
	vmovups	160(%rsp), %ymm8
	vmovups	96(%rsp), %ymm9
	vmovups	288(%rsp), %ymm10
	vmovups	384(%rsp), %ymm11
	vmovups	256(%rsp), %ymm12
	vmovups	224(%rsp), %ymm13
	vmovups	320(%rsp), %ymm14
	vmovups	352(%rsp), %ymm15
	vmovdqa	%ymm1, %ymm0
	movq	%rbp, %rsp
	cfi_def_cfa_register (%rsp)
	popq	%rbp
	cfi_adjust_cfa_offset (-8)
	cfi_restore (%rbp)
	ret

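/* Special-case path: at least one lane set the range/special-input
   mask accumulated in %ecx.  Spill x, y and the fast-path result,
   then recompute the flagged lanes one at a time with scalar powf.  */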
.LBL_1_3:
	cfi_restore_state
	vmovups	%ymm0, 64(%rsp)
	vmovups	%ymm9, 128(%rsp)
	vmovups	%ymm1, 192(%rsp)
	je	.LBL_1_2

	xorb	%dl, %dl
	xorl	%eax, %eax
	movq	%rsi, 8(%rsp)
	movq	%rdi, (%rsp)
	movq	%r12, 40(%rsp)
	cfi_offset_rel_rsp (12, 40)
	movb	%dl, %r12b
	movq	%r13, 32(%rsp)
	cfi_offset_rel_rsp (13, 32)
	movl	%ecx, %r13d
	movq	%r14, 24(%rsp)
	cfi_offset_rel_rsp (14, 24)
	movl	%eax, %r14d
	movq	%r15, 16(%rsp)
	cfi_offset_rel_rsp (15, 16)
	cfi_remember_state

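/* Scan the special-case mask two bits per iteration; each set bit
   selects a lane whose x/y pair is reloaded from the spill area and
   handed to scalar powf.  */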
.LBL_1_6:
	btl	%r14d, %r13d
	jc	.LBL_1_12

.LBL_1_7:
	lea	1(%r14), %esi
	btl	%esi, %r13d
	jc	.LBL_1_10

.LBL_1_8:
	incb	%r12b
	addl	$2, %r14d
	cmpb	$16, %r12b
	jb	.LBL_1_6

	movq	8(%rsp), %rsi
	movq	(%rsp), %rdi
	movq	40(%rsp), %r12
	cfi_restore (%r12)
	movq	32(%rsp), %r13
	cfi_restore (%r13)
	movq	24(%rsp), %r14
	cfi_restore (%r14)
	movq	16(%rsp), %r15
	cfi_restore (%r15)
	vmovups	192(%rsp), %ymm1
	jmp	.LBL_1_2

.LBL_1_10:
	cfi_restore_state
	movzbl	%r12b, %r15d
	vmovss	68(%rsp,%r15,8), %xmm0
	vmovss	132(%rsp,%r15,8), %xmm1
	vzeroupper

	call	JUMPTARGET(powf)

	vmovss	%xmm0, 196(%rsp,%r15,8)
	jmp	.LBL_1_8

.LBL_1_12:
	movzbl	%r12b, %r15d
	vmovss	64(%rsp,%r15,8), %xmm0
	vmovss	128(%rsp,%r15,8), %xmm1
	vzeroupper

	call	JUMPTARGET(powf)

	vmovss	%xmm0, 192(%rsp,%r15,8)
	jmp	.LBL_1_7

END(_ZGVdN8vv_powf_avx2)

	.section .rodata, "a"
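/* vpermps index vectors used above to pack the odd (high) 32-bit
   element of each 64-bit lane into the low half of a %ymm register.  */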
__VPACK_ODD_ind.6357.0.1:
	.long	1
	.long	3
	.long	5
	.long	7
	.long	0
	.long	0
	.long	0
	.long	0
	.space	32, 0x00
__VPACK_ODD_ind.6358.0.1:
	.long	1
	.long	3
	.long	5
	.long	7
	.long	0
	.long	0
	.long	0
	.long	0
