/* Function pow vectorized with AVX-512.  KNL and SKX versions.
   Copyright (C) 2014-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include "svml_d_pow_data.h"
#include "svml_d_wrapper_impl.h"

/* ALGORITHM DESCRIPTION:

   1) Calculating log2|x|
      Here we use the following formula.
      Let |x| = 2^k1 * X1, where k1 is an integer and 1 <= X1 < 2.
      Let C ~= 1/ln(2),
          Rcp1 ~= 1/X1, X2 = Rcp1*X1,
          Rcp2 ~= 1/X2, X3 = Rcp2*X2,
          Rcp3 ~= 1/X3, Rcp3C ~= C/X3.
      Then
          log2|x| = k1 + log2(1/Rcp1) + log2(1/Rcp2) + log2(C/Rcp3C) +
                    log2(X1*Rcp1*Rcp2*Rcp3C/C),
      where X1*Rcp1*Rcp2*Rcp3C = C*(1+q) and q is very small.

      The values of Rcp1, log2(1/Rcp1), Rcp2, log2(1/Rcp2),
      Rcp3C, log2(C/Rcp3C) are taken from tables.
      The values of Rcp1, Rcp2 and Rcp3C are chosen so that
      RcpC = Rcp1*Rcp2*Rcp3C is exactly representable in the
      target precision.

      log2(X1*Rcp1*Rcp2*Rcp3C/C) = log2(1+q) = ln(1+q)/ln(2) =
        = 1/ln(2)*q - 1/(2*ln(2))*q^2 + 1/(3*ln(2))*q^3 - ... =
        = 1/(C*ln(2))*cq - 1/(2*C^2*ln(2))*cq^2 + 1/(3*C^3*ln(2))*cq^3 - ... =
        = (1 + a1)*cq + a2*cq^2 + a3*cq^3 + ...,
      where
        cq = X1*Rcp1*Rcp2*Rcp3C - C,
        a1 = 1/(C*ln(2)) - 1 is small,
        a2 = -1/(2*C^2*ln(2)),
        a3 = 1/(3*C^3*ln(2)),
        ...
      We get three parts of the log2 result: HH+HL+HLL ~= log2|x|.
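
      For reference, a minimal C sketch of the hi/lo split used for x1
      here (and again below for y and HH); the mask follows the
      "Lo(x1Hi) &= 0xf8000000" comments in the code, while the helper
      name is ours:

        #include <stdint.h>
        #include <string.h>

        // Clear the low 27 significand bits: hi keeps a 26-bit
        // significand, so a product of two such hi parts is exact
        // in double precision (52 bits fit in the 53-bit format).
        static void split (double x, double *hi, double *lo)
        {
          uint64_t b;
          memcpy (&b, &x, sizeof b);
          b &= 0xfffffffff8000000ULL;  // Lo(x) &= 0xf8000000
          memcpy (hi, &b, sizeof *hi);
          *lo = x - *hi;               // exact: x and hi share high bits
        }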

   2) Calculation of y*(HH+HL+HLL).
      Split y into YHi+YLo.
      Get the high part PH and the medium part PL of y*log2|x|.
      Get the low part PLL of y*log2|x|.
      Now we have PH+PL+PLL ~= y*log2|x|.
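
      As a C sketch (same assumptions as above: yH/yL and HH/HL come
      from split(), HLL is the low part of the log2 result, and the
      helper name is ours), mirroring the pL/pLL comments in the code:

        // pH is exact; pL and pLL gather the lower-order terms.
        static double mul_parts (double y, double yH, double yL,
                                 double HH, double HL, double HLL,
                                 double *pH)
        {
          *pH = yH * HH;               // exact 26x26-bit product
          double pL = yL * HL + yH * HL;
          pL += yL * HH;               // pL = yL*HL + yH*HL + yL*HH
          return pL + y * HLL;         // t = pL + pLL; pHL is added later
        }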

   3) Calculation of 2^(PH+PL+PLL).
      The mathematical idea of computing 2^(PH+PL+PLL) is the following.
      Let us represent PH+PL+PLL in the form N + j/2^expK + Z,
      where expK = 7 in this implementation, N and j are integers,
      0 <= j <= 2^expK - 1 and |Z| < 2^(-expK-1).
      Hence
          2^(PH+PL+PLL) ~= 2^N * 2^(j/2^expK) * 2^Z,
      where 2^(j/2^expK) is stored in a table and
          2^Z ~= 1 + B1*Z + B2*Z^2 + ... + B5*Z^5.

      We compute 2^(PH+PL+PLL) as follows.
      Break PH into PHH + PHL, where PHH = N + j/2^expK.
          Z = PHL + PL + PLL
          Exp2Poly = B1*Z + B2*Z^2 + ... + B5*Z^5
      Get 2^(j/2^expK) from the table in the form THI+TLO.
      Now we have 2^(PH+PL+PLL) ~= 2^N * (THI + TLO) * (1 + Exp2Poly).

      Get the significand of 2^(PH+PL+PLL) in the form ResHi+ResLo:
          ResHi := THI
          ResLo := THI * Exp2Poly + TLO

      Get the exponent ERes of the result:
          Res := ResHi + ResLo;
          Result := ex(Res) + N.  */
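
/* A standalone C sketch of the N/j extraction in step 3 (illustrative
   only): adding the "right shifter" 2^45+2^44 (db2p45_2p44 in the data
   table) forces PH to round to a multiple of 2^-expK = 2^-7, and the
   low 32 bits of the sum then hold N and j directly, as the
   "_n = Lo(pHH)" comments below describe.  Assumes round-to-nearest
   and an arithmetic right shift:

     #include <stdint.h>
     #include <stdio.h>
     #include <string.h>

     int main (void)
     {
       const double shifter = 0x1.8p45;  // 2^45 + 2^44
       double ph = 3.415;                // stand-in for y*log2|x|
       double s = ph + shifter;          // ulp(s) == 2^-7
       uint64_t b;
       memcpy (&b, &s, sizeof b);
       uint32_t lo = (uint32_t) b;
       int32_t n = (int32_t) (lo & 0xffffff80) >> 7;  // N
       uint32_t j = lo & 0x7f;                        // table index
       double phh = s - shifter;         // PHH == N + j/128 exactly
       printf ("N=%d j=%u N+j/128=%.7f PHH=%.7f\n",
               n, j, n + j / 128.0, phh);
       return 0;                         // prints N=3 j=53 ... 3.4140625
     }

   2^N is then built by integer arithmetic on the exponent field
   ((0x3ff + N) << 20 into the high word), 2^(j/128) is read from the
   exp2 table, and 2^Z comes from the cev_* polynomial.  */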

        .section .text.evex512, "ax", @progbits
ENTRY (_ZGVeN8vv_pow_knl)
        pushq %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq $-64, %rsp
        subq $1344, %rsp
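
/* High 32-bit words of x (and of y below) feed the integer range checks. */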
        vpsrlq $32, %zmm0, %zmm13
        vmovaps %zmm1, %zmm12
        movq __svml_dpow_data@GOTPCREL(%rip), %rax
        movl $255, %edx
        vpmovqd %zmm13, %ymm10
        vpsrlq $32, %zmm12, %zmm14
        kmovw %edx, %k1
        movl $-1, %ecx
        vpmovqd %zmm14, %ymm15

/* x1 = x; Hi(x1) = (Hi(x1)&0x000fffff)|0x3ff00000 */
        vmovups _dbOne(%rax), %zmm6

/* i = (((Hi(x) & 0x000ffe00) + 0x00000200) >> 10); -> i = (b1..b11 + 1) / 2 */
        vmovaps %zmm10, %zmm5

/* k = Hi(x); k = k - 0x3fe7fe00; k = k >> 20 */
        vpsubd _i3fe7fe00(%rax), %zmm10, %zmm14{%k1}
        vpandd _iIndexMask(%rax), %zmm10, %zmm5{%k1}
        vpsrad $20, %zmm14, %zmm14{%k1}
        vpxord %zmm9, %zmm9, %zmm9
        vpaddd _HIDELTA(%rax), %zmm10, %zmm3{%k1}
        vpaddd _iIndexAdd(%rax), %zmm5, %zmm5{%k1}
        vpxord %zmm7, %zmm7, %zmm7
        vpaddd _i2p20_2p19(%rax), %zmm14, %zmm14{%k1}
        vpcmpd $1, _LORANGE(%rax), %zmm3, %k2{%k1}
        vpsrld $10, %zmm5, %zmm5{%k1}
        vpandd _ABSMASK(%rax), %zmm15, %zmm2{%k1}
        vpbroadcastd %ecx, %zmm1{%k2}{z}

/* Index for reciprocal table */
        vpslld $3, %zmm5, %zmm8{%k1}
        kxnorw %k2, %k2, %k2
        vgatherdpd 11712(%rax,%ymm8), %zmm9{%k2}
        vpmovzxdq %ymm14, %zmm10

/* Index for log2 table */
        vpslld $4, %zmm5, %zmm13{%k1}
        kxnorw %k2, %k2, %k2
        vpsllq $32, %zmm10, %zmm3
        vpxord %zmm8, %zmm8, %zmm8
        vpcmpd $5, _INF(%rax), %zmm2, %k3{%k1}
        vpbroadcastd %ecx, %zmm4{%k3}{z}
        vpternlogq $248, _iMantissaMask(%rax), %zmm0, %zmm6
        kxnorw %k3, %k3, %k3
        vpternlogq $168, _iffffffff00000000(%rax), %zmm10, %zmm3

/* x1Hi = x1; Lo(x1Hi) &= 0xf8000000; x1Lo = x1-x1Hi */
        vpandq _iHighMask(%rax), %zmm6, %zmm2
        vgatherdpd 19976(%rax,%ymm13), %zmm8{%k2}
        vpord %zmm4, %zmm1, %zmm11{%k1}
        vsubpd _db2p20_2p19(%rax), %zmm3, %zmm1
        vsubpd %zmm2, %zmm6, %zmm5

/* r1 = x1*rcp1 */
        vmulpd %zmm9, %zmm6, %zmm6
        vgatherdpd 19968(%rax,%ymm13), %zmm7{%k3}

/* cq = c+r1 */
        vaddpd _LHN(%rax), %zmm6, %zmm4

/* E = -r1+__fence(x1Hi*rcp1) */
        vfmsub213pd %zmm6, %zmm9, %zmm2

/* T = k + L1hi */
        vaddpd %zmm7, %zmm1, %zmm7

/* E = E+x1Lo*rcp1 */
        vfmadd213pd %zmm2, %zmm9, %zmm5

/* T_Rh = T + cq */
        vaddpd %zmm4, %zmm7, %zmm3

/* Rl = T-T_Rh; -> -Rh */
        vsubpd %zmm3, %zmm7, %zmm9

/* Rl = Rl+cq */
        vaddpd %zmm9, %zmm4, %zmm6

/* T_Rh_Eh = T_Rh + E */
        vaddpd %zmm5, %zmm3, %zmm9

/* HLL = T_Rh - T_Rh_Eh; -> -Eh */
        vsubpd %zmm9, %zmm3, %zmm2

/* cq = cq + E */
        vaddpd %zmm5, %zmm4, %zmm4

/* HLL += E; -> El */
        vaddpd %zmm2, %zmm5, %zmm1
        vmovups _clv_2(%rax), %zmm5

/* HLL = HLL + (((((((a7)*cq+a6)*cq+a5)*cq+a4)*cq+a3)*cq+a2)*cq+a1)*cq */
        vfmadd213pd _clv_3(%rax), %zmm4, %zmm5

/* HLL += Rl */
        vaddpd %zmm6, %zmm1, %zmm7

/* 2^(y*(HH+HL+HLL)) starts here:
   yH = y; Lo(yH) &= 0xf8000000 */
        vpandq _iHighMask(%rax), %zmm12, %zmm6

/* yL = y-yH */
        vsubpd %zmm6, %zmm12, %zmm2
        vfmadd213pd _clv_4(%rax), %zmm4, %zmm5

/* HLL += L1lo */
        vaddpd %zmm8, %zmm7, %zmm8
        vfmadd213pd _clv_5(%rax), %zmm4, %zmm5
        vfmadd213pd _clv_6(%rax), %zmm4, %zmm5
        vfmadd213pd _clv_7(%rax), %zmm4, %zmm5
        vfmadd213pd %zmm8, %zmm4, %zmm5

/* T_Rh_Eh_HLLhi = T_Rh_Eh + HLL */
        vaddpd %zmm5, %zmm9, %zmm13

/* HLLhi = T_Rh_Eh_HLLhi - T_Rh_Eh */
        vsubpd %zmm9, %zmm13, %zmm10

/* HLL = HLL - HLLhi */
        vsubpd %zmm10, %zmm5, %zmm3

/* HH = T_Rh_Eh_HLLhi; Lo(HH) &= 0xf8000000 */
        vpandq _iHighMask(%rax), %zmm13, %zmm5

/* pH = yH*HH */
        vmulpd %zmm5, %zmm6, %zmm1

/* HL = T_Rh_Eh_HLLhi-HH */
        vsubpd %zmm5, %zmm13, %zmm4
        vpsrlq $32, %zmm1, %zmm14

/* pLL = y*HLL;
   pHH = pH + *(double*)&db2p45_2p44 */
        vaddpd _db2p45_2p44(%rax), %zmm1, %zmm10
        vpmovqd %zmm14, %ymm15
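
/* Lanes where |Hi(pH)| reaches _DOMAINRANGE join the special-case mask. */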
        vpandd _ABSMASK(%rax), %zmm15, %zmm14{%k1}
        vpcmpd $5, _DOMAINRANGE(%rax), %zmm14, %k3{%k1}

/* T1 = ((double*)exp2_tbl)[ 2*j ] */
        vpxord %zmm14, %zmm14, %zmm14
        vpbroadcastd %ecx, %zmm13{%k3}{z}
        vpord %zmm13, %zmm11, %zmm11{%k1}
        vptestmd %zmm11, %zmm11, %k0{%k1}

/* pL = yL*HL+yH*HL; pL += yL*HH */
        vmulpd %zmm4, %zmm2, %zmm11
        kmovw %k0, %ecx
        vfmadd213pd %zmm11, %zmm4, %zmm6

/* pHH = pHH - *(double*)&db2p45_2p44 */
        vsubpd _db2p45_2p44(%rax), %zmm10, %zmm11
        vpmovqd %zmm10, %ymm4
        movzbl %cl, %ecx

/* _n = Lo(pHH);
   _n = _n & 0xffffff80;
   _n = _n >> 7;
   Hi(_2n) = (0x3ff+_n)<<20; Lo(_2n) = 0; -> 2^n */
        vpslld $13, %zmm4, %zmm7{%k1}

/* j = Lo(pHH)&0x0000007f */
        vpandd _jIndexMask(%rax), %zmm4, %zmm9{%k1}
        vfmadd213pd %zmm6, %zmm5, %zmm2

/* pHL = pH - pHH */
        vsubpd %zmm11, %zmm1, %zmm1
        vpaddd _iOne(%rax), %zmm7, %zmm7{%k1}

/* t = pL+pLL; t += pHL */
        vfmadd231pd %zmm12, %zmm3, %zmm2
        vpslld $4, %zmm9, %zmm9{%k1}
        kxnorw %k1, %k1, %k1
        vgatherdpd 36416(%rax,%ymm9), %zmm14{%k1}
        vpmovzxdq %ymm7, %zmm8
        vaddpd %zmm1, %zmm2, %zmm2
        vmovups _cev_1(%rax), %zmm1
        vpsllq $32, %zmm8, %zmm13
        vpternlogq $168, _ifff0000000000000(%rax), %zmm8, %zmm13
        vfmadd213pd _cev_2(%rax), %zmm2, %zmm1
        vmulpd %zmm14, %zmm13, %zmm15
        vfmadd213pd _cev_3(%rax), %zmm2, %zmm1
        vmulpd %zmm2, %zmm15, %zmm3
        vfmadd213pd _cev_4(%rax), %zmm2, %zmm1
        vfmadd213pd _cev_5(%rax), %zmm2, %zmm1
        vfmadd213pd %zmm15, %zmm3, %zmm1
        testl %ecx, %ecx
        jne .LBL_1_3

.LBL_1_2:
        cfi_remember_state
        vmovaps %zmm1, %zmm0
        movq %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret

.LBL_1_3:
        cfi_restore_state
        vmovups %zmm0, 1152(%rsp)
        vmovups %zmm12, 1216(%rsp)
        vmovups %zmm1, 1280(%rsp)
        je .LBL_1_2

        xorb %dl, %dl
        kmovw %k4, 1048(%rsp)
        xorl %eax, %eax
        kmovw %k5, 1040(%rsp)
        kmovw %k6, 1032(%rsp)
        kmovw %k7, 1024(%rsp)
        vmovups %zmm16, 960(%rsp)
        vmovups %zmm17, 896(%rsp)
        vmovups %zmm18, 832(%rsp)
        vmovups %zmm19, 768(%rsp)
        vmovups %zmm20, 704(%rsp)
        vmovups %zmm21, 640(%rsp)
        vmovups %zmm22, 576(%rsp)
        vmovups %zmm23, 512(%rsp)
        vmovups %zmm24, 448(%rsp)
        vmovups %zmm25, 384(%rsp)
        vmovups %zmm26, 320(%rsp)
        vmovups %zmm27, 256(%rsp)
        vmovups %zmm28, 192(%rsp)
        vmovups %zmm29, 128(%rsp)
        vmovups %zmm30, 64(%rsp)
        vmovups %zmm31, (%rsp)
        movq %rsi, 1064(%rsp)
        movq %rdi, 1056(%rsp)
        movq %r12, 1096(%rsp)
        cfi_offset_rel_rsp (12, 1096)
        movb %dl, %r12b
        movq %r13, 1088(%rsp)
        cfi_offset_rel_rsp (13, 1088)
        movl %ecx, %r13d
        movq %r14, 1080(%rsp)
        cfi_offset_rel_rsp (14, 1080)
        movl %eax, %r14d
        movq %r15, 1072(%rsp)
        cfi_offset_rel_rsp (15, 1072)
        cfi_remember_state

.LBL_1_6:
        btl %r14d, %r13d
        jc .LBL_1_12

.LBL_1_7:
        lea 1(%r14), %esi
        btl %esi, %r13d
        jc .LBL_1_10

.LBL_1_8:
        addb $1, %r12b
        addl $2, %r14d
        cmpb $16, %r12b
        jb .LBL_1_6

        kmovw 1048(%rsp), %k4
        movq 1064(%rsp), %rsi
        kmovw 1040(%rsp), %k5
        movq 1056(%rsp), %rdi
        kmovw 1032(%rsp), %k6
        movq 1096(%rsp), %r12
        cfi_restore (%r12)
        movq 1088(%rsp), %r13
        cfi_restore (%r13)
        kmovw 1024(%rsp), %k7
        vmovups 960(%rsp), %zmm16
        vmovups 896(%rsp), %zmm17
        vmovups 832(%rsp), %zmm18
        vmovups 768(%rsp), %zmm19
        vmovups 704(%rsp), %zmm20
        vmovups 640(%rsp), %zmm21
        vmovups 576(%rsp), %zmm22
        vmovups 512(%rsp), %zmm23
        vmovups 448(%rsp), %zmm24
        vmovups 384(%rsp), %zmm25
        vmovups 320(%rsp), %zmm26
        vmovups 256(%rsp), %zmm27
        vmovups 192(%rsp), %zmm28
        vmovups 128(%rsp), %zmm29
        vmovups 64(%rsp), %zmm30
        vmovups (%rsp), %zmm31
        movq 1080(%rsp), %r14
        cfi_restore (%r14)
        movq 1072(%rsp), %r15
        cfi_restore (%r15)
        vmovups 1280(%rsp), %zmm1
        jmp .LBL_1_2

.LBL_1_10:
        cfi_restore_state
        movzbl %r12b, %r15d
        shlq $4, %r15
        vmovsd 1160(%rsp,%r15), %xmm0
        vmovsd 1224(%rsp,%r15), %xmm1
        call JUMPTARGET(pow)
        vmovsd %xmm0, 1288(%rsp,%r15)
        jmp .LBL_1_8

.LBL_1_12:
        movzbl %r12b, %r15d
        shlq $4, %r15
        vmovsd 1152(%rsp,%r15), %xmm0
        vmovsd 1216(%rsp,%r15), %xmm1
        call JUMPTARGET(pow)
        vmovsd %xmm0, 1280(%rsp,%r15)
        jmp .LBL_1_7

END (_ZGVeN8vv_pow_knl)
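
/* Both variants use the same special-case protocol: in-range lanes keep
   the vector result, while any lane flagged by the range checks is
   recomputed with scalar pow.  A minimal C model of that dispatch
   (assuming libm pow as the fallback and one mask bit per lane):

     #include <math.h>

     static void
     pow8_fixup (const double x[8], const double y[8],
                 double res[8], unsigned mask)
     {
       for (int i = 0; i < 8; i++)
         if (mask & (1u << i))
           res[i] = pow (x[i], y[i]);  // scalar call handles specials
     }
*/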

ENTRY (_ZGVeN8vv_pow_skx)
        pushq %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq $-64, %rsp
        subq $1344, %rsp
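
/* High 32-bit words of x (and of y below) feed the integer range checks. */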
        vpsrlq $32, %zmm0, %zmm10
        kxnorw %k1, %k1, %k1
        kxnorw %k2, %k2, %k2
        kxnorw %k3, %k3, %k3
        vpmovqd %zmm10, %ymm7
        movq __svml_dpow_data@GOTPCREL(%rip), %rax
        vmovaps %zmm1, %zmm6
        vpsrlq $32, %zmm6, %zmm13

/* i = (((Hi(x) & 0x000ffe00) + 0x00000200) >> 10); -> i = (b1..b11 + 1) / 2 */
        vpand _iIndexMask(%rax), %ymm7, %ymm15
        vpaddd _HIDELTA(%rax), %ymm7, %ymm2

/* k = Hi(x); k = k - 0x3fe7fe00; k = k >> 20 */
        vpsubd _i3fe7fe00(%rax), %ymm7, %ymm7
        vmovdqu _ABSMASK(%rax), %ymm4
        vmovdqu _LORANGE(%rax), %ymm3

/* x1 = x; Hi(x1) = (Hi(x1)&0x000fffff)|0x3ff00000 */
        vmovups _dbOne(%rax), %zmm11
        vmovdqu _INF(%rax), %ymm5
        vpaddd _iIndexAdd(%rax), %ymm15, %ymm12
        vpmovqd %zmm13, %ymm14
        vpternlogq $248, _iMantissaMask(%rax), %zmm0, %zmm11
        vpsrld $10, %ymm12, %ymm10
        vpsrad $20, %ymm7, %ymm13

/* Index for reciprocal table */
        vpslld $3, %ymm10, %ymm8

/* Index for log2 table */
        vpslld $4, %ymm10, %ymm1
        vpcmpgtd %ymm2, %ymm3, %ymm3
        vpand %ymm4, %ymm14, %ymm2
        vpaddd _i2p20_2p19(%rax), %ymm13, %ymm14
        vpmovzxdq %ymm14, %zmm15
        vpsllq $32, %zmm15, %zmm7
        vpternlogq $168, _iffffffff00000000(%rax), %zmm15, %zmm7
        vsubpd _db2p20_2p19(%rax), %zmm7, %zmm13
        vpxord %zmm9, %zmm9, %zmm9
        vgatherdpd 11712(%rax,%ymm8), %zmm9{%k1}

/* T1 = ((double*)exp2_tbl)[ 2*j ] */
        kxnorw %k1, %k1, %k1
        vpxord %zmm12, %zmm12, %zmm12
        vpxord %zmm8, %zmm8, %zmm8
        vgatherdpd 19968(%rax,%ymm1), %zmm12{%k2}
        vgatherdpd 19976(%rax,%ymm1), %zmm8{%k3}
        vmovups _iHighMask(%rax), %zmm1

/* x1Hi = x1; Lo(x1Hi) &= 0xf8000000; x1Lo = x1-x1Hi */
        vandpd %zmm1, %zmm11, %zmm10
        vsubpd %zmm10, %zmm11, %zmm15

/* r1 = x1*rcp1 */
        vmulpd %zmm9, %zmm11, %zmm11

/* E = -r1+__fence(x1Hi*rcp1) */
        vfmsub213pd %zmm11, %zmm9, %zmm10

/* cq = c+r1 */
        vaddpd _LHN(%rax), %zmm11, %zmm14

/* E = E+x1Lo*rcp1 */
        vfmadd213pd %zmm10, %zmm9, %zmm15

/* T = k + L1hi */
        vaddpd %zmm12, %zmm13, %zmm9

/* T_Rh = T + cq */
        vaddpd %zmm14, %zmm9, %zmm11

/* T_Rh_Eh = T_Rh + E */
        vaddpd %zmm15, %zmm11, %zmm13

/* Rl = T-T_Rh; -> -Rh */
        vsubpd %zmm11, %zmm9, %zmm12

/* HLL = T_Rh - T_Rh_Eh; -> -Eh */
        vsubpd %zmm13, %zmm11, %zmm9

/* Rl = Rl+cq */
        vaddpd %zmm12, %zmm14, %zmm10

/* HLL += E; -> El */
        vaddpd %zmm9, %zmm15, %zmm7

/* HLL += Rl */
        vaddpd %zmm10, %zmm7, %zmm12

/* 2^(y*(HH+HL+HLL)) starts here:
   yH = y; Lo(yH) &= 0xf8000000 */
        vandpd %zmm1, %zmm6, %zmm7

/* HLL += L1lo */
        vaddpd %zmm8, %zmm12, %zmm12

/* cq = cq + E */
        vaddpd %zmm15, %zmm14, %zmm8
        vmovups _clv_2(%rax), %zmm14

/* HLL = HLL + (((((((a7)*cq+a6)*cq+a5)*cq+a4)*cq+a3)*cq+a2)*cq+a1)*cq */
        vfmadd213pd _clv_3(%rax), %zmm8, %zmm14
        vfmadd213pd _clv_4(%rax), %zmm8, %zmm14
        vfmadd213pd _clv_5(%rax), %zmm8, %zmm14
        vfmadd213pd _clv_6(%rax), %zmm8, %zmm14
        vfmadd213pd _clv_7(%rax), %zmm8, %zmm14
        vfmadd213pd %zmm12, %zmm8, %zmm14

/* yL = y-yH */
        vsubpd %zmm7, %zmm6, %zmm8

/* T_Rh_Eh_HLLhi = T_Rh_Eh + HLL */
        vaddpd %zmm14, %zmm13, %zmm15

/* HH = T_Rh_Eh_HLLhi; Lo(HH) &= 0xf8000000 */
        vandpd %zmm1, %zmm15, %zmm11

/* HLLhi = T_Rh_Eh_HLLhi - T_Rh_Eh */
        vsubpd %zmm13, %zmm15, %zmm13

/* pH = yH*HH */
        vmulpd %zmm11, %zmm7, %zmm9

/* HLL = HLL - HLLhi */
        vsubpd %zmm13, %zmm14, %zmm12

/* HL = T_Rh_Eh_HLLhi-HH */
        vsubpd %zmm11, %zmm15, %zmm10
        vpsrlq $32, %zmm9, %zmm1
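
/* Fold the y = Inf/NaN check and the |Hi(pH)| domain check into the
   special-case mask built from the x range check above. */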
        vmovdqu _DOMAINRANGE(%rax), %ymm13
        vpmovqd %zmm1, %ymm1
        vpand %ymm4, %ymm1, %ymm1
        vpcmpgtd %ymm5, %ymm2, %ymm4
        vpcmpeqd %ymm5, %ymm2, %ymm5
        vpternlogd $254, %ymm5, %ymm4, %ymm3
        vpcmpgtd %ymm13, %ymm1, %ymm2
        vpcmpeqd %ymm13, %ymm1, %ymm4
        vpternlogd $254, %ymm4, %ymm2, %ymm3

/* pLL = y*HLL */
        vmovups _db2p45_2p44(%rax), %zmm2

/* pHH = pH + *(double*)&db2p45_2p44 */
        vaddpd %zmm2, %zmm9, %zmm1
        vpmovqd %zmm1, %ymm5

/* j = Lo(pHH)&0x0000007f */
        vpand _jIndexMask(%rax), %ymm5, %ymm14
        vpslld $4, %ymm14, %ymm15
        vmovmskps %ymm3, %ecx

/* pL = yL*HL+yH*HL; pL += yL*HH */
        vmulpd %zmm10, %zmm8, %zmm3
        vfmadd213pd %zmm3, %zmm10, %zmm7
        vfmadd213pd %zmm7, %zmm11, %zmm8

/* _n = Lo(pHH);
   _n = _n & 0xffffff80;
   _n = _n >> 7;
   Hi(_2n) = (0x3ff+_n)<<20; Lo(_2n) = 0; -> 2^n */
        vpslld $13, %ymm5, %ymm7

/* t = pL+pLL; t += pHL */
        vfmadd231pd %zmm6, %zmm12, %zmm8
        vpaddd _iOne(%rax), %ymm7, %ymm10
        vpmovzxdq %ymm10, %zmm11
        vpsllq $32, %zmm11, %zmm3
        vpternlogq $168, _ifff0000000000000(%rax), %zmm11, %zmm3

/* pHH = pHH - *(double*)&db2p45_2p44 */
        vsubpd %zmm2, %zmm1, %zmm11
        vmovups _cev_1(%rax), %zmm2

/* pHL = pH - pHH */
        vsubpd %zmm11, %zmm9, %zmm9
        vaddpd %zmm9, %zmm8, %zmm8
        vfmadd213pd _cev_2(%rax), %zmm8, %zmm2
        vfmadd213pd _cev_3(%rax), %zmm8, %zmm2
        vfmadd213pd _cev_4(%rax), %zmm8, %zmm2
        vfmadd213pd _cev_5(%rax), %zmm8, %zmm2
        vpxord %zmm4, %zmm4, %zmm4
        vgatherdpd 36416(%rax,%ymm15), %zmm4{%k1}
        vmulpd %zmm4, %zmm3, %zmm1
        vmulpd %zmm8, %zmm1, %zmm12
        vfmadd213pd %zmm1, %zmm12, %zmm2
        testl %ecx, %ecx
        jne .LBL_2_3

.LBL_2_2:
        cfi_remember_state
        vmovaps %zmm2, %zmm0
        movq %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret

.LBL_2_3:
        cfi_restore_state
        vmovups %zmm0, 1152(%rsp)
        vmovups %zmm6, 1216(%rsp)
        vmovups %zmm2, 1280(%rsp)
        je .LBL_2_2

        xorb %dl, %dl
        xorl %eax, %eax
        kmovw %k4, 1048(%rsp)
        kmovw %k5, 1040(%rsp)
        kmovw %k6, 1032(%rsp)
        kmovw %k7, 1024(%rsp)
        vmovups %zmm16, 960(%rsp)
        vmovups %zmm17, 896(%rsp)
        vmovups %zmm18, 832(%rsp)
        vmovups %zmm19, 768(%rsp)
        vmovups %zmm20, 704(%rsp)
        vmovups %zmm21, 640(%rsp)
        vmovups %zmm22, 576(%rsp)
        vmovups %zmm23, 512(%rsp)
        vmovups %zmm24, 448(%rsp)
        vmovups %zmm25, 384(%rsp)
        vmovups %zmm26, 320(%rsp)
        vmovups %zmm27, 256(%rsp)
        vmovups %zmm28, 192(%rsp)
        vmovups %zmm29, 128(%rsp)
        vmovups %zmm30, 64(%rsp)
        vmovups %zmm31, (%rsp)
        movq %rsi, 1064(%rsp)
        movq %rdi, 1056(%rsp)
        movq %r12, 1096(%rsp)
        cfi_offset_rel_rsp (12, 1096)
        movb %dl, %r12b
        movq %r13, 1088(%rsp)
        cfi_offset_rel_rsp (13, 1088)
        movl %ecx, %r13d
        movq %r14, 1080(%rsp)
        cfi_offset_rel_rsp (14, 1080)
        movl %eax, %r14d
        movq %r15, 1072(%rsp)
        cfi_offset_rel_rsp (15, 1072)
        cfi_remember_state

.LBL_2_6:
        btl %r14d, %r13d
        jc .LBL_2_12

.LBL_2_7:
        lea 1(%r14), %esi
        btl %esi, %r13d
        jc .LBL_2_10

.LBL_2_8:
        incb %r12b
        addl $2, %r14d
        cmpb $16, %r12b
        jb .LBL_2_6

        kmovw 1048(%rsp), %k4
        kmovw 1040(%rsp), %k5
        kmovw 1032(%rsp), %k6
        kmovw 1024(%rsp), %k7
        vmovups 960(%rsp), %zmm16
        vmovups 896(%rsp), %zmm17
        vmovups 832(%rsp), %zmm18
        vmovups 768(%rsp), %zmm19
        vmovups 704(%rsp), %zmm20
        vmovups 640(%rsp), %zmm21
        vmovups 576(%rsp), %zmm22
        vmovups 512(%rsp), %zmm23
        vmovups 448(%rsp), %zmm24
        vmovups 384(%rsp), %zmm25
        vmovups 320(%rsp), %zmm26
        vmovups 256(%rsp), %zmm27
        vmovups 192(%rsp), %zmm28
        vmovups 128(%rsp), %zmm29
        vmovups 64(%rsp), %zmm30
        vmovups (%rsp), %zmm31
        vmovups 1280(%rsp), %zmm2
        movq 1064(%rsp), %rsi
        movq 1056(%rsp), %rdi
        movq 1096(%rsp), %r12
        cfi_restore (%r12)
        movq 1088(%rsp), %r13
        cfi_restore (%r13)
        movq 1080(%rsp), %r14
        cfi_restore (%r14)
        movq 1072(%rsp), %r15
        cfi_restore (%r15)
        jmp .LBL_2_2

.LBL_2_10:
        cfi_restore_state
        movzbl %r12b, %r15d
        shlq $4, %r15
        vmovsd 1224(%rsp,%r15), %xmm1
        vzeroupper
        vmovsd 1160(%rsp,%r15), %xmm0

        call JUMPTARGET(pow)

        vmovsd %xmm0, 1288(%rsp,%r15)
        jmp .LBL_2_8

.LBL_2_12:
        movzbl %r12b, %r15d
        shlq $4, %r15
        vmovsd 1216(%rsp,%r15), %xmm1
        vzeroupper
        vmovsd 1152(%rsp,%r15), %xmm0

        call JUMPTARGET(pow)

        vmovsd %xmm0, 1280(%rsp,%r15)
        jmp .LBL_2_7

END (_ZGVeN8vv_pow_skx)