1/* Function cos vectorized with AVX-512, KNL and SKX versions.
2 Copyright (C) 2014-2024 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
18
19#include <sysdep.h>
20#include "svml_d_trig_data.h"
21#include "svml_d_wrapper_impl.h"
22
23 .section .text.evex512, "ax", @progbits
ENTRY (_ZGVeN8v_cos_knl)
/*
   ALGORITHM DESCRIPTION:

   ( low accuracy ( < 4ulp ) or enhanced performance
     ( half of correct mantissa ) implementation )

   Argument representation:
   arg + Pi/2 = (N*Pi + R)

   Result calculation:
   cos(arg) = sin(arg+Pi/2) = sin(N*Pi + R) = (-1)^N * sin(R)
   sin(R) is approximated by corresponding polynomial

   Vector ABI: in zmm0 = 8 double-precision arguments,
   out zmm0 = 8 results (KNL variant).
 */
	pushq	%rbp
	cfi_adjust_cfa_offset (8)
	cfi_rel_offset (%rbp, 0)
	movq	%rsp, %rbp
	cfi_def_cfa_register (%rbp)
/* 64-byte-align the stack and reserve scratch space for the fallback
   path: zmm16-zmm31 and k4-k7 spill slots plus the per-lane
   argument (1152..) and result (1216..) buffers.  */
	andq	$-64, %rsp
	subq	$1280, %rsp
	movq	__svml_d_trig_data@GOTPCREL(%rip), %rax

/* R = X - N*Pi1 */
	vmovaps	%zmm0, %zmm7

/* Check for large arguments path: all-ones pattern used below to mark
   special lanes.  */
	movq	$-1, %rcx

/*
   ARGUMENT RANGE REDUCTION:
   Add Pi/2 to argument: X' = X+Pi/2
 */
	vaddpd	__dHalfPI(%rax), %zmm0, %zmm5
	vmovups	__dInvPI(%rax), %zmm3

/* Get absolute argument value: X' = |X'| */
	vpandq	__dAbsMask(%rax), %zmm5, %zmm1

/* Y = X'*InvPi + RS : right shifter add */
	vfmadd213pd	__dRShifter(%rax), %zmm3, %zmm5

	vmovups	__dPI1_FMA(%rax), %zmm6

/* N = Y - RS : right shifter sub */
	vsubpd	__dRShifter(%rax), %zmm5, %zmm4

/* SignRes = Y<<63 : shift LSB to MSB place for result sign */
	vpsllq	$63, %zmm5, %zmm12

	vmovups	__dC7(%rax), %zmm8

/* N = N - 0.5 */
	vsubpd	__dOneHalf(%rax), %zmm4, %zmm10

/* k1 = lanes where |X'| is not-less-or-equal __dRangeVal
   (predicate 22 = NLE_UQ: argument too large, or NaN).  */
	vcmppd	$22, __dRangeVal(%rax), %zmm1, %k1

/* zmm2 = all-ones in special lanes, zero elsewhere (zero-masking
   broadcast of %rcx == -1).  */
	vpbroadcastq	%rcx, %zmm2{%k1}{z}

	vfnmadd231pd	%zmm10, %zmm6, %zmm7

/* k0 = bitmask of nonzero (i.e. special) lanes.  */
	vptestmq	%zmm2, %zmm2, %k0

/* R = R - N*Pi2 */
	vfnmadd231pd	__dPI2_FMA(%rax), %zmm10, %zmm7

	kmovw	%k0, %ecx
/* Only the low 8 mask bits are meaningful for 8 double lanes.  */
	movzbl	%cl, %ecx

/* R = R - N*Pi3 */
	vfnmadd132pd	__dPI3_FMA(%rax), %zmm7, %zmm10

/*
   POLYNOMIAL APPROXIMATION:
   R2 = R*R
 */
	vmulpd	%zmm10, %zmm10, %zmm9
	vfmadd213pd	__dC6(%rax), %zmm9, %zmm8
	vfmadd213pd	__dC5(%rax), %zmm9, %zmm8
	vfmadd213pd	__dC4(%rax), %zmm9, %zmm8

/* Poly = C3+R2*(C4+R2*(C5+R2*(C6+R2*C7))) */
	vfmadd213pd	__dC3(%rax), %zmm9, %zmm8

/* Poly = R+R*(R2*(C1+R2*(C2+R2*Poly))) */
	vfmadd213pd	__dC2(%rax), %zmm9, %zmm8
	vfmadd213pd	__dC1(%rax), %zmm9, %zmm8
	vmulpd	%zmm9, %zmm8, %zmm11
	vfmadd213pd	%zmm10, %zmm10, %zmm11

/*
   RECONSTRUCTION:
   Final sign setting: Res = Poly^SignRes
 */
	vpxorq	%zmm12, %zmm11, %zmm1

/* Any special lanes?  If so, patch them via the scalar fallback.  */
	testl	%ecx, %ecx
	jne	.LBL_1_3

.LBL_1_2:
	cfi_remember_state
	vmovaps	%zmm1, %zmm0
	movq	%rbp, %rsp
	cfi_def_cfa_register (%rsp)
	popq	%rbp
	cfi_adjust_cfa_offset (-8)
	cfi_restore (%rbp)
	ret

.LBL_1_3:
	cfi_restore_state
/* Spill the original arguments and fast-path results so individual
   lanes can be recomputed and patched in place.  */
	vmovups	%zmm0, 1152(%rsp)
	vmovups	%zmm1, 1216(%rsp)
/* NOTE(review): flags are still from the testl above, which was
   nonzero to reach this point, so this branch appears never taken
   (compiler-generated residue) — confirm before removing.  */
	je	.LBL_1_2

/* Save caller state the vector ABI requires preserved across the
   scalar libm call: k4-k7, zmm16-zmm31, rsi, rdi, r12-r15.  */
	xorb	%dl, %dl
	kmovw	%k4, 1048(%rsp)
	xorl	%eax, %eax
	kmovw	%k5, 1040(%rsp)
	kmovw	%k6, 1032(%rsp)
	kmovw	%k7, 1024(%rsp)
	vmovups	%zmm16, 960(%rsp)
	vmovups	%zmm17, 896(%rsp)
	vmovups	%zmm18, 832(%rsp)
	vmovups	%zmm19, 768(%rsp)
	vmovups	%zmm20, 704(%rsp)
	vmovups	%zmm21, 640(%rsp)
	vmovups	%zmm22, 576(%rsp)
	vmovups	%zmm23, 512(%rsp)
	vmovups	%zmm24, 448(%rsp)
	vmovups	%zmm25, 384(%rsp)
	vmovups	%zmm26, 320(%rsp)
	vmovups	%zmm27, 256(%rsp)
	vmovups	%zmm28, 192(%rsp)
	vmovups	%zmm29, 128(%rsp)
	vmovups	%zmm30, 64(%rsp)
	vmovups	%zmm31, (%rsp)
	movq	%rsi, 1064(%rsp)
	movq	%rdi, 1056(%rsp)
	movq	%r12, 1096(%rsp)
	cfi_offset_rel_rsp (12, 1096)
	movb	%dl, %r12b		/* r12b = pair-iteration counter */
	movq	%r13, 1088(%rsp)
	cfi_offset_rel_rsp (13, 1088)
	movl	%ecx, %r13d		/* r13d = special-lane bitmask */
	movq	%r14, 1080(%rsp)
	cfi_offset_rel_rsp (14, 1080)
	movl	%eax, %r14d		/* r14d = current lane bit index */
	movq	%r15, 1072(%rsp)
	cfi_offset_rel_rsp (15, 1072)
	cfi_remember_state

/* Scalar fallback loop: examine two mask bits per iteration and call
   scalar cos for each set bit, storing into the result buffer.  */
.LBL_1_6:
	btl	%r14d, %r13d
	jc	.LBL_1_12

.LBL_1_7:
	lea	1(%r14), %esi
	btl	%esi, %r13d
	jc	.LBL_1_10

.LBL_1_8:
	addb	$1, %r12b
	addl	$2, %r14d
/* NOTE(review): 16 iterations scan 32 bits, though only the low 8 can
   be set for 8 double lanes; extra iterations are harmless no-ops.  */
	cmpb	$16, %r12b
	jb	.LBL_1_6

/* All special lanes patched: restore saved state and rejoin the fast
   exit path with the merged results.  */
	kmovw	1048(%rsp), %k4
	movq	1064(%rsp), %rsi
	kmovw	1040(%rsp), %k5
	movq	1056(%rsp), %rdi
	kmovw	1032(%rsp), %k6
	movq	1096(%rsp), %r12
	cfi_restore (%r12)
	movq	1088(%rsp), %r13
	cfi_restore (%r13)
	kmovw	1024(%rsp), %k7
	vmovups	960(%rsp), %zmm16
	vmovups	896(%rsp), %zmm17
	vmovups	832(%rsp), %zmm18
	vmovups	768(%rsp), %zmm19
	vmovups	704(%rsp), %zmm20
	vmovups	640(%rsp), %zmm21
	vmovups	576(%rsp), %zmm22
	vmovups	512(%rsp), %zmm23
	vmovups	448(%rsp), %zmm24
	vmovups	384(%rsp), %zmm25
	vmovups	320(%rsp), %zmm26
	vmovups	256(%rsp), %zmm27
	vmovups	192(%rsp), %zmm28
	vmovups	128(%rsp), %zmm29
	vmovups	64(%rsp), %zmm30
	vmovups	(%rsp), %zmm31
	movq	1080(%rsp), %r14
	cfi_restore (%r14)
	movq	1072(%rsp), %r15
	cfi_restore (%r15)
	vmovups	1216(%rsp), %zmm1
	jmp	.LBL_1_2

/* Odd lane of the current pair: arg at +1160, result at +1224.  */
.LBL_1_10:
	cfi_restore_state
	movzbl	%r12b, %r15d
	shlq	$4, %r15		/* r15 = pair index * 16 bytes (2 doubles) */
	vmovsd	1160(%rsp,%r15), %xmm0
	call	JUMPTARGET(cos)
	vmovsd	%xmm0, 1224(%rsp,%r15)
	jmp	.LBL_1_8

/* Even lane of the current pair: arg at +1152, result at +1216.  */
.LBL_1_12:
	movzbl	%r12b, %r15d
	shlq	$4, %r15
	vmovsd	1152(%rsp,%r15), %xmm0
	call	JUMPTARGET(cos)
	vmovsd	%xmm0, 1216(%rsp,%r15)
	jmp	.LBL_1_7
END (_ZGVeN8v_cos_knl)
233
ENTRY (_ZGVeN8v_cos_skx)
/*
   ALGORITHM DESCRIPTION:

   ( low accuracy ( < 4ulp ) or enhanced performance
     ( half of correct mantissa ) implementation )

   Argument representation:
   arg + Pi/2 = (N*Pi + R)

   Result calculation:
   cos(arg) = sin(arg+Pi/2) = sin(N*Pi + R) = (-1)^N * sin(R)
   sin(R) is approximated by corresponding polynomial

   Vector ABI: in zmm0 = 8 double-precision arguments,
   out zmm0 = 8 results (SKX variant).
 */
	pushq	%rbp
	cfi_adjust_cfa_offset (8)
	cfi_rel_offset (%rbp, 0)
	movq	%rsp, %rbp
	cfi_def_cfa_register (%rbp)
/* 64-byte-align the stack and reserve scratch space for the fallback
   path: zmm16-zmm31 and k4-k7 spill slots plus the per-lane
   argument (1152..) and result (1216..) buffers.  */
	andq	$-64, %rsp
	subq	$1280, %rsp
	movq	__svml_d_trig_data@GOTPCREL(%rip), %rax

/* R = X - N*Pi1 */
	vmovaps	%zmm0, %zmm8

/* Check for large arguments path: set zmm2 to all-ones
   (ternary logic 0xff = constant TRUE).  */
	vpternlogd	$0xff, %zmm2, %zmm2, %zmm2

/*
   ARGUMENT RANGE REDUCTION:
   Add Pi/2 to argument: X' = X+Pi/2
 */
	vaddpd	__dHalfPI(%rax), %zmm0, %zmm6
	vmovups	__dInvPI(%rax), %zmm3
	vmovups	__dRShifter(%rax), %zmm4
	vmovups	__dPI1_FMA(%rax), %zmm7
	vmovups	__dC7(%rax), %zmm9

/* Get absolute argument value: X' = |X'| */
	vandpd	__dAbsMask(%rax), %zmm6, %zmm1

/* Y = X'*InvPi + RS : right shifter add */
	vfmadd213pd	%zmm4, %zmm3, %zmm6

/* k1 = in-range lanes: |X'| <= __dRangeVal
   (predicate 18 = LE_OQ).  */
	vcmppd	$18, __dRangeVal(%rax), %zmm1, %k1

/* SignRes = Y<<63 : shift LSB to MSB place for result sign */
	vpsllq	$63, %zmm6, %zmm13

/* N = Y - RS : right shifter sub */
	vsubpd	%zmm4, %zmm6, %zmm5

/* N = N - 0.5 */
	vsubpd	__dOneHalf(%rax), %zmm5, %zmm11

	vfnmadd231pd	%zmm11, %zmm7, %zmm8

/* R = R - N*Pi2 */
	vfnmadd231pd	__dPI2_FMA(%rax), %zmm11, %zmm8

/* R = R - N*Pi3 */
	vfnmadd132pd	__dPI3_FMA(%rax), %zmm8, %zmm11

/*
   POLYNOMIAL APPROXIMATION:
   R2 = R*R
 */
	vmulpd	%zmm11, %zmm11, %zmm10
	vfmadd213pd	__dC6(%rax), %zmm10, %zmm9
	vfmadd213pd	__dC5(%rax), %zmm10, %zmm9
	vfmadd213pd	__dC4(%rax), %zmm10, %zmm9

/* Poly = C3+R2*(C4+R2*(C5+R2*(C6+R2*C7))) */
	vfmadd213pd	__dC3(%rax), %zmm10, %zmm9

/* Poly = R+R*(R2*(C1+R2*(C2+R2*Poly))) */
	vfmadd213pd	__dC2(%rax), %zmm10, %zmm9
	vfmadd213pd	__dC1(%rax), %zmm10, %zmm9
	vmulpd	%zmm10, %zmm9, %zmm12
	vfmadd213pd	%zmm11, %zmm11, %zmm12

/* Clear zmm2 in the in-range lanes (andn of a value with itself is 0),
   leaving the all-ones (NaN) pattern only in special lanes.  */
	vpandnq	%zmm1, %zmm1, %zmm2{%k1}

/* k0 = unordered self-compare (predicate 3 = UNORD_Q): set exactly in
   the lanes still holding the all-ones NaN pattern, i.e. special lanes.  */
	vcmppd	$3, %zmm2, %zmm2, %k0

/*
   RECONSTRUCTION:
   Final sign setting: Res = Poly^SignRes
 */
	vxorpd	%zmm13, %zmm12, %zmm1

	kmovw	%k0, %ecx
/* Any special lanes?  If so, patch them via the scalar fallback.  */
	testl	%ecx, %ecx
	jne	.LBL_2_3

.LBL_2_2:
	cfi_remember_state
	vmovaps	%zmm1, %zmm0
	movq	%rbp, %rsp
	cfi_def_cfa_register (%rsp)
	popq	%rbp
	cfi_adjust_cfa_offset (-8)
	cfi_restore (%rbp)
	ret

.LBL_2_3:
	cfi_restore_state
/* Spill the original arguments and fast-path results so individual
   lanes can be recomputed and patched in place.  */
	vmovups	%zmm0, 1152(%rsp)
	vmovups	%zmm1, 1216(%rsp)
/* NOTE(review): flags are still from the testl above, which was
   nonzero to reach this point, so this branch appears never taken
   (compiler-generated residue) — confirm before removing.  */
	je	.LBL_2_2

/* Save caller state the vector ABI requires preserved across the
   scalar libm call: k4-k7, zmm16-zmm31, rsi, rdi, r12-r15.  */
	xorb	%dl, %dl
	xorl	%eax, %eax
	kmovw	%k4, 1048(%rsp)
	kmovw	%k5, 1040(%rsp)
	kmovw	%k6, 1032(%rsp)
	kmovw	%k7, 1024(%rsp)
	vmovups	%zmm16, 960(%rsp)
	vmovups	%zmm17, 896(%rsp)
	vmovups	%zmm18, 832(%rsp)
	vmovups	%zmm19, 768(%rsp)
	vmovups	%zmm20, 704(%rsp)
	vmovups	%zmm21, 640(%rsp)
	vmovups	%zmm22, 576(%rsp)
	vmovups	%zmm23, 512(%rsp)
	vmovups	%zmm24, 448(%rsp)
	vmovups	%zmm25, 384(%rsp)
	vmovups	%zmm26, 320(%rsp)
	vmovups	%zmm27, 256(%rsp)
	vmovups	%zmm28, 192(%rsp)
	vmovups	%zmm29, 128(%rsp)
	vmovups	%zmm30, 64(%rsp)
	vmovups	%zmm31, (%rsp)
	movq	%rsi, 1064(%rsp)
	movq	%rdi, 1056(%rsp)
	movq	%r12, 1096(%rsp)
	cfi_offset_rel_rsp (12, 1096)
	movb	%dl, %r12b		/* r12b = pair-iteration counter */
	movq	%r13, 1088(%rsp)
	cfi_offset_rel_rsp (13, 1088)
	movl	%ecx, %r13d		/* r13d = special-lane bitmask */
	movq	%r14, 1080(%rsp)
	cfi_offset_rel_rsp (14, 1080)
	movl	%eax, %r14d		/* r14d = current lane bit index */
	movq	%r15, 1072(%rsp)
	cfi_offset_rel_rsp (15, 1072)
	cfi_remember_state

/* Scalar fallback loop: examine two mask bits per iteration and call
   scalar cos for each set bit, storing into the result buffer.  */
.LBL_2_6:
	btl	%r14d, %r13d
	jc	.LBL_2_12

.LBL_2_7:
	lea	1(%r14), %esi
	btl	%esi, %r13d
	jc	.LBL_2_10

.LBL_2_8:
	incb	%r12b
	addl	$2, %r14d
/* NOTE(review): 16 iterations scan 32 bits, though only the low 8 can
   be set for 8 double lanes; extra iterations are harmless no-ops.  */
	cmpb	$16, %r12b
	jb	.LBL_2_6

/* All special lanes patched: restore saved state and rejoin the fast
   exit path with the merged results.  */
	kmovw	1048(%rsp), %k4
	kmovw	1040(%rsp), %k5
	kmovw	1032(%rsp), %k6
	kmovw	1024(%rsp), %k7
	vmovups	960(%rsp), %zmm16
	vmovups	896(%rsp), %zmm17
	vmovups	832(%rsp), %zmm18
	vmovups	768(%rsp), %zmm19
	vmovups	704(%rsp), %zmm20
	vmovups	640(%rsp), %zmm21
	vmovups	576(%rsp), %zmm22
	vmovups	512(%rsp), %zmm23
	vmovups	448(%rsp), %zmm24
	vmovups	384(%rsp), %zmm25
	vmovups	320(%rsp), %zmm26
	vmovups	256(%rsp), %zmm27
	vmovups	192(%rsp), %zmm28
	vmovups	128(%rsp), %zmm29
	vmovups	64(%rsp), %zmm30
	vmovups	(%rsp), %zmm31
	vmovups	1216(%rsp), %zmm1
	movq	1064(%rsp), %rsi
	movq	1056(%rsp), %rdi
	movq	1096(%rsp), %r12
	cfi_restore (%r12)
	movq	1088(%rsp), %r13
	cfi_restore (%r13)
	movq	1080(%rsp), %r14
	cfi_restore (%r14)
	movq	1072(%rsp), %r15
	cfi_restore (%r15)
	jmp	.LBL_2_2

/* Odd lane of the current pair: arg at +1160, result at +1224.
   vzeroupper avoids SSE/AVX transition penalties around the scalar
   libm call.  */
.LBL_2_10:
	cfi_restore_state
	movzbl	%r12b, %r15d
	shlq	$4, %r15		/* r15 = pair index * 16 bytes (2 doubles) */
	vmovsd	1160(%rsp,%r15), %xmm0
	vzeroupper
/* NOTE(review): this load repeats the one above; vzeroupper preserves
   the low 128 bits, so the first load looks redundant — confirm
   before changing.  */
	vmovsd	1160(%rsp,%r15), %xmm0

	call	JUMPTARGET(cos)

	vmovsd	%xmm0, 1224(%rsp,%r15)
	jmp	.LBL_2_8

/* Even lane of the current pair: arg at +1152, result at +1216.  */
.LBL_2_12:
	movzbl	%r12b, %r15d
	shlq	$4, %r15
	vmovsd	1152(%rsp,%r15), %xmm0
	vzeroupper
/* NOTE(review): duplicated load, see .LBL_2_10.  */
	vmovsd	1152(%rsp,%r15), %xmm0

	call	JUMPTARGET(cos)

	vmovsd	%xmm0, 1216(%rsp,%r15)
	jmp	.LBL_2_7
END (_ZGVeN8v_cos_skx)
451

/* Source: glibc sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S */