/* Function cbrtf vectorized with AVX2.
   Copyright (C) 2021-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

/*
 * ALGORITHM DESCRIPTION:
 *
 *    x=2^{3*k+j} * 1.b1 b2 ... b5 b6 ... b23
 *    Let r=(x*2^{-3k-j} - 1.b1 b2 ... b5 1)* rcp[b1 b2 ..b5],
 *    where rcp[b1 b2 .. b5]=1/(1.b1 b2 b3 b4 b5 1) in single precision
 *    cbrtf(2^j * 1. b1 b2 .. b5 1) is approximated as T[j][b1..b5]+D[j][b1..b5]
 *    (T stores the high 24 bits, D stores the low order bits)
 *    Result=2^k*T+(2^k*T*r)*P+2^k*D
 *    where P=p1+p2*r+..
 *
 */
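
/* A minimal scalar C sketch of the same computation (an illustration added
   to this commentary, not part of the vector code: it recomputes the table
   entries with cbrtf, substitutes the rough coefficients 1/3 and -1/9 for
   the tuned _sP1/_sP2 minimax values, and ignores the special inputs - zero,
   denormals, Inf, NaN - that the vector code routes to a scalar fallback):

	#include <math.h>
	#include <stdint.h>
	#include <string.h>

	static float
	cbrtf_sketch (float x)
	{
	  uint32_t u, mu;
	  float m, yp, t, p, r;
	  int bexp, i, k, j;

	  memcpy (&u, &x, sizeof u);
	  bexp = (u >> 23) & 0xff;		// biased exponent
	  i = (u >> 18) & 0x1f;			// leading mantissa bits b1..b5
	  yp = 1.0f + i / 32.0f + 1.0f / 64.0f;	// y' = 1.b1 b2 .. b5 1
	  mu = (u & 0x007fffff) | 0x3f800000;
	  memcpy (&m, &mu, sizeof m);		// m = 1.b1 b2 .. b23
	  r = (m - yp) / yp;			// reduced argument: m = y'*(1+r)
	  k = (0x555 * bexp) >> 12;		// == (bexp-1)/3, biased by 42
	  j = (bexp - 1) - 3 * k;		// bexp-1 == 3*k + j, j in {0,1,2}
	  t = cbrtf ((float) (1 << j) * yp);	// T ~ cbrtf(2^j * y')
	  p = 1.0f / 3.0f - r / 9.0f;		// P ~ p1 + p2*r
	  t = t + (t * r) * p;			// T + T*r*P
	  return copysignf (ldexpf (t, k - 42), x);	// scale by 2^(k-42)
	}
*/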

/* Offsets for data table __svml_scbrt_data_internal
 */
#define _sRcp 0
#define _sCbrtHL 128
#define _sP2 512
#define _sP1 544
#define _sMantissaMask 576
#define _sMantissaMask1 608
#define _sExpMask 640
#define _sExpMask1 672
#define _iRcpIndexMask 704
#define _iBExpMask 736
#define _iSignMask 768
#define _iBias 800
#define _iOne 832
#define _i555 864
#define _iAbsMask 896
#define _iSubConst 928
#define _iCmpConst 960
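
/* Example: these byte offsets are used directly in addressing below, e.g.
   _sP2+__svml_scbrt_data_internal(%rip) reads the eight 32-bit lanes of
   _sP2 at offset 512 from the table base.  */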

#include <sysdep.h>

	.section .text.avx2, "ax", @progbits
ENTRY(_ZGVdN8v_cbrtf_avx2)
	pushq	%rbp
	cfi_def_cfa_offset(16)
	movq	%rsp, %rbp
	cfi_def_cfa(6, 16)
	cfi_offset(6, -16)
	andq	$-32, %rsp
	subq	$96, %rsp

	/* Load reciprocal value */
	lea	__svml_scbrt_data_internal(%rip), %rdx
	vmovaps	%ymm0, %ymm5

	/*
	 * Load constants
	 * Reciprocal index calculation
	 */
	vpsrld	$16, %ymm5, %ymm3
	vpand	_iRcpIndexMask+__svml_scbrt_data_internal(%rip), %ymm3, %ymm4
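
	/* After the 16-bit shift, mantissa bits b1..b5 (bits 22:18 of the
	   float) land in bits 6:2, so masking with 0x7c leaves 4*(b1..b5):
	   a ready-made byte offset into the 32-entry _sRcp table of 4-byte
	   values.  */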
	vextractf128 $1, %ymm4, %xmm15
	vmovd	%xmm4, %eax
	vmovd	%xmm15, %r8d
	vpextrd	$1, %xmm15, %r9d
	vpextrd	$2, %xmm15, %r10d
	vpextrd	$3, %xmm15, %r11d
	movslq	%r8d, %r8
	movslq	%r9d, %r9
	movslq	%r10d, %r10
	movslq	%r11d, %r11
	vpextrd	$1, %xmm4, %ecx
	vpextrd	$2, %xmm4, %esi
	vpextrd	$3, %xmm4, %edi
	movslq	%eax, %rax
	movslq	%ecx, %rcx
	movslq	%esi, %rsi
	movslq	%edi, %rdi
	vmovd	(%rdx, %r8), %xmm13
	vmovd	(%rdx, %r9), %xmm14
	vmovd	(%rdx, %r10), %xmm1
	vmovd	(%rdx, %r11), %xmm0
	vpunpckldq %xmm14, %xmm13, %xmm2
	vpunpckldq %xmm0, %xmm1, %xmm13

	/* Get signed biased exponent */
	vpsrld	$7, %ymm3, %ymm0
	vmovd	(%rdx, %rax), %xmm6
	vmovd	(%rdx, %rcx), %xmm7
	vmovd	(%rdx, %rsi), %xmm8
	vmovd	(%rdx, %rdi), %xmm9
	vpunpckldq %xmm7, %xmm6, %xmm10
	vpunpckldq %xmm9, %xmm8, %xmm11
	vpunpcklqdq %xmm11, %xmm10, %xmm12
	vpunpcklqdq %xmm13, %xmm2, %xmm6
	vandps	_iAbsMask+__svml_scbrt_data_internal(%rip), %ymm5, %ymm3

	/* Argument reduction */
	vandps	_sMantissaMask+__svml_scbrt_data_internal(%rip), %ymm5, %ymm8
	vandps	_sMantissaMask1+__svml_scbrt_data_internal(%rip), %ymm5, %ymm9
	vpsubd	_iSubConst+__svml_scbrt_data_internal(%rip), %ymm3, %ymm7
	vorps	_sExpMask+__svml_scbrt_data_internal(%rip), %ymm8, %ymm10
	vorps	_sExpMask1+__svml_scbrt_data_internal(%rip), %ymm9, %ymm11

	/* r = y - y` */
	vsubps	%ymm11, %ymm10, %ymm15

	/* Extract the sign bit from the signed biased exponent */
	vpand	_iSignMask+__svml_scbrt_data_internal(%rip), %ymm0, %ymm8
	vpcmpgtd _iCmpConst+__svml_scbrt_data_internal(%rip), %ymm7, %ymm2
	vmovmskps %ymm2, %eax
	vinsertf128 $1, %xmm6, %ymm12, %ymm14

	/* Get absolute biased exponent */
	vpand	_iBExpMask+__svml_scbrt_data_internal(%rip), %ymm0, %ymm6

	/* r=(y-y`)*rcp_table(y`) */
	vmulps	%ymm15, %ymm14, %ymm1
	vpsubd	_iOne+__svml_scbrt_data_internal(%rip), %ymm6, %ymm10

	/*
	 * Calculate exponent/3
	 * i555Exp=(2^{12}-1)/3*exponent
	 */
	vpmulld	_i555+__svml_scbrt_data_internal(%rip), %ymm6, %ymm3

	/* Get K (exponent=3*k+j) */
	vpsrld	$12, %ymm3, %ymm13
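
	/* For biased exponents 1 <= e <= 254 the fixed-point product gives
	   (0x555*e)>>12 == (e-1)/3 exactly; e.g. e = 130 yields
	   (1365*130)>>12 = 177450>>12 = 43 = 129/3.  */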

	/* Get J */
	vpsubd	%ymm13, %ymm10, %ymm11

	/* Add 2/3*(bias-1)+1 to (k+1/3*(bias-1)) */
	vpaddd	_iBias+__svml_scbrt_data_internal(%rip), %ymm13, %ymm7
	vpsubd	%ymm13, %ymm11, %ymm12

	/* Attach sign to exponent */
	vpor	%ymm8, %ymm7, %ymm9
	vpsubd	%ymm13, %ymm12, %ymm14
	vpslld	$23, %ymm9, %ymm0
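
	/* The field built here is sign | (true k + bias); shifted into bits
	   30:23 it forms the IEEE-754 encoding of +-2^k, used below to scale
	   the table value.  */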

	/* Get 128*J */
	vpslld	$7, %ymm14, %ymm15

	/* iCbrtIndex=4*l+128*j */
	vpaddd	%ymm15, %ymm4, %ymm4

	/* Zero index if callout expected */
	vpandn	%ymm4, %ymm2, %ymm4
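
	/* Lanes flagged for the scalar fallback could otherwise carry
	   out-of-range indices; zeroing them keeps the table loads below in
	   bounds, and those lanes' results are overwritten afterwards.  */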

	/* Load Cbrt table Hi & Lo values */
	vmovd	%xmm4, %ecx
	vextractf128 $1, %ymm4, %xmm13
	vpextrd	$1, %xmm4, %esi
	movslq	%ecx, %rcx
	movslq	%esi, %rsi
	vmovd	%xmm13, %r9d
	vmovd	128(%rdx, %rcx), %xmm2
	vpextrd	$2, %xmm4, %edi
	vpextrd	$3, %xmm4, %r8d
	vmovd	128(%rdx, %rsi), %xmm3
	vpextrd	$1, %xmm13, %r10d
	vpextrd	$2, %xmm13, %ecx
	vpextrd	$3, %xmm13, %esi
	movslq	%edi, %rdi
	movslq	%r8d, %r8
	movslq	%r9d, %r9
	movslq	%r10d, %r10
	movslq	%ecx, %rcx
	movslq	%esi, %rsi
	vmovd	128(%rdx, %rdi), %xmm6
	vmovd	128(%rdx, %r8), %xmm7
	vmovd	128(%rdx, %r9), %xmm11
	vmovd	128(%rdx, %r10), %xmm12
	vmovd	128(%rdx, %rcx), %xmm14
	vmovd	128(%rdx, %rsi), %xmm15
	vpunpckldq %xmm3, %xmm2, %xmm8
	vpunpckldq %xmm7, %xmm6, %xmm9
	vpunpckldq %xmm12, %xmm11, %xmm4
	vpunpckldq %xmm15, %xmm14, %xmm11
	vpunpcklqdq %xmm9, %xmm8, %xmm10
	vpunpcklqdq %xmm11, %xmm4, %xmm2
	vinsertf128 $1, %xmm2, %ymm10, %ymm3

	/* sCbrtHi *= 2^k */
	vmulps	%ymm3, %ymm0, %ymm2

	/* Polynomial: P = p1 + p2*r */
	vmovups	_sP2+__svml_scbrt_data_internal(%rip), %ymm0
	vfmadd213ps _sP1+__svml_scbrt_data_internal(%rip), %ymm1, %ymm0

	/* T`*r */
	vmulps	%ymm2, %ymm1, %ymm1

	/* (T`*r)*P */
	vmulps	%ymm1, %ymm0, %ymm0

	/*
	 * result = T` + (T`*r)*P
	 */
	vaddps	%ymm0, %ymm2, %ymm0
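
	/* Rationale: with m = y`*(1+r), cbrtf(x) = 2^k * T * (1+r)^(1/3)
	   ~= 2^k * (T + T*r*P), and P = p1 + p2*r ~ 1/3 - r/9 matches the
	   leading terms of the binomial series for (1+r)^(1/3).  */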
	testl	%eax, %eax

	/* Go to special inputs processing branch */
	jne	L(SPECIAL_VALUES_BRANCH)
	# LOE rbx r12 r13 r14 r15 eax ymm0 ymm5

	/* Restore registers
	 * and exit the function
	 */

L(EXIT):
	movq	%rbp, %rsp
	popq	%rbp
	cfi_def_cfa(7, 8)
	cfi_restore(6)
	ret
	cfi_def_cfa(6, 16)
	cfi_offset(6, -16)

	/* Branch to process
	 * special inputs
	 */

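	/* The fallback below is, in effect (illustrative C, with hypothetical
	   names for the spilled input/output slots):

		for (int i = 0; i < 8; i++)
		  if (mask & (1 << i))		// lane flagged by the range check
		    out[i] = cbrtf (in[i]);	// scalar call repairs the lane

	   where the inputs sit at 32(%rsp) and the vector result at
	   64(%rsp).  */
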
L(SPECIAL_VALUES_BRANCH):
	vmovups	%ymm5, 32(%rsp)
	vmovups	%ymm0, 64(%rsp)
	# LOE rbx r12 r13 r14 r15 eax ymm0

	xorl	%edx, %edx
	# LOE rbx r12 r13 r14 r15 eax edx

	vzeroupper
	movq	%r12, 16(%rsp)
	/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */
	.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
	movl	%edx, %r12d
	movq	%r13, 8(%rsp)
	/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus) */
	.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22
	movl	%eax, %r13d
	movq	%r14, (%rsp)
	/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus) */
	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
	# LOE rbx r15 r12d r13d

	/* Range mask
	 * bits check
	 */

L(RANGEMASK_CHECK):
	btl	%r12d, %r13d

	/* Call scalar math function */
	jc	L(SCALAR_MATH_CALL)
	# LOE rbx r15 r12d r13d

	/* Special inputs
	 * processing loop
	 */

L(SPECIAL_VALUES_LOOP):
	incl	%r12d
	cmpl	$8, %r12d

	/* Check bits in range mask */
	jl	L(RANGEMASK_CHECK)
	# LOE rbx r15 r12d r13d

	movq	16(%rsp), %r12
	cfi_restore(12)
	movq	8(%rsp), %r13
	cfi_restore(13)
	movq	(%rsp), %r14
	cfi_restore(14)
	vmovups	64(%rsp), %ymm0

	/* Go to exit */
	jmp	L(EXIT)
	/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */
	.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
	/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus) */
	.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22
	/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus) */
	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
	# LOE rbx r12 r13 r14 r15 ymm0

	/* Scalar math function call
	 * to process special input
	 */

L(SCALAR_MATH_CALL):
	movl	%r12d, %r14d
	vmovss	32(%rsp, %r14, 4), %xmm0
	call	cbrtf@PLT
	# LOE rbx r14 r15 r12d r13d xmm0

	vmovss	%xmm0, 64(%rsp, %r14, 4)

	/* Process special inputs in loop */
	jmp	L(SPECIAL_VALUES_LOOP)
	# LOE rbx r15 r12d r13d
END(_ZGVdN8v_cbrtf_avx2)

	.section .rodata, "a"
	.align	32

#ifdef __svml_scbrt_data_internal_typedef
typedef unsigned int VUINT32;
typedef struct {
	__declspec(align(32)) VUINT32 _sRcp[32][1];
	__declspec(align(32)) VUINT32 _sCbrtHL[96][1];
	__declspec(align(32)) VUINT32 _sP2[8][1];
	__declspec(align(32)) VUINT32 _sP1[8][1];
	__declspec(align(32)) VUINT32 _sMantissaMask[8][1];
	__declspec(align(32)) VUINT32 _sMantissaMask1[8][1];
	__declspec(align(32)) VUINT32 _sExpMask[8][1];
	__declspec(align(32)) VUINT32 _sExpMask1[8][1];
	__declspec(align(32)) VUINT32 _iRcpIndexMask[8][1];
	__declspec(align(32)) VUINT32 _iBExpMask[8][1];
	__declspec(align(32)) VUINT32 _iSignMask[8][1];
	__declspec(align(32)) VUINT32 _iBias[8][1];
	__declspec(align(32)) VUINT32 _iOne[8][1];
	__declspec(align(32)) VUINT32 _i555[8][1];
	__declspec(align(32)) VUINT32 _iAbsMask[8][1];
	__declspec(align(32)) VUINT32 _iSubConst[8][1];
	__declspec(align(32)) VUINT32 _iCmpConst[8][1];
} __svml_scbrt_data_internal;
#endif
__svml_scbrt_data_internal:
	/* _sRcp (negated reciprocals) */
	.long 0xBF7C0FC1 /* -(1/(1+0/32+1/64)) = -.984615 */
	.long 0xBF74898D /* -(1/(1+1/32+1/64)) = -.955224 */
	.long 0xBF6D7304 /* -(1/(1+2/32+1/64)) = -.927536 */
	.long 0xBF66C2B4 /* -(1/(1+3/32+1/64)) = -.901408 */
	.long 0xBF607038 /* -(1/(1+4/32+1/64)) = -.876712 */
	.long 0xBF5A740E /* -(1/(1+5/32+1/64)) = -.853333 */
	.long 0xBF54C77B /* -(1/(1+6/32+1/64)) = -.831169 */
	.long 0xBF4F6475 /* -(1/(1+7/32+1/64)) = -.810127 */
	.long 0xBF4A4588 /* -(1/(1+8/32+1/64)) = -.790123 */
	.long 0xBF4565C8 /* -(1/(1+9/32+1/64)) = -.771084 */
	.long 0xBF40C0C1 /* -(1/(1+10/32+1/64)) = -.752941 */
	.long 0xBF3C5264 /* -(1/(1+11/32+1/64)) = -.735632 */
	.long 0xBF381703 /* -(1/(1+12/32+1/64)) = -.719101 */
	.long 0xBF340B41 /* -(1/(1+13/32+1/64)) = -.703297 */
	.long 0xBF302C0B /* -(1/(1+14/32+1/64)) = -.688172 */
	.long 0xBF2C7692 /* -(1/(1+15/32+1/64)) = -.673684 */
	.long 0xBF28E83F /* -(1/(1+16/32+1/64)) = -.659794 */
	.long 0xBF257EB5 /* -(1/(1+17/32+1/64)) = -.646465 */
	.long 0xBF2237C3 /* -(1/(1+18/32+1/64)) = -.633663 */
	.long 0xBF1F1166 /* -(1/(1+19/32+1/64)) = -.621359 */
	.long 0xBF1C09C1 /* -(1/(1+20/32+1/64)) = -.609524 */
	.long 0xBF191F1A /* -(1/(1+21/32+1/64)) = -.598131 */
	.long 0xBF164FDA /* -(1/(1+22/32+1/64)) = -.587156 */
	.long 0xBF139A86 /* -(1/(1+23/32+1/64)) = -.576577 */
	.long 0xBF10FDBC /* -(1/(1+24/32+1/64)) = -.566372 */
	.long 0xBF0E7835 /* -(1/(1+25/32+1/64)) = -.556522 */
	.long 0xBF0C08C1 /* -(1/(1+26/32+1/64)) = -.547009 */
	.long 0xBF09AE41 /* -(1/(1+27/32+1/64)) = -.537815 */
	.long 0xBF0767AB /* -(1/(1+28/32+1/64)) = -.528926 */
	.long 0xBF053408 /* -(1/(1+29/32+1/64)) = -.520325 */
	.long 0xBF03126F /* -(1/(1+30/32+1/64)) = -.512 */
	.long 0xBF010204 /* -(1/(1+31/32+1/64)) = -.503937 */
	/* _sCbrtHL */
	.align	32
	.long 0x3F80A9C9 /* HI((2^0*(1+0/32+1/64))^(1/3)) = 1.005181 */
	.long 0x3F81F833 /* HI((2^0*(1+1/32+1/64))^(1/3)) = 1.015387 */
	.long 0x3F834007 /* HI((2^0*(1+2/32+1/64))^(1/3)) = 1.025391 */
	.long 0x3F848194 /* HI((2^0*(1+3/32+1/64))^(1/3)) = 1.035204 */
	.long 0x3F85BD25 /* HI((2^0*(1+4/32+1/64))^(1/3)) = 1.044835 */
	.long 0x3F86F300 /* HI((2^0*(1+5/32+1/64))^(1/3)) = 1.054291 */
	.long 0x3F882365 /* HI((2^0*(1+6/32+1/64))^(1/3)) = 1.06358 */
	.long 0x3F894E90 /* HI((2^0*(1+7/32+1/64))^(1/3)) = 1.07271 */
	.long 0x3F8A74B9 /* HI((2^0*(1+8/32+1/64))^(1/3)) = 1.081687 */
	.long 0x3F8B9615 /* HI((2^0*(1+9/32+1/64))^(1/3)) = 1.090518 */
	.long 0x3F8CB2D4 /* HI((2^0*(1+10/32+1/64))^(1/3)) = 1.099207 */
	.long 0x3F8DCB24 /* HI((2^0*(1+11/32+1/64))^(1/3)) = 1.107762 */
	.long 0x3F8EDF31 /* HI((2^0*(1+12/32+1/64))^(1/3)) = 1.116186 */
	.long 0x3F8FEF22 /* HI((2^0*(1+13/32+1/64))^(1/3)) = 1.124485 */
	.long 0x3F90FB1F /* HI((2^0*(1+14/32+1/64))^(1/3)) = 1.132664 */
	.long 0x3F92034C /* HI((2^0*(1+15/32+1/64))^(1/3)) = 1.140726 */
	.long 0x3F9307CA /* HI((2^0*(1+16/32+1/64))^(1/3)) = 1.148675 */
	.long 0x3F9408B9 /* HI((2^0*(1+17/32+1/64))^(1/3)) = 1.156516 */
	.long 0x3F950638 /* HI((2^0*(1+18/32+1/64))^(1/3)) = 1.164252 */
	.long 0x3F960064 /* HI((2^0*(1+19/32+1/64))^(1/3)) = 1.171887 */
	.long 0x3F96F759 /* HI((2^0*(1+20/32+1/64))^(1/3)) = 1.179423 */
	.long 0x3F97EB2F /* HI((2^0*(1+21/32+1/64))^(1/3)) = 1.186865 */
	.long 0x3F98DC01 /* HI((2^0*(1+22/32+1/64))^(1/3)) = 1.194214 */
	.long 0x3F99C9E5 /* HI((2^0*(1+23/32+1/64))^(1/3)) = 1.201474 */
	.long 0x3F9AB4F2 /* HI((2^0*(1+24/32+1/64))^(1/3)) = 1.208647 */
	.long 0x3F9B9D3D /* HI((2^0*(1+25/32+1/64))^(1/3)) = 1.215736 */
	.long 0x3F9C82DA /* HI((2^0*(1+26/32+1/64))^(1/3)) = 1.222743 */
	.long 0x3F9D65DD /* HI((2^0*(1+27/32+1/64))^(1/3)) = 1.229671 */
	.long 0x3F9E4659 /* HI((2^0*(1+28/32+1/64))^(1/3)) = 1.236522 */
	.long 0x3F9F245F /* HI((2^0*(1+29/32+1/64))^(1/3)) = 1.243297 */
	.long 0x3FA00000 /* HI((2^0*(1+30/32+1/64))^(1/3)) = 1.25 */
	.long 0x3FA0D94C /* HI((2^0*(1+31/32+1/64))^(1/3)) = 1.256631 */
	.long 0x3FA21B02 /* HI((2^1*(1+0/32+1/64))^(1/3)) = 1.266449 */
	.long 0x3FA3C059 /* HI((2^1*(1+1/32+1/64))^(1/3)) = 1.279307 */
	.long 0x3FA55D61 /* HI((2^1*(1+2/32+1/64))^(1/3)) = 1.291912 */
	.long 0x3FA6F282 /* HI((2^1*(1+3/32+1/64))^(1/3)) = 1.304276 */
	.long 0x3FA8801A /* HI((2^1*(1+4/32+1/64))^(1/3)) = 1.316409 */
	.long 0x3FAA067E /* HI((2^1*(1+5/32+1/64))^(1/3)) = 1.328323 */
	.long 0x3FAB8602 /* HI((2^1*(1+6/32+1/64))^(1/3)) = 1.340027 */
	.long 0x3FACFEEF /* HI((2^1*(1+7/32+1/64))^(1/3)) = 1.35153 */
	.long 0x3FAE718E /* HI((2^1*(1+8/32+1/64))^(1/3)) = 1.36284 */
	.long 0x3FAFDE1F /* HI((2^1*(1+9/32+1/64))^(1/3)) = 1.373966 */
	.long 0x3FB144E1 /* HI((2^1*(1+10/32+1/64))^(1/3)) = 1.384915 */
	.long 0x3FB2A60D /* HI((2^1*(1+11/32+1/64))^(1/3)) = 1.395692 */
	.long 0x3FB401DA /* HI((2^1*(1+12/32+1/64))^(1/3)) = 1.406307 */
	.long 0x3FB5587B /* HI((2^1*(1+13/32+1/64))^(1/3)) = 1.416763 */
	.long 0x3FB6AA20 /* HI((2^1*(1+14/32+1/64))^(1/3)) = 1.427067 */
	.long 0x3FB7F6F7 /* HI((2^1*(1+15/32+1/64))^(1/3)) = 1.437224 */
	.long 0x3FB93F29 /* HI((2^1*(1+16/32+1/64))^(1/3)) = 1.44724 */
	.long 0x3FBA82E1 /* HI((2^1*(1+17/32+1/64))^(1/3)) = 1.457119 */
	.long 0x3FBBC244 /* HI((2^1*(1+18/32+1/64))^(1/3)) = 1.466866 */
	.long 0x3FBCFD77 /* HI((2^1*(1+19/32+1/64))^(1/3)) = 1.476485 */
	.long 0x3FBE349B /* HI((2^1*(1+20/32+1/64))^(1/3)) = 1.48598 */
	.long 0x3FBF67D3 /* HI((2^1*(1+21/32+1/64))^(1/3)) = 1.495356 */
	.long 0x3FC0973C /* HI((2^1*(1+22/32+1/64))^(1/3)) = 1.504615 */
	.long 0x3FC1C2F6 /* HI((2^1*(1+23/32+1/64))^(1/3)) = 1.513762 */
	.long 0x3FC2EB1A /* HI((2^1*(1+24/32+1/64))^(1/3)) = 1.5228 */
	.long 0x3FC40FC6 /* HI((2^1*(1+25/32+1/64))^(1/3)) = 1.531731 */
	.long 0x3FC53112 /* HI((2^1*(1+26/32+1/64))^(1/3)) = 1.54056 */
	.long 0x3FC64F16 /* HI((2^1*(1+27/32+1/64))^(1/3)) = 1.549289 */
	.long 0x3FC769EB /* HI((2^1*(1+28/32+1/64))^(1/3)) = 1.55792 */
	.long 0x3FC881A6 /* HI((2^1*(1+29/32+1/64))^(1/3)) = 1.566457 */
	.long 0x3FC9965D /* HI((2^1*(1+30/32+1/64))^(1/3)) = 1.574901 */
	.long 0x3FCAA825 /* HI((2^1*(1+31/32+1/64))^(1/3)) = 1.583256 */
	.long 0x3FCC3D79 /* HI((2^2*(1+0/32+1/64))^(1/3)) = 1.595626 */
	.long 0x3FCE5054 /* HI((2^2*(1+1/32+1/64))^(1/3)) = 1.611826 */
	.long 0x3FD058B8 /* HI((2^2*(1+2/32+1/64))^(1/3)) = 1.627707 */
	.long 0x3FD25726 /* HI((2^2*(1+3/32+1/64))^(1/3)) = 1.643285 */
	.long 0x3FD44C15 /* HI((2^2*(1+4/32+1/64))^(1/3)) = 1.658572 */
	.long 0x3FD637F2 /* HI((2^2*(1+5/32+1/64))^(1/3)) = 1.673582 */
	.long 0x3FD81B24 /* HI((2^2*(1+6/32+1/64))^(1/3)) = 1.688328 */
	.long 0x3FD9F60B /* HI((2^2*(1+7/32+1/64))^(1/3)) = 1.702821 */
	.long 0x3FDBC8FE /* HI((2^2*(1+8/32+1/64))^(1/3)) = 1.717071 */
	.long 0x3FDD9452 /* HI((2^2*(1+9/32+1/64))^(1/3)) = 1.731089 */
	.long 0x3FDF5853 /* HI((2^2*(1+10/32+1/64))^(1/3)) = 1.744883 */
	.long 0x3FE1154B /* HI((2^2*(1+11/32+1/64))^(1/3)) = 1.758462 */
	.long 0x3FE2CB7F /* HI((2^2*(1+12/32+1/64))^(1/3)) = 1.771835 */
	.long 0x3FE47B2E /* HI((2^2*(1+13/32+1/64))^(1/3)) = 1.785009 */
	.long 0x3FE62496 /* HI((2^2*(1+14/32+1/64))^(1/3)) = 1.797992 */
	.long 0x3FE7C7F0 /* HI((2^2*(1+15/32+1/64))^(1/3)) = 1.810789 */
	.long 0x3FE96571 /* HI((2^2*(1+16/32+1/64))^(1/3)) = 1.823408 */
	.long 0x3FEAFD4C /* HI((2^2*(1+17/32+1/64))^(1/3)) = 1.835855 */
	.long 0x3FEC8FB3 /* HI((2^2*(1+18/32+1/64))^(1/3)) = 1.848135 */
	.long 0x3FEE1CD3 /* HI((2^2*(1+19/32+1/64))^(1/3)) = 1.860255 */
	.long 0x3FEFA4D7 /* HI((2^2*(1+20/32+1/64))^(1/3)) = 1.872218 */
	.long 0x3FF127E9 /* HI((2^2*(1+21/32+1/64))^(1/3)) = 1.88403 */
	.long 0x3FF2A62F /* HI((2^2*(1+22/32+1/64))^(1/3)) = 1.895697 */
	.long 0x3FF41FD0 /* HI((2^2*(1+23/32+1/64))^(1/3)) = 1.907221 */
	.long 0x3FF594EE /* HI((2^2*(1+24/32+1/64))^(1/3)) = 1.918607 */
	.long 0x3FF705AC /* HI((2^2*(1+25/32+1/64))^(1/3)) = 1.929861 */
	.long 0x3FF8722A /* HI((2^2*(1+26/32+1/64))^(1/3)) = 1.940984 */
	.long 0x3FF9DA86 /* HI((2^2*(1+27/32+1/64))^(1/3)) = 1.951981 */
	.long 0x3FFB3EDE /* HI((2^2*(1+28/32+1/64))^(1/3)) = 1.962856 */
	.long 0x3FFC9F4E /* HI((2^2*(1+29/32+1/64))^(1/3)) = 1.973612 */
	.long 0x3FFDFBF2 /* HI((2^2*(1+30/32+1/64))^(1/3)) = 1.984251 */
	.long 0x3FFF54E3 /* HI((2^2*(1+31/32+1/64))^(1/3)) = 1.994778 */
	.align	32
	.long 0xBDE3A962, 0xBDE3A962, 0xBDE3A962, 0xBDE3A962, 0xBDE3A962, 0xBDE3A962, 0xBDE3A962, 0xBDE3A962 /* _sP2 */
	.align	32
	.long 0x3EAAAC91, 0x3EAAAC91, 0x3EAAAC91, 0x3EAAAC91, 0x3EAAAC91, 0x3EAAAC91, 0x3EAAAC91, 0x3EAAAC91 /* _sP1 */
	.align	32
	.long 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff /* _sMantissaMask (EXP_MSK3) */
	.align	32
	.long 0x007e0000, 0x007e0000, 0x007e0000, 0x007e0000, 0x007e0000, 0x007e0000, 0x007e0000, 0x007e0000 /* _sMantissaMask1 (SIG_MASK) */
	.align	32
	.long 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000 /* _sExpMask (EXP_MASK) */
	.align	32
	.long 0xBF820000, 0xBF820000, 0xBF820000, 0xBF820000, 0xBF820000, 0xBF820000, 0xBF820000, 0xBF820000 /* _sExpMask1 (EXP_MASK2) */
	.align	32
	.long 0x0000007c, 0x0000007c, 0x0000007c, 0x0000007c, 0x0000007c, 0x0000007c, 0x0000007c, 0x0000007c /* _iRcpIndexMask */
	.align	32
	.long 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff /* _iBExpMask */
	.align	32
	.long 0x00000100, 0x00000100, 0x00000100, 0x00000100, 0x00000100, 0x00000100, 0x00000100, 0x00000100 /* _iSignMask */
	.align	32
	.long 0x00000055, 0x00000055, 0x00000055, 0x00000055, 0x00000055, 0x00000055, 0x00000055, 0x00000055 /* _iBias */
	.align	32
	.long 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 /* _iOne */
	.align	32
	.long 0x00000555, 0x00000555, 0x00000555, 0x00000555, 0x00000555, 0x00000555, 0x00000555, 0x00000555 /* _i555 */
	.align	32
	.long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff /* _iAbsMask */
	.align	32
	.long 0x80800000, 0x80800000, 0x80800000, 0x80800000, 0x80800000, 0x80800000, 0x80800000, 0x80800000 /* _iSubConst */
	.align	32
	.long 0xFEFFFFFF, 0xFEFFFFFF, 0xFEFFFFFF, 0xFEFFFFFF, 0xFEFFFFFF, 0xFEFFFFFF, 0xFEFFFFFF, 0xFEFFFFFF /* _iCmpConst */
	.align	32
	.type	__svml_scbrt_data_internal, @object
	.size	__svml_scbrt_data_internal, .-__svml_scbrt_data_internal
