1/* Function log2f vectorized with SSE4.
2 Copyright (C) 2021-2024 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 https://www.gnu.org/licenses/. */
18
19/*
20 * ALGORITHM DESCRIPTION:
21 *
22 * Get short reciprocal approximation Rcp ~ 1/mantissa(x)
23 * R = Rcp*x - 1.0
24 * log2(x) = k - log2(Rcp) + poly_approximation(R)
25 * log2(Rcp) is tabulated
26 *
27 *
28 */
29
/* Offsets for data table __svml_slog2_data_internal.
   Byte offsets into the rodata object defined at the bottom of this
   file; each field is one 16-byte (4 x float) vector, so consecutive
   fields advance by 16.  These must stay in sync with the layout of
   __svml_slog2_data_internal below.
 */
#define MinNorm 0
#define MaxNorm 16
#define iBrkValue 32
#define iOffExpoMask 48
#define One 64
#define sPoly 80

#include <sysdep.h>
40
	.section .text.sse4, "ax", @progbits
/* Vector variant _ZGVbN4v_log2f_sse4: 4-lane single-precision log2
   (SysV x86_64 vector ABI, 'b' = SSE4, N4 = 4 lanes, v = one vector arg).
   In:  xmm0 = x[0..3]
   Out: xmm0 = log2(x[0..3])
   Lanes outside (MinNorm, MaxNorm] fall back to scalar log2f.  */
ENTRY(_ZGVbN4v_log2f_sse4)
	/* 72-byte frame: (%rsp)..23(%rsp) spill %r12-%r14 on the
	   special-value path, 32(%rsp) saves the input vector,
	   48(%rsp) holds the result vector.  */
	subq	$72, %rsp
	cfi_def_cfa_offset(80)
	movaps	%xmm0, %xmm1

	/* reduction: compute r, n */
	movdqu	iBrkValue+__svml_slog2_data_internal(%rip), %xmm2
	movaps	%xmm0, %xmm4
	movdqu	iOffExpoMask+__svml_slog2_data_internal(%rip), %xmm10
	/* Integer trick: subtracting iBrkValue (bits of SP 2/3) biases the
	   exponent field so bits 31:23 of xmm1 become n = relative exponent.  */
	psubd	%xmm2, %xmm1
	/* Keep only the significand-offset bits of the difference...  */
	pand	%xmm1, %xmm10
	movaps	%xmm0, %xmm3
	/* ...and re-add iBrkValue: xmm10 = mantissa of x scaled into
	   [2/3, 4/3), interpreted as float below.  */
	paddd	%xmm2, %xmm10
	/* Arithmetic shift extracts the (signed) exponent n.  */
	psrad	$23, %xmm1
	/* Preload polynomial coefficients coeff9..coeff2 (two per pair).  */
	movups	sPoly+__svml_slog2_data_internal(%rip), %xmm5
	movups	sPoly+32+__svml_slog2_data_internal(%rip), %xmm6
	movups	sPoly+64+__svml_slog2_data_internal(%rip), %xmm7
	movups	sPoly+96+__svml_slog2_data_internal(%rip), %xmm9
	/* Out-of-range lane masks: x < MinNorm catches zero, subnormal
	   and negative inputs; !(x <= MaxNorm) catches +Inf and NaN.  */
	cmpltps	MinNorm+__svml_slog2_data_internal(%rip), %xmm4
	cmpnleps MaxNorm+__svml_slog2_data_internal(%rip), %xmm3
	/* xmm1 = (float) n.  */
	cvtdq2ps %xmm1, %xmm1
	/* R = m - 1.0; reduced argument for the polynomial.  */
	subps	One+__svml_slog2_data_internal(%rip), %xmm10
	/* Evaluate P(R) = c9*R^8 + ... + c1 with paired products folded
	   by powers of R^2 to shorten the dependency chain.  */
	mulps	%xmm10, %xmm5
	movaps	%xmm10, %xmm8
	mulps	%xmm10, %xmm6
	/* xmm8 = R^2.  */
	mulps	%xmm10, %xmm8
	addps	sPoly+16+__svml_slog2_data_internal(%rip), %xmm5
	mulps	%xmm10, %xmm7
	addps	sPoly+48+__svml_slog2_data_internal(%rip), %xmm6
	mulps	%xmm10, %xmm9
	mulps	%xmm8, %xmm5
	addps	sPoly+80+__svml_slog2_data_internal(%rip), %xmm7
	addps	sPoly+112+__svml_slog2_data_internal(%rip), %xmm9
	addps	%xmm5, %xmm6
	mulps	%xmm8, %xmm6
	/* Merge the two out-of-range masks.  */
	orps	%xmm3, %xmm4

	/* combine and get argument value range mask */
	movmskps %xmm4, %edx
	addps	%xmm6, %xmm7
	mulps	%xmm7, %xmm8
	addps	%xmm8, %xmm9
	mulps	%xmm10, %xmm9
	/* Fold in the last coefficient, coeff1.  */
	addps	sPoly+128+__svml_slog2_data_internal(%rip), %xmm9
	/* xmm10 = R * P(R) ~= log2(m).  */
	mulps	%xmm9, %xmm10
	/* xmm1 = n + R * P(R) = log2(x) for in-range lanes.  */
	addps	%xmm10, %xmm1
	testl	%edx, %edx

	/* Go to special inputs processing branch */
	jne	L(SPECIAL_VALUES_BRANCH)
	# LOE rbx rbp r12 r13 r14 r15 edx xmm0 xmm1

	/* Restore registers
	 * and exit the function
	 */

L(EXIT):
	movaps	%xmm1, %xmm0
	addq	$72, %rsp
	cfi_def_cfa_offset(8)
	ret
	/* Re-establish the in-body CFA for the out-of-line code below.  */
	cfi_def_cfa_offset(80)

	/* Branch to process
	 * special inputs
	 */

L(SPECIAL_VALUES_BRANCH):
	/* Spill input and fast-path result so the scalar calls can patch
	   individual lanes of the result in memory.  */
	movups	%xmm0, 32(%rsp)
	movups	%xmm1, 48(%rsp)
	# LOE rbx rbp r12 r13 r14 r15 edx

	xorl	%eax, %eax
	movq	%r12, 16(%rsp)
	cfi_offset(12, -64)
	/* r12d = current lane index, starting at 0.  */
	movl	%eax, %r12d
	movq	%r13, 8(%rsp)
	cfi_offset(13, -72)
	/* r13d = range mask, one bit per out-of-range lane.  */
	movl	%edx, %r13d
	movq	%r14, (%rsp)
	cfi_offset(14, -80)
	# LOE rbx rbp r15 r12d r13d

	/* Range mask
	 * bits check
	 */

L(RANGEMASK_CHECK):
	/* CF = bit r12d of the range mask.  */
	btl	%r12d, %r13d

	/* Call scalar math function */
	jc	L(SCALAR_MATH_CALL)
	# LOE rbx rbp r15 r12d r13d

	/* Special inputs
	 * processing loop
	 */

L(SPECIAL_VALUES_LOOP):
	incl	%r12d
	/* 4 lanes in a 128-bit vector.  */
	cmpl	$4, %r12d

	/* Check bits in range mask */
	jl	L(RANGEMASK_CHECK)
	# LOE rbx rbp r15 r12d r13d

	/* All lanes handled: restore callee-saved registers.  */
	movq	16(%rsp), %r12
	cfi_restore(12)
	movq	8(%rsp), %r13
	cfi_restore(13)
	movq	(%rsp), %r14
	cfi_restore(14)
	/* Reload the result with special lanes patched in.  */
	movups	48(%rsp), %xmm1

	/* Go to exit */
	jmp	L(EXIT)
	/* CFI state for the code below, which runs with r12-r14 saved.  */
	cfi_offset(12, -64)
	cfi_offset(13, -72)
	cfi_offset(14, -80)
	# LOE rbx rbp r12 r13 r14 r15 xmm1

	/* Scalar math function call
	 * to process special input
	 */

L(SCALAR_MATH_CALL):
	movl	%r12d, %r14d
	/* Load the offending lane from the saved input...  */
	movss	32(%rsp, %r14, 4), %xmm0
	call	log2f@PLT
	# LOE rbx rbp r14 r15 r12d r13d xmm0

	/* ...and store the scalar result into the saved output vector.  */
	movss	%xmm0, 48(%rsp, %r14, 4)

	/* Process special inputs in loop */
	jmp	L(SPECIAL_VALUES_LOOP)
	# LOE rbx rbp r15 r12d r13d
END(_ZGVbN4v_log2f_sse4)
179
	.section .rodata, "a"
	.align 16

/* Constant table for the vector log2f kernel.  The field order and
   16-byte strides must match the offset #defines at the top of this
   file (MinNorm = 0, MaxNorm = 16, ..., sPoly = 80).  */
#ifdef __svml_slog2_data_internal_typedef
typedef unsigned int VUINT32;
typedef struct {
	__declspec(align(16)) VUINT32 MinNorm[4][1];
	__declspec(align(16)) VUINT32 MaxNorm[4][1];
	__declspec(align(16)) VUINT32 iBrkValue[4][1];
	__declspec(align(16)) VUINT32 iOffExpoMask[4][1];
	__declspec(align(16)) VUINT32 One[4][1];
	__declspec(align(16)) VUINT32 sPoly[9][4][1];
} __svml_slog2_data_internal;
#endif
__svml_slog2_data_internal:
	/* MinNorm: smallest normal float, 0x1p-126.  */
	.long 0x00800000, 0x00800000, 0x00800000, 0x00800000
	/* MaxNorm: largest finite float, FLT_MAX.  */
	.align 16
	.long 0x7f7fffff, 0x7f7fffff, 0x7f7fffff, 0x7f7fffff
	/* iBrkValue = SP 2/3 */
	.align 16
	.long 0x3f2aaaab, 0x3f2aaaab, 0x3f2aaaab, 0x3f2aaaab
	/* iOffExpoMask = SP significand mask */
	.align 16
	.long 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff
	/* sOne = SP 1.0 */
	.align 16
	.long 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000
	/* spoly[9]: minimax coefficients for log2(1+R), highest degree
	   first; coeff1 ~ 1/ln(2) = log2(e).  */
	.align 16
	.long 0x3e554012, 0x3e554012, 0x3e554012, 0x3e554012 /* coeff9 */
	.long 0xbe638E14, 0xbe638E14, 0xbe638E14, 0xbe638E14 /* coeff8 */
	.long 0x3e4D660B, 0x3e4D660B, 0x3e4D660B, 0x3e4D660B /* coeff7 */
	.long 0xbe727824, 0xbe727824, 0xbe727824, 0xbe727824 /* coeff6 */
	.long 0x3e93DD07, 0x3e93DD07, 0x3e93DD07, 0x3e93DD07 /* coeff5 */
	.long 0xbeB8B969, 0xbeB8B969, 0xbeB8B969, 0xbeB8B969 /* coeff4 */
	.long 0x3eF637C0, 0x3eF637C0, 0x3eF637C0, 0x3eF637C0 /* coeff3 */
	.long 0xbf38AA2B, 0xbf38AA2B, 0xbf38AA2B, 0xbf38AA2B /* coeff2 */
	.long 0x3fB8AA3B, 0x3fB8AA3B, 0x3fB8AA3B, 0x3fB8AA3B /* coeff1 */
	.align 16
	.type __svml_slog2_data_internal, @object
	.size __svml_slog2_data_internal, .-__svml_slog2_data_internal
223

source code of glibc/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S