/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_DIV64_H
#define _ASM_GENERIC_DIV64_H
/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
 *
 * Optimization for constant divisors on 32-bit machines:
 * Copyright (C) 2006-2015 Nicolas Pitre
 *
 * The semantics of do_div() are:
 *
 * uint32_t do_div(uint64_t *n, uint32_t base)
 * {
 *	uint32_t remainder = *n % base;
 *	*n = *n / base;
 *	return remainder;
 * }
 *
 * NOTE: macro parameter n is evaluated multiple times,
 * beware of side effects!
 */
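
/*
 * Typical use (illustrative values only): the dividend is updated in place
 * and the remainder is returned, e.g.
 *
 *	uint64_t ns = 1000000123;
 *	uint32_t rem = do_div(ns, 1000000);
 *
 * leaves ns == 1000 and rem == 123.
 */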

#include <linux/types.h>
#include <linux/compiler.h>

#if BITS_PER_LONG == 64

/**
 * do_div - divide a 64-bit dividend in place and return the remainder
 * @n: uint64_t dividend (will be updated)
 * @base: uint32_t divisor
 *
 * Summary:
 * ``uint32_t remainder = n % base;``
 * ``n = n / base;``
 *
 * Return: (uint32_t)remainder
 *
 * NOTE: macro parameter @n is evaluated multiple times,
 * beware of side effects!
 */
# define do_div(n,base) ({	\
	uint32_t __base = (base);	\
	uint32_t __rem;	\
	__rem = ((uint64_t)(n)) % __base;	\
	(n) = ((uint64_t)(n)) / __base;	\
	__rem;	\
 })

#elif BITS_PER_LONG == 32

#include <linux/log2.h>

/*
 * If the divisor happens to be constant, we determine the appropriate
 * inverse at compile time to turn the division into a few inline
 * multiplications which ought to be much faster.  This only pays off
 * when compiling with a gcc version recent enough to perform proper
 * 64-bit constant propagation.
 *
 * (It is unfortunate that gcc doesn't perform all this internally.)
 */
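
/*
 * A scaled-down illustration of the reciprocal idea (8 bits instead of 64,
 * ignoring the bias and special-case refinements handled below): to divide
 * by b = 3, take p = 2 (the MSB of b) and m = ((p << 8) + b - 1) / b = 171;
 * then n / 3 == ((n * 171) >> 8) / 2 for any 8-bit n, e.g. n = 100 gives
 * (17100 >> 8) / 2 = 66 / 2 = 33.
 */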

#ifndef __div64_const32_is_OK
#define __div64_const32_is_OK (__GNUC__ >= 4)
#endif

#define __div64_const32(n, ___b)	\
({	\
	/*	\
	 * Multiplication by reciprocal of b: n / b = n * (p / b) / p	\
	 *	\
	 * We rely on the fact that most of this code gets optimized	\
	 * away at compile time due to constant propagation and only	\
	 * a few multiplication instructions should remain.	\
	 * Hence this monstrous macro (static inline doesn't always	\
	 * do the trick here).	\
	 */	\
	uint64_t ___res, ___x, ___t, ___m, ___n = (n);	\
	uint32_t ___p, ___bias;	\
	\
	/* determine MSB of b */	\
	___p = 1 << ilog2(___b);	\
	\
	/* compute m = ((p << 64) + b - 1) / b */	\
	___m = (~0ULL / ___b) * ___p;	\
	___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b;	\
	\
	/* one less than the dividend with highest result */	\
	___x = ~0ULL / ___b * ___b - 1;	\
	\
	/* test our ___m with res = m * x / (p << 64) */	\
	___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32;	\
	___t = ___res += (___m & 0xffffffff) * (___x >> 32);	\
	___res += (___x & 0xffffffff) * (___m >> 32);	\
	___t = (___res < ___t) ? (1ULL << 32) : 0;	\
	___res = (___res >> 32) + ___t;	\
	___res += (___m >> 32) * (___x >> 32);	\
	___res /= ___p;	\
	\
	/* Now sanitize and optimize what we've got. */	\
	if (~0ULL % (___b / (___b & -___b)) == 0) {	\
		/* special case, can be simplified to ... */	\
		___n /= (___b & -___b);	\
		___m = ~0ULL / (___b / (___b & -___b));	\
		___p = 1;	\
		___bias = 1;	\
	} else if (___res != ___x / ___b) {	\
		/*	\
		 * We can't get away without a bias to compensate	\
		 * for bit truncation errors. To avoid it we'd need an	\
		 * additional bit to represent m which would overflow	\
		 * a 64-bit variable.	\
		 *	\
		 * Instead we do m = p / b and n / b = (n * m + m) / p.	\
		 */	\
		___bias = 1;	\
		/* Compute m = (p << 64) / b */	\
		___m = (~0ULL / ___b) * ___p;	\
		___m += ((~0ULL % ___b + 1) * ___p) / ___b;	\
	} else {	\
		/*	\
		 * Reduce m / p, and try to clear bit 31 of m when	\
		 * possible, otherwise that'll need extra overflow	\
		 * handling later.	\
		 */	\
		uint32_t ___bits = -(___m & -___m);	\
		___bits |= ___m >> 32;	\
		___bits = (~___bits) << 1;	\
		/*	\
		 * If ___bits == 0 then setting bit 31 is unavoidable.	\
		 * Simply apply the maximum possible reduction in that	\
		 * case. Otherwise the MSB of ___bits indicates the	\
		 * best reduction we should apply.	\
		 */	\
		if (!___bits) {	\
			___p /= (___m & -___m);	\
			___m /= (___m & -___m);	\
		} else {	\
			___p >>= ilog2(___bits);	\
			___m >>= ilog2(___bits);	\
		}	\
		/* No bias needed. */	\
		___bias = 0;	\
	}	\
	\
	/*	\
	 * Now we have a combination of 2 conditions:	\
	 *	\
	 * 1) whether or not we need to apply a bias, and	\
	 *	\
	 * 2) whether or not there might be an overflow in the cross	\
	 *    product determined by (___m & ((1 << 63) | (1 << 31))).	\
	 *	\
	 * Select the best way to do (m_bias + m * n) / (1 << 64).	\
	 * From now on there will be actual runtime code generated.	\
	 */	\
	___res = __arch_xprod_64(___m, ___n, ___bias);	\
	\
	___res /= ___p;	\
})
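
/*
 * Note: __div64_const32() evaluates to the quotient n / b only; the
 * do_div() wrapper below reconstructs the remainder from the low 32 bits
 * of the dividend and the quotient.
 */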

#ifndef __arch_xprod_64
/*
 * Default C implementation for __arch_xprod_64()
 *
 * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
 * Semantic: retval = ((bias ? m : 0) + m * n) >> 64
 *
 * The product is a 128-bit value, scaled down to 64 bits.
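 *
 * With m = m_hi * 2^32 + m_lo and n = n_hi * 2^32 + n_lo, the full product
 * is m_hi * n_hi * 2^64 + (m_hi * n_lo + m_lo * n_hi) * 2^32 + m_lo * n_lo;
 * the top 64 bits are obtained by summing the partial products shifted into
 * place while propagating the carries of the 32-bit cross terms.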
 * Assuming constant propagation to optimize away unused conditional code.
 * Architectures may provide their own optimized assembly implementation.
 */
static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
{
	uint32_t m_lo = m;
	uint32_t m_hi = m >> 32;
	uint32_t n_lo = n;
	uint32_t n_hi = n >> 32;
	uint64_t res, tmp;

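	/*
	 * Fold in the optional bias together with the low-by-low partial
	 * product, taking care of a possible carry out of 64 bits when
	 * bit 31 or bit 63 of m is set.
	 */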
	if (!bias) {
		res = ((uint64_t)m_lo * n_lo) >> 32;
	} else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
		/* there can't be any overflow here */
		res = (m + (uint64_t)m_lo * n_lo) >> 32;
	} else {
		res = m + (uint64_t)m_lo * n_lo;
		tmp = (res < m) ? (1ULL << 32) : 0;
		res = (res >> 32) + tmp;
	}

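	/*
	 * Accumulate the two 32 x 32 cross products, again propagating a
	 * possible carry into the upper half when m is large enough for
	 * the additions to wrap.
	 */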
	if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
		/* there can't be any overflow here */
		res += (uint64_t)m_lo * n_hi;
		res += (uint64_t)m_hi * n_lo;
		res >>= 32;
	} else {
		tmp = res += (uint64_t)m_lo * n_hi;
		res += (uint64_t)m_hi * n_lo;
		tmp = (res < tmp) ? (1ULL << 32) : 0;
		res = (res >> 32) + tmp;
	}

	res += (uint64_t)m_hi * n_hi;

	return res;
}
#endif

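/*
 * Out-of-line fallback, used by do_div() below when the divisor is not a
 * compile-time constant and the dividend does not fit in 32 bits.
 */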
#ifndef __div64_32
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
#endif

/* The unnecessary pointer compare is there
 * to check for type safety (n must be 64bit)
 */
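/*
 * Four strategies, tried in order: a constant power-of-2 divisor becomes a
 * mask and a shift; any other non-zero constant divisor goes through the
 * reciprocal path above (when the compiler supports it); a dividend that
 * fits in 32 bits uses a plain 32-bit division; everything else falls back
 * to __div64_32().
 */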
# define do_div(n,base) ({	\
	uint32_t __base = (base);	\
	uint32_t __rem;	\
	(void)(((typeof((n)) *)0) == ((uint64_t *)0));	\
	if (__builtin_constant_p(__base) &&	\
	    is_power_of_2(__base)) {	\
		__rem = (n) & (__base - 1);	\
		(n) >>= ilog2(__base);	\
	} else if (__div64_const32_is_OK &&	\
		   __builtin_constant_p(__base) &&	\
		   __base != 0) {	\
		uint32_t __res_lo, __n_lo = (n);	\
		(n) = __div64_const32(n, __base);	\
		/* the remainder can be computed with 32-bit regs */	\
		__res_lo = (n);	\
		__rem = __n_lo - __res_lo * __base;	\
	} else if (likely(((n) >> 32) == 0)) {	\
		__rem = (uint32_t)(n) % __base;	\
		(n) = (uint32_t)(n) / __base;	\
	} else	\
		__rem = __div64_32(&(n), __base);	\
	__rem;	\
 })

#else /* BITS_PER_LONG == ?? */

# error do_div() does not yet support the C64

#endif /* BITS_PER_LONG */

#endif /* _ASM_GENERIC_DIV64_H */
