#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <div64.h>
#include <linux/bitops.h>
#include <linux/types.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y) div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * 32bit architectures commonly provide an optimized version of this
 * to speed up 64bit divides.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
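
/*
 * Example (illustrative only; NSEC_PER_SEC is assumed to come from
 * <linux/time.h> and is not defined in this header): splitting a
 * nanosecond count into whole seconds plus leftover nanoseconds:
 *
 *	u32 rem;
 *	u64 sec = div_u64_rem(ns, NSEC_PER_SEC, &rem);
 */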

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}
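
/*
 * Example (illustrative only; the variable names are hypothetical):
 * these helpers exist so callers can divide by a full 64bit divisor
 * portably. On 64bit they compile to a plain division; on 32bit, where
 * the compiler would otherwise emit a libgcc call, the out-of-line
 * versions declared below are used instead:
 *
 *	u64 avg = div64_u64(total_bytes, total_ops);
 */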

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y) div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif
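
/*
 * Note: do_div() (from <div64.h>) is a macro rather than a function:
 * it divides its u64 first argument in place, leaving the quotient
 * there, and returns the 32bit remainder, hence the pattern above:
 *
 *	u64 n = ...;
 *	u32 rem = do_div(n, base);	// n now holds the quotient
 */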

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full
 * 64bit divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;

	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
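
/*
 * Example (illustrative only; the variable names are hypothetical):
 * averaging a 64bit total over a 32bit count:
 *
 *	u64 avg_ns = div_u64(total_ns, nr_samples);
 */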

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;

	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/*
		 * The following asm() prevents the compiler from
		 * optimising this loop into a modulo operation.
		 */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
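
/*
 * __iter_div_u64_rem() divides by repeated subtraction, so it only
 * pays off when the quotient is expected to be small (a handful of
 * iterations). A typical use (illustrative only; NSEC_PER_SEC is
 * assumed from <linux/time.h>) is normalizing a seconds/nanoseconds
 * pair after adding a small delta:
 *
 *	u64 rem;
 *	sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &rem);
 *	ns = rem;
 */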

#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#endif
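
/*
 * The cast matters: a plain "a * b" on two u32 operands is a
 * 32x32->32 multiply whose high bits are lost. For example, with
 * a = b = 0x10000 the untyped product truncates to 0, while
 * mul_u32_u32(a, b) yields the full 0x100000000.
 */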

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */
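
/*
 * mul_u64_u32_shr() computes a * mul / 2^shift without losing the
 * high bits of the intermediate product, which makes it suitable for
 * fixed-point scaling. Example (illustrative only):
 *
 *	// multiply x by 0.75, expressed as (3 << 30) / 2^32
 *	u64 scaled = mul_u64_u32_shr(x, 3u << 30, 32);
 */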

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	/* Note: this open-coded variant is only valid for shift <= 32. */
	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

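	/*
	 * Schoolbook multiplication on 32-bit halves: writing
	 * a = 2^32*ah + al and b = 2^32*bh + bl gives
	 *
	 *	a * b = 2^64*(ah*bh) + 2^32*(ah*bl + al*bh) + al*bl
	 *
	 * so rl below covers bits 0-63, rm and rn bits 32-95, and
	 * rh bits 64-127 of the full 128-bit product.
	 */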
	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95. The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */

#endif

#ifndef mul_u64_u32_div
/*
 * Compute a * mul / divisor using a 64x32->96 bit multiply; the final
 * quotient is assumed to fit in 64 bits.
 */
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = mul_u32_u32(u.l.low, mul);
	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low. */
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */
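
/*
 * Example (illustrative only; the variable names are hypothetical):
 * rescaling a 64bit value by a 32bit ratio without needing a 128-bit
 * intermediate type, e.g. converting ticks between clock rates:
 *
 *	u64 out = mul_u64_u32_div(ticks, rate_out, rate_in);
 */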

#endif /* _LINUX_MATH64_H */