/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_DIV64_H
#define _ASM_GENERIC_DIV64_H
/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
 *
 * Optimization for constant divisors on 32-bit machines:
 * Copyright (C) 2006-2015 Nicolas Pitre
 *
 * The semantics of do_div() are, in C++ notation, observing that the name
 * is a function-like macro and the n parameter has the semantics of a C++
 * reference:
 *
 * uint32_t do_div(uint64_t &n, uint32_t base)
 * {
 *	uint32_t remainder = n % base;
 *	n = n / base;
 *	return remainder;
 * }
 *
 * NOTE: macro parameter n is evaluated multiple times,
 *       beware of side effects!
 */
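/*
 * Example usage, as an illustrative sketch: splitting a nanosecond
 * count into whole seconds plus the leftover nanoseconds:
 *
 *	uint64_t ns = 3000000001ULL;
 *	uint32_t rem = do_div(ns, 1000000000);
 *	// ns is now 3, rem is now 1
 */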

#include <linux/types.h>
#include <linux/compiler.h>

#if BITS_PER_LONG == 64

/**
 * do_div - divide a 64-bit dividend in place and return the remainder
 * @n: uint64_t dividend (will be updated)
 * @base: uint32_t divisor
 *
 * Summary:
 * ``uint32_t remainder = n % base;``
 * ``n = n / base;``
 *
 * Return: (uint32_t)remainder
 *
 * NOTE: macro parameter @n is evaluated multiple times,
 * beware of side effects!
 */
# define do_div(n,base) ({					\
	uint32_t __base = (base);				\
	uint32_t __rem;						\
	__rem = ((uint64_t)(n)) % __base;			\
	(n) = ((uint64_t)(n)) / __base;				\
	__rem;							\
 })
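/*
 * Because @n expands several times inside the macro, an argument with
 * side effects is evaluated more than once. An illustrative sketch of
 * the hazard (p is a hypothetical uint64_t pointer):
 *
 *	do_div(*p++, 10);	// p may be incremented more than once
 *
 * Pass a plain lvalue and advance the pointer separately instead.
 */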

#elif BITS_PER_LONG == 32

#include <linux/log2.h>

/*
 * If the divisor happens to be constant, we determine the appropriate
 * inverse at compile time to turn the division into a few inline
 * multiplications which ought to be much faster.
 *
 * (It is unfortunate that gcc doesn't perform all this internally.)
 */
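/*
 * A worked example in miniature, purely illustrative, using 8-bit
 * words instead of 64: to divide by b = 10, take p = 8 (the MSB of b)
 * and m = ceil((p << 8) / b) = ceil(2048 / 10) = 205, so that
 *
 *	n / 10 == ((n * 205) >> 8) / 8
 *
 * e.g. n = 99 gives (99 * 205) >> 8 = 79, and 79 / 8 = 9. The code
 * below does the same with 64-bit words, plus the overflow and bias
 * handling that exactness over the full range requires.
 */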

#define __div64_const32(n, ___b)					\
({									\
	/*								\
	 * Multiplication by reciprocal of b: n / b = n * (p / b) / p	\
	 *								\
	 * We rely on the fact that most of this code gets optimized	\
	 * away at compile time due to constant propagation and only	\
	 * a few multiplication instructions should remain.		\
	 * Hence this monstrous macro (static inline doesn't always	\
	 * do the trick here).						\
	 */								\
	uint64_t ___res, ___x, ___t, ___m, ___n = (n);			\
	uint32_t ___p, ___bias;						\
									\
	/* determine MSB of b */					\
	___p = 1 << ilog2(___b);					\
									\
	/* compute m = ((p << 64) + b - 1) / b */			\
	___m = (~0ULL / ___b) * ___p;					\
	___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b;	\
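	/*								\
	 * The two lines above split the computation because		\
	 * (p << 64) does not fit in 64 bits: with ~0ULL == 2^64 - 1,	\
	 * the first line contributes floor((2^64 - 1) / b) * p and	\
	 * the second adds the remaining part of (p << 64) / b,	\
	 * rounded up by the + b - 1 term.				\
	 */								\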
									\
	/* one less than the dividend with highest result */		\
	___x = ~0ULL / ___b * ___b - 1;					\
									\
	/* test our ___m with res = m * x / (p << 64) */		\
	___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32;	\
	___t = ___res += (___m & 0xffffffff) * (___x >> 32);		\
	___res += (___x & 0xffffffff) * (___m >> 32);			\
	___t = (___res < ___t) ? (1ULL << 32) : 0;			\
	___res = (___res >> 32) + ___t;					\
	___res += (___m >> 32) * (___x >> 32);				\
	___res /= ___p;							\
									\
	/* Now sanitize and optimize what we've got. */			\
	if (~0ULL % (___b / (___b & -___b)) == 0) {			\
		/* special case, can be simplified to ... */		\
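		/*							\
		 * (Illustrative aside: for ___b == 6, ___b & -___b	\
		 * is 2 and ~0ULL % 3 == 0, i.e. the odd factor 3	\
		 * divides 2^64 - 1 evenly, so the reciprocal becomes	\
		 * exact once the power-of-2 factor is shifted out.)	\
		 */							\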
		___n /= (___b & -___b);					\
		___m = ~0ULL / (___b / (___b & -___b));			\
		___p = 1;						\
		___bias = 1;						\
	} else if (___res != ___x / ___b) {				\
		/*							\
		 * We can't get away without a bias to compensate	\
		 * for bit truncation errors.  To avoid it we'd need an	\
		 * additional bit to represent m which would overflow	\
		 * a 64-bit variable.					\
		 *							\
		 * Instead we do m = p / b and n / b = (n * m + m) / p.	\
		 */							\
		___bias = 1;						\
		/* Compute m = (p << 64) / b */				\
		___m = (~0ULL / ___b) * ___p;				\
		___m += ((~0ULL % ___b + 1) * ___p) / ___b;		\
	} else {							\
		/*							\
		 * Reduce m / p, and try to clear bit 31 of m when	\
		 * possible, otherwise that'll need extra overflow	\
		 * handling later.					\
		 */							\
		uint32_t ___bits = -(___m & -___m);			\
		___bits |= ___m >> 32;					\
		___bits = (~___bits) << 1;				\
		/*							\
		 * If ___bits == 0 then setting bit 31 is unavoidable.	\
		 * Simply apply the maximum possible reduction in that	\
		 * case. Otherwise the MSB of ___bits indicates the	\
		 * best reduction we should apply.			\
		 */							\
		if (!___bits) {						\
			___p /= (___m & -___m);				\
			___m /= (___m & -___m);				\
		} else {						\
			___p >>= ilog2(___bits);			\
			___m >>= ilog2(___bits);			\
		}							\
		/* No bias needed. */					\
		___bias = 0;						\
	}								\
									\
	/*								\
	 * Now we have a combination of 2 conditions:			\
	 *								\
	 * 1) whether or not we need to apply a bias, and		\
	 *								\
	 * 2) whether or not there might be an overflow in the cross	\
	 *    product determined by					\
	 *    (___m & ((1ULL << 63) | (1ULL << 31))).			\
	 *								\
	 * Select the best way to do (m_bias + m * n) / (1 << 64).	\
	 * From now on there will be actual runtime code generated.	\
	 */								\
	___res = __arch_xprod_64(___m, ___n, ___bias);			\
									\
	___res /= ___p;							\
})

#ifndef __arch_xprod_64
/*
 * Default C implementation for __arch_xprod_64()
 *
 * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
 * Semantic:  retval = ((bias ? m : 0) + m * n) >> 64
 *
 * The product is a 128-bit value, scaled down to 64 bits.
 * Assuming constant propagation to optimize away unused conditional code.
 * Architectures may provide their own optimized assembly implementation.
 */
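/*
 * The branches below implement schoolbook multiplication on 32-bit
 * halves: with m = (m_hi << 32) + m_lo and n = (n_hi << 32) + n_lo,
 *
 *	m * n = (m_hi * n_hi << 64) + (m_hi * n_lo << 32)
 *		+ (m_lo * n_hi << 32) + m_lo * n_lo
 *
 * and only what survives the final >> 64 scaling is kept.
 */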
static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
{
	uint32_t m_lo = m;
	uint32_t m_hi = m >> 32;
	uint32_t n_lo = n;
	uint32_t n_hi = n >> 32;
	uint64_t res;
	uint32_t res_lo, res_hi, tmp;

	if (!bias) {
		res = ((uint64_t)m_lo * n_lo) >> 32;
	} else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
		/* there can't be any overflow here */
		res = (m + (uint64_t)m_lo * n_lo) >> 32;
	} else {
		res = m + (uint64_t)m_lo * n_lo;
		res_lo = res >> 32;
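		/*
		 * The 64-bit addition above may wrap. When it does,
		 * the high word of the wrapped sum is strictly less
		 * than m_hi, so this comparison recovers the carry.
		 */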
		res_hi = (res_lo < m_hi);
		res = res_lo | ((uint64_t)res_hi << 32);
	}

	if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
		/* there can't be any overflow here */
		res += (uint64_t)m_lo * n_hi;
		res += (uint64_t)m_hi * n_lo;
		res >>= 32;
	} else {
		res += (uint64_t)m_lo * n_hi;
		tmp = res >> 32;
		res += (uint64_t)m_hi * n_lo;
		res_lo = res >> 32;
		res_hi = (res_lo < tmp);
		res = res_lo | ((uint64_t)res_hi << 32);
	}

	res += (uint64_t)m_hi * n_hi;

	return res;
}
#endif

#ifndef __div64_32
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
#endif

/*
 * The unnecessary pointer compare is there
 * to check for type safety (n must be 64-bit).
 */
# define do_div(n,base) ({				\
	uint32_t __base = (base);			\
	uint32_t __rem;					\
	(void)(((typeof((n)) *)0) == ((uint64_t *)0));	\
	if (__builtin_constant_p(__base) &&		\
	    is_power_of_2(__base)) {			\
		__rem = (n) & (__base - 1);		\
		(n) >>= ilog2(__base);			\
	} else if (__builtin_constant_p(__base) &&	\
		   __base != 0) {			\
		uint32_t __res_lo, __n_lo = (n);	\
		(n) = __div64_const32(n, __base);	\
		/* the remainder can be computed with 32-bit regs:   */ \
		/* n = q * base + rem holds modulo 2^32, so only the */ \
		/* low halves of n and q are needed here             */ \
		__res_lo = (n);				\
		__rem = __n_lo - __res_lo * __base;	\
	} else if (likely(((n) >> 32) == 0)) {		\
		__rem = (uint32_t)(n) % __base;		\
		(n) = (uint32_t)(n) / __base;		\
	} else {					\
		__rem = __div64_32(&(n), __base);	\
	}						\
	__rem;						\
 })
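/*
 * The pointer compare in the macro makes misuse fail at build time.
 * An illustrative sketch, not code from this file:
 *
 *	uint32_t x = 100;
 *	do_div(x, 7);	// diagnostic: comparison of distinct pointer types
 *
 * Only a 64-bit lvalue is a valid first argument.
 */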

#else /* BITS_PER_LONG == ?? */

# error do_div() does not yet support the C64

#endif /* BITS_PER_LONG */

#endif /* _ASM_GENERIC_DIV64_H */
