#ifndef __ASM_ARM_DIV64
#define __ASM_ARM_DIV64

#include <asm/system.h>
#include <linux/types.h>

/*
 * The semantics of do_div() are:
 *
 * uint32_t do_div(uint64_t *n, uint32_t base)
 * {
 * 	uint32_t remainder = *n % base;
 * 	*n = *n / base;
 * 	return remainder;
 * }
 *
 * In other words, a 64-bit dividend with a 32-bit divisor producing
 * a 64-bit result and a 32-bit remainder.  To accomplish this optimally
 * we call a special __do_div64 helper with a completely non-standard
 * calling convention for arguments and results (beware).
 */
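
/*
 * For example (note that do_div() modifies the dividend in place and
 * evaluates to the remainder, so the dividend must be an lvalue):
 *
 *	uint64_t ns = 123456789;
 *	uint32_t rem = do_div(ns, 1000);
 *	-- now ns == 123456 and rem == 789
 */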

#ifdef __ARMEB__
#define __xh "r0"
#define __xl "r1"
#else
#define __xl "r0"
#define __xh "r1"
#endif
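
/*
 * __xh/__xl name the registers holding the high and low halves of a
 * 64-bit value in r0-r1: on big-endian (__ARMEB__) gcc places the most
 * significant word in the lower-numbered register, on little-endian it
 * is the reverse.
 */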

#define __do_div_asm(n, base)					\
({								\
	register unsigned int __base      asm("r4") = base;	\
	register unsigned long long __n   asm("r0") = n;	\
	register unsigned long long __res asm("r2");		\
	register unsigned int __rem       asm(__xh);		\
	asm(	__asmeq("%0", __xh)				\
		__asmeq("%1", "r2")				\
		__asmeq("%2", "r0")				\
		__asmeq("%3", "r4")				\
		"bl	__do_div64"				\
		: "=r" (__rem), "=r" (__res)			\
		: "r" (__n), "r" (__base)			\
		: "ip", "lr", "cc");				\
	n = __res;						\
	__rem;							\
})
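
/*
 * As the register constraints above show, __do_div64 takes the dividend
 * in r0-r1 and the divisor in r4, and returns the quotient in r2-r3
 * with the remainder in the register named by __xh.  The __asmeq()
 * checks (from <asm/system.h>) expand into assembly-time assertions
 * that fail the build if gcc ever allocates different registers.
 */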

#if __GNUC__ < 4

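/*
 * gcc versions older than 4.0 cannot do reliable constant propagation
 * on long long values (as noted below), so they always get the
 * out-of-line assembly helper, constant divisor or not.
 */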
#define do_div(n, base) __do_div_asm(n, base)

#elif __GNUC__ >= 4

#include <asm/bug.h>

/*
 * If the divisor happens to be constant, we determine the appropriate
 * inverse at compile time to turn the division into a few inline
 * multiplications instead, which is much faster.  This applies only when
 * compiling for ARMv4 or higher (we need umull/umlal) and when the gcc
 * version is sufficiently recent to perform proper long long constant
 * propagation.  (It is unfortunate that gcc doesn't perform all this
 * internally.)
 */
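
/*
 * For instance, with a constant __b == 10 the code below initially
 * derives __p = 8 (the largest power of 2 not exceeding 10) and
 * __m = 0xCCCCCCCCCCCCCCCD (i.e. (8 << 64) / 10, rounded up), so that
 * n / 10 == ((n * __m) >> 64) / 8.  The "sanitize" step then notices
 * that ~0ULL is divisible by 5 and simplifies this further to
 * n / 10 == ((n/2) * 0x3333333333333333 + 0x3333333333333333) >> 64.
 */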
#define do_div(n, base)							\
({									\
	unsigned int __r, __b = (base);					\
	if (!__builtin_constant_p(__b) || __b == 0 ||			\
	    (__LINUX_ARM_ARCH__ < 4 && (__b & (__b - 1)) != 0)) {	\
		/* non-constant divisor (or zero): slow path */		\
		__r = __do_div_asm(n, __b);				\
	} else if ((__b & (__b - 1)) == 0) {				\
		/* Trivial: __b is constant and a power of 2 */		\
		/* gcc does the right thing with this code.  */		\
		__r = n;						\
		__r &= (__b - 1);					\
		n /= __b;						\
	} else {							\
		/* Multiply by inverse of __b: n/b = n*(p/b)/p       */	\
		/* We rely on the fact that most of this code gets   */	\
		/* optimized away at compile time due to constant    */	\
		/* propagation and only a couple inline assembly     */	\
		/* instructions should remain. Better avoid any      */	\
		/* code construct that might prevent that.           */	\
		unsigned long long __res, __x, __t, __m, __n = n;	\
		unsigned int __c, __p, __z = 0;				\
		/* preserve low part of n for remainder computation */	\
		__r = __n;						\
		/* __p = the largest power of 2 not exceeding __b */	\
		__p = 1 << __div64_fls(__b);				\
		/* compute __m = ((__p << 64) + __b - 1) / __b */	\
		__m = (~0ULL / __b) * __p;				\
		__m += (((~0ULL % __b + 1) * __p) + __b - 1) / __b;	\
		/* compute __res = __m*(~0ULL/__b*__b-1)/(__p << 64) */	\
		__x = ~0ULL / __b * __b - 1;				\
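		/* 32x32->64 schoolbook multiply: the lines below     */	\
		/* accumulate the high 64 bits of the 128-bit product */	\
		/* __m * __x in __res.                                */	\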
		__res = (__m & 0xffffffff) * (__x & 0xffffffff);	\
		__res >>= 32;						\
		__res += (__m & 0xffffffff) * (__x >> 32);		\
		__t = __res;						\
		__res += (__x & 0xffffffff) * (__m >> 32);		\
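		/* if the addition above wrapped past 64 bits, the    */	\
		/* lost carry (2^64) becomes 1 << 32 after the shift  */	\
		/* by 32 performed below.                             */	\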
		__t = (__res < __t) ? (1ULL << 32) : 0;			\
		__res = (__res >> 32) + __t;				\
		__res += (__m >> 32) * (__x >> 32);			\
		__res /= __p;						\
		/* Now sanitize and optimize what we've got. */		\
		if (~0ULL % (__b / (__b & -__b)) == 0) {		\
			/* those cases can be simplified with: */	\
			__n /= (__b & -__b);				\
			__m = ~0ULL / (__b / (__b & -__b));		\
			__p = 1;					\
			__c = 1;					\
		} else if (__res != __x / __b) {			\
			/* We can't get away without a correction    */	\
			/* to compensate for bit truncation errors.  */	\
			/* To avoid it we'd need an additional bit   */	\
			/* to represent __m which would overflow it. */	\
			/* Instead we do m=p/b and n/b=(n*m+m)/p.    */	\
			__c = 1;					\
			/* Compute __m = (__p << 64) / __b */		\
			__m = (~0ULL / __b) * __p;			\
			__m += ((~0ULL % __b + 1) * __p) / __b;		\
		} else {						\
			/* Reduce __m/__p, and try to clear bit 31   */	\
			/* of __m when possible otherwise that'll    */	\
			/* need extra overflow handling later.       */	\
			unsigned int __bits = -(__m & -__m);		\
			__bits |= __m >> 32;				\
			__bits = (~__bits) << 1;			\
			/* If __bits == 0 then setting bit 31 is     */	\
			/* unavoidable.  Simply apply the maximum    */	\
			/* possible reduction in that case.          */	\
			/* Otherwise the MSB of __bits indicates the */	\
			/* best reduction we should apply.           */	\
			if (!__bits) {					\
				__p /= (__m & -__m);			\
				__m /= (__m & -__m);			\
			} else {					\
				__p >>= __div64_fls(__bits);		\
				__m >>= __div64_fls(__bits);		\
			}						\
			/* No correction needed. */			\
			__c = 0;					\
		}							\
		/* Now we have a combination of 2 conditions:        */	\
		/* 1) whether or not we need a correction (__c), and */	\
		/* 2) whether or not there might be an overflow in   */	\
		/*    the cross product (__m & ((1<<63) | (1<<31)))  */	\
		/* Select the best insn combination to perform the   */	\
		/* actual __m * __n / (__p << 64) operation.         */	\
		if (!__c) {						\
			asm (	"umull	%Q0, %R0, %1, %Q2\n\t"		\
				"mov	%Q0, #0"			\
				: "=&r" (__res)				\
				: "r" (__m), "r" (__n)			\
				: "cc" );				\
		} else if (!(__m & ((1ULL << 63) | (1ULL << 31)))) {	\
			__res = __m;					\
			asm (	"umlal	%Q0, %R0, %Q1, %Q2\n\t"		\
				"mov	%Q0, #0"			\
				: "+r" (__res)				\
				: "r" (__m), "r" (__n)			\
				: "cc" );				\
		} else {						\
			asm (	"umull	%Q0, %R0, %Q1, %Q2\n\t"		\
				"cmn	%Q0, %Q1\n\t"			\
				"adcs	%R0, %R0, %R1\n\t"		\
				"adc	%Q0, %3, #0"			\
				: "=&r" (__res)				\
				: "r" (__m), "r" (__n), "r" (__z)	\
				: "cc" );				\
		}							\
		if (!(__m & ((1ULL << 63) | (1ULL << 31)))) {		\
			asm (	"umlal	%R0, %Q0, %R1, %Q2\n\t"		\
				"umlal	%R0, %Q0, %Q1, %R2\n\t"		\
				"mov	%R0, #0\n\t"			\
				"umlal	%Q0, %R0, %R1, %R2"		\
				: "+r" (__res)				\
				: "r" (__m), "r" (__n)			\
				: "cc" );				\
		} else {						\
			asm (	"umlal	%R0, %Q0, %R2, %Q3\n\t"		\
				"umlal	%R0, %1, %Q2, %R3\n\t"		\
				"mov	%R0, #0\n\t"			\
				"adds	%Q0, %1, %Q0\n\t"		\
				"adc	%R0, %R0, #0\n\t"		\
				"umlal	%Q0, %R0, %R2, %R3"		\
				: "+r" (__res), "+r" (__z)		\
				: "r" (__m), "r" (__n)			\
				: "cc" );				\
		}							\
		__res /= __p;						\
		/* The remainder can be computed with 32-bit regs    */	\
		/* only, and gcc is good at that.                    */	\
		{							\
			unsigned int __res0 = __res;			\
			unsigned int __b0 = __b;			\
			__r -= __res0 * __b0;				\
		}							\
		/* BUG_ON(__r >= __b || __res * __b + __r != n); */	\
		n = __res;						\
	}								\
	__r;								\
})
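
/*
 * With a constant divisor the whole construct above typically boils
 * down to a couple of umull/umlal instructions plus a shift: all the
 * C arithmetic on __m, __p and __c is folded away by gcc's constant
 * propagation, and only the selected inline asm sequence remains.
 */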

/* our own fls implementation to make sure constant propagation is fine */
#define __div64_fls(bits)						\
({									\
	unsigned int __left = (bits), __nr = 0;				\
	if (__left & 0xffff0000) __nr += 16, __left >>= 16;		\
	if (__left & 0x0000ff00) __nr +=  8, __left >>=  8;		\
	if (__left & 0x000000f0) __nr +=  4, __left >>=  4;		\
	if (__left & 0x0000000c) __nr +=  2, __left >>=  2;		\
	if (__left & 0x00000002) __nr +=  1;				\
	__nr;								\
})
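
/*
 * Unlike the kernel's fls(), this evaluates to the 0-based index of the
 * most significant set bit: e.g. __div64_fls(1) == 0, __div64_fls(10) == 3,
 * __div64_fls(0x80000000) == 31.
 */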

#endif

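/*
 * Full 64-bit by 64-bit division; unlike do_div() the divisor is also
 * 64 bits wide, the dividend is passed by value, and no remainder is
 * returned.
 */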
extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);

#endif