/*-
 * Copyright (c) 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2014 Mellanox Technologies, Ltd. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <linux/bitops.h>

#if BITS_PER_LONG == 64

/*
 * do_div(n, base) - divide the 64-bit lvalue n by the 32-bit base in
 * place: n becomes the quotient and the macro evaluates to the 32-bit
 * remainder.  On 64-bit machines the hardware divider handles this
 * directly.  Note that n is expanded more than once, so it must not
 * have side effects.
 */
# define do_div(n, base) ({					\
	uint32_t __div_base = (base);				\
	uint32_t __div_rem = (uint64_t)(n) % __div_base;	\
	(n) = (uint64_t)(n) / __div_base;			\
	__div_rem;						\
})
44270710Shselasky/**
45270710Shselasky* div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
46270710Shselasky*
47270710Shselasky* This is commonly provided by 32bit archs to provide an optimized 64bit
48270710Shselasky* divide.
49270710Shselasky*/
50270710Shselaskystatic inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
51270710Shselasky{
52270710Shselasky        *remainder = dividend % divisor;
53270710Shselasky        return dividend / divisor;
54270710Shselasky}


#elif BITS_PER_LONG == 32

/*
 * __div64_32 - 64-bit by 32-bit unsigned division for 32-bit machines.
 *
 * Computes *n / base by shift-and-subtract, storing the quotient back
 * into *n and returning the remainder.  This is the slow path used by
 * do_div() when the dividend does not fit in 32 bits.
 *
 * Declared "static inline" (rather than plain "static") so that
 * translation units which include this header but never call it do
 * not get defined-but-unused warnings or a duplicate private copy of
 * the function.
 */
static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n;
	uint64_t b = base;
	uint64_t res, d = 1;
	uint32_t high = rem >> 32;

	/*
	 * Knock out the part of the quotient that is a multiple of
	 * 2^32 up front: (high / base) << 32 divides the top word.
	 */
	res = 0;
	if (high >= base) {
		high /= base;
		res = (uint64_t) high << 32;
		rem -= (uint64_t) (high*base) << 32;
	}

	/*
	 * Scale the divisor up to just below the remaining dividend,
	 * tracking the matching quotient bit in d.  The (int64_t)b > 0
	 * test stops before b would shift into the sign bit.
	 */
	while ((int64_t)b > 0 && b < rem) {
		b = b+b;
		d = d+d;
	}

	/* Classic restoring division: subtract and shift back down. */
	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;
	return rem;
}
/*
 * do_div(n, base) - divide the 64-bit lvalue n by the 32-bit base in
 * place: n becomes the quotient and the macro evaluates to the 32-bit
 * remainder.  n is expanded more than once, so it must not have side
 * effects.  The comparison of a (typeof(n) *) null pointer against a
 * (uint64_t *) null pointer is a compile-time check that n really is
 * a 64-bit unsigned lvalue.  Dividends that fit in 32 bits take a
 * cheap native 32-bit divide; only true 64-bit dividends fall back to
 * the software __div64_32() path.
 */
# define do_div(n, base) ({                            \
	uint32_t __base = (base);                       \
	uint32_t __rem;                                 \
	(void)(((typeof((n)) *)0) == ((uint64_t *)0));  \
	if (likely(((n) >> 32) == 0)) {                 \
		__rem = (uint32_t)(n) % __base;         \
		(n) = (uint32_t)(n) / __base;           \
	} else                                          \
		__rem = __div64_32(&(n), __base);       \
	__rem;                                          \
})
104270710Shselasky#ifndef div_u64_rem
105270710Shselaskystatic inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
106270710Shselasky{
107270710Shselasky        *remainder = do_div(dividend, divisor);
108270710Shselasky        return dividend;
109270710Shselasky}
110270710Shselasky#endif


#endif /* BITS_PER_LONG */



/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.  The remainder is computed but discarded.
 */
#ifndef div_u64

static inline u64 div_u64(u64 dividend, u32 divisor)
{
        u32 remainder;
        return div_u64_rem(dividend, divisor, &remainder);
}
#endif

#endif	/* _LINUX_MATH64_H */