/*-
 * Copyright (c) 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2014-2015 Mellanox Technologies, Ltd. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUXKPI_LINUX_MATH64_H
#define	_LINUXKPI_LINUX_MATH64_H

#include <sys/stdint.h>
#include <sys/systm.h>

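/*
 * Linux-style do_div(): divide the 64-bit variable 'n' in place by the
 * 32-bit 'base' and evaluate to the 32-bit remainder, e.g.
 * "rem = do_div(bytes, 512);" leaves the quotient in 'bytes'.
 */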
#define	do_div(n, base) ({			\
	uint32_t __base = (base);		\
	uint32_t __rem;				\
	__rem = ((uint64_t)(n)) % __base;	\
	(n) = ((uint64_t)(n)) / __base;		\
	__rem;					\
})

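/*
 * Straight 64-bit division helpers; the div64_*() and div_*() variants
 * below differ only in the signedness and width of their operands.
 */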
static inline uint64_t
div64_u64_rem(uint64_t dividend, uint64_t divisor, uint64_t *remainder)
{

	*remainder = dividend % divisor;
	return (dividend / divisor);
}

static inline int64_t
div64_s64(int64_t dividend, int64_t divisor)
{

	return (dividend / divisor);
}

static inline uint64_t
div64_u64(uint64_t dividend, uint64_t divisor)
{

	return (dividend / divisor);
}

static inline uint64_t
div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
{

	*remainder = dividend % divisor;
	return (dividend / divisor);
}

static inline int64_t
div_s64(int64_t dividend, int32_t divisor)
{

	return (dividend / divisor);
}

static inline uint64_t
div_u64(uint64_t dividend, uint32_t divisor)
{

	return (dividend / divisor);
}

static inline uint64_t
mul_u32_u32(uint32_t a, uint32_t b)
{

	return ((uint64_t)a * b);
}

static inline uint64_t
div64_u64_round_up(uint64_t dividend, uint64_t divisor)
{
	return ((dividend + divisor - 1) / divisor);
}

#define	DIV64_U64_ROUND_UP(...) \
	div64_u64_round_up(__VA_ARGS__)

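/*
 * Compute (x * y) / div without needing the full 128-bit product:
 * with x = q * div + r, floor(x * y / div) == q * y + floor(r * y / div).
 */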
static inline uint64_t
mul_u64_u32_div(uint64_t x, uint32_t y, uint32_t div)
{
	const uint64_t rem = x % div;

	return ((x / div) * y + (rem * y) / div);
}

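/*
 * Compute (x * y) / z bit by bit over x so that no 128-bit intermediate
 * product is required; the loop invariants are spelled out below.
 */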
static inline uint64_t
mul_u64_u64_div_u64(uint64_t x, uint64_t y, uint64_t z)
{
	uint64_t res, rem;
	uint64_t x1, y1, y1z;

	res = rem = 0;
	x1 = x;
	y1z = y / z;
	y1 = y - y1z * z;

	/*
	 * INVARIANT: x * y = res * z + rem + (y1 + y1z * z) * x1
	 * INVARIANT: y1 < z
	 * INVARIANT: rem < z
	 */
	while (x1 > 0) {
		/* Handle low bit. */
		if (x1 & 1) {
			x1 &= ~1;
			res += y1z;
			rem += y1;
			if ((rem < y1) || (rem >= z)) {
				res += 1;
				rem -= z;
			}
		}

		/* Shift x1 right and (y1 + y1z * z) left */
		x1 >>= 1;
		if ((y1 * 2 < y1) || (y1 * 2 >= z)) {
			y1z = y1z * 2 + 1;
			y1 = y1 * 2 - z;
		} else {
			y1z *= 2;
			y1 *= 2;
		}
	}

	KASSERT(res * z + rem == x * y, ("%s: res %ju * z %ju + rem %ju != "
	    "x %ju * y %ju", __func__, (uintmax_t)res, (uintmax_t)z,
	    (uintmax_t)rem, (uintmax_t)x, (uintmax_t)y));
	KASSERT(rem < z, ("%s: rem %ju >= z %ju\n", __func__,
	    (uintmax_t)rem, (uintmax_t)z));

	return (res);
}

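/*
 * Compute (x * y) >> shift by splitting x into 32-bit halves; valid for
 * shift values up to 32 (the high half is shifted left by 32 - shift).
 */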
static inline uint64_t
mul_u64_u32_shr(uint64_t x, uint32_t y, unsigned int shift)
{
	uint32_t hi, lo;
	hi = x >> 32;
	lo = x & 0xffffffff;

	return (mul_u32_u32(lo, y) >> shift) +
		(mul_u32_u32(hi, y) << (32 - shift));
}

#endif /* _LINUXKPI_LINUX_MATH64_H */