/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/compat/linuxkpi/common/include/linux/bitops.h 337898 2018-08-16 08:12:36Z hselasky $
 */
#ifndef	_LINUX_BITOPS_H_
#define	_LINUX_BITOPS_H_

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/libkern.h>

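/*
 * Basic bit constants and bitmap word helpers.  BITS_PER_LONG tracks the
 * native width of "long" (64 on LP64 platforms, 32 otherwise).
 */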
#define	BIT(nr)			(1UL << (nr))
#define	BIT_ULL(nr)		(1ULL << (nr))
#ifdef __LP64__
#define	BITS_PER_LONG		64
#else
#define	BITS_PER_LONG		32
#endif

#define	BITS_PER_LONG_LONG	64

#define	BITMAP_FIRST_WORD_MASK(start)	(~0UL << ((start) % BITS_PER_LONG))
#define	BITMAP_LAST_WORD_MASK(n)	(~0UL >> (BITS_PER_LONG - (n)))
#define	BITS_TO_LONGS(n)	howmany((n), BITS_PER_LONG)
#define	BIT_MASK(nr)		(1UL << ((nr) & (BITS_PER_LONG - 1)))
#define	BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define	GENMASK(h, l)		(((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
#define	GENMASK_ULL(h, l)	(((~0ULL) >> (BITS_PER_LONG_LONG - (h) - 1)) & ((~0ULL) << (l)))
#define	BITS_PER_BYTE		8

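/* Population count (Hamming weight) wrappers around libkern's bitcount*(). */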
#define	hweight8(x)	bitcount((uint8_t)(x))
#define	hweight16(x)	bitcount16(x)
#define	hweight32(x)	bitcount32(x)
#define	hweight64(x)	bitcount64(x)
#define	hweight_long(x)	bitcountl(x)

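/*
 * Linux-style find-first/find-last set bit helpers.  FreeBSD's ffs()/fls()
 * family returns 1-based indices (0 when no bit is set); the double-
 * underscore variants subtract one to get the 0-based index Linux expects
 * and are undefined for a zero mask, while fls64() keeps the 1-based
 * convention of its Linux counterpart.
 */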
static inline int
__ffs(int mask)
{
	return (ffs(mask) - 1);
}

static inline int
__fls(int mask)
{
	return (fls(mask) - 1);
}

static inline int
__ffsl(long mask)
{
	return (ffsl(mask) - 1);
}

static inline int
__flsl(long mask)
{
	return (flsl(mask) - 1);
}

static inline int
fls64(uint64_t mask)
{
	return (flsll(mask));
}

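/*
 * Rotate a 32-bit word right by "shift" bits.  "shift" must be in the
 * range 1..31; a zero shift would make the 32-bit left shift undefined.
 */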
static inline uint32_t
ror32(uint32_t word, unsigned int shift)
{
	return ((word >> shift) | (word << (32 - shift)));
}

#define	ffz(mask)	__ffs(~(mask))

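/* Return the smallest order such that (1 << order) >= count, for non-zero count. */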
static inline int
get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return (order);
}

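/*
 * Bitmap search routines.  "size" is the total number of valid bits in the
 * bitmap; each routine returns the index of the matching bit, or "size"
 * when no such bit exists.
 */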
static inline unsigned long
find_first_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int bit;

	for (bit = 0; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (*addr == 0)
			continue;
		return (bit + __ffsl(*addr));
	}
	if (size) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

static inline unsigned long
find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int bit;

	for (bit = 0; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (~(*addr) == 0)
			continue;
		return (bit + __ffsl(~(*addr)));
	}
	if (size) {
		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

static inline unsigned long
find_last_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int offs;
	int bit;
	int pos;

	pos = size / BITS_PER_LONG;
	offs = size % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __flsl(mask));
	}
	while (pos--) {
		addr--;
		bit -= BITS_PER_LONG;
		if (*addr)
			return (bit + __flsl(*addr));
	}
	return (size);
}

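/* Find the next set bit at or above "offset", or return "size" if none. */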
static inline unsigned long
find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
	long mask;
	int offs;
	int bit;
	int pos;

	if (offset >= size)
		return (size);
	pos = offset / BITS_PER_LONG;
	offs = offset % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __ffsl(mask));
		if (size - bit <= BITS_PER_LONG)
			return (size);
		bit += BITS_PER_LONG;
		addr++;
	}
	for (size -= bit; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (*addr == 0)
			continue;
		return (bit + __ffsl(*addr));
	}
	if (size) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

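/* Find the next clear bit at or above "offset", or return "size" if none. */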
static inline unsigned long
find_next_zero_bit(const unsigned long *addr, unsigned long size,
    unsigned long offset)
{
	long mask;
	int offs;
	int bit;
	int pos;

	if (offset >= size)
		return (size);
	pos = offset / BITS_PER_LONG;
	offs = offset % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = ~(*addr) & ~BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __ffsl(mask));
		if (size - bit <= BITS_PER_LONG)
			return (size);
		bit += BITS_PER_LONG;
		addr++;
	}
	for (size -= bit; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (~(*addr) == 0)
			continue;
		return (bit + __ffsl(~(*addr)));
	}
	if (size) {
		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

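/*
 * Bit set/clear/test operations on arrays of longs.  Note that the
 * double-underscore variants, which Linux defines as non-atomic, map to
 * the same atomic(9) operations as the plain ones here.
 */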
#define	__set_bit(i, a)							\
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	set_bit(i, a)							\
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	__clear_bit(i, a)						\
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	clear_bit(i, a)							\
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	test_bit(i, a)							\
    !!(READ_ONCE(((volatile unsigned long *)(a))[BIT_WORD(i)]) & BIT_MASK(i))

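/*
 * Atomically clear the bit and return its previous value, using an
 * atomic_fcmpset_long() compare-and-swap loop.
 */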
static inline int
test_and_clear_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	while (!atomic_fcmpset_long(var, &val, val & ~bit))
		;
	return !!(val & bit);
}

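/* Non-atomic variant of test_and_clear_bit(). */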
static inline int
__test_and_clear_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	*var &= ~bit;

	return !!(val & bit);
}

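/*
 * Atomically set the bit and return its previous value, using an
 * atomic_fcmpset_long() compare-and-swap loop.
 */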
static inline int
test_and_set_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	while (!atomic_fcmpset_long(var, &val, val | bit))
		;
	return !!(val & bit);
}

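/* Non-atomic variant of test_and_set_bit(). */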
static inline int
__test_and_set_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	*var |= bit;

	return !!(val & bit);
}

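/*
 * Bitmap region operations: linux_reg_op() tests whether an aligned region
 * of (1 << order) bits starting at bit "pos" is free, allocates it, or
 * releases it, depending on "reg_op".
 */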
enum {
	REG_OP_ISFREE,
	REG_OP_ALLOC,
	REG_OP_RELEASE,
};

static inline int
linux_reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
{
	int nbits_reg;
	int index;
	int offset;
	int nlongs_reg;
	int nbitsinlong;
	unsigned long mask;
	int i;
	int ret = 0;

	nbits_reg = 1 << order;
	index = pos / BITS_PER_LONG;
	offset = pos - (index * BITS_PER_LONG);
	nlongs_reg = BITS_TO_LONGS(nbits_reg);
	nbitsinlong = min(nbits_reg, BITS_PER_LONG);

	mask = (1UL << (nbitsinlong - 1));
	mask += mask - 1;
	mask <<= offset;

	switch (reg_op) {
	case REG_OP_ISFREE:
		for (i = 0; i < nlongs_reg; i++) {
			if (bitmap[index + i] & mask)
				goto done;
		}
		ret = 1;
		break;

	case REG_OP_ALLOC:
		for (i = 0; i < nlongs_reg; i++)
			bitmap[index + i] |= mask;
		break;

	case REG_OP_RELEASE:
		for (i = 0; i < nlongs_reg; i++)
			bitmap[index + i] &= ~mask;
		break;
	}
done:
	return (ret);
}

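/*
 * Iterate over every set (or clear) bit in a bitmap of "size" bits.
 * Illustrative use (hypothetical caller):
 *
 *	unsigned long map[BITS_TO_LONGS(128)];
 *	int b;
 *
 *	for_each_set_bit(b, map, 128)
 *		printf("bit %d is set\n", b);
 */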
#define	for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

#define	for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

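/* Sign-extend a 64-bit value whose sign bit sits at 0-based position "index". */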
static inline uint64_t
sign_extend64(uint64_t value, int index)
{
	uint8_t shift = 63 - index;

	return ((int64_t)(value << shift) >> shift);
}

#endif	/* _LINUX_BITOPS_H_ */