/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_BITOPS_H_
#define _TOOLS_LINUX_BITOPS_H_

#include <asm/types.h>
#include <limits.h>
#ifndef __WORDSIZE
#define __WORDSIZE (__SIZEOF_LONG__ * 8)
#endif

#ifndef BITS_PER_LONG
# define BITS_PER_LONG __WORDSIZE
#endif
#include <linux/bits.h>
#include <linux/compiler.h>

#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr)		DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr)		DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
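/*
 * Example (illustrative): with 64-bit longs, a 100-bit bitmap needs
 * BITS_TO_LONGS(100) == DIV_ROUND_UP(100, 64) == 2 longs of storage.
 */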

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
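/*
 * The __sw_hweight*() declarations above are the generic software
 * population-count implementations, defined out of line; the hweight*()
 * helpers typically resolve to them when no architecture-optimized
 * version is pulled in.
 */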

/*
 * Defined here because the bitop() wrapper and the macros built on it may
 * be needed by architecture-specific static inlines.
 */

#define bitop(op, nr, addr)						\
	op(nr, addr)

#define __set_bit(nr, addr)		bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr)		bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr)		bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr)	bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr)	bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr)	bitop(___test_and_change_bit, nr, addr)
#define test_bit(nr, addr)		bitop(_test_bit, nr, addr)
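/*
 * Example (illustrative): non-atomic bit manipulation on a local bitmap.
 *
 *	unsigned long map[BITS_TO_LONGS(128)] = { 0 };
 *
 *	__set_bit(42, map);
 *	if (test_bit(42, map))
 *		__clear_bit(42, map);
 */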

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope.
 *
 * XXX: this needs to be asm/bitops.h, when we get to per-arch optimizations.
 */
#include <asm-generic/bitops.h>

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))
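/*
 * Example (illustrative, assuming "map" holds a populated 128-bit bitmap):
 *
 *	unsigned int bit;
 *
 *	for_each_set_bit(bit, map, 128)
 *		printf("bit %u is set\n", bit);
 */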

#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));       \
	     (bit) < (size);                                    \
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

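/**
 * hweight_long - count the set bits (Hamming weight) of an unsigned long
 * @w: the word to weigh
 */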
static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

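/**
 * fls_long - find last (most-significant) set bit in an unsigned long
 * @l: the word to search
 *
 * Returns 0 if no bits are set, otherwise one plus the index of the last
 * set bit (so fls_long(1) == 1).
 */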
static inline unsigned int fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> ((-shift) & 31));
}
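/*
 * Note: rol32() computes the right-shift count as "(-shift) & 31" so that a
 * rotate by zero does not turn into a 32-bit shift by 32, which would be
 * undefined behaviour.
 */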

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0 <= index < 64) of the sign bit
 */
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
	__u8 shift = 63 - index;

	return (__s64)(value << shift) >> shift;
}
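/*
 * Example (illustrative): sign_extend64(0x80, 7) treats bit 7 as the sign
 * bit of an 8-bit quantity and returns (__s64)-128.
 */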

#endif /* _TOOLS_LINUX_BITOPS_H_ */