/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/compiler.h>
#include <asm/barrier.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */
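/*
 * Example: the helpers below index 32-bit longwords, so a bit number
 * decomposes into a longword offset (nr >> 5) and a bit offset
 * (nr & 31).  For instance, set_bit(70, addr) operates on longword 2
 * (70 >> 5) and bit 6 (70 & 31).
 */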

static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bis %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
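
/*
 * C-level sketch of the ldl_l/stl_c sequence above, assuming
 * hypothetical load_locked()/store_conditional() helpers; only the
 * inline asm form is actually atomic:
 *
 *	do {
 *		old = load_locked(m);			(ldl_l)
 *		new = old | mask;			(bis)
 *	} while (!store_conditional(m, new));		(stl_c failed: retry)
 */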

/*
 * WARNING: non atomic version.
 */
static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}

static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

static inline void
clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	clear_bit(nr, addr);
}

/*
 * WARNING: non atomic version.
 */
static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m &= ~(1 << (nr & 31));
}

static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	arch___clear_bit(nr, addr);
}

static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}

static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
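
/*
 * Usage sketch: the test_and_* helpers return the prior state of the
 * bit, so callers can race for ownership (FLAG_BUSY and flags are
 * hypothetical names):
 *
 *	if (!test_and_set_bit(FLAG_BUSY, &flags)) {
 *		... bit was clear; this caller now owns the flag ...
 *	}
 */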

static inline int
test_and_set_bit_lock(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
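
/*
 * Pairing sketch: test_and_set_bit_lock() and clear_bit_unlock()
 * order only one side each, giving acquire/release semantics for a
 * lock bit ("word" is a hypothetical unsigned long):
 *
 *	while (test_and_set_bit_lock(0, &word))		(acquire)
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(0, &word);			(release)
 */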

/*
 * WARNING: non atomic version.
 */
static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}

#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

static inline bool xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *p)
{
	unsigned long temp, old;

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	mov %0,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*p), "=&r" (old)
	:"Ir" (mask), "m" (*p));

	return (old & BIT(7)) != 0;
}
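
/*
 * Worked example: with mask == 0x01 and an old low byte of 0x81, the
 * xor clears bit 0 (0x81 -> 0x80) and the function returns true,
 * since bit 7 was set in the old value; an old low byte of 0x01
 * yields false.
 */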

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 *
 * Do a binary search on the bits.  Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum, x1, x2, x4;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	x1 = x & 0xAA;
	x2 = x & 0xCC;
	x4 = x & 0xF0;
	sum = x2 ? 2 : 0;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}
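
/*
 * Worked example: for x == 0xf7 the first zero is bit 3, so
 * ~x & -~x == 0x08; then x1 == 0x08, x2 == 0x08, x4 == 0 and
 * sum == 2 + 0 + 1 == 3.
 */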

static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(word, ~0UL);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}
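
/*
 * Worked example for the generic path: with word == 0xff, cmpbge
 * flags only byte 0 as all-ones (bits == 0x01), so qofs == 1; byte 1
 * of word is zero, so bofs == 0 and the result is 1*8 + 0 == 8.
 */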

/*
 * __ffs = Find First set bit in word.  Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(0, word);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);

	return qofs*8 + bofs;
#endif
}
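
/*
 * Worked example: __ffs(0x8) == 3 and __ffs(0x100) == 8; the result
 * is the zero-based index of the lowest set bit.
 */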

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */

static inline int ffs(int word)
{
	int result = __ffs(word) + 1;
	return word ? result : 0;
}
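
/*
 * Worked example: ffs(0) == 0, ffs(1) == 1 and ffs(0x8000) == 16;
 * unlike __ffs(), the result is one-based and a zero argument is
 * well defined.
 */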

/*
 * fls: find last bit set.
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls64(unsigned long word)
{
	return 64 - __kernel_ctlz(word);
}
#else
extern const unsigned char __flsm1_tab[256];

static inline int fls64(unsigned long x)
{
	unsigned long t, a, r;

	t = __kernel_cmpbge (x, 0x0101010101010101UL);
	a = __flsm1_tab[t];
	t = __kernel_extbl (x, a);
	r = a*8 + __flsm1_tab[t] + (x != 0);

	return r;
}
#endif

static inline unsigned long __fls(unsigned long x)
{
	return fls64(x) - 1;
}

static inline int fls(unsigned int x)
{
	return fls64(x);
}
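
/*
 * Worked example: fls(0) == 0, fls(1) == 1 and
 * fls(0x80000000u) == 32; __fls() is the zero-based variant, so
 * __fls(0x80000000ul) == 31.
 */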

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee.  EV67 can calculate it directly.  */
static inline unsigned long __arch_hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

static inline unsigned int __arch_hweight32(unsigned int w)
{
	return __arch_hweight64(w);
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
	return __arch_hweight64(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
	return __arch_hweight64(w & 0xff);
}
#else
#include <asm-generic/bitops/arch_hweight.h>
#endif
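
/*
 * Worked example: the Hamming weight of 0xb7 (10110111) is 6, so
 * __arch_hweight8(0xb7) == 6 on either path above.
 */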

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 100-bit bitmap.  It's guaranteed that at least
 * one of the 100 bits is set.
 */
static inline unsigned long
sched_find_first_bit(const unsigned long b[2])
{
	unsigned long b0, b1, ofs, tmp;

	b0 = b[0];
	b1 = b[1];
	ofs = (b0 ? 0 : 64);
	tmp = (b0 ? b0 : b1);

	return __ffs(tmp) + ofs;
}
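
/*
 * Worked example: with b[0] == 0 and b[1] == 0x4, the first set bit
 * is in the second longword, so the result is __ffs(0x4) + 64 == 66.
 */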

#include <asm-generic/bitops/non-instrumented-non-atomic.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */