#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */
#include <linux/config.h>

/*
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That, combined with the
 * big endian byte order on S390, gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
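
/*
 * Illustration (a sketch, not part of the interface): with this
 * numbering, bit nr lives in memory byte ((nr >> 3) ^ 3) of the
 * 4 byte word, at position (nr & 7) within that byte. Both styles
 * therefore hit the same memory bit:
 *
 *     unsigned long flags = 0;
 *     flags |= (1 << 5);       generic code: 0x20 ends up in byte 3
 *     set_bit(5, &flags);      same bit: byte (5>>3)^3 = 3, mask 0x20
 */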

/* Set ALIGN_CS to 1 if the SMP safe bit operations should
 * align the address to a 4 byte boundary, as COMPARE AND SWAP
 * requires a word aligned operand. Kernel bitmaps are long
 * aligned, so the kernel works without the explicit alignment.
 */
#ifdef __KERNEL__
#define ALIGN_CS 0
#else
#define ALIGN_CS 1
#ifndef CONFIG_SMP
#error "bitops won't work without CONFIG_SMP"
#endif
#endif

/* bitmap tables from arch/s390/kernel/bitmap.S */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static __inline__ void set_bit_cs(int nr, volatile void * addr)
{
	unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lhi   %2,3\n"         /* CS must be aligned on 4 byte b. */
             "   nr    %2,%1\n"        /* isolate last 2 bits of address */
             "   xr    %1,%2\n"        /* make addr % 4 == 0 */
             "   sll   %2,3\n"
             "   ar    %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lhi   %2,31\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srl   %0,3\n"
             "   lhi   %3,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sll   %3,0(%2)\n"     /* make OR mask */
             "   l     %0,0(%1)\n"
             "0: lr    %2,%0\n"        /* CS loop starts here */
             "   or    %2,%3\n"        /* set bit */
             "   cs    %0,%2,0(%1)\n"
             "   jl    0b"
             : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
             : "cc", "memory" );
}
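
/*
 * A minimal C sketch of the CS loop above. "cs(old, new, ptr)" stands
 * for COMPARE AND SWAP: it stores new into *ptr if *ptr still equals
 * old, and otherwise reloads old from *ptr and signals failure:
 *
 *     old = *ptr;
 *     do
 *             new = old | mask;
 *     while (cs(old, new, ptr) failed);
 *
 * The interlocked update never loses concurrent modifications of the
 * other 31 bits in the word.
 */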

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static __inline__ void clear_bit_cs(int nr, volatile void * addr)
{
        static const int minusone = -1;
	unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lhi   %2,3\n"         /* CS must be aligned on 4 byte b. */
             "   nr    %2,%1\n"        /* isolate last 2 bits of address */
             "   xr    %1,%2\n"        /* make addr % 4 == 0 */
             "   sll   %2,3\n"
             "   ar    %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lhi   %2,31\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srl   %0,3\n"
             "   lhi   %3,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sll   %3,0(%2)\n"
             "   x     %3,%4\n"        /* XOR with -1: make AND mask */
             "   l     %0,0(%1)\n"
             "0: lr    %2,%0\n"        /* CS loop starts here */
             "   nr    %2,%3\n"        /* clear bit */
             "   cs    %0,%2,0(%1)\n"
             "   jl    0b"
             : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask)
             : "m" (minusone) : "cc", "memory" );
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static __inline__ void change_bit_cs(int nr, volatile void * addr)
{
	unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lhi   %2,3\n"         /* CS must be aligned on 4 byte b. */
             "   nr    %2,%1\n"        /* isolate last 2 bits of address */
             "   xr    %1,%2\n"        /* make addr % 4 == 0 */
             "   sll   %2,3\n"
             "   ar    %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lhi   %2,31\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srl   %0,3\n"
             "   lhi   %3,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sll   %3,0(%2)\n"     /* make XOR mask */
             "   l     %0,0(%1)\n"
             "0: lr    %2,%0\n"        /* CS loop starts here */
             "   xr    %2,%3\n"        /* change bit */
             "   cs    %0,%2,0(%1)\n"
             "   jl    0b"
             : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
             : "cc", "memory" );
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static __inline__ int test_and_set_bit_cs(int nr, volatile void * addr)
{
	unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lhi   %2,3\n"         /* CS must be aligned on 4 byte b. */
             "   nr    %2,%1\n"        /* isolate last 2 bits of address */
             "   xr    %1,%2\n"        /* make addr % 4 == 0 */
             "   sll   %2,3\n"
             "   ar    %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lhi   %2,31\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srl   %0,3\n"
             "   lhi   %3,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sll   %3,0(%2)\n"     /* make OR mask */
             "   l     %0,0(%1)\n"
             "0: lr    %2,%0\n"        /* CS loop starts here */
             "   or    %2,%3\n"        /* set bit */
             "   cs    %0,%2,0(%1)\n"
             "   jl    0b\n"
             "   nr    %0,%3\n"        /* isolate old bit */
             : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
             : "cc", "memory" );
        return nr != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static __inline__ int test_and_clear_bit_cs(int nr, volatile void * addr)
{
        static const int minusone = -1;
	unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lhi   %2,3\n"         /* CS must be aligned on 4 byte b. */
             "   nr    %2,%1\n"        /* isolate last 2 bits of address */
             "   xr    %1,%2\n"        /* make addr % 4 == 0 */
             "   sll   %2,3\n"
             "   ar    %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lhi   %2,31\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srl   %0,3\n"
             "   lhi   %3,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sll   %3,0(%2)\n"
             "   l     %0,0(%1)\n"
             "   x     %3,%4\n"        /* XOR with -1: make AND mask */
             "0: lr    %2,%0\n"        /* CS loop starts here */
             "   nr    %2,%3\n"        /* clear bit */
             "   cs    %0,%2,0(%1)\n"
             "   jl    0b\n"
             "   x     %3,%4\n"        /* restore the single-bit mask */
             "   nr    %0,%3\n"        /* isolate old bit */
             : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask)
             : "m" (minusone) : "cc", "memory" );
        return nr != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static __inline__ int test_and_change_bit_cs(int nr, volatile void * addr)
{
	unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lhi   %2,3\n"         /* CS must be aligned on 4 byte b. */
             "   nr    %2,%1\n"        /* isolate last 2 bits of address */
             "   xr    %1,%2\n"        /* make addr % 4 == 0 */
             "   sll   %2,3\n"
             "   ar    %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lhi   %2,31\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srl   %0,3\n"
             "   lhi   %3,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sll   %3,0(%2)\n"     /* make XOR mask */
             "   l     %0,0(%1)\n"
             "0: lr    %2,%0\n"        /* CS loop starts here */
             "   xr    %2,%3\n"        /* change bit */
             "   cs    %0,%2,0(%1)\n"
             "   jl    0b\n"
             "   nr    %0,%3\n"        /* isolate old bit */
             : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
             : "cc", "memory" );
        return nr != 0;
}
#endif /* CONFIG_SMP */

/*
 * fast, non-SMP set_bit routine
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	unsigned long reg1, reg2;
        __asm__ __volatile__(
             "   lhi   %1,24\n"
             "   lhi   %0,7\n"
             "   xr    %1,%2\n"        /* 24 ^ nr: reverse byte index */
             "   nr    %0,%2\n"        /* nr & 7: bit within the byte */
             "   srl   %1,3\n"         /* byte offset within the bitmap */
             "   la    %1,0(%1,%3)\n"  /* address of the byte */
             "   la    %0,0(%0,%4)\n"  /* address of the OR mask byte */
             "   oc    0(1,%1),0(%0)"  /* OR the mask into the byte */
             : "=&a" (reg1), "=&a" (reg2)
             : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
}

static __inline__ void
__constant_set_bit(const int nr, volatile void * addr)
{
  switch (nr&7) {
  case 0:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x01"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 1:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x02"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 2:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x04"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 3:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x08"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 4:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x10"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 5:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x20"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 6:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x40"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 7:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x80"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  }
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )
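
/*
 * Dispatch sketch: set_bit_simple() picks the single-instruction OI
 * variant when the bit number is a compile time constant:
 *
 *     set_bit_simple(5, &flags);     constant nr: one OI instruction
 *     set_bit_simple(n, &flags);     variable nr: _oi_bitmap lookup + OC
 *
 * (flags and n are hypothetical names for illustration.)
 */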

/*
 * fast, non-SMP clear_bit routine
 */
static __inline__ void
__clear_bit(int nr, volatile void * addr)
{
	unsigned long reg1, reg2;
        __asm__ __volatile__(
             "   lhi   %1,24\n"
             "   lhi   %0,7\n"
             "   xr    %1,%2\n"        /* 24 ^ nr: reverse byte index */
             "   nr    %0,%2\n"        /* nr & 7: bit within the byte */
             "   srl   %1,3\n"         /* byte offset within the bitmap */
             "   la    %1,0(%1,%3)\n"  /* address of the byte */
             "   la    %0,0(%0,%4)\n"  /* address of the AND mask byte */
             "   nc    0(1,%1),0(%0)"  /* AND the mask into the byte */
             : "=&a" (reg1), "=&a" (reg2)
             : "r" (nr), "a" (addr), "a" (&_ni_bitmap) : "cc", "memory" );
}

static __inline__ void
__constant_clear_bit(const int nr, volatile void * addr)
{
  switch (nr&7) {
  case 0:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xFE"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 1:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xFD"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 2:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xFB"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 3:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xF7"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 4:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xEF"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 5:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xDF"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 6:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xBF"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 7:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0x7F"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  }
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long reg1, reg2;
        __asm__ __volatile__(
             "   lhi   %1,24\n"
             "   lhi   %0,7\n"
             "   xr    %1,%2\n"        /* 24 ^ nr: reverse byte index */
             "   nr    %0,%2\n"        /* nr & 7: bit within the byte */
             "   srl   %1,3\n"         /* byte offset within the bitmap */
             "   la    %1,0(%1,%3)\n"  /* address of the byte */
             "   la    %0,0(%0,%4)\n"  /* XOR mask: same table as OR */
             "   xc    0(1,%1),0(%0)"  /* XOR the mask into the byte */
             : "=&a" (reg1), "=&a" (reg2)
             : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
}

static __inline__ void
__constant_change_bit(const int nr, volatile void * addr)
{
  switch (nr&7) {
  case 0:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x01"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 1:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x02"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 2:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x04"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 3:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x08"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 4:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x10"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 5:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x20"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 6:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x40"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 7:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x80"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  }
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )

/*
 * fast, non-SMP test_and_set_bit routine
 */
static __inline__ int test_and_set_bit_simple(int nr, volatile void * addr)
{
	unsigned long reg1, reg2;
        int oldbit;
        __asm__ __volatile__(
             "   lhi   %1,24\n"
             "   lhi   %2,7\n"
             "   xr    %1,%3\n"        /* 24 ^ nr: reverse byte index */
             "   nr    %2,%3\n"        /* nr & 7: bit within the byte */
             "   srl   %1,3\n"         /* byte offset within the bitmap */
             "   la    %1,0(%1,%4)\n"  /* address of the byte */
             "   ic    %0,0(%1)\n"     /* fetch the old byte */
             "   srl   %0,0(%2)\n"     /* move the old bit to position 0 */
             "   la    %2,0(%2,%5)\n"  /* address of the OR mask byte */
             "   oc    0(1,%1),0(%2)"  /* OR the mask into the byte */
             : "=&d" (oldbit), "=&a" (reg1), "=&a" (reg2)
             : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
        return oldbit & 1;
}
#define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static __inline__ int test_and_clear_bit_simple(int nr, volatile void * addr)
{
	unsigned long reg1, reg2;
        int oldbit;

        __asm__ __volatile__(
             "   lhi   %1,24\n"
             "   lhi   %2,7\n"
             "   xr    %1,%3\n"        /* 24 ^ nr: reverse byte index */
             "   nr    %2,%3\n"        /* nr & 7: bit within the byte */
             "   srl   %1,3\n"         /* byte offset within the bitmap */
             "   la    %1,0(%1,%4)\n"  /* address of the byte */
             "   ic    %0,0(%1)\n"     /* fetch the old byte */
             "   srl   %0,0(%2)\n"     /* move the old bit to position 0 */
             "   la    %2,0(%2,%5)\n"  /* address of the AND mask byte */
             "   nc    0(1,%1),0(%2)"  /* AND the mask into the byte */
             : "=&d" (oldbit), "=&a" (reg1), "=&a" (reg2)
             : "r" (nr), "a" (addr), "a" (&_ni_bitmap) : "cc", "memory" );
        return oldbit & 1;
}
#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static __inline__ int test_and_change_bit_simple(int nr, volatile void * addr)
{
	unsigned long reg1, reg2;
        int oldbit;

        __asm__ __volatile__(
             "   lhi   %1,24\n"
             "   lhi   %2,7\n"
             "   xr    %1,%3\n"        /* 24 ^ nr: reverse byte index */
             "   nr    %2,%3\n"        /* nr & 7: bit within the byte */
             "   srl   %1,3\n"         /* byte offset within the bitmap */
             "   la    %1,0(%1,%4)\n"  /* address of the byte */
             "   ic    %0,0(%1)\n"     /* fetch the old byte */
             "   srl   %0,0(%2)\n"     /* move the old bit to position 0 */
             "   la    %2,0(%2,%5)\n"  /* XOR mask: same table as OR */
             "   xc    0(1,%1),0(%2)"  /* XOR the mask into the byte */
             : "=&d" (oldbit), "=&a" (reg1), "=&a" (reg2)
             : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
        return oldbit & 1;
}
#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif


/*
 * This routine doesn't need to be atomic.
 */

static __inline__ int __test_bit(int nr, volatile void * addr)
{
	unsigned long reg1, reg2;
        int oldbit;

        __asm__ __volatile__(
             "   lhi   %2,24\n"
             "   lhi   %1,7\n"
             "   xr    %2,%3\n"        /* 24 ^ nr: reverse byte index */
             "   nr    %1,%3\n"        /* nr & 7: bit within the byte */
             "   srl   %2,3\n"         /* byte offset within the bitmap */
             "   ic    %0,0(%2,%4)\n"  /* fetch the byte */
             "   srl   %0,0(%1)"       /* move the bit to position 0 */
             : "=&d" (oldbit), "=&a" (reg1), "=&a" (reg2)
             : "r" (nr), "a" (addr) : "cc" );
        return oldbit & 1;
}

static __inline__ int __constant_test_bit(int nr, volatile void * addr) {
    return (((volatile char *) addr)[(nr>>3)^3] & (1<<(nr&7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )
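
/*
 * A sketch of the (nr>>3)^3 arithmetic used above: nr>>3 counts bytes
 * from the least significant end of the word, and XORing with 3
 * converts that to a byte offset within the big endian word:
 *
 *     nr = 5:    byte (5>>3)^3  = 3,  mask 1<<(5&7)  = 0x20
 *     nr = 24:   byte (24>>3)^3 = 0,  mask 1<<(24&7) = 0x01
 */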

/*
 * Find-bit routines..
 */
static __inline__ int find_first_zero_bit(void * addr, unsigned size)
{
	unsigned long cmp, count;
        int res;

        if (!size)
                return 0;
        __asm__("   lhi  %1,-1\n"
                "   lr   %2,%3\n"
                "   slr  %0,%0\n"
                "   ahi  %2,31\n"
                "   srl  %2,5\n"          /* number of words to check */
                "0: c    %1,0(%0,%4)\n"   /* skip words with all bits set */
                "   jne  1f\n"
                "   ahi  %0,4\n"
                "   brct %2,0b\n"
                "   lr   %0,%3\n"         /* no zero found: return size */
                "   j    4f\n"
                "1: l    %2,0(%0,%4)\n"
                "   sll  %0,3\n"          /* byte offset -> bit offset */
                "   lhi  %1,0xff\n"
                "   tml  %2,0xffff\n"     /* lower halfword all ones? */
                "   jno  2f\n"
                "   ahi  %0,16\n"
                "   srl  %2,16\n"
                "2: tml  %2,0x00ff\n"     /* lower byte all ones? */
                "   jno  3f\n"
                "   ahi  %0,8\n"
                "   srl  %2,8\n"
                "3: nr   %2,%1\n"         /* isolate the byte */
                "   ic   %2,0(%2,%5)\n"   /* first zero bit via table */
                "   alr  %0,%2\n"
                "4:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (addr), "a" (&_zb_findmap) : "cc" );
        return (res < size) ? res : size;
}
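
/*
 * Usage sketch (bitmap and NR_SLOTS are hypothetical names): find and
 * atomically claim a free slot; find_first_zero_bit returns size when
 * no zero bit exists:
 *
 *     do
 *             bit = find_first_zero_bit(bitmap, NR_SLOTS);
 *     while (bit < NR_SLOTS && test_and_set_bit(bit, bitmap));
 */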

static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
        unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long bitvec, reg;
        int set, bit = offset & 31, res;

        if (bit) {
                /*
                 * Look for zero in first word
                 */
                bitvec = (*p) >> bit;
                __asm__("   slr  %0,%0\n"
                        "   lhi  %2,0xff\n"
                        "   tml  %1,0xffff\n"
                        "   jno  0f\n"
                        "   ahi  %0,16\n"
                        "   srl  %1,16\n"
                        "0: tml  %1,0x00ff\n"
                        "   jno  1f\n"
                        "   ahi  %0,8\n"
                        "   srl  %1,8\n"
                        "1: nr   %1,%2\n"
                        "   ic   %1,0(%1,%3)\n"
                        "   alr  %0,%1"
                        : "=&d" (set), "+a" (bitvec), "=&d" (reg)
                        : "a" (&_zb_findmap) : "cc" );
                if (set < (32 - bit))
                        return set + offset;
                offset += 32 - bit;
                p++;
        }
        /*
         * No zero yet, search remaining full words for a zero
         */
        res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
        return (offset + res);
}

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long reg;
        int result;

        __asm__("   slr  %0,%0\n"
                "   lhi  %2,0xff\n"
                "   tml  %1,0xffff\n"     /* lower halfword all ones? */
                "   jno  0f\n"
                "   ahi  %0,16\n"
                "   srl  %1,16\n"
                "0: tml  %1,0x00ff\n"     /* lower byte all ones? */
                "   jno  1f\n"
                "   ahi  %0,8\n"
                "   srl  %1,8\n"
                "1: nr   %1,%2\n"         /* isolate the byte */
                "   ic   %1,0(%1,%3)\n"   /* first zero bit via table */
                "   alr  %0,%1"
                : "=&d" (result), "+a" (word), "=&d" (reg)
                : "a" (&_zb_findmap) : "cc" );
        return result;
}
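
/*
 * Usage sketch, guarding against the undefined all-ones case:
 *
 *     if (word != ~0UL)
 *             bit = ffz(word);     index of the lowest zero bit
 */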
709
710/*
711 * ffs: find first bit set. This is defined the same way as
712 * the libc and compiler builtin ffs routines, therefore
713 * differs in spirit from the above ffz (man ffs).
714 */
715
716extern int __inline__ ffs (int x)
717{
718        int r;
719
720        if (x == 0)
721          return 0;
722        __asm__("    slr  %0,%0\n"
723                "    tml  %1,0xffff\n"
724                "    jnz  0f\n"
725                "    ahi  %0,16\n"
726                "    srl  %1,16\n"
727                "0:  tml  %1,0x00ff\n"
728                "    jnz  1f\n"
729                "    ahi  %0,8\n"
730                "    srl  %1,8\n"
731                "1:  tml  %1,0x000f\n"
732                "    jnz  2f\n"
733                "    ahi  %0,4\n"
734                "    srl  %1,4\n"
735                "2:  tml  %1,0x0003\n"
736                "    jnz  3f\n"
737                "    ahi  %0,2\n"
738                "    srl  %1,2\n"
739                "3:  tml  %1,0x0001\n"
740                "    jnz  4f\n"
741                "    ahi  %0,1\n"
742                "4:"
743                : "=&d" (r), "+d" (x) : : "cc" );
744        return r+1;
745}
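
/*
 * Semantics sketch, matching the libc convention (1-based, with 0
 * meaning "no bit set"):
 *
 *     ffs(0)    == 0
 *     ffs(1)    == 1      bit 0 set
 *     ffs(0x80) == 8      bit 7 set
 */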

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
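
/*
 * Semantics sketch: the generic_hweight* helpers from the common
 * bitops code simply count the set bits, e.g.
 *
 *     hweight32(0x0000000f) == 4
 *     hweight8(0x81)        == 2
 */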


#ifdef __KERNEL__

/*
 * ATTENTION: Intel byte ordering convention for ext2 and minix!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That, combined with the little endian byte order of Intel, gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */

#define ext2_set_bit(nr, addr)       test_and_set_bit((nr)^24, addr)
#define ext2_clear_bit(nr, addr)     test_and_clear_bit((nr)^24, addr)
#define ext2_test_bit(nr, addr)      test_bit((nr)^24, addr)
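
/*
 * A sketch of the nr^24 translation: XORing the bit number with 24
 * (0x18) reverses the byte index within the word while keeping the
 * bit position inside the byte, which maps the little endian ext2
 * numbering onto the kernel numbering above:
 *
 *     ext2 bit 0 -> kernel bit 24   (memory byte 0, mask 0x01)
 *     ext2 bit 9 -> kernel bit 17   (memory byte 1, mask 0x02)
 */
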
static __inline__ int ext2_find_first_zero_bit(void *vaddr, unsigned size)
{
	unsigned long cmp, count;
        int res;

        if (!size)
                return 0;
        __asm__("   lhi  %1,-1\n"
                "   lr   %2,%3\n"
                "   ahi  %2,31\n"
                "   srl  %2,5\n"           /* number of words to check */
                "   slr  %0,%0\n"
                "0: cl   %1,0(%0,%4)\n"    /* skip words with all bits set */
                "   jne  1f\n"
                "   ahi  %0,4\n"
                "   brct %2,0b\n"
                "   lr   %0,%3\n"          /* no zero found: return size */
                "   j    4f\n"
                "1: l    %2,0(%0,%4)\n"
                "   sll  %0,3\n"           /* byte offset -> bit offset */
                "   ahi  %0,24\n"          /* assume the zero is in byte 3 */
                "   lhi  %1,0xff\n"
                "   tmh  %2,0xffff\n"      /* bytes 0 and 1 all ones? */
                "   jo   2f\n"
                "   ahi  %0,-16\n"
                "   srl  %2,16\n"
                "2: tml  %2,0xff00\n"      /* upper byte all ones? */
                "   jo   3f\n"
                "   ahi  %0,-8\n"
                "   srl  %2,8\n"
                "3: nr   %2,%1\n"          /* isolate the byte */
                "   ic   %2,0(%2,%5)\n"    /* first zero bit via table */
                "   alr  %0,%2\n"
                "4:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (vaddr), "a" (&_zb_findmap) : "cc" );
        return (res < size) ? res : size;
}

static __inline__ int
ext2_find_next_zero_bit(void *vaddr, unsigned size, unsigned offset)
{
        unsigned long *addr = vaddr;
        unsigned long *p = addr + (offset >> 5);
        unsigned long word, reg;
        int bit = offset & 31UL, res;

        if (offset >= size)
                return size;

        if (bit) {
                __asm__("   ic   %0,0(%1)\n"      /* load the word ... */
                        "   icm  %0,2,1(%1)\n"    /* ... byte swapped, so */
                        "   icm  %0,4,2(%1)\n"    /* ... ext2 bit n is */
                        "   icm  %0,8,3(%1)"      /* ... register bit n */
                        : "=&a" (word) : "a" (p) : "cc" );
                word >>= bit;
                res = bit;
                /* Look for zero in first longword */
                __asm__("   lhi  %2,0xff\n"
                        "   tml  %1,0xffff\n"
                        "   jno  0f\n"
                        "   ahi  %0,16\n"
                        "   srl  %1,16\n"
                        "0: tml  %1,0x00ff\n"
                        "   jno  1f\n"
                        "   ahi  %0,8\n"
                        "   srl  %1,8\n"
                        "1: nr   %1,%2\n"
                        "   ic   %1,0(%1,%3)\n"
                        "   alr  %0,%1"
                        : "+&d" (res), "+&a" (word), "=&d" (reg)
                        : "a" (&_zb_findmap) : "cc" );
                if (res < 32)
                        return (p - addr)*32 + res;
                p++;
        }
        /* No zero yet, search remaining full words for a zero */
        res = ext2_find_first_zero_bit (p, size - 32 * (p - addr));
        return (p - addr) * 32 + res;
}
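
/*
 * Usage sketch (map and size are hypothetical names): iterate over
 * all zero bits of an ext2 bitmap:
 *
 *     for (bit = ext2_find_first_zero_bit(map, size); bit < size;
 *          bit = ext2_find_next_zero_bit(map, size, bit + 1))
 *             ...
 */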

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _S390_BITOPS_H */
