/*
 * bitops.c: atomic operations which got too long to be inlined all over
 *      the place.
 *
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Grant Grundler (grundler@cup.hp.com)
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/atomic.h>

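/*
 * PA-RISC has no hardware compare-and-swap or exchange instruction;
 * ldcw (load and clear word) is its only atomic memory primitive.
 * atomic_t, xchg() and cmpxchg() are therefore emulated by taking a
 * spinlock chosen by hashing the operand's address into the
 * __atomic_hash[] array below.
 */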
#ifdef CONFIG_SMP
raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
	[0 ... (ATOMIC_HASH_SIZE-1)]  = __RAW_SPIN_LOCK_UNLOCKED
};
#endif

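/*
 * __xchg*(): store the new value at *ptr and return the old one.
 * The per-address hashed lock, taken with interrupts disabled, stands
 * in for the atomic exchange instruction the hardware lacks.
 */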
#ifdef CONFIG_64BIT
unsigned long __xchg64(unsigned long x, unsigned long *ptr)
{
	unsigned long temp, flags;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = *ptr;
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return temp;
}
#endif

unsigned long __xchg32(int x, int *ptr)
{
	unsigned long flags;
	long temp;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = (long) *ptr;
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)temp;
}


unsigned long __xchg8(char x, char *ptr)
{
	unsigned long flags;
	long temp;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = (long) *ptr;
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)temp;
}

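/*
 * __cmpxchg_u*(): if *ptr still holds 'old', replace it with 'new'.
 * The previous value is returned either way, so callers can tell
 * whether the swap actually happened.  Again the hashed lock supplies
 * the atomicity that the hardware cannot.
 */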
#ifdef CONFIG_64BIT
unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new)
{
	unsigned long flags;
	unsigned long prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return prev;
}
#endif

unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)prev;
}