/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#include <asm/addrspace.h>
#include <asm/sync.h>

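/*
 * __sync() emits an unconditional full SYNC barrier; rmb() and wmb()
 * emit the lighter read-only/write-only SYNC variants where the CPU
 * supports them (see <asm/sync.h>). The "memory" clobber additionally
 * makes each of these a compiler barrier.
 */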
static inline void __sync(void)
{
	asm volatile(__SYNC(full, always) ::: "memory");
}

static inline void rmb(void)
{
	asm volatile(__SYNC(rmb, always) ::: "memory");
}
#define rmb rmb

static inline void wmb(void)
{
	asm volatile(__SYNC(wmb, always) ::: "memory");
}
#define wmb wmb

#define fast_mb()	__sync()

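/*
 * __fast_iob() forces an uncached load from KSEG1: the load cannot
 * return until previously buffered writes have reached the system bus,
 * so it serves to drain the write buffer ahead of I/O accesses.
 */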
#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define fast_iob()	do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
# ifdef CONFIG_SGI_IP28
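/*
 * SGI IP28 (speculating R10000) uses a stronger sequence: an uncached
 * load, a full SYNC, then a second uncached load from a fixed I/O
 * address, so that the CPU's speculation cannot defeat the flush.
 */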
#  define fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"sync\n\t"			\
		"lw	$0,%0\n\t"		\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \
		: "memory")
# else
#  define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)
# endif
#endif /* CONFIG_CPU_CAVIUM_OCTEON */

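/*
 * CPUs with an external write buffer (CONFIG_CPU_HAS_WB, e.g. R3000-era
 * parts) drain it explicitly through wbflush(); everything else falls
 * back to the SYNC-based implementations above.
 */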
#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

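/*
 * On weakly ordered systems the SMP barriers must emit real SYNC
 * instructions; on strongly ordered ones a compiler barrier suffices.
 */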
#if defined(CONFIG_WEAK_ORDERING)
# define __smp_mb()	__sync()
# define __smp_rmb()	rmb()
# define __smp_wmb()	wmb()
#else
# define __smp_mb()	barrier()
# define __smp_rmb()	barrier()
# define __smp_wmb()	barrier()
#endif

/*
 * When LL/SC does imply order, it must also be a compiler barrier to keep
 * the compiler from reordering where the CPU will not. When it does not
 * imply order, the compiler is also free to reorder across the LL/SC loop
 * and ordering will be done by smp_llsc_mb() and friends.
 */
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
# define __WEAK_LLSC_MB		sync
# define smp_llsc_mb() \
	__asm__ __volatile__(__stringify(__WEAK_LLSC_MB) : : :"memory")
# define __LLSC_CLOBBER
#else
# define __WEAK_LLSC_MB
# define smp_llsc_mb()		do { } while (0)
# define __LLSC_CLOBBER		"memory"
#endif
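/*
 * Illustration only (hypothetical helper, not part of this header): a
 * minimal LL/SC loop showing where __LLSC_CLOBBER and smp_llsc_mb()
 * slot in. Real users such as <asm/atomic.h> follow the same pattern
 * with stricter operand constraints (GCC_OFF_SMALL_ASM()):
 *
 *	static inline void example_llsc_inc(int *p)
 *	{
 *		int tmp;
 *
 *		smp_mb__before_llsc();
 *		__asm__ __volatile__(
 *		"1:	ll	%0, %1		\n"
 *		"	addiu	%0, %0, 1	\n"
 *		"	sc	%0, %1		\n"
 *		"	beqz	%0, 1b		\n"
 *		: "=&r" (tmp), "+m" (*p)
 *		: : __LLSC_CLOBBER);
 *		smp_llsc_mb();
 *	}
 *
 * When LL/SC does not order accesses beyond the loop, __LLSC_CLOBBER is
 * empty and the trailing smp_llsc_mb() emits the SYNC; otherwise the
 * clobber is "memory" and smp_llsc_mb() expands to nothing.
 */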

#ifdef CONFIG_CPU_CAVIUM_OCTEON
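/*
 * Octeon does not speculate reads, so ordering earlier stores is all
 * that is needed ahead of an LL/SC sequence: a wmb() is sufficient.
 */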
#define smp_mb__before_llsc() smp_wmb()
#define __smp_mb__before_llsc() __smp_wmb()
/* Cause previous writes to become visible on all CPUs as soon as possible */
#define nudge_writes() __asm__ __volatile__(".set push\n\t"		\
					    ".set arch=octeon\n\t"	\
					    "syncw\n\t"			\
					    ".set pop" : : : "memory")
#else
#define smp_mb__before_llsc() smp_llsc_mb()
#define __smp_mb__before_llsc() smp_llsc_mb()
#define nudge_writes() mb()
#endif

/*
 * In the Loongson3 LL/SC workaround case, all of our LL/SC loops already have
 * a completion barrier immediately preceding the LL instruction. Therefore we
 * can skip emitting a barrier from __smp_mb__before_atomic().
 */
#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
# define __smp_mb__before_atomic()
#else
# define __smp_mb__before_atomic()	__smp_mb__before_llsc()
#endif

#define __smp_mb__after_atomic()	smp_llsc_mb()

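/*
 * Emit a SYNC of the GINV type: a completion barrier for preceding
 * globalized invalidate (ginvi/ginvt) instructions.
 */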
static inline void sync_ginv(void)
{
	asm volatile(__SYNC(ginv, always));
}

#include <asm-generic/barrier.h>

#endif /* __ASM_BARRIER_H */