/*
 * File:        include/asm/system.h
 * Based on:
 * Author:      Tony Kou (tonyko@lineo.ca)
 *              Copyright (c) 2002 Arcturus Networks Inc.
 *                    (www.arcturusnetworks.com)
 *              Copyright (c) 2003 Metrowerks (www.metrowerks.com)
 *              Copyright (c) 2004 Analog Devices Inc.
 * Created:     25Jan2001 - Tony Kou
 * Description: system.h include file
 *
 * Modified:     22Sep2006 - Robin Getz
 *                - move include blackfin.h down, so I can get access to
 *                   irq functions in other include files.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.
 * If not, write to the Free Software Foundation,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#ifndef _BLACKFIN_SYSTEM_H
#define _BLACKFIN_SYSTEM_H

#include <linux/linkage.h>
#include <linux/compiler.h>

/*
 * Interrupt configuring macros.
 */

extern unsigned long irq_flags;

#define local_irq_enable() do {		\
	__asm__ __volatile__ (		\
		"sti %0;"		\
		::"d"(irq_flags));	\
} while (0)

#define local_irq_disable() do {	\
	int _tmp_dummy;			\
	__asm__ __volatile__ (		\
		"cli %0;"		\
		:"=d" (_tmp_dummy):);	\
} while (0)

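/*
 * The padding and forced alignment in the first variant below are meant to
 * work around anomaly 05000244 (with the instruction cache enabled, a
 * CSYNC/SSYNC/IDLE near a change of control can misbehave): the NOPs and
 * the ".align 8" keep the STI/IDLE pair together on an aligned fetch
 * boundary, away from any change of control.
 */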
#if defined(ANOMALY_05000244) && defined(CONFIG_BLKFIN_CACHE)
#define idle_with_irq_disabled() do {	\
	__asm__ __volatile__ (		\
		"nop; nop;\n"		\
		".align 8;\n"		\
		"sti %0; idle;\n"	\
		::"d" (irq_flags));	\
} while (0)
#else
#define idle_with_irq_disabled() do {	\
	__asm__ __volatile__ (		\
		".align 8;\n"		\
		"sti %0; idle;\n"	\
		::"d" (irq_flags));	\
} while (0)
#endif

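/*
 * With CONFIG_DEBUG_HWERR, "interrupts off" still leaves the low six
 * events unmasked ("sti 0x3F") rather than the usual five, presumably so
 * that the hardware error event (given the config name) keeps being
 * reported while interrupts are nominally disabled.  This is also why
 * irqs_enabled_from_flags() below tests against a different mask in the
 * two configurations.
 */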
#ifdef CONFIG_DEBUG_HWERR
#define __save_and_cli(x) do {			\
	__asm__ __volatile__ (			\
		"cli %0;\n\tsti %1;"		\
		:"=&d"(x): "d" (0x3F));		\
} while (0)
#else
#define __save_and_cli(x) do {		\
	__asm__ __volatile__ (		\
		"cli %0;"		\
		:"=&d"(x):);		\
} while (0)
#endif

#define local_save_flags(x) asm volatile ("cli %0;"	\
					  "sti %0;"	\
					  :"=d"(x):)

#ifdef CONFIG_DEBUG_HWERR
#define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0)
#else
#define irqs_enabled_from_flags(x) ((x) != 0x1f)
#endif

#define local_irq_restore(x) do {			\
	if (irqs_enabled_from_flags(x))			\
		local_irq_enable();			\
} while (0)

/* For spinlocks etc */
#define local_irq_save(x) __save_and_cli(x)

#define	irqs_disabled()				\
({						\
	unsigned long flags;			\
	local_save_flags(flags);		\
	!irqs_enabled_from_flags(flags);	\
})
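
/*
 * Illustrative sketch (not part of this header's API) of the usual
 * pattern for a short critical section on this port.  The names below are
 * made up for the example.
 *
 *	static int example_counter;		// hypothetical shared datum
 *
 *	static void example_update(int delta)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);		// cli; old IMASK into flags
 *		example_counter += delta;	// safe from interrupts here
 *		local_irq_restore(flags);	// sti only if it was enabled
 *	}
 */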
114
115/*
116 * Force strict CPU ordering.
117 */
118#define nop()  asm volatile ("nop;\n\t"::)
119#define mb()   asm volatile (""   : : :"memory")
120#define rmb()  asm volatile (""   : : :"memory")
121#define wmb()  asm volatile (""   : : :"memory")
122#define set_rmb(var, value)    do { (void) xchg(&var, value); } while (0)
123#define set_mb(var, value)     set_rmb(var, value)
124#define set_wmb(var, value)    do { var = value; wmb(); } while (0)
125
126#define read_barrier_depends() 		do { } while(0)
127
128#ifdef CONFIG_SMP
129#define smp_mb()	mb()
130#define smp_rmb()	rmb()
131#define smp_wmb()	wmb()
132#define smp_read_barrier_depends()	read_barrier_depends()
133#else
134#define smp_mb()	barrier()
135#define smp_rmb()	barrier()
136#define smp_wmb()	barrier()
137#define smp_read_barrier_depends()	do { } while(0)
138#endif
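
/*
 * Note that the mb()/rmb()/wmb() bodies above are empty asm statements
 * with a "memory" clobber, i.e. compiler-only barriers; no hardware fence
 * instruction is emitted.  A sketch of the classic producer/consumer use,
 * with names invented for the example:
 *
 *	extern int example_data;	// hypothetical payload
 *	extern int example_ready;	// hypothetical flag
 *
 *	// producer
 *	example_data = 42;
 *	wmb();			// data must be ordered before the flag
 *	example_ready = 1;
 *
 *	// consumer
 *	while (!example_ready)
 *		;
 *	rmb();			// flag must be ordered before the data
 *	consume(example_data);	// consume() is hypothetical too
 */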

#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

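/*
 * The dummy struct below exists only so that the "m" constraints in the
 * asm bodies refer to an object large enough to cover whatever *ptr
 * points at; casting through __xg() tells GCC the whole object may be
 * read and written, not just a single word.
 */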
struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	unsigned long tmp = 0;
	unsigned long flags = 0;

	/* No atomic exchange instruction is available, so disabling
	 * interrupts makes the load/store pair atomic with respect to
	 * interrupt handlers. */
	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__
			("%0 = b%2 (z);\n\t"
			 "b%2 = %1;\n\t"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("%0 = w%2 (z);\n\t"
			 "w%2 = %1;\n\t"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("%0 = %2;\n\t"
			 "%2 = %1;\n\t"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	local_irq_restore(flags);
	return tmp;
}
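
/*
 * Illustrative sketch of xchg() use (the lock variable and functions are
 * made up for the example): a trivial test-and-set lock.
 *
 *	static volatile unsigned long example_lock;	// hypothetical
 *
 *	static void example_acquire(void)
 *	{
 *		while (xchg(&example_lock, 1))	// returns the old value
 *			;			// spin until it was 0
 *	}
 *
 *	static void example_release(void)
 *	{
 *		example_lock = 0;
 *	}
 */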

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long tmp = 0;
	unsigned long flags = 0;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__
			("%0 = b%3 (z);\n\t"
			 "CC = %1 == %0;\n\t"
			 "IF !CC JUMP 1f;\n\t"
			 "b%3 = %2;\n\t"
			 "1:\n\t"
			 : "=&d" (tmp) : "d" (old), "d" (new), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("%0 = w%3 (z);\n\t"
			 "CC = %1 == %0;\n\t"
			 "IF !CC JUMP 1f;\n\t"
			 "w%3 = %2;\n\t"
			 "1:\n\t"
			 : "=&d" (tmp) : "d" (old), "d" (new), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("%0 = %3;\n\t"
			 "CC = %1 == %0;\n\t"
			 "IF !CC JUMP 1f;\n\t"
			 "%3 = %2;\n\t"
			 "1:\n\t"
			 : "=&d" (tmp) : "d" (old), "d" (new), "m" (*__xg(ptr)) : "memory");
		break;
	}
	local_irq_restore(flags);
	return tmp;
}

#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
				       (unsigned long)(n), sizeof(*(ptr))))
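
/*
 * Illustrative sketch of the usual cmpxchg() retry loop (names invented
 * for the example): a lock-free increment of a shared counter.
 *
 *	static volatile unsigned long example_count;	// hypothetical
 *
 *	static void example_inc(void)
 *	{
 *		unsigned long old, cur;
 *
 *		do {
 *			cur = example_count;
 *			old = cmpxchg(&example_count, cur, cur + 1);
 *		} while (old != cur);	// retry if someone raced us
 *	}
 */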

#define prepare_to_switch()	do { } while (0)

/*
 * switch_to(prev, next, last) switches from task "prev" to task "next";
 * resume() does the actual register-level switch, and its return value
 * (the task we switched away from) is stored in "last".
 */

#include <asm/blackfin.h>

asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_struct *next);

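/*
 * The L1 scratchpad is not preserved by hardware across a context switch,
 * so each task's view of it (l1_task_info) is copied out of and back into
 * L1_SCRATCH_TASK_INFO by hand around the call to resume().
 */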
#define switch_to(prev, next, last) \
do {	\
	memcpy(&task_thread_info(prev)->l1_task_info, L1_SCRATCH_TASK_INFO, \
	       sizeof *L1_SCRATCH_TASK_INFO); \
	memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(next)->l1_task_info, \
	       sizeof *L1_SCRATCH_TASK_INFO); \
	(last) = resume(prev, next); \
} while (0)

#endif				/* _BLACKFIN_SYSTEM_H */