/*
 *  linux/include/asm-arm/proc-armv/system.h
 *
 *  Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H

#include <linux/config.h>

#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0	@ set CR"	\
	: : "r" (x))

#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

#ifdef __ARM_ARCH_4__
#define vectors_base()	((cr_alignment & CR_V) ? 0xffff0000 : 0)
#else
#define vectors_base()	(0)
#endif
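
/*
 * Usage sketch (illustrative, not part of the original header): enable
 * alignment fault checking by setting the A bit in the cached control
 * register value and writing it back with set_cr().  The function name
 * is hypothetical.
 */
static inline void example_enable_alignment_faults(void)
{
	cr_alignment |= CR_A;	/* update the cached copy kept in entry-armv.S */
	set_cr(cr_alignment);	/* write the new value to CP15 register c1 */
}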

/*
 * A couple of speedups for the ARM
 */

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define __save_flags_cli(x)					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ save_flags_cli\n"	\
"	orr	%1, %0, #128\n"					\
"	msr	cpsr_c, %1"					\
	: "=r" (x), "=r" (temp)					\
	:							\
	: "memory");						\
	})

/*
 * Enable IRQs
 */
#define __sti()							\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ sti\n"		\
"	bic	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Disable IRQs
 */
#define __cli()							\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ cli\n"		\
"	orr	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Enable FIQs
 */
#define __stf()							\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ stf\n"		\
"	bic	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Disable FIQs
 */
#define __clf()							\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ clf\n"		\
"	orr	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Save the current IRQ & FIQ state
 */
#define __save_flags(x)						\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ save_flags\n"		\
	  : "=r" (x)						\
	  :							\
	  : "memory")

/*
 * Restore the saved IRQ & FIQ state
 */
#define __restore_flags(x)					\
	__asm__ __volatile__(					\
	"msr	cpsr_c, %0		@ restore_flags\n"	\
	:							\
	: "r" (x)						\
	: "memory")
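
/*
 * Usage sketch (illustrative only): a short critical section that must
 * not be interrupted by an IRQ.  The current interrupt state is saved,
 * IRQs are masked, and the saved state is restored afterwards, so the
 * sequence nests correctly whether or not IRQs were already disabled.
 * The function name and counter argument are hypothetical.
 */
static inline void example_protected_increment(volatile unsigned long *counter)
{
	unsigned long flags;

	__save_flags_cli(flags);	/* save CPSR and mask IRQs */
	(*counter)++;			/* not interruptible by IRQs here */
	__restore_flags(flags);		/* put the I and F bits back as saved */
}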

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif

	switch (size) {
#ifdef swp_is_buggy
		case 1:
			__save_flags_cli(flags);
			ret = *(volatile unsigned char *)ptr;
			*(volatile unsigned char *)ptr = x;
			__restore_flags(flags);
			break;

		case 4:
			__save_flags_cli(flags);
			ret = *(volatile unsigned long *)ptr;
			*(volatile unsigned long *)ptr = x;
			__restore_flags(flags);
			break;
#else
		case 1:	__asm__ __volatile__ ("swpb %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory");
			break;
		case 4:	__asm__ __volatile__ ("swp %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory");
			break;
#endif
		default: __bad_xchg(ptr, size), ret = 0;	/* keep ret initialised */
	}

	return ret;
}
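
/*
 * Usage sketch (illustrative only): atomically exchange a lock word via
 * __xchg().  In the kernel proper this is normally reached through the
 * xchg() wrapper macro in the generic ARM system.h rather than called
 * directly; the function below is hypothetical.
 */
static inline unsigned long example_test_and_set(volatile unsigned long *lock)
{
	/* Returns the previous value; 0 means we acquired the lock. */
	return __xchg(1, lock, sizeof(*lock));
}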

#endif