1#ifndef __ASM_ARM_SYSTEM_H
2#define __ASM_ARM_SYSTEM_H
3
4#include <autoconf.h>
5#include <ethdrivers/gen_config.h>
6
7#ifdef CONFIG_ARM64
8
9/*
10 * SCTLR_EL1/SCTLR_EL2/SCTLR_EL3 bits definitions
11 */
#define CR_M		(1 << 0)	/* MMU enable			*/
#define CR_A		(1 << 1)	/* Alignment abort enable	*/
#define CR_C		(1 << 2)	/* Dcache enable		*/
#define CR_SA		(1 << 3)	/* Stack Alignment Check Enable	*/
#define CR_I		(1 << 12)	/* Icache enable		*/
#define CR_WXN		(1 << 19)	/* Write Permission Imply XN	*/
#define CR_EE		(1 << 25)	/* Exception (Big) Endian	*/

/* Size of the translation table allocation (64KB). */
#define PGTABLE_SIZE	(0x10000)
/* 2MB granularity */
#define MMU_SECTION_SHIFT	21
#define MMU_SECTION_SIZE	(1 << MMU_SECTION_SHIFT)
24
25#ifndef __ASSEMBLY__
26
/* Data-cache behaviour options for a mapped region (AArch64).
 * Only "off" is defined here; the value 0x3 is presumably an attribute
 * index selecting a non-cacheable MAIR entry -- TODO confirm against
 * the MMU setup code that consumes it. */
enum dcache_option {
	DCACHE_OFF = 0x3,
};
30
/* Instruction Synchronization Barrier: flushes the pipeline so that
 * preceding context-changing operations (e.g. system-register writes)
 * are seen by subsequent instructions.  The "memory" clobber keeps the
 * compiler from reordering memory accesses across the barrier. */
#define isb()				\
	({asm volatile(			\
	"isb" : : : "memory");		\
	})

/* Wait For Interrupt: idle the core in a low-power state until an
 * interrupt or other wakeup event arrives. */
#define wfi()				\
	({asm volatile(			\
	"wfi" : : : "memory");		\
	})
40
/* Return the exception level (0..3) the CPU is currently executing at.
 * The CurrentEL system register encodes the EL in bits [3:2], hence the
 * shift by 2. */
static inline unsigned int current_el(void)
{
	unsigned int el;
	asm volatile("mrs %0, CurrentEL" : "=r" (el) : : "cc");
	return el >> 2;
}
47
48static inline unsigned int get_sctlr(void)
49{
50	unsigned int el, val;
51
52	el = current_el();
53	if (el == 1)
54		asm volatile("mrs %0, sctlr_el1" : "=r" (val) : : "cc");
55	else if (el == 2)
56		asm volatile("mrs %0, sctlr_el2" : "=r" (val) : : "cc");
57	else
58		asm volatile("mrs %0, sctlr_el3" : "=r" (val) : : "cc");
59
60	return val;
61}
62
63static inline void set_sctlr(unsigned int val)
64{
65	unsigned int el;
66
67	el = current_el();
68	if (el == 1)
69		asm volatile("msr sctlr_el1, %0" : : "r" (val) : "cc");
70	else if (el == 2)
71		asm volatile("msr sctlr_el2, %0" : : "r" (val) : "cc");
72	else
73		asm volatile("msr sctlr_el3, %0" : : "r" (val) : "cc");
74
75	asm volatile("isb");
76}
77
/* Cache/TLB maintenance primitives implemented in assembly elsewhere. */
void __asm_flush_dcache_all(void);
void __asm_invalidate_dcache_all(void);
/* Clean+invalidate dcache lines covering [start, end) by virtual address. */
void __asm_flush_dcache_range(u64 start, u64 end);
void __asm_invalidate_tlb_all(void);
void __asm_invalidate_icache_all(void);
/* Return-value convention not visible here -- confirm against the asm
 * implementation before relying on it. */
int __asm_flush_l3_cache(void);

/* Exception-level switching and SMP bring-up helpers (asm/SoC code). */
void armv8_switch_to_el2(void);
void armv8_switch_to_el1(void);
void gic_init(void);
/* Send a Software Generated Interrupt with the given SGI number. */
void gic_send_sgi(unsigned long sgino);
void wait_for_wakeup(void);
void protect_secure_region(void);
void smp_kick_all_cpus(void);

void flush_l3_cache(void);
94
95#endif	/* __ASSEMBLY__ */
96
97#else /* CONFIG_ARM64 */
98
99#define CPU_ARCH_UNKNOWN	0
100#define CPU_ARCH_ARMv3		1
101#define CPU_ARCH_ARMv4		2
102#define CPU_ARCH_ARMv4T		3
103#define CPU_ARCH_ARMv5		4
104#define CPU_ARCH_ARMv5T		5
105#define CPU_ARCH_ARMv5TE	6
106#define CPU_ARCH_ARMv5TEJ	7
107#define CPU_ARCH_ARMv6		8
108#define CPU_ARCH_ARMv7		9
109
110/*
111 * CR1 bits (CP#15 CR1)
112 */
113#define CR_M	(1 << 0)	/* MMU enable				*/
114#define CR_A	(1 << 1)	/* Alignment abort enable		*/
115#define CR_C	(1 << 2)	/* Dcache enable			*/
116#define CR_W	(1 << 3)	/* Write buffer enable			*/
117#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
118#define CR_D	(1 << 5)	/* 32-bit data address range		*/
119#define CR_L	(1 << 6)	/* Implementation defined		*/
120#define CR_B	(1 << 7)	/* Big endian				*/
121#define CR_S	(1 << 8)	/* System MMU protection		*/
122#define CR_R	(1 << 9)	/* ROM MMU protection			*/
123#define CR_F	(1 << 10)	/* Implementation defined		*/
124#define CR_Z	(1 << 11)	/* Implementation defined		*/
125#define CR_I	(1 << 12)	/* Icache enable			*/
126#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
127#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
128#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
/* NOTE(review): legacy SCTLR bits left undocumented upstream; their
 * semantics vary across architecture revisions -- confirm against the
 * ARM ARM before use. */
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
132#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
133#define CR_U	(1 << 22)	/* Unaligned access operation		*/
134#define CR_XP	(1 << 23)	/* Extended page tables			*/
135#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
136#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
137#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
138#define CR_AFE	(1 << 29)	/* Access flag enable			*/
139#define CR_TE	(1 << 30)	/* Thumb exception enable		*/
140
141#define PGTABLE_SIZE		(4096 * 4)
142
143/*
144 * This is used to ensure the compiler did actually allocate the register we
145 * asked it for some inline assembly sequences.  Apparently we can't trust
146 * the compiler from one version to another so a bit of paranoia won't hurt.
147 * This string is meant to be concatenated with the inline asm string and
148 * will cause compilation to stop on mismatch.
149 * (for details, see gcc PR 15089)
150 */
151#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
152
153#ifndef __ASSEMBLY__
154
155/**
156 * save_boot_params() - Save boot parameters before starting reset sequence
157 *
 * If you provide this function it will be called immediately after U-Boot
 * starts, both for SPL and U-Boot proper.
159 * both for SPL and U-Boot proper.
160 *
161 * All registers are unchanged from U-Boot entry. No registers need be
162 * preserved.
163 *
164 * This is not a normal C function. There is no stack. Return by branching to
165 * save_boot_params_ret.
166 *
167 * void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3);
168 */
169
170/**
171 * save_boot_params_ret() - Return from save_boot_params()
172 *
173 * If you provide save_boot_params(), then you should jump back to this
174 * function when done. Try to preserve all registers.
175 *
176 * If your implementation of save_boot_params() is in C then it is acceptable
177 * to simply call save_boot_params_ret() at the end of your function. Since
178 * there is no link register set up, you cannot just exit the function. U-Boot
179 * will return to the (initialised) value of lr, and likely crash/hang.
180 *
181 * If your implementation of save_boot_params() is in assembler then you
182 * should use 'b' or 'bx' to return to save_boot_params_ret.
183 */
184void save_boot_params_ret(void);
185
/*
 * No-operation: a harmless register move consuming one instruction slot.
 * The expansion deliberately has NO trailing semicolon -- callers supply
 * it, so "if (x) nop(); else ..." parses correctly (the original macro's
 * embedded ';' produced a second empty statement there).
 */
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t")

#ifdef CONFIG_ARCH_ARM_V7A
/* Wait For Interrupt: idle the core until an interrupt arrives. */
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
#else
/* WFI is only emitted for ARMv7-A; expands to nothing otherwise. */
#define wfi()
#endif
193
/* Read the ARMv7 System Control Register (CP15 c1, SCTLR). */
static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm volatile("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}
200
/* Write the System Control Register, then synchronize with an ISB so
 * the new MMU/cache settings apply before the next instruction. */
static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}
207
208static inline unsigned int get_dacr(void)
209{
210	unsigned int val;
211	asm("mrc p15, 0, %0, c3, c0, 0	@ get DACR" : "=r" (val) : : "cc");
212	return val;
213}
214
/* Write the Domain Access Control Register (CP15 c3), then ISB so the
 * new domain permissions take effect before the next instruction. */
static inline void set_dacr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c3, c0, 0	@ set DACR"
	  : : "r" (val) : "cc");
	isb();
}
221
222#ifdef CONFIG_ARCH_ARM_V7A
/* Short-Descriptor Translation Table Level 1 Bits */
#define TTB_SECT_NS_MASK	(1 << 19)
#define TTB_SECT_NG_MASK	(1 << 17)
#define TTB_SECT_S_MASK		(1 << 16)
/* Note: TTB AP bits are set elsewhere */
/* Parenthesize the macro argument: a compound argument such as "a | b"
 * would otherwise bind wrongly against the '&' ('&' has higher
 * precedence than '|'). */
#define TTB_SECT_TEX(x)		(((x) & 0x7) << 12)
#define TTB_SECT_DOMAIN(x)	(((x) & 0xf) << 5)
#define TTB_SECT_XN_MASK	(1 << 4)
#define TTB_SECT_C_MASK		(1 << 3)
#define TTB_SECT_B_MASK		(1 << 2)
#define TTB_SECT			(2 << 0)
234
235/* options available for data cache on each page */
/* options available for data cache on each page */
enum dcache_option {
	/* Each value is a complete level-1 section descriptor: shareable,
	 * domain 0, execute-never; cacheability bits added incrementally. */
	DCACHE_OFF = TTB_SECT_S_MASK | TTB_SECT_DOMAIN(0) |
					TTB_SECT_XN_MASK | TTB_SECT,
	DCACHE_WRITETHROUGH = DCACHE_OFF | TTB_SECT_C_MASK,	/* + C bit */
	DCACHE_WRITEBACK = DCACHE_WRITETHROUGH | TTB_SECT_B_MASK,	/* + B bit */
	DCACHE_WRITEALLOC = DCACHE_WRITEBACK | TTB_SECT_TEX(1),	/* + TEX[0] */
};
243
244#else  /* CONFIG_ARCH_ARM_V7A */
245/* options available for data cache on each page */
/* options available for data cache on each page */
enum dcache_option {
	/* Raw level-1 section descriptor values for pre-v7 cores; the
	 * cacheability (C/B) bits are encoded directly in the constants. */
	DCACHE_OFF = 0x12,
	DCACHE_WRITETHROUGH = 0x1a,
	DCACHE_WRITEBACK = 0x1e,
	DCACHE_WRITEALLOC = 0x16,
};
252#endif
253
254/* Size of an MMU section */
/* Size of an MMU section */
enum {
	/* 1MB sections (short-descriptor translation table format) */
	MMU_SECTION_SHIFT	= 20,
	MMU_SECTION_SIZE	= 1 << MMU_SECTION_SHIFT,
};
259
260#ifdef CONFIG_ARCH_ARM_V7A
261/* TTBR0 bits */
262#define TTBR0_BASE_ADDR_MASK	0xFFFFC000
263#define TTBR0_RGN_NC			(0 << 3)
264#define TTBR0_RGN_WBWA			(1 << 3)
265#define TTBR0_RGN_WT			(2 << 3)
266#define TTBR0_RGN_WB			(3 << 3)
/* TTBR0[6] is IRGN[0] and TTBR0[0] is IRGN[1] */
268#define TTBR0_IRGN_NC			(0 << 0 | 0 << 6)
269#define TTBR0_IRGN_WBWA			(0 << 0 | 1 << 6)
270#define TTBR0_IRGN_WT			(1 << 0 | 0 << 6)
271#define TTBR0_IRGN_WB			(1 << 0 | 1 << 6)
272#endif
273
274/**
275 * Register an update to the page tables, and flush the TLB
276 *
277 * \param start		start address of update in page table
278 * \param stop		stop address of update in page table
279 */
280void mmu_page_table_flush(unsigned long start, unsigned long stop);
281
282#endif /* __ASSEMBLY__ */
283
284#define arch_align_stack(x) (x)
285
286
287#endif /* CONFIG_ARM64 */
288
289#ifndef __ASSEMBLY__
290/**
291 * Change the cache settings for a region.
292 *
293 * \param start		start address of memory region to change
294 * \param size		size of memory region to change
295 * \param option	dcache option to select
296 */
297void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
298				     enum dcache_option option);
299
300#ifdef CONFIG_SYS_NONCACHED_MEMORY
301void noncached_init(void);
302phys_addr_t noncached_alloc(size_t size, size_t align);
303#endif /* CONFIG_SYS_NONCACHED_MEMORY */
304
305#endif /* __ASSEMBLY__ */
306
307#endif
308