// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <hang.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 *  With 4k page granule, a virtual address is split into 4 lookup parts
 *  spanning 9 bits each:
 *
 *    _______________________________________________
 *   |       |       |       |       |       |       |
 *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *   |_______|_______|_______|_______|_______|_______|
 *     63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 *    Lv0: FF8000000000       --
 *    Lv1:   7FC0000000       1G
 *    Lv2:     3FE00000       2M
 *    Lv3:       1FF000       4K
 *    off:          FFF
 */
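
/*
 * Illustrative example (not from the original source): with the 4k granule
 * above, VA 0x80400000 decomposes as
 *
 *    Lv0 index = (0x80400000 >> 39) & 0x1FF = 0
 *    Lv1 index = (0x80400000 >> 30) & 0x1FF = 2
 *    Lv2 index = (0x80400000 >> 21) & 0x1FF = 2
 *    Lv3 index = (0x80400000 >> 12) & 0x1FF = 0
 *    offset    =  0x80400000 & 0xFFF        = 0
 *
 * i.e. entry 2 of the Lv0 table's Lv1 table, then entry 2 of the Lv2 table,
 * which is where a 2M block mapping for this address would live.
 */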

static int get_effective_el(void)
{
	int el = current_el();

	if (el == 2) {
		u64 hcr_el2;

		/*
		 * If we are using the EL2&0 translation regime, the TCR_EL2
		 * looks like the EL1 version, even though we are in EL2.
		 */
		__asm__ ("mrs %0, HCR_EL2\n" : "=r" (hcr_el2));
		if (hcr_el2 & BIT(HCR_EL2_E2H_BIT))
			return 1;
	}

	return el;
}

u64 get_tcr(u64 *pips, u64 *pva_bits)
{
	int el = get_effective_el();
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}

	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}
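
/*
 * Illustrative example (not from the original source): if the largest
 * mem_map entry ends at 6 GiB (0x180000000), the ladder above selects
 * ips = 1 (36-bit PA) and va_bits = 36, so the returned TCR requests a
 * 36-bit VA space (TCR_T0SZ(36)) with a 4k granule, write-back
 * write-allocate cacheable page-table walks and inner-shareable attributes.
 */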

#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* The page offset is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
}
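
/*
 * Illustrative values (not from the original source): level2shift(0) = 39,
 * level2shift(1) = 30, level2shift(2) = 21, level2shift(3) = 12, so a single
 * entry covers 512G, 1G, 2M and 4K of address space respectively.
 */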

static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64 *)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is not a table (either invalid or a block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level */
		pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}
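
/*
 * Illustrative use (not from the original source): with the tables built by
 * setup_pgtables(), find_pte(0x80400000, 2) walks the Lv0/Lv1 table
 * descriptors and returns a pointer to the level 2 entry covering
 * 0x80400000; it returns NULL if the walk hits a block or invalid entry
 * before reaching the requested level.
 */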

#ifdef CONFIG_CMO_BY_VA_ONLY
static void __cmo_on_leaves(void (*cmo_fn)(unsigned long, unsigned long),
			    u64 pte, int level, u64 base)
{
	u64 *ptep;
	int i;

	ptep = (u64 *)(pte & GENMASK_ULL(47, PAGE_SHIFT));
	for (i = 0; i < PAGE_SIZE / sizeof(u64); i++) {
		u64 end, va = base + i * BIT(level2shift(level));
		u64 type, attrs;

		pte = ptep[i];
		type = pte & PTE_TYPE_MASK;
		attrs = pte & PMD_ATTRINDX_MASK;
		debug("PTE %llx at level %d VA %llx\n", pte, level, va);

		/* Not valid? next! */
		if (!(type & PTE_TYPE_VALID))
			continue;

		/* Not a leaf? Recurse on the next level */
		if (!(type == PTE_TYPE_BLOCK ||
		      (level == 3 && type == PTE_TYPE_PAGE))) {
			__cmo_on_leaves(cmo_fn, pte, level + 1, va);
			continue;
		}

		/*
		 * From this point, this must be a leaf.
		 *
		 * Start excluding non-memory mappings
		 */
		if (attrs != PTE_BLOCK_MEMTYPE(MT_NORMAL) &&
		    attrs != PTE_BLOCK_MEMTYPE(MT_NORMAL_NC))
			continue;

		end = va + BIT(level2shift(level)) - 1;

		/* No intersection with RAM? */
		if (end < gd->ram_base ||
		    va >= (gd->ram_base + gd->ram_size))
			continue;

		/*
		 * OK, we have a partial RAM mapping. However, this
		 * can cover *more* than the RAM. Yes, u-boot is
		 * *that* braindead. Compute the intersection we care
		 * about, and not a byte more.
		 */
		va = max(va, (u64)gd->ram_base);
		end = min(end, gd->ram_base + gd->ram_size);

		debug("Flush PTE %llx at level %d: %llx-%llx\n",
		      pte, level, va, end);
		cmo_fn(va, end);
	}
}
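
/*
 * Illustrative example (not from the original source): when called on a
 * level 2 table, entry i covers base + i * 2M. A valid 2M block entry with
 * MT_NORMAL or MT_NORMAL_NC attributes that overlaps
 * [ram_base, ram_base + ram_size) gets cmo_fn() applied to exactly that
 * overlap; device mappings and unmapped entries are skipped.
 */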

static void apply_cmo_to_mappings(void (*cmo_fn)(unsigned long, unsigned long))
{
	u64 va_bits;
	int sl = 0;

	if (!gd->arch.tlb_addr)
		return;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		sl = 1;

	__cmo_on_leaves(cmo_fn, gd->arch.tlb_addr, sl, 0);
}
#else
static inline void apply_cmo_to_mappings(void *dummy) {}
#endif

/* Creates and returns a new full table (512 entries) */
static u64 *create_table(void)
{
	u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
			gd->arch.tlb_fillptr - gd->arch.tlb_addr,
			gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}
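
/*
 * Illustrative example (not from the original source): splitting a 1G block
 * found at level 1 creates a level 2 table whose 512 entries each inherit
 * the old attributes and cover 2M, entry i carrying the old output address
 * ORed with i << 21; splitting a 2M block at level 2 likewise yields 512
 * 4K page entries, which additionally carry PTE_TYPE_TABLE (the level 3
 * page descriptor encoding).
 */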

static void map_range(u64 virt, u64 phys, u64 size, int level,
		      u64 *table, u64 attrs)
{
	u64 map_size = BIT_ULL(level2shift(level));
	int i, idx;

	idx = (virt >> level2shift(level)) & (MAX_PTE_ENTRIES - 1);
	for (i = idx; size; i++) {
		u64 next_size, *next_table;

		if (level >= 1 &&
		    size >= map_size && !(virt & (map_size - 1))) {
			if (level == 3)
				table[i] = phys | attrs | PTE_TYPE_PAGE;
			else
				table[i] = phys | attrs;

			virt += map_size;
			phys += map_size;
			size -= map_size;

			continue;
		}

		/* Going one level down */
		if (pte_type(&table[i]) == PTE_TYPE_FAULT)
			set_pte_table(&table[i], create_table());
		else if (pte_type(&table[i]) != PTE_TYPE_TABLE)
			split_block(&table[i], level);

		next_table = (u64 *)(table[i] & GENMASK_ULL(47, PAGE_SHIFT));
		next_size = min(map_size - (virt & (map_size - 1)), size);

		map_range(virt, phys, next_size, level + 1, next_table, attrs);

		virt += next_size;
		phys += next_size;
		size -= next_size;
	}
}
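
/*
 * Illustrative example (not from the original source): mapping 3M at a
 * 2M-aligned address starting from level 1 descends into (or creates) a
 * level 2 table, writes one 2M block entry for the first 2M, then recurses
 * to level 3 for the remaining 1M, which becomes 256 individual 4K page
 * entries.
 */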

static void add_map(struct mm_region *map)
{
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 va_bits;
	int level = 0;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		level = 1;

	map_range(map->virt, map->phys, map->size, level,
		  (u64 *)gd->arch.tlb_addr, attrs);
}

static void count_range(u64 virt, u64 size, int level, int *cntp)
{
	u64 map_size = BIT_ULL(level2shift(level));
	int i, idx;

	idx = (virt >> level2shift(level)) & (MAX_PTE_ENTRIES - 1);
	for (i = idx; size; i++) {
		u64 next_size;

		if (level >= 1 &&
		    size >= map_size && !(virt & (map_size - 1))) {
			virt += map_size;
			size -= map_size;

			continue;
		}

		/* Going one level down */
		(*cntp)++;
		next_size = min(map_size - (virt & (map_size - 1)), size);

		count_range(virt, next_size, level + 1, cntp);

		virt += next_size;
		size -= next_size;
	}
}

static int count_ranges(void)
{
	int i, count = 0, level = 0;
	u64 va_bits;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		level = 1;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		count_range(mem_map[i].virt, mem_map[i].size, level, &count);

	return count;
}

/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_ranges();

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
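
/*
 * Illustrative arithmetic (not from the original source): each table is
 * 512 * 8 = 4 KiB, so a memory map for which count_ranges() reports 10
 * tables reserves 10 * 4 KiB * 2 + 4 * 4 KiB = 96 KiB of page-table memory
 * (normal tables, the emergency copy, and the split headroom).
 */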

void setup_pgtables(void)
{
	int i;

	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not set up.");

	/*
	 * Allocate the first level we're on with invalid entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
}

static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
}
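
/*
 * Resulting layout (illustrative, not from the original source): the pool at
 * tlb_addr first receives the primary tables, then an identical emergency
 * set immediately after; tlb_emerg records the base of that second set so
 * mmu_set_region_dcache_behaviour() can temporarily run from it while the
 * primary tables are being modified.
 */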

/* To activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* Enable the MMU */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
#ifndef CONFIG_CMO_BY_VA_ONLY
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
#else
	apply_cmo_to_mappings(invalidate_dcache_range);
#endif
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_dcache returns the status of the timeout.
 */
inline void flush_dcache_all(void)
{
#ifndef CONFIG_CMO_BY_VA_ONLY
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
#else
	apply_cmo_to_mappings(flush_dcache_range);
#endif
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */

void dcache_enable(void)
{
	/* The data cache is not active unless the MMU is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	/* Set up page tables only once (this is also done by mmu_setup()) */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* If the cache isn't enabled there is no need to disable it */
	if (!(sctlr & CR_C))
		return;

	if (IS_ENABLED(CONFIG_CMO_BY_VA_ONLY)) {
		/*
		 * When invalidating by VA, do it *before* turning the MMU
		 * off, so that at least our stack is coherent.
		 */
		flush_dcache_all();
	}

	set_sctlr(sctlr & ~(CR_C|CR_M));

	if (!IS_ENABLED(CONFIG_CMO_BY_VA_ONLY))
		flush_dcache_all();

	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}

/* The flag indicates whether attrs carries more than just d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		if (flag) {
			*pte &= ~PMD_ATTRMASK;
			*pte |= attrs & PMD_ATTRMASK;
		} else {
			*pte &= ~PMD_ATTRINDX_MASK;
			*pte |= attrs & PMD_ATTRINDX_MASK;
		}
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split the block into a table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}
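
/*
 * Illustrative contract (not from the original source): callers loop over
 * levels 1..3. set_one_region(0x80200000, 0x200000, attrs, false, 2) finds
 * the level 2 entry, rewrites its memory-type attribute bits in place and
 * returns 0x200000 (the bytes consumed), while an unaligned request returns
 * 0 after (if necessary) splitting the block, so the caller retries one
 * level further down.
 */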

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option >> 2);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not set up.");

	/*
	 * We cannot modify the page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back.
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			/* Set d-cache attributes only */
			r = set_one_region(start, size, attrs, false, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}
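
/*
 * Illustrative use (not from the original source; fb_base is a hypothetical
 * address): a driver that wants a 2 MiB framebuffer mapped uncached could
 * call
 *
 *	mmu_set_region_dcache_behaviour(fb_base, SZ_2M, DCACHE_OFF);
 *
 * provided fb_base and the size are at least 4K aligned, so the level loop
 * above can always find a granule that fits.
 */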

/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make: the target region is marked invalid
 * while it is being changed.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	start = addr;
	size = siz;
	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to "invalid".
	 */
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to fault */
			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
					   level);
			if (r) {
				/* PTE successfully invalidated */
				size -= r;
				start += r;
				break;
			}
		}
	}

	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	start = addr;
	size = siz;
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to new attributes */
			r = set_one_region(start, size, attrs, true, level);
			if (r) {
				/* PTE successfully updated */
				size -= r;
				start += r;
				break;
			}
		}
	}
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
}
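
/*
 * Illustrative use (not from the original source; region_base/region_size
 * are hypothetical): marking an already-mapped normal-memory region
 * non-executable could look like
 *
 *	mmu_change_region_attr(region_base, region_size,
 *			       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *			       PTE_BLOCK_INNER_SHARE | PTE_TYPE_BLOCK |
 *			       PTE_BLOCK_AF | PTE_BLOCK_PXN | PTE_BLOCK_UXN);
 *
 * Because attrs is applied through PMD_ATTRMASK (flag == true), it carries
 * the PXN/UXN/memory type/valid bits, not just the d-cache attribute index.
 */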

#else	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * running, however, really wants to have dcache and the MMU active. Check
 * that everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

int mmu_status(void)
{
	return (get_sctlr() & CR_M) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
}

#else	/* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

int mmu_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif	/* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

/*
 * Enable dCache & iCache; whether the caches are actually enabled depends
 * on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF.
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}