/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <api/types.h>
#include <arch/machine.h>
#include <arch/machine/hardware.h>
#include <arch/machine/l2c_310.h>

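/* Cache-line arithmetic on virtual addresses: LINE_START rounds an address
 * down to the start of its L1 cache line, and LINE_INDEX turns it into a
 * line number, so a loop over [LINE_INDEX(start), LINE_INDEX(end)] visits
 * every line that overlaps a range exactly once. */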
#define LINE_START(a) ROUND_DOWN(a, L1_CACHE_LINE_SIZE_BITS)
#define LINE_INDEX(a) (LINE_START(a)>>L1_CACHE_LINE_SIZE_BITS)

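/* Clean the D-cache over [start, end] by VA to the Point of Coherency,
 * writing any dirty L1 lines back far enough to be visible to the L2 and
 * memory. pstart is the physical address of start; each line's physical
 * address is derived from its offset, so the range is assumed to be
 * physically contiguous. */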
static void cleanCacheRange_PoC(vptr_t start, vptr_t end, paddr_t pstart)
{
    vptr_t line;
    word_t index;

    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
        line = index << L1_CACHE_LINE_SIZE_BITS;
        cleanByVA(line, pstart + (line - start));
    }
}

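/* Clean and invalidate [start, end] all the way to RAM: clean the L1 so
 * the data reaches the L2, clean and invalidate the outer L2, then clean
 * and invalidate the L1. Afterwards the range is resident in no cache
 * level and the next access is served from memory. */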
void cleanInvalidateCacheRange_RAM(vptr_t start, vptr_t end, paddr_t pstart)
{
    vptr_t line;
    word_t index;
    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end, id)" */

    /* First clean the L1 range */
    cleanCacheRange_PoC(start, end, pstart);

    /* Ensure the clean completes and is visible to the L2 */
    dsb();

    /* Now clean and invalidate the L2 range */
    plat_cleanInvalidateL2Range(pstart, pstart + (end - start));

    /* Finally clean and invalidate the L1 range. The extra clean is only
     * strictly necessary in a multiprocessor environment, to prevent a write
     * being lost if another core is attempting a store at the same time. As
     * the range should already be clean, asking it to clean again should not
     * affect performance. */
    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
        line = index << L1_CACHE_LINE_SIZE_BITS;
        cleanInvalByVA(line, pstart + (line - start));
    }
    /* Ensure the clean and invalidate complete */
    dsb();
}

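/* Clean [start, end] to RAM without invalidating: dirty lines are written
 * back through the L1 and L2, but the data stays cached for later reads. */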
void cleanCacheRange_RAM(vptr_t start, vptr_t end, paddr_t pstart)
{
    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end
        \<and> \<acute>pstart <= \<acute>pstart + (\<acute>end - \<acute>start), id)" */

    /* Clean L1 to L2 */
    cleanCacheRange_PoC(start, end, pstart);

    /* Ensure the cache operation completes before cleaning L2 */
    dsb();

    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end
        \<and> \<acute>pstart <= \<acute>pstart + (\<acute>end - \<acute>start), id)" */

    /* Now clean L2 to RAM */
    plat_cleanL2Range(pstart, pstart + (end - start));
}

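/* Clean the D-cache over [start, end] to the Point of Unification, the
 * level at which the instruction and data sides see the same copy of
 * memory; this is the clean required before newly written code can be
 * fetched for execution. */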
void cleanCacheRange_PoU(vptr_t start, vptr_t end, paddr_t pstart)
{
    vptr_t line;
    word_t index;

    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end
        \<and> \<acute>pstart <= \<acute>pstart + (\<acute>end - \<acute>start), id)" */

    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
        line = index << L1_CACHE_LINE_SIZE_BITS;
        cleanByVA_PoU(line, pstart + (line - start));
    }
}

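/* Invalidate [start, end] in both cache levels so that the next access is
 * read from RAM. Partial lines at either edge are cleaned first, because
 * invalidating a whole line would otherwise discard unrelated data that
 * shares the line with the edge of the range. */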
void invalidateCacheRange_RAM(vptr_t start, vptr_t end, paddr_t pstart)
{
    vptr_t line;
    word_t index;

    /* If the start and end are not aligned to a cache line boundary
     * then we need to clean the line first to prevent invalidating
     * bytes we didn't mean to. Calling the functions in this way is
     * not the most efficient method, but we assume the user will
     * rarely be this silly */
    if (start != LINE_START(start)) {
        cleanCacheRange_RAM(start, start, pstart);
    }
    if (end + 1 != LINE_START(end + 1)) {
        line = LINE_START(end);
        cleanCacheRange_RAM(line, line, pstart + (line - start));
    }

    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end
        \<and> \<acute>pstart <= \<acute>pstart + (\<acute>end - \<acute>start), id)" */

    /* Invalidate L2 range. Invalidating the L2 before the L1 is the order
     * given in the l2c_310 manual, as an L1 line might be allocated from the L2
     * before the L2 can be invalidated. */
    plat_invalidateL2Range(pstart, pstart + (end - start));

    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end
        \<and> \<acute>pstart <= \<acute>pstart + (\<acute>end - \<acute>start), id)" */

    /* Now invalidate L1 range */
    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
        line = index << L1_CACHE_LINE_SIZE_BITS;
        invalidateByVA(line, pstart + (line - start));
    }
    /* Ensure invalidate completes */
    dsb();
}

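/* Invalidate the instruction cache by VA over [start, end], forcing stale
 * instructions in the range to be refetched. */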
void invalidateCacheRange_I(vptr_t start, vptr_t end, paddr_t pstart)
{
    vptr_t line;
    word_t index;

    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
        line = index << L1_CACHE_LINE_SIZE_BITS;
        invalidateByVA_I(line, pstart + (line - start));
    }
}

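/* Flush branch predictor entries by VA for each cache line in
 * [start, end]. */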
void branchFlushRange(vptr_t start, vptr_t end, paddr_t pstart)
{
    vptr_t line;
    word_t index;

    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
        line = index << L1_CACHE_LINE_SIZE_BITS;
        branchFlush(line, pstart + (line - start));
    }
}

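/* Clean the entire D-cache to the PoU and invalidate the entire I-cache,
 * with a barrier after each step; this makes newly written code visible
 * to instruction fetch. */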
void cleanCaches_PoU(void)
{
    dsb();
    clean_D_PoU();
    dsb();
    invalidate_I_PoU();
    dsb();
}

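/* Clean and invalidate the whole D-cache to the PoC and invalidate the
 * I-cache, leaving the L1 caches empty of valid data. */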
void cleanInvalidateL1Caches(void)
{
    dsb();
    cleanInvalidate_D_PoC();
    dsb();
    invalidate_I_PoU();
    dsb();
}

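/* Fully flush the cache hierarchy: write L1 contents out towards the L2,
 * clean and invalidate the outer L2, then clean and invalidate the L1
 * itself; the final isb() resynchronises the pipeline. */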
void arch_clean_invalidate_caches(void)
{
    cleanCaches_PoU();
    plat_cleanInvalidateL2Cache();
    cleanInvalidateL1Caches();
    isb();
}

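/* Clean and invalidate only the L1 caches selected by 'type': bit 1
 * selects the D-cache and bit 0 the I-cache, matching the BIT() tests
 * below. */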
void arch_clean_invalidate_L1_caches(word_t type)
{
    dsb();
    if (type & BIT(1)) {
        cleanInvalidate_L1D();
        dsb();
    }
    if (type & BIT(0)) {
        invalidate_I_PoU();
        dsb();
        isb();
    }
}
