1/*
2 * Copyright 2014, General Dynamics C4 Systems
3 *
4 * This software may be distributed and modified according to the terms of
5 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
6 * See "LICENSE_GPLv2.txt" for details.
7 *
8 * @TAG(GD_GPL)
9 */
10
11#include <api/types.h>
12#include <arch/machine.h>
13#include <arch/machine/hardware.h>
14#include <arch/machine/l2c_310.h>
15
16#define LINE_START(a) ROUND_DOWN(a, L1_CACHE_LINE_SIZE_BITS)
17#define LINE_INDEX(a) (LINE_START(a)>>L1_CACHE_LINE_SIZE_BITS)
18#define L1_CACHE_LINE_SIZE BIT(L1_CACHE_LINE_SIZE_BITS)
19
20static void
21cleanCacheRange_PoC(vptr_t start, vptr_t end, paddr_t pstart)
22{
23    vptr_t line;
24    word_t index;
25
26    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
27        line = index << L1_CACHE_LINE_SIZE_BITS;
28        cleanByVA(line, pstart + (line - start));
29    }
30}
31
/*
 * Clean and invalidate the virtual address range [start, end] all the
 * way out to RAM, covering both the L1 data cache and the outer L2
 * (l2c_310) cache. pstart is the physical address corresponding to
 * start. The L1/L2/L1 ordering below is deliberate — see the comments
 * on each step.
 */
void
cleanInvalidateCacheRange_RAM(vptr_t start, vptr_t end, paddr_t pstart)
{
    vptr_t line;
    word_t index;
    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end, id)" */

    /* First clean the L1 range */
    cleanCacheRange_PoC(start, end, pstart);

    /* ensure operation completes and visible in L2 */
    dsb();

    /* Now clean and invalidate the L2 range */
    plat_cleanInvalidateL2Range(pstart, pstart + (end - start));

    /* Finally clean and invalidate the L1 range. The extra clean is only strictly necessary
     * in a multiprocessor environment to prevent a write being lost if another core is
     * attempting a store at the same time. As the range should already be clean asking
     * it to clean again should not affect performance */
    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
        line = index << L1_CACHE_LINE_SIZE_BITS;
        cleanInvalByVA(line, pstart + (line - start));
    }
    /* ensure clean and invalidate complete */
    dsb();
}
61
/*
 * Clean (write back, without invalidating) the virtual address range
 * [start, end] out to RAM: dirty L1 lines are cleaned to L2 first, and
 * the corresponding L2 range is then cleaned to memory. pstart is the
 * physical address corresponding to start.
 */
void
cleanCacheRange_RAM(vptr_t start, vptr_t end, paddr_t pstart)
{
    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end
        \<and> \<acute>pstart <= \<acute>pstart + (\<acute>end - \<acute>start), id)" */

    /* clean l1 to l2 */
    cleanCacheRange_PoC(start, end, pstart);

    /* ensure cache operation completes before cleaning l2 */
    dsb();

    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end
        \<and> \<acute>pstart <= \<acute>pstart + (\<acute>end - \<acute>start), id)" */

    /* now clean l2 to RAM */
    plat_cleanL2Range(pstart, pstart + (end - start));
}
84
/*
 * Clean every L1 data-cache line covering the virtual range
 * [start, end] to the Point of Unification (where the data and
 * instruction sides become coherent). pstart is the physical address
 * corresponding to start.
 */
void
cleanCacheRange_PoU(vptr_t start, vptr_t end, paddr_t pstart)
{
    vptr_t line;
    word_t index;

    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end
        \<and> \<acute>pstart <= \<acute>pstart + (\<acute>end - \<acute>start), id)" */

    /* visit each cache line in [start, end]; the loop is inclusive of
     * the line containing end */
    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
        line = index << L1_CACHE_LINE_SIZE_BITS;
        cleanByVA_PoU(line, pstart + (line - start));
    }
}
101
/*
 * Invalidate (discard, without writing back) the virtual range
 * [start, end] from both L1 and L2. end is treated as inclusive: the
 * alignment checks below test start and end + 1 against line
 * boundaries. pstart is the physical address corresponding to start.
 */
void
invalidateCacheRange_RAM(vptr_t start, vptr_t end, paddr_t pstart)
{
    vptr_t line;
    word_t index;

    /* If the start and end are not aligned to a cache line boundary
     * then we need to clean the line first to prevent invalidating
     * bytes we didn't mean to. Calling the functions in this way is
     * not the most efficient method, but we assume the user will
     * rarely be this silly */
    if (start != LINE_START(start)) {
        cleanCacheRange_RAM(start, start, pstart);
    }
    if (end + 1 != LINE_START(end + 1)) {
        /* end is not the last byte of its line: clean that final,
         * partially-covered line before invalidating */
        line = LINE_START(end);
        cleanCacheRange_RAM(line, line, pstart + (line - start));
    }

    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end
        \<and> \<acute>pstart <= \<acute>pstart + (\<acute>end - \<acute>start), id)" */

    /* Invalidate L2 range. Invalidating the L2 before the L1 is the order
     * given in the l2c_310 manual, as an L1 line might be allocated from the L2
     * before the L2 can be invalidated. */
    plat_invalidateL2Range(pstart, pstart + (end - start));

    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end
        \<and> \<acute>pstart <= \<acute>pstart + (\<acute>end - \<acute>start), id)" */

    /* Now invalidate L1 range */
    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
        line = index << L1_CACHE_LINE_SIZE_BITS;
        invalidateByVA(line, pstart + (line - start));
    }
    /* Ensure invalidate completes */
    dsb();
}
144
145void
146invalidateCacheRange_I(vptr_t start, vptr_t end, paddr_t pstart)
147{
148    vptr_t line;
149    word_t index;
150
151    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
152        line = index << L1_CACHE_LINE_SIZE_BITS;
153        invalidateByVA_I(line, pstart + (line - start));
154    }
155}
156
157void
158branchFlushRange(vptr_t start, vptr_t end, paddr_t pstart)
159{
160    vptr_t line;
161    word_t index;
162
163    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
164        line = index << L1_CACHE_LINE_SIZE_BITS;
165        branchFlush(line, pstart + (line - start));
166    }
167}
168
/*
 * Clean the whole L1 data cache to the Point of Unification and
 * invalidate the instruction cache, with barriers ordering each step.
 */
void
cleanCaches_PoU(void)
{
    /* complete outstanding memory accesses before the cache ops */
    dsb();
    /* write back all dirty data-cache lines to the PoU */
    clean_D_PoU();
    /* make the clean visible before touching the instruction side */
    dsb();
    /* discard stale instruction-cache contents */
    invalidate_I_PoU();
    /* ensure the invalidate has completed */
    dsb();
}
178
/*
 * Clean and invalidate the entire L1 data cache to the Point of
 * Coherency, then invalidate the instruction cache, with barriers
 * ordering each step.
 */
void
cleanInvalidateL1Caches(void)
{
    /* complete outstanding memory accesses before the cache ops */
    dsb();
    /* clean and invalidate the whole data cache to the PoC */
    cleanInvalidate_D_PoC();
    /* make that visible before touching the instruction side */
    dsb();
    /* discard stale instruction-cache contents */
    invalidate_I_PoU();
    /* ensure the invalidate has completed */
    dsb();
}
188
/*
 * Fully flush all cache levels: clean L1 to the PoU, clean and
 * invalidate the platform L2, then clean and invalidate L1 to the PoC,
 * finishing with an isb so subsequent instruction fetches see the
 * result.
 */
void
arch_clean_invalidate_caches(void)
{
    cleanCaches_PoU();
    plat_cleanInvalidateCache();
    cleanInvalidateL1Caches();
    isb();
}
197