/*-
 * Copyright (c) 2010 Per Odlund <per.odlund@armagedon.se>
 * Copyright (C) 2011 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
31 */ 32 33#include <machine/asm.h> 34__FBSDID("$FreeBSD: stable/11/sys/arm/arm/cpufunc_asm_armv7.S 307344 2016-10-15 08:27:54Z mmel $"); 35 36#include <machine/sysreg.h> 37 38 .cpu cortex-a8 39 40.Lcoherency_level: 41 .word _C_LABEL(arm_cache_loc) 42.Lcache_type: 43 .word _C_LABEL(arm_cache_type) 44.Larmv7_dcache_line_size: 45 .word _C_LABEL(arm_dcache_min_line_size) 46.Larmv7_icache_line_size: 47 .word _C_LABEL(arm_icache_min_line_size) 48.Larmv7_idcache_line_size: 49 .word _C_LABEL(arm_idcache_min_line_size) 50.Lway_mask: 51 .word 0x3ff 52.Lmax_index: 53 .word 0x7fff 54.Lpage_mask: 55 .word 0xfff 56 57#define PT_NOS (1 << 5) 58#define PT_S (1 << 1) 59#define PT_INNER_NC 0 60#define PT_INNER_WT (1 << 0) 61#define PT_INNER_WB ((1 << 0) | (1 << 6)) 62#define PT_INNER_WBWA (1 << 6) 63#define PT_OUTER_NC 0 64#define PT_OUTER_WT (2 << 3) 65#define PT_OUTER_WB (3 << 3) 66#define PT_OUTER_WBWA (1 << 3) 67 68#ifdef SMP 69#define PT_ATTR (PT_S|PT_INNER_WBWA|PT_OUTER_WBWA|PT_NOS) 70#else 71#define PT_ATTR (PT_INNER_WBWA|PT_OUTER_WBWA) 72#endif 73 74ENTRY(armv7_setttb) 75 dsb 76 orr r0, r0, #PT_ATTR 77 mcr CP15_TTBR0(r0) 78 isb 79#ifdef SMP 80 mcr CP15_TLBIALLIS 81#else 82 mcr CP15_TLBIALL 83#endif 84 dsb 85 isb 86 RET 87END(armv7_setttb) 88 89ENTRY(armv7_tlb_flushID) 90 dsb 91#ifdef SMP 92 mcr CP15_TLBIALLIS 93 mcr CP15_BPIALLIS 94#else 95 mcr CP15_TLBIALL 96 mcr CP15_BPIALL 97#endif 98 dsb 99 isb 100 mov pc, lr 101END(armv7_tlb_flushID) 102 103ENTRY(armv7_tlb_flushID_SE) 104 ldr r1, .Lpage_mask 105 bic r0, r0, r1 106#ifdef SMP 107 mcr CP15_TLBIMVAAIS(r0) 108 mcr CP15_BPIALLIS 109#else 110 mcr CP15_TLBIMVA(r0) 111 mcr CP15_BPIALL 112#endif 113 dsb 114 isb 115 mov pc, lr 116END(armv7_tlb_flushID_SE) 117 118/* Based on algorithm from ARM Architecture Reference Manual */ 119ENTRY(armv7_dcache_wbinv_all) 120 stmdb sp!, {r4, r5, r6, r7, r8, r9} 121 122 /* Get cache level */ 123 ldr r0, .Lcoherency_level 124 ldr r3, [r0] 125 cmp r3, #0 126 beq Finished 127 /* For each cache 
level */ 128 mov r8, #0 129Loop1: 130 /* Get cache type for given level */ 131 mov r2, r8, lsl #2 132 add r2, r2, r2 133 ldr r0, .Lcache_type 134 ldr r1, [r0, r2] 135 136 /* Get line size */ 137 and r2, r1, #7 138 add r2, r2, #4 139 140 /* Get number of ways */ 141 ldr r4, .Lway_mask 142 ands r4, r4, r1, lsr #3 143 clz r5, r4 144 145 /* Get max index */ 146 ldr r7, .Lmax_index 147 ands r7, r7, r1, lsr #13 148Loop2: 149 mov r9, r4 150Loop3: 151 mov r6, r8, lsl #1 152 orr r6, r6, r9, lsl r5 153 orr r6, r6, r7, lsl r2 154 155 /* Clean and invalidate data cache by way/index */ 156 mcr CP15_DCCISW(r6) 157 subs r9, r9, #1 158 bge Loop3 159 subs r7, r7, #1 160 bge Loop2 161Skip: 162 add r8, r8, #1 163 cmp r3, r8 164 bne Loop1 165Finished: 166 dsb 167 ldmia sp!, {r4, r5, r6, r7, r8, r9} 168 RET 169END(armv7_dcache_wbinv_all) 170 171ENTRY(armv7_idcache_wbinv_all) 172 stmdb sp!, {lr} 173 bl armv7_dcache_wbinv_all 174#ifdef SMP 175 mcr CP15_ICIALLUIS 176#else 177 mcr CP15_ICIALLU 178#endif 179 dsb 180 isb 181 ldmia sp!, {lr} 182 RET 183END(armv7_idcache_wbinv_all) 184 185ENTRY(armv7_dcache_wb_range) 186 ldr ip, .Larmv7_dcache_line_size 187 ldr ip, [ip] 188 sub r3, ip, #1 189 and r2, r0, r3 190 add r1, r1, r2 191 bic r0, r0, r3 192.Larmv7_wb_next: 193 mcr CP15_DCCMVAC(r0) 194 add r0, r0, ip 195 subs r1, r1, ip 196 bhi .Larmv7_wb_next 197 dsb /* data synchronization barrier */ 198 RET 199END(armv7_dcache_wb_range) 200 201ENTRY(armv7_dcache_wbinv_range) 202 ldr ip, .Larmv7_dcache_line_size 203 ldr ip, [ip] 204 sub r3, ip, #1 205 and r2, r0, r3 206 add r1, r1, r2 207 bic r0, r0, r3 208.Larmv7_wbinv_next: 209 mcr CP15_DCCIMVAC(r0) 210 add r0, r0, ip 211 subs r1, r1, ip 212 bhi .Larmv7_wbinv_next 213 dsb /* data synchronization barrier */ 214 RET 215END(armv7_dcache_wbinv_range) 216 217/* 218 * Note, we must not invalidate everything. If the range is too big we 219 * must use wb-inv of the entire cache. 
220 */ 221ENTRY(armv7_dcache_inv_range) 222 ldr ip, .Larmv7_dcache_line_size 223 ldr ip, [ip] 224 sub r3, ip, #1 225 and r2, r0, r3 226 add r1, r1, r2 227 bic r0, r0, r3 228.Larmv7_inv_next: 229 mcr CP15_DCIMVAC(r0) 230 add r0, r0, ip 231 subs r1, r1, ip 232 bhi .Larmv7_inv_next 233 dsb /* data synchronization barrier */ 234 RET 235END(armv7_dcache_inv_range) 236 237ENTRY(armv7_idcache_wbinv_range) 238 ldr ip, .Larmv7_idcache_line_size 239 ldr ip, [ip] 240 sub r3, ip, #1 241 and r2, r0, r3 242 add r1, r1, r2 243 bic r0, r0, r3 244.Larmv7_id_wbinv_next: 245 mcr CP15_ICIMVAU(r0) 246 mcr CP15_DCCIMVAC(r0) 247 add r0, r0, ip 248 subs r1, r1, ip 249 bhi .Larmv7_id_wbinv_next 250 dsb /* data synchronization barrier */ 251 isb /* instruction synchronization barrier */ 252 RET 253END(armv7_idcache_wbinv_range) 254 255 256ENTRY_NP(armv7_icache_sync_range) 257 ldr ip, .Larmv7_icache_line_size 258 ldr ip, [ip] 259 sub r3, ip, #1 /* Address need not be aligned, but */ 260 and r2, r0, r3 /* round length up if op spans line */ 261 add r1, r1, r2 /* boundary: len += addr & linemask; */ 262.Larmv7_sync_next: 263 mcr CP15_DCCMVAC(r0) 264 mcr CP15_ICIMVAU(r0) 265 add r0, r0, ip 266 subs r1, r1, ip 267 bhi .Larmv7_sync_next 268 dsb /* data synchronization barrier */ 269 isb /* instruction synchronization barrier */ 270 RET 271END(armv7_icache_sync_range) 272 273ENTRY(armv7_cpu_sleep) 274 dsb /* data synchronization barrier */ 275 wfi /* wait for interrupt */ 276 RET 277END(armv7_cpu_sleep) 278 279ENTRY(armv7_context_switch) 280 dsb 281 orr r0, r0, #PT_ATTR 282 283 mcr CP15_TTBR0(r0) 284 isb 285#ifdef SMP 286 mcr CP15_TLBIALLIS 287#else 288 mcr CP15_TLBIALL 289#endif 290 dsb 291 isb 292 RET 293END(armv7_context_switch) 294 295ENTRY(armv7_drain_writebuf) 296 dsb 297 RET 298END(armv7_drain_writebuf) 299 300ENTRY(armv7_auxctrl) 301 mrc CP15_ACTLR(r2) 302 bic r3, r2, r0 /* Clear bits */ 303 eor r3, r3, r1 /* XOR bits */ 304 305 teq r2, r3 306 mcrne CP15_ACTLR(r3) 307 mov r0, r2 308 RET 
309END(armv7_auxctrl) 310 311/* 312 * Invalidate all I+D+branch cache. Used by startup code, which counts 313 * on the fact that only r0-r3,ip are modified and no stack space is used. 314 */ 315ENTRY(armv7_idcache_inv_all) 316 mov r0, #0 317 mcr CP15_CSSELR(r0) @ set cache level to L1 318 mrc CP15_CCSIDR(r0) 319 320 ubfx r2, r0, #13, #15 @ get num sets - 1 from CCSIDR 321 ubfx r3, r0, #3, #10 @ get numways - 1 from CCSIDR 322 clz r1, r3 @ number of bits to MSB of way 323 lsl r3, r3, r1 @ shift into position 324 mov ip, #1 @ 325 lsl ip, ip, r1 @ ip now contains the way decr 326 327 ubfx r0, r0, #0, #3 @ get linesize from CCSIDR 328 add r0, r0, #4 @ apply bias 329 lsl r2, r2, r0 @ shift sets by log2(linesize) 330 add r3, r3, r2 @ merge numsets - 1 with numways - 1 331 sub ip, ip, r2 @ subtract numsets - 1 from way decr 332 mov r1, #1 333 lsl r1, r1, r0 @ r1 now contains the set decr 334 mov r2, ip @ r2 now contains set way decr 335 336 /* r3 = ways/sets, r2 = way decr, r1 = set decr, r0 and ip are free */ 3371: mcr CP15_DCISW(r3) @ invalidate line 338 movs r0, r3 @ get current way/set 339 beq 2f @ at 0 means we are done. 340 movs r0, r0, lsl #10 @ clear way bits leaving only set bits 341 subne r3, r3, r1 @ non-zero?, decrement set # 342 subeq r3, r3, r2 @ zero?, decrement way # and restore set count 343 b 1b 344 3452: dsb @ wait for stores to finish 346 mov r0, #0 @ and ... 347 mcr CP15_ICIALLU @ invalidate instruction+branch cache 348 isb @ instruction sync barrier 349 bx lr @ return 350END(armv7_idcache_inv_all) 351 352