/* $NetBSD: cpufunc_asm_arm9.S,v 1.3 2004/01/26 15:54:16 rearnsha Exp $ */

/*
 * Copyright (c) 2001, 2004 ARM Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ARM9 assembly functions for CPU / MMU / TLB specific operations
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD$");

/*
 * Function to set the MMU Translation Table Base register.
 *
 * We must clean and flush the caches first: they are virtually indexed
 * and tagged, so their contents are keyed to virtual mappings that are
 * about to change.
 */
ENTRY(arm9_setttb)
	stmfd	sp!, {r0, lr}
	bl	_C_LABEL(arm9_idcache_wbinv_all)
	ldmfd	sp!, {r0, lr}

	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */

	mcr	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs */
	mov	pc, lr
END(arm9_setttb)
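
/*
 * For illustration only (comment, not assembled code): the kernel never
 * calls arm9_setttb directly; it reaches it through the cpu_functions
 * dispatch table.  A minimal sketch, assuming the usual FreeBSD/arm
 * cpufuncs glue in cpufunc.c / cpufunc.h:
 *
 *	cpufuncs.cf_setttb = arm9_setttb;   // selected at CPU probe time
 *	...
 *	cpu_setttb(new_l1_table_pa);        // wbinv the caches, load the
 *	                                    // TTB, then flush the I+D TLBs
 */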

/*
 * TLB functions
 */
ENTRY(arm9_tlb_flushID_SE)
	mcr	p15, 0, r0, c8, c6, 1	/* flush D tlb single entry */
	mcr	p15, 0, r0, c8, c5, 1	/* flush I tlb single entry */
	mov	pc, lr
END(arm9_tlb_flushID_SE)

/*
 * Cache operations.  For the entire cache we use the set/index
 * operations.
 */
	s_max	.req	r0
	i_max	.req	r1
	s_inc	.req	r2
	i_inc	.req	r3

ENTRY_NP(arm9_icache_sync_range)
	ldr	ip, .Larm9_line_size
	cmp	r1, #0x4000		/* size >= 16K: do the whole cache */
	bcs	.Larm9_icache_sync_all
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larm9_sync_next:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bhi	.Larm9_sync_next
	mov	pc, lr

.Larm9_icache_sync_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache cleaning code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to clean Dcache. */

.Larm9_dcache_wb:
	ldr	ip, .Larm9_cache_data
	ldmia	ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set:
	orr	ip, s_max, i_max
.Lnext_index:
	mcr	p15, 0, ip, c7, c10, 2	/* Clean D cache SE with Set/Index */
	subs	ip, ip, i_inc
	bhs	.Lnext_index		/* Next index */
	subs	s_max, s_max, s_inc
	bhs	.Lnext_set		/* Next set */
	mov	pc, lr
END(arm9_icache_sync_range)

.Larm9_line_size:
	.word	_C_LABEL(arm_pdcache_line_size)

ENTRY(arm9_dcache_wb_range)
	ldr	ip, .Larm9_line_size
	cmp	r1, #0x4000
	bcs	.Larm9_dcache_wb
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larm9_wb_next:
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bhi	.Larm9_wb_next
	mov	pc, lr
END(arm9_dcache_wb_range)

ENTRY(arm9_dcache_wbinv_range)
	ldr	ip, .Larm9_line_size
	cmp	r1, #0x4000
	bcs	.Larm9_dcache_wbinv_all
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larm9_wbinv_next:
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bhi	.Larm9_wbinv_next
	mov	pc, lr
END(arm9_dcache_wbinv_range)

/*
 * Note that we must not simply invalidate everything: invalidation
 * discards dirty data in lines that fall outside the range.  If the
 * range is too big, we fall back to a write-back-and-invalidate of
 * the entire cache instead.
 */
ENTRY(arm9_dcache_inv_range)
	ldr	ip, .Larm9_line_size
	cmp	r1, #0x4000
	bcs	.Larm9_dcache_wbinv_all
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larm9_inv_next:
	mcr	p15, 0, r0, c7, c6, 1	/* Invalidate D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bhi	.Larm9_inv_next
	mov	pc, lr
END(arm9_dcache_inv_range)

ENTRY(arm9_idcache_wbinv_range)
	ldr	ip, .Larm9_line_size
	cmp	r1, #0x4000
	bcs	.Larm9_idcache_wbinv_all
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larm9_id_wbinv_next:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bhi	.Larm9_id_wbinv_next
	mov	pc, lr
END(arm9_idcache_wbinv_range)

ENTRY_NP(arm9_idcache_wbinv_all)
.Larm9_idcache_wbinv_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache purging code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through */

EENTRY(arm9_dcache_wbinv_all)
.Larm9_dcache_wbinv_all:
	ldr	ip, .Larm9_cache_data
	ldmia	ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set_inv:
	orr	ip, s_max, i_max
.Lnext_index_inv:
	mcr	p15, 0, ip, c7, c14, 2	/* Purge D cache SE with Set/Index */
	subs	ip, ip, i_inc
	bhs	.Lnext_index_inv	/* Next index */
	subs	s_max, s_max, s_inc
	bhs	.Lnext_set_inv		/* Next set */
	mov	pc, lr
EEND(arm9_dcache_wbinv_all)
END(arm9_idcache_wbinv_all)

.Larm9_cache_data:
	.word	_C_LABEL(arm9_dcache_sets_max)
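
/*
 * For reference (comment only, not assembled): the set/index walks
 * above are equivalent to roughly the following C sketch, where the
 * four parameters are the arm9_dcache_* words declared at the end of
 * this file and clean_inv_line() is a hypothetical stand-in for the
 * "mcr p15, 0, idx, c7, c14, 2" operation:
 *
 *	uint32_t set, idx;
 *	for (set = sets_max; ; set -= sets_inc) {
 *		for (idx = set | index_max; ; idx -= index_inc) {
 *			clean_inv_line(idx);
 *			if (idx == set)		// index field is now 0
 *				break;
 *		}
 *		if (set == 0)
 *			break;
 *	}
 *
 * The index (way) fields live in the high bits of the word and the set
 * fields in the low bits, so a single OR forms the Rd operand.
 */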

/*
 * Context switch.
 *
 * This is the CPU-specific part of the context switcher, cpu_switch();
 * it actually performs the TTB reload.
 *
 * NOTE: Special calling convention
 *	r1, r4-r13 must be preserved
 */
ENTRY(arm9_context_switch)
	/*
	 * We can assume that the caches will only contain kernel addresses
	 * at this point, so there is no need to flush them again.
	 */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	mcr	p15, 0, r0, c2, c0, 0	/* set the new TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* and flush the I+D tlbs */

	/* Paranoia -- make sure the pipeline is empty. */
	nop
	nop
	nop
	mov	pc, lr
END(arm9_context_switch)

	.bss

/* XXX The following macros should probably be moved to asm.h */
#define _DATA_OBJECT(x)	.globl x; .type x,_ASM_TYPE_OBJECT; x:
#define C_OBJECT(x)	_DATA_OBJECT(_C_LABEL(x))

/*
 * Parameters for the cache cleaning code.  Note that the order of these
 * four variables is assumed in the code above; hence the reason for
 * declaring them in the assembler file.
 */
	.align	2
C_OBJECT(arm9_dcache_sets_max)
	.space	4
C_OBJECT(arm9_dcache_index_max)
	.space	4
C_OBJECT(arm9_dcache_sets_inc)
	.space	4
C_OBJECT(arm9_dcache_index_inc)
	.space	4
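
/*
 * These words are filled in by the MD setup code before the cache
 * routines above are used.  A sketch of the usual computation (along
 * the lines of arm9_setup() in cpufunc.c; the arm_dcache_l2_* globals
 * are assumed to hold log2 of the D-cache line size, number of sets
 * and associativity):
 *
 *	arm9_dcache_sets_inc  = 1U << arm_dcache_l2_linesize;
 *	arm9_dcache_sets_max  = (1U << (arm_dcache_l2_linesize +
 *	    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
 *	arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
 *	arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
 */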