1/* $NetBSD: cpufunc_asm_armv6.S,v 1.4 2010/12/10 02:06:22 bsh Exp $ */ 2 3/* 4 * Copyright (c) 2002, 2005 ARM Limited 5 * Portions Copyright (c) 2007 Microsoft 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. The name of the company may not be used to endorse or promote 17 * products derived from this software without specific prior written 18 * permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 21 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 22 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 23 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 24 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 25 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * ARMv6 assembly functions for manipulating caches. 33 * These routines can be used by any core that supports the mcrr address 34 * range operations. 
 */

/*
 * $FreeBSD$
 */

#include <machine/asm.h>

	.arch	armv6

/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 */
ENTRY(armv6_setttb)
	/*
	 * In:  r0 = new translation table base address.
	 * NOTE(review): r0's value is irrelevant to the drain and TLB-flush
	 * ops below (Rd is SBZ/ignored for those); only the c2 write uses it.
	 */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */

	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */

	mcr	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs */
	RET
END(armv6_setttb)

/*
 * Cache operations.
 *
 * The range routines below all take (start_va, length) and use the ARMv6
 * MCRR block-transfer ops, which operate on an inclusive [start, end]
 * virtual address range.
 */

/* LINTSTUB: void armv6_dcache_wb_range(vaddr_t, vsize_t); */
ENTRY(armv6_dcache_wb_range)
	/* In: r0 = start VA, r1 = length.  Convert r1 to inclusive end VA. */
	add	r1, r1, r0
	sub	r1, r1, #1
	mcrr	p15, 0, r1, r0, c12	/* clean D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_dcache_wb_range)

/* LINTSTUB: void armv6_dcache_wbinv_range(vaddr_t, vsize_t); */
ENTRY(armv6_dcache_wbinv_range)
	/* In: r0 = start VA, r1 = length.  Convert r1 to inclusive end VA. */
	add	r1, r1, r0
	sub	r1, r1, #1
	mcrr	p15, 0, r1, r0, c14	/* clean and invalidate D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_dcache_wbinv_range)

/*
 * Note, we must not invalidate everything.  If the range is too big we
 * must use wb-inv of the entire cache.
 *
 * LINTSTUB: void armv6_dcache_inv_range(vaddr_t, vsize_t);
 */
ENTRY(armv6_dcache_inv_range)
	/* In: r0 = start VA, r1 = length.  Convert r1 to inclusive end VA. */
	add	r1, r1, r0
	sub	r1, r1, #1
	mcrr	p15, 0, r1, r0, c6	/* invalidate D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_dcache_inv_range)

/* LINTSTUB: void armv6_idcache_wbinv_all(void); */
ENTRY_NP(armv6_idcache_wbinv_all)
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache purging code.
	 *
	 * NOTE(review): r0 is not zeroed first (unlike armv6_idcache_inv_all
	 * below); the Rd operand of these whole-cache c7 ops is SBZ and its
	 * value is presumably ignored by the core — confirm against the
	 * target's TRM if porting.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */

	/* Purge Dcache. */
	mcr	p15, 0, r0, c7, c14, 0	/* clean & invalidate D cache */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_idcache_wbinv_all)

/* Invalidate (without writing back) the entire I+D cache. */
ENTRY(armv6_idcache_inv_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0	/* invalidate all I+D cache */
	RET
END(armv6_idcache_inv_all)
