/*
 *  linux/arch/arm/mm/cache-v3.S
 *
 *  Copyright (C) 1997-2002 Russell king
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v3_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v3_flush_kern_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
ENTRY(v3_flush_user_cache_range)
	mov	ip, #0
	@ Unconditional flush: the previous "mcreq" predicated this on EQ,
	@ but "mov ip, #0" does not set the condition flags, so execution
	@ depended on stale flags left by the caller and the flush could be
	@ silently skipped.  ARMv3 has a combined ID cache; writing CP15
	@ c7 invalidates the whole thing.
	mcr	p15, 0, ip, c7, c0, 0		@ flush ID cache
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v3_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v3_coherent_user_range)
	@ ARMv3 has a unified cache, so I/D coherency is automatic: no-op.
	mov	pc, lr

/*
 *	flush_kern_dcache_area(void *page, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v3_flush_kern_dcache_area)
	/* FALLTHROUGH */

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v3_dma_flush_range)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c0, 0		@ flush ID cache
	mov	pc, lr

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v3_dma_unmap_area)
	teq	r2, #DMA_TO_DEVICE
	bne	v3_dma_flush_range
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v3_dma_map_area)
	mov	pc, lr
ENDPROC(v3_dma_unmap_area)
ENDPROC(v3_dma_map_area)

	__INITDATA

	@ struct cpu_cache_fns operation table for ARMv3 cores; order must
	@ match the cpu_cache_fns definition in <asm/cacheflush.h>.
	.type	v3_cache_fns, #object
ENTRY(v3_cache_fns)
	.long	v3_flush_kern_cache_all
	.long	v3_flush_user_cache_all
	.long	v3_flush_user_cache_range
	.long	v3_coherent_kern_range
	.long	v3_coherent_user_range
	.long	v3_flush_kern_dcache_area
	.long	v3_dma_map_area
	.long	v3_dma_unmap_area
	.long	v3_dma_flush_range
	.size	v3_cache_fns, . - v3_cache_fns