#ifndef M68K_PGALLOC_H
#define M68K_PGALLOC_H

#include <linux/config.h>
#include <asm/setup.h>
#include <asm/virtconvert.h>

/*
 * Cache handling functions
 */

/*
 * Invalidate the entire CPU instruction cache.
 *
 * On 68040/68060 this executes CINVA %ic (invalidate all icache lines);
 * the leading NOP is presumably there to flush the write pipeline first
 * -- TODO confirm against the 68040 manual.  On older CPUs it sets the
 * FLUSH_I bit in the CACR control register instead, which clears the
 * instruction cache.
 */
#define flush_icache() \
({ \
	if (CPU_IS_040_OR_060) \
		__asm__ __volatile__("nop\n\t" \
				     ".chip 68040\n\t" \
				     "cinva %%ic\n\t" \
				     ".chip 68k" : ); \
	else { \
		unsigned long _tmp; \
		__asm__ __volatile__("movec %%cacr,%0\n\t" \
				     "orw %1,%0\n\t" \
				     "movec %0,%%cacr" \
				     : "=&d" (_tmp) \
				     : "id" (FLUSH_I)); \
	} \
})

/*
 * invalidate the cache for the specified memory range.
 * It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_clear(unsigned long paddr, int len);

/*
 * push any dirty cache in the specified memory range.
 * It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_push(unsigned long paddr, int len);

/*
 * push and invalidate pages in the specified user virtual
 * memory range.
 */
extern void cache_push_v(unsigned long vaddr, int len);

/* CACR flush bits ORed in by the macros above/below:
 * FLUSH_I_AND_D clears both caches, FLUSH_I only the icache. */
#define FLUSH_I_AND_D	(0x00000808)
#define FLUSH_I		(0x00000008)

/* This is needed whenever the virtual mapping of the current
   process changes.
 */
/*
 * Push (write back) the whole data cache.  On 68040/68060 this is
 * CPUSHA %dc; on older CPUs it sets FLUSH_I_AND_D in CACR, clearing
 * both caches.
 */
#define __flush_cache_all() \
({ \
	if (CPU_IS_040_OR_060) \
		__asm__ __volatile__("nop\n\t" \
				     ".chip 68040\n\t" \
				     "cpusha %dc\n\t" \
				     ".chip 68k"); \
	else { \
		unsigned long _tmp; \
		__asm__ __volatile__("movec %%cacr,%0\n\t" \
				     "orw %1,%0\n\t" \
				     "movec %0,%%cacr" \
				     : "=&d" (_tmp) \
				     : "di" (FLUSH_I_AND_D)); \
	} \
})

/*
 * Flush both caches, but only on 020/030; a no-op on 040/060.
 * Used by the flush_cache_* helpers below, since on 040/060 the
 * caches are physically indexed (presumably) and need no flush on
 * context/mapping changes -- NOTE(review): confirm that rationale.
 */
#define __flush_cache_030() \
({ \
	if (CPU_IS_020_OR_030) { \
		unsigned long _tmp; \
		__asm__ __volatile__("movec %%cacr,%0\n\t" \
				     "orw %1,%0\n\t" \
				     "movec %0,%%cacr" \
				     : "=&d" (_tmp) \
				     : "di" (FLUSH_I_AND_D)); \
	} \
})

#define flush_cache_all() __flush_cache_all()

/* Flush caches when mm's mappings change; only needed if it is the
 * currently running address space. */
extern inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_cache_030();
}

/* Range variant: start/end are ignored, the whole cache is flushed
 * (020/030 cannot flush by virtual range here). */
extern inline void flush_cache_range(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	if (mm == current->mm)
		__flush_cache_030();
}

/* Single-page variant: same whole-cache flush as above. */
extern inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

/* Push the page at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
#define flush_page_to_ram(page) __flush_page_to_ram((unsigned long) page_address(page))
extern inline void __flush_page_to_ram(unsigned long address)
{
	if (CPU_IS_040_OR_060) {
		/* CPUSHP %bc pushes/invalidates both caches for the page at
		 * the given physical address. */
		__asm__ __volatile__("nop\n\t"
				     ".chip 68040\n\t"
				     "cpushp %%bc,(%0)\n\t"
				     ".chip 68k"
				     : : "a" (__pa((void *)address)));
	} else {
		/* 020/030: no per-page flush; clear the whole icache
		 * via the FLUSH_I bit in CACR. */
		unsigned long _tmp;
		__asm__ __volatile__("movec %%cacr,%0\n\t"
				     "orw %1,%0\n\t"
				     "movec %0,%%cacr"
				     : "=&d" (_tmp)
				     : "di" (FLUSH_I));
	}
}

/* No-ops on m68k: dcache/icache coherency for these cases is handled
 * by the page-level flushes above. */
#define flush_dcache_page(page)			do { } while (0)
#define flush_icache_page(vma,pg)		do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)

/* Push n pages at kernel virtual address and clear the
   icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
/*
 * Make the range [address, endaddr) coherent for instruction fetch
 * after the kernel has written code into it (e.g. signal trampolines,
 * module loading -- presumably; callers not visible here).
 */
extern inline void flush_icache_range (unsigned long address,
				       unsigned long endaddr)
{
	if (CPU_IS_040_OR_060) {
		/* Number of pages touched, rounding the length up. */
		short n = (endaddr - address + PAGE_SIZE - 1) / PAGE_SIZE;

		while (--n >= 0) {
			/* CPUSHP %bc: push and invalidate both caches for
			 * one page, addressed physically. */
			__asm__ __volatile__("nop\n\t"
					     ".chip 68040\n\t"
					     "cpushp %%bc,(%0)\n\t"
					     ".chip 68k"
					     : : "a" (virt_to_phys((void *)address)));
			address += PAGE_SIZE;
		}
	} else {
		/* 020/030: no ranged flush; clear the whole icache. */
		unsigned long tmp;
		__asm__ __volatile__("movec %%cacr,%0\n\t"
				     "orw %1,%0\n\t"
				     "movec %0,%%cacr"
				     : "=&d" (tmp)
				     : "di" (FLUSH_I));
	}
}

/* Page-table allocation primitives are platform specific. */
#ifdef CONFIG_SUN3
#include <asm/sun3_pgalloc.h>
#else
#include <asm/motorola_pgalloc.h>
#endif

#endif /* M68K_PGALLOC_H */