#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H
/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__

struct mm_struct;
struct vm_area_struct;

#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

extern void _tlbie(unsigned long address);

#if defined(CONFIG_40x) || defined(CONFIG_8xx)
#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
extern void _tlbia(void);
#endif

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	_tlbia();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	_tlbie(vmaddr);
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	_tlbie(vmaddr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	_tlbia();
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	_tlbia();
}

#elif defined(CONFIG_PPC32)
/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void _tlbie(unsigned long address);
extern void _tlbia(void);

extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma,
				  unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#else
/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

struct ppc64_tlb_batch {
	int			active;
	unsigned long		index;
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
	unsigned int		psize;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}
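
/*
 * Illustrative sketch (an assumption for clarity, not part of this
 * interface): a caller tearing down many PTEs would bracket the updates
 * with the lazy MMU hooks above, so that hpte_need_flush() can queue up
 * to PPC64_TLB_BATCH_NR entries in the per-cpu batch and they go out as
 * one flush instead of one tlbie per page.  The loop is schematic; the
 * PTE accessors (e.g. pte_clear()) are what end up calling
 * hpte_need_flush() on hash-MMU kernels:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (addr = start; addr < end; addr += PAGE_SIZE)
 *		pte_clear(mm, addr, ptep);	<- may queue hpte_need_flush()
 *	arch_leave_lazy_mmu_mode();		<- flushes any pending batch
 */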

#define arch_flush_lazy_mmu_mode()	do {} while (0)

extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
			    int local);
extern void flush_hash_range(unsigned long number, int local);

static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}

#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/*
 * This is called in munmap when we have freed up some page-table
 * pages.  We don't need to do anything here, there's nothing special
 * about our page-table pages.  -- paulus
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */