#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB gather support for s390: page table pages freed during unmap are
 * batched in a per-cpu array and released only after the TLB has been
 * flushed, so no CPU can still hold a translation through a table that
 * has already been handed back to the allocator.
 */

#include <linux/mm.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

/*
 * Size of the batching array: a single slot on UP (every queued table
 * immediately fills the array and forces a flush+free), 508 slots on SMP.
 * NOTE(review): 508 presumably keeps struct mmu_gather within one 4K
 * page on 64 bit — confirm.
 */
#ifndef CONFIG_SMP
#define TLB_NR_PTRS	1
#else
#define TLB_NR_PTRS	508
#endif

/*
 * The single array[] is filled from both ends: pte tables go in from
 * index 0 upwards (nr_ptes counts up), pmd/pud tables ("pxd") from
 * index TLB_NR_PTRS downwards (nr_pxds counts down).  The array is
 * full when the two indices meet (nr_ptes >= nr_pxds).
 */
struct mmu_gather {
	struct mm_struct *mm;		/* address space the tables belong to */
	unsigned int fullmm;		/* non-zero: whole mm is being torn down */
	unsigned int nr_ptes;		/* pte tables queued in array[0..nr_ptes) */
	unsigned int nr_pxds;		/* pxd tables queued in array[nr_pxds..) */
	void *array[TLB_NR_PTRS];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * Begin a gather operation for @mm on this cpu.  For a full-mm flush
 * (@full_mm_flush non-zero) the TLB is flushed once up front; the
 * *_free_tlb helpers below then free tables directly without batching.
 */
static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
						unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;
	tlb->nr_ptes = 0;
	tlb->nr_pxds = TLB_NR_PTRS;
	if (tlb->fullmm)
		__tlb_flush_mm(mm);
	return tlb;
}

/*
 * Flush the TLB (if anything is batched and the mm was not already
 * flushed in tlb_gather_mmu), then release the queued page tables.
 * @start/@end are part of the generic interface and unused here.
 */
static inline void tlb_flush_mmu(struct mmu_gather *tlb,
				 unsigned long start, unsigned long end)
{
	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS))
		__tlb_flush_mm(tlb->mm);
	while (tlb->nr_ptes > 0)
		pte_free(tlb->mm, tlb->array[--tlb->nr_ptes]);
	while (tlb->nr_pxds < TLB_NR_PTRS)
		/* pgd_free frees the pointer as region or segment table */
		pgd_free(tlb->mm, tlb->array[tlb->nr_pxds++]);
}

/*
 * Finish the gather: flush and free any remaining batched tables,
 * trim the page table cache and release the per-cpu gather.
 */
static inline void tlb_finish_mmu(struct mmu_gather *tlb,
				  unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been freed, so just do free_page_and_swap_cache.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	if (!tlb->fullmm) {
		tlb->array[tlb->nr_ptes++] = pte;
		/* both ends of array[] met: flush and drain the batch */
		if (tlb->nr_ptes >= tlb->nr_pxds)
			tlb_flush_mmu(tlb, 0, 0);
	} else
		pte_free(tlb->mm, pte);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
#ifdef __s390x__
	if (tlb->mm->context.asce_limit <= (1UL << 31))
		return;
	if (!tlb->fullmm) {
		/* pxd entries fill array[] from the top end downwards */
		tlb->array[--tlb->nr_pxds] = pmd;
		if (tlb->nr_ptes >= tlb->nr_pxds)
			tlb_flush_mmu(tlb, 0, 0);
	} else
		pmd_free(tlb->mm, pmd);
#endif
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
#ifdef __s390x__
	if (tlb->mm->context.asce_limit <= (1UL << 42))
		return;
	if (!tlb->fullmm) {
		/* pxd entries fill array[] from the top end downwards */
		tlb->array[--tlb->nr_pxds] = pud;
		if (tlb->nr_ptes >= tlb->nr_pxds)
			tlb_flush_mmu(tlb, 0, 0);
	} else
		pud_free(tlb->mm, pud);
#endif
}

/* No per-vma or per-entry work is needed on s390; these are no-ops. */
#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
#define tlb_migrate_finish(mm)			do { } while (0)

#endif /* _S390_TLB_H */