1#ifndef __ASMARM_TLB_H
2#define __ASMARM_TLB_H
3
4#include <asm/pgalloc.h>
5#include <asm/tlbflush.h>
6
7/*
8 * TLB handling.  This allows us to remove pages from the page
9 * tables, and efficiently handle the TLB issues.
10 */
/*
 * Per-CPU state accumulated between tlb_gather_mmu() and
 * tlb_finish_mmu() while tearing down page tables.
 */
struct mmu_gather {
	struct mm_struct	*mm;		/* address space being torn down */
	unsigned int		need_flush;	/* set by tlb_remove_page(); makes
						 * tlb_finish_mmu() flush the TLB */
	unsigned int		fullmm;		/* non-zero: the entire mm is going
						 * away (see tlb_start_vma) */
};
16
/* One gather state per CPU, claimed via get_cpu_var() in tlb_gather_mmu(). */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
18
19static inline struct mmu_gather *
20tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
21{
22        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
23
24        tlb->mm = mm;
25        tlb->need_flush = 0;
26        tlb->fullmm = full_mm_flush;
27
28        return tlb;
29}
30
31static inline void
32tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
33{
34        if (tlb->need_flush)
35                flush_tlb_mm(tlb->mm);
36
37        /* keep the page table cache within bounds */
38        check_pgt_cache();
39
40        put_cpu_var(mmu_gathers);
41}
42
/*
 * No per-PTE bookkeeping is needed on this architecture; removal is
 * tracked per-mm via tlb->need_flush in tlb_remove_page().
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)	do { } while (0)

/*
 * Flush the cache over the VMA's range before its pages are unmapped.
 * Skipped when the whole mm is being torn down (fullmm) — presumably
 * no cache maintenance is needed for an exiting address space.
 *
 * Fixes vs. previous revision: dropped the commented-out stub
 * definition, and parenthesized the macro arguments used in member
 * accesses so expression arguments expand safely.
 */
#define tlb_start_vma(tlb, vma)						\
	do {								\
		if (!(tlb)->fullmm)					\
			flush_cache_range(vma, (vma)->vm_start, (vma)->vm_end); \
	} while (0)

#define tlb_end_vma(tlb, vma)			do { } while (0)
51
/*
 * Remove @page from the address space being gathered: note that a TLB
 * flush is now required (consumed by tlb_finish_mmu()), then free the
 * page, together with its swap-cache entry if it has one.
 */
static inline void
tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
}
58
/*
 * Page-table page freeing is not batched on this architecture: the
 * pte/pmd page is released immediately and the gather state is unused.
 */
#define pte_free_tlb(tlb,ptep)          pte_free(ptep)
#define pmd_free_tlb(tlb,pmdp)          pmd_free(pmdp)
61
62#endif
63