1#ifndef _PARISC_TLBFLUSH_H
2#define _PARISC_TLBFLUSH_H
3
4/* TLB flushing routines.... */
5
6#include <linux/mm.h>
7#include <linux/sched.h>
8#include <asm/mmu_context.h>
9
10
11/* This is for the serialisation of PxTLB broadcasts.  At least on the
12 * N class systems, only one PxTLB inter processor broadcast can be
13 * active at any one time on the Merced bus.  This tlb purge
14 * synchronisation is fairly lightweight and harmless so we activate
15 * it on all SMP systems not just the N class.  We also need to have
16 * preemption disabled on uniprocessor machines, and spin_lock does that
17 * nicely.
18 */
/* Lock serialising PxTLB purge broadcasts (see comment above).  Also
 * provides the preemption-disable we need on UP. */
extern spinlock_t pa_tlb_lock;

/* NOTE: the macro argument is ignored; callers invoke these with empty
 * parentheses, e.g. purge_tlb_start(). */
#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)

/* Flush the whole TLB on every CPU / on the local CPU only. */
extern void flush_tlb_all(void);
extern void flush_tlb_all_local(void *);

27
28static inline void flush_tlb_mm(struct mm_struct *mm)
29{
30	BUG_ON(mm == &init_mm); /* Should never happen */
31
32#ifdef CONFIG_SMP
33	flush_tlb_all();
34#else
35	if (mm) {
36		if (mm->context != 0)
37			free_sid(mm->context);
38		mm->context = alloc_sid();
39		if (mm == current->active_mm)
40			load_context(mm->context);
41	}
42#endif
43}
44
/* flush_tlb_pgtables() is a no-op on parisc (nothing to do when page
 * tables are torn down).
 *
 * Converted from the old GNU "extern __inline__" to "static inline",
 * consistent with the other helpers in this header: extern inline in a
 * header can emit an out-of-line external definition in every including
 * translation unit under GNU89/C99 inline semantics, risking duplicate
 * symbols.  static inline has no such hazard and is the kernel idiom.
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
}
48
/* Purge a single page's translation from both the data and the
 * instruction TLB.
 *
 * @vma:  VMA the page belongs to; its mm supplies the space id.
 * @addr: virtual address of the page to purge.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	/* For one page, it's not worth testing the split_tlb variable */

	mb();				/* order prior accesses before the purge */
	mtsp(vma->vm_mm->context,1);	/* load the mm's space id (presumably into sr1 — TODO confirm) */
	purge_tlb_start();		/* serialise the PxTLB broadcast (see top of file) */
	pdtlb(addr);			/* purge data-TLB entry */
	pitlb(addr);			/* purge instruction-TLB entry */
	purge_tlb_end();
}
61
/* Flush a contiguous virtual range within the given space id (sid);
 * implemented out of line in the arch code. */
void __flush_tlb_range(unsigned long sid,
	unsigned long start, unsigned long end);

/* User range: take the space id from the vma's mm context. */
#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)

/* Kernel range: sid 0 is passed for kernel mappings. */
#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
68
69#endif
70