/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_TLBFLUSH_H
#define _ALPHA_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/compiler.h>

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif
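
/* By default the helpers below are "extern inline": callers inline
   them and no out-of-line body is emitted from this header.  One
   translation unit in arch/alpha is expected to define __EXTERN_INLINE
   (empty) before including this file, turning these into the single
   real out-of-line definitions (needed, for instance, so the generic
   machine vector can take their addresses).  */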

extern void __load_new_mm_context(struct mm_struct *);

/* Use a few helper functions to hide the ugly broken ASN
   numbers on early Alphas (ev4 and ev45).  */
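
/* On EV4/EV45 the ASN hardware is unusable, so flushing the current
   mm means reloading its page-table context and then zapping all
   process-private TB entries with tbiap().  EV5 and later have working
   ASNs: __load_new_mm_context() assigns the mm a fresh ASN, so stale
   entries (tagged with the old ASN) can never match again and no
   explicit invalidate is needed.  */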

__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
	tbiap();
}

__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
}

/* Flush just one page in the current TLB set.  We need to be very
   careful about the icache here; there is no way to invalidate a
   specific icache page.  */

__EXTERN_INLINE void
ev4_flush_tlb_current_page(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	int tbi_flag = 2;		/* TBISD: data stream only */
	if (vma->vm_flags & VM_EXEC) {
		/* Executable mapping: take a whole new context, then
		   invalidate both the I- and D-stream entries.  */
		__load_new_mm_context(mm);
		tbi_flag = 3;		/* TBIS: both streams */
	}
	tbi(tbi_flag, addr);
}

__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	if (vma->vm_flags & VM_EXEC)
		/* Can't invalidate one icache page: get a new ASN.  */
		__load_new_mm_context(mm);
	else
		tbi(2, addr);		/* TBISD: data stream only */
}
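
/* With CONFIG_ALPHA_GENERIC one kernel image supports several CPU
   families, so the flush_tlb_current{,_page} choice is routed through
   the machine vector (alpha_mv) at run time; otherwise it is fixed at
   compile time.  */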
#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current		ev4_flush_tlb_current
#  define flush_tlb_current_page	ev4_flush_tlb_current_page
# else
#  define flush_tlb_current		ev5_flush_tlb_current
#  define flush_tlb_current_page	ev5_flush_tlb_current_page
# endif
#endif

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

/* Flush current user mapping.  */
static inline void
flush_tlb(void)
{
	flush_tlb_current(current->active_mm);
}

/* Flush someone else's user mapping.  */
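/* Rather than touching that CPU's TB directly, zero mm->context[cpu]
   to mark its per-CPU context stale; a fresh ASN is then allocated the
   next time the mm is activated there, so the old entries never match.  */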
static inline void
flush_tlb_other(struct mm_struct *mm)
{
	unsigned long *mmc = &mm->context[smp_processor_id()];
	/* Check it's not zero first to avoid cacheline ping pong
	   when possible.  */
	if (*mmc) *mmc = 0;
}

#ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree).  */
static inline void flush_tlb_all(void)
{
	tbia();
}

/* Flush a specified user mapping.  */
static inline void
flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

/* Page-granular tlb flush.  */
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm == current->active_mm)
		flush_tlb_current_page(mm, vma, addr);
	else
		flush_tlb_other(mm);
}

/* Flush a specified range of a user mapping.  On the Alpha we flush
   the whole user TLB instead.  */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
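
/* Presumably a loop of per-page tbi() calls rarely beats simply taking
   a new context, so no attempt is made to flush page by page.  */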

#else /* CONFIG_SMP */

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
			    unsigned long);

#endif /* CONFIG_SMP */

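/* No finer-grained flush is available for kernel mappings (changed by
   vmalloc/vfree, for instance), so just flush everything.  */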
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#endif /* _ALPHA_TLBFLUSH_H */