// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

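/*
 * Flush every TLB entry belonging to @asid on the local hart, or the
 * entire local TLB when no ASID is given.
 */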
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma x0, %0"
				:
				: "r" (asid)
				: "memory");
	else
		local_flush_tlb_all();
}

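/*
 * Flush the entry for @addr in @asid on the local hart, or the entry
 * for all address spaces when no ASID is given.
 */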
static inline void local_flush_tlb_page_asid(unsigned long addr,
		unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma %0, %1"
				:
				: "r" (addr), "r" (asid)
				: "memory");
	else
		local_flush_tlb_page(addr);
}

/*
 * Flush entire TLB if number of entries to be flushed is greater
 * than the threshold below.
 */
static unsigned long tlb_flush_all_threshold __read_mostly = 64;

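/*
 * Flush a range one entry at a time, falling back to a full per-ASID
 * flush once the number of entries exceeds tlb_flush_all_threshold.
 */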
static void local_flush_tlb_range_threshold_asid(unsigned long start,
						 unsigned long size,
						 unsigned long stride,
						 unsigned long asid)
{
	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
	int i;

	if (nr_ptes_in_range > tlb_flush_all_threshold) {
		local_flush_tlb_all_asid(asid);
		return;
	}

	for (i = 0; i < nr_ptes_in_range; ++i) {
		local_flush_tlb_page_asid(start, asid);
		start += stride;
	}
}

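/*
 * Pick the cheapest local flush for the range: a single-page flush, a
 * full per-ASID flush for FLUSH_TLB_MAX_SIZE, or a page-by-page walk
 * bounded by the threshold above.
 */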
static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else if (size == FLUSH_TLB_MAX_SIZE)
		local_flush_tlb_all_asid(asid);
	else
		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}

/* Flush a range of kernel pages without broadcasting */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
}

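/* IPI callback: flush the whole TLB on the receiving hart. */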
static void __ipi_flush_tlb_all(void *info)
{
	local_flush_tlb_all();
}

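/*
 * Flush the entire TLB on every online hart, either by IPI or through
 * the SBI remote fence extension.
 */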
void flush_tlb_all(void)
{
	if (riscv_use_ipi_for_rfence())
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
	else
		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
}

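/* Arguments passed to __ipi_flush_tlb_range_asid() on remote harts. */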
struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};

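/* IPI callback: flush the requested range on the receiving hart. */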
static void __ipi_flush_tlb_range_asid(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}

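/*
 * Flush @size bytes starting at @start with the given @stride for
 * @asid on all harts in @cmask, either by IPI or through the SBI
 * remote fence extension. When only the local hart is targeted, flush
 * locally without broadcasting.
 */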
static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
			      unsigned long start, unsigned long size,
			      unsigned long stride)
{
	struct flush_tlb_range_data ftd;
	bool broadcast;

	if (cpumask_empty(cmask))
		return;

	if (cmask != cpu_online_mask) {
		unsigned int cpuid;

		cpuid = get_cpu();
		/* check if the tlbflush needs to be sent to other CPUs */
		broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
	} else {
		broadcast = true;
	}

	if (broadcast) {
		if (riscv_use_ipi_for_rfence()) {
			ftd.asid = asid;
			ftd.start = start;
			ftd.size = size;
			ftd.stride = stride;
			on_each_cpu_mask(cmask,
					 __ipi_flush_tlb_range_asid,
					 &ftd, 1);
		} else {
			sbi_remote_sfence_vma_asid(cmask,
						   start, size, asid);
		}
	} else {
		local_flush_tlb_range_asid(start, size, stride, asid);
	}

	if (cmask != cpu_online_mask)
		put_cpu();
}

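/*
 * Return the hardware ASID of @mm when the ASID allocator is in use,
 * or FLUSH_TLB_NO_ASID otherwise.
 */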
static inline unsigned long get_mm_asid(struct mm_struct *mm)
{
	return static_branch_unlikely(&use_asid_allocator) ?
			atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
}

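/* Flush all user mappings of @mm on every hart that has run it. */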
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
			  0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}

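/*
 * Flush the user mappings of @mm between @start and @end, using
 * @page_size as the flush stride.
 */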
void flush_tlb_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int page_size)
{
	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
			  start, end - start, page_size);
}

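/* Flush a single page of @vma's address space. */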
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  addr, PAGE_SIZE, PAGE_SIZE);
}

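/*
 * Flush a user virtual address range; hugetlb VMAs are flushed with
 * the huge page size as the stride, adjusted for NAPOT regions below.
 */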
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long stride_size;

	if (!is_vm_hugetlb_page(vma)) {
		stride_size = PAGE_SIZE;
	} else {
		stride_size = huge_page_size(hstate_vma(vma));

		/*
		 * As stated in the privileged specification, every PTE in a
		 * NAPOT region must be invalidated, so reset the stride in that
		 * case.
		 */
		if (has_svnapot()) {
			if (stride_size >= PGDIR_SIZE)
				stride_size = PGDIR_SIZE;
			else if (stride_size >= P4D_SIZE)
				stride_size = P4D_SIZE;
			else if (stride_size >= PUD_SIZE)
				stride_size = PUD_SIZE;
			else if (stride_size >= PMD_SIZE)
				stride_size = PMD_SIZE;
			else
				stride_size = PAGE_SIZE;
		}
	}

	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  start, end - start, stride_size);
}

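/* Flush a range of kernel pages on every online hart. */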
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	__flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
			  start, end - start, PAGE_SIZE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
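/* Flush a range mapped with PMD-sized (transparent huge page) entries. */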
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  start, end - start, PMD_SIZE);
}
#endif

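/*
 * Support for batched TLB flushing during reclaim: the unmap path
 * gathers the harts to flush in an arch_tlbflush_unmap_batch and
 * issues a single flush at the end via arch_tlbbatch_flush().
 * Deferring the flush this way is always worthwhile on RISC-V.
 */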
bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
	return true;
}

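/*
 * Record the harts that have run @mm; they will be flushed by
 * arch_tlbbatch_flush().
 */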
void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
			       struct mm_struct *mm,
			       unsigned long uaddr)
{
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

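/*
 * A flush deferred by the batching code may still be pending for @mm;
 * flush the whole mm now so no stale entries survive.
 */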
void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

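/* Flush everything recorded in @batch and reset its CPU mask. */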
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	__flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
			  FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
	cpumask_clear(&batch->cpumask);
}