/*	$NetBSD: pmap_tlb.c,v 1.62 2024/01/01 16:56:30 skrll Exp $	*/

/*-
 * Copyright (c) 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas at 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.62 2024/01/01 16:56:30 skrll Exp $");
/*
 * Manages address spaces in a TLB.
 *
 * Normally there is a 1:1 mapping between a TLB and a CPU.  However, some
 * implementations may share a TLB between multiple CPUs (really CPU thread
 * contexts).  This requires the TLB abstraction to be separated from the
 * CPU abstraction.  It also requires that the TLB be locked while doing
 * TLB activities.
 *
 * For each TLB, we track the ASIDs in use in a bitmap and a list of pmaps
 * that have a valid ASID.
 *
 * We allocate ASIDs in increasing order until we have exhausted the supply,
 * then reinitialize the ASID space, and start allocating again from the
 * first ASID above KERNEL_PID.  When allocating, we skip any ASID whose bit
 * is already set in the ASID bitmap.  Eventually this causes the ASID bitmap
 * to fill and, when completely filled, a reinitialization of the ASID space.
 *
 * To reinitialize the ASID space, the ASID bitmap is reset and then the ASIDs
 * of non-kernel TLB entries get recorded in the ASID bitmap.  If the entries
 * in the TLB consume more than half of the ASID space, all ASIDs are
 * invalidated, the ASID bitmap is recleared, and the list of pmaps is
 * emptied.  Otherwise (the normal case), any ASID present in the TLB (even
 * those which are no longer used by a pmap) will remain active (allocated)
 * and all other ASIDs will be freed.  If the size of the TLB is much smaller
 * than the ASID space, this algorithm completely avoids TLB invalidation.
 *
 * For multiprocessors, we also have to deal with TLB invalidation requests
 * from other CPUs, some of which are handled by reinitializing the ASID
 * space.  Whereas above we keep the ASIDs of those pmaps which have active
 * TLB entries, this type of reinitialization preserves the ASIDs of any
 * "onproc" user pmap and all other ASIDs will be freed.  We must do this
 * since we can't change the current ASID.
 *
 * Each pmap has two bitmaps: pm_active and pm_onproc.  Each bit in pm_active
 * indicates whether that pmap has an allocated ASID for a CPU.  Each bit in
 * pm_onproc indicates that the pmap's ASID is in use, i.e. a CPU has it in
 * its "current ASID" field, e.g. the ASID field of the COP 0 register EntryHi
 * for MIPS, or the ASID field of TTBR0 for AArch64.  The bit number used in
 * these bitmaps comes from the CPU's cpu_index().  Even though these bitmaps
 * contain the bits for all CPUs, the bits that correspond to the CPUs sharing
 * a TLB can only be manipulated while holding that TLB's lock.  Atomic ops
 * must be used to update them since multiple CPUs may be changing different
 * sets of bits at the same time but these sets never overlap.
 *
 * When a change to the local TLB may require a change in the TLBs of other
 * CPUs, we try to avoid sending an IPI if at all possible.  For instance, if
 * we are updating a PTE and that PTE previously was invalid and therefore
 * couldn't support an active mapping, there's no need for an IPI since there
 * can't be a TLB entry to invalidate.  The other case is when we change a PTE
 * to be modified: we just update the local TLB.  If another TLB has a stale
 * entry, a TLB MOD exception will be raised and that will cause the local TLB
 * to be updated.
 *
 * We never need to update a non-local TLB if the pmap doesn't have a valid
 * ASID for that TLB.  If it does have a valid ASID but isn't currently
 * "onproc", we simply reset its ASID for that TLB and then when it goes
 * "onproc" it will allocate a new ASID and any existing TLB entries will be
 * orphaned.  Only in the case that the pmap has an "onproc" ASID do we
 * actually have to send an IPI.
 *
 * Once we have determined we must send an IPI to shoot down a TLB, we need
 * to send it to one of the CPUs that share that TLB.  We choose the lowest
 * numbered CPU that has the pmap's ASID "onproc".  In reality, any CPU
 * sharing that TLB would do, but interrupting an active CPU seems best.
 *
 * A TLB might have multiple shootdowns active concurrently.  The shootdown
 * logic compresses these into a few cases:
 *	0) nobody needs to have its TLB entries invalidated
 *	1) one ASID needs to have its TLB entries invalidated
 *	2) more than one ASID needs to have its TLB entries invalidated
 *	3) the kernel needs to have its TLB entries invalidated
 *	4) the kernel and one or more ASIDs need their TLB entries invalidated.
 *
 * And for each case we do:
 *	0) nothing,
 *	1) if that ASID is still "onproc", we invalidate the TLB entries for
 *	   that single ASID.  If not, just reset the pmap's ASID to invalidate
 *	   and let it allocate a new ASID the next time it goes "onproc",
 *	2) we reinitialize the ASID space (preserving any "onproc" ASIDs) and
 *	   invalidate all non-wired non-global TLB entries,
 *	3) we invalidate all of the non-wired global TLB entries,
 *	4) we reinitialize the ASID space (again preserving any "onproc"
 *	   ASIDs) and invalidate all non-wired TLB entries.
 *
 * As you can see, shootdowns are not concerned with addresses, just address
 * spaces.  Since the number of TLB entries is usually quite small, this
 * avoids a lot of overhead for not much gain.
 */
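
/*
 * A worked example of the reinitialization policy above (illustrative
 * numbers, not a requirement of the code): with KERNEL_PID == 0 and an
 * 8-bit ASID space (ti_asid_max == 255), ASIDs 1-255 are handed out in
 * increasing order.  When the supply is exhausted and a 64-entry TLB can
 * hold entries for at most 64 distinct ASIDs, a reinitialization re-marks
 * only those ASIDs as used, so at least 191 ASIDs become free again
 * without a single TLB invalidation.
 */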

#define __PMAP_PRIVATE

#include "opt_multiprocessor.h"

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/kernel.h>			/* for cold */
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

static kmutex_t pmap_tlb0_lock __cacheline_aligned;

#define	IFCONSTANT(x)	(__builtin_constant_p((x)) ? (x) : 0)

#if KERNEL_PID > 31
#error "KERNEL_PID expected in range 0-31"
#endif

#define	TLBINFO_ASID_MARK_UNUSED(ti, asid) \
	__BITMAP_CLR((asid), &(ti)->ti_asid_bitmap)
#define	TLBINFO_ASID_MARK_USED(ti, asid) \
	__BITMAP_SET((asid), &(ti)->ti_asid_bitmap)
#define	TLBINFO_ASID_INUSE_P(ti, asid) \
	__BITMAP_ISSET((asid), &(ti)->ti_asid_bitmap)
#define	TLBINFO_ASID_RESET(ti) \
	do {								\
		__BITMAP_ZERO(&ti->ti_asid_bitmap);			\
		for (tlb_asid_t asid = 0; asid <= KERNEL_PID; asid++) 	\
			TLBINFO_ASID_MARK_USED(ti, asid);	 	\
	} while (0)
#define	TLBINFO_ASID_INITIAL_FREE(asid_max) \
	(asid_max + 1 /* 0 */ - (1 + KERNEL_PID))
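
/*
 * For example (illustrative): with KERNEL_PID == 0 and asid_max == 255,
 * TLBINFO_ASID_INITIAL_FREE(255) == 255 + 1 - (1 + 0) == 255, i.e. the
 * whole space minus the ASIDs 0..KERNEL_PID reserved for the kernel.
 */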

struct pmap_tlb_info pmap_tlb0_info = {
	.ti_name = "tlb0",
	.ti_asid_hint = KERNEL_PID + 1,
#ifdef PMAP_TLB_NUM_PIDS
	.ti_asid_max = IFCONSTANT(PMAP_TLB_NUM_PIDS - 1),
	.ti_asids_free = IFCONSTANT(
		TLBINFO_ASID_INITIAL_FREE(PMAP_TLB_NUM_PIDS - 1)),
#endif
	.ti_asid_bitmap._b[0] = __BITS(0, KERNEL_PID),
#ifdef PMAP_TLB_WIRED_UPAGES
	.ti_wired = PMAP_TLB_WIRED_UPAGES,
#endif
	.ti_lock = &pmap_tlb0_lock,
	.ti_pais = LIST_HEAD_INITIALIZER(pmap_tlb0_info.ti_pais),
#if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
	.ti_tlbinvop = TLBINV_NOBODY,
#endif
};

#undef IFCONSTANT

#if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
struct pmap_tlb_info *pmap_tlbs[PMAP_TLB_MAX] = {
	[0] = &pmap_tlb0_info,
};
u_int pmap_ntlbs = 1;
#endif

#ifdef MULTIPROCESSOR
__unused static inline bool
pmap_tlb_intersecting_active_p(pmap_t pm, struct pmap_tlb_info *ti)
{
#if PMAP_TLB_MAX == 1
	return !kcpuset_iszero(pm->pm_active);
#else
	return kcpuset_intersecting_p(pm->pm_active, ti->ti_kcpuset);
#endif
}

static inline bool
pmap_tlb_intersecting_onproc_p(pmap_t pm, struct pmap_tlb_info *ti)
{
#if PMAP_TLB_MAX == 1
	return !kcpuset_iszero(pm->pm_onproc);
#else
	return kcpuset_intersecting_p(pm->pm_onproc, ti->ti_kcpuset);
#endif
}
#endif

static void
pmap_tlb_pai_check(struct pmap_tlb_info *ti, bool locked_p)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(ti=%#jx)", (uintptr_t)ti, 0, 0, 0);

#ifdef DIAGNOSTIC
	struct pmap_asid_info *pai;
	if (!locked_p)
		TLBINFO_LOCK(ti);
	LIST_FOREACH(pai, &ti->ti_pais, pai_link) {
		KASSERT(pai != NULL);
		KASSERT(PAI_PMAP(pai, ti) != pmap_kernel());
		KASSERT(pai->pai_asid > KERNEL_PID);
		KASSERTMSG(pai->pai_asid <= ti->ti_asid_max,
		    "pm %p asid %#x", PAI_PMAP(pai, ti), pai->pai_asid);
		KASSERTMSG(TLBINFO_ASID_INUSE_P(ti, pai->pai_asid),
		    "pm %p asid %u", PAI_PMAP(pai, ti), pai->pai_asid);
#ifdef MULTIPROCESSOR
		KASSERT(pmap_tlb_intersecting_active_p(PAI_PMAP(pai, ti), ti));
#endif
	}
	if (!locked_p)
		TLBINFO_UNLOCK(ti);
#endif
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

static void
pmap_tlb_pai_reset(struct pmap_tlb_info *ti, struct pmap_asid_info *pai,
	struct pmap *pm)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(ti=%#jx, pai=%#jx, pm=%#jx): asid %u",
	    (uintptr_t)ti, (uintptr_t)pai, (uintptr_t)pm, pai->pai_asid);

	/*
	 * We must have an ASID but it must not be onproc (on a processor).
	 */
	KASSERT(pai->pai_asid > KERNEL_PID);
	KASSERT(pai->pai_asid <= ti->ti_asid_max);
#if defined(MULTIPROCESSOR)
	KASSERT(pmap_tlb_intersecting_active_p(pm, ti));
	KASSERT(!pmap_tlb_intersecting_onproc_p(pm, ti));
#endif
	LIST_REMOVE(pai, pai_link);
#ifdef DIAGNOSTIC
	pai->pai_link.le_prev = NULL;	/* tagged as unlinked */
#endif
	/*
	 * If the platform has a cheap way to flush ASIDs then free the ASID
	 * back into the pool.  On multiprocessor systems, we will flush the
	 * ASID from the TLB when it's allocated.  That way we know the flush
	 * was always done in the correct TLB space.  On uniprocessor systems,
	 * just do the flush now since we know that it has been used.  This has
	 * a bit less overhead.  Either way, this will mean that we will only
	 * need to flush all ASIDs if all ASIDs are in use and we need to
	 * allocate a new one.
	 */
	if (PMAP_TLB_FLUSH_ASID_ON_RESET) {
#ifndef MULTIPROCESSOR
		UVMHIST_LOG(maphist, " ... asid %u flushed", pai->pai_asid, 0,
		    0, 0);
		tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
#endif
		if (TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) {
			UVMHIST_LOG(maphist, " ... asid marked unused",
			    pai->pai_asid, 0, 0, 0);
			TLBINFO_ASID_MARK_UNUSED(ti, pai->pai_asid);
			ti->ti_asids_free++;
		}
	}
	/*
	 * Note that we don't mark the ASID as not in use in the TLB's ASID
	 * bitmap (thus it can't be allocated until the ASID space is exhausted
	 * and therefore reinitialized).  We don't want to flush the TLB for
	 * entries belonging to this ASID so we will let natural TLB entry
	 * replacement flush them out of the TLB.  Any new entries for this
	 * pmap will need a new ASID allocated.
	 */
	pai->pai_asid = 0;

#if defined(MULTIPROCESSOR)
	/*
	 * The bits in pm_active belonging to this TLB can only be changed
	 * while this TLB's lock is held.
	 */
#if PMAP_TLB_MAX == 1
	kcpuset_zero(pm->pm_active);
#else
	kcpuset_remove(pm->pm_active, ti->ti_kcpuset);
#endif
	KASSERT(!pmap_tlb_intersecting_active_p(pm, ti));
#endif /* MULTIPROCESSOR */

	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

void
pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *ti)
{
#if defined(MULTIPROCESSOR) && !defined(PMAP_TLB_NO_SYNCI_EVCNT)
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_desired,
	    EVCNT_TYPE_MISC, NULL,
	    ti->ti_name, "icache syncs desired");
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_asts,
	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
	    ti->ti_name, "icache sync asts");
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_all,
	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_asts,
	    ti->ti_name, "icache full syncs");
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_pages,
	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_asts,
	    ti->ti_name, "icache pages synced");
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_duplicate,
	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
	    ti->ti_name, "icache dup pages skipped");
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_deferred,
	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
	    ti->ti_name, "icache pages deferred");
#endif /* MULTIPROCESSOR && !PMAP_TLB_NO_SYNCI_EVCNT */
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_asid_reinits,
	    EVCNT_TYPE_MISC, NULL,
	    ti->ti_name, "asid pool reinit");
}

void
pmap_tlb_info_init(struct pmap_tlb_info *ti)
{
#if defined(MULTIPROCESSOR)
#if PMAP_TLB_MAX == 1
	KASSERT(ti == &pmap_tlb0_info);
#else
	if (ti != &pmap_tlb0_info) {
		KASSERT(pmap_ntlbs < PMAP_TLB_MAX);

		KASSERT(pmap_tlbs[pmap_ntlbs] == NULL);

		ti->ti_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		TLBINFO_ASID_RESET(ti);
		ti->ti_asid_hint = KERNEL_PID + 1;
		ti->ti_asid_max = pmap_tlbs[0]->ti_asid_max;
		ti->ti_asids_free = TLBINFO_ASID_INITIAL_FREE(ti->ti_asid_max);
		ti->ti_tlbinvop = TLBINV_NOBODY;
		ti->ti_victim = NULL;
		kcpuset_create(&ti->ti_kcpuset, true);
		ti->ti_index = pmap_ntlbs++;
		ti->ti_wired = 0;
		pmap_tlbs[ti->ti_index] = ti;
		snprintf(ti->ti_name, sizeof(ti->ti_name), "tlb%u",
		    ti->ti_index);
		pmap_tlb_info_evcnt_attach(ti);

		KASSERT(ti->ti_asid_max < PMAP_TLB_BITMAP_LENGTH);
		return;
	}
#endif
#endif /* MULTIPROCESSOR */
	KASSERT(ti == &pmap_tlb0_info);
	KASSERT(ti->ti_lock == &pmap_tlb0_lock);

	mutex_init(ti->ti_lock, MUTEX_DEFAULT, IPL_SCHED);
#if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
	kcpuset_create(&ti->ti_kcpuset, true);
	kcpuset_set(ti->ti_kcpuset, cpu_index(curcpu()));
#endif

	const tlb_asid_t asid_max = pmap_md_tlb_asid_max();
	if (ti->ti_asid_max == 0 || asid_max < ti->ti_asid_max) {
		ti->ti_asid_max = asid_max;
		ti->ti_asids_free = TLBINFO_ASID_INITIAL_FREE(ti->ti_asid_max);
	}

	KASSERT(__type_fit(tlb_asid_t, ti->ti_asid_max + 1));
	KASSERT(ti->ti_asid_max < PMAP_TLB_BITMAP_LENGTH);
}

#if defined(MULTIPROCESSOR)
void
pmap_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
{
	KASSERT(!CPU_IS_PRIMARY(ci));
	KASSERT(ci->ci_data.cpu_idlelwp != NULL);
	KASSERT(cold);

	TLBINFO_LOCK(ti);
#if PMAP_TLB_MAX > 1
	kcpuset_set(ti->ti_kcpuset, cpu_index(ci));
	cpu_set_tlb_info(ci, ti);
#endif

	/*
	 * Do any MD tlb info init.
	 */
	pmap_md_tlb_info_attach(ti, ci);

	/*
	 * The kernel pmap uses the kcpuset_running set so it's always
	 * up-to-date.
	 */
	TLBINFO_UNLOCK(ti);
}
#endif /* MULTIPROCESSOR */

#ifdef DIAGNOSTIC
static size_t
pmap_tlb_asid_count(struct pmap_tlb_info *ti)
{
	size_t count = 0;
	for (tlb_asid_t asid = 1; asid <= ti->ti_asid_max; asid++) {
		if (TLBINFO_ASID_INUSE_P(ti, asid))
			count++;
	}
	return count;
}
#endif

static void
pmap_tlb_asid_reinitialize(struct pmap_tlb_info *ti, enum tlb_invalidate_op op)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(ti=%#jx, op=%ju)", (uintptr_t)ti, op, 0, 0);

	pmap_tlb_pai_check(ti, true);

	ti->ti_evcnt_asid_reinits.ev_count++;

	/*
	 * First, clear the ASID bitmap (except for the ASIDs up to
	 * KERNEL_PID, which belong to the kernel).
	 */
	ti->ti_asids_free = TLBINFO_ASID_INITIAL_FREE(ti->ti_asid_max);
	ti->ti_asid_hint = KERNEL_PID + 1;
	TLBINFO_ASID_RESET(ti);

	switch (op) {
#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
	case TLBINV_ALL:
		tlb_invalidate_all();
		break;
	case TLBINV_ALLUSER:
		tlb_invalidate_asids(KERNEL_PID + 1, ti->ti_asid_max);
		break;
#endif /* MULTIPROCESSOR && PMAP_TLB_NEED_SHOOTDOWN */
	case TLBINV_NOBODY: {
		/*
		 * If we are just reclaiming ASIDs in the TLB, let's go find
		 * what ASIDs are in use in the TLB.  Since this is a
		 * semi-expensive operation, we don't want to do it too often.
		 * So if more than half of the ASIDs are in use, we don't have
		 * enough free ASIDs, so invalidate the TLB entries with ASIDs
		 * and clear the ASID bitmap.  That will force everyone to
		 * allocate a new ASID.
		 */
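		/*
		 * For example (illustrative): with ti_asid_max == 255,
		 * finding 127 (asid_max / 2) or more ASIDs live in the
		 * TLB triggers the flush-everything path below, while
		 * finding, say, 40 simply leaves those 40 marked used
		 * and reduces ti_asids_free by 40.
		 */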
#if !defined(MULTIPROCESSOR) || defined(PMAP_TLB_NEED_SHOOTDOWN)
		pmap_tlb_asid_check();
		const u_int asids_found = tlb_record_asids(
		    ti->ti_asid_bitmap._b, ti->ti_asid_max);
		pmap_tlb_asid_check();
#ifdef DIAGNOSTIC
		const u_int asids_count = pmap_tlb_asid_count(ti);
		KASSERTMSG(asids_found == asids_count,
		    "found %u != count %u", asids_found, asids_count);
#endif
		if (__predict_false(asids_found >= ti->ti_asid_max / 2)) {
			tlb_invalidate_asids(KERNEL_PID + 1, ti->ti_asid_max);
#else /* MULTIPROCESSOR && !PMAP_TLB_NEED_SHOOTDOWN */
			/*
			 * For those systems (PowerPC) that don't require
			 * cross cpu TLB shootdowns, we have to invalidate the
			 * entire TLB because we can't record the ASIDs in use
			 * on the other CPUs.  This is hopefully cheaper than
			 * trying to use an IPI to record all the ASIDs on all
			 * the CPUs (which would be a synchronization
			 * nightmare).
			 */
			tlb_invalidate_all();
#endif /* MULTIPROCESSOR && !PMAP_TLB_NEED_SHOOTDOWN */
			TLBINFO_ASID_RESET(ti);
			ti->ti_asids_free = TLBINFO_ASID_INITIAL_FREE(
				ti->ti_asid_max);
#if !defined(MULTIPROCESSOR) || defined(PMAP_TLB_NEED_SHOOTDOWN)
		} else {
			ti->ti_asids_free -= asids_found;
		}
#endif /* !MULTIPROCESSOR || PMAP_TLB_NEED_SHOOTDOWN */
		KASSERTMSG(ti->ti_asids_free <= ti->ti_asid_max, "%u",
		    ti->ti_asids_free);
		break;
	}
	default:
		panic("%s: unexpected op %d", __func__, op);
	}

	/*
	 * Now go through the active ASIDs.  If the ASID is on a processor or
	 * we aren't invalidating all ASIDs and the TLB has an entry owned by
	 * that ASID, mark it as in use.  Otherwise release the ASID.
	 */
	struct pmap_asid_info *pai, *next;
	for (pai = LIST_FIRST(&ti->ti_pais); pai != NULL; pai = next) {
		struct pmap * const pm = PAI_PMAP(pai, ti);
		next = LIST_NEXT(pai, pai_link);
		KASSERT(pm != pmap_kernel());
		KASSERT(pai->pai_asid > KERNEL_PID);
#if defined(MULTIPROCESSOR)
		if (pmap_tlb_intersecting_onproc_p(pm, ti)) {
			if (!TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) {
				TLBINFO_ASID_MARK_USED(ti, pai->pai_asid);
				ti->ti_asids_free--;
			}
			continue;
		}
#endif /* MULTIPROCESSOR */
		if (TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) {
			KASSERT(op == TLBINV_NOBODY);
		} else {
			pmap_tlb_pai_reset(ti, pai, pm);
		}
	}
#ifdef DIAGNOSTIC
	size_t free_count __diagused = ti->ti_asid_max - pmap_tlb_asid_count(ti);
	KASSERTMSG(free_count == ti->ti_asids_free,
	    "bitmap error: %zu != %u", free_count, ti->ti_asids_free);
#endif
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
#if PMAP_TLB_MAX == 1
#error shootdown not required for single TLB systems
#endif
void
pmap_tlb_shootdown_process(void)
{
	struct cpu_info * const ci = curcpu();
	struct pmap_tlb_info * const ti = cpu_tlb_info(ci);

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLED(maphist);

	KASSERT(cpu_intr_p());
	KASSERTMSG(ci->ci_cpl >= IPL_SCHED, "%s: cpl (%d) < IPL_SCHED (%d)",
	    __func__, ci->ci_cpl, IPL_SCHED);

	TLBINFO_LOCK(ti);
	UVMHIST_LOG(maphist, "ti %#jx", (uintptr_t)ti, 0, 0, 0);

	switch (ti->ti_tlbinvop) {
	case TLBINV_ONE: {
		/*
		 * We only need to invalidate one user ASID.
		 */
		UVMHIST_LOG(maphist, "TLBINV_ONE ti->ti_victim %#jx",
		    (uintptr_t)ti->ti_victim, 0, 0, 0);
		struct pmap_asid_info * const pai = PMAP_PAI(ti->ti_victim, ti);
		KASSERT(ti->ti_victim != pmap_kernel());
		if (pmap_tlb_intersecting_onproc_p(ti->ti_victim, ti)) {
			UVMHIST_LOG(maphist, "... onproc asid %jd",
			    pai->pai_asid, 0, 0, 0);
			/*
			 * The victim is an active pmap so we will just
			 * invalidate its TLB entries.
			 */
			KASSERT(pai->pai_asid > KERNEL_PID);
			pmap_tlb_asid_check();
			tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
			pmap_tlb_asid_check();
		} else if (pai->pai_asid) {
			UVMHIST_LOG(maphist, "... not active asid %jd",
			    pai->pai_asid, 0, 0, 0);
			/*
			 * The victim is no longer an active pmap for this TLB.
			 * So simply clear its ASID and when pmap_activate is
			 * next called for this pmap, it will allocate a new
			 * ASID.
			 */
			pmap_tlb_pai_reset(ti, pai, PAI_PMAP(pai, ti));
		}
		break;
	}
	case TLBINV_ALLUSER:
		/*
		 * Flush all user TLB entries.
		 */
		pmap_tlb_asid_reinitialize(ti, TLBINV_ALLUSER);
		break;
	case TLBINV_ALLKERNEL:
		/*
		 * We need to invalidate all global TLB entries.
		 */
		pmap_tlb_asid_check();
		tlb_invalidate_globals();
		pmap_tlb_asid_check();
		break;
	case TLBINV_ALL:
		/*
		 * Flush all the TLB entries (user and kernel).
		 */
		pmap_tlb_asid_reinitialize(ti, TLBINV_ALL);
		break;
	case TLBINV_NOBODY:
		/*
		 * Might be spurious or another SMT CPU sharing this TLB
		 * could have already done the work.
		 */
		break;
	}

	/*
	 * Indicate we are done with the shootdown event.
	 */
	ti->ti_victim = NULL;
	ti->ti_tlbinvop = TLBINV_NOBODY;
	TLBINFO_UNLOCK(ti);
}

/*
 * This state machine could be encoded into an array of integers but since all
 * the values fit in 3 bits, the 5 entry "table" fits in a 16 bit value which
 * can be loaded in a single instruction.
 */
#define	TLBINV_MAP(op, nobody, one, alluser, allkernel, all)	\
	((((   (nobody) << 3 * TLBINV_NOBODY)			\
	 | (      (one) << 3 * TLBINV_ONE)			\
	 | (  (alluser) << 3 * TLBINV_ALLUSER)			\
	 | ((allkernel) << 3 * TLBINV_ALLKERNEL)		\
	 | (      (all) << 3 * TLBINV_ALL)) >> 3 * (op)) & 7)

#define	TLBINV_USER_MAP(op)	\
	TLBINV_MAP(op, TLBINV_ONE, TLBINV_ALLUSER, TLBINV_ALLUSER,	\
	    TLBINV_ALL, TLBINV_ALL)

#define	TLBINV_KERNEL_MAP(op)	\
	TLBINV_MAP(op, TLBINV_ALLKERNEL, TLBINV_ALL, TLBINV_ALL,	\
	    TLBINV_ALLKERNEL, TLBINV_ALL)
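
/*
 * Worked example (the transitions follow directly from the argument
 * positions above): a user shootdown arriving when nothing is pending,
 * TLBINV_USER_MAP(TLBINV_NOBODY), yields TLBINV_ONE; a second user
 * shootdown widens that to TLBINV_ALLUSER, where further user shootdowns
 * leave it; a kernel shootdown on top of a pending TLBINV_ONE,
 * TLBINV_KERNEL_MAP(TLBINV_ONE), escalates to TLBINV_ALL.
 */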

bool
pmap_tlb_shootdown_bystanders(pmap_t pm)
{
	/*
	 * We don't need to deal with our own TLB.
	 */

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pm, 0, 0, 0);

	KASSERT(kpreempt_disabled());

	const struct cpu_info * const ci = curcpu();

	kcpuset_t *pm_active = ci->ci_shootdowncpus;
	kcpuset_copy(pm_active, pm->pm_active);
	kcpuset_remove(pm_active, cpu_tlb_info(curcpu())->ti_kcpuset);
	const bool kernel_p = (pm == pmap_kernel());
	bool ipi_sent = false;

	/*
	 * If pm_active gains more bits, that happens after all our changes
	 * have been made, so those CPUs will already be cognizant of them.
	 */

	for (size_t i = 0; !kcpuset_iszero(pm_active); i++) {
		KASSERT(i < pmap_ntlbs);
		struct pmap_tlb_info * const ti = pmap_tlbs[i];
		KASSERT(tlbinfo_index(ti) == i);
		UVMHIST_LOG(maphist, "ti %#jx", (uintptr_t)ti, 0, 0, 0);
		/*
		 * Skip this TLB if there are no active mappings for it.
		 */
		if (!kcpuset_intersecting_p(pm_active, ti->ti_kcpuset))
			continue;
		struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
		kcpuset_remove(pm_active, ti->ti_kcpuset);
		TLBINFO_LOCK(ti);
		cpuid_t j = kcpuset_ffs_intersecting(pm->pm_onproc,
		    ti->ti_kcpuset);
		// post decrement since ffs returns bit + 1 or 0 if no bit
		if (j-- > 0) {
			if (kernel_p) {
				ti->ti_tlbinvop =
				    TLBINV_KERNEL_MAP(ti->ti_tlbinvop);
				ti->ti_victim = NULL;
			} else {
				KASSERT(pai->pai_asid);
				if (__predict_false(ti->ti_victim == pm)) {
					KASSERT(ti->ti_tlbinvop == TLBINV_ONE);
					/*
					 * We still need to invalidate this one
					 * ASID so there's nothing to change.
					 */
				} else {
					ti->ti_tlbinvop =
					    TLBINV_USER_MAP(ti->ti_tlbinvop);
					if (ti->ti_tlbinvop == TLBINV_ONE)
						ti->ti_victim = pm;
					else
						ti->ti_victim = NULL;
				}
			}
			UVMHIST_LOG(maphist, "tlbinvop %jx victim %#jx",
			    ti->ti_tlbinvop, (uintptr_t)ti->ti_victim, 0, 0);
			TLBINFO_UNLOCK(ti);
			/*
			 * Now we can send out the shootdown IPIs to a CPU
			 * that shares this TLB and is currently using this
			 * pmap.  That CPU will process the IPI and do all
			 * the work.  Any other CPUs sharing that TLB will
			 * take advantage of that work.  pm_onproc might
			 * change now that we have released the lock but we
			 * can tolerate spurious shootdowns.
			 */
			cpu_send_ipi(cpu_lookup(j), IPI_SHOOTDOWN);
			ipi_sent = true;
			continue;
		}
		if (!pmap_tlb_intersecting_active_p(pm, ti)) {
			UVMHIST_LOG(maphist, "pm %#jx not active",
			    (uintptr_t)pm, 0, 0, 0);
			/*
			 * If this pmap has an ASID assigned but it's not
			 * currently running, nuke its ASID.  Next time the
			 * pmap is activated, it will allocate a new ASID.
			 * And best of all, we avoid an IPI.
			 */
			KASSERT(!kernel_p);
			pmap_tlb_pai_reset(ti, pai, pm);
			//ti->ti_evcnt_lazy_shots.ev_count++;
		}
		TLBINFO_UNLOCK(ti);
	}

	UVMHIST_LOG(maphist, " <-- done (ipi_sent=%jd)", ipi_sent, 0, 0, 0);

	return ipi_sent;
}
#endif /* MULTIPROCESSOR && PMAP_TLB_NEED_SHOOTDOWN */

int
pmap_tlb_update_addr(pmap_t pm, vaddr_t va, pt_entry_t pte, u_int flags)
{
	KASSERT(kpreempt_disabled());

	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
	int rv = -1;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, " (pm=%#jx va=%#jx, pte=%#jx flags=%#jx)",
	    (uintptr_t)pm, va, pte_value(pte), flags);

	KASSERTMSG(pte_valid_p(pte), "va %#"PRIxVADDR" %#"PRIxPTE,
	    va, pte_value(pte));

	TLBINFO_LOCK(ti);
	if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) {
		pmap_tlb_asid_check();
		rv = tlb_update_addr(va, pai->pai_asid, pte,
		    (flags & PMAP_TLB_INSERT) != 0);
		pmap_tlb_asid_check();
		UVMHIST_LOG(maphist,
		    "   %jd <-- tlb_update_addr(%#jx, %#jx, %#jx, ...)",
		    rv, va, pai->pai_asid, pte_value(pte));
		KASSERTMSG((flags & PMAP_TLB_INSERT) == 0 || rv == 1,
		    "pmap %p (asid %u) va %#"PRIxVADDR" pte %#"PRIxPTE" rv %d",
		    pm, pai->pai_asid, va, pte_value(pte), rv);
	}
#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
	if (flags & PMAP_TLB_NEED_IPI)
		pm->pm_shootdown_pending = 1;
#endif
	TLBINFO_UNLOCK(ti);

	UVMHIST_LOG(maphist, "   <-- done (rv=%jd)", rv, 0, 0, 0);

	return rv;
}

void
pmap_tlb_invalidate_addr(pmap_t pm, vaddr_t va)
{
	KASSERT(kpreempt_disabled());

	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, " (pm=%#jx va=%#jx) ti=%#jx asid=%#jx",
	    (uintptr_t)pm, va, (uintptr_t)ti, pai->pai_asid);

	TLBINFO_LOCK(ti);
	if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) {
		pmap_tlb_asid_check();
		UVMHIST_LOG(maphist, " invalidating %#jx asid %#jx",
		    va, pai->pai_asid, 0, 0);
		tlb_invalidate_addr(va, pai->pai_asid);
		pmap_tlb_asid_check();
	}
#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
	pm->pm_shootdown_pending = 1;
#endif
	TLBINFO_UNLOCK(ti);
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

static inline void
pmap_tlb_asid_alloc(struct pmap_tlb_info *ti, pmap_t pm,
	struct pmap_asid_info *pai)
{
	/*
	 * We shouldn't have an ASID assigned, and thus must be neither
	 * onproc nor active.
	 */
	KASSERT(pm != pmap_kernel());
	KASSERT(pai->pai_asid == 0);
	KASSERT(pai->pai_link.le_prev == NULL);
#if defined(MULTIPROCESSOR)
	KASSERT(!pmap_tlb_intersecting_onproc_p(pm, ti));
	KASSERT(!pmap_tlb_intersecting_active_p(pm, ti));
#endif
	KASSERT(ti->ti_asids_free > 0);
	KASSERT(ti->ti_asid_hint > KERNEL_PID);

	/*
	 * If the last ASID allocated was the maximum ASID, then the
	 * hint will be out of range.  Reset the hint to the first
	 * available ASID.
	 */
	if (PMAP_TLB_FLUSH_ASID_ON_RESET
	    && ti->ti_asid_hint > ti->ti_asid_max) {
		ti->ti_asid_hint = KERNEL_PID + 1;
	}
	KASSERTMSG(ti->ti_asid_hint <= ti->ti_asid_max, "hint %u",
	    ti->ti_asid_hint);

	/*
	 * Let's see if the hinted ASID is free.  If not, search for
	 * a new one.
	 */
	if (__predict_true(TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint))) {
		const size_t nbpw = NBBY * sizeof(ti->ti_asid_bitmap._b[0]);
		size_t i;
		u_long bits;
		for (i = 0; (bits = ~ti->ti_asid_bitmap._b[i]) == 0; i++) {
			KASSERT(i < __arraycount(ti->ti_asid_bitmap._b) - 1);
		}
		/*
		 * ffs wants to find the first bit set while we want
		 * to find the first bit cleared.
		 */
		const u_int n = __builtin_ffsl(bits) - 1;
		KASSERTMSG((bits << (nbpw - (n+1))) == (1ul << (nbpw-1)),
		    "n %u bits %#lx", n, bits);
		KASSERT(n < nbpw);
		ti->ti_asid_hint = n + i * nbpw;
	}
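	/*
	 * Illustrative example of the search above (assuming 64-bit bitmap
	 * words): if _b[0] == ~0UL and _b[1] == 0xff, the loop stops at
	 * i == 1 with bits == ~(u_long)0xff, __builtin_ffsl(bits) returns 9,
	 * so n == 8 and the new hint becomes 8 + 1 * 64 == 72, the first
	 * clear bit in the whole bitmap.
	 */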

	KASSERT(ti->ti_asid_hint > KERNEL_PID);
	KASSERT(ti->ti_asid_hint <= ti->ti_asid_max);
	KASSERTMSG(PMAP_TLB_FLUSH_ASID_ON_RESET
	    || TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint - 1),
	    "hint %u bitmap %p", ti->ti_asid_hint, &ti->ti_asid_bitmap);
	KASSERTMSG(!TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint),
	    "hint %u bitmap %p", ti->ti_asid_hint, &ti->ti_asid_bitmap);

	/*
	 * The hint contains our next ASID so take it and advance the hint.
	 * Mark it as used and insert the pai into the list of active ASIDs.
	 * There is also one less ASID free in this TLB.
	 */
	pai->pai_asid = ti->ti_asid_hint++;
#ifdef MULTIPROCESSOR
	if (PMAP_TLB_FLUSH_ASID_ON_RESET) {
		/*
		 * Clean the new ASID from the TLB.
		 */
		tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
	}
#endif
	TLBINFO_ASID_MARK_USED(ti, pai->pai_asid);
	LIST_INSERT_HEAD(&ti->ti_pais, pai, pai_link);
	ti->ti_asids_free--;

#if defined(MULTIPROCESSOR)
	/*
	 * Mark that we now have an active ASID for all CPUs sharing this TLB.
	 * The bits in pm_active belonging to this TLB can only be changed
	 * while this TLB's lock is held.
	 */
#if PMAP_TLB_MAX == 1
	kcpuset_copy(pm->pm_active, kcpuset_running);
#else
	kcpuset_merge(pm->pm_active, ti->ti_kcpuset);
#endif
#endif
}

/*
 * Acquire a TLB address space tag (called an ASID or TLBPID) and return it.
 * The ASID might have been acquired previously.
 */
void
pmap_tlb_asid_acquire(pmap_t pm, struct lwp *l)
{
	KASSERT(kpreempt_disabled());

	struct cpu_info * const ci = l->l_cpu;
	struct pmap_tlb_info * const ti = cpu_tlb_info(ci);
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(pm=%#jx, l=%#jx, ti=%#jx)", (uintptr_t)pm,
	    (uintptr_t)l, (uintptr_t)ti, 0);

	/*
	 * The kernel uses a fixed ASID and thus doesn't need to acquire one.
	 */
	if (pm == pmap_kernel()) {
		UVMHIST_LOG(maphist, " <-- done (kernel)", 0, 0, 0, 0);
		return;
	}

	TLBINFO_LOCK(ti);
	KASSERT(pai->pai_asid <= KERNEL_PID || pai->pai_link.le_prev != NULL);
	KASSERT(pai->pai_asid > KERNEL_PID || pai->pai_link.le_prev == NULL);
	pmap_tlb_pai_check(ti, true);

	if (__predict_false(!tlbinfo_asids_p(ti))) {
#if defined(MULTIPROCESSOR)
		/*
		 * Mark that we are active for all CPUs sharing this TLB.
		 * The bits in pm_active belonging to this TLB can only
		 * be changed while this TLB's lock is held.
		 */
#if PMAP_TLB_MAX == 1
		kcpuset_copy(pm->pm_active, kcpuset_running);
#else
		kcpuset_merge(pm->pm_active, ti->ti_kcpuset);
#endif
#endif
	} else if (__predict_false(!PMAP_PAI_ASIDVALID_P(pai, ti))) {
		/*
		 * If we've run out of ASIDs, reinitialize the ASID space.
		 */
		if (__predict_false(tlbinfo_noasids_p(ti))) {
			KASSERT(l == curlwp);
			UVMHIST_LOG(maphist, " asid reinit", 0, 0, 0, 0);
			pmap_tlb_asid_reinitialize(ti, TLBINV_NOBODY);
			KASSERT(!tlbinfo_noasids_p(ti));
		}

		/*
		 * Get an ASID.
		 */
		pmap_tlb_asid_alloc(ti, pm, pai);
		UVMHIST_LOG(maphist, "allocated asid %#jx", pai->pai_asid,
		    0, 0, 0);
	}
	pmap_tlb_pai_check(ti, true);
#if defined(MULTIPROCESSOR)
	KASSERT(kcpuset_isset(pm->pm_active, cpu_index(ci)));
#endif

	if (l == curlwp) {
#if defined(MULTIPROCESSOR)
		/*
		 * The bits in pm_onproc belonging to this TLB can only
		 * be changed while this TLB's lock is held unless atomic
		 * operations are used.
		 */
		KASSERT(pm != pmap_kernel());
		kcpuset_atomic_set(pm->pm_onproc, cpu_index(ci));
#endif
		ci->ci_pmap_asid_cur = pai->pai_asid;
		UVMHIST_LOG(maphist, "setting asid to %#jx", pai->pai_asid,
		    0, 0, 0);
		tlb_set_asid(pai->pai_asid, pm);
		pmap_tlb_asid_check();
	} else {
		printf("%s: l (%p) != curlwp %p\n", __func__, l, curlwp);
	}
	TLBINFO_UNLOCK(ti);
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

void
pmap_tlb_asid_deactivate(pmap_t pm)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pm, 0, 0, 0);

	KASSERT(kpreempt_disabled());
#if defined(MULTIPROCESSOR)
	/*
	 * The kernel pmap is always onproc and active and must never have
	 * those bits cleared.  If pmap_remove_all was called, it has already
	 * deactivated the pmap and thus onproc will be 0, so there's nothing
	 * to do.
	 */
	if (pm != pmap_kernel() && !kcpuset_iszero(pm->pm_onproc)) {
		struct cpu_info * const ci = curcpu();
		KASSERT(!cpu_intr_p());
		KASSERTMSG(kcpuset_isset(pm->pm_onproc, cpu_index(ci)),
		    "%s: pmap %p onproc %p doesn't include cpu %d (%p)",
		    __func__, pm, pm->pm_onproc, cpu_index(ci), ci);
		/*
		 * The bits in pm_onproc that belong to this TLB can
		 * be changed while this TLB's lock is not held as long
		 * as we use atomic ops.
		 */
		kcpuset_atomic_clear(pm->pm_onproc, cpu_index(ci));
	}
#endif
	curcpu()->ci_pmap_asid_cur = KERNEL_PID;
	tlb_set_asid(KERNEL_PID, pmap_kernel());

	pmap_tlb_pai_check(cpu_tlb_info(curcpu()), false);
#if defined(DEBUG)
	pmap_tlb_asid_check();
#endif
	UVMHIST_LOG(maphist, " <-- done (pm=%#jx)", (uintptr_t)pm, 0, 0, 0);
}

void
pmap_tlb_asid_release_all(struct pmap *pm)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(pm=%#jx)", (uintptr_t)pm, 0, 0, 0);

	KASSERT(pm != pmap_kernel());
#if defined(MULTIPROCESSOR)
	//KASSERT(!kcpuset_iszero(pm->pm_onproc)); // XXX
	struct cpu_info * const ci __diagused = curcpu();
	KASSERT(!kcpuset_isotherset(pm->pm_onproc, cpu_index(ci)));
#if PMAP_TLB_MAX > 1
	for (u_int i = 0; !kcpuset_iszero(pm->pm_active); i++) {
		KASSERT(i < pmap_ntlbs);
		struct pmap_tlb_info * const ti = pmap_tlbs[i];
#else
		struct pmap_tlb_info * const ti = &pmap_tlb0_info;
#endif
		struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
		TLBINFO_LOCK(ti);
		if (PMAP_PAI_ASIDVALID_P(pai, ti)) {
			/*
			 * This pmap should not be in use by any other cpu so
			 * we can just reset and be happy.
			 */
			if (ti->ti_victim == pm)
				ti->ti_victim = NULL;
			if (__predict_true(tlbinfo_asids_p(ti)))
				pmap_tlb_pai_reset(ti, pai, pm);
		}
		KASSERT(pai->pai_link.le_prev == NULL);
		TLBINFO_UNLOCK(ti);
#if PMAP_TLB_MAX > 1
	}
#endif
#ifdef DIAGNOSTIC
	for (size_t i = 0; i < (PMAP_TLB_MAX > 1 ? pmap_ntlbs : 1); i++) {
		KASSERTMSG(pm->pm_pai[i].pai_asid == 0,
		    "pm %p i %zu asid %u",
		    pm, i, pm->pm_pai[i].pai_asid);
	}
#endif
#else
	/*
	 * Handle the case of a UP kernel which only has, at most, one TLB.
	 * If the pmap has an ASID allocated, free it.
	 */
	struct pmap_tlb_info * const ti = &pmap_tlb0_info;
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
	TLBINFO_LOCK(ti);
	if (pai->pai_asid > KERNEL_PID) {
		if (curcpu()->ci_pmap_asid_cur == pai->pai_asid) {
			tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
		} else {
			pmap_tlb_pai_reset(ti, pai, pm);
		}
	}
	TLBINFO_UNLOCK(ti);
#endif /* MULTIPROCESSOR */
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

void
pmap_tlb_asid_check(void)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLED(maphist);

#ifdef DEBUG
	kpreempt_disable();
	const tlb_asid_t asid __debugused = tlb_get_asid();
	UVMHIST_LOG(maphist, " asid %u vs pmap_cur_asid %u", asid,
	    curcpu()->ci_pmap_asid_cur, 0, 0);
	KDASSERTMSG(asid == curcpu()->ci_pmap_asid_cur,
	   "%s: asid (%#x) != current asid (%#x)",
	    __func__, asid, curcpu()->ci_pmap_asid_cur);
	kpreempt_enable();
#endif
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

#ifdef DEBUG
void
pmap_tlb_check(pmap_t pm, bool (*func)(void *, vaddr_t, tlb_asid_t, pt_entry_t))
{
	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
	TLBINFO_LOCK(ti);
	if (pm == pmap_kernel() || pai->pai_asid > KERNEL_PID)
		tlb_walk(pm, func);
	TLBINFO_UNLOCK(ti);
}
#endif /* DEBUG */

#ifdef DDB
void
pmap_db_tlb_print(struct pmap *pm,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
#if !defined(MULTIPROCESSOR) || PMAP_TLB_MAX == 1
	pr(" asid %5u\n", pm->pm_pai[0].pai_asid);
#else
	for (size_t i = 0; i < (PMAP_TLB_MAX > 1 ? pmap_ntlbs : 1); i++) {
		pr(" tlb %zu  asid %5u\n", i, pm->pm_pai[i].pai_asid);
	}
#endif
}
#endif /* DDB */