/*-
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_pmap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tlb.h>
#include <machine/vmparam.h>

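/*
 * Demap operation counters.  The PMAP_STATS_VAR()/PMAP_STATS_INC() macros
 * only have an effect when the kernel is built with the PMAP_STATS option
 * (see opt_pmap.h); otherwise they expand to nothing.
 */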
PMAP_STATS_VAR(tlb_ncontext_demap);
PMAP_STATS_VAR(tlb_npage_demap);
PMAP_STATS_VAR(tlb_nrange_demap);

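/*
 * Hooks for the CPU model specific TLB flush routines.  They are not set
 * here; they are expected to be filled in elsewhere during CPU setup (e.g.
 * by the Spitfire or Cheetah support code).
 */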
tlb_flush_nonlocked_t *tlb_flush_nonlocked;
tlb_flush_user_t *tlb_flush_user;

/*
 * Some TLB operations must be atomic, so no interrupt or trap can be allowed
 * while they are in progress. Traps should not happen, but interrupts need to
 * be explicitly disabled. critical_enter() cannot be used here, since it only
 * disables soft interrupts.
 */

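/*
 * Demap all TLB entries belonging to the given pmap's context.  The demap is
 * triggered on other CPUs via ipi_tlb_context_demap(); if the pmap is active
 * on this CPU, the local IMMU and DMMU are demapped directly, with interrupts
 * disabled as described above.
 */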
void
tlb_context_demap(struct pmap *pm)
{
	void *cookie;
	register_t s;

	/*
	 * It is important that we are not interrupted or preempted while
	 * doing the IPIs. The interrupted CPU may hold locks, and since
	 * it will wait for the CPU that sent the IPI, this can lead
	 * to a deadlock when an interrupt comes in on that CPU and its
	 * handler tries to grab one of those locks. This will only happen for
	 * spin locks, but these IPI types are delivered even if normal
	 * interrupts are disabled, so the lock critical section will not
	 * protect the target processor from entering the IPI handler with
	 * the lock held.
	 */
	PMAP_STATS_INC(tlb_ncontext_demap);
	cookie = ipi_tlb_context_demap(pm);
	s = intr_disable();
	if (CPU_ISSET(PCPU_GET(cpuid), &pm->pm_active)) {
		KASSERT(pm->pm_context[curcpu] != -1,
		    ("tlb_context_demap: inactive pmap?"));
		stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
		stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
		flush(KERNBASE);
	}
	intr_restore(s);
	ipi_wait(cookie);
}

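/*
 * Demap the TLB entries for a single virtual page in the given pmap.  Kernel
 * mappings live in the nucleus context, user mappings in the primary context.
 */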
void
tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
	u_long flags;
	void *cookie;
	register_t s;

	PMAP_STATS_INC(tlb_npage_demap);
	cookie = ipi_tlb_page_demap(pm, va);
	s = intr_disable();
	if (CPU_ISSET(PCPU_GET(cpuid), &pm->pm_active)) {
		KASSERT(pm->pm_context[curcpu] != -1,
		    ("tlb_page_demap: inactive pmap?"));
		if (pm == kernel_pmap)
			flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
		else
			flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;

		stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
		stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
		flush(KERNBASE);
	}
	intr_restore(s);
	ipi_wait(cookie);
}

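/*
 * Demap the TLB entries for a range of virtual addresses in the given pmap,
 * one page at a time.
 */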
void
tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
	vm_offset_t va;
	void *cookie;
	u_long flags;
	register_t s;

	PMAP_STATS_INC(tlb_nrange_demap);
	cookie = ipi_tlb_range_demap(pm, start, end);
	s = intr_disable();
	if (CPU_ISSET(PCPU_GET(cpuid), &pm->pm_active)) {
		KASSERT(pm->pm_context[curcpu] != -1,
		    ("tlb_range_demap: inactive pmap?"));
		if (pm == kernel_pmap)
			flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
		else
			flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;

		for (va = start; va < end; va += PAGE_SIZE) {
			stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
			stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
			flush(KERNBASE);
		}
	}
	intr_restore(s);
	ipi_wait(cookie);
}