1/*-
2 * Copyright (c) 2003 Jake Burkholder.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD$");
29
30#include "opt_pmap.h"
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/lock.h>
35#include <sys/mutex.h>
36#include <sys/smp.h>
37#include <sys/sysctl.h>
38
39#include <vm/vm.h>
40#include <vm/pmap.h>
41
42#include <machine/cache.h>
43#include <machine/cpufunc.h>
44#include <machine/lsu.h>
45#include <machine/smp.h>
46#include <machine/tlb.h>
47
48#define	SPITFIRE_TLB_ENTRIES	64
49
50PMAP_STATS_VAR(spitfire_dcache_npage_inval);
51PMAP_STATS_VAR(spitfire_dcache_npage_inval_match);
52PMAP_STATS_VAR(spitfire_icache_npage_inval);
53PMAP_STATS_VAR(spitfire_icache_npage_inval_match);
54
55/*
56 * Enable the level 1 caches.
57 */
58void
59spitfire_cache_enable(u_int cpu_impl __unused)
60{
61	u_long lsu;
62
63	lsu = ldxa(0, ASI_LSU_CTL_REG);
64	stxa_sync(0, ASI_LSU_CTL_REG, lsu | LSU_IC | LSU_DC);
65}
66
67/*
68 * Flush all lines from the level 1 caches.
69 */
70void
71spitfire_cache_flush(void)
72{
73	u_long addr;
74
75	for (addr = 0; addr < PCPU_GET(cache.dc_size);
76	    addr += PCPU_GET(cache.dc_linesize))
77		stxa_sync(addr, ASI_DCACHE_TAG, 0);
78	for (addr = 0; addr < PCPU_GET(cache.ic_size);
79	    addr += PCPU_GET(cache.ic_linesize))
80		stxa_sync(addr, ASI_ICACHE_TAG, 0);
81}
82
/*
 * Flush a physical page from the data cache.
 *
 * The data cache is tagged with physical address bits: walk every
 * cache line via the diagnostic tag ASI, and invalidate each valid
 * line whose tag matches the given page.  The same invalidation is
 * triggered on the other CPUs via an IPI before the local walk, and
 * completion of the remote side is awaited at the end.
 *
 * pa must be page aligned.
 */
void
spitfire_dcache_page_inval(vm_paddr_t pa)
{
	u_long target;
	void *cookie;
	u_long addr;
	u_long tag;

	KASSERT((pa & PAGE_MASK) == 0, ("%s: pa not page aligned", __func__));
	PMAP_STATS_INC(spitfire_dcache_npage_inval);
	/*
	 * Tag field value (already shifted into its in-register position)
	 * that a line caching this page will carry:
	 * (pa >> PAGE_SHIFT) << DC_TAG_SHIFT.
	 */
	target = pa >> (PAGE_SHIFT - DC_TAG_SHIFT);
	/* Kick off the same page invalidation on the other CPUs. */
	cookie = ipi_dcache_page_inval(tl_ipi_spitfire_dcache_page_inval, pa);
	for (addr = 0; addr < PCPU_GET(cache.dc_size);
	    addr += PCPU_GET(cache.dc_linesize)) {
		tag = ldxa(addr, ASI_DCACHE_TAG);
		/* Skip lines with no valid bits set. */
		if (((tag >> DC_VALID_SHIFT) & DC_VALID_MASK) == 0)
			continue;
		/* Reduce to the tag field only; this clears the valid bits. */
		tag &= DC_TAG_MASK << DC_TAG_SHIFT;
		if (tag == target) {
			PMAP_STATS_INC(spitfire_dcache_npage_inval_match);
			/*
			 * Store the tag back with the valid bits cleared,
			 * invalidating the line.
			 */
			stxa_sync(addr, ASI_DCACHE_TAG, tag);
		}
	}
	/* Wait for the remote CPUs to finish their invalidations. */
	ipi_wait(cookie);
}
111
/*
 * Flush a physical page from the instruction cache.
 *
 * Same scheme as the data cache flush: walk all lines via the
 * diagnostic tag ASI, invalidate valid lines whose tag matches the
 * page, and mirror the operation on the other CPUs via an IPI.
 *
 * pa must be page aligned.
 */
void
spitfire_icache_page_inval(vm_paddr_t pa)
{
	/*
	 * The ldda below loads into the %g0/%g1 register pair; the tag
	 * word ends up in %g1, so bind the C variable to that register
	 * explicitly.
	 */
	register u_long tag __asm("%g1");
	u_long target;
	void *cookie;
	u_long addr;

	KASSERT((pa & PAGE_MASK) == 0, ("%s: pa not page aligned", __func__));
	PMAP_STATS_INC(spitfire_icache_npage_inval);
	/* Tag field value (pre-shifted) that a line caching pa will carry. */
	target = pa >> (PAGE_SHIFT - IC_TAG_SHIFT);
	/* Kick off the same page invalidation on the other CPUs. */
	cookie = ipi_icache_page_inval(tl_ipi_spitfire_icache_page_inval, pa);
	for (addr = 0; addr < PCPU_GET(cache.ic_size);
	    addr += PCPU_GET(cache.ic_linesize)) {
		/* I$ tags must be read with ldda rather than a plain ldxa. */
		__asm __volatile("ldda [%1] %2, %%g0" /*, %g1 */
		    : "=r" (tag) : "r" (addr), "n" (ASI_ICACHE_TAG));
		/* Skip lines with no valid bits set. */
		if (((tag >> IC_VALID_SHIFT) & IC_VALID_MASK) == 0)
			continue;
		/* Cast so the mask is formed in 64 bits before shifting. */
		tag &= (u_long)IC_TAG_MASK << IC_TAG_SHIFT;
		if (tag == target) {
			PMAP_STATS_INC(spitfire_icache_npage_inval_match);
			/*
			 * Store the tag back with the valid bits cleared,
			 * invalidating the line.
			 */
			stxa_sync(addr, ASI_ICACHE_TAG, tag);
		}
	}
	/* Wait for the remote CPUs to finish their invalidations. */
	ipi_wait(cookie);
}
141
142/*
143 * Flush all non-locked mappings from the TLBs.
144 */
145void
146spitfire_tlb_flush_nonlocked(void)
147{
148	u_int i;
149	u_int slot;
150
151	for (i = 0; i < SPITFIRE_TLB_ENTRIES; i++) {
152		slot = TLB_DAR_SLOT(TLB_DAR_T32, i);
153		if ((ldxa(slot, ASI_DTLB_DATA_ACCESS_REG) & TD_L) == 0)
154			stxa_sync(slot, ASI_DTLB_DATA_ACCESS_REG, 0);
155		if ((ldxa(slot, ASI_ITLB_DATA_ACCESS_REG) & TD_L) == 0)
156			stxa_sync(slot, ASI_ITLB_DATA_ACCESS_REG, 0);
157	}
158}
159
160/*
161 * Flush all user mappings from the TLBs.
162 */
163void
164spitfire_tlb_flush_user(void)
165{
166	u_long data;
167	u_long tag;
168	u_int i;
169	u_int slot;
170
171	for (i = 0; i < SPITFIRE_TLB_ENTRIES; i++) {
172		slot = TLB_DAR_SLOT(TLB_DAR_T32, i);
173		data = ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
174		tag = ldxa(slot, ASI_DTLB_TAG_READ_REG);
175		if ((data & TD_V) != 0 && (data & TD_L) == 0 &&
176		    TLB_TAR_CTX(tag) != TLB_CTX_KERNEL)
177			stxa_sync(slot, ASI_DTLB_DATA_ACCESS_REG, 0);
178		data = ldxa(slot, ASI_ITLB_DATA_ACCESS_REG);
179		tag = ldxa(slot, ASI_ITLB_TAG_READ_REG);
180		if ((data & TD_V) != 0 && (data & TD_L) == 0 &&
181		    TLB_TAR_CTX(tag) != TLB_CTX_KERNEL)
182			stxa_sync(slot, ASI_ITLB_DATA_ACCESS_REG, 0);
183	}
184}
185