/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <vm/hat.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/pte.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/sysmacros.h>
#include <sys/machparam.h>
#include <sys/vtrace.h>
#include <sys/kmem.h>
#include <sys/mmu.h>
#include <sys/cmn_err.h>
#include <sys/cpu.h>
#include <sys/cpuvar.h>
#include <sys/debug.h>
#include <sys/lgrp.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/vmsystm.h>
#include <sys/bitmap.h>
#include <vm/rm.h>
#include <sys/t_lock.h>
#include <sys/vm_machparam.h>
#include <sys/promif.h>
#include <sys/prom_isa.h>
#include <sys/prom_plat.h>
#include <sys/prom_debug.h>
#include <sys/privregs.h>
#include <sys/bootconf.h>
#include <sys/memlist.h>
#include <sys/memlist_plat.h>
#include <sys/cpu_module.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/fpu/fpusystm.h>

/*
 * External routines and data structures
 */
extern void	sfmmu_cache_flushcolor(int, pfn_t);

/*
 * Static routines
 */
static void	sfmmu_set_tlb(void);

/*
 * Global Data:
 */
caddr_t	textva, datava;
tte_t	ktext_tte, kdata_tte;		/* ttes for kernel text and data */

int	enable_bigktsb = 1;

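/*
 * TTEs mapping the big kernel TSB.  They are recorded by the boot cpu
 * in sfmmu_remap_kernel() and, when ktsb_phys is not set, loaded into
 * each cpu's locked dTLB entries by sfmmu_set_tlb().
 */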
tte_t bigktsb_ttes[MAX_BIGKTSB_TTES];
int bigktsb_nttes = 0;

/*
 * Controls the logic that enables the use of the
 * QUAD_LDD_PHYS ASI for TSB accesses.
 */
int	ktsb_phys = 0;

/*
 * This routine remaps the kernel using large TTEs.  All entries
 * except locked ones will be removed from the TLB.  It assumes that
 * the text and data segments each reside in their own 4MB chunk of
 * virtually and physically contiguous memory.  This routine is
 * executed by the first cpu only; the remaining cpus execute
 * sfmmu_mp_startup() instead.
 * XXX It assumes that the start of the text segment is KERNELBASE.  It should
 * actually be based on start.
 */
void
sfmmu_remap_kernel(void)
{
	pfn_t	pfn;
	uint_t	attr;
	int	flags;

	extern char end[];
	extern struct as kas;

	textva = (caddr_t)(KERNELBASE & MMU_PAGEMASK4M);
	pfn = va_to_pfn(textva);
	if (pfn == PFN_INVALID)
		prom_panic("can't find kernel text pfn");
	pfn &= TTE_PFNMASK(TTE4M);

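	/*
	 * Lock the text mapping into the hash, but skip loading it into
	 * the TSB (SFMMU_NO_TSBLOAD); the kernel TSB itself may not be
	 * mapped at this point.
	 */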
	attr = PROC_TEXT | HAT_NOSYNC;
	flags = HAT_LOAD_LOCK | SFMMU_NO_TSBLOAD;
	sfmmu_memtte(&ktext_tte, pfn, attr, TTE4M);
	/*
	 * We set the lock bit in the tte to lock the translation in
	 * the tlb.  Note we cannot lock Panther 32M/256M pages into the tlb.
	 * This note is here to make sure that no one tries to remap the
	 * kernel using 32M or 256M TTEs on Panther cpus.
	 */
	TTE_SET_LOCKED(&ktext_tte);
	sfmmu_tteload(kas.a_hat, &ktext_tte, textva, NULL, flags);

	datava = (caddr_t)((uintptr_t)end & MMU_PAGEMASK4M);
	pfn = va_to_pfn(datava);
	if (pfn == PFN_INVALID)
		prom_panic("can't find kernel data pfn");
	pfn &= TTE_PFNMASK(TTE4M);

	attr = PROC_DATA | HAT_NOSYNC;
	sfmmu_memtte(&kdata_tte, pfn, attr, TTE4M);
	/*
	 * We set the lock bit in the tte to lock the translation in
	 * the tlb.  We also set the mod bit to avoid taking dirty bit
	 * traps on kernel data.
	 */
	TTE_SET_LOCKED(&kdata_tte);
	TTE_SET_LOFLAGS(&kdata_tte, 0, TTE_HWWR_INT);
	sfmmu_tteload(kas.a_hat, &kdata_tte, datava,
	    (struct page *)NULL, flags);

	/*
	 * Create bigktsb TTEs if necessary.
	 */
	if (enable_bigktsb) {
		int i = 0;
		caddr_t va = ktsb_base;
		size_t tsbsz = ktsb_sz;
		tte_t tte;

		ASSERT(va >= datava + MMU_PAGESIZE4M);
		ASSERT(tsbsz >= MMU_PAGESIZE4M);
		ASSERT(IS_P2ALIGNED(tsbsz, tsbsz));
		ASSERT(IS_P2ALIGNED(va, tsbsz));
		attr = PROC_DATA | HAT_NOSYNC;
		while (tsbsz != 0) {
			ASSERT(i < MAX_BIGKTSB_TTES);
			pfn = va_to_pfn(va);
			ASSERT(pfn != PFN_INVALID);
			ASSERT((pfn & ~TTE_PFNMASK(TTE4M)) == 0);
			sfmmu_memtte(&tte, pfn, attr, TTE4M);
			ASSERT(TTE_IS_MOD(&tte));
			/*
			 * No need to lock if we use physical addresses.
			 * Since we invalidate the kernel TSB using virtual
			 * addresses, it's an optimization to load them now
			 * so that we won't have to load them later.
			 */
			if (!ktsb_phys) {
				TTE_SET_LOCKED(&tte);
			}
			sfmmu_tteload(kas.a_hat, &tte, va, NULL, flags);
			bigktsb_ttes[i] = tte;
			va += MMU_PAGESIZE4M;
			tsbsz -= MMU_PAGESIZE4M;
			i++;
		}
		bigktsb_nttes = i;
	}

	sfmmu_set_tlb();
}

#ifndef UTSB_PHYS
/*
 * Unmap all references to user TSBs from the TLB of the current processor.
 */
static void
sfmmu_clear_user_tsbs(void)
{
	caddr_t va;
	caddr_t end_va;

	/* Demap all pages in the VA range for the first user TSB */
	va = utsb_vabase;
	end_va = va + tsb_slab_size;
	while (va < end_va) {
		vtag_flushpage(va, (uint64_t)ksfmmup);
		va += MMU_PAGESIZE;
	}

	/* Demap all pages in the VA range for the second user TSB */
	va = utsb4m_vabase;
	end_va = va + tsb_slab_size;
	while (va < end_va) {
		vtag_flushpage(va, (uint64_t)ksfmmup);
		va += MMU_PAGESIZE;
	}
}
#endif /* UTSB_PHYS */

/*
 * Set up the kernel's locked TTEs.
 */
static void
sfmmu_set_tlb(void)
{
	uint_t index;
	struct cpu_node *cpunode;

	cpunode = &cpunodes[getprocessorid()];
	index = cpunode->itlb_size;

	/*
	 * NOTE: the prom will do an explicit unmap of the VAs from the TLBs
	 * in the following functions before loading the new value into the
	 * TLB.  Thus if there was an entry already in the TLB at a different
	 * location, it will get unmapped before we load the entry at the
	 * specified location.
	 */
	(void) prom_itlb_load(index - 1, *(uint64_t *)&ktext_tte, textva);
	index = cpunode->dtlb_size;
	(void) prom_dtlb_load(index - 1, *(uint64_t *)&kdata_tte, datava);
	(void) prom_dtlb_load(index - 2, *(uint64_t *)&ktext_tte, textva);
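	/*
	 * The two loads above occupy dTLB entries dtlb_size - 1 and
	 * dtlb_size - 2, so drop index to the next free entry; locked
	 * entries below are handed out top-down from here.
	 */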
	index -= 3;

#ifndef UTSB_PHYS
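	/*
	 * Reserve the next two locked dTLB entries for mapping the
	 * two user TSB virtual ranges (see sfmmu_clear_user_tsbs()).
	 */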
	utsb_dtlb_ttenum = index--;
	utsb4m_dtlb_ttenum = index--;
	sfmmu_clear_user_tsbs();
#endif /* UTSB_PHYS */

	if (!ktsb_phys && enable_bigktsb) {
		int i;
		caddr_t va = ktsb_base;
		uint64_t tte;

		ASSERT(bigktsb_nttes <= MAX_BIGKTSB_TTES);
		for (i = 0; i < bigktsb_nttes; i++) {
			tte = *(uint64_t *)&bigktsb_ttes[i];
			(void) prom_dtlb_load(index, tte, va);
			va += MMU_PAGESIZE4M;
			index--;
		}
	}

	dtlb_resv_ttenum = index + 1;
}

/*
 * This routine is executed at initialization time by every cpu other
 * than the first.  It is responsible for taking over the MMU from the
 * prom.  We follow these steps:
 *	- Lock the kernel's TTEs in the TLB
 *	- Initialize the TSB hardware registers
 *	- Take over the trap table
 *	- Flush the prom's locked entries from the TLB
 */
void
sfmmu_mp_startup(void)
{
	sfmmu_set_tlb();
	setwstate(WSTATE_KERN);
	prom_set_traptable(&trap_table);
	install_va_to_tte();
}

void
kdi_tlb_page_lock(caddr_t va, int do_dtlb)
{
	tte_t tte;
	pfn_t pfn = va_to_pfn(va);

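	/*
	 * Build an 8K TTE for this page: valid, locked, cacheable,
	 * privileged, and writable.
	 */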
	tte.tte_inthi = TTE_VALID_INT | TTE_SZ_INT(TTE8K) | TTE_PFN_INTHI(pfn);
	tte.tte_intlo = TTE_PFN_INTLO(pfn) | TTE_LCK_INT | TTE_CP_INT |
	    TTE_PRIV_INT | TTE_HWWR_INT;

	vtag_flushpage(va, (uint64_t)ksfmmup);

	sfmmu_itlb_ld_kva(va, &tte);
	if (do_dtlb)
		sfmmu_dtlb_ld_kva(va, &tte);
}

/*ARGSUSED*/
void
kdi_tlb_page_unlock(caddr_t va, int do_dtlb)
{
	vtag_flushpage(va, (uint64_t)ksfmmup);
}

/* Clear user TSB information (applicable to hardware TSB walkers). */
void
sfmmu_clear_utsbinfo(void)
{
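	/* Nothing to do; this platform has no hardware TSB walker. */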
}

/*ARGSUSED*/
void
sfmmu_setup_tsbinfo(sfmmu_t *sfmmup)
{
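	/* Nothing to do; tsbinfo is consumed only by hardware TSB walkers. */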
}

/*
 * Invalidate a TSB.  If floating point is enabled we use
 * a fast block-store routine; otherwise we use the old method
 * of walking the TSB, setting each tag to TSBTAG_INVALID.
 */
void
sfmmu_inv_tsb(caddr_t tsb_base, uint_t tsb_bytes)
{
	extern void sfmmu_inv_tsb_fast(caddr_t, uint_t);
	struct tsbe *tsbaddr;

	/* CONSTCOND */
	if (fpu_exists) {
		sfmmu_inv_tsb_fast(tsb_base, tsb_bytes);
		return;
	}

	for (tsbaddr = (struct tsbe *)tsb_base;
	    (uintptr_t)tsbaddr < (uintptr_t)(tsb_base + tsb_bytes);
	    tsbaddr++) {
		tsbaddr->tte_tag.tag_inthi = TSBTAG_INVALID;
	}

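	/*
	 * The kernel TSB is accessed by physical address in this case,
	 * which does not go through the D-cache the way the stores
	 * above did, so flush the D-cache to keep the two views
	 * consistent.
	 */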
	if (ktsb_phys && tsb_base == ktsb_base)
		dcache_flushall();
}

/*
 * Completely flush the D-cache on all cpus.
 */
void
sfmmu_cache_flushall(void)
{
	int i;

	for (i = 0; i < CACHE_NUM_COLOR; i++)
		sfmmu_cache_flushcolor(i, 0);
}