kvm_i386.c revision 1602
11602Srgrimes/*-
21602Srgrimes * Copyright (c) 1989, 1992, 1993
31602Srgrimes *	The Regents of the University of California.  All rights reserved.
41602Srgrimes *
51602Srgrimes * This code is derived from software developed by the Computer Systems
61602Srgrimes * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
71602Srgrimes * BG 91-66 and contributed to Berkeley.
81602Srgrimes *
91602Srgrimes * Redistribution and use in source and binary forms, with or without
101602Srgrimes * modification, are permitted provided that the following conditions
111602Srgrimes * are met:
121602Srgrimes * 1. Redistributions of source code must retain the above copyright
131602Srgrimes *    notice, this list of conditions and the following disclaimer.
141602Srgrimes * 2. Redistributions in binary form must reproduce the above copyright
151602Srgrimes *    notice, this list of conditions and the following disclaimer in the
161602Srgrimes *    documentation and/or other materials provided with the distribution.
171602Srgrimes * 3. All advertising materials mentioning features or use of this software
181602Srgrimes *    must display the following acknowledgement:
191602Srgrimes *	This product includes software developed by the University of
201602Srgrimes *	California, Berkeley and its contributors.
211602Srgrimes * 4. Neither the name of the University nor the names of its contributors
221602Srgrimes *    may be used to endorse or promote products derived from this software
231602Srgrimes *    without specific prior written permission.
241602Srgrimes *
251602Srgrimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
261602Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
271602Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
281602Srgrimes * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
291602Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
301602Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
311602Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
321602Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
331602Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
341602Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
351602Srgrimes * SUCH DAMAGE.
361602Srgrimes */
371602Srgrimes
381602Srgrimes#if defined(LIBC_SCCS) && !defined(lint)
391602Srgrimesstatic char sccsid[] = "@(#)kvm_hp300.c	8.1 (Berkeley) 6/4/93";
401602Srgrimes#endif /* LIBC_SCCS and not lint */
411602Srgrimes
421602Srgrimes/*
431602Srgrimes * Hp300 machine dependent routines for kvm.  Hopefully, the forthcoming
441602Srgrimes * vm code will one day obsolete this module.
451602Srgrimes */
461602Srgrimes
471602Srgrimes#include <sys/param.h>
481602Srgrimes#include <sys/user.h>
491602Srgrimes#include <sys/proc.h>
501602Srgrimes#include <sys/stat.h>
511602Srgrimes#include <unistd.h>
521602Srgrimes#include <nlist.h>
531602Srgrimes#include <kvm.h>
541602Srgrimes
551602Srgrimes#include <vm/vm.h>
561602Srgrimes#include <vm/vm_param.h>
571602Srgrimes
581602Srgrimes#include <limits.h>
591602Srgrimes#include <db.h>
601602Srgrimes
611602Srgrimes#include "kvm_private.h"
621602Srgrimes
631602Srgrimes#if defined(hp300)
641602Srgrimes#include <hp300/hp300/pte.h>
651602Srgrimes#endif
661602Srgrimes
671602Srgrimes#if defined(luna68k)
681602Srgrimes#include <luna68k/luna68k/pte.h>
691602Srgrimes#endif
701602Srgrimes
#ifndef btop
/*
 * Fallback byte/page conversions when the machine headers above
 * did not provide them (XXX).
 */
#define	btop(x)		(((unsigned)(x)) >> PGSHIFT)	/* bytes -> page number */
#define	ptob(x)		((caddr_t)((x) << PGSHIFT))	/* page number -> byte address */
#endif
751602Srgrimes
761602Srgrimesstruct vmstate {
771602Srgrimes	u_long lowram;
781602Srgrimes	int mmutype;
791602Srgrimes	struct ste *Sysseg;
801602Srgrimes};
811602Srgrimes
821602Srgrimes#define KREAD(kd, addr, p)\
831602Srgrimes	(kvm_read(kd, addr, (char *)(p), sizeof(*(p))) != sizeof(*(p)))
841602Srgrimes
851602Srgrimesvoid
861602Srgrimes_kvm_freevtop(kd)
871602Srgrimes	kvm_t *kd;
881602Srgrimes{
891602Srgrimes	if (kd->vmst != 0)
901602Srgrimes		free(kd->vmst);
911602Srgrimes}
921602Srgrimes
/*
 * Initialize virtual-to-physical translation state: allocate a
 * struct vmstate and fill it from the kernel symbols _lowram,
 * _mmutype and _Sysseg.  Returns 0 on success, -1 on error (with
 * an error recorded via _kvm_err).  On failure the partially
 * initialized vmstate remains attached to kd and is reclaimed by
 * _kvm_freevtop().
 */
int
_kvm_initvtop(kd)
	kvm_t *kd;
{
	struct vmstate *vm;
	struct nlist nlist[4];

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == 0)
		return (-1);
	kd->vmst = vm;

	nlist[0].n_name = "_lowram";
	nlist[1].n_name = "_mmutype";
	nlist[2].n_name = "_Sysseg";
	nlist[3].n_name = 0;		/* terminator for kvm_nlist */

	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	/*
	 * Clear Sysseg before the reads below: while it is zero,
	 * _kvm_vatop falls back to an identity (pa == va) mapping,
	 * which is what lets these KREADs work before the real
	 * segment table pointer is known.
	 */
	vm->Sysseg = 0;
	if (KREAD(kd, (u_long)nlist[0].n_value, &vm->lowram)) {
		_kvm_err(kd, kd->program, "cannot read lowram");
		return (-1);
	}
	if (KREAD(kd, (u_long)nlist[1].n_value, &vm->mmutype)) {
		_kvm_err(kd, kd->program, "cannot read mmutype");
		return (-1);
	}
	if (KREAD(kd, (u_long)nlist[2].n_value, &vm->Sysseg)) {
		_kvm_err(kd, kd->program, "cannot read segment table");
		return (-1);
	}
	return (0);
}
1291602Srgrimes
1301602Srgrimesstatic int
1311602Srgrimes_kvm_vatop(kd, sta, va, pa)
1321602Srgrimes	kvm_t *kd;
1331602Srgrimes	struct ste *sta;
1341602Srgrimes	u_long va;
1351602Srgrimes	u_long *pa;
1361602Srgrimes{
1371602Srgrimes	register struct vmstate *vm;
1381602Srgrimes	register u_long lowram;
1391602Srgrimes	register u_long addr;
1401602Srgrimes	int p, ste, pte;
1411602Srgrimes	int offset;
1421602Srgrimes
1431602Srgrimes	if (ISALIVE(kd)) {
1441602Srgrimes		_kvm_err(kd, 0, "vatop called in live kernel!");
1451602Srgrimes		return((off_t)0);
1461602Srgrimes	}
1471602Srgrimes	vm = kd->vmst;
1481602Srgrimes	offset = va & PGOFSET;
1491602Srgrimes	/*
1501602Srgrimes	 * If we are initializing (kernel segment table pointer not yet set)
1511602Srgrimes	 * then return pa == va to avoid infinite recursion.
1521602Srgrimes	 */
1531602Srgrimes	if (vm->Sysseg == 0) {
1541602Srgrimes		*pa = va;
1551602Srgrimes		return (NBPG - offset);
1561602Srgrimes	}
1571602Srgrimes	lowram = vm->lowram;
1581602Srgrimes	if (vm->mmutype == -2) {
1591602Srgrimes		struct ste *sta2;
1601602Srgrimes
1611602Srgrimes		addr = (u_long)&sta[va >> SG4_SHIFT1];
1621602Srgrimes		/*
1631602Srgrimes		 * Can't use KREAD to read kernel segment table entries.
1641602Srgrimes		 * Fortunately it is 1-to-1 mapped so we don't have to.
1651602Srgrimes		 */
1661602Srgrimes		if (sta == vm->Sysseg) {
1671602Srgrimes			if (lseek(kd->pmfd, (off_t)addr, 0) == -1 ||
1681602Srgrimes			    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
1691602Srgrimes				goto invalid;
1701602Srgrimes		} else if (KREAD(kd, addr, &ste))
1711602Srgrimes			goto invalid;
1721602Srgrimes		if ((ste & SG_V) == 0) {
1731602Srgrimes			_kvm_err(kd, 0, "invalid level 1 descriptor (%x)",
1741602Srgrimes				 ste);
1751602Srgrimes			return((off_t)0);
1761602Srgrimes		}
1771602Srgrimes		sta2 = (struct ste *)(ste & SG4_ADDR1);
1781602Srgrimes		addr = (u_long)&sta2[(va & SG4_MASK2) >> SG4_SHIFT2];
1791602Srgrimes		/*
1801602Srgrimes		 * Address from level 1 STE is a physical address,
1811602Srgrimes		 * so don't use kvm_read.
1821602Srgrimes		 */
1831602Srgrimes		if (lseek(kd->pmfd, (off_t)(addr - lowram), 0) == -1 ||
1841602Srgrimes		    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
1851602Srgrimes			goto invalid;
1861602Srgrimes		if ((ste & SG_V) == 0) {
1871602Srgrimes			_kvm_err(kd, 0, "invalid level 2 descriptor (%x)",
1881602Srgrimes				 ste);
1891602Srgrimes			return((off_t)0);
1901602Srgrimes		}
1911602Srgrimes		sta2 = (struct ste *)(ste & SG4_ADDR2);
1921602Srgrimes		addr = (u_long)&sta2[(va & SG4_MASK3) >> SG4_SHIFT3];
1931602Srgrimes	} else {
1941602Srgrimes		addr = (u_long)&sta[va >> SEGSHIFT];
1951602Srgrimes		/*
1961602Srgrimes		 * Can't use KREAD to read kernel segment table entries.
1971602Srgrimes		 * Fortunately it is 1-to-1 mapped so we don't have to.
1981602Srgrimes		 */
1991602Srgrimes		if (sta == vm->Sysseg) {
2001602Srgrimes			if (lseek(kd->pmfd, (off_t)addr, 0) == -1 ||
2011602Srgrimes			    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
2021602Srgrimes				goto invalid;
2031602Srgrimes		} else if (KREAD(kd, addr, &ste))
2041602Srgrimes			goto invalid;
2051602Srgrimes		if ((ste & SG_V) == 0) {
2061602Srgrimes			_kvm_err(kd, 0, "invalid segment (%x)", ste);
2071602Srgrimes			return((off_t)0);
2081602Srgrimes		}
2091602Srgrimes		p = btop(va & SG_PMASK);
2101602Srgrimes		addr = (ste & SG_FRAME) + (p * sizeof(struct pte));
2111602Srgrimes	}
2121602Srgrimes	/*
2131602Srgrimes	 * Address from STE is a physical address so don't use kvm_read.
2141602Srgrimes	 */
2151602Srgrimes	if (lseek(kd->pmfd, (off_t)(addr - lowram), 0) == -1 ||
2161602Srgrimes	    read(kd->pmfd, (char *)&pte, sizeof(pte)) < 0)
2171602Srgrimes		goto invalid;
2181602Srgrimes	addr = pte & PG_FRAME;
2191602Srgrimes	if (pte == PG_NV) {
2201602Srgrimes		_kvm_err(kd, 0, "page not valid");
2211602Srgrimes		return (0);
2221602Srgrimes	}
2231602Srgrimes	*pa = addr - lowram + offset;
2241602Srgrimes
2251602Srgrimes	return (NBPG - offset);
2261602Srgrimesinvalid:
2271602Srgrimes	_kvm_err(kd, 0, "invalid address (%x)", va);
2281602Srgrimes	return (0);
2291602Srgrimes}
2301602Srgrimes
2311602Srgrimesint
2321602Srgrimes_kvm_kvatop(kd, va, pa)
2331602Srgrimes	kvm_t *kd;
2341602Srgrimes	u_long va;
2351602Srgrimes	u_long *pa;
2361602Srgrimes{
2371602Srgrimes	return (_kvm_vatop(kd, (u_long)kd->vmst->Sysseg, va, pa));
2381602Srgrimes}
2391602Srgrimes
2401602Srgrimes/*
2411602Srgrimes * Translate a user virtual address to a physical address.
2421602Srgrimes */
2431602Srgrimesint
2441602Srgrimes_kvm_uvatop(kd, p, va, pa)
2451602Srgrimes	kvm_t *kd;
2461602Srgrimes	const struct proc *p;
2471602Srgrimes	u_long va;
2481602Srgrimes	u_long *pa;
2491602Srgrimes{
2501602Srgrimes	register struct vmspace *vms = p->p_vmspace;
2511602Srgrimes	int kva;
2521602Srgrimes
2531602Srgrimes	/*
2541602Srgrimes	 * If this is a live kernel we just look it up in the kernel
2551602Srgrimes	 * virtually allocated flat 4mb page table (i.e. let the kernel
2561602Srgrimes	 * do the table walk).  In this way, we avoid needing to know
2571602Srgrimes	 * the MMU type.
2581602Srgrimes	 */
2591602Srgrimes	if (ISALIVE(kd)) {
2601602Srgrimes		struct pte *ptab;
2611602Srgrimes		int pte, offset;
2621602Srgrimes
2631602Srgrimes		kva = (int)&vms->vm_pmap.pm_ptab;
2641602Srgrimes		if (KREAD(kd, kva, &ptab)) {
2651602Srgrimes			_kvm_err(kd, 0, "invalid address (%x)", va);
2661602Srgrimes			return (0);
2671602Srgrimes		}
2681602Srgrimes		kva = (int)&ptab[btop(va)];
2691602Srgrimes		if (KREAD(kd, kva, &pte) || (pte & PG_V) == 0) {
2701602Srgrimes			_kvm_err(kd, 0, "invalid address (%x)", va);
2711602Srgrimes			return (0);
2721602Srgrimes		}
2731602Srgrimes		offset = va & PGOFSET;
2741602Srgrimes		*pa = (pte & PG_FRAME) | offset;
2751602Srgrimes		return (NBPG - offset);
2761602Srgrimes	}
2771602Srgrimes	/*
2781602Srgrimes	 * Otherwise, we just walk the table ourself.
2791602Srgrimes	 */
2801602Srgrimes	kva = (int)&vms->vm_pmap.pm_stab;
2811602Srgrimes	if (KREAD(kd, kva, &kva)) {
2821602Srgrimes		_kvm_err(kd, 0, "invalid address (%x)", va);
2831602Srgrimes		return (0);
2841602Srgrimes	}
2851602Srgrimes	return (_kvm_vatop(kd, kva, va, pa));
2861602Srgrimes}
287