/* $NetBSD: kvm_arm.c,v 1.5 2010/09/19 02:07:00 jym Exp $	 */

/*-
 * Copyright (C) 1996 Wolfgang Solfrank.
 * Copyright (C) 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	from: kvm_powerpc.c,v 1.3 1997/09/19 04:00:23 thorpej Exp
 */

/*
 * arm32 machine dependent routines for kvm.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: kvm_arm.c,v 1.5 2010/09/19 02:07:00 jym Exp $");
#endif				/* LIBC_SCCS and not lint */

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/kcore.h>
#include <sys/types.h>

#include <arm/kcore.h>
#include <arm/arm32/pte.h>

#include <stdlib.h>
#include <db.h>
#include <limits.h>
#include <kvm.h>

#include <unistd.h>

#include "kvm_private.h"

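/*
 * Free the machine-dependent virtual-to-physical translation state,
 * if any was allocated for this kvm descriptor.
 */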
void
_kvm_freevtop(kvm_t * kd)
{
	if (kd->vmst != 0)
		free(kd->vmst);
}

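/*
 * Prepare for virtual-to-physical translation on a crash dump.  On
 * arm32 everything needed lives in the kcore header, so there is no
 * per-descriptor state to set up here.
 */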
int
_kvm_initvtop(kvm_t * kd)
{
	return 0;
}

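/*
 * Translate a kernel virtual address to a physical address by walking
 * the dumped L1/L2 page tables described by the kcore header.  On
 * success *pa is set and the number of contiguous bytes valid at that
 * address is returned; 0 is returned on error.
 */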
int
_kvm_kvatop(kvm_t * kd, vaddr_t va, paddr_t *pa)
{
	cpu_kcore_hdr_t *cpu_kh;
	pd_entry_t      pde;
	pt_entry_t      pte;
	paddr_t		pde_pa, pte_pa;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}
	cpu_kh = kd->cpu_data;

	if (cpu_kh->version != 1) {
		_kvm_err(kd, 0, "unsupported kcore structure version");
		return 0;
	}
	if (cpu_kh->flags != 0) {
		_kvm_err(kd, 0, "kcore flags not supported");
		return 0;
	}
	/*
	 * work out which L1 table we need
	 */
	if (va >= (cpu_kh->UserL1TableSize << 17))
		pde_pa = cpu_kh->PAKernelL1Table;
	else
		pde_pa = cpu_kh->PAUserL1Table;

	/*
	 * work out the offset into the L1 Table
	 */
	pde_pa += ((va >> 20) * sizeof(pd_entry_t));

	if (_kvm_pread(kd, kd->pmfd, (void *) &pde, sizeof(pd_entry_t),
		  _kvm_pa2off(kd, pde_pa)) != sizeof(pd_entry_t)) {
		_kvm_syserr(kd, 0, "could not read L1 entry");
		return (0);
	}
	/*
	 * work out what kind of L1 descriptor it is
	 */
	switch (pde & L1_TYPE_MASK) {
	case L1_TYPE_S:
		/* 1MB section mapping: no L2 table, translate directly */
		*pa = (pde & L1_S_FRAME) | (va & L1_S_OFFSET);
		return L1_S_SIZE - (va & L1_S_OFFSET);
	case L1_TYPE_C:
		/* coarse L2 page table: entry indexed by VA bits 19-12 */
		pte_pa = (pde & L1_C_ADDR_MASK)
			| ((va & 0xff000) >> 10);
		break;
	case L1_TYPE_F:
		/* fine L2 page table: entry indexed by VA bits 19-10 */
		pte_pa = (pde & L1_S_ADDR_MASK)
			| ((va & 0xffc00) >> 8);
		break;
	default:
		_kvm_syserr(kd, 0, "L1 entry is invalid");
		return (0);
	}

	/*
	 * locate the pte and load it
	 */
	if (_kvm_pread(kd, kd->pmfd, (void *) &pte, sizeof(pt_entry_t),
		  _kvm_pa2off(kd, pte_pa)) != sizeof(pt_entry_t)) {
		_kvm_syserr(kd, 0, "could not read L2 entry");
		return (0);
	}
	switch (pte & L2_TYPE_MASK) {
	case L2_TYPE_L:
		/* 64KB large page */
		*pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
		return (L2_L_SIZE - (va & L2_L_OFFSET));
	case L2_TYPE_S:
		/* 4KB small page */
		*pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
		return (L2_S_SIZE - (va & L2_S_OFFSET));
	case L2_TYPE_T:
		/* 1KB tiny page */
		*pa = (pte & L2_T_FRAME) | (va & L2_T_OFFSET);
		return (L2_T_SIZE - (va & L2_T_OFFSET));
	default:
		_kvm_syserr(kd, 0, "L2 entry is invalid");
		return (0);
	}

	_kvm_err(kd, 0, "vatop not yet implemented!");
	return 0;
}

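/*
 * Translate a physical address to a byte offset within the crash dump,
 * using the physical RAM segment table described by the kcore header.
 */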
off_t
_kvm_pa2off(kvm_t * kd, u_long pa)
{
	cpu_kcore_hdr_t *cpu_kh;
	phys_ram_seg_t *ramsegs;
	off_t           off;
	int             i;

	cpu_kh = kd->cpu_data;
	ramsegs = (void *) ((char *) (void *) cpu_kh + cpu_kh->omemsegs);

	off = 0;
	for (i = 0; i < cpu_kh->nmemsegs; i++) {
		if (pa >= ramsegs[i].start &&
		    (pa - ramsegs[i].start) < ramsegs[i].size) {
			off += (pa - ramsegs[i].start);
			break;
		}
		off += ramsegs[i].size;
	}
	return (kd->dump_off + off);
}

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (e.g. arm)
 */
int
_kvm_mdopen(kvm_t * kd)
{
	uintptr_t       max_uva;
	extern struct ps_strings *__ps_strings;

#if 0				/* XXX - These vary across arm machines... */
	kd->usrstack = USRSTACK;
	kd->min_uva = VM_MIN_ADDRESS;
	kd->max_uva = VM_MAXUSER_ADDRESS;
#endif
	/* This is somewhat hack-ish, but it works. */
	max_uva = (uintptr_t) (__ps_strings + 1);
	kd->usrstack = max_uva;
	kd->max_uva = max_uva;
	kd->min_uva = 0;

	return (0);
}