/* $FreeBSD$ */
/*	$NetBSD: kvm_alpha.c,v 1.7.2.1 1997/11/02 20:34:26 mellon Exp $	*/

/*
 * Copyright (c) 1994, 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/types.h>
#include <sys/elf64.h>
#include <sys/mman.h>

#include <machine/atomic.h>
#include <machine/bootinfo.h>
#include <machine/pte.h>

#include <kvm.h>
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>

#include "kvm_private.h"

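/*
 * ia64 splits the virtual address space into 8 regions, selected by the
 * top 3 bits of the address.  REGION_BASE() yields the base address of
 * region n; REGION_ADDR() strips the region bits, leaving the offset
 * within the region.
 */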
#define	REGION_BASE(n)		(((uint64_t)(n)) << 61)
#define	REGION_ADDR(x)		((x) & ((1LL<<61)-1LL))

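/*
 * Region 5 (kernel virtual memory) is mapped by a page table rooted at
 * ia64_kptdir: a directory page of physical addresses pointing to
 * second-level directory pages, which in turn point to leaf pages of
 * struct ia64_lpte entries.  These macros compute the index into each
 * level for a given kernel virtual address and page size.
 */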
#define	NKPTEPG(ps)		((ps) / sizeof(struct ia64_lpte))
#define	NKPTEDIR(ps)		((ps) >> 3)
#define	KPTE_PTE_INDEX(va,ps)	(((va)/(ps)) % NKPTEPG(ps))
#define	KPTE_DIR0_INDEX(va,ps)	((((va)/(ps)) / NKPTEPG(ps)) / NKPTEDIR(ps))
#define	KPTE_DIR1_INDEX(va,ps)	((((va)/(ps)) / NKPTEPG(ps)) % NKPTEDIR(ps))

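/*
 * Pre-Boot Virtual Memory (PBVM) is mapped by a single-level page table
 * of 64KB pages; its location and size are published in the bootinfo
 * structure.
 */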
#define	PBVM_BASE		0x9ffc000000000000UL
#define	PBVM_PGSZ		(64 * 1024)

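/*
 * Per-open translation state: the mmap(2)'ed ELF headers of the core
 * file, the page size, the physical address of the kernel page table
 * directory and an in-memory copy of the PBVM page table.
 */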
struct vmstate {
	void	*mmapbase;
	size_t	mmapsize;
	size_t	pagesize;
	u_long	kptdir;
	u_long	*pbvm_pgtbl;
	u_int	pbvm_pgtblsz;
};

/*
 * Map the ELF headers into the process' address space. We do this in two
 * steps: first the ELF header itself, and then, using the sizes recorded
 * there, the whole set of program headers.
 */
static int
_kvm_maphdrs(kvm_t *kd, size_t sz)
{
	struct vmstate *vm = kd->vmst;

	/* munmap() previous mmap(). */
	if (vm->mmapbase != NULL) {
		munmap(vm->mmapbase, vm->mmapsize);
		vm->mmapbase = NULL;
	}

	vm->mmapsize = sz;
	vm->mmapbase = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->mmapbase == MAP_FAILED) {
		/* Don't leave MAP_FAILED around for _kvm_freevtop() to unmap. */
		vm->mmapbase = NULL;
		_kvm_err(kd, kd->program, "cannot mmap corefile");
		return (-1);
	}

	return (0);
}

/*
 * Translate a physical memory address to a file offset in the crash dump.
 * Returns the number of contiguous bytes available at that offset, or 0
 * if the address is not covered by the dump.
 */
static size_t
_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
{
	Elf64_Ehdr *e = kd->vmst->mmapbase;
	Elf64_Phdr *p = (Elf64_Phdr *)((char *)e + e->e_phoff);
	int n = e->e_phnum;

	if (pa != REGION_ADDR(pa)) {
		_kvm_err(kd, kd->program, "internal error");
		return (0);
	}

	while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
		p++, n--;
	if (n == 0)
		return (0);

	*ofs = (pa - p->p_paddr) + p->p_offset;
	if (pgsz == 0)
		return (p->p_memsz - (pa - p->p_paddr));
	return (pgsz - ((size_t)pa & (pgsz - 1)));
}

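/*
 * Read physical memory directly from the core file. If fewer than bufsz
 * contiguous bytes are available at pa, nothing is read and the number
 * of available bytes is returned.
 */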
static ssize_t
_kvm_read_phys(kvm_t *kd, uint64_t pa, void *buf, size_t bufsz)
{
	off_t ofs;
	size_t sz;

	sz = _kvm_pa2off(kd, pa, &ofs, 0);
	if (sz < bufsz)
		return ((ssize_t)sz);

	if (lseek(kd->pmfd, ofs, SEEK_SET) == -1)
		return (-1);
	return (read(kd->pmfd, buf, bufsz));
}

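/*
 * Release all translation state hanging off the kvm descriptor.
 */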
void
_kvm_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	if (vm->pbvm_pgtbl != NULL)
		free(vm->pbvm_pgtbl);
	if (vm->mmapbase != NULL)
		munmap(vm->mmapbase, vm->mmapsize);
	free(vm);
	kd->vmst = NULL;
}

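/*
 * Initialize virtual-to-physical translation: map the ELF headers of the
 * core file, load the PBVM page table (when the kernel provides one) and
 * locate the kernel page table directory via the ia64_kptdir symbol.
 */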
int
_kvm_initvtop(kvm_t *kd)
{
	struct bootinfo bi;
	struct nlist nl[2];
	uint64_t va;
	Elf64_Ehdr *ehdr;
	size_t hdrsz;
	ssize_t sz;

	kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
	if (kd->vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}

	kd->vmst->pagesize = getpagesize();

	if (_kvm_maphdrs(kd, sizeof(Elf64_Ehdr)) == -1)
		return (-1);

	ehdr = kd->vmst->mmapbase;
	hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum;
	if (_kvm_maphdrs(kd, hdrsz) == -1)
		return (-1);

	/*
	 * Load the PBVM page table. We need this to resolve PBVM addresses.
	 * The PBVM page table is obtained from the bootinfo structure, whose
	 * physical address is given to us in e_entry. If e_entry is 0, this
	 * is assumed to be a pre-PBVM kernel.
	 */
	if (ehdr->e_entry != 0) {
		sz = _kvm_read_phys(kd, ehdr->e_entry, &bi, sizeof(bi));
		if (sz != sizeof(bi)) {
			_kvm_err(kd, kd->program,
			    "cannot read bootinfo from PA %#lx", ehdr->e_entry);
			return (-1);
		}
		if (bi.bi_magic != BOOTINFO_MAGIC) {
			_kvm_err(kd, kd->program, "invalid bootinfo");
			return (-1);
		}
		kd->vmst->pbvm_pgtbl = _kvm_malloc(kd, bi.bi_pbvm_pgtblsz);
		if (kd->vmst->pbvm_pgtbl == NULL) {
			_kvm_err(kd, kd->program, "cannot allocate page table");
			return (-1);
		}
		kd->vmst->pbvm_pgtblsz = bi.bi_pbvm_pgtblsz;
		sz = _kvm_read_phys(kd, bi.bi_pbvm_pgtbl, kd->vmst->pbvm_pgtbl,
		    bi.bi_pbvm_pgtblsz);
		if (sz != bi.bi_pbvm_pgtblsz) {
			_kvm_err(kd, kd->program,
			    "cannot read page table from PA %#lx",
			    bi.bi_pbvm_pgtbl);
			return (-1);
		}
	} else {
		kd->vmst->pbvm_pgtbl = NULL;
		kd->vmst->pbvm_pgtblsz = 0;
	}

	/*
	 * At this point we've got enough information to use kvm_read() for
	 * direct-mapped (i.e., region 6 and region 7) addresses, such as
	 * symbol addresses/values.
	 */

	nl[0].n_name = "ia64_kptdir";
	nl[1].n_name = 0;

	if (kvm_nlist(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}

	if (kvm_read(kd, nl[0].n_value, &va, sizeof(va)) != sizeof(va)) {
		_kvm_err(kd, kd->program, "cannot read kptdir");
		return (-1);
	}

	if (va < REGION_BASE(6)) {
		_kvm_err(kd, kd->program, "kptdir is itself virtual");
		return (-1);
	}

	kd->vmst->kptdir = va;
	return (0);
}

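/*
 * Translate a kernel virtual address to an offset in the core file.
 * Returns the number of contiguous bytes valid at *ofs, or 0 if the
 * address cannot be translated.
 */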
int
_kvm_kvatop(kvm_t *kd, u_long va, off_t *ofs)
{
	struct ia64_lpte pte;
	uint64_t pa, pgaddr, pt0addr, pt1addr;
	size_t pgno, pgsz, pt0no, pt1no;

	if (va >= REGION_BASE(6)) {
		/* Regions 6 and 7: direct mapped. */
		pa = REGION_ADDR(va);
		return (_kvm_pa2off(kd, pa, ofs, 0));
	} else if (va >= REGION_BASE(5)) {
		/* Region 5: Kernel Virtual Memory. */
		va = REGION_ADDR(va);
		pgsz = kd->vmst->pagesize;
		pt0no = KPTE_DIR0_INDEX(va, pgsz);
		pt1no = KPTE_DIR1_INDEX(va, pgsz);
		pgno = KPTE_PTE_INDEX(va, pgsz);
		if (pt0no >= NKPTEDIR(pgsz))
			goto fail;
		pt0addr = kd->vmst->kptdir + (pt0no << 3);
		if (kvm_read(kd, pt0addr, &pt1addr, 8) != 8)
			goto fail;
		if (pt1addr == 0)
			goto fail;
		pt1addr += pt1no << 3;
		if (kvm_read(kd, pt1addr, &pgaddr, 8) != 8)
			goto fail;
		if (pgaddr == 0)
			goto fail;
		pgaddr += pgno * sizeof(pte);
		if (kvm_read(kd, pgaddr, &pte, sizeof(pte)) != sizeof(pte))
			goto fail;
		if (!(pte.pte & PTE_PRESENT))
			goto fail;
		pa = (pte.pte & PTE_PPN_MASK) + (va & (pgsz - 1));
		return (_kvm_pa2off(kd, pa, ofs, pgsz));
	} else if (va >= PBVM_BASE) {
		/* Region 4: Pre-Boot Virtual Memory (PBVM). */
		va -= PBVM_BASE;
		pgsz = PBVM_PGSZ;
		pt0no = va / pgsz;
		if (pt0no >= (kd->vmst->pbvm_pgtblsz >> 3))
			goto fail;
		pt0addr = kd->vmst->pbvm_pgtbl[pt0no];
		if (!(pt0addr & PTE_PRESENT))
			goto fail;
		pa = (pt0addr & PTE_PPN_MASK) + va % pgsz;
		return (_kvm_pa2off(kd, pa, ofs, pgsz));
	}

 fail:
	_kvm_err(kd, kd->program, "invalid kernel virtual address");
	*ofs = ~0UL;
	return (0);
}