/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/lib/libkvm/kvm.c 316071 2017-03-28 06:03:43Z ngie $");

#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm.c	8.2 (Berkeley) 2/13/94";
#endif
#endif /* LIBC_SCCS and not lint */

#include <sys/param.h>
#include <sys/fnv_hash.h>

#define	_WANT_VNET

#include <sys/user.h>
#include <sys/linker.h>
#include <sys/pcpu.h>
#include <sys/stat.h>

#include <net/vnet.h>

#include <errno.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <paths.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "kvm_private.h"

SET_DECLARE(kvm_arch, struct kvm_arch);

static char _kd_is_null[] = "";

/* from src/lib/libc/gen/nlist.c */
int __fdnlist(int, struct nlist *);

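/*
 * Look up symbols in a dead kernel.  Use the caller-supplied resolver if
 * one was registered via kvm_open2(); otherwise hand the request to
 * __fdnlist() on the namelist file descriptor.  Returns the number of
 * symbols that could not be resolved.
 */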
static int
kvm_fdnlist(kvm_t *kd, struct kvm_nlist *list)
{
	kvaddr_t addr;
	int error, nfail;

	if (kd->resolve_symbol == NULL) {
		struct nlist *nl;
		int count, i;

		for (count = 0; list[count].n_name != NULL &&
		     list[count].n_name[0] != '\0'; count++)
			;
		nl = calloc(count + 1, sizeof(*nl));
		if (nl == NULL) {
			_kvm_err(kd, kd->program, "cannot allocate memory");
			return (-1);
		}
		for (i = 0; i < count; i++)
			nl[i].n_name = list[i].n_name;
		nfail = __fdnlist(kd->nlfd, nl);
		for (i = 0; i < count; i++) {
			list[i].n_type = nl[i].n_type;
			list[i].n_value = nl[i].n_value;
		}
		free(nl);
		return (nfail);
	}

	nfail = 0;
	while (list->n_name != NULL && list->n_name[0] != '\0') {
		error = kd->resolve_symbol(list->n_name, &addr);
		if (error != 0) {
			nfail++;
			list->n_value = 0;
			list->n_type = 0;
		} else {
			list->n_value = addr;
			list->n_type = N_DATA | N_EXT;
		}
		list++;
	}
	return (nfail);
}

char *
kvm_geterr(kvm_t *kd)
{

	if (kd == NULL)
		return (_kd_is_null);
	return (kd->errbuf);
}

#include <stdarg.h>

/*
 * Report an error using printf style arguments.  "program" is kd->program
 * on hard errors, and 0 on soft errors, so that under sun error emulation,
 * only hard errors are printed out (otherwise, programs like gdb will
 * generate tons of error messages when trying to access bogus pointers).
 */
void
_kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fputc('\n', stderr);
	} else
		(void)vsnprintf(kd->errbuf,
		    sizeof(kd->errbuf), fmt, ap);

	va_end(ap);
}

void
_kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fprintf(stderr, ": %s\n", strerror(errno));
	} else {
		char *cp = kd->errbuf;

		(void)vsnprintf(cp, sizeof(kd->errbuf), fmt, ap);
		n = strlen(cp);
		(void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s",
		    strerror(errno));
	}
	va_end(ap);
}

void *
_kvm_malloc(kvm_t *kd, size_t n)
{
	void *p;

	if ((p = calloc(n, sizeof(char))) == NULL)
		_kvm_err(kd, kd->program, "can't allocate %zu bytes: %s",
			 n, strerror(errno));
	return (p);
}

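/*
 * Read and cache the ELF executable header of the kernel image so that
 * the architecture backends can later probe its class, machine and data
 * encoding.  Returns 0 on success and -1 (with the error recorded) on
 * failure.
 */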
static int
_kvm_read_kernel_ehdr(kvm_t *kd)
{
	Elf *elf;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		_kvm_err(kd, kd->program, "Unsupported libelf");
		return (-1);
	}
	elf = elf_begin(kd->nlfd, ELF_C_READ, NULL);
	if (elf == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		return (-1);
	}
	if (elf_kind(elf) != ELF_K_ELF) {
		_kvm_err(kd, kd->program, "kernel is not an ELF file");
		elf_end(elf);
		return (-1);
	}
	if (gelf_getehdr(elf, &kd->nlehdr) == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		elf_end(elf);
		return (-1);
	}
	elf_end(elf);

	switch (kd->nlehdr.e_ident[EI_DATA]) {
	case ELFDATA2LSB:
	case ELFDATA2MSB:
		return (0);
	default:
		_kvm_err(kd, kd->program,
		    "unsupported ELF data encoding for kernel");
		return (-1);
	}
}

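/*
 * Check whether the kernel image matches the ELF class and machine an
 * architecture backend expects.  Used by the ka_probe hooks.
 */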
int
_kvm_probe_elf_kernel(kvm_t *kd, int class, int machine)
{

	return (kd->nlehdr.e_ident[EI_CLASS] == class &&
	    kd->nlehdr.e_type == ET_EXEC &&
	    kd->nlehdr.e_machine == machine);
}

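/*
 * Check for the "minidump" signature at the start of the dump file; raw
 * dumps are never treated as minidumps.
 */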
int
_kvm_is_minidump(kvm_t *kd)
{
	char minihdr[8];

	if (kd->rawdump)
		return (0);
	if (pread(kd->pmfd, &minihdr, 8, 0) == 8 &&
	    memcmp(&minihdr, "minidump", 8) == 0)
		return (1);
	return (0);
}

/*
 * The powerpc backend has a hack to strip a leading kerneldump
 * header from the core before treating it as an ELF header.
 *
 * We can add that here if we can get a change to libelf to support
 * an initial offset into the file.  Alternatively we could patch
 * savecore to extract cores from a regular file instead.
 */
int
_kvm_read_core_phdrs(kvm_t *kd, size_t *phnump, GElf_Phdr **phdrp)
{
	GElf_Ehdr ehdr;
	GElf_Phdr *phdr;
	Elf *elf;
	size_t i, phnum;

	elf = elf_begin(kd->pmfd, ELF_C_READ, NULL);
	if (elf == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		return (-1);
	}
	if (elf_kind(elf) != ELF_K_ELF) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}
	if (gelf_getclass(elf) != kd->nlehdr.e_ident[EI_CLASS]) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}
	if (gelf_getehdr(elf, &ehdr) == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		goto bad;
	}
	if (ehdr.e_type != ET_CORE) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}
	if (ehdr.e_machine != kd->nlehdr.e_machine) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}

	if (elf_getphdrnum(elf, &phnum) == -1) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		goto bad;
	}

	phdr = calloc(phnum, sizeof(*phdr));
	if (phdr == NULL) {
		_kvm_err(kd, kd->program, "failed to allocate phdrs");
		goto bad;
	}

	for (i = 0; i < phnum; i++) {
		if (gelf_getphdr(elf, i, &phdr[i]) == NULL) {
			_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
			free(phdr);
			goto bad;
		}
	}
	elf_end(elf);
	*phnump = phnum;
	*phdrp = phdr;
	return (0);

bad:
	elf_end(elf);
	return (-1);
}

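/*
 * Hash table mapping physical page addresses to their offsets within the
 * dump file.  Minidump backends populate it from the dump's page bitmap
 * and use it to locate pages when translating addresses.
 */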
static void
_kvm_hpt_insert(struct hpt *hpt, uint64_t pa, off_t off)
{
	struct hpte *hpte;
	uint32_t fnv = FNV1_32_INIT;

	fnv = fnv_32_buf(&pa, sizeof(pa), fnv);
	fnv &= (HPT_SIZE - 1);
	hpte = malloc(sizeof(*hpte));
	hpte->pa = pa;
	hpte->off = off;
	hpte->next = hpt->hpt_head[fnv];
	hpt->hpt_head[fnv] = hpte;
}

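/*
 * Populate the hash table from a page bitmap: bit N set means that the
 * page with physical address N * page_size is present in the dump, with
 * the present pages stored sequentially starting at "off".
 */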
void
_kvm_hpt_init(kvm_t *kd, struct hpt *hpt, void *base, size_t len, off_t off,
    int page_size, int word_size)
{
	uint64_t bits, idx, pa;
	uint64_t *base64;
	uint32_t *base32;

	base64 = base;
	base32 = base;
	for (idx = 0; idx < len / word_size; idx++) {
		if (word_size == sizeof(uint64_t))
			bits = _kvm64toh(kd, base64[idx]);
		else
			bits = _kvm32toh(kd, base32[idx]);
		pa = idx * word_size * NBBY * page_size;
		for (; bits != 0; bits >>= 1, pa += page_size) {
			if ((bits & 1) == 0)
				continue;
			_kvm_hpt_insert(hpt, pa, off);
			off += page_size;
		}
	}
}

off_t
_kvm_hpt_find(struct hpt *hpt, uint64_t pa)
{
	struct hpte *hpte;
	uint32_t fnv = FNV1_32_INIT;

	fnv = fnv_32_buf(&pa, sizeof(pa), fnv);
	fnv &= (HPT_SIZE - 1);
	for (hpte = hpt->hpt_head[fnv]; hpte != NULL; hpte = hpte->next) {
		if (pa == hpte->pa)
			return (hpte->off);
	}
	return (-1);
}

void
_kvm_hpt_free(struct hpt *hpt)
{
	struct hpte *hpte, *next;
	int i;

	for (i = 0; i < HPT_SIZE; i++) {
		for (hpte = hpt->hpt_head[i]; hpte != NULL; hpte = next) {
			next = hpte->next;
			free(hpte);
		}
	}
}

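/*
 * Common open path shared by kvm_open(), kvm_openfiles() and kvm_open2().
 * For a live kernel (mf is /dev/mem or /dev/null) only the memory file
 * descriptors are opened; for a crash dump the kernel image is opened as
 * well, an architecture backend is selected by probing, and the virtual
 * address translation machinery is initialized.
 */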
static kvm_t *
_kvm_open(kvm_t *kd, const char *uf, const char *mf, int flag, char *errout)
{
	struct kvm_arch **parch;
	struct stat st;

	kd->vmfd = -1;
	kd->pmfd = -1;
	kd->nlfd = -1;
	kd->vmst = NULL;
	kd->procbase = NULL;
	kd->argspc = NULL;
	kd->argv = NULL;

	if (uf == NULL)
		uf = getbootfile();
	else if (strlen(uf) >= MAXPATHLEN) {
		_kvm_err(kd, kd->program, "exec file name too long");
		goto failed;
	}
	if (flag & ~O_RDWR) {
		_kvm_err(kd, kd->program, "bad flags arg");
		goto failed;
	}
	if (mf == NULL)
		mf = _PATH_MEM;

	if ((kd->pmfd = open(mf, flag | O_CLOEXEC, 0)) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (fstat(kd->pmfd, &st) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (S_ISREG(st.st_mode) && st.st_size <= 0) {
		errno = EINVAL;
		_kvm_syserr(kd, kd->program, "empty file");
		goto failed;
	}
	if (S_ISCHR(st.st_mode)) {
		/*
		 * If this is a character special device, then check that
		 * it's /dev/mem.  If so, open kmem too.  (Maybe we should
		 * make it work for either /dev/mem or /dev/kmem -- in either
		 * case you're working with a live kernel.)
		 */
		if (strcmp(mf, _PATH_DEVNULL) == 0) {
			kd->vmfd = open(_PATH_DEVNULL, O_RDONLY | O_CLOEXEC);
			return (kd);
		} else if (strcmp(mf, _PATH_MEM) == 0) {
			if ((kd->vmfd = open(_PATH_KMEM, flag | O_CLOEXEC)) <
			    0) {
				_kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
				goto failed;
			}
			return (kd);
		}
	}

	/*
	 * This is either a crash dump or a remote live system with its physical
	 * memory fully accessible via a special device.
	 * Open the namelist fd and determine the architecture.
	 */
	if ((kd->nlfd = open(uf, O_RDONLY | O_CLOEXEC, 0)) < 0) {
		_kvm_syserr(kd, kd->program, "%s", uf);
		goto failed;
	}
	if (_kvm_read_kernel_ehdr(kd) < 0)
		goto failed;
	if (strncmp(mf, _PATH_FWMEM, strlen(_PATH_FWMEM)) == 0 ||
	    strncmp(mf, _PATH_DEVVMM, strlen(_PATH_DEVVMM)) == 0) {
		kd->rawdump = 1;
		kd->writable = 1;
	}
	SET_FOREACH(parch, kvm_arch) {
		if ((*parch)->ka_probe(kd)) {
			kd->arch = *parch;
			break;
		}
	}
	if (kd->arch == NULL) {
		_kvm_err(kd, kd->program, "unsupported architecture");
		goto failed;
	}

	/*
	 * Non-native kernels require a symbol resolver.
	 */
	if (!kd->arch->ka_native(kd) && kd->resolve_symbol == NULL) {
		_kvm_err(kd, kd->program,
		    "non-native kernel requires a symbol resolver");
		goto failed;
	}

	/*
	 * Initialize the virtual address translation machinery.
	 */
	if (kd->arch->ka_initvtop(kd) < 0)
		goto failed;
	return (kd);
failed:
	/*
	 * Copy out the error if doing sane error semantics.
	 */
	if (errout != NULL)
		strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX);
	(void)kvm_close(kd);
	return (NULL);
}

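/*
 * The public constructors differ only in how errors are reported and in
 * whether a user symbol resolver may be registered: kvm_openfiles() and
 * kvm_open2() copy the error text into the caller's buffer, while
 * kvm_open() prints to stderr using "errstr" as the program name.
 */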
kvm_t *
kvm_openfiles(const char *uf, const char *mf, const char *sf __unused, int flag,
    char *errout)
{
	kvm_t *kd;

	if ((kd = calloc(1, sizeof(*kd))) == NULL) {
		if (errout != NULL)
			(void)strlcpy(errout, strerror(errno),
			    _POSIX2_LINE_MAX);
		return (NULL);
	}
	return (_kvm_open(kd, uf, mf, flag, errout));
}

kvm_t *
kvm_open(const char *uf, const char *mf, const char *sf __unused, int flag,
    const char *errstr)
{
	kvm_t *kd;

	if ((kd = calloc(1, sizeof(*kd))) == NULL) {
		if (errstr != NULL)
			(void)fprintf(stderr, "%s: %s\n",
				      errstr, strerror(errno));
		return (NULL);
	}
	kd->program = errstr;
	return (_kvm_open(kd, uf, mf, flag, NULL));
}

kvm_t *
kvm_open2(const char *uf, const char *mf, int flag, char *errout,
    int (*resolver)(const char *, kvaddr_t *))
{
	kvm_t *kd;

	if ((kd = calloc(1, sizeof(*kd))) == NULL) {
		if (errout != NULL)
			(void)strlcpy(errout, strerror(errno),
			    _POSIX2_LINE_MAX);
		return (NULL);
	}
	kd->resolve_symbol = resolver;
	return (_kvm_open(kd, uf, mf, flag, errout));
}

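/*
 * Release all resources held by a kvm descriptor: per-architecture
 * translation state, open file descriptors and cached process/argument
 * buffers.  Returns 0 on success, or -1 if kd is NULL or a close failed.
 */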
int
kvm_close(kvm_t *kd)
{
	int error = 0;

	if (kd == NULL) {
		errno = EINVAL;
		return (-1);
	}
	if (kd->vmst != NULL)
		kd->arch->ka_freevtop(kd);
	if (kd->pmfd >= 0)
		error |= close(kd->pmfd);
	if (kd->vmfd >= 0)
		error |= close(kd->vmfd);
	if (kd->nlfd >= 0)
		error |= close(kd->nlfd);
	free(kd->procbase);
	free(kd->argbuf);
	free(kd->argspc);
	free(kd->argv);
	free(kd);

	return (error);
}

/*
 * Walk the list of unresolved symbols, generate a new list and prefix the
 * symbol names, try again, and merge back what we could resolve.
 */
static int
kvm_fdnlist_prefix(kvm_t *kd, struct kvm_nlist *nl, int missing,
    const char *prefix, kvaddr_t (*validate_fn)(kvm_t *, kvaddr_t))
{
	struct kvm_nlist *n, *np, *p;
	char *cp, *ce;
	const char *ccp;
	size_t len;
	int slen, unresolved;

	/*
	 * Calculate the space we need to malloc for nlist and names.
	 * We are going to store the name twice for later lookups: once
	 * with the prefix and once the unmodified name delimited by \0.
	 */
	len = 0;
	unresolved = 0;
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		if (p->n_type != N_UNDF)
			continue;
		len += sizeof(struct kvm_nlist) + strlen(prefix) +
		    2 * (strlen(p->n_name) + 1);
		unresolved++;
	}
	if (unresolved == 0)
		return (unresolved);
	/* Add space for the terminating nlist entry. */
	len += sizeof(struct kvm_nlist);
	unresolved++;

	/* Alloc one chunk for (nlist, [names]) and setup pointers. */
	n = np = malloc(len);
	if (n == NULL)
		return (missing);
	bzero(n, len);
	cp = ce = (char *)np;
	cp += unresolved * sizeof(struct kvm_nlist);
	ce += len;

	/* Generate shortened nlist with special prefix. */
	unresolved = 0;
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		if (p->n_type != N_UNDF)
			continue;
		*np = *p;
		/* Save the new\0orig. name so we can later match it again. */
		slen = snprintf(cp, ce - cp, "%s%s%c%s", prefix,
		    (prefix[0] != '\0' && p->n_name[0] == '_') ?
			(p->n_name + 1) : p->n_name, '\0', p->n_name);
		if (slen < 0 || slen >= ce - cp)
			continue;
		np->n_name = cp;
		cp += slen + 1;
		np++;
		unresolved++;
	}

	/* Do lookup on the reduced list. */
	np = n;
	unresolved = kvm_fdnlist(kd, np);

	/* Check if we could resolve further symbols and update the list. */
	if (unresolved >= 0 && unresolved < missing) {
		/* Find the first freshly resolved entry. */
		for (; np->n_name && np->n_name[0]; np++)
			if (np->n_type != N_UNDF)
				break;
		/*
		 * The lists are both in the same order,
		 * so we can walk them in parallel.
		 */
		for (p = nl; np->n_name && np->n_name[0] &&
		    p->n_name && p->n_name[0]; ++p) {
			if (p->n_type != N_UNDF)
				continue;
			/* Skip expanded name and compare to orig. one. */
			ccp = np->n_name + strlen(np->n_name) + 1;
			if (strcmp(ccp, p->n_name) != 0)
				continue;
			/* Update nlist with new, translated results. */
			p->n_type = np->n_type;
			if (validate_fn)
				p->n_value = (*validate_fn)(kd, np->n_value);
			else
				p->n_value = np->n_value;
			missing--;
			/* Find next freshly resolved entry. */
			for (np++; np->n_name && np->n_name[0]; np++)
				if (np->n_type != N_UNDF)
					break;
		}
	}
	/* We could assert missing = unresolved here. */

	free(n);
	return (unresolved);
}

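/*
 * Resolve a list of symbols.  On a live kernel each symbol is looked up
 * with kldsym(2); on a dead kernel the namelist file is consulted via
 * kvm_fdnlist().  Symbols that remain unresolved are retried with the
 * VNET and DPCPU prefixes so that virtualized and per-CPU variables can
 * be found.  Returns the number of symbols that were not resolved.
 */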
int
_kvm_nlist(kvm_t *kd, struct kvm_nlist *nl, int initialize)
{
	struct kvm_nlist *p;
	int nvalid;
	struct kld_sym_lookup lookup;
	int error;
	const char *prefix = "";
	char symname[1024]; /* XXX-BZ symbol name length limit? */
	int tried_vnet, tried_dpcpu;

	/*
	 * If we can't use the kld symbol lookup, revert to the
	 * slow library call.
	 */
	if (!ISALIVE(kd)) {
		error = kvm_fdnlist(kd, nl);
		if (error <= 0)			/* Hard error or success. */
			return (error);

		if (_kvm_vnet_initialized(kd, initialize))
			error = kvm_fdnlist_prefix(kd, nl, error,
			    VNET_SYMPREFIX, _kvm_vnet_validaddr);

		if (error > 0 && _kvm_dpcpu_initialized(kd, initialize))
			error = kvm_fdnlist_prefix(kd, nl, error,
			    DPCPU_SYMPREFIX, _kvm_dpcpu_validaddr);

		return (error);
	}

	/*
	 * We can use the kld lookup syscall.  Go through each nlist entry
	 * and look it up with a kldsym(2) syscall.
	 */
	nvalid = 0;
	tried_vnet = 0;
	tried_dpcpu = 0;
again:
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		if (p->n_type != N_UNDF)
			continue;

		lookup.version = sizeof(lookup);
		lookup.symvalue = 0;
		lookup.symsize = 0;

		error = snprintf(symname, sizeof(symname), "%s%s", prefix,
		    (prefix[0] != '\0' && p->n_name[0] == '_') ?
			(p->n_name + 1) : p->n_name);
		if (error < 0 || error >= (int)sizeof(symname))
			continue;
		lookup.symname = symname;
		if (lookup.symname[0] == '_')
			lookup.symname++;

		if (kldsym(0, KLDSYM_LOOKUP, &lookup) != -1) {
			p->n_type = N_TEXT;
			if (_kvm_vnet_initialized(kd, initialize) &&
			    strcmp(prefix, VNET_SYMPREFIX) == 0)
				p->n_value =
				    _kvm_vnet_validaddr(kd, lookup.symvalue);
			else if (_kvm_dpcpu_initialized(kd, initialize) &&
			    strcmp(prefix, DPCPU_SYMPREFIX) == 0)
				p->n_value =
				    _kvm_dpcpu_validaddr(kd, lookup.symvalue);
			else
				p->n_value = lookup.symvalue;
			++nvalid;
			/* lookup.symsize */
		}
	}

	/*
	 * Check the number of entries that weren't found. If they exist,
	 * try again with a prefix for virtualized or DPCPU symbol names.
	 */
	error = ((p - nl) - nvalid);
	if (error && _kvm_vnet_initialized(kd, initialize) && !tried_vnet) {
		tried_vnet = 1;
		prefix = VNET_SYMPREFIX;
		goto again;
	}
	if (error && _kvm_dpcpu_initialized(kd, initialize) && !tried_dpcpu) {
		tried_dpcpu = 1;
		prefix = DPCPU_SYMPREFIX;
		goto again;
	}

	/*
	 * Return the number of entries that weren't found. If they exist,
	 * also fill internal error buffer.
	 */
	error = ((p - nl) - nvalid);
	if (error)
		_kvm_syserr(kd, kd->program, "kvm_nlist");
	return (error);
}

int
kvm_nlist2(kvm_t *kd, struct kvm_nlist *nl)
{

	/*
	 * If called via the public interface, permit initialization of
	 * further virtualized modules on demand.
	 */
	return (_kvm_nlist(kd, nl, 1));
}

int
kvm_nlist(kvm_t *kd, struct nlist *nl)
{
	struct kvm_nlist *kl;
	int count, i, nfail;

	/*
	 * Avoid reporting truncated addresses by failing for non-native
	 * cores.
	 */
	if (!kvm_native(kd)) {
		_kvm_err(kd, kd->program, "kvm_nlist of non-native vmcore");
		return (-1);
	}

	for (count = 0; nl[count].n_name != NULL && nl[count].n_name[0] != '\0';
	     count++)
		;
	if (count == 0)
		return (0);
	kl = calloc(count + 1, sizeof(*kl));
	if (kl == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate memory");
		return (-1);
	}
	for (i = 0; i < count; i++)
		kl[i].n_name = nl[i].n_name;
	nfail = kvm_nlist2(kd, kl);
	for (i = 0; i < count; i++) {
		nl[i].n_type = kl[i].n_type;
		nl[i].n_other = 0;
		nl[i].n_desc = 0;
		nl[i].n_value = kl[i].n_value;
	}
	free(kl);
	return (nfail);
}

ssize_t
kvm_read(kvm_t *kd, u_long kva, void *buf, size_t len)
{

	return (kvm_read2(kd, kva, buf, len));
}

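/*
 * Read len bytes of kernel virtual memory starting at kva.  On a live
 * kernel the request goes straight to /dev/kmem; for a core the address
 * is translated to file offsets one run at a time by the architecture
 * backend.  Returns the number of bytes actually read, or -1 on error.
 */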
ssize_t
kvm_read2(kvm_t *kd, kvaddr_t kva, void *buf, size_t len)
{
	int cc;
	ssize_t cr;
	off_t pa;
	char *cp;

	if (ISALIVE(kd)) {
		/*
		 * We're using /dev/kmem.  Just read straight from the
		 * device and let the active kernel do the address translation.
		 */
		errno = 0;
		if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
			_kvm_err(kd, 0, "invalid address (0x%jx)",
			    (uintmax_t)kva);
			return (-1);
		}
		cr = read(kd->vmfd, buf, len);
		if (cr < 0) {
			_kvm_syserr(kd, 0, "kvm_read");
			return (-1);
		} else if (cr < (ssize_t)len)
			_kvm_err(kd, kd->program, "short read");
		return (cr);
	}

	cp = buf;
	while (len > 0) {
		cc = kd->arch->ka_kvatop(kd, kva, &pa);
		if (cc == 0)
			return (-1);
		if (cc > (ssize_t)len)
			cc = len;
		errno = 0;
		if (lseek(kd->pmfd, pa, 0) == -1 && errno != 0) {
			_kvm_syserr(kd, 0, _PATH_MEM);
			break;
		}
		cr = read(kd->pmfd, cp, cc);
		if (cr < 0) {
			_kvm_syserr(kd, kd->program, "kvm_read");
			break;
		}
		/*
		 * If ka_kvatop returns a bogus value or our core file is
		 * truncated, we might wind up seeking beyond the end of the
		 * core file in which case the read will return 0 (EOF).
		 */
		if (cr == 0)
			break;
		cp += cr;
		kva += cr;
		len -= cr;
	}

	return (cp - (char *)buf);
}

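/*
 * Write len bytes to kernel virtual memory at kva.  Only live kernels
 * (via /dev/kmem) and writable raw memory devices are supported; for an
 * ordinary crash dump the call fails.
 */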
ssize_t
kvm_write(kvm_t *kd, u_long kva, const void *buf, size_t len)
{
	int cc;
	ssize_t cw;
	off_t pa;
	const char *cp;

	if (!ISALIVE(kd) && !kd->writable) {
		_kvm_err(kd, kd->program,
		    "kvm_write not implemented for dead kernels");
		return (-1);
	}

	if (ISALIVE(kd)) {
		/*
		 * Just like kvm_read, only we write.
		 */
		errno = 0;
		if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
			_kvm_err(kd, 0, "invalid address (%lx)", kva);
			return (-1);
		}
		cc = write(kd->vmfd, buf, len);
		if (cc < 0) {
			_kvm_syserr(kd, 0, "kvm_write");
			return (-1);
		} else if ((size_t)cc < len)
			_kvm_err(kd, kd->program, "short write");
		return (cc);
	}

	cp = buf;
	while (len > 0) {
		cc = kd->arch->ka_kvatop(kd, kva, &pa);
		if (cc == 0)
			return (-1);
		if (cc > (ssize_t)len)
			cc = len;
		errno = 0;
		if (lseek(kd->pmfd, pa, 0) == -1 && errno != 0) {
			_kvm_syserr(kd, 0, _PATH_MEM);
			break;
		}
		cw = write(kd->pmfd, cp, cc);
		if (cw < 0) {
			_kvm_syserr(kd, kd->program, "kvm_write");
			break;
		}
		/*
		 * If ka_kvatop returns a bogus value or our core file is
		 * truncated, we might wind up seeking beyond the end of the
		 * core file in which case the write will return 0.
		 */
		if (cw == 0)
			break;
		cp += cw;
		kva += cw;
		len -= cw;
	}

	return (cp - (const char *)buf);
}

int
kvm_native(kvm_t *kd)
{

	if (ISALIVE(kd))
		return (1);
	return (kd->arch->ka_native(kd));
}
