kvm.c revision 330897
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/lib/libkvm/kvm.c 330897 2018-03-14 03:19:51Z eadler $");

#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm.c	8.2 (Berkeley) 2/13/94";
#endif
#endif /* LIBC_SCCS and not lint */

#include <sys/param.h>
#include <sys/fnv_hash.h>

#define	_WANT_VNET

#include <sys/user.h>
#include <sys/linker.h>
#include <sys/pcpu.h>
#include <sys/stat.h>

#include <net/vnet.h>

#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <paths.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "kvm_private.h"

SET_DECLARE(kvm_arch, struct kvm_arch);

static char _kd_is_null[] = "";

/* from src/lib/libc/gen/nlist.c */
int __fdnlist(int, struct nlist *);

static int
kvm_fdnlist(kvm_t *kd, struct kvm_nlist *list)
{
	kvaddr_t addr;
	int error, nfail;

	if (kd->resolve_symbol == NULL) {
		struct nlist *nl;
		int count, i;

		for (count = 0; list[count].n_name != NULL &&
		     list[count].n_name[0] != '\0'; count++)
			;
		nl = calloc(count + 1, sizeof(*nl));
		for (i = 0; i < count; i++)
			nl[i].n_name = list[i].n_name;
		nfail = __fdnlist(kd->nlfd, nl);
		for (i = 0; i < count; i++) {
			list[i].n_type = nl[i].n_type;
			list[i].n_value = nl[i].n_value;
		}
		free(nl);
		return (nfail);
	}

	nfail = 0;
	while (list->n_name != NULL && list->n_name[0] != '\0') {
		error = kd->resolve_symbol(list->n_name, &addr);
		if (error != 0) {
			nfail++;
			list->n_value = 0;
			list->n_type = 0;
		} else {
			list->n_value = addr;
			list->n_type = N_DATA | N_EXT;
		}
		list++;
	}
	return (nfail);
}

char *
kvm_geterr(kvm_t *kd)
{

	if (kd == NULL)
		return (_kd_is_null);
	return (kd->errbuf);
}
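
/*
 * Usage sketch (illustrative only, not part of the library): the error
 * buffer returned by kvm_geterr() is only meaningful after a kvm(3) call
 * has failed on a valid handle, so callers typically check the return
 * value first; 'addr' and 'val' below are hypothetical:
 *
 *	if (kvm_read2(kd, addr, &val, sizeof(val)) != (ssize_t)sizeof(val))
 *		warnx("kvm_read2: %s", kvm_geterr(kd));
 */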

#include <stdarg.h>

/*
 * Report an error using printf style arguments.  "program" is kd->program
 * on hard errors, and 0 on soft errors, so that under sun error emulation,
 * only hard errors are printed out (otherwise, programs like gdb will
 * generate tons of error messages when trying to access bogus pointers).
 */
void
_kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fputc('\n', stderr);
	} else
		(void)vsnprintf(kd->errbuf,
		    sizeof(kd->errbuf), fmt, ap);

	va_end(ap);
}

void
_kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fprintf(stderr, ": %s\n", strerror(errno));
	} else {
		char *cp = kd->errbuf;

		(void)vsnprintf(cp, sizeof(kd->errbuf), fmt, ap);
		n = strlen(cp);
		(void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s",
		    strerror(errno));
	}
	va_end(ap);
}

void *
_kvm_malloc(kvm_t *kd, size_t n)
{
	void *p;

	if ((p = calloc(n, sizeof(char))) == NULL)
		_kvm_err(kd, kd->program, "can't allocate %zu bytes: %s",
			 n, strerror(errno));
	return (p);
}

static int
_kvm_read_kernel_ehdr(kvm_t *kd)
{
	Elf *elf;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		_kvm_err(kd, kd->program, "Unsupported libelf");
		return (-1);
	}
	elf = elf_begin(kd->nlfd, ELF_C_READ, NULL);
	if (elf == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		return (-1);
	}
	if (elf_kind(elf) != ELF_K_ELF) {
		_kvm_err(kd, kd->program, "kernel is not an ELF file");
		return (-1);
	}
	if (gelf_getehdr(elf, &kd->nlehdr) == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		elf_end(elf);
		return (-1);
	}
	elf_end(elf);

	switch (kd->nlehdr.e_ident[EI_DATA]) {
	case ELFDATA2LSB:
	case ELFDATA2MSB:
		return (0);
	default:
		_kvm_err(kd, kd->program,
		    "unsupported ELF data encoding for kernel");
		return (-1);
	}
}

int
_kvm_probe_elf_kernel(kvm_t *kd, int class, int machine)
{

	return (kd->nlehdr.e_ident[EI_CLASS] == class &&
	    kd->nlehdr.e_type == ET_EXEC &&
	    kd->nlehdr.e_machine == machine);
}
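
/*
 * Illustrative sketch (not part of this file): an architecture backend's
 * ka_probe hook typically pairs the ELF check above with the minidump
 * check below, roughly along the lines of the amd64 backend:
 *
 *	static int
 *	_amd64_probe(kvm_t *kd)
 *	{
 *
 *		return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_X86_64) &&
 *		    !_kvm_is_minidump(kd));
 *	}
 */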

int
_kvm_is_minidump(kvm_t *kd)
{
	char minihdr[8];

	if (kd->rawdump)
		return (0);
	if (pread(kd->pmfd, &minihdr, 8, 0) == 8 &&
	    memcmp(&minihdr, "minidump", 8) == 0)
		return (1);
	return (0);
}

/*
 * The powerpc backend has a hack to strip a leading kerneldump
 * header from the core before treating it as an ELF header.
 *
 * We can add that here if we can get a change to libelf to support
 * an initial offset into the file.  Alternatively we could patch
 * savecore to extract cores from a regular file instead.
 */
int
_kvm_read_core_phdrs(kvm_t *kd, size_t *phnump, GElf_Phdr **phdrp)
{
	GElf_Ehdr ehdr;
	GElf_Phdr *phdr;
	Elf *elf;
	size_t i, phnum;

	elf = elf_begin(kd->pmfd, ELF_C_READ, NULL);
	if (elf == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		return (-1);
	}
	if (elf_kind(elf) != ELF_K_ELF) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}
	if (gelf_getclass(elf) != kd->nlehdr.e_ident[EI_CLASS]) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}
	if (gelf_getehdr(elf, &ehdr) == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		goto bad;
	}
	if (ehdr.e_type != ET_CORE) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}
	if (ehdr.e_machine != kd->nlehdr.e_machine) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}

	if (elf_getphdrnum(elf, &phnum) == -1) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		goto bad;
	}

	phdr = calloc(phnum, sizeof(*phdr));
	if (phdr == NULL) {
		_kvm_err(kd, kd->program, "failed to allocate phdrs");
		goto bad;
	}

	for (i = 0; i < phnum; i++) {
		if (gelf_getphdr(elf, i, &phdr[i]) == NULL) {
			_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
			goto bad;
		}
	}
	elf_end(elf);
	*phnump = phnum;
	*phdrp = phdr;
	return (0);

bad:
	elf_end(elf);
	return (-1);
}

static void
_kvm_hpt_insert(struct hpt *hpt, uint64_t pa, off_t off)
{
	struct hpte *hpte;
	uint32_t fnv = FNV1_32_INIT;

	fnv = fnv_32_buf(&pa, sizeof(pa), fnv);
	fnv &= (HPT_SIZE - 1);
	hpte = malloc(sizeof(*hpte));
	hpte->pa = pa;
	hpte->off = off;
	hpte->next = hpt->hpt_head[fnv];
	hpt->hpt_head[fnv] = hpte;
}

void
_kvm_hpt_init(kvm_t *kd, struct hpt *hpt, void *base, size_t len, off_t off,
    int page_size, int word_size)
{
	uint64_t bits, idx, pa;
	uint64_t *base64;
	uint32_t *base32;

	base64 = base;
	base32 = base;
	for (idx = 0; idx < len / word_size; idx++) {
		if (word_size == sizeof(uint64_t))
			bits = _kvm64toh(kd, base64[idx]);
		else
			bits = _kvm32toh(kd, base32[idx]);
		pa = idx * word_size * NBBY * page_size;
		for (; bits != 0; bits >>= 1, pa += page_size) {
			if ((bits & 1) == 0)
				continue;
			_kvm_hpt_insert(hpt, pa, off);
			off += page_size;
		}
	}
}
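
/*
 * Worked example for the bitmap walked above (illustrative): with a
 * 4096-byte page and 64-bit words, word 0 of the bitmap covers physical
 * addresses 0 through 64 * 4096 - 1, one bit per page.  If bit 3 of that
 * word is set, the page at physical address 3 * 4096 is present in the
 * dump; it is recorded at the current file offset 'off', which then
 * advances by one page size for every set bit encountered.
 */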

off_t
_kvm_hpt_find(struct hpt *hpt, uint64_t pa)
{
	struct hpte *hpte;
	uint32_t fnv = FNV1_32_INIT;

	fnv = fnv_32_buf(&pa, sizeof(pa), fnv);
	fnv &= (HPT_SIZE - 1);
	for (hpte = hpt->hpt_head[fnv]; hpte != NULL; hpte = hpte->next) {
		if (pa == hpte->pa)
			return (hpte->off);
	}
	return (-1);
}

void
_kvm_hpt_free(struct hpt *hpt)
{
	struct hpte *hpte, *next;
	int i;

	for (i = 0; i < HPT_SIZE; i++) {
		for (hpte = hpt->hpt_head[i]; hpte != NULL; hpte = next) {
			next = hpte->next;
			free(hpte);
		}
	}
}

static kvm_t *
_kvm_open(kvm_t *kd, const char *uf, const char *mf, int flag, char *errout)
{
	struct kvm_arch **parch;
	struct stat st;

	kd->vmfd = -1;
	kd->pmfd = -1;
	kd->nlfd = -1;
	kd->vmst = NULL;
	kd->procbase = NULL;
	kd->argspc = NULL;
	kd->argv = NULL;

	if (uf == NULL)
		uf = getbootfile();
	else if (strlen(uf) >= MAXPATHLEN) {
		_kvm_err(kd, kd->program, "exec file name too long");
		goto failed;
	}
	if (flag & ~O_RDWR) {
		_kvm_err(kd, kd->program, "bad flags arg");
		goto failed;
	}
	if (mf == NULL)
		mf = _PATH_MEM;

	if ((kd->pmfd = open(mf, flag | O_CLOEXEC, 0)) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (fstat(kd->pmfd, &st) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (S_ISREG(st.st_mode) && st.st_size <= 0) {
		errno = EINVAL;
		_kvm_syserr(kd, kd->program, "empty file");
		goto failed;
	}
	if (S_ISCHR(st.st_mode)) {
		/*
		 * If this is a character special device, then check that
		 * it's /dev/mem.  If so, open kmem too.  (Maybe we should
		 * make it work for either /dev/mem or /dev/kmem -- in either
		 * case you're working with a live kernel.)
		 */
		if (strcmp(mf, _PATH_DEVNULL) == 0) {
			kd->vmfd = open(_PATH_DEVNULL, O_RDONLY | O_CLOEXEC);
			return (kd);
		} else if (strcmp(mf, _PATH_MEM) == 0) {
			if ((kd->vmfd = open(_PATH_KMEM, flag | O_CLOEXEC)) <
			    0) {
				_kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
				goto failed;
			}
			return (kd);
		}
	}

	/*
	 * This is either a crash dump or a remote live system with its physical
	 * memory fully accessible via a special device.
	 * Open the namelist fd and determine the architecture.
	 */
	if ((kd->nlfd = open(uf, O_RDONLY | O_CLOEXEC, 0)) < 0) {
		_kvm_syserr(kd, kd->program, "%s", uf);
		goto failed;
	}
	if (_kvm_read_kernel_ehdr(kd) < 0)
		goto failed;
	if (strncmp(mf, _PATH_FWMEM, strlen(_PATH_FWMEM)) == 0 ||
	    strncmp(mf, _PATH_DEVVMM, strlen(_PATH_DEVVMM)) == 0) {
		kd->rawdump = 1;
		kd->writable = 1;
	}
	SET_FOREACH(parch, kvm_arch) {
		if ((*parch)->ka_probe(kd)) {
			kd->arch = *parch;
			break;
		}
	}
	if (kd->arch == NULL) {
		_kvm_err(kd, kd->program, "unsupported architecture");
		goto failed;
	}

	/*
	 * Non-native kernels require a symbol resolver.
	 */
	if (!kd->arch->ka_native(kd) && kd->resolve_symbol == NULL) {
		_kvm_err(kd, kd->program,
		    "non-native kernel requires a symbol resolver");
		goto failed;
	}

	/*
	 * Initialize the virtual address translation machinery.
	 */
	if (kd->arch->ka_initvtop(kd) < 0)
		goto failed;
	return (kd);
failed:
	/*
	 * Copy out the error if doing sane error semantics.
	 */
	if (errout != NULL)
		strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX);
	(void)kvm_close(kd);
	return (NULL);
}

kvm_t *
kvm_openfiles(const char *uf, const char *mf, const char *sf __unused, int flag,
    char *errout)
{
	kvm_t *kd;

	if ((kd = calloc(1, sizeof(*kd))) == NULL) {
		if (errout != NULL)
			(void)strlcpy(errout, strerror(errno),
			    _POSIX2_LINE_MAX);
		return (NULL);
	}
	return (_kvm_open(kd, uf, mf, flag, errout));
}

kvm_t *
kvm_open(const char *uf, const char *mf, const char *sf __unused, int flag,
    const char *errstr)
{
	kvm_t *kd;

	if ((kd = calloc(1, sizeof(*kd))) == NULL) {
		if (errstr != NULL)
			(void)fprintf(stderr, "%s: %s\n",
				      errstr, strerror(errno));
		return (NULL);
	}
	kd->program = errstr;
	return (_kvm_open(kd, uf, mf, flag, NULL));
}

kvm_t *
kvm_open2(const char *uf, const char *mf, int flag, char *errout,
    int (*resolver)(const char *, kvaddr_t *))
{
	kvm_t *kd;

	if ((kd = calloc(1, sizeof(*kd))) == NULL) {
		if (errout != NULL)
			(void)strlcpy(errout, strerror(errno),
			    _POSIX2_LINE_MAX);
		return (NULL);
	}
	kd->resolve_symbol = resolver;
	return (_kvm_open(kd, uf, mf, flag, errout));
}
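
/*
 * Usage sketch (illustrative, not part of the library; assumes <kvm.h>,
 * <fcntl.h>, <limits.h> and <err.h> are included): opening a crash dump
 * with kvm_open2() and a caller-supplied symbol resolver.  A resolver is
 * required only for non-native kernels; "my_resolver" and "lookup_symbol"
 * are hypothetical names.  A resolver fills in the address and returns 0
 * on success, non-zero on failure.
 *
 *	static int
 *	my_resolver(const char *name, kvaddr_t *addr)
 *	{
 *
 *		return (lookup_symbol(name, addr) == 0 ? 0 : 1);
 *	}
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	kvm_t *kd;
 *
 *	kd = kvm_open2("/boot/kernel/kernel", "/var/crash/vmcore.0",
 *	    O_RDONLY, errbuf, my_resolver);
 *	if (kd == NULL)
 *		errx(1, "kvm_open2: %s", errbuf);
 */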

int
kvm_close(kvm_t *kd)
{
	int error = 0;

	if (kd == NULL) {
		errno = EINVAL;
		return (-1);
	}
	if (kd->vmst != NULL)
		kd->arch->ka_freevtop(kd);
	if (kd->pmfd >= 0)
		error |= close(kd->pmfd);
	if (kd->vmfd >= 0)
		error |= close(kd->vmfd);
	if (kd->nlfd >= 0)
		error |= close(kd->nlfd);
	if (kd->procbase != 0)
		free((void *)kd->procbase);
	if (kd->argbuf != 0)
		free((void *) kd->argbuf);
	if (kd->argspc != 0)
		free((void *) kd->argspc);
	if (kd->argv != 0)
		free((void *)kd->argv);
	free((void *)kd);

	return (error);
}

/*
 * Walk the list of unresolved symbols, generate a new list and prefix the
 * symbol names, try again, and merge back what we could resolve.
 */
static int
kvm_fdnlist_prefix(kvm_t *kd, struct kvm_nlist *nl, int missing,
    const char *prefix, kvaddr_t (*validate_fn)(kvm_t *, kvaddr_t))
{
	struct kvm_nlist *n, *np, *p;
	char *cp, *ce;
	const char *ccp;
	size_t len;
	int slen, unresolved;

	/*
	 * Calculate the space we need to malloc for nlist and names.
	 * We are going to store the name twice for later lookups: once
	 * with the prefix and once unmodified, delimited by '\0'.
	 */
	len = 0;
	unresolved = 0;
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		if (p->n_type != N_UNDF)
			continue;
		len += sizeof(struct kvm_nlist) + strlen(prefix) +
		    2 * (strlen(p->n_name) + 1);
		unresolved++;
	}
	if (unresolved == 0)
		return (unresolved);
	/* Add space for the terminating nlist entry. */
	len += sizeof(struct kvm_nlist);
	unresolved++;

	/* Alloc one chunk for (nlist, [names]) and setup pointers. */
	n = np = malloc(len);
	if (n == NULL)
		return (missing);
	bzero(n, len);
	cp = ce = (char *)np;
	cp += unresolved * sizeof(struct kvm_nlist);
	ce += len;

	/* Generate shortened nlist with special prefix. */
	unresolved = 0;
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		if (p->n_type != N_UNDF)
			continue;
		*np = *p;
		/* Save the new\0orig. name so we can later match it again. */
		slen = snprintf(cp, ce - cp, "%s%s%c%s", prefix,
		    (prefix[0] != '\0' && p->n_name[0] == '_') ?
			(p->n_name + 1) : p->n_name, '\0', p->n_name);
		if (slen < 0 || slen >= ce - cp)
			continue;
		np->n_name = cp;
		cp += slen + 1;
		np++;
		unresolved++;
	}

	/* Do lookup on the reduced list. */
	np = n;
	unresolved = kvm_fdnlist(kd, np);

	/* Check if we could resolve further symbols and update the list. */
	if (unresolved >= 0 && unresolved < missing) {
		/* Find the first freshly resolved entry. */
		for (; np->n_name && np->n_name[0]; np++)
			if (np->n_type != N_UNDF)
				break;
		/*
		 * The lists are both in the same order,
		 * so we can walk them in parallel.
		 */
		for (p = nl; np->n_name && np->n_name[0] &&
		    p->n_name && p->n_name[0]; ++p) {
			if (p->n_type != N_UNDF)
				continue;
			/* Skip expanded name and compare to orig. one. */
			ccp = np->n_name + strlen(np->n_name) + 1;
			if (strcmp(ccp, p->n_name) != 0)
				continue;
			/* Update nlist with new, translated results. */
			p->n_type = np->n_type;
			if (validate_fn)
				p->n_value = (*validate_fn)(kd, np->n_value);
			else
				p->n_value = np->n_value;
			missing--;
			/* Find next freshly resolved entry. */
			for (np++; np->n_name && np->n_name[0]; np++)
				if (np->n_type != N_UNDF)
					break;
		}
	}
	/* We could assert missing = unresolved here. */

	free(n);
	return (unresolved);
}
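
/*
 * Layout note (illustrative): for prefix "P" and an unresolved name
 * "_foo", the scratch buffer built above holds the pair "Pfoo\0_foo\0"
 * (a leading '_' is dropped before prefixing).  The second lookup runs
 * on the prefixed spelling, while the original spelling after the first
 * NUL is what gets matched back against the caller's list.
 */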

int
_kvm_nlist(kvm_t *kd, struct kvm_nlist *nl, int initialize)
{
	struct kvm_nlist *p;
	int nvalid;
	struct kld_sym_lookup lookup;
	int error;
	const char *prefix = "";
	char symname[1024]; /* XXX-BZ symbol name length limit? */
	int tried_vnet, tried_dpcpu;

	/*
	 * If we can't use the kld symbol lookup, revert to the
	 * slow library call.
	 */
	if (!ISALIVE(kd)) {
		error = kvm_fdnlist(kd, nl);
		if (error <= 0)			/* Hard error or success. */
			return (error);

		if (_kvm_vnet_initialized(kd, initialize))
			error = kvm_fdnlist_prefix(kd, nl, error,
			    VNET_SYMPREFIX, _kvm_vnet_validaddr);

		if (error > 0 && _kvm_dpcpu_initialized(kd, initialize))
			error = kvm_fdnlist_prefix(kd, nl, error,
			    DPCPU_SYMPREFIX, _kvm_dpcpu_validaddr);

		return (error);
	}

	/*
	 * We can use the kld lookup syscall.  Go through each nlist entry
	 * and look it up with a kldsym(2) syscall.
	 */
	nvalid = 0;
	tried_vnet = 0;
	tried_dpcpu = 0;
again:
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		if (p->n_type != N_UNDF)
			continue;

		lookup.version = sizeof(lookup);
		lookup.symvalue = 0;
		lookup.symsize = 0;

		error = snprintf(symname, sizeof(symname), "%s%s", prefix,
		    (prefix[0] != '\0' && p->n_name[0] == '_') ?
			(p->n_name + 1) : p->n_name);
		if (error < 0 || error >= (int)sizeof(symname))
			continue;
		lookup.symname = symname;
		if (lookup.symname[0] == '_')
			lookup.symname++;

		if (kldsym(0, KLDSYM_LOOKUP, &lookup) != -1) {
			p->n_type = N_TEXT;
			if (_kvm_vnet_initialized(kd, initialize) &&
			    strcmp(prefix, VNET_SYMPREFIX) == 0)
				p->n_value =
				    _kvm_vnet_validaddr(kd, lookup.symvalue);
			else if (_kvm_dpcpu_initialized(kd, initialize) &&
			    strcmp(prefix, DPCPU_SYMPREFIX) == 0)
				p->n_value =
				    _kvm_dpcpu_validaddr(kd, lookup.symvalue);
			else
				p->n_value = lookup.symvalue;
			++nvalid;
			/* lookup.symsize */
		}
	}

	/*
	 * Check the number of entries that weren't found. If they exist,
	 * try again with a prefix for virtualized or DPCPU symbol names.
	 */
	error = ((p - nl) - nvalid);
	if (error && _kvm_vnet_initialized(kd, initialize) && !tried_vnet) {
		tried_vnet = 1;
		prefix = VNET_SYMPREFIX;
		goto again;
	}
	if (error && _kvm_dpcpu_initialized(kd, initialize) && !tried_dpcpu) {
		tried_dpcpu = 1;
		prefix = DPCPU_SYMPREFIX;
		goto again;
	}

	/*
	 * Return the number of entries that weren't found. If they exist,
	 * also fill internal error buffer.
	 */
	error = ((p - nl) - nvalid);
	if (error)
		_kvm_syserr(kd, kd->program, "kvm_nlist");
	return (error);
}

int
kvm_nlist2(kvm_t *kd, struct kvm_nlist *nl)
{

	/*
	 * If called via the public interface, permit initialization of
	 * further virtualized modules on demand.
	 */
	return (_kvm_nlist(kd, nl, 1));
}

int
kvm_nlist(kvm_t *kd, struct nlist *nl)
{
	struct kvm_nlist *kl;
	int count, i, nfail;

	/*
	 * Avoid reporting truncated addresses by failing for non-native
	 * cores.
	 */
	if (!kvm_native(kd)) {
		_kvm_err(kd, kd->program, "kvm_nlist of non-native vmcore");
		return (-1);
	}

	for (count = 0; nl[count].n_name != NULL && nl[count].n_name[0] != '\0';
	     count++)
		;
	if (count == 0)
		return (0);
	kl = calloc(count + 1, sizeof(*kl));
	for (i = 0; i < count; i++)
		kl[i].n_name = nl[i].n_name;
	nfail = kvm_nlist2(kd, kl);
	for (i = 0; i < count; i++) {
		nl[i].n_type = kl[i].n_type;
		nl[i].n_other = 0;
		nl[i].n_desc = 0;
		nl[i].n_value = kl[i].n_value;
	}
	return (nfail);
}
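
/*
 * Usage sketch (illustrative, not part of the library): resolving a
 * kernel symbol with kvm_nlist2(); "allproc" is just an example name.
 *
 *	struct kvm_nlist nl[] = {
 *		{ .n_name = "allproc" },
 *		{ .n_name = NULL },
 *	};
 *
 *	if (kvm_nlist2(kd, nl) != 0)
 *		errx(1, "kvm_nlist2: %s", kvm_geterr(kd));
 *
 * On success nl[0].n_value holds the kernel virtual address of the symbol.
 */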

ssize_t
kvm_read(kvm_t *kd, u_long kva, void *buf, size_t len)
{

	return (kvm_read2(kd, kva, buf, len));
}

ssize_t
kvm_read2(kvm_t *kd, kvaddr_t kva, void *buf, size_t len)
{
	int cc;
	ssize_t cr;
	off_t pa;
	char *cp;

	if (ISALIVE(kd)) {
		/*
		 * We're using /dev/kmem.  Just read straight from the
		 * device and let the active kernel do the address translation.
		 */
		errno = 0;
		if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
			_kvm_err(kd, 0, "invalid address (0x%jx)",
			    (uintmax_t)kva);
			return (-1);
		}
		cr = read(kd->vmfd, buf, len);
		if (cr < 0) {
			_kvm_syserr(kd, 0, "kvm_read");
			return (-1);
		} else if (cr < (ssize_t)len)
			_kvm_err(kd, kd->program, "short read");
		return (cr);
	}

	cp = buf;
	while (len > 0) {
		cc = kd->arch->ka_kvatop(kd, kva, &pa);
		if (cc == 0)
			return (-1);
		if (cc > (ssize_t)len)
			cc = len;
		errno = 0;
		if (lseek(kd->pmfd, pa, 0) == -1 && errno != 0) {
			_kvm_syserr(kd, 0, _PATH_MEM);
			break;
		}
		cr = read(kd->pmfd, cp, cc);
		if (cr < 0) {
			_kvm_syserr(kd, kd->program, "kvm_read");
			break;
		}
		/*
		 * If ka_kvatop returns a bogus value or our core file is
		 * truncated, we might wind up seeking beyond the end of the
		 * core file in which case the read will return 0 (EOF).
		 */
		if (cr == 0)
			break;
		cp += cr;
		kva += cr;
		len -= cr;
	}

	return (cp - (char *)buf);
}
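
/*
 * Usage sketch (illustrative): reading a kernel variable through the
 * translation machinery above, with 'nl' resolved by kvm_nlist2() as in
 * the earlier sketch.
 *
 *	int value;
 *
 *	if (kvm_read2(kd, nl[0].n_value, &value, sizeof(value)) !=
 *	    (ssize_t)sizeof(value))
 *		errx(1, "kvm_read2: %s", kvm_geterr(kd));
 */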

ssize_t
kvm_write(kvm_t *kd, u_long kva, const void *buf, size_t len)
{
	int cc;
	ssize_t cw;
	off_t pa;
	const char *cp;

	if (!ISALIVE(kd) && !kd->writable) {
		_kvm_err(kd, kd->program,
		    "kvm_write not implemented for dead kernels");
		return (-1);
	}

	if (ISALIVE(kd)) {
		/*
		 * Just like kvm_read, only we write.
		 */
		errno = 0;
		if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
			_kvm_err(kd, 0, "invalid address (%lx)", kva);
			return (-1);
		}
		cc = write(kd->vmfd, buf, len);
		if (cc < 0) {
			_kvm_syserr(kd, 0, "kvm_write");
			return (-1);
		} else if ((size_t)cc < len)
			_kvm_err(kd, kd->program, "short write");
		return (cc);
	}

	cp = buf;
	while (len > 0) {
		cc = kd->arch->ka_kvatop(kd, kva, &pa);
		if (cc == 0)
			return (-1);
		if (cc > (ssize_t)len)
			cc = len;
		errno = 0;
		if (lseek(kd->pmfd, pa, 0) == -1 && errno != 0) {
			_kvm_syserr(kd, 0, _PATH_MEM);
			break;
		}
		cw = write(kd->pmfd, cp, cc);
		if (cw < 0) {
			_kvm_syserr(kd, kd->program, "kvm_write");
			break;
		}
		/*
		 * If ka_kvatop returns a bogus value or our core file is
		 * truncated, we might wind up seeking beyond the end of the
		 * core file in which case the write will return 0.
		 */
		if (cw == 0)
			break;
		cp += cw;
		kva += cw;
		len -= cw;
	}

	return (cp - (const char *)buf);
}

int
kvm_native(kvm_t *kd)
{

	if (ISALIVE(kd))
		return (1);
	return (kd->arch->ka_native(kd));
}