--- kvm_ia64.c (85478)
+++ kvm_ia64.c (105607)
-/* $FreeBSD: head/lib/libkvm/kvm_ia64.c 85478 2001-10-25 09:08:21Z dfr $ */
+/* $FreeBSD: head/lib/libkvm/kvm_ia64.c 105607 2002-10-21 04:21:12Z marcel $ */
 /* $NetBSD: kvm_alpha.c,v 1.7.2.1 1997/11/02 20:34:26 mellon Exp $ */
 
 /*
  * Copyright (c) 1994, 1995 Carnegie-Mellon University.
  * All rights reserved.
  *
  * Author: Chris G. Demetriou
  *
[... 13 unchanged lines hidden ...]
  * School of Computer Science
  * Carnegie Mellon University
  * Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  */
 
-#include <sys/param.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/user.h>
-#include <sys/proc.h>
-#include <sys/stat.h>
 #include <sys/types.h>
-#include <sys/uio.h>
-#include <unistd.h>
-#include <nlist.h>
-#include <kvm.h>
+#include <sys/elf64.h>
+#include <sys/mman.h>
 
-#include <vm/vm.h>
-#include <vm/vm_param.h>
+#include <machine/pte.h>
 
+#include <kvm.h>
 #include <limits.h>
 #include <stdlib.h>
-#include <machine/pmap.h>
+#include <unistd.h>
+
 #include "kvm_private.h"
 
-static off_t _kvm_pa2off(kvm_t *kd, u_long pa);
+#define REGION_BASE(n)          (((uint64_t)(n)) << 61)
+#define REGION_ADDR(x)          ((x) & ((1LL<<61)-1LL))
 
+#define NKPTEPG(ps)             ((ps) / sizeof(struct ia64_lpte))
+#define KPTE_PTE_INDEX(va,ps)   (((va)/(ps)) % NKPTEPG(ps))
+#define KPTE_DIR_INDEX(va,ps)   (((va)/(ps)) / NKPTEPG(ps))
+
 struct vmstate {
-        u_int64_t       kptdir;         /* PA of page table directory */
-        u_int64_t       page_size;      /* Page size */
+        void            *mmapbase;
+        size_t          mmapsize;
+        size_t          pagesize;
+        u_long          kptdir;
 };
 
+/*
+ * Map the ELF headers into the process' address space. We do this in two
+ * steps: first the ELF header itself and using that information the whole
+ * set of headers.
+ */
+static int
+_kvm_maphdrs(kvm_t *kd, size_t sz)
+{
+        struct vmstate *vm = kd->vmst;
+
+        /* munmap() previous mmap(). */
+        if (vm->mmapbase != NULL) {
+                munmap(vm->mmapbase, vm->mmapsize);
+                vm->mmapbase = NULL;
+        }
+
+        vm->mmapsize = sz;
+        vm->mmapbase = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, kd->pmfd, NULL);
+        if (vm->mmapbase == MAP_FAILED) {
+                _kvm_err(kd, kd->program, "cannot mmap corefile");
+                return (-1);
+        }
+
+        return (0);
+}
+
+/*
+ * Translate a physical memory address to a file-offset in the crash-dump.
+ */
+static size_t
+_kvm_pa2off(kvm_t *kd, uint64_t pa, u_long *ofs, size_t pgsz)
+{
+        Elf64_Ehdr *e = kd->vmst->mmapbase;
+        Elf64_Phdr *p = (Elf64_Phdr*)((char*)e + e->e_phoff);
+        int n = e->e_phnum;
+
+        if (pa != REGION_ADDR(pa)) {
+                _kvm_err(kd, kd->program, "internal error");
+                return (0);
+        }
+
+        while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
+                p++, n--;
+        if (n == 0)
+                return (0);
+
+        *ofs = (pa - p->p_paddr) + p->p_offset;
+        if (pgsz == 0)
+                return (p->p_memsz - (pa - p->p_paddr));
+        return (pgsz - ((size_t)pa & (pgsz - 1)));
+}
+
 void
 _kvm_freevtop(kvm_t *kd)
 {
+        struct vmstate *vm = kd->vmst;
 
-        /* Not actually used for anything right now, but safe. */
-        if (kd->vmst != 0)
-                free(kd->vmst);
+        if (vm->mmapbase != NULL)
+                munmap(vm->mmapbase, vm->mmapsize);
+        free(vm);
+        kd->vmst = NULL;
 }
 
 int
 _kvm_initvtop(kvm_t *kd)
 {
-        struct vmstate *vm;
         struct nlist nlist[2];
-        u_int64_t va;
+        uint64_t va;
+        Elf64_Ehdr *ehdr;
+        size_t hdrsz;
 
-        vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
-        if (vm == 0) {
+        kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
+        if (kd->vmst == NULL) {
                 _kvm_err(kd, kd->program, "cannot allocate vm");
                 return (-1);
         }
-        kd->vmst = vm;
-        vm->page_size = getpagesize();  /* XXX wrong for crashdumps */
 
+        kd->vmst->pagesize = getpagesize();
+
+        if (_kvm_maphdrs(kd, sizeof(Elf64_Ehdr)) == -1)
+                return (-1);
+
+        ehdr = kd->vmst->mmapbase;
+        hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum;
+        if (_kvm_maphdrs(kd, hdrsz) == -1)
+                return (-1);
+
+        /*
+         * At this point we've got enough information to use kvm_read() for
+         * direct mapped (ie region 6 and region 7) address, such as symbol
+         * addresses/values.
+         */
+
         nlist[0].n_name = "kptdir";
         nlist[1].n_name = 0;
 
         if (kvm_nlist(kd, nlist) != 0) {
                 _kvm_err(kd, kd->program, "bad namelist");
                 return (-1);
         }
 
-        if(!ISALIVE(kd)) {
-                if (kvm_read(kd, (nlist[0].n_value), &va, sizeof(va)) != sizeof(va)) {
-                        _kvm_err(kd, kd->program, "cannot read kptdir");
-                        return (-1);
-                }
-        } else
-                if (kvm_read(kd, (nlist[0].n_value), &va, sizeof(va)) != sizeof(va)) {
-                        _kvm_err(kd, kd->program, "cannot read kptdir");
-                        return (-1);
-                }
-        vm->kptdir = IA64_RR_MASK(va);
-        return (0);
+        if (kvm_read(kd, (nlist[0].n_value), &va, sizeof(va)) != sizeof(va)) {
+                _kvm_err(kd, kd->program, "cannot read kptdir");
+                return (-1);
+        }
 
+        if (va < REGION_BASE(6)) {
+                _kvm_err(kd, kd->program, "kptdir is itself virtual");
+                return (-1);
+        }
+
+        kd->vmst->kptdir = va;
+        return (0);
 }
 
 int
 _kvm_kvatop(kvm_t *kd, u_long va, u_long *pa)
 {
-        u_int64_t kptdir;               /* PA of kptdir */
-        u_int64_t page_size;
-        int rv, page_off;
         struct ia64_lpte pte;
-        off_t pteoff;
-        struct vmstate *vm;
+        uint64_t pgaddr, ptaddr;
+        size_t pgno, pgsz, ptno;
 
-        vm = kd->vmst;
-
-        if (ISALIVE(kd)) {
-                _kvm_err(kd, 0, "vatop called in live kernel!");
-                return(0);
-        }
-        kptdir = vm->kptdir;
-        page_size = vm->page_size;
-
-        page_off = va & (page_size - 1);
-        if (va >= IA64_RR_BASE(6) && va <= IA64_RR_BASE(7) + ((1L<<61)-1)) {
-                /*
-                 * Direct-mapped address: just convert it.
-                 */
-
-                *pa = IA64_RR_MASK(va);
-                rv = page_size - page_off;
-        } else if (va >= IA64_RR_BASE(5) && va < IA64_RR_BASE(6)) {
-                /*
-                 * Real kernel virtual address: do the translation.
-                 */
-#define KPTE_DIR_INDEX(va, ps) \
-        (IA64_RR_MASK(va) / ((ps) * (ps) * sizeof(struct ia64_lpte)))
-#define KPTE_PTE_INDEX(va, ps) \
-        (((va) / (ps)) % (ps / sizeof(struct ia64_lpte)))
-
-                int maxpt = page_size / sizeof(u_int64_t);
-                int ptno = KPTE_DIR_INDEX(va, page_size);
-                int pgno = KPTE_PTE_INDEX(va, page_size);
-                u_int64_t ptoff, pgoff;
-
-                if (ptno >= maxpt) {
-                        _kvm_err(kd, 0, "invalid translation (va too large)");
-                        goto lose;
-                }
-                ptoff = kptdir + ptno * sizeof(u_int64_t);
-                if (lseek(kd->pmfd, _kvm_pa2off(kd, ptoff), 0) == -1 ||
-                    read(kd->pmfd, &pgoff, sizeof(pgoff)) != sizeof(pgoff)) {
-                        _kvm_syserr(kd, 0, "could not read page table address");
-                        goto lose;
-                }
-                pgoff = IA64_RR_MASK(pgoff);
-                if (!pgoff) {
-                        _kvm_err(kd, 0, "invalid translation (no page table)");
-                        goto lose;
-                }
-                if (lseek(kd->pmfd, _kvm_pa2off(kd, pgoff), 0) == -1 ||
-                    read(kd->pmfd, &pte, sizeof(pte)) != sizeof(pte)) {
-                        _kvm_syserr(kd, 0, "could not read PTE");
-                        goto lose;
-                }
-                if (!pte.pte_p) {
-                        _kvm_err(kd, 0, "invalid translation (invalid PTE)");
-                        goto lose;
-                }
-                *pa = pte.pte_ppn << 12;
-                rv = page_size - page_off;
-        } else {
-                /*
-                 * Bogus address (not in KV space): punt.
-                 */
-
-                _kvm_err(kd, 0, "invalid kernel virtual address");
-lose:
-                *pa = -1;
-                rv = 0;
+        if (va >= REGION_BASE(6)) {
+                /* Regions 6 and 7: direct mapped. */
+                return (_kvm_pa2off(kd, REGION_ADDR(va), pa, 0));
+        } else if (va >= REGION_BASE(5)) {
+                /* Region 5: virtual. */
+                va = REGION_ADDR(va);
+                pgsz = kd->vmst->pagesize;
+                ptno = KPTE_DIR_INDEX(va, pgsz);
+                pgno = KPTE_PTE_INDEX(va, pgsz);
+                if (ptno >= (pgsz >> 3))
+                        goto fail;
+                ptaddr = kd->vmst->kptdir + (ptno << 3);
+                if (kvm_read(kd, ptaddr, &pgaddr, 8) != 8)
+                        goto fail;
+                if (pgaddr == 0)
+                        goto fail;
+                pgaddr += (pgno * sizeof(pte));
+                if (kvm_read(kd, pgaddr, &pte, sizeof(pte)) != sizeof(pte))
+                        goto fail;
+                if (!pte.pte_p)
+                        goto fail;
+                va = ((u_long)pte.pte_ppn << 12) + (va & (pgsz - 1));
+                return (_kvm_pa2off(kd, va, pa, pgsz));
         }
 
-        return (rv);
+ fail:
+        _kvm_err(kd, kd->program, "invalid kernel virtual address");
+        *pa = ~0UL;
+        return (0);
 }
-
-/*
- * Translate a physical address to a file-offset in the crash-dump.
- */
-off_t
-_kvm_pa2off(kd, pa)
-        kvm_t *kd;
-        u_long pa;
-{
-        return IA64_PHYS_TO_RR7(pa);
-}
-
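
For readers following the new region-5 translation path above, the standalone sketch below (not part of kvm_ia64.c) shows how the REGION_BASE()/REGION_ADDR() and KPTE_DIR_INDEX()/KPTE_PTE_INDEX() macros added in r105607 decompose a kernel virtual address; the 8 KB page size and the 32-byte stand-in for sizeof(struct ia64_lpte) are assumptions made only for this example.

/*
 * Illustrative sketch only -- not part of kvm_ia64.c.  The REGION_*() and
 * KPTE_*_INDEX() macros mirror the ones added in r105607; LPTE_SIZE and the
 * 8 KB page size are assumed values chosen for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define REGION_BASE(n)          (((uint64_t)(n)) << 61)
#define REGION_ADDR(x)          ((x) & ((1LL<<61)-1LL))

#define LPTE_SIZE               32      /* assumed sizeof(struct ia64_lpte) */
#define NKPTEPG(ps)             ((ps) / LPTE_SIZE)
#define KPTE_PTE_INDEX(va,ps)   (((va)/(ps)) % NKPTEPG(ps))
#define KPTE_DIR_INDEX(va,ps)   (((va)/(ps)) / NKPTEPG(ps))

int
main(void)
{
        uint64_t kva = REGION_BASE(5) + 0x123456000ULL; /* example region-5 KVA */
        uint64_t va = REGION_ADDR(kva);                 /* region bits stripped */
        uint64_t pgsz = 8192;                           /* assumed page size */

        /* Directory slot, PTE slot within that page, and offset in the page. */
        printf("dir %ju, pte %ju, off 0x%jx\n",
            (uintmax_t)KPTE_DIR_INDEX(va, pgsz),
            (uintmax_t)KPTE_PTE_INDEX(va, pgsz),
            (uintmax_t)(va & (pgsz - 1)));
        return (0);
}

With these indices, the new _kvm_kvatop() reads the 8-byte directory slot at kptdir + (ptno << 3), then the PTE at that page plus pgno * sizeof(pte), and bails out through fail: if any step comes back empty.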