/* $FreeBSD: releng/10.2/lib/libkvm/kvm_ia64.c 269449 2014-08-02 22:25:24Z marcel $ */
/* $NetBSD: kvm_alpha.c,v 1.7.2.1 1997/11/02 20:34:26 mellon Exp $ */

/*
 * Copyright (c) 1994, 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
29 */ 30 31#include <sys/types.h> 32#include <sys/elf64.h> 33#include <sys/mman.h> 34 35#ifndef CROSS_LIBKVM 36#include <machine/atomic.h> 37#include <machine/bootinfo.h> 38#include <machine/elf.h> 39#include <machine/pte.h> 40#else 41#include "../../sys/ia64/include/atomic.h" 42#include "../../sys/ia64/include/bootinfo.h" 43#include "../../sys/ia64/include/elf.h" 44#include "../../sys/ia64/include/pte.h" 45#endif 46 47#include <kvm.h> 48#include <limits.h> 49#include <stdint.h> 50#include <stdlib.h> 51#include <unistd.h> 52 53#include "kvm_private.h" 54 55#define REGION_BASE(n) (((uint64_t)(n)) << 61) 56#define REGION_ADDR(x) ((x) & ((1LL<<61)-1LL)) 57 58#define NKPTEPG(ps) ((ps) / sizeof(struct ia64_lpte)) 59#define NKPTEDIR(ps) ((ps) >> 3) 60#define KPTE_PTE_INDEX(va,ps) (((va)/(ps)) % NKPTEPG(ps)) 61#define KPTE_DIR0_INDEX(va,ps) ((((va)/(ps)) / NKPTEPG(ps)) / NKPTEDIR(ps)) 62#define KPTE_DIR1_INDEX(va,ps) ((((va)/(ps)) / NKPTEPG(ps)) % NKPTEDIR(ps)) 63 64#define PBVM_BASE 0x9ffc000000000000UL 65#define PBVM_PGSZ (64 * 1024) 66 67typedef size_t (a2p_f)(kvm_t *, uint64_t, off_t *); 68 69struct vmstate { 70 void *mmapbase; 71 size_t mmapsize; 72 size_t pagesize; 73 u_long kptdir; 74 u_long *pbvm_pgtbl; 75 u_int pbvm_pgtblsz; 76 a2p_f *kvatop; 77}; 78 79/* 80 * Map the ELF headers into the process' address space. We do this in two 81 * steps: first the ELF header itself and using that information the whole 82 * set of headers. 83 */ 84static int 85ia64_maphdrs(kvm_t *kd, size_t sz) 86{ 87 struct vmstate *vm = kd->vmst; 88 89 /* munmap() previous mmap(). */ 90 if (vm->mmapbase != NULL) { 91 munmap(vm->mmapbase, vm->mmapsize); 92 vm->mmapbase = NULL; 93 } 94 95 vm->mmapsize = sz; 96 vm->mmapbase = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0); 97 if (vm->mmapbase == MAP_FAILED) { 98 _kvm_err(kd, kd->program, "cannot mmap corefile"); 99 return (-1); 100 } 101 102 return (0); 103} 104 105/* 106 * Physical core support. 
107 */ 108 109static size_t 110phys_addr2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz) 111{ 112 Elf64_Ehdr *e; 113 Elf64_Phdr *p; 114 int n; 115 116 if (pa != REGION_ADDR(pa)) 117 goto fail; 118 119 e = (Elf64_Ehdr *)(kd->vmst->mmapbase); 120 n = e->e_phnum; 121 p = (Elf64_Phdr *)(void *)((uintptr_t)(void *)e + e->e_phoff); 122 while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz)) 123 p++, n--; 124 if (n == 0) 125 goto fail; 126 127 *ofs = (pa - p->p_paddr) + p->p_offset; 128 if (pgsz == 0) 129 return (p->p_memsz - (pa - p->p_paddr)); 130 return (pgsz - ((size_t)pa & (pgsz - 1))); 131 132 fail: 133 _kvm_err(kd, kd->program, "invalid physical address %#jx", 134 (uintmax_t)pa); 135 return (0); 136} 137 138static size_t 139phys_kvatop(kvm_t *kd, uint64_t va, off_t *ofs) 140{ 141 struct ia64_lpte pte; 142 uint64_t pa, pgaddr, pt0addr, pt1addr; 143 size_t pgno, pgsz, pt0no, pt1no; 144 145 if (va >= REGION_BASE(6)) { 146 /* Regions 6 and 7: direct mapped. */ 147 pa = REGION_ADDR(va); 148 return (phys_addr2off(kd, pa, ofs, 0)); 149 } else if (va >= REGION_BASE(5)) { 150 /* Region 5: Kernel Virtual Memory. */ 151 va = REGION_ADDR(va); 152 pgsz = kd->vmst->pagesize; 153 pt0no = KPTE_DIR0_INDEX(va, pgsz); 154 pt1no = KPTE_DIR1_INDEX(va, pgsz); 155 pgno = KPTE_PTE_INDEX(va, pgsz); 156 if (pt0no >= NKPTEDIR(pgsz)) 157 goto fail; 158 pt0addr = kd->vmst->kptdir + (pt0no << 3); 159 if (kvm_read(kd, pt0addr, &pt1addr, 8) != 8) 160 goto fail; 161 if (pt1addr == 0) 162 goto fail; 163 pt1addr += pt1no << 3; 164 if (kvm_read(kd, pt1addr, &pgaddr, 8) != 8) 165 goto fail; 166 if (pgaddr == 0) 167 goto fail; 168 pgaddr += pgno * sizeof(pte); 169 if (kvm_read(kd, pgaddr, &pte, sizeof(pte)) != sizeof(pte)) 170 goto fail; 171 if (!(pte.pte & PTE_PRESENT)) 172 goto fail; 173 pa = (pte.pte & PTE_PPN_MASK) + (va & (pgsz - 1)); 174 return (phys_addr2off(kd, pa, ofs, pgsz)); 175 } else if (va >= PBVM_BASE) { 176 /* Region 4: Pre-Boot Virtual Memory (PBVM). 
*/ 177 va -= PBVM_BASE; 178 pgsz = PBVM_PGSZ; 179 pt0no = va / pgsz; 180 if (pt0no >= (kd->vmst->pbvm_pgtblsz >> 3)) 181 goto fail; 182 pt0addr = kd->vmst->pbvm_pgtbl[pt0no]; 183 if (!(pt0addr & PTE_PRESENT)) 184 goto fail; 185 pa = (pt0addr & PTE_PPN_MASK) + va % pgsz; 186 return (phys_addr2off(kd, pa, ofs, pgsz)); 187 } 188 189 fail: 190 _kvm_err(kd, kd->program, "invalid kernel virtual address %#jx", 191 (uintmax_t)va); 192 *ofs = -1; 193 return (0); 194} 195 196static ssize_t 197phys_read(kvm_t *kd, uint64_t pa, void *buf, size_t bufsz) 198{ 199 off_t ofs; 200 size_t sz; 201 202 sz = phys_addr2off(kd, pa, &ofs, 0); 203 if (sz < bufsz) 204 return ((ssize_t)sz); 205 206 if (lseek(kd->pmfd, ofs, 0) == -1) 207 return (-1); 208 return (read(kd->pmfd, buf, bufsz)); 209} 210 211/* 212 * Virtual core support (aka minidump). 213 */ 214 215static size_t 216virt_addr2off(kvm_t *kd, uint64_t va, off_t *ofs, size_t pgsz) 217{ 218 Elf64_Ehdr *e; 219 Elf64_Phdr *p; 220 int n; 221 222 if (va < REGION_BASE(4)) 223 goto fail; 224 225 e = (Elf64_Ehdr *)(kd->vmst->mmapbase); 226 n = e->e_phnum; 227 p = (Elf64_Phdr *)(void *)((uintptr_t)(void *)e + e->e_phoff); 228 while (n && (va < p->p_vaddr || va >= p->p_vaddr + p->p_memsz)) 229 p++, n--; 230 if (n == 0) 231 goto fail; 232 233 *ofs = (va - p->p_vaddr) + p->p_offset; 234 if (pgsz == 0) 235 return (p->p_memsz - (va - p->p_vaddr)); 236 return (pgsz - ((size_t)va & (pgsz - 1))); 237 238 fail: 239 _kvm_err(kd, kd->program, "invalid virtual address %#jx", 240 (uintmax_t)va); 241 return (0); 242} 243 244static size_t 245virt_kvatop(kvm_t *kd, uint64_t va, off_t *ofs) 246{ 247 248 return (virt_addr2off(kd, va, ofs, 0)); 249} 250 251/* 252 * KVM architecture support functions. 
253 */ 254 255void 256_kvm_freevtop(kvm_t *kd) 257{ 258 struct vmstate *vm = kd->vmst; 259 260 if (vm->pbvm_pgtbl != NULL) 261 free(vm->pbvm_pgtbl); 262 if (vm->mmapbase != NULL) 263 munmap(vm->mmapbase, vm->mmapsize); 264 free(vm); 265 kd->vmst = NULL; 266} 267 268int 269_kvm_initvtop(kvm_t *kd) 270{ 271 struct bootinfo bi; 272 struct nlist nl[2]; 273 uint64_t va; 274 Elf64_Ehdr *ehdr; 275 size_t hdrsz; 276 ssize_t sz; 277 278 kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst)); 279 if (kd->vmst == NULL) { 280 _kvm_err(kd, kd->program, "cannot allocate vm"); 281 return (-1); 282 } 283 284#ifndef CROSS_LIBKVM 285 kd->vmst->pagesize = getpagesize(); 286#else 287 kd->vmst->pagesize = 8192; 288#endif 289 290 if (ia64_maphdrs(kd, sizeof(Elf64_Ehdr)) == -1) 291 return (-1); 292 293 ehdr = kd->vmst->mmapbase; 294 hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum; 295 if (ia64_maphdrs(kd, hdrsz) == -1) 296 return (-1); 297 298 kd->vmst->kvatop = (ehdr->e_flags & EF_IA_64_ABSOLUTE) ? 299 phys_kvatop : virt_kvatop; 300 301 /* 302 * Load the PBVM page table. We need this to resolve PBVM addresses. 303 * The PBVM page table is obtained from the bootinfo structure, of 304 * which the address is given to us in e_entry. If e_entry is 0, then 305 * this is assumed to be a pre-PBVM kernel. 306 * Note that the address of the bootinfo structure is either physical 307 * or virtual, depending on whether the core is physical or virtual. 
308 */ 309 if (ehdr->e_entry != 0 && (ehdr->e_flags & EF_IA_64_ABSOLUTE) != 0) { 310 sz = phys_read(kd, ehdr->e_entry, &bi, sizeof(bi)); 311 if (sz != sizeof(bi)) { 312 _kvm_err(kd, kd->program, 313 "cannot read bootinfo at physical address %#jx", 314 (uintmax_t)ehdr->e_entry); 315 return (-1); 316 } 317 if (bi.bi_magic != BOOTINFO_MAGIC) { 318 _kvm_err(kd, kd->program, "invalid bootinfo"); 319 return (-1); 320 } 321 kd->vmst->pbvm_pgtbl = _kvm_malloc(kd, bi.bi_pbvm_pgtblsz); 322 if (kd->vmst->pbvm_pgtbl == NULL) { 323 _kvm_err(kd, kd->program, "cannot allocate page table"); 324 return (-1); 325 } 326 kd->vmst->pbvm_pgtblsz = bi.bi_pbvm_pgtblsz; 327 sz = phys_read(kd, bi.bi_pbvm_pgtbl, kd->vmst->pbvm_pgtbl, 328 bi.bi_pbvm_pgtblsz); 329 if (sz != bi.bi_pbvm_pgtblsz) { 330 _kvm_err(kd, kd->program, 331 "cannot read page table at physical address %#jx", 332 (uintmax_t)bi.bi_pbvm_pgtbl); 333 return (-1); 334 } 335 } else { 336 kd->vmst->pbvm_pgtbl = NULL; 337 kd->vmst->pbvm_pgtblsz = 0; 338 } 339 340 /* 341 * At this point we've got enough information to use kvm_read() for 342 * direct mapped (ie region 6 and region 7) address, such as symbol 343 * addresses/values. 344 */ 345 346 nl[0].n_name = "ia64_kptdir"; 347 nl[1].n_name = 0; 348 349 if (kvm_nlist(kd, nl) != 0) { 350 _kvm_err(kd, kd->program, "bad namelist"); 351 return (-1); 352 } 353 354 if (kvm_read(kd, (nl[0].n_value), &va, sizeof(va)) != sizeof(va)) { 355 _kvm_err(kd, kd->program, "cannot read kptdir"); 356 return (-1); 357 } 358 359 if (va == REGION_BASE(5)) { 360 _kvm_err(kd, kd->program, "kptdir is itself virtual"); 361 return (-1); 362 } 363 364 kd->vmst->kptdir = va; 365 return (0); 366} 367 368int 369_kvm_kvatop(kvm_t *kd, u_long va, off_t *ofs) 370{ 371 size_t sz; 372 373 sz = kd->vmst->kvatop(kd, va, ofs); 374 return ((sz > INT_MAX) ? INT_MAX : sz); 375} 376