imgact_elf.c (120422) → imgact_elf.c (123742)
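Revision 123742 makes two substantive changes against 120422: the $FreeBSD$ ID line is bumped, and the ELF image activator learns about brand_info->interp_newpath (letting a brand substitute its own interpreter path) while the interpreter-loading sequence near the end of the activator is restructured; a reassembled view of that logic follows the hunk below. Everything else in the file is unchanged context.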
1/*-
2 * Copyright (c) 2000 David O'Brien
 3 * Copyright (c) 1995-1996 Søren Schmidt
4 * Copyright (c) 1996 Peter Wemm
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer
12 * in this position and unchanged.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/kern/imgact_elf.c 120422 2003-09-25 01:10:26Z peter $");
32__FBSDID("$FreeBSD: head/sys/kern/imgact_elf.c 123742 2003-12-23 02:42:39Z peter $");
33
34#include <sys/param.h>
35#include <sys/exec.h>
36#include <sys/fcntl.h>
37#include <sys/imgact.h>
38#include <sys/imgact_elf.h>
39#include <sys/kernel.h>
40#include <sys/lock.h>
41#include <sys/malloc.h>
42#include <sys/mutex.h>
43#include <sys/mman.h>
44#include <sys/namei.h>
45#include <sys/pioctl.h>
46#include <sys/proc.h>
47#include <sys/procfs.h>
48#include <sys/resourcevar.h>
49#include <sys/systm.h>
50#include <sys/signalvar.h>
51#include <sys/stat.h>
52#include <sys/sx.h>
53#include <sys/syscall.h>
54#include <sys/sysctl.h>
55#include <sys/sysent.h>
56#include <sys/vnode.h>
57
58#include <vm/vm.h>
59#include <vm/vm_kern.h>
60#include <vm/vm_param.h>
61#include <vm/pmap.h>
62#include <vm/vm_map.h>
63#include <vm/vm_object.h>
64#include <vm/vm_extern.h>
65
66#include <machine/elf.h>
67#include <machine/md_var.h>
68
69#define OLD_EI_BRAND 8
70
71static int __elfN(check_header)(const Elf_Ehdr *hdr);
72static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
73 const char *interp);
74static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
75 u_long *entry, size_t pagesize);
76static int __elfN(load_section)(struct proc *p,
77 struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
78 vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
79 vm_prot_t prot, size_t pagesize);
80static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
81
82SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
83 "");
84
85int __elfN(fallback_brand) = -1;
86SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
87 fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
88 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
89TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
90 &__elfN(fallback_brand));
91
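As a concrete reading of the two declarations above: the sysctl/tunable pair expands once per compiled ELF word size, so (a sketch, assuming the usual 32- and 64-bit builds) the knob surfaces as:

	/*
	 * __ELF_WORD_SIZE == 32  ->  kern.elf32.fallback_brand
	 * __ELF_WORD_SIZE == 64  ->  kern.elf64.fallback_brand
	 *
	 * The default of -1 matches no registered bi->brand, so no
	 * fallback applies until the knob is set.
	 */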
92static int elf_trace = 0;
93SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");
94
95static int elf_legacy_coredump = 0;
96SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
97 &elf_legacy_coredump, 0, "");
98
99static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
100
101int
102__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
103{
104 int i;
105
106 for (i = 0; i < MAX_BRANDS; i++) {
107 if (elf_brand_list[i] == NULL) {
108 elf_brand_list[i] = entry;
109 break;
110 }
111 }
112 if (i == MAX_BRANDS)
113 return (-1);
114 return (0);
115}
116
117int
118__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
119{
120 int i;
121
122 for (i = 0; i < MAX_BRANDS; i++) {
123 if (elf_brand_list[i] == entry) {
124 elf_brand_list[i] = NULL;
125 break;
126 }
127 }
128 if (i == MAX_BRANDS)
129 return (-1);
130 return (0);
131}
132
133int
134__elfN(brand_inuse)(Elf_Brandinfo *entry)
135{
136 struct proc *p;
137 int rval = FALSE;
138
139 sx_slock(&allproc_lock);
140 LIST_FOREACH(p, &allproc, p_list) {
141 if (p->p_sysent == entry->sysvec) {
142 rval = TRUE;
143 break;
144 }
145 }
146 sx_sunlock(&allproc_lock);
147
148 return (rval);
149}
150
151static Elf_Brandinfo *
152__elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
153{
154 Elf_Brandinfo *bi;
155 int i;
156
157 /*
158 * We support three types of branding -- (1) the ELF EI_OSABI field
159 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
160 * branding w/in the ELF header, and (3) path of the `interp_path'
161 * field. We should also look for an ".note.ABI-tag" ELF section now
162 * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
163 */
164
165 /* If the executable has a brand, search for it in the brand list. */
166 for (i = 0; i < MAX_BRANDS; i++) {
167 bi = elf_brand_list[i];
168 if (bi != NULL && hdr->e_machine == bi->machine &&
169 (hdr->e_ident[EI_OSABI] == bi->brand ||
170 strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
171 bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
172 return (bi);
173 }
174
175 /* Lacking a known brand, search for a recognized interpreter. */
176 if (interp != NULL) {
177 for (i = 0; i < MAX_BRANDS; i++) {
178 bi = elf_brand_list[i];
179 if (bi != NULL && hdr->e_machine == bi->machine &&
180 strcmp(interp, bi->interp_path) == 0)
181 return (bi);
182 }
183 }
184
185 /* Lacking a recognized interpreter, try the default brand */
186 for (i = 0; i < MAX_BRANDS; i++) {
187 bi = elf_brand_list[i];
188 if (bi != NULL && hdr->e_machine == bi->machine &&
189 __elfN(fallback_brand) == bi->brand)
190 return (bi);
191 }
192 return (NULL);
193}
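To make the three-way match above concrete, here is a hypothetical brand entry showing the fields the lookups consult (the field names are from this file; the values are purely illustrative, not a definitive table entry):

	static Elf_Brandinfo example_brand_info = {
		.brand		= ELFOSABI_FREEBSD,	/* vs. e_ident[EI_OSABI] */
		.machine	= EM_386,		/* must equal e_machine in every pass */
		.compat_3_brand	= "FreeBSD",		/* old string brand at e_ident[OLD_EI_BRAND] */
		.interp_path	= "/libexec/ld-elf.so.1", /* vs. the PT_INTERP string */
		.sysvec		= &elf32_freebsd_sysvec,
	};

	/* Registered and unregistered through the table helpers above: */
	if (__elfN(insert_brand_entry)(&example_brand_info) == -1)
		printf("failed to insert brand entry\n");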
194
195static int
196__elfN(check_header)(const Elf_Ehdr *hdr)
197{
198 Elf_Brandinfo *bi;
199 int i;
200
201 if (!IS_ELF(*hdr) ||
202 hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
203 hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
204 hdr->e_ident[EI_VERSION] != EV_CURRENT)
205 return (ENOEXEC);
206
207 /*
208 * Make sure we have at least one brand for this machine.
209 */
210
211 for (i = 0; i < MAX_BRANDS; i++) {
212 bi = elf_brand_list[i];
213 if (bi != NULL && bi->machine == hdr->e_machine)
214 break;
215 }
216 if (i == MAX_BRANDS)
217 return (ENOEXEC);
218
219 if (hdr->e_version != ELF_TARG_VER)
220 return (ENOEXEC);
221
222 return (0);
223}
224
225static int
226__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
227 vm_offset_t start, vm_offset_t end, vm_prot_t prot,
228 vm_prot_t max)
229{
230 int error, rv;
231 vm_offset_t off;
232 vm_offset_t data_buf = 0;
233
234 /*
235 * Create the page if it doesn't exist yet. Ignore errors.
236 */
237 vm_map_lock(map);
238 vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
239 max, 0);
240 vm_map_unlock(map);
241
242 /*
243 * Find the page from the underlying object.
244 */
245 if (object) {
246 vm_object_reference(object);
247 rv = vm_map_find(exec_map,
248 object,
249 trunc_page(offset),
250 &data_buf,
251 PAGE_SIZE,
252 TRUE,
253 VM_PROT_READ,
254 VM_PROT_ALL,
255 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
256 if (rv != KERN_SUCCESS) {
257 vm_object_deallocate(object);
258 return (rv);
259 }
260
261 off = offset - trunc_page(offset);
262 error = copyout((caddr_t)data_buf + off, (caddr_t)start,
263 end - start);
264 vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
265 if (error) {
266 return (KERN_FAILURE);
267 }
268 }
269
270 return (KERN_SUCCESS);
271}
272
273static int
274__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
275 vm_offset_t start, vm_offset_t end, vm_prot_t prot,
276 vm_prot_t max, int cow)
277{
278 vm_offset_t data_buf, off;
279 vm_size_t sz;
280 int error, rv;
281
282 if (start != trunc_page(start)) {
283 rv = __elfN(map_partial)(map, object, offset, start,
284 round_page(start), prot, max);
285 if (rv)
286 return (rv);
287 offset += round_page(start) - start;
288 start = round_page(start);
289 }
290 if (end != round_page(end)) {
291 rv = __elfN(map_partial)(map, object, offset +
292 trunc_page(end) - start, trunc_page(end), end, prot, max);
293 if (rv)
294 return (rv);
295 end = trunc_page(end);
296 }
297 if (end > start) {
298 if (offset & PAGE_MASK) {
299 /*
300 * The mapping is not page aligned. This means we have
301 * to copy the data. Sigh.
302 */
303 rv = vm_map_find(map, 0, 0, &start, end - start,
304 FALSE, prot, max, 0);
305 if (rv)
306 return (rv);
307 data_buf = 0;
308 while (start < end) {
309 vm_object_reference(object);
310 rv = vm_map_find(exec_map,
311 object,
312 trunc_page(offset),
313 &data_buf,
314 2 * PAGE_SIZE,
315 TRUE,
316 VM_PROT_READ,
317 VM_PROT_ALL,
318 (MAP_COPY_ON_WRITE
319 | MAP_PREFAULT_PARTIAL));
320 if (rv != KERN_SUCCESS) {
321 vm_object_deallocate(object);
322 return (rv);
323 }
324 off = offset - trunc_page(offset);
325 sz = end - start;
326 if (sz > PAGE_SIZE)
327 sz = PAGE_SIZE;
328 error = copyout((caddr_t)data_buf + off,
329 (caddr_t)start, sz);
330 vm_map_remove(exec_map, data_buf,
331 data_buf + 2 * PAGE_SIZE);
332 if (error) {
333 return (KERN_FAILURE);
334 }
335 start += sz;
336 }
337 rv = KERN_SUCCESS;
338 } else {
339 vm_map_lock(map);
340 rv = vm_map_insert(map, object, offset, start, end,
341 prot, max, cow);
342 vm_map_unlock(map);
343 }
344 return (rv);
345 } else {
346 return (KERN_SUCCESS);
347 }
348}
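A worked example of the case split in __elfN(map_insert), assuming 4 KiB pages: a request for [0x1234, 0x5678) decomposes into a partial head page, whole middle pages, and a partial tail.

	/*
	 * start = 0x1234, end = 0x5678:
	 *   head:   map_partial(..., offset, 0x1234, 0x2000, ...);
	 *           offset += 0x2000 - 0x1234; start = 0x2000;
	 *   tail:   map_partial(..., offset + 0x5000 - 0x2000,
	 *               0x5000, 0x5678, ...); end = 0x5000;
	 *   middle: vm_map_insert() of [0x2000, 0x5000) directly, or the
	 *           copyout loop when (offset & PAGE_MASK) != 0.
	 */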
349
350static int
351__elfN(load_section)(struct proc *p, struct vmspace *vmspace,
352 struct vnode *vp, vm_object_t object, vm_offset_t offset,
353 caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
354 size_t pagesize)
355{
356 size_t map_len;
357 vm_offset_t map_addr;
358 int error, rv, cow;
359 size_t copy_len;
360 vm_offset_t file_addr;
361 vm_offset_t data_buf = 0;
362
363 GIANT_REQUIRED;
364
365 error = 0;
366
367 /*
368 * It's necessary to fail if the filsz + offset taken from the
369 * header is greater than the actual file pager object's size.
370 * If we were to allow this, then the vm_map_find() below would
371 * walk right off the end of the file object and into the ether.
372 *
373 * While I'm here, might as well check for something else that
374 * is invalid: filsz cannot be greater than memsz.
375 */
376 if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
377 filsz > memsz) {
378 uprintf("elf_load_section: truncated ELF file\n");
379 return (ENOEXEC);
380 }
381
382#define trunc_page_ps(va, ps) ((va) & ~(ps - 1))
383#define round_page_ps(va, ps) (((va) + (ps - 1)) & ~(ps - 1))
384
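For instance, with ps = 0x1000 (ps must be a power of two for the mask arithmetic to hold):

	/*
	 * trunc_page_ps(0x12345, 0x1000) == 0x12000
	 * round_page_ps(0x12345, 0x1000) == 0x13000
	 */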
385 map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
386 file_addr = trunc_page_ps(offset, pagesize);
387
388 /*
389 * We have two choices. We can either clear the data in the last page
390 * of an oversized mapping, or we can start the anon mapping a page
391 * early and copy the initialized data into that first page. We
392 * choose the second..
393 */
394 if (memsz > filsz)
395 map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
396 else
397 map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
398
399 if (map_len != 0) {
400 vm_object_reference(object);
401
402 /* cow flags: don't dump readonly sections in core */
403 cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
404 (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
405
406 rv = __elfN(map_insert)(&vmspace->vm_map,
407 object,
408 file_addr, /* file offset */
409 map_addr, /* virtual start */
410 map_addr + map_len,/* virtual end */
411 prot,
412 VM_PROT_ALL,
413 cow);
414 if (rv != KERN_SUCCESS) {
415 vm_object_deallocate(object);
416 return (EINVAL);
417 }
418
419 /* we can stop now if we've covered it all */
420 if (memsz == filsz) {
421 return (0);
422 }
423 }
424
425
426 /*
427 * We have to get the remaining bit of the file into the first part
428 * of the oversized map segment. This is normally because the .data
429 * segment in the file is extended to provide bss. It's a neat idea
430 * to try and save a page, but it's a pain in the behind to implement.
431 */
432 copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
433 map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
434 map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
435 map_addr;
436
437 /* This had damn well better be true! */
438 if (map_len != 0) {
439 rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
440 map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
441 if (rv != KERN_SUCCESS) {
442 return (EINVAL);
443 }
444 }
445
446 if (copy_len != 0) {
447 vm_offset_t off;
448 vm_object_reference(object);
449 rv = vm_map_find(exec_map,
450 object,
451 trunc_page(offset + filsz),
452 &data_buf,
453 PAGE_SIZE,
454 TRUE,
455 VM_PROT_READ,
456 VM_PROT_ALL,
457 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
458 if (rv != KERN_SUCCESS) {
459 vm_object_deallocate(object);
460 return (EINVAL);
461 }
462
463 /* send the page fragment to user space */
464 off = trunc_page_ps(offset + filsz, pagesize) -
465 trunc_page(offset + filsz);
466 error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
467 copy_len);
468 vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
469 if (error) {
470 return (error);
471 }
472 }
473
474 /*
475 * set it to the specified protection.
476 * XXX had better undo the damage from pasting over the cracks here!
477 */
478 vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
479 round_page(map_addr + map_len), prot, FALSE);
480
481 return (error);
482}
483
484/*
485 * Load the file "file" into memory. It may be either a shared object
486 * or an executable.
487 *
488 * The "addr" reference parameter is in/out. On entry, it specifies
489 * the address where a shared object should be loaded. If the file is
490 * an executable, this value is ignored. On exit, "addr" specifies
491 * where the file was actually loaded.
492 *
493 * The "entry" reference parameter is out only. On exit, it specifies
494 * the entry point for the loaded file.
495 */
496static int
497__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
498 u_long *entry, size_t pagesize)
499{
500 struct {
501 struct nameidata nd;
502 struct vattr attr;
503 struct image_params image_params;
504 } *tempdata;
505 const Elf_Ehdr *hdr = NULL;
506 const Elf_Phdr *phdr = NULL;
507 struct nameidata *nd;
508 struct vmspace *vmspace = p->p_vmspace;
509 struct vattr *attr;
510 struct image_params *imgp;
511 vm_prot_t prot;
512 u_long rbase;
513 u_long base_addr = 0;
514 int error, i, numsegs;
515
516 if (curthread->td_proc != p)
517 panic("elf_load_file - thread"); /* XXXKSE DIAGNOSTIC */
518
519 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
520 nd = &tempdata->nd;
521 attr = &tempdata->attr;
522 imgp = &tempdata->image_params;
523
524 /*
525 * Initialize part of the common data
526 */
527 imgp->proc = p;
528 imgp->userspace_argv = NULL;
529 imgp->userspace_envv = NULL;
530 imgp->attr = attr;
531 imgp->firstpage = NULL;
532 imgp->image_header = (char *)kmem_alloc_wait(exec_map, PAGE_SIZE);
533 imgp->object = NULL;
534 imgp->execlabel = NULL;
535
536 if (imgp->image_header == NULL) {
537 nd->ni_vp = NULL;
538 error = ENOMEM;
539 goto fail;
540 }
541
542 /* XXXKSE */
543 NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread);
544
545 if ((error = namei(nd)) != 0) {
546 nd->ni_vp = NULL;
547 goto fail;
548 }
549 NDFREE(nd, NDF_ONLY_PNBUF);
550 imgp->vp = nd->ni_vp;
551
552 /*
553 * Check permissions, modes, uid, etc on the file, and "open" it.
554 */
555 error = exec_check_permissions(imgp);
556 if (error) {
557 VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
558 goto fail;
559 }
560
561 error = exec_map_first_page(imgp);
562 /*
563 * Also make certain that the interpreter stays the same, so set
564 * its VV_TEXT flag, too.
565 */
566 if (error == 0)
567 nd->ni_vp->v_vflag |= VV_TEXT;
568
569 VOP_GETVOBJECT(nd->ni_vp, &imgp->object);
570 vm_object_reference(imgp->object);
571
572 VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
573 if (error)
574 goto fail;
575
576 hdr = (const Elf_Ehdr *)imgp->image_header;
577 if ((error = __elfN(check_header)(hdr)) != 0)
578 goto fail;
579 if (hdr->e_type == ET_DYN)
580 rbase = *addr;
581 else if (hdr->e_type == ET_EXEC)
582 rbase = 0;
583 else {
584 error = ENOEXEC;
585 goto fail;
586 }
587
588 /* Only support headers that fit within first page for now */
589 if ((hdr->e_phoff > PAGE_SIZE) ||
590 (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
591 error = ENOEXEC;
592 goto fail;
593 }
594
595 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
596
597 for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
598 if (phdr[i].p_type == PT_LOAD) { /* Loadable segment */
599 prot = 0;
600 if (phdr[i].p_flags & PF_X)
601 prot |= VM_PROT_EXECUTE;
602 if (phdr[i].p_flags & PF_W)
603 prot |= VM_PROT_WRITE;
604 if (phdr[i].p_flags & PF_R)
605 prot |= VM_PROT_READ;
606
607 if ((error = __elfN(load_section)(p, vmspace,
608 nd->ni_vp, imgp->object, phdr[i].p_offset,
609 (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
610 phdr[i].p_memsz, phdr[i].p_filesz, prot,
611 pagesize)) != 0)
612 goto fail;
613 /*
614 * Establish the base address if this is the
615 * first segment.
616 */
617 if (numsegs == 0)
618 base_addr = trunc_page(phdr[i].p_vaddr +
619 rbase);
620 numsegs++;
621 }
622 }
623 *addr = base_addr;
624 *entry = (unsigned long)hdr->e_entry + rbase;
625
626fail:
627 if (imgp->firstpage)
628 exec_unmap_first_page(imgp);
629 if (imgp->image_header)
630 kmem_free_wakeup(exec_map, (vm_offset_t)imgp->image_header,
631 PAGE_SIZE);
632 if (imgp->object)
633 vm_object_deallocate(imgp->object);
634
635 if (nd->ni_vp)
636 vrele(nd->ni_vp);
637
638 free(tempdata, M_TEMP);
639
640 return (error);
641}
642
643static int
644__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
645{
646 const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
647 const Elf_Phdr *phdr;
648 Elf_Auxargs *elf_auxargs = NULL;
649 struct vmspace *vmspace;
650 vm_prot_t prot;
651 u_long text_size = 0, data_size = 0, total_size = 0;
652 u_long text_addr = 0, data_addr = 0;
653 u_long seg_size, seg_addr;
654 u_long addr, entry = 0, proghdr = 0;
655 int error, i;
656 const char *interp = NULL;
657 Elf_Brandinfo *brand_info;
658 char *path;
659 struct thread *td = curthread;
660 struct sysentvec *sv;
661
662 GIANT_REQUIRED;
663
664 /*
665 * Do we have a valid ELF header ?
666 */
667 if (__elfN(check_header)(hdr) != 0 || hdr->e_type != ET_EXEC)
668 return (-1);
669
670 /*
671 * From here on down, we return an errno, not -1, as we've
672 * detected an ELF file.
673 */
674
675 if ((hdr->e_phoff > PAGE_SIZE) ||
676 (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
677 /* Only support headers in first page for now */
678 return (ENOEXEC);
679 }
680 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
681
682 /*
683 * From this point on, we may have resources that need to be freed.
684 */
685
686 VOP_UNLOCK(imgp->vp, 0, td);
687
688 for (i = 0; i < hdr->e_phnum; i++) {
689 switch (phdr[i].p_type) {
690 case PT_INTERP: /* Path to interpreter */
691 if (phdr[i].p_filesz > MAXPATHLEN ||
692 phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
693 error = ENOEXEC;
694 goto fail;
695 }
696 interp = imgp->image_header + phdr[i].p_offset;
697 break;
698 default:
699 break;
700 }
701 }
702
703 brand_info = __elfN(get_brandinfo)(hdr, interp);
704 if (brand_info == NULL) {
705 uprintf("ELF binary type \"%u\" not known.\n",
706 hdr->e_ident[EI_OSABI]);
707 error = ENOEXEC;
708 goto fail;
709 }
710 sv = brand_info->sysvec;
711 if (interp != NULL && brand_info->interp_newpath != NULL)
712 interp = brand_info->interp_newpath;
711
712 if ((error = exec_extract_strings(imgp)) != 0)
713 goto fail;
714
715 exec_new_vmspace(imgp, sv);
716
717 vmspace = imgp->proc->p_vmspace;
718
719 for (i = 0; i < hdr->e_phnum; i++) {
720 switch (phdr[i].p_type) {
721 case PT_LOAD: /* Loadable segment */
722 prot = 0;
723 if (phdr[i].p_flags & PF_X)
724 prot |= VM_PROT_EXECUTE;
725 if (phdr[i].p_flags & PF_W)
726 prot |= VM_PROT_WRITE;
727 if (phdr[i].p_flags & PF_R)
728 prot |= VM_PROT_READ;
729
730#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
731 /*
732 * Some x86 binaries assume read == executable,
733 * notably the M3 runtime and therefore cvsup
734 */
735 if (prot & VM_PROT_READ)
736 prot |= VM_PROT_EXECUTE;
737#endif
738
739 if ((error = __elfN(load_section)(imgp->proc, vmspace,
740 imgp->vp, imgp->object, phdr[i].p_offset,
741 (caddr_t)(uintptr_t)phdr[i].p_vaddr,
742 phdr[i].p_memsz, phdr[i].p_filesz, prot,
743 sv->sv_pagesize)) != 0)
744 goto fail;
745
746 seg_addr = trunc_page(phdr[i].p_vaddr);
747 seg_size = round_page(phdr[i].p_memsz +
748 phdr[i].p_vaddr - seg_addr);
749
750 /*
751 * Is this .text or .data? We can't use
752 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
753 * alpha terribly and possibly does other bad
754 * things so we stick to the old way of figuring
755 * it out: If the segment contains the program
756 * entry point, it's a text segment, otherwise it
757 * is a data segment.
758 *
759 * Note that obreak() assumes that data_addr +
760 * data_size == end of data load area, and the ELF
761 * file format expects segments to be sorted by
762 * address. If multiple data segments exist, the
763 * last one will be used.
764 */
765 if (hdr->e_entry >= phdr[i].p_vaddr &&
766 hdr->e_entry < (phdr[i].p_vaddr +
767 phdr[i].p_memsz)) {
768 text_size = seg_size;
769 text_addr = seg_addr;
770 entry = (u_long)hdr->e_entry;
771 } else {
772 data_size = seg_size;
773 data_addr = seg_addr;
774 }
775 total_size += seg_size;
776 break;
777 case PT_PHDR: /* Program header table info */
778 proghdr = phdr[i].p_vaddr;
779 break;
780 default:
781 break;
782 }
783 }
784
785 if (data_addr == 0 && data_size == 0) {
786 data_addr = text_addr;
787 data_size = text_size;
788 }
789
790 /*
791 * Check limits. It should be safe to check the
792 * limits after loading the segments since we do
793 * not actually fault in all the segments pages.
794 */
795 if (data_size >
796 imgp->proc->p_rlimit[RLIMIT_DATA].rlim_cur ||
797 text_size > maxtsiz ||
798 total_size >
799 imgp->proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
800 error = ENOMEM;
801 goto fail;
802 }
803
804 vmspace->vm_tsize = text_size >> PAGE_SHIFT;
805 vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
806 vmspace->vm_dsize = data_size >> PAGE_SHIFT;
807 vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
808
809 /*
810 * We load the dynamic linker where a userland call
811 * to mmap(0, ...) would put it. The rationale behind this
812 * calculation is that it leaves room for the heap to grow to
813 * its maximum allowed size.
814 */
815 addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
816 imgp->proc->p_rlimit[RLIMIT_DATA].rlim_max);
817
818 imgp->entry_addr = entry;
819
820 imgp->proc->p_sysent = sv;
821 if (interp != NULL) {
823 if (interp != NULL && brand_info->emul_path != NULL &&
824 brand_info->emul_path[0] != '\0') {
822 path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
823 snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
824 interp);
825 if ((error = __elfN(load_file)(imgp->proc, path, &addr,
826 &imgp->entry_addr, sv->sv_pagesize)) != 0) {
827 if ((error = __elfN(load_file)(imgp->proc, interp,
828 &addr, &imgp->entry_addr, sv->sv_pagesize)) != 0) {
829 uprintf("ELF interpreter %s not found\n",
830 path);
831 free(path, M_TEMP);
832 goto fail;
833 }
834 }
828 error = __elfN(load_file)(imgp->proc, path, &addr,
829 &imgp->entry_addr, sv->sv_pagesize);
835 free(path, M_TEMP);
831 if (error == 0)
832 interp = NULL;
836 }
834 if (interp != NULL) {
835 error = __elfN(load_file)(imgp->proc, interp, &addr,
836 &imgp->entry_addr, sv->sv_pagesize);
837 if (error != 0) {
838 uprintf("ELF interpreter %s not found\n", interp);
839 goto fail;
840 }
841 }
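Reassembled from the added lines above, the interpreter logic in 123742 now reads: try the brand's emul_path-prefixed location first (when one is configured), and fall back to the raw PT_INTERP path only if that load fails.

	if (interp != NULL && brand_info->emul_path != NULL &&
	    brand_info->emul_path[0] != '\0') {
		path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
		snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
		    interp);
		error = __elfN(load_file)(imgp->proc, path, &addr,
		    &imgp->entry_addr, sv->sv_pagesize);
		free(path, M_TEMP);
		if (error == 0)
			interp = NULL;		/* prefixed load worked */
	}
	if (interp != NULL) {
		error = __elfN(load_file)(imgp->proc, interp, &addr,
		    &imgp->entry_addr, sv->sv_pagesize);
		if (error != 0) {
			uprintf("ELF interpreter %s not found\n", interp);
			goto fail;
		}
	}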
837
838 /*
839 * Construct auxargs table (used by the fixup routine)
840 */
841 elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
842 elf_auxargs->execfd = -1;
843 elf_auxargs->phdr = proghdr;
844 elf_auxargs->phent = hdr->e_phentsize;
845 elf_auxargs->phnum = hdr->e_phnum;
846 elf_auxargs->pagesz = PAGE_SIZE;
847 elf_auxargs->base = addr;
848 elf_auxargs->flags = 0;
849 elf_auxargs->entry = entry;
850 elf_auxargs->trace = elf_trace;
851
852 imgp->auxargs = elf_auxargs;
853 imgp->interpreted = 0;
854
855fail:
856 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
857 return (error);
858}
859
860#define suword __CONCAT(suword, __ELF_WORD_SIZE)
861
862int
863__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
864{
865 Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
866 Elf_Addr *base;
867 Elf_Addr *pos;
868
869 base = (Elf_Addr *)*stack_base;
870 pos = base + (imgp->argc + imgp->envc + 2);
871
872 if (args->trace) {
873 AUXARGS_ENTRY(pos, AT_DEBUG, 1);
874 }
875 if (args->execfd != -1) {
876 AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
877 }
878 AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
879 AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
880 AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
881 AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
882 AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
883 AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
884 AUXARGS_ENTRY(pos, AT_BASE, args->base);
885 AUXARGS_ENTRY(pos, AT_NULL, 0);
886
887 free(imgp->auxargs, M_TEMP);
888 imgp->auxargs = NULL;
889
890 base--;
891 suword(base, (long)imgp->argc);
892 *stack_base = (register_t *)base;
893 return (0);
894}
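The user stack region this produces looks roughly as follows (a sketch, one machine word per slot; the argv/envv pointer arrays themselves are laid out elsewhere in the exec path):

	/*
	 *  *stack_base -> argc			(the suword above)
	 *		   argv[0 .. argc-1], NULL
	 *		   envv[0 .. envc-1], NULL
	 *  pos         -> AT_PHDR, AT_PHENT, AT_PHNUM, AT_PAGESZ, AT_FLAGS,
	 *		   AT_ENTRY, AT_BASE, AT_NULL (plus AT_DEBUG and
	 *		   AT_EXECFD when requested)
	 */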
895
896/*
897 * Code for generating ELF core dumps.
898 */
899
900typedef void (*segment_callback)(vm_map_entry_t, void *);
901
902/* Closure for cb_put_phdr(). */
903struct phdr_closure {
904 Elf_Phdr *phdr; /* Program header to fill in */
905 Elf_Off offset; /* Offset of segment in core file */
906};
907
908/* Closure for cb_size_segment(). */
909struct sseg_closure {
910 int count; /* Count of writable segments. */
911 size_t size; /* Total size of all writable segments. */
912};
913
914static void cb_put_phdr(vm_map_entry_t, void *);
915static void cb_size_segment(vm_map_entry_t, void *);
916static void each_writable_segment(struct proc *, segment_callback, void *);
917static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
918 int, void *, size_t);
919static void __elfN(puthdr)(struct proc *, void *, size_t *,
920 const prstatus_t *, const prfpregset_t *, const prpsinfo_t *, int);
921static void __elfN(putnote)(void *, size_t *, const char *, int,
922 const void *, size_t);
923
924extern int osreldate;
925
926int
927__elfN(coredump)(td, vp, limit)
928 struct thread *td;
929 register struct vnode *vp;
930 off_t limit;
931{
932 register struct proc *p = td->td_proc;
933 register struct ucred *cred = td->td_ucred;
934 int error = 0;
935 struct sseg_closure seginfo;
936 void *hdr;
937 size_t hdrsize;
938
939 /* Size the program segments. */
940 seginfo.count = 0;
941 seginfo.size = 0;
942 each_writable_segment(p, cb_size_segment, &seginfo);
943
944 /*
945 * Calculate the size of the core file header area by making
946 * a dry run of generating it. Nothing is written, but the
947 * size is calculated.
948 */
949 hdrsize = 0;
950 __elfN(puthdr)((struct proc *)NULL, (void *)NULL, &hdrsize,
951 (const prstatus_t *)NULL, (const prfpregset_t *)NULL,
952 (const prpsinfo_t *)NULL, seginfo.count);
953
954 if (hdrsize + seginfo.size >= limit)
955 return (EFAULT);
956
957 /*
958 * Allocate memory for building the header, fill it up,
959 * and write it out.
960 */
961 hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
962 if (hdr == NULL) {
963 return (EINVAL);
964 }
965 error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
966
967 /* Write the contents of all of the writable segments. */
968 if (error == 0) {
969 Elf_Phdr *php;
970 off_t offset;
971 int i;
972
973 php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
974 offset = hdrsize;
975 for (i = 0; i < seginfo.count; i++) {
976 error = vn_rdwr_inchunks(UIO_WRITE, vp,
977 (caddr_t)(uintptr_t)php->p_vaddr,
978 php->p_filesz, offset, UIO_USERSPACE,
979 IO_UNIT | IO_DIRECT, cred, NOCRED, (int *)NULL,
980 curthread); /* XXXKSE */
981 if (error != 0)
982 break;
983 offset += php->p_filesz;
984 php++;
985 }
986 }
987 free(hdr, M_TEMP);
988
989 return (error);
990}
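Note the two-pass pattern here: __elfN(puthdr)() runs once with NULL buffers purely to advance the size cursor, then again (via __elfN(corehdr)()) to fill the allocated header. Condensed:

	hdrsize = 0;
	__elfN(puthdr)(NULL, NULL, &hdrsize, NULL, NULL, NULL,
	    seginfo.count);				/* pass 1: measure */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
							/* pass 2: fill + write */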
991
992/*
993 * A callback for each_writable_segment() to write out the segment's
994 * program header entry.
995 */
996static void
997cb_put_phdr(entry, closure)
998 vm_map_entry_t entry;
999 void *closure;
1000{
1001 struct phdr_closure *phc = (struct phdr_closure *)closure;
1002 Elf_Phdr *phdr = phc->phdr;
1003
1004 phc->offset = round_page(phc->offset);
1005
1006 phdr->p_type = PT_LOAD;
1007 phdr->p_offset = phc->offset;
1008 phdr->p_vaddr = entry->start;
1009 phdr->p_paddr = 0;
1010 phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1011 phdr->p_align = PAGE_SIZE;
1012 phdr->p_flags = 0;
1013 if (entry->protection & VM_PROT_READ)
1014 phdr->p_flags |= PF_R;
1015 if (entry->protection & VM_PROT_WRITE)
1016 phdr->p_flags |= PF_W;
1017 if (entry->protection & VM_PROT_EXECUTE)
1018 phdr->p_flags |= PF_X;
1019
1020 phc->offset += phdr->p_filesz;
1021 phc->phdr++;
1022}
1023
1024/*
1025 * A callback for each_writable_segment() to gather information about
1026 * the number of segments and their total size.
1027 */
1028static void
1029cb_size_segment(entry, closure)
1030 vm_map_entry_t entry;
1031 void *closure;
1032{
1033 struct sseg_closure *ssc = (struct sseg_closure *)closure;
1034
1035 ssc->count++;
1036 ssc->size += entry->end - entry->start;
1037}
1038
1039/*
1040 * For each writable segment in the process's memory map, call the given
1041 * function with a pointer to the map entry and some arbitrary
1042 * caller-supplied data.
1043 */
1044static void
1045each_writable_segment(p, func, closure)
1046 struct proc *p;
1047 segment_callback func;
1048 void *closure;
1049{
1050 vm_map_t map = &p->p_vmspace->vm_map;
1051 vm_map_entry_t entry;
1052
1053 for (entry = map->header.next; entry != &map->header;
1054 entry = entry->next) {
1055 vm_object_t obj;
1056
1057 /*
 1058 * Don't dump inaccessible mappings; handle legacy
1059 * coredump mode.
1060 *
1061 * Note that read-only segments related to the elf binary
1062 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
1063 * need to arbitrarily ignore such segments.
1064 */
1065 if (elf_legacy_coredump) {
1066 if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
1067 continue;
1068 } else {
1069 if ((entry->protection & VM_PROT_ALL) == 0)
1070 continue;
1071 }
1072
1073 /*
 1074 * Don't include a memory segment in the coredump if
1075 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1076 * madvise(2). Do not dump submaps (i.e. parts of the
1077 * kernel map).
1078 */
1079 if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
1080 continue;
1081
1082 if ((obj = entry->object.vm_object) == NULL)
1083 continue;
1084
1085 /* Find the deepest backing object. */
1086 while (obj->backing_object != NULL)
1087 obj = obj->backing_object;
1088
1089 /* Ignore memory-mapped devices and such things. */
1090 if (obj->type != OBJT_DEFAULT &&
1091 obj->type != OBJT_SWAP &&
1092 obj->type != OBJT_VNODE)
1093 continue;
1094
1095 (*func)(entry, closure);
1096 }
1097}
1098
1099/*
1100 * Write the core file header to the file, including padding up to
1101 * the page boundary.
1102 */
1103static int
1104__elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
1105 struct thread *td;
1106 struct vnode *vp;
1107 struct ucred *cred;
1108 int numsegs;
1109 size_t hdrsize;
1110 void *hdr;
1111{
1112 struct {
1113 prstatus_t status;
1114 prfpregset_t fpregset;
1115 prpsinfo_t psinfo;
1116 } *tempdata;
1117 struct proc *p = td->td_proc;
1118 size_t off;
1119 prstatus_t *status;
1120 prfpregset_t *fpregset;
1121 prpsinfo_t *psinfo;
1122
1123 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO | M_WAITOK);
1124 status = &tempdata->status;
1125 fpregset = &tempdata->fpregset;
1126 psinfo = &tempdata->psinfo;
1127
1128 /* Gather the information for the header. */
1129 status->pr_version = PRSTATUS_VERSION;
1130 status->pr_statussz = sizeof(prstatus_t);
1131 status->pr_gregsetsz = sizeof(gregset_t);
1132 status->pr_fpregsetsz = sizeof(fpregset_t);
1133 status->pr_osreldate = osreldate;
1134 status->pr_cursig = p->p_sig;
1135 status->pr_pid = p->p_pid;
1136 fill_regs(td, &status->pr_reg);
1137
1138 fill_fpregs(td, fpregset);
1139
1140 psinfo->pr_version = PRPSINFO_VERSION;
1141 psinfo->pr_psinfosz = sizeof(prpsinfo_t);
1142 strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
1143
1144 /* XXX - We don't fill in the command line arguments properly yet. */
1145 strlcpy(psinfo->pr_psargs, p->p_comm, sizeof(psinfo->pr_psargs));
1146
1147 /* Fill in the header. */
1148 bzero(hdr, hdrsize);
1149 off = 0;
1150 __elfN(puthdr)(p, hdr, &off, status, fpregset, psinfo, numsegs);
1151
1152 free(tempdata, M_TEMP);
1153
1154 /* Write it to the core file. */
1155 return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
1156 UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1157 td)); /* XXXKSE */
1158}
1159
1160static void
1161__elfN(puthdr)(struct proc *p, void *dst, size_t *off, const prstatus_t *status,
1162 const prfpregset_t *fpregset, const prpsinfo_t *psinfo, int numsegs)
1163{
1164 size_t ehoff;
1165 size_t phoff;
1166 size_t noteoff;
1167 size_t notesz;
1168
1169 ehoff = *off;
1170 *off += sizeof(Elf_Ehdr);
1171
1172 phoff = *off;
1173 *off += (numsegs + 1) * sizeof(Elf_Phdr);
1174
1175 noteoff = *off;
1176 __elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
1177 sizeof *status);
1178 __elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
1179 sizeof *fpregset);
1180 __elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
1181 sizeof *psinfo);
1182 notesz = *off - noteoff;
1183
1184 /* Align up to a page boundary for the program segments. */
1185 *off = round_page(*off);
1186
1187 if (dst != NULL) {
1188 Elf_Ehdr *ehdr;
1189 Elf_Phdr *phdr;
1190 struct phdr_closure phc;
1191
1192 /*
1193 * Fill in the ELF header.
1194 */
1195 ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
1196 ehdr->e_ident[EI_MAG0] = ELFMAG0;
1197 ehdr->e_ident[EI_MAG1] = ELFMAG1;
1198 ehdr->e_ident[EI_MAG2] = ELFMAG2;
1199 ehdr->e_ident[EI_MAG3] = ELFMAG3;
1200 ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1201 ehdr->e_ident[EI_DATA] = ELF_DATA;
1202 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1203 ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1204 ehdr->e_ident[EI_ABIVERSION] = 0;
1205 ehdr->e_ident[EI_PAD] = 0;
1206 ehdr->e_type = ET_CORE;
1207 ehdr->e_machine = ELF_ARCH;
1208 ehdr->e_version = EV_CURRENT;
1209 ehdr->e_entry = 0;
1210 ehdr->e_phoff = phoff;
1211 ehdr->e_flags = 0;
1212 ehdr->e_ehsize = sizeof(Elf_Ehdr);
1213 ehdr->e_phentsize = sizeof(Elf_Phdr);
1214 ehdr->e_phnum = numsegs + 1;
1215 ehdr->e_shentsize = sizeof(Elf_Shdr);
1216 ehdr->e_shnum = 0;
1217 ehdr->e_shstrndx = SHN_UNDEF;
1218
1219 /*
1220 * Fill in the program header entries.
1221 */
1222 phdr = (Elf_Phdr *)((char *)dst + phoff);
1223
1224 /* The note segement. */
1225 phdr->p_type = PT_NOTE;
1226 phdr->p_offset = noteoff;
1227 phdr->p_vaddr = 0;
1228 phdr->p_paddr = 0;
1229 phdr->p_filesz = notesz;
1230 phdr->p_memsz = 0;
1231 phdr->p_flags = 0;
1232 phdr->p_align = 0;
1233 phdr++;
1234
1235 /* All the writable segments from the program. */
1236 phc.phdr = phdr;
1237 phc.offset = *off;
1238 each_writable_segment(p, cb_put_phdr, &phc);
1239 }
1240}
1241
1242static void
1243__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
1244 const void *desc, size_t descsz)
1245{
1246 Elf_Note note;
1247
1248 note.n_namesz = strlen(name) + 1;
1249 note.n_descsz = descsz;
1250 note.n_type = type;
1251 if (dst != NULL)
1252 bcopy(&note, (char *)dst + *off, sizeof note);
1253 *off += sizeof note;
1254 if (dst != NULL)
1255 bcopy(name, (char *)dst + *off, note.n_namesz);
1256 *off += roundup2(note.n_namesz, sizeof(Elf_Size));
1257 if (dst != NULL)
1258 bcopy(desc, (char *)dst + *off, note.n_descsz);
1259 *off += roundup2(note.n_descsz, sizeof(Elf_Size));
1260}
1261
1262/*
1263 * Tell kern_execve.c about it, with a little help from the linker.
1264 */
1265static struct execsw __elfN(execsw) = {
1266 __CONCAT(exec_, __elfN(imgact)),
1267 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
1268};
1269EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
842
843 /*
844 * Construct auxargs table (used by the fixup routine)
845 */
846 elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
847 elf_auxargs->execfd = -1;
848 elf_auxargs->phdr = proghdr;
849 elf_auxargs->phent = hdr->e_phentsize;
850 elf_auxargs->phnum = hdr->e_phnum;
851 elf_auxargs->pagesz = PAGE_SIZE;
852 elf_auxargs->base = addr;
853 elf_auxargs->flags = 0;
854 elf_auxargs->entry = entry;
855 elf_auxargs->trace = elf_trace;
856
857 imgp->auxargs = elf_auxargs;
858 imgp->interpreted = 0;
859
860fail:
861 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
862 return (error);
863}
864
865#define suword __CONCAT(suword, __ELF_WORD_SIZE)
866
867int
868__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
869{
870 Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
871 Elf_Addr *base;
872 Elf_Addr *pos;
873
874 base = (Elf_Addr *)*stack_base;
875 pos = base + (imgp->argc + imgp->envc + 2);
876
877 if (args->trace) {
878 AUXARGS_ENTRY(pos, AT_DEBUG, 1);
879 }
880 if (args->execfd != -1) {
881 AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
882 }
883 AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
884 AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
885 AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
886 AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
887 AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
888 AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
889 AUXARGS_ENTRY(pos, AT_BASE, args->base);
890 AUXARGS_ENTRY(pos, AT_NULL, 0);
891
892 free(imgp->auxargs, M_TEMP);
893 imgp->auxargs = NULL;
894
895 base--;
896 suword(base, (long)imgp->argc);
897 *stack_base = (register_t *)base;
898 return (0);
899}
900
901/*
902 * Code for generating ELF core dumps.
903 */
904
905typedef void (*segment_callback)(vm_map_entry_t, void *);
906
907/* Closure for cb_put_phdr(). */
908struct phdr_closure {
909 Elf_Phdr *phdr; /* Program header to fill in */
910 Elf_Off offset; /* Offset of segment in core file */
911};
912
913/* Closure for cb_size_segment(). */
914struct sseg_closure {
915 int count; /* Count of writable segments. */
916 size_t size; /* Total size of all writable segments. */
917};
918
919static void cb_put_phdr(vm_map_entry_t, void *);
920static void cb_size_segment(vm_map_entry_t, void *);
921static void each_writable_segment(struct proc *, segment_callback, void *);
922static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
923 int, void *, size_t);
924static void __elfN(puthdr)(struct proc *, void *, size_t *,
925 const prstatus_t *, const prfpregset_t *, const prpsinfo_t *, int);
926static void __elfN(putnote)(void *, size_t *, const char *, int,
927 const void *, size_t);
928
929extern int osreldate;
930
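/*
 * Core dump strategy, briefly: size the writable segments with
 * cb_size_segment(), do a dry run of __elfN(puthdr)() to learn the
 * header size, write the real header with __elfN(corehdr)(), then
 * append each segment's contents directly from user space.
 */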
931int
932__elfN(coredump)(td, vp, limit)
933 struct thread *td;
934 register struct vnode *vp;
935 off_t limit;
936{
937 register struct proc *p = td->td_proc;
938 register struct ucred *cred = td->td_ucred;
939 int error = 0;
940 struct sseg_closure seginfo;
941 void *hdr;
942 size_t hdrsize;
943
944 /* Size the program segments. */
945 seginfo.count = 0;
946 seginfo.size = 0;
947 each_writable_segment(p, cb_size_segment, &seginfo);
948
949	/*
950	 * Calculate the size of the core file header area by making
951	 * a dry run of generating it: nothing is written, but the
952	 * final size is computed.
953	 */
954 hdrsize = 0;
955 __elfN(puthdr)((struct proc *)NULL, (void *)NULL, &hdrsize,
956 (const prstatus_t *)NULL, (const prfpregset_t *)NULL,
957 (const prpsinfo_t *)NULL, seginfo.count);
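	/*
	 * With dst == NULL, puthdr() and putnote() skip all stores and
	 * merely advance the offset, so this pass yields the final
	 * header size without touching memory.
	 */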
958
959 if (hdrsize + seginfo.size >= limit)
960 return (EFAULT);
961
962 /*
963 * Allocate memory for building the header, fill it up,
964 * and write it out.
965 */
966 hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
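	/* With M_WAITOK, malloc(9) never returns NULL; this check is defensive. */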
967 if (hdr == NULL) {
968 return (EINVAL);
969 }
970 error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
971
972 /* Write the contents of all of the writable segments. */
973 if (error == 0) {
974 Elf_Phdr *php;
975 off_t offset;
976 int i;
977
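		/* Skip the PT_NOTE program header; the PT_LOAD entries follow it. */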
978 php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
979 offset = hdrsize;
980 for (i = 0; i < seginfo.count; i++) {
981 error = vn_rdwr_inchunks(UIO_WRITE, vp,
982 (caddr_t)(uintptr_t)php->p_vaddr,
983 php->p_filesz, offset, UIO_USERSPACE,
984 IO_UNIT | IO_DIRECT, cred, NOCRED, (int *)NULL,
985 curthread); /* XXXKSE */
986 if (error != 0)
987 break;
988 offset += php->p_filesz;
989 php++;
990 }
991 }
992 free(hdr, M_TEMP);
993
994 return (error);
995}
996
997/*
998 * A callback for each_writable_segment() to write out the segment's
999 * program header entry.
1000 */
1001static void
1002cb_put_phdr(entry, closure)
1003 vm_map_entry_t entry;
1004 void *closure;
1005{
1006 struct phdr_closure *phc = (struct phdr_closure *)closure;
1007 Elf_Phdr *phdr = phc->phdr;
1008
1009 phc->offset = round_page(phc->offset);
1010
1011 phdr->p_type = PT_LOAD;
1012 phdr->p_offset = phc->offset;
1013 phdr->p_vaddr = entry->start;
1014 phdr->p_paddr = 0;
1015 phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1016 phdr->p_align = PAGE_SIZE;
1017 phdr->p_flags = 0;
1018 if (entry->protection & VM_PROT_READ)
1019 phdr->p_flags |= PF_R;
1020 if (entry->protection & VM_PROT_WRITE)
1021 phdr->p_flags |= PF_W;
1022 if (entry->protection & VM_PROT_EXECUTE)
1023 phdr->p_flags |= PF_X;
1024
1025 phc->offset += phdr->p_filesz;
1026 phc->phdr++;
1027}
1028
1029/*
1030 * A callback for each_writable_segment() to gather information about
1031 * the number of segments and their total size.
1032 */
1033static void
1034cb_size_segment(entry, closure)
1035 vm_map_entry_t entry;
1036 void *closure;
1037{
1038 struct sseg_closure *ssc = (struct sseg_closure *)closure;
1039
1040 ssc->count++;
1041 ssc->size += entry->end - entry->start;
1042}
1043
1044/*
1045 * For each writable segment in the process's memory map, call the given
1046 * function with a pointer to the map entry and some arbitrary
1047 * caller-supplied data.
1048 */
1049static void
1050each_writable_segment(p, func, closure)
1051 struct proc *p;
1052 segment_callback func;
1053 void *closure;
1054{
1055 vm_map_t map = &p->p_vmspace->vm_map;
1056 vm_map_entry_t entry;
1057
1058 for (entry = map->header.next; entry != &map->header;
1059 entry = entry->next) {
1060 vm_object_t obj;
1061
1062		/*
1063		 * Don't dump inaccessible mappings; handle legacy
1064		 * coredump mode.
1065		 *
1066		 * Note that read-only segments related to the ELF binary
1067		 * are now marked MAP_ENTRY_NOCOREDUMP, so we no longer
1068		 * need to ignore such segments arbitrarily.
1069		 */
1070 if (elf_legacy_coredump) {
1071 if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
1072 continue;
1073 } else {
1074 if ((entry->protection & VM_PROT_ALL) == 0)
1075 continue;
1076 }
1077
1078		/*
1079		 * Don't include a memory segment in the core dump if
1080		 * MAP_NOCORE was set via mmap(2) or MADV_NOCORE via
1081		 * madvise(2). Do not dump submaps (i.e. parts of the
1082		 * kernel map).
1083		 */
1084 if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
1085 continue;
1086
1087 if ((obj = entry->object.vm_object) == NULL)
1088 continue;
1089
1090 /* Find the deepest backing object. */
1091 while (obj->backing_object != NULL)
1092 obj = obj->backing_object;
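		/*
		 * The object at the bottom of the shadow chain determines
		 * the pager type tested below.
		 */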
1093
1094 /* Ignore memory-mapped devices and such things. */
1095 if (obj->type != OBJT_DEFAULT &&
1096 obj->type != OBJT_SWAP &&
1097 obj->type != OBJT_VNODE)
1098 continue;
1099
1100 (*func)(entry, closure);
1101 }
1102}
1103
1104/*
1105 * Write the core file header to the file, including padding up to
1106 * the page boundary.
1107 */
1108static int
1109__elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
1110 struct thread *td;
1111 struct vnode *vp;
1112 struct ucred *cred;
1113 int numsegs;
1114 size_t hdrsize;
1115 void *hdr;
1116{
1117 struct {
1118 prstatus_t status;
1119 prfpregset_t fpregset;
1120 prpsinfo_t psinfo;
1121 } *tempdata;
1122 struct proc *p = td->td_proc;
1123 size_t off;
1124 prstatus_t *status;
1125 prfpregset_t *fpregset;
1126 prpsinfo_t *psinfo;
1127
1128 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO | M_WAITOK);
1129 status = &tempdata->status;
1130 fpregset = &tempdata->fpregset;
1131 psinfo = &tempdata->psinfo;
1132
1133 /* Gather the information for the header. */
1134 status->pr_version = PRSTATUS_VERSION;
1135 status->pr_statussz = sizeof(prstatus_t);
1136 status->pr_gregsetsz = sizeof(gregset_t);
1137 status->pr_fpregsetsz = sizeof(fpregset_t);
1138 status->pr_osreldate = osreldate;
1139 status->pr_cursig = p->p_sig;
1140 status->pr_pid = p->p_pid;
1141 fill_regs(td, &status->pr_reg);
1142
1143 fill_fpregs(td, fpregset);
1144
1145 psinfo->pr_version = PRPSINFO_VERSION;
1146 psinfo->pr_psinfosz = sizeof(prpsinfo_t);
1147 strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
1148
1149 /* XXX - We don't fill in the command line arguments properly yet. */
1150 strlcpy(psinfo->pr_psargs, p->p_comm, sizeof(psinfo->pr_psargs));
1151
1152 /* Fill in the header. */
1153 bzero(hdr, hdrsize);
1154 off = 0;
1155 __elfN(puthdr)(p, hdr, &off, status, fpregset, psinfo, numsegs);
1156
1157 free(tempdata, M_TEMP);
1158
1159 /* Write it to the core file. */
1160 return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
1161 UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1162 td)); /* XXXKSE */
1163}
1164
1165static void
1166__elfN(puthdr)(struct proc *p, void *dst, size_t *off, const prstatus_t *status,
1167 const prfpregset_t *fpregset, const prpsinfo_t *psinfo, int numsegs)
1168{
1169 size_t ehoff;
1170 size_t phoff;
1171 size_t noteoff;
1172 size_t notesz;
1173
1174 ehoff = *off;
1175 *off += sizeof(Elf_Ehdr);
1176
1177 phoff = *off;
1178 *off += (numsegs + 1) * sizeof(Elf_Phdr);
1179
1180 noteoff = *off;
1181 __elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
1182 sizeof *status);
1183 __elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
1184 sizeof *fpregset);
1185 __elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
1186 sizeof *psinfo);
1187 notesz = *off - noteoff;
1188
1189 /* Align up to a page boundary for the program segments. */
1190 *off = round_page(*off);
1191
1192 if (dst != NULL) {
1193 Elf_Ehdr *ehdr;
1194 Elf_Phdr *phdr;
1195 struct phdr_closure phc;
1196
1197 /*
1198 * Fill in the ELF header.
1199 */
1200 ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
1201 ehdr->e_ident[EI_MAG0] = ELFMAG0;
1202 ehdr->e_ident[EI_MAG1] = ELFMAG1;
1203 ehdr->e_ident[EI_MAG2] = ELFMAG2;
1204 ehdr->e_ident[EI_MAG3] = ELFMAG3;
1205 ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1206 ehdr->e_ident[EI_DATA] = ELF_DATA;
1207 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1208 ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1209 ehdr->e_ident[EI_ABIVERSION] = 0;
1210 ehdr->e_ident[EI_PAD] = 0;
1211 ehdr->e_type = ET_CORE;
1212 ehdr->e_machine = ELF_ARCH;
1213 ehdr->e_version = EV_CURRENT;
1214 ehdr->e_entry = 0;
1215 ehdr->e_phoff = phoff;
1216 ehdr->e_flags = 0;
1217 ehdr->e_ehsize = sizeof(Elf_Ehdr);
1218 ehdr->e_phentsize = sizeof(Elf_Phdr);
1219 ehdr->e_phnum = numsegs + 1;
1220 ehdr->e_shentsize = sizeof(Elf_Shdr);
1221 ehdr->e_shnum = 0;
1222 ehdr->e_shstrndx = SHN_UNDEF;
1223
1224 /*
1225 * Fill in the program header entries.
1226 */
1227 phdr = (Elf_Phdr *)((char *)dst + phoff);
1228
1229		/* The note segment. */
1230 phdr->p_type = PT_NOTE;
1231 phdr->p_offset = noteoff;
1232 phdr->p_vaddr = 0;
1233 phdr->p_paddr = 0;
1234 phdr->p_filesz = notesz;
1235 phdr->p_memsz = 0;
1236 phdr->p_flags = 0;
1237 phdr->p_align = 0;
1238 phdr++;
1239
1240 /* All the writable segments from the program. */
1241 phc.phdr = phdr;
1242 phc.offset = *off;
1243 each_writable_segment(p, cb_put_phdr, &phc);
1244 }
1245}
1246
1247static void
1248__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
1249 const void *desc, size_t descsz)
1250{
1251 Elf_Note note;
1252
1253 note.n_namesz = strlen(name) + 1;
1254 note.n_descsz = descsz;
1255 note.n_type = type;
1256 if (dst != NULL)
1257 bcopy(&note, (char *)dst + *off, sizeof note);
1258 *off += sizeof note;
1259 if (dst != NULL)
1260 bcopy(name, (char *)dst + *off, note.n_namesz);
1261 *off += roundup2(note.n_namesz, sizeof(Elf_Size));
1262 if (dst != NULL)
1263 bcopy(desc, (char *)dst + *off, note.n_descsz);
1264 *off += roundup2(note.n_descsz, sizeof(Elf_Size));
1265}
1266
1267/*
1268 * Tell kern_execve.c about it, with a little help from the linker.
1269 */
1270static struct execsw __elfN(execsw) = {
1271 __CONCAT(exec_, __elfN(imgact)),
1272 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
1273};
1274EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));