imgact_elf.c (133323 -> 133464)
/*-
 * Copyright (c) 2000 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/imgact_elf.c 133464 2004-08-11 02:35:06Z marcel $");

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

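/*
 * Byte offset within e_ident[] where FreeBSD 3.x binaries carried an
 * ASCII brand string (e.g. "FreeBSD"), predating use of the EI_OSABI
 * byte; __elfN(get_brandinfo)() below still honors it via the
 * compat_3_brand field.
 */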
#define OLD_EI_BRAND	8

static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
    const char *interp);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize);
static int __elfN(load_section)(struct proc *p,
    struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot, size_t pagesize);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);

SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
    "");

int __elfN(fallback_brand) = -1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
    &__elfN(fallback_brand));

static int elf_trace = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");

static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0, "");

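/*
 * Registered brands, managed with __elfN(insert_brand_entry)() and
 * __elfN(remove_brand_entry)() below; ABI emulation modules (e.g. the
 * Linux emulator) add their Elf_Brandinfo here at load time.
 */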
static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
    int i;

    for (i = 0; i < MAX_BRANDS; i++) {
        if (elf_brand_list[i] == NULL) {
            elf_brand_list[i] = entry;
            break;
        }
    }
    if (i == MAX_BRANDS)
        return (-1);
    return (0);
}

int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
    int i;

    for (i = 0; i < MAX_BRANDS; i++) {
        if (elf_brand_list[i] == entry) {
            elf_brand_list[i] = NULL;
            break;
        }
    }
    if (i == MAX_BRANDS)
        return (-1);
    return (0);
}

int
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
    struct proc *p;
    int rval = FALSE;

    sx_slock(&allproc_lock);
    LIST_FOREACH(p, &allproc, p_list) {
        if (p->p_sysent == entry->sysvec) {
            rval = TRUE;
            break;
        }
    }
    sx_sunlock(&allproc_lock);

    return (rval);
}

static Elf_Brandinfo *
__elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
{
    Elf_Brandinfo *bi;
    int i;

    /*
     * We support three types of branding -- (1) the ELF EI_OSABI field
     * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
     * branding within the ELF header, and (3) the interpreter path from
     * PT_INTERP, matched against each brand's `interp_path' field. We
     * should also look for the ".note.ABI-tag" ELF section, now present
     * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
     */

    /* If the executable has a brand, search for it in the brand list. */
    for (i = 0; i < MAX_BRANDS; i++) {
        bi = elf_brand_list[i];
        if (bi != NULL && hdr->e_machine == bi->machine &&
            (hdr->e_ident[EI_OSABI] == bi->brand ||
            strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
            bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
            return (bi);
    }

    /* Lacking a known brand, search for a recognized interpreter. */
    if (interp != NULL) {
        for (i = 0; i < MAX_BRANDS; i++) {
            bi = elf_brand_list[i];
            if (bi != NULL && hdr->e_machine == bi->machine &&
                strcmp(interp, bi->interp_path) == 0)
                return (bi);
        }
    }

    /* Lacking a recognized interpreter, try the default brand */
    for (i = 0; i < MAX_BRANDS; i++) {
        bi = elf_brand_list[i];
        if (bi != NULL && hdr->e_machine == bi->machine &&
            __elfN(fallback_brand) == bi->brand)
            return (bi);
    }
    return (NULL);
}

static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
    Elf_Brandinfo *bi;
    int i;

    if (!IS_ELF(*hdr) ||
        hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
        hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
        hdr->e_ident[EI_VERSION] != EV_CURRENT ||
        hdr->e_phentsize != sizeof(Elf_Phdr) ||
        hdr->e_version != ELF_TARG_VER)
        return (ENOEXEC);

    /*
     * Make sure we have at least one brand for this machine.
     */

    for (i = 0; i < MAX_BRANDS; i++) {
        bi = elf_brand_list[i];
        if (bi != NULL && bi->machine == hdr->e_machine)
            break;
    }
    if (i == MAX_BRANDS)
        return (ENOEXEC);

    return (0);
}

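/*
 * Map the sub-page region [start, end) into "map": back it with an
 * anonymous page, then, if a source object is given, temporarily map
 * the corresponding file page into exec_map and copyout() the bytes.
 * Used by __elfN(map_insert)() for the unaligned edges of a segment.
 */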
static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot,
    vm_prot_t max)
{
    int error, rv;
    vm_offset_t off;
    vm_offset_t data_buf = 0;

    /*
     * Create the page if it doesn't exist yet. Ignore errors.
     */
    vm_map_lock(map);
    vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
        max, 0);
    vm_map_unlock(map);

    /*
     * Find the page from the underlying object.
     */
    if (object) {
        vm_object_reference(object);
        rv = vm_map_find(exec_map,
                object,
                trunc_page(offset),
                &data_buf,
                PAGE_SIZE,
                TRUE,
                VM_PROT_READ,
                VM_PROT_ALL,
                MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
        if (rv != KERN_SUCCESS) {
            vm_object_deallocate(object);
            return (rv);
        }

        off = offset - trunc_page(offset);
        error = copyout((caddr_t)data_buf + off, (caddr_t)start,
            end - start);
        vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
        if (error) {
            return (KERN_FAILURE);
        }
    }

    return (KERN_SUCCESS);
}

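/*
 * Insert [start, end) into "map". Unaligned head and tail pages go
 * through __elfN(map_partial)(); for the page-aligned middle we either
 * vm_map_insert() the object directly (when the file offset is page
 * aligned) or fall back to a page-by-page bounce through exec_map.
 */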
static int
__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot,
    vm_prot_t max, int cow)
{
    vm_offset_t data_buf, off;
    vm_size_t sz;
    int error, rv;

    if (start != trunc_page(start)) {
        rv = __elfN(map_partial)(map, object, offset, start,
            round_page(start), prot, max);
        if (rv)
            return (rv);
        offset += round_page(start) - start;
        start = round_page(start);
    }
    if (end != round_page(end)) {
        rv = __elfN(map_partial)(map, object, offset +
            trunc_page(end) - start, trunc_page(end), end, prot, max);
        if (rv)
            return (rv);
        end = trunc_page(end);
    }
    if (end > start) {
        if (offset & PAGE_MASK) {
            /*
             * The mapping is not page aligned. This means we have
             * to copy the data. Sigh.
             */
            rv = vm_map_find(map, 0, 0, &start, end - start,
                FALSE, prot, max, 0);
            if (rv)
                return (rv);
            data_buf = 0;
            while (start < end) {
                vm_object_reference(object);
                rv = vm_map_find(exec_map,
                        object,
                        trunc_page(offset),
                        &data_buf,
                        2 * PAGE_SIZE,
                        TRUE,
                        VM_PROT_READ,
                        VM_PROT_ALL,
                        (MAP_COPY_ON_WRITE
                         | MAP_PREFAULT_PARTIAL));
                if (rv != KERN_SUCCESS) {
                    vm_object_deallocate(object);
                    return (rv);
                }
                off = offset - trunc_page(offset);
                sz = end - start;
                if (sz > PAGE_SIZE)
                    sz = PAGE_SIZE;
                error = copyout((caddr_t)data_buf + off,
                    (caddr_t)start, sz);
                vm_map_remove(exec_map, data_buf,
                    data_buf + 2 * PAGE_SIZE);
                if (error) {
                    return (KERN_FAILURE);
                }
                start += sz;
            }
            rv = KERN_SUCCESS;
        } else {
            vm_map_lock(map);
            rv = vm_map_insert(map, object, offset, start, end,
                prot, max, cow);
            vm_map_unlock(map);
        }
        return (rv);
    } else {
        return (KERN_SUCCESS);
    }
}

static int
__elfN(load_section)(struct proc *p, struct vmspace *vmspace,
    struct vnode *vp, vm_object_t object, vm_offset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
    size_t pagesize)
{
    size_t map_len;
    vm_offset_t map_addr;
    int error, rv, cow;
    size_t copy_len;
    vm_offset_t file_addr;
    vm_offset_t data_buf = 0;

    GIANT_REQUIRED;

    error = 0;

    /*
     * It's necessary to fail if the filsz + offset taken from the
     * header is greater than the actual file pager object's size.
     * If we were to allow this, then the vm_map_find() below would
     * walk right off the end of the file object and into the ether.
     *
     * While I'm here, might as well check for something else that
     * is invalid: filsz cannot be greater than memsz.
     */
    if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
        filsz > memsz) {
        uprintf("elf_load_section: truncated ELF file\n");
        return (ENOEXEC);
    }

#define trunc_page_ps(va, ps)	((va) & ~(ps - 1))
#define round_page_ps(va, ps)	(((va) + (ps - 1)) & ~(ps - 1))
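/*
 * Both macros assume "ps" is a power of two. For example, with a
 * pagesize of 8192 (0x2000): trunc_page_ps(0x2345, 8192) == 0x2000
 * and round_page_ps(0x2345, 8192) == 0x4000.
 */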

    map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
    file_addr = trunc_page_ps(offset, pagesize);

    /*
     * We have two choices. We can either clear the data in the last page
     * of an oversized mapping, or we can start the anon mapping a page
     * early and copy the initialized data into that first page. We
     * choose the second.
     */
    if (memsz > filsz)
        map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
    else
        map_len = round_page_ps(offset + filsz, pagesize) - file_addr;

    if (map_len != 0) {
        vm_object_reference(object);

        /* cow flags: don't dump readonly sections in core */
        cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
            (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

        rv = __elfN(map_insert)(&vmspace->vm_map,
                object,
                file_addr,	/* file offset */
                map_addr,	/* virtual start */
                map_addr + map_len,	/* virtual end */
                prot,
                VM_PROT_ALL,
                cow);
        if (rv != KERN_SUCCESS) {
            vm_object_deallocate(object);
            return (EINVAL);
        }

        /* we can stop now if we've covered it all */
        if (memsz == filsz) {
            return (0);
        }
    }

    /*
     * We have to get the remaining bit of the file into the first part
     * of the oversized map segment. This is normally because the .data
     * segment in the file is extended to provide bss. It's a neat idea
     * to try and save a page, but it's a pain in the behind to implement.
     */
    copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
    map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
    map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
        map_addr;

    /* This had damn well better be true! */
    if (map_len != 0) {
        rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
            map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
        if (rv != KERN_SUCCESS) {
            return (EINVAL);
        }
    }

    if (copy_len != 0) {
        vm_offset_t off;

        vm_object_reference(object);
        rv = vm_map_find(exec_map,
                object,
                trunc_page(offset + filsz),
                &data_buf,
                PAGE_SIZE,
                TRUE,
                VM_PROT_READ,
                VM_PROT_ALL,
                MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
        if (rv != KERN_SUCCESS) {
            vm_object_deallocate(object);
            return (EINVAL);
        }

        /* send the page fragment to user space */
        off = trunc_page_ps(offset + filsz, pagesize) -
            trunc_page(offset + filsz);
        error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
            copy_len);
        vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
        if (error) {
            return (error);
        }
    }

    /*
     * Set it to the specified protection.
     * XXX had better undo the damage from pasting over the cracks here!
     */
    vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
        round_page(map_addr + map_len), prot, FALSE);

    return (error);
}

/*
 * Load the file "file" into memory. It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out. On entry, it specifies
 * the address where a shared object should be loaded. If the file is
 * an executable, this value is ignored. On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only. On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize)
{
    struct {
        struct nameidata nd;
        struct vattr attr;
        struct image_params image_params;
    } *tempdata;
    const Elf_Ehdr *hdr = NULL;
    const Elf_Phdr *phdr = NULL;
    struct nameidata *nd;
    struct vmspace *vmspace = p->p_vmspace;
    struct vattr *attr;
    struct image_params *imgp;
    vm_prot_t prot;
    u_long rbase;
    u_long base_addr = 0;
    int error, i, numsegs;

    if (curthread->td_proc != p)
        panic("elf_load_file - thread");	/* XXXKSE DIAGNOSTIC */

    tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
    nd = &tempdata->nd;
    attr = &tempdata->attr;
    imgp = &tempdata->image_params;

    /*
     * Initialize part of the common data
     */
    imgp->proc = p;
    imgp->userspace_argv = NULL;
    imgp->userspace_envv = NULL;
    imgp->attr = attr;
    imgp->firstpage = NULL;
    imgp->image_header = NULL;
    imgp->object = NULL;
    imgp->execlabel = NULL;

    /* XXXKSE */
    NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread);

    if ((error = namei(nd)) != 0) {
        nd->ni_vp = NULL;
        goto fail;
    }
    NDFREE(nd, NDF_ONLY_PNBUF);
    imgp->vp = nd->ni_vp;

    /*
     * Check permissions, modes, uid, etc on the file, and "open" it.
     */
    error = exec_check_permissions(imgp);
    if (error) {
        VOP_UNLOCK(nd->ni_vp, 0, curthread);	/* XXXKSE */
        goto fail;
    }

    error = exec_map_first_page(imgp);
    /*
     * Also make certain that the interpreter stays the same, so set
     * its VV_TEXT flag, too.
     */
    if (error == 0)
        nd->ni_vp->v_vflag |= VV_TEXT;

    VOP_GETVOBJECT(nd->ni_vp, &imgp->object);
    vm_object_reference(imgp->object);

    VOP_UNLOCK(nd->ni_vp, 0, curthread);	/* XXXKSE */
    if (error)
        goto fail;

    hdr = (const Elf_Ehdr *)imgp->image_header;
    if ((error = __elfN(check_header)(hdr)) != 0)
        goto fail;
    if (hdr->e_type == ET_DYN)
        rbase = *addr;
    else if (hdr->e_type == ET_EXEC)
        rbase = 0;
    else {
        error = ENOEXEC;
        goto fail;
    }

    /* Only support headers that fit within first page for now */
    /* (multiplication of two Elf_Half fields will not overflow) */
    if ((hdr->e_phoff > PAGE_SIZE) ||
        (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
        error = ENOEXEC;
        goto fail;
    }

    phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

    for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
        if (phdr[i].p_type == PT_LOAD) {	/* Loadable segment */
            prot = 0;
            if (phdr[i].p_flags & PF_X)
                prot |= VM_PROT_EXECUTE;
            if (phdr[i].p_flags & PF_W)
                prot |= VM_PROT_WRITE;
            if (phdr[i].p_flags & PF_R)
                prot |= VM_PROT_READ;

            if ((error = __elfN(load_section)(p, vmspace,
                nd->ni_vp, imgp->object, phdr[i].p_offset,
                (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
                phdr[i].p_memsz, phdr[i].p_filesz, prot,
                pagesize)) != 0)
                goto fail;
            /*
             * Establish the base address if this is the
             * first segment.
             */
            if (numsegs == 0)
                base_addr = trunc_page(phdr[i].p_vaddr +
                    rbase);
            numsegs++;
        }
    }
    *addr = base_addr;
    *entry = (unsigned long)hdr->e_entry + rbase;

fail:
    if (imgp->firstpage)
        exec_unmap_first_page(imgp);
    if (imgp->object)
        vm_object_deallocate(imgp->object);

    if (nd->ni_vp)
        vrele(nd->ni_vp);

    free(tempdata, M_TEMP);

    return (error);
}

static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
    const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
    const Elf_Phdr *phdr;
    Elf_Auxargs *elf_auxargs = NULL;
    struct vmspace *vmspace;
    vm_prot_t prot;
    u_long text_size = 0, data_size = 0, total_size = 0;
    u_long text_addr = 0, data_addr = 0;
    u_long seg_size, seg_addr;
    u_long addr, entry = 0, proghdr = 0;
    int error, i;
    const char *interp = NULL;
    Elf_Brandinfo *brand_info;
    char *path;
    struct thread *td = curthread;
    struct sysentvec *sv;

    GIANT_REQUIRED;

    /*
     * Do we have a valid ELF header?
     */
    if (__elfN(check_header)(hdr) != 0 || hdr->e_type != ET_EXEC)
        return (-1);

    /*
     * From here on down, we return an errno, not -1, as we've
     * detected an ELF file.
     */

    if ((hdr->e_phoff > PAGE_SIZE) ||
        (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
        /* Only support headers in first page for now */
        return (ENOEXEC);
    }
    phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

    /*
     * From this point on, we may have resources that need to be freed.
     */

    VOP_UNLOCK(imgp->vp, 0, td);

    for (i = 0; i < hdr->e_phnum; i++) {
        switch (phdr[i].p_type) {
        case PT_INTERP:	/* Path to interpreter */
            if (phdr[i].p_filesz > MAXPATHLEN ||
                phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
                error = ENOEXEC;
                goto fail;
            }
            interp = imgp->image_header + phdr[i].p_offset;
            break;
        default:
            break;
        }
    }

    brand_info = __elfN(get_brandinfo)(hdr, interp);
    if (brand_info == NULL) {
        uprintf("ELF binary type \"%u\" not known.\n",
            hdr->e_ident[EI_OSABI]);
        error = ENOEXEC;
        goto fail;
    }
    sv = brand_info->sysvec;
    if (interp != NULL && brand_info->interp_newpath != NULL)
        interp = brand_info->interp_newpath;

    if ((error = exec_extract_strings(imgp)) != 0)
        goto fail;

    exec_new_vmspace(imgp, sv);

    vmspace = imgp->proc->p_vmspace;

    for (i = 0; i < hdr->e_phnum; i++) {
        switch (phdr[i].p_type) {
        case PT_LOAD:	/* Loadable segment */
            prot = 0;
            if (phdr[i].p_flags & PF_X)
                prot |= VM_PROT_EXECUTE;
            if (phdr[i].p_flags & PF_W)
                prot |= VM_PROT_WRITE;
            if (phdr[i].p_flags & PF_R)
                prot |= VM_PROT_READ;

#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
            /*
             * Some x86 binaries assume read == executable,
             * notably the M3 runtime and therefore cvsup.
             */
            if (prot & VM_PROT_READ)
                prot |= VM_PROT_EXECUTE;
#endif

            if ((error = __elfN(load_section)(imgp->proc, vmspace,
                imgp->vp, imgp->object, phdr[i].p_offset,
                (caddr_t)(uintptr_t)phdr[i].p_vaddr,
                phdr[i].p_memsz, phdr[i].p_filesz, prot,
                sv->sv_pagesize)) != 0)
                goto fail;

            /*
             * If this segment contains the program headers,
             * remember their virtual address for the AT_PHDR
             * aux entry. Static binaries don't usually include
             * a PT_PHDR entry.
             */
            if (phdr[i].p_offset == 0 &&
                hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
                <= phdr[i].p_filesz)
                proghdr = phdr[i].p_vaddr + hdr->e_phoff;

            seg_addr = trunc_page(phdr[i].p_vaddr);
            seg_size = round_page(phdr[i].p_memsz +
                phdr[i].p_vaddr - seg_addr);

            /*
             * Is this .text or .data? We can't use
             * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
             * alpha terribly and possibly does other bad
             * things so we stick to the old way of figuring
             * it out: If the segment contains the program
             * entry point, it's a text segment, otherwise it
             * is a data segment.
             *
             * Note that obreak() assumes that data_addr +
             * data_size == end of data load area, and the ELF
             * file format expects segments to be sorted by
             * address. If multiple data segments exist, the
             * last one will be used.
             */
            if (hdr->e_entry >= phdr[i].p_vaddr &&
                hdr->e_entry < (phdr[i].p_vaddr +
                phdr[i].p_memsz)) {
                text_size = seg_size;
                text_addr = seg_addr;
                entry = (u_long)hdr->e_entry;
            } else {
                data_size = seg_size;
                data_addr = seg_addr;
            }
            total_size += seg_size;
            break;
        case PT_PHDR:	/* Program header table info */
            proghdr = phdr[i].p_vaddr;
            break;
        default:
            break;
        }
    }

    if (data_addr == 0 && data_size == 0) {
        data_addr = text_addr;
        data_size = text_size;
    }

    /*
     * Check limits. It should be safe to check the
     * limits after loading the segments since we do
     * not actually fault in all the segment pages.
     */
    PROC_LOCK(imgp->proc);
    if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
        text_size > maxtsiz ||
        total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
        PROC_UNLOCK(imgp->proc);
        error = ENOMEM;
        goto fail;
    }

    vmspace->vm_tsize = text_size >> PAGE_SHIFT;
    vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
    vmspace->vm_dsize = data_size >> PAGE_SHIFT;
    vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

    /*
     * We load the dynamic linker where a userland call
     * to mmap(0, ...) would put it. The rationale behind this
     * calculation is that it leaves room for the heap to grow to
     * its maximum allowed size.
     */
    addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
        lim_max(imgp->proc, RLIMIT_DATA));
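    /*
     * For example, with the common 512MB default data limit on i386
     * this places the interpreter 512MB past the start of the data
     * segment, beyond anything obreak() can hand out.
     */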
    PROC_UNLOCK(imgp->proc);

    imgp->entry_addr = entry;

    imgp->proc->p_sysent = sv;
    if (interp != NULL && brand_info->emul_path != NULL &&
        brand_info->emul_path[0] != '\0') {
        path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
        snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
            interp);
        error = __elfN(load_file)(imgp->proc, path, &addr,
            &imgp->entry_addr, sv->sv_pagesize);
        free(path, M_TEMP);
        if (error == 0)
            interp = NULL;
    }
    if (interp != NULL) {
        error = __elfN(load_file)(imgp->proc, interp, &addr,
            &imgp->entry_addr, sv->sv_pagesize);
        if (error != 0) {
            uprintf("ELF interpreter %s not found\n", interp);
            goto fail;
        }
    }

    /*
     * Construct auxargs table (used by the fixup routine)
     */
    elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
    elf_auxargs->execfd = -1;
    elf_auxargs->phdr = proghdr;
    elf_auxargs->phent = hdr->e_phentsize;
    elf_auxargs->phnum = hdr->e_phnum;
    elf_auxargs->pagesz = PAGE_SIZE;
    elf_auxargs->base = addr;
    elf_auxargs->flags = 0;
    elf_auxargs->entry = entry;
    elf_auxargs->trace = elf_trace;

    imgp->auxargs = elf_auxargs;
    imgp->interpreted = 0;

fail:
    vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
    return (error);
}

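/* Selects suword32() or suword64() to match the image's ABI word size. */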
#define suword	__CONCAT(suword, __ELF_WORD_SIZE)

int
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
{
    Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
    Elf_Addr *base;
    Elf_Addr *pos;

    base = (Elf_Addr *)*stack_base;
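    /*
     * *stack_base points at argv[0]. Skipping the argc + envc pointer
     * slots plus the two NULL terminators lands "pos" at the start of
     * the aux vector; argc itself is pushed below argv[] at the end.
     */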
    pos = base + (imgp->argc + imgp->envc + 2);

    if (args->trace) {
        AUXARGS_ENTRY(pos, AT_DEBUG, 1);
    }
    if (args->execfd != -1) {
        AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
    }
    AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
    AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
    AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
    AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
    AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
    AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
    AUXARGS_ENTRY(pos, AT_BASE, args->base);
    AUXARGS_ENTRY(pos, AT_NULL, 0);

    free(imgp->auxargs, M_TEMP);
    imgp->auxargs = NULL;

    base--;
    suword(base, (long)imgp->argc);
    *stack_base = (register_t *)base;
    return (0);
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
    Elf_Phdr *phdr;		/* Program header to fill in */
    Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
    int count;		/* Count of writable segments. */
    size_t size;	/* Total size of all writable segments. */
};

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_writable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
    int, void *, size_t);
static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
static void __elfN(putnote)(void *, size_t *, const char *, int,
    const void *, size_t);

extern int osreldate;

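/*
 * Core file layout: ELF header, program headers (one PT_NOTE followed
 * by a PT_LOAD per writable segment), the note data, then the raw
 * segment contents. The headers are built by __elfN(puthdr)() below.
 */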
int
__elfN(coredump)(td, vp, limit)
    struct thread *td;
    struct vnode *vp;
    off_t limit;
{
    struct ucred *cred = td->td_ucred;
    int error = 0;
    struct sseg_closure seginfo;
    void *hdr;
    size_t hdrsize;

    /* Size the program segments. */
    seginfo.count = 0;
    seginfo.size = 0;
    each_writable_segment(td, cb_size_segment, &seginfo);

    /*
     * Calculate the size of the core file header area by making
     * a dry run of generating it. Nothing is written, but the
     * size is calculated.
     */
    hdrsize = 0;
    __elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);

    if (hdrsize + seginfo.size >= limit)
        return (EFAULT);

    /*
     * Allocate memory for building the header, fill it up,
     * and write it out.
     */
    hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
    if (hdr == NULL) {
        return (EINVAL);
    }
    error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);

    /* Write the contents of all of the writable segments. */
    if (error == 0) {
        Elf_Phdr *php;
        off_t offset;
        int i;

        php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
        offset = hdrsize;
        for (i = 0; i < seginfo.count; i++) {
            error = vn_rdwr_inchunks(UIO_WRITE, vp,
                (caddr_t)(uintptr_t)php->p_vaddr,
                php->p_filesz, offset, UIO_USERSPACE,
                IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
                curthread);	/* XXXKSE */
            if (error != 0)
                break;
            offset += php->p_filesz;
            php++;
        }
    }
    free(hdr, M_TEMP);

    return (error);
}

/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(entry, closure)
    vm_map_entry_t entry;
    void *closure;
{
    struct phdr_closure *phc = (struct phdr_closure *)closure;
    Elf_Phdr *phdr = phc->phdr;

    phc->offset = round_page(phc->offset);

    phdr->p_type = PT_LOAD;
    phdr->p_offset = phc->offset;
    phdr->p_vaddr = entry->start;
    phdr->p_paddr = 0;
    phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
    phdr->p_align = PAGE_SIZE;
    phdr->p_flags = 0;
    if (entry->protection & VM_PROT_READ)
        phdr->p_flags |= PF_R;
    if (entry->protection & VM_PROT_WRITE)
        phdr->p_flags |= PF_W;
    if (entry->protection & VM_PROT_EXECUTE)
        phdr->p_flags |= PF_X;

    phc->offset += phdr->p_filesz;
    phc->phdr++;
}

/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(entry, closure)
    vm_map_entry_t entry;
    void *closure;
{
    struct sseg_closure *ssc = (struct sseg_closure *)closure;

    ssc->count++;
    ssc->size += entry->end - entry->start;
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_writable_segment(td, func, closure)
    struct thread *td;
    segment_callback func;
    void *closure;
{
    struct proc *p = td->td_proc;
    vm_map_t map = &p->p_vmspace->vm_map;
    vm_map_entry_t entry;

    for (entry = map->header.next; entry != &map->header;
        entry = entry->next) {
        vm_object_t obj;

        /*
         * Don't dump inaccessible mappings; deal with legacy
         * coredump mode.
         *
         * Note that read-only segments related to the elf binary
         * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
         * need to arbitrarily ignore such segments.
         */
        if (elf_legacy_coredump) {
            if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
                continue;
        } else {
            if ((entry->protection & VM_PROT_ALL) == 0)
                continue;
        }

        /*
         * Don't include memory segments in the core dump if
         * MAP_NOCORE was set via mmap(2) or MADV_NOCORE via
         * madvise(2). Do not dump submaps (i.e. parts of the
         * kernel map).
         */
        if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
            continue;

        if ((obj = entry->object.vm_object) == NULL)
            continue;

        /* Find the deepest backing object. */
        while (obj->backing_object != NULL)
            obj = obj->backing_object;

        /* Ignore memory-mapped devices and such things. */
        if (obj->type != OBJT_DEFAULT &&
            obj->type != OBJT_SWAP &&
            obj->type != OBJT_VNODE)
            continue;

        (*func)(entry, closure);
    }
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
    struct thread *td;
    struct vnode *vp;
    struct ucred *cred;
    int numsegs;
    size_t hdrsize;
    void *hdr;
{
    size_t off;

    /* Fill in the header. */
    bzero(hdr, hdrsize);
    off = 0;
    __elfN(puthdr)(td, hdr, &off, numsegs);

    /* Write it to the core file. */
    return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
        UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
        td));	/* XXXKSE */
}

static void
__elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
{
    struct {
        prstatus_t status;
        prfpregset_t fpregset;
        prpsinfo_t psinfo;
    } *tempdata;
    prstatus_t *status;
    prfpregset_t *fpregset;
    prpsinfo_t *psinfo;
    struct proc *p;
    struct thread *thr;
    size_t ehoff, noteoff, notesz, phoff;

    p = td->td_proc;

    ehoff = *off;
    *off += sizeof(Elf_Ehdr);

    phoff = *off;
    *off += (numsegs + 1) * sizeof(Elf_Phdr);

    noteoff = *off;
    /*
     * Don't allocate space for the notes if we're just calculating
     * the size of the header. We also don't collect the data.
     */
    if (dst != NULL) {
        tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
        status = &tempdata->status;
        fpregset = &tempdata->fpregset;
        psinfo = &tempdata->psinfo;
    } else {
        tempdata = NULL;
        status = NULL;
        fpregset = NULL;
        psinfo = NULL;
    }

    if (dst != NULL) {
        psinfo->pr_version = PRPSINFO_VERSION;
        psinfo->pr_psinfosz = sizeof(prpsinfo_t);
        strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
        /*
         * XXX - We don't fill in the command line arguments properly
         * yet.
         */
        strlcpy(psinfo->pr_psargs, p->p_comm,
            sizeof(psinfo->pr_psargs));
    }
    __elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
        sizeof *psinfo);

    /*
     * To have the debugger select the right thread (LWP) as the initial
     * thread, we dump the state of the thread passed to us in td first.
     * This is the thread that caused the core dump and is thus likely
     * the thread one wants selected in the debugger.
     */
    thr = td;
    while (thr != NULL) {
        if (dst != NULL) {
            status->pr_version = PRSTATUS_VERSION;
            status->pr_statussz = sizeof(prstatus_t);
            status->pr_gregsetsz = sizeof(gregset_t);
            status->pr_fpregsetsz = sizeof(fpregset_t);
            status->pr_osreldate = osreldate;
            status->pr_cursig = p->p_sig;
            status->pr_pid = thr->td_tid;
            fill_regs(thr, &status->pr_reg);
            fill_fpregs(thr, fpregset);
        }
        __elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
            sizeof *status);
        __elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
            sizeof *fpregset);
33
34#include <sys/param.h>
35#include <sys/exec.h>
36#include <sys/fcntl.h>
37#include <sys/imgact.h>
38#include <sys/imgact_elf.h>
39#include <sys/kernel.h>
40#include <sys/lock.h>
41#include <sys/malloc.h>
42#include <sys/mutex.h>
43#include <sys/mman.h>
44#include <sys/namei.h>
45#include <sys/pioctl.h>
46#include <sys/proc.h>
47#include <sys/procfs.h>
48#include <sys/resourcevar.h>
49#include <sys/systm.h>
50#include <sys/signalvar.h>
51#include <sys/stat.h>
52#include <sys/sx.h>
53#include <sys/syscall.h>
54#include <sys/sysctl.h>
55#include <sys/sysent.h>
56#include <sys/vnode.h>
57
58#include <vm/vm.h>
59#include <vm/vm_kern.h>
60#include <vm/vm_param.h>
61#include <vm/pmap.h>
62#include <vm/vm_map.h>
63#include <vm/vm_object.h>
64#include <vm/vm_extern.h>
65
66#include <machine/elf.h>
67#include <machine/md_var.h>
68
69#define OLD_EI_BRAND 8
70
71static int __elfN(check_header)(const Elf_Ehdr *hdr);
72static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
73 const char *interp);
74static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
75 u_long *entry, size_t pagesize);
76static int __elfN(load_section)(struct proc *p,
77 struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
78 vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
79 vm_prot_t prot, size_t pagesize);
80static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
81
82SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
83 "");
84
85int __elfN(fallback_brand) = -1;
86SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
87 fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
88 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
89TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
90 &__elfN(fallback_brand));
91
92static int elf_trace = 0;
93SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");
94
95static int elf_legacy_coredump = 0;
96SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
97 &elf_legacy_coredump, 0, "");
98
99static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
100
101int
102__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
103{
104 int i;
105
106 for (i = 0; i < MAX_BRANDS; i++) {
107 if (elf_brand_list[i] == NULL) {
108 elf_brand_list[i] = entry;
109 break;
110 }
111 }
112 if (i == MAX_BRANDS)
113 return (-1);
114 return (0);
115}
116
117int
118__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
119{
120 int i;
121
122 for (i = 0; i < MAX_BRANDS; i++) {
123 if (elf_brand_list[i] == entry) {
124 elf_brand_list[i] = NULL;
125 break;
126 }
127 }
128 if (i == MAX_BRANDS)
129 return (-1);
130 return (0);
131}
132
133int
134__elfN(brand_inuse)(Elf_Brandinfo *entry)
135{
136 struct proc *p;
137 int rval = FALSE;
138
139 sx_slock(&allproc_lock);
140 LIST_FOREACH(p, &allproc, p_list) {
141 if (p->p_sysent == entry->sysvec) {
142 rval = TRUE;
143 break;
144 }
145 }
146 sx_sunlock(&allproc_lock);
147
148 return (rval);
149}
150
151static Elf_Brandinfo *
152__elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
153{
154 Elf_Brandinfo *bi;
155 int i;
156
157 /*
158 * We support three types of branding -- (1) the ELF EI_OSABI field
159 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
160 * branding w/in the ELF header, and (3) path of the `interp_path'
161 * field. We should also look for an ".note.ABI-tag" ELF section now
162 * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
163 */
164
165 /* If the executable has a brand, search for it in the brand list. */
166 for (i = 0; i < MAX_BRANDS; i++) {
167 bi = elf_brand_list[i];
168 if (bi != NULL && hdr->e_machine == bi->machine &&
169 (hdr->e_ident[EI_OSABI] == bi->brand ||
170 strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
171 bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
172 return (bi);
173 }
174
175 /* Lacking a known brand, search for a recognized interpreter. */
176 if (interp != NULL) {
177 for (i = 0; i < MAX_BRANDS; i++) {
178 bi = elf_brand_list[i];
179 if (bi != NULL && hdr->e_machine == bi->machine &&
180 strcmp(interp, bi->interp_path) == 0)
181 return (bi);
182 }
183 }
184
185 /* Lacking a recognized interpreter, try the default brand */
186 for (i = 0; i < MAX_BRANDS; i++) {
187 bi = elf_brand_list[i];
188 if (bi != NULL && hdr->e_machine == bi->machine &&
189 __elfN(fallback_brand) == bi->brand)
190 return (bi);
191 }
192 return (NULL);
193}
194
195static int
196__elfN(check_header)(const Elf_Ehdr *hdr)
197{
198 Elf_Brandinfo *bi;
199 int i;
200
201 if (!IS_ELF(*hdr) ||
202 hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
203 hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
204 hdr->e_ident[EI_VERSION] != EV_CURRENT ||
205 hdr->e_phentsize != sizeof(Elf_Phdr) ||
206 hdr->e_version != ELF_TARG_VER)
207 return (ENOEXEC);
208
209 /*
210 * Make sure we have at least one brand for this machine.
211 */
212
213 for (i = 0; i < MAX_BRANDS; i++) {
214 bi = elf_brand_list[i];
215 if (bi != NULL && bi->machine == hdr->e_machine)
216 break;
217 }
218 if (i == MAX_BRANDS)
219 return (ENOEXEC);
220
221 return (0);
222}
223
224static int
225__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
226 vm_offset_t start, vm_offset_t end, vm_prot_t prot,
227 vm_prot_t max)
228{
229 int error, rv;
230 vm_offset_t off;
231 vm_offset_t data_buf = 0;
232
233 /*
234 * Create the page if it doesn't exist yet. Ignore errors.
235 */
236 vm_map_lock(map);
237 vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
238 max, 0);
239 vm_map_unlock(map);
240
241 /*
242 * Find the page from the underlying object.
243 */
244 if (object) {
245 vm_object_reference(object);
246 rv = vm_map_find(exec_map,
247 object,
248 trunc_page(offset),
249 &data_buf,
250 PAGE_SIZE,
251 TRUE,
252 VM_PROT_READ,
253 VM_PROT_ALL,
254 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
255 if (rv != KERN_SUCCESS) {
256 vm_object_deallocate(object);
257 return (rv);
258 }
259
260 off = offset - trunc_page(offset);
261 error = copyout((caddr_t)data_buf + off, (caddr_t)start,
262 end - start);
263 vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
264 if (error) {
265 return (KERN_FAILURE);
266 }
267 }
268
269 return (KERN_SUCCESS);
270}
271
272static int
273__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
274 vm_offset_t start, vm_offset_t end, vm_prot_t prot,
275 vm_prot_t max, int cow)
276{
277 vm_offset_t data_buf, off;
278 vm_size_t sz;
279 int error, rv;
280
281 if (start != trunc_page(start)) {
282 rv = __elfN(map_partial)(map, object, offset, start,
283 round_page(start), prot, max);
284 if (rv)
285 return (rv);
286 offset += round_page(start) - start;
287 start = round_page(start);
288 }
289 if (end != round_page(end)) {
290 rv = __elfN(map_partial)(map, object, offset +
291 trunc_page(end) - start, trunc_page(end), end, prot, max);
292 if (rv)
293 return (rv);
294 end = trunc_page(end);
295 }
296 if (end > start) {
297 if (offset & PAGE_MASK) {
298 /*
299 * The mapping is not page aligned. This means we have
300 * to copy the data. Sigh.
301 */
302 rv = vm_map_find(map, 0, 0, &start, end - start,
303 FALSE, prot, max, 0);
304 if (rv)
305 return (rv);
306 data_buf = 0;
307 while (start < end) {
308 vm_object_reference(object);
309 rv = vm_map_find(exec_map,
310 object,
311 trunc_page(offset),
312 &data_buf,
313 2 * PAGE_SIZE,
314 TRUE,
315 VM_PROT_READ,
316 VM_PROT_ALL,
317 (MAP_COPY_ON_WRITE
318 | MAP_PREFAULT_PARTIAL));
319 if (rv != KERN_SUCCESS) {
320 vm_object_deallocate(object);
321 return (rv);
322 }
323 off = offset - trunc_page(offset);
324 sz = end - start;
325 if (sz > PAGE_SIZE)
326 sz = PAGE_SIZE;
327 error = copyout((caddr_t)data_buf + off,
328 (caddr_t)start, sz);
329 vm_map_remove(exec_map, data_buf,
330 data_buf + 2 * PAGE_SIZE);
331 if (error) {
332 return (KERN_FAILURE);
333 }
334 start += sz;
335 }
336 rv = KERN_SUCCESS;
337 } else {
338 vm_map_lock(map);
339 rv = vm_map_insert(map, object, offset, start, end,
340 prot, max, cow);
341 vm_map_unlock(map);
342 }
343 return (rv);
344 } else {
345 return (KERN_SUCCESS);
346 }
347}
348
349static int
350__elfN(load_section)(struct proc *p, struct vmspace *vmspace,
351 struct vnode *vp, vm_object_t object, vm_offset_t offset,
352 caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
353 size_t pagesize)
354{
355 size_t map_len;
356 vm_offset_t map_addr;
357 int error, rv, cow;
358 size_t copy_len;
359 vm_offset_t file_addr;
360 vm_offset_t data_buf = 0;
361
362 GIANT_REQUIRED;
363
364 error = 0;
365
366 /*
367 * It's necessary to fail if the filsz + offset taken from the
368 * header is greater than the actual file pager object's size.
369 * If we were to allow this, then the vm_map_find() below would
370 * walk right off the end of the file object and into the ether.
371 *
372 * While I'm here, might as well check for something else that
373 * is invalid: filsz cannot be greater than memsz.
374 */
375 if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
376 filsz > memsz) {
377 uprintf("elf_load_section: truncated ELF file\n");
378 return (ENOEXEC);
379 }
380
381#define trunc_page_ps(va, ps) ((va) & ~(ps - 1))
382#define round_page_ps(va, ps) (((va) + (ps - 1)) & ~(ps - 1))
383
384 map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
385 file_addr = trunc_page_ps(offset, pagesize);
386
387 /*
388 * We have two choices. We can either clear the data in the last page
389 * of an oversized mapping, or we can start the anon mapping a page
390 * early and copy the initialized data into that first page. We
391 * choose the second..
392 */
393 if (memsz > filsz)
394 map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
395 else
396 map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
397
398 if (map_len != 0) {
399 vm_object_reference(object);
400
401 /* cow flags: don't dump readonly sections in core */
402 cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
403 (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
404
405 rv = __elfN(map_insert)(&vmspace->vm_map,
406 object,
407 file_addr, /* file offset */
408 map_addr, /* virtual start */
409 map_addr + map_len,/* virtual end */
410 prot,
411 VM_PROT_ALL,
412 cow);
413 if (rv != KERN_SUCCESS) {
414 vm_object_deallocate(object);
415 return (EINVAL);
416 }
417
418 /* we can stop now if we've covered it all */
419 if (memsz == filsz) {
420 return (0);
421 }
422 }
423
424
425 /*
426 * We have to get the remaining bit of the file into the first part
427 * of the oversized map segment. This is normally because the .data
428 * segment in the file is extended to provide bss. It's a neat idea
429 * to try and save a page, but it's a pain in the behind to implement.
430 */
431 copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
432 map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
433 map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
434 map_addr;
435
436 /* This had damn well better be true! */
437 if (map_len != 0) {
438 rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
439 map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
440 if (rv != KERN_SUCCESS) {
441 return (EINVAL);
442 }
443 }
444
445 if (copy_len != 0) {
446 vm_offset_t off;
447 vm_object_reference(object);
448 rv = vm_map_find(exec_map,
449 object,
450 trunc_page(offset + filsz),
451 &data_buf,
452 PAGE_SIZE,
453 TRUE,
454 VM_PROT_READ,
455 VM_PROT_ALL,
456 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
457 if (rv != KERN_SUCCESS) {
458 vm_object_deallocate(object);
459 return (EINVAL);
460 }
461
462 /* send the page fragment to user space */
463 off = trunc_page_ps(offset + filsz, pagesize) -
464 trunc_page(offset + filsz);
465 error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
466 copy_len);
467 vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
468 if (error) {
469 return (error);
470 }
471 }
472
473 /*
474 * set it to the specified protection.
475 * XXX had better undo the damage from pasting over the cracks here!
476 */
477 vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
478 round_page(map_addr + map_len), prot, FALSE);
479
480 return (error);
481}
482
483/*
484 * Load the file "file" into memory. It may be either a shared object
485 * or an executable.
486 *
487 * The "addr" reference parameter is in/out. On entry, it specifies
488 * the address where a shared object should be loaded. If the file is
489 * an executable, this value is ignored. On exit, "addr" specifies
490 * where the file was actually loaded.
491 *
492 * The "entry" reference parameter is out only. On exit, it specifies
493 * the entry point for the loaded file.
494 */
495static int
496__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
497 u_long *entry, size_t pagesize)
498{
499 struct {
500 struct nameidata nd;
501 struct vattr attr;
502 struct image_params image_params;
503 } *tempdata;
504 const Elf_Ehdr *hdr = NULL;
505 const Elf_Phdr *phdr = NULL;
506 struct nameidata *nd;
507 struct vmspace *vmspace = p->p_vmspace;
508 struct vattr *attr;
509 struct image_params *imgp;
510 vm_prot_t prot;
511 u_long rbase;
512 u_long base_addr = 0;
513 int error, i, numsegs;
514
515 if (curthread->td_proc != p)
516 panic("elf_load_file - thread"); /* XXXKSE DIAGNOSTIC */
517
518 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
519 nd = &tempdata->nd;
520 attr = &tempdata->attr;
521 imgp = &tempdata->image_params;
522
523 /*
524 * Initialize part of the common data
525 */
526 imgp->proc = p;
527 imgp->userspace_argv = NULL;
528 imgp->userspace_envv = NULL;
529 imgp->attr = attr;
530 imgp->firstpage = NULL;
531 imgp->image_header = NULL;
532 imgp->object = NULL;
533 imgp->execlabel = NULL;
534
535 /* XXXKSE */
536 NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread);
537
538 if ((error = namei(nd)) != 0) {
539 nd->ni_vp = NULL;
540 goto fail;
541 }
542 NDFREE(nd, NDF_ONLY_PNBUF);
543 imgp->vp = nd->ni_vp;
544
545 /*
546 * Check permissions, modes, uid, etc on the file, and "open" it.
547 */
548 error = exec_check_permissions(imgp);
549 if (error) {
550 VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
551 goto fail;
552 }
553
554 error = exec_map_first_page(imgp);
555 /*
556 * Also make certain that the interpreter stays the same, so set
557 * its VV_TEXT flag, too.
558 */
559 if (error == 0)
560 nd->ni_vp->v_vflag |= VV_TEXT;
561
562 VOP_GETVOBJECT(nd->ni_vp, &imgp->object);
563 vm_object_reference(imgp->object);
564
565 VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
566 if (error)
567 goto fail;
568
569 hdr = (const Elf_Ehdr *)imgp->image_header;
570 if ((error = __elfN(check_header)(hdr)) != 0)
571 goto fail;
572 if (hdr->e_type == ET_DYN)
573 rbase = *addr;
574 else if (hdr->e_type == ET_EXEC)
575 rbase = 0;
576 else {
577 error = ENOEXEC;
578 goto fail;
579 }
580
581 /* Only support headers that fit within first page for now */
582 /* (multiplication of two Elf_Half fields will not overflow) */
583 if ((hdr->e_phoff > PAGE_SIZE) ||
584 (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
585 error = ENOEXEC;
586 goto fail;
587 }
588
589 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
590
591 for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
592 if (phdr[i].p_type == PT_LOAD) { /* Loadable segment */
593 prot = 0;
594 if (phdr[i].p_flags & PF_X)
595 prot |= VM_PROT_EXECUTE;
596 if (phdr[i].p_flags & PF_W)
597 prot |= VM_PROT_WRITE;
598 if (phdr[i].p_flags & PF_R)
599 prot |= VM_PROT_READ;
600
601 if ((error = __elfN(load_section)(p, vmspace,
602 nd->ni_vp, imgp->object, phdr[i].p_offset,
603 (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
604 phdr[i].p_memsz, phdr[i].p_filesz, prot,
605 pagesize)) != 0)
606 goto fail;
607 /*
608 * Establish the base address if this is the
609 * first segment.
610 */
611 if (numsegs == 0)
612 base_addr = trunc_page(phdr[i].p_vaddr +
613 rbase);
614 numsegs++;
615 }
616 }
617 *addr = base_addr;
618 *entry = (unsigned long)hdr->e_entry + rbase;
619
620fail:
621 if (imgp->firstpage)
622 exec_unmap_first_page(imgp);
623 if (imgp->object)
624 vm_object_deallocate(imgp->object);
625
626 if (nd->ni_vp)
627 vrele(nd->ni_vp);
628
629 free(tempdata, M_TEMP);
630
631 return (error);
632}
633
634static int
635__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
636{
637 const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
638 const Elf_Phdr *phdr;
639 Elf_Auxargs *elf_auxargs = NULL;
640 struct vmspace *vmspace;
641 vm_prot_t prot;
642 u_long text_size = 0, data_size = 0, total_size = 0;
643 u_long text_addr = 0, data_addr = 0;
644 u_long seg_size, seg_addr;
645 u_long addr, entry = 0, proghdr = 0;
646 int error, i;
647 const char *interp = NULL;
648 Elf_Brandinfo *brand_info;
649 char *path;
650 struct thread *td = curthread;
651 struct sysentvec *sv;
652
653 GIANT_REQUIRED;
654
655 /*
656 * Do we have a valid ELF header ?
657 */
658 if (__elfN(check_header)(hdr) != 0 || hdr->e_type != ET_EXEC)
659 return (-1);
660
661 /*
662 * From here on down, we return an errno, not -1, as we've
663 * detected an ELF file.
664 */
665
666 if ((hdr->e_phoff > PAGE_SIZE) ||
667 (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
668 /* Only support headers in first page for now */
669 return (ENOEXEC);
670 }
671 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
672
673 /*
674 * From this point on, we may have resources that need to be freed.
675 */
676
677 VOP_UNLOCK(imgp->vp, 0, td);
678
679 for (i = 0; i < hdr->e_phnum; i++) {
680 switch (phdr[i].p_type) {
681 case PT_INTERP: /* Path to interpreter */
682 if (phdr[i].p_filesz > MAXPATHLEN ||
683 phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
684 error = ENOEXEC;
685 goto fail;
686 }
687 interp = imgp->image_header + phdr[i].p_offset;
688 break;
689 default:
690 break;
691 }
692 }
693
694 brand_info = __elfN(get_brandinfo)(hdr, interp);
695 if (brand_info == NULL) {
696 uprintf("ELF binary type \"%u\" not known.\n",
697 hdr->e_ident[EI_OSABI]);
698 error = ENOEXEC;
699 goto fail;
700 }
701 sv = brand_info->sysvec;
702 if (interp != NULL && brand_info->interp_newpath != NULL)
703 interp = brand_info->interp_newpath;
704
705 if ((error = exec_extract_strings(imgp)) != 0)
706 goto fail;
707
708 exec_new_vmspace(imgp, sv);
709
710 vmspace = imgp->proc->p_vmspace;
711
712 for (i = 0; i < hdr->e_phnum; i++) {
713 switch (phdr[i].p_type) {
714 case PT_LOAD: /* Loadable segment */
715 prot = 0;
716 if (phdr[i].p_flags & PF_X)
717 prot |= VM_PROT_EXECUTE;
718 if (phdr[i].p_flags & PF_W)
719 prot |= VM_PROT_WRITE;
720 if (phdr[i].p_flags & PF_R)
721 prot |= VM_PROT_READ;
722
723#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
724 /*
725 * Some x86 binaries assume read == executable,
726 * notably the M3 runtime and therefore cvsup
727 */
728 if (prot & VM_PROT_READ)
729 prot |= VM_PROT_EXECUTE;
730#endif
731
732 if ((error = __elfN(load_section)(imgp->proc, vmspace,
733 imgp->vp, imgp->object, phdr[i].p_offset,
734 (caddr_t)(uintptr_t)phdr[i].p_vaddr,
735 phdr[i].p_memsz, phdr[i].p_filesz, prot,
736 sv->sv_pagesize)) != 0)
737 goto fail;
738
739 /*
740 * If this segment contains the program headers,
741 * remember their virtual address for the AT_PHDR
742 * aux entry. Static binaries don't usually include
743 * a PT_PHDR entry.
744 */
745 if (phdr[i].p_offset == 0 &&
746 hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
747 <= phdr[i].p_filesz)
748 proghdr = phdr[i].p_vaddr + hdr->e_phoff;
749
750 seg_addr = trunc_page(phdr[i].p_vaddr);
751 seg_size = round_page(phdr[i].p_memsz +
752 phdr[i].p_vaddr - seg_addr);
753
754 /*
755 * Is this .text or .data? We can't use
756 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
757 * alpha terribly and possibly does other bad
758 * things so we stick to the old way of figuring
759 * it out: If the segment contains the program
760 * entry point, it's a text segment, otherwise it
761 * is a data segment.
762 *
763 * Note that obreak() assumes that data_addr +
764 * data_size == end of data load area, and the ELF
765 * file format expects segments to be sorted by
766 * address. If multiple data segments exist, the
767 * last one will be used.
768 */
769 if (hdr->e_entry >= phdr[i].p_vaddr &&
770 hdr->e_entry < (phdr[i].p_vaddr +
771 phdr[i].p_memsz)) {
772 text_size = seg_size;
773 text_addr = seg_addr;
774 entry = (u_long)hdr->e_entry;
775 } else {
776 data_size = seg_size;
777 data_addr = seg_addr;
778 }
779 total_size += seg_size;
780 break;
781 case PT_PHDR: /* Program header table info */
782 proghdr = phdr[i].p_vaddr;
783 break;
784 default:
785 break;
786 }
787 }
788
789 if (data_addr == 0 && data_size == 0) {
790 data_addr = text_addr;
791 data_size = text_size;
792 }
793
794 /*
795 * Check limits. It should be safe to check the
796 * limits after loading the segments since we do
797 * not actually fault in all the segments pages.
798 */
799 PROC_LOCK(imgp->proc);
800 if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
801 text_size > maxtsiz ||
802 total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
803 PROC_UNLOCK(imgp->proc);
804 error = ENOMEM;
805 goto fail;
806 }
807
808 vmspace->vm_tsize = text_size >> PAGE_SHIFT;
809 vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
810 vmspace->vm_dsize = data_size >> PAGE_SHIFT;
811 vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
812
813 /*
814 * We load the dynamic linker where a userland call
815 * to mmap(0, ...) would put it. The rationale behind this
816 * calculation is that it leaves room for the heap to grow to
817 * its maximum allowed size.
818 */
819 addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
820 lim_max(imgp->proc, RLIMIT_DATA));
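	/*
	 * Illustrative arithmetic: with vm_daddr at 0x08100000 and a
	 * hard RLIMIT_DATA of 512MB, addr becomes 0x28100000, the first
	 * page above the largest possible heap.
	 */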
821 PROC_UNLOCK(imgp->proc);
822
823 imgp->entry_addr = entry;
824
825 imgp->proc->p_sysent = sv;
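	/*
	 * If the brand provides an emulation prefix, try the interpreter
	 * there first.  Illustrative example: emul_path "/compat/linux"
	 * with interp "/lib/ld-linux.so.2" gives
	 * "/compat/linux/lib/ld-linux.so.2"; the unprefixed path is only
	 * tried if that load fails.
	 */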
826 if (interp != NULL && brand_info->emul_path != NULL &&
827 brand_info->emul_path[0] != '\0') {
828 path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
829 snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
830 interp);
831 error = __elfN(load_file)(imgp->proc, path, &addr,
832 &imgp->entry_addr, sv->sv_pagesize);
833 free(path, M_TEMP);
834 if (error == 0)
835 interp = NULL;
836 }
837 if (interp != NULL) {
838 error = __elfN(load_file)(imgp->proc, interp, &addr,
839 &imgp->entry_addr, sv->sv_pagesize);
840 if (error != 0) {
841 uprintf("ELF interpreter %s not found\n", interp);
842 goto fail;
843 }
844 }
845
846 /*
847 * Construct auxargs table (used by the fixup routine)
848 */
849 elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
850 elf_auxargs->execfd = -1;
851 elf_auxargs->phdr = proghdr;
852 elf_auxargs->phent = hdr->e_phentsize;
853 elf_auxargs->phnum = hdr->e_phnum;
854 elf_auxargs->pagesz = PAGE_SIZE;
855 elf_auxargs->base = addr;
856 elf_auxargs->flags = 0;
857 elf_auxargs->entry = entry;
858 elf_auxargs->trace = elf_trace;
859
860 imgp->auxargs = elf_auxargs;
861 imgp->interpreted = 0;
862
863fail:
864 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
865 return (error);
866}
867
868#define suword __CONCAT(suword, __ELF_WORD_SIZE)
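/*
 * suword() expands to suword32() or suword64() to match the word size
 * of the image being executed; it stores a single such word in user
 * space.
 */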
869
870int
871__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
872{
873 Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
874 Elf_Addr *base;
875 Elf_Addr *pos;
876
877 base = (Elf_Addr *)*stack_base;
878 pos = base + (imgp->argc + imgp->envc + 2);
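	/*
	 * Sketch of the vector at *stack_base at this point:
	 *
	 *	base[0 .. argc-1]		argv[] pointers
	 *	base[argc]			NULL
	 *	base[argc+1 .. argc+envc]	envp[] pointers
	 *	base[argc+envc+1]		NULL
	 *	pos				the AT_* entries below
	 */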
879
880 if (args->trace) {
881 AUXARGS_ENTRY(pos, AT_DEBUG, 1);
882 }
883 if (args->execfd != -1) {
884 AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
885 }
886 AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
887 AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
888 AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
889 AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
890 AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
891 AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
892 AUXARGS_ENTRY(pos, AT_BASE, args->base);
893 AUXARGS_ENTRY(pos, AT_NULL, 0);
894
895 free(imgp->auxargs, M_TEMP);
896 imgp->auxargs = NULL;
897
898 base--;
899 suword(base, (long)imgp->argc);
900 *stack_base = (register_t *)base;
901 return (0);
902}
903
904/*
905 * Code for generating ELF core dumps.
906 */
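/*
 * Sketch of the resulting core file layout (not to scale):
 *
 *	Elf_Ehdr
 *	Elf_Phdr[0]		PT_NOTE, describing the note area below
 *	Elf_Phdr[1..n]		PT_LOAD, one per writable segment
 *	notes			NT_PRPSINFO, then NT_PRSTATUS and
 *				NT_FPREGSET per thread
 *	(padding to a page boundary)
 *	segment contents	the memory of each writable segment
 */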
907
908typedef void (*segment_callback)(vm_map_entry_t, void *);
909
910/* Closure for cb_put_phdr(). */
911struct phdr_closure {
912 Elf_Phdr *phdr; /* Program header to fill in */
913 Elf_Off offset; /* Offset of segment in core file */
914};
915
916/* Closure for cb_size_segment(). */
917struct sseg_closure {
918 int count; /* Count of writable segments. */
919 size_t size; /* Total size of all writable segments. */
920};
921
922static void cb_put_phdr(vm_map_entry_t, void *);
923static void cb_size_segment(vm_map_entry_t, void *);
924static void each_writable_segment(struct thread *, segment_callback, void *);
925static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
926 int, void *, size_t);
927static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
928static void __elfN(putnote)(void *, size_t *, const char *, int,
929 const void *, size_t);
930
931extern int osreldate;
932
933int
934__elfN(coredump)(td, vp, limit)
935 struct thread *td;
936 struct vnode *vp;
937 off_t limit;
938{
939 struct ucred *cred = td->td_ucred;
940 int error = 0;
941 struct sseg_closure seginfo;
942 void *hdr;
943 size_t hdrsize;
944
945 /* Size the program segments. */
946 seginfo.count = 0;
947 seginfo.size = 0;
948 each_writable_segment(td, cb_size_segment, &seginfo);
949
950 /*
951 * Calculate the size of the core file header area by making
952 * a dry run of generating it. Nothing is written, but the
953 * size is calculated.
954 */
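	/*
	 * (puthdr() only advances the offset when given a NULL buffer;
	 * the second pass, via __elfN(corehdr)() below, passes a real
	 * buffer and does the actual filling.)
	 */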
955 hdrsize = 0;
956 __elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);
957
958 if (hdrsize + seginfo.size >= limit)
959 return (EFAULT);
960
961 /*
962 * Allocate memory for building the header, fill it up,
963 * and write it out.
964 */
965 hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
966 if (hdr == NULL) {
967 return (EINVAL);
968 }
969 error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
970
971 /* Write the contents of all of the writable segments. */
972 if (error == 0) {
973 Elf_Phdr *php;
974 off_t offset;
975 int i;
976
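		/*
		 * The "+ 1" skips the PT_NOTE program header; the
		 * PT_LOAD entries written by cb_put_phdr() follow it.
		 */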
977 php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
978 offset = hdrsize;
979 for (i = 0; i < seginfo.count; i++) {
980 error = vn_rdwr_inchunks(UIO_WRITE, vp,
981 (caddr_t)(uintptr_t)php->p_vaddr,
982 php->p_filesz, offset, UIO_USERSPACE,
983 IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
984 curthread); /* XXXKSE */
985 if (error != 0)
986 break;
987 offset += php->p_filesz;
988 php++;
989 }
990 }
991 free(hdr, M_TEMP);
992
993 return (error);
994}
995
996/*
997 * A callback for each_writable_segment() to write out the segment's
998 * program header entry.
999 */
1000static void
1001cb_put_phdr(entry, closure)
1002 vm_map_entry_t entry;
1003 void *closure;
1004{
1005 struct phdr_closure *phc = (struct phdr_closure *)closure;
1006 Elf_Phdr *phdr = phc->phdr;
1007
1008 phc->offset = round_page(phc->offset);
1009
1010 phdr->p_type = PT_LOAD;
1011 phdr->p_offset = phc->offset;
1012 phdr->p_vaddr = entry->start;
1013 phdr->p_paddr = 0;
1014 phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1015 phdr->p_align = PAGE_SIZE;
1016 phdr->p_flags = 0;
1017 if (entry->protection & VM_PROT_READ)
1018 phdr->p_flags |= PF_R;
1019 if (entry->protection & VM_PROT_WRITE)
1020 phdr->p_flags |= PF_W;
1021 if (entry->protection & VM_PROT_EXECUTE)
1022 phdr->p_flags |= PF_X;
1023
1024 phc->offset += phdr->p_filesz;
1025 phc->phdr++;
1026}
1027
1028/*
1029 * A callback for each_writable_segment() to gather information about
1030 * the number of segments and their total size.
1031 */
1032static void
1033cb_size_segment(entry, closure)
1034 vm_map_entry_t entry;
1035 void *closure;
1036{
1037 struct sseg_closure *ssc = (struct sseg_closure *)closure;
1038
1039 ssc->count++;
1040 ssc->size += entry->end - entry->start;
1041}
1042
1043/*
1044 * For each writable segment in the process's memory map, call the given
1045 * function with a pointer to the map entry and some arbitrary
1046 * caller-supplied data.
1047 */
1048static void
1049each_writable_segment(td, func, closure)
1050 struct thread *td;
1051 segment_callback func;
1052 void *closure;
1053{
1054 struct proc *p = td->td_proc;
1055 vm_map_t map = &p->p_vmspace->vm_map;
1056 vm_map_entry_t entry;
1057
1058 for (entry = map->header.next; entry != &map->header;
1059 entry = entry->next) {
1060 vm_object_t obj;
1061
1062 /*
1063		 * Don't dump inaccessible mappings; handle legacy
1064		 * coredump mode.
1065		 *
1066		 * Note that read-only segments related to the ELF binary
1067		 * are marked MAP_ENTRY_NOCOREDUMP now, so we no longer
1068		 * need to arbitrarily ignore such segments.
1069 */
1070 if (elf_legacy_coredump) {
1071 if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
1072 continue;
1073 } else {
1074 if ((entry->protection & VM_PROT_ALL) == 0)
1075 continue;
1076 }
1077
1078 /*
1079		 * Don't include a memory segment in the core dump if
1080 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1081 * madvise(2). Do not dump submaps (i.e. parts of the
1082 * kernel map).
1083 */
1084 if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
1085 continue;
1086
1087 if ((obj = entry->object.vm_object) == NULL)
1088 continue;
1089
1090 /* Find the deepest backing object. */
1091 while (obj->backing_object != NULL)
1092 obj = obj->backing_object;
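		/*
		 * (Shadow chains are created by copy-on-write; the
		 * bottom-most object identifies the real backing store.)
		 */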
1093
1094 /* Ignore memory-mapped devices and such things. */
1095 if (obj->type != OBJT_DEFAULT &&
1096 obj->type != OBJT_SWAP &&
1097 obj->type != OBJT_VNODE)
1098 continue;
1099
1100 (*func)(entry, closure);
1101 }
1102}
1103
1104/*
1105 * Write the core file header to the file, including padding up to
1106 * the page boundary.
1107 */
1108static int
1109__elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
1110 struct thread *td;
1111 struct vnode *vp;
1112 struct ucred *cred;
1113 int numsegs;
1114 size_t hdrsize;
1115 void *hdr;
1116{
1117 size_t off;
1118
1119 /* Fill in the header. */
1120 bzero(hdr, hdrsize);
1121 off = 0;
1122 __elfN(puthdr)(td, hdr, &off, numsegs);
1123
1124 /* Write it to the core file. */
1125 return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
1126 UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1127 td)); /* XXXKSE */
1128}
1129
1130static void
1131__elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
1132{
1133 struct {
1134 prstatus_t status;
1135 prfpregset_t fpregset;
1136 prpsinfo_t psinfo;
1137 } *tempdata;
1138 prstatus_t *status;
1139 prfpregset_t *fpregset;
1140 prpsinfo_t *psinfo;
1141 struct proc *p;
1142 struct thread *thr;
1143 size_t ehoff, noteoff, notesz, phoff;
1144
1145 p = td->td_proc;
1146
1147 ehoff = *off;
1148 *off += sizeof(Elf_Ehdr);
1149
1150 phoff = *off;
1151 *off += (numsegs + 1) * sizeof(Elf_Phdr);
1152
1153 noteoff = *off;
1154 /*
1155 * Don't allocate space for the notes if we're just calculating
1156 * the size of the header. We also don't collect the data.
1157 */
1158 if (dst != NULL) {
1159 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
1160 status = &tempdata->status;
1161 fpregset = &tempdata->fpregset;
1162 psinfo = &tempdata->psinfo;
1163 } else {
1164 tempdata = NULL;
1165 status = NULL;
1166 fpregset = NULL;
1167 psinfo = NULL;
1168 }
1169
1170 if (dst != NULL) {
1171 psinfo->pr_version = PRPSINFO_VERSION;
1172 psinfo->pr_psinfosz = sizeof(prpsinfo_t);
1173 strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
1174 /*
1175 * XXX - We don't fill in the command line arguments properly
1176 * yet.
1177 */
1178 strlcpy(psinfo->pr_psargs, p->p_comm,
1179 sizeof(psinfo->pr_psargs));
1180 }
1181 __elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
1182 sizeof *psinfo);
1183
1184 /*
1185 * To have the debugger select the right thread (LWP) as the initial
1186 * thread, we dump the state of the thread passed to us in td first.
1187	 * This is the thread that caused the core dump and is thus
1188	 * likely the one the user wants selected in the debugger.
1189 */
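	/*
	 * Example: with threads A, B and C on p_threads and td == B,
	 * the loop below visits B, then A, then C; it restarts from the
	 * list head after td and skips td when it is seen a second time.
	 */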
1190 thr = td;
1191 while (thr != NULL) {
1192 if (dst != NULL) {
1193 status->pr_version = PRSTATUS_VERSION;
1194 status->pr_statussz = sizeof(prstatus_t);
1195 status->pr_gregsetsz = sizeof(gregset_t);
1196 status->pr_fpregsetsz = sizeof(fpregset_t);
1197 status->pr_osreldate = osreldate;
1198 status->pr_cursig = p->p_sig;
1199 status->pr_pid = thr->td_tid;
1200 fill_regs(thr, &status->pr_reg);
1201 fill_fpregs(thr, fpregset);
1202 }
1203 __elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
1204 sizeof *status);
1205 __elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
1206 sizeof *fpregset);
1207 /*
1208 * Allow for MD specific notes, as well as any MD
1209 * specific preparations for writing MI notes.
1210 */
1211 __elfN(dump_thread)(thr, dst, off);
1212
1213 thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
1214 TAILQ_NEXT(thr, td_plist);
1215 if (thr == td)
1216 thr = TAILQ_NEXT(thr, td_plist);
1217 }
1218
1219 notesz = *off - noteoff;
1220
1221 if (dst != NULL)
1222 free(tempdata, M_TEMP);
1223
1224 /* Align up to a page boundary for the program segments. */
1225 *off = round_page(*off);
1226
1227 if (dst != NULL) {
1228 Elf_Ehdr *ehdr;
1229 Elf_Phdr *phdr;
1230 struct phdr_closure phc;
1231
1232 /*
1233 * Fill in the ELF header.
1234 */
1235 ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
1236 ehdr->e_ident[EI_MAG0] = ELFMAG0;
1237 ehdr->e_ident[EI_MAG1] = ELFMAG1;
1238 ehdr->e_ident[EI_MAG2] = ELFMAG2;
1239 ehdr->e_ident[EI_MAG3] = ELFMAG3;
1240 ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1241 ehdr->e_ident[EI_DATA] = ELF_DATA;
1242 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1243 ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1244 ehdr->e_ident[EI_ABIVERSION] = 0;
1245 ehdr->e_ident[EI_PAD] = 0;
1246 ehdr->e_type = ET_CORE;
1247 ehdr->e_machine = ELF_ARCH;
1248 ehdr->e_version = EV_CURRENT;
1249 ehdr->e_entry = 0;
1250 ehdr->e_phoff = phoff;
1251 ehdr->e_flags = 0;
1252 ehdr->e_ehsize = sizeof(Elf_Ehdr);
1253 ehdr->e_phentsize = sizeof(Elf_Phdr);
1254 ehdr->e_phnum = numsegs + 1;
1255 ehdr->e_shentsize = sizeof(Elf_Shdr);
1256 ehdr->e_shnum = 0;
1257 ehdr->e_shstrndx = SHN_UNDEF;
1258
1259 /*
1260 * Fill in the program header entries.
1261 */
1262 phdr = (Elf_Phdr *)((char *)dst + phoff);
1263
1264		/* The note segment. */
1265 phdr->p_type = PT_NOTE;
1266 phdr->p_offset = noteoff;
1267 phdr->p_vaddr = 0;
1268 phdr->p_paddr = 0;
1269 phdr->p_filesz = notesz;
1270 phdr->p_memsz = 0;
1271 phdr->p_flags = 0;
1272 phdr->p_align = 0;
1273 phdr++;
1274
1275 /* All the writable segments from the program. */
1276 phc.phdr = phdr;
1277 phc.offset = *off;
1278 each_writable_segment(td, cb_put_phdr, &phc);
1279 }
1280}
1281
1282static void
1283__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
1284 const void *desc, size_t descsz)
1285{
1286 Elf_Note note;
1287
1288 note.n_namesz = strlen(name) + 1;
1289 note.n_descsz = descsz;
1290 note.n_type = type;
1291 if (dst != NULL)
1292 bcopy(&note, (char *)dst + *off, sizeof note);
1293 *off += sizeof note;
1294 if (dst != NULL)
1295 bcopy(name, (char *)dst + *off, note.n_namesz);
1296 *off += roundup2(note.n_namesz, sizeof(Elf_Size));
1297 if (dst != NULL)
1298 bcopy(desc, (char *)dst + *off, note.n_descsz);
1299 *off += roundup2(note.n_descsz, sizeof(Elf_Size));
1300}
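/*
 * Worked example (illustrative, assuming __ELF_WORD_SIZE == 64 so that
 * sizeof(Elf_Size) == 8): a note named "FreeBSD" (n_namesz = 8) with a
 * 148-byte descriptor occupies 12 (header) + 8 (name, already aligned)
 * + 152 (descriptor rounded up) = 172 bytes, by which *off advances.
 */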
1301
1302/*
1303 * Tell kern_execve.c about it, with a little help from the linker.
1304 */
1305static struct execsw __elfN(execsw) = {
1306 __CONCAT(exec_, __elfN(imgact)),
1307 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
1308};
1309EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
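/*
 * Roughly, EXEC_SET() records a pointer to the execsw above in the
 * "execsw_set" linker set that kern_execve.c walks when probing image
 * activators; a sketch of the effect (see <sys/imgact.h> for the real
 * macro):
 *
 *	static const struct execsw *elfXX_execsw_p = &__elfN(execsw);
 *	DATA_SET(execsw_set, elfXX_execsw_p);
 *
 * where elfXX_execsw_p is an illustrative name.
 */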