--- head/libexec/rtld-elf/map_object.c	(revision 225699)
+++ head/libexec/rtld-elf/map_object.c	(revision 230784)
 /*-
  * Copyright 1996-1998 John D. Polstra.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * $FreeBSD: head/libexec/rtld-elf/map_object.c 225699 2011-09-20 21:49:54Z kib $
+ * $FreeBSD: head/libexec/rtld-elf/map_object.c 230784 2012-01-30 19:52:17Z kib $
  */
 
 #include <sys/param.h>
 #include <sys/mman.h>
 #include <sys/stat.h>
 
 #include <errno.h>
 #include <stddef.h>
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
 
 #include "debug.h"
 #include "rtld.h"
 
 static Elf_Ehdr *get_elf_header (int, const char *);
 static int convert_prot(int);   /* Elf flags -> mmap protection */
 static int convert_flags(int);  /* Elf flags -> mmap flags */
 
 /*
  * Map a shared object into memory.  The "fd" argument is a file descriptor,
  * which must be open on the object and positioned at its beginning.
  * The "path" argument is a pathname that is used only for error messages.
  *
  * The return value is a pointer to a newly-allocated Obj_Entry structure
  * for the shared object.  Returns NULL on failure.
  */
 Obj_Entry *
 map_object(int fd, const char *path, const struct stat *sb)
 {
     Obj_Entry *obj;
     Elf_Ehdr *hdr;
     int i;
     Elf_Phdr *phdr;
     Elf_Phdr *phlimit;
     Elf_Phdr **segs;
     int nsegs;
     Elf_Phdr *phdyn;
     Elf_Phdr *phinterp;
     Elf_Phdr *phtls;
     caddr_t mapbase;
     size_t mapsize;
     Elf_Off base_offset;
     Elf_Addr base_vaddr;
     Elf_Addr base_vlimit;
     caddr_t base_addr;
     Elf_Off data_offset;
     Elf_Addr data_vaddr;
     Elf_Addr data_vlimit;
     caddr_t data_addr;
     int data_prot;
     int data_flags;
     Elf_Addr clear_vaddr;
     caddr_t clear_addr;
     caddr_t clear_page;
     Elf_Addr phdr_vaddr;
     size_t nclear, phsize;
     Elf_Addr bss_vaddr;
     Elf_Addr bss_vlimit;
     caddr_t bss_addr;
     Elf_Word stack_flags;
+    Elf_Addr relro_page;
+    size_t relro_size;
 
     hdr = get_elf_header(fd, path);
     if (hdr == NULL)
         return (NULL);
 
     /*
      * Scan the program header entries, and save key information.
      *
      * We expect that the loadable segments are ordered by load address.
      */
     phdr = (Elf_Phdr *) ((char *)hdr + hdr->e_phoff);
     phsize = hdr->e_phnum * sizeof (phdr[0]);
     phlimit = phdr + hdr->e_phnum;
     nsegs = -1;
     phdyn = phinterp = phtls = NULL;
     phdr_vaddr = 0;
+    relro_page = 0;
+    relro_size = 0;
     segs = alloca(sizeof(segs[0]) * hdr->e_phnum);
     stack_flags = RTLD_DEFAULT_STACK_PF_EXEC | PF_R | PF_W;
     while (phdr < phlimit) {
         switch (phdr->p_type) {
 
         case PT_INTERP:
             phinterp = phdr;
             break;
 
         case PT_LOAD:
             segs[++nsegs] = phdr;
             if ((segs[nsegs]->p_align & (PAGE_SIZE - 1)) != 0) {
                 _rtld_error("%s: PT_LOAD segment %d not page-aligned",
                   path, nsegs);
                 return NULL;
             }
             break;
 
         case PT_PHDR:
             phdr_vaddr = phdr->p_vaddr;
             phsize = phdr->p_memsz;
             break;
 
         case PT_DYNAMIC:
             phdyn = phdr;
             break;
 
         case PT_TLS:
             phtls = phdr;
             break;
 
         case PT_GNU_STACK:
             stack_flags = phdr->p_flags;
             break;
+
+        case PT_GNU_RELRO:
+            relro_page = phdr->p_vaddr;
+            relro_size = phdr->p_memsz;
+            break;
         }
 
         ++phdr;
     }
     if (phdyn == NULL) {
         _rtld_error("%s: object is not dynamically-linked", path);
         return NULL;
     }
 
     if (nsegs < 0) {
         _rtld_error("%s: too few PT_LOAD segments", path);
         return NULL;
     }
 
     /*
      * Map the entire address space of the object, to stake out our
      * contiguous region, and to establish the base address for relocation.
      */
     base_offset = trunc_page(segs[0]->p_offset);
     base_vaddr = trunc_page(segs[0]->p_vaddr);
     base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
     mapsize = base_vlimit - base_vaddr;
     base_addr = hdr->e_type == ET_EXEC ? (caddr_t) base_vaddr : NULL;
 
     mapbase = mmap(base_addr, mapsize, PROT_NONE, MAP_ANON | MAP_PRIVATE |
       MAP_NOCORE, -1, 0);
     if (mapbase == (caddr_t) -1) {
         _rtld_error("%s: mmap of entire address space failed: %s",
           path, strerror(errno));
         return NULL;
     }
     if (base_addr != NULL && mapbase != base_addr) {
         _rtld_error("%s: mmap returned wrong address: wanted %p, got %p",
           path, base_addr, mapbase);
         munmap(mapbase, mapsize);
         return NULL;
     }
 
     for (i = 0; i <= nsegs; i++) {
         /* Overlay the segment onto the proper region. */
         data_offset = trunc_page(segs[i]->p_offset);
         data_vaddr = trunc_page(segs[i]->p_vaddr);
         data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
         data_addr = mapbase + (data_vaddr - base_vaddr);
         data_prot = convert_prot(segs[i]->p_flags);
         data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
         if (mmap(data_addr, data_vlimit - data_vaddr, data_prot,
           data_flags, fd, data_offset) == (caddr_t) -1) {
             _rtld_error("%s: mmap of data failed: %s", path, strerror(errno));
             return NULL;
         }
 
         /* Do BSS setup */
         if (segs[i]->p_filesz != segs[i]->p_memsz) {
 
             /* Clear any BSS in the last page of the segment. */
             clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
             clear_addr = mapbase + (clear_vaddr - base_vaddr);
             clear_page = mapbase + (trunc_page(clear_vaddr) - base_vaddr);
 
             if ((nclear = data_vlimit - clear_vaddr) > 0) {
                 /* Make sure the end of the segment is writable */
                 if ((data_prot & PROT_WRITE) == 0 && -1 ==
                   mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE)) {
                     _rtld_error("%s: mprotect failed: %s", path,
                       strerror(errno));
                     return NULL;
                 }
 
                 memset(clear_addr, 0, nclear);
 
                 /* Reset the data protection back */
                 if ((data_prot & PROT_WRITE) == 0)
                     mprotect(clear_page, PAGE_SIZE, data_prot);
             }
 
             /* Overlay the BSS segment onto the proper region. */
             bss_vaddr = data_vlimit;
             bss_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
             bss_addr = mapbase + (bss_vaddr - base_vaddr);
             if (bss_vlimit > bss_vaddr) {   /* There is something to do */
                 if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
                   data_flags | MAP_ANON, -1, 0) == (caddr_t)-1) {
                     _rtld_error("%s: mmap of bss failed: %s", path,
                       strerror(errno));
                     return NULL;
                 }
             }
         }
 
         if (phdr_vaddr == 0 && data_offset <= hdr->e_phoff &&
           (data_vlimit - data_vaddr + data_offset) >=
           (hdr->e_phoff + hdr->e_phnum * sizeof (Elf_Phdr))) {
             phdr_vaddr = data_vaddr + hdr->e_phoff - data_offset;
         }
     }
 
     obj = obj_new();
     if (sb != NULL) {
         obj->dev = sb->st_dev;
         obj->ino = sb->st_ino;
     }
     obj->mapbase = mapbase;
     obj->mapsize = mapsize;
     obj->textsize = round_page(segs[0]->p_vaddr + segs[0]->p_memsz) -
       base_vaddr;
     obj->vaddrbase = base_vaddr;
     obj->relocbase = mapbase - base_vaddr;
     obj->dynamic = (const Elf_Dyn *) (obj->relocbase + phdyn->p_vaddr);
     if (hdr->e_entry != 0)
         obj->entry = (caddr_t) (obj->relocbase + hdr->e_entry);
     if (phdr_vaddr != 0) {
         obj->phdr = (const Elf_Phdr *) (obj->relocbase + phdr_vaddr);
     } else {
         obj->phdr = malloc(phsize);
         if (obj->phdr == NULL) {
             obj_free(obj);
             _rtld_error("%s: cannot allocate program header", path);
             return NULL;
         }
         memcpy((char *)obj->phdr, (char *)hdr + hdr->e_phoff, phsize);
         obj->phdr_alloc = true;
     }
     obj->phsize = phsize;
     if (phinterp != NULL)
         obj->interp = (const char *) (obj->relocbase + phinterp->p_vaddr);
     if (phtls != NULL) {
         tls_dtv_generation++;
         obj->tlsindex = ++tls_max_index;
         obj->tlssize = phtls->p_memsz;
         obj->tlsalign = phtls->p_align;
         obj->tlsinitsize = phtls->p_filesz;
         obj->tlsinit = mapbase + phtls->p_vaddr;
     }
     obj->stack_flags = stack_flags;
+    obj->relro_page = obj->relocbase + trunc_page(relro_page);
+    obj->relro_size = round_page(relro_size);
+
     return obj;
 }
 
 static Elf_Ehdr *
 get_elf_header (int fd, const char *path)
 {
     static union {
         Elf_Ehdr hdr;
         char buf[PAGE_SIZE];
     } u;
     ssize_t nbytes;
 
     if ((nbytes = pread(fd, u.buf, PAGE_SIZE, 0)) == -1) {
         _rtld_error("%s: read error: %s", path, strerror(errno));
         return NULL;
     }
 
     /* Make sure the file is valid */
     if (nbytes < (ssize_t)sizeof(Elf_Ehdr) || !IS_ELF(u.hdr)) {
         _rtld_error("%s: invalid file format", path);
         return NULL;
     }
     if (u.hdr.e_ident[EI_CLASS] != ELF_TARG_CLASS
       || u.hdr.e_ident[EI_DATA] != ELF_TARG_DATA) {
         _rtld_error("%s: unsupported file layout", path);
         return NULL;
     }
     if (u.hdr.e_ident[EI_VERSION] != EV_CURRENT
       || u.hdr.e_version != EV_CURRENT) {
         _rtld_error("%s: unsupported file version", path);
         return NULL;
     }
     if (u.hdr.e_type != ET_EXEC && u.hdr.e_type != ET_DYN) {
         _rtld_error("%s: unsupported file type", path);
         return NULL;
     }
     if (u.hdr.e_machine != ELF_TARG_MACH) {
         _rtld_error("%s: unsupported machine", path);
         return NULL;
     }
 
     /*
      * We rely on the program header being in the first page.  This is
      * not strictly required by the ABI specification, but it seems to
      * always be true in practice.  And, it simplifies things considerably.
      */
     if (u.hdr.e_phentsize != sizeof(Elf_Phdr)) {
         _rtld_error(
           "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)", path);
         return NULL;
     }
     if (u.hdr.e_phoff + u.hdr.e_phnum * sizeof(Elf_Phdr) > (size_t)nbytes) {
         _rtld_error("%s: program header too large", path);
         return NULL;
     }
 
     return (&u.hdr);
 }
 
 void
 obj_free(Obj_Entry *obj)
 {
     Objlist_Entry *elm;
 
     if (obj->tls_done)
         free_tls_offset(obj);
     while (obj->needed != NULL) {
         Needed_Entry *needed = obj->needed;
         obj->needed = needed->next;
         free(needed);
     }
     while (!STAILQ_EMPTY(&obj->names)) {
         Name_Entry *entry = STAILQ_FIRST(&obj->names);
         STAILQ_REMOVE_HEAD(&obj->names, link);
         free(entry);
     }
     while (!STAILQ_EMPTY(&obj->dldags)) {
         elm = STAILQ_FIRST(&obj->dldags);
         STAILQ_REMOVE_HEAD(&obj->dldags, link);
         free(elm);
     }
     while (!STAILQ_EMPTY(&obj->dagmembers)) {
         elm = STAILQ_FIRST(&obj->dagmembers);
         STAILQ_REMOVE_HEAD(&obj->dagmembers, link);
         free(elm);
     }
     if (obj->vertab)
         free(obj->vertab);
     if (obj->origin_path)
         free(obj->origin_path);
     if (obj->z_origin)
         free(obj->rpath);
     if (obj->priv)
         free(obj->priv);
     if (obj->path)
         free(obj->path);
     if (obj->phdr_alloc)
         free((void *)obj->phdr);
     free(obj);
 }
 
 Obj_Entry *
 obj_new(void)
 {
     Obj_Entry *obj;
 
     obj = CNEW(Obj_Entry);
     STAILQ_INIT(&obj->dldags);
     STAILQ_INIT(&obj->dagmembers);
     STAILQ_INIT(&obj->names);
     return obj;
 }
 
 /*
  * Given a set of ELF protection flags, return the corresponding protection
  * flags for MMAP.
  */
 static int
 convert_prot(int elfflags)
 {
     int prot = 0;
     if (elfflags & PF_R)
         prot |= PROT_READ;
     if (elfflags & PF_W)
         prot |= PROT_WRITE;
     if (elfflags & PF_X)
         prot |= PROT_EXEC;
     return prot;
 }
 
 static int
 convert_flags(int elfflags)
 {
     int flags = MAP_PRIVATE;    /* All mappings are private */
 
     /*
      * Readonly mappings are marked "MAP_NOCORE", because they can be
      * reconstructed by a debugger.
      */
     if (!(elfflags & PF_W))
         flags |= MAP_NOCORE;
     return flags;
 }
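
The PT_GNU_RELRO handling added in this revision only records the page-aligned
bounds of the "read-only after relocation" segment in the Obj_Entry; the
protection change itself happens later, once relocations are done. Below is a
minimal sketch of that enforcement step, not the actual companion rtld.c
change: the mapping and the p_vaddr/p_memsz values are hypothetical, but the
trunc_page()/round_page() arithmetic mirrors the two lines added to
map_object().

#include <sys/mman.h>

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
    size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
    size_t mapsize = 4 * pagesz;

    /* Stand-in for the object's mapping; rtld would have mmap'd the file. */
    char *mapbase = mmap(NULL, mapsize, PROT_READ | PROT_WRITE,
      MAP_ANON | MAP_PRIVATE, -1, 0);
    if (mapbase == MAP_FAILED)
        return 1;
    memset(mapbase, 0x2a, mapsize);     /* "relocations" write here */

    /* Hypothetical PT_GNU_RELRO values, relative to the mapping base. */
    size_t relro_vaddr = pagesz + 40;   /* phdr->p_vaddr */
    size_t relro_memsz = 100;           /* phdr->p_memsz */

    /* Same arithmetic as the new code above: trunc_page() the start
       address, round_page() the size. */
    char *relro_page = mapbase + (relro_vaddr & ~(pagesz - 1));
    size_t relro_size = (relro_memsz + pagesz - 1) & ~(pagesz - 1);

    /* After relocation, the region is made read-only. */
    if (mprotect(relro_page, relro_size, PROT_READ) == -1)
        return 1;
    printf("relro region at %p, %zu bytes, now read-only\n",
      (void *)relro_page, relro_size);

    munmap(mapbase, mapsize);
    return 0;
}

One consequence of truncating the start address while rounding the size
separately, as this revision does, is that the last partial page of a
non-page-aligned RELRO segment can remain unprotected; rounding the end
address up instead would cover the whole segment.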
284 return obj;
285}
286
287static Elf_Ehdr *
288get_elf_header (int fd, const char *path)
289{
290 static union {
291 Elf_Ehdr hdr;
292 char buf[PAGE_SIZE];
293 } u;
294 ssize_t nbytes;
295
296 if ((nbytes = pread(fd, u.buf, PAGE_SIZE, 0)) == -1) {
297 _rtld_error("%s: read error: %s", path, strerror(errno));
298 return NULL;
299 }
300
301 /* Make sure the file is valid */
302 if (nbytes < (ssize_t)sizeof(Elf_Ehdr) || !IS_ELF(u.hdr)) {
303 _rtld_error("%s: invalid file format", path);
304 return NULL;
305 }
306 if (u.hdr.e_ident[EI_CLASS] != ELF_TARG_CLASS
307 || u.hdr.e_ident[EI_DATA] != ELF_TARG_DATA) {
308 _rtld_error("%s: unsupported file layout", path);
309 return NULL;
310 }
311 if (u.hdr.e_ident[EI_VERSION] != EV_CURRENT
312 || u.hdr.e_version != EV_CURRENT) {
313 _rtld_error("%s: unsupported file version", path);
314 return NULL;
315 }
316 if (u.hdr.e_type != ET_EXEC && u.hdr.e_type != ET_DYN) {
317 _rtld_error("%s: unsupported file type", path);
318 return NULL;
319 }
320 if (u.hdr.e_machine != ELF_TARG_MACH) {
321 _rtld_error("%s: unsupported machine", path);
322 return NULL;
323 }
324
325 /*
326 * We rely on the program header being in the first page. This is
327 * not strictly required by the ABI specification, but it seems to
328 * always true in practice. And, it simplifies things considerably.
329 */
330 if (u.hdr.e_phentsize != sizeof(Elf_Phdr)) {
331 _rtld_error(
332 "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)", path);
333 return NULL;
334 }
335 if (u.hdr.e_phoff + u.hdr.e_phnum * sizeof(Elf_Phdr) > (size_t)nbytes) {
336 _rtld_error("%s: program header too large", path);
337 return NULL;
338 }
339
340 return (&u.hdr);
341}
342
343void
344obj_free(Obj_Entry *obj)
345{
346 Objlist_Entry *elm;
347
348 if (obj->tls_done)
349 free_tls_offset(obj);
350 while (obj->needed != NULL) {
351 Needed_Entry *needed = obj->needed;
352 obj->needed = needed->next;
353 free(needed);
354 }
355 while (!STAILQ_EMPTY(&obj->names)) {
356 Name_Entry *entry = STAILQ_FIRST(&obj->names);
357 STAILQ_REMOVE_HEAD(&obj->names, link);
358 free(entry);
359 }
360 while (!STAILQ_EMPTY(&obj->dldags)) {
361 elm = STAILQ_FIRST(&obj->dldags);
362 STAILQ_REMOVE_HEAD(&obj->dldags, link);
363 free(elm);
364 }
365 while (!STAILQ_EMPTY(&obj->dagmembers)) {
366 elm = STAILQ_FIRST(&obj->dagmembers);
367 STAILQ_REMOVE_HEAD(&obj->dagmembers, link);
368 free(elm);
369 }
370 if (obj->vertab)
371 free(obj->vertab);
372 if (obj->origin_path)
373 free(obj->origin_path);
374 if (obj->z_origin)
375 free(obj->rpath);
376 if (obj->priv)
377 free(obj->priv);
378 if (obj->path)
379 free(obj->path);
380 if (obj->phdr_alloc)
381 free((void *)obj->phdr);
382 free(obj);
383}
384
385Obj_Entry *
386obj_new(void)
387{
388 Obj_Entry *obj;
389
390 obj = CNEW(Obj_Entry);
391 STAILQ_INIT(&obj->dldags);
392 STAILQ_INIT(&obj->dagmembers);
393 STAILQ_INIT(&obj->names);
394 return obj;
395}
396
397/*
398 * Given a set of ELF protection flags, return the corresponding protection
399 * flags for MMAP.
400 */
401static int
402convert_prot(int elfflags)
403{
404 int prot = 0;
405 if (elfflags & PF_R)
406 prot |= PROT_READ;
407 if (elfflags & PF_W)
408 prot |= PROT_WRITE;
409 if (elfflags & PF_X)
410 prot |= PROT_EXEC;
411 return prot;
412}
413
414static int
415convert_flags(int elfflags)
416{
417 int flags = MAP_PRIVATE; /* All mappings are private */
418
419 /*
420 * Readonly mappings are marked "MAP_NOCORE", because they can be
421 * reconstructed by a debugger.
422 */
423 if (!(elfflags & PF_W))
424 flags |= MAP_NOCORE;
425 return flags;
426}
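
For context on the mapping strategy both revisions share: map_object() first
reserves the object's whole address range with an anonymous PROT_NONE
mapping, then overlays each PT_LOAD segment into that reservation with
MAP_FIXED, so the segments stay contiguous relative to base_vaddr and any
gaps between them remain inaccessible. A minimal sketch of that
reserve-then-overlay pattern, assuming any ordinary readable file stands in
for the shared object (the path and sizes are hypothetical, and the
FreeBSD-specific MAP_NOCORE flag is omitted for portability):

#include <sys/mman.h>

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
    size_t mapsize = 4 * pagesz;        /* plays base_vlimit - base_vaddr */

    /* Stake out the contiguous region, as the PROT_NONE mmap above does. */
    char *mapbase = mmap(NULL, mapsize, PROT_NONE,
      MAP_ANON | MAP_PRIVATE, -1, 0);
    if (mapbase == MAP_FAILED)
        return 1;

    int fd = open("/bin/sh", O_RDONLY); /* hypothetical object file */
    if (fd == -1)
        return 1;

    /* Overlay one page of the file at the start of the reservation;
       MAP_FIXED replaces the PROT_NONE pages in place, so no hole opens
       up between segments. */
    if (mmap(mapbase, pagesz, PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0)
      == MAP_FAILED)
        return 1;
    printf("segment overlaid at %p; rest of the region stays PROT_NONE\n",
      (void *)mapbase);

    close(fd);
    munmap(mapbase, mapsize);
    return 0;
}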