/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2011, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Manuel J. Petit. All rights reserved.
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "images.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include <syscalls.h>
#include <vm_defs.h>

#include "add_ons.h"
#include "runtime_loader_private.h"


// keep in sync with app ldscript
#ifdef __x86_64__
	// runtime_loader potentially occupies 0x200000 - 0x600000 due to large
	// page segment alignment.
#	define RLD_PROGRAM_BASE	0x600000
#	define MAX_PAGE_SIZE	0x200000
#else
#	define RLD_PROGRAM_BASE	0x200000
#	define MAX_PAGE_SIZE	B_PAGE_SIZE
#endif


bool gInvalidImageIDs;

static image_queue_t sLoadedImages = {0, 0};
static image_queue_t sDisposableImages = {0, 0};
static uint32 sLoadedImageCount = 0;


//! Remaps the image ID of \a image after fork.
static status_t
update_image_id(image_t* image)
{
	int32 cookie = 0;
	image_info info;
	while (_kern_get_next_image_info(B_CURRENT_TEAM, &cookie, &info,
			sizeof(image_info)) == B_OK) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if (image->regions[i].vmstart == (addr_t)info.text) {
				image->id = info.id;
				return B_OK;
			}
		}
	}

	FATAL("Could not update image ID %" B_PRId32 " after fork()!\n", image->id);
	return B_ENTRY_NOT_FOUND;
}


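//! Appends \a image to the tail of \a queue.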
static void
enqueue_image(image_queue_t* queue, image_t* image)
{
	image->next = NULL;

	image->prev = queue->tail;
	if (queue->tail)
		queue->tail->next = image;

	queue->tail = image;
	if (!queue->head)
		queue->head = image;
}


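//! Removes \a image from \a queue, fixing up the head and tail links.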
static void
dequeue_image(image_queue_t* queue, image_t* image)
{
	if (image->next)
		image->next->prev = image->prev;
	else
		queue->tail = image->prev;

	if (image->prev)
		image->prev->next = image->next;
	else
		queue->head = image->next;

	image->prev = NULL;
	image->next = NULL;
}


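/*!	Returns the first image in \a queue whose name (or path, if \a isPath is
	true) matches \a name and whose type is contained in \a typeMask, or
	NULL if there is no such image.
*/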
static image_t*
find_image_in_queue(image_queue_t* queue, const char* name, bool isPath,
	uint32 typeMask)
{
	for (image_t* image = queue->head; image; image = image->next) {
		const char* imageName = isPath ? image->path : image->name;
		int length = isPath ? sizeof(image->path) : sizeof(image->name);

		if (!strncmp(imageName, name, length)
			&& (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
			return image;
		}
	}

	return NULL;
}


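/*!	Sets \a flagsToSet and clears \a flagsToClear on \a image and all of its
	transitive dependencies. The breadth-first traversal uses RFLAG_VISITED
	to guard against cycles; the on-stack queue is a GCC variable-length
	array sized to hold at most all loaded images.
*/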
static void
update_image_flags_recursively(image_t* image, uint32 flagsToSet,
	uint32 flagsToClear)
{
	image_t* queue[sLoadedImageCount];
	uint32 count = 0;
	uint32 index = 0;
	queue[count++] = image;
	image->flags |= RFLAG_VISITED;

	while (index < count) {
		// pop next image
		image = queue[index++];

		// push dependencies
		for (uint32 i = 0; i < image->num_needed; i++) {
			image_t* needed = image->needed[i];
			if ((needed->flags & RFLAG_VISITED) == 0) {
				queue[count++] = needed;
				needed->flags |= RFLAG_VISITED;
			}
		}
	}

	// update flags
	for (uint32 i = 0; i < count; i++) {
		queue[i]->flags = (queue[i]->flags | flagsToSet)
			& ~(flagsToClear | RFLAG_VISITED);
	}
}


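/*!	Recursively appends \a image and its not yet visited dependencies to
	\a initList, dependencies first. \a sortFlag is used to mark visited
	images and remains set on them. Returns the next free slot in
	\a initList.
*/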
static uint32
topological_sort(image_t* image, uint32 slot, image_t** initList,
	uint32 sortFlag)
{
	if (image->flags & sortFlag)
		return slot;

	image->flags |= sortFlag; /* make sure we don't visit this one */
	for (uint32 i = 0; i < image->num_needed; i++)
		slot = topological_sort(image->needed[i], slot, initList, sortFlag);

	initList[slot] = image;
	return slot + 1;
}


/*!	Finds the load address and address specifier of the given image region.
*/
static void
get_image_region_load_address(image_t* image, uint32 index, int32 lastDelta,
	bool fixed, addr_t& loadAddress, uint32& addressSpecifier)
{
	if (image->dynamic_ptr != 0 && !fixed) {
		// relocatable image... we can afford to place wherever
		if (index == 0) {
			// but only the first segment gets a free ride
			loadAddress = RLD_PROGRAM_BASE;
			addressSpecifier = B_BASE_ADDRESS;
		} else {
			loadAddress = image->regions[index].vmstart + lastDelta;
			addressSpecifier = B_EXACT_ADDRESS;
		}
	} else {
		// not relocatable, put it where it asks or die trying
		loadAddress = image->regions[index].vmstart;
		addressSpecifier = B_EXACT_ADDRESS;
	}
}


// #pragma mark -


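/*!	Allocates and initializes a new image_t with room for \a regionCount
	regions. \a path is copied verbatim, while the image name is set to the
	last component of \a name (a DT_SONAME entry may later replace it).
	Returns NULL when out of memory.
*/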
image_t*
create_image(const char* name, const char* path, int regionCount)
{
	size_t allocSize = sizeof(image_t)
		+ (regionCount - 1) * sizeof(elf_region_t);

	image_t* image = (image_t*)malloc(allocSize);
	if (image == NULL) {
		FATAL("no memory for image %s\n", path);
		return NULL;
	}

	memset(image, 0, allocSize);

	strlcpy(image->path, path, sizeof(image->path));

	// Make the last component of the supplied name the image name.
	// If present, DT_SONAME will replace this name.
	const char* lastSlash = strrchr(name, '/');
	if (lastSlash != NULL)
		strlcpy(image->name, lastSlash + 1, sizeof(image->name));
	else
		strlcpy(image->name, name, sizeof(image->name));

	image->ref_count = 1;
	image->num_regions = regionCount;

	return image;
}


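/*!	Frees \a image together with the resources it owns: the needed list, the
	version definitions, and any registered symbol patchers.
*/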
void
delete_image_struct(image_t* image)
{
#ifdef DEBUG
	size_t size = sizeof(image_t)
		+ (image->num_regions - 1) * sizeof(elf_region_t);
	memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
#endif
	free(image->needed);
	free(image->versions);

	while (RuntimeLoaderSymbolPatcher* patcher
			= image->defined_symbol_patchers) {
		image->defined_symbol_patchers = patcher->next;
		delete patcher;
	}
	while (RuntimeLoaderSymbolPatcher* patcher
			= image->undefined_symbol_patchers) {
		image->undefined_symbol_patchers = patcher->next;
		delete patcher;
	}

#ifdef DEBUG
	// overwrite images to make sure they aren't accidentally reused anywhere
	memset(image, 0xa5, size);
#endif
	free(image);
}


void
delete_image(image_t* image)
{
	if (image == NULL)
		return;

	_kern_unregister_image(image->id);
		// registered in load_container()

	delete_image_struct(image);
}


void
put_image(image_t* image)
{
	// If all references to the image are gone, add it to the disposable list
	// and remove all dependencies

	if (atomic_add(&image->ref_count, -1) == 1) {
		size_t i;

		dequeue_image(&sLoadedImages, image);
		enqueue_image(&sDisposableImages, image);
		sLoadedImageCount--;

		for (i = 0; i < image->num_needed; i++)
			put_image(image->needed[i]);
	}
}


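/*!	Maps all regions of \a image into the current team's address space,
	backed by \a fd. One contiguous address range is reserved first, so that
	the regions keep their relative offsets; the individual areas are then
	created or file-mapped within that range. If \a fixed is true, the image
	is placed at the addresses it was linked for. On success, the regions'
	delta and vmstart fields and the image's dynamic_ptr are adjusted to the
	final load addresses.
*/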
status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0,
			fixed, loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + MAX_PAGE_SIZE * 2)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%" B_PRIu32 "%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, fixed, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the
		// start of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments r/w first -- write access might be needed for
			// relocations. When we're done with those, we change the
			// protection of read-only segments back to read-only. We map
			// those segments overcommitting, since quite likely only a
			// relatively small number of pages needs to be touched and we
			// want to avoid a lot of memory being committed for them
			// temporarily, just because we have to map them writable.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP, false,
				fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}


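//! Deletes the areas of all regions of \a image and invalidates their IDs.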
void
unmap_image(image_t* image)
{
	for (uint32 i = 0; i < image->num_regions; i++) {
		_kern_delete_area(image->regions[i].id);

		image->regions[i].id = -1;
	}
}


/*!	Changes the protection of all read-only segments to actually be
	read-only. The areas have to be mapped read/write first, so that
	relocations can be applied to them.
*/
void
remap_images()
{
	for (image_t* image = sLoadedImages.head; image != NULL;
			image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if ((image->regions[i].flags & RFLAG_RW) == 0
				&& (image->regions[i].flags & RFLAG_REMAPPED) == 0) {
				// we only need to do this once, so we remember those we've
				// already mapped
				if (_kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_EXECUTE_AREA) == B_OK) {
					image->regions[i].flags |= RFLAG_REMAPPED;
				}
			}
		}
	}
}


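/*!	Computes the effective text and data segments of \a image, fills in an
	image_info, and registers the image with the kernel via
	_kern_register_image(). The resulting ID is stored in \a image.
*/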
void
register_image(image_t* image, int fd, const char* path)
{
	struct stat stat;
	image_info info;

	// TODO: set these correctly
	info.id = 0;
	info.type = image->type;
	info.sequence = 0;
	info.init_order = 0;
	info.init_routine = (void (*)())image->init_routine;
	info.term_routine = (void (*)())image->term_routine;

	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
		info.device = stat.st_dev;
		info.node = stat.st_ino;
	} else {
		info.device = -1;
		info.node = -1;
	}

	// We may have split segments into separate regions. Compute the correct
	// segments for the image info.
	addr_t textBase = 0;
	addr_t textEnd = 0;
	addr_t dataBase = 0;
	addr_t dataEnd = 0;
	for (uint32 i = 0; i < image->num_regions; i++) {
		addr_t base = image->regions[i].vmstart;
		addr_t end = base + image->regions[i].vmsize;
		if (image->regions[i].flags & RFLAG_RW) {
			// data
			if (dataBase == 0) {
				dataBase = base;
				dataEnd = end;
			} else {
				dataBase = std::min(dataBase, base);
				dataEnd = std::max(dataEnd, end);
			}
		} else {
			// text
			if (textBase == 0) {
				textBase = base;
				textEnd = end;
			} else {
				textBase = std::min(textBase, base);
				textEnd = std::max(textEnd, end);
			}
		}
	}

	strlcpy(info.name, path, sizeof(info.name));
	info.text = (void*)textBase;
	info.text_size = textEnd - textBase;
	info.data = (void*)dataBase;
	info.data_size = dataEnd - dataBase;
	info.api_version = image->api_version;
	info.abi = image->abi;
	image->id = _kern_register_image(&info, sizeof(image_info));
}


//! After fork, we lazily rebuild the image IDs of all loaded images.
status_t
update_image_ids()
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}
	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}

	gInvalidImageIDs = false;
	return B_OK;
}


image_queue_t&
get_loaded_images()
{
	return sLoadedImages;
}


image_queue_t&
get_disposable_images()
{
	return sDisposableImages;
}


uint32
count_loaded_images()
{
	return sLoadedImageCount;
}


void
enqueue_loaded_image(image_t* image)
{
	enqueue_image(&sLoadedImages, image);
	sLoadedImageCount++;
}


void
dequeue_loaded_image(image_t* image)
{
	dequeue_image(&sLoadedImages, image);
	sLoadedImageCount--;
}


void
dequeue_disposable_image(image_t* image)
{
	dequeue_image(&sDisposableImages, image);
}


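/*!	Finds a loaded image by \a name. If \a name contains a '/', it is
	matched against the image paths, otherwise against the plain image
	names. Only images whose type is contained in \a typeMask are
	considered.
*/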
image_t*
find_loaded_image_by_name(char const* name, uint32 typeMask)
{
	bool isPath = strchr(name, '/') != NULL;
	return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
}


image_t*
find_loaded_image_by_id(image_id id, bool ignoreDisposable)
{
	if (gInvalidImageIDs) {
		// After fork, we lazily rebuild the image IDs of all loaded images
		update_image_ids();
	}

	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	if (ignoreDisposable)
		return NULL;

	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	return NULL;
}


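/*!	Returns the loaded image that contains \a address in one of its regions,
	or NULL if there is none.
*/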
image_t*
find_loaded_image_by_address(addr_t address)
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			elf_region_t& region = image->regions[i];
			if (region.vmstart <= address
				&& region.vmstart - 1 + region.vmsize >= address)
				return image;
		}
	}

	return NULL;
}


void
set_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, flags, 0);
}


void
clear_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, 0, flags);
}


/*!	Returns a topologically sorted image list.

	If \a image is non-NULL, an array containing the image and all its
	transitive dependencies is returned. If \a image is NULL, all loaded images
	are returned. In either case dependencies are listed before images
	depending on them.

	\param image The image specifying the tree of images that shall be sorted.
		If NULL, all loaded images are sorted.
	\param _list On success it will be set to an array of the sorted images.
		The caller is responsible for free()ing it.
	\param sortFlag The image flag that shall be used for sorting. Images that
		already have this flag set are ignored (and their dependencies, unless
		they are also reachable via another path). The flag will be set on all
		returned images.
	\return The number of images in the returned array or an error code on
		failure.
*/
ssize_t
get_sorted_image_list(image_t* image, image_t*** _list, uint32 sortFlag)
{
	image_t** list;

	list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t*));
	if (list == NULL) {
		FATAL("memory shortage in get_sorted_image_list()");
		*_list = NULL;
		return B_NO_MEMORY;
	}

	memset(list, 0, sLoadedImageCount * sizeof(image_t*));

	*_list = list;

	if (image != NULL)
		return topological_sort(image, 0, list, sortFlag);

	// no image given -- sort all loaded images
	uint32 count = 0;
	image = sLoadedImages.head;
	while (image != NULL) {
		count = topological_sort(image, count, list, sortFlag);
		image = image->next;
	}

	return count;
}