/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2011, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Manuel J. Petit. All rights reserved.
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "images.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include <syscalls.h>
#include <image_defs.h>
#include <vm_defs.h>

#include "add_ons.h"
#include "elf_tls.h"
#include "runtime_loader_private.h"

#include <util/kernel_cpp.h>


// keep in sync with app ldscript
#ifdef __x86_64__
	// runtime_loader potentially occupies 0x200000 - 0x600000 due to large
	// page segment alignment.
#	define RLD_PROGRAM_BASE	0x600000
#	define MAX_PAGE_SIZE	0x200000
#elif __arm__
#	define RLD_PROGRAM_BASE	0x200000
#	define MAX_PAGE_SIZE	0x10000
#else
#	define RLD_PROGRAM_BASE	0x200000
#	define MAX_PAGE_SIZE	B_PAGE_SIZE
#endif


bool gInvalidImageIDs;

static image_queue_t sLoadedImages = {0, 0};
static image_queue_t sDisposableImages = {0, 0};
static uint32 sLoadedImageCount = 0;


//! Remaps the image ID of \a image after fork.
static status_t
update_image_id(image_t* image)
{
	int32 cookie = 0;
	image_info info;
	while (_kern_get_next_image_info(B_CURRENT_TEAM, &cookie, &info,
			sizeof(image_info)) == B_OK) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if (image->regions[i].vmstart == (addr_t)info.text) {
				image->id = info.id;
				return B_OK;
			}
		}
	}

	FATAL("Could not update image ID %" B_PRId32 " after fork()!\n", image->id);
	return B_ENTRY_NOT_FOUND;
}


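//! Appends \a image to the tail of the doubly linked \a queue.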
static void
enqueue_image(image_queue_t* queue, image_t* image)
{
	image->next = NULL;

	image->prev = queue->tail;
	if (queue->tail)
		queue->tail->next = image;

	queue->tail = image;
	if (!queue->head)
		queue->head = image;
}


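//! Removes \a image from \a queue, clearing the image's link pointers.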
static void
dequeue_image(image_queue_t* queue, image_t* image)
{
	if (image->next)
		image->next->prev = image->prev;
	else
		queue->tail = image->prev;

	if (image->prev)
		image->prev->next = image->next;
	else
		queue->head = image->next;

	image->prev = NULL;
	image->next = NULL;
}


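/*!	Returns the first image in \a queue that matches \a name -- compared
	against the image path if \a isPath is \c true, against the image name
	otherwise -- and whose type is covered by \a typeMask, or \c NULL if
	there is none.
*/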
static image_t*
find_image_in_queue(image_queue_t* queue, const char* name, bool isPath,
	uint32 typeMask)
{
	for (image_t* image = queue->head; image; image = image->next) {
		const char* imageName = isPath ? image->path : image->name;
		int length = isPath ? sizeof(image->path) : sizeof(image->name);

		if (!strncmp(imageName, name, length)
			&& (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
			return image;
		}
	}

	return NULL;
}


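/*!	Sets \a flagsToSet and clears \a flagsToClear on \a image and all of its
	transitive dependencies, using an iterative breadth-first traversal.
	RFLAG_VISITED serves as the visit marker and is cleared again in the
	final pass.
*/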
static void
update_image_flags_recursively(image_t* image, uint32 flagsToSet,
	uint32 flagsToClear)
{
	image_t* queue[sLoadedImageCount];
	uint32 count = 0;
	uint32 index = 0;
	queue[count++] = image;
	image->flags |= RFLAG_VISITED;

	while (index < count) {
		// pop next image
		image = queue[index++];

		// push dependencies
		for (uint32 i = 0; i < image->num_needed; i++) {
			image_t* needed = image->needed[i];
			if ((needed->flags & RFLAG_VISITED) == 0) {
				queue[count++] = needed;
				needed->flags |= RFLAG_VISITED;
			}
		}
	}

	// update flags
	for (uint32 i = 0; i < count; i++) {
		queue[i]->flags = (queue[i]->flags | flagsToSet)
			& ~(flagsToClear | RFLAG_VISITED);
	}
}


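/*!	Depth-first post-order visit of \a image and its yet unsorted
	dependencies: dependencies are placed in \a initList before the images
	that need them, starting at index \a slot. \a sortFlag marks images that
	have already been placed. Returns the next free slot.
*/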
static uint32
topological_sort(image_t* image, uint32 slot, image_t** initList,
	uint32 sortFlag)
{
	if (image->flags & sortFlag)
		return slot;

	image->flags |= sortFlag; /* make sure we don't visit this one */
	for (uint32 i = 0; i < image->num_needed; i++)
		slot = topological_sort(image->needed[i], slot, initList, sortFlag);

	initList[slot] = image;
	return slot + 1;
}


/*!	Finds the load address and address specifier of the given image region.
*/
static void
get_image_region_load_address(image_t* image, uint32 index, long lastDelta,
	bool fixed, addr_t& loadAddress, uint32& addressSpecifier)
{
	if (!fixed) {
		// relocatable image... we can afford to place wherever
		if (index == 0) {
			// but only the first segment gets a free ride
			loadAddress = RLD_PROGRAM_BASE;
			addressSpecifier = B_RANDOMIZED_BASE_ADDRESS;
		} else {
			loadAddress = image->regions[index].vmstart + lastDelta;
			addressSpecifier = B_EXACT_ADDRESS;
		}
	} else {
		// not relocatable, put it where it asks or die trying
		loadAddress = image->regions[index].vmstart;
		addressSpecifier = B_EXACT_ADDRESS;
	}
}


// #pragma mark -


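/*!	Allocates and zero-initializes an image_t with room for \a regionCount
	regions. The image name is derived from the last path component of
	\a name (DT_SONAME may later replace it). The returned image starts with
	a reference count of 1; \c NULL is returned if memory is short.
*/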
image_t*
create_image(const char* name, const char* path, int regionCount)
{
	size_t allocSize = sizeof(image_t)
		+ (regionCount - 1) * sizeof(elf_region_t);

	image_t* image = (image_t*)malloc(allocSize);
	if (image == NULL) {
		FATAL("no memory for image %s\n", path);
		return NULL;
	}

	memset(image, 0, allocSize);

	strlcpy(image->path, path, sizeof(image->path));

	// Make the last component of the supplied name the image name.
	// If present, DT_SONAME will replace this name.
	const char* lastSlash = strrchr(name, '/');
	if (lastSlash != NULL)
		strlcpy(image->name, lastSlash + 1, sizeof(image->name));
	else
		strlcpy(image->name, name, sizeof(image->name));

	image->ref_count = 1;
	image->num_regions = regionCount;

	return image;
}


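/*!	Frees \a image and everything it owns: the dependency list, the version
	info, and both symbol patcher lists. In DEBUG builds the freed memory is
	poisoned with 0xa5 first.
*/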
void
delete_image_struct(image_t* image)
{
#ifdef DEBUG
	size_t size = sizeof(image_t)
		+ (image->num_regions - 1) * sizeof(elf_region_t);
	memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
#endif
	free(image->needed);
	free(image->versions);

	while (RuntimeLoaderSymbolPatcher* patcher
			= image->defined_symbol_patchers) {
		image->defined_symbol_patchers = patcher->next;
		delete patcher;
	}
	while (RuntimeLoaderSymbolPatcher* patcher
			= image->undefined_symbol_patchers) {
		image->undefined_symbol_patchers = patcher->next;
		delete patcher;
	}

#ifdef DEBUG
	// overwrite images to make sure they aren't accidentally reused anywhere
	memset(image, 0xa5, size);
#endif
	free(image);
}


void
delete_image(image_t* image)
{
	if (image == NULL)
		return;

	_kern_unregister_image(image->id);
		// registered in load_container()

	delete_image_struct(image);
}


void
put_image(image_t* image)
{
	// If all references to the image are gone, add it to the disposable list
	// and remove all dependencies

	if (atomic_add(&image->ref_count, -1) == 1) {
		size_t i;

		dequeue_image(&sLoadedImages, image);
		enqueue_image(&sDisposableImages, image);
		sLoadedImageCount--;

		for (i = 0; i < image->num_needed; i++)
			put_image(image->needed[i]);
	}
}


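/*!	Maps all of \a image's regions into the address space. First the total
	address range the regions need is computed and reserved in one block, so
	that the regions keep their relative offsets; then each region is created
	(anonymous regions) or file-mapped. Trailing BSS bytes of writable
	segments are zeroed. On success the regions' \c delta and \c vmstart
	fields hold the final load addresses.
*/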
status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_RANDOMIZED_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0,
			fixed, loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + MAX_PAGE_SIZE * 2)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%" B_PRIu32 "%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, fixed, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the start
		// of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments r/w first -- write access might be needed for
			// relocations. When we're done with those we change the protection
			// of read-only segments back to read-only. We map those segments
			// over-committing, since quite likely only a relatively small
			// number of pages needs to be touched and we want to avoid
			// committing a lot of memory for them temporarily, just because
			// we have to map them writable.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP, false,
				fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
		if (i == 0) {
			TLSBlockTemplates::Get().SetBaseAddress(image->dso_tls_id,
				loadAddress);
		}
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}


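//! Deletes the areas of all of \a image's regions and invalidates their IDs.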
void
unmap_image(image_t* image)
{
	for (uint32 i = 0; i < image->num_regions; i++) {
		_kern_delete_area(image->regions[i].id);

		image->regions[i].id = -1;
	}
}


/*!	Changes the protection of all read-only segments to actually be read-only
	(and executable). The areas are mapped read/write first so that
	relocations can be applied. Writable segments of images in compatibility
	mode (ABI older than B_HAIKU_ABI_GCC_2_HAIKU) are additionally made
	executable.
*/
void
remap_images()
{
	for (image_t* image = sLoadedImages.head; image != NULL;
			image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			// we only need to do this once, so we remember those we've already
			// mapped
			if ((image->regions[i].flags & RFLAG_REMAPPED) != 0)
				continue;

			status_t result = B_OK;
			if ((image->regions[i].flags & RFLAG_RW) == 0) {
				result = _kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_EXECUTE_AREA);
			} else if (image->abi < B_HAIKU_ABI_GCC_2_HAIKU) {
				result = _kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA);
			}

			if (result == B_OK)
				image->regions[i].flags |= RFLAG_REMAPPED;
		}
	}
}


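/*!	Registers \a image with the kernel. The effective text and data ranges
	are computed from the (possibly split) regions, an extended_image_info is
	filled in, and the kernel-assigned ID is stored in \c image->id.
*/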
void
register_image(image_t* image, int fd, const char* path)
{
	struct stat stat;
	extended_image_info info;

	// TODO: set these correctly
	info.basic_info.id = 0;
	info.basic_info.type = image->type;
	info.basic_info.sequence = 0;
	info.basic_info.init_order = 0;
	info.basic_info.init_routine = (void (*)())image->init_routine;
	info.basic_info.term_routine = (void (*)())image->term_routine;

	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
		info.basic_info.device = stat.st_dev;
		info.basic_info.node = stat.st_ino;
	} else {
		info.basic_info.device = -1;
		info.basic_info.node = -1;
	}

	// We may have split segments into separate regions. Compute the correct
	// segments for the image info.
	addr_t textBase = 0;
	addr_t textEnd = 0;
	addr_t dataBase = 0;
	addr_t dataEnd = 0;
	for (uint32 i = 0; i < image->num_regions; i++) {
		addr_t base = image->regions[i].vmstart;
		addr_t end = base + image->regions[i].vmsize;
		if (image->regions[i].flags & RFLAG_RW) {
			// data
			if (dataBase == 0) {
				dataBase = base;
				dataEnd = end;
			} else {
				dataBase = std::min(dataBase, base);
				dataEnd = std::max(dataEnd, end);
			}
		} else {
			// text
			if (textBase == 0) {
				textBase = base;
				textEnd = end;
			} else {
				textBase = std::min(textBase, base);
				textEnd = std::max(textEnd, end);
			}
		}
	}

	strlcpy(info.basic_info.name, path, sizeof(info.basic_info.name));
	info.basic_info.text = (void*)textBase;
	info.basic_info.text_size = textEnd - textBase;
	info.basic_info.data = (void*)dataBase;
	info.basic_info.data_size = dataEnd - dataBase;
	info.basic_info.api_version = image->api_version;
	info.basic_info.abi = image->abi;
	info.text_delta = image->regions[0].delta;
	info.symbol_table = image->syms;
	info.symbol_hash = image->symhash;
	info.string_table = image->strtab;
	image->id = _kern_register_image(&info, sizeof(info));
}


//! After fork, we lazily rebuild the image IDs of all loaded images.
status_t
update_image_ids()
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}
	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}

	gInvalidImageIDs = false;
	return B_OK;
}


image_queue_t&
get_loaded_images()
{
	return sLoadedImages;
}


image_queue_t&
get_disposable_images()
{
	return sDisposableImages;
}


uint32
count_loaded_images()
{
	return sLoadedImageCount;
}


void
enqueue_loaded_image(image_t* image)
{
	enqueue_image(&sLoadedImages, image);
	sLoadedImageCount++;
}


void
dequeue_loaded_image(image_t* image)
{
	dequeue_image(&sLoadedImages, image);
	sLoadedImageCount--;
}


void
dequeue_disposable_image(image_t* image)
{
	dequeue_image(&sDisposableImages, image);
}


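/*!	Finds a loaded image by \a name: if \a name contains a '/', it is matched
	against the image path, otherwise against the image name. Only images
	whose type is covered by \a typeMask are considered.
*/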
image_t*
find_loaded_image_by_name(char const* name, uint32 typeMask)
{
	bool isPath = strchr(name, '/') != NULL;
	return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
}


image_t*
find_loaded_image_by_id(image_id id, bool ignoreDisposable)
{
	if (gInvalidImageIDs) {
		// After fork, we lazily rebuild the image IDs of all loaded images
		update_image_ids();
	}

	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	if (ignoreDisposable)
		return NULL;

	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	return NULL;
}


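/*!	Returns the loaded image that contains \a address in one of its regions,
	or \c NULL if no loaded image covers that address.
*/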
image_t*
find_loaded_image_by_address(addr_t address)
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			elf_region_t& region = image->regions[i];
			if (region.vmstart <= address
				&& region.vmstart - 1 + region.vmsize >= address)
				return image;
		}
	}

	return NULL;
}


void
set_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, flags, 0);
}


void
clear_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, 0, flags);
}


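/*	A minimal usage sketch for get_sorted_image_list() below (RFLAG_INITIALIZED
	is the sort flag the initialization code uses elsewhere in the runtime
	loader):

		image_t** initList;
		ssize_t count = get_sorted_image_list(image, &initList,
			RFLAG_INITIALIZED);
		if (count < 0)
			return count;
		// initList[0 .. count - 1] lists dependencies before dependents
		free(initList);
*/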
/*!	Returns a topologically sorted image list.

	If \a image is non-NULL, an array containing the image and all its
	transitive dependencies is returned. If \a image is NULL, all loaded images
	are returned. In either case dependencies are listed before images
	depending on them.

	\param image The image specifying the tree of images that shall be sorted.
		If NULL, all loaded images are sorted.
	\param _list On success it will be set to an array of the sorted images.
		The caller is responsible for free()ing it.
	\param sortFlag The image flag that shall be used for sorting. Images that
		already have this flag set are ignored (and their dependencies, unless
		they are also reachable via another path). The flag will be set on all
		returned images.
	\return The number of images in the returned array or an error code on
		failure.
*/
ssize_t
get_sorted_image_list(image_t* image, image_t*** _list, uint32 sortFlag)
{
	image_t** list;

	list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t*));
	if (list == NULL) {
		FATAL("memory shortage in get_sorted_image_list()");
		*_list = NULL;
		return B_NO_MEMORY;
	}

	memset(list, 0, sLoadedImageCount * sizeof(image_t*));

	*_list = list;

	if (image != NULL)
		return topological_sort(image, 0, list, sortFlag);

	// no image given -- sort all loaded images
	uint32 count = 0;
	image = sLoadedImages.head;
	while (image != NULL) {
		count = topological_sort(image, count, list, sortFlag);
		image = image->next;
	}

	return count;
}