1// SPDX-License-Identifier: GPL-2.0
2#include <errno.h>
3#include <stdlib.h>
4#include <linux/zalloc.h>
5#include "debug.h"
6#include "dso.h"
7#include "map.h"
8#include "maps.h"
9#include "rwsem.h"
10#include "thread.h"
11#include "ui/ui.h"
12#include "unwind.h"
13#include <internal/rc_check.h>
14
15/*
16 * Locking/sorting note:
17 *
18 * Sorting is done with the write lock, iteration and binary searching happens
19 * under the read lock requiring being sorted. There is a race between sorting
20 * releasing the write lock and acquiring the read lock for iteration/searching
21 * where another thread could insert and break the sorting of the maps. In
22 * practice inserting maps should be rare meaning that the race shouldn't lead
23 * to live lock. Removal of maps doesn't break being sorted.
24 */
25
DECLARE_RC_STRUCT(maps) {
	/** @lock: protects all fields; see the locking/sorting note above. */
	struct rw_semaphore lock;
	/**
	 * @maps_by_address: array of maps sorted by their starting address if
	 * maps_by_address_sorted is true.
	 */
	struct map	 **maps_by_address;
	/**
	 * @maps_by_name: optional array of maps sorted by their dso name if
	 * maps_by_name_sorted is true.
	 */
	struct map	 **maps_by_name;
	/** @machine: machine these maps belong to (set once at init). */
	struct machine	 *machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	/** @addr_space: opaque state owned by the unwind code. */
	void		*addr_space;
	/** @unwind_libunwind_ops: unwind callbacks used with @addr_space. */
	const struct unwind_libunwind_ops *unwind_libunwind_ops;
#endif
	/** @refcnt: reference count, see maps__get()/maps__put(). */
	refcount_t	 refcnt;
	/**
	 * @nr_maps: number of maps_by_address, and possibly maps_by_name,
	 * entries that contain maps.
	 */
	unsigned int	 nr_maps;
	/**
	 * @nr_maps_allocated: number of entries in maps_by_address and possibly
	 * maps_by_name.
	 */
	unsigned int	 nr_maps_allocated;
	/**
	 * @last_search_by_name_idx: cache of last found by name entry's index
	 * as frequent searches for the same dso name are common.
	 */
	unsigned int	 last_search_by_name_idx;
	/** @maps_by_address_sorted: is maps_by_address sorted. */
	bool		 maps_by_address_sorted;
	/** @maps_by_name_sorted: is maps_by_name sorted. */
	bool		 maps_by_name_sorted;
	/** @ends_broken: does the map contain a map where end values are unset/unsorted? */
	bool		 ends_broken;
};
66
/*
 * Debug-build consistency check (compiled out with NDEBUG): nr_maps fits the
 * allocation, every map is well-formed and referenced, kernel maps point back
 * at this maps, and the sort order holds whenever the sorted flags are set.
 */
static void check_invariants(const struct maps *maps __maybe_unused)
{
#ifndef NDEBUG
	assert(RC_CHK_ACCESS(maps)->nr_maps <= RC_CHK_ACCESS(maps)->nr_maps_allocated);
	for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
		struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i];

		/* Check map is well-formed. */
		assert(map__end(map) == 0 || map__start(map) <= map__end(map));
		/* Expect at least 1 reference count. */
		assert(refcount_read(map__refcnt(map)) > 0);

		/* Kernel maps should point back to the maps containing them. */
		if (map__dso(map) && map__dso(map)->kernel)
			assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps));

		if (i > 0) {
			struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1];

			/* If addresses are sorted... */
			if (RC_CHK_ACCESS(maps)->maps_by_address_sorted) {
				/* Maps should be in start address order. */
				assert(map__start(prev) <= map__start(map));
				/*
				 * If the ends of maps aren't broken (during
				 * construction) then they should be ordered
				 * too.
				 */
				if (!RC_CHK_ACCESS(maps)->ends_broken) {
					assert(map__end(prev) <= map__end(map));
					assert(map__end(prev) <= map__start(map) ||
					       map__start(prev) == map__start(map));
				}
			}
		}
	}
	if (RC_CHK_ACCESS(maps)->maps_by_name) {
		for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
			struct map *map = RC_CHK_ACCESS(maps)->maps_by_name[i];

			/*
			 * Maps by name maps should be in maps_by_address, so
			 * the reference count should be higher.
			 */
			assert(refcount_read(map__refcnt(map)) > 1);
		}
	}
#endif
}
115
116static struct map **maps__maps_by_address(const struct maps *maps)
117{
118	return RC_CHK_ACCESS(maps)->maps_by_address;
119}
120
121static void maps__set_maps_by_address(struct maps *maps, struct map **new)
122{
123	RC_CHK_ACCESS(maps)->maps_by_address = new;
124
125}
126
/* Address of the maps_by_name field itself, e.g. for use with zfree(). */
static struct map ***maps__maps_by_name_addr(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->maps_by_name;
}
131
/* Record the capacity of the maps arrays. */
static void maps__set_nr_maps_allocated(struct maps *maps, unsigned int nr_maps_allocated)
{
	RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_maps_allocated;
}
136
/* Record the number of used entries in the maps arrays. */
static void maps__set_nr_maps(struct maps *maps, unsigned int nr_maps)
{
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;
}
141
142/* Not in the header, to aid reference counting. */
143static struct map **maps__maps_by_name(const struct maps *maps)
144{
145	return RC_CHK_ACCESS(maps)->maps_by_name;
146
147}
148
149static void maps__set_maps_by_name(struct maps *maps, struct map **new)
150{
151	RC_CHK_ACCESS(maps)->maps_by_name = new;
152
153}
154
/* Is the by-address array currently known to be sorted? */
static bool maps__maps_by_address_sorted(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_address_sorted;
}
159
/* Update whether the by-address array is sorted. */
static void maps__set_maps_by_address_sorted(struct maps *maps, bool value)
{
	RC_CHK_ACCESS(maps)->maps_by_address_sorted = value;
}
164
/* Is the by-name array currently known to be sorted? */
static bool maps__maps_by_name_sorted(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name_sorted;
}
169
/* Update whether the by-name array is sorted. */
static void maps__set_maps_by_name_sorted(struct maps *maps, bool value)
{
	RC_CHK_ACCESS(maps)->maps_by_name_sorted = value;
}
174
/* The machine these maps were created for (set in maps__init()). */
struct machine *maps__machine(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->machine;
}
179
/* Number of maps currently held. */
unsigned int maps__nr_maps(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->nr_maps;
}
184
/* Reference count accessor, used by maps__get()/maps__put(). */
refcount_t *maps__refcnt(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->refcnt;
}
189
190#ifdef HAVE_LIBUNWIND_SUPPORT
/* Opaque unwind state associated with these maps (owned by unwind code). */
void *maps__addr_space(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->addr_space;
}
195
/* Install the unwind state for these maps. */
void maps__set_addr_space(struct maps *maps, void *addr_space)
{
	RC_CHK_ACCESS(maps)->addr_space = addr_space;
}
200
/* Unwind callbacks associated with these maps. */
const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->unwind_libunwind_ops;
}
205
/* Install the unwind callbacks for these maps. */
void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops)
{
	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
}
210#endif
211
/* Accessor for the rwsem protecting the maps' contents. */
static struct rw_semaphore *maps__lock(struct maps *maps)
{
	/*
	 * When the lock is acquired or released the maps invariants should
	 * hold.
	 */
	check_invariants(maps);
	return &RC_CHK_ACCESS(maps)->lock;
}
221
222static void maps__init(struct maps *maps, struct machine *machine)
223{
224	init_rwsem(maps__lock(maps));
225	RC_CHK_ACCESS(maps)->maps_by_address = NULL;
226	RC_CHK_ACCESS(maps)->maps_by_name = NULL;
227	RC_CHK_ACCESS(maps)->machine = machine;
228#ifdef HAVE_LIBUNWIND_SUPPORT
229	RC_CHK_ACCESS(maps)->addr_space = NULL;
230	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = NULL;
231#endif
232	refcount_set(maps__refcnt(maps), 1);
233	RC_CHK_ACCESS(maps)->nr_maps = 0;
234	RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
235	RC_CHK_ACCESS(maps)->last_search_by_name_idx = 0;
236	RC_CHK_ACCESS(maps)->maps_by_address_sorted = true;
237	RC_CHK_ACCESS(maps)->maps_by_name_sorted = false;
238}
239
240static void maps__exit(struct maps *maps)
241{
242	struct map **maps_by_address = maps__maps_by_address(maps);
243	struct map **maps_by_name = maps__maps_by_name(maps);
244
245	for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
246		map__zput(maps_by_address[i]);
247		if (maps_by_name)
248			map__zput(maps_by_name[i]);
249	}
250	zfree(&maps_by_address);
251	zfree(&maps_by_name);
252	unwind__finish_access(maps);
253}
254
255struct maps *maps__new(struct machine *machine)
256{
257	struct maps *result;
258	RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));
259
260	if (ADD_RC_CHK(result, maps))
261		maps__init(result, machine);
262
263	return result;
264}
265
/* Free the maps; only called from maps__put() when the refcount hits zero. */
static void maps__delete(struct maps *maps)
{
	/* Release contents before freeing the wrapper. */
	maps__exit(maps);
	RC_CHK_FREE(maps);
}
271
/* Acquire a new reference to 'maps' (which may be NULL). */
struct maps *maps__get(struct maps *maps)
{
	struct maps *result;

	if (RC_CHK_GET(result, maps))
		refcount_inc(maps__refcnt(maps));

	return result;
}
281
/* Drop a reference; the final put deletes the maps. NULL is a no-op. */
void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(maps__refcnt(maps)))
		maps__delete(maps);
	else
		/* Only release the refcount-checking wrapper. */
		RC_CHK_PUT(maps);
}
289
static void __maps__free_maps_by_name(struct maps *maps)
{
	/*
	 * Drop the by-name references and free the array; it is recomputed by
	 * maps__sort_by_name() the next time it is needed.
	 */
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);

	zfree(&RC_CHK_ACCESS(maps)->maps_by_name);
}
300
301static int map__start_cmp(const void *a, const void *b)
302{
303	const struct map *map_a = *(const struct map * const *)a;
304	const struct map *map_b = *(const struct map * const *)b;
305	u64 map_a_start = map__start(map_a);
306	u64 map_b_start = map__start(map_b);
307
308	if (map_a_start == map_b_start) {
309		u64 map_a_end = map__end(map_a);
310		u64 map_b_end = map__end(map_b);
311
312		if  (map_a_end == map_b_end) {
313			/* Ensure maps with the same addresses have a fixed order. */
314			if (RC_CHK_ACCESS(map_a) == RC_CHK_ACCESS(map_b))
315				return 0;
316			return (intptr_t)RC_CHK_ACCESS(map_a) > (intptr_t)RC_CHK_ACCESS(map_b)
317				? 1 : -1;
318		}
319		return map_a_end > map_b_end ? 1 : -1;
320	}
321	return map_a_start > map_b_start ? 1 : -1;
322}
323
324static void __maps__sort_by_address(struct maps *maps)
325{
326	if (maps__maps_by_address_sorted(maps))
327		return;
328
329	qsort(maps__maps_by_address(maps),
330		maps__nr_maps(maps),
331		sizeof(struct map *),
332		map__start_cmp);
333	maps__set_maps_by_address_sorted(maps, true);
334}
335
/* Sort the by-address array, taking the write lock. */
static void maps__sort_by_address(struct maps *maps)
{
	down_write(maps__lock(maps));
	__maps__sort_by_address(maps);
	up_write(maps__lock(maps));
}
342
343static int map__strcmp(const void *a, const void *b)
344{
345	const struct map *map_a = *(const struct map * const *)a;
346	const struct map *map_b = *(const struct map * const *)b;
347	const struct dso *dso_a = map__dso(map_a);
348	const struct dso *dso_b = map__dso(map_b);
349	int ret = strcmp(dso_a->short_name, dso_b->short_name);
350
351	if (ret == 0 && RC_CHK_ACCESS(map_a) != RC_CHK_ACCESS(map_b)) {
352		/* Ensure distinct but name equal maps have an order. */
353		return map__start_cmp(a, b);
354	}
355	return ret;
356}
357
/*
 * Lazily build (if necessary) and sort the by-name array. Returns 0 on
 * success or -ENOMEM if the array couldn't be allocated.
 */
static int maps__sort_by_name(struct maps *maps)
{
	int err = 0;
	down_write(maps__lock(maps));
	if (!maps__maps_by_name_sorted(maps)) {
		struct map **maps_by_name = maps__maps_by_name(maps);

		if (!maps_by_name) {
			/* Match the by-address capacity so __maps__insert() keeps both in step. */
			maps_by_name = malloc(RC_CHK_ACCESS(maps)->nr_maps_allocated *
					sizeof(*maps_by_name));
			if (!maps_by_name)
				err = -ENOMEM;
			else {
				struct map **maps_by_address = maps__maps_by_address(maps);
				unsigned int n = maps__nr_maps(maps);

				maps__set_maps_by_name(maps, maps_by_name);
				/* The by-name array holds its own reference on each map. */
				for (unsigned int i = 0; i < n; i++)
					maps_by_name[i] = map__get(maps_by_address[i]);
			}
		}
		if (!err) {
			qsort(maps_by_name,
				maps__nr_maps(maps),
				sizeof(struct map *),
				map__strcmp);
			maps__set_maps_by_name_sorted(maps, true);
		}
	}
	up_write(maps__lock(maps));
	return err;
}
390
391static unsigned int maps__by_address_index(const struct maps *maps, const struct map *map)
392{
393	struct map **maps_by_address = maps__maps_by_address(maps);
394
395	if (maps__maps_by_address_sorted(maps)) {
396		struct map **mapp =
397			bsearch(&map, maps__maps_by_address(maps), maps__nr_maps(maps),
398				sizeof(*mapp), map__start_cmp);
399
400		if (mapp)
401			return mapp - maps_by_address;
402	} else {
403		for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
404			if (RC_CHK_ACCESS(maps_by_address[i]) == RC_CHK_ACCESS(map))
405				return i;
406		}
407	}
408	pr_err("Map missing from maps");
409	return -1;
410}
411
412static unsigned int maps__by_name_index(const struct maps *maps, const struct map *map)
413{
414	struct map **maps_by_name = maps__maps_by_name(maps);
415
416	if (maps__maps_by_name_sorted(maps)) {
417		struct map **mapp =
418			bsearch(&map, maps_by_name, maps__nr_maps(maps),
419				sizeof(*mapp), map__strcmp);
420
421		if (mapp)
422			return mapp - maps_by_name;
423	} else {
424		for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
425			if (RC_CHK_ACCESS(maps_by_name[i]) == RC_CHK_ACCESS(map))
426				return i;
427		}
428	}
429	pr_err("Map missing from maps");
430	return -1;
431}
432
/*
 * Append 'new' (taking references) to the end of the arrays, growing them as
 * needed, and recompute the sorted flags. Caller holds the write lock.
 * Returns -ENOMEM if growing the by-address array fails.
 */
static int __maps__insert(struct maps *maps, struct map *new)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	const struct dso *dso = map__dso(new);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;

	if (nr_maps + 1 > nr_allocate) {
		/* Grow geometrically, starting at 32 entries. */
		nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;

		maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new));
		if (!maps_by_address)
			return -ENOMEM;

		maps__set_maps_by_address(maps, maps_by_address);
		if (maps_by_name) {
			maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new));
			if (!maps_by_name) {
				/*
				 * If by name fails, just disable by name and it will
				 * recompute next time it is required.
				 */
				__maps__free_maps_by_name(maps);
			}
			/* maps_by_name is NULL here on realloc failure. */
			maps__set_maps_by_name(maps, maps_by_name);
		}
		RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
	}
	/* Insert the value at the end. */
	maps_by_address[nr_maps] = map__get(new);
	if (maps_by_name)
		maps_by_name[nr_maps] = map__get(new);

	nr_maps++;
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;

	/*
	 * Recompute if things are sorted. If things are inserted in a sorted
	 * manner, for example by processing /proc/pid/maps, then no
	 * sorting/resorting will be necessary.
	 */
	if (nr_maps == 1) {
		/* If there's just 1 entry then maps are sorted. */
		maps__set_maps_by_address_sorted(maps, true);
		maps__set_maps_by_name_sorted(maps, maps_by_name != NULL);
	} else {
		/* Sorted if maps were already sorted and this map starts after the last one. */
		maps__set_maps_by_address_sorted(maps,
			maps__maps_by_address_sorted(maps) &&
			map__end(maps_by_address[nr_maps - 2]) <= map__start(new));
		maps__set_maps_by_name_sorted(maps, false);
	}
	/* An end below start (e.g. still unset) breaks end-ordering invariants. */
	if (map__end(new) < map__start(new))
		RC_CHK_ACCESS(maps)->ends_broken = true;
	if (dso && dso->kernel) {
		struct kmap *kmap = map__kmap(new);

		if (kmap)
			kmap->kmaps = maps;
		else
			pr_err("Internal error: kernel dso with non kernel map\n");
	}
	return 0;
}
498
/* Insert 'map' into 'maps', taking the write lock. Returns -ENOMEM on failure. */
int maps__insert(struct maps *maps, struct map *map)
{
	int ret;

	down_write(maps__lock(maps));
	ret = __maps__insert(maps, map);
	up_write(maps__lock(maps));
	return ret;
}
508
/*
 * Remove 'map' from both arrays, dropping the references they held. Caller
 * holds the write lock. NOTE(review): assumes 'map' is present; a failed
 * index lookup returns (unsigned)-1 which would index out of bounds -
 * confirm all callers only remove contained maps.
 */
static void __maps__remove(struct maps *maps, struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int address_idx;

	/* Slide later mappings over the one to remove */
	address_idx = maps__by_address_index(maps, map);
	map__put(maps_by_address[address_idx]);
	memmove(&maps_by_address[address_idx],
		&maps_by_address[address_idx + 1],
		(nr_maps - address_idx - 1) * sizeof(*maps_by_address));

	if (maps_by_name) {
		unsigned int name_idx = maps__by_name_index(maps, map);

		map__put(maps_by_name[name_idx]);
		memmove(&maps_by_name[name_idx],
			&maps_by_name[name_idx + 1],
			(nr_maps - name_idx - 1) *  sizeof(*maps_by_name));
	}

	--RC_CHK_ACCESS(maps)->nr_maps;
}
534
/* Remove 'map' from 'maps', taking the write lock. */
void maps__remove(struct maps *maps, struct map *map)
{
	down_write(maps__lock(maps));
	__maps__remove(maps, map);
	up_write(maps__lock(maps));
}
541
542bool maps__empty(struct maps *maps)
543{
544	bool res;
545
546	down_read(maps__lock(maps));
547	res = maps__nr_maps(maps) == 0;
548	up_read(maps__lock(maps));
549
550	return res;
551}
552
/* Are 'a' and 'b' the same maps object? Identity, not content, comparison. */
bool maps__equal(struct maps *a, struct maps *b)
{
	return RC_CHK_EQUAL(a, b);
}
557
/*
 * Call 'cb' on each map in address order; stop and return the first non-zero
 * value 'cb' returns, otherwise 0. Iteration needs sorted maps so, per the
 * locking/sorting note, sort outside the read lock and retry when unsorted.
 */
int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
{
	bool done = false;
	int ret = 0;

	/* See locking/sorting note. */
	while (!done) {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			/*
			 * maps__for_each_map callbacks may buggily/unsafely
			 * insert into maps_by_address. Deliberately reload
			 * maps__nr_maps and maps_by_address on each iteration
			 * to avoid using memory freed by maps__insert growing
			 * the array - this may cause maps to be skipped or
			 * repeated.
			 */
			for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
				struct map **maps_by_address = maps__maps_by_address(maps);
				struct map *map = maps_by_address[i];

				ret = cb(map, data);
				if (ret)
					break;
			}
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps);
	}
	return ret;
}
591
/* Remove every map for which 'cb' returns true. Takes the write lock. */
void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
{
	struct map **maps_by_address;

	down_write(maps__lock(maps));

	maps_by_address = maps__maps_by_address(maps);
	for (unsigned int i = 0; i < maps__nr_maps(maps);) {
		/* On removal later entries slide down, so don't advance 'i'. */
		if (cb(maps_by_address[i], data))
			__maps__remove(maps, maps_by_address[i]);
		else
			i++;
	}
	up_write(maps__lock(maps));
}
607
/*
 * Find the symbol at 'addr'. When 'mapp' is non-NULL the found map (and the
 * reference maps__find() took on it) is handed to the caller, otherwise the
 * reference is dropped here.
 */
struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
	struct map *map = maps__find(maps, addr);
	struct symbol *result = NULL;

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0)
		result = map__find_symbol(map, map__map_ip(map, addr));

	if (mapp)
		*mapp = map;
	else
		map__put(map);

	return result;
}
624
/* Arguments and result for maps__find_symbol_by_name_cb(). */
struct maps__find_symbol_by_name_args {
	/* Out: if non-NULL, receives a reference to the map containing sym. */
	struct map **mapp;
	/* In: symbol name being searched for. */
	const char *name;
	/* Out: the found symbol, or NULL. */
	struct symbol *sym;
};
630
/*
 * maps__for_each_map() callback: returns 1 (stop iterating) once a map
 * containing a symbol named args->name is found, 0 to keep searching.
 */
static int maps__find_symbol_by_name_cb(struct map *map, void *data)
{
	struct maps__find_symbol_by_name_args *args = data;

	args->sym = map__find_symbol_by_name(map, args->name);
	if (!args->sym)
		return 0;

	/* Reject symbols that fall outside this map's address range. */
	if (!map__contains_symbol(map, args->sym)) {
		args->sym = NULL;
		return 0;
	}

	if (args->mapp != NULL)
		*args->mapp = map__get(map);
	return 1;
}
648
649struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
650{
651	struct maps__find_symbol_by_name_args args = {
652		.mapp = mapp,
653		.name = name,
654		.sym = NULL,
655	};
656
657	maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);
658	return args.sym;
659}
660
/*
 * Resolve ams->addr to a map and symbol. If the current ams->ms.map doesn't
 * cover the address, look one up in 'maps'. Returns 0 when a symbol was
 * found, -1 otherwise.
 */
int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		if (maps == NULL)
			return -1;
		/* NOTE(review): the old ams->ms.map reference isn't put here - confirm caller owns it. */
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}
676
/* Output stream and running byte count for maps__fprintf_cb(). */
struct maps__fprintf_args {
	FILE *fp;
	size_t printed;
};
681
682static int maps__fprintf_cb(struct map *map, void *data)
683{
684	struct maps__fprintf_args *args = data;
685
686	args->printed += fprintf(args->fp, "Map:");
687	args->printed += map__fprintf(map, args->fp);
688	if (verbose > 2) {
689		args->printed += dso__fprintf(map__dso(map), args->fp);
690		args->printed += fprintf(args->fp, "--\n");
691	}
692	return 0;
693}
694
695size_t maps__fprintf(struct maps *maps, FILE *fp)
696{
697	struct maps__fprintf_args args = {
698		.fp = fp,
699		.printed = 0,
700	};
701
702	maps__for_each_map(maps, maps__fprintf_cb, &args);
703
704	return args.printed;
705}
706
707/*
708 * Find first map where end > map->start.
709 * Same as find_vma() in kernel.
710 */
711static unsigned int first_ending_after(struct maps *maps, const struct map *map)
712{
713	struct map **maps_by_address = maps__maps_by_address(maps);
714	int low = 0, high = (int)maps__nr_maps(maps) - 1, first = high + 1;
715
716	assert(maps__maps_by_address_sorted(maps));
717	if (low <= high && map__end(maps_by_address[0]) > map__start(map))
718		return 0;
719
720	while (low <= high) {
721		int mid = (low + high) / 2;
722		struct map *pos = maps_by_address[mid];
723
724		if (map__end(pos) > map__start(map)) {
725			first = mid;
726			if (map__start(pos) <= map__start(map)) {
727				/* Entry overlaps map. */
728				break;
729			}
730			high = mid - 1;
731		} else
732			low = mid + 1;
733	}
734	return first;
735}
736
737/*
738 * Adds new to maps, if new overlaps existing entries then the existing maps are
739 * adjusted or removed so that new fits without overlapping any entries.
740 */
741static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
742{
743	struct map **maps_by_address;
744	int err = 0;
745	FILE *fp = debug_file();
746
747sort_again:
748	if (!maps__maps_by_address_sorted(maps))
749		__maps__sort_by_address(maps);
750
751	maps_by_address = maps__maps_by_address(maps);
752	/*
753	 * Iterate through entries where the end of the existing entry is
754	 * greater-than the new map's start.
755	 */
756	for (unsigned int i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) {
757		struct map *pos = maps_by_address[i];
758		struct map *before = NULL, *after = NULL;
759
760		/*
761		 * Stop if current map starts after map->end.
762		 * Maps are ordered by start: next will not overlap for sure.
763		 */
764		if (map__start(pos) >= map__end(new))
765			break;
766
767		if (use_browser) {
768			pr_debug("overlapping maps in %s (disable tui for more info)\n",
769				map__dso(new)->name);
770		} else if (verbose >= 2) {
771			pr_debug("overlapping maps:\n");
772			map__fprintf(new, fp);
773			map__fprintf(pos, fp);
774		}
775
776		/*
777		 * Now check if we need to create new maps for areas not
778		 * overlapped by the new map:
779		 */
780		if (map__start(new) > map__start(pos)) {
781			/* Map starts within existing map. Need to shorten the existing map. */
782			before = map__clone(pos);
783
784			if (before == NULL) {
785				err = -ENOMEM;
786				goto out_err;
787			}
788			map__set_end(before, map__start(new));
789
790			if (verbose >= 2 && !use_browser)
791				map__fprintf(before, fp);
792		}
793		if (map__end(new) < map__end(pos)) {
794			/* The new map isn't as long as the existing map. */
795			after = map__clone(pos);
796
797			if (after == NULL) {
798				map__zput(before);
799				err = -ENOMEM;
800				goto out_err;
801			}
802
803			map__set_start(after, map__end(new));
804			map__add_pgoff(after, map__end(new) - map__start(pos));
805			assert(map__map_ip(pos, map__end(new)) ==
806			       map__map_ip(after, map__end(new)));
807
808			if (verbose >= 2 && !use_browser)
809				map__fprintf(after, fp);
810		}
811		/*
812		 * If adding one entry, for `before` or `after`, we can replace
813		 * the existing entry. If both `before` and `after` are
814		 * necessary than an insert is needed. If the existing entry
815		 * entirely overlaps the existing entry it can just be removed.
816		 */
817		if (before) {
818			map__put(maps_by_address[i]);
819			maps_by_address[i] = before;
820			/* Maps are still ordered, go to next one. */
821			i++;
822			if (after) {
823				__maps__insert(maps, after);
824				map__put(after);
825				if (!maps__maps_by_address_sorted(maps)) {
826					/*
827					 * Sorting broken so invariants don't
828					 * hold, sort and go again.
829					 */
830					goto sort_again;
831				}
832				/*
833				 * Maps are still ordered, skip after and go to
834				 * next one (terminate loop).
835				 */
836				i++;
837			}
838		} else if (after) {
839			map__put(maps_by_address[i]);
840			maps_by_address[i] = after;
841			/* Maps are ordered, go to next one. */
842			i++;
843		} else {
844			__maps__remove(maps, pos);
845			/*
846			 * Maps are ordered but no need to increase `i` as the
847			 * later maps were moved down.
848			 */
849		}
850		check_invariants(maps);
851	}
852	/* Add the map. */
853	__maps__insert(maps, new);
854out_err:
855	return err;
856}
857
/* Overlap-adjusting insert of 'new', taking the write lock. */
int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	int ret;

	down_write(maps__lock(maps));
	ret = __maps__fixup_overlap_and_insert(maps, new);
	up_write(maps__lock(maps));
	return ret;
}
867
/*
 * Clone parent's maps into dest (normally empty, e.g. a new thread). Each
 * map is cloned rather than shared as maps are mutable. Returns 0 on
 * success, -ENOMEM if cloning fails.
 */
int maps__copy_from(struct maps *dest, struct maps *parent)
{
	/* Note, if struct map were immutable then cloning could use ref counts. */
	struct map **parent_maps_by_address;
	int err = 0;
	unsigned int n;

	down_write(maps__lock(dest));
	down_read(maps__lock(parent));

	parent_maps_by_address = maps__maps_by_address(parent);
	n = maps__nr_maps(parent);
	if (maps__nr_maps(dest) == 0) {
		/* No existing mappings so just copy from parent to avoid reallocs in insert. */
		unsigned int nr_maps_allocated = RC_CHK_ACCESS(parent)->nr_maps_allocated;
		struct map **dest_maps_by_address =
			malloc(nr_maps_allocated * sizeof(struct map *));
		struct map **dest_maps_by_name = NULL;

		if (!dest_maps_by_address)
			err = -ENOMEM;
		else {
			if (maps__maps_by_name(parent)) {
				/* By-name is optional; a failed malloc leaves it NULL, handled below. */
				dest_maps_by_name =
					malloc(nr_maps_allocated * sizeof(struct map *));
			}

			RC_CHK_ACCESS(dest)->maps_by_address = dest_maps_by_address;
			RC_CHK_ACCESS(dest)->maps_by_name = dest_maps_by_name;
			RC_CHK_ACCESS(dest)->nr_maps_allocated = nr_maps_allocated;
		}

		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err) {
					dest_maps_by_address[i] = new;
					if (dest_maps_by_name)
						dest_maps_by_name[i] = map__get(new);
					/* Keep nr_maps in step so partial copies stay consistent. */
					RC_CHK_ACCESS(dest)->nr_maps = i + 1;
				}
			}
			if (err)
				map__put(new);
		}
		maps__set_maps_by_address_sorted(dest, maps__maps_by_address_sorted(parent));
		if (!err) {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx =
				RC_CHK_ACCESS(parent)->last_search_by_name_idx;
			maps__set_maps_by_name_sorted(dest,
						dest_maps_by_name &&
						maps__maps_by_name_sorted(parent));
		} else {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx = 0;
			maps__set_maps_by_name_sorted(dest, false);
		}
	} else {
		/* Unexpected copying to a maps containing entries. */
		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err)
					err = __maps__insert(dest, new);
			}
			map__put(new);
		}
	}
	up_read(maps__lock(parent));
	up_write(maps__lock(dest));
	return err;
}
949
950static int map__addr_cmp(const void *key, const void *entry)
951{
952	const u64 ip = *(const u64 *)key;
953	const struct map *map = *(const struct map * const *)entry;
954
955	if (ip < map__start(map))
956		return -1;
957	if (ip >= map__end(map))
958		return 1;
959	return 0;
960}
961
/*
 * Find and return a reference to the map containing 'ip', or NULL. The
 * binary search needs sorted maps, so sort outside the read lock and retry
 * when unsorted (see the locking/sorting note).
 */
struct map *maps__find(struct maps *maps, u64 ip)
{
	struct map *result = NULL;
	bool done = false;

	/* See locking/sorting note. */
	while (!done) {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			struct map **mapp =
				bsearch(&ip, maps__maps_by_address(maps), maps__nr_maps(maps),
					sizeof(*mapp), map__addr_cmp);

			if (mapp)
				result = map__get(*mapp);
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps);
	}
	return result;
}
985
986static int map__strcmp_name(const void *name, const void *b)
987{
988	const struct dso *dso = map__dso(*(const struct map **)b);
989
990	return strcmp(name, dso->short_name);
991}
992
/*
 * Find and return a reference to a map whose dso short name equals 'name'.
 * Tries, in order: the cached last_search_by_name_idx entry, binary search of
 * the by-name array (sorting it first if needed), and - when sorting fails
 * due to memory pressure - a linear scan of the by-address array.
 */
struct map *maps__find_by_name(struct maps *maps, const char *name)
{
	struct map *result = NULL;
	bool done = false;

	/* See locking/sorting note. */
	while (!done) {
		unsigned int i;

		down_read(maps__lock(maps));

		/* First check last found entry. */
		i = RC_CHK_ACCESS(maps)->last_search_by_name_idx;
		if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) {
			struct dso *dso = map__dso(maps__maps_by_name(maps)[i]);

			if (dso && strcmp(dso->short_name, name) == 0) {
				result = map__get(maps__maps_by_name(maps)[i]);
				done = true;
			}
		}

		/* Second search sorted array. */
		if (!done && maps__maps_by_name_sorted(maps)) {
			struct map **mapp =
				bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
					sizeof(*mapp), map__strcmp_name);

			if (mapp) {
				result = map__get(*mapp);
				/* Remember the hit for the fast path next time. */
				i = mapp - maps__maps_by_name(maps);
				RC_CHK_ACCESS(maps)->last_search_by_name_idx = i;
			}
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done) {
			/* Sort and retry binary search. */
			if (maps__sort_by_name(maps)) {
				/*
				 * Memory allocation failed do linear search
				 * through address sorted maps.
				 */
				struct map **maps_by_address;
				unsigned int n;

				down_read(maps__lock(maps));
				maps_by_address =  maps__maps_by_address(maps);
				n = maps__nr_maps(maps);
				for (i = 0; i < n; i++) {
					struct map *pos = maps_by_address[i];
					struct dso *dso = map__dso(pos);

					if (dso && strcmp(dso->short_name, name) == 0) {
						result = map__get(pos);
						break;
					}
				}
				up_read(maps__lock(maps));
				done = true;
			}
		}
	}
	return result;
}
1058
1059struct map *maps__find_next_entry(struct maps *maps, struct map *map)
1060{
1061	unsigned int i;
1062	struct map *result = NULL;
1063
1064	down_read(maps__lock(maps));
1065	i = maps__by_address_index(maps, map);
1066	if (i < maps__nr_maps(maps))
1067		result = map__get(maps__maps_by_address(maps)[i]);
1068
1069	up_read(maps__lock(maps));
1070	return result;
1071}
1072
/*
 * Fill in missing/overlapping map ends from the start of the following map
 * and extend the last map to ~0ULL, then clear ends_broken. Used when ends
 * aren't known until all maps have been read.
 */
void maps__fixup_end(struct maps *maps)
{
	struct map **maps_by_address;
	unsigned int n;

	down_write(maps__lock(maps));
	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);

	maps_by_address = maps__maps_by_address(maps);
	n = maps__nr_maps(maps);
	for (unsigned int i = 1; i < n; i++) {
		struct map *prev = maps_by_address[i - 1];
		struct map *curr = maps_by_address[i];

		/* Clamp an unset or overlapping end to the next map's start. */
		if (!map__end(prev) || map__end(prev) > map__start(curr))
			map__set_end(prev, map__start(curr));
	}

	/*
	 * We still haven't the actual symbols, so guess the
	 * last map final address.
	 */
	if (n > 0 && !map__end(maps_by_address[n - 1]))
		map__set_end(maps_by_address[n - 1], ~0ULL);

	RC_CHK_ACCESS(maps)->ends_broken = false;

	up_write(maps__lock(maps));
}
1103
1104/*
1105 * Merges map into maps by splitting the new map within the existing map
1106 * regions.
1107 */
1108int maps__merge_in(struct maps *kmaps, struct map *new_map)
1109{
1110	unsigned int first_after_, kmaps__nr_maps;
1111	struct map **kmaps_maps_by_address;
1112	struct map **merged_maps_by_address;
1113	unsigned int merged_nr_maps_allocated;
1114
1115	/* First try under a read lock. */
1116	while (true) {
1117		down_read(maps__lock(kmaps));
1118		if (maps__maps_by_address_sorted(kmaps))
1119			break;
1120
1121		up_read(maps__lock(kmaps));
1122
1123		/* First after binary search requires sorted maps. Sort and try again. */
1124		maps__sort_by_address(kmaps);
1125	}
1126	first_after_ = first_ending_after(kmaps, new_map);
1127	kmaps_maps_by_address = maps__maps_by_address(kmaps);
1128
1129	if (first_after_ >= maps__nr_maps(kmaps) ||
1130	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
1131		/* No overlap so regular insert suffices. */
1132		up_read(maps__lock(kmaps));
1133		return maps__insert(kmaps, new_map);
1134	}
1135	up_read(maps__lock(kmaps));
1136
1137	/* Plain insert with a read-lock failed, try again now with the write lock. */
1138	down_write(maps__lock(kmaps));
1139	if (!maps__maps_by_address_sorted(kmaps))
1140		__maps__sort_by_address(kmaps);
1141
1142	first_after_ = first_ending_after(kmaps, new_map);
1143	kmaps_maps_by_address = maps__maps_by_address(kmaps);
1144	kmaps__nr_maps = maps__nr_maps(kmaps);
1145
1146	if (first_after_ >= kmaps__nr_maps ||
1147	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
1148		/* No overlap so regular insert suffices. */
1149		int ret = __maps__insert(kmaps, new_map);
1150		up_write(maps__lock(kmaps));
1151		return ret;
1152	}
1153	/* Array to merge into, possibly 1 more for the sake of new_map. */
1154	merged_nr_maps_allocated = RC_CHK_ACCESS(kmaps)->nr_maps_allocated;
1155	if (kmaps__nr_maps + 1 == merged_nr_maps_allocated)
1156		merged_nr_maps_allocated++;
1157
1158	merged_maps_by_address = malloc(merged_nr_maps_allocated * sizeof(*merged_maps_by_address));
1159	if (!merged_maps_by_address) {
1160		up_write(maps__lock(kmaps));
1161		return -ENOMEM;
1162	}
1163	maps__set_maps_by_address(kmaps, merged_maps_by_address);
1164	maps__set_maps_by_address_sorted(kmaps, true);
1165	zfree(maps__maps_by_name_addr(kmaps));
1166	maps__set_maps_by_name_sorted(kmaps, true);
1167	maps__set_nr_maps_allocated(kmaps, merged_nr_maps_allocated);
1168
1169	/* Copy entries before the new_map that can't overlap. */
1170	for (unsigned int i = 0; i < first_after_; i++)
1171		merged_maps_by_address[i] = map__get(kmaps_maps_by_address[i]);
1172
1173	maps__set_nr_maps(kmaps, first_after_);
1174
1175	/* Add the new map, it will be split when the later overlapping mappings are added. */
1176	__maps__insert(kmaps, new_map);
1177
1178	/* Insert mappings after new_map, splitting new_map in the process. */
1179	for (unsigned int i = first_after_; i < kmaps__nr_maps; i++)
1180		__maps__fixup_overlap_and_insert(kmaps, kmaps_maps_by_address[i]);
1181
1182	/* Copy the maps from merged into kmaps. */
1183	for (unsigned int i = 0; i < kmaps__nr_maps; i++)
1184		map__zput(kmaps_maps_by_address[i]);
1185
1186	free(kmaps_maps_by_address);
1187	up_write(maps__lock(kmaps));
1188	return 0;
1189}
1190
1191void maps__load_first(struct maps *maps)
1192{
1193	down_read(maps__lock(maps));
1194
1195	if (maps__nr_maps(maps) > 0)
1196		map__load(maps__maps_by_address(maps)[0]);
1197
1198	up_read(maps__lock(maps));
1199}
1200