// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>
#include "internal.h"

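/* Initialize an empty evlist: no entries, an empty pollfd array and a reset id hash. */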
void perf_evlist__init(struct perf_evlist *evlist)
{
	INIT_LIST_HEAD(&evlist->entries);
	evlist->nr_entries = 0;
	fdarray__init(&evlist->pollfd, 64);
	perf_evlist__reset_id_hash(evlist);
}

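/*
 * Compute the CPUs and threads an evsel will be opened on, based on the
 * evlist's maps and the evsel's own constraints (system wide, core PMU,
 * PMU-provided cpu map), then fold the evsel's CPUs into evlist->all_cpus.
 */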
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	if (evsel->system_wide) {
		/* System wide: set the cpu map of the evsel to all online CPUs. */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__new_online_cpus();
	} else if (evlist->has_user_cpus && evsel->is_pmu_core) {
		/*
		 * The user requested CPUs on a core PMU; ensure the requested
		 * CPUs are valid by intersecting with those of the PMU.
		 */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__intersect(evlist->user_requested_cpus, evsel->own_cpus);
	} else if (!evsel->own_cpus || evlist->has_user_cpus ||
		(!evsel->requires_cpu && perf_cpu_map__has_any_cpu(evlist->user_requested_cpus))) {
		/*
		 * Use the user requested CPUs rather than the PMU ones when:
		 * the PMU didn't specify a default cpu map, the user
		 * explicitly requested CPUs, or this event doesn't require a
		 * CPU and the user requested CPUs include the "any CPU" (aka
		 * dummy) value.
		 */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		/*
		 * No user requested cpu map but the PMU cpu map doesn't match
		 * the evsel's. Reset it back to the PMU cpu map.
		 */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	if (evsel->system_wide) {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__new_dummy();
	} else {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__get(evlist->threads);
	}

	evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist->needs_map_propagation = true;

	perf_evlist__for_each_evsel(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

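/* Append an evsel, propagating maps to it if they have already been set up. */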
void perf_evlist__add(struct perf_evlist *evlist,
		      struct perf_evsel *evsel)
{
	evsel->idx = evlist->nr_entries;
	list_add_tail(&evsel->node, &evlist->entries);
	evlist->nr_entries += 1;

	if (evlist->needs_map_propagation)
		__perf_evlist__propagate_maps(evlist, evsel);
}

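/* Unlink an evsel from the evlist; ownership of the evsel stays with the caller. */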
void perf_evlist__remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel)
{
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}

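/* Iterate the evlist: pass prev == NULL to get the first evsel, NULL is returned past the last. */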
struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
	struct perf_evsel *next;

	if (!prev) {
		next = list_first_entry(&evlist->entries,
					struct perf_evsel,
					node);
	} else {
		next = list_next_entry(prev, node);
	}

	/* An empty list is noticed here, so it doesn't need checking on entry. */
	if (&next->node == &evlist->entries)
		return NULL;

	return next;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	perf_evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	perf_cpu_map__put(evlist->user_requested_cpus);
	perf_cpu_map__put(evlist->all_cpus);
	perf_thread_map__put(evlist->threads);
	evlist->user_requested_cpus = NULL;
	evlist->all_cpus = NULL;
	evlist->threads = NULL;
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

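/* Install new cpu and thread maps and propagate them to every evsel. */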
void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it.  Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1.  If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->user_requested_cpus) {
		perf_cpu_map__put(evlist->user_requested_cpus);
		evlist->user_requested_cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}

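/* Open a perf event for every evsel; on any failure all already opened events are closed. */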
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}

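/*
 * Sample IDs are kept in a hash so that the evsel that generated an event
 * can be looked up from the event's PERF_SAMPLE_ID value.
 */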
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu_map_idx, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu_map_idx, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu_map_idx, int thread, u64 id)
{
	if (!SID(evsel, cpu_map_idx, thread))
		return;

	perf_evlist__id_hash(evlist, evsel, cpu_map_idx, thread, id);
	evsel->id[evsel->ids++] = id;
}

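/*
 * Determine the event ID for (cpu_map_idx, thread) from its file descriptor,
 * using the PERF_EVENT_IOC_ID ioctl where available and falling back to
 * parsing a read() of the counter on older kernels, then record it in the
 * id hash.
 */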
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu_map_idx, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	if (!SID(evsel, cpu_map_idx, thread))
		return -1;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu_map_idx, thread, id);
	return 0;
}

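/*
 * Reserve pollfd entries for every event fd: per evsel, one per CPU for
 * system-wide events, one per CPU x thread otherwise.
 */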
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
			    void *ptr, short revent, enum fdarray_flags flags)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);

	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = ptr;
		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

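/* Allocate evlist->nr_mmaps ring buffer descriptors, chained for perf_evlist__next_mmap(). */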
static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
{
	int i;
	struct perf_mmap *map;

	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1] : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i], prev, overwrite, NULL);
	}

	return map;
}

static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
	sid->tid = perf_thread_map__pid(evsel->threads, thread);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
	struct perf_mmap *maps;

	maps = overwrite ? evlist->mmap_ovw : evlist->mmap;

	if (!maps) {
		maps = perf_evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite)
			evlist->mmap_ovw = maps;
		else
			evlist->mmap = maps;
	}

	return &maps[idx];
}

#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
			  int output, struct perf_cpu cpu)
{
	return perf_mmap__mmap(map, mp, output, cpu);
}

static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
					bool overwrite)
{
	if (overwrite)
		evlist->mmap_ovw_first = map;
	else
		evlist->mmap_first = map;
}

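/*
 * mmap the ring buffer at "idx" for every evsel with an fd on this
 * cpu/thread: the first fd creates the ring buffer, subsequent fds are
 * redirected into it with PERF_EVENT_IOC_SET_OUTPUT. Each fd is also added
 * to the pollfd array and, if needed, its sample ID is recorded.
 */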
static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	       int idx, struct perf_mmap_param *mp, int cpu_idx,
	       int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
{
	struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
	struct perf_evsel *evsel;
	int revent;

	perf_evlist__for_each_entry(evlist, evsel) {
		bool overwrite = evsel->attr.write_backward;
		enum fdarray_flags flgs;
		struct perf_mmap *map;
		int *output, fd, cpu;

		if (evsel->system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		map = ops->get(evlist, overwrite, idx);
		if (map == NULL)
			return -ENOMEM;

		if (overwrite) {
			mp->prot = PROT_READ;
			output   = _output_overwrite;
		} else {
			mp->prot = PROT_READ | PROT_WRITE;
			output   = _output;
		}

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			/*
			 * The last one will be done at perf_mmap__consume(), so that we
			 * make sure we don't prevent tools from consuming every last event in
			 * the ring buffer.
			 *
			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
			 * anymore, but the last events for it are still in the ring buffer,
			 * waiting to be consumed.
			 *
			 * Tools can choose to ignore this at their own discretion, but the
			 * evlist layer can't just drop it when filtering events in
			 * perf_evlist__filter_pollfd().
			 */
			refcount_set(&map->refcnt, 2);

			if (ops->idx)
				ops->idx(evlist, evsel, mp, idx);

			/* Debug message used by test scripts */
			pr_debug("idx %d: mmapping fd %d\n", idx, *output);
			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
				return -1;

			*nr_mmaps += 1;

			if (!idx)
				perf_evlist__set_mmap_first(evlist, map, overwrite);
		} else {
			/* Debug message used by test scripts */
			pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(map);
		}

		revent = !overwrite ? POLLIN : 0;

		flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
		if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
			perf_mmap__put(map);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
		}
	}

	return 0;
}

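/*
 * Used when the cpu map is empty or contains the "any CPU" value: one mmap
 * per thread, then one per real CPU (skipping the -1 entry at index 0) for
 * any system-wide events.
 */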
static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
		struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus    = perf_cpu_map__nr(evlist->all_cpus);
	int cpu, thread, idx = 0;
	int nr_mmaps = 0;

	pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
		 __func__, nr_cpus, nr_threads);

	/* per-thread mmaps */
	for (thread = 0; thread < nr_threads; thread++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	/* system-wide mmaps i.e. per-cpu */
	for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

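/* One mmap per CPU, shared by all threads monitored on that CPU. */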
static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	     struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus    = perf_cpu_map__nr(evlist->all_cpus);
	int nr_mmaps = 0;
	int cpu, thread;

	pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
					   thread, &output, &output_overwrite, &nr_mmaps))
				goto out_unmap;
		}
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
	int nr_mmaps;

	/* One for each CPU */
	nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
	if (perf_cpu_map__has_any_cpu_or_is_empty(evlist->all_cpus)) {
		/* Plus one for each thread */
		nr_mmaps += perf_thread_map__nr(evlist->threads);
		/* Minus the per-thread CPU (-1) */
		nr_mmaps -= 1;
	}

	return nr_mmaps;
}

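/*
 * Common mmap code, parameterized by the ops callbacks: perf_evlist__mmap()
 * below passes libperf's defaults, while tools may supply their own "get"
 * and "mmap" callbacks to layer extra handling on top.
 */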
int perf_evlist__mmap_ops(struct perf_evlist *evlist,
			  struct perf_evlist_mmap_ops *ops,
			  struct perf_mmap_param *mp)
{
	const struct perf_cpu_map *cpus = evlist->all_cpus;
	struct perf_evsel *evsel;

	if (!ops || !ops->get || !ops->mmap)
		return -EINVAL;

	mp->mask = evlist->mmap_len - page_size - 1;

	evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

	perf_evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
			return -ENOMEM;
	}

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
		return mmap_per_thread(evlist, ops, mp);

	return mmap_per_cpu(evlist, ops, mp);
}

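/*
 * Default mmap of every ring buffer with "pages" data pages plus one
 * control page each; pages is expected to be a power of two so that the
 * resulting mp->mask is valid. A minimal consumer sketch, following the
 * pattern of the libperf sampling example (error handling omitted):
 *
 *	struct perf_mmap *map;
 *	union perf_event *event;
 *
 *	perf_evlist__mmap(evlist, 4);
 *	perf_evlist__enable(evlist);
 *	// ... workload runs ...
 *	perf_evlist__disable(evlist);
 *	perf_evlist__for_each_mmap(evlist, map, false) {
 *		if (perf_mmap__read_init(map) < 0)
 *			continue;
 *		while ((event = perf_mmap__read_event(map)) != NULL) {
 *			// process event->header.type and its payload
 *			perf_mmap__consume(map);
 *		}
 *		perf_mmap__read_done(map);
 *	}
 */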
int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
{
	struct perf_mmap_param mp;
	struct perf_evlist_mmap_ops ops = {
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->mmap_len = (pages + 1) * page_size;

	return perf_evlist__mmap_ops(evlist, &ops, &mp);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);
	}

	if (evlist->mmap_ovw) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap_ovw[i]);
	}

	zfree(&evlist->mmap);
	zfree(&evlist->mmap_ovw);
}

struct perf_mmap*
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
		       bool overwrite)
{
	if (map)
		return map->next;

	return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}

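/* Make "leader" the group leader of every evsel on the list and record the member count. */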
void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader)
{
	struct perf_evsel *evsel;
	int n = 0;

	__perf_evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
		n++;
	}
	leader->nr_members = n;
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		struct perf_evsel *first = list_entry(evlist->entries.next,
						struct perf_evsel, node);

		__perf_evlist__set_leader(&evlist->entries, first);
	}
}

int perf_evlist__nr_groups(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int nr_groups = 0;

	perf_evlist__for_each_evsel(evlist, evsel) {
		/*
		 * evsels by default have a nr_members of 1, and they are their
		 * own leader. If the nr_members is >1 then this is an
		 * indication of a group.
		 */
		if (evsel->leader == evsel && evsel->nr_members > 1)
			nr_groups++;
	}
	return nr_groups;
}

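/*
 * Promote an evsel to system wide after it was added, re-propagating its
 * maps if the evlist's maps are already set up.
 */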
void perf_evlist__go_system_wide(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	if (!evsel->system_wide) {
		evsel->system_wide = true;
		if (evlist->needs_map_propagation)
			__perf_evlist__propagate_maps(evlist, evsel);
	}
}