// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "../perf.h"
#include <internal/lib.h> /* page_size */
#include <linux/bitmap.h>

#define MASK_SIZE 1023
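/*
 * Print the bits set in @mask to the debug log, prefixed with @tag.
 * MASK_SIZE bounds the formatted string, so longer masks are truncated.
 */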
void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag)
{
	char buf[MASK_SIZE + 1];
	size_t len;

	len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
	buf[len] = '\0';
	pr_debug("%p: %s mask[%zu]: %s\n", mask, tag, mask->nbits, buf);
}

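/* Length of the ring-buffer mapping: the data pages plus the control page. */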
size_t mmap__mmap_len(struct mmap *map)
{
	return perf_mmap__mmap_len(&map->core);
}

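/*
 * Weak default stubs for AUX area tracing: they do nothing and are
 * overridden by the real implementations when auxtrace support is
 * built in (see util/auxtrace.c).
 */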
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct evlist *evlist __maybe_unused,
					  struct evsel *evsel __maybe_unused,
					  int idx __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT
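/* Asynchronous trace writing is enabled when at least one control block was requested. */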
static int perf_mmap__aio_enabled(struct mmap *map)
{
	return map->aio.nr_cblocks > 0;
}

#ifdef HAVE_LIBNUMA_SUPPORT
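/*
 * Allocate the AIO data buffer with mmap() rather than malloc() so that
 * perf_mmap__aio_bind() below can mbind() its pages to a NUMA node.
 */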
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}

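/*
 * Bind the pages of the idx'th AIO buffer to the NUMA node of @cpu, so
 * the buffer stays local to the CPU whose mmap is being read.  A no-op
 * under system-wide affinity or on single-node machines.
 */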
static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long *node_mask;
	unsigned long node_index;
	int err = 0;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = mmap__mmap_len(map);
		node_index = cpu__get_node(cpu);
		node_mask = bitmap_zalloc(node_index + 1);
		if (!node_mask) {
			pr_err("Failed to allocate node mask for mbind: error %m\n");
			return -1;
		}
		__set_bit(node_index, node_mask);
		/*
		 * maxnode is node_index + 1 + 1 because the kernel
		 * decrements maxnode internally before reading the mask,
		 * so one extra bit of headroom is needed to cover
		 * bit node_index.
		 */
		if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
				data, data + mmap_len, node_index);
			err = -1;
		}
		bitmap_free(node_mask);
	}

	return err;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
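/*
 * Without libnuma there is nothing to bind: fall back to plain heap
 * allocation and make the bind operation a no-op.
 */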
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = malloc(mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
		struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif /* HAVE_LIBNUMA_SUPPORT */

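/*
 * Set up asynchronous trace writing: allocate one AIO control block and
 * one data buffer per requested cblock, bind each buffer to the mmap's
 * NUMA node and assign descending aio priorities.
 */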
static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m\n");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * Use a cblock.aio_fildes value different from -1
			 * to denote a started aio write operation on the
			 * cblock, so it requires an explicit
			 * record__aio_sync() call before the cblock may
			 * be reused again.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with decreasing priority deltas
			 * to have faster aio write system calls, because
			 * queued requests are kept in separate per-priority
			 * queues and adding a new request will iterate
			 * through shorter per-priority lists. Blocks with
			 * numbers higher than _SC_AIO_PRIO_DELTA_MAX go
			 * with priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}

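/* Tear down everything perf_mmap__aio_mmap() allocated. */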
static void perf_mmap__aio_munmap(struct mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	zfree(&map->aio.data);	/* zfree() handles a NULL pointer */
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
	return 0;
}

static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif /* HAVE_AIO_SUPPORT */

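/*
 * Release the resources attached to this mmap: the affinity mask, the
 * zstd state, the AIO buffers, the compressed-data buffer and the AUX
 * area mapping.
 */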
void mmap__munmap(struct mmap *map)
{
	bitmap_free(map->affinity_mask.bits);

#ifndef PYTHON_PERF
	zstd_fini(&map->zstd_data);
#endif

	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
		munmap(map->data, mmap__mmap_len(map));
		map->data = NULL;
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

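/* Set a bit in @mask for every online CPU that belongs to NUMA node @node. */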
static void build_node_mask(int node, struct mmap_cpu_mask *mask)
{
	int idx, nr_cpus;
	struct perf_cpu cpu;
	const struct perf_cpu_map *cpu_map = NULL;

	cpu_map = cpu_map__online();
	if (!cpu_map)
		return;

	nr_cpus = perf_cpu_map__nr(cpu_map);
	for (idx = 0; idx < nr_cpus; idx++) {
		cpu = perf_cpu_map__cpu(cpu_map, idx); /* map the cpu_map index to an online cpu */
		if (cpu__get_node(cpu) == node)
			__set_bit(cpu.cpu, mask->bits);
	}
}

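/*
 * Build the affinity mask used to migrate the tool thread close to this
 * buffer before reading it: all CPUs of the buffer's NUMA node for
 * PERF_AFFINITY_NODE, or just the mmapped CPU for PERF_AFFINITY_CPU.
 */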
static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
	map->affinity_mask.nbits = cpu__max_cpu().cpu;
	map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
	if (!map->affinity_mask.bits)
		return -1;

	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		__set_bit(map->core.cpu.cpu, map->affinity_mask.bits);

	return 0;
}

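/*
 * Map the perf event ring buffer for @fd and set up the optional extras:
 * the affinity mask, the zstd compressor, a bounce buffer for compressed
 * data when AIO is not in use, the AUX area mapping and the AIO state.
 */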
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu cpu)
{
	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		return -1;
	}

	if (mp->affinity != PERF_AFFINITY_SYS &&
	    perf_mmap__setup_affinity_mask(map, mp)) {
		pr_debug2("failed to alloc mmap affinity mask, error %d\n",
			  errno);
		return -1;
	}

	if (verbose == 2)
		mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");

	map->core.flush = mp->flush;

#ifndef PYTHON_PERF
	if (zstd_init(&map->zstd_data, mp->comp_level)) {
		pr_debug2("failed to init mmap compressor, error %d\n", errno);
		return -1;
	}
#endif

	if (mp->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
				  errno);
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->core.base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}

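/*
 * Drain the ring buffer through the @push callback.  When the unread
 * region wraps around the end of the buffer it is pushed as two chunks:
 * first from the read position to the end of the buffer, then from the
 * start of the buffer to the write position.
 */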
int perf_mmap__push(struct mmap *md, void *to,
		    int push(struct mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(&md->core);
	unsigned char *data = md->core.base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(&md->core);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->core.end - md->core.start;

	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
		/* Wrapped: push the chunk up to the end of the buffer first. */
		buf = &data[md->core.start & md->core.mask];
		size = md->core.mask + 1 - (md->core.start & md->core.mask);
		md->core.start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->core.start & md->core.mask];
	size = md->core.end - md->core.start;
	md->core.start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->core.prev = head;
	perf_mmap__consume(&md->core);
out:
	return rc;
}

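/* Deep-copy @original into @clone; the caller owns the new bitmap. */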
int mmap_cpu_mask__duplicate(struct mmap_cpu_mask *original, struct mmap_cpu_mask *clone)
{
	clone->nbits = original->nbits;
	clone->bits  = bitmap_zalloc(original->nbits);
	if (!clone->bits)
		return -ENOMEM;

	memcpy(clone->bits, original->bits, MMAP_CPU_MASK_BYTES(original));
	return 0;
}