// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <stdlib.h>

#include "../../../util/event.h"
#include "../../../util/synthetic-events.h"
#include "../../../util/machine.h"
#include "../../../util/tool.h"
#include "../../../util/map.h"
#include "../../../util/debug.h"
#include "util/sample.h"

#if defined(__x86_64__)

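/*
 * Arguments bundled up for the maps__for_each_map() callback below.
 */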
struct perf_event__synthesize_extra_kmaps_cb_args {
	struct perf_tool *tool;
	perf_event__handler_t process;
	struct machine *machine;
	union perf_event *event;
};

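/*
 * Synthesize a PERF_RECORD_MMAP event for a single extra kernel map,
 * such as the x86-64 syscall entry trampolines, so that addresses
 * falling within these maps can be resolved.
 */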
static int perf_event__synthesize_extra_kmaps_cb(struct map *map, void *data)
{
	struct perf_event__synthesize_extra_kmaps_cb_args *args = data;
	union perf_event *event = args->event;
	struct kmap *kmap;
	size_t size;

	if (!__map__is_extra_kernel_map(map))
		return 0;

	kmap = map__kmap(map);

	size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
		      PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
		      args->machine->id_hdr_size;

	memset(event, 0, size);

	event->mmap.header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(args->machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	event->mmap.header.size = size;

	event->mmap.start = map__start(map);
	event->mmap.len   = map__size(map);
	event->mmap.pgoff = map__pgoff(map);
	event->mmap.pid   = args->machine->pid;

	strlcpy(event->mmap.filename, kmap->name, PATH_MAX);

	if (perf_tool__process_synth_event(args->tool, event, args->machine, args->process) != 0)
		return -1;

	return 0;
}

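/*
 * Walk the kernel maps and synthesize an mmap event for every extra
 * kernel map. The event buffer is sized for the largest case: struct
 * mmap_event embeds a PATH_MAX filename, plus the machine's ID header.
 */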
int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	int rc;
	struct maps *kmaps = machine__kernel_maps(machine);
	struct perf_event__synthesize_extra_kmaps_cb_args args = {
		.tool = tool,
		.process = process,
		.machine = machine,
		.event = zalloc(sizeof(args.event->mmap) + machine->id_hdr_size),
	};

	if (!args.event) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for extra kernel maps\n");
		return -1;
	}

	rc = maps__for_each_map(kmaps, perf_event__synthesize_extra_kmaps_cb, &args);

	free(args.event);
	return rc;
}

#endif

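/*
 * With PERF_SAMPLE_WEIGHT the whole 64-bit value is the weight. With
 * PERF_SAMPLE_WEIGHT_STRUCT, x86 splits it into three fields:
 *
 *   bits  0-31  var1_dw  weight
 *   bits 32-47  var2_w   instruction latency
 *   bits 48-63  var3_w   retire latency
 */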
void arch_perf_parse_sample_weight(struct perf_sample *data,
				   const __u64 *array, u64 type)
{
	union perf_sample_weight weight;

	weight.full = *array;
	if (type & PERF_SAMPLE_WEIGHT)
		data->weight = weight.full;
	else {
		data->weight = weight.var1_dw;
		data->ins_lat = weight.var2_w;
		data->retire_lat = weight.var3_w;
	}
}

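/*
 * Inverse of arch_perf_parse_sample_weight(): pack the weight,
 * instruction latency and retire latency back into a single 64-bit
 * value when the event used PERF_SAMPLE_WEIGHT_STRUCT.
 */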
void arch_perf_synthesize_sample_weight(const struct perf_sample *data,
					__u64 *array, u64 type)
{
	*array = data->weight;

	if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
		*array &= 0xffffffff;
		*array |= ((u64)data->ins_lat << 32);
		*array |= ((u64)data->retire_lat << 48);
	}
}

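/*
 * Replace the generic "Pipeline Stage Cycle" column headers with the
 * x86-specific "Retire Latency" wording.
 */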
const char *arch_perf_header_entry(const char *se_header)
{
	if (!strcmp(se_header, "Local Pipeline Stage Cycle"))
		return "Local Retire Latency";
	else if (!strcmp(se_header, "Pipeline Stage Cycle"))
		return "Retire Latency";

	return se_header;
}

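/* The pipeline stage cycle sort keys are supported on x86. */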
int arch_support_sort_key(const char *sort_key)
{
	if (!strcmp(sort_key, "p_stage_cyc"))
		return 1;
	if (!strcmp(sort_key, "local_p_stage_cyc"))
		return 1;
	return 0;
}