// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Davidlohr Bueso.
 */

/* For memset() and the pthread APIs */
#include <string.h>
#include <pthread.h>

#include <signal.h>
#include <unistd.h>		/* usleep(), sleep(), getpid() */
#include "../util/mutex.h"
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <perf/cpumap.h>
#include "bench.h"
#include "futex.h"

#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/mman.h>
struct worker {
	int tid;
	u_int32_t *futex;
	pthread_t thread;
	unsigned long ops;
};

static u_int32_t global_futex = 0;
static struct worker *worker;
static bool done = false;
static int futex_flag = 0;
static struct mutex thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
static struct cond thread_parent, thread_worker;

static struct bench_futex_parameters params = {
	.runtime  = 10,
};

static const struct option options[] = {
	OPT_UINTEGER('t', "threads", &params.nthreads, "Specify number of threads"),
	OPT_UINTEGER('r', "runtime", &params.runtime, "Specify runtime (in seconds)"),
	OPT_BOOLEAN( 'M', "multi",   &params.multi, "Use multiple futexes"),
	OPT_BOOLEAN( 's', "silent",  &params.silent, "Silent mode: do not display data/details"),
	OPT_BOOLEAN( 'm', "mlockall", &params.mlockall, "Lock all current and future memory"),
	OPT_BOOLEAN( 'S', "shared",  &params.fshared, "Use shared futexes instead of private ones"),
	OPT_END()
};

static const char * const bench_futex_lock_pi_usage[] = {
	"perf bench futex lock-pi <options>",
	NULL
};

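/* Report the mean per-thread throughput (ops/sec) and its relative stddev. */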
static void print_summary(void)
{
	unsigned long avg = avg_stats(&throughput_stats);
	double stddev = stddev_stats(&throughput_stats);

	printf("%sAveraged %lu operations/sec (+- %.2f%%), total secs = %d\n",
	       !params.silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
	       (int)bench__runtime.tv_sec);
}

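/*
 * SIGINT handler, also called directly once the requested runtime elapses:
 * tell all workers to stop and record the total elapsed benchmark time.
 */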
static void toggle_done(int sig __maybe_unused,
			siginfo_t *info __maybe_unused,
			void *uc __maybe_unused)
{
	/* inform all threads that we're done for the day */
	done = true;
	gettimeofday(&bench__end, NULL);
	timersub(&bench__end, &bench__start, &bench__runtime);
}

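/*
 * Worker body: wait for the common start signal, then repeatedly acquire
 * and release the PI futex until toggle_done() flips 'done', counting one
 * op per completed lock/unlock pair.
 */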
static void *workerfn(void *arg)
{
	struct worker *w = (struct worker *) arg;
	unsigned long ops = w->ops;

	mutex_lock(&thread_lock);
	threads_starting--;
	if (!threads_starting)
		cond_signal(&thread_parent);
	cond_wait(&thread_worker, &thread_lock);
	mutex_unlock(&thread_lock);

	do {
		int ret;
	again:
		ret = futex_lock_pi(w->futex, NULL, futex_flag);

		if (ret) { /* handle lock acquisition */
			if (!params.silent)
				warn("thread %d: Could not lock pi-lock for %p (%d)",
				     w->tid, w->futex, ret);
			if (done)
				break;

			goto again;
		}

		usleep(1); /* hold the lock briefly to keep it contended */
		ret = futex_unlock_pi(w->futex, futex_flag);
		if (ret && !params.silent)
			warn("thread %d: Could not unlock pi-lock for %p (%d)",
			     w->tid, w->futex, ret);
		ops++; /* account for thread's share of work */
	} while (!done);

	w->ops = ops;
	return NULL;
}

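/*
 * Spawn params.nthreads workers, each pinned round-robin to one of the
 * online CPUs, pointing each at either the shared global futex or its own
 * private futex when -M/--multi is given.
 */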
static void create_threads(struct worker *w, struct perf_cpu_map *cpu)
{
	cpu_set_t *cpuset;
	unsigned int i;
	int nrcpus = perf_cpu_map__nr(cpu);
	size_t size;

	threads_starting = params.nthreads;

	cpuset = CPU_ALLOC(nrcpus);
	BUG_ON(!cpuset);
	size = CPU_ALLOC_SIZE(nrcpus);

	for (i = 0; i < params.nthreads; i++) {
		pthread_attr_t thread_attr;

		pthread_attr_init(&thread_attr);
		worker[i].tid = i;

		if (params.multi) {
			worker[i].futex = calloc(1, sizeof(u_int32_t));
			if (!worker[i].futex)
				err(EXIT_FAILURE, "calloc");
		} else {
			worker[i].futex = &global_futex;
		}

		CPU_ZERO_S(size, cpuset);
		CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);

		if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
			CPU_FREE(cpuset);
			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
		}

		if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i])) {
			CPU_FREE(cpuset);
			err(EXIT_FAILURE, "pthread_create");
		}
		pthread_attr_destroy(&thread_attr);
	}
	CPU_FREE(cpuset);
}

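/*
 * Benchmark entry point: parse options, start the workers, let them run for
 * params.runtime seconds (or until SIGINT), then join them and report the
 * per-thread and averaged lock/unlock throughput.
 */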
int bench_futex_lock_pi(int argc, const char **argv)
{
	int ret = 0;
	unsigned int i;
	struct sigaction act;
	struct perf_cpu_map *cpu;

	argc = parse_options(argc, argv, options, bench_futex_lock_pi_usage, 0);
	if (argc)
		goto err;

	cpu = perf_cpu_map__new_online_cpus();
	if (!cpu)
		err(EXIT_FAILURE, "perf_cpu_map__new_online_cpus");

	memset(&act, 0, sizeof(act));
	sigfillset(&act.sa_mask);
	act.sa_sigaction = toggle_done;
	sigaction(SIGINT, &act, NULL);

	if (params.mlockall) {
		if (mlockall(MCL_CURRENT | MCL_FUTURE))
			err(EXIT_FAILURE, "mlockall");
	}

	if (!params.nthreads)
		params.nthreads = perf_cpu_map__nr(cpu);

	worker = calloc(params.nthreads, sizeof(*worker));
	if (!worker)
		err(EXIT_FAILURE, "calloc");

	if (!params.fshared)
		futex_flag = FUTEX_PRIVATE_FLAG;

	printf("Run summary [PID %d]: %u threads doing pi lock/unlock pairing for %u secs.\n\n",
	       getpid(), params.nthreads, params.runtime);

	init_stats(&throughput_stats);
	mutex_init(&thread_lock);
	cond_init(&thread_parent);
	cond_init(&thread_worker);

	threads_starting = params.nthreads;
	gettimeofday(&bench__start, NULL);

	create_threads(worker, cpu);

	/* wait until every worker has checked in, then release them all at once */
	mutex_lock(&thread_lock);
	while (threads_starting)
		cond_wait(&thread_parent, &thread_lock);
	cond_broadcast(&thread_worker);
	mutex_unlock(&thread_lock);

	sleep(params.runtime);
	toggle_done(0, NULL, NULL);

	for (i = 0; i < params.nthreads; i++) {
		ret = pthread_join(worker[i].thread, NULL);
		if (ret)
			errx(EXIT_FAILURE, "pthread_join: %s", strerror(ret));
	}

	/* cleanup & report results */
	cond_destroy(&thread_parent);
	cond_destroy(&thread_worker);
	mutex_destroy(&thread_lock);

	for (i = 0; i < params.nthreads; i++) {
		unsigned long t = bench__runtime.tv_sec > 0 ?
			worker[i].ops / bench__runtime.tv_sec : 0;

		update_stats(&throughput_stats, t);
		if (!params.silent)
			printf("[thread %3d] futex: %p [ %lu ops/sec ]\n",
			       worker[i].tid, worker[i].futex, t);

		if (params.multi)
			zfree(&worker[i].futex);
	}

	print_summary();

	free(worker);
	perf_cpu_map__put(cpu);
	return ret;
err:
	usage_with_options(bench_futex_lock_pi_usage, options);
	exit(EXIT_FAILURE);
}