// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2020 Intel Corporation. All rights rsvd. */

#include <linux/sched/task.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "idxd.h"
#include "perfmon.h"

static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
			    char *buf);

static cpumask_t		perfmon_dsa_cpu_mask;
static bool			cpuhp_set_up;
static enum cpuhp_state		cpuhp_slot;

/*
 * perf userspace reads this attribute to determine which cpus to open
 * counters on.  It's connected to perfmon_dsa_cpu_mask, which is
 * maintained by the cpu hotplug handlers.
 */
static DEVICE_ATTR_RO(cpumask);

static struct attribute *perfmon_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = perfmon_cpumask_attrs,
};

/*
 * These attributes specify the bits in the config word that the perf
 * syscall uses to pass the event ids and categories to perfmon.
 */
DEFINE_PERFMON_FORMAT_ATTR(event_category, "config:0-3");
DEFINE_PERFMON_FORMAT_ATTR(event, "config:4-31");

/*
 * These attributes specify the bits in the config1 word that the perf
 * syscall uses to pass filter data to perfmon.
 */
DEFINE_PERFMON_FORMAT_ATTR(filter_wq, "config1:0-31");
DEFINE_PERFMON_FORMAT_ATTR(filter_tc, "config1:32-39");
DEFINE_PERFMON_FORMAT_ATTR(filter_pgsz, "config1:40-43");
DEFINE_PERFMON_FORMAT_ATTR(filter_sz, "config1:44-51");
DEFINE_PERFMON_FORMAT_ATTR(filter_eng, "config1:52-59");

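/*
 * Example (illustrative only): with the format attributes above, a
 * counting session on device dsa0 could be requested from user space
 * with something like
 *
 *   perf stat -e dsa0/event=0x1,event_category=0x1/ -a -- sleep 1
 *
 * where the event and event_category values depend on the events the
 * hardware actually implements, and the filter_* terms are optional.
 */
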
#define PERFMON_FILTERS_START	2
#define PERFMON_FILTERS_MAX	5

static struct attribute *perfmon_format_attrs[] = {
	&format_attr_idxd_event_category.attr,
	&format_attr_idxd_event.attr,
	&format_attr_idxd_filter_wq.attr,
	&format_attr_idxd_filter_tc.attr,
	&format_attr_idxd_filter_pgsz.attr,
	&format_attr_idxd_filter_sz.attr,
	&format_attr_idxd_filter_eng.attr,
	NULL,
};

static struct attribute_group perfmon_format_attr_group = {
	.name = "format",
	.attrs = perfmon_format_attrs,
};

static const struct attribute_group *perfmon_attr_groups[] = {
	&perfmon_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &perfmon_dsa_cpu_mask);
}

static bool is_idxd_event(struct idxd_pmu *idxd_pmu, struct perf_event *event)
{
	return &idxd_pmu->pmu == event->pmu;
}

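/*
 * Add the leader event and, if do_grp is set, its sibling events that
 * belong to this PMU and aren't in the OFF state to the PMU's event
 * list.  Returns the new number of collected events, or -EINVAL if
 * the device doesn't have enough counters for all of them.
 */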
static int perfmon_collect_events(struct idxd_pmu *idxd_pmu,
				  struct perf_event *leader,
				  bool do_grp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = idxd_pmu->n_counters;
	n = idxd_pmu->n_events;

	if (n >= max_count)
		return -EINVAL;

	if (is_idxd_event(idxd_pmu, leader)) {
		idxd_pmu->event_list[n] = leader;
		idxd_pmu->event_list[n]->hw.idx = n;
		n++;
	}

	if (!do_grp)
		return n;

	for_each_sibling_event(event, leader) {
		if (!is_idxd_event(idxd_pmu, event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		idxd_pmu->event_list[n] = event;
		idxd_pmu->event_list[n]->hw.idx = n;
		n++;
	}

	return n;
}

static void perfmon_assign_hw_event(struct idxd_pmu *idxd_pmu,
				    struct perf_event *event, int idx)
{
	struct idxd_device *idxd = idxd_pmu->idxd;
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->config_base = ioread64(CNTRCFG_REG(idxd, idx));
	hwc->event_base = ioread64(CNTRCFG_REG(idxd, idx));
}

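/*
 * Find a free hardware counter by scanning used_mask and claim it.
 * Returns the counter index, or -EINVAL if every counter is already
 * in use.
 */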
static int perfmon_assign_event(struct idxd_pmu *idxd_pmu,
				struct perf_event *event)
{
	int i;

	for (i = 0; i < IDXD_PMU_EVENT_MAX; i++)
		if (!test_and_set_bit(i, idxd_pmu->used_mask))
			return i;

	return -EINVAL;
}

/*
 * Check whether there are enough counters for all the events in the
 * group to actually be scheduled at the same time.
 *
 * To do this, create a fake idxd_pmu object so the event collection
 * and assignment functions can be used without affecting the internal
 * state of the real idxd_pmu object.
 */
static int perfmon_validate_group(struct idxd_pmu *pmu,
				  struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct idxd_pmu *fake_pmu;
	int i, ret = 0, n, idx;

	fake_pmu = kzalloc(sizeof(*fake_pmu), GFP_KERNEL);
	if (!fake_pmu)
		return -ENOMEM;

	fake_pmu->pmu.name = pmu->pmu.name;
	fake_pmu->n_counters = pmu->n_counters;

	n = perfmon_collect_events(fake_pmu, leader, true);
	if (n < 0) {
		ret = n;
		goto out;
	}

	fake_pmu->n_events = n;
	n = perfmon_collect_events(fake_pmu, event, false);
	if (n < 0) {
		ret = n;
		goto out;
	}

	fake_pmu->n_events = n;

	for (i = 0; i < n; i++) {
		event = fake_pmu->event_list[i];

		idx = perfmon_assign_event(fake_pmu, event);
		if (idx < 0) {
			ret = idx;
			goto out;
		}
	}
out:
	kfree(fake_pmu);

	return ret;
}

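/*
 * perf core callback: validate a new event for this PMU.  Sampling
 * and per-task (cpu < 0) events are rejected, the event is bound to
 * the PMU's designated reader CPU, and group events are checked
 * against the number of available counters.
 */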
static int perfmon_pmu_event_init(struct perf_event *event)
{
	struct idxd_device *idxd;
	int ret = 0;

	idxd = event_to_idxd(event);
	event->hw.idx = -1;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* sampling not supported */
	if (event->attr.sample_period)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->pmu != &idxd->idxd_pmu->pmu)
		return -EINVAL;

	event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd));
	event->cpu = idxd->idxd_pmu->cpu;
	event->hw.config = event->attr.config;

	if (event->group_leader != event)
		/* non-group events have themselves as leader */
		ret = perfmon_validate_group(idxd->idxd_pmu, event);

	return ret;
}

static inline u64 perfmon_pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct idxd_device *idxd;
	int cntr = hwc->idx;

	idxd = event_to_idxd(event);

	return ioread64(CNTRDATA_REG(idxd, cntr));
}

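/*
 * Fold the current hardware count into event->count.  The counter is
 * only counter_width bits wide, so both samples are shifted up to bit
 * 63 before subtracting; shifting the difference back down discards
 * any bits above the counter width and handles wraparound.
 */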
static void perfmon_pmu_event_update(struct perf_event *event)
{
	struct idxd_device *idxd = event_to_idxd(event);
	u64 prev_raw_count, new_raw_count, delta, p, n;
	int shift = 64 - idxd->idxd_pmu->counter_width;
	struct hw_perf_event *hwc = &event->hw;

	prev_raw_count = local64_read(&hwc->prev_count);
	do {
		new_raw_count = perfmon_pmu_read_counter(event);
	} while (!local64_try_cmpxchg(&hwc->prev_count,
				      &prev_raw_count, new_raw_count));
	n = (new_raw_count << shift);
	p = (prev_raw_count << shift);

	delta = ((n - p) >> shift);

	local64_add(delta, &event->count);
}

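/*
 * Handle a counter overflow notification from the device: fold each
 * overflowed counter into its event and clear the corresponding
 * OVFSTATUS bit, re-reading OVFSTATUS until no overflows remain
 * (bounded by max_loop).
 */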
void perfmon_counter_overflow(struct idxd_device *idxd)
{
	int i, n_counters, max_loop = OVERFLOW_SIZE;
	struct perf_event *event;
	unsigned long ovfstatus;

	n_counters = min(idxd->idxd_pmu->n_counters, OVERFLOW_SIZE);

	ovfstatus = ioread32(OVFSTATUS_REG(idxd));

	/*
	 * While updating overflowed counters, other counters behind
	 * them could overflow and be missed in a given pass.
	 * Normally this could happen at most n_counters times, but in
	 * theory a tiny counter width could result in continual
	 * overflows and endless looping.  max_loop provides a
	 * failsafe in that highly unlikely case.
	 */
	while (ovfstatus && max_loop--) {
		/* Figure out which counter(s) overflowed */
		for_each_set_bit(i, &ovfstatus, n_counters) {
			unsigned long ovfstatus_clear = 0;

			/* Update event->count for overflowed counter */
			event = idxd->idxd_pmu->event_list[i];
			perfmon_pmu_event_update(event);
			/* Writing 1 to OVFSTATUS bit clears it */
			set_bit(i, &ovfstatus_clear);
			iowrite32(ovfstatus_clear, OVFSTATUS_REG(idxd));
		}

		ovfstatus = ioread32(OVFSTATUS_REG(idxd));
	}

	/*
	 * Should never happen.  If it does, it means one or more
	 * counters wrapped around twice while this handler was
	 * running.
	 */
	WARN_ON_ONCE(ovfstatus);
}

static inline void perfmon_reset_config(struct idxd_device *idxd)
{
	iowrite32(CONFIG_RESET, PERFRST_REG(idxd));
	iowrite32(0, OVFSTATUS_REG(idxd));
	iowrite32(0, PERFFRZ_REG(idxd));
}

static inline void perfmon_reset_counters(struct idxd_device *idxd)
{
	iowrite32(CNTR_RESET, PERFRST_REG(idxd));
}

static inline void perfmon_reset(struct idxd_device *idxd)
{
	perfmon_reset_config(idxd);
	perfmon_reset_counters(idxd);
}

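/*
 * perf core callback: program and enable the counter backing this
 * event.  The event category/encoding come from attr.config and the
 * optional filters from attr.config1; only filters advertised in
 * supported_filters are written to the hardware.
 */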
static void perfmon_pmu_event_start(struct perf_event *event, int mode)
{
	u32 flt_wq, flt_tc, flt_pg_sz, flt_xfer_sz, flt_eng = 0;
	u64 cntr_cfg, cntrdata, event_enc, event_cat = 0;
	struct hw_perf_event *hwc = &event->hw;
	union filter_cfg flt_cfg;
	union event_cfg event_cfg;
	struct idxd_device *idxd;
	int cntr;

	idxd = event_to_idxd(event);

	event->hw.idx = hwc->idx;
	cntr = hwc->idx;

	/* Obtain event category and event value from user space */
	event_cfg.val = event->attr.config;
	flt_cfg.val = event->attr.config1;
	event_cat = event_cfg.event_cat;
	event_enc = event_cfg.event_enc;

	/* Obtain filter configuration from user space */
	flt_wq = flt_cfg.wq;
	flt_tc = flt_cfg.tc;
	flt_pg_sz = flt_cfg.pg_sz;
	flt_xfer_sz = flt_cfg.xfer_sz;
	flt_eng = flt_cfg.eng;

	if (flt_wq && test_bit(FLT_WQ, &idxd->idxd_pmu->supported_filters))
		iowrite32(flt_wq, FLTCFG_REG(idxd, cntr, FLT_WQ));
	if (flt_tc && test_bit(FLT_TC, &idxd->idxd_pmu->supported_filters))
		iowrite32(flt_tc, FLTCFG_REG(idxd, cntr, FLT_TC));
	if (flt_pg_sz && test_bit(FLT_PG_SZ, &idxd->idxd_pmu->supported_filters))
		iowrite32(flt_pg_sz, FLTCFG_REG(idxd, cntr, FLT_PG_SZ));
	if (flt_xfer_sz && test_bit(FLT_XFER_SZ, &idxd->idxd_pmu->supported_filters))
		iowrite32(flt_xfer_sz, FLTCFG_REG(idxd, cntr, FLT_XFER_SZ));
	if (flt_eng && test_bit(FLT_ENG, &idxd->idxd_pmu->supported_filters))
		iowrite32(flt_eng, FLTCFG_REG(idxd, cntr, FLT_ENG));

	/* Read the start value */
	cntrdata = ioread64(CNTRDATA_REG(idxd, cntr));
	local64_set(&event->hw.prev_count, cntrdata);

	/* Set counter to event/category */
	cntr_cfg = event_cat << CNTRCFG_CATEGORY_SHIFT;
	cntr_cfg |= event_enc << CNTRCFG_EVENT_SHIFT;
	/* Set interrupt on overflow and counter enable bits */
	cntr_cfg |= (CNTRCFG_IRQ_OVERFLOW | CNTRCFG_ENABLE);

	iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));
}

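/*
 * perf core callback: remove the event from the PMU's event list,
 * disable the counter backing it (folding in the final count when
 * PERF_EF_UPDATE is set) and release the counter.
 */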
static void perfmon_pmu_event_stop(struct perf_event *event, int mode)
{
	struct hw_perf_event *hwc = &event->hw;
	struct idxd_device *idxd;
	int i, cntr = hwc->idx;
	u64 cntr_cfg;

	idxd = event_to_idxd(event);

	/* remove this event from event list */
	for (i = 0; i < idxd->idxd_pmu->n_events; i++) {
		if (event != idxd->idxd_pmu->event_list[i])
			continue;

		for (++i; i < idxd->idxd_pmu->n_events; i++)
			idxd->idxd_pmu->event_list[i - 1] = idxd->idxd_pmu->event_list[i];
		--idxd->idxd_pmu->n_events;
		break;
	}

	cntr_cfg = ioread64(CNTRCFG_REG(idxd, cntr));
	cntr_cfg &= ~CNTRCFG_ENABLE;
	iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));

	if (mode == PERF_EF_UPDATE)
		perfmon_pmu_event_update(event);

	event->hw.idx = -1;
	clear_bit(cntr, idxd->idxd_pmu->used_mask);
}

static void perfmon_pmu_event_del(struct perf_event *event, int mode)
{
	perfmon_pmu_event_stop(event, PERF_EF_UPDATE);
}

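/*
 * perf core callback: add the event to the PMU, claim a hardware
 * counter for it and, if PERF_EF_START is set, start counting
 * immediately.
 */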
static int perfmon_pmu_event_add(struct perf_event *event, int flags)
{
	struct idxd_device *idxd = event_to_idxd(event);
	struct idxd_pmu *idxd_pmu = idxd->idxd_pmu;
	struct hw_perf_event *hwc = &event->hw;
	int idx, n;

	n = perfmon_collect_events(idxd_pmu, event, false);
	if (n < 0)
		return n;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	idx = perfmon_assign_event(idxd_pmu, event);
	if (idx < 0)
		return idx;

	perfmon_assign_hw_event(idxd_pmu, event, idx);

	if (flags & PERF_EF_START)
		perfmon_pmu_event_start(event, 0);

	idxd_pmu->n_events = n;

	return 0;
}

static void enable_perfmon_pmu(struct idxd_device *idxd)
{
	iowrite32(COUNTER_UNFREEZE, PERFFRZ_REG(idxd));
}

static void disable_perfmon_pmu(struct idxd_device *idxd)
{
	iowrite32(COUNTER_FREEZE, PERFFRZ_REG(idxd));
}

static void perfmon_pmu_enable(struct pmu *pmu)
{
	struct idxd_device *idxd = pmu_to_idxd(pmu);

	enable_perfmon_pmu(idxd);
}

static void perfmon_pmu_disable(struct pmu *pmu)
{
	struct idxd_device *idxd = pmu_to_idxd(pmu);

	disable_perfmon_pmu(idxd);
}

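/*
 * Remove the format attribute for an unsupported filter by shifting
 * the remaining filter attributes (including the terminating NULL)
 * down one slot in perfmon_format_attrs[].
 */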
static void skip_filter(int i)
{
	int j;

	for (j = i; j < PERFMON_FILTERS_MAX; j++)
		perfmon_format_attrs[PERFMON_FILTERS_START + j] =
			perfmon_format_attrs[PERFMON_FILTERS_START + j + 1];
}

static void idxd_pmu_init(struct idxd_pmu *idxd_pmu)
{
	int i;

	for (i = 0; i < PERFMON_FILTERS_MAX; i++) {
		if (!test_bit(i, &idxd_pmu->supported_filters))
			skip_filter(i);
	}

	idxd_pmu->pmu.name		= idxd_pmu->name;
	idxd_pmu->pmu.attr_groups	= perfmon_attr_groups;
	idxd_pmu->pmu.task_ctx_nr	= perf_invalid_context;
	idxd_pmu->pmu.event_init	= perfmon_pmu_event_init;
	idxd_pmu->pmu.pmu_enable	= perfmon_pmu_enable;
	idxd_pmu->pmu.pmu_disable	= perfmon_pmu_disable;
	idxd_pmu->pmu.add		= perfmon_pmu_event_add;
	idxd_pmu->pmu.del		= perfmon_pmu_event_del;
	idxd_pmu->pmu.start		= perfmon_pmu_event_start;
	idxd_pmu->pmu.stop		= perfmon_pmu_event_stop;
	idxd_pmu->pmu.read		= perfmon_pmu_event_update;
	idxd_pmu->pmu.capabilities	= PERF_PMU_CAP_NO_EXCLUDE;
	idxd_pmu->pmu.module		= THIS_MODULE;
}

void perfmon_pmu_remove(struct idxd_device *idxd)
{
	if (!idxd->idxd_pmu)
		return;

	cpuhp_state_remove_instance(cpuhp_slot, &idxd->idxd_pmu->cpuhp_node);
	perf_pmu_unregister(&idxd->idxd_pmu->pmu);
	kfree(idxd->idxd_pmu);
	idxd->idxd_pmu = NULL;
}

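/*
 * cpuhp online callback: if no CPU has been designated yet, make this
 * CPU the reader for the PMU and publish it in perfmon_dsa_cpu_mask.
 */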
static int perf_event_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct idxd_pmu *idxd_pmu;

	idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);

	/* select the first online CPU as the designated reader */
	if (cpumask_empty(&perfmon_dsa_cpu_mask)) {
		cpumask_set_cpu(cpu, &perfmon_dsa_cpu_mask);
		idxd_pmu->cpu = cpu;
	}

	return 0;
}

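/*
 * cpuhp offline callback: if the departing CPU was the designated
 * reader, pick another online CPU and migrate the PMU's perf context
 * to it.
 */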
static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct idxd_pmu *idxd_pmu;
	unsigned int target;

	idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);

	if (!cpumask_test_and_clear_cpu(cpu, &perfmon_dsa_cpu_mask))
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);

	/* migrate events if there is a valid target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
	else
		target = -1;

	perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);

	return 0;
}

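/*
 * Probe the device's perfmon capabilities, allocate and initialize
 * the idxd_pmu, and register it with the perf core and the cpu
 * hotplug state machine.  Returns 0 on success or a negative errno.
 */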
int perfmon_pmu_init(struct idxd_device *idxd)
{
	union idxd_perfcap perfcap;
	struct idxd_pmu *idxd_pmu;
	int rc = -ENODEV;

	/*
	 * perfmon module initialization failed, nothing to do
	 */
	if (!cpuhp_set_up)
		return -ENODEV;

	/*
	 * A perfmon_offset of 0 means perfmon is not supported on this
	 * hardware (a zero counter count is checked for below, after
	 * PERFCAP has been read).
	 */
	if (idxd->perfmon_offset == 0)
		return -ENODEV;

	idxd_pmu = kzalloc(sizeof(*idxd_pmu), GFP_KERNEL);
	if (!idxd_pmu)
		return -ENOMEM;

	idxd_pmu->idxd = idxd;
	idxd->idxd_pmu = idxd_pmu;

	if (idxd->data->type == IDXD_TYPE_DSA) {
		rc = sprintf(idxd_pmu->name, "dsa%d", idxd->id);
		if (rc < 0)
			goto free;
	} else if (idxd->data->type == IDXD_TYPE_IAX) {
		rc = sprintf(idxd_pmu->name, "iax%d", idxd->id);
		if (rc < 0)
			goto free;
	} else {
		goto free;
	}

	perfmon_reset(idxd);

	perfcap.bits = ioread64(PERFCAP_REG(idxd));

	/*
	 * If the total number of perf counters is 0, stop further
	 * registration.  This is necessary to support the driver
	 * running on a guest that does not have perfmon support.
	 */
	if (perfcap.num_perf_counter == 0)
		goto free;

	/* A counter width of 0 means it can't count */
	if (perfcap.counter_width == 0)
		goto free;

	/* Overflow interrupt and counter freeze support must be available */
	if (!perfcap.overflow_interrupt || !perfcap.counter_freeze)
		goto free;

	/* Number of event categories cannot be 0 */
	if (perfcap.num_event_category == 0)
		goto free;

	/*
	 * We don't support per-counter capabilities for now.
	 */
	if (perfcap.cap_per_counter)
		goto free;

	idxd_pmu->n_event_categories = perfcap.num_event_category;
	idxd_pmu->supported_event_categories = perfcap.global_event_category;
	idxd_pmu->per_counter_caps_supported = perfcap.cap_per_counter;

	/* check filter capability.  If 0, then filters are not supported */
	idxd_pmu->supported_filters = perfcap.filter;
	if (perfcap.filter)
		idxd_pmu->n_filters = hweight8(perfcap.filter);

	/* Store the total number of counters and the counter width */
	idxd_pmu->n_counters = perfcap.num_perf_counter;
	idxd_pmu->counter_width = perfcap.counter_width;

	idxd_pmu_init(idxd_pmu);

	rc = perf_pmu_register(&idxd_pmu->pmu, idxd_pmu->name, -1);
	if (rc)
		goto free;

	rc = cpuhp_state_add_instance(cpuhp_slot, &idxd_pmu->cpuhp_node);
	if (rc) {
		perf_pmu_unregister(&idxd->idxd_pmu->pmu);
		goto free;
	}
out:
	return rc;
free:
	kfree(idxd_pmu);
	idxd->idxd_pmu = NULL;

	goto out;
}

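/*
 * Module-level init/exit: set up (and later tear down) the dynamic
 * cpu hotplug state shared by all idxd perfmon instances.
 */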
void __init perfmon_init(void)
{
	int rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					 "driver/dma/idxd/perf:online",
					 perf_event_cpu_online,
					 perf_event_cpu_offline);
	if (WARN_ON(rc < 0))
		return;

	cpuhp_slot = rc;
	cpuhp_set_up = true;
}

void __exit perfmon_exit(void)
{
	if (cpuhp_set_up)
		cpuhp_remove_multi_state(cpuhp_slot);
}