// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support - Processor Activity Instrumentation Facility
 *
 *  Copyright IBM Corp. 2022
 *  Author(s): Thomas Richter <tmricht@linux.ibm.com>
 */
#define KMSG_COMPONENT	"pai_crypto"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>

static debug_info_t *cfm_dbg;
static unsigned int paicrypt_cnt;	/* Size of the mapped counter sets */
					/* extracted with QPACI instruction */

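/* Enabled while at least one PAI crypto event exists */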
DEFINE_STATIC_KEY_FALSE(pai_key);

struct pai_userdata {
	u16 num;
	u64 value;
} __packed;

struct paicrypt_map {
	unsigned long *page;		/* Page for CPU to store counters */
	struct pai_userdata *save;	/* Page to store non-zero counters */
	unsigned int active_events;	/* # of PAI crypto users */
	refcount_t refcnt;		/* Reference count mapped buffers */
	enum paievt_mode mode;		/* Type of event */
	struct perf_event *event;	/* Perf event for sampling */
};

struct paicrypt_mapptr {
	struct paicrypt_map *mapptr;
};

static struct paicrypt_root {		/* Anchor to per CPU data */
	refcount_t refcnt;		/* Overall active events */
	struct paicrypt_mapptr __percpu *mapptr;
} paicrypt_root;

/* Free per CPU data when the last event is removed. */
static void paicrypt_root_free(void)
{
	if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
		free_percpu(paicrypt_root.mapptr);
		paicrypt_root.mapptr = NULL;
	}
	debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
			    refcount_read(&paicrypt_root.refcnt));
}

/*
 * On initialization of first event also allocate per CPU data dynamically.
 * Start with an array of pointers, the array size is the maximum number of
 * CPUs possible, which might be larger than the number of CPUs currently
 * online.
 */
static int paicrypt_root_alloc(void)
{
	if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
		/* The memory is already zeroed. */
		paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
		if (!paicrypt_root.mapptr)
			return -ENOMEM;
		refcount_set(&paicrypt_root.refcnt, 1);
	}
	return 0;
}

/* Release the PMU if event is the last perf event */
static DEFINE_MUTEX(pai_reserve_mutex);

/* Adjust usage counters and remove allocated memory when all users are
 * gone.
 */
static void paicrypt_event_destroy(struct perf_event *event)
{
	struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr,
						 event->cpu);
	struct paicrypt_map *cpump = mp->mapptr;

	static_branch_dec(&pai_key);
	mutex_lock(&pai_reserve_mutex);
	debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
			    " mode %d refcnt %u\n", __func__,
			    event->attr.config, event->cpu,
			    cpump->active_events, cpump->mode,
			    refcount_read(&cpump->refcnt));
	free_page(PAI_SAVE_AREA(event));
	if (refcount_dec_and_test(&cpump->refcnt)) {
		debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
				    __func__, (unsigned long)cpump->page,
				    cpump->save);
		free_page((unsigned long)cpump->page);
		kvfree(cpump->save);
		kfree(cpump);
		mp->mapptr = NULL;
	}
	paicrypt_root_free();
	mutex_unlock(&pai_reserve_mutex);
}

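/* Return the value of counter number nr from the counter page. The
 * page holds the user space counters first; the kernel space copies
 * follow at offset PAI_CRYPTO_MAXCTR.
 */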
static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel)
{
	if (kernel)
		nr += PAI_CRYPTO_MAXCTR;
	return page[nr];
}

/* Read the counter values. Return the value from its location in the
 * counter memory page. For event CRYPTO_ALL sum up all counters.
 */
static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	u64 sum = 0;
	int i;

	if (event->attr.config != PAI_CRYPTO_BASE) {
		return paicrypt_getctr(cpump->page,
				       event->attr.config - PAI_CRYPTO_BASE,
				       kernel);
	}

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = paicrypt_getctr(cpump->page, i, kernel);

		if (!val)
			continue;
		sum += val;
	}
	return sum;
}

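/* Sum up the counters, honoring the exclude_kernel and exclude_user
 * settings of the event.
 */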
static u64 paicrypt_getall(struct perf_event *event)
{
	u64 sum = 0;

	if (!event->attr.exclude_kernel)
		sum += paicrypt_getdata(event, true);
	if (!event->attr.exclude_user)
		sum += paicrypt_getdata(event, false);

	return sum;
}

/* Used to avoid races when checking concurrent access of counting and
 * sampling for crypto events.
 *
 * Only one instance of event pai_crypto/CRYPTO_ALL/ for sampling is
 * allowed and when this event is running, no counting event is allowed.
 * Several counting events are allowed in parallel, but no sampling event
 * is allowed while one (or more) counting events are running.
 *
 * This function is called in process context and it is safe to block.
 * When the event initialization function fails, no other callback will
 * be invoked.
 *
 * Allocate the memory for the event.
 */
static struct paicrypt_map *paicrypt_busy(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	struct paicrypt_map *cpump = NULL;
	struct paicrypt_mapptr *mp;
	int rc;

	mutex_lock(&pai_reserve_mutex);

	/* Allocate root node */
	rc = paicrypt_root_alloc();
	if (rc)
		goto unlock;

	/* Allocate node for this event */
	mp = per_cpu_ptr(paicrypt_root.mapptr, event->cpu);
	cpump = mp->mapptr;
	if (!cpump) {			/* Paicrypt_map allocated? */
		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
		if (!cpump) {
			rc = -ENOMEM;
			goto free_root;
		}
	}

	if (a->sample_period) {		/* Sampling requested */
		if (cpump->mode != PAI_MODE_NONE)
			rc = -EBUSY;	/* ... sampling/counting active */
	} else {			/* Counting requested */
		if (cpump->mode == PAI_MODE_SAMPLING)
			rc = -EBUSY;	/* ... and sampling active */
	}
	/*
	 * This error case triggers when there is a conflict:
	 * Either sampling is requested while counting is already active,
	 * or vice versa. Therefore the struct paicrypt_map for this CPU
	 * already exists or the error could not have occurred. Only adjust
	 * the root node reference count.
	 */
	if (rc)
		goto free_root;

	/* Allocate memory for counter page and counter extraction.
	 * Only the first counting event has to allocate a page.
	 */
	if (cpump->page) {
		refcount_inc(&cpump->refcnt);
		goto unlock;
	}

	rc = -ENOMEM;
	cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
	if (!cpump->page)
		goto free_paicrypt_map;
	cpump->save = kvmalloc_array(paicrypt_cnt + 1,
				     sizeof(struct pai_userdata), GFP_KERNEL);
	if (!cpump->save) {
		free_page((unsigned long)cpump->page);
		cpump->page = NULL;
		goto free_paicrypt_map;
	}

	/* Set mode and reference count */
	rc = 0;
	refcount_set(&cpump->refcnt, 1);
	cpump->mode = a->sample_period ? PAI_MODE_SAMPLING : PAI_MODE_COUNTING;
	mp->mapptr = cpump;
	debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d"
			    " mode %d refcnt %u page %#lx save %p rc %d\n",
			    __func__, a->sample_period, cpump->active_events,
			    cpump->mode, refcount_read(&cpump->refcnt),
			    (unsigned long)cpump->page, cpump->save, rc);
	goto unlock;

free_paicrypt_map:
	kfree(cpump);
	mp->mapptr = NULL;
free_root:
	paicrypt_root_free();

unlock:
	mutex_unlock(&pai_reserve_mutex);
	return rc ? ERR_PTR(rc) : cpump;
}

/* Might be called on different CPU than the one the event is intended for. */
static int paicrypt_event_init(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	struct paicrypt_map *cpump;
	int rc = 0;

	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
		return -ENOENT;
	/* PAI crypto event must be in valid range */
	if (a->config < PAI_CRYPTO_BASE ||
	    a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
		return -EINVAL;
	/* Allow only CPU wide operation, no process context for now. */
	if ((event->attach_state & PERF_ATTACH_TASK) || event->cpu == -1)
		return -ENOENT;
	/* Allow only CRYPTO_ALL for sampling. */
	if (a->sample_period && a->config != PAI_CRYPTO_BASE)
		return -EINVAL;
	/* Get a page to store last counter values for sampling */
	if (a->sample_period) {
		PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
		if (!PAI_SAVE_AREA(event)) {
			rc = -ENOMEM;
			goto out;
		}
	}

	cpump = paicrypt_busy(event);
	if (IS_ERR(cpump)) {
		free_page(PAI_SAVE_AREA(event));
		rc = PTR_ERR(cpump);
		goto out;
	}

	event->destroy = paicrypt_event_destroy;

	if (a->sample_period) {
		a->sample_period = 1;
		a->freq = 0;
		/* Register for paicrypt_sched_task() to be called */
		event->attach_state |= PERF_ATTACH_SCHED_CB;
		/* Add raw data which contains the memory mapped counters */
		a->sample_type |= PERF_SAMPLE_RAW;
		/* Turn off inheritance */
		a->inherit = 0;
	}

	static_branch_inc(&pai_key);
out:
	return rc;
}

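/* Accumulate the delta between the current counter value and the value
 * of the previous read, handling a possible counter wraparound.
 */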
static void paicrypt_read(struct perf_event *event)
{
	u64 prev, new, delta;

	prev = local64_read(&event->hw.prev_count);
	new = paicrypt_getall(event);
	local64_set(&event->hw.prev_count, new);
	delta = (prev <= new) ? new - prev
			      : (-1ULL - prev) + new + 1;	 /* overflow */
	local64_add(delta, &event->count);
}

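/* A counting event starts from the current counter sum. A sampling
 * event registers itself for the context switch callback.
 */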
static void paicrypt_start(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	u64 sum;

	if (!event->attr.sample_period) {	/* Counting */
		sum = paicrypt_getall(event);	/* Get current value */
		local64_set(&event->hw.prev_count, sum);
	} else {				/* Sampling */
		cpump->event = event;
		perf_sched_cb_inc(event->pmu);
	}
}

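/* The first active event on this CPU enables the facility: store the
 * physical address of the counter page in the lowcore and set the
 * cryptography counter control bit in CR0.
 */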
static int paicrypt_add(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	unsigned long ccd;

	if (++cpump->active_events == 1) {
		ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
		WRITE_ONCE(S390_lowcore.ccd, ccd);
		local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
	}
	if (flags & PERF_EF_START)
		paicrypt_start(event, PERF_EF_RELOAD);
	event->hw.state = 0;
	return 0;
}

static void paicrypt_stop(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;

	if (!event->attr.sample_period) {	/* Counting */
		paicrypt_read(event);
	} else {				/* Sampling */
		perf_sched_cb_dec(event->pmu);
		cpump->event = NULL;
	}
	event->hw.state = PERF_HES_STOPPED;
}

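/* When the last event on this CPU is removed, disable the facility
 * again: clear the control register bit and the lowcore pointer.
 */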
static void paicrypt_del(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;

	paicrypt_stop(event, PERF_EF_UPDATE);
	if (--cpump->active_events == 0) {
		local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
		WRITE_ONCE(S390_lowcore.ccd, 0);
	}
}

/* Create raw data and save it in the buffer. Calculate the delta for
 * each counter between this invocation and the last invocation.
 * Returns the number of bytes copied.
 * Only entries with a positive counter difference are saved, in the form:
 * 2 bytes: Counter number
 * 8 bytes: Counter value
 */
static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
			    unsigned long *page_old, bool exclude_user,
			    bool exclude_kernel)
{
	int i, outidx = 0;

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = 0, val_old = 0;

		if (!exclude_kernel) {
			val += paicrypt_getctr(page, i, true);
			val_old += paicrypt_getctr(page_old, i, true);
		}
		if (!exclude_user) {
			val += paicrypt_getctr(page, i, false);
			val_old += paicrypt_getctr(page_old, i, false);
		}
		if (val >= val_old)
			val -= val_old;
		else
			val = (~0ULL - val_old) + val + 1;
		if (val) {
			userdata[outidx].num = i;
			userdata[outidx].value = val;
			outidx++;
		}
	}
	return outidx * sizeof(struct pai_userdata);
}

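/* Build a perf sample from the saved non-zero counter deltas and push
 * it to the perf ring buffer. Afterwards save the current counter page
 * contents as the reference for the next invocation.
 */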
static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
				struct perf_event *event)
{
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	int overflow;

	/* Setup perf sample */
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));
	memset(&data, 0, sizeof(data));
	perf_sample_data_init(&data, 0, event->hw.last_period);
	if (event->attr.sample_type & PERF_SAMPLE_TID) {
		data.tid_entry.pid = task_tgid_nr(current);
		data.tid_entry.tid = task_pid_nr(current);
	}
	if (event->attr.sample_type & PERF_SAMPLE_TIME)
		data.time = event->clock();
	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
		data.id = event->id;
	if (event->attr.sample_type & PERF_SAMPLE_CPU) {
		data.cpu_entry.cpu = smp_processor_id();
		data.cpu_entry.reserved = 0;
	}
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = rawsize;
		raw.frag.data = cpump->save;
		perf_sample_save_raw_data(&data, &raw);
	}

	overflow = perf_event_overflow(event, &data, &regs);
	perf_event_update_userpage(event);
	/* Save crypto counter lowcore page after reading event data. */
	memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
	return overflow;
}

/* Check if there is data to be saved on schedule out of a task. */
static int paicrypt_have_sample(void)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	struct perf_event *event = cpump->event;
	size_t rawsize;
	int rc = 0;

	if (!event)		/* No event active */
		return 0;
	rawsize = paicrypt_copy(cpump->save, cpump->page,
				(unsigned long *)PAI_SAVE_AREA(event),
				cpump->event->attr.exclude_user,
				cpump->event->attr.exclude_kernel);
	if (rawsize)			/* At least one counter incremented */
		rc = paicrypt_push_sample(rawsize, cpump, event);
	return rc;
}

/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event CRYPTO_ALL is allowed.
 */
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
	/* We started with a clean page on event installation. So read out
	 * the results on schedule out and, if the page was dirty (at least
	 * one counter incremented), push a sample.
	 */
	if (!sched_in)
		paicrypt_have_sample();
}

/* Attribute definitions for paicrypt interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instruction returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported; there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1000 + offset in mapped kernel page.
 * All CPU Measurement Facility counter identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
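
/* Example usage with the perf tool, assuming the PMU registers
 * successfully under the name pai_crypto (see paicrypt_init() below):
 *
 *   # Count all crypto operations system-wide
 *   perf stat -e pai_crypto/CRYPTO_ALL/ -a -- sleep 1
 *
 *   # Sample CRYPTO_ALL with raw counter data on each context switch
 *   perf record -e pai_crypto/CRYPTO_ALL/ -c 1 -a -- sleep 1
 */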
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *paicrypt_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group paicrypt_events_group = {
	.name = "events",
	.attrs = NULL			/* Filled in attr_event_init() */
};

static struct attribute_group paicrypt_format_group = {
	.name = "format",
	.attrs = paicrypt_format_attr,
};

static const struct attribute_group *paicrypt_attr_groups[] = {
	&paicrypt_events_group,
	&paicrypt_format_group,
	NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paicrypt = {
	.task_ctx_nr  = perf_invalid_context,
	.event_init   = paicrypt_event_init,
	.add	      = paicrypt_add,
	.del	      = paicrypt_del,
	.start	      = paicrypt_start,
	.stop	      = paicrypt_stop,
	.read	      = paicrypt_read,
	.sched_task   = paicrypt_sched_task,
	.attr_groups  = paicrypt_attr_groups
};

/* List of symbolic PAI counter names. */
static const char * const paicrypt_ctrnames[] = {
	[0] = "CRYPTO_ALL",
	[1] = "KM_DEA",
	[2] = "KM_TDEA_128",
	[3] = "KM_TDEA_192",
	[4] = "KM_ENCRYPTED_DEA",
	[5] = "KM_ENCRYPTED_TDEA_128",
	[6] = "KM_ENCRYPTED_TDEA_192",
	[7] = "KM_AES_128",
	[8] = "KM_AES_192",
	[9] = "KM_AES_256",
	[10] = "KM_ENCRYPTED_AES_128",
	[11] = "KM_ENCRYPTED_AES_192",
	[12] = "KM_ENCRYPTED_AES_256",
	[13] = "KM_XTS_AES_128",
	[14] = "KM_XTS_AES_256",
	[15] = "KM_XTS_ENCRYPTED_AES_128",
	[16] = "KM_XTS_ENCRYPTED_AES_256",
	[17] = "KMC_DEA",
	[18] = "KMC_TDEA_128",
	[19] = "KMC_TDEA_192",
	[20] = "KMC_ENCRYPTED_DEA",
	[21] = "KMC_ENCRYPTED_TDEA_128",
	[22] = "KMC_ENCRYPTED_TDEA_192",
	[23] = "KMC_AES_128",
	[24] = "KMC_AES_192",
	[25] = "KMC_AES_256",
	[26] = "KMC_ENCRYPTED_AES_128",
	[27] = "KMC_ENCRYPTED_AES_192",
	[28] = "KMC_ENCRYPTED_AES_256",
	[29] = "KMC_PRNG",
	[30] = "KMA_GCM_AES_128",
	[31] = "KMA_GCM_AES_192",
	[32] = "KMA_GCM_AES_256",
	[33] = "KMA_GCM_ENCRYPTED_AES_128",
	[34] = "KMA_GCM_ENCRYPTED_AES_192",
	[35] = "KMA_GCM_ENCRYPTED_AES_256",
	[36] = "KMF_DEA",
	[37] = "KMF_TDEA_128",
	[38] = "KMF_TDEA_192",
	[39] = "KMF_ENCRYPTED_DEA",
	[40] = "KMF_ENCRYPTED_TDEA_128",
	[41] = "KMF_ENCRYPTED_TDEA_192",
	[42] = "KMF_AES_128",
	[43] = "KMF_AES_192",
	[44] = "KMF_AES_256",
	[45] = "KMF_ENCRYPTED_AES_128",
	[46] = "KMF_ENCRYPTED_AES_192",
	[47] = "KMF_ENCRYPTED_AES_256",
	[48] = "KMCTR_DEA",
	[49] = "KMCTR_TDEA_128",
	[50] = "KMCTR_TDEA_192",
	[51] = "KMCTR_ENCRYPTED_DEA",
	[52] = "KMCTR_ENCRYPTED_TDEA_128",
	[53] = "KMCTR_ENCRYPTED_TDEA_192",
	[54] = "KMCTR_AES_128",
	[55] = "KMCTR_AES_192",
	[56] = "KMCTR_AES_256",
	[57] = "KMCTR_ENCRYPTED_AES_128",
	[58] = "KMCTR_ENCRYPTED_AES_192",
	[59] = "KMCTR_ENCRYPTED_AES_256",
	[60] = "KMO_DEA",
	[61] = "KMO_TDEA_128",
	[62] = "KMO_TDEA_192",
	[63] = "KMO_ENCRYPTED_DEA",
	[64] = "KMO_ENCRYPTED_TDEA_128",
	[65] = "KMO_ENCRYPTED_TDEA_192",
	[66] = "KMO_AES_128",
	[67] = "KMO_AES_192",
	[68] = "KMO_AES_256",
	[69] = "KMO_ENCRYPTED_AES_128",
	[70] = "KMO_ENCRYPTED_AES_192",
	[71] = "KMO_ENCRYPTED_AES_256",
	[72] = "KIMD_SHA_1",
	[73] = "KIMD_SHA_256",
	[74] = "KIMD_SHA_512",
	[75] = "KIMD_SHA3_224",
	[76] = "KIMD_SHA3_256",
	[77] = "KIMD_SHA3_384",
	[78] = "KIMD_SHA3_512",
	[79] = "KIMD_SHAKE_128",
	[80] = "KIMD_SHAKE_256",
	[81] = "KIMD_GHASH",
	[82] = "KLMD_SHA_1",
	[83] = "KLMD_SHA_256",
	[84] = "KLMD_SHA_512",
	[85] = "KLMD_SHA3_224",
	[86] = "KLMD_SHA3_256",
	[87] = "KLMD_SHA3_384",
	[88] = "KLMD_SHA3_512",
	[89] = "KLMD_SHAKE_128",
	[90] = "KLMD_SHAKE_256",
	[91] = "KMAC_DEA",
	[92] = "KMAC_TDEA_128",
	[93] = "KMAC_TDEA_192",
	[94] = "KMAC_ENCRYPTED_DEA",
	[95] = "KMAC_ENCRYPTED_TDEA_128",
	[96] = "KMAC_ENCRYPTED_TDEA_192",
	[97] = "KMAC_AES_128",
	[98] = "KMAC_AES_192",
	[99] = "KMAC_AES_256",
	[100] = "KMAC_ENCRYPTED_AES_128",
	[101] = "KMAC_ENCRYPTED_AES_192",
	[102] = "KMAC_ENCRYPTED_AES_256",
	[103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
	[104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
	[105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
	[106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
	[107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
	[108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
	[109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
	[110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
	[111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
	[112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
	[113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
	[114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
	[115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
	[116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
	[117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
	[118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
	[119] = "PCC_SCALAR_MULTIPLY_P256",
	[120] = "PCC_SCALAR_MULTIPLY_P384",
	[121] = "PCC_SCALAR_MULTIPLY_P521",
	[122] = "PCC_SCALAR_MULTIPLY_ED25519",
	[123] = "PCC_SCALAR_MULTIPLY_ED448",
	[124] = "PCC_SCALAR_MULTIPLY_X25519",
	[125] = "PCC_SCALAR_MULTIPLY_X448",
	[126] = "PRNO_SHA_512_DRNG",
	[127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
	[128] = "PRNO_TRNG",
	[129] = "KDSA_ECDSA_VERIFY_P256",
	[130] = "KDSA_ECDSA_VERIFY_P384",
	[131] = "KDSA_ECDSA_VERIFY_P521",
	[132] = "KDSA_ECDSA_SIGN_P256",
	[133] = "KDSA_ECDSA_SIGN_P384",
	[134] = "KDSA_ECDSA_SIGN_P521",
	[135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
	[136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
	[137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
	[138] = "KDSA_EDDSA_VERIFY_ED25519",
	[139] = "KDSA_EDDSA_VERIFY_ED448",
	[140] = "KDSA_EDDSA_SIGN_ED25519",
	[141] = "KDSA_EDDSA_SIGN_ED448",
	[142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
	[143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
	[144] = "PCKMO_ENCRYPT_DEA_KEY",
	[145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
	[146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
	[147] = "PCKMO_ENCRYPT_AES_128_KEY",
	[148] = "PCKMO_ENCRYPT_AES_192_KEY",
	[149] = "PCKMO_ENCRYPT_AES_256_KEY",
	[150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
	[151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
	[152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
	[153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
	[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
	[155] = "IBM_RESERVED_155",
	[156] = "IBM_RESERVED_156",
};

static void __init attr_event_free(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;
	int i;

	for (i = 0; i < num; i++) {
		struct device_attribute *dap;

		dap = container_of(attrs[i], struct device_attribute, attr);
		pa = container_of(dap, struct perf_pmu_events_attr, attr);
		kfree(pa);
	}
	kfree(attrs);
}

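/* Create one sysfs event attribute for counter number num. Counter
 * numbers beyond the known name table get no attribute.
 */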
static int __init attr_event_init_one(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;

	/* Index larger than the array size, no counter name available */
	if (num >= ARRAY_SIZE(paicrypt_ctrnames)) {
		attrs[num] = NULL;
		return 0;
	}

	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	sysfs_attr_init(&pa->attr.attr);
	pa->id = PAI_CRYPTO_BASE + num;
	pa->attr.attr.name = paicrypt_ctrnames[num];
	pa->attr.attr.mode = 0444;
	pa->attr.show = cpumf_events_sysfs_show;
	pa->attr.store = NULL;
	attrs[num] = &pa->attr.attr;
	return 0;
}

/* Create PMU sysfs event attributes on the fly. */
static int __init attr_event_init(void)
{
	struct attribute **attrs;
	int ret, i;

	attrs = kmalloc_array(paicrypt_cnt + 2, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	for (i = 0; i <= paicrypt_cnt; i++) {
		ret = attr_event_init_one(attrs, i);
		if (ret) {
			attr_event_free(attrs, i);
			return ret;
		}
	}
	attrs[i] = NULL;
	paicrypt_events_group.attrs = attrs;
	return 0;
}

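/* Query the facility, export the event attributes, set up the debug
 * facility and register the PMU.
 */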
static int __init paicrypt_init(void)
{
	struct qpaci_info_block ib;
	int rc;

	if (!test_facility(196))
		return 0;

	qpaci(&ib);
	paicrypt_cnt = ib.num_cc;
	if (paicrypt_cnt == 0)
		return 0;
	if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR) {
		pr_err("Too many PMU pai_crypto counters %d\n", paicrypt_cnt);
		return -E2BIG;
	}

	rc = attr_event_init();		/* Export known PAI crypto events */
	if (rc) {
		pr_err("Creation of PMU pai_crypto sysfs attributes failed\n");
		return rc;
	}

	/* Setup s390dbf facility */
	cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
	if (!cfm_dbg) {
		pr_err("Registration of s390dbf pai_crypto failed\n");
		return -ENOMEM;
	}
	debug_register_view(cfm_dbg, &debug_sprintf_view);

	rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
	if (rc) {
		pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
		       rc);
		debug_unregister_view(cfm_dbg, &debug_sprintf_view);
		debug_unregister(cfm_dbg);
		return rc;
	}
	return 0;
}

device_initcall(paicrypt_init);