/*-
 * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by the University of Cambridge Computer
 * Laboratory with support from ARM Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>

#include <machine/pmc_mdep.h>
#include <machine/cpu.h>

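/* Number of hardware event counters, from PMCR_EL0.N. */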
static int arm64_npmcs;

struct arm64_event_code_map {
	enum pmc_event	pe_ev;
	uint8_t		pe_code;
};

/*
 * Per-processor information.
 */
struct arm64_cpu {
	struct pmc_hw   *pc_arm64pmcs;
};

static struct arm64_cpu **arm64_pcpu;

/*
 * Interrupt Enable Set Register
 */
static __inline void
arm64_interrupt_enable(uint32_t pmc)
{
	uint32_t reg;

	reg = (1 << pmc);
	WRITE_SPECIALREG(PMINTENSET_EL1, reg);

	isb();
}

/*
 * Interrupt Enable Clear Register
 */
static __inline void
arm64_interrupt_disable(uint32_t pmc)
{
	uint32_t reg;

	reg = (1 << pmc);
	WRITE_SPECIALREG(PMINTENCLR_EL1, reg);

	isb();
}

/*
 * Count Enable Set Register
 */
static __inline void
arm64_counter_enable(unsigned int pmc)
{
	uint32_t reg;

	reg = (1 << pmc);
	WRITE_SPECIALREG(PMCNTENSET_EL0, reg);

	isb();
}

/*
 * Count Enable Clear Register
 */
static __inline void
arm64_counter_disable(unsigned int pmc)
{
	uint32_t reg;

	reg = (1 << pmc);
	WRITE_SPECIALREG(PMCNTENCLR_EL0, reg);

	isb();
}

/*
 * Performance Monitors Control Register
 */
static uint32_t
arm64_pmcr_read(void)
{
	uint32_t reg;

	reg = READ_SPECIALREG(PMCR_EL0);

	return (reg);
}

static void
arm64_pmcr_write(uint32_t reg)
{

	WRITE_SPECIALREG(PMCR_EL0, reg);

	isb();
}

/*
 * Performance Count Register N
 */
static uint32_t
arm64_pmcn_read(unsigned int pmc)
{

	KASSERT(pmc < arm64_npmcs, ("%s: illegal PMC number %d", __func__, pmc));

	WRITE_SPECIALREG(PMSELR_EL0, pmc);

	isb();

	return (READ_SPECIALREG(PMXEVCNTR_EL0));
}

static void
arm64_pmcn_write(unsigned int pmc, uint32_t reg)
{

	KASSERT(pmc < arm64_npmcs, ("%s: illegal PMC number %d", __func__, pmc));

	WRITE_SPECIALREG(PMSELR_EL0, pmc);
	WRITE_SPECIALREG(PMXEVCNTR_EL0, reg);

	isb();
}

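/*
 * Allocate a PMC: check that the requested class is ARMV8 and record
 * the event selector for this row.
 */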
static int
arm64_allocate_pmc(int cpu, int ri, struct pmc *pm,
  const struct pmc_op_pmcallocate *a)
{
	uint32_t caps, config;
	struct arm64_cpu *pac;
	enum pmc_event pe;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < arm64_npmcs,
	    ("[arm64,%d] illegal row index %d", __LINE__, ri));

	pac = arm64_pcpu[cpu];

	caps = a->pm_caps;
	if (a->pm_class != PMC_CLASS_ARMV8) {
		return (EINVAL);
	}
	pe = a->pm_ev;

	config = (pe & EVENT_ID_MASK);
	pm->pm_md.pm_arm64.pm_arm64_evsel = config;

	PMCDBG2(MDP, ALL, 2, "arm64-allocate ri=%d -> config=0x%x", ri, config);

	return (0);
}

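/*
 * Read the current value of a PMC; for sampling-mode PMCs the raw
 * counter value is converted back to the remaining reload count.
 */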
static int
arm64_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	pmc_value_t tmp;
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < arm64_npmcs,
	    ("[arm64,%d] illegal row index %d", __LINE__, ri));

	pm  = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;

	tmp = arm64_pmcn_read(ri);

	PMCDBG2(MDP, REA, 2, "arm64-read id=%d -> %jd", ri, tmp);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = ARMV8_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
	else
		*v = tmp;

	return (0);
}

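/*
 * Write a value to a PMC; for sampling-mode PMCs the reload count is
 * first converted to the raw counter value to program.
 */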
static int
arm64_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < arm64_npmcs,
	    ("[arm64,%d] illegal row-index %d", __LINE__, ri));

	pm  = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ARMV8_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

	PMCDBG3(MDP, WRI, 1, "arm64-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	arm64_pmcn_write(ri, v);

	return (0);
}

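/*
 * Associate a PMC with a hardware row, or unconfigure the row when
 * pm is NULL.
 */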
static int
arm64_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *phw;

	PMCDBG3(MDP, CFG, 1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < arm64_npmcs,
	    ("[arm64,%d] illegal row-index %d", __LINE__, ri));

	phw = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];

	KASSERT(pm == NULL || phw->phw_pmc == NULL,
	    ("[arm64,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
	    __LINE__, pm, phw->phw_pmc));

	phw->phw_pmc = pm;

	return (0);
}

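/*
 * Start a PMC: program the event selector for the row, then enable
 * its overflow interrupt and the counter itself.
 */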
static int
arm64_start_pmc(int cpu, int ri)
{
	struct pmc_hw *phw;
	uint32_t config;
	struct pmc *pm;

	phw    = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
	pm     = phw->phw_pmc;
	config = pm->pm_md.pm_arm64.pm_arm64_evsel;

	/*
	 * Configure the event selection.
	 */
	WRITE_SPECIALREG(PMSELR_EL0, ri);
	WRITE_SPECIALREG(PMXEVTYPER_EL0, config);

	isb();

	/*
	 * Enable the PMC.
	 */
	arm64_interrupt_enable(ri);
	arm64_counter_enable(ri);

	return (0);
}

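/* Stop a PMC: disable the counter and its overflow interrupt. */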
static int
arm64_stop_pmc(int cpu, int ri)
{
	struct pmc_hw *phw;
	struct pmc *pm;

	phw    = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
	pm     = phw->phw_pmc;

	/*
	 * Disable the PMC.
	 */
	arm64_counter_disable(ri);
	arm64_interrupt_disable(ri);

	return (0);
}

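/* Release a PMC; the row must already be unconfigured. */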
static int
arm64_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < arm64_npmcs,
	    ("[arm64,%d] illegal row-index %d", __LINE__, ri));

	phw = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
	KASSERT(phw->phw_pmc == NULL,
	    ("[arm64,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

	return (0);
}

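/*
 * PMU overflow interrupt handler: scan all rows, clear pending overflow
 * flags, post samples for running sampling-mode PMCs and re-arm their
 * reload counts.
 */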
static int
arm64_intr(struct trapframe *tf)
{
	struct arm64_cpu *pc;
	int retval, ri;
	struct pmc *pm;
	int error;
	int reg, cpu;

	cpu = curcpu;
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[arm64,%d] CPU %d out of range", __LINE__, cpu));

	retval = 0;
	pc = arm64_pcpu[cpu];

	for (ri = 0; ri < arm64_npmcs; ri++) {
		pm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;
		if (pm == NULL)
			continue;
		if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			continue;

		/* Check if the counter has overflowed. */
		reg = (1 << ri);
		if ((READ_SPECIALREG(PMOVSCLR_EL0) & reg) == 0)
			continue;
		/* Clear the overflow flag. */
		WRITE_SPECIALREG(PMOVSCLR_EL0, reg);

		isb();

		retval = 1; /* Found an interrupting PMC. */
		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		error = pmc_process_interrupt(PMC_HR, pm, tf);
		if (error)
			arm64_stop_pmc(cpu, ri);

		/* Reload the sampling count. */
		arm64_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
	}

	return (retval);
}

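/* Fill in the description (name, class, state) of a hardware PMC row. */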
static int
arm64_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	char arm64_name[PMC_NAME_MAX];
	struct pmc_hw *phw;
	int error;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[arm64,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < arm64_npmcs,
	    ("[arm64,%d] row-index %d out of range", __LINE__, ri));

	phw = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
	snprintf(arm64_name, sizeof(arm64_name), "ARMV8-%d", ri);
	if ((error = copystr(arm64_name, pi->pm_name, PMC_NAME_MAX,
	    NULL)) != 0)
		return (error);
	pi->pm_class = PMC_CLASS_ARMV8;
	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc = NULL;
	}

	return (0);
}

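/* Return the PMC, if any, configured on the given hardware row. */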
static int
arm64_get_config(int cpu, int ri, struct pmc **ppm)
{

	*ppm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;

	return (0);
}

/*
 * XXX don't know what we should do here.
 */
static int
arm64_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
{

	return (0);
}

static int
arm64_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
{

	return (0);
}

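/*
 * Per-CPU initialization: allocate per-CPU state, wire up the hardware
 * rows and enable the PMU.
 */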
static int
arm64_pcpu_init(struct pmc_mdep *md, int cpu)
{
	struct arm64_cpu *pac;
	struct pmc_hw  *phw;
	struct pmc_cpu *pc;
	uint64_t pmcr;
	int first_ri;
	int i;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[arm64,%d] wrong cpu number %d", __LINE__, cpu));
	PMCDBG1(MDP, INI, 1, "arm64-init cpu=%d", cpu);

	arm64_pcpu[cpu] = pac = malloc(sizeof(struct arm64_cpu), M_PMC,
	    M_WAITOK | M_ZERO);

	pac->pc_arm64pmcs = malloc(sizeof(struct pmc_hw) * arm64_npmcs,
	    M_PMC, M_WAITOK | M_ZERO);
	pc = pmc_pcpu[cpu];
	first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV8].pcd_ri;
	KASSERT(pc != NULL, ("[arm64,%d] NULL per-cpu pointer", __LINE__));

	for (i = 0, phw = pac->pc_arm64pmcs; i < arm64_npmcs; i++, phw++) {
		phw->phw_state    = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
		phw->phw_pmc      = NULL;
		pc->pc_hwpmcs[i + first_ri] = phw;
	}

	/* Enable unit */
	pmcr = arm64_pmcr_read();
	pmcr |= PMCR_E;
	arm64_pmcr_write(pmcr);

	return (0);
}

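/* Per-CPU teardown: disable the PMU. */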
static int
arm64_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	uint32_t pmcr;

	pmcr = arm64_pmcr_read();
	pmcr &= ~PMCR_E;
	arm64_pmcr_write(pmcr);

	return (0);
}

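/*
 * Machine-dependent initialization: discover the number of counters and
 * the CPU type from PMCR_EL0 and fill in the MDEP structure for MI code.
 */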
struct pmc_mdep *
pmc_arm64_initialize(void)
{
	struct pmc_mdep *pmc_mdep;
	struct pmc_classdep *pcd;
	int idcode;
	int reg;

	reg = arm64_pmcr_read();
	arm64_npmcs = (reg & PMCR_N_MASK) >> PMCR_N_SHIFT;
	idcode = (reg & PMCR_IDCODE_MASK) >> PMCR_IDCODE_SHIFT;

	PMCDBG1(MDP, INI, 1, "arm64-init npmcs=%d", arm64_npmcs);

	/*
	 * Allocate space for pointers to PMC HW descriptors and for
	 * the MDEP structure used by MI code.
	 */
	arm64_pcpu = malloc(sizeof(struct arm64_cpu *) * pmc_cpu_max(),
	    M_PMC, M_WAITOK | M_ZERO);

	/* Just one class */
	pmc_mdep = pmc_mdep_alloc(1);

	switch (idcode) {
	case PMCR_IDCODE_CORTEX_A57:
	case PMCR_IDCODE_CORTEX_A72:
		pmc_mdep->pmd_cputype = PMC_CPU_ARMV8_CORTEX_A57;
		break;
	default:
	case PMCR_IDCODE_CORTEX_A53:
		pmc_mdep->pmd_cputype = PMC_CPU_ARMV8_CORTEX_A53;
		break;
	}

	pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV8];
	pcd->pcd_caps  = ARMV8_PMC_CAPS;
	pcd->pcd_class = PMC_CLASS_ARMV8;
	pcd->pcd_num   = arm64_npmcs;
	pcd->pcd_ri    = pmc_mdep->pmd_npmc;
	pcd->pcd_width = 32;

	pcd->pcd_allocate_pmc   = arm64_allocate_pmc;
	pcd->pcd_config_pmc     = arm64_config_pmc;
	pcd->pcd_pcpu_fini      = arm64_pcpu_fini;
	pcd->pcd_pcpu_init      = arm64_pcpu_init;
	pcd->pcd_describe       = arm64_describe;
	pcd->pcd_get_config     = arm64_get_config;
	pcd->pcd_read_pmc       = arm64_read_pmc;
	pcd->pcd_release_pmc    = arm64_release_pmc;
	pcd->pcd_start_pmc      = arm64_start_pmc;
	pcd->pcd_stop_pmc       = arm64_stop_pmc;
	pcd->pcd_write_pmc      = arm64_write_pmc;

	pmc_mdep->pmd_intr       = arm64_intr;
	pmc_mdep->pmd_switch_in  = arm64_switch_in;
	pmc_mdep->pmd_switch_out = arm64_switch_out;

	pmc_mdep->pmd_npmc   += arm64_npmcs;

	return (pmc_mdep);
}

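/* Machine-dependent teardown; nothing to do for this class. */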
void
pmc_arm64_finalize(struct pmc_mdep *md)
{

}