/*
 * Copyright 2017, Data61
 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
 * ABN 41 687 119 230.
 *
 * This software may be distributed and modified according to the terms of
 * the BSD 2-Clause license. Note that NO WARRANTY is provided.
 * See "LICENSE_BSD2.txt" for details.
 *
 * @TAG(DATA61_BSD)
 */
#pragma once

#include <stdint.h>
#include <sel4bench/armv/events.h>
#include <sel4bench/sel4_arch/sel4bench.h>
#include <utils/util.h>

//function attributes
//functions that need to be inlined for speed
#define FASTFN inline __attribute__((always_inline))
//functions that must not incur cache misses: kept out of line and cache-line aligned
#define CACHESENSFN __attribute__((noinline, aligned(64)))
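
/*
 * Illustrative sketch only (not part of this header): FASTFN helpers are
 * forced inline at every call site, while a CACHESENSFN routine is kept out
 * of line and 64-byte aligned so its timing-critical body starts at a
 * cache-line boundary. The function below is hypothetical and uses a helper
 * declared later in this file.
 *
 *     static CACHESENSFN void example_timed_read(uint32_t *out)
 *     {
 *         *out = sel4bench_private_read_pmcr();
 *     }
 */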

//counters and related constants
#define SEL4BENCH_ARMV8A_NUM_COUNTERS 4

#define SEL4BENCH_ARMV8A_COUNTER_CCNT 31

// select whether user mode gets access to the PMCs
static FASTFN void sel4bench_private_switch_user_pmc(unsigned long state)
{
    PMU_WRITE(PMUSERENR, state);
}

/*
 * INTENS
 *
 * Enables the generation of interrupt requests on overflows from the Cycle Count Register,
 * PMCCNTR_EL0, and the event counters PMEVCNTR<n>_EL0. Reading the register shows which
 * overflow interrupt requests are enabled.
 */
static FASTFN void sel4bench_private_write_intens(uint32_t mask)
{
    PMU_WRITE(PMINTENSET, mask);
}

/*
 * INTENC
 *
 * Disables the generation of interrupt requests on overflows from the Cycle Count Register,
 * PMCCNTR_EL0, and the event counters PMEVCNTR<n>_EL0. Reading the register shows which
 * overflow interrupt requests are enabled.
 */
static FASTFN void sel4bench_private_write_intenc(uint32_t mask)
{
    PMU_WRITE(PMINTENCLR, mask);
}
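
/*
 * Usage sketch (illustrative only): enabling, then disabling, overflow
 * interrupts for event counter 0 and the cycle counter. Bit positions match
 * counter indices, with bit 31 (SEL4BENCH_ARMV8A_COUNTER_CCNT) for CCNT.
 *
 *     sel4bench_private_write_intens(BIT(0) | BIT(SEL4BENCH_ARMV8A_COUNTER_CCNT));
 *     ...
 *     sel4bench_private_write_intenc(BIT(0) | BIT(SEL4BENCH_ARMV8A_COUNTER_CCNT));
 */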

static inline void sel4bench_private_init(void* data)
{
    //enable user-mode performance-counter access
    sel4bench_private_switch_user_pmc(1);

    //disable overflow interrupts on all counters
    sel4bench_private_write_intenc(-1);
}

static inline void sel4bench_private_deinit(void* data)
{
    //disable user-mode performance-counter access
    sel4bench_private_switch_user_pmc(0);
}
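
/*
 * Usage sketch (illustrative only): these two calls bracket a benchmarking
 * session. The data argument is unused on this architecture.
 *
 *     sel4bench_private_init(NULL);
 *     // ... configure counters and run benchmarks from user mode ...
 *     sel4bench_private_deinit(NULL);
 */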

/*
 * PMCR:
 *
 *  bits 31:24 = implementer
 *  bits 23:16 = idcode
 *  bits 15:11 = number of counters
 *  bits 10:7  = reserved, sbz
 *  bit  6 = long cycle counter enable (64-bit CCNT overflow)
 *  bit  5 = disable cycle counter when event counting is prohibited
 *  bit  4 = export events to ETM
 *  bit  3 = cycle counter divides by 64
 *  bit  2 = write 1 to reset cycle counter to zero
 *  bit  1 = write 1 to reset all counters to zero
 *  bit  0 = enable bit
 */
#define SEL4BENCH_ARMV8A_PMCR_N(x)       (((x) & 0xFFFF) >> 11u)
#define SEL4BENCH_ARMV8A_PMCR_ENABLE     BIT(0)
#define SEL4BENCH_ARMV8A_PMCR_RESET_ALL  BIT(1)
#define SEL4BENCH_ARMV8A_PMCR_RESET_CCNT BIT(2)
#define SEL4BENCH_ARMV8A_PMCR_DIV64      BIT(3) /* Should CCNT be divided by 64? */

static FASTFN void sel4bench_private_write_pmcr(uint32_t val)
{
    PMU_WRITE(PMCR, val);
}

static FASTFN uint32_t sel4bench_private_read_pmcr(void)
{
    uint32_t val;
    PMU_READ(PMCR, val);
    return val;
}
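
/*
 * Usage sketch (illustrative only): reading how many event counters are
 * implemented, then resetting and starting the PMU by composing the flag
 * macros above.
 *
 *     uint32_t pmcr = sel4bench_private_read_pmcr();
 *     uint32_t num_counters = SEL4BENCH_ARMV8A_PMCR_N(pmcr);
 *     sel4bench_private_write_pmcr(pmcr
 *                                  | SEL4BENCH_ARMV8A_PMCR_RESET_ALL
 *                                  | SEL4BENCH_ARMV8A_PMCR_RESET_CCNT
 *                                  | SEL4BENCH_ARMV8A_PMCR_ENABLE);
 */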

/*
 * CNTENS/CNTENC (Count Enable Set/Clear)
 *
 * Enables the Cycle Count Register, PMCCNTR_EL0, and any implemented event counters
 * PMEVCNTR<n>_EL0. Reading this register shows which counters are enabled.
 */
static FASTFN void sel4bench_private_write_cntens(uint32_t mask)
{
    PMU_WRITE(PMCNTENSET, mask);
}

static FASTFN uint32_t sel4bench_private_read_cntens(void)
{
    uint32_t mask;
    PMU_READ(PMCNTENSET, mask);
    return mask;
}

/*
 * Disables the Cycle Count Register, PMCCNTR_EL0, and any implemented event counters
 * PMEVCNTR<n>_EL0. Reading this register shows which counters are enabled.
 */
static FASTFN void sel4bench_private_write_cntenc(uint32_t mask)
{
    PMU_WRITE(PMCNTENCLR, mask);
}
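
/*
 * Usage sketch (illustrative only): starting event counter 0 and the cycle
 * counter, reading back which counters are enabled, then stopping both.
 *
 *     sel4bench_private_write_cntens(BIT(0) | BIT(SEL4BENCH_ARMV8A_COUNTER_CCNT));
 *     uint32_t enabled = sel4bench_private_read_cntens();
 *     ...
 *     sel4bench_private_write_cntenc(BIT(0) | BIT(SEL4BENCH_ARMV8A_COUNTER_CCNT));
 */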

/*
 * Reads or writes the value of the selected event counter, PMEVCNTR<n>_EL0.
 * PMSELR_EL0.SEL determines which event counter is selected.
 */
static FASTFN uint32_t sel4bench_private_read_pmcnt(void)
{
    uint32_t val;
    PMU_READ(PMXEVCNTR, val);
    return val;
}

static FASTFN void sel4bench_private_write_pmcnt(uint32_t val)
{
    PMU_WRITE(PMXEVCNTR, val);
}

/*
 * Selects the current event counter, PMEVCNTR<n>_EL0, or the cycle counter, CCNT.
 */
static FASTFN void sel4bench_private_write_pmnxsel(uint32_t val)
{
    PMU_WRITE(PMSELR, val);
}
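
/*
 * Usage sketch (illustrative only): PMXEVCNTR accesses are indirect, so the
 * target counter must be selected through PMSELR first. Here event counter 0
 * is selected, zeroed, and later read back.
 *
 *     sel4bench_private_write_pmnxsel(0);
 *     sel4bench_private_write_pmcnt(0);
 *     ...
 *     sel4bench_private_write_pmnxsel(0);
 *     uint32_t count = sel4bench_private_read_pmcnt();
 */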

/*
 * When PMSELR_EL0.SEL selects an event counter, this accesses a PMEVTYPER<n>_EL0
 * register. When PMSELR_EL0.SEL selects the cycle counter, this accesses PMCCFILTR_EL0.
 */
static FASTFN uint32_t sel4bench_private_read_evtsel(void)
{
    uint32_t val;
    PMU_READ(PMXEVTYPER, val);
    return val;
}

static FASTFN void sel4bench_private_write_evtsel(uint32_t val)
{
    PMU_WRITE(PMXEVTYPER, val);
}
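
/*
 * Usage sketch (illustrative only; the event number is a placeholder): a
 * complete configuration sequence for one event counter using the helpers
 * above: select it, program its event type, zero it, enable it, and read it
 * back once the measured code has run.
 *
 *     sel4bench_private_write_pmnxsel(0);     // select PMEVCNTR0_EL0
 *     sel4bench_private_write_evtsel(event);  // hypothetical event number
 *     sel4bench_private_write_pmcnt(0);       // zero the counter
 *     sel4bench_private_write_cntens(BIT(0)); // start counting
 *     // ... code under measurement ...
 *     sel4bench_private_write_pmnxsel(0);
 *     uint32_t events = sel4bench_private_read_pmcnt();
 */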