/*
 * Copyright 2020, Data61
 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
 * ABN 41 687 119 230.
 *
 * This software may be distributed and modified according to the terms of
 * the BSD 2-Clause license. Note that NO WARRANTY is provided.
 * See "LICENSE_BSD2.txt" for details.
 *
 * @TAG(DATA61_BSD)
 */
#pragma once

#include <autoconf.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sel4bench/types.h>
#include <sel4/sel4.h>
#include <utils/util.h>

#define FASTFN inline __attribute__((always_inline))

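/* On RV32, the 64-bit counters are exposed as separate low/high CSRs, so a
 * naive two-read sequence can be torn if the low half wraps between the
 * reads. The 32-bit macros below use the usual high/low/high pattern: if
 * the high half changed between the two reads, the low half is re-read and
 * the newer high half is used.
 */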
#if __riscv_xlen == 32
#define SEL4BENCH_READ_CCNT(var) do { \
    uint32_t nH1, nL, nH2; \
    asm volatile("rdcycleh %0\n" \
                 "rdcycle %1\n" \
                 "rdcycleh %2\n" \
                 : "=r"(nH1), "=r"(nL), "=r"(nH2)); \
    if (nH1 < nH2) { \
        asm volatile("rdcycle %0" : "=r"(nL)); \
        nH1 = nH2; \
    } \
    var = ((uint64_t) nH1 << 32) | nL; \
} while (0)
#else
#define SEL4BENCH_READ_CCNT(var) \
    asm volatile("rdcycle %0" : "=r"(var));
#endif

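/* The user-level cycle CSR is a read-only shadow of mcycle, so the counter
 * cannot be reset from here; benchmarks should subtract a start reading
 * from an end reading instead.
 */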
#define SEL4BENCH_RESET_CCNT do {\
    ; \
} while(0)

#if __riscv_xlen == 32
#define SEL4BENCH_READ_PCNT(idx, var) do { \
    uint32_t nH1, nL, nH2; \
    asm volatile("csrr %0, hpmcounterh" #idx "\n" \
                 "csrr %1, hpmcounter" #idx "\n" \
                 "csrr %2, hpmcounterh" #idx "\n" \
                 : "=r"(nH1), "=r"(nL), "=r"(nH2)); \
    if (nH1 < nH2) { \
        asm volatile("csrr %0, hpmcounter" #idx : "=r"(nL)); \
        nH1 = nH2; \
    } \
    var = ((uint64_t) nH1 << 32) | nL; \
} while (0)
#else
#define SEL4BENCH_READ_PCNT(idx, var) \
    asm volatile("csrr %0, hpmcounter" #idx : "=r"(var));
#endif

/* See Chapter 4.10 of the SiFive FU540 manual for details.
 * These settings are platform specific; however, they
 * might become part of the RISC-V spec in the future.
 */
#define SEL4BENCH_EVENT_EXECUTE_INSTRUCTION 0x3FFFF00
#define SEL4BENCH_EVENT_CACHE_L1I_MISS      0x102
#define SEL4BENCH_EVENT_CACHE_L1D_MISS      0x202
#define SEL4BENCH_EVENT_TLB_L1I_MISS        0x802
#define SEL4BENCH_EVENT_TLB_L1D_MISS        0x1002
#define SEL4BENCH_EVENT_BRANCH_MISPREDICT   0x6001
#define SEL4BENCH_EVENT_MEMORY_ACCESS       0x202
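/* A note on the encoding, per the FU540 manual: the low 8 bits of an event
 * value select an event class and the remaining upper bits form a mask of
 * events within that class. For example, 0x102 selects class 2 (memory
 * system events) with bit 8 (L1 I-cache miss) set. Note that
 * SEL4BENCH_EVENT_MEMORY_ACCESS shares the 0x202 encoding with
 * SEL4BENCH_EVENT_CACHE_L1D_MISS.
 */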

#define CCNT_FORMAT "%"PRIu64
typedef uint64_t ccnt_t;

static FASTFN void sel4bench_init()
{
    /* Nothing to do */
}

static FASTFN void sel4bench_destroy()
{
    /* Nothing to do */
}

static FASTFN seL4_Word sel4bench_get_num_counters()
{
#ifdef CONFIG_PLAT_HIFIVE
    return 2;
#else
    return 0;
#endif
}

static FASTFN ccnt_t sel4bench_get_cycle_count()
{
    ccnt_t val;

    SEL4BENCH_READ_CCNT(val);

    return val;
}

/* Being declared FASTFN allows this function (once inlined) to cache-miss;
 * I think that is worthwhile in the general case, for performance reasons.
 * Moreover, it is small enough that it will be suitably aligned most of the
 * time.
 */
static FASTFN ccnt_t sel4bench_get_counter(counter_t counter)
{
    ccnt_t val;

    /* The SiFive U540 only supports two event counters,
     * mapped to hpmcounter3 and hpmcounter4 */
    switch (counter) {
    case 0:
        SEL4BENCH_READ_PCNT(3, val);
        break;
    case 1:
        SEL4BENCH_READ_PCNT(4, val);
        break;
    default:
        val = 0;
        break;
    }

    return val;
}

static inline ccnt_t sel4bench_get_counters(counter_bitfield_t mask, ccnt_t *values)
{
    ccnt_t ccnt;
    unsigned int counter = 0;

    for (; mask != 0 ; mask >>= 1, counter++) {
        if (mask & 1) {
            values[counter] = sel4bench_get_counter(counter);
        }
    }

    SEL4BENCH_READ_CCNT(ccnt);

    return ccnt;
}

static FASTFN void sel4bench_set_count_event(counter_t counter, event_id_t event)
{
    /* The SiFive U540 only supports two event counters */
    switch (counter) {
    case 0:
        /* Stop the counter */
        asm volatile("csrw mhpmevent3, 0");

        /* Reset and start the counter */
#if __riscv_xlen == 32
        asm volatile("csrw mhpmcounterh3, 0");
#endif
        asm volatile("csrw mhpmcounter3, 0\n"
                     "csrw mhpmevent3, %0\n"
                     :: "r"(event));
        break;
    case 1:
        asm volatile("csrw mhpmevent4, 0");
#if __riscv_xlen == 32
        asm volatile("csrw mhpmcounterh4, 0");
#endif
        asm volatile("csrw mhpmcounter4, 0\n"
                     "csrw mhpmevent4, %0\n"
                     :: "r"(event));
        break;
    default:
        break;
    }

    return;
}

/* Writing to the event CSR automatically starts the counter */
static FASTFN void sel4bench_start_counters(counter_bitfield_t mask)
{
    /* Nothing to do */
}

/* Note that a counter is stopped by clearing its event CSR.
 * Set the event CSR again before restarting the counter.
 */
static FASTFN void sel4bench_stop_counters(counter_bitfield_t mask)
{
    /* The SiFive U540 only supports two event counters:
     * counter 0 is hpmcounter3 and counter 1 is hpmcounter4 */
    if (mask & (1 << 0)) {
        asm volatile("csrw mhpmevent3, 0");
    }

    if (mask & (1 << 1)) {
        asm volatile("csrw mhpmevent4, 0");
    }
    return;
}

static FASTFN void sel4bench_reset_counters(void)
{
    /* Nothing to do */
}
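
/* A minimal usage sketch (an illustrative assumption: the kernel has been
 * configured to give this thread access to the cycle and hpmcounter CSRs):
 *
 *     ccnt_t start, end, misses;
 *     sel4bench_init();
 *     sel4bench_set_count_event(0, SEL4BENCH_EVENT_CACHE_L1D_MISS);
 *     SEL4BENCH_READ_CCNT(start);
 *     // ... code under test ...
 *     SEL4BENCH_READ_CCNT(end);
 *     misses = sel4bench_get_counter(0);
 *     sel4bench_stop_counters(1 << 0);
 *     sel4bench_destroy();
 */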