/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2023 Rivos Inc
 *
 * Authors:
 *     Atish Patra <atishp@rivosinc.com>
 */

#ifndef __KVM_VCPU_RISCV_PMU_H
#define __KVM_VCPU_RISCV_PMU_H

#include <linux/perf/riscv_pmu.h>
#include <asm/sbi.h>

#ifdef CONFIG_RISCV_PMU_SBI
#define RISCV_KVM_MAX_FW_CTRS	32
#define RISCV_KVM_MAX_HW_CTRS	32
#define RISCV_KVM_MAX_COUNTERS	(RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
/* The counter accounting below assumes at most 64 counters in total */
static_assert(RISCV_KVM_MAX_COUNTERS <= 64);

/* State of one virtual firmware counter */
struct kvm_fw_event {
	/* Current value of the event */
	unsigned long value;

	/* Event monitoring status */
	bool started;
};
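/*
 * Usage sketch (hypothetical call site, not part of this header): KVM's
 * SBI handlers bump a firmware counter whenever they emulate the
 * corresponding SBI service on the guest's behalf, e.g. on a set_timer
 * call:
 *
 *	kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_SET_TIMER);
 *
 * The increment is only recorded while 'started' is set.
 */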

/* Data for one virtual PMU counter */
struct kvm_pmc {
	/* Index of this counter in the vPMU counter space */
	u8 idx;
	/* Backing host perf event (NULL when no host event is attached) */
	struct perf_event *perf_event;
	/* Accumulated counter value */
	u64 counter_val;
	/* Counter description reported via SBI (type, width, CSR) */
	union sbi_pmu_ctr_info cinfo;
	/* Event monitoring status */
	bool started;
	/* Monitoring event ID */
	unsigned long event_idx;
};
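/*
 * Simplified sketch of how a counter read could combine these fields
 * (the helper and get_event_code() are illustrative, not declared here):
 * a firmware counter reports the matching kvm_fw_event value, while a
 * hardware counter folds the perf event delta into counter_val.
 *
 *	static u64 pmc_read_sketch(struct kvm_pmu *kvpmu, struct kvm_pmc *pmc)
 *	{
 *		u64 enabled, running;
 *
 *		if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW)
 *			return kvpmu->fw_event[get_event_code(pmc->event_idx)].value;
 *		if (pmc->perf_event)
 *			pmc->counter_val += perf_event_read_value(pmc->perf_event,
 *								  &enabled, &running);
 *		return pmc->counter_val;
 *	}
 */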

/* PMU data structure per vCPU */
struct kvm_pmu {
	struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
	struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS];
	/* Number of virtual firmware counters available */
	int num_fw_ctrs;
	/* Number of virtual hardware counters available */
	int num_hw_ctrs;
	/* Set once PMU initialization for this vCPU has completed */
	bool init_done;
	/* Bitmap of all the virtual counters in use */
	DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
};
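/*
 * Assumed counter index layout (mirrors how the SBI PMU extension is
 * typically exposed): indexes [0, num_hw_ctrs) describe hardware
 * counters and the remaining slots describe firmware counters. A
 * hypothetical allocation path would claim a free slot via the bitmap:
 *
 *	ctr_idx = find_next_zero_bit(kvpmu->pmc_in_use,
 *				     kvpmu->num_hw_ctrs + kvpmu->num_fw_ctrs,
 *				     ctr_base);
 *	set_bit(ctr_idx, kvpmu->pmc_in_use);
 */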

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu_context))
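/*
 * The two macros above convert between a vCPU and its embedded PMU
 * context; e.g. (hypothetical caller):
 *
 *	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
 *
 *	WARN_ON(pmu_to_vcpu(kvpmu) != vcpu);
 */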

#if defined(CONFIG_32BIT)
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLEH,	.count = 31,	.func = kvm_riscv_vcpu_pmu_read_hpm }, \
{.base = CSR_CYCLE,	.count = 31,	.func = kvm_riscv_vcpu_pmu_read_hpm },
#else
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLE,	.count = 31,	.func = kvm_riscv_vcpu_pmu_read_hpm },
#endif
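/*
 * These initializers are meant to be embedded in a CSR emulation table.
 * A consumer (sketch; entry handling is assumed, the field names come
 * from the initializers above) dispatches a trapped counter access when
 * the CSR number falls in [base, base + count):
 *
 *	if (entry->base <= csr_num && csr_num < entry->base + entry->count)
 *		ret = entry->func(vcpu, csr_num, &val, new_val, wr_mask);
 */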

int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask);

int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				 unsigned long ctr_mask, unsigned long flags, u64 ival,
				 struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				unsigned long ctr_mask, unsigned long flags,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				     unsigned long ctr_mask, unsigned long flags,
				     unsigned long eidx, u64 evtdata,
				     struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
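/*
 * Illustrative guest-driven SBI PMU sequence as seen by the handlers
 * above (flag/index values are examples; retdata carries the SBI
 * error/value pair back to the guest):
 *
 *	kvm_riscv_vcpu_pmu_ctr_cfg_match(vcpu, 0, 0x1, flags, eidx, evtdata, &retdata);
 *	kvm_riscv_vcpu_pmu_ctr_start(vcpu, idx, 0x1, SBI_PMU_START_FLAG_SET_INIT_VALUE,
 *				     ival, &retdata);
 *	kvm_riscv_vcpu_pmu_ctr_read(vcpu, idx, &retdata);
 *	kvm_riscv_vcpu_pmu_ctr_stop(vcpu, idx, 0x1, 0, &retdata);
 */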

#else
/* Stubs for when SBI PMU support is compiled out */
struct kvm_pmu {
};

#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = 0,	.count = 0,	.func = NULL },

static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
{
	return 0;
}

static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
#endif /* CONFIG_RISCV_PMU_SBI */
#endif /* !__KVM_VCPU_RISCV_PMU_H */