1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * Copyright (C) 2015 Linaro Ltd. |
4 | * Author: Shannon Zhao <shannon.zhao@linaro.org> |
5 | */ |
6 | |
7 | #ifndef __ASM_ARM_KVM_PMU_H |
8 | #define __ASM_ARM_KVM_PMU_H |
9 | |
10 | #include <linux/perf_event.h> |
11 | #include <linux/perf/arm_pmuv3.h> |
12 | |
/* The cycle counter occupies the highest possible counter index */
#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
14 | |
15 | #if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM) |
/* State for one emulated PMU counter */
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;	/* host perf event backing this counter, if any */
};
20 | |
/*
 * Host vs. guest PMU event state; the host-wide copy is obtained via
 * kvm_get_pmu_events() and snapshotted per-vcpu by
 * kvm_pmu_update_vcpu_events().
 */
struct kvm_pmu_events {
	u32 events_host;	/* presumably a bitmask of events active while in the host */
	u32 events_guest;	/* presumably a bitmask of events active while in the guest */
};
25 | |
/* Per-vcpu PMU emulation state */
struct kvm_pmu {
	struct irq_work overflow_work;	/* deferred work — presumably for overflow interrupt injection; TODO confirm at use site */
	struct kvm_pmu_events events;	/* snapshot filled in by kvm_pmu_update_vcpu_events() */
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];	/* one entry per architectural counter */
	int irq_num;	/* overflow interrupt number (see kvm_arm_pmu_irq_initialized()) */
	bool created;	/* NOTE(review): looks like a one-shot "userspace finished configuring" flag — confirm */
	bool irq_level;	/* current level of the emulated overflow interrupt line */
};
34 | |
/* List node wrapping a host arm_pmu (the list head is not visible in this file) */
struct arm_pmu_entry {
	struct list_head entry;	/* list linkage */
	struct arm_pmu *arm_pmu;	/* the host PMU this entry refers to */
};
39 | |
/* Static key — presumably enabled during host PMU probe; defaults to false */
DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

/*
 * Fast check for whether the host has a PMU usable for guests; compiles
 * down to a runtime-patched branch via the static key above.
 */
static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}
46 | |
/*
 * The overflow interrupt counts as configured once userspace has set it
 * to something other than an SGI ID (i.e. >= VGIC_NR_SGIS).
 */
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
/* Emulated counter/register accessors */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
/* vcpu lifecycle hooks */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
/* Counter enable/disable and run-loop synchronization */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
/* Trap/emulation entry points for guest PMU register writes */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
/* KVM_{SET,GET,HAS}_DEVICE_ATTR backends for the vcpu PMU device */
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

/* Host<->guest PMU context switching helpers */
struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_resync_el0(void);

/* A vcpu has a PMU iff userspace requested the PMU_V3 feature */
#define kvm_vcpu_has_pmu(vcpu)					\
	(vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))
81 | |
82 | /* |
83 | * Updates the vcpu's view of the pmu events for this cpu. |
84 | * Must be called before every vcpu run after disabling interrupts, to ensure |
85 | * that an interrupt cannot fire and update the structure. |
86 | */ |
87 | #define kvm_pmu_update_vcpu_events(vcpu) \ |
88 | do { \ |
89 | if (!has_vhe() && kvm_vcpu_has_pmu(vcpu)) \ |
90 | vcpu->arch.pmu.events = *kvm_get_pmu_events(); \ |
91 | } while (0) |
92 | |
93 | /* |
94 | * Evaluates as true when emulating PMUv3p5, and false otherwise. |
95 | */ |
96 | #define kvm_pmu_is_3p5(vcpu) ({ \ |
97 | u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1); \ |
98 | u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val); \ |
99 | \ |
100 | pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5; \ |
101 | }) |
102 | |
/* VM-wide PMU configuration / feature-limit helpers */
u8 kvm_arm_pmu_get_pmuver_limit(void);
u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
int kvm_arm_set_default_pmu(struct kvm *kvm);
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);

u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
109 | #else |
/*
 * PMU support compiled out (!CONFIG_HW_PERF_EVENTS or !CONFIG_KVM):
 * the per-vcpu PMU struct is empty and every helper below degrades to a
 * no-op or a constant return.
 */
struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
	return false;
}
117 | |
/* No overflow interrupt can ever be configured without PMU support */
#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
/* Device attribute ioctls: -ENXIO signals "no such attribute" to userspace */
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}
169 | |
/* Statement expressions, mirroring the shape of the real definitions above */
#define kvm_vcpu_has_pmu(vcpu)		({ false; })
#define kvm_pmu_is_3p5(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	return 0;
}
static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
	return 0;
}
static inline void kvm_vcpu_pmu_resync_el0(void) {}

/* Without PMU support there is no default PMU to assign to a VM */
static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
{
	return -ENODEV;
}

static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
{
	return 0;
}

static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
	return 0;
}
200 | |
201 | #endif |
202 | |
203 | #endif |
204 | |