// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"

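/*
 * The two halves of a counter's MSR pair: the raw counter itself
 * (PERFCTRn/CTRn) and its event select (EVNTSELn/CTLn).
 */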
enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

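/*
 * Translate a (guest-controlled) counter index to its kvm_pmc, clamping the
 * array access with array_index_nospec() to avoid speculative out-of-bounds
 * reads.
 */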
static struct kvm_pmc *amd_pmu_get_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int num_counters = pmu->nr_arch_gp_counters;

	if (pmc_idx >= num_counters)
		return NULL;

	return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
}

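/*
 * Map a PMU MSR index to its GP counter, covering both the legacy K7 MSRs
 * (four counters, CTL and CTR in separate ranges) and the PERFCTR_CORE MSRs
 * (six counters, CTL/CTR pairs interleaved in a single range).
 */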
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
	unsigned int idx;

	if (!vcpu->kvm->arch.enable_pmu)
		return NULL;

	switch (msr) {
	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		/*
		 * Each PMU counter has a pair of CTL and CTR MSRs. CTLn
		 * MSRs (accessed via EVNTSEL) are even, CTRn MSRs are odd,
		 * e.g. CTL0 is 0xc0010200 and CTR0 is 0xc0010201.
		 */
		idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
		if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL))
			return NULL;
		break;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		idx = msr - MSR_K7_EVNTSEL0;
		break;
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		idx = msr - MSR_K7_PERFCTR0;
		break;
	default:
		return NULL;
	}

	return amd_pmu_get_pmc(pmu, idx);
}

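/*
 * AMD has no fixed counters or other special RDPMC encodings; the only early
 * check is that ECX names an existing GP counter.
 */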
static int amd_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (idx >= pmu->nr_arch_gp_counters)
		return -EINVAL;

	return 0;
}

/* idx is the ECX register of the RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
	unsigned int idx, u64 *mask)
{
	return amd_pmu_get_pmc(vcpu_to_pmu(vcpu), idx);
}

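/* Check the counter MSR space first, then the event selects. */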
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

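/*
 * Note, the default arm catches the CTL/CTR pairs above CTR5, which exist
 * only when PerfMonV2 advertises more than six GP counters.
 */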
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	switch (msr) {
	case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
		return pmu->version > 0;
	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
		return guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE);
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
		return pmu->version > 1;
	default:
		if (msr > MSR_F15H_PERF_CTR5 &&
		    msr < MSR_F15H_PERF_CTL0 + 2 * pmu->nr_arch_gp_counters)
			return pmu->version > 1;
		break;
	}

	return amd_msr_idx_to_pmc(vcpu, msr);
}

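/*
 * The global control/status MSRs accepted above are read and written by
 * common KVM PMU code; only the per-counter MSRs are handled here.
 */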
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

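/*
 * Event select writes reprogram the backing perf event only when the
 * non-reserved bits actually change.
 */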
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc_write_counter(pmc, data);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		data &= ~pmu->reserved_bits;
		if (data != pmc->eventsel) {
			pmc->eventsel = data;
			kvm_pmu_request_counter_reprogram(pmc);
		}
		return 0;
	}

	return 1;
}

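/*
 * (Re)build the PMU model from the guest CPUID: PerfMonV2 advertises the
 * number of core counters in CPUID 0x80000022.EBX, PERFCTR_CORE implies six
 * counters, and the legacy architecture implies four.
 */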
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	union cpuid_0x80000022_ebx ebx;

	pmu->version = 1;
	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFMON_V2)) {
		pmu->version = 2;
		/*
		 * Note, PERFMON_V2 is also in 0x80000022.0x0, i.e. the guest
		 * CPUID entry is guaranteed to be non-NULL.
		 */
		BUILD_BUG_ON(x86_feature_cpuid(X86_FEATURE_PERFMON_V2).function != 0x80000022 ||
			     x86_feature_cpuid(X86_FEATURE_PERFMON_V2).index);
		ebx.full = kvm_find_cpuid_entry_index(vcpu, 0x80000022, 0)->ebx;
		pmu->nr_arch_gp_counters = ebx.split.num_core_pmc;
	} else if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	} else {
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
	}

	pmu->nr_arch_gp_counters = min_t(unsigned int, pmu->nr_arch_gp_counters,
					 kvm_pmu_cap.num_counters_gp);

	if (pmu->version > 1) {
		pmu->global_ctrl_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
		pmu->global_status_mask = pmu->global_ctrl_mask;
	}

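	/* AMD GP counters are 48 bits wide. */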
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
	/* not applicable to AMD; but clean them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

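/*
 * Initialize every supported GP counter up front; amd_pmu_refresh() later
 * limits how many are visible to the guest.
 */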
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > AMD64_NUM_COUNTERS_CORE);
	BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}

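/* Consumed (copied) during KVM's vendor module initialization, hence __initdata. */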
struct kvm_pmu_ops amd_pmu_ops __initdata = {
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.check_rdpmc_early = amd_check_rdpmc_early,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
	.MAX_NR_GP_COUNTERS = KVM_AMD_PMC_MAX_GENERIC,
	.MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,
};