1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #include <linux/slab.h> |
3 | #include <linux/pci.h> |
4 | #include <asm/apicdef.h> |
5 | #include <asm/intel-family.h> |
6 | #include <linux/io-64-nonatomic-lo-hi.h> |
7 | |
8 | #include <linux/perf_event.h> |
9 | #include "../perf_event.h" |
10 | |
#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_MAX_FIXED	1
#define UNCORE_PMC_IDX_MAX_FREERUNNING	1
/* Counter index space layout: [generic 0..7][fixed][freerunning]. */
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING	(UNCORE_PMC_IDX_FIXED + \
					UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FREERUNNING + \
					UNCORE_PMC_IDX_MAX_FREERUNNING)

/* Pack/unpack (device, function, type, index) into one 32-bit id. */
#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
/*
 * The macro and struct identifiers on these lines were lost; restored
 * to the names the rest of this file relies on (UNCORE_EXTRA_PCI_DEV_MAX
 * sizes the array below, pci_extra_dev is used by uncore_extra_pci_dev).
 */
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	4

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

#define UNCORE_IGNORE_END		-1

/* Extra (non-PMON) PCI devices an uncore driver may need to access. */
struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};
42 | |
/* Forward declarations; the full definitions follow below. */
struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;
struct intel_uncore_topology;
49 | |
/*
 * Describes one type of uncore PMON unit (counter geometry, register
 * layout, access callbacks) and carries the PMU instances built for it.
 */
struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;		/* width of a generic counter */
	int fixed_ctr_bits;		/* width of the fixed counter */
	int num_freerunning_types;
	int type_id;
	unsigned perf_ctr;		/* base of the generic counters */
	unsigned event_ctl;		/* base of the event-select registers */
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	u64 *box_ctls;	/* Unit ctrl addr of the first box of each die */
	union {
		unsigned msr_offset;	/* uniform per-box stride (see uncore_msr_box_offset) */
		unsigned mmio_offset;
	};
	unsigned mmio_map_size;		/* bounds-checked in uncore_mmio_is_valid_offset() */
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;	/* ctl/ctr registers interleaved (idx doubled) */
	union {
		u64 *msr_offsets;	/* per-box offsets when spacing is irregular */
		u64 *pci_offsets;
		u64 *mmio_offsets;
	};
	unsigned *box_ids;
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	const struct attribute_group **attr_update;
	struct pmu *pmu; /* for custom pmu ops */
	/*
	 * Uncore PMU would store relevant platform topology configuration here
	 * to identify which platform component each PMON block of that type is
	 * supposed to monitor.
	 */
	struct intel_uncore_topology **topology;
	/*
	 * Optional callbacks for managing mapping of Uncore units to PMONs
	 */
	int (*get_topology)(struct intel_uncore_type *type);
	void (*set_mapping)(struct intel_uncore_type *type);
	void (*cleanup_mapping)(struct intel_uncore_type *type);
};
102 | |
/* Conventional slots within intel_uncore_type::attr_groups[]. */
#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]
106 | |
/*
 * Hardware-access callbacks for one uncore type; the MSR-, PCI- and
 * MMIO-based implementations plug in here. init_box/exit_box may be
 * NULL (callers check before invoking, see uncore_box_init/exit).
 */
struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};
120 | |
/* One registered perf PMU instance of an uncore type. */
struct intel_uncore_pmu {
	struct pmu pmu;
	char name[UNCORE_PMU_NAME_LEN];
	int pmu_idx;			/* index of this PMU within its type */
	int func_id;
	bool registered;
	atomic_t activeboxes;
	struct intel_uncore_type *type;
	struct intel_uncore_box **boxes;
};
131 | |
132 | struct { |
133 | raw_spinlock_t ; |
134 | u64 , , ; |
135 | atomic_t ; |
136 | }; |
137 | |
/* Runtime state of one PMON box instance. */
struct intel_uncore_box {
	int dieid;	/* Logical die ID */
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;	/* UNCORE_BOX_FLAG_* bits */
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];	/* indexed by counter idx */
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;	/* non-NULL for PCI-based boxes */
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void __iomem *io_addr;		/* non-NULL for MMIO-based boxes */
	struct intel_uncore_extra_reg shared_regs[];	/* num_shared_regs entries */
};
159 | |
/* CFL uncore 8th cbox MSRs */
#define CFL_UNC_CBO_7_PERFEVTSEL0		0xf70
#define CFL_UNC_CBO_7_PER_CTR0			0xf76

/* Bit numbers used in intel_uncore_box::flags. */
#define UNCORE_BOX_FLAG_INITIATED		0
/* event config registers are 8-byte apart */
#define UNCORE_BOX_FLAG_CTL_OFFS8		1
/* CFL 8th CBOX has different MSR space */
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS	2
169 | |
/* Sysfs description of one uncore event: attribute plus config string. */
struct uncore_event_desc {
	struct device_attribute attr;
	const char *config;
};
174 | |
/*
 * Register layout of one type of free-running counters; see
 * uncore_freerunning_counter() for how an address is composed.
 */
struct freerunning_counters {
	unsigned int counter_base;	/* address of the first counter */
	unsigned int counter_offset;	/* stride between counters of this type */
	unsigned int box_offset;	/* per-box stride (uniform layout) */
	unsigned int num_counters;
	unsigned int bits;		/* counter width */
	unsigned *box_offsets;		/* per-box offsets (irregular layout) */
};
183 | |
/* IIO unit topology: which PCI bus/segment the unit fronts. */
struct uncore_iio_topology {
	int pci_bus_no;
	int segment;
};
188 | |
/* UPI link topology: remote die/PMU this link connects to. */
struct uncore_upi_topology {
	int die_to;
	int pmu_idx_to;
	int enabled;
};
194 | |
/* Per-PMU topology record; the union member depends on the unit type. */
struct intel_uncore_topology {
	int pmu_idx;
	union {
		void *untyped;
		struct uncore_iio_topology *iio;
		struct uncore_upi_topology *upi;
	};
};
203 | |
/* Per-segment map from PCI bus number to logical die id. */
struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_dieid[256];		/* indexed by PCI bus number */
};
209 | |
/* PCI bus/segment <-> die lookup helpers. */
struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_dieid(struct pci_bus *bus);
int uncore_die_to_segment(int die);
int uncore_device_to_die(struct pci_dev *dev);

/* sysfs "show" routine used by INTEL_UNCORE_EVENT_DESC() attributes. */
ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf);
217 | |
/*
 * Recover the owning intel_uncore_pmu from a sysfs device whose drvdata
 * is the embedded 'struct pmu'.
 */
static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
{
	return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);
}
222 | |
/* Walk from an embedded 'struct attribute' back up to its wrappers. */
#define to_device_attribute(n)	container_of(n, struct device_attribute, attr)
#define to_dev_ext_attribute(n)	container_of(n, struct dev_ext_attribute, attr)
#define attr_to_ext_attr(n)	to_dev_ext_attribute(to_device_attribute(n))

extern int __uncore_max_dies;
#define uncore_max_dies()	(__uncore_max_dies)

/* Initializer for a 'struct uncore_event_desc' exposed via sysfs. */
#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}
235 | |
/*
 * Declare a read-only format attribute (format_attr_<var>) whose show
 * routine prints the fixed _format string.
 */
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct device *dev,		\
				struct device_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct device_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
246 | |
247 | static inline bool uncore_pmc_fixed(int idx) |
248 | { |
249 | return idx == UNCORE_PMC_IDX_FIXED; |
250 | } |
251 | |
252 | static inline bool uncore_pmc_freerunning(int idx) |
253 | { |
254 | return idx == UNCORE_PMC_IDX_FREERUNNING; |
255 | } |
256 | |
257 | static inline bool uncore_mmio_is_valid_offset(struct intel_uncore_box *box, |
258 | unsigned long offset) |
259 | { |
260 | if (offset < box->pmu->type->mmio_map_size) |
261 | return true; |
262 | |
263 | pr_warn_once("perf uncore: Invalid offset 0x%lx exceeds mapped area of %s.\n" , |
264 | offset, box->pmu->type->name); |
265 | |
266 | return false; |
267 | } |
268 | |
269 | static inline |
270 | unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box) |
271 | { |
272 | return box->pmu->type->box_ctl + |
273 | box->pmu->type->mmio_offset * box->pmu->pmu_idx; |
274 | } |
275 | |
276 | static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box) |
277 | { |
278 | return box->pmu->type->box_ctl; |
279 | } |
280 | |
281 | static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box) |
282 | { |
283 | return box->pmu->type->fixed_ctl; |
284 | } |
285 | |
286 | static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box) |
287 | { |
288 | return box->pmu->type->fixed_ctr; |
289 | } |
290 | |
291 | static inline |
292 | unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx) |
293 | { |
294 | if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags)) |
295 | return idx * 8 + box->pmu->type->event_ctl; |
296 | |
297 | return idx * 4 + box->pmu->type->event_ctl; |
298 | } |
299 | |
300 | static inline |
301 | unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx) |
302 | { |
303 | return idx * 8 + box->pmu->type->perf_ctr; |
304 | } |
305 | |
306 | static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box) |
307 | { |
308 | struct intel_uncore_pmu *pmu = box->pmu; |
309 | return pmu->type->msr_offsets ? |
310 | pmu->type->msr_offsets[pmu->pmu_idx] : |
311 | pmu->type->msr_offset * pmu->pmu_idx; |
312 | } |
313 | |
314 | static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box) |
315 | { |
316 | if (!box->pmu->type->box_ctl) |
317 | return 0; |
318 | return box->pmu->type->box_ctl + uncore_msr_box_offset(box); |
319 | } |
320 | |
321 | static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box) |
322 | { |
323 | if (!box->pmu->type->fixed_ctl) |
324 | return 0; |
325 | return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); |
326 | } |
327 | |
328 | static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) |
329 | { |
330 | return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); |
331 | } |
332 | |
333 | |
334 | /* |
335 | * In the uncore document, there is no event-code assigned to free running |
336 | * counters. Some events need to be defined to indicate the free running |
337 | * counters. The events are encoded as event-code + umask-code. |
338 | * |
339 | * The event-code for all free running counters is 0xff, which is the same as |
340 | * the fixed counters. |
341 | * |
342 | * The umask-code is used to distinguish a fixed counter and a free running |
343 | * counter, and different types of free running counters. |
344 | * - For fixed counters, the umask-code is 0x0X. |
345 | * X indicates the index of the fixed counter, which starts from 0. |
346 | * - For free running counters, the umask-code uses the rest of the space. |
347 | * It would bare the format of 0xXY. |
348 | * X stands for the type of free running counters, which starts from 1. |
349 | * Y stands for the index of free running counters of same type, which |
350 | * starts from 0. |
351 | * |
352 | * For example, there are three types of IIO free running counters on Skylake |
353 | * server, IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters. |
354 | * The event-code for all the free running counters is 0xff. |
355 | * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type, |
356 | * which umask-code starts from 0x10. |
357 | * So 'ioclk' is encoded as event=0xff,umask=0x10 |
358 | * 'bw_in_port2' is the third counter of BANDWIDTH counters. BANDWIDTH is |
359 | * the second type, which umask-code starts from 0x20. |
360 | * So 'bw_in_port2' is encoded as event=0xff,umask=0x22 |
361 | */ |
362 | static inline unsigned int uncore_freerunning_idx(u64 config) |
363 | { |
364 | return ((config >> 8) & 0xf); |
365 | } |
366 | |
367 | #define UNCORE_FREERUNNING_UMASK_START 0x10 |
368 | |
369 | static inline unsigned int uncore_freerunning_type(u64 config) |
370 | { |
371 | return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf); |
372 | } |
373 | |
374 | static inline |
375 | unsigned int uncore_freerunning_counter(struct intel_uncore_box *box, |
376 | struct perf_event *event) |
377 | { |
378 | unsigned int type = uncore_freerunning_type(config: event->hw.config); |
379 | unsigned int idx = uncore_freerunning_idx(config: event->hw.config); |
380 | struct intel_uncore_pmu *pmu = box->pmu; |
381 | |
382 | return pmu->type->freerunning[type].counter_base + |
383 | pmu->type->freerunning[type].counter_offset * idx + |
384 | (pmu->type->freerunning[type].box_offsets ? |
385 | pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] : |
386 | pmu->type->freerunning[type].box_offset * pmu->pmu_idx); |
387 | } |
388 | |
389 | static inline |
390 | unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) |
391 | { |
392 | if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) { |
393 | return CFL_UNC_CBO_7_PERFEVTSEL0 + |
394 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); |
395 | } else { |
396 | return box->pmu->type->event_ctl + |
397 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + |
398 | uncore_msr_box_offset(box); |
399 | } |
400 | } |
401 | |
402 | static inline |
403 | unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) |
404 | { |
405 | if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) { |
406 | return CFL_UNC_CBO_7_PER_CTR0 + |
407 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); |
408 | } else { |
409 | return box->pmu->type->perf_ctr + |
410 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + |
411 | uncore_msr_box_offset(box); |
412 | } |
413 | } |
414 | |
415 | static inline |
416 | unsigned uncore_fixed_ctl(struct intel_uncore_box *box) |
417 | { |
418 | if (box->pci_dev || box->io_addr) |
419 | return uncore_pci_fixed_ctl(box); |
420 | else |
421 | return uncore_msr_fixed_ctl(box); |
422 | } |
423 | |
424 | static inline |
425 | unsigned uncore_fixed_ctr(struct intel_uncore_box *box) |
426 | { |
427 | if (box->pci_dev || box->io_addr) |
428 | return uncore_pci_fixed_ctr(box); |
429 | else |
430 | return uncore_msr_fixed_ctr(box); |
431 | } |
432 | |
433 | static inline |
434 | unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx) |
435 | { |
436 | if (box->pci_dev || box->io_addr) |
437 | return uncore_pci_event_ctl(box, idx); |
438 | else |
439 | return uncore_msr_event_ctl(box, idx); |
440 | } |
441 | |
442 | static inline |
443 | unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx) |
444 | { |
445 | if (box->pci_dev || box->io_addr) |
446 | return uncore_pci_perf_ctr(box, idx); |
447 | else |
448 | return uncore_msr_perf_ctr(box, idx); |
449 | } |
450 | |
451 | static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box) |
452 | { |
453 | return box->pmu->type->perf_ctr_bits; |
454 | } |
455 | |
456 | static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box) |
457 | { |
458 | return box->pmu->type->fixed_ctr_bits; |
459 | } |
460 | |
461 | static inline |
462 | unsigned int uncore_freerunning_bits(struct intel_uncore_box *box, |
463 | struct perf_event *event) |
464 | { |
465 | unsigned int type = uncore_freerunning_type(config: event->hw.config); |
466 | |
467 | return box->pmu->type->freerunning[type].bits; |
468 | } |
469 | |
470 | static inline int uncore_num_freerunning(struct intel_uncore_box *box, |
471 | struct perf_event *event) |
472 | { |
473 | unsigned int type = uncore_freerunning_type(config: event->hw.config); |
474 | |
475 | return box->pmu->type->freerunning[type].num_counters; |
476 | } |
477 | |
478 | static inline int uncore_num_freerunning_types(struct intel_uncore_box *box, |
479 | struct perf_event *event) |
480 | { |
481 | return box->pmu->type->num_freerunning_types; |
482 | } |
483 | |
484 | static inline bool check_valid_freerunning_event(struct intel_uncore_box *box, |
485 | struct perf_event *event) |
486 | { |
487 | unsigned int type = uncore_freerunning_type(config: event->hw.config); |
488 | unsigned int idx = uncore_freerunning_idx(config: event->hw.config); |
489 | |
490 | return (type < uncore_num_freerunning_types(box, event)) && |
491 | (idx < uncore_num_freerunning(box, event)); |
492 | } |
493 | |
494 | static inline int uncore_num_counters(struct intel_uncore_box *box) |
495 | { |
496 | return box->pmu->type->num_counters; |
497 | } |
498 | |
499 | static inline bool is_freerunning_event(struct perf_event *event) |
500 | { |
501 | u64 cfg = event->attr.config; |
502 | |
503 | return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) && |
504 | (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START); |
505 | } |
506 | |
507 | /* Check and reject invalid config */ |
508 | static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box, |
509 | struct perf_event *event) |
510 | { |
511 | if (is_freerunning_event(event)) |
512 | return 0; |
513 | |
514 | return -EINVAL; |
515 | } |
516 | |
517 | static inline void uncore_disable_event(struct intel_uncore_box *box, |
518 | struct perf_event *event) |
519 | { |
520 | box->pmu->type->ops->disable_event(box, event); |
521 | } |
522 | |
523 | static inline void uncore_enable_event(struct intel_uncore_box *box, |
524 | struct perf_event *event) |
525 | { |
526 | box->pmu->type->ops->enable_event(box, event); |
527 | } |
528 | |
529 | static inline u64 uncore_read_counter(struct intel_uncore_box *box, |
530 | struct perf_event *event) |
531 | { |
532 | return box->pmu->type->ops->read_counter(box, event); |
533 | } |
534 | |
535 | static inline void uncore_box_init(struct intel_uncore_box *box) |
536 | { |
537 | if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, addr: &box->flags)) { |
538 | if (box->pmu->type->ops->init_box) |
539 | box->pmu->type->ops->init_box(box); |
540 | } |
541 | } |
542 | |
543 | static inline void uncore_box_exit(struct intel_uncore_box *box) |
544 | { |
545 | if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, addr: &box->flags)) { |
546 | if (box->pmu->type->ops->exit_box) |
547 | box->pmu->type->ops->exit_box(box); |
548 | } |
549 | } |
550 | |
551 | static inline bool uncore_box_is_fake(struct intel_uncore_box *box) |
552 | { |
553 | return (box->dieid < 0); |
554 | } |
555 | |
/* Map a perf_event back to the uncore PMU embedding its 'struct pmu'. */
static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}
560 | |
/* The owning box is cached in event->pmu_private. */
static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}
565 | |
/* Core uncore machinery: box lookup, counter access, hrtimer, perf hooks. */
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_mmio_exit_box(struct intel_uncore_box *box);
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);
584 | |
585 | extern struct intel_uncore_type *empty_uncore[]; |
586 | extern struct intel_uncore_type **uncore_msr_uncores; |
587 | extern struct intel_uncore_type **uncore_pci_uncores; |
588 | extern struct intel_uncore_type **uncore_mmio_uncores; |
589 | extern struct pci_driver *uncore_pci_driver; |
590 | extern struct pci_driver *uncore_pci_sub_driver; |
591 | extern raw_spinlock_t pci2phy_map_lock; |
592 | extern struct list_head pci2phy_map_head; |
593 | extern struct pci_extra_dev *; |
594 | extern struct event_constraint uncore_constraint_empty; |
595 | extern int spr_uncore_units_ignore[]; |
596 | extern int gnr_uncore_units_ignore[]; |
597 | |
/* Per-platform init hooks, grouped by the file that implements them. */
/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
void tgl_uncore_cpu_init(void);
void adl_uncore_cpu_init(void);
void mtl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
void adl_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);
int icx_uncore_pci_init(void);
void icx_uncore_cpu_init(void);
void icx_uncore_mmio_init(void);
int spr_uncore_pci_init(void);
void spr_uncore_cpu_init(void);
void spr_uncore_mmio_init(void);
int gnr_uncore_pci_init(void);
void gnr_uncore_cpu_init(void);
void gnr_uncore_mmio_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);