// SPDX-License-Identifier: GPL-2.0
#include <Python.h>
#include <structmember.h>
#include <inttypes.h>
#include <poll.h>
#include <linux/err.h>
#include <perf/cpumap.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
#include <perf/mmap.h>
#include "evlist.h"
#include "callchain.h"
#include "evsel.h"
#include "event.h"
#include "print_binary.h"
#include "thread_map.h"
#include "trace-event.h"
#include "mmap.h"
#include "stat.h"
#include "metricgroup.h"
#include "util/bpf-filter.h"
#include "util/env.h"
#include "util/pmu.h"
#include "util/pmus.h"
#include <internal/lib.h>
#include "util.h"

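/*
 * Compatibility shims: the C API spellings for strings and integers differ
 * between Python 2 and Python 3, so the rest of this file uses the
 * _PyUnicode_*()/_PyLong_*() wrappers below instead of calling either API
 * directly.
 */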
#if PY_MAJOR_VERSION < 3
#define _PyUnicode_FromString(arg) \
  PyString_FromString(arg)
#define _PyUnicode_AsString(arg) \
  PyString_AsString(arg)
#define _PyUnicode_FromFormat(...) \
  PyString_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
  PyInt_FromLong(arg)

#else

#define _PyUnicode_FromString(arg) \
  PyUnicode_FromString(arg)
/* Needed by get_tracepoint_field(), which uses _PyUnicode_AsString() on both major versions */
#define _PyUnicode_AsString(arg) \
  PyUnicode_AsUTF8(arg)
#define _PyUnicode_FromFormat(...) \
  PyUnicode_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
  PyLong_FromLong(arg)
#endif

#ifndef Py_TYPE
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#endif

/*
 * Avoid bringing in event parsing.
 */
int parse_event(struct evlist *evlist __maybe_unused, const char *str __maybe_unused)
{
        return 0;
}

/*
 * Provide these two so that we don't have to link against callchain.c and
 * start dragging hist.c, etc.
 */
struct callchain_param callchain_param;

int parse_callchain_record(const char *arg __maybe_unused,
                           struct callchain_param *param __maybe_unused)
{
        return 0;
}

/*
 * Add these not to drag util/env.c
 */
struct perf_env perf_env;

const char *perf_env__cpuid(struct perf_env *env __maybe_unused)
{
        return NULL;
}

// This one is a bit easier and wouldn't drag in too much, but leave it as a stub: we need it here.
const char *perf_env__arch(struct perf_env *env __maybe_unused)
{
        return NULL;
}

/*
 * These are needed so we don't drag in the PMU bandwagon (jevents-generated
 * pmu_sys_event_tables, etc.). evsel__find_pmu() is so far used only for
 * per-PMU perf_event_attr.exclude_guest handling, which is not needed for the
 * known perf python binding use cases; revisit if this becomes necessary.
 */
struct perf_pmu *evsel__find_pmu(const struct evsel *evsel __maybe_unused)
{
        return NULL;
}

int perf_pmu__scan_file(const struct perf_pmu *pmu, const char *name, const char *fmt, ...)
{
        return EOF;
}

int perf_pmus__num_core_pmus(void)
{
        return 1;
}

bool evsel__is_aux_event(const struct evsel *evsel __maybe_unused)
{
        return false;
}

bool perf_pmus__supports_extended_type(void)
{
        return false;
}

/*
 * Add this one here not to drag util/metricgroup.c
 */
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
                                    struct rblist *new_metric_events,
                                    struct rblist *old_metric_events)
{
        return 0;
}

/*
 * Add this one here not to drag util/trace-event-info.c
 */
char *tracepoint_id_to_name(u64 config)
{
        return NULL;
}

/*
 * XXX: All these evsel destructors need some better mechanism, like a linked
 * list of destructors registered when the relevant code indeed is used instead
 * of having more and more calls in perf_evsel__delete(). -- acme
 *
 * For now, add some more:
 *
 * Not to drag the BPF bandwagon...
 */
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
int bpf_counter__disable(struct evsel *evsel);

void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
}

int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
{
        return 0;
}

int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
        return 0;
}

// not to drag util/bpf-filter.c
#ifdef HAVE_BPF_SKEL
int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused)
{
        return 0;
}

int perf_bpf_filter__destroy(struct evsel *evsel __maybe_unused)
{
        return 0;
}
#endif

/*
 * Support debug printing even though util/debug.c is not linked. That means
 * implementing 'verbose' and 'eprintf'.
 */
int verbose;
int debug_kmaps;
int debug_peo_args;

int eprintf(int level, int var, const char *fmt, ...);

int eprintf(int level, int var, const char *fmt, ...)
{
        va_list args;
        int ret = 0;

        if (var >= level) {
                va_start(args, fmt);
                ret = vfprintf(stderr, fmt, args);
                va_end(args);
        }

        return ret;
}

/* Define PyVarObject_HEAD_INIT for python 2.5 */
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
#endif

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void);
#else
PyMODINIT_FUNC PyInit_perf(void);
#endif

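/*
 * The member_def()/sample_member_def() helpers below describe C struct fields
 * to Python: they compute the byte offset of a member inside the embedded
 * 'event' union or 'sample' struct of struct pyrf_event, so the PyMemberDef
 * tables can expose those fields as attributes of the event objects.
 */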
#define member_def(type, member, ptype, help) \
        { #member, ptype, \
          offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
          0, help }

#define sample_member_def(name, member, ptype, help) \
        { #name, ptype, \
          offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
          0, help }

struct pyrf_event {
        PyObject_HEAD
        struct evsel *evsel;
        struct perf_sample sample;
        union perf_event   event;
};

#define sample_members \
        sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"), \
        sample_member_def(sample_pid, pid, T_INT, "event pid"), \
        sample_member_def(sample_tid, tid, T_INT, "event tid"), \
        sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
        sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
        sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
        sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
        sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
        sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),

static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");

static PyMemberDef pyrf_mmap_event__members[] = {
        sample_members
        member_def(perf_event_header, type, T_UINT, "event type"),
        member_def(perf_event_header, misc, T_UINT, "event misc"),
        member_def(perf_record_mmap, pid, T_UINT, "event pid"),
        member_def(perf_record_mmap, tid, T_UINT, "event tid"),
        member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
        member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
        member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
        member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
        { .name = NULL, },
};

static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
{
        PyObject *ret;
        char *s;

        if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
                         "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
                         "filename: %s }",
                     pevent->event.mmap.pid, pevent->event.mmap.tid,
                     pevent->event.mmap.start, pevent->event.mmap.len,
                     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
                ret = PyErr_NoMemory();
        } else {
                ret = _PyUnicode_FromString(s);
                free(s);
        }
        return ret;
}

static PyTypeObject pyrf_mmap_event__type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name = "perf.mmap_event",
        .tp_basicsize = sizeof(struct pyrf_event),
        .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
        .tp_doc = pyrf_mmap_event__doc,
        .tp_members = pyrf_mmap_event__members,
        .tp_repr = (reprfunc)pyrf_mmap_event__repr,
};

static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");

static PyMemberDef pyrf_task_event__members[] = {
        sample_members
        member_def(perf_event_header, type, T_UINT, "event type"),
        member_def(perf_record_fork, pid, T_UINT, "event pid"),
        member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
        member_def(perf_record_fork, tid, T_UINT, "event tid"),
        member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
        member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
        { .name = NULL, },
};

static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
{
        return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
                                     "ptid: %u, time: %" PRI_lu64 "}",
                                     pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
                                     pevent->event.fork.pid,
                                     pevent->event.fork.ppid,
                                     pevent->event.fork.tid,
                                     pevent->event.fork.ptid,
                                     pevent->event.fork.time);
}

static PyTypeObject pyrf_task_event__type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name = "perf.task_event",
        .tp_basicsize = sizeof(struct pyrf_event),
        .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
        .tp_doc = pyrf_task_event__doc,
        .tp_members = pyrf_task_event__members,
        .tp_repr = (reprfunc)pyrf_task_event__repr,
};

static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");

static PyMemberDef pyrf_comm_event__members[] = {
        sample_members
        member_def(perf_event_header, type, T_UINT, "event type"),
        member_def(perf_record_comm, pid, T_UINT, "event pid"),
        member_def(perf_record_comm, tid, T_UINT, "event tid"),
        member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
        { .name = NULL, },
};

static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
{
        return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
                                     pevent->event.comm.pid,
                                     pevent->event.comm.tid,
                                     pevent->event.comm.comm);
}

static PyTypeObject pyrf_comm_event__type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name = "perf.comm_event",
        .tp_basicsize = sizeof(struct pyrf_event),
        .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
        .tp_doc = pyrf_comm_event__doc,
        .tp_members = pyrf_comm_event__members,
        .tp_repr = (reprfunc)pyrf_comm_event__repr,
};

static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");

static PyMemberDef pyrf_throttle_event__members[] = {
        sample_members
        member_def(perf_event_header, type, T_UINT, "event type"),
        member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
        member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
        member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
        { .name = NULL, },
};

static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
{
        struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);

        return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
                                     ", stream_id: %" PRI_lu64 " }",
                                     pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
                                     te->time, te->id, te->stream_id);
}

static PyTypeObject pyrf_throttle_event__type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name = "perf.throttle_event",
        .tp_basicsize = sizeof(struct pyrf_event),
        .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
        .tp_doc = pyrf_throttle_event__doc,
        .tp_members = pyrf_throttle_event__members,
        .tp_repr = (reprfunc)pyrf_throttle_event__repr,
};

static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");

static PyMemberDef pyrf_lost_event__members[] = {
        sample_members
        member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
        member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
        { .name = NULL, },
};

static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
{
        PyObject *ret;
        char *s;

        if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
                         "lost: %#" PRI_lx64 " }",
                     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
                ret = PyErr_NoMemory();
        } else {
                ret = _PyUnicode_FromString(s);
                free(s);
        }
        return ret;
}

static PyTypeObject pyrf_lost_event__type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name = "perf.lost_event",
        .tp_basicsize = sizeof(struct pyrf_event),
        .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
        .tp_doc = pyrf_lost_event__doc,
        .tp_members = pyrf_lost_event__members,
        .tp_repr = (reprfunc)pyrf_lost_event__repr,
};

static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");

static PyMemberDef pyrf_read_event__members[] = {
        sample_members
        member_def(perf_record_read, pid, T_UINT, "event pid"),
        member_def(perf_record_read, tid, T_UINT, "event tid"),
        { .name = NULL, },
};

static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
{
        return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
                                     pevent->event.read.pid,
                                     pevent->event.read.tid);
        /*
         * FIXME: return the array of read values,
         * making this method useful ;-)
         */
}

static PyTypeObject pyrf_read_event__type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name = "perf.read_event",
        .tp_basicsize = sizeof(struct pyrf_event),
        .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
        .tp_doc = pyrf_read_event__doc,
        .tp_members = pyrf_read_event__members,
        .tp_repr = (reprfunc)pyrf_read_event__repr,
};

static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");

static PyMemberDef pyrf_sample_event__members[] = {
        sample_members
        member_def(perf_event_header, type, T_UINT, "event type"),
        { .name = NULL, },
};

static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
{
        PyObject *ret;
        char *s;

        if (asprintf(&s, "{ type: sample }") < 0) {
                ret = PyErr_NoMemory();
        } else {
                ret = _PyUnicode_FromString(s);
                free(s);
        }
        return ret;
}

#ifdef HAVE_LIBTRACEEVENT
static bool is_tracepoint(struct pyrf_event *pevent)
{
        return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
}

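/*
 * Decode one tracepoint field from the raw sample payload. For dynamic
 * (__data_loc/__rel_loc) array fields, the word read from the record packs
 * the payload location as "length << 16 | offset"; that is unpacked below
 * before the data is turned into a Python string, bytearray or integer.
 */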
static PyObject*
tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
{
        struct tep_handle *pevent = field->event->tep;
        void *data = pe->sample.raw_data;
        PyObject *ret = NULL;
        unsigned long long val;
        unsigned int offset, len;

        if (field->flags & TEP_FIELD_IS_ARRAY) {
                offset = field->offset;
                len = field->size;
                if (field->flags & TEP_FIELD_IS_DYNAMIC) {
                        val = tep_read_number(pevent, data + offset, len);
                        offset = val;
                        len = offset >> 16;
                        offset &= 0xffff;
                        if (tep_field_is_relative(field->flags))
                                offset += field->offset + field->size;
                }
                if (field->flags & TEP_FIELD_IS_STRING &&
                    is_printable_array(data + offset, len)) {
                        ret = _PyUnicode_FromString((char *)data + offset);
                } else {
                        ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
                        field->flags &= ~TEP_FIELD_IS_STRING;
                }
        } else {
                val = tep_read_number(pevent, data + field->offset,
                                      field->size);
                if (field->flags & TEP_FIELD_IS_POINTER)
                        ret = PyLong_FromUnsignedLong((unsigned long) val);
                else if (field->flags & TEP_FIELD_IS_SIGNED)
                        ret = PyLong_FromLong((long) val);
                else
                        ret = PyLong_FromUnsignedLong((unsigned long) val);
        }

        return ret;
}

static PyObject*
get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
{
        const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
        struct evsel *evsel = pevent->evsel;
        struct tep_format_field *field;

        if (!evsel->tp_format) {
                struct tep_event *tp_format;

                tp_format = trace_event__tp_format_id(evsel->core.attr.config);
                if (IS_ERR_OR_NULL(tp_format))
                        return NULL;

                evsel->tp_format = tp_format;
        }

        field = tep_find_any_field(evsel->tp_format, str);
        if (!field)
                return NULL;

        return tracepoint_field(pevent, field);
}
#endif /* HAVE_LIBTRACEEVENT */

static PyObject*
pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
{
        PyObject *obj = NULL;

#ifdef HAVE_LIBTRACEEVENT
        if (is_tracepoint(pevent))
                obj = get_tracepoint_field(pevent, attr_name);
#endif

        return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
}

static PyTypeObject pyrf_sample_event__type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name = "perf.sample_event",
        .tp_basicsize = sizeof(struct pyrf_event),
        .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
        .tp_doc = pyrf_sample_event__doc,
        .tp_members = pyrf_sample_event__members,
        .tp_repr = (reprfunc)pyrf_sample_event__repr,
        .tp_getattro = (getattrofunc) pyrf_sample_event__getattro,
};

static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");

static PyMemberDef pyrf_context_switch_event__members[] = {
        sample_members
        member_def(perf_event_header, type, T_UINT, "event type"),
        member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
        member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
        { .name = NULL, },
};

static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
{
        PyObject *ret;
        char *s;

        if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
                     pevent->event.context_switch.next_prev_pid,
                     pevent->event.context_switch.next_prev_tid,
                     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
                ret = PyErr_NoMemory();
        } else {
                ret = _PyUnicode_FromString(s);
                free(s);
        }
        return ret;
}

static PyTypeObject pyrf_context_switch_event__type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name = "perf.context_switch_event",
        .tp_basicsize = sizeof(struct pyrf_event),
        .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
        .tp_doc = pyrf_context_switch_event__doc,
        .tp_members = pyrf_context_switch_event__members,
        .tp_repr = (reprfunc)pyrf_context_switch_event__repr,
};

static int pyrf_event__setup_types(void)
{
        int err;
        pyrf_mmap_event__type.tp_new =
        pyrf_task_event__type.tp_new =
        pyrf_comm_event__type.tp_new =
        pyrf_lost_event__type.tp_new =
        pyrf_read_event__type.tp_new =
        pyrf_sample_event__type.tp_new =
        pyrf_context_switch_event__type.tp_new =
        pyrf_throttle_event__type.tp_new = PyType_GenericNew;
        err = PyType_Ready(&pyrf_mmap_event__type);
        if (err < 0)
                goto out;
        err = PyType_Ready(&pyrf_lost_event__type);
        if (err < 0)
                goto out;
        err = PyType_Ready(&pyrf_task_event__type);
        if (err < 0)
                goto out;
        err = PyType_Ready(&pyrf_comm_event__type);
        if (err < 0)
                goto out;
        err = PyType_Ready(&pyrf_throttle_event__type);
        if (err < 0)
                goto out;
        err = PyType_Ready(&pyrf_read_event__type);
        if (err < 0)
                goto out;
        err = PyType_Ready(&pyrf_sample_event__type);
        if (err < 0)
                goto out;
        err = PyType_Ready(&pyrf_context_switch_event__type);
        if (err < 0)
                goto out;
out:
        return err;
}

static PyTypeObject *pyrf_event__type[] = {
        [PERF_RECORD_MMAP]              = &pyrf_mmap_event__type,
        [PERF_RECORD_LOST]              = &pyrf_lost_event__type,
        [PERF_RECORD_COMM]              = &pyrf_comm_event__type,
        [PERF_RECORD_EXIT]              = &pyrf_task_event__type,
        [PERF_RECORD_THROTTLE]          = &pyrf_throttle_event__type,
        [PERF_RECORD_UNTHROTTLE]        = &pyrf_throttle_event__type,
        [PERF_RECORD_FORK]              = &pyrf_task_event__type,
        [PERF_RECORD_READ]              = &pyrf_read_event__type,
        [PERF_RECORD_SAMPLE]            = &pyrf_sample_event__type,
        [PERF_RECORD_SWITCH]            = &pyrf_context_switch_event__type,
        [PERF_RECORD_SWITCH_CPU_WIDE]   = &pyrf_context_switch_event__type,
};

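/*
 * Wrap a raw ring-buffer record in the Python object matching its
 * PERF_RECORD_* type, using the pyrf_event__type[] table above. The record is
 * copied (header.size bytes) into the new object so it stays valid after the
 * ring-buffer slot is consumed; record types this binding doesn't handle make
 * it return NULL.
 */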
static PyObject *pyrf_event__new(union perf_event *event)
{
        struct pyrf_event *pevent;
        PyTypeObject *ptype;

        if ((event->header.type < PERF_RECORD_MMAP ||
             event->header.type > PERF_RECORD_SAMPLE) &&
            !(event->header.type == PERF_RECORD_SWITCH ||
              event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
                return NULL;

        ptype = pyrf_event__type[event->header.type];
        pevent = PyObject_New(struct pyrf_event, ptype);
        if (pevent != NULL)
                memcpy(&pevent->event, event, event->header.size);
        return (PyObject *)pevent;
}

struct pyrf_cpu_map {
        PyObject_HEAD

        struct perf_cpu_map *cpus;
};

static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
                              PyObject *args, PyObject *kwargs)
{
        static char *kwlist[] = { "cpustr", NULL };
        char *cpustr = NULL;

        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
                                         kwlist, &cpustr))
                return -1;

        pcpus->cpus = perf_cpu_map__new(cpustr);
        if (pcpus->cpus == NULL)
                return -1;
        return 0;
}

static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
{
        perf_cpu_map__put(pcpus->cpus);
        Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
}

static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
{
        struct pyrf_cpu_map *pcpus = (void *)obj;

        return perf_cpu_map__nr(pcpus->cpus);
}

static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
{
        struct pyrf_cpu_map *pcpus = (void *)obj;

        if (i >= perf_cpu_map__nr(pcpus->cpus))
                return NULL;

        return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
}

static PySequenceMethods pyrf_cpu_map__sequence_methods = {
        .sq_length = pyrf_cpu_map__length,
        .sq_item = pyrf_cpu_map__item,
};

static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");

static PyTypeObject pyrf_cpu_map__type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name = "perf.cpu_map",
        .tp_basicsize = sizeof(struct pyrf_cpu_map),
        .tp_dealloc = (destructor)pyrf_cpu_map__delete,
        .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
        .tp_doc = pyrf_cpu_map__doc,
        .tp_as_sequence = &pyrf_cpu_map__sequence_methods,
        .tp_init = (initproc)pyrf_cpu_map__init,
};

static int pyrf_cpu_map__setup_types(void)
{
        pyrf_cpu_map__type.tp_new = PyType_GenericNew;
        return PyType_Ready(&pyrf_cpu_map__type);
}

struct pyrf_thread_map {
        PyObject_HEAD

        struct perf_thread_map *threads;
};

static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
                                 PyObject *args, PyObject *kwargs)
{
        static char *kwlist[] = { "pid", "tid", "uid", NULL };
        int pid = -1, tid = -1, uid = UINT_MAX;

        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
                                         kwlist, &pid, &tid, &uid))
                return -1;

        pthreads->threads = thread_map__new(pid, tid, uid);
        if (pthreads->threads == NULL)
                return -1;
        return 0;
}

static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
{
        perf_thread_map__put(pthreads->threads);
        Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
}

static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
{
        struct pyrf_thread_map *pthreads = (void *)obj;

        return perf_thread_map__nr(pthreads->threads);
}

static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
{
        struct pyrf_thread_map *pthreads = (void *)obj;

        if (i >= perf_thread_map__nr(pthreads->threads))
                return NULL;

        return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
}

static PySequenceMethods pyrf_thread_map__sequence_methods = {
        .sq_length = pyrf_thread_map__length,
        .sq_item = pyrf_thread_map__item,
};

static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");

static PyTypeObject pyrf_thread_map__type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name = "perf.thread_map",
        .tp_basicsize = sizeof(struct pyrf_thread_map),
        .tp_dealloc = (destructor)pyrf_thread_map__delete,
        .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
        .tp_doc = pyrf_thread_map__doc,
        .tp_as_sequence = &pyrf_thread_map__sequence_methods,
        .tp_init = (initproc)pyrf_thread_map__init,
};

static int pyrf_thread_map__setup_types(void)
{
        pyrf_thread_map__type.tp_new = PyType_GenericNew;
        return PyType_Ready(&pyrf_thread_map__type);
}

struct pyrf_evsel {
        PyObject_HEAD

        struct evsel evsel;
};

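/*
 * Build a perf_event_attr from keyword arguments: each keyword maps onto the
 * attr field of the same name. Bit-field members are staged in the u32 locals
 * below because PyArg_ParseTupleAndKeywords() needs an address to store into
 * and C bit-fields don't have one.
 */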
static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
                            PyObject *args, PyObject *kwargs)
{
        struct perf_event_attr attr = {
                .type = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
                .sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
        };
        static char *kwlist[] = {
                "type",
                "config",
                "sample_freq",
                "sample_period",
                "sample_type",
                "read_format",
                "disabled",
                "inherit",
                "pinned",
                "exclusive",
                "exclude_user",
                "exclude_kernel",
                "exclude_hv",
                "exclude_idle",
                "mmap",
                "context_switch",
                "comm",
                "freq",
                "inherit_stat",
                "enable_on_exec",
                "task",
                "watermark",
                "precise_ip",
                "mmap_data",
                "sample_id_all",
                "wakeup_events",
                "bp_type",
                "bp_addr",
                "bp_len",
                NULL
        };
        u64 sample_period = 0;
        u32 disabled = 0,
            inherit = 0,
            pinned = 0,
            exclusive = 0,
            exclude_user = 0,
            exclude_kernel = 0,
            exclude_hv = 0,
            exclude_idle = 0,
            mmap = 0,
            context_switch = 0,
            comm = 0,
            freq = 1,
            inherit_stat = 0,
            enable_on_exec = 0,
            task = 0,
            watermark = 0,
            precise_ip = 0,
            mmap_data = 0,
            sample_id_all = 1;
        int idx = 0;

        if (!PyArg_ParseTupleAndKeywords(args, kwargs,
                                         "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
                                         &attr.type, &attr.config, &attr.sample_freq,
                                         &sample_period, &attr.sample_type,
                                         &attr.read_format, &disabled, &inherit,
                                         &pinned, &exclusive, &exclude_user,
                                         &exclude_kernel, &exclude_hv, &exclude_idle,
                                         &mmap, &context_switch, &comm, &freq, &inherit_stat,
                                         &enable_on_exec, &task, &watermark,
                                         &precise_ip, &mmap_data, &sample_id_all,
                                         &attr.wakeup_events, &attr.bp_type,
                                         &attr.bp_addr, &attr.bp_len, &idx))
                return -1;

        /* union... */
        if (sample_period != 0) {
                if (attr.sample_freq != 0)
                        return -1; /* FIXME: throw right exception */
                attr.sample_period = sample_period;
        }

        /* Bitfields */
        attr.disabled = disabled;
        attr.inherit = inherit;
        attr.pinned = pinned;
        attr.exclusive = exclusive;
        attr.exclude_user = exclude_user;
        attr.exclude_kernel = exclude_kernel;
        attr.exclude_hv = exclude_hv;
        attr.exclude_idle = exclude_idle;
        attr.mmap = mmap;
        attr.context_switch = context_switch;
        attr.comm = comm;
        attr.freq = freq;
        attr.inherit_stat = inherit_stat;
        attr.enable_on_exec = enable_on_exec;
        attr.task = task;
        attr.watermark = watermark;
        attr.precise_ip = precise_ip;
        attr.mmap_data = mmap_data;
        attr.sample_id_all = sample_id_all;
        attr.size = sizeof(attr);

        evsel__init(&pevsel->evsel, &attr, idx);
        return 0;
}

static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
{
        evsel__exit(&pevsel->evsel);
        Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
}

static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
                                  PyObject *args, PyObject *kwargs)
{
        struct evsel *evsel = &pevsel->evsel;
        struct perf_cpu_map *cpus = NULL;
        struct perf_thread_map *threads = NULL;
        PyObject *pcpus = NULL, *pthreads = NULL;
        int group = 0, inherit = 0;
        static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };

        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
                                         &pcpus, &pthreads, &group, &inherit))
                return NULL;

        if (pthreads != NULL)
                threads = ((struct pyrf_thread_map *)pthreads)->threads;

        if (pcpus != NULL)
                cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;

        evsel->core.attr.inherit = inherit;
        /*
         * This will group just the fds for this single evsel, to group
         * multiple events, use evlist.open().
         */
        if (evsel__open(evsel, cpus, threads) < 0) {
                PyErr_SetFromErrno(PyExc_OSError);
                return NULL;
        }

        Py_INCREF(Py_None);
        return Py_None;
}

static PyMethodDef pyrf_evsel__methods[] = {
        {
                .ml_name  = "open",
                .ml_meth  = (PyCFunction)pyrf_evsel__open,
                .ml_flags = METH_VARARGS | METH_KEYWORDS,
                .ml_doc   = PyDoc_STR("open the event selector file descriptor table.")
        },
        { .ml_name = NULL, }
};

static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");

static PyTypeObject pyrf_evsel__type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name = "perf.evsel",
        .tp_basicsize = sizeof(struct pyrf_evsel),
        .tp_dealloc = (destructor)pyrf_evsel__delete,
        .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
        .tp_doc = pyrf_evsel__doc,
        .tp_methods = pyrf_evsel__methods,
        .tp_init = (initproc)pyrf_evsel__init,
};

static int pyrf_evsel__setup_types(void)
{
        pyrf_evsel__type.tp_new = PyType_GenericNew;
        return PyType_Ready(&pyrf_evsel__type);
}

struct pyrf_evlist {
        PyObject_HEAD

        struct evlist evlist;
};

static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
                             PyObject *args, PyObject *kwargs __maybe_unused)
{
        PyObject *pcpus = NULL, *pthreads = NULL;
        struct perf_cpu_map *cpus;
        struct perf_thread_map *threads;

        if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
                return -1;

        threads = ((struct pyrf_thread_map *)pthreads)->threads;
        cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
        evlist__init(&pevlist->evlist, cpus, threads);
        return 0;
}

static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
{
        evlist__exit(&pevlist->evlist);
        Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
}

static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
                                   PyObject *args, PyObject *kwargs)
{
        struct evlist *evlist = &pevlist->evlist;
        static char *kwlist[] = { "pages", "overwrite", NULL };
        int pages = 128, overwrite = false;

        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
                                         &pages, &overwrite))
                return NULL;

        if (evlist__mmap(evlist, pages) < 0) {
                PyErr_SetFromErrno(PyExc_OSError);
                return NULL;
        }

        Py_INCREF(Py_None);
        return Py_None;
}

static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
                                   PyObject *args, PyObject *kwargs)
{
        struct evlist *evlist = &pevlist->evlist;
        static char *kwlist[] = { "timeout", NULL };
        int timeout = -1, n;

        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
                return NULL;

        n = evlist__poll(evlist, timeout);
        if (n < 0) {
                PyErr_SetFromErrno(PyExc_OSError);
                return NULL;
        }

        return Py_BuildValue("i", n);
}

static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
                                         PyObject *args __maybe_unused,
                                         PyObject *kwargs __maybe_unused)
{
        struct evlist *evlist = &pevlist->evlist;
        PyObject *list = PyList_New(0);
        int i;

        for (i = 0; i < evlist->core.pollfd.nr; ++i) {
                PyObject *file;
#if PY_MAJOR_VERSION < 3
                FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");

                if (fp == NULL)
                        goto free_list;

                file = PyFile_FromFile(fp, "perf", "r", NULL);
#else
                file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
                                     NULL, NULL, NULL, 0);
#endif
                if (file == NULL)
                        goto free_list;

                if (PyList_Append(list, file) != 0) {
                        Py_DECREF(file);
                        goto free_list;
                }

                Py_DECREF(file);
        }

        return list;
free_list:
        return PyErr_NoMemory();
}

static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
                                  PyObject *args,
                                  PyObject *kwargs __maybe_unused)
{
        struct evlist *evlist = &pevlist->evlist;
        PyObject *pevsel;
        struct evsel *evsel;

        if (!PyArg_ParseTuple(args, "O", &pevsel))
                return NULL;

        Py_INCREF(pevsel);
        evsel = &((struct pyrf_evsel *)pevsel)->evsel;
        evsel->core.idx = evlist->core.nr_entries;
        evlist__add(evlist, evsel);

        return Py_BuildValue("i", evlist->core.nr_entries);
}

static struct mmap *get_md(struct evlist *evlist, int cpu)
{
        int i;

        for (i = 0; i < evlist->core.nr_mmaps; i++) {
                struct mmap *md = &evlist->mmap[i];

                if (md->core.cpu.cpu == cpu)
                        return md;
        }

        return NULL;
}

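/*
 * Read one event from the mmap ring buffer of the given CPU: find the per-CPU
 * mmap, take a single record out of it, wrap it in a pyrf_event and parse the
 * sample fields into pevent->sample before the ring-buffer space is released
 * with perf_mmap__consume().
 */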
static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
                                          PyObject *args, PyObject *kwargs)
{
        struct evlist *evlist = &pevlist->evlist;
        union perf_event *event;
        int sample_id_all = 1, cpu;
        static char *kwlist[] = { "cpu", "sample_id_all", NULL };
        struct mmap *md;
        int err;

        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
                                         &cpu, &sample_id_all))
                return NULL;

        md = get_md(evlist, cpu);
        if (!md)
                return NULL;

        if (perf_mmap__read_init(&md->core) < 0)
                goto end;

        event = perf_mmap__read_event(&md->core);
        if (event != NULL) {
                PyObject *pyevent = pyrf_event__new(event);
                struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
                struct evsel *evsel;

                if (pyevent == NULL)
                        return PyErr_NoMemory();

                evsel = evlist__event2evsel(evlist, event);
                if (!evsel) {
                        Py_INCREF(Py_None);
                        return Py_None;
                }

                pevent->evsel = evsel;

                err = evsel__parse_sample(evsel, event, &pevent->sample);

                /* Consume the event only after we parsed it out. */
                perf_mmap__consume(&md->core);

                if (err)
                        return PyErr_Format(PyExc_OSError,
                                            "perf: can't parse sample, err=%d", err);
                return pyevent;
        }
end:
        Py_INCREF(Py_None);
        return Py_None;
}

static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
                                   PyObject *args, PyObject *kwargs)
{
        struct evlist *evlist = &pevlist->evlist;

        if (evlist__open(evlist) < 0) {
                PyErr_SetFromErrno(PyExc_OSError);
                return NULL;
        }

        Py_INCREF(Py_None);
        return Py_None;
}

static PyMethodDef pyrf_evlist__methods[] = {
        {
                .ml_name  = "mmap",
                .ml_meth  = (PyCFunction)pyrf_evlist__mmap,
                .ml_flags = METH_VARARGS | METH_KEYWORDS,
                .ml_doc   = PyDoc_STR("mmap the file descriptor table.")
        },
        {
                .ml_name  = "open",
                .ml_meth  = (PyCFunction)pyrf_evlist__open,
                .ml_flags = METH_VARARGS | METH_KEYWORDS,
                .ml_doc   = PyDoc_STR("open the file descriptors.")
        },
        {
                .ml_name  = "poll",
                .ml_meth  = (PyCFunction)pyrf_evlist__poll,
                .ml_flags = METH_VARARGS | METH_KEYWORDS,
                .ml_doc   = PyDoc_STR("poll the file descriptor table.")
        },
        {
                .ml_name  = "get_pollfd",
                .ml_meth  = (PyCFunction)pyrf_evlist__get_pollfd,
                .ml_flags = METH_VARARGS | METH_KEYWORDS,
                .ml_doc   = PyDoc_STR("get the poll file descriptor table.")
        },
        {
                .ml_name  = "add",
                .ml_meth  = (PyCFunction)pyrf_evlist__add,
                .ml_flags = METH_VARARGS | METH_KEYWORDS,
                .ml_doc   = PyDoc_STR("adds an event selector to the list.")
        },
        {
                .ml_name  = "read_on_cpu",
                .ml_meth  = (PyCFunction)pyrf_evlist__read_on_cpu,
                .ml_flags = METH_VARARGS | METH_KEYWORDS,
                .ml_doc   = PyDoc_STR("reads an event.")
        },
        { .ml_name = NULL, }
};

static Py_ssize_t pyrf_evlist__length(PyObject *obj)
{
        struct pyrf_evlist *pevlist = (void *)obj;

        return pevlist->evlist.core.nr_entries;
}

static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
{
        struct pyrf_evlist *pevlist = (void *)obj;
        struct evsel *pos;

        if (i >= pevlist->evlist.core.nr_entries)
                return NULL;

        evlist__for_each_entry(&pevlist->evlist, pos) {
                if (i-- == 0)
                        break;
        }

        return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
}

static PySequenceMethods pyrf_evlist__sequence_methods = {
        .sq_length = pyrf_evlist__length,
        .sq_item = pyrf_evlist__item,
};

static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");

static PyTypeObject pyrf_evlist__type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name = "perf.evlist",
        .tp_basicsize = sizeof(struct pyrf_evlist),
        .tp_dealloc = (destructor)pyrf_evlist__delete,
        .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
        .tp_as_sequence = &pyrf_evlist__sequence_methods,
        .tp_doc = pyrf_evlist__doc,
        .tp_methods = pyrf_evlist__methods,
        .tp_init = (initproc)pyrf_evlist__init,
};

static int pyrf_evlist__setup_types(void)
{
        pyrf_evlist__type.tp_new = PyType_GenericNew;
        return PyType_Ready(&pyrf_evlist__type);
}
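
/*
 * Rough sketch of how these types are meant to be driven from Python. This is
 * illustrative only (tools/perf/python/twatch.py in the tree is a maintained,
 * complete example); the exact event attributes are up to the caller:
 *
 *   import perf
 *
 *   cpus    = perf.cpu_map()
 *   threads = perf.thread_map(-1)
 *   evsel   = perf.evsel(type = perf.TYPE_SOFTWARE,
 *                        config = perf.COUNT_SW_DUMMY,
 *                        task = 1, comm = 1, sample_id_all = 1,
 *                        sample_type = perf.SAMPLE_CPU | perf.SAMPLE_TID)
 *   evsel.open(cpus = cpus, threads = threads)
 *   evlist  = perf.evlist(cpus, threads)
 *   evlist.add(evsel)
 *   evlist.mmap()
 *   while True:
 *       evlist.poll(timeout = -1)
 *       for cpu in cpus:
 *           event = evlist.read_on_cpu(cpu)
 *           if event:
 *               print(event)
 */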

#define PERF_CONST(name) { #name, PERF_##name }

static struct {
        const char *name;
        int         value;
} perf__constants[] = {
        PERF_CONST(TYPE_HARDWARE),
        PERF_CONST(TYPE_SOFTWARE),
        PERF_CONST(TYPE_TRACEPOINT),
        PERF_CONST(TYPE_HW_CACHE),
        PERF_CONST(TYPE_RAW),
        PERF_CONST(TYPE_BREAKPOINT),

        PERF_CONST(COUNT_HW_CPU_CYCLES),
        PERF_CONST(COUNT_HW_INSTRUCTIONS),
        PERF_CONST(COUNT_HW_CACHE_REFERENCES),
        PERF_CONST(COUNT_HW_CACHE_MISSES),
        PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
        PERF_CONST(COUNT_HW_BRANCH_MISSES),
        PERF_CONST(COUNT_HW_BUS_CYCLES),
        PERF_CONST(COUNT_HW_CACHE_L1D),
        PERF_CONST(COUNT_HW_CACHE_L1I),
        PERF_CONST(COUNT_HW_CACHE_LL),
        PERF_CONST(COUNT_HW_CACHE_DTLB),
        PERF_CONST(COUNT_HW_CACHE_ITLB),
        PERF_CONST(COUNT_HW_CACHE_BPU),
        PERF_CONST(COUNT_HW_CACHE_OP_READ),
        PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
        PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
        PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
        PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),

        PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
        PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),

        PERF_CONST(COUNT_SW_CPU_CLOCK),
        PERF_CONST(COUNT_SW_TASK_CLOCK),
        PERF_CONST(COUNT_SW_PAGE_FAULTS),
        PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
        PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
        PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
        PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
        PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
        PERF_CONST(COUNT_SW_EMULATION_FAULTS),
        PERF_CONST(COUNT_SW_DUMMY),

        PERF_CONST(SAMPLE_IP),
        PERF_CONST(SAMPLE_TID),
        PERF_CONST(SAMPLE_TIME),
        PERF_CONST(SAMPLE_ADDR),
        PERF_CONST(SAMPLE_READ),
        PERF_CONST(SAMPLE_CALLCHAIN),
        PERF_CONST(SAMPLE_ID),
        PERF_CONST(SAMPLE_CPU),
        PERF_CONST(SAMPLE_PERIOD),
        PERF_CONST(SAMPLE_STREAM_ID),
        PERF_CONST(SAMPLE_RAW),

        PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
        PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
        PERF_CONST(FORMAT_ID),
        PERF_CONST(FORMAT_GROUP),

        PERF_CONST(RECORD_MMAP),
        PERF_CONST(RECORD_LOST),
        PERF_CONST(RECORD_COMM),
        PERF_CONST(RECORD_EXIT),
        PERF_CONST(RECORD_THROTTLE),
        PERF_CONST(RECORD_UNTHROTTLE),
        PERF_CONST(RECORD_FORK),
        PERF_CONST(RECORD_READ),
        PERF_CONST(RECORD_SAMPLE),
        PERF_CONST(RECORD_MMAP2),
        PERF_CONST(RECORD_AUX),
        PERF_CONST(RECORD_ITRACE_START),
        PERF_CONST(RECORD_LOST_SAMPLES),
        PERF_CONST(RECORD_SWITCH),
        PERF_CONST(RECORD_SWITCH_CPU_WIDE),

        PERF_CONST(RECORD_MISC_SWITCH_OUT),
        { .name = NULL, },
};

static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
                                  PyObject *args, PyObject *kwargs)
{
#ifndef HAVE_LIBTRACEEVENT
        return NULL;
#else
        struct tep_event *tp_format;
        static char *kwlist[] = { "sys", "name", NULL };
        char *sys = NULL;
        char *name = NULL;

        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
                                         &sys, &name))
                return NULL;

        tp_format = trace_event__tp_format(sys, name);
        if (IS_ERR(tp_format))
                return _PyLong_FromLong(-1);

        return _PyLong_FromLong(tp_format->id);
#endif // HAVE_LIBTRACEEVENT
}

static PyMethodDef perf__methods[] = {
        {
                .ml_name  = "tracepoint",
                .ml_meth  = (PyCFunction) pyrf__tracepoint,
                .ml_flags = METH_VARARGS | METH_KEYWORDS,
                .ml_doc   = PyDoc_STR("Get tracepoint config.")
        },
        { .ml_name = NULL, }
};

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void)
#else
PyMODINIT_FUNC PyInit_perf(void)
#endif
{
        PyObject *obj;
        int i;
        PyObject *dict;
#if PY_MAJOR_VERSION < 3
        PyObject *module = Py_InitModule("perf", perf__methods);
#else
        static struct PyModuleDef moduledef = {
                PyModuleDef_HEAD_INIT,
                "perf",                 /* m_name */
                "",                     /* m_doc */
                -1,                     /* m_size */
                perf__methods,          /* m_methods */
                NULL,                   /* m_reload */
                NULL,                   /* m_traverse */
                NULL,                   /* m_clear */
                NULL,                   /* m_free */
        };
        PyObject *module = PyModule_Create(&moduledef);
#endif

        if (module == NULL ||
            pyrf_event__setup_types() < 0 ||
            pyrf_evlist__setup_types() < 0 ||
            pyrf_evsel__setup_types() < 0 ||
            pyrf_thread_map__setup_types() < 0 ||
            pyrf_cpu_map__setup_types() < 0)
#if PY_MAJOR_VERSION < 3
                return;
#else
                return module;
#endif

        /* The page_size is placed in util object. */
        page_size = sysconf(_SC_PAGE_SIZE);

        Py_INCREF(&pyrf_evlist__type);
        PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);

        Py_INCREF(&pyrf_evsel__type);
        PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);

        Py_INCREF(&pyrf_mmap_event__type);
        PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);

        Py_INCREF(&pyrf_lost_event__type);
        PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);

        Py_INCREF(&pyrf_comm_event__type);
        PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);

        Py_INCREF(&pyrf_task_event__type);
        PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);

        Py_INCREF(&pyrf_throttle_event__type);
        PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);

        Py_INCREF(&pyrf_read_event__type);
        PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);

        Py_INCREF(&pyrf_sample_event__type);
        PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);

        Py_INCREF(&pyrf_context_switch_event__type);
        PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);

        Py_INCREF(&pyrf_thread_map__type);
        PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);

        Py_INCREF(&pyrf_cpu_map__type);
        PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);

        dict = PyModule_GetDict(module);
        if (dict == NULL)
                goto error;

        for (i = 0; perf__constants[i].name != NULL; i++) {
                obj = _PyLong_FromLong(perf__constants[i].value);
                if (obj == NULL)
                        goto error;
                PyDict_SetItemString(dict, perf__constants[i].name, obj);
                Py_DECREF(obj);
        }

error:
        if (PyErr_Occurred())
                PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
#if PY_MAJOR_VERSION >= 3
        return module;
#endif
}

/*
 * Dummy, to avoid dragging all the test_attr infrastructure in the python
 * binding.
 */
void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
                     int fd, int group_fd, unsigned long flags)
{
}

void evlist__free_stats(struct evlist *evlist)
{
}

