/* SPDX-License-Identifier: GPL-2.0 */
#include <stdlib.h>

#include <bpf/bpf.h>
#include <linux/err.h>
#include <internal/xyarray.h>

#include "util/debug.h"
#include "util/evsel.h"

#include "util/bpf-filter.h"
#include <util/bpf-filter-flex.h>
#include <util/bpf-filter-bison.h>

#include "bpf_skel/sample-filter.h"
#include "bpf_skel/sample_filter.skel.h"

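/* Fetch the perf event fd stored at position (x, y) in the evsel's fd xyarray. */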
#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))

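/* Expand a PERF_SAMPLE_xxx flag into a { flag, "flag name", option } initializer. */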
#define __PERF_SAMPLE_TYPE(st, opt) { st, #st, opt }
#define PERF_SAMPLE_TYPE(_st, opt) __PERF_SAMPLE_TYPE(PERF_SAMPLE_##_st, opt)

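/*
 * Maps each supported PERF_SAMPLE_* flag to its name and the perf record
 * option that enables it (NULL for flags that are set by default).
 */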
static const struct perf_sample_info {
	u64 type;
	const char *name;
	const char *option;
} sample_table[] = {
	/* default sample flags */
	PERF_SAMPLE_TYPE(IP, NULL),
	PERF_SAMPLE_TYPE(TID, NULL),
	PERF_SAMPLE_TYPE(PERIOD, NULL),
	/* flags mostly set by default, but still have options */
	PERF_SAMPLE_TYPE(ID, "--sample-identifier"),
	PERF_SAMPLE_TYPE(CPU, "--sample-cpu"),
	PERF_SAMPLE_TYPE(TIME, "-T"),
	/* optional sample flags */
	PERF_SAMPLE_TYPE(ADDR, "-d"),
	PERF_SAMPLE_TYPE(DATA_SRC, "-d"),
	PERF_SAMPLE_TYPE(PHYS_ADDR, "--phys-data"),
	PERF_SAMPLE_TYPE(WEIGHT, "-W"),
	PERF_SAMPLE_TYPE(WEIGHT_STRUCT, "-W"),
	PERF_SAMPLE_TYPE(TRANSACTION, "--transaction"),
	PERF_SAMPLE_TYPE(CODE_PAGE_SIZE, "--code-page-size"),
	PERF_SAMPLE_TYPE(DATA_PAGE_SIZE, "--data-page-size"),
};

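/* Find the sample_table entry whose type matches the given sample flag, or NULL. */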
static const struct perf_sample_info *get_sample_info(u64 flags)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(sample_table); i++) {
		if (sample_table[i].type == flags)
			return &sample_table[i];
	}
	return NULL;
}

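/*
 * Make sure the event actually collects the sample data needed by the
 * filter expression.  Group expressions are checked recursively.  On a
 * mismatch, report which sample flag is missing and, if known, which
 * perf record option would enable it.
 */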
static int check_sample_flags(struct evsel *evsel, struct perf_bpf_filter_expr *expr)
{
	const struct perf_sample_info *info;

	if (evsel->core.attr.sample_type & expr->sample_flags)
		return 0;

	if (expr->op == PBF_OP_GROUP_BEGIN) {
		struct perf_bpf_filter_expr *group;

		list_for_each_entry(group, &expr->groups, list) {
			if (check_sample_flags(evsel, group) < 0)
				return -1;
		}
		return 0;
	}

	info = get_sample_info(expr->sample_flags);
	if (info == NULL) {
		pr_err("Error: %s event does not have sample flags %lx\n",
		       evsel__name(evsel), expr->sample_flags);
		return -1;
	}

	pr_err("Error: %s event does not have %s\n", evsel__name(evsel), info->name);
	if (info->option)
		pr_err(" Hint: please add %s option to perf record\n", info->option);
	return -1;
}

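/*
 * Load the sample_filter BPF skeleton, copy each filter expression into
 * the 'filters' BPF map (group expressions are bracketed by GROUP_BEGIN
 * and GROUP_END entries) and attach the filter program to every perf
 * event fd of the evsel.
 */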
int perf_bpf_filter__prepare(struct evsel *evsel)
{
	int i, x, y, fd;
	struct sample_filter_bpf *skel;
	struct bpf_program *prog;
	struct bpf_link *link;
	struct perf_bpf_filter_expr *expr;

	skel = sample_filter_bpf__open_and_load();
	if (!skel) {
		pr_err("Failed to load perf sample-filter BPF skeleton\n");
		return -1;
	}

	i = 0;
	fd = bpf_map__fd(skel->maps.filters);
	list_for_each_entry(expr, &evsel->bpf_filters, list) {
		struct perf_bpf_filter_entry entry = {
			.op = expr->op,
			.part = expr->part,
			.flags = expr->sample_flags,
			.value = expr->val,
		};

		if (check_sample_flags(evsel, expr) < 0)
			return -1;

		bpf_map_update_elem(fd, &i, &entry, BPF_ANY);
		i++;

		if (expr->op == PBF_OP_GROUP_BEGIN) {
			struct perf_bpf_filter_expr *group;

			list_for_each_entry(group, &expr->groups, list) {
				struct perf_bpf_filter_entry group_entry = {
					.op = group->op,
					.part = group->part,
					.flags = group->sample_flags,
					.value = group->val,
				};
				bpf_map_update_elem(fd, &i, &group_entry, BPF_ANY);
				i++;
			}

			memset(&entry, 0, sizeof(entry));
			entry.op = PBF_OP_GROUP_END;
			bpf_map_update_elem(fd, &i, &entry, BPF_ANY);
			i++;
		}
	}

	if (i > MAX_FILTERS) {
		pr_err("Too many filters: %d (max = %d)\n", i, MAX_FILTERS);
		return -1;
	}
	prog = skel->progs.perf_sample_filter;
	for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
		for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
			link = bpf_program__attach_perf_event(prog, FD(evsel, x, y));
			if (IS_ERR(link)) {
				pr_err("Failed to attach perf sample-filter program\n");
				return PTR_ERR(link);
			}
		}
	}
	evsel->bpf_skel = skel;
	return 0;
}

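/* Free all filter expressions of the evsel and tear down the BPF skeleton. */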
int perf_bpf_filter__destroy(struct evsel *evsel)
{
	struct perf_bpf_filter_expr *expr, *tmp;

	list_for_each_entry_safe(expr, tmp, &evsel->bpf_filters, list) {
		list_del(&expr->list);
		free(expr);
	}
	sample_filter_bpf__destroy(evsel->bpf_skel);
	return 0;
}

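/* Number of samples dropped by the BPF filter, read from the skeleton's BSS counter. */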
u64 perf_bpf_filter__lost_count(struct evsel *evsel)
{
	struct sample_filter_bpf *skel = evsel->bpf_skel;

	return skel ? skel->bss->dropped : 0;
}

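/* Allocate and initialize a filter expression node; returns NULL on allocation failure. */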
struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(unsigned long sample_flags, int part,
						       enum perf_bpf_filter_op op,
						       unsigned long val)
{
	struct perf_bpf_filter_expr *expr;

	expr = malloc(sizeof(*expr));
	if (expr != NULL) {
		expr->sample_flags = sample_flags;
		expr->part = part;
		expr->op = op;
		expr->val = val;
		INIT_LIST_HEAD(&expr->groups);
	}
	return expr;
}

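/*
 * Parse a filter string with the generated flex/bison parser and append
 * the resulting expressions to expr_head.  Returns the parser's status.
 */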
int perf_bpf_filter__parse(struct list_head *expr_head, const char *str)
{
	YY_BUFFER_STATE buffer;
	int ret;

	buffer = perf_bpf_filter__scan_string(str);

	ret = perf_bpf_filter_parse(expr_head);

	perf_bpf_filter__flush_buffer(buffer);
	perf_bpf_filter__delete_buffer(buffer);
	perf_bpf_filter_lex_destroy();

	return ret;
}


/* source: linux/tools/perf/util/bpf-filter.c */