// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/bitfield.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/stringhash.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "coresight-config.h"
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-syscfg.h"
#include "coresight-trace-id.h"

static struct pmu etm_pmu;
static bool etm_perf_up;

/*
 * An ETM context for a running event includes the perf aux handle
 * and the aux_data. For ETM, the aux_data (etm_event_data) consists of
 * the trace path and the sink configuration. The event data is accessible
 * via perf_get_aux(handle). However, a sink could "end" a perf output
 * handle via the IRQ handler. And if the "sink" encounters a failure
 * to "begin" another session (e.g. due to lack of space in the buffer),
 * the handle will be cleared. Thus, the event_data may not be accessible
 * from the handle by the time we get to etm_event_stop(), which needs it
 * to stop the trace path. The event_data is guaranteed to stay alive
 * until "free_aux()", which cannot happen as long as the event is active
 * on the ETM. Thus the event_data for the session must be part of the ETM
 * context to make sure we can disable the trace path.
 */
struct etm_ctxt {
	struct perf_output_handle handle;
	struct etm_event_data *event_data;
};

/* Per-CPU tracing context and the ETM source device driving each CPU. */
static DEFINE_PER_CPU(struct etm_ctxt, etm_ctxt);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);

/*
 * The PMU formats were originally for ETMv3.5/PTM's ETMCR 'config';
 * now take them as general formats and apply them to all ETMs.
 */
PMU_FORMAT_ATTR(branch_broadcast, "config:" __stringify(ETM_OPT_BRANCH_BROADCAST));
PMU_FORMAT_ATTR(cycacc,		"config:" __stringify(ETM_OPT_CYCACC));
/* contextid1 enables tracing CONTEXTIDR_EL1 for ETMv4 */
PMU_FORMAT_ATTR(contextid1,	"config:" __stringify(ETM_OPT_CTXTID));
/* contextid2 enables tracing CONTEXTIDR_EL2 for ETMv4 */
PMU_FORMAT_ATTR(contextid2,	"config:" __stringify(ETM_OPT_CTXTID2));
PMU_FORMAT_ATTR(timestamp,	"config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack,	"config:" __stringify(ETM_OPT_RETSTK));
/* preset - if sink ID is used as a configuration selector */
PMU_FORMAT_ATTR(preset,		"config:0-3");
/* Sink ID - same for all ETMs */
PMU_FORMAT_ATTR(sinkid,		"config2:0-31");
/* config ID - set if a system configuration is selected */
PMU_FORMAT_ATTR(configid,	"config2:32-63");

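/*
 * Example perf usage of the formats above (a sketch only: the sink name
 * "tmc_etf0" and the "autofdo" configuration are platform dependent and
 * given here as assumptions):
 *
 *   perf record -e cs_etm/cycacc,timestamp/ -- ls        trace options
 *   perf record -e cs_etm/@tmc_etf0/ -- ls               select a sink
 *   perf record -e cs_etm/autofdo,preset=1/ -- ls        select a config
 *
 * The perf tool translates the @sink and configuration names into the
 * 'sinkid' and 'configid' hashes defined above.
 */
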
/*
 * contextid always traces the "PID". The PID is in CONTEXTIDR_EL1
 * when the kernel is running at EL1; when the kernel is at EL2,
 * the PID is in CONTEXTIDR_EL2.
 */
static ssize_t format_attr_contextid_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	int pid_fmt = ETM_OPT_CTXTID;

#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
	pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
#endif
	return sprintf(page, "config:%d\n", pid_fmt);
}

static struct device_attribute format_attr_contextid =
	__ATTR(contextid, 0444, format_attr_contextid_show, NULL);

static struct attribute *etm_config_formats_attr[] = {
	&format_attr_cycacc.attr,
	&format_attr_contextid.attr,
	&format_attr_contextid1.attr,
	&format_attr_contextid2.attr,
	&format_attr_timestamp.attr,
	&format_attr_retstack.attr,
	&format_attr_sinkid.attr,
	&format_attr_preset.attr,
	&format_attr_configid.attr,
	&format_attr_branch_broadcast.attr,
	NULL,
};

static const struct attribute_group etm_pmu_format_group = {
	.name = "format",
	.attrs = etm_config_formats_attr,
};

static struct attribute *etm_config_sinks_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_sinks_group = {
	.name = "sinks",
	.attrs = etm_config_sinks_attr,
};

static struct attribute *etm_config_events_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_events_group = {
	.name = "events",
	.attrs = etm_config_events_attr,
};

static const struct attribute_group *etm_pmu_attr_groups[] = {
	&etm_pmu_format_group,
	&etm_pmu_sinks_group,
	&etm_pmu_events_group,
	NULL,
};

static inline struct list_head **
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
{
	return per_cpu_ptr(data->path, cpu);
}

static inline struct list_head *
etm_event_cpu_path(struct etm_event_data *data, int cpu)
{
	return *etm_event_cpu_path_ptr(data, cpu);
}

static void etm_event_read(struct perf_event *event) {}

static int etm_addr_filters_alloc(struct perf_event *event)
{
	struct etm_filters *filters;
	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

	filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
	if (!filters)
		return -ENOMEM;

	if (event->parent)
		memcpy(filters, event->parent->hw.addr_filters,
		       sizeof(*filters));

	event->hw.addr_filters = filters;

	return 0;
}

static void etm_event_destroy(struct perf_event *event)
{
	kfree(event->hw.addr_filters);
	event->hw.addr_filters = NULL;
}

static int etm_event_init(struct perf_event *event)
{
	int ret = 0;

	if (event->attr.type != etm_pmu.type) {
		ret = -ENOENT;
		goto out;
	}

	ret = etm_addr_filters_alloc(event);
	if (ret)
		goto out;

	event->destroy = etm_event_destroy;
out:
	return ret;
}

static void free_sink_buffer(struct etm_event_data *event_data)
{
	int cpu;
	cpumask_t *mask = &event_data->mask;
	struct coresight_device *sink;

	if (!event_data->snk_config)
		return;

	if (WARN_ON(cpumask_empty(mask)))
		return;

	cpu = cpumask_first(mask);
	sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
	sink_ops(sink)->free_buffer(event_data->snk_config);
}

static void free_event_data(struct work_struct *work)
{
	int cpu;
	cpumask_t *mask;
	struct etm_event_data *event_data;

	event_data = container_of(work, struct etm_event_data, work);
	mask = &event_data->mask;

	/* Free the sink buffers, if there are any */
	free_sink_buffer(event_data);

	/* clear any configuration we were using */
	if (event_data->cfg_hash)
		cscfg_deactivate_config(event_data->cfg_hash);

	for_each_cpu(cpu, mask) {
		struct list_head **ppath;

		ppath = etm_event_cpu_path_ptr(event_data, cpu);
		if (!(IS_ERR_OR_NULL(*ppath)))
			coresight_release_path(*ppath);
		*ppath = NULL;
		coresight_trace_id_put_cpu_id(cpu);
	}

	/* mark perf event as done for trace id allocator */
	coresight_trace_id_perf_stop();

	free_percpu(event_data->path);
	kfree(event_data);
}

static void *alloc_event_data(int cpu)
{
	cpumask_t *mask;
	struct etm_event_data *event_data;

	/* First get memory for the session's data */
	event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
	if (!event_data)
		return NULL;

	mask = &event_data->mask;
	if (cpu != -1)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_copy(mask, cpu_present_mask);

	/*
	 * Each CPU has a single path between source and destination. As such
	 * allocate an array using CPU numbers as indexes. That way a path
	 * for any CPU can easily be accessed at any given time. We proceed
	 * the same way for sessions involving a single CPU. The cost of
	 * unused memory when dealing with single CPU trace scenarios is small
	 * compared to the cost of searching through an optimized array.
	 */
	event_data->path = alloc_percpu(struct list_head *);

	if (!event_data->path) {
		kfree(event_data);
		return NULL;
	}

	return event_data;
}

static void etm_free_aux(void *data)
{
	struct etm_event_data *event_data = data;

	schedule_work(&event_data->work);
}

/*
 * Check if two given sinks are compatible with each other,
 * so that they can use the same sink buffers, when an event
 * moves around.
 */
static bool sinks_compatible(struct coresight_device *a,
			     struct coresight_device *b)
{
	if (!a || !b)
		return false;
	/*
	 * If the sinks are of the same subtype and driven
	 * by the same driver, we can use the same buffer
	 * on these sinks.
	 */
	return (a->subtype.sink_subtype == b->subtype.sink_subtype) &&
	       (sink_ops(a) == sink_ops(b));
}

static void *etm_setup_aux(struct perf_event *event, void **pages,
			   int nr_pages, bool overwrite)
{
	u32 id, cfg_hash;
	int cpu = event->cpu;
	int trace_id;
	cpumask_t *mask;
	struct coresight_device *sink = NULL;
	struct coresight_device *user_sink = NULL, *last_sink = NULL;
	struct etm_event_data *event_data = NULL;

	event_data = alloc_event_data(cpu);
	if (!event_data)
		return NULL;
	INIT_WORK(&event_data->work, free_event_data);

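	/*
	 * Layout of event->attr.config2, per the 'sinkid' and 'configid'
	 * format attributes defined above:
	 *   bits [31:0]  - hash of the selected sink's name, 0 if none
	 *   bits [63:32] - hash of the selected configuration's name, 0 if none
	 */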
	/* First get the selected sink from user space. */
	if (event->attr.config2 & GENMASK_ULL(31, 0)) {
		id = (u32)event->attr.config2;
		sink = user_sink = coresight_get_sink_by_id(id);
	}

	/* tell the trace ID allocator that a perf event is starting up */
	coresight_trace_id_perf_start();

	/* check if user wants a coresight configuration selected */
	cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32);
	if (cfg_hash) {
		if (cscfg_activate_config(cfg_hash))
			goto err;
		event_data->cfg_hash = cfg_hash;
	}

	mask = &event_data->mask;

	/*
	 * Setup the path for each CPU in a trace session. We try to build
	 * a trace path for each CPU in the mask. If we don't find an ETM
	 * for the CPU or fail to build a path, we clear the CPU from the
	 * mask and continue with the rest. If ever we try to trace on those
	 * CPUs, we can handle it and fail the session.
	 */
	for_each_cpu(cpu, mask) {
		struct list_head *path;
		struct coresight_device *csdev;

		csdev = per_cpu(csdev_src, cpu);
		/*
		 * If there is no ETM associated with this CPU, clear it from
		 * the mask and continue with the rest. If ever we try to trace
		 * on this CPU, we handle it accordingly.
		 */
		if (!csdev) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/*
		 * No sink provided - look for a default sink for all the ETMs,
		 * where this event can be scheduled.
		 * We allocate the sink specific buffers only once for this
		 * event. If the ETMs have different default sink devices, we
		 * can only use a single "type" of sink as the event can carry
		 * only one sink specific buffer. Thus we have to make sure
		 * that the sinks are of the same type and driven by the same
		 * driver, as the one we allocate the buffer for. As such
		 * we choose the first sink and check if the remaining ETMs
		 * have a compatible default sink. We don't trace on a CPU
		 * if the sink is not compatible.
		 */
		if (!user_sink) {
			/* Find the default sink for this ETM */
			sink = coresight_find_default_sink(csdev);
			if (!sink) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}

			/* Check if this sink is compatible with the last sink */
			if (last_sink && !sinks_compatible(last_sink, sink)) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}
			last_sink = sink;
		}

		/*
		 * Building a path doesn't enable it, it simply builds a
		 * list of devices from source to sink that can be
		 * referenced later when the path is actually needed.
		 */
		path = coresight_build_path(csdev, sink);
		if (IS_ERR(path)) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/* ensure we can allocate a trace ID for this CPU */
		trace_id = coresight_trace_id_get_cpu_id(cpu);
		if (!IS_VALID_CS_TRACE_ID(trace_id)) {
			cpumask_clear_cpu(cpu, mask);
			coresight_release_path(path);
			continue;
		}

		*etm_event_cpu_path_ptr(event_data, cpu) = path;
	}

	/* no sink found for any CPU - cannot trace */
	if (!sink)
		goto err;

	/* If we don't have any CPUs ready for tracing, abort */
	cpu = cpumask_first(mask);
	if (cpu >= nr_cpu_ids)
		goto err;

	if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
		goto err;

	/*
	 * Allocate the sink buffer for this session. All the sinks
	 * where this event can be scheduled are ensured to be of the
	 * same type. Thus the same sink configuration is used by the
	 * sinks.
	 */
	event_data->snk_config =
		sink_ops(sink)->alloc_buffer(sink, event, pages,
					     nr_pages, overwrite);
	if (!event_data->snk_config)
		goto err;

out:
	return event_data;

err:
	etm_free_aux(event_data);
	event_data = NULL;
	goto out;
}

static void etm_event_start(struct perf_event *event, int flags)
{
	int cpu = smp_processor_id();
	struct etm_event_data *event_data;
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct list_head *path;
	u64 hw_id;

	if (!csdev)
		goto fail;

	/* Have we messed up our tracking ? */
	if (WARN_ON(ctxt->event_data))
		goto fail;

	/*
	 * Deal with the ring buffer API and get a handle on the
	 * session's information.
	 */
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data)
		goto fail;

	/*
	 * Check if this ETM is allowed to trace, as decided
	 * at etm_setup_aux(). This could be due to an unreachable
	 * sink from this ETM. We can't do much in this case if
	 * the sink was specified or hinted to the driver. For
	 * now, simply don't record anything on this ETM.
	 *
	 * As such we pretend that everything is fine, and let
	 * it continue without actually tracing. The event could
	 * resume tracing when it moves to a CPU from which a sink
	 * is reachable.
	 */
	if (!cpumask_test_cpu(cpu, &event_data->mask))
		goto out;

	path = etm_event_cpu_path(event_data, cpu);
	/* We need a sink, no need to continue without one */
	sink = coresight_get_sink(path);
	if (WARN_ON_ONCE(!sink))
		goto fail_end_stop;

	/* Nothing will happen without a path */
	if (coresight_enable_path(path, CS_MODE_PERF, handle))
		goto fail_end_stop;

	/* Finally enable the tracer */
	if (coresight_enable_source(csdev, CS_MODE_PERF, event))
		goto fail_disable_path;

	/*
	 * output cpu / trace ID in perf record, once for the lifetime
	 * of the event.
	 */
	if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) {
		cpumask_set_cpu(cpu, &event_data->aux_hwid_done);
		hw_id = FIELD_PREP(CS_AUX_HW_ID_VERSION_MASK,
				   CS_AUX_HW_ID_CURR_VERSION);
		hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK,
				    coresight_trace_id_read_cpu_id(cpu));
		perf_report_aux_output_id(event, hw_id);
	}

out:
	/* Tell the perf core the event is alive */
	event->hw.state = 0;
	/* Save the event_data for this ETM */
	ctxt->event_data = event_data;
	return;

fail_disable_path:
	coresight_disable_path(path);
fail_end_stop:
	/*
	 * Check if the handle is still associated with the event,
	 * to handle cases where the sink failed to start the
	 * trace and TRUNCATED the handle already.
	 */
	if (READ_ONCE(handle->event)) {
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
		perf_aux_output_end(handle, 0);
	}
fail:
	event->hw.state = PERF_HES_STOPPED;
	return;
}

static void etm_event_stop(struct perf_event *event, int mode)
{
	int cpu = smp_processor_id();
	unsigned long size;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct etm_event_data *event_data;
	struct list_head *path;

	/*
	 * If we still have access to the event_data via handle,
	 * confirm that we haven't messed up the tracking.
	 */
	if (handle->event &&
	    WARN_ON(perf_get_aux(handle) != ctxt->event_data))
		return;

	event_data = ctxt->event_data;
	/* Clear the event_data as this ETM is stopping the trace. */
	ctxt->event_data = NULL;

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	/* We must have a valid event_data for a running event */
	if (WARN_ON(!event_data))
		return;

	/*
	 * Check if this ETM was allowed to trace, as decided at
	 * etm_setup_aux(). If it wasn't allowed to trace, then
	 * nothing needs to be torn down other than outputting a
	 * zero sized record.
	 */
	if (handle->event && (mode & PERF_EF_UPDATE) &&
	    !cpumask_test_cpu(cpu, &event_data->mask)) {
		event->hw.state = PERF_HES_STOPPED;
		perf_aux_output_end(handle, 0);
		return;
	}

	if (!csdev)
		return;

	path = etm_event_cpu_path(event_data, cpu);
	if (!path)
		return;

	sink = coresight_get_sink(path);
	if (!sink)
		return;

	/* stop tracer */
	coresight_disable_source(csdev, event);

	/* tell the core */
	event->hw.state = PERF_HES_STOPPED;

	/*
	 * If the handle is not bound to an event anymore
	 * (e.g, the sink driver was unable to restart the
	 * handle due to lack of buffer space), we don't
	 * have to do anything here.
	 */
	if (handle->event && (mode & PERF_EF_UPDATE)) {
		if (WARN_ON_ONCE(handle->event != event))
			return;

		/* update trace information */
		if (!sink_ops(sink)->update_buffer)
			return;

		size = sink_ops(sink)->update_buffer(sink, handle,
						     event_data->snk_config);
		/*
		 * Make sure the handle is still valid as the
		 * sink could have closed it from an IRQ.
		 * The sink driver must handle the race between
		 * update_buffer() and the IRQ. Thus we should
		 * get a valid handle and a valid size (which
		 * may be 0), but never a non-zero size with
		 * an invalid handle.
		 */
		if (READ_ONCE(handle->event))
			perf_aux_output_end(handle, size);
		else
			WARN_ON(size);
	}

	/* Disabling the path makes its elements available to other sessions */
	coresight_disable_path(path);
}

static int etm_event_add(struct perf_event *event, int mode)
{
	int ret = 0;
	struct hw_perf_event *hwc = &event->hw;

	if (mode & PERF_EF_START) {
		etm_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			ret = -EINVAL;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	return ret;
}

static void etm_event_del(struct perf_event *event, int mode)
{
	etm_event_stop(event, PERF_EF_UPDATE);
}

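/*
 * Address filters arrive here from the perf core. A sketch of the perf
 * command line syntax that exercises this path (the binary and the
 * addresses are examples only):
 *
 *   perf record -e cs_etm// --filter 'filter 0x72c/0x40@/bin/ls' -- ls
 *   perf record -e cs_etm// --filter 'start 0x72c@/bin/ls,stop 0x76c@/bin/ls' -- ls
 *
 * 'filter' maps to an address range comparator, 'start'/'stop' to single
 * address comparators; up to ETM_ADDR_CMP_MAX filters are accepted.
 */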
static int etm_addr_filters_validate(struct list_head *filters)
{
	bool range = false, address = false;
	int index = 0;
	struct perf_addr_filter *filter;

	list_for_each_entry(filter, filters, entry) {
		/*
		 * No need to go further if there's no more
		 * room for filters.
		 */
		if (++index > ETM_ADDR_CMP_MAX)
			return -EOPNOTSUPP;

		/* filter::size == 0 means single address trigger */
		if (filter->size) {
			/*
			 * The existing code relies on START/STOP filters
			 * being address filters.
			 */
			if (filter->action == PERF_ADDR_FILTER_ACTION_START ||
			    filter->action == PERF_ADDR_FILTER_ACTION_STOP)
				return -EOPNOTSUPP;

			range = true;
		} else
			address = true;

		/*
		 * At this time we don't allow range and start/stop filtering
		 * to cohabitate, they have to be mutually exclusive.
		 */
		if (range && address)
			return -EOPNOTSUPP;
	}

	return 0;
}

static void etm_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long start, stop;
	struct perf_addr_filter_range *fr = event->addr_filter_ranges;
	struct etm_filters *filters = event->hw.addr_filters;
	struct etm_filter *etm_filter;
	struct perf_addr_filter *filter;
	int i = 0;

	list_for_each_entry(filter, &head->list, entry) {
		start = fr[i].start;
		stop = start + fr[i].size;
		etm_filter = &filters->etm_filter[i];

		switch (filter->action) {
		case PERF_ADDR_FILTER_ACTION_FILTER:
			etm_filter->start_addr = start;
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_RANGE;
			break;
		case PERF_ADDR_FILTER_ACTION_START:
			etm_filter->start_addr = start;
			etm_filter->type = ETM_ADDR_TYPE_START;
			break;
		case PERF_ADDR_FILTER_ACTION_STOP:
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_STOP;
			break;
		}
		i++;
	}

	filters->nr_filters = i;
}

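/*
 * Create (or remove) a "cpuN" symlink in the PMU's sysfs directory,
 * pointing at the CoreSight device for that CPU, and record the ETM in
 * the per-CPU csdev_src table used by the event start/stop callbacks.
 * The link typically appears as /sys/bus/event_source/devices/cs_etm/cpuN
 * (path given as a hint; it depends on how the PMU device is exposed).
 */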
int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
	char entry[sizeof("cpu9999999")];
	int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
	struct device *pmu_dev = etm_pmu.dev;
	struct device *cs_dev = &csdev->dev;

	sprintf(entry, "cpu%d", cpu);

	if (!etm_perf_up)
		return -EPROBE_DEFER;

	if (link) {
		ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
		if (ret)
			return ret;
		per_cpu(csdev_src, cpu) = csdev;
	} else {
		sysfs_remove_link(&pmu_dev->kobj, entry);
		per_cpu(csdev_src, cpu) = NULL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(etm_perf_symlink);

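/*
 * Show the hash of the sink's name, as computed in
 * etm_perf_add_symlink_group(). The perf tool reads this attribute and
 * feeds the value back through the 'sinkid' field of config2 (the
 * "@sinkname" event modifier), which coresight_get_sink_by_id() then
 * resolves in etm_setup_aux().
 */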
static ssize_t etm_perf_sink_name_show(struct device *dev,
				       struct device_attribute *dattr,
				       char *buf)
{
	struct dev_ext_attribute *ea;

	ea = container_of(dattr, struct dev_ext_attribute, attr);
	return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
}

static struct dev_ext_attribute *
etm_perf_add_symlink_group(struct device *dev, const char *name, const char *group_name)
{
	struct dev_ext_attribute *ea;
	unsigned long hash;
	int ret;
	struct device *pmu_dev = etm_pmu.dev;

	if (!etm_perf_up)
		return ERR_PTR(-EPROBE_DEFER);

	ea = devm_kzalloc(dev, sizeof(*ea), GFP_KERNEL);
	if (!ea)
		return ERR_PTR(-ENOMEM);

	/*
	 * If this function is called adding a sink then the hash is used for
	 * sink selection - see function coresight_get_sink_by_id().
	 * If adding a configuration then the hash is used for selection in
	 * cscfg_activate_config().
	 */
	hash = hashlen_hash(hashlen_string(NULL, name));

	sysfs_attr_init(&ea->attr.attr);
	ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!ea->attr.attr.name)
		return ERR_PTR(-ENOMEM);

	ea->attr.attr.mode = 0444;
	ea->var = (unsigned long *)hash;

	ret = sysfs_add_file_to_group(&pmu_dev->kobj,
				      &ea->attr.attr, group_name);

	return ret ? ERR_PTR(ret) : ea;
}

int etm_perf_add_symlink_sink(struct coresight_device *csdev)
{
	const char *name;
	struct device *dev = &csdev->dev;
	int err = 0;

	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return -EINVAL;

	if (csdev->ea != NULL)
		return -EINVAL;

	name = dev_name(dev);
	csdev->ea = etm_perf_add_symlink_group(dev, name, "sinks");
	if (IS_ERR(csdev->ea)) {
		err = PTR_ERR(csdev->ea);
		csdev->ea = NULL;
	} else
		csdev->ea->attr.show = etm_perf_sink_name_show;

	return err;
}

static void etm_perf_del_symlink_group(struct dev_ext_attribute *ea, const char *group_name)
{
	struct device *pmu_dev = etm_pmu.dev;

	sysfs_remove_file_from_group(&pmu_dev->kobj,
				     &ea->attr.attr, group_name);
}

void etm_perf_del_symlink_sink(struct coresight_device *csdev)
{
	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return;

	if (!csdev->ea)
		return;

	etm_perf_del_symlink_group(csdev->ea, "sinks");
	csdev->ea = NULL;
}

static ssize_t etm_perf_cscfg_event_show(struct device *dev,
					 struct device_attribute *dattr,
					 char *buf)
{
	struct dev_ext_attribute *ea;

	ea = container_of(dattr, struct dev_ext_attribute, attr);
	return scnprintf(buf, PAGE_SIZE, "configid=0x%lx\n", (unsigned long)(ea->var));
}

int etm_perf_add_symlink_cscfg(struct device *dev, struct cscfg_config_desc *config_desc)
{
	int err = 0;

	if (config_desc->event_ea != NULL)
		return 0;

	config_desc->event_ea = etm_perf_add_symlink_group(dev, config_desc->name, "events");

	/* set the show function to the custom cscfg event */
	if (!IS_ERR(config_desc->event_ea))
		config_desc->event_ea->attr.show = etm_perf_cscfg_event_show;
	else {
		err = PTR_ERR(config_desc->event_ea);
		config_desc->event_ea = NULL;
	}

	return err;
}

void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc)
{
	if (!config_desc->event_ea)
		return;

	etm_perf_del_symlink_group(config_desc->event_ea, "events");
	config_desc->event_ea = NULL;
}

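/*
 * Register the "cs_etm" PMU (CORESIGHT_ETM_PMU_NAME) with the perf core
 * and wire up the callbacks defined above. PERF_PMU_CAP_EXCLUSIVE and
 * PERF_PMU_CAP_ITRACE advertise it as an AUX-area instruction-tracing
 * PMU requiring exclusive access.
 */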
int __init etm_perf_init(void)
{
	int ret;

	etm_pmu.capabilities = (PERF_PMU_CAP_EXCLUSIVE |
				PERF_PMU_CAP_ITRACE);

	etm_pmu.attr_groups = etm_pmu_attr_groups;
	etm_pmu.task_ctx_nr = perf_sw_context;
	etm_pmu.read = etm_event_read;
	etm_pmu.event_init = etm_event_init;
	etm_pmu.setup_aux = etm_setup_aux;
	etm_pmu.free_aux = etm_free_aux;
	etm_pmu.start = etm_event_start;
	etm_pmu.stop = etm_event_stop;
	etm_pmu.add = etm_event_add;
	etm_pmu.del = etm_event_del;
	etm_pmu.addr_filters_sync = etm_addr_filters_sync;
	etm_pmu.addr_filters_validate = etm_addr_filters_validate;
	etm_pmu.nr_addr_filters = ETM_ADDR_CMP_MAX;
	etm_pmu.module = THIS_MODULE;

	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
	if (ret == 0)
		etm_perf_up = true;

	return ret;
}

void etm_perf_exit(void)
{
	perf_pmu_unregister(&etm_pmu);
}