1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. |
4 | * |
5 | * Description: CoreSight Program Flow Trace driver |
6 | */ |
7 | |
8 | #include <linux/kernel.h> |
9 | #include <linux/moduleparam.h> |
10 | #include <linux/init.h> |
11 | #include <linux/types.h> |
12 | #include <linux/device.h> |
13 | #include <linux/io.h> |
14 | #include <linux/err.h> |
15 | #include <linux/fs.h> |
16 | #include <linux/slab.h> |
17 | #include <linux/delay.h> |
18 | #include <linux/smp.h> |
19 | #include <linux/sysfs.h> |
20 | #include <linux/stat.h> |
21 | #include <linux/pm_runtime.h> |
22 | #include <linux/cpu.h> |
23 | #include <linux/of.h> |
24 | #include <linux/coresight.h> |
25 | #include <linux/coresight-pmu.h> |
26 | #include <linux/amba/bus.h> |
27 | #include <linux/seq_file.h> |
28 | #include <linux/uaccess.h> |
29 | #include <linux/clk.h> |
30 | #include <linux/perf_event.h> |
31 | #include <asm/sections.h> |
32 | |
33 | #include "coresight-etm.h" |
34 | #include "coresight-etm-perf.h" |
35 | #include "coresight-trace-id.h" |
36 | |
37 | /* |
38 | * Not really modular but using module_param is the easiest way to |
39 | * remain consistent with existing use cases for now. |
40 | */ |
41 | static int boot_enable; |
42 | module_param_named(boot_enable, boot_enable, int, S_IRUGO); |
43 | |
44 | static struct etm_drvdata *etmdrvdata[NR_CPUS]; |
45 | |
46 | static enum cpuhp_state hp_online; |
47 | |
48 | /* |
49 | * Memory mapped writes to clear os lock are not supported on some processors |
50 | * and OS lock must be unlocked before any memory mapped access on such |
51 | * processors, otherwise memory mapped reads/writes will be invalid. |
52 | */ |
53 | static void etm_os_unlock(struct etm_drvdata *drvdata) |
54 | { |
55 | /* Writing any value to ETMOSLAR unlocks the trace registers */ |
56 | etm_writel(drvdata, val: 0x0, ETMOSLAR); |
57 | drvdata->os_unlock = true; |
58 | isb(); |
59 | } |
60 | |
61 | static void etm_set_pwrdwn(struct etm_drvdata *drvdata) |
62 | { |
63 | u32 etmcr; |
64 | |
65 | /* Ensure pending cp14 accesses complete before setting pwrdwn */ |
66 | mb(); |
67 | isb(); |
68 | etmcr = etm_readl(drvdata, ETMCR); |
69 | etmcr |= ETMCR_PWD_DWN; |
70 | etm_writel(drvdata, val: etmcr, ETMCR); |
71 | } |
72 | |
73 | static void etm_clr_pwrdwn(struct etm_drvdata *drvdata) |
74 | { |
75 | u32 etmcr; |
76 | |
77 | etmcr = etm_readl(drvdata, ETMCR); |
78 | etmcr &= ~ETMCR_PWD_DWN; |
79 | etm_writel(drvdata, val: etmcr, ETMCR); |
80 | /* Ensure pwrup completes before subsequent cp14 accesses */ |
81 | mb(); |
82 | isb(); |
83 | } |
84 | |
85 | static void etm_set_pwrup(struct etm_drvdata *drvdata) |
86 | { |
87 | u32 etmpdcr; |
88 | |
89 | etmpdcr = readl_relaxed(drvdata->base + ETMPDCR); |
90 | etmpdcr |= ETMPDCR_PWD_UP; |
91 | writel_relaxed(etmpdcr, drvdata->base + ETMPDCR); |
92 | /* Ensure pwrup completes before subsequent cp14 accesses */ |
93 | mb(); |
94 | isb(); |
95 | } |
96 | |
97 | static void etm_clr_pwrup(struct etm_drvdata *drvdata) |
98 | { |
99 | u32 etmpdcr; |
100 | |
101 | /* Ensure pending cp14 accesses complete before clearing pwrup */ |
102 | mb(); |
103 | isb(); |
104 | etmpdcr = readl_relaxed(drvdata->base + ETMPDCR); |
105 | etmpdcr &= ~ETMPDCR_PWD_UP; |
106 | writel_relaxed(etmpdcr, drvdata->base + ETMPDCR); |
107 | } |
108 | |
109 | /** |
110 | * coresight_timeout_etm - loop until a bit has changed to a specific state. |
111 | * @drvdata: etm's private data structure. |
112 | * @offset: address of a register, starting from @addr. |
113 | * @position: the position of the bit of interest. |
114 | * @value: the value the bit should have. |
115 | * |
116 | * Basically the same as @coresight_timeout except for the register access |
117 | * method where we have to account for CP14 configurations. |
118 | * |
119 | * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if |
120 | * TIMEOUT_US has elapsed, which ever happens first. |
121 | */ |
122 | |
123 | static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset, |
124 | int position, int value) |
125 | { |
126 | int i; |
127 | u32 val; |
128 | |
129 | for (i = TIMEOUT_US; i > 0; i--) { |
130 | val = etm_readl(drvdata, off: offset); |
131 | /* Waiting on the bit to go from 0 to 1 */ |
132 | if (value) { |
133 | if (val & BIT(position)) |
134 | return 0; |
135 | /* Waiting on the bit to go from 1 to 0 */ |
136 | } else { |
137 | if (!(val & BIT(position))) |
138 | return 0; |
139 | } |
140 | |
141 | /* |
142 | * Delay is arbitrary - the specification doesn't say how long |
143 | * we are expected to wait. Extra check required to make sure |
144 | * we don't wait needlessly on the last iteration. |
145 | */ |
146 | if (i - 1) |
147 | udelay(1); |
148 | } |
149 | |
150 | return -EAGAIN; |
151 | } |
152 | |
153 | |
154 | static void etm_set_prog(struct etm_drvdata *drvdata) |
155 | { |
156 | u32 etmcr; |
157 | |
158 | etmcr = etm_readl(drvdata, ETMCR); |
159 | etmcr |= ETMCR_ETM_PRG; |
160 | etm_writel(drvdata, val: etmcr, ETMCR); |
161 | /* |
162 | * Recommended by spec for cp14 accesses to ensure etmcr write is |
163 | * complete before polling etmsr |
164 | */ |
165 | isb(); |
166 | if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, value: 1)) { |
167 | dev_err(&drvdata->csdev->dev, |
168 | "%s: timeout observed when probing at offset %#x\n" , |
169 | __func__, ETMSR); |
170 | } |
171 | } |
172 | |
173 | static void etm_clr_prog(struct etm_drvdata *drvdata) |
174 | { |
175 | u32 etmcr; |
176 | |
177 | etmcr = etm_readl(drvdata, ETMCR); |
178 | etmcr &= ~ETMCR_ETM_PRG; |
179 | etm_writel(drvdata, val: etmcr, ETMCR); |
180 | /* |
181 | * Recommended by spec for cp14 accesses to ensure etmcr write is |
182 | * complete before polling etmsr |
183 | */ |
184 | isb(); |
185 | if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, value: 0)) { |
186 | dev_err(&drvdata->csdev->dev, |
187 | "%s: timeout observed when probing at offset %#x\n" , |
188 | __func__, ETMSR); |
189 | } |
190 | } |
191 | |
192 | void etm_set_default(struct etm_config *config) |
193 | { |
194 | int i; |
195 | |
196 | if (WARN_ON_ONCE(!config)) |
197 | return; |
198 | |
199 | /* |
200 | * Taken verbatim from the TRM: |
201 | * |
202 | * To trace all memory: |
203 | * set bit [24] in register 0x009, the ETMTECR1, to 1 |
204 | * set all other bits in register 0x009, the ETMTECR1, to 0 |
205 | * set all bits in register 0x007, the ETMTECR2, to 0 |
206 | * set register 0x008, the ETMTEEVR, to 0x6F (TRUE). |
207 | */ |
208 | config->enable_ctrl1 = ETMTECR1_INC_EXC; |
209 | config->enable_ctrl2 = 0x0; |
210 | config->enable_event = ETM_HARD_WIRE_RES_A; |
211 | |
212 | config->trigger_event = ETM_DEFAULT_EVENT_VAL; |
213 | config->enable_event = ETM_HARD_WIRE_RES_A; |
214 | |
215 | config->seq_12_event = ETM_DEFAULT_EVENT_VAL; |
216 | config->seq_21_event = ETM_DEFAULT_EVENT_VAL; |
217 | config->seq_23_event = ETM_DEFAULT_EVENT_VAL; |
218 | config->seq_31_event = ETM_DEFAULT_EVENT_VAL; |
219 | config->seq_32_event = ETM_DEFAULT_EVENT_VAL; |
220 | config->seq_13_event = ETM_DEFAULT_EVENT_VAL; |
221 | config->timestamp_event = ETM_DEFAULT_EVENT_VAL; |
222 | |
223 | for (i = 0; i < ETM_MAX_CNTR; i++) { |
224 | config->cntr_rld_val[i] = 0x0; |
225 | config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL; |
226 | config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL; |
227 | config->cntr_val[i] = 0x0; |
228 | } |
229 | |
230 | config->seq_curr_state = 0x0; |
231 | config->ctxid_idx = 0x0; |
232 | for (i = 0; i < ETM_MAX_CTXID_CMP; i++) |
233 | config->ctxid_pid[i] = 0x0; |
234 | |
235 | config->ctxid_mask = 0x0; |
236 | /* Setting default to 1024 as per TRM recommendation */ |
237 | config->sync_freq = 0x400; |
238 | } |
239 | |
/*
 * etm_config_trace_mode - narrow tracing to kernel-only or user-only.
 *
 * Honours ETM_MODE_EXCL_KERN / ETM_MODE_EXCL_USER in @config->mode by
 * programming a single address range comparator spanning the whole
 * address space, with the comparator's access-type state/mode bits
 * selecting which privilege level is matched.  Does nothing when
 * neither or both exclusion flags are set.
 */
void etm_config_trace_mode(struct etm_config *config)
{
	u32 flags, mode;

	mode = config->mode;

	mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);

	/* excluding kernel AND user space doesn't make sense */
	if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		return;

	/* nothing to do if neither flags are set */
	if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
		return;

	/* Base ETMACTRn access-type value for an instruction comparator */
	flags = (1 << 0 | /* instruction execute */
		 3 << 3 | /* ARM instruction */
		 0 << 5 | /* No data value comparison */
		 0 << 7 | /* No exact mach */
		 0 << 8); /* Ignore context ID */

	/* No need to worry about single address comparators. */
	config->enable_ctrl2 = 0x0;

	/* Bit 0 is address range comparator 1 */
	config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;

	/*
	 * On ETMv3.5:
	 * ETMACTRn[13,11] == Non-secure state comparison control
	 * ETMACTRn[12,10] == Secure state comparison control
	 *
	 * b00 == Match in all modes in this state
	 * b01 == Do not match in any more in this state
	 * b10 == Match in all modes excepts user mode in this state
	 * b11 == Match only in user mode in this state
	 */

	/* Tracing in secure mode is not supported at this time */
	flags |= (0 << 12 | 1 << 10);

	if (mode & ETM_MODE_EXCL_USER) {
		/* exclude user, match all modes except user mode */
		flags |= (1 << 13 | 0 << 11);
	} else {
		/* exclude kernel, match only in user mode */
		flags |= (1 << 13 | 1 << 11);
	}

	/*
	 * The ETMEEVR register is already set to "hard wire A". As such
	 * all there is to do is setup an address comparator that spans
	 * the entire address range and configure the state and mode bits.
	 */
	config->addr_val[0] = (u32) 0x0;
	config->addr_val[1] = (u32) ~0x0;
	config->addr_acctype[0] = flags;
	config->addr_acctype[1] = flags;
	config->addr_type[0] = ETM_ADDR_TYPE_RANGE;
	config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
}
302 | |
/* Option bits in perf's attr->config that this driver understands */
#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | \
				 ETMCR_TIMESTAMP_EN | \
				 ETMCR_RETURN_STACK)

/*
 * etm_parse_event_config - build drvdata->config from a perf event.
 *
 * Starts from the default whole-address-range configuration, applies
 * user/kernel exclusion and the supported ETMCR option bits from
 * @event->attr.config.  Returns 0 on success, -EINVAL for a missing
 * attr or unsupported option bits.
 */
static int etm_parse_event_config(struct etm_drvdata *drvdata,
				  struct perf_event *event)
{
	struct etm_config *config = &drvdata->config;
	struct perf_event_attr *attr = &event->attr;

	if (!attr)
		return -EINVAL;

	/* Clear configuration from previous run */
	memset(config, 0, sizeof(struct etm_config));

	/*
	 * NOTE(review): plain assignment means that when both
	 * exclude_kernel and exclude_user are set, EXCL_USER wins -
	 * confirm this is the intended precedence.
	 */
	if (attr->exclude_kernel)
		config->mode = ETM_MODE_EXCL_KERN;

	if (attr->exclude_user)
		config->mode = ETM_MODE_EXCL_USER;

	/* Always start from the default config */
	etm_set_default(config);

	/*
	 * By default the tracers are configured to trace the whole address
	 * range. Narrow the field only if requested by user space.
	 */
	if (config->mode)
		etm_config_trace_mode(config);

	/*
	 * At this time only cycle accurate, return stack and timestamp
	 * options are available.
	 */
	if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
		return -EINVAL;

	config->ctrl = attr->config;

	/* Don't trace contextID when runs in non-root PID namespace */
	if (!task_is_in_init_pid_ns(current))
		config->ctrl &= ~ETMCR_CTXID_SIZE;

	/*
	 * Possible to have cores with PTM (supports ret stack) and ETM
	 * (never has ret stack) on the same SoC. So if we have a request
	 * for return stack that can't be honoured on this core then
	 * clear the bit - trace will still continue normally
	 */
	if ((config->ctrl & ETMCR_RETURN_STACK) &&
	    !(drvdata->etmccer & ETMCCER_RETSTACK))
		config->ctrl &= ~ETMCR_RETURN_STACK;

	return 0;
}
360 | |
361 | static int etm_enable_hw(struct etm_drvdata *drvdata) |
362 | { |
363 | int i, rc; |
364 | u32 etmcr; |
365 | struct etm_config *config = &drvdata->config; |
366 | struct coresight_device *csdev = drvdata->csdev; |
367 | |
368 | CS_UNLOCK(addr: drvdata->base); |
369 | |
370 | rc = coresight_claim_device_unlocked(csdev); |
371 | if (rc) |
372 | goto done; |
373 | |
374 | /* Turn engine on */ |
375 | etm_clr_pwrdwn(drvdata); |
376 | /* Apply power to trace registers */ |
377 | etm_set_pwrup(drvdata); |
378 | /* Make sure all registers are accessible */ |
379 | etm_os_unlock(drvdata); |
380 | |
381 | etm_set_prog(drvdata); |
382 | |
383 | etmcr = etm_readl(drvdata, ETMCR); |
384 | /* Clear setting from a previous run if need be */ |
385 | etmcr &= ~ETM3X_SUPPORTED_OPTIONS; |
386 | etmcr |= drvdata->port_size; |
387 | etmcr |= ETMCR_ETM_EN; |
388 | etm_writel(drvdata, val: config->ctrl | etmcr, ETMCR); |
389 | etm_writel(drvdata, val: config->trigger_event, ETMTRIGGER); |
390 | etm_writel(drvdata, val: config->startstop_ctrl, ETMTSSCR); |
391 | etm_writel(drvdata, val: config->enable_event, ETMTEEVR); |
392 | etm_writel(drvdata, val: config->enable_ctrl1, ETMTECR1); |
393 | etm_writel(drvdata, val: config->fifofull_level, ETMFFLR); |
394 | for (i = 0; i < drvdata->nr_addr_cmp; i++) { |
395 | etm_writel(drvdata, val: config->addr_val[i], ETMACVRn(i)); |
396 | etm_writel(drvdata, val: config->addr_acctype[i], ETMACTRn(i)); |
397 | } |
398 | for (i = 0; i < drvdata->nr_cntr; i++) { |
399 | etm_writel(drvdata, val: config->cntr_rld_val[i], ETMCNTRLDVRn(i)); |
400 | etm_writel(drvdata, val: config->cntr_event[i], ETMCNTENRn(i)); |
401 | etm_writel(drvdata, val: config->cntr_rld_event[i], |
402 | ETMCNTRLDEVRn(i)); |
403 | etm_writel(drvdata, val: config->cntr_val[i], ETMCNTVRn(i)); |
404 | } |
405 | etm_writel(drvdata, val: config->seq_12_event, ETMSQ12EVR); |
406 | etm_writel(drvdata, val: config->seq_21_event, ETMSQ21EVR); |
407 | etm_writel(drvdata, val: config->seq_23_event, ETMSQ23EVR); |
408 | etm_writel(drvdata, val: config->seq_31_event, ETMSQ31EVR); |
409 | etm_writel(drvdata, val: config->seq_32_event, ETMSQ32EVR); |
410 | etm_writel(drvdata, val: config->seq_13_event, ETMSQ13EVR); |
411 | etm_writel(drvdata, val: config->seq_curr_state, ETMSQR); |
412 | for (i = 0; i < drvdata->nr_ext_out; i++) |
413 | etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i)); |
414 | for (i = 0; i < drvdata->nr_ctxid_cmp; i++) |
415 | etm_writel(drvdata, val: config->ctxid_pid[i], ETMCIDCVRn(i)); |
416 | etm_writel(drvdata, val: config->ctxid_mask, ETMCIDCMR); |
417 | etm_writel(drvdata, val: config->sync_freq, ETMSYNCFR); |
418 | /* No external input selected */ |
419 | etm_writel(drvdata, val: 0x0, ETMEXTINSELR); |
420 | etm_writel(drvdata, val: config->timestamp_event, ETMTSEVR); |
421 | /* No auxiliary control selected */ |
422 | etm_writel(drvdata, val: 0x0, ETMAUXCR); |
423 | etm_writel(drvdata, val: drvdata->traceid, ETMTRACEIDR); |
424 | /* No VMID comparator value selected */ |
425 | etm_writel(drvdata, val: 0x0, ETMVMIDCVR); |
426 | |
427 | etm_clr_prog(drvdata); |
428 | |
429 | done: |
430 | CS_LOCK(addr: drvdata->base); |
431 | |
432 | dev_dbg(&drvdata->csdev->dev, "cpu: %d enable smp call done: %d\n" , |
433 | drvdata->cpu, rc); |
434 | return rc; |
435 | } |
436 | |
/* Argument bundle for enabling the ETM on its owning CPU via IPI. */
struct etm_enable_arg {
	struct etm_drvdata *drvdata;	/* device to enable */
	int rc;				/* result of etm_enable_hw() */
};

/* smp_call_function_single() trampoline: runs etm_enable_hw() locally. */
static void etm_enable_hw_smp_call(void *info)
{
	struct etm_enable_arg *arg = info;

	if (WARN_ON(!arg))
		return;
	arg->rc = etm_enable_hw(arg->drvdata);
}
450 | |
451 | static int etm_cpu_id(struct coresight_device *csdev) |
452 | { |
453 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: csdev->dev.parent); |
454 | |
455 | return drvdata->cpu; |
456 | } |
457 | |
458 | int etm_read_alloc_trace_id(struct etm_drvdata *drvdata) |
459 | { |
460 | int trace_id; |
461 | |
462 | /* |
463 | * This will allocate a trace ID to the cpu, |
464 | * or return the one currently allocated. |
465 | * |
466 | * trace id function has its own lock |
467 | */ |
468 | trace_id = coresight_trace_id_get_cpu_id(cpu: drvdata->cpu); |
469 | if (IS_VALID_CS_TRACE_ID(trace_id)) |
470 | drvdata->traceid = (u8)trace_id; |
471 | else |
472 | dev_err(&drvdata->csdev->dev, |
473 | "Failed to allocate trace ID for %s on CPU%d\n" , |
474 | dev_name(&drvdata->csdev->dev), drvdata->cpu); |
475 | return trace_id; |
476 | } |
477 | |
478 | void etm_release_trace_id(struct etm_drvdata *drvdata) |
479 | { |
480 | coresight_trace_id_put_cpu_id(cpu: drvdata->cpu); |
481 | } |
482 | |
483 | static int etm_enable_perf(struct coresight_device *csdev, |
484 | struct perf_event *event) |
485 | { |
486 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: csdev->dev.parent); |
487 | int trace_id; |
488 | |
489 | if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) |
490 | return -EINVAL; |
491 | |
492 | /* Configure the tracer based on the session's specifics */ |
493 | etm_parse_event_config(drvdata, event); |
494 | |
495 | /* |
496 | * perf allocates cpu ids as part of _setup_aux() - device needs to use |
497 | * the allocated ID. This reads the current version without allocation. |
498 | * |
499 | * This does not use the trace id lock to prevent lock_dep issues |
500 | * with perf locks - we know the ID cannot change until perf shuts down |
501 | * the session |
502 | */ |
503 | trace_id = coresight_trace_id_read_cpu_id(cpu: drvdata->cpu); |
504 | if (!IS_VALID_CS_TRACE_ID(trace_id)) { |
505 | dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n" , |
506 | dev_name(&drvdata->csdev->dev), drvdata->cpu); |
507 | return -EINVAL; |
508 | } |
509 | drvdata->traceid = (u8)trace_id; |
510 | |
511 | /* And enable it */ |
512 | return etm_enable_hw(drvdata); |
513 | } |
514 | |
515 | static int etm_enable_sysfs(struct coresight_device *csdev) |
516 | { |
517 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: csdev->dev.parent); |
518 | struct etm_enable_arg arg = { }; |
519 | int ret; |
520 | |
521 | spin_lock(lock: &drvdata->spinlock); |
522 | |
523 | /* sysfs needs to allocate and set a trace ID */ |
524 | ret = etm_read_alloc_trace_id(drvdata); |
525 | if (ret < 0) |
526 | goto unlock_enable_sysfs; |
527 | |
528 | /* |
529 | * Configure the ETM only if the CPU is online. If it isn't online |
530 | * hw configuration will take place on the local CPU during bring up. |
531 | */ |
532 | if (cpu_online(cpu: drvdata->cpu)) { |
533 | arg.drvdata = drvdata; |
534 | ret = smp_call_function_single(cpuid: drvdata->cpu, |
535 | func: etm_enable_hw_smp_call, info: &arg, wait: 1); |
536 | if (!ret) |
537 | ret = arg.rc; |
538 | if (!ret) |
539 | drvdata->sticky_enable = true; |
540 | } else { |
541 | ret = -ENODEV; |
542 | } |
543 | |
544 | if (ret) |
545 | etm_release_trace_id(drvdata); |
546 | |
547 | unlock_enable_sysfs: |
548 | spin_unlock(lock: &drvdata->spinlock); |
549 | |
550 | if (!ret) |
551 | dev_dbg(&csdev->dev, "ETM tracing enabled\n" ); |
552 | return ret; |
553 | } |
554 | |
555 | static int etm_enable(struct coresight_device *csdev, struct perf_event *event, |
556 | enum cs_mode mode) |
557 | { |
558 | int ret; |
559 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: csdev->dev.parent); |
560 | |
561 | if (!coresight_take_mode(csdev, new_mode: mode)) { |
562 | /* Someone is already using the tracer */ |
563 | return -EBUSY; |
564 | } |
565 | |
566 | switch (mode) { |
567 | case CS_MODE_SYSFS: |
568 | ret = etm_enable_sysfs(csdev); |
569 | break; |
570 | case CS_MODE_PERF: |
571 | ret = etm_enable_perf(csdev, event); |
572 | break; |
573 | default: |
574 | ret = -EINVAL; |
575 | } |
576 | |
577 | /* The tracer didn't start */ |
578 | if (ret) |
579 | coresight_set_mode(csdev: drvdata->csdev, new_mode: CS_MODE_DISABLED); |
580 | |
581 | return ret; |
582 | } |
583 | |
584 | static void etm_disable_hw(void *info) |
585 | { |
586 | int i; |
587 | struct etm_drvdata *drvdata = info; |
588 | struct etm_config *config = &drvdata->config; |
589 | struct coresight_device *csdev = drvdata->csdev; |
590 | |
591 | CS_UNLOCK(addr: drvdata->base); |
592 | etm_set_prog(drvdata); |
593 | |
594 | /* Read back sequencer and counters for post trace analysis */ |
595 | config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); |
596 | |
597 | for (i = 0; i < drvdata->nr_cntr; i++) |
598 | config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i)); |
599 | |
600 | etm_set_pwrdwn(drvdata); |
601 | coresight_disclaim_device_unlocked(csdev); |
602 | |
603 | CS_LOCK(addr: drvdata->base); |
604 | |
605 | dev_dbg(&drvdata->csdev->dev, |
606 | "cpu: %d disable smp call done\n" , drvdata->cpu); |
607 | } |
608 | |
609 | static void etm_disable_perf(struct coresight_device *csdev) |
610 | { |
611 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: csdev->dev.parent); |
612 | |
613 | if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) |
614 | return; |
615 | |
616 | CS_UNLOCK(addr: drvdata->base); |
617 | |
618 | /* Setting the prog bit disables tracing immediately */ |
619 | etm_set_prog(drvdata); |
620 | |
621 | /* |
622 | * There is no way to know when the tracer will be used again so |
623 | * power down the tracer. |
624 | */ |
625 | etm_set_pwrdwn(drvdata); |
626 | coresight_disclaim_device_unlocked(csdev); |
627 | |
628 | CS_LOCK(addr: drvdata->base); |
629 | |
630 | /* |
631 | * perf will release trace ids when _free_aux() |
632 | * is called at the end of the session |
633 | */ |
634 | |
635 | } |
636 | |
637 | static void etm_disable_sysfs(struct coresight_device *csdev) |
638 | { |
639 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: csdev->dev.parent); |
640 | |
641 | /* |
642 | * Taking hotplug lock here protects from clocks getting disabled |
643 | * with tracing being left on (crash scenario) if user disable occurs |
644 | * after cpu online mask indicates the cpu is offline but before the |
645 | * DYING hotplug callback is serviced by the ETM driver. |
646 | */ |
647 | cpus_read_lock(); |
648 | spin_lock(lock: &drvdata->spinlock); |
649 | |
650 | /* |
651 | * Executing etm_disable_hw on the cpu whose ETM is being disabled |
652 | * ensures that register writes occur when cpu is powered. |
653 | */ |
654 | smp_call_function_single(cpuid: drvdata->cpu, func: etm_disable_hw, info: drvdata, wait: 1); |
655 | |
656 | spin_unlock(lock: &drvdata->spinlock); |
657 | cpus_read_unlock(); |
658 | |
659 | /* |
660 | * we only release trace IDs when resetting sysfs. |
661 | * This permits sysfs users to read the trace ID after the trace |
662 | * session has completed. This maintains operational behaviour with |
663 | * prior trace id allocation method |
664 | */ |
665 | |
666 | dev_dbg(&csdev->dev, "ETM tracing disabled\n" ); |
667 | } |
668 | |
669 | static void etm_disable(struct coresight_device *csdev, |
670 | struct perf_event *event) |
671 | { |
672 | enum cs_mode mode; |
673 | |
674 | /* |
675 | * For as long as the tracer isn't disabled another entity can't |
676 | * change its status. As such we can read the status here without |
677 | * fearing it will change under us. |
678 | */ |
679 | mode = coresight_get_mode(csdev); |
680 | |
681 | switch (mode) { |
682 | case CS_MODE_DISABLED: |
683 | break; |
684 | case CS_MODE_SYSFS: |
685 | etm_disable_sysfs(csdev); |
686 | break; |
687 | case CS_MODE_PERF: |
688 | etm_disable_perf(csdev); |
689 | break; |
690 | default: |
691 | WARN_ON_ONCE(mode); |
692 | return; |
693 | } |
694 | |
695 | if (mode) |
696 | coresight_set_mode(csdev, new_mode: CS_MODE_DISABLED); |
697 | } |
698 | |
/* Trace-source operations handed to the CoreSight core. */
static const struct coresight_ops_source etm_source_ops = {
	.cpu_id = etm_cpu_id,
	.enable = etm_enable,
	.disable = etm_disable,
};

static const struct coresight_ops etm_cs_ops = {
	.source_ops = &etm_source_ops,
};
708 | |
709 | static int etm_online_cpu(unsigned int cpu) |
710 | { |
711 | if (!etmdrvdata[cpu]) |
712 | return 0; |
713 | |
714 | if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable) |
715 | coresight_enable_sysfs(csdev: etmdrvdata[cpu]->csdev); |
716 | return 0; |
717 | } |
718 | |
719 | static int etm_starting_cpu(unsigned int cpu) |
720 | { |
721 | if (!etmdrvdata[cpu]) |
722 | return 0; |
723 | |
724 | spin_lock(lock: &etmdrvdata[cpu]->spinlock); |
725 | if (!etmdrvdata[cpu]->os_unlock) { |
726 | etm_os_unlock(drvdata: etmdrvdata[cpu]); |
727 | etmdrvdata[cpu]->os_unlock = true; |
728 | } |
729 | |
730 | if (coresight_get_mode(csdev: etmdrvdata[cpu]->csdev)) |
731 | etm_enable_hw(drvdata: etmdrvdata[cpu]); |
732 | spin_unlock(lock: &etmdrvdata[cpu]->spinlock); |
733 | return 0; |
734 | } |
735 | |
736 | static int etm_dying_cpu(unsigned int cpu) |
737 | { |
738 | if (!etmdrvdata[cpu]) |
739 | return 0; |
740 | |
741 | spin_lock(lock: &etmdrvdata[cpu]->spinlock); |
742 | if (coresight_get_mode(csdev: etmdrvdata[cpu]->csdev)) |
743 | etm_disable_hw(info: etmdrvdata[cpu]); |
744 | spin_unlock(lock: &etmdrvdata[cpu]->spinlock); |
745 | return 0; |
746 | } |
747 | |
748 | static bool etm_arch_supported(u8 arch) |
749 | { |
750 | switch (arch) { |
751 | case ETM_ARCH_V3_3: |
752 | break; |
753 | case ETM_ARCH_V3_5: |
754 | break; |
755 | case PFT_ARCH_V1_0: |
756 | break; |
757 | case PFT_ARCH_V1_1: |
758 | break; |
759 | default: |
760 | return false; |
761 | } |
762 | return true; |
763 | } |
764 | |
765 | static void etm_init_arch_data(void *info) |
766 | { |
767 | u32 etmidr; |
768 | u32 etmccr; |
769 | struct etm_drvdata *drvdata = info; |
770 | |
771 | /* Make sure all registers are accessible */ |
772 | etm_os_unlock(drvdata); |
773 | |
774 | CS_UNLOCK(addr: drvdata->base); |
775 | |
776 | /* First dummy read */ |
777 | (void)etm_readl(drvdata, ETMPDSR); |
778 | /* Provide power to ETM: ETMPDCR[3] == 1 */ |
779 | etm_set_pwrup(drvdata); |
780 | /* |
781 | * Clear power down bit since when this bit is set writes to |
782 | * certain registers might be ignored. |
783 | */ |
784 | etm_clr_pwrdwn(drvdata); |
785 | /* |
786 | * Set prog bit. It will be set from reset but this is included to |
787 | * ensure it is set |
788 | */ |
789 | etm_set_prog(drvdata); |
790 | |
791 | /* Find all capabilities */ |
792 | etmidr = etm_readl(drvdata, ETMIDR); |
793 | drvdata->arch = BMVAL(etmidr, 4, 11); |
794 | drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK; |
795 | |
796 | drvdata->etmccer = etm_readl(drvdata, ETMCCER); |
797 | etmccr = etm_readl(drvdata, ETMCCR); |
798 | drvdata->etmccr = etmccr; |
799 | drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2; |
800 | drvdata->nr_cntr = BMVAL(etmccr, 13, 15); |
801 | drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19); |
802 | drvdata->nr_ext_out = BMVAL(etmccr, 20, 22); |
803 | drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25); |
804 | |
805 | etm_set_pwrdwn(drvdata); |
806 | etm_clr_pwrup(drvdata); |
807 | CS_LOCK(addr: drvdata->base); |
808 | } |
809 | |
810 | static int __init etm_hp_setup(void) |
811 | { |
812 | int ret; |
813 | |
814 | ret = cpuhp_setup_state_nocalls_cpuslocked(state: CPUHP_AP_ARM_CORESIGHT_STARTING, |
815 | name: "arm/coresight:starting" , |
816 | startup: etm_starting_cpu, teardown: etm_dying_cpu); |
817 | |
818 | if (ret) |
819 | return ret; |
820 | |
821 | ret = cpuhp_setup_state_nocalls_cpuslocked(state: CPUHP_AP_ONLINE_DYN, |
822 | name: "arm/coresight:online" , |
823 | startup: etm_online_cpu, NULL); |
824 | |
825 | /* HP dyn state ID returned in ret on success */ |
826 | if (ret > 0) { |
827 | hp_online = ret; |
828 | return 0; |
829 | } |
830 | |
831 | /* failed dyn state - remove others */ |
832 | cpuhp_remove_state_nocalls(state: CPUHP_AP_ARM_CORESIGHT_STARTING); |
833 | |
834 | return ret; |
835 | } |
836 | |
837 | static void etm_hp_clear(void) |
838 | { |
839 | cpuhp_remove_state_nocalls(state: CPUHP_AP_ARM_CORESIGHT_STARTING); |
840 | if (hp_online) { |
841 | cpuhp_remove_state_nocalls(state: hp_online); |
842 | hp_online = 0; |
843 | } |
844 | } |
845 | |
846 | static int etm_probe(struct amba_device *adev, const struct amba_id *id) |
847 | { |
848 | int ret; |
849 | void __iomem *base; |
850 | struct device *dev = &adev->dev; |
851 | struct coresight_platform_data *pdata = NULL; |
852 | struct etm_drvdata *drvdata; |
853 | struct resource *res = &adev->res; |
854 | struct coresight_desc desc = { 0 }; |
855 | |
856 | drvdata = devm_kzalloc(dev, size: sizeof(*drvdata), GFP_KERNEL); |
857 | if (!drvdata) |
858 | return -ENOMEM; |
859 | |
860 | drvdata->use_cp14 = fwnode_property_read_bool(fwnode: dev->fwnode, propname: "arm,cp14" ); |
861 | dev_set_drvdata(dev, data: drvdata); |
862 | |
863 | /* Validity for the resource is already checked by the AMBA core */ |
864 | base = devm_ioremap_resource(dev, res); |
865 | if (IS_ERR(ptr: base)) |
866 | return PTR_ERR(ptr: base); |
867 | |
868 | drvdata->base = base; |
869 | desc.access = CSDEV_ACCESS_IOMEM(base); |
870 | |
871 | spin_lock_init(&drvdata->spinlock); |
872 | |
873 | drvdata->atclk = devm_clk_get(dev: &adev->dev, id: "atclk" ); /* optional */ |
874 | if (!IS_ERR(ptr: drvdata->atclk)) { |
875 | ret = clk_prepare_enable(clk: drvdata->atclk); |
876 | if (ret) |
877 | return ret; |
878 | } |
879 | |
880 | drvdata->cpu = coresight_get_cpu(dev); |
881 | if (drvdata->cpu < 0) |
882 | return drvdata->cpu; |
883 | |
884 | desc.name = devm_kasprintf(dev, GFP_KERNEL, fmt: "etm%d" , drvdata->cpu); |
885 | if (!desc.name) |
886 | return -ENOMEM; |
887 | |
888 | if (smp_call_function_single(cpuid: drvdata->cpu, |
889 | func: etm_init_arch_data, info: drvdata, wait: 1)) |
890 | dev_err(dev, "ETM arch init failed\n" ); |
891 | |
892 | if (etm_arch_supported(arch: drvdata->arch) == false) |
893 | return -EINVAL; |
894 | |
895 | etm_set_default(config: &drvdata->config); |
896 | |
897 | pdata = coresight_get_platform_data(dev); |
898 | if (IS_ERR(ptr: pdata)) |
899 | return PTR_ERR(ptr: pdata); |
900 | |
901 | adev->dev.platform_data = pdata; |
902 | |
903 | desc.type = CORESIGHT_DEV_TYPE_SOURCE; |
904 | desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; |
905 | desc.ops = &etm_cs_ops; |
906 | desc.pdata = pdata; |
907 | desc.dev = dev; |
908 | desc.groups = coresight_etm_groups; |
909 | drvdata->csdev = coresight_register(desc: &desc); |
910 | if (IS_ERR(ptr: drvdata->csdev)) |
911 | return PTR_ERR(ptr: drvdata->csdev); |
912 | |
913 | ret = etm_perf_symlink(csdev: drvdata->csdev, link: true); |
914 | if (ret) { |
915 | coresight_unregister(csdev: drvdata->csdev); |
916 | return ret; |
917 | } |
918 | |
919 | etmdrvdata[drvdata->cpu] = drvdata; |
920 | |
921 | pm_runtime_put(dev: &adev->dev); |
922 | dev_info(&drvdata->csdev->dev, |
923 | "%s initialized\n" , (char *)coresight_get_uci_data(id)); |
924 | if (boot_enable) { |
925 | coresight_enable_sysfs(csdev: drvdata->csdev); |
926 | drvdata->boot_enable = true; |
927 | } |
928 | |
929 | return 0; |
930 | } |
931 | |
932 | static void clear_etmdrvdata(void *info) |
933 | { |
934 | int cpu = *(int *)info; |
935 | |
936 | etmdrvdata[cpu] = NULL; |
937 | } |
938 | |
939 | static void etm_remove(struct amba_device *adev) |
940 | { |
941 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: &adev->dev); |
942 | |
943 | etm_perf_symlink(csdev: drvdata->csdev, link: false); |
944 | |
945 | /* |
946 | * Taking hotplug lock here to avoid racing between etm_remove and |
947 | * CPU hotplug call backs. |
948 | */ |
949 | cpus_read_lock(); |
950 | /* |
951 | * The readers for etmdrvdata[] are CPU hotplug call backs |
952 | * and PM notification call backs. Change etmdrvdata[i] on |
953 | * CPU i ensures these call backs has consistent view |
954 | * inside one call back function. |
955 | */ |
956 | if (smp_call_function_single(cpuid: drvdata->cpu, func: clear_etmdrvdata, info: &drvdata->cpu, wait: 1)) |
957 | etmdrvdata[drvdata->cpu] = NULL; |
958 | |
959 | cpus_read_unlock(); |
960 | |
961 | coresight_unregister(csdev: drvdata->csdev); |
962 | } |
963 | |
964 | #ifdef CONFIG_PM |
965 | static int etm_runtime_suspend(struct device *dev) |
966 | { |
967 | struct etm_drvdata *drvdata = dev_get_drvdata(dev); |
968 | |
969 | if (drvdata && !IS_ERR(ptr: drvdata->atclk)) |
970 | clk_disable_unprepare(clk: drvdata->atclk); |
971 | |
972 | return 0; |
973 | } |
974 | |
975 | static int etm_runtime_resume(struct device *dev) |
976 | { |
977 | struct etm_drvdata *drvdata = dev_get_drvdata(dev); |
978 | |
979 | if (drvdata && !IS_ERR(ptr: drvdata->atclk)) |
980 | clk_prepare_enable(clk: drvdata->atclk); |
981 | |
982 | return 0; |
983 | } |
984 | #endif |
985 | |
/* Only runtime PM is implemented - no system sleep hooks. */
static const struct dev_pm_ops etm_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
};
989 | |
/* AMBA peripheral IDs of the supported ETM/PTM implementations. */
static const struct amba_id etm_ids[] = {
	/* ETM 3.3 */
	CS_AMBA_ID_DATA(0x000bb921, "ETM 3.3"),
	/* ETM 3.5 - Cortex-A5 */
	CS_AMBA_ID_DATA(0x000bb955, "ETM 3.5"),
	/* ETM 3.5 */
	CS_AMBA_ID_DATA(0x000bb956, "ETM 3.5"),
	/* PTM 1.0 */
	CS_AMBA_ID_DATA(0x000bb950, "PTM 1.0"),
	/* PTM 1.1 */
	CS_AMBA_ID_DATA(0x000bb95f, "PTM 1.1"),
	/* PTM 1.1 Qualcomm */
	CS_AMBA_ID_DATA(0x000b006f, "PTM 1.1"),
	{ 0, 0, NULL},
};
1005 | |
1006 | MODULE_DEVICE_TABLE(amba, etm_ids); |
1007 | |
/* AMBA bus glue; bind attrs suppressed as per-CPU state makes rebinding unsafe. */
static struct amba_driver etm_driver = {
	.drv = {
		.name	= "coresight-etm3x",
		.owner	= THIS_MODULE,
		.pm	= &etm_dev_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe		= etm_probe,
	.remove         = etm_remove,
	.id_table	= etm_ids,
};
1019 | |
1020 | static int __init etm_init(void) |
1021 | { |
1022 | int ret; |
1023 | |
1024 | ret = etm_hp_setup(); |
1025 | |
1026 | /* etm_hp_setup() does its own cleanup - exit on error */ |
1027 | if (ret) |
1028 | return ret; |
1029 | |
1030 | ret = amba_driver_register(drv: &etm_driver); |
1031 | if (ret) { |
1032 | pr_err("Error registering etm3x driver\n" ); |
1033 | etm_hp_clear(); |
1034 | } |
1035 | |
1036 | return ret; |
1037 | } |
1038 | |
1039 | static void __exit etm_exit(void) |
1040 | { |
1041 | amba_driver_unregister(drv: &etm_driver); |
1042 | etm_hp_clear(); |
1043 | } |
1044 | |
1045 | module_init(etm_init); |
1046 | module_exit(etm_exit); |
1047 | |
1048 | MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>" ); |
1049 | MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>" ); |
1050 | MODULE_DESCRIPTION("Arm CoreSight Program Flow Trace driver" ); |
1051 | MODULE_LICENSE("GPL v2" ); |
1052 | |