1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* |
3 | * Thunderbolt tracing support |
4 | * |
5 | * Copyright (C) 2024, Intel Corporation |
6 | * Author: Mika Westerberg <mika.westerberg@linux.intel.com> |
7 | * Gil Fine <gil.fine@intel.com> |
8 | */ |
9 | |
10 | #undef TRACE_SYSTEM |
11 | #define TRACE_SYSTEM thunderbolt |
12 | |
13 | #if !defined(TB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) |
14 | #define TB_TRACE_H_ |
15 | |
16 | #include <linux/trace_seq.h> |
17 | #include <linux/tracepoint.h> |
18 | |
19 | #include "tb_msgs.h" |
20 | |
/*
 * Map a TB_CFG_PKG_* control packet type to its symbolic name for
 * trace output (e.g. 16 -> "TB_CFG_PKG_RESET"). Unlisted values are
 * printed in hex by __print_symbolic().
 */
#define tb_cfg_type_name(type)	{ type, #type }
#define show_type_name(val)					\
	__print_symbolic(val,					\
			 tb_cfg_type_name(TB_CFG_PKG_READ),	\
			 tb_cfg_type_name(TB_CFG_PKG_WRITE),	\
			 tb_cfg_type_name(TB_CFG_PKG_ERROR),	\
			 tb_cfg_type_name(TB_CFG_PKG_NOTIFY_ACK), \
			 tb_cfg_type_name(TB_CFG_PKG_EVENT),	\
			 tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_REQ), \
			 tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_RESP), \
			 tb_cfg_type_name(TB_CFG_PKG_OVERRIDE),	\
			 tb_cfg_type_name(TB_CFG_PKG_RESET),	\
			 tb_cfg_type_name(TB_CFG_PKG_ICM_EVENT), \
			 tb_cfg_type_name(TB_CFG_PKG_ICM_CMD),	\
			 tb_cfg_type_name(TB_CFG_PKG_ICM_RESP))
36 | |
37 | #ifndef TB_TRACE_HELPERS |
38 | #define TB_TRACE_HELPERS |
39 | static inline const char *show_data_read_write(struct trace_seq *p, |
40 | const u32 *data) |
41 | { |
42 | const struct cfg_read_pkg *msg = (const struct cfg_read_pkg *)data; |
43 | const char *ret = trace_seq_buffer_ptr(s: p); |
44 | |
45 | trace_seq_printf(s: p, fmt: "offset=%#x, len=%u, port=%d, config=%#x, seq=%d, " , |
46 | msg->addr.offset, msg->addr.length, msg->addr.port, |
47 | msg->addr.space, msg->addr.seq); |
48 | |
49 | return ret; |
50 | } |
51 | |
52 | static inline const char *show_data_error(struct trace_seq *p, const u32 *data) |
53 | { |
54 | const struct cfg_error_pkg *msg = (const struct cfg_error_pkg *)data; |
55 | const char *ret = trace_seq_buffer_ptr(s: p); |
56 | |
57 | trace_seq_printf(s: p, fmt: "error=%#x, port=%d, plug=%#x, " , msg->error, |
58 | msg->port, msg->pg); |
59 | |
60 | return ret; |
61 | } |
62 | |
63 | static inline const char *show_data_event(struct trace_seq *p, const u32 *data) |
64 | { |
65 | const struct cfg_event_pkg *msg = (const struct cfg_event_pkg *)data; |
66 | const char *ret = trace_seq_buffer_ptr(s: p); |
67 | |
68 | trace_seq_printf(s: p, fmt: "port=%d, unplug=%#x, " , msg->port, msg->unplug); |
69 | |
70 | return ret; |
71 | } |
72 | |
73 | static inline const char *show_route(struct trace_seq *p, const u32 *data) |
74 | { |
75 | const struct tb_cfg_header * = (const struct tb_cfg_header *)data; |
76 | const char *ret = trace_seq_buffer_ptr(s: p); |
77 | |
78 | trace_seq_printf(s: p, fmt: "route=%llx, " , tb_cfg_get_route(header)); |
79 | |
80 | return ret; |
81 | } |
82 | |
83 | static inline const char *show_data(struct trace_seq *p, u8 type, |
84 | const u32 *data, u32 length) |
85 | { |
86 | const char *ret = trace_seq_buffer_ptr(s: p); |
87 | const char *prefix = "" ; |
88 | int i; |
89 | |
90 | show_route(p, data); |
91 | |
92 | switch (type) { |
93 | case TB_CFG_PKG_READ: |
94 | case TB_CFG_PKG_WRITE: |
95 | show_data_read_write(p, data); |
96 | break; |
97 | |
98 | case TB_CFG_PKG_ERROR: |
99 | show_data_error(p, data); |
100 | break; |
101 | |
102 | case TB_CFG_PKG_EVENT: |
103 | show_data_event(p, data); |
104 | break; |
105 | |
106 | default: |
107 | break; |
108 | } |
109 | |
110 | trace_seq_printf(s: p, fmt: "data=[" ); |
111 | for (i = 0; i < length; i++) { |
112 | trace_seq_printf(s: p, fmt: "%s0x%08x" , prefix, data[i]); |
113 | prefix = ", " ; |
114 | } |
115 | trace_seq_printf(s: p, fmt: "]" ); |
116 | trace_seq_putc(s: p, c: 0); |
117 | |
118 | return ret; |
119 | } |
120 | #endif |
121 | |
/*
 * Common event class for raw Thunderbolt control packets: records the
 * domain index, TB_CFG_PKG_* packet type and the raw payload, decoded
 * at output time by show_data(). Shared by the tb_tx and tb_event
 * tracepoints below.
 */
DECLARE_EVENT_CLASS(tb_raw,
	TP_PROTO(int index, u8 type, const void *data, size_t size),
	TP_ARGS(index, type, data, size),
	TP_STRUCT__entry(
		__field(int, index)
		__field(u8, type)
		__field(size_t, size)
		/* payload copied as 32-bit words; size argument is in bytes */
		__dynamic_array(u32, data, size / 4)
	),
	TP_fast_assign(
		__entry->index = index;
		__entry->type = type;
		/* stored size is in 32-bit words, matching show_data()'s length */
		__entry->size = size / 4;
		memcpy(__get_dynamic_array(data), data, size);
	),
	TP_printk("type=%s, size=%zd, domain=%d, %s",
		show_type_name(__entry->type), __entry->size, __entry->index,
		show_data(p, __entry->type, __get_dynamic_array(data),
			  __entry->size)
	)
);
143 | |
/* Tracepoint fired for control packets transmitted to the bus. */
DEFINE_EVENT(tb_raw, tb_tx,
	TP_PROTO(int index, u8 type, const void *data, size_t size),
	TP_ARGS(index, type, data, size)
);
148 | |
/* Tracepoint fired for asynchronous event packets received from the bus. */
DEFINE_EVENT(tb_raw, tb_event,
	TP_PROTO(int index, u8 type, const void *data, size_t size),
	TP_ARGS(index, type, data, size)
);
153 | |
/*
 * Tracepoint fired for received control packets. Same fields as the
 * tb_raw class plus a "dropped" flag indicating the packet was
 * discarded rather than handled.
 */
TRACE_EVENT(tb_rx,
	TP_PROTO(int index, u8 type, const void *data, size_t size, bool dropped),
	TP_ARGS(index, type, data, size, dropped),
	TP_STRUCT__entry(
		__field(int, index)
		__field(u8, type)
		__field(size_t, size)
		/* payload copied as 32-bit words; size argument is in bytes */
		__dynamic_array(u32, data, size / 4)
		__field(bool, dropped)
	),
	TP_fast_assign(
		__entry->index = index;
		__entry->type = type;
		/* stored size is in 32-bit words, matching show_data()'s length */
		__entry->size = size / 4;
		memcpy(__get_dynamic_array(data), data, size);
		__entry->dropped = dropped;
	),
	TP_printk("type=%s, dropped=%u, size=%zd, domain=%d, %s",
		show_type_name(__entry->type), __entry->dropped,
		__entry->size, __entry->index,
		show_data(p, __entry->type, __get_dynamic_array(data),
			  __entry->size)
	)
);
178 | |
179 | #endif /* TB_TRACE_H_ */ |
180 | |
181 | #undef TRACE_INCLUDE_PATH |
182 | #define TRACE_INCLUDE_PATH . |
183 | |
184 | #undef TRACE_INCLUDE_FILE |
185 | #define TRACE_INCLUDE_FILE trace |
186 | |
187 | /* This part must be outside protection */ |
188 | #include <trace/define_trace.h> |
189 | |