1 | /* |
2 | * Copyright 2017 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | |
24 | #if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) |
25 | #define _AMDGPU_TRACE_H_ |
26 | |
27 | #include <linux/stringify.h> |
28 | #include <linux/types.h> |
29 | #include <linux/tracepoint.h> |
30 | |
31 | #undef TRACE_SYSTEM |
32 | #define TRACE_SYSTEM amdgpu |
33 | #define TRACE_INCLUDE_FILE amdgpu_trace |
34 | |
/*
 * Resolve the timeline name of a job's scheduler "finished" fence.
 * Note: the macro expands 'job' several times, so only pass a plain,
 * side-effect-free expression.  #undef'd at the end of this header.
 */
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
	 job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
37 | |
/*
 * Trace a device register read: device index, register offset and the
 * value that was read back.
 */
TRACE_EVENT(amdgpu_device_rreg,
	    TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
	    TP_ARGS(did, reg, value),
	    TP_STRUCT__entry(
			     __field(unsigned, did)
			     __field(uint32_t, reg)
			     __field(uint32_t, value)
			     ),
	    TP_fast_assign(
			   __entry->did = did;
			   __entry->reg = reg;
			   __entry->value = value;
			   ),
	    /* casts widen everything to unsigned long to match %lx */
	    TP_printk("0x%04lx, 0x%08lx, 0x%08lx",
		      (unsigned long)__entry->did,
		      (unsigned long)__entry->reg,
		      (unsigned long)__entry->value)
);
56 | |
/*
 * Trace a device register write: device index, register offset and the
 * value being written.  Mirrors amdgpu_device_rreg field-for-field.
 */
TRACE_EVENT(amdgpu_device_wreg,
	    TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
	    TP_ARGS(did, reg, value),
	    TP_STRUCT__entry(
			     __field(unsigned, did)
			     __field(uint32_t, reg)
			     __field(uint32_t, value)
			     ),
	    TP_fast_assign(
			   __entry->did = did;
			   __entry->reg = reg;
			   __entry->value = value;
			   ),
	    /* casts widen everything to unsigned long to match %lx */
	    TP_printk("0x%04lx, 0x%08lx, 0x%08lx",
		      (unsigned long)__entry->did,
		      (unsigned long)__entry->reg,
		      (unsigned long)__entry->value)
);
75 | |
/*
 * Trace a decoded interrupt vector entry from interrupt-handler ring
 * 'ih'.  Captures the routing identifiers (client/source/ring), VMID,
 * timestamp, PASID and the four raw source data words.
 *
 * NOTE(review): vmid_src and timestamp_src are stored in the trace
 * record but are not part of the printed line below.
 */
TRACE_EVENT(amdgpu_iv,
	    TP_PROTO(unsigned ih, struct amdgpu_iv_entry *iv),
	    TP_ARGS(ih, iv),
	    TP_STRUCT__entry(
			     __field(unsigned, ih)
			     __field(unsigned, client_id)
			     __field(unsigned, src_id)
			     __field(unsigned, ring_id)
			     __field(unsigned, vmid)
			     __field(unsigned, vmid_src)
			     __field(uint64_t, timestamp)
			     __field(unsigned, timestamp_src)
			     __field(unsigned, pasid)
			     __array(unsigned, src_data, 4)
			     ),
	    TP_fast_assign(
			   __entry->ih = ih;
			   __entry->client_id = iv->client_id;
			   __entry->src_id = iv->src_id;
			   __entry->ring_id = iv->ring_id;
			   __entry->vmid = iv->vmid;
			   __entry->vmid_src = iv->vmid_src;
			   __entry->timestamp = iv->timestamp;
			   __entry->timestamp_src = iv->timestamp_src;
			   __entry->pasid = iv->pasid;
			   __entry->src_data[0] = iv->src_data[0];
			   __entry->src_data[1] = iv->src_data[1];
			   __entry->src_data[2] = iv->src_data[2];
			   __entry->src_data[3] = iv->src_data[3];
			   ),
	    TP_printk("ih:%u client_id:%u src_id:%u ring:%u vmid:%u "
		      "timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x",
		      __entry->ih, __entry->client_id, __entry->src_id,
		      __entry->ring_id, __entry->vmid,
		      __entry->timestamp, __entry->pasid,
		      __entry->src_data[0], __entry->src_data[1],
		      __entry->src_data[2], __entry->src_data[3])
);
114 | |
115 | |
/*
 * Trace creation of a buffer object: its pointer, size in pages,
 * current TTM memory type, and the preferred/allowed placement domains.
 */
TRACE_EVENT(amdgpu_bo_create,
	    TP_PROTO(struct amdgpu_bo *bo),
	    TP_ARGS(bo),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(u32, pages)
			     __field(u32, type)
			     __field(u32, prefer)
			     __field(u32, allow)
			     __field(u32, visible)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo;
			   /* resource size is in bytes; round up to pages */
			   __entry->pages = PFN_UP(bo->tbo.resource->size);
			   __entry->type = bo->tbo.resource->mem_type;
			   __entry->prefer = bo->preferred_domains;
			   __entry->allow = bo->allowed_domains;
			   /* NOTE(review): 'visible' is assigned the whole
			    * bo->flags word, not a visibility bit -- the
			    * name looks misleading; confirm with consumers
			    * of this event before renaming. */
			   __entry->visible = bo->flags;
			   ),

	    TP_printk("bo=%p, pages=%u, type=%d, preferred=%d, allowed=%d, visible=%d",
		      __entry->bo, __entry->pages, __entry->type,
		      __entry->prefer, __entry->allow, __entry->visible)
);
141 | |
/*
 * Trace one indirect buffer of a command submission: the BO list it
 * references, the scheduler ring index it targets, the IB length in
 * dwords, and how many fences are currently emitted on that ring.
 */
TRACE_EVENT(amdgpu_cs,
	    TP_PROTO(struct amdgpu_cs_parser *p,
		     struct amdgpu_job *job,
		     struct amdgpu_ib *ib),
	    TP_ARGS(p, job, ib),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo_list *, bo_list)
			     __field(u32, ring)
			     __field(u32, dw)
			     __field(u32, fences)
			     ),

	    TP_fast_assign(
			   __entry->bo_list = p->bo_list;
			   /* resolve the hardware ring via the job's
			    * scheduler entity */
			   __entry->ring = to_amdgpu_ring(job->base.entity->rq->sched)->idx;
			   __entry->dw = ib->length_dw;
			   __entry->fences = amdgpu_fence_count_emitted(
				to_amdgpu_ring(job->base.entity->rq->sched));
			   ),
	    TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
		      __entry->bo_list, __entry->ring, __entry->dw,
		      __entry->fences)
);
165 | |
/*
 * Trace a command-submission at job creation time: scheduler job id,
 * the finished-fence timeline name, its context/seqno, the ring name
 * and the number of IBs in the job.
 *
 * NOTE(review): the 'fence' field is declared but never assigned or
 * printed; it only occupies space in the trace record.
 */
TRACE_EVENT(amdgpu_cs_ioctl,
	    TP_PROTO(struct amdgpu_job *job),
	    TP_ARGS(job),
	    TP_STRUCT__entry(
			     __field(uint64_t, sched_job_id)
			     __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
			     __field(unsigned int, context)
			     __field(unsigned int, seqno)
			     __field(struct dma_fence *, fence)
			     __string(ring, to_amdgpu_ring(job->base.sched)->name)
			     __field(u32, num_ibs)
			     ),

	    TP_fast_assign(
			   __entry->sched_job_id = job->base.id;
			   __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job));
			   __entry->context = job->base.s_fence->finished.context;
			   __entry->seqno = job->base.s_fence->finished.seqno;
			   __assign_str(ring, to_amdgpu_ring(job->base.sched)->name);
			   __entry->num_ibs = job->num_ibs;
			   ),
	    TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
		      __entry->sched_job_id, __get_str(timeline), __entry->context,
		      __entry->seqno, __get_str(ring), __entry->num_ibs)
);
191 | |
/*
 * Trace a job when the GPU scheduler runs it.  Records the same
 * identity fields as amdgpu_cs_ioctl (minus the unused fence pointer),
 * so the two events can be correlated via sched_job_id.
 */
TRACE_EVENT(amdgpu_sched_run_job,
	    TP_PROTO(struct amdgpu_job *job),
	    TP_ARGS(job),
	    TP_STRUCT__entry(
			     __field(uint64_t, sched_job_id)
			     __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
			     __field(unsigned int, context)
			     __field(unsigned int, seqno)
			     __string(ring, to_amdgpu_ring(job->base.sched)->name)
			     __field(u32, num_ibs)
			     ),

	    TP_fast_assign(
			   __entry->sched_job_id = job->base.id;
			   __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job));
			   __entry->context = job->base.s_fence->finished.context;
			   __entry->seqno = job->base.s_fence->finished.seqno;
			   __assign_str(ring, to_amdgpu_ring(job->base.sched)->name);
			   __entry->num_ibs = job->num_ibs;
			   ),
	    TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
		      __entry->sched_job_id, __get_str(timeline), __entry->context,
		      __entry->seqno, __get_str(ring), __entry->num_ibs)
);
216 | |
217 | |
218 | TRACE_EVENT(amdgpu_vm_grab_id, |
219 | TP_PROTO(struct amdgpu_vm *vm, struct amdgpu_ring *ring, |
220 | struct amdgpu_job *job), |
221 | TP_ARGS(vm, ring, job), |
222 | TP_STRUCT__entry( |
223 | __field(u32, pasid) |
224 | __string(ring, ring->name) |
225 | __field(u32, ring) |
226 | __field(u32, vmid) |
227 | __field(u32, vm_hub) |
228 | __field(u64, pd_addr) |
229 | __field(u32, needs_flush) |
230 | ), |
231 | |
232 | TP_fast_assign( |
233 | __entry->pasid = vm->pasid; |
234 | __assign_str(ring, ring->name); |
235 | __entry->vmid = job->vmid; |
236 | __entry->vm_hub = ring->vm_hub, |
237 | __entry->pd_addr = job->vm_pd_addr; |
238 | __entry->needs_flush = job->vm_needs_flush; |
239 | ), |
240 | TP_printk("pasid=%d, ring=%s, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u" , |
241 | __entry->pasid, __get_str(ring), __entry->vmid, |
242 | __entry->vm_hub, __entry->pd_addr, __entry->needs_flush) |
243 | ); |
244 | |
/*
 * Trace mapping of a GPU VA range to a buffer object.  'bo' is NULL
 * when no bo_va is supplied (e.g. an unbacked mapping).
 *
 * NOTE(review): start/last are stored as 'long' -- presumably wide
 * enough for the mapping's range fields; confirm on 32-bit targets.
 */
TRACE_EVENT(amdgpu_vm_bo_map,
	    TP_PROTO(struct amdgpu_bo_va *bo_va,
		     struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(bo_va, mapping),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(long, start)
			     __field(long, last)
			     __field(u64, offset)
			     __field(u64, flags)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo_va ? bo_va->base.bo : NULL;
			   __entry->start = mapping->start;
			   __entry->last = mapping->last;
			   __entry->offset = mapping->offset;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx",
		      __entry->bo, __entry->start, __entry->last,
		      __entry->offset, __entry->flags)
);
268 | |
/*
 * Trace unmapping of a GPU VA range from a buffer object.  Exact
 * mirror of amdgpu_vm_bo_map; 'bo' is NULL when no bo_va is supplied.
 */
TRACE_EVENT(amdgpu_vm_bo_unmap,
	    TP_PROTO(struct amdgpu_bo_va *bo_va,
		     struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(bo_va, mapping),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(long, start)
			     __field(long, last)
			     __field(u64, offset)
			     __field(u64, flags)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo_va ? bo_va->base.bo : NULL;
			   __entry->start = mapping->start;
			   __entry->last = mapping->last;
			   __entry->offset = mapping->offset;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx",
		      __entry->bo, __entry->start, __entry->last,
		      __entry->offset, __entry->flags)
);
292 | |
/*
 * Event class for VM mapping updates: logs the range as
 * [soffset, eoffset) -- eoffset is mapping->last + 1, i.e. exclusive --
 * plus the mapping flags.
 */
DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping),
	    TP_STRUCT__entry(
			     __field(u64, soffset)
			     __field(u64, eoffset)
			     __field(u64, flags)
			     ),

	    TP_fast_assign(
			   __entry->soffset = mapping->start;
			   /* +1 turns the inclusive 'last' into an
			    * exclusive end offset */
			   __entry->eoffset = mapping->last + 1;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("soffs=%010llx, eoffs=%010llx, flags=%llx",
		      __entry->soffset, __entry->eoffset, __entry->flags)
);
310 | |
/* Instance of amdgpu_vm_mapping fired when a BO's mapping is updated. */
DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_update,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping)
);
315 | |
/* Instance of amdgpu_vm_mapping fired when a mapping is recorded. */
DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping)
);
320 | |
/* Instance of amdgpu_vm_mapping fired from the command-submission path. */
DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping)
);
325 | |
/*
 * Trace a page-table entry update: the VA range [start, end), PTE
 * flags, per-entry address increment, owning pid and VM context, plus
 * a dynamic array holding the destination address of every PTE
 * written (translated through the GART table when p->pages_addr is
 * set, otherwise the raw 'dst' address).
 */
TRACE_EVENT(amdgpu_vm_update_ptes,
	    TP_PROTO(struct amdgpu_vm_update_params *p,
		     uint64_t start, uint64_t end,
		     unsigned int nptes, uint64_t dst,
		     uint64_t incr, uint64_t flags,
		     pid_t pid, uint64_t vm_ctx),
	    TP_ARGS(p, start, end, nptes, dst, incr, flags, pid, vm_ctx),
	    TP_STRUCT__entry(
			     __field(u64, start)
			     __field(u64, end)
			     __field(u64, flags)
			     __field(unsigned int, nptes)
			     __field(u64, incr)
			     __field(pid_t, pid)
			     __field(u64, vm_ctx)
			     __dynamic_array(u64, dst, nptes)
			     ),

	    TP_fast_assign(
			   unsigned int i;

			   __entry->start = start;
			   __entry->end = end;
			   __entry->flags = flags;
			   __entry->incr = incr;
			   __entry->nptes = nptes;
			   __entry->pid = pid;
			   __entry->vm_ctx = vm_ctx;
			   /* record each PTE destination, stepping 'dst'
			    * by 'incr' per entry */
			   for (i = 0; i < nptes; ++i) {
				u64 addr = p->pages_addr ? amdgpu_vm_map_gart(
					p->pages_addr, dst) : dst;

				((u64 *)__get_dynamic_array(dst))[i] = addr;
				dst += incr;
			   }
			   ),
	    TP_printk("pid:%u vm_ctx:0x%llx start:0x%010llx end:0x%010llx,"
		      " flags:0x%llx, incr:%llu, dst:\n%s", __entry->pid,
		      __entry->vm_ctx, __entry->start, __entry->end,
		      __entry->flags, __entry->incr, __print_array(
		      __get_dynamic_array(dst), __entry->nptes, 8))
);
368 | |
/*
 * Trace a set-page-table-entries operation: destination PTE address,
 * first mapped address, entry count, per-entry increment, PTE flags
 * and whether the update runs on the immediate (direct) path.
 */
TRACE_EVENT(amdgpu_vm_set_ptes,
	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
		     uint32_t incr, uint64_t flags, bool immediate),
	    TP_ARGS(pe, addr, count, incr, flags, immediate),
	    TP_STRUCT__entry(
			     __field(u64, pe)
			     __field(u64, addr)
			     __field(u32, count)
			     __field(u32, incr)
			     __field(u64, flags)
			     __field(bool, immediate)
			     ),

	    TP_fast_assign(
			   __entry->pe = pe;
			   __entry->addr = addr;
			   __entry->count = count;
			   __entry->incr = incr;
			   __entry->flags = flags;
			   __entry->immediate = immediate;
			   ),
	    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u, "
		      "immediate=%d", __entry->pe, __entry->addr, __entry->incr,
		      __entry->flags, __entry->count, __entry->immediate)
);
394 | |
/*
 * Trace a copy-page-table-entries operation: destination PTE address,
 * source address, entry count and whether it uses the immediate path.
 */
TRACE_EVENT(amdgpu_vm_copy_ptes,
	    TP_PROTO(uint64_t pe, uint64_t src, unsigned count, bool immediate),
	    TP_ARGS(pe, src, count, immediate),
	    TP_STRUCT__entry(
			     __field(u64, pe)
			     __field(u64, src)
			     __field(u32, count)
			     __field(bool, immediate)
			     ),

	    TP_fast_assign(
			   __entry->pe = pe;
			   __entry->src = src;
			   __entry->count = count;
			   __entry->immediate = immediate;
			   ),
	    TP_printk("pe=%010Lx, src=%010Lx, count=%u, immediate=%d",
		      __entry->pe, __entry->src, __entry->count,
		      __entry->immediate)
);
415 | |
/*
 * Trace a VM flush on a ring: ring name, VMID, the ring's VM hub and
 * the page-directory address being flushed.
 */
TRACE_EVENT(amdgpu_vm_flush,
	    TP_PROTO(struct amdgpu_ring *ring, unsigned vmid,
		     uint64_t pd_addr),
	    TP_ARGS(ring, vmid, pd_addr),
	    TP_STRUCT__entry(
			     __string(ring, ring->name)
			     __field(u32, vmid)
			     __field(u32, vm_hub)
			     __field(u64, pd_addr)
			     ),

	    TP_fast_assign(
			   __assign_str(ring, ring->name);
			   __entry->vmid = vmid;
			   __entry->vm_hub = ring->vm_hub;
			   __entry->pd_addr = pd_addr;
			   ),
	    TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx",
		      __get_str(ring), __entry->vmid,
		      __entry->vm_hub, __entry->pd_addr)
);
437 | |
/* Event class for PASID lifecycle events; records only the PASID value. */
DECLARE_EVENT_CLASS(amdgpu_pasid,
	    TP_PROTO(unsigned pasid),
	    TP_ARGS(pasid),
	    TP_STRUCT__entry(
			     __field(unsigned, pasid)
			     ),
	    TP_fast_assign(
			   __entry->pasid = pasid;
			   ),
	    TP_printk("pasid=%u", __entry->pasid)
);
449 | |
/* Instance of amdgpu_pasid fired when a PASID is allocated. */
DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_allocated,
	    TP_PROTO(unsigned pasid),
	    TP_ARGS(pasid)
);
454 | |
/* Instance of amdgpu_pasid fired when a PASID is freed. */
DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_freed,
	    TP_PROTO(unsigned pasid),
	    TP_ARGS(pasid)
);
459 | |
/*
 * Trace a buffer object being placed into a BO list; records both
 * pointers and the BO's size in bytes.
 */
TRACE_EVENT(amdgpu_bo_list_set,
	    TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
	    TP_ARGS(list, bo),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo_list *, list)
			     __field(struct amdgpu_bo *, bo)
			     __field(u64, bo_size)
			     ),

	    TP_fast_assign(
			   __entry->list = list;
			   __entry->bo = bo;
			   __entry->bo_size = amdgpu_bo_size(bo);
			   ),
	    TP_printk("list=%p, bo=%p, bo_size=%Ld",
		      __entry->list,
		      __entry->bo,
		      __entry->bo_size)
);
479 | |
480 | TRACE_EVENT(amdgpu_cs_bo_status, |
481 | TP_PROTO(uint64_t total_bo, uint64_t total_size), |
482 | TP_ARGS(total_bo, total_size), |
483 | TP_STRUCT__entry( |
484 | __field(u64, total_bo) |
485 | __field(u64, total_size) |
486 | ), |
487 | |
488 | TP_fast_assign( |
489 | __entry->total_bo = total_bo; |
490 | __entry->total_size = total_size; |
491 | ), |
492 | TP_printk("total_bo_size=%Ld, total_bo_count=%Ld" , |
493 | __entry->total_bo, __entry->total_size) |
494 | ); |
495 | |
/*
 * Trace a buffer object migration between placements: BO pointer, its
 * size in bytes, and the old/new placement identifiers.
 */
TRACE_EVENT(amdgpu_bo_move,
	    TP_PROTO(struct amdgpu_bo *bo, uint32_t new_placement, uint32_t old_placement),
	    TP_ARGS(bo, new_placement, old_placement),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(u64, bo_size)
			     __field(u32, new_placement)
			     __field(u32, old_placement)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo;
			   __entry->bo_size = amdgpu_bo_size(bo);
			   __entry->new_placement = new_placement;
			   __entry->old_placement = old_placement;
			   ),
	    TP_printk("bo=%p, from=%d, to=%d, size=%Ld",
		      __entry->bo, __entry->old_placement,
		      __entry->new_placement, __entry->bo_size)
);
516 | |
/*
 * Trace a job that needs a pipeline sync against a fence before it can
 * run: the job's ring and id, plus the fence pointer, context and
 * sequence number being waited on.
 */
TRACE_EVENT(amdgpu_ib_pipe_sync,
	    TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
	    TP_ARGS(sched_job, fence),
	    TP_STRUCT__entry(
			     __string(ring, sched_job->base.sched->name)
			     __field(uint64_t, id)
			     __field(struct dma_fence *, fence)
			     __field(uint64_t, ctx)
			     __field(unsigned, seqno)
			     ),

	    TP_fast_assign(
			   __assign_str(ring, sched_job->base.sched->name);
			   __entry->id = sched_job->base.id;
			   __entry->fence = fence;
			   __entry->ctx = fence->context;
			   __entry->seqno = fence->seqno;
			   ),
	    TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
		      __get_str(ring), __entry->id,
		      __entry->fence, __entry->ctx,
		      __entry->seqno)
);
540 | |
/*
 * Trace a single register address/value pair captured during a GPU
 * reset register dump.
 */
TRACE_EVENT(amdgpu_reset_reg_dumps,
	    TP_PROTO(uint32_t address, uint32_t value),
	    TP_ARGS(address, value),
	    TP_STRUCT__entry(
			     __field(uint32_t, address)
			     __field(uint32_t, value)
			     ),
	    TP_fast_assign(
			   __entry->address = address;
			   __entry->value = value;
			   ),
	    TP_printk("amdgpu register dump 0x%x: 0x%x",
		      __entry->address,
		      __entry->value)
);
556 | |
557 | #undef AMDGPU_JOB_GET_TIMELINE_NAME |
558 | #endif |
559 | |
560 | /* This part must be outside protection */ |
561 | #undef TRACE_INCLUDE_PATH |
562 | #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/amd/amdgpu |
563 | #include <trace/define_trace.h> |
564 | |