1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2/*
3 * Copyright(c) 2018 Intel Corporation.
4 *
5 */
6#if !defined(__HFI1_TRACE_TID_H) || defined(TRACE_HEADER_MULTI_READ)
7#define __HFI1_TRACE_TID_H
8
9#include <linux/tracepoint.h>
10#include <linux/trace_seq.h>
11
12#include "hfi.h"
13
/*
 * Map a PT_* TID receive-array type to a human-readable name in
 * trace output (PT_EXPECTED / PT_EAGER / PT_INVALID).
 */
#define tidtype_name(type) { PT_##type, #type }
#define show_tidtype(type) \
__print_symbolic(type, \
	tidtype_name(EXPECTED), \
	tidtype_name(EAGER), \
	tidtype_name(INVALID))
20
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_tid

/* Helpers used by hfi1_tid_entry_template below to decode a 32-bit TID entry. */
u8 hfi1_trace_get_tid_ctrl(u32 ent);
u16 hfi1_trace_get_tid_len(u32 ent);
u16 hfi1_trace_get_tid_idx(u32 ent);
27
/* printk format for hfi1_opfn_param_template (negotiated TID RDMA parameters) */
#define OPFN_PARAM_PRN "[%s] qpn 0x%x %s OPFN: qp 0x%x, max read %u, " \
		       "max write %u, max length %u, jkey 0x%x timeout %u " \
		       "urg %u"
31
/* printk format for hfi1_tid_flow_template (per-flow PSN/TID state) */
#define TID_FLOW_PRN "[%s] qpn 0x%x flow %d: idx %d resp_ib_psn 0x%x " \
		     "generation 0x%x fpsn 0x%x-%x r_next_psn 0x%x " \
		     "ib_psn 0x%x-%x npagesets %u tnode_cnt %u " \
		     "tidcnt %u tid_idx %u tid_offset %u length %u sent %u"
36
/* printk format for hfi1_tid_node_template (TID group node bookkeeping) */
#define TID_NODE_PRN "[%s] qpn 0x%x %s idx %u grp base 0x%x map 0x%x " \
		     "used %u cnt %u"
39
/* printk format for hfi1_responder_info_template (responder-side QP state) */
#define RSP_INFO_PRN "[%s] qpn 0x%x state 0x%x s_state 0x%x psn 0x%x " \
		     "r_psn 0x%x r_state 0x%x r_flags 0x%x " \
		     "r_head_ack_queue %u s_tail_ack_queue %u " \
		     "s_acked_ack_queue %u s_ack_state 0x%x " \
		     "s_nak_state 0x%x s_flags 0x%x ps_flags 0x%x " \
		     "iow_flags 0x%lx"
46
/* printk format for hfi1_sender_info_template (sender-side QP state) */
#define SENDER_INFO_PRN "[%s] qpn 0x%x state 0x%x s_cur %u s_tail %u " \
			"s_head %u s_acked %u s_last %u s_psn 0x%x " \
			"s_last_psn 0x%x s_flags 0x%x ps_flags 0x%x " \
			"iow_flags 0x%lx s_state 0x%x s_num_rd %u s_retry %u"
51
/* printk format for hfi1_tid_read_sender_template (TID READ sender state) */
#define TID_READ_SENDER_PRN "[%s] qpn 0x%x newreq %u tid_r_reqs %u " \
			    "tid_r_comp %u pending_tid_r_segs %u " \
			    "s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx " \
			    "s_state 0x%x hw_flow_index %u generation 0x%x " \
			    "fpsn 0x%x"
57
/*
 * printk format for hfi1_tid_rdma_request_template.
 * Fix: label the r_last_acked field correctly ("r_last_ackd" was a typo;
 * the struct field assigned and printed is req->r_last_acked).
 */
#define TID_REQ_PRN "[%s] qpn 0x%x newreq %u opcode 0x%x psn 0x%x lpsn 0x%x " \
		    "cur_seg %u comp_seg %u ack_seg %u alloc_seg %u " \
		    "total_segs %u setup_head %u clear_tail %u flow_idx %u " \
		    "acked_tail %u state %u r_ack_psn 0x%x r_flow_psn 0x%x " \
		    "r_last_acked 0x%x s_next_psn 0x%x"
63
/*
 * printk format for hfi1_tid_req_rcv_err-style events.
 * Fix: the last fragment began with a space while the previous fragment
 * ended with one, producing "r_psn 0x%x  diff %d" (double space) in the
 * trace output; use a single separator.
 */
#define RCV_ERR_PRN  "[%s] qpn 0x%x s_flags 0x%x state 0x%x " \
		     "s_acked_ack_queue %u s_tail_ack_queue %u " \
		     "r_head_ack_queue %u opcode 0x%x psn 0x%x r_psn 0x%x " \
		     "diff %d"
68
/*
 * printk format for hfi1_tid_write_rsp-style events.
 * Fix: the "resync %s" fragment had no trailing space, so string-literal
 * concatenation produced "resync %sr_next_psn_kdeth ..." in trace output.
 */
#define TID_WRITE_RSPDR_PRN "[%s] qpn 0x%x r_tid_head %u r_tid_tail %u " \
			    "r_tid_ack %u r_tid_alloc %u alloc_w_segs %u " \
			    "pending_tid_w_segs %u sync_pt %s " \
			    "ps_nak_psn 0x%x ps_nak_state 0x%x " \
			    "prnr_nak_state 0x%x hw_flow_index %u generation "\
			    "0x%x fpsn 0x%x resync %s " \
			    "r_next_psn_kdeth 0x%x"
76
/* printk format for hfi1_tid_write_sender-style events (TID WRITE sender) */
#define TID_WRITE_SENDER_PRN "[%s] qpn 0x%x newreq %u s_tid_cur %u " \
			     "s_tid_tail %u s_tid_head %u " \
			     "pending_tid_w_resp %u n_requests %u " \
			     "n_tid_requests %u s_flags 0x%x ps_flags 0x%x "\
			     "iow_flags 0x%lx s_state 0x%x s_retry %u"
82
/* printk format for KDETH error-flag events (receive type/error and PSN) */
#define KDETH_EFLAGS_ERR_PRN "[%s] qpn 0x%x TID ERR: RcvType 0x%x " \
			     "RcvTypeError 0x%x PSN 0x%x"
85
/*
 * Event class for expected-TID RcvArray register/unregister: records the
 * context/subcontext, RcvArray entry index, page count, and the VA/PA/DMA
 * addresses of the registered buffer.
 */
DECLARE_EVENT_CLASS(/* class */
	hfi1_exp_tid_reg_unreg,
	TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
		 unsigned long va, unsigned long pa, dma_addr_t dma),
	TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
	TP_STRUCT__entry(/* entry */
		__field(unsigned int, ctxt)
		__field(u16, subctxt)
		__field(u32, rarr)
		__field(u32, npages)
		__field(unsigned long, va)
		__field(unsigned long, pa)
		__field(dma_addr_t, dma)
	),
	TP_fast_assign(/* assign */
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->rarr = rarr;
		__entry->npages = npages;
		__entry->va = va;
		__entry->pa = pa;
		__entry->dma = dma;
	),
	/*
	 * NOTE(review): "pages @" prints pa and "va:" prints va (pa is
	 * passed before va here). "%llx" assumes dma_addr_t is 64-bit —
	 * TODO confirm on 32-bit configs.
	 */
	TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
		  __entry->ctxt,
		  __entry->subctxt,
		  __entry->rarr,
		  __entry->npages,
		  __entry->pa,
		  __entry->va,
		  __entry->dma
	)
);
119
/* Concrete events on the register/unregister class above. */
DEFINE_EVENT(/* exp_tid_unreg */
	hfi1_exp_tid_reg_unreg, hfi1_exp_tid_unreg,
	TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
		 unsigned long va, unsigned long pa, dma_addr_t dma),
	TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma)
);

DEFINE_EVENT(/* exp_tid_reg */
	hfi1_exp_tid_reg_unreg, hfi1_exp_tid_reg,
	TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
		 unsigned long va, unsigned long pa, dma_addr_t dma),
	TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma)
);
133
/*
 * Trace a RcvArray entry update: the entry index, TID type (decoded via
 * show_tidtype), physical address, and buffer order.
 */
TRACE_EVENT(/* put_tid */
	hfi1_put_tid,
	TP_PROTO(struct hfi1_devdata *dd,
		 u32 index, u32 type, unsigned long pa, u16 order),
	TP_ARGS(dd, index, type, pa, order),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd)
		__field(unsigned long, pa)
		__field(u32, index)
		__field(u32, type)
		__field(u16, order)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd);
		__entry->pa = pa;
		__entry->index = index;
		__entry->type = type;
		__entry->order = order;
	),
	TP_printk("[%s] type %s pa %lx index %u order %u",
		  __get_str(dev),
		  show_tidtype(__entry->type),
		  __entry->pa,
		  __entry->index,
		  __entry->order
	)
);
161
/*
 * Trace invalidation of an expected-TID entry (context, entry index,
 * page count, VA and DMA address of the invalidated range).
 */
TRACE_EVENT(/* exp_tid_inval */
	hfi1_exp_tid_inval,
	TP_PROTO(unsigned int ctxt, u16 subctxt, unsigned long va, u32 rarr,
		 u32 npages, dma_addr_t dma),
	TP_ARGS(ctxt, subctxt, va, rarr, npages, dma),
	TP_STRUCT__entry(/* entry */
		__field(unsigned int, ctxt)
		__field(u16, subctxt)
		__field(unsigned long, va)
		__field(u32, rarr)
		__field(u32, npages)
		__field(dma_addr_t, dma)
	),
	TP_fast_assign(/* assign */
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->va = va;
		__entry->rarr = rarr;
		__entry->npages = npages;
		__entry->dma = dma;
	),
	/* NOTE(review): here "pages @" prints va (cf. reg/unreg class, which prints pa). */
	TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx",
		  __entry->ctxt,
		  __entry->subctxt,
		  __entry->rarr,
		  __entry->npages,
		  __entry->va,
		  __entry->dma
	)
);
192
/*
 * Event class capturing OPFN negotiation state from the QP private data:
 * requested/completed capability masks and the current capability bit.
 */
DECLARE_EVENT_CLASS(/* opfn_state */
	hfi1_opfn_state_template,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(u16, requested)
		__field(u16, completed)
		__field(u8, curr)
	),
	TP_fast_assign(/* assign */
		struct hfi1_qp_priv *priv = qp->priv;

		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->requested = priv->opfn.requested;
		__entry->completed = priv->opfn.completed;
		__entry->curr = priv->opfn.curr;
	),
	TP_printk(/* print */
		"[%s] qpn 0x%x requested 0x%x completed 0x%x curr 0x%x",
		__get_str(dev),
		__entry->qpn,
		__entry->requested,
		__entry->completed,
		__entry->curr
	)
);
222
/* OPFN state events: request, scheduled request, response, reply, error. */
DEFINE_EVENT(/* event */
	hfi1_opfn_state_template, hfi1_opfn_state_conn_request,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_opfn_state_template, hfi1_opfn_state_sched_conn_request,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_opfn_state_template, hfi1_opfn_state_conn_response,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_opfn_state_template, hfi1_opfn_state_conn_reply,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_opfn_state_template, hfi1_opfn_state_conn_error,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);
252
/*
 * Event class for OPFN payload exchange: QP state, capability code, and
 * the 64-bit capability data being sent or received.
 */
DECLARE_EVENT_CLASS(/* opfn_data */
	hfi1_opfn_data_template,
	TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
	TP_ARGS(qp, capcode, data),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(u32, state)
		__field(u8, capcode)
		__field(u64, data)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->state = qp->state;
		__entry->capcode = capcode;
		__entry->data = data;
	),
	TP_printk(/* printk */
		"[%s] qpn 0x%x (state 0x%x) Capcode %u data 0x%llx",
		__get_str(dev),
		__entry->qpn,
		__entry->state,
		__entry->capcode,
		__entry->data
	)
);
280
/* OPFN data events at request, response, and reply points. */
DEFINE_EVENT(/* event */
	hfi1_opfn_data_template, hfi1_opfn_data_conn_request,
	TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
	TP_ARGS(qp, capcode, data)
);

DEFINE_EVENT(/* event */
	hfi1_opfn_data_template, hfi1_opfn_data_conn_response,
	TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
	TP_ARGS(qp, capcode, data)
);

DEFINE_EVENT(/* event */
	hfi1_opfn_data_template, hfi1_opfn_data_conn_reply,
	TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
	TP_ARGS(qp, capcode, data)
);
298
/*
 * Event class dumping a tid_rdma_params block (local or remote side, per
 * the 'remote' flag) after OPFN negotiation.
 */
DECLARE_EVENT_CLASS(/* opfn_param */
	hfi1_opfn_param_template,
	TP_PROTO(struct rvt_qp *qp, char remote,
		 struct tid_rdma_params *param),
	TP_ARGS(qp, remote, param),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(char, remote)
		__field(u32, param_qp)
		__field(u32, max_len)
		__field(u16, jkey)
		__field(u8, max_read)
		__field(u8, max_write)
		__field(u8, timeout)
		__field(u8, urg)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->remote = remote;
		__entry->param_qp = param->qp;
		__entry->max_len = param->max_len;
		__entry->jkey = param->jkey;
		__entry->max_read = param->max_read;
		__entry->max_write = param->max_write;
		__entry->timeout = param->timeout;
		__entry->urg = param->urg;
	),
	TP_printk(/* print */
		OPFN_PARAM_PRN,
		__get_str(dev),
		__entry->qpn,
		__entry->remote ? "remote" : "local",
		__entry->param_qp,
		__entry->max_read,
		__entry->max_write,
		__entry->max_len,
		__entry->jkey,
		__entry->timeout,
		__entry->urg
	)
);
342
/* Single concrete event on the opfn_param class. */
DEFINE_EVENT(/* event */
	hfi1_opfn_param_template, hfi1_opfn_param,
	TP_PROTO(struct rvt_qp *qp, char remote,
		 struct tid_rdma_params *param),
	TP_ARGS(qp, remote, param)
);
349
/*
 * Generic message event class: a free-form string plus one extra u64.
 * Tolerates a NULL qp (qpn is then reported as 0).
 */
DECLARE_EVENT_CLASS(/* msg */
	hfi1_msg_template,
	TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
	TP_ARGS(qp, msg, more),
	TP_STRUCT__entry(/* entry */
		__field(u32, qpn)
		__string(msg, msg)
		__field(u64, more)
	),
	TP_fast_assign(/* assign */
		__entry->qpn = qp ? qp->ibqp.qp_num : 0;
		__assign_str(msg, msg);
		__entry->more = more;
	),
	TP_printk(/* print */
		"qpn 0x%x %s 0x%llx",
		__entry->qpn,
		__get_str(msg),
		__entry->more
	)
);
371
/* Message events for OPFN, TID allocation, restart, KDETH eflags, timeouts. */
DEFINE_EVENT(/* event */
	hfi1_msg_template, hfi1_msg_opfn_conn_request,
	TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
	TP_ARGS(qp, msg, more)
);

DEFINE_EVENT(/* event */
	hfi1_msg_template, hfi1_msg_opfn_conn_error,
	TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
	TP_ARGS(qp, msg, more)
);

DEFINE_EVENT(/* event */
	hfi1_msg_template, hfi1_msg_alloc_tids,
	TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
	TP_ARGS(qp, msg, more)
);

DEFINE_EVENT(/* event */
	hfi1_msg_template, hfi1_msg_tid_restart_req,
	TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
	TP_ARGS(qp, msg, more)
);

DEFINE_EVENT(/* event */
	hfi1_msg_template, hfi1_msg_handle_kdeth_eflags,
	TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
	TP_ARGS(qp, msg, more)
);

DEFINE_EVENT(/* event */
	hfi1_msg_template, hfi1_msg_tid_timeout,
	TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
	TP_ARGS(qp, msg, more)
);

DEFINE_EVENT(/* event */
	hfi1_msg_template, hfi1_msg_tid_retry_timeout,
	TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
	TP_ARGS(qp, msg, more)
);
413
/*
 * Event class tracing one page of a TID RDMA flow: records the vaddr, the
 * page it maps to (0 when vaddr is NULL), and two flags that select the
 * printed label ("v1"/"v0" when mtu8k, otherwise "vaddr").
 */
DECLARE_EVENT_CLASS(/* tid_flow_page */
	hfi1_tid_flow_page_template,
	TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index,
		 char mtu8k, char v1, void *vaddr),
	TP_ARGS(qp, flow, index, mtu8k, v1, vaddr),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(char, mtu8k)
		__field(char, v1)
		__field(u32, index)
		__field(u64, page)
		__field(u64, vaddr)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->mtu8k = mtu8k;
		__entry->v1 = v1;
		__entry->index = index;
		__entry->page = vaddr ? (u64)virt_to_page(vaddr) : 0ULL;
		__entry->vaddr = (u64)vaddr;
	),
	TP_printk(/* print */
		"[%s] qpn 0x%x page[%u]: page 0x%llx %s 0x%llx",
		__get_str(dev),
		__entry->qpn,
		__entry->index,
		__entry->page,
		__entry->mtu8k ? (__entry->v1 ? "v1" : "v0") : "vaddr",
		__entry->vaddr
	)
);
447
/* Single concrete event on the tid_flow_page class. */
DEFINE_EVENT(/* event */
	hfi1_tid_flow_page_template, hfi1_tid_flow_page,
	TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index,
		 char mtu8k, char v1, void *vaddr),
	TP_ARGS(qp, flow, index, mtu8k, v1, vaddr)
);
454
/*
 * Event class tracing one pageset list entry: list position, page index,
 * and page count.
 */
DECLARE_EVENT_CLASS(/* tid_pageset */
	hfi1_tid_pageset_template,
	TP_PROTO(struct rvt_qp *qp, u32 index, u16 idx, u16 count),
	TP_ARGS(qp, index, idx, count),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(u32, index)
		__field(u16, idx)
		__field(u16, count)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->index = index;
		__entry->idx = idx;
		__entry->count = count;
	),
	TP_printk(/* print */
		"[%s] qpn 0x%x list[%u]: idx %u count %u",
		__get_str(dev),
		__entry->qpn,
		__entry->index,
		__entry->idx,
		__entry->count
	)
);
482
/* Single concrete event on the tid_pageset class. */
DEFINE_EVENT(/* event */
	hfi1_tid_pageset_template, hfi1_tid_pageset,
	TP_PROTO(struct rvt_qp *qp, u32 index, u16 idx, u16 count),
	TP_ARGS(qp, index, idx, count)
);
488
/*
 * Event class dumping a full tid_rdma_flow: flow-state PSNs (first/last
 * flow PSN expanded via full_flow_psn), IB PSN range, pageset/TID counts,
 * and transfer progress (length/sent).
 */
DECLARE_EVENT_CLASS(/* tid_flow */
	hfi1_tid_flow_template,
	TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
	TP_ARGS(qp, index, flow),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(int, index)
		__field(int, idx)
		__field(u32, resp_ib_psn)
		__field(u32, generation)
		__field(u32, fspsn)
		__field(u32, flpsn)
		__field(u32, r_next_psn)
		__field(u32, ib_spsn)
		__field(u32, ib_lpsn)
		__field(u32, npagesets)
		__field(u32, tnode_cnt)
		__field(u32, tidcnt)
		__field(u32, tid_idx)
		__field(u32, tid_offset)
		__field(u32, length)
		__field(u32, sent)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->index = index;
		__entry->idx = flow->idx;
		__entry->resp_ib_psn = flow->flow_state.resp_ib_psn;
		__entry->generation = flow->flow_state.generation;
		__entry->fspsn = full_flow_psn(flow,
					       flow->flow_state.spsn);
		__entry->flpsn = full_flow_psn(flow,
					       flow->flow_state.lpsn);
		__entry->r_next_psn = flow->flow_state.r_next_psn;
		__entry->ib_spsn = flow->flow_state.ib_spsn;
		__entry->ib_lpsn = flow->flow_state.ib_lpsn;
		__entry->npagesets = flow->npagesets;
		__entry->tnode_cnt = flow->tnode_cnt;
		__entry->tidcnt = flow->tidcnt;
		__entry->tid_idx = flow->tid_idx;
		__entry->tid_offset = flow->tid_offset;
		__entry->length = flow->length;
		__entry->sent = flow->sent;
	),
	TP_printk(/* print */
		TID_FLOW_PRN,
		__get_str(dev),
		__entry->qpn,
		__entry->index,
		__entry->idx,
		__entry->resp_ib_psn,
		__entry->generation,
		__entry->fspsn,
		__entry->flpsn,
		__entry->r_next_psn,
		__entry->ib_spsn,
		__entry->ib_lpsn,
		__entry->npagesets,
		__entry->tnode_cnt,
		__entry->tidcnt,
		__entry->tid_idx,
		__entry->tid_offset,
		__entry->length,
		__entry->sent
	)
);
557
/* tid_flow events at each point a flow is built, received, or restarted. */
DEFINE_EVENT(/* event */
	hfi1_tid_flow_template, hfi1_tid_flow_alloc,
	TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
	TP_ARGS(qp, index, flow)
);

DEFINE_EVENT(/* event */
	hfi1_tid_flow_template, hfi1_tid_flow_build_read_pkt,
	TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
	TP_ARGS(qp, index, flow)
);

DEFINE_EVENT(/* event */
	hfi1_tid_flow_template, hfi1_tid_flow_build_read_resp,
	TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
	TP_ARGS(qp, index, flow)
);

DEFINE_EVENT(/* event */
	hfi1_tid_flow_template, hfi1_tid_flow_rcv_read_req,
	TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
	TP_ARGS(qp, index, flow)
);

DEFINE_EVENT(/* event */
	hfi1_tid_flow_template, hfi1_tid_flow_rcv_read_resp,
	TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
	TP_ARGS(qp, index, flow)
);

DEFINE_EVENT(/* event */
	hfi1_tid_flow_template, hfi1_tid_flow_restart_req,
	TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
	TP_ARGS(qp, index, flow)
);

DEFINE_EVENT(/* event */
	hfi1_tid_flow_template, hfi1_tid_flow_build_write_resp,
	TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
	TP_ARGS(qp, index, flow)
);

DEFINE_EVENT(/* event */
	hfi1_tid_flow_template, hfi1_tid_flow_rcv_write_resp,
	TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
	TP_ARGS(qp, index, flow)
);

DEFINE_EVENT(/* event */
	hfi1_tid_flow_template, hfi1_tid_flow_build_write_data,
	TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
	TP_ARGS(qp, index, flow)
);

DEFINE_EVENT(/* event */
	hfi1_tid_flow_template, hfi1_tid_flow_rcv_tid_ack,
	TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
	TP_ARGS(qp, index, flow)
);

DEFINE_EVENT(/* event */
	hfi1_tid_flow_template, hfi1_tid_flow_rcv_resync,
	TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
	TP_ARGS(qp, index, flow)
);

DEFINE_EVENT(/* event */
	hfi1_tid_flow_template, hfi1_tid_flow_handle_kdeth_eflags,
	TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
	TP_ARGS(qp, index, flow)
);

DEFINE_EVENT(/* event */
	hfi1_tid_flow_template, hfi1_tid_flow_read_kdeth_eflags,
	TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
	TP_ARGS(qp, index, flow)
);
635
/*
 * Event class tracing a TID group node: caller-supplied tag string plus
 * the group base, usage bitmap, used count, and total count.
 */
DECLARE_EVENT_CLASS(/* tid_node */
	hfi1_tid_node_template,
	TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base,
		 u8 map, u8 used, u8 cnt),
	TP_ARGS(qp, msg, index, base, map, used, cnt),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__string(msg, msg)
		__field(u32, index)
		__field(u32, base)
		__field(u8, map)
		__field(u8, used)
		__field(u8, cnt)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__assign_str(msg, msg);
		__entry->index = index;
		__entry->base = base;
		__entry->map = map;
		__entry->used = used;
		__entry->cnt = cnt;
	),
	TP_printk(/* print */
		TID_NODE_PRN,
		__get_str(dev),
		__entry->qpn,
		__get_str(msg),
		__entry->index,
		__entry->base,
		__entry->map,
		__entry->used,
		__entry->cnt
	)
);
673
/* Single concrete event on the tid_node class. */
DEFINE_EVENT(/* event */
	hfi1_tid_node_template, hfi1_tid_node_add,
	TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base,
		 u8 map, u8 used, u8 cnt),
	TP_ARGS(qp, msg, index, base, map, used, cnt)
);
680
/*
 * Event class decoding a raw 32-bit TID entry into ctrl/idx/len using the
 * hfi1_trace_get_tid_* helpers declared at the top of this header.
 */
DECLARE_EVENT_CLASS(/* tid_entry */
	hfi1_tid_entry_template,
	TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
	TP_ARGS(qp, index, ent),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(int, index)
		__field(u8, ctrl)
		__field(u16, idx)
		__field(u16, len)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->index = index;
		__entry->ctrl = hfi1_trace_get_tid_ctrl(ent);
		__entry->idx = hfi1_trace_get_tid_idx(ent);
		__entry->len = hfi1_trace_get_tid_len(ent);
	),
	TP_printk(/* print */
		"[%s] qpn 0x%x TID entry %d: idx %u len %u ctrl 0x%x",
		__get_str(dev),
		__entry->qpn,
		__entry->index,
		__entry->idx,
		__entry->len,
		__entry->ctrl
	)
);
711
/* tid_entry events at allocation and at read/write request/response paths. */
DEFINE_EVENT(/* event */
	hfi1_tid_entry_template, hfi1_tid_entry_alloc,
	TP_PROTO(struct rvt_qp *qp, int index, u32 entry),
	TP_ARGS(qp, index, entry)
);

DEFINE_EVENT(/* event */
	hfi1_tid_entry_template, hfi1_tid_entry_build_read_resp,
	TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
	TP_ARGS(qp, index, ent)
);

DEFINE_EVENT(/* event */
	hfi1_tid_entry_template, hfi1_tid_entry_rcv_read_req,
	TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
	TP_ARGS(qp, index, ent)
);

DEFINE_EVENT(/* event */
	hfi1_tid_entry_template, hfi1_tid_entry_rcv_write_resp,
	TP_PROTO(struct rvt_qp *qp, int index, u32 entry),
	TP_ARGS(qp, index, entry)
);

DEFINE_EVENT(/* event */
	hfi1_tid_entry_template, hfi1_tid_entry_build_write_data,
	TP_PROTO(struct rvt_qp *qp, int index, u32 entry),
	TP_ARGS(qp, index, entry)
);
741
/*
 * Event class dumping responder-side QP state (ack-queue indices, NAK
 * state, flags) plus a caller-supplied PSN.
 */
DECLARE_EVENT_CLASS(/* rsp_info */
	hfi1_responder_info_template,
	TP_PROTO(struct rvt_qp *qp, u32 psn),
	TP_ARGS(qp, psn),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(u8, state)
		__field(u8, s_state)
		__field(u32, psn)
		__field(u32, r_psn)
		__field(u8, r_state)
		__field(u8, r_flags)
		__field(u8, r_head_ack_queue)
		__field(u8, s_tail_ack_queue)
		__field(u8, s_acked_ack_queue)
		__field(u8, s_ack_state)
		__field(u8, s_nak_state)
		/* NOTE(review): r_nak_state is never assigned or printed below. */
		__field(u8, r_nak_state)
		__field(u32, s_flags)
		__field(u32, ps_flags)
		__field(unsigned long, iow_flags)
	),
	TP_fast_assign(/* assign */
		struct hfi1_qp_priv *priv = qp->priv;

		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->state = qp->state;
		__entry->s_state = qp->s_state;
		__entry->psn = psn;
		__entry->r_psn = qp->r_psn;
		__entry->r_state = qp->r_state;
		__entry->r_flags = qp->r_flags;
		__entry->r_head_ack_queue = qp->r_head_ack_queue;
		__entry->s_tail_ack_queue = qp->s_tail_ack_queue;
		__entry->s_acked_ack_queue = qp->s_acked_ack_queue;
		__entry->s_ack_state = qp->s_ack_state;
		__entry->s_nak_state = qp->s_nak_state;
		__entry->s_flags = qp->s_flags;
		__entry->ps_flags = priv->s_flags;
		__entry->iow_flags = priv->s_iowait.flags;
	),
	TP_printk(/* print */
		RSP_INFO_PRN,
		__get_str(dev),
		__entry->qpn,
		__entry->state,
		__entry->s_state,
		__entry->psn,
		__entry->r_psn,
		__entry->r_state,
		__entry->r_flags,
		__entry->r_head_ack_queue,
		__entry->s_tail_ack_queue,
		__entry->s_acked_ack_queue,
		__entry->s_ack_state,
		__entry->s_nak_state,
		__entry->s_flags,
		__entry->ps_flags,
		__entry->iow_flags
	)
);
805
/* Responder events at ack building, TID read/write receive, and error paths. */
DEFINE_EVENT(/* event */
	hfi1_responder_info_template, hfi1_rsp_make_rc_ack,
	TP_PROTO(struct rvt_qp *qp, u32 psn),
	TP_ARGS(qp, psn)
);

DEFINE_EVENT(/* event */
	hfi1_responder_info_template, hfi1_rsp_rcv_tid_read_req,
	TP_PROTO(struct rvt_qp *qp, u32 psn),
	TP_ARGS(qp, psn)
);

DEFINE_EVENT(/* event */
	hfi1_responder_info_template, hfi1_rsp_tid_rcv_error,
	TP_PROTO(struct rvt_qp *qp, u32 psn),
	TP_ARGS(qp, psn)
);

DEFINE_EVENT(/* event */
	hfi1_responder_info_template, hfi1_rsp_tid_write_alloc_res,
	TP_PROTO(struct rvt_qp *qp, u32 psn),
	TP_ARGS(qp, psn)
);

DEFINE_EVENT(/* event */
	hfi1_responder_info_template, hfi1_rsp_rcv_tid_write_req,
	TP_PROTO(struct rvt_qp *qp, u32 psn),
	TP_ARGS(qp, psn)
);

DEFINE_EVENT(/* event */
	hfi1_responder_info_template, hfi1_rsp_build_tid_write_resp,
	TP_PROTO(struct rvt_qp *qp, u32 psn),
	TP_ARGS(qp, psn)
);

DEFINE_EVENT(/* event */
	hfi1_responder_info_template, hfi1_rsp_rcv_tid_write_data,
	TP_PROTO(struct rvt_qp *qp, u32 psn),
	TP_ARGS(qp, psn)
);

DEFINE_EVENT(/* event */
	hfi1_responder_info_template, hfi1_rsp_make_tid_ack,
	TP_PROTO(struct rvt_qp *qp, u32 psn),
	TP_ARGS(qp, psn)
);

DEFINE_EVENT(/* event */
	hfi1_responder_info_template, hfi1_rsp_handle_kdeth_eflags,
	TP_PROTO(struct rvt_qp *qp, u32 psn),
	TP_ARGS(qp, psn)
);

DEFINE_EVENT(/* event */
	hfi1_responder_info_template, hfi1_rsp_read_kdeth_eflags,
	TP_PROTO(struct rvt_qp *qp, u32 psn),
	TP_ARGS(qp, psn)
);
865
/*
 * Event class dumping sender-side QP state: send-queue indices, PSN
 * window, flags from both the QP and its hfi1 private data, and retry
 * counters.
 */
DECLARE_EVENT_CLASS(/* sender_info */
	hfi1_sender_info_template,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(u8, state)
		__field(u32, s_cur)
		__field(u32, s_tail)
		__field(u32, s_head)
		__field(u32, s_acked)
		__field(u32, s_last)
		__field(u32, s_psn)
		__field(u32, s_last_psn)
		__field(u32, s_flags)
		__field(u32, ps_flags)
		__field(unsigned long, iow_flags)
		__field(u8, s_state)
		__field(u8, s_num_rd)
		__field(u8, s_retry)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->state = qp->state;
		__entry->s_cur = qp->s_cur;
		__entry->s_tail = qp->s_tail;
		__entry->s_head = qp->s_head;
		__entry->s_acked = qp->s_acked;
		__entry->s_last = qp->s_last;
		__entry->s_psn = qp->s_psn;
		__entry->s_last_psn = qp->s_last_psn;
		__entry->s_flags = qp->s_flags;
		__entry->ps_flags = ((struct hfi1_qp_priv *)qp->priv)->s_flags;
		__entry->iow_flags =
			((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
		__entry->s_state = qp->s_state;
		__entry->s_num_rd = qp->s_num_rd_atomic;
		__entry->s_retry = qp->s_retry;
	),
	TP_printk(/* print */
		SENDER_INFO_PRN,
		__get_str(dev),
		__entry->qpn,
		__entry->state,
		__entry->s_cur,
		__entry->s_tail,
		__entry->s_head,
		__entry->s_acked,
		__entry->s_last,
		__entry->s_psn,
		__entry->s_last_psn,
		__entry->s_flags,
		__entry->ps_flags,
		__entry->iow_flags,
		__entry->s_state,
		__entry->s_num_rd,
		__entry->s_retry
	)
);
927
/* Sender events at RC request/ack processing and TID packet handling. */
DEFINE_EVENT(/* event */
	hfi1_sender_info_template, hfi1_sender_make_rc_req,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_sender_info_template, hfi1_sender_reset_psn,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_sender_info_template, hfi1_sender_restart_rc,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_sender_info_template, hfi1_sender_do_rc_ack,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_sender_info_template, hfi1_sender_rcv_tid_read_resp,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_sender_info_template, hfi1_sender_rcv_tid_ack,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_sender_info_template, hfi1_sender_make_tid_pkt,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_sender_info_template, hfi1_sender_read_kdeth_eflags,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);
975
/*
 * Event class for TID READ sender state: outstanding/completed read
 * request counts, pending segments, flags, and hardware flow state from
 * the QP private data.
 */
DECLARE_EVENT_CLASS(/* tid_read_sender */
	hfi1_tid_read_sender_template,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(char, newreq)
		__field(u32, tid_r_reqs)
		__field(u32, tid_r_comp)
		__field(u32, pending_tid_r_segs)
		__field(u32, s_flags)
		__field(u32, ps_flags)
		__field(unsigned long, iow_flags)
		__field(u8, s_state)
		__field(u32, hw_flow_index)
		__field(u32, generation)
		__field(u32, fpsn)
	),
	TP_fast_assign(/* assign */
		struct hfi1_qp_priv *priv = qp->priv;

		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->newreq = newreq;
		__entry->tid_r_reqs = priv->tid_r_reqs;
		__entry->tid_r_comp = priv->tid_r_comp;
		__entry->pending_tid_r_segs = priv->pending_tid_r_segs;
		__entry->s_flags = qp->s_flags;
		__entry->ps_flags = priv->s_flags;
		__entry->iow_flags = priv->s_iowait.flags;
		__entry->s_state = priv->s_state;
		__entry->hw_flow_index = priv->flow_state.index;
		__entry->generation = priv->flow_state.generation;
		__entry->fpsn = priv->flow_state.psn;
	),
	TP_printk(/* print */
		TID_READ_SENDER_PRN,
		__get_str(dev),
		__entry->qpn,
		__entry->newreq,
		__entry->tid_r_reqs,
		__entry->tid_r_comp,
		__entry->pending_tid_r_segs,
		__entry->s_flags,
		__entry->ps_flags,
		__entry->iow_flags,
		__entry->s_state,
		__entry->hw_flow_index,
		__entry->generation,
		__entry->fpsn
	)
);
1029
/* TID READ sender events at request build and KDETH eflags handling. */
DEFINE_EVENT(/* event */
	hfi1_tid_read_sender_template, hfi1_tid_read_sender_make_req,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq)
);

DEFINE_EVENT(/* event */
	hfi1_tid_read_sender_template, hfi1_tid_read_sender_kdeth_eflags,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq)
);
1041
/*
 * Event class dumping a tid_rdma_request: segment progress counters,
 * circular flow indices (setup_head/clear_tail/flow_idx/acked_tail),
 * request state, and the PSN bookkeeping fields.
 */
DECLARE_EVENT_CLASS(/* tid_rdma_request */
	hfi1_tid_rdma_request_template,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(char, newreq)
		__field(u8, opcode)
		__field(u32, psn)
		__field(u32, lpsn)
		__field(u32, cur_seg)
		__field(u32, comp_seg)
		__field(u32, ack_seg)
		__field(u32, alloc_seg)
		__field(u32, total_segs)
		__field(u16, setup_head)
		__field(u16, clear_tail)
		__field(u16, flow_idx)
		__field(u16, acked_tail)
		__field(u32, state)
		__field(u32, r_ack_psn)
		__field(u32, r_flow_psn)
		__field(u32, r_last_acked)
		__field(u32, s_next_psn)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->newreq = newreq;
		__entry->opcode = opcode;
		__entry->psn = psn;
		__entry->lpsn = lpsn;
		__entry->cur_seg = req->cur_seg;
		__entry->comp_seg = req->comp_seg;
		__entry->ack_seg = req->ack_seg;
		__entry->alloc_seg = req->alloc_seg;
		__entry->total_segs = req->total_segs;
		__entry->setup_head = req->setup_head;
		__entry->clear_tail = req->clear_tail;
		__entry->flow_idx = req->flow_idx;
		__entry->acked_tail = req->acked_tail;
		__entry->state = req->state;
		__entry->r_ack_psn = req->r_ack_psn;
		__entry->r_flow_psn = req->r_flow_psn;
		__entry->r_last_acked = req->r_last_acked;
		__entry->s_next_psn = req->s_next_psn;
	),
	TP_printk(/* print */
		TID_REQ_PRN,
		__get_str(dev),
		__entry->qpn,
		__entry->newreq,
		__entry->opcode,
		__entry->psn,
		__entry->lpsn,
		__entry->cur_seg,
		__entry->comp_seg,
		__entry->ack_seg,
		__entry->alloc_seg,
		__entry->total_segs,
		__entry->setup_head,
		__entry->clear_tail,
		__entry->flow_idx,
		__entry->acked_tail,
		__entry->state,
		__entry->r_ack_psn,
		__entry->r_flow_psn,
		__entry->r_last_acked,
		__entry->s_next_psn
	)
);
1115
/*
 * Instantiations of hfi1_tid_rdma_request_template.  Each event name
 * encodes its call site (e.g. make_req_read, rcv_write_resp); all share
 * the template's fields and TID_REQ_PRN output format.
 */
DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_make_req_read,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_build_read_req,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_req,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_resp,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_err,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_restart_req,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_setup_tid_wqe,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_write_alloc_res,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_req,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_build_write_resp,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_resp,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_data,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_tid_ack,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_tid_retry_timeout,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_resync,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_make_tid_pkt,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_make_tid_ack,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_handle_kdeth_eflags,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_read_kdeth_eflags,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_make_rc_ack_write,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_make_req_write,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_update_num_rd_atomic,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);
1269
/*
 * Log a receive-side error check: the offending packet's opcode and PSN,
 * the PSN difference computed by the caller, and the QP's ack-queue and
 * flag state at the time.  Output format is RCV_ERR_PRN.
 */
DECLARE_EVENT_CLASS(/* rc_rcv_err */
	hfi1_rc_rcv_err_template,
	TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff),
	TP_ARGS(qp, opcode, psn, diff),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(u32, s_flags)
		__field(u8, state)
		__field(u8, s_acked_ack_queue)
		__field(u8, s_tail_ack_queue)
		__field(u8, r_head_ack_queue)
		__field(u32, opcode)
		__field(u32, psn)
		__field(u32, r_psn)
		__field(int, diff)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->s_flags = qp->s_flags;
		__entry->state = qp->state;
		__entry->s_acked_ack_queue = qp->s_acked_ack_queue;
		__entry->s_tail_ack_queue = qp->s_tail_ack_queue;
		__entry->r_head_ack_queue = qp->r_head_ack_queue;
		__entry->opcode = opcode;
		__entry->psn = psn;
		__entry->r_psn = qp->r_psn;
		__entry->diff = diff;
	),
	TP_printk(/* print */
		RCV_ERR_PRN,
		__get_str(dev),
		__entry->qpn,
		__entry->s_flags,
		__entry->state,
		__entry->s_acked_ack_queue,
		__entry->s_tail_ack_queue,
		__entry->r_head_ack_queue,
		__entry->opcode,
		__entry->psn,
		__entry->r_psn,
		__entry->diff
	)
);
1315
/* TID RDMA instantiation of the rc_rcv_err class */
DEFINE_EVENT(/* event */
	hfi1_rc_rcv_err_template, hfi1_tid_rdma_rcv_err,
	TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff),
	TP_ARGS(qp, opcode, psn, diff)
);
1321
/*
 * Log one SGE of a QP's scatter/gather list: its index within the list,
 * its virtual address (cast to u64 for the trace record) and its length.
 */
DECLARE_EVENT_CLASS(/* sge */
	hfi1_sge_template,
	TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge),
	TP_ARGS(qp, index, sge),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(int, index)
		__field(u64, vaddr)
		__field(u32, sge_length)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->index = index;
		__entry->vaddr = (u64)sge->vaddr;
		__entry->sge_length = sge->sge_length;
	),
	TP_printk(/* print */
		"[%s] qpn 0x%x sge %d: vaddr 0x%llx sge_length %u",
		__get_str(dev),
		__entry->qpn,
		__entry->index,
		__entry->vaddr,
		__entry->sge_length
	)
);
1349
/* SGE dump taken while checking alignment */
DEFINE_EVENT(/* event */
	hfi1_sge_template, hfi1_sge_check_align,
	TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge),
	TP_ARGS(qp, index, sge)
);
1355
/*
 * Snapshot the TID RDMA WRITE responder state kept in the QP's
 * hfi1_qp_priv: the r_tid_* ring positions, segment allocation counters,
 * NAK state and the hardware flow (index/generation/psn).
 * Output format is TID_WRITE_RSPDR_PRN.
 */
DECLARE_EVENT_CLASS(/* tid_write_sp */
	hfi1_tid_write_rsp_template,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(u32, r_tid_head)
		__field(u32, r_tid_tail)
		__field(u32, r_tid_ack)
		__field(u32, r_tid_alloc)
		__field(u32, alloc_w_segs)
		__field(u32, pending_tid_w_segs)
		__field(bool, sync_pt)
		__field(u32, ps_nak_psn)
		__field(u8, ps_nak_state)
		__field(u8, prnr_nak_state)
		__field(u32, hw_flow_index)
		__field(u32, generation)
		__field(u32, fpsn)
		__field(bool, resync)
		__field(u32, r_next_psn_kdeth)
	),
	TP_fast_assign(/* assign */
		struct hfi1_qp_priv *priv = qp->priv;

		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->r_tid_head = priv->r_tid_head;
		__entry->r_tid_tail = priv->r_tid_tail;
		__entry->r_tid_ack = priv->r_tid_ack;
		__entry->r_tid_alloc = priv->r_tid_alloc;
		__entry->alloc_w_segs = priv->alloc_w_segs;
		__entry->pending_tid_w_segs = priv->pending_tid_w_segs;
		__entry->sync_pt = priv->sync_pt;
		/*
		 * The "p"-prefixed trace fields record the qp-priv copies:
		 * ps_nak_* <- priv->s_nak_*, prnr_nak_state <-
		 * priv->rnr_nak_state.
		 */
		__entry->ps_nak_psn = priv->s_nak_psn;
		__entry->ps_nak_state = priv->s_nak_state;
		__entry->prnr_nak_state = priv->rnr_nak_state;
		__entry->hw_flow_index = priv->flow_state.index;
		__entry->generation = priv->flow_state.generation;
		__entry->fpsn = priv->flow_state.psn;
		__entry->resync = priv->resync;
		__entry->r_next_psn_kdeth = priv->r_next_psn_kdeth;
	),
	TP_printk(/* print */
		TID_WRITE_RSPDR_PRN,
		__get_str(dev),
		__entry->qpn,
		__entry->r_tid_head,
		__entry->r_tid_tail,
		__entry->r_tid_ack,
		__entry->r_tid_alloc,
		__entry->alloc_w_segs,
		__entry->pending_tid_w_segs,
		__entry->sync_pt ? "yes" : "no",
		__entry->ps_nak_psn,
		__entry->ps_nak_state,
		__entry->prnr_nak_state,
		__entry->hw_flow_index,
		__entry->generation,
		__entry->fpsn,
		__entry->resync ? "yes" : "no",
		__entry->r_next_psn_kdeth
	)
);
1421
/*
 * Instantiations of hfi1_tid_write_rsp_template at the responder's
 * call sites (resource allocation, request/data/resync receive,
 * response and ack construction, KDETH eflags handling).
 */
DEFINE_EVENT(/* event */
	hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_alloc_res,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_req,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_build_resp,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_data,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_resync,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_make_tid_ack,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_handle_kdeth_eflags,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_make_rc_ack,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);
1469
/*
 * Snapshot the TID RDMA WRITE sender state kept in the QP's
 * hfi1_qp_priv: s_tid_* ring positions, outstanding response and
 * request counts (read atomically), and both the QP and priv flag
 * words.  Output format is TID_WRITE_SENDER_PRN.
 */
DECLARE_EVENT_CLASS(/* tid_write_sender */
	hfi1_tid_write_sender_template,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(char, newreq)
		__field(u32, s_tid_cur)
		__field(u32, s_tid_tail)
		__field(u32, s_tid_head)
		__field(u32, pending_tid_w_resp)
		__field(u32, n_requests)
		__field(u32, n_tid_requests)
		__field(u32, s_flags)
		__field(u32, ps_flags)
		__field(unsigned long, iow_flags)
		__field(u8, s_state)
		__field(u8, s_retry)
	),
	TP_fast_assign(/* assign */
		struct hfi1_qp_priv *priv = qp->priv;

		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->newreq = newreq;
		__entry->s_tid_cur = priv->s_tid_cur;
		__entry->s_tid_tail = priv->s_tid_tail;
		__entry->s_tid_head = priv->s_tid_head;
		__entry->pending_tid_w_resp = priv->pending_tid_w_resp;
		__entry->n_requests = atomic_read(&priv->n_requests);
		__entry->n_tid_requests = atomic_read(&priv->n_tid_requests);
		/* s_flags from the QP, ps_flags from the hfi1 priv */
		__entry->s_flags = qp->s_flags;
		__entry->ps_flags = priv->s_flags;
		__entry->iow_flags = priv->s_iowait.flags;
		__entry->s_state = priv->s_state;
		__entry->s_retry = priv->s_retry;
	),
	TP_printk(/* print */
		TID_WRITE_SENDER_PRN,
		__get_str(dev),
		__entry->qpn,
		__entry->newreq,
		__entry->s_tid_cur,
		__entry->s_tid_tail,
		__entry->s_tid_head,
		__entry->pending_tid_w_resp,
		__entry->n_requests,
		__entry->n_tid_requests,
		__entry->s_flags,
		__entry->ps_flags,
		__entry->iow_flags,
		__entry->s_state,
		__entry->s_retry
	)
);
1526
/*
 * Instantiations of hfi1_tid_write_sender_template at the sender's
 * call sites (response/ack receive, retry timeout, packet/request
 * construction, RC restart).
 */
DEFINE_EVENT(/* event */
	hfi1_tid_write_sender_template, hfi1_tid_write_sender_rcv_resp,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_sender_template, hfi1_tid_write_sender_rcv_tid_ack,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_sender_template, hfi1_tid_write_sender_retry_timeout,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_sender_template, hfi1_tid_write_sender_make_tid_pkt,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_sender_template, hfi1_tid_write_sender_make_req,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_sender_template, hfi1_tid_write_sender_restart_rc,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq)
);
1562
/*
 * Log the PSN-related values of a TID ACK: the AETH word, the ack's
 * PSN, the request PSN and the resync PSN, all passed in verbatim by
 * the caller.
 */
DECLARE_EVENT_CLASS(/* tid_ack */
	hfi1_tid_ack_template,
	TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
		 u32 req_psn, u32 resync_psn),
	TP_ARGS(qp, aeth, psn, req_psn, resync_psn),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(u32, aeth)
		__field(u32, psn)
		__field(u32, req_psn)
		__field(u32, resync_psn)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->aeth = aeth;
		__entry->psn = psn;
		__entry->req_psn = req_psn;
		__entry->resync_psn = resync_psn;
	),
	TP_printk(/* print */
		"[%s] qpn 0x%x aeth 0x%x psn 0x%x req_psn 0x%x resync_psn 0x%x",
		__get_str(dev),
		__entry->qpn,
		__entry->aeth,
		__entry->psn,
		__entry->req_psn,
		__entry->resync_psn
	)
);
1594
/* TID ACK logged on the receive path */
DEFINE_EVENT(/* rcv_tid_ack */
	hfi1_tid_ack_template, hfi1_rcv_tid_ack,
	TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
		 u32 req_psn, u32 resync_psn),
	TP_ARGS(qp, aeth, psn, req_psn, resync_psn)
);
1601
/*
 * Log a KDETH eflags error: the receive type and "rte" error code
 * supplied by the caller, plus the PSN of the affected packet.
 * Output format is KDETH_EFLAGS_ERR_PRN.
 */
DECLARE_EVENT_CLASS(/* kdeth_eflags_error */
	hfi1_kdeth_eflags_error_template,
	TP_PROTO(struct rvt_qp *qp, u8 rcv_type, u8 rte, u32 psn),
	TP_ARGS(qp, rcv_type, rte, psn),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(u8, rcv_type)
		__field(u8, rte)
		__field(u32, psn)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->rcv_type = rcv_type;
		__entry->rte = rte;
		__entry->psn = psn;
	),
	TP_printk(/* print */
		KDETH_EFLAGS_ERR_PRN,
		__get_str(dev),
		__entry->qpn,
		__entry->rcv_type,
		__entry->rte,
		__entry->psn
	)
);
1629
/* KDETH eflags error observed on the TID WRITE path */
DEFINE_EVENT(/* event */
	hfi1_kdeth_eflags_error_template, hfi1_eflags_err_write,
	TP_PROTO(struct rvt_qp *qp, u8 rcv_type, u8 rte, u32 psn),
	TP_ARGS(qp, rcv_type, rte, psn)
);
1635
1636#endif /* __HFI1_TRACE_TID_H */
1637
1638#undef TRACE_INCLUDE_PATH
1639#undef TRACE_INCLUDE_FILE
1640#define TRACE_INCLUDE_PATH .
1641#define TRACE_INCLUDE_FILE trace_tid
1642#include <trace/define_trace.h>
1643

source code of linux/drivers/infiniband/hw/hfi1/trace_tid.h