1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Trace point definitions for core RDMA functions.
4 *
5 * Author: Chuck Lever <chuck.lever@oracle.com>
6 *
7 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
8 */
9
10#undef TRACE_SYSTEM
11#define TRACE_SYSTEM rdma_core
12
13#if !defined(_TRACE_RDMA_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
14#define _TRACE_RDMA_CORE_H
15
16#include <linux/tracepoint.h>
17#include <rdma/ib_verbs.h>
18
/*
 * enum ib_poll_context, from include/rdma/ib_verbs.h
 *
 * The x-macro list below is expanded twice:
 *  1. via TRACE_DEFINE_ENUM(), so the tracing infrastructure knows the
 *     numeric value of each enumerator, and
 *  2. as a { value, "name" } table handed to __print_symbolic(), so the
 *     trace output shows the symbolic name instead of a raw number.
 *
 * The _end() variant exists only so the last table entry is emitted
 * without a trailing comma.
 */
#define IB_POLL_CTX_LIST			\
	ib_poll_ctx(DIRECT)			\
	ib_poll_ctx(SOFTIRQ)			\
	ib_poll_ctx(WORKQUEUE)			\
	ib_poll_ctx_end(UNBOUND_WORKQUEUE)

#undef ib_poll_ctx
#undef ib_poll_ctx_end

/* First expansion: register each enumerator's value with the tracer */
#define ib_poll_ctx(x)		TRACE_DEFINE_ENUM(IB_POLL_##x);
#define ib_poll_ctx_end(x)	TRACE_DEFINE_ENUM(IB_POLL_##x);

IB_POLL_CTX_LIST

#undef ib_poll_ctx
#undef ib_poll_ctx_end

/* Second expansion: build the value -> string table */
#define ib_poll_ctx(x)		{ IB_POLL_##x, #x },
#define ib_poll_ctx_end(x)	{ IB_POLL_##x, #x }

/* Render an ib_poll_context value as its symbolic name */
#define rdma_show_ib_poll_ctx(x) \
		__print_symbolic(x, IB_POLL_CTX_LIST)
44
45/**
46 ** Completion Queue events
47 **/
48
/*
 * cq_schedule - CQ processing is being scheduled.
 *
 * NOTE: this tracepoint's assign stage deliberately has side effects on
 * the CQ itself (hence the non-const @cq): it stamps the current time
 * and marks the wake-up as interrupt-driven. The cq_process tracepoint
 * reads both fields back to report wake-up latency and origin.
 */
TRACE_EVENT(cq_schedule,
	TP_PROTO(
		struct ib_cq *cq
	),

	TP_ARGS(cq),

	TP_STRUCT__entry(
		__field(u32, cq_id)
	),

	TP_fast_assign(
		/* Consumed later by the cq_process tracepoint */
		cq->timestamp = ktime_get();
		cq->interrupt = true;

		__entry->cq_id = cq->res.id;
	),

	TP_printk("cq.id=%u", __entry->cq_id)
);
69
/*
 * cq_reschedule - CQ processing is being rescheduled (not from an
 * interrupt).
 *
 * Like cq_schedule, the assign stage writes back into the CQ: it stamps
 * the current time and records that this wake-up was NOT
 * interrupt-driven (cq->interrupt = false), which cq_process reports as
 * "reschedule".
 */
TRACE_EVENT(cq_reschedule,
	TP_PROTO(
		struct ib_cq *cq
	),

	TP_ARGS(cq),

	TP_STRUCT__entry(
		__field(u32, cq_id)
	),

	TP_fast_assign(
		/* Consumed later by the cq_process tracepoint */
		cq->timestamp = ktime_get();
		cq->interrupt = false;

		__entry->cq_id = cq->res.id;
	),

	TP_printk("cq.id=%u", __entry->cq_id)
);
90
/*
 * cq_process - CQ processing has started.
 *
 * Reports the wake-up latency: the elapsed time (in microseconds)
 * between the cq_schedule/cq_reschedule event that stamped
 * cq->timestamp and this point, plus whether the wake-up came from an
 * interrupt (cq->interrupt set by cq_schedule) or a reschedule.
 */
TRACE_EVENT(cq_process,
	TP_PROTO(
		const struct ib_cq *cq
	),

	TP_ARGS(cq),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(bool, interrupt)
		__field(s64, latency)
	),

	TP_fast_assign(
		/* Time since the matching cq_schedule/cq_reschedule */
		ktime_t latency = ktime_sub(ktime_get(), cq->timestamp);

		__entry->cq_id = cq->res.id;
		__entry->latency = ktime_to_us(latency);
		__entry->interrupt = cq->interrupt;
	),

	TP_printk("cq.id=%u wake-up took %lld [us] from %s",
		__entry->cq_id, __entry->latency,
		__entry->interrupt ? "interrupt" : "reschedule"
	)
);
117
/*
 * cq_poll - one CQ polling pass finished.
 *
 * @requested: number of completions asked for in this pass
 * @rc:        number actually returned (or a negative errno —
 *             NOTE(review): sign convention inferred from the int type;
 *             confirm against the caller)
 */
TRACE_EVENT(cq_poll,
	TP_PROTO(
		const struct ib_cq *cq,
		int requested,
		int rc
	),

	TP_ARGS(cq, requested, rc),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, requested)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->cq_id = cq->res.id;
		__entry->requested = requested;
		__entry->rc = rc;
	),

	TP_printk("cq.id=%u requested %d, returned %d",
		__entry->cq_id, __entry->requested, __entry->rc
	)
);
143
/*
 * cq_drain_complete - a CQ drain has finished; records only the CQ's
 * resource id.
 */
TRACE_EVENT(cq_drain_complete,
	TP_PROTO(
		const struct ib_cq *cq
	),

	TP_ARGS(cq),

	TP_STRUCT__entry(
		__field(u32, cq_id)
	),

	TP_fast_assign(
		__entry->cq_id = cq->res.id;
	),

	TP_printk("cq.id=%u",
		__entry->cq_id
	)
);
163
164
/*
 * cq_modify - CQ attributes are being modified.
 *
 * @comps: completion count, @usec: microsecond period — presumably the
 * event-moderation parameters passed to the CQ modify verb; confirm
 * against the caller.
 *
 * The u16 arguments are widened to unsigned int in the trace record.
 */
TRACE_EVENT(cq_modify,
	TP_PROTO(
		const struct ib_cq *cq,
		u16 comps,
		u16 usec
	),

	TP_ARGS(cq, comps, usec),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(unsigned int, comps)
		__field(unsigned int, usec)
	),

	TP_fast_assign(
		__entry->cq_id = cq->res.id;
		__entry->comps = comps;
		__entry->usec = usec;
	),

	TP_printk("cq.id=%u comps=%u usec=%u",
		__entry->cq_id, __entry->comps, __entry->usec
	)
);
190
/*
 * cq_alloc - a CQ was allocated successfully.
 *
 * Records the new CQ's resource id, the requested CQE count, the
 * completion vector, and the polling context. The poll context is
 * stored as unsigned long and rendered symbolically via
 * rdma_show_ib_poll_ctx().
 */
TRACE_EVENT(cq_alloc,
	TP_PROTO(
		const struct ib_cq *cq,
		int nr_cqe,
		int comp_vector,
		enum ib_poll_context poll_ctx
	),

	TP_ARGS(cq, nr_cqe, comp_vector, poll_ctx),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, nr_cqe)
		__field(int, comp_vector)
		__field(unsigned long, poll_ctx)
	),

	TP_fast_assign(
		__entry->cq_id = cq->res.id;
		__entry->nr_cqe = nr_cqe;
		__entry->comp_vector = comp_vector;
		__entry->poll_ctx = poll_ctx;
	),

	TP_printk("cq.id=%u nr_cqe=%d comp_vector=%d poll_ctx=%s",
		__entry->cq_id, __entry->nr_cqe, __entry->comp_vector,
		rdma_show_ib_poll_ctx(__entry->poll_ctx)
	)
);
220
/*
 * cq_alloc_error - CQ allocation failed.
 *
 * Same parameters as cq_alloc, but there is no CQ to identify; instead
 * the failure code @rc is recorded alongside the requested attributes.
 */
TRACE_EVENT(cq_alloc_error,
	TP_PROTO(
		int nr_cqe,
		int comp_vector,
		enum ib_poll_context poll_ctx,
		int rc
	),

	TP_ARGS(nr_cqe, comp_vector, poll_ctx, rc),

	TP_STRUCT__entry(
		__field(int, rc)
		__field(int, nr_cqe)
		__field(int, comp_vector)
		__field(unsigned long, poll_ctx)
	),

	TP_fast_assign(
		__entry->rc = rc;
		__entry->nr_cqe = nr_cqe;
		__entry->comp_vector = comp_vector;
		__entry->poll_ctx = poll_ctx;
	),

	TP_printk("nr_cqe=%d comp_vector=%d poll_ctx=%s rc=%d",
		__entry->nr_cqe, __entry->comp_vector,
		rdma_show_ib_poll_ctx(__entry->poll_ctx), __entry->rc
	)
);
250
/*
 * cq_free - a CQ is being freed; records only the CQ's resource id.
 */
TRACE_EVENT(cq_free,
	TP_PROTO(
		const struct ib_cq *cq
	),

	TP_ARGS(cq),

	TP_STRUCT__entry(
		__field(u32, cq_id)
	),

	TP_fast_assign(
		__entry->cq_id = cq->res.id;
	),

	TP_printk("cq.id=%u", __entry->cq_id)
);
268
269/**
270 ** Memory Region events
271 **/
272
/*
 * enum ib_mr_type, from include/rdma/ib_verbs.h
 *
 * Same two-phase x-macro pattern as IB_POLL_CTX_LIST above: first
 * expansion registers each enumerator with TRACE_DEFINE_ENUM(), second
 * expansion builds the { value, "name" } table for __print_symbolic().
 * The _end() variant omits the trailing comma on the last table entry.
 */
#define IB_MR_TYPE_LIST				\
	ib_mr_type_item(MEM_REG)		\
	ib_mr_type_item(SG_GAPS)		\
	ib_mr_type_item(DM)			\
	ib_mr_type_item(USER)			\
	ib_mr_type_item(DMA)			\
	ib_mr_type_end(INTEGRITY)

#undef ib_mr_type_item
#undef ib_mr_type_end

/* First expansion: register each enumerator's value with the tracer */
#define ib_mr_type_item(x)	TRACE_DEFINE_ENUM(IB_MR_TYPE_##x);
#define ib_mr_type_end(x)	TRACE_DEFINE_ENUM(IB_MR_TYPE_##x);

IB_MR_TYPE_LIST

#undef ib_mr_type_item
#undef ib_mr_type_end

/* Second expansion: build the value -> string table */
#define ib_mr_type_item(x)	{ IB_MR_TYPE_##x, #x },
#define ib_mr_type_end(x)	{ IB_MR_TYPE_##x, #x }

/* Render an ib_mr_type value as its symbolic name */
#define rdma_show_ib_mr_type(x) \
		__print_symbolic(x, IB_MR_TYPE_LIST)
300
/*
 * mr_alloc - an MR allocation attempt completed.
 *
 * @mr may be an ERR_PTR: on failure the entry records mr.id=0 and
 * rc=PTR_ERR(mr); on success it records the new MR's resource id and
 * rc=0. The PD id, MR type, and requested SG limit are captured either
 * way.
 */
TRACE_EVENT(mr_alloc,
	TP_PROTO(
		const struct ib_pd *pd,
		enum ib_mr_type mr_type,
		u32 max_num_sg,
		const struct ib_mr *mr
	),

	TP_ARGS(pd, mr_type, max_num_sg, mr),

	TP_STRUCT__entry(
		__field(u32, pd_id)
		__field(u32, mr_id)
		__field(u32, max_num_sg)
		__field(int, rc)
		__field(unsigned long, mr_type)
	),

	TP_fast_assign(
		__entry->pd_id = pd->res.id;
		if (IS_ERR(mr)) {
			/* Allocation failed: no MR id, capture the errno */
			__entry->mr_id = 0;
			__entry->rc = PTR_ERR(mr);
		} else {
			__entry->mr_id = mr->res.id;
			__entry->rc = 0;
		}
		__entry->max_num_sg = max_num_sg;
		__entry->mr_type = mr_type;
	),

	TP_printk("pd.id=%u mr.id=%u type=%s max_num_sg=%u rc=%d",
		__entry->pd_id, __entry->mr_id,
		rdma_show_ib_mr_type(__entry->mr_type),
		__entry->max_num_sg, __entry->rc)
);
337
/*
 * mr_integ_alloc - an integrity MR allocation attempt completed.
 *
 * Mirrors mr_alloc: @mr may be an ERR_PTR, in which case mr.id=0 and
 * rc=PTR_ERR(mr) are recorded; otherwise the new MR's resource id and
 * rc=0. Captures the separate data and metadata SG limits.
 */
TRACE_EVENT(mr_integ_alloc,
	TP_PROTO(
		const struct ib_pd *pd,
		u32 max_num_data_sg,
		u32 max_num_meta_sg,
		const struct ib_mr *mr
	),

	TP_ARGS(pd, max_num_data_sg, max_num_meta_sg, mr),

	TP_STRUCT__entry(
		__field(u32, pd_id)
		__field(u32, mr_id)
		__field(u32, max_num_data_sg)
		__field(u32, max_num_meta_sg)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->pd_id = pd->res.id;
		if (IS_ERR(mr)) {
			/* Allocation failed: no MR id, capture the errno */
			__entry->mr_id = 0;
			__entry->rc = PTR_ERR(mr);
		} else {
			__entry->mr_id = mr->res.id;
			__entry->rc = 0;
		}
		__entry->max_num_data_sg = max_num_data_sg;
		__entry->max_num_meta_sg = max_num_meta_sg;
	),

	TP_printk("pd.id=%u mr.id=%u max_num_data_sg=%u max_num_meta_sg=%u rc=%d",
		__entry->pd_id, __entry->mr_id, __entry->max_num_data_sg,
		__entry->max_num_meta_sg, __entry->rc)
);
373
/*
 * mr_dereg - an MR is being deregistered; records only the MR's
 * resource id.
 */
TRACE_EVENT(mr_dereg,
	TP_PROTO(
		const struct ib_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(u32, id)
	),

	TP_fast_assign(
		__entry->id = mr->res.id;
	),

	TP_printk("mr.id=%u", __entry->id)
);
391
392#endif /* _TRACE_RDMA_CORE_H */
393
394#include <trace/define_trace.h>
395