1/* SPDX-License-Identifier: GPL-2.0 */
2#undef TRACE_SYSTEM
3#define TRACE_SYSTEM bcachefs
4
5#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_BCACHEFS_H
7
8#include <linux/tracepoint.h>
9
/*
 * Helpers for embedding a struct bpos (inode:offset:snapshot) in a trace
 * event: TRACE_BPOS_entries() declares the three fields with a common
 * name prefix, TRACE_BPOS_assign() copies them from @src into __entry.
 */
#define TRACE_BPOS_entries(name)				\
	__field(u64,			name##_inode	)	\
	__field(u64,			name##_offset	)	\
	__field(u32,			name##_snapshot	)

#define TRACE_BPOS_assign(dst, src)				\
	__entry->dst##_inode		= (src).inode;		\
	__entry->dst##_offset		= (src).offset;		\
	__entry->dst##_snapshot		= (src).snapshot
19
/* Event class: log a single btree position as inode:offset:snapshot. */
DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);
34
/*
 * Event class: filesystem dev plus a caller-preformatted string.  The
 * format prints "major,minor" and then the string on the following line.
 */
DECLARE_EVENT_CLASS(fs_str,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__assign_str(str, str);
	),

	TP_printk("%d,%d\n%s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
);
51
/*
 * Event class: btree transaction context (dev, transaction fn, caller ip)
 * plus a caller-preformatted string.
 */
DECLARE_EVENT_CLASS(trans_str,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__assign_str(str, str);
	),

	TP_printk("%d,%d %s %pS %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
);
74
/* As trans_str, but without the caller instruction pointer. */
DECLARE_EVENT_CLASS(trans_str_nocaller,
	TP_PROTO(struct btree_trans *trans, const char *str),
	TP_ARGS(trans, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(str, str);
	),

	TP_printk("%d,%d %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, __get_str(str))
);
95
/*
 * Event class: a btree node identified by level, btree id and key position,
 * in contexts where no btree_trans is available.
 */
DECLARE_EVENT_CLASS(btree_node_nofs,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
120
/* As btree_node_nofs, but also records the transaction fn name. */
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %s %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn,
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
147
/* Event class: filesystem dev only. */
DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
	),

	TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);

/* Event class: filesystem dev plus the transaction fn name. */
DECLARE_EVENT_CLASS(btree_trans,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn)
);
179
/*
 * Event class: block-layer view of an I/O — device, start sector, length
 * in sectors and the rwbs op/flags string.  dev is 0 when the bio has no
 * bdev attached yet.
 */
DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);
202
203/* super-io.c: */
/* Superblock write, recording the caller's instruction pointer. */
TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t,		dev	)
		__field(unsigned long,	ip	)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->ip		= ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *) __entry->ip)
);
222
223/* io.c: */
224
/* bio-class instances for the read path. */
DEFINE_EVENT(bio, read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Read not promoted; logs the error as a string via bch2_err_str(). */
TRACE_EVENT(read_nopromote,
	TP_PROTO(struct bch_fs *c, int ret),
	TP_ARGS(c, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__array(char,		ret, 32		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%d,%d ret %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ret)
);

DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
268
269/* Journal */
270
/* Journal events: instances of the bch_fs, fs_str and bio classes. */
DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(fs_str, journal_entry_full,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, journal_entry_close,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
290
/*
 * Start of a journal reclaim pass: records the reclaim targets and the
 * dirty/total counts of the btree cache and btree key cache.
 */
TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(bool,		direct			)
		__field(bool,		kicked			)
		__field(u64,		min_nr			)
		__field(u64,		min_key_cache		)
		__field(u64,		btree_cache_dirty	)
		__field(u64,		btree_cache_total	)
		__field(u64,		btree_key_cache_dirty	)
		__field(u64,		btree_key_cache_total	)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->direct			= direct;
		__entry->kicked			= kicked;
		__entry->min_nr			= min_nr;
		__entry->min_key_cache		= min_key_cache;
		__entry->btree_cache_dirty	= btree_cache_dirty;
		__entry->btree_cache_total	= btree_cache_total;
		__entry->btree_key_cache_dirty	= btree_key_cache_dirty;
		__entry->btree_key_cache_total	= btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);
335
/* End of a journal reclaim pass: number of entries flushed. */
TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		nr_flushed		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->nr_flushed	= nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);
354
355/* bset.c: */
356
/* Failed to pack a bpos; logs the position (bpos class). */
DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);
361
362/* Btree cache: */
363
/* Shrinker scan of the btree node cache: requested, freeable, freed. */
TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long,	nr_to_scan		)
		__field(long,	can_free		)
		__field(long,	ret			)
	),

	TP_fast_assign(
		__entry->nr_to_scan	= nr_to_scan;
		__entry->can_free	= can_free;
		__entry->ret		= ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);
383
/* Btree node cache reap and cannibalize-lock lifecycle events. */
DEFINE_EVENT(btree_node_nofs, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_unlock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);
408
409/* Btree */
410
DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

/* Btree node write: node type plus bytes/sectors written. */
TRACE_EVENT(btree_node_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum btree_node_type,	type)
		__field(unsigned,	bytes			)
		__field(unsigned,	sectors			)
	),

	TP_fast_assign(
		__entry->type	= btree_node_type(b);
		__entry->bytes	= bytes;
		__entry->sectors = sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type , __entry->bytes, __entry->sectors)
);

DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
445
/*
 * Failed to get a btree node reserve: records how many nodes were
 * required and the error (as a string via bch2_err_str()).
 */
TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 size_t required,
		 int ret),
	TP_ARGS(trans_fn, caller_ip, required, ret),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(size_t,			required	)
		__array(char,			ret, 32		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->required	= required;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%s %pS required %zu ret %s",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->required,
		  __entry->ret)
);
473
/* Btree node update events (btree_node class). */
DEFINE_EVENT(btree_node, btree_node_compact,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_merge,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_split,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_rewrite,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_set_root,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
498
499TRACE_EVENT(btree_path_relock_fail,
500	TP_PROTO(struct btree_trans *trans,
501		 unsigned long caller_ip,
502		 struct btree_path *path,
503		 unsigned level),
504	TP_ARGS(trans, caller_ip, path, level),
505
506	TP_STRUCT__entry(
507		__array(char,			trans_fn, 32	)
508		__field(unsigned long,		caller_ip	)
509		__field(u8,			btree_id	)
510		__field(u8,			level		)
511		TRACE_BPOS_entries(pos)
512		__array(char,			node, 24	)
513		__field(u8,			self_read_count	)
514		__field(u8,			self_intent_count)
515		__field(u8,			read_count	)
516		__field(u8,			intent_count	)
517		__field(u32,			iter_lock_seq	)
518		__field(u32,			node_lock_seq	)
519	),
520
521	TP_fast_assign(
522		struct btree *b = btree_path_node(path, level);
523		struct six_lock_count c;
524
525		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
526		__entry->caller_ip		= caller_ip;
527		__entry->btree_id		= path->btree_id;
528		__entry->level			= path->level;
529		TRACE_BPOS_assign(pos, path->pos);
530
531		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
532		__entry->self_read_count	= c.n[SIX_LOCK_read];
533		__entry->self_intent_count	= c.n[SIX_LOCK_intent];
534
535		if (IS_ERR(b)) {
536			strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
537		} else {
538			c = six_lock_counts(&path->l[level].b->c.lock);
539			__entry->read_count	= c.n[SIX_LOCK_read];
540			__entry->intent_count	= c.n[SIX_LOCK_intent];
541			scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
542		}
543		__entry->iter_lock_seq		= path->l[level].lock_seq;
544		__entry->node_lock_seq		= is_btree_node(path, level)
545			? six_lock_seq(&path->l[level].b->c.lock)
546			: 0;
547	),
548
549	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
550		  __entry->trans_fn,
551		  (void *) __entry->caller_ip,
552		  bch2_btree_id_str(__entry->btree_id),
553		  __entry->pos_inode,
554		  __entry->pos_offset,
555		  __entry->pos_snapshot,
556		  __entry->level,
557		  __entry->node,
558		  __entry->self_read_count,
559		  __entry->self_intent_count,
560		  __entry->read_count,
561		  __entry->intent_count,
562		  __entry->iter_lock_seq,
563		  __entry->node_lock_seq)
564);
565
566TRACE_EVENT(btree_path_upgrade_fail,
567	TP_PROTO(struct btree_trans *trans,
568		 unsigned long caller_ip,
569		 struct btree_path *path,
570		 unsigned level),
571	TP_ARGS(trans, caller_ip, path, level),
572
573	TP_STRUCT__entry(
574		__array(char,			trans_fn, 32	)
575		__field(unsigned long,		caller_ip	)
576		__field(u8,			btree_id	)
577		__field(u8,			level		)
578		TRACE_BPOS_entries(pos)
579		__field(u8,			locked		)
580		__field(u8,			self_read_count	)
581		__field(u8,			self_intent_count)
582		__field(u8,			read_count	)
583		__field(u8,			intent_count	)
584		__field(u32,			iter_lock_seq	)
585		__field(u32,			node_lock_seq	)
586	),
587
588	TP_fast_assign(
589		struct six_lock_count c;
590
591		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
592		__entry->caller_ip		= caller_ip;
593		__entry->btree_id		= path->btree_id;
594		__entry->level			= level;
595		TRACE_BPOS_assign(pos, path->pos);
596		__entry->locked			= btree_node_locked(path, level);
597
598		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
599		__entry->self_read_count	= c.n[SIX_LOCK_read];
600		__entry->self_intent_count	= c.n[SIX_LOCK_intent];
601		c = six_lock_counts(&path->l[level].b->c.lock);
602		__entry->read_count		= c.n[SIX_LOCK_read];
603		__entry->intent_count		= c.n[SIX_LOCK_intent];
604		__entry->iter_lock_seq		= path->l[level].lock_seq;
605		__entry->node_lock_seq		= is_btree_node(path, level)
606			? six_lock_seq(&path->l[level].b->c.lock)
607			: 0;
608	),
609
610	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
611		  __entry->trans_fn,
612		  (void *) __entry->caller_ip,
613		  bch2_btree_id_str(__entry->btree_id),
614		  __entry->pos_inode,
615		  __entry->pos_offset,
616		  __entry->pos_snapshot,
617		  __entry->level,
618		  __entry->locked,
619		  __entry->self_read_count,
620		  __entry->self_intent_count,
621		  __entry->read_count,
622		  __entry->intent_count,
623		  __entry->iter_lock_seq,
624		  __entry->node_lock_seq)
625);
626
627/* Garbage collection */
628
/* Start/end of a gc_gens pass (bch_fs class). */
DEFINE_EVENT(bch_fs, gc_gens_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_gens_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);
638
639/* Allocator */
640
/*
 * Event class for bucket allocation attempts: which reserve was used,
 * free/available counts, copygc wait state, the skip counters from
 * struct bucket_alloc_state, and the error string (empty on success).
 */
DECLARE_EVENT_CLASS(bucket_alloc,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err),

	TP_STRUCT__entry(
		__field(u8,			dev			)
		__array(char,	reserve,	16			)
		__field(u64,			bucket	)
		__field(u64,			free			)
		__field(u64,			avail			)
		__field(u64,			copygc_wait_amount	)
		__field(s64,			copygc_waiting_for	)
		__field(u64,			seen			)
		__field(u64,			open			)
		__field(u64,			need_journal_commit	)
		__field(u64,			nouse			)
		__field(bool,			nonblocking		)
		__field(u64,			nocow			)
		__array(char,			err,	32		)
	),

	TP_fast_assign(
		__entry->dev		= ca->dev_idx;
		strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
		__entry->bucket		= bucket;
		__entry->free		= free;
		__entry->avail		= avail;
		__entry->copygc_wait_amount	= copygc_wait_amount;
		__entry->copygc_waiting_for	= copygc_waiting_for;
		__entry->seen		= s->buckets_seen;
		__entry->open		= s->skipped_open;
		__entry->need_journal_commit = s->skipped_need_journal_commit;
		__entry->nouse		= s->skipped_nouse;
		__entry->nonblocking	= nonblocking;
		__entry->nocow		= s->skipped_nocow;
		strscpy(__entry->err, err, sizeof(__entry->err));
	),

	TP_printk("reserve %s bucket %u:%llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
		  __entry->reserve,
		  __entry->dev,
		  __entry->bucket,
		  __entry->free,
		  __entry->avail,
		  __entry->copygc_wait_amount,
		  __entry->copygc_waiting_for,
		  __entry->seen,
		  __entry->open,
		  __entry->need_journal_commit,
		  __entry->nouse,
		  __entry->nocow,
		  __entry->nonblocking,
		  __entry->err)
);
705
/* Successful and failed bucket allocations (bucket_alloc class). */
DEFINE_EVENT(bucket_alloc, bucket_alloc,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err)
);

DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err)
);
735
736TRACE_EVENT(discard_buckets,
737	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
738		 u64 need_journal_commit, u64 discarded, const char *err),
739	TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
740
741	TP_STRUCT__entry(
742		__field(dev_t,		dev			)
743		__field(u64,		seen			)
744		__field(u64,		open			)
745		__field(u64,		need_journal_commit	)
746		__field(u64,		discarded		)
747		__array(char,		err,	16		)
748	),
749
750	TP_fast_assign(
751		__entry->dev			= c->dev;
752		__entry->seen			= seen;
753		__entry->open			= open;
754		__entry->need_journal_commit	= need_journal_commit;
755		__entry->discarded		= discarded;
756		strscpy(__entry->err, err, sizeof(__entry->err));
757	),
758
759	TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
760		  MAJOR(__entry->dev), MINOR(__entry->dev),
761		  __entry->seen,
762		  __entry->open,
763		  __entry->need_journal_commit,
764		  __entry->discarded,
765		  __entry->err)
766);
767
/* Invalidated a bucket: member device index, bucket nr, cached sectors. */
TRACE_EVENT(bucket_invalidate,
	TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
	TP_ARGS(c, dev, bucket, sectors),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u32,		sectors			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= dev;
		__entry->sectors	= sectors;
		__entry->bucket		= bucket;
	),

	TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket,
		  __entry->sectors)
);
791
792/* Moving IO */
793
/*
 * Evacuating a bucket; @bucket is a bpos whose inode is the member device
 * index and whose offset is the bucket number.
 */
TRACE_EVENT(bucket_evacuate,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket),
	TP_ARGS(c, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= bucket->inode;
		__entry->bucket		= bucket->offset;
	),

	TP_printk("%d:%d %u:%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket)
);
814
/* Data-move extent lifecycle events (fs_str class). */
DEFINE_EVENT(fs_str, move_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_read,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_write,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_finish,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_start_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
844
845TRACE_EVENT(move_data,
846	TP_PROTO(struct bch_fs *c,
847		 struct bch_move_stats *stats),
848	TP_ARGS(c, stats),
849
850	TP_STRUCT__entry(
851		__field(dev_t,		dev		)
852		__field(u64,		keys_moved	)
853		__field(u64,		keys_raced	)
854		__field(u64,		sectors_seen	)
855		__field(u64,		sectors_moved	)
856		__field(u64,		sectors_raced	)
857	),
858
859	TP_fast_assign(
860		__entry->dev		= c->dev;
861		__entry->keys_moved	= atomic64_read(&stats->keys_moved);
862		__entry->keys_raced	= atomic64_read(&stats->keys_raced);
863		__entry->sectors_seen	= atomic64_read(&stats->sectors_seen);
864		__entry->sectors_moved	= atomic64_read(&stats->sectors_moved);
865		__entry->sectors_raced	= atomic64_read(&stats->sectors_raced);
866	),
867
868	TP_printk("%d,%d keys moved %llu raced %llu"
869		  "sectors seen %llu moved %llu raced %llu",
870		  MAJOR(__entry->dev), MINOR(__entry->dev),
871		  __entry->keys_moved,
872		  __entry->keys_raced,
873		  __entry->sectors_seen,
874		  __entry->sectors_moved,
875		  __entry->sectors_raced)
876);
877
/*
 * Result of evacuating one bucket: sectors moved out of bucket_size,
 * the bucket's fragmentation, and the return code.
 */
TRACE_EVENT(evacuate_bucket,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket,
		 unsigned sectors, unsigned bucket_size,
		 u64 fragmentation, int ret),
	TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(u64,		member		)
		__field(u64,		bucket		)
		__field(u32,		sectors		)
		__field(u32,		bucket_size	)
		__field(u64,		fragmentation	)
		__field(int,		ret		)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->member			= bucket->inode;
		__entry->bucket			= bucket->offset;
		__entry->sectors		= sectors;
		__entry->bucket_size		= bucket_size;
		__entry->fragmentation		= fragmentation;
		__entry->ret			= ret;
	),

	TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->member, __entry->bucket,
		  __entry->sectors, __entry->bucket_size,
		  __entry->fragmentation, __entry->ret)
);
910
911TRACE_EVENT(copygc,
912	TP_PROTO(struct bch_fs *c,
913		 u64 sectors_moved, u64 sectors_not_moved,
914		 u64 buckets_moved, u64 buckets_not_moved),
915	TP_ARGS(c,
916		sectors_moved, sectors_not_moved,
917		buckets_moved, buckets_not_moved),
918
919	TP_STRUCT__entry(
920		__field(dev_t,		dev			)
921		__field(u64,		sectors_moved		)
922		__field(u64,		sectors_not_moved	)
923		__field(u64,		buckets_moved		)
924		__field(u64,		buckets_not_moved	)
925	),
926
927	TP_fast_assign(
928		__entry->dev			= c->dev;
929		__entry->sectors_moved		= sectors_moved;
930		__entry->sectors_not_moved	= sectors_not_moved;
931		__entry->buckets_moved		= buckets_moved;
932		__entry->buckets_not_moved = buckets_moved;
933	),
934
935	TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
936		  MAJOR(__entry->dev), MINOR(__entry->dev),
937		  __entry->sectors_moved, __entry->sectors_not_moved,
938		  __entry->buckets_moved, __entry->buckets_not_moved)
939);
940
941TRACE_EVENT(copygc_wait,
942	TP_PROTO(struct bch_fs *c,
943		 u64 wait_amount, u64 until),
944	TP_ARGS(c, wait_amount, until),
945
946	TP_STRUCT__entry(
947		__field(dev_t,		dev			)
948		__field(u64,		wait_amount		)
949		__field(u64,		until			)
950	),
951
952	TP_fast_assign(
953		__entry->dev		= c->dev;
954		__entry->wait_amount	= wait_amount;
955		__entry->until		= until;
956	),
957
958	TP_printk("%d,%u waiting for %llu sectors until %llu",
959		  MAJOR(__entry->dev), MINOR(__entry->dev),
960		  __entry->wait_amount, __entry->until)
961);
962
963/* btree transactions: */
964
/* Event class: transaction fn name plus the caller instruction pointer. */
DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
982
/* Transaction commit / injected-restart events (transaction_event class). */
DEFINE_EVENT(transaction_event,	transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event,	trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
994
/*
 * Transaction restarted after losing a race on a node split: records the
 * node's level, how much of it is written, and remaining u64s capacity.
 */
TRACE_EVENT(trans_restart_split_race,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			level		)
		__field(u16,			written		)
		__field(u16,			blocks		)
		__field(u16,			u64s_remaining	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->level		= b->c.level;
		__entry->written	= b->written;
		__entry->blocks		= btree_blocks(trans->c);
		__entry->u64s_remaining	= bch2_btree_keys_u64s_remaining(b);
	),

	TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->level,
		  __entry->written, __entry->blocks,
		  __entry->u64s_remaining)
);
1025
/* Transaction blocked on journal reclaim (transaction_event class). */
DEFINE_EVENT(transaction_event,	trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1031
/*
 * Transaction restarted getting a journal pre-reservation; records the
 * flags passed to the reservation attempt.
 */
TRACE_EVENT(trans_restart_journal_preres_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned flags),
	TP_ARGS(trans, caller_ip, flags),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		flags		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->flags			= flags;
	),

	TP_printk("%s %pS %x", __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->flags)
);
1054
/* Misc transaction restart events. */
DEFINE_EVENT(transaction_event,	trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event,	trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event,	trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Restart due to too many iterators; @paths is a preformatted dump. */
DEFINE_EVENT(trans_str, trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 const char *paths),
	TP_ARGS(trans, caller_ip, paths)
);
1079
/*
 * Event class for transaction restarts attributable to a specific btree
 * path: records the path's btree id and position.
 */
DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1108
/* Path-specific restart events (transaction_restart_iter class). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1122
/*
 * Transaction restarted upgrading locks: records old/new locks_want, the
 * level that failed (from struct get_locks_fail) and the path vs node
 * lock sequence numbers at that level.
 */
TRACE_EVENT(trans_restart_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want,
		 unsigned new_locks_want,
		 struct get_locks_fail *f),
	TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			old_locks_want	)
		__field(u8,			new_locks_want	)
		__field(u8,			level		)
		__field(u32,			path_seq	)
		__field(u32,			node_seq	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= new_locks_want;
		__entry->level			= f->l;
		__entry->path_seq		= path->l[f->l].lock_seq;
		/* node seq is 0 when there was no node at the failing level */
		__entry->node_seq		= IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  __entry->level,
		  __entry->path_seq,
		  __entry->node_seq)
);
1169
/* Restart because relocking failed; @str carries a preformatted description. */
DEFINE_EVENT(trans_str,	trans_restart_relock,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str)
);
1174
/* Restart: relock failed while advancing to the next btree node. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_next_node,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1181
/* Restart: couldn't relock a parent node needed to fill in a child. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1188
/* Restart: relock failed after a node fill completed. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_after_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1195
/* Restart: upgrading locks on a key cache entry failed. */
DEFINE_EVENT(transaction_event,	trans_restart_key_cache_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1201
/* Restart: relock failed while filling a key cache entry. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_key_cache_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1208
/* Restart: relocking a single btree path failed. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1215
/* Restart: relocking a path with intent locks failed. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path_intent,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1222
/* Restart raised during btree path traversal. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_traverse,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1229
/* Restart so an allocation can be retried outside of held btree locks. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_memory_allocation_failure,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1236
/*
 * Restart because proceeding would deadlock; @cycle is a preformatted
 * description of the detected lock cycle.  Uses the nocaller class since the
 * cycle text already identifies the participants.
 */
DEFINE_EVENT(trans_str_nocaller, trans_restart_would_deadlock,
	TP_PROTO(struct btree_trans *trans,
		 const char *cycle),
	TP_ARGS(trans, cycle)
);
1242
/* Deadlock-cycle search hit its recursion limit; restart conservatively. */
DEFINE_EVENT(transaction_event,	trans_restart_would_deadlock_recursion_limit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1248
/*
 * Restart: taking write locks at commit time would deadlock.  Only the
 * transaction fn name is recorded — no caller ip is available at this point.
 */
TRACE_EVENT(trans_restart_would_deadlock_write,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%s", __entry->trans_fn)
);
1263
/*
 * Restart because the transaction's preallocated memory was reallocated
 * (grown to @bytes), invalidating pointers into the old buffer.
 */
TRACE_EVENT(trans_restart_mem_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned long bytes),
	TP_ARGS(trans, caller_ip, bytes),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned long,		bytes		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->bytes		= bytes;
	),

	TP_printk("%s %pS bytes %lu",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->bytes)
);
1287
/*
 * Restart because a key cache entry was reallocated to hold a larger key
 * (old_u64s -> new_u64s, sizes in units of u64).
 *
 * NOTE(review): btree_id is declared as enum btree_id here, while the sibling
 * events store it as u8.  Changing it would alter the trace record layout, so
 * it is left as-is; confirm upstream before normalizing.
 */
TRACE_EVENT(trans_restart_key_cache_key_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_u64s,
		 unsigned new_u64s),
	TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(enum btree_id,		btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u32,			old_u64s	)
		__field(u32,			new_u64s	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;

		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->old_u64s	= old_u64s;
		__entry->new_u64s	= new_u64s;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_u64s,
		  __entry->new_u64s)
);
1325
/*
 * A btree_path's locks were downgraded: logs old_locks_want (passed in) vs.
 * the path's new locks_want, plus the path's btree and position.
 */
TRACE_EVENT(path_downgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want),
	TP_ARGS(trans, caller_ip, path, old_locks_want),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		old_locks_want	)
		__field(unsigned,		new_locks_want	)
		__field(unsigned,		btree		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->old_locks_want		= old_locks_want;
		/* path->locks_want has already been updated by the downgrade */
		__entry->new_locks_want		= path->locks_want;
		__entry->btree			= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  bch2_btree_id_str(__entry->btree),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1361
/* Restart so the btree write buffer can be flushed. */
DEFINE_EVENT(transaction_event,	trans_restart_write_buffer_flush,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1367
/*
 * Write buffer flush stats: @nr entries processed out of a buffer of @size,
 * @skipped skipped, @fast taking the fast path.
 *
 * NOTE(review): @trans is not recorded in the entry — presumably kept in the
 * prototype for call-site uniformity with the other trace calls.
 */
TRACE_EVENT(write_buffer_flush,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
	TP_ARGS(trans, nr, skipped, fast, size),

	TP_STRUCT__entry(
		__field(size_t,		nr		)
		__field(size_t,		skipped		)
		__field(size_t,		fast		)
		__field(size_t,		size		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->skipped = skipped;
		__entry->fast	= fast;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu skipped %zu fast %zu",
		  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);
1389
/* A synchronous write buffer flush was requested; logs trans fn and caller. */
TRACE_EVENT(write_buffer_flush_sync,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
1406
/*
 * Write buffer flush took the slowpath for @slowpath of @total entries.
 * NOTE(review): @trans is unused in the entry — presumably kept for call-site
 * uniformity.
 */
TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
	TP_ARGS(trans, slowpath, total),

	TP_STRUCT__entry(
		__field(size_t,		slowpath	)
		__field(size_t,		total		)
	),

	TP_fast_assign(
		__entry->slowpath	= slowpath;
		__entry->total		= total;
	),

	TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
);
1423
/* Rebalance processed an extent; @str is its preformatted description. */
DEFINE_EVENT(fs_str, rebalance_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
1428
/* A data update (move/rewrite) was performed; @str describes it. */
DEFINE_EVENT(fs_str, data_update,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
1433
/*
 * A private bcachefs error code (@bch_err) was downcast to a standard errno
 * (@std_err) before being returned at @ip.  All three are formatted to fixed
 * 32-byte strings at event time (names via bch2_err_str(), ip via %ps) so no
 * symbol resolution is needed when reading the trace.
 */
TRACE_EVENT(error_downcast,
	TP_PROTO(int bch_err, int std_err, unsigned long ip),
	TP_ARGS(bch_err, std_err, ip),

	TP_STRUCT__entry(
		__array(char,		bch_err, 32		)
		__array(char,		std_err, 32		)
		__array(char,		ip, 32			)
	),

	TP_fast_assign(
		strscpy(__entry->bch_err, bch2_err_str(bch_err), sizeof(__entry->bch_err));
		strscpy(__entry->std_err, bch2_err_str(std_err), sizeof(__entry->std_err));
		snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
	),

	TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
);
1452
1453#endif /* _TRACE_BCACHEFS_H */
1454
1455/* This part must be outside protection */
1456#undef TRACE_INCLUDE_PATH
1457#define TRACE_INCLUDE_PATH ../../fs/bcachefs
1458
1459#undef TRACE_INCLUDE_FILE
1460#define TRACE_INCLUDE_FILE trace
1461
1462#include <trace/define_trace.h>
1463