/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include "btree_iter.h"
#include "six.h"

void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);

#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void);
#else
static inline void bch2_assert_btree_nodes_not_locked(void) {}
#endif

void bch2_trans_unlock_noassert(struct btree_trans *);

static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
}

static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
{
	return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
		? &trans->c->btree_transaction_stats[trans->fn_idx]
		: NULL;
}

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
	BTREE_NODE_WRITE_LOCKED		= SIX_LOCK_write,
};

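/*
 * Each level's lock state is packed into two bits of path->nodes_locked: the
 * stored value is the six lock type + 1, i.e. 0 = unlocked, 1 = read,
 * 2 = intent, 3 = write. Worked example: nodes_locked == 0b0110 means level 0
 * is intent locked (bits 0-1 == 2) and level 1 is read locked (bits 2-3 == 1).
 */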
static inline int btree_node_locked_type(struct btree_path *path,
					 unsigned level)
{
	return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
}

static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED;
}

static inline bool btree_node_intent_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
	return btree_node_locked_type(path, level) != BTREE_NODE_UNLOCKED;
}

static inline void mark_btree_node_locked_noreset(struct btree_path *path,
						  unsigned level,
						  enum btree_node_locked_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	path->nodes_locked &= ~(3U << (level << 1));
	path->nodes_locked |= (type + 1) << (level << 1);
}

static inline void mark_btree_node_unlocked(struct btree_path *path,
					    unsigned level)
{
	EBUG_ON(btree_node_write_locked(path, level));
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
}

static inline void mark_btree_node_locked(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned level,
					  enum btree_node_locked_type type)
{
	mark_btree_node_locked_noreset(path, level, type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	path->l[level].lock_taken_time = local_clock();
#endif
}

static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
	return level < path->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

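/*
 * Example: with path->level == 1 and path->locks_want == 1, level 0 is left
 * unlocked (we only traversed through it), level 1 wants a read lock, and all
 * higher levels are unlocked; raising locks_want to 2 makes level 1 want an
 * intent lock instead.
 */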
static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
	if (level < path->level)
		return BTREE_NODE_UNLOCKED;
	if (level < path->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == path->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}

static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
					      struct btree_path *path, unsigned level)
{
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	__bch2_time_stats_update(&btree_trans_stats(trans)->lock_hold_times,
				 path->l[level].lock_taken_time,
				 local_clock());
#endif
}

/* unlock: */

static inline void btree_node_unlock(struct btree_trans *trans,
				     struct btree_path *path, unsigned level)
{
	int lock_type = btree_node_locked_type(path, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED) {
		six_unlock_type(&path->l[level].b->c.lock, lock_type);
		btree_trans_lock_hold_time_update(trans, path, level);
	}
	mark_btree_node_unlocked(path, level);
}

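/*
 * With two bits per level, __ffs()/__fls() of nodes_locked find the lowest/
 * highest set bit, and shifting right by one converts a bit position back to
 * a level: e.g. nodes_locked == 0b1100 (only level 1 locked) gives
 * __ffs() == 2, 2 >> 1 == 1.
 */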
static inline int btree_path_lowest_level_locked(struct btree_path *path)
{
	return __ffs(path->nodes_locked) >> 1;
}

static inline int btree_path_highest_level_locked(struct btree_path *path)
{
	return __fls(path->nodes_locked) >> 1;
}

static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
					    struct btree_path *path)
{
	btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);

	while (path->nodes_locked)
		btree_node_unlock(trans, path, btree_path_lowest_level_locked(path));
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock()
 * will succeed: six_unlock_write() increments the lock's sequence number, so
 * we bump each linked path's cached lock_seq to match before dropping the
 * write lock.
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
				     struct btree *b)
{
	struct btree_path *linked;
	unsigned i;

	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);

	mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);

	trans_for_each_path_with_node(trans, b, linked, i)
		linked->l[b->c.level].lock_seq++;

	six_unlock_write(&b->c.lock);
}

void bch2_btree_node_unlock_write(struct btree_trans *,
			struct btree_path *, struct btree *);

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);

/* lock: */

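/*
 * Setting trans->locking before blocking lets bch2_six_check_for_deadlock()
 * see which lock this transaction is waiting on, so the cycle detector can
 * walk the waits-for graph; it's cleared again once we've acquired the lock
 * (or aborted).
 */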
static inline int __btree_node_lock_nopath(struct btree_trans *trans,
					 struct btree_bkey_cached_common *b,
					 enum six_lock_type type,
					 bool lock_may_not_fail,
					 unsigned long ip)
{
	int ret;

	trans->lock_may_not_fail = lock_may_not_fail;
	trans->lock_must_abort	= false;
	trans->locking		= b;

	ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
				 bch2_six_check_for_deadlock, trans, ip);
	WRITE_ONCE(trans->locking, NULL);
	WRITE_ONCE(trans->locking_wait.start_time, 0);
	return ret;
}

static inline int __must_check
btree_node_lock_nopath(struct btree_trans *trans,
		       struct btree_bkey_cached_common *b,
		       enum six_lock_type type,
		       unsigned long ip)
{
	return __btree_node_lock_nopath(trans, b, type, false, ip);
}

static inline void btree_node_lock_nopath_nofail(struct btree_trans *trans,
					 struct btree_bkey_cached_common *b,
					 enum six_lock_type type)
{
	int ret = __btree_node_lock_nopath(trans, b, type, true, _THIS_IP_);

	BUG_ON(ret);
}

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators: six locks may be held multiple times by the same transaction, so
 * six_lock_increment() just takes another reference on the already-held lock
 * and can't block or deadlock:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
					     struct btree_bkey_cached_common *b,
					     unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		if (&path->l[level].b->c == b &&
		    btree_node_locked_type(path, level) >= want) {
			six_lock_increment(&b->lock, (enum six_lock_type) want);
			return true;
		}

	return false;
}

static inline int btree_node_lock(struct btree_trans *trans,
			struct btree_path *path,
			struct btree_bkey_cached_common *b,
			unsigned level,
			enum six_lock_type type,
			unsigned long ip)
{
	int ret = 0;

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (likely(six_trylock_type(&b->lock, type)) ||
	    btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
	    !(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
		path->l[b->level].lock_taken_time = local_clock();
#endif
	}

	return ret;
}

int __bch2_btree_node_lock_write(struct btree_trans *, struct btree_path *,
				 struct btree_bkey_cached_common *b, bool);

static inline int __btree_node_lock_write(struct btree_trans *trans,
					  struct btree_path *path,
					  struct btree_bkey_cached_common *b,
					  bool lock_may_not_fail)
{
	EBUG_ON(&path->l[b->level].b->c != b);
	EBUG_ON(path->l[b->level].lock_seq != six_lock_seq(&b->lock));
	EBUG_ON(!btree_node_intent_locked(path, b->level));

	/*
	 * six locks are unfair, and read locks block while a thread wants a
	 * write lock: thus, we need to tell the cycle detector we have a write
	 * lock _before_ taking the lock:
	 */
	mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);

	return likely(six_trylock_write(&b->lock))
		? 0
		: __bch2_btree_node_lock_write(trans, path, b, lock_may_not_fail);
}

static inline int __must_check
bch2_btree_node_lock_write(struct btree_trans *trans,
			   struct btree_path *path,
			   struct btree_bkey_cached_common *b)
{
	return __btree_node_lock_write(trans, path, b, false);
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *,
				       struct btree_path *,
				       struct btree_bkey_cached_common *);

/* relock: */

bool bch2_btree_path_relock_norestart(struct btree_trans *, struct btree_path *);
int __bch2_btree_path_relock(struct btree_trans *,
			     struct btree_path *, unsigned long);

static inline int bch2_btree_path_relock(struct btree_trans *trans,
				struct btree_path *path, unsigned long trace_ip)
{
	return btree_node_locked(path, path->level)
		? 0
		: __bch2_btree_path_relock(trans, path, trace_ip);
}

bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);

static inline bool bch2_btree_node_relock(struct btree_trans *trans,
					  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, true));
}

static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
						  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, false));
}

/* upgrade */

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
			       struct btree_path *, unsigned,
			       struct get_locks_fail *);

bool __bch2_btree_path_upgrade(struct btree_trans *,
			       struct btree_path *, unsigned,
			       struct get_locks_fail *);

static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned new_locks_want)
{
	struct get_locks_fail f;
	unsigned old_locks_want = path->locks_want;

	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	if (path->locks_want < new_locks_want
	    ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
	    : path->uptodate == BTREE_ITER_UPTODATE)
		return 0;

	trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
			old_locks_want, new_locks_want, &f);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
}
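
/*
 * Minimal usage sketch (hypothetical caller): on failure this returns a
 * transaction restart error, which must be propagated up to where the
 * transaction can be restarted:
 *
 *	ret = bch2_btree_path_upgrade(trans, path, level + 1);
 *	if (ret)	// -BCH_ERR_transaction_restart_upgrade
 *		return ret;
 */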

/* misc: */

static inline void btree_path_set_should_be_locked(struct btree_path *path)
{
	EBUG_ON(!btree_node_locked(path, path->level));
	EBUG_ON(path->uptodate);

	path->should_be_locked = true;
}

static inline void __btree_path_set_level_up(struct btree_trans *trans,
				      struct btree_path *path,
				      unsigned l)
{
	btree_node_unlock(trans, path, l);
	path->l[l].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
}

static inline void btree_path_set_level_up(struct btree_trans *trans,
				    struct btree_path *path)
{
	__btree_path_set_level_up(trans, path, path->level++);
	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
}

/* debug */

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
				struct btree_path *,
				struct btree_bkey_cached_common *b,
				unsigned);

int bch2_check_for_deadlock(struct btree_trans *, struct printbuf *);

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_path_verify_locks(struct btree_path *);
void bch2_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
static inline void bch2_trans_verify_locks(struct btree_trans *trans) {}
#endif

#endif /* _BCACHEFS_BTREE_LOCKING_H */