/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_EXTENTS_H
#define _BCACHEFS_EXTENTS_H

#include "bcachefs.h"
#include "bkey.h"
#include "extents_types.h"

struct bch_fs;
struct btree_trans;
enum bkey_invalid_flags;

/* extent entries: */

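/* Returns a pointer one past the last extent entry in the key's value: */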
#define extent_entry_last(_e)						\
	((typeof(&(_e).v->start[0])) bkey_val_end(_e))

#define entry_to_ptr(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_ptr(_entry));		\
									\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const struct bch_extent_ptr *) (_entry),		\
		(struct bch_extent_ptr *) (_entry));			\
})

/* downcast, preserves const */
#define to_entry(_entry)						\
({									\
	BUILD_BUG_ON(!type_is(_entry, union bch_extent_crc *) &&	\
		     !type_is(_entry, struct bch_extent_ptr *) &&	\
		     !type_is(_entry, struct bch_extent_stripe_ptr *));	\
									\
	__builtin_choose_expr(						\
		(type_is_exact(_entry, const union bch_extent_crc *) ||	\
		 type_is_exact(_entry, const struct bch_extent_ptr *) ||\
		 type_is_exact(_entry, const struct bch_extent_stripe_ptr *)),\
		(const union bch_extent_entry *) (_entry),		\
		(union bch_extent_entry *) (_entry));			\
})

#define extent_entry_next(_entry)					\
	((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))

#define extent_entry_next_safe(_entry, _end)				\
	(likely(__extent_entry_type(_entry) < BCH_EXTENT_ENTRY_MAX)	\
	 ? extent_entry_next(_entry)					\
	 : _end)

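/*
 * An extent entry's type is encoded as a single set bit in its type field;
 * __ffs() recovers the enum bch_extent_entry_type value. A zero type field
 * maps to BCH_EXTENT_ENTRY_MAX, which the _safe iterators treat as end:
 */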
static inline unsigned
__extent_entry_type(const union bch_extent_entry *e)
{
	return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;
}

static inline enum bch_extent_entry_type
extent_entry_type(const union bch_extent_entry *e)
{
	int ret = __ffs(e->type);

	EBUG_ON(ret < 0 || ret >= BCH_EXTENT_ENTRY_MAX);

	return ret;
}

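/* Size of an entry in bytes, derived from its type via the x-macro list: */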
static inline size_t extent_entry_bytes(const union bch_extent_entry *entry)
{
	switch (extent_entry_type(entry)) {
#define x(f, n)						\
	case BCH_EXTENT_ENTRY_##f:			\
		return sizeof(struct bch_extent_##f);
	BCH_EXTENT_ENTRY_TYPES()
#undef x
	default:
		BUG();
	}
}

static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
{
	return extent_entry_bytes(entry) / sizeof(u64);
}

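/*
 * Insert @new at @dst within @k's value: existing entries from @dst onwards
 * are shifted up to make room, @new is copied in, and the key's size in u64s
 * is bumped accordingly:
 */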
static inline void __extent_entry_insert(struct bkey_i *k,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));

	memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
			      dst, (u64 *) end - (u64 *) dst);
	k->k.u64s += extent_entry_u64s(new);
	memcpy_u64s_small(dst, new, extent_entry_u64s(new));
}

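/*
 * Remove @entry from @k's value by shifting the following entries down and
 * shrinking the key:
 */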
static inline void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
{
	union bch_extent_entry *next = extent_entry_next(entry);

	/* stripes have ptrs, but their layout doesn't work with this code */
	BUG_ON(k.k->type == KEY_TYPE_stripe);

	memmove_u64s_down(entry, next,
			  (u64 *) bkey_val_end(k) - (u64 *) next);
	k.k->u64s -= (u64 *) next - (u64 *) entry;
}

static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
{
	return __extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
}

static inline bool extent_entry_is_stripe_ptr(const union bch_extent_entry *e)
{
	return __extent_entry_type(e) == BCH_EXTENT_ENTRY_stripe_ptr;
}

static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
{
	switch (__extent_entry_type(e)) {
	case BCH_EXTENT_ENTRY_crc32:
	case BCH_EXTENT_ENTRY_crc64:
	case BCH_EXTENT_ENTRY_crc128:
		return true;
	default:
		return false;
	}
}

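/*
 * Checksum entries come in three packed formats (crc32/crc64/crc128); this
 * union lets them be handled through a single pointer type before unpacking:
 */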
union bch_extent_crc {
	u8				type;
	struct bch_extent_crc32		crc32;
	struct bch_extent_crc64		crc64;
	struct bch_extent_crc128	crc128;
};

#define __entry_to_crc(_entry)						\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const union bch_extent_crc *) (_entry),		\
		(union bch_extent_crc *) (_entry))

#define entry_to_crc(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_crc(_entry));		\
									\
	__entry_to_crc(_entry);						\
})

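/*
 * Unpack any of the three packed crc formats into the common in-memory
 * representation; a NULL @crc yields the default "no checksum, uncompressed"
 * state, sized from the key itself:
 */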
static inline struct bch_extent_crc_unpacked
bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
{
#define common_fields(_crc)						\
		.csum_type		= _crc.csum_type,		\
		.compression_type	= _crc.compression_type,	\
		.compressed_size	= _crc._compressed_size + 1,	\
		.uncompressed_size	= _crc._uncompressed_size + 1,	\
		.offset			= _crc.offset,			\
		.live_size		= k->size

	if (!crc)
		return (struct bch_extent_crc_unpacked) {
			.compressed_size	= k->size,
			.uncompressed_size	= k->size,
			.live_size		= k->size,
		};

	switch (extent_entry_type(to_entry(crc))) {
	case BCH_EXTENT_ENTRY_crc32: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc32),
		};

		*((__le32 *) &ret.csum.lo) = (__le32 __force) crc->crc32.csum;
		return ret;
	}
	case BCH_EXTENT_ENTRY_crc64: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc64),
			.nonce			= crc->crc64.nonce,
			.csum.lo		= (__force __le64) crc->crc64.csum_lo,
		};

		*((__le16 *) &ret.csum.hi) = (__le16 __force) crc->crc64.csum_hi;

		return ret;
	}
	case BCH_EXTENT_ENTRY_crc128: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc128),
			.nonce			= crc->crc128.nonce,
			.csum			= crc->crc128.csum,
		};

		return ret;
	}
	default:
		BUG();
	}
#undef common_fields
}

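/*
 * crc_is_compressed: the data was stored compressed (compression type is
 * neither none nor incompressible); crc_is_encoded: the data has a checksum
 * and/or is compressed:
 */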
static inline bool crc_is_compressed(struct bch_extent_crc_unpacked crc)
{
	return (crc.compression_type != BCH_COMPRESSION_TYPE_none &&
		crc.compression_type != BCH_COMPRESSION_TYPE_incompressible);
}

static inline bool crc_is_encoded(struct bch_extent_crc_unpacked crc)
{
	return crc.csum_type != BCH_CSUM_none || crc_is_compressed(crc);
}

/* bkey_ptrs: generically over any key type that has ptrs */

struct bkey_ptrs_c {
	const union bch_extent_entry	*start;
	const union bch_extent_entry	*end;
};

struct bkey_ptrs {
	union bch_extent_entry	*start;
	union bch_extent_entry	*end;
};

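/*
 * Return the [start, end) range of extent entries for any key type that has
 * pointers; key types without pointers get an empty { NULL, NULL } range:
 */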
static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr: {
		struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);

		return (struct bkey_ptrs_c) {
			to_entry(&e.v->start[0]),
			to_entry(extent_entry_last(e))
		};
	}
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);

		return (struct bkey_ptrs_c) {
			e.v->start,
			extent_entry_last(e)
		};
	}
	case KEY_TYPE_stripe: {
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

		return (struct bkey_ptrs_c) {
			to_entry(&s.v->ptrs[0]),
			to_entry(&s.v->ptrs[s.v->nr_blocks]),
		};
	}
	case KEY_TYPE_reflink_v: {
		struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);

		return (struct bkey_ptrs_c) {
			r.v->start,
			bkey_val_end(r),
		};
	}
	case KEY_TYPE_btree_ptr_v2: {
		struct bkey_s_c_btree_ptr_v2 e = bkey_s_c_to_btree_ptr_v2(k);

		return (struct bkey_ptrs_c) {
			to_entry(&e.v->start[0]),
			to_entry(extent_entry_last(e))
		};
	}
	default:
		return (struct bkey_ptrs_c) { NULL, NULL };
	}
}

static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
{
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k.s_c);

	return (struct bkey_ptrs) {
		(void *) p.start,
		(void *) p.end
	};
}

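/*
 * Iteration over extent entries and pointers. Typical usage:
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *
 *	bkey_for_each_ptr(ptrs, ptr)
 *		if (ptr->dev == dev)
 *			...
 */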
#define __bkey_extent_entry_for_each_from(_start, _end, _entry)		\
	for ((_entry) = (_start);					\
	     (_entry) < (_end);						\
	     (_entry) = extent_entry_next_safe(_entry, _end))

#define __bkey_ptr_next(_ptr, _end)					\
({									\
	typeof(_end) _entry;						\
									\
	__bkey_extent_entry_for_each_from(to_entry(_ptr), _end, _entry)	\
		if (extent_entry_is_ptr(_entry))			\
			break;						\
									\
	_entry < (_end) ? entry_to_ptr(_entry) : NULL;			\
})

#define bkey_extent_entry_for_each_from(_p, _entry, _start)		\
	__bkey_extent_entry_for_each_from(_start, (_p).end, _entry)

#define bkey_extent_entry_for_each(_p, _entry)				\
	bkey_extent_entry_for_each_from(_p, _entry, _p.start)

#define __bkey_for_each_ptr(_start, _end, _ptr)				\
	for (typeof(_start) (_ptr) = (_start);				\
	     ((_ptr) = __bkey_ptr_next(_ptr, _end));			\
	     (_ptr)++)

#define bkey_ptr_next(_p, _ptr)						\
	__bkey_ptr_next(_ptr, (_p).end)

#define bkey_for_each_ptr(_p, _ptr)					\
	__bkey_for_each_ptr(&(_p).start->ptr, (_p).end, _ptr)

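/*
 * Advance to the next ptr entry, decoding as we go: crc and stripe_ptr
 * entries seen along the way are accumulated into (_ptr).crc and (_ptr).ec,
 * so each decoded pointer carries the checksum and erasure coding state that
 * applies to it:
 */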
#define __bkey_ptr_next_decode(_k, _end, _ptr, _entry)			\
({									\
	__label__ out;							\
									\
	(_ptr).idx	= 0;						\
	(_ptr).has_ec	= false;					\
									\
	__bkey_extent_entry_for_each_from(_entry, _end, _entry)		\
		switch (__extent_entry_type(_entry)) {			\
		case BCH_EXTENT_ENTRY_ptr:				\
			(_ptr).ptr		= _entry->ptr;		\
			goto out;					\
		case BCH_EXTENT_ENTRY_crc32:				\
		case BCH_EXTENT_ENTRY_crc64:				\
		case BCH_EXTENT_ENTRY_crc128:				\
			(_ptr).crc = bch2_extent_crc_unpack(_k,		\
					entry_to_crc(_entry));		\
			break;						\
		case BCH_EXTENT_ENTRY_stripe_ptr:			\
			(_ptr).ec = _entry->stripe_ptr;			\
			(_ptr).has_ec	= true;				\
			break;						\
		default:						\
			/* nothing */					\
			break;						\
		}							\
out:									\
	_entry < (_end);						\
})

#define __bkey_for_each_ptr_decode(_k, _start, _end, _ptr, _entry)	\
	for ((_ptr).crc = bch2_extent_crc_unpack(_k, NULL),		\
	     (_entry) = _start;						\
	     __bkey_ptr_next_decode(_k, _end, _ptr, _entry);		\
	     (_entry) = extent_entry_next_safe(_entry, _end))

#define bkey_for_each_ptr_decode(_k, _p, _ptr, _entry)			\
	__bkey_for_each_ptr_decode(_k, (_p).start, (_p).end,		\
				   _ptr, _entry)

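/*
 * Iterate over just the checksum entries of a key, unpacked; _crc starts out
 * as the default "no checksum" state from bch2_extent_crc_unpack(_k, NULL):
 */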
#define bkey_crc_next(_k, _start, _end, _crc, _iter)			\
({									\
	__bkey_extent_entry_for_each_from(_iter, _end, _iter)		\
		if (extent_entry_is_crc(_iter)) {			\
			(_crc) = bch2_extent_crc_unpack(_k,		\
						entry_to_crc(_iter));	\
			break;						\
		}							\
									\
	(_iter) < (_end);						\
})

#define __bkey_for_each_crc(_k, _start, _end, _crc, _iter)		\
	for ((_crc) = bch2_extent_crc_unpack(_k, NULL),			\
	     (_iter) = (_start);					\
	     bkey_crc_next(_k, _start, _end, _crc, _iter);		\
	     (_iter) = extent_entry_next(_iter))

#define bkey_for_each_crc(_k, _p, _crc, _iter)				\
	__bkey_for_each_crc(_k, (_p).start, (_p).end, _crc, _iter)

/* Iterate over pointers in KEY_TYPE_extent: */

#define extent_for_each_entry_from(_e, _entry, _start)			\
	__bkey_extent_entry_for_each_from(_start,			\
				extent_entry_last(_e), _entry)

#define extent_for_each_entry(_e, _entry)				\
	extent_for_each_entry_from(_e, _entry, (_e).v->start)

#define extent_ptr_next(_e, _ptr)					\
	__bkey_ptr_next(_ptr, extent_entry_last(_e))

#define extent_for_each_ptr(_e, _ptr)					\
	__bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)

#define extent_for_each_ptr_decode(_e, _ptr, _entry)			\
	__bkey_for_each_ptr_decode((_e).k, (_e).v->start,		\
				   extent_entry_last(_e), _ptr, _entry)

/* utility code common to all keys with pointers: */

void bch2_mark_io_failure(struct bch_io_failures *,
			  struct extent_ptr_decoded *);
int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
			       struct bch_io_failures *,
			       struct extent_ptr_decoded *);

/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_invalid(struct bch_fs *, struct bkey_s_c,
			   enum bkey_invalid_flags, struct printbuf *);
void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
			    struct bkey_s_c);

int bch2_btree_ptr_v2_invalid(struct bch_fs *, struct bkey_s_c,
			      enum bkey_invalid_flags, struct printbuf *);
void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
			      int, struct bkey_s);

#define bch2_bkey_ops_btree_ptr ((struct bkey_ops) {		\
	.key_invalid	= bch2_btree_ptr_invalid,		\
	.val_to_text	= bch2_btree_ptr_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.trigger	= bch2_trigger_extent,			\
})

#define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) {		\
	.key_invalid	= bch2_btree_ptr_v2_invalid,		\
	.val_to_text	= bch2_btree_ptr_v2_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.compat		= bch2_btree_ptr_v2_compat,		\
	.trigger	= bch2_trigger_extent,			\
	.min_val_size	= 40,					\
})

/* KEY_TYPE_extent: */

bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

#define bch2_bkey_ops_extent ((struct bkey_ops) {		\
	.key_invalid	= bch2_bkey_ptrs_invalid,		\
	.val_to_text	= bch2_bkey_ptrs_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.key_normalize	= bch2_extent_normalize,		\
	.key_merge	= bch2_extent_merge,			\
	.trigger	= bch2_trigger_extent,			\
})

/* KEY_TYPE_reservation: */

int bch2_reservation_invalid(struct bch_fs *, struct bkey_s_c,
			     enum bkey_invalid_flags, struct printbuf *);
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

#define bch2_bkey_ops_reservation ((struct bkey_ops) {		\
	.key_invalid	= bch2_reservation_invalid,		\
	.val_to_text	= bch2_reservation_to_text,		\
	.key_merge	= bch2_reservation_merge,		\
	.trigger	= bch2_trigger_reservation,		\
	.min_val_size	= 8,					\
})

/* Extent checksum entries: */

bool bch2_can_narrow_extent_crcs(struct bkey_s_c,
				 struct bch_extent_crc_unpacked);
bool bch2_bkey_narrow_crcs(struct bkey_i *, struct bch_extent_crc_unpacked);
void bch2_extent_crc_append(struct bkey_i *,
			    struct bch_extent_crc_unpacked);

/* Generic code for keys with pointers: */

static inline bool bkey_is_btree_ptr(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_direct_data(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_inline_data(const struct bkey *k)
{
	return  k->type == KEY_TYPE_inline_data ||
		k->type == KEY_TYPE_indirect_inline_data;
}

static inline unsigned bkey_inline_data_offset(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_inline_data:
		return sizeof(struct bch_inline_data);
	case KEY_TYPE_indirect_inline_data:
		return sizeof(struct bch_indirect_inline_data);
	default:
		BUG();
	}
}

static inline unsigned bkey_inline_data_bytes(const struct bkey *k)
{
	return bkey_val_bytes(k) - bkey_inline_data_offset(k);
}

#define bkey_inline_data_p(_k)	(((void *) (_k).v) + bkey_inline_data_offset((_k).k))

static inline bool bkey_extent_is_data(const struct bkey *k)
{
	return  bkey_extent_is_direct_data(k) ||
		bkey_extent_is_inline_data(k) ||
		k->type == KEY_TYPE_reflink_p;
}

/*
 * Should extent be counted under inode->i_sectors?
 */
static inline bool bkey_extent_is_allocation(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reservation:
	case KEY_TYPE_reflink_p:
	case KEY_TYPE_reflink_v:
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
	case KEY_TYPE_error:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_unwritten(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->unwritten)
			return true;
	return false;
}

static inline bool bkey_extent_is_reservation(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation ||
		bkey_extent_is_unwritten(k);
}

static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(p, ptr)
		ret.data[ret.nr++] = ptr->dev;

	return ret;
}

static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(p, ptr)
		if (!ptr->cached)
			ret.data[ret.nr++] = ptr->dev;

	return ret;
}

static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(p, ptr)
		if (ptr->cached)
			ret.data[ret.nr++] = ptr->dev;

	return ret;
}

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
bool bch2_bkey_is_incompressible(struct bkey_s_c);
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);

unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
unsigned bch2_extent_ptr_desired_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);

void bch2_bkey_drop_device(struct bkey_s, unsigned);
void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);

const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c, unsigned);

static inline struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s k, unsigned dev)
{
	return (void *) bch2_bkey_has_device_c(k.s_c, dev);
}

bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);

void bch2_bkey_extent_entry_drop(struct bkey_i *, union bch_extent_entry *);

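/*
 * Append a plain pointer entry at the end of @k's value. Only btree_ptr,
 * btree_ptr_v2 and extent keys are handled; other key types BUG():
 */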
static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr ptr)
{
	struct bch_extent_ptr *dest;

	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s(k), ptr.dev));

	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);

		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
		dest = (struct bch_extent_ptr *)((void *) &k->v + bkey_val_bytes(&k->k));
		*dest = ptr;
		k->k.u64s++;
		break;
	default:
		BUG();
	}
}

void bch2_extent_ptr_decoded_append(struct bkey_i *,
				    struct extent_ptr_decoded *);
union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s,
						   struct bch_extent_ptr *);
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
					   struct bch_extent_ptr *);

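/*
 * Drop every pointer for which _cond evaluates true, re-fetching the entry
 * range after each drop since the value shrinks. Typical usage:
 *
 *	struct bch_extent_ptr *ptr;
 *
 *	bch2_bkey_drop_ptrs(k, ptr, ptr->cached);
 */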
#define bch2_bkey_drop_ptrs(_k, _ptr, _cond)				\
do {									\
	struct bkey_ptrs _ptrs = bch2_bkey_ptrs(_k);			\
									\
	_ptr = &_ptrs.start->ptr;					\
									\
	while ((_ptr = bkey_ptr_next(_ptrs, _ptr))) {			\
		if (_cond) {						\
			_ptr = (void *) bch2_bkey_drop_ptr(_k, _ptr);	\
			_ptrs = bch2_bkey_ptrs(_k);			\
			continue;					\
		}							\
									\
		(_ptr)++;						\
	}								\
} while (0)

bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
			   struct bch_extent_ptr, u64);
bool bch2_extents_match(struct bkey_s_c, struct bkey_s_c);
struct bch_extent_ptr *
bch2_extent_has_ptr(struct bkey_s_c, struct extent_ptr_decoded, struct bkey_s);

void bch2_extent_ptr_set_cached(struct bkey_s, struct bch_extent_ptr *);

bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *, const struct bch_extent_ptr *);
void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
			    struct bkey_s_c);
int bch2_bkey_ptrs_invalid(struct bch_fs *, struct bkey_s_c,
			   enum bkey_invalid_flags, struct printbuf *);

void bch2_ptr_swab(struct bkey_s);

const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c);
unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *, struct bkey_s_c,
				       unsigned, unsigned);
bool bch2_bkey_needs_rebalance(struct bch_fs *, struct bkey_s_c);

int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bkey_i *,
				  struct bch_io_opts *);

/* Generic extent code: */

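/*
 * Describes which part of m (all, back, front, or middle) is covered by k.
 * The encoding matches bch2_extent_overlap() below: bit 1 is set when k ends
 * before m does, bit 0 when k starts after m does.
 */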
enum bch_extent_overlap {
	BCH_EXTENT_OVERLAP_ALL		= 0,
	BCH_EXTENT_OVERLAP_BACK		= 1,
	BCH_EXTENT_OVERLAP_FRONT	= 2,
	BCH_EXTENT_OVERLAP_MIDDLE	= 3,
};

/* Returns how k overlaps with m */
static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
							  const struct bkey *m)
{
	int cmp1 = bkey_lt(k->p, m->p);
	int cmp2 = bkey_gt(bkey_start_pos(k), bkey_start_pos(m));

	return (cmp1 << 1) + cmp2;
}

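/*
 * Trim an extent: bch2_cut_front() trims the front of the extent so it starts
 * at @where, bch2_cut_back() trims the back so it ends at @where.
 */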
int bch2_cut_front_s(struct bpos, struct bkey_s);
int bch2_cut_back_s(struct bpos, struct bkey_s);

static inline void bch2_cut_front(struct bpos where, struct bkey_i *k)
{
	bch2_cut_front_s(where, bkey_i_to_s(k));
}

static inline void bch2_cut_back(struct bpos where, struct bkey_i *k)
{
	bch2_cut_back_s(where, bkey_i_to_s(k));
}

/**
 * bch2_key_resize - adjust size of @k
 *
 * bkey_start_offset(k) is preserved; only where the extent ends is modified.
 */
static inline void bch2_key_resize(struct bkey *k, unsigned new_size)
{
	k->p.offset -= k->size;
	k->p.offset += new_size;
	k->size = new_size;
}

#endif /* _BCACHEFS_EXTENTS_H */