1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _BCACHEFS_BKEY_H
3#define _BCACHEFS_BKEY_H
4
5#include <linux/bug.h>
6#include "bcachefs_format.h"
7#include "bkey_types.h"
8#include "btree_types.h"
9#include "util.h"
10#include "vstructs.h"
11
12enum bkey_invalid_flags {
13	BKEY_INVALID_WRITE		= (1U << 0),
14	BKEY_INVALID_COMMIT		= (1U << 1),
15	BKEY_INVALID_JOURNAL		= (1U << 2),
16};
17
18#if 0
19
20/*
21 * compiled unpack functions are disabled, pending a new interface for
22 * dynamically allocating executable memory:
23 */
24
25#ifdef CONFIG_X86_64
26#define HAVE_BCACHEFS_COMPILED_UNPACK	1
27#endif
28#endif
29
30void bch2_bkey_packed_to_binary_text(struct printbuf *,
31				     const struct bkey_format *,
32				     const struct bkey_packed *);
33
34enum bkey_lr_packed {
35	BKEY_PACKED_BOTH,
36	BKEY_PACKED_RIGHT,
37	BKEY_PACKED_LEFT,
38	BKEY_PACKED_NONE,
39};
40
41#define bkey_lr_packed(_l, _r)						\
42	((_l)->format + ((_r)->format << 1))
43
/*
 * Copy a packed bkey: length (key + value, in u64 units) comes from
 * src->u64s. Caller must ensure dst has room for src->u64s u64s.
 */
static inline void bkey_p_copy(struct bkey_packed *dst, const struct bkey_packed *src)
{
	memcpy_u64s_small(dst, src, src->u64s);
}

/* Copy an unpacked key (bkey_i): length comes from src->k.u64s. */
static inline void bkey_copy(struct bkey_i *dst, const struct bkey_i *src)
{
	memcpy_u64s_small(dst, src, src->k.u64s);
}
53
54struct btree;
55
56__pure
57unsigned bch2_bkey_greatest_differing_bit(const struct btree *,
58					  const struct bkey_packed *,
59					  const struct bkey_packed *);
60__pure
61unsigned bch2_bkey_ffs(const struct btree *, const struct bkey_packed *);
62
63__pure
64int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *,
65				     const struct bkey_packed *,
66				     const struct btree *);
67
68__pure
69int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *,
70					  const struct bkey_packed *,
71					  const struct bpos *);
72
73__pure
74int bch2_bkey_cmp_packed(const struct btree *,
75			 const struct bkey_packed *,
76			 const struct bkey_packed *);
77
78__pure
79int __bch2_bkey_cmp_left_packed(const struct btree *,
80				const struct bkey_packed *,
81				const struct bpos *);
82
static inline __pure
int bkey_cmp_left_packed(const struct btree *b,
			 const struct bkey_packed *l, const struct bpos *r)
{
	/* Thin wrapper; kept as a named helper for symmetry with the
	 * by-value variant below. */
	return __bch2_bkey_cmp_left_packed(b, l, r);
}

/*
 * The compiler generates better code when we pass bpos by ref, but it's often
 * enough terribly convenient to pass it by val... as much as I hate c++, const
 * ref would be nice here:
 */
__pure __flatten
static inline int bkey_cmp_left_packed_byval(const struct btree *b,
					     const struct bkey_packed *l,
					     struct bpos r)
{
	return bkey_cmp_left_packed(b, l, &r);
}
102
103static __always_inline bool bpos_eq(struct bpos l, struct bpos r)
104{
105	return  !((l.inode	^ r.inode) |
106		  (l.offset	^ r.offset) |
107		  (l.snapshot	^ r.snapshot));
108}
109
110static __always_inline bool bpos_lt(struct bpos l, struct bpos r)
111{
112	return  l.inode	!= r.inode ? l.inode < r.inode :
113		l.offset != r.offset ? l.offset < r.offset :
114		l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false;
115}
116
117static __always_inline bool bpos_le(struct bpos l, struct bpos r)
118{
119	return  l.inode	!= r.inode ? l.inode < r.inode :
120		l.offset != r.offset ? l.offset < r.offset :
121		l.snapshot != r.snapshot ? l.snapshot < r.snapshot : true;
122}
123
/* Greater-than/greater-or-equal: lt/le with arguments swapped */
static __always_inline bool bpos_gt(struct bpos l, struct bpos r)
{
	return bpos_lt(r, l);
}

static __always_inline bool bpos_ge(struct bpos l, struct bpos r)
{
	return bpos_le(r, l);
}

/*
 * Three-way lexicographic compare of full positions
 * (inode, offset, snapshot): returns <0, 0 or >0.
 */
static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
{
	return  cmp_int(l.inode,    r.inode) ?:
		cmp_int(l.offset,   r.offset) ?:
		cmp_int(l.snapshot, r.snapshot);
}
140
141static inline struct bpos bpos_min(struct bpos l, struct bpos r)
142{
143	return bpos_lt(l, r) ? l : r;
144}
145
146static inline struct bpos bpos_max(struct bpos l, struct bpos r)
147{
148	return bpos_gt(l, r) ? l : r;
149}
150
151static __always_inline bool bkey_eq(struct bpos l, struct bpos r)
152{
153	return  !((l.inode	^ r.inode) |
154		  (l.offset	^ r.offset));
155}
156
157static __always_inline bool bkey_lt(struct bpos l, struct bpos r)
158{
159	return  l.inode	!= r.inode
160		? l.inode < r.inode
161		: l.offset < r.offset;
162}
163
164static __always_inline bool bkey_le(struct bpos l, struct bpos r)
165{
166	return  l.inode	!= r.inode
167		? l.inode < r.inode
168		: l.offset <= r.offset;
169}
170
/* Greater-than/greater-or-equal, ignoring snapshot: lt/le swapped */
static __always_inline bool bkey_gt(struct bpos l, struct bpos r)
{
	return bkey_lt(r, l);
}

static __always_inline bool bkey_ge(struct bpos l, struct bpos r)
{
	return bkey_le(r, l);
}

/*
 * Three-way compare on (inode, offset) only — unlike bpos_cmp(),
 * the snapshot field does not participate.
 */
static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
{
	return  cmp_int(l.inode,    r.inode) ?:
		cmp_int(l.offset,   r.offset);
}
186
187static inline struct bpos bkey_min(struct bpos l, struct bpos r)
188{
189	return bkey_lt(l, r) ? l : r;
190}
191
192static inline struct bpos bkey_max(struct bpos l, struct bpos r)
193{
194	return bkey_gt(l, r) ? l : r;
195}
196
197void bch2_bpos_swab(struct bpos *);
198void bch2_bkey_swab_key(const struct bkey_format *, struct bkey_packed *);
199
/* Three-way compare of versions: high word first, then low */
static __always_inline int bversion_cmp(struct bversion l, struct bversion r)
{
	return  cmp_int(l.hi, r.hi) ?:
		cmp_int(l.lo, r.lo);
}

#define ZERO_VERSION	((struct bversion) { .hi = 0, .lo = 0 })
#define MAX_VERSION	((struct bversion) { .hi = ~0, .lo = ~0ULL })

/* Returns nonzero iff @v is the all-zeroes version */
static __always_inline int bversion_zero(struct bversion v)
{
	return !bversion_cmp(v, ZERO_VERSION);
}
213
214#ifdef CONFIG_BCACHEFS_DEBUG
215/* statement expressions confusing unlikely()? */
216#define bkey_packed(_k)							\
217	({ EBUG_ON((_k)->format > KEY_FORMAT_CURRENT);			\
218	 (_k)->format != KEY_FORMAT_CURRENT; })
219#else
220#define bkey_packed(_k)		((_k)->format != KEY_FORMAT_CURRENT)
221#endif
222
/*
 * It's safe to treat an unpacked bkey as a packed one, but not the reverse
 */
static inline struct bkey_packed *bkey_to_packed(struct bkey_i *k)
{
	return (struct bkey_packed *) k;
}

static inline const struct bkey_packed *bkey_to_packed_c(const struct bkey_i *k)
{
	return (const struct bkey_packed *) k;
}

/* Returns NULL when @k is actually packed (not in the current format) */
static inline struct bkey_i *packed_to_bkey(struct bkey_packed *k)
{
	return bkey_packed(k) ? NULL : (struct bkey_i *) k;
}

/* Const version of packed_to_bkey() */
static inline const struct bkey *packed_to_bkey_c(const struct bkey_packed *k)
{
	return bkey_packed(k) ? NULL : (const struct bkey *) k;
}

/* Total bits used by the position fields (inode, offset, snapshot) */
static inline unsigned bkey_format_key_bits(const struct bkey_format *format)
{
	return format->bits_per_field[BKEY_FIELD_INODE] +
		format->bits_per_field[BKEY_FIELD_OFFSET] +
		format->bits_per_field[BKEY_FIELD_SNAPSHOT];
}
252
/*
 * Next position: increment snapshot, carrying into offset and then inode.
 * BUG()s if the whole position would wrap around.
 */
static inline struct bpos bpos_successor(struct bpos p)
{
	if (!++p.snapshot &&
	    !++p.offset &&
	    !++p.inode)
		BUG();

	return p;
}

/*
 * Previous position: decrement snapshot, borrowing from offset and then
 * inode. (!p.field-- tests the field for zero *before* decrementing, i.e.
 * detects the borrow.) BUG()s if the whole position would wrap around.
 */
static inline struct bpos bpos_predecessor(struct bpos p)
{
	if (!p.snapshot-- &&
	    !p.offset-- &&
	    !p.inode--)
		BUG();

	return p;
}

/* Successor ignoring snapshots: snapshot reset to 0, offset incremented */
static inline struct bpos bpos_nosnap_successor(struct bpos p)
{
	p.snapshot = 0;

	if (!++p.offset &&
	    !++p.inode)
		BUG();

	return p;
}

/* Predecessor ignoring snapshots: snapshot reset to 0, offset decremented */
static inline struct bpos bpos_nosnap_predecessor(struct bpos p)
{
	p.snapshot = 0;

	if (!p.offset-- &&
	    !p.inode--)
		BUG();

	return p;
}

/* Start offset of an extent: k->p.offset is the end, k->size the length */
static inline u64 bkey_start_offset(const struct bkey *k)
{
	return k->p.offset - k->size;
}

/* Position of the start of an extent: same inode/snapshot, start offset */
static inline struct bpos bkey_start_pos(const struct bkey *k)
{
	return (struct bpos) {
		.inode		= k->p.inode,
		.offset		= bkey_start_offset(k),
		.snapshot	= k->p.snapshot,
	};
}
308
/* Packed helpers */

/* u64s used by the key part of @k: format-dependent when packed */
static inline unsigned bkeyp_key_u64s(const struct bkey_format *format,
				      const struct bkey_packed *k)
{
	return bkey_packed(k) ? format->key_u64s : BKEY_U64s;
}

/*
 * Validate k->u64s: must be at least the key size, and the value must fit
 * in u8 u64s once the key is unpacked to BKEY_U64s. Relies on unsigned
 * subtraction wrapping to a huge value when u64s < key_u64s.
 */
static inline bool bkeyp_u64s_valid(const struct bkey_format *f,
				    const struct bkey_packed *k)
{
	return ((unsigned) k->u64s - bkeyp_key_u64s(f, k) <= U8_MAX - BKEY_U64s);
}

/* Size in bytes of the key part of @k */
static inline unsigned bkeyp_key_bytes(const struct bkey_format *format,
				       const struct bkey_packed *k)
{
	return bkeyp_key_u64s(format, k) * sizeof(u64);
}

/* u64s used by the value: total minus key */
static inline unsigned bkeyp_val_u64s(const struct bkey_format *format,
				      const struct bkey_packed *k)
{
	return k->u64s - bkeyp_key_u64s(format, k);
}

/* Size in bytes of the value part of @k */
static inline size_t bkeyp_val_bytes(const struct bkey_format *format,
				     const struct bkey_packed *k)
{
	return bkeyp_val_u64s(format, k) * sizeof(u64);
}

/* Set total u64s of @k from the desired value size in u64s */
static inline void set_bkeyp_val_u64s(const struct bkey_format *format,
				      struct bkey_packed *k, unsigned val_u64s)
{
	k->u64s = bkeyp_key_u64s(format, k) + val_u64s;
}

/* Pointer to the value, which starts immediately after the (packed) key */
#define bkeyp_val(_format, _k)						\
	 ((struct bch_val *) ((u64 *) (_k)->_data + bkeyp_key_u64s(_format, _k)))
349
350extern const struct bkey_format bch2_bkey_format_current;
351
352bool bch2_bkey_transform(const struct bkey_format *,
353			 struct bkey_packed *,
354			 const struct bkey_format *,
355			 const struct bkey_packed *);
356
357struct bkey __bch2_bkey_unpack_key(const struct bkey_format *,
358				   const struct bkey_packed *);
359
360#ifndef HAVE_BCACHEFS_COMPILED_UNPACK
361struct bpos __bkey_unpack_pos(const struct bkey_format *,
362			      const struct bkey_packed *);
363#endif
364
365bool bch2_bkey_pack_key(struct bkey_packed *, const struct bkey *,
366		   const struct bkey_format *);
367
368enum bkey_pack_pos_ret {
369	BKEY_PACK_POS_EXACT,
370	BKEY_PACK_POS_SMALLER,
371	BKEY_PACK_POS_FAIL,
372};
373
374enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *, struct bpos,
375					   const struct btree *);
376
/* Returns true only if @in was packed exactly (no precision lost) */
static inline bool bkey_pack_pos(struct bkey_packed *out, struct bpos in,
				 const struct btree *b)
{
	return bch2_bkey_pack_pos_lossy(out, in, b) == BKEY_PACK_POS_EXACT;
}
382
383void bch2_bkey_unpack(const struct btree *, struct bkey_i *,
384		 const struct bkey_packed *);
385bool bch2_bkey_pack(struct bkey_packed *, const struct bkey_i *,
386	       const struct bkey_format *);
387
388typedef void (*compiled_unpack_fn)(struct bkey *, const struct bkey_packed *);
389
/*
 * Unpack @src (which must be in @b's packed format) into @dst.
 *
 * When compiled unpack functions are available, b->aux_data holds the
 * compiled function; with expensive debug checks on, its result is
 * verified against the generic C unpack.
 */
static inline void
__bkey_unpack_key_format_checked(const struct btree *b,
			       struct bkey *dst,
			       const struct bkey_packed *src)
{
	if (IS_ENABLED(HAVE_BCACHEFS_COMPILED_UNPACK)) {
		compiled_unpack_fn unpack_fn = b->aux_data;
		unpack_fn(dst, src);

		if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
		    bch2_expensive_debug_checks) {
			struct bkey dst2 = __bch2_bkey_unpack_key(&b->format, src);

			BUG_ON(memcmp(dst, &dst2, sizeof(*dst)));
		}
	} else {
		*dst = __bch2_bkey_unpack_key(&b->format, src);
	}
}

/* As __bkey_unpack_key_format_checked(), but returns the bkey by value */
static inline struct bkey
bkey_unpack_key_format_checked(const struct btree *b,
			       const struct bkey_packed *src)
{
	struct bkey dst;

	__bkey_unpack_key_format_checked(b, &dst, src);
	return dst;
}

/* Unpack @src into @dst, handling both packed and already-unpacked keys */
static inline void __bkey_unpack_key(const struct btree *b,
				     struct bkey *dst,
				     const struct bkey_packed *src)
{
	if (likely(bkey_packed(src)))
		__bkey_unpack_key_format_checked(b, dst, src);
	else
		*dst = *packed_to_bkey_c(src);
}
429
/**
 * bkey_unpack_key - unpack just the key, not the value
 */
static inline struct bkey bkey_unpack_key(const struct btree *b,
					  const struct bkey_packed *src)
{
	return likely(bkey_packed(src))
		? bkey_unpack_key_format_checked(b, src)
		: *packed_to_bkey_c(src);
}

/* Unpack only the position; uses the compiled unpack when available */
static inline struct bpos
bkey_unpack_pos_format_checked(const struct btree *b,
			       const struct bkey_packed *src)
{
#ifdef HAVE_BCACHEFS_COMPILED_UNPACK
	return bkey_unpack_key_format_checked(b, src).p;
#else
	return __bkey_unpack_pos(&b->format, src);
#endif
}

/* Position of @src, whether packed or not */
static inline struct bpos bkey_unpack_pos(const struct btree *b,
					  const struct bkey_packed *src)
{
	return likely(bkey_packed(src))
		? bkey_unpack_pos_format_checked(b, src)
		: packed_to_bkey_c(src)->p;
}
459
/* Disassembled bkeys */

/*
 * Unpack the key part of @k into caller-provided storage @u; the value is
 * not copied — the returned bkey_s_c points at it in place.
 */
static inline struct bkey_s_c bkey_disassemble(const struct btree *b,
					       const struct bkey_packed *k,
					       struct bkey *u)
{
	__bkey_unpack_key(b, u, k);

	return (struct bkey_s_c) { u, bkeyp_val(&b->format, k), };
}

/* non const version: */
static inline struct bkey_s __bkey_disassemble(const struct btree *b,
					       struct bkey_packed *k,
					       struct bkey *u)
{
	__bkey_unpack_key(b, u, k);

	return (struct bkey_s) { .k = u, .v = bkeyp_val(&b->format, k), };
}
480
/*
 * Largest value representable for field @nr in format @f:
 * field_offset plus a mask of the low bits_per_field bits; saturates at
 * U64_MAX when the field uses all 64 bits (the shift would be UB otherwise).
 */
static inline u64 bkey_field_max(const struct bkey_format *f,
				 enum bch_bkey_fields nr)
{
	return f->bits_per_field[nr] < 64
		? (le64_to_cpu(f->field_offset[nr]) +
		   ~(~0ULL << f->bits_per_field[nr]))
		: U64_MAX;
}
489
490#ifdef HAVE_BCACHEFS_COMPILED_UNPACK
491
492int bch2_compile_bkey_format(const struct bkey_format *, void *);
493
494#else
495
/* Stub: compiled unpack functions not available on this arch */
static inline int bch2_compile_bkey_format(const struct bkey_format *format,
					  void *out) { return 0; }
498
499#endif
500
/* Deep-copy @src (key and value) into @dst */
static inline void bkey_reassemble(struct bkey_i *dst,
				   struct bkey_s_c src)
{
	dst->k = *src.k;
	memcpy_u64s_small(&dst->v, src.v, bkey_val_u64s(src.k));
}
507
508/* byte order helpers */
509
510#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
511
/* Little endian: the highest word of a packed key is the last u64 */
static inline unsigned high_word_offset(const struct bkey_format *f)
{
	return f->key_u64s - 1;
}
516
517#define high_bit_offset		0
518#define nth_word(p, n)		((p) - (n))
519
520#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
521
/* Big endian: the highest word of a packed key is the first u64 */
static inline unsigned high_word_offset(const struct bkey_format *f)
{
	return 0;
}
526
527#define high_bit_offset		KEY_PACKED_BITS_START
528#define nth_word(p, n)		((p) + (n))
529
530#else
531#error edit for your odd byteorder.
532#endif
533
534#define high_word(f, k)		((u64 *) (k)->_data + high_word_offset(f))
535#define next_word(p)		nth_word(p, 1)
536#define prev_word(p)		nth_word(p, -1)
537
538#ifdef CONFIG_BCACHEFS_DEBUG
539void bch2_bkey_pack_test(void);
540#else
541static inline void bch2_bkey_pack_test(void) {}
542#endif
543
544#define bkey_fields()							\
545	x(BKEY_FIELD_INODE,		p.inode)			\
546	x(BKEY_FIELD_OFFSET,		p.offset)			\
547	x(BKEY_FIELD_SNAPSHOT,		p.snapshot)			\
548	x(BKEY_FIELD_SIZE,		size)				\
549	x(BKEY_FIELD_VERSION_HI,	version.hi)			\
550	x(BKEY_FIELD_VERSION_LO,	version.lo)
551
552struct bkey_format_state {
553	u64 field_min[BKEY_NR_FIELDS];
554	u64 field_max[BKEY_NR_FIELDS];
555};
556
557void bch2_bkey_format_init(struct bkey_format_state *);
558
/* Widen the tracked min/max range for @field to include @v */
static inline void __bkey_format_add(struct bkey_format_state *s, unsigned field, u64 v)
{
	s->field_min[field] = min(s->field_min[field], v);
	s->field_max[field] = max(s->field_max[field], v);
}

/*
 * Changes @format so that @k can be successfully packed with @format
 */
static inline void bch2_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k)
{
	/* Expands to one __bkey_format_add() call per bkey field */
#define x(id, field) __bkey_format_add(s, id, k->field);
	bkey_fields()
#undef x
}
574
575void bch2_bkey_format_add_pos(struct bkey_format_state *, struct bpos);
576struct bkey_format bch2_bkey_format_done(struct bkey_format_state *);
577int bch2_bkey_format_invalid(struct bch_fs *, struct bkey_format *,
578			     enum bkey_invalid_flags, struct printbuf *);
579void bch2_bkey_format_to_text(struct printbuf *, const struct bkey_format *);
580
581#endif /* _BCACHEFS_BKEY_H */
582