xfs_refcount_btree.c revision 04fcad80
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"

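/* Duplicate a refcount btree cursor, attached to the same AGF buffer and perag. */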
static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.pag);
}

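/*
 * Update the root block pointer and tree level in the AGF and the incore
 * perag, then log the changed AGF fields.
 */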
STATIC void
xfs_refcountbt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_perag	*pag = agbp->b_pag;

	ASSERT(ptr->s != 0);

	agf->agf_refcount_root = ptr->s;
	be32_add_cpu(&agf->agf_refcount_level, inc);
	pag->pagf_refcount_level += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp,
			XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}

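/*
 * Allocate a new block for the refcount btree, hinted to be near the AG's
 * refcount btree anchor block and charged to the metadata reservation.
 */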
STATIC int
xfs_refcountbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_alloc_arg	args;		/* block allocation args */
	int			error;		/* error return value */

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno,
			xfs_refc_block(args.mp));
	args.oinfo = XFS_RMAP_OINFO_REFC;
	args.minlen = args.maxlen = args.prod = 1;
	args.resv = XFS_AG_RESV_METADATA;

	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_error;
	trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
			args.agbno, 1);
	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.agno == cur->bc_ag.pag->pag_agno);
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(args.agbno);
	be32_add_cpu(&agf->agf_refcount_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);

	*stat = 1;
	return 0;

out_error:
	return error;
}

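/*
 * Free a refcount btree block, returning the space to the per-AG metadata
 * reservation and decrementing the AGF block counter.
 */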
STATIC int
xfs_refcountbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
	int			error;

	trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
			XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
	be32_add_cpu(&agf->agf_refcount_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
	error = xfs_free_extent(cur->bc_tp, fsbno, 1, &XFS_RMAP_OINFO_REFC,
			XFS_AG_RESV_METADATA);
	if (error)
		return error;

	return error;
}

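/* Return the minimum number of records for a leaf (level 0) or node block. */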
STATIC int
xfs_refcountbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mnr[level != 0];
}

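/* Return the maximum number of records for a leaf (level 0) or node block. */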
STATIC int
xfs_refcountbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mxr[level != 0];
}

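/* Construct the low key from a record's starting block. */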
STATIC void
xfs_refcountbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->refc.rc_startblock = rec->refc.rc_startblock;
}

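/* Construct the high key from the last block covered by a record. */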
STATIC void
xfs_refcountbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	__u32				x;

	x = be32_to_cpu(rec->refc.rc_startblock);
	x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
	key->refc.rc_startblock = cpu_to_be32(x);
}

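/* Fill out an on-disk record from the cursor's incore record. */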
STATIC void
xfs_refcountbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->refc.rc_startblock = cpu_to_be32(cur->bc_rec.rc.rc_startblock);
	rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
	rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
}

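/* Load the root pointer of the refcount btree from the AGF. */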
STATIC void
xfs_refcountbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_refcount_root;
}

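/*
 * Return the difference between the key's starting block and the cursor's
 * incore record, for search comparisons.
 */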
STATIC int64_t
xfs_refcountbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_refcount_irec	*rec = &cur->bc_rec.rc;
	const struct xfs_refcount_key	*kp = &key->refc;

	return (int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock;
}

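/* Compare two keys by their starting blocks. */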
STATIC int64_t
xfs_refcountbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
			  be32_to_cpu(k2->refc.rc_startblock);
}

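/* Check a refcount btree block header for obvious corruption. */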
STATIC xfs_failaddr_t
xfs_refcountbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (!xfs_has_reflink(mp))
		return __this_address;
	fa = xfs_btree_sblock_v5hdr_verify(bp);
	if (fa)
		return fa;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_refcount_level)
			return __this_address;
	} else if (level >= mp->m_refc_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}

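/* Verify the CRC and structure of a refcount btree block being read in. */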
STATIC void
xfs_refcountbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_refcountbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

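/*
 * Verify the structure of a refcount btree block being written out, then
 * recompute its CRC.
 */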
STATIC void
xfs_refcountbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_refcountbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);

}

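/* Buffer verifier operations for refcount btree blocks. */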
const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
	.name			= "xfs_refcountbt",
	.magic			= { 0, cpu_to_be32(XFS_REFC_CRC_MAGIC) },
	.verify_read		= xfs_refcountbt_read_verify,
	.verify_write		= xfs_refcountbt_write_verify,
	.verify_struct		= xfs_refcountbt_verify,
};

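/* Check that two keys are in increasing startblock order. */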
STATIC int
xfs_refcountbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->refc.rc_startblock) <
	       be32_to_cpu(k2->refc.rc_startblock);
}

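/* Check that two records are in order and do not overlap. */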
STATIC int
xfs_refcountbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->refc.rc_startblock) +
		be32_to_cpu(r1->refc.rc_blockcount) <=
		be32_to_cpu(r2->refc.rc_startblock);
}

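/* Refcount btree operations, used by the generic btree code. */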
static const struct xfs_btree_ops xfs_refcountbt_ops = {
	.rec_len		= sizeof(struct xfs_refcount_rec),
	.key_len		= sizeof(struct xfs_refcount_key),

	.dup_cursor		= xfs_refcountbt_dup_cursor,
	.set_root		= xfs_refcountbt_set_root,
	.alloc_block		= xfs_refcountbt_alloc_block,
	.free_block		= xfs_refcountbt_free_block,
	.get_minrecs		= xfs_refcountbt_get_minrecs,
	.get_maxrecs		= xfs_refcountbt_get_maxrecs,
	.init_key_from_rec	= xfs_refcountbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_refcountbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_refcountbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_refcountbt_init_ptr_from_cur,
	.key_diff		= xfs_refcountbt_key_diff,
	.buf_ops		= &xfs_refcountbt_buf_ops,
	.diff_two_keys		= xfs_refcountbt_diff_two_keys,
	.keys_inorder		= xfs_refcountbt_keys_inorder,
	.recs_inorder		= xfs_refcountbt_recs_inorder,
};

/*
 * Initialize a new refcount btree cursor.
 */
static struct xfs_btree_cur *
xfs_refcountbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);

	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = XFS_BTNUM_REFC;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);

	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	/* take a reference for the cursor */
	atomic_inc(&pag->pag_ref);
	cur->bc_ag.pag = pag;

	cur->bc_ag.refc.nr_ops = 0;
	cur->bc_ag.refc.shape_changes = 0;
	cur->bc_ops = &xfs_refcountbt_ops;
	return cur;
}

/* Create a btree cursor. */
struct xfs_btree_cur *
xfs_refcountbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_refcountbt_init_common(mp, tp, pag);
	cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
	cur->bc_ag.agbp = agbp;
	return cur;
}

/* Create a btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_refcountbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_refcountbt_init_common(mp, NULL, pag);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Swap in the new btree root.  Once we pass this point the newly rebuilt btree
 * is in place and we have to kill off all the old btree blocks.
 */
void
xfs_refcountbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_refcount_root = cpu_to_be32(afake->af_root);
	agf->agf_refcount_level = cpu_to_be32(afake->af_levels);
	agf->agf_refcount_blocks = cpu_to_be32(afake->af_blocks);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_REFCOUNT_BLOCKS |
				    XFS_AGF_REFCOUNT_ROOT |
				    XFS_AGF_REFCOUNT_LEVEL);
	xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_refcountbt_ops);
}

/*
 * Calculate the number of records in a refcount btree block.
 */
int
xfs_refcountbt_maxrecs(
	int			blocklen,
	bool			leaf)
{
	blocklen -= XFS_REFCOUNT_BLOCK_LEN;

	if (leaf)
		return blocklen / sizeof(struct xfs_refcount_rec);
	return blocklen / (sizeof(struct xfs_refcount_key) +
			   sizeof(xfs_refcount_ptr_t));
}

/* Compute the maximum height of a refcount btree. */
void
xfs_refcountbt_compute_maxlevels(
	struct xfs_mount		*mp)
{
	mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(
			mp->m_refc_mnr, mp->m_sb.sb_agblocks);
}

/* Calculate the refcount btree size for some records. */
xfs_extlen_t
xfs_refcountbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_refc_mnr, len);
}

/*
 * Calculate the maximum refcount btree size.
 */
xfs_extlen_t
xfs_refcountbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_refc_mxr[0] == 0)
		return 0;

	return xfs_refcountbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_refcountbt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_has_reflink(mp))
		return 0;

	error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
	if (error)
		return error;

	agf = agbp->b_addr;
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_refcount_blocks);
	xfs_trans_brelse(tp, agbp);

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion.  We therefore can pretend the space isn't there.
	 */
	if (mp->m_sb.sb_logstart &&
	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == pag->pag_agno)
		agblocks -= mp->m_sb.sb_logblocks;

	*ask += xfs_refcountbt_max_size(mp, agblocks);
	*used += tree_len;

	return error;
}