// SPDX-License-Identifier: GPL-2.0+
/*
 * Meta data file for NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "alloc.h"		/* nilfs_palloc_destroy_cache() */

#include <trace/events/nilfs2.h>

#define NILFS_MDT_MAX_RA_BLOCKS		(16 - 1)


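/*
 * nilfs_mdt_insert_new_block - insert a new block into the bmap and format it
 *
 * The caller must hold the lock of the page containing @bh to exclude
 * readers.  On success, the block is registered in the bmap, zero-filled,
 * initialized with @init_block if given, and marked dirty together with
 * the inode.  Returns 0 on success or a negative error code from
 * nilfs_bmap_insert() on failure.
 */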
static int
nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
			   struct buffer_head *bh,
			   void (*init_block)(struct inode *,
					      struct buffer_head *, void *))
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	void *kaddr;
	int ret;

	/* The caller excludes read accesses using the page lock */

	/* set_buffer_new(bh); */
	bh->b_blocknr = 0;

	ret = nilfs_bmap_insert(ii->i_bmap, block, (unsigned long)bh);
	if (unlikely(ret))
		return ret;

	set_buffer_mapped(bh);

	kaddr = kmap_local_page(bh->b_page);
	memset(kaddr + bh_offset(bh), 0, i_blocksize(inode));
	if (init_block)
		init_block(inode, bh, kaddr);
	flush_dcache_page(bh->b_page);
	kunmap_local(kaddr);

	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(inode);

	trace_nilfs2_mdt_insert_new_block(inode, inode->i_ino, block);

	return 0;
}

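/*
 * nilfs_mdt_create_block - allocate and initialize a metadata block
 *
 * The block is created within a transaction, which is committed on
 * success and aborted on failure.  Returns 0 on success, %-ENOMEM if the
 * buffer cannot be grabbed, %-EEXIST if an up-to-date buffer already
 * exists, or an error from nilfs_mdt_insert_new_block() or the
 * transaction commit.
 */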
static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
				  struct buffer_head **out_bh,
				  void (*init_block)(struct inode *,
						     struct buffer_head *,
						     void *))
{
	struct super_block *sb = inode->i_sb;
	struct nilfs_transaction_info ti;
	struct buffer_head *bh;
	int err;

	nilfs_transaction_begin(sb, &ti, 0);

	err = -ENOMEM;
	bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0);
	if (unlikely(!bh))
		goto failed_unlock;

	err = -EEXIST;
	if (buffer_uptodate(bh))
		goto failed_bh;

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		goto failed_bh;

	bh->b_bdev = sb->s_bdev;
	err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
	if (likely(!err)) {
		get_bh(bh);
		*out_bh = bh;
	}

 failed_bh:
	folio_unlock(bh->b_folio);
	folio_put(bh->b_folio);
	brelse(bh);

 failed_unlock:
	if (likely(!err))
		err = nilfs_transaction_commit(sb);
	else
		nilfs_transaction_abort(sb);

	return err;
}

static int
nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff, blk_opf_t opf,
		       struct buffer_head **out_bh)
{
	struct buffer_head *bh;
	__u64 blknum = 0;
	int ret = -ENOMEM;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		goto failed;

	ret = -EEXIST; /* internal code */
	if (buffer_uptodate(bh))
		goto out;

	if (opf & REQ_RAHEAD) {
		if (!trylock_buffer(bh)) {
			ret = -EBUSY;
			goto failed_bh;
		}
	} else /* opf == REQ_OP_READ */
		lock_buffer(bh);

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		goto out;
	}

	ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff, &blknum);
	if (unlikely(ret)) {
		unlock_buffer(bh);
		goto failed_bh;
	}
	map_bh(bh, inode->i_sb, (sector_t)blknum);

	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(opf, bh);
	ret = 0;

	trace_nilfs2_mdt_submit_block(inode, inode->i_ino, blkoff,
				      opf & REQ_OP_MASK);
 out:
	get_bh(bh);
	*out_bh = bh;

 failed_bh:
	folio_unlock(bh->b_folio);
	folio_put(bh->b_folio);
	brelse(bh);
 failed:
	return ret;
}

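/*
 * nilfs_mdt_read_block - read a block on the meta data file
 *
 * When @readahead is nonzero, up to %NILFS_MDT_MAX_RA_BLOCKS following
 * blocks are submitted as readahead while the read of @block is in
 * flight.  Returns 0 on success (the buffer is up to date and stored in
 * @out_bh), or a negative error code: %-ENOMEM, %-EIO on read failure,
 * or an error from the bmap lookup such as %-ENOENT for a hole block.
 */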
static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
				int readahead, struct buffer_head **out_bh)
{
	struct buffer_head *first_bh, *bh;
	unsigned long blkoff;
	int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS;
	int err;

	err = nilfs_mdt_submit_block(inode, block, REQ_OP_READ, &first_bh);
	if (err == -EEXIST) /* internal code */
		goto out;

	if (unlikely(err))
		goto failed;

	if (readahead) {
		blkoff = block + 1;
		for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
			err = nilfs_mdt_submit_block(inode, blkoff,
						REQ_OP_READ | REQ_RAHEAD, &bh);
			if (likely(!err || err == -EEXIST))
				brelse(bh);
			else if (err != -EBUSY)
				/* abort readahead if bmap lookup failed */
				break;
			if (!buffer_locked(first_bh))
				goto out_no_wait;
		}
	}

	wait_on_buffer(first_bh);

 out_no_wait:
	err = -EIO;
	if (!buffer_uptodate(first_bh)) {
		nilfs_err(inode->i_sb,
			  "I/O error reading meta-data file (ino=%lu, block-offset=%lu)",
			  inode->i_ino, block);
		goto failed_bh;
	}
 out:
	*out_bh = first_bh;
	return 0;

 failed_bh:
	brelse(first_bh);
 failed:
	return err;
}

/**
 * nilfs_mdt_get_block - read or create a buffer on the meta data file.
 * @inode: inode of the meta data file
 * @blkoff: block offset
 * @create: create flag
 * @init_block: initializer used for newly allocated block
 * @out_bh: output of a pointer to the buffer_head
 *
 * nilfs_mdt_get_block() looks up the specified buffer and tries to create
 * a new buffer if @create is not zero.  On success, the returned buffer is
 * assured to be either existing or formatted using a buffer lock.
 * @out_bh is substituted only when zero is returned.
 *
 * Return Value: On success, it returns 0. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - the specified block does not exist (hole block)
 *
 * %-EROFS - Read only filesystem (for create mode)
 */
int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
			void (*init_block)(struct inode *,
					   struct buffer_head *, void *),
			struct buffer_head **out_bh)
{
	int ret;

	/* Should be rewritten with merging nilfs_mdt_read_block() */
 retry:
	ret = nilfs_mdt_read_block(inode, blkoff, !create, out_bh);
	if (!create || ret != -ENOENT)
		return ret;

	ret = nilfs_mdt_create_block(inode, blkoff, out_bh, init_block);
	if (unlikely(ret == -EEXIST)) {
		/* create = 0; */  /* limit read-create loop retries */
		goto retry;
	}
	return ret;
}

/**
 * nilfs_mdt_find_block - find and get a buffer on the meta data file.
 * @inode: inode of the meta data file
 * @start: start block offset (inclusive)
 * @end: end block offset (inclusive)
 * @blkoff: block offset
 * @out_bh: place to store a pointer to buffer_head struct
 *
 * nilfs_mdt_find_block() looks up an existing block in the range
 * [@start, @end] and stores a pointer to the buffer head of the block in
 * @out_bh and its block offset in @blkoff.  @out_bh and @blkoff are
 * substituted only when zero is returned.
 *
 * Return Value: On success, it returns 0. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - no block was found in the range
 */
int nilfs_mdt_find_block(struct inode *inode, unsigned long start,
			 unsigned long end, unsigned long *blkoff,
			 struct buffer_head **out_bh)
{
	__u64 next;
	int ret;

	if (unlikely(start > end))
		return -ENOENT;

	ret = nilfs_mdt_read_block(inode, start, true, out_bh);
	if (!ret) {
		*blkoff = start;
		goto out;
	}
	if (unlikely(ret != -ENOENT || start == ULONG_MAX))
		goto out;

	ret = nilfs_bmap_seek_key(NILFS_I(inode)->i_bmap, start + 1, &next);
	if (!ret) {
		if (next <= end) {
			ret = nilfs_mdt_read_block(inode, next, true, out_bh);
			if (!ret)
				*blkoff = next;
		} else {
			ret = -ENOENT;
		}
	}
out:
	return ret;
}

/**
 * nilfs_mdt_delete_block - make a hole in the meta data file.
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * Return Value: On success, zero is returned.
 * On error, one of the following negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 */
int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	err = nilfs_bmap_delete(ii->i_bmap, block);
	if (!err || err == -ENOENT) {
		nilfs_mdt_mark_dirty(inode);
		nilfs_mdt_forget_block(inode, block);
	}
	return err;
}

/**
 * nilfs_mdt_forget_block - discard dirty state and try to remove the page
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * nilfs_mdt_forget_block() clears the dirty flag of the specified buffer
 * and tries to release the page containing the buffer from the page cache.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EBUSY - page has an active buffer.
 *
 * %-ENOENT - page cache has no page addressed by the offset.
 */
int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
{
	pgoff_t index = block >> (PAGE_SHIFT - inode->i_blkbits);
	struct folio *folio;
	struct buffer_head *bh;
	int ret = 0;
	int still_dirty;

	folio = filemap_lock_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return -ENOENT;

	folio_wait_writeback(folio);

	bh = folio_buffers(folio);
	if (bh) {
		unsigned long first_block = index <<
				(PAGE_SHIFT - inode->i_blkbits);
		bh = get_nth_bh(bh, block - first_block);
		nilfs_forget_buffer(bh);
	}
	still_dirty = folio_test_dirty(folio);
	folio_unlock(folio);
	folio_put(folio);

	if (still_dirty ||
	    invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
		ret = -EBUSY;
	return ret;
}

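/**
 * nilfs_mdt_fetch_dirty - check whether the metadata file needs writeout
 * @inode: inode of the meta data file
 *
 * Return Value: 1 if the inode has a dirty bmap or the %NILFS_I_DIRTY
 * flag set, or 0 otherwise.  A dirty bmap also sets %NILFS_I_DIRTY as a
 * side effect.
 */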
int nilfs_mdt_fetch_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (nilfs_bmap_test_and_clear_dirty(ii->i_bmap)) {
		set_bit(NILFS_I_DIRTY, &ii->i_state);
		return 1;
	}
	return test_bit(NILFS_I_DIRTY, &ii->i_state);
}

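/*
 * nilfs_mdt_write_page() is the writepage callback for metadata files.
 * Dirty folios are not written back directly; the folio is redirtied
 * and, depending on the writeback mode, a segment construction
 * (WB_SYNC_ALL) or a segment flush (for reclaim) is triggered instead.
 * On a filesystem that was remounted read-only due to an error, dirty
 * folios are simply discarded.
 */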
static int
nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = folio->mapping->host;
	struct super_block *sb;
	int err = 0;

	if (inode && sb_rdonly(inode->i_sb)) {
		/*
		 * The filesystem was remounted read-only because of an
		 * error or metadata corruption, but dirty folios may
		 * still be flushed in the background.  Simply discard
		 * this dirty folio here.
		 */
		nilfs_clear_folio_dirty(folio, false);
		folio_unlock(folio);
		return -EROFS;
	}

	folio_redirty_for_writepage(wbc, folio);
	folio_unlock(folio);

	if (!inode)
		return 0;

	sb = inode->i_sb;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_segment(sb);
	else if (wbc->for_reclaim)
		nilfs_flush_segment(sb, inode->i_ino);

	return err;
}


static const struct address_space_operations def_mdt_aops = {
	.dirty_folio		= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
	.writepage		= nilfs_mdt_write_page,
};

static const struct inode_operations def_mdt_iops;
static const struct file_operations def_mdt_fops;


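/**
 * nilfs_mdt_init - initialize a metadata file inode
 * @inode: inode to be initialized
 * @gfp_mask: GFP mask for the page cache of the metadata file
 * @objsz: size of the private object attached to inode->i_private; at
 *	least sizeof(struct nilfs_mdt_info) is allocated
 *
 * Return Value: 0 on success, or %-ENOMEM if the private object cannot
 * be allocated.
 */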
int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
{
	struct nilfs_mdt_info *mi;

	mi = kzalloc(max(sizeof(*mi), objsz), GFP_NOFS);
	if (!mi)
		return -ENOMEM;

	init_rwsem(&mi->mi_sem);
	inode->i_private = mi;

	inode->i_mode = S_IFREG;
	mapping_set_gfp_mask(inode->i_mapping, gfp_mask);

	inode->i_op = &def_mdt_iops;
	inode->i_fop = &def_mdt_fops;
	inode->i_mapping->a_ops = &def_mdt_aops;

	return 0;
}

/**
 * nilfs_mdt_clear - do cleanup for the metadata file
 * @inode: inode of the metadata file
 */
void nilfs_mdt_clear(struct inode *inode)
{
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
	struct nilfs_shadow_map *shadow = mdi->mi_shadow;

	if (mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (shadow) {
		struct inode *s_inode = shadow->inode;

		shadow->inode = NULL;
		iput(s_inode);
		mdi->mi_shadow = NULL;
	}
}

/**
 * nilfs_mdt_destroy - release resources used by the metadata file
 * @inode: inode of the metadata file
 */
void nilfs_mdt_destroy(struct inode *inode)
{
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
	kfree(mdi);
}

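/**
 * nilfs_mdt_set_entry_size - set the size of entries in a metadata file
 * @inode: inode of the meta data file
 * @entry_size: size of a single entry in bytes
 * @header_size: size of the block header in bytes
 *
 * Derives the number of entries per block and the offset (in entries) of
 * the first entry from the block size of @inode.
 */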
void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size,
			      unsigned int header_size)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);

	mi->mi_entry_size = entry_size;
	mi->mi_entries_per_block = i_blocksize(inode) / entry_size;
	mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
}

/**
 * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
 * @inode: inode of the metadata file
 * @shadow: shadow mapping
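 *
 * Return Value: 0 on success, or a negative error code on failure.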
 */
int nilfs_mdt_setup_shadow_map(struct inode *inode,
			       struct nilfs_shadow_map *shadow)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct inode *s_inode;

	INIT_LIST_HEAD(&shadow->frozen_buffers);

	s_inode = nilfs_iget_for_shadow(inode);
	if (IS_ERR(s_inode))
		return PTR_ERR(s_inode);

	shadow->inode = s_inode;
	mi->mi_shadow = shadow;
	return 0;
}

/**
 * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
 * @inode: inode of the metadata file
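 *
 * Return Value: 0 on success, or a negative error code from
 * nilfs_copy_dirty_pages() on failure.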
 */
int nilfs_mdt_save_to_shadow_map(struct inode *inode)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_shadow_map *shadow = mi->mi_shadow;
	struct inode *s_inode = shadow->inode;
	int ret;

	ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping);
	if (ret)
		goto out;

	ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping,
				     ii->i_assoc_inode->i_mapping);
	if (ret)
		goto out;

	nilfs_bmap_save(ii->i_bmap, &shadow->bmap_store);
 out:
	return ret;
}

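/**
 * nilfs_mdt_freeze_buffer - freeze the state of a metadata buffer
 * @inode: inode of the meta data file
 * @bh: buffer head to be frozen
 *
 * Copies @bh into the corresponding buffer of the shadow mapping so that
 * its current contents can be restored later, and marks @bh as
 * redirected.  A buffer that is already frozen is left untouched.
 *
 * Return Value: 0 on success, or a negative error code if the shadow
 * folio cannot be grabbed.
 */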
int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
{
	struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
	struct buffer_head *bh_frozen;
	struct folio *folio;
	int blkbits = inode->i_blkbits;

	folio = filemap_grab_folio(shadow->inode->i_mapping,
			bh->b_folio->index);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	bh_frozen = folio_buffers(folio);
	if (!bh_frozen)
		bh_frozen = create_empty_buffers(folio, 1 << blkbits, 0);

	bh_frozen = get_nth_bh(bh_frozen, bh_offset(bh) >> blkbits);

	if (!buffer_uptodate(bh_frozen))
		nilfs_copy_buffer(bh_frozen, bh);
	if (list_empty(&bh_frozen->b_assoc_buffers)) {
		list_add_tail(&bh_frozen->b_assoc_buffers,
			      &shadow->frozen_buffers);
		set_buffer_nilfs_redirected(bh);
	} else {
		brelse(bh_frozen); /* already frozen */
	}

	folio_unlock(folio);
	folio_put(folio);
	return 0;
}

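/**
 * nilfs_mdt_get_frozen_buffer - search for a frozen copy of a buffer
 * @inode: inode of the meta data file
 * @bh: buffer head of the original buffer
 *
 * Return Value: the buffer head of the frozen copy with an elevated
 * reference count, or NULL if no frozen copy exists.
 */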
struct buffer_head *
nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
{
	struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
	struct buffer_head *bh_frozen = NULL;
	struct folio *folio;
	int n;

	folio = filemap_lock_folio(shadow->inode->i_mapping,
			bh->b_folio->index);
	if (!IS_ERR(folio)) {
		bh_frozen = folio_buffers(folio);
		if (bh_frozen) {
			n = bh_offset(bh) >> inode->i_blkbits;
			bh_frozen = get_nth_bh(bh_frozen, n);
		}
		folio_unlock(folio);
		folio_put(folio);
	}
	return bh_frozen;
}

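/*
 * nilfs_release_frozen_buffers - detach all frozen buffers from the
 * shadow map @shadow and drop the references held on them.
 */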
static void nilfs_release_frozen_buffers(struct nilfs_shadow_map *shadow)
{
	struct list_head *head = &shadow->frozen_buffers;
	struct buffer_head *bh;

	while (!list_empty(head)) {
		bh = list_first_entry(head, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh); /* drop ref-count to make it releasable */
	}
}

/**
 * nilfs_mdt_restore_from_shadow_map - restore dirty pages and bmap state
 * @inode: inode of the metadata file
 */
void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_shadow_map *shadow = mi->mi_shadow;

	down_write(&mi->mi_sem);

	if (mi->mi_palloc_cache)
		nilfs_palloc_clear_cache(inode);

	nilfs_clear_dirty_pages(inode->i_mapping, true);
	nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);

	nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true);
	nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
			      NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);

	nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);

	up_write(&mi->mi_sem);
}

/**
 * nilfs_mdt_clear_shadow_map - truncate pages in shadow map caches
 * @inode: inode of the metadata file
 */
void nilfs_mdt_clear_shadow_map(struct inode *inode)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct nilfs_shadow_map *shadow = mi->mi_shadow;
	struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode;

	down_write(&mi->mi_sem);
	nilfs_release_frozen_buffers(shadow);
	truncate_inode_pages(shadow->inode->i_mapping, 0);
	truncate_inode_pages(shadow_btnc_inode->i_mapping, 0);
	up_write(&mi->mi_sem);
}