/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
#include <linux/delay.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"

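/*
 * PULL is passed as the "pull" argument to log_write_header() when the
 * header is being written purely to advance (pull) the log tail; in that
 * case the header block itself is charged against the blocks freed by the
 * pull (see log_pull_tail()).
 */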
#define PULL 1

/**
 * gfs2_struct2blk - compute number of log blocks needed to hold structures
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
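 * In other words (an illustrative restatement of the code below): if "first"
 * structures fit after a struct gfs2_log_descriptor and "second" fit after
 * a struct gfs2_meta_header, the result is 1 when nstruct <= first, and
 * 1 + DIV_ROUND_UP(nstruct - first, second) otherwise.
 *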
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
			     unsigned int ssize)
{
	unsigned int blks;
	unsigned int first, second;

	blks = 1;
	first = (sdp->sd_sb.sb_bsize -
		 sizeof(struct gfs2_log_descriptor)) / ssize;

	if (nstruct > first) {
		second = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / ssize;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @ai: the AIL entry to start I/O on
 *
 */

static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int retry;

	BUG_ON(!spin_is_locked(&sdp->sd_log_lock));

	do {
		retry = 0;

		list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
						 bd_ail_st_list) {
			bh = bd->bd_bh;

			gfs2_assert(sdp, bd->bd_ail == ai);

			if (!buffer_busy(bh)) {
				if (!buffer_uptodate(bh)) {
					gfs2_log_unlock(sdp);
					gfs2_io_error_bh(sdp, bh);
					gfs2_log_lock(sdp);
				}
				list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
				continue;
			}

			if (!buffer_dirty(bh))
				continue;

			list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);

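			/* The buffer is both dirty and busy: drop the log
			 * lock, wait for any I/O already in flight, start
			 * writeback, and then rescan the list, which may
			 * have changed while the lock was dropped. */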
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);

			retry = 1;
			break;
		}
	} while (retry);
}

/**
 * gfs2_ail1_empty_one - Check whether a transaction in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 * @flags: DIO_ALL to keep scanning past buffers that are still busy
 *
 * Returns: 1 if all of the entry's buffers have been written back, 0 otherwise
 */

static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int flags)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_ail == ai);

		if (buffer_busy(bh)) {
			if (flags & DIO_ALL)
				continue;
			else
				break;
		}

		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);

		list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
	}

	return list_empty(&ai->ai_ail1_list);
}

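/**
 * gfs2_ail1_start - Start writeback of buffers held on the AIL1 lists
 * @sdp: the filesystem
 * @flags: DIO_ALL to push every AIL entry; otherwise stop once the oldest
 *         entry has been written back or removed
 *
 * Each pass is tagged with a generation number (ai_sync_gen) so that an
 * entry is pushed at most once per pass, even though gfs2_ail1_start_one()
 * may drop and retake the log lock while the lists change underneath it.
 */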
static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
{
	struct list_head *head = &sdp->sd_ail1_list;
	u64 sync_gen;
	struct list_head *first;
	struct gfs2_ail *first_ai, *ai, *tmp;
	int done = 0;

	gfs2_log_lock(sdp);
	if (list_empty(head)) {
		gfs2_log_unlock(sdp);
		return;
	}
	sync_gen = sdp->sd_ail_sync_gen++;

	first = head->prev;
	first_ai = list_entry(first, struct gfs2_ail, ai_list);
	first_ai->ai_sync_gen = sync_gen;
	gfs2_ail1_start_one(sdp, first_ai); /* This may drop log lock */

	if (flags & DIO_ALL)
		first = NULL;

	while (!done) {
		if (first && (head->prev != first ||
			      gfs2_ail1_empty_one(sdp, first_ai, 0)))
			break;

		done = 1;
		list_for_each_entry_safe_reverse(ai, tmp, head, ai_list) {
			if (ai->ai_sync_gen >= sync_gen)
				continue;
			ai->ai_sync_gen = sync_gen;
			gfs2_ail1_start_one(sdp, ai); /* This may drop log lock */
			done = 0;
			break;
		}
	}

	gfs2_log_unlock(sdp);
}

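/**
 * gfs2_ail1_empty - Move fully written-back AIL1 entries onto the AIL2 list
 * @sdp: the filesystem
 * @flags: DIO_ALL to check every entry; otherwise stop at the first entry
 *         that still has busy buffers
 *
 * Returns: 1 if the AIL1 list is now empty, 0 otherwise
 */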
int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags)
{
	struct gfs2_ail *ai, *s;
	int ret;

	gfs2_log_lock(sdp);

	list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
		if (gfs2_ail1_empty_one(sdp, ai, flags))
			list_move(&ai->ai_list, &sdp->sd_ail2_list);
		else if (!(flags & DIO_ALL))
			break;
	}

	ret = list_empty(&sdp->sd_ail1_list);

	gfs2_log_unlock(sdp);

	return ret;
}

/**
 * gfs2_ail2_empty_one - Release all the buffers on an AIL2 list
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &ai->ai_ail2_list;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_ail == ai);
		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&bd->bd_gl->gl_ail_count);
		brelse(bd->bd_bh);
	}
}

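/**
 * ail2_empty - Free AIL2 entries that the log tail has moved past
 * @sdp: the filesystem
 * @new_tail: the journal block the log tail is being moved to
 *
 * An entry can be freed once the tail has advanced beyond its first block.
 * Since the journal is circular, the interval between the old and the new
 * tail may wrap around the end of the journal, hence the (a || b) versus
 * (a && b) test below.
 */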
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_ail *ai, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	gfs2_log_lock(sdp);

	list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
		a = (old_tail <= ai->ai_first);
		b = (ai->ai_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, ai);
		list_del(&ai->ai_list);
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
		kfree(ai);
	}

	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last six blocks of the journal. That's
 * because a small number of header blocks are associated with each log
 * flush. The exact number can't be known until flush time, so we make sure
 * that we always have just enough free blocks to avoid running out during
 * a log flush.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	unsigned int try = 0;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;

	mutex_lock(&sdp->sd_log_reserve_mutex);
	gfs2_log_lock(sdp);
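	/* While there is not enough free space (keeping six blocks back for
	 * flush headers), push the AIL and flush the log so that the tail
	 * can advance and release journal blocks; only start AIL writeback
	 * on the second and later attempts. */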
	while (sdp->sd_log_blks_free <= (blks + 6)) {
		gfs2_log_unlock(sdp);
		gfs2_ail1_empty(sdp, 0);
		gfs2_log_flush(sdp, NULL);

		if (try++)
			gfs2_ail1_start(sdp, 0);
		gfs2_log_lock(sdp);
	}
	sdp->sd_log_blks_free -= blks;
	gfs2_log_unlock(sdp);
	mutex_unlock(&sdp->sd_log_reserve_mutex);

	down_read(&sdp->sd_log_flush_lock);

	return 0;
}

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
	gfs2_log_lock(sdp);
	sdp->sd_log_blks_free += blks;
	gfs2_assert_withdraw(sdp,
			     sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
	gfs2_log_unlock(sdp);
	up_read(&sdp->sd_log_flush_lock);
}

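/* Map a journal-relative (logical) block number onto its physical disk
 * block number via the journal inode's metadata tree. */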
static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
{
	struct inode *inode = sdp->sd_jdesc->jd_inode;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	bh_map.b_size = 1 << inode->i_blkbits;
	error = gfs2_block_map(inode, lbn, 0, &bh_map);
	if (error || !bh_map.b_blocknr)
		printk(KERN_INFO "error=%d, dbn=%llu lbn=%u\n", error,
		       (unsigned long long)bh_map.b_blocknr, lbn);
	gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr);

	return bh_map.b_blocknr;
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 *   Compute the distance (in the journal direction) between two
 *   blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}

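/* Return the current on-disk tail of the log: the first block of the oldest
 * transaction still on the AIL1 list, or the log head if nothing is
 * outstanding. */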
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_ail *ai;
	unsigned int tail;

	gfs2_log_lock(sdp);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail, ai_list);
		tail = ai->ai_first;
	}

	gfs2_log_unlock(sdp);

	return tail;
}

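/* Advance the flush head by one block, wrapping at the end of the journal.
 * The flush head may only coincide with the tail when the log is completely
 * empty (head == tail), otherwise we would be overwriting the tail; hence
 * the assertion below. */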
static inline void log_incr_head(struct gfs2_sbd *sdp)
{
	if (sdp->sd_log_flush_head == sdp->sd_log_tail)
		gfs2_assert_withdraw(sdp, sdp->sd_log_flush_head == sdp->sd_log_head);

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
		sdp->sd_log_flush_head = 0;
		sdp->sd_log_flush_wrapped = 1;
	}
}

/**
 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct gfs2_log_buf *lb;
	struct buffer_head *bh;

	lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
	list_add(&lb->lb_list, &sdp->sd_log_flush_list);

	bh = lb->lb_bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	unlock_buffer(bh);

	log_incr_head(sdp);

	return bh;
}

/**
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the metadata buffer_head whose contents should be written to the log
 *
 * Returns: the fake buffer_head
 */

struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
				      struct buffer_head *real)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct gfs2_log_buf *lb;
	struct buffer_head *bh;

	lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
	list_add(&lb->lb_list, &sdp->sd_log_flush_list);
	lb->lb_real = real;

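	/* Build a throwaway buffer_head that shares the real buffer's page
	 * and offset but is addressed at the journal block, so the same data
	 * can be written into the log without copying it. */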
	bh = lb->lb_bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&bh->b_count, 1);
	bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate);
	set_bh_page(bh, real->b_page, bh_offset(real));
	bh->b_blocknr = blkno;
	bh->b_size = sdp->sd_sb.sb_bsize;
	bh->b_bdev = sdp->sd_vfs->s_bdev;

	log_incr_head(sdp);

	return bh;
}

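/* Move the log tail forward to new_tail and return the freed journal blocks
 * to the free pool.  When the tail is being pulled by writing an extra log
 * header (pull != 0), that header consumes one of the freed blocks. */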
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail, int pull)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	gfs2_log_lock(sdp);
	sdp->sd_log_blks_free += dist - (pull ? 1 : 0);
	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
	gfs2_log_unlock(sdp);

	sdp->sd_log_tail = new_tail;
}

/**
 * log_write_header - Write out a journal log header
 * @sdp: The GFS2 superblock
 * @flags: log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT)
 * @pull: non-zero if the header is written only to pull the log tail forward
 *
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;
	struct gfs2_log_header *lh;
	unsigned int tail;
	u32 hash;

	bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	unlock_buffer(bh);

	gfs2_ail1_empty(sdp, 0);
	tail = current_tail(sdp);

	lh = (struct gfs2_log_header *)bh->b_data;
	memset(lh, 0, sizeof(struct gfs2_log_header));
	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
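	/* The checksum is computed before lh_hash is filled in, so it covers
	 * a header whose hash field is still zero. */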
	hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
	lh->lh_hash = cpu_to_be32(hash);

	set_buffer_dirty(bh);
	if (sync_dirty_buffer(bh))
		gfs2_io_error_bh(sdp, bh);
	brelse(bh);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail, pull);
	else
		gfs2_assert_withdraw(sdp, !pull);

	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	log_incr_head(sdp);
}

static void log_flush_commit(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_log_flush_list;
	struct gfs2_log_buf *lb;
	struct buffer_head *bh;

	while (!list_empty(head)) {
		lb = list_entry(head->next, struct gfs2_log_buf, lb_list);
		list_del(&lb->lb_list);
		bh = lb->lb_bh;

		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);
		if (lb->lb_real) {
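			/* The fake buffer_head shares its page with the real
			 * metadata buffer, so wait for the extra I/O
			 * reference to be dropped and then free only the bh
			 * itself, never the page. */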
			while (atomic_read(&bh->b_count) != 1)  /* Grrrr... */
				schedule();
			free_buffer_head(bh);
		} else
			brelse(bh);
		kfree(lb);
	}

	log_write_header(sdp, 0, 0);
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
	struct gfs2_ail *ai;

	down_write(&sdp->sd_log_flush_lock);

	if (gl) {
		gfs2_log_lock(sdp);
		if (list_empty(&gl->gl_le.le_list)) {
			gfs2_log_unlock(sdp);
			up_write(&sdp->sd_log_flush_lock);
			return;
		}
		gfs2_log_unlock(sdp);
	}

	ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&ai->ai_ail1_list);
	INIT_LIST_HEAD(&ai->ai_ail2_list);

	gfs2_assert_withdraw(sdp, sdp->sd_log_num_buf == sdp->sd_log_commited_buf);
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;
	ai->ai_first = sdp->sd_log_flush_head;

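	/* lops_before_commit() writes out the journaled blocks and their log
	 * descriptors, log_flush_commit() waits for that I/O and writes the
	 * commit header, and lops_after_commit() then moves the in-core
	 * buffers onto this transaction's AIL lists. */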
	lops_before_commit(sdp);
	if (!list_empty(&sdp->sd_log_flush_list))
		log_flush_commit(sdp);
	else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle)
		log_write_header(sdp, 0, PULL);
	lops_after_commit(sdp, ai);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_free -= sdp->sd_log_num_hdrs;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_commited_buf = 0;
	sdp->sd_log_num_hdrs = 0;
	sdp->sd_log_commited_revoke = 0;

	if (!list_empty(&ai->ai_ail1_list)) {
		list_add(&ai->ai_list, &sdp->sd_ail1_list);
		ai = NULL;
	}
	gfs2_log_unlock(sdp);

	sdp->sd_vfs->s_dirt = 0;
	up_write(&sdp->sd_log_flush_lock);

	kfree(ai);
}

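/**
 * log_refund - Return a transaction's unused reservation to the free pool
 * @sdp: the filesystem
 * @tr: the transaction being committed
 *
 * The total number of blocks the pending commit now needs (buffers, revoke
 * descriptor blocks, plus one header) is recomputed, and the difference
 * between the transaction's reservation and the growth of that figure is
 * given back to sd_log_blks_free.
 */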
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved = 0;
	unsigned int old;

	gfs2_log_lock(sdp);

	sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_buf) >= 0);
	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_revoke) >= 0);

	if (sdp->sd_log_commited_buf)
		reserved += sdp->sd_log_commited_buf;
	if (sdp->sd_log_commited_revoke)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
					    sizeof(u64));
	if (reserved)
		reserved++;

	old = sdp->sd_log_blks_free;
	sdp->sd_log_blks_free += tr->tr_reserved -
				 (reserved - sdp->sd_log_blks_reserved);

	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free >= old);
	gfs2_assert_withdraw(sdp,
			     sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks +
			     sdp->sd_log_num_hdrs);

	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);
	lops_incore_commit(sdp, tr);

	sdp->sd_vfs->s_dirt = 1;
	up_read(&sdp->sd_log_flush_lock);

	gfs2_log_lock(sdp);
	if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks))
		wake_up_process(sdp->sd_logd_process);
	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	down_write(&sdp->sd_log_flush_lock);

	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_gl);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_jdata);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_hdrs);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT, 0);

	gfs2_assert_warn(sdp, sdp->sd_log_blks_free == sdp->sd_jdesc->jd_blocks);
	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;

	up_write(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_meta_syncfs - sync all the buffers in a filesystem
 * @sdp: the filesystem
 *
 */

void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
	gfs2_log_flush(sdp, NULL);
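	/* Keep pushing and draining the AIL until every buffer it tracks has
	 * been written back to its in-place location. */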
	for (;;) {
		gfs2_ail1_start(sdp, DIO_ALL);
		if (gfs2_ail1_empty(sdp, DIO_ALL))
			break;
		msleep(10);
	}
}