g_journal.c revision 227309
1/*-
2 * Copyright (c) 2005-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/geom/journal/g_journal.c 227309 2011-11-07 15:43:11Z ed $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/module.h>
34#include <sys/limits.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/bio.h>
38#include <sys/sysctl.h>
39#include <sys/malloc.h>
40#include <sys/mount.h>
41#include <sys/eventhandler.h>
42#include <sys/proc.h>
43#include <sys/kthread.h>
44#include <sys/sched.h>
45#include <sys/taskqueue.h>
46#include <sys/vnode.h>
47#include <sys/sbuf.h>
48#ifdef GJ_MEMDEBUG
49#include <sys/stack.h>
50#include <sys/kdb.h>
51#endif
52#include <vm/vm.h>
53#include <vm/vm_kern.h>
54#include <geom/geom.h>
55
56#include <geom/journal/g_journal.h>
57
58FEATURE(geom_journal, "GEOM journaling support");
59
60/*
61 * On-disk journal format:
62 *
63 * JH - Journal header
64 * RH - Record header
65 *
66 * %%%%%% ****** +------+ +------+     ****** +------+     %%%%%%
67 * % JH % * RH * | Data | | Data | ... * RH * | Data | ... % JH % ...
68 * %%%%%% ****** +------+ +------+     ****** +------+     %%%%%%
69 *
70 */
71
72CTASSERT(sizeof(struct g_journal_header) <= 512);
73CTASSERT(sizeof(struct g_journal_record_header) <= 512);
74
75static MALLOC_DEFINE(M_JOURNAL, "journal_data", "GEOM_JOURNAL Data");
76static struct mtx g_journal_cache_mtx;
77MTX_SYSINIT(g_journal_cache, &g_journal_cache_mtx, "cache usage", MTX_DEF);
78
79const struct g_journal_desc *g_journal_filesystems[] = {
80	&g_journal_ufs,
81	NULL
82};
83
84SYSCTL_DECL(_kern_geom);
85
86int g_journal_debug = 0;
87TUNABLE_INT("kern.geom.journal.debug", &g_journal_debug);
88static u_int g_journal_switch_time = 10;
89static u_int g_journal_force_switch = 70;
90static u_int g_journal_parallel_flushes = 16;
91static u_int g_journal_parallel_copies = 16;
92static u_int g_journal_accept_immediately = 64;
93static u_int g_journal_record_entries = GJ_RECORD_HEADER_NENTRIES;
94static u_int g_journal_do_optimize = 1;
95
96static SYSCTL_NODE(_kern_geom, OID_AUTO, journal, CTLFLAG_RW, 0,
97    "GEOM_JOURNAL stuff");
98SYSCTL_INT(_kern_geom_journal, OID_AUTO, debug, CTLFLAG_RW, &g_journal_debug, 0,
99    "Debug level");
100SYSCTL_UINT(_kern_geom_journal, OID_AUTO, switch_time, CTLFLAG_RW,
101    &g_journal_switch_time, 0, "Switch journals every N seconds");
102SYSCTL_UINT(_kern_geom_journal, OID_AUTO, force_switch, CTLFLAG_RW,
103    &g_journal_force_switch, 0, "Force switch when journal is N% full");
104SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_flushes, CTLFLAG_RW,
105    &g_journal_parallel_flushes, 0,
106    "Number of flush I/O requests to send in parallel");
107SYSCTL_UINT(_kern_geom_journal, OID_AUTO, accept_immediately, CTLFLAG_RW,
108    &g_journal_accept_immediately, 0,
109    "Number of I/O requests accepted immediately");
110SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_copies, CTLFLAG_RW,
111    &g_journal_parallel_copies, 0,
112    "Number of copy I/O requests to send in parallel");
113static int
114g_journal_record_entries_sysctl(SYSCTL_HANDLER_ARGS)
115{
116	u_int entries;
117	int error;
118
119	entries = g_journal_record_entries;
120	error = sysctl_handle_int(oidp, &entries, 0, req);
121	if (error != 0 || req->newptr == NULL)
122		return (error);
123	if (entries < 1 || entries > GJ_RECORD_HEADER_NENTRIES)
124		return (EINVAL);
125	g_journal_record_entries = entries;
126	return (0);
127}
128SYSCTL_PROC(_kern_geom_journal, OID_AUTO, record_entries,
129    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_record_entries_sysctl, "I",
130    "Maximum number of entires in one journal record");
131SYSCTL_UINT(_kern_geom_journal, OID_AUTO, optimize, CTLFLAG_RW,
132    &g_journal_do_optimize, 0, "Try to combine bios on flush and copy");
133
134static u_int g_journal_cache_used = 0;
135static u_int g_journal_cache_limit = 64 * 1024 * 1024;
136TUNABLE_INT("kern.geom.journal.cache.limit", &g_journal_cache_limit);
137static u_int g_journal_cache_divisor = 2;
138TUNABLE_INT("kern.geom.journal.cache.divisor", &g_journal_cache_divisor);
139static u_int g_journal_cache_switch = 90;
140static u_int g_journal_cache_misses = 0;
141static u_int g_journal_cache_alloc_failures = 0;
142static u_int g_journal_cache_low = 0;
143
144static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, cache, CTLFLAG_RW, 0,
145    "GEOM_JOURNAL cache");
146SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, used, CTLFLAG_RD,
147    &g_journal_cache_used, 0, "Number of allocated bytes");
148static int
149g_journal_cache_limit_sysctl(SYSCTL_HANDLER_ARGS)
150{
151	u_int limit;
152	int error;
153
154	limit = g_journal_cache_limit;
155	error = sysctl_handle_int(oidp, &limit, 0, req);
156	if (error != 0 || req->newptr == NULL)
157		return (error);
158	g_journal_cache_limit = limit;
159	g_journal_cache_low = (limit / 100) * g_journal_cache_switch;
160	return (0);
161}
162SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, limit,
163    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_cache_limit_sysctl, "I",
164    "Maximum number of allocated bytes");
165SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, divisor, CTLFLAG_RDTUN,
166    &g_journal_cache_divisor, 0,
167    "(kmem_size / kern.geom.journal.cache.divisor) == cache size");
168static int
169g_journal_cache_switch_sysctl(SYSCTL_HANDLER_ARGS)
170{
171	u_int cswitch;
172	int error;
173
174	cswitch = g_journal_cache_switch;
175	error = sysctl_handle_int(oidp, &cswitch, 0, req);
176	if (error != 0 || req->newptr == NULL)
177		return (error);
178	if (cswitch < 0 || cswitch > 100)
179		return (EINVAL);
180	g_journal_cache_switch = cswitch;
181	g_journal_cache_low = (g_journal_cache_limit / 100) * cswitch;
182	return (0);
183}
184SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, switch,
185    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_cache_switch_sysctl, "I",
186    "Force switch when we hit this percent of cache use");
187SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, misses, CTLFLAG_RW,
188    &g_journal_cache_misses, 0, "Number of cache misses");
189SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, alloc_failures, CTLFLAG_RW,
190    &g_journal_cache_alloc_failures, 0, "Memory allocation failures");
191
192static u_long g_journal_stats_bytes_skipped = 0;
193static u_long g_journal_stats_combined_ios = 0;
194static u_long g_journal_stats_switches = 0;
195static u_long g_journal_stats_wait_for_copy = 0;
196static u_long g_journal_stats_journal_full = 0;
197static u_long g_journal_stats_low_mem = 0;
198
199static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, stats, CTLFLAG_RW, 0,
200    "GEOM_JOURNAL statistics");
201SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, skipped_bytes, CTLFLAG_RW,
202    &g_journal_stats_bytes_skipped, 0, "Number of skipped bytes");
203SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, combined_ios, CTLFLAG_RW,
204    &g_journal_stats_combined_ios, 0, "Number of combined I/O requests");
205SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, switches, CTLFLAG_RW,
206    &g_journal_stats_switches, 0, "Number of journal switches");
207SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, wait_for_copy, CTLFLAG_RW,
208    &g_journal_stats_wait_for_copy, 0, "Wait for journal copy on switch");
209SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, journal_full, CTLFLAG_RW,
210    &g_journal_stats_journal_full, 0,
211    "Number of times journal was almost full.");
212SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, low_mem, CTLFLAG_RW,
213    &g_journal_stats_low_mem, 0, "Number of times low_mem hook was called.");
214
215static g_taste_t g_journal_taste;
216static g_ctl_req_t g_journal_config;
217static g_dumpconf_t g_journal_dumpconf;
218static g_init_t g_journal_init;
219static g_fini_t g_journal_fini;
220
221struct g_class g_journal_class = {
222	.name = G_JOURNAL_CLASS_NAME,
223	.version = G_VERSION,
224	.taste = g_journal_taste,
225	.ctlreq = g_journal_config,
226	.dumpconf = g_journal_dumpconf,
227	.init = g_journal_init,
228	.fini = g_journal_fini
229};
230
231static int g_journal_destroy(struct g_journal_softc *sc);
232static void g_journal_metadata_update(struct g_journal_softc *sc);
233static void g_journal_switch_wait(struct g_journal_softc *sc);
234
235#define	GJ_SWITCHER_WORKING	0
236#define	GJ_SWITCHER_DIE		1
237#define	GJ_SWITCHER_DIED	2
238static int g_journal_switcher_state = GJ_SWITCHER_WORKING;
239static int g_journal_switcher_wokenup = 0;
240static int g_journal_sync_requested = 0;
241
242#ifdef GJ_MEMDEBUG
243struct meminfo {
244	size_t		mi_size;
245	struct stack	mi_stack;
246};
247#endif
248
249/*
250 * We use our own malloc/realloc/free functions, so we can collect statistics
251 * and force a journal switch when we're running out of cache.
252 */
253static void *
254gj_malloc(size_t size, int flags)
255{
256	void *p;
257#ifdef GJ_MEMDEBUG
258	struct meminfo *mi;
259#endif
260
261	mtx_lock(&g_journal_cache_mtx);
262	if (g_journal_cache_limit > 0 && !g_journal_switcher_wokenup &&
263	    g_journal_cache_used + size > g_journal_cache_low) {
264		GJ_DEBUG(1, "No cache, waking up the switcher.");
265		g_journal_switcher_wokenup = 1;
266		wakeup(&g_journal_switcher_state);
267	}
268	if ((flags & M_NOWAIT) && g_journal_cache_limit > 0 &&
269	    g_journal_cache_used + size > g_journal_cache_limit) {
270		mtx_unlock(&g_journal_cache_mtx);
271		g_journal_cache_alloc_failures++;
272		return (NULL);
273	}
274	g_journal_cache_used += size;
275	mtx_unlock(&g_journal_cache_mtx);
276	flags &= ~M_NOWAIT;
277#ifndef GJ_MEMDEBUG
278	p = malloc(size, M_JOURNAL, flags | M_WAITOK);
279#else
280	mi = malloc(sizeof(*mi) + size, M_JOURNAL, flags | M_WAITOK);
281	p = (u_char *)mi + sizeof(*mi);
282	mi->mi_size = size;
283	stack_save(&mi->mi_stack);
284#endif
285	return (p);
286}
287
288static void
289gj_free(void *p, size_t size)
290{
291#ifdef GJ_MEMDEBUG
292	struct meminfo *mi;
293#endif
294
295	KASSERT(p != NULL, ("p=NULL"));
296	KASSERT(size > 0, ("size=0"));
297	mtx_lock(&g_journal_cache_mtx);
298	KASSERT(g_journal_cache_used >= size, ("Freeing too much?"));
299	g_journal_cache_used -= size;
300	mtx_unlock(&g_journal_cache_mtx);
301#ifdef GJ_MEMDEBUG
302	mi = p = (void *)((u_char *)p - sizeof(*mi));
303	if (mi->mi_size != size) {
304		printf("GJOURNAL: Size mismatch! %zu != %zu\n", size,
305		    mi->mi_size);
306		printf("GJOURNAL: Alloc backtrace:\n");
307		stack_print(&mi->mi_stack);
308		printf("GJOURNAL: Free backtrace:\n");
309		kdb_backtrace();
310	}
311#endif
312	free(p, M_JOURNAL);
313}
314
315static void *
316gj_realloc(void *p, size_t size, size_t oldsize)
317{
318	void *np;
319
320#ifndef GJ_MEMDEBUG
321	mtx_lock(&g_journal_cache_mtx);
322	g_journal_cache_used -= oldsize;
323	g_journal_cache_used += size;
324	mtx_unlock(&g_journal_cache_mtx);
325	np = realloc(p, size, M_JOURNAL, M_WAITOK);
326#else
327	np = gj_malloc(size, M_WAITOK);
328	bcopy(p, np, MIN(oldsize, size));
329	gj_free(p, oldsize);
330#endif
331	return (np);
332}
333
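/*
 * Check that the active journal has not overrun the inactive one and wake up
 * the switcher when the active journal occupies more than
 * kern.geom.journal.force_switch percent of the free journal space.
 */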
334static void
335g_journal_check_overflow(struct g_journal_softc *sc)
336{
337	off_t length, used;
338
339	if ((sc->sc_active.jj_offset < sc->sc_inactive.jj_offset &&
340	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset) ||
341	    (sc->sc_active.jj_offset > sc->sc_inactive.jj_offset &&
342	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset &&
343	     sc->sc_journal_offset < sc->sc_active.jj_offset)) {
344		panic("Journal overflow (joffset=%jd active=%jd inactive=%jd)",
345		    (intmax_t)sc->sc_journal_offset,
346		    (intmax_t)sc->sc_active.jj_offset,
347		    (intmax_t)sc->sc_inactive.jj_offset);
348	}
349	if (sc->sc_active.jj_offset < sc->sc_inactive.jj_offset) {
350		length = sc->sc_inactive.jj_offset - sc->sc_active.jj_offset;
351		used = sc->sc_journal_offset - sc->sc_active.jj_offset;
352	} else {
353		length = sc->sc_jend - sc->sc_active.jj_offset;
354		length += sc->sc_inactive.jj_offset - sc->sc_jstart;
355		if (sc->sc_journal_offset >= sc->sc_active.jj_offset)
356			used = sc->sc_journal_offset - sc->sc_active.jj_offset;
357		else {
358			used = sc->sc_jend - sc->sc_active.jj_offset;
359			used += sc->sc_journal_offset - sc->sc_jstart;
360		}
361	}
362	/* Already woken up? */
363	if (g_journal_switcher_wokenup)
364		return;
365	/*
366	 * If the active journal takes more than g_journal_force_switch percent
367	 * of the free journal space, we force a journal switch.
368	 */
369	KASSERT(length > 0,
370	    ("length=%jd used=%jd active=%jd inactive=%jd joffset=%jd",
371	    (intmax_t)length, (intmax_t)used,
372	    (intmax_t)sc->sc_active.jj_offset,
373	    (intmax_t)sc->sc_inactive.jj_offset,
374	    (intmax_t)sc->sc_journal_offset));
375	if ((used * 100) / length > g_journal_force_switch) {
376		g_journal_stats_journal_full++;
377		GJ_DEBUG(1, "Journal %s %jd%% full, forcing journal switch.",
378		    sc->sc_name, (used * 100) / length);
379		mtx_lock(&g_journal_cache_mtx);
380		g_journal_switcher_wokenup = 1;
381		wakeup(&g_journal_switcher_state);
382		mtx_unlock(&g_journal_cache_mtx);
383	}
384}
385
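/*
 * The orphan method: the underlying provider went away, so try to destroy the
 * journal device.
 */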
386static void
387g_journal_orphan(struct g_consumer *cp)
388{
389	struct g_journal_softc *sc;
390	char name[256];
391	int error;
392
393	g_topology_assert();
394	sc = cp->geom->softc;
395	strlcpy(name, cp->provider->name, sizeof(name));
396	GJ_DEBUG(0, "Lost provider %s.", name);
397	if (sc == NULL)
398		return;
399	error = g_journal_destroy(sc);
400	if (error == 0)
401		GJ_DEBUG(0, "Journal %s destroyed.", name);
402	else {
403		GJ_DEBUG(0, "Cannot destroy journal %s (error=%d). "
404		    "Destroy it manually after last close.", sc->sc_name,
405		    error);
406	}
407}
408
409static int
410g_journal_access(struct g_provider *pp, int acr, int acw, int ace)
411{
412	struct g_journal_softc *sc;
413	int dcr, dcw, dce;
414
415	g_topology_assert();
416	GJ_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name,
417	    acr, acw, ace);
418
419	dcr = pp->acr + acr;
420	dcw = pp->acw + acw;
421	dce = pp->ace + ace;
422
423	sc = pp->geom->softc;
424	if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY)) {
425		if (acr <= 0 && acw <= 0 && ace <= 0)
426			return (0);
427		else
428			return (ENXIO);
429	}
430	if (pp->acw == 0 && dcw > 0) {
431		GJ_DEBUG(1, "Marking %s as dirty.", sc->sc_name);
432		sc->sc_flags &= ~GJF_DEVICE_CLEAN;
433		g_topology_unlock();
434		g_journal_metadata_update(sc);
435		g_topology_lock();
436	} /* else if (pp->acw == 0 && dcw > 0 && JEMPTY(sc)) {
437		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
438		sc->sc_flags |= GJF_DEVICE_CLEAN;
439		g_topology_unlock();
440		g_journal_metadata_update(sc);
441		g_topology_lock();
442	} */
443	return (0);
444}
445
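/*
 * Encode/decode the journal header to/from its on-disk, little-endian
 * representation.
 */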
446static void
447g_journal_header_encode(struct g_journal_header *hdr, u_char *data)
448{
449
450	bcopy(GJ_HEADER_MAGIC, data, sizeof(GJ_HEADER_MAGIC));
451	data += sizeof(GJ_HEADER_MAGIC);
452	le32enc(data, hdr->jh_journal_id);
453	data += 4;
454	le32enc(data, hdr->jh_journal_next_id);
455}
456
457static int
458g_journal_header_decode(const u_char *data, struct g_journal_header *hdr)
459{
460
461	bcopy(data, hdr->jh_magic, sizeof(hdr->jh_magic));
462	data += sizeof(hdr->jh_magic);
463	if (bcmp(hdr->jh_magic, GJ_HEADER_MAGIC, sizeof(GJ_HEADER_MAGIC)) != 0)
464		return (EINVAL);
465	hdr->jh_journal_id = le32dec(data);
466	data += 4;
467	hdr->jh_journal_next_id = le32dec(data);
468	return (0);
469}
470
471static void
472g_journal_flush_cache(struct g_journal_softc *sc)
473{
474	struct bintime bt;
475	int error;
476
477	if (sc->sc_bio_flush == 0)
478		return;
479	GJ_TIMER_START(1, &bt);
480	if (sc->sc_bio_flush & GJ_FLUSH_JOURNAL) {
481		error = g_io_flush(sc->sc_jconsumer);
482		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
483		    sc->sc_jconsumer->provider->name, error);
484	}
485	if (sc->sc_bio_flush & GJ_FLUSH_DATA) {
486		/*
487		 * TODO: This could be called in parallel with the
488		 *       previous call.
489		 */
490		error = g_io_flush(sc->sc_dconsumer);
491		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
492		    sc->sc_dconsumer->provider->name, error);
493	}
494	GJ_TIMER_STOP(1, &bt, "Cache flush time");
495}
496
497static int
498g_journal_write_header(struct g_journal_softc *sc)
499{
500	struct g_journal_header hdr;
501	struct g_consumer *cp;
502	u_char *buf;
503	int error;
504
505	cp = sc->sc_jconsumer;
506	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
507
508	strlcpy(hdr.jh_magic, GJ_HEADER_MAGIC, sizeof(hdr.jh_magic));
509	hdr.jh_journal_id = sc->sc_journal_id;
510	hdr.jh_journal_next_id = sc->sc_journal_next_id;
511	g_journal_header_encode(&hdr, buf);
512	error = g_write_data(cp, sc->sc_journal_offset, buf,
513	    cp->provider->sectorsize);
514	/* if (error == 0) */
515	sc->sc_journal_offset += cp->provider->sectorsize;
516
517	gj_free(buf, cp->provider->sectorsize);
518	return (error);
519}
520
521/*
522 * Every journal record has a header and data following it.
523 * The functions below encode the header to little endian before storing
524 * it and decode it back to the system's endianness after reading.
525 */
526static void
527g_journal_record_header_encode(struct g_journal_record_header *hdr,
528    u_char *data)
529{
530	struct g_journal_entry *ent;
531	u_int i;
532
533	bcopy(GJ_RECORD_HEADER_MAGIC, data, sizeof(GJ_RECORD_HEADER_MAGIC));
534	data += sizeof(GJ_RECORD_HEADER_MAGIC);
535	le32enc(data, hdr->jrh_journal_id);
536	data += 8;
537	le16enc(data, hdr->jrh_nentries);
538	data += 2;
539	bcopy(hdr->jrh_sum, data, sizeof(hdr->jrh_sum));
540	data += 8;
541	for (i = 0; i < hdr->jrh_nentries; i++) {
542		ent = &hdr->jrh_entries[i];
543		le64enc(data, ent->je_joffset);
544		data += 8;
545		le64enc(data, ent->je_offset);
546		data += 8;
547		le64enc(data, ent->je_length);
548		data += 8;
549	}
550}
551
552static int
553g_journal_record_header_decode(const u_char *data,
554    struct g_journal_record_header *hdr)
555{
556	struct g_journal_entry *ent;
557	u_int i;
558
559	bcopy(data, hdr->jrh_magic, sizeof(hdr->jrh_magic));
560	data += sizeof(hdr->jrh_magic);
561	if (strcmp(hdr->jrh_magic, GJ_RECORD_HEADER_MAGIC) != 0)
562		return (EINVAL);
563	hdr->jrh_journal_id = le32dec(data);
564	data += 8;
565	hdr->jrh_nentries = le16dec(data);
566	data += 2;
567	if (hdr->jrh_nentries > GJ_RECORD_HEADER_NENTRIES)
568		return (EINVAL);
569	bcopy(data, hdr->jrh_sum, sizeof(hdr->jrh_sum));
570	data += 8;
571	for (i = 0; i < hdr->jrh_nentries; i++) {
572		ent = &hdr->jrh_entries[i];
573		ent->je_joffset = le64dec(data);
574		data += 8;
575		ent->je_offset = le64dec(data);
576		data += 8;
577		ent->je_length = le64dec(data);
578		data += 8;
579	}
580	return (0);
581}
582
583/*
584 * Function reads metadata from a provider (via the given consumer), decodes
585 * it to the system's endianness and verifies its correctness.
586 */
587static int
588g_journal_metadata_read(struct g_consumer *cp, struct g_journal_metadata *md)
589{
590	struct g_provider *pp;
591	u_char *buf;
592	int error;
593
594	g_topology_assert();
595
596	error = g_access(cp, 1, 0, 0);
597	if (error != 0)
598		return (error);
599	pp = cp->provider;
600	g_topology_unlock();
601	/* Metadata is stored in the last sector. */
602	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
603	    &error);
604	g_topology_lock();
605	g_access(cp, -1, 0, 0);
606	if (buf == NULL) {
607		GJ_DEBUG(1, "Cannot read metadata from %s (error=%d).",
608		    cp->provider->name, error);
609		return (error);
610	}
611
612	/* Decode metadata. */
613	error = journal_metadata_decode(buf, md);
614	g_free(buf);
615	/* Is this a gjournal provider at all? */
616	if (strcmp(md->md_magic, G_JOURNAL_MAGIC) != 0)
617		return (EINVAL);
618	/*
619	 * Are we able to handle this version of metadata?
620	 * We only maintain backward compatibility.
621	 */
622	if (md->md_version > G_JOURNAL_VERSION) {
623		GJ_DEBUG(0,
624		    "Kernel module is too old to handle metadata from %s.",
625		    cp->provider->name);
626		return (EINVAL);
627	}
628	/* Is checksum correct? */
629	if (error != 0) {
630		GJ_DEBUG(0, "MD5 metadata hash mismatch for provider %s.",
631		    cp->provider->name);
632		return (error);
633	}
634	return (0);
635}
636
637/*
638 * The two functions below are responsible for updating metadata.
639 * Only the metadata on the data provider is updated (we need to record
640 * information about the active journal there).
641 */
642static void
643g_journal_metadata_done(struct bio *bp)
644{
645
646	/*
647	 * There is not much we can do on error except report it.
648	 */
649	if (bp->bio_error != 0) {
650		GJ_LOGREQ(0, bp, "Cannot update metadata (error=%d).",
651		    bp->bio_error);
652	} else {
653		GJ_LOGREQ(2, bp, "Metadata updated.");
654	}
655	gj_free(bp->bio_data, bp->bio_length);
656	g_destroy_bio(bp);
657}
658
659static void
660g_journal_metadata_update(struct g_journal_softc *sc)
661{
662	struct g_journal_metadata md;
663	struct g_consumer *cp;
664	struct bio *bp;
665	u_char *sector;
666
667	cp = sc->sc_dconsumer;
668	sector = gj_malloc(cp->provider->sectorsize, M_WAITOK);
669	strlcpy(md.md_magic, G_JOURNAL_MAGIC, sizeof(md.md_magic));
670	md.md_version = G_JOURNAL_VERSION;
671	md.md_id = sc->sc_id;
672	md.md_type = sc->sc_orig_type;
673	md.md_jstart = sc->sc_jstart;
674	md.md_jend = sc->sc_jend;
675	md.md_joffset = sc->sc_inactive.jj_offset;
676	md.md_jid = sc->sc_journal_previous_id;
677	md.md_flags = 0;
678	if (sc->sc_flags & GJF_DEVICE_CLEAN)
679		md.md_flags |= GJ_FLAG_CLEAN;
680
681	if (sc->sc_flags & GJF_DEVICE_HARDCODED)
682		strlcpy(md.md_provider, sc->sc_name, sizeof(md.md_provider));
683	else
684		bzero(md.md_provider, sizeof(md.md_provider));
685	md.md_provsize = cp->provider->mediasize;
686	journal_metadata_encode(&md, sector);
687
688	/*
689	 * Flush the cache, so we know all data is on disk.
690	 * We write information like "journal is consistent" here, so we need
691	 * to be sure it really is. Without BIO_FLUSH here, we could end up in a
692	 * situation where the metadata is stored on disk, but not all the data.
693	 */
694	g_journal_flush_cache(sc);
695
696	bp = g_alloc_bio();
697	bp->bio_offset = cp->provider->mediasize - cp->provider->sectorsize;
698	bp->bio_length = cp->provider->sectorsize;
699	bp->bio_data = sector;
700	bp->bio_cmd = BIO_WRITE;
701	if (!(sc->sc_flags & GJF_DEVICE_DESTROY)) {
702		bp->bio_done = g_journal_metadata_done;
703		g_io_request(bp, cp);
704	} else {
705		bp->bio_done = NULL;
706		g_io_request(bp, cp);
707		biowait(bp, "gjmdu");
708		g_journal_metadata_done(bp);
709	}
710
711	/*
712	 * Be sure metadata reached the disk.
713	 */
714	g_journal_flush_cache(sc);
715}
716
717/*
718 * This is where I/O requests come to us from GEOM.
719 */
720static void
721g_journal_start(struct bio *bp)
722{
723	struct g_journal_softc *sc;
724
725	sc = bp->bio_to->geom->softc;
726	GJ_LOGREQ(3, bp, "Request received.");
727
728	switch (bp->bio_cmd) {
729	case BIO_READ:
730	case BIO_WRITE:
731		mtx_lock(&sc->sc_mtx);
732		bioq_insert_tail(&sc->sc_regular_queue, bp);
733		wakeup(sc);
734		mtx_unlock(&sc->sc_mtx);
735		return;
736	case BIO_GETATTR:
737		if (strcmp(bp->bio_attribute, "GJOURNAL::provider") == 0) {
738			strlcpy(bp->bio_data, bp->bio_to->name, bp->bio_length);
739			bp->bio_completed = strlen(bp->bio_to->name) + 1;
740			g_io_deliver(bp, 0);
741			return;
742		}
743		/* FALLTHROUGH */
744	case BIO_DELETE:
745	default:
746		g_io_deliver(bp, EOPNOTSUPP);
747		return;
748	}
749}
750
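/*
 * Generic completion routine: queue the finished bio on the back queue and
 * wake up the worker thread, which will finish the processing.
 */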
751static void
752g_journal_std_done(struct bio *bp)
753{
754	struct g_journal_softc *sc;
755
756	sc = bp->bio_from->geom->softc;
757	mtx_lock(&sc->sc_mtx);
758	bioq_insert_tail(&sc->sc_back_queue, bp);
759	wakeup(sc);
760	mtx_unlock(&sc->sc_mtx);
761}
762
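/*
 * Allocate a BIO_WRITE bio describing the range [start, end) at journal
 * offset joffset.  If data is given, it is copied into memory allocated from
 * the journal cache (bio_data is left NULL when the allocation fails).
 */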
763static struct bio *
764g_journal_new_bio(off_t start, off_t end, off_t joffset, u_char *data,
765    int flags)
766{
767	struct bio *bp;
768
769	bp = g_alloc_bio();
770	bp->bio_offset = start;
771	bp->bio_joffset = joffset;
772	bp->bio_length = end - start;
773	bp->bio_cmd = BIO_WRITE;
774	bp->bio_done = g_journal_std_done;
775	if (data == NULL)
776		bp->bio_data = NULL;
777	else {
778		bp->bio_data = gj_malloc(bp->bio_length, flags);
779		if (bp->bio_data != NULL)
780			bcopy(data, bp->bio_data, bp->bio_length);
781	}
782	return (bp);
783}
784
785#define	g_journal_insert_bio(head, bp, flags)				\
786	g_journal_insert((head), (bp)->bio_offset,			\
787		(bp)->bio_offset + (bp)->bio_length, (bp)->bio_joffset,	\
788		(bp)->bio_data, flags)
789/*
790 * The function below does a lot more than just insert a bio into the queue.
791 * It keeps the queue sorted by offset and ensures that there is no duplicated
792 * data (it combines bios whose ranges overlap).
793 *
794 * The function returns the number of bios inserted (a bio can be split).
795 */
796static int
797g_journal_insert(struct bio **head, off_t nstart, off_t nend, off_t joffset,
798    u_char *data, int flags)
799{
800	struct bio *nbp, *cbp, *pbp;
801	off_t cstart, cend;
802	u_char *tmpdata;
803	int n;
804
805	GJ_DEBUG(3, "INSERT(%p): (%jd, %jd, %jd)", *head, nstart, nend,
806	    joffset);
807	n = 0;
808	pbp = NULL;
809	GJQ_FOREACH(*head, cbp) {
810		cstart = cbp->bio_offset;
811		cend = cbp->bio_offset + cbp->bio_length;
812
813		if (nstart >= cend) {
814			/*
815			 *  +-------------+
816			 *  |             |
817			 *  |   current   |  +-------------+
818			 *  |     bio     |  |             |
819			 *  |             |  |     new     |
820			 *  +-------------+  |     bio     |
821			 *                   |             |
822			 *                   +-------------+
823			 */
824			GJ_DEBUG(3, "INSERT(%p): 1", *head);
825		} else if (nend <= cstart) {
826			/*
827			 *                   +-------------+
828			 *                   |             |
829			 *  +-------------+  |   current   |
830			 *  |             |  |     bio     |
831			 *  |     new     |  |             |
832			 *  |     bio     |  +-------------+
833			 *  |             |
834			 *  +-------------+
835			 */
836			nbp = g_journal_new_bio(nstart, nend, joffset, data,
837			    flags);
838			if (pbp == NULL)
839				*head = nbp;
840			else
841				pbp->bio_next = nbp;
842			nbp->bio_next = cbp;
843			n++;
844			GJ_DEBUG(3, "INSERT(%p): 2 (nbp=%p pbp=%p)", *head, nbp,
845			    pbp);
846			goto end;
847		} else if (nstart <= cstart && nend >= cend) {
848			/*
849			 *      +-------------+      +-------------+
850			 *      | current bio |      | current bio |
851			 *  +---+-------------+---+  +-------------+---+
852			 *  |   |             |   |  |             |   |
853			 *  |   |             |   |  |             |   |
854			 *  |   +-------------+   |  +-------------+   |
855			 *  |       new bio       |  |     new bio     |
856			 *  +---------------------+  +-----------------+
857			 *
858			 *      +-------------+  +-------------+
859			 *      | current bio |  | current bio |
860			 *  +---+-------------+  +-------------+
861			 *  |   |             |  |             |
862			 *  |   |             |  |             |
863			 *  |   +-------------+  +-------------+
864			 *  |     new bio     |  |   new bio   |
865			 *  +-----------------+  +-------------+
866			 */
867			g_journal_stats_bytes_skipped += cbp->bio_length;
868			cbp->bio_offset = nstart;
869			cbp->bio_joffset = joffset;
870			cbp->bio_length = cend - nstart;
871			if (cbp->bio_data != NULL) {
872				gj_free(cbp->bio_data, cend - cstart);
873				cbp->bio_data = NULL;
874			}
875			if (data != NULL) {
876				cbp->bio_data = gj_malloc(cbp->bio_length,
877				    flags);
878				if (cbp->bio_data != NULL) {
879					bcopy(data, cbp->bio_data,
880					    cbp->bio_length);
881				}
882				data += cend - nstart;
883			}
884			joffset += cend - nstart;
885			nstart = cend;
886			GJ_DEBUG(3, "INSERT(%p): 3 (cbp=%p)", *head, cbp);
887		} else if (nstart > cstart && nend >= cend) {
888			/*
889			 *  +-----------------+  +-------------+
890			 *  |   current bio   |  | current bio |
891			 *  |   +-------------+  |   +---------+---+
892			 *  |   |             |  |   |         |   |
893			 *  |   |             |  |   |         |   |
894			 *  +---+-------------+  +---+---------+   |
895			 *      |   new bio   |      |   new bio   |
896			 *      +-------------+      +-------------+
897			 */
898			g_journal_stats_bytes_skipped += cend - nstart;
899			nbp = g_journal_new_bio(nstart, cend, joffset, data,
900			    flags);
901			nbp->bio_next = cbp->bio_next;
902			cbp->bio_next = nbp;
903			cbp->bio_length = nstart - cstart;
904			if (cbp->bio_data != NULL) {
905				cbp->bio_data = gj_realloc(cbp->bio_data,
906				    cbp->bio_length, cend - cstart);
907			}
908			if (data != NULL)
909				data += cend - nstart;
910			joffset += cend - nstart;
911			nstart = cend;
912			n++;
913			GJ_DEBUG(3, "INSERT(%p): 4 (cbp=%p)", *head, cbp);
914		} else if (nstart > cstart && nend < cend) {
915			/*
916			 *  +---------------------+
917			 *  |     current bio     |
918			 *  |   +-------------+   |
919			 *  |   |             |   |
920			 *  |   |             |   |
921			 *  +---+-------------+---+
922			 *      |   new bio   |
923			 *      +-------------+
924			 */
925			g_journal_stats_bytes_skipped += nend - nstart;
926			nbp = g_journal_new_bio(nstart, nend, joffset, data,
927			    flags);
928			nbp->bio_next = cbp->bio_next;
929			cbp->bio_next = nbp;
930			if (cbp->bio_data == NULL)
931				tmpdata = NULL;
932			else
933				tmpdata = cbp->bio_data + nend - cstart;
934			nbp = g_journal_new_bio(nend, cend,
935			    cbp->bio_joffset + nend - cstart, tmpdata, flags);
936			nbp->bio_next = ((struct bio *)cbp->bio_next)->bio_next;
937			((struct bio *)cbp->bio_next)->bio_next = nbp;
938			cbp->bio_length = nstart - cstart;
939			if (cbp->bio_data != NULL) {
940				cbp->bio_data = gj_realloc(cbp->bio_data,
941				    cbp->bio_length, cend - cstart);
942			}
943			n += 2;
944			GJ_DEBUG(3, "INSERT(%p): 5 (cbp=%p)", *head, cbp);
945			goto end;
946		} else if (nstart <= cstart && nend < cend) {
947			/*
948			 *  +-----------------+      +-------------+
949			 *  |   current bio   |      | current bio |
950			 *  +-------------+   |  +---+---------+   |
951			 *  |             |   |  |   |         |   |
952			 *  |             |   |  |   |         |   |
953			 *  +-------------+---+  |   +---------+---+
954			 *  |   new bio   |      |   new bio   |
955			 *  +-------------+      +-------------+
956			 */
957			g_journal_stats_bytes_skipped += nend - nstart;
958			nbp = g_journal_new_bio(nstart, nend, joffset, data,
959			    flags);
960			if (pbp == NULL)
961				*head = nbp;
962			else
963				pbp->bio_next = nbp;
964			nbp->bio_next = cbp;
965			cbp->bio_offset = nend;
966			cbp->bio_length = cend - nend;
967			cbp->bio_joffset += nend - cstart;
968			tmpdata = cbp->bio_data;
969			if (tmpdata != NULL) {
970				cbp->bio_data = gj_malloc(cbp->bio_length,
971				    flags);
972				if (cbp->bio_data != NULL) {
973					bcopy(tmpdata + nend - cstart,
974					    cbp->bio_data, cbp->bio_length);
975				}
976				gj_free(tmpdata, cend - cstart);
977			}
978			n++;
979			GJ_DEBUG(3, "INSERT(%p): 6 (cbp=%p)", *head, cbp);
980			goto end;
981		}
982		if (nstart == nend)
983			goto end;
984		pbp = cbp;
985	}
986	nbp = g_journal_new_bio(nstart, nend, joffset, data, flags);
987	if (pbp == NULL)
988		*head = nbp;
989	else
990		pbp->bio_next = nbp;
991	nbp->bio_next = NULL;
992	n++;
993	GJ_DEBUG(3, "INSERT(%p): 8 (nbp=%p pbp=%p)", *head, nbp, pbp);
994end:
995	if (g_journal_debug >= 3) {
996		GJQ_FOREACH(*head, cbp) {
997			GJ_DEBUG(3, "ELEMENT: %p (%jd, %jd, %jd, %p)", cbp,
998			    (intmax_t)cbp->bio_offset,
999			    (intmax_t)cbp->bio_length,
1000			    (intmax_t)cbp->bio_joffset, cbp->bio_data);
1001		}
1002		GJ_DEBUG(3, "INSERT(%p): DONE %d", *head, n);
1003	}
1004	return (n);
1005}
1006
1007/*
1008 * The function combines neighbouring bios, trying to squeeze as much data as
1009 * possible into one bio.
1010 *
1011 * The function returns the number of bios combined (as a negative value).
1012 */
1013static int
1014g_journal_optimize(struct bio *head)
1015{
1016	struct bio *cbp, *pbp;
1017	int n;
1018
1019	n = 0;
1020	pbp = NULL;
1021	GJQ_FOREACH(head, cbp) {
1022		/* Skip bios which have to be read first. */
1023		if (cbp->bio_data == NULL) {
1024			pbp = NULL;
1025			continue;
1026		}
1027		/* There is no previous bio yet. */
1028		if (pbp == NULL) {
1029			pbp = cbp;
1030			continue;
1031		}
1032		/* Is this a neighbour bio? */
1033		if (pbp->bio_offset + pbp->bio_length != cbp->bio_offset) {
1034			/* Be sure that bios queue is sorted. */
1035			/* Be sure that the bio queue is sorted. */
1036			    ("poffset=%jd plength=%jd coffset=%jd",
1037			    (intmax_t)pbp->bio_offset,
1038			    (intmax_t)pbp->bio_length,
1039			    (intmax_t)cbp->bio_offset));
1040			pbp = cbp;
1041			continue;
1042		}
1043		/* Be sure we don't end up with a bio that is too big. */
1044		if (pbp->bio_length + cbp->bio_length > MAXPHYS) {
1045			pbp = cbp;
1046			continue;
1047		}
1048		/* Ok, we can join bios. */
1049		GJ_LOGREQ(4, pbp, "Join: ");
1050		GJ_LOGREQ(4, cbp, "and: ");
1051		pbp->bio_data = gj_realloc(pbp->bio_data,
1052		    pbp->bio_length + cbp->bio_length, pbp->bio_length);
1053		bcopy(cbp->bio_data, pbp->bio_data + pbp->bio_length,
1054		    cbp->bio_length);
1055		gj_free(cbp->bio_data, cbp->bio_length);
1056		pbp->bio_length += cbp->bio_length;
1057		pbp->bio_next = cbp->bio_next;
1058		g_destroy_bio(cbp);
1059		cbp = pbp;
1060		g_journal_stats_combined_ios++;
1061		n--;
1062		GJ_LOGREQ(4, pbp, "Got: ");
1063	}
1064	return (n);
1065}
1066
1067/*
1068 * TODO: Update comment.
1069 * These are the functions responsible for copying one portion of data from
1070 * the journal to the destination provider.
1071 * The order goes like this:
1072 * 1. Read the header, which contains information about the data blocks
1073 *    following it.
1074 * 2. Read the data blocks from the journal.
1075 * 3. Write the data blocks on the data provider.
1076 *
1077 * g_journal_copy_start()
1078 * g_journal_copy_done() - got finished write request, logs potential errors.
1079 */
1080
1081/*
1082 * When there is no data in cache, this function is used to read it.
1083 */
1084static void
1085g_journal_read_first(struct g_journal_softc *sc, struct bio *bp)
1086{
1087	struct bio *cbp;
1088
1089	/*
1090	 * We were short on memory, so the data was freed.
1091	 * In that case we need to read it back from the journal.
1092	 */
1093	cbp = g_alloc_bio();
1094	cbp->bio_cflags = bp->bio_cflags;
1095	cbp->bio_parent = bp;
1096	cbp->bio_offset = bp->bio_joffset;
1097	cbp->bio_length = bp->bio_length;
1098	cbp->bio_data = gj_malloc(bp->bio_length, M_WAITOK);
1099	cbp->bio_cmd = BIO_READ;
1100	cbp->bio_done = g_journal_std_done;
1101	GJ_LOGREQ(4, cbp, "READ FIRST");
1102	g_io_request(cbp, sc->sc_jconsumer);
1103	g_journal_cache_misses++;
1104}
1105
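/*
 * Send up to kern.geom.journal.parallel_copies requests from the inactive
 * journal queue to the data provider.  Requests whose data is no longer
 * cached are first read back from the journal.
 */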
1106static void
1107g_journal_copy_send(struct g_journal_softc *sc)
1108{
1109	struct bio *bioq, *bp, *lbp;
1110
1111	bioq = lbp = NULL;
1112	mtx_lock(&sc->sc_mtx);
1113	for (; sc->sc_copy_in_progress < g_journal_parallel_copies;) {
1114		bp = GJQ_FIRST(sc->sc_inactive.jj_queue);
1115		if (bp == NULL)
1116			break;
1117		GJQ_REMOVE(sc->sc_inactive.jj_queue, bp);
1118		sc->sc_copy_in_progress++;
1119		GJQ_INSERT_AFTER(bioq, bp, lbp);
1120		lbp = bp;
1121	}
1122	mtx_unlock(&sc->sc_mtx);
1123	if (g_journal_do_optimize)
1124		sc->sc_copy_in_progress += g_journal_optimize(bioq);
1125	while ((bp = GJQ_FIRST(bioq)) != NULL) {
1126		GJQ_REMOVE(bioq, bp);
1127		GJQ_INSERT_HEAD(sc->sc_copy_queue, bp);
1128		bp->bio_cflags = GJ_BIO_COPY;
1129		if (bp->bio_data == NULL)
1130			g_journal_read_first(sc, bp);
1131		else {
1132			bp->bio_joffset = 0;
1133			GJ_LOGREQ(4, bp, "SEND");
1134			g_io_request(bp, sc->sc_dconsumer);
1135		}
1136	}
1137}
1138
1139static void
1140g_journal_copy_start(struct g_journal_softc *sc)
1141{
1142
1143	/*
1144	 * Remember in metadata that we're starting to copy journaled data
1145	 * to the data provider.
1146	 * In case of power failure, we will copy this data once again on boot.
1147	 */
1148	if (!sc->sc_journal_copying) {
1149		sc->sc_journal_copying = 1;
1150		GJ_DEBUG(1, "Starting copy of journal.");
1151		g_journal_metadata_update(sc);
1152	}
1153	g_journal_copy_send(sc);
1154}
1155
1156/*
1157 * Data block has been read from the journal provider.
1158 */
1159static int
1160g_journal_copy_read_done(struct bio *bp)
1161{
1162	struct g_journal_softc *sc;
1163	struct g_consumer *cp;
1164	struct bio *pbp;
1165
1166	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
1167	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));
1168
1169	sc = bp->bio_from->geom->softc;
1170	pbp = bp->bio_parent;
1171
1172	if (bp->bio_error != 0) {
1173		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
1174		    bp->bio_to->name, bp->bio_error);
1175		/*
1176		 * We will not be able to deliver the WRITE request either.
1177		 */
1178		gj_free(bp->bio_data, bp->bio_length);
1179		g_destroy_bio(pbp);
1180		g_destroy_bio(bp);
1181		sc->sc_copy_in_progress--;
1182		return (1);
1183	}
1184	pbp->bio_data = bp->bio_data;
1185	cp = sc->sc_dconsumer;
1186	g_io_request(pbp, cp);
1187	GJ_LOGREQ(4, bp, "READ DONE");
1188	g_destroy_bio(bp);
1189	return (0);
1190}
1191
1192/*
1193 * Data block has been written to the data provider.
1194 */
1195static void
1196g_journal_copy_write_done(struct bio *bp)
1197{
1198	struct g_journal_softc *sc;
1199
1200	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
1201	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));
1202
1203	sc = bp->bio_from->geom->softc;
1204	sc->sc_copy_in_progress--;
1205
1206	if (bp->bio_error != 0) {
1207		GJ_LOGREQ(0, bp, "[copy] Error while writing data (error=%d)",
1208		    bp->bio_error);
1209	}
1210	GJQ_REMOVE(sc->sc_copy_queue, bp);
1211	gj_free(bp->bio_data, bp->bio_length);
1212	GJ_LOGREQ(4, bp, "DONE");
1213	g_destroy_bio(bp);
1214
1215	if (sc->sc_copy_in_progress == 0) {
1216		/*
1217		 * This was the last write request for this journal.
1218		 */
1219		GJ_DEBUG(1, "Data has been copied.");
1220		sc->sc_journal_copying = 0;
1221	}
1222}
1223
1224static void g_journal_flush_done(struct bio *bp);
1225
1226/*
1227 * Flush one record onto the active journal provider.
1228 */
1229static void
1230g_journal_flush(struct g_journal_softc *sc)
1231{
1232	struct g_journal_record_header hdr;
1233	struct g_journal_entry *ent;
1234	struct g_provider *pp;
1235	struct bio **bioq;
1236	struct bio *bp, *fbp, *pbp;
1237	off_t joffset, size;
1238	u_char *data, hash[16];
1239	MD5_CTX ctx;
1240	u_int i;
1241
1242	if (sc->sc_current_count == 0)
1243		return;
1244
1245	size = 0;
1246	pp = sc->sc_jprovider;
1247	GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
1248	joffset = sc->sc_journal_offset;
1249
1250	GJ_DEBUG(2, "Storing %d journal entries on %s at %jd.",
1251	    sc->sc_current_count, pp->name, (intmax_t)joffset);
1252
1253	/*
1254	 * Store 'journal id', so we know to which journal this record belongs.
1255	 */
1256	hdr.jrh_journal_id = sc->sc_journal_id;
1257	/* Could be less than g_journal_record_entries if called due to a timeout. */
1258	hdr.jrh_nentries = MIN(sc->sc_current_count, g_journal_record_entries);
1259	strlcpy(hdr.jrh_magic, GJ_RECORD_HEADER_MAGIC, sizeof(hdr.jrh_magic));
1260
1261	bioq = &sc->sc_active.jj_queue;
1262	pbp = sc->sc_flush_queue;
1263
1264	fbp = g_alloc_bio();
1265	fbp->bio_parent = NULL;
1266	fbp->bio_cflags = GJ_BIO_JOURNAL;
1267	fbp->bio_offset = -1;
1268	fbp->bio_joffset = joffset;
1269	fbp->bio_length = pp->sectorsize;
1270	fbp->bio_cmd = BIO_WRITE;
1271	fbp->bio_done = g_journal_std_done;
1272	GJQ_INSERT_AFTER(sc->sc_flush_queue, fbp, pbp);
1273	pbp = fbp;
1274	fbp->bio_to = pp;
1275	GJ_LOGREQ(4, fbp, "FLUSH_OUT");
1276	joffset += pp->sectorsize;
1277	sc->sc_flush_count++;
1278	if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1279		MD5Init(&ctx);
1280
1281	for (i = 0; i < hdr.jrh_nentries; i++) {
1282		bp = sc->sc_current_queue;
1283		KASSERT(bp != NULL, ("NULL bp"));
1284		bp->bio_to = pp;
1285		GJ_LOGREQ(4, bp, "FLUSHED");
1286		sc->sc_current_queue = bp->bio_next;
1287		bp->bio_next = NULL;
1288		sc->sc_current_count--;
1289
1290		/* Add to the header. */
1291		ent = &hdr.jrh_entries[i];
1292		ent->je_offset = bp->bio_offset;
1293		ent->je_joffset = joffset;
1294		ent->je_length = bp->bio_length;
1295		size += ent->je_length;
1296
1297		data = bp->bio_data;
1298		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1299			MD5Update(&ctx, data, ent->je_length);
1300		bzero(bp, sizeof(*bp));
1301		bp->bio_cflags = GJ_BIO_JOURNAL;
1302		bp->bio_offset = ent->je_offset;
1303		bp->bio_joffset = ent->je_joffset;
1304		bp->bio_length = ent->je_length;
1305		bp->bio_data = data;
1306		bp->bio_cmd = BIO_WRITE;
1307		bp->bio_done = g_journal_std_done;
1308		GJQ_INSERT_AFTER(sc->sc_flush_queue, bp, pbp);
1309		pbp = bp;
1310		bp->bio_to = pp;
1311		GJ_LOGREQ(4, bp, "FLUSH_OUT");
1312		joffset += bp->bio_length;
1313		sc->sc_flush_count++;
1314
1315		/*
1316		 * Add the request to the active journal's queue.
1317		 * This is our cache. After a journal switch we don't have to
1318		 * read the data from the inactive journal, because we keep
1319		 * it in memory.
1320		 */
1321		g_journal_insert(bioq, ent->je_offset,
1322		    ent->je_offset + ent->je_length, ent->je_joffset, data,
1323		    M_NOWAIT);
1324	}
1325
1326	/*
1327	 * After all requests, store valid header.
1328	 */
1329	data = gj_malloc(pp->sectorsize, M_WAITOK);
1330	if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1331		MD5Final(hash, &ctx);
1332		bcopy(hash, hdr.jrh_sum, sizeof(hdr.jrh_sum));
1333	}
1334	g_journal_record_header_encode(&hdr, data);
1335	fbp->bio_data = data;
1336
1337	sc->sc_journal_offset = joffset;
1338
1339	g_journal_check_overflow(sc);
1340}
1341
1342/*
1343 * Flush request finished.
1344 */
1345static void
1346g_journal_flush_done(struct bio *bp)
1347{
1348	struct g_journal_softc *sc;
1349	struct g_consumer *cp;
1350
1351	KASSERT((bp->bio_cflags & GJ_BIO_MASK) == GJ_BIO_JOURNAL,
1352	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_JOURNAL));
1353
1354	cp = bp->bio_from;
1355	sc = cp->geom->softc;
1356	sc->sc_flush_in_progress--;
1357
1358	if (bp->bio_error != 0) {
1359		GJ_LOGREQ(0, bp, "[flush] Error while writing data (error=%d)",
1360		    bp->bio_error);
1361	}
1362	gj_free(bp->bio_data, bp->bio_length);
1363	GJ_LOGREQ(4, bp, "DONE");
1364	g_destroy_bio(bp);
1365}
1366
1367static void g_journal_release_delayed(struct g_journal_softc *sc);
1368
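/*
 * Send up to kern.geom.journal.parallel_flushes requests from the flush queue
 * to the active journal provider, releasing delayed requests along the way.
 */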
1369static void
1370g_journal_flush_send(struct g_journal_softc *sc)
1371{
1372	struct g_consumer *cp;
1373	struct bio *bioq, *bp, *lbp;
1374
1375	cp = sc->sc_jconsumer;
1376	bioq = lbp = NULL;
1377	while (sc->sc_flush_in_progress < g_journal_parallel_flushes) {
1378		/* Send one flush request to the active journal. */
1379		bp = GJQ_FIRST(sc->sc_flush_queue);
1380		if (bp != NULL) {
1381			GJQ_REMOVE(sc->sc_flush_queue, bp);
1382			sc->sc_flush_count--;
1383			bp->bio_offset = bp->bio_joffset;
1384			bp->bio_joffset = 0;
1385			sc->sc_flush_in_progress++;
1386			GJQ_INSERT_AFTER(bioq, bp, lbp);
1387			lbp = bp;
1388		}
1389		/* Try to release delayed requests. */
1390		g_journal_release_delayed(sc);
1391		/* If there are no requests to flush, leave. */
1392		if (GJQ_FIRST(sc->sc_flush_queue) == NULL)
1393			break;
1394	}
1395	if (g_journal_do_optimize)
1396		sc->sc_flush_in_progress += g_journal_optimize(bioq);
1397	while ((bp = GJQ_FIRST(bioq)) != NULL) {
1398		GJQ_REMOVE(bioq, bp);
1399		GJ_LOGREQ(3, bp, "Flush request send");
1400		g_io_request(bp, cp);
1401	}
1402}
1403
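/*
 * Insert the request into the current queue, acknowledge it immediately and
 * flush a record to the active journal once enough entries are collected.
 */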
1404static void
1405g_journal_add_current(struct g_journal_softc *sc, struct bio *bp)
1406{
1407	int n;
1408
1409	GJ_LOGREQ(4, bp, "CURRENT %d", sc->sc_current_count);
1410	n = g_journal_insert_bio(&sc->sc_current_queue, bp, M_WAITOK);
1411	sc->sc_current_count += n;
1412	n = g_journal_optimize(sc->sc_current_queue);
1413	sc->sc_current_count += n;
1414	/*
1415	 * For requests which are added to the current queue we deliver
1416	 * the response immediately.
1417	 */
1418	bp->bio_completed = bp->bio_length;
1419	g_io_deliver(bp, 0);
1420	if (sc->sc_current_count >= g_journal_record_entries) {
1421		/*
1422		 * Let's flush one record onto active journal provider.
1423		 */
1424		g_journal_flush(sc);
1425	}
1426}
1427
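/*
 * Move delayed requests to the current queue as long as there is room in the
 * flush queue.
 */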
1428static void
1429g_journal_release_delayed(struct g_journal_softc *sc)
1430{
1431	struct bio *bp;
1432
1433	for (;;) {
1434		/* The flush queue is full, exit. */
1435		if (sc->sc_flush_count >= g_journal_accept_immediately)
1436			return;
1437		bp = bioq_takefirst(&sc->sc_delayed_queue);
1438		if (bp == NULL)
1439			return;
1440		sc->sc_delayed_count--;
1441		g_journal_add_current(sc, bp);
1442	}
1443}
1444
1445/*
1446 * Add an I/O request to the current queue. If we have enough requests for one
1447 * journal record, we flush them onto the active journal provider.
1448 */
1449static void
1450g_journal_add_request(struct g_journal_softc *sc, struct bio *bp)
1451{
1452
1453	/*
1454	 * The flush queue is full, we need to delay the request.
1455	 */
1456	if (sc->sc_delayed_count > 0 ||
1457	    sc->sc_flush_count >= g_journal_accept_immediately) {
1458		GJ_LOGREQ(4, bp, "DELAYED");
1459		bioq_insert_tail(&sc->sc_delayed_queue, bp);
1460		sc->sc_delayed_count++;
1461		return;
1462	}
1463
1464	KASSERT(TAILQ_EMPTY(&sc->sc_delayed_queue.queue),
1465	    ("DELAYED queue not empty."));
1466	g_journal_add_current(sc, bp);
1467}
1468
1469static void g_journal_read_done(struct bio *bp);
1470
1471/*
1472 * Try to find requested data in cache.
1473 */
1474static struct bio *
1475g_journal_read_find(struct bio *head, int sorted, struct bio *pbp, off_t ostart,
1476    off_t oend)
1477{
1478	off_t cstart, cend;
1479	struct bio *bp;
1480
1481	GJQ_FOREACH(head, bp) {
1482		if (bp->bio_offset == -1)
1483			continue;
1484		cstart = MAX(ostart, bp->bio_offset);
1485		cend = MIN(oend, bp->bio_offset + bp->bio_length);
1486		if (cend <= ostart)
1487			continue;
1488		else if (cstart >= oend) {
1489			if (!sorted)
1490				continue;
1491			else {
1492				bp = NULL;
1493				break;
1494			}
1495		}
1496		if (bp->bio_data == NULL)
1497			break;
1498		GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
1499		    bp);
1500		bcopy(bp->bio_data + cstart - bp->bio_offset,
1501		    pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
1502		pbp->bio_completed += cend - cstart;
1503		if (pbp->bio_completed == pbp->bio_length) {
1504			/*
1505			 * Cool, the whole request was in cache, deliver happy
1506			 * message.
1507			 */
1508			g_io_deliver(pbp, 0);
1509			return (pbp);
1510		}
1511		break;
1512	}
1513	return (bp);
1514}
1515
1516/*
1517 * Try to find requested data in cache.
1518 */
1519static struct bio *
1520g_journal_read_queue_find(struct bio_queue *head, struct bio *pbp, off_t ostart,
1521    off_t oend)
1522{
1523	off_t cstart, cend;
1524	struct bio *bp;
1525
1526	TAILQ_FOREACH(bp, head, bio_queue) {
1527		cstart = MAX(ostart, bp->bio_offset);
1528		cend = MIN(oend, bp->bio_offset + bp->bio_length);
1529		if (cend <= ostart)
1530			continue;
1531		else if (cstart >= oend)
1532			continue;
1533		KASSERT(bp->bio_data != NULL,
1534		    ("%s: bio_data == NULL", __func__));
1535		GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
1536		    bp);
1537		bcopy(bp->bio_data + cstart - bp->bio_offset,
1538		    pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
1539		pbp->bio_completed += cend - cstart;
1540		if (pbp->bio_completed == pbp->bio_length) {
1541			/*
1542			 * Cool, the whole request was in cache, deliver happy
1543			 * message.
1544			 */
1545			g_io_deliver(pbp, 0);
1546			return (pbp);
1547		}
1548		break;
1549	}
1550	return (bp);
1551}
1552
1553/*
1554 * This function is used for collecting data on read.
1555 * The complexity comes from the fact that parts of the data can be stored in
1556 * several different places:
1557 * - in delayed requests
1558 * - in memory - the data not yet sent to the active journal provider
1559 * - in requests which are going to be sent to the active journal
1560 * - in the active journal
1561 * - in the inactive journal
1562 * - in the data provider
1563 */
1564static void
1565g_journal_read(struct g_journal_softc *sc, struct bio *pbp, off_t ostart,
1566    off_t oend)
1567{
1568	struct bio *bp, *nbp, *head;
1569	off_t cstart, cend;
1570	u_int i, sorted = 0;
1571
1572	GJ_DEBUG(3, "READ: (%jd, %jd)", ostart, oend);
1573
1574	cstart = cend = -1;
1575	bp = NULL;
1576	head = NULL;
1577	for (i = 0; i <= 5; i++) {
1578		switch (i) {
1579		case 0:	/* Delayed requests. */
1580			head = NULL;
1581			sorted = 0;
1582			break;
1583		case 1:	/* Not-yet-sent data. */
1584			head = sc->sc_current_queue;
1585			sorted = 1;
1586			break;
1587		case 2:	/* In-flight to the active journal. */
1588			head = sc->sc_flush_queue;
1589			sorted = 0;
1590			break;
1591		case 3:	/* Active journal. */
1592			head = sc->sc_active.jj_queue;
1593			sorted = 1;
1594			break;
1595		case 4:	/* Inactive journal. */
1596			/*
1597			 * XXX: There could be a race here with g_journal_lowmem().
1598			 */
1599			head = sc->sc_inactive.jj_queue;
1600			sorted = 1;
1601			break;
1602		case 5:	/* In-flight to the data provider. */
1603			head = sc->sc_copy_queue;
1604			sorted = 0;
1605			break;
1606		default:
1607			panic("gjournal %s: i=%d", __func__, i);
1608		}
1609		if (i == 0)
1610			bp = g_journal_read_queue_find(&sc->sc_delayed_queue.queue, pbp, ostart, oend);
1611		else
1612			bp = g_journal_read_find(head, sorted, pbp, ostart, oend);
1613		if (bp == pbp) { /* Got the whole request. */
1614			GJ_DEBUG(2, "Got the whole request from %u.", i);
1615			return;
1616		} else if (bp != NULL) {
1617			cstart = MAX(ostart, bp->bio_offset);
1618			cend = MIN(oend, bp->bio_offset + bp->bio_length);
1619			GJ_DEBUG(2, "Got part of the request from %u (%jd-%jd).",
1620			    i, (intmax_t)cstart, (intmax_t)cend);
1621			break;
1622		}
1623	}
1624	if (bp != NULL) {
1625		if (bp->bio_data == NULL) {
1626			nbp = g_duplicate_bio(pbp);
1627			nbp->bio_cflags = GJ_BIO_READ;
1628			nbp->bio_data =
1629			    pbp->bio_data + cstart - pbp->bio_offset;
1630			nbp->bio_offset =
1631			    bp->bio_joffset + cstart - bp->bio_offset;
1632			nbp->bio_length = cend - cstart;
1633			nbp->bio_done = g_journal_read_done;
1634			g_io_request(nbp, sc->sc_jconsumer);
1635		}
1636		/*
1637		 * If we don't have the whole request yet, call g_journal_read()
1638		 * recursively.
1639		 */
1640		if (ostart < cstart)
1641			g_journal_read(sc, pbp, ostart, cstart);
1642		if (oend > cend)
1643			g_journal_read(sc, pbp, cend, oend);
1644	} else {
1645		/*
1646		 * No data in memory, no data in the journal.
1647		 * It's time to ask the data provider.
1648		 */
1649		GJ_DEBUG(3, "READ(data): (%jd, %jd)", ostart, oend);
1650		nbp = g_duplicate_bio(pbp);
1651		nbp->bio_cflags = GJ_BIO_READ;
1652		nbp->bio_data = pbp->bio_data + ostart - pbp->bio_offset;
1653		nbp->bio_offset = ostart;
1654		nbp->bio_length = oend - ostart;
1655		nbp->bio_done = g_journal_read_done;
1656		g_io_request(nbp, sc->sc_dconsumer);
1657		/* We have the whole request, return here. */
1658		return;
1659	}
1660}
1661
1662/*
1663 * Function responsible for handling finished READ requests.
1664 * Actually, g_std_done() could be used here; the only difference is that we
1665 * log the error.
1666 */
1667static void
1668g_journal_read_done(struct bio *bp)
1669{
1670	struct bio *pbp;
1671
1672	KASSERT(bp->bio_cflags == GJ_BIO_READ,
1673	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_READ));
1674
1675	pbp = bp->bio_parent;
1676	pbp->bio_inbed++;
1677	pbp->bio_completed += bp->bio_length;
1678
1679	if (bp->bio_error != 0) {
1680		if (pbp->bio_error == 0)
1681			pbp->bio_error = bp->bio_error;
1682		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
1683		    bp->bio_to->name, bp->bio_error);
1684	}
1685	g_destroy_bio(bp);
1686	if (pbp->bio_children == pbp->bio_inbed &&
1687	    pbp->bio_completed == pbp->bio_length) {
1688		/* We're done. */
1689		g_io_deliver(pbp, 0);
1690	}
1691}
1692
1693/*
1694 * Deactivate the current journal and activate the next one.
1695 */
1696static void
1697g_journal_switch(struct g_journal_softc *sc)
1698{
1699	struct g_provider *pp;
1700
1701	if (JEMPTY(sc)) {
1702		GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
1703		pp = LIST_FIRST(&sc->sc_geom->provider);
1704		if (!(sc->sc_flags & GJF_DEVICE_CLEAN) && pp->acw == 0) {
1705			sc->sc_flags |= GJF_DEVICE_CLEAN;
1706			GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
1707			g_journal_metadata_update(sc);
1708		}
1709	} else {
1710		GJ_DEBUG(3, "Switching journal %s.", sc->sc_geom->name);
1711
1712		pp = sc->sc_jprovider;
1713
1714		sc->sc_journal_previous_id = sc->sc_journal_id;
1715
1716		sc->sc_journal_id = sc->sc_journal_next_id;
1717		sc->sc_journal_next_id = arc4random();
1718
1719		GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
1720
1721		g_journal_write_header(sc);
1722
1723		sc->sc_inactive.jj_offset = sc->sc_active.jj_offset;
1724		sc->sc_inactive.jj_queue = sc->sc_active.jj_queue;
1725
1726		sc->sc_active.jj_offset =
1727		    sc->sc_journal_offset - pp->sectorsize;
1728		sc->sc_active.jj_queue = NULL;
1729
1730		/*
1731		 * Switch is done, start copying data from the (now) inactive
1732		 * journal to the data provider.
1733		 */
1734		g_journal_copy_start(sc);
1735	}
1736	mtx_lock(&sc->sc_mtx);
1737	sc->sc_flags &= ~GJF_DEVICE_SWITCH;
1738	mtx_unlock(&sc->sc_mtx);
1739}
1740
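/*
 * Prepare a fresh journal: generate journal IDs and write the initial journal
 * header at the start of the journal area.
 */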
1741static void
1742g_journal_initialize(struct g_journal_softc *sc)
1743{
1744
1745	sc->sc_journal_id = arc4random();
1746	sc->sc_journal_next_id = arc4random();
1747	sc->sc_journal_previous_id = sc->sc_journal_id;
1748	sc->sc_journal_offset = sc->sc_jstart;
1749	sc->sc_inactive.jj_offset = sc->sc_jstart;
1750	g_journal_write_header(sc);
1751	sc->sc_active.jj_offset = sc->sc_jstart;
1752}
1753
1754static void
1755g_journal_mark_as_dirty(struct g_journal_softc *sc)
1756{
1757	const struct g_journal_desc *desc;
1758	int i;
1759
1760	GJ_DEBUG(1, "Marking file system %s as dirty.", sc->sc_name);
1761	for (i = 0; (desc = g_journal_filesystems[i]) != NULL; i++)
1762		desc->jd_dirty(sc->sc_dconsumer);
1763}
1764
1765/*
1766 * Function reads a record header from the given journal.
1767 * It is very similar to g_read_data(9), but it doesn't allocate memory for the
1768 * bio and the data on every call.
1769 */
1770static int
1771g_journal_sync_read(struct g_consumer *cp, struct bio *bp, off_t offset,
1772    void *data)
1773{
1774	int error;
1775
1776	bzero(bp, sizeof(*bp));
1777	bp->bio_cmd = BIO_READ;
1778	bp->bio_done = NULL;
1779	bp->bio_offset = offset;
1780	bp->bio_length = cp->provider->sectorsize;
1781	bp->bio_data = data;
1782	g_io_request(bp, cp);
1783	error = biowait(bp, "gjs_read");
1784	return (error);
1785}
1786
1787#if 0
1788/*
1789 * This function is called when we start the journal device and detect that
1790 * one of the journals was not fully copied.
1791 * The purpose of this function is to read all record headers from the journal
1792 * and place them in the inactive queue, so we can start the journal
1793 * synchronization process and the journal provider itself.
1794 * The design decision was to not synchronize the whole journal here, as it
1795 * can take too much time. Reading only the headers and delaying the
1796 * synchronization until after the journal provider is started should be best.
1797 */
1798#endif
1799
1800static void
1801g_journal_sync(struct g_journal_softc *sc)
1802{
1803	struct g_journal_record_header rhdr;
1804	struct g_journal_entry *ent;
1805	struct g_journal_header jhdr;
1806	struct g_consumer *cp;
1807	struct bio *bp, *fbp, *tbp;
1808	off_t joffset, offset;
1809	u_char *buf, sum[16];
1810	uint64_t id;
1811	MD5_CTX ctx;
1812	int error, found, i;
1813
1814	found = 0;
1815	fbp = NULL;
1816	cp = sc->sc_jconsumer;
1817	bp = g_alloc_bio();
1818	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
1819	offset = joffset = sc->sc_inactive.jj_offset = sc->sc_journal_offset;
1820
1821	GJ_DEBUG(2, "Looking for termination at %jd.", (intmax_t)joffset);
1822
1823	/*
1824	 * Read and decode first journal header.
1825	 */
1826	error = g_journal_sync_read(cp, bp, offset, buf);
1827	if (error != 0) {
1828		GJ_DEBUG(0, "Error while reading journal header from %s.",
1829		    cp->provider->name);
1830		goto end;
1831	}
1832	error = g_journal_header_decode(buf, &jhdr);
1833	if (error != 0) {
1834		GJ_DEBUG(0, "Cannot decode journal header from %s.",
1835		    cp->provider->name);
1836		goto end;
1837	}
1838	id = sc->sc_journal_id;
1839	if (jhdr.jh_journal_id != sc->sc_journal_id) {
1840		GJ_DEBUG(1, "Journal ID mismatch at %jd (0x%08x != 0x%08x).",
1841		    (intmax_t)offset, (u_int)jhdr.jh_journal_id, (u_int)id);
1842		goto end;
1843	}
1844	offset += cp->provider->sectorsize;
1845	id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1846
1847	for (;;) {
1848		/*
1849		 * If the biggest record won't fit, look for a record header or
1850		 * journal header from the beginning.
1851		 */
1852		GJ_VALIDATE_OFFSET(offset, sc);
1853		error = g_journal_sync_read(cp, bp, offset, buf);
1854		if (error != 0) {
1855			/*
1856			 * Not good. An error while reading a header means
1857			 * that we cannot read the next headers and, in
1858			 * consequence, we cannot find the termination.
1859			 */
1860			GJ_DEBUG(0,
1861			    "Error while reading record header from %s.",
1862			    cp->provider->name);
1863			break;
1864		}
1865
1866		error = g_journal_record_header_decode(buf, &rhdr);
1867		if (error != 0) {
1868			GJ_DEBUG(2, "Not a record header at %jd (error=%d).",
1869			    (intmax_t)offset, error);
1870			/*
1871			 * This is not a record header.
1872			 * If we are lucky, this is the next journal header.
1873			 */
1874			error = g_journal_header_decode(buf, &jhdr);
1875			if (error != 0) {
1876				GJ_DEBUG(1, "Not a journal header at %jd (error=%d).",
1877				    (intmax_t)offset, error);
1878				/*
1879				 * Nope, this is not a journal header, which
1880				 * basically means that the journal is not
1881				 * terminated properly.
1882				 */
1883				error = ENOENT;
1884				break;
1885			}
1886			/*
1887			 * Ok. This is the header of _some_ journal. Now we need to
1888			 * verify if this is the header of the _next_ journal.
1889			 */
1890			if (jhdr.jh_journal_id != id) {
1891				GJ_DEBUG(1, "Journal ID mismatch at %jd "
1892				    "(0x%08x != 0x%08x).", (intmax_t)offset,
1893				    (u_int)jhdr.jh_journal_id, (u_int)id);
1894				error = ENOENT;
1895				break;
1896			}
1897
1898			/* Found termination. */
1899			found++;
1900			GJ_DEBUG(1, "Found termination at %jd (id=0x%08x).",
1901			    (intmax_t)offset, (u_int)id);
1902			sc->sc_active.jj_offset = offset;
1903			sc->sc_journal_offset =
1904			    offset + cp->provider->sectorsize;
1905			sc->sc_journal_id = id;
1906			id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1907
1908			while ((tbp = fbp) != NULL) {
1909				fbp = tbp->bio_next;
1910				GJ_LOGREQ(3, tbp, "Adding request.");
1911				g_journal_insert_bio(&sc->sc_inactive.jj_queue,
1912				    tbp, M_WAITOK);
1913			}
1914
1915			/* Skip journal's header. */
1916			offset += cp->provider->sectorsize;
1917			continue;
1918		}
1919
1920		/* Skip record's header. */
1921		offset += cp->provider->sectorsize;
1922
1923		/*
1924		 * Add information about every record entry to the inactive
1925		 * queue.
1926		 */
1927		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1928			MD5Init(&ctx);
1929		for (i = 0; i < rhdr.jrh_nentries; i++) {
1930			ent = &rhdr.jrh_entries[i];
1931			GJ_DEBUG(3, "Insert entry: %jd %jd.",
1932			    (intmax_t)ent->je_offset, (intmax_t)ent->je_length);
1933			g_journal_insert(&fbp, ent->je_offset,
1934			    ent->je_offset + ent->je_length, ent->je_joffset,
1935			    NULL, M_WAITOK);
1936			if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1937				u_char *buf2;
1938
1939				/*
1940				 * TODO: Should use a faster function (like
1941				 *       g_journal_sync_read()).
1942				 */
1943				buf2 = g_read_data(cp, offset, ent->je_length,
1944				    NULL);
1945				if (buf2 == NULL)
1946					GJ_DEBUG(0, "Cannot read data at %jd.",
1947					    (intmax_t)offset);
1948				else {
1949					MD5Update(&ctx, buf2, ent->je_length);
1950					g_free(buf2);
1951				}
1952			}
1953			/* Skip entry's data. */
1954			offset += ent->je_length;
1955		}
1956		if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1957			MD5Final(sum, &ctx);
1958			if (bcmp(sum, rhdr.jrh_sum, sizeof(rhdr.jrh_sum)) != 0) {
1959				GJ_DEBUG(0, "MD5 hash mismatch at %jd!",
1960				    (intmax_t)offset);
1961			}
1962		}
1963	}
1964end:
1965	gj_free(bp->bio_data, cp->provider->sectorsize);
1966	g_destroy_bio(bp);
1967
1968	/* Remove bios from unterminated journal. */
1969	while ((tbp = fbp) != NULL) {
1970		fbp = tbp->bio_next;
1971		g_destroy_bio(tbp);
1972	}
1973
1974	if (found < 1 && joffset > 0) {
1975		GJ_DEBUG(0, "Journal on %s is broken/corrupted. Initializing.",
1976		    sc->sc_name);
1977		while ((tbp = sc->sc_inactive.jj_queue) != NULL) {
1978			sc->sc_inactive.jj_queue = tbp->bio_next;
1979			g_destroy_bio(tbp);
1980		}
1981		g_journal_initialize(sc);
1982		g_journal_mark_as_dirty(sc);
1983	} else {
1984		GJ_DEBUG(0, "Journal %s consistent.", sc->sc_name);
1985		g_journal_copy_start(sc);
1986	}
1987}
1988
1989/*
1990 * Wait for requests.
1991 * If we have requests in the current queue, flush them after 3 seconds from the
1992 * last flush. In this way we don't wait forever (or for a journal switch) before
1993 * storing not-full records on the journal.
1994 */
1995static void
1996g_journal_wait(struct g_journal_softc *sc, time_t last_write)
1997{
1998	int error, timeout;
1999
2000	GJ_DEBUG(3, "%s: enter", __func__);
2001	if (sc->sc_current_count == 0) {
2002		if (g_journal_debug < 2)
2003			msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", 0);
2004		else {
2005			/*
2006			 * If we have debug turned on, show the number of elements
2007			 * in various queues.
2008			 */
2009			for (;;) {
2010				error = msleep(sc, &sc->sc_mtx, PRIBIO,
2011				    "gj:work", hz * 3);
2012				if (error == 0) {
2013					mtx_unlock(&sc->sc_mtx);
2014					break;
2015				}
2016				GJ_DEBUG(3, "Report: current count=%d",
2017				    sc->sc_current_count);
2018				GJ_DEBUG(3, "Report: flush count=%d",
2019				    sc->sc_flush_count);
2020				GJ_DEBUG(3, "Report: flush in progress=%d",
2021				    sc->sc_flush_in_progress);
2022				GJ_DEBUG(3, "Report: copy in progress=%d",
2023				    sc->sc_copy_in_progress);
2024				GJ_DEBUG(3, "Report: delayed=%d",
2025				    sc->sc_delayed_count);
2026			}
2027		}
2028		GJ_DEBUG(3, "%s: exit 1", __func__);
2029		return;
2030	}
2031
2032	/*
2033	 * Flush records every 3 seconds, even if they are not full.
2034	 */
2035	timeout = (last_write + 3 - time_second) * hz;
2036	if (timeout <= 0) {
2037		mtx_unlock(&sc->sc_mtx);
2038		g_journal_flush(sc);
2039		g_journal_flush_send(sc);
2040		GJ_DEBUG(3, "%s: exit 2", __func__);
2041		return;
2042	}
2043	error = msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", timeout);
2044	if (error == EWOULDBLOCK)
2045		g_journal_flush_send(sc);
2046	GJ_DEBUG(3, "%s: exit 3", __func__);
2047}
2048
2049/*
2050 * Worker thread.
2051 */
2052static void
2053g_journal_worker(void *arg)
2054{
2055	struct g_journal_softc *sc;
2056	struct g_geom *gp;
2057	struct g_provider *pp;
2058	struct bio *bp;
2059	time_t last_write;
2060	int type;
2061
2062	thread_lock(curthread);
2063	sched_prio(curthread, PRIBIO);
2064	thread_unlock(curthread);
2065
2066	sc = arg;
2067	type = 0;	/* gcc */
2068
2069	if (sc->sc_flags & GJF_DEVICE_CLEAN) {
2070		GJ_DEBUG(0, "Journal %s clean.", sc->sc_name);
2071		g_journal_initialize(sc);
2072	} else {
2073		g_journal_sync(sc);
2074	}
2075	/*
2076	 * Check if we can use BIO_FLUSH.
2077	 */
2078	sc->sc_bio_flush = 0;
2079	if (g_io_flush(sc->sc_jconsumer) == 0) {
2080		sc->sc_bio_flush |= GJ_FLUSH_JOURNAL;
2081		GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
2082		    sc->sc_jconsumer->provider->name);
2083	} else {
2084		GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
2085		    sc->sc_jconsumer->provider->name);
2086	}
2087	if (sc->sc_jconsumer != sc->sc_dconsumer) {
2088		if (g_io_flush(sc->sc_dconsumer) == 0) {
2089			sc->sc_bio_flush |= GJ_FLUSH_DATA;
2090			GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
2091			    sc->sc_dconsumer->provider->name);
2092		} else {
2093			GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
2094			    sc->sc_dconsumer->provider->name);
2095		}
2096	}
2097
2098	gp = sc->sc_geom;
2099	g_topology_lock();
2100	pp = g_new_providerf(gp, "%s.journal", sc->sc_name);
2101	pp->mediasize = sc->sc_mediasize;
2102	/*
2103	 * There could be a problem when the data provider and the journal
2104	 * provider have different sector sizes, but such a scenario is prevented
2105	 * at journal creation time.
2106	 */
2107	pp->sectorsize = sc->sc_sectorsize;
2108	g_error_provider(pp, 0);
2109	g_topology_unlock();
2110	last_write = time_second;
2111
2112	if (sc->sc_rootmount != NULL) {
2113		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
2114		root_mount_rel(sc->sc_rootmount);
2115		sc->sc_rootmount = NULL;
2116	}
2117
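	/*
	 * Main loop. Requests on the back queue (journal and copy completions)
	 * take precedence over regular read/write requests.
	 */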
2118	for (;;) {
2119		/* Get first request from the queue. */
2120		mtx_lock(&sc->sc_mtx);
2121		bp = bioq_first(&sc->sc_back_queue);
2122		if (bp != NULL)
2123			type = (bp->bio_cflags & GJ_BIO_MASK);
2124		if (bp == NULL) {
2125			bp = bioq_first(&sc->sc_regular_queue);
2126			if (bp != NULL)
2127				type = GJ_BIO_REGULAR;
2128		}
2129		if (bp == NULL) {
2130try_switch:
2131			if ((sc->sc_flags & GJF_DEVICE_SWITCH) ||
2132			    (sc->sc_flags & GJF_DEVICE_DESTROY)) {
2133				if (sc->sc_current_count > 0) {
2134					mtx_unlock(&sc->sc_mtx);
2135					g_journal_flush(sc);
2136					g_journal_flush_send(sc);
2137					continue;
2138				}
2139				if (sc->sc_flush_in_progress > 0)
2140					goto sleep;
2141				if (sc->sc_copy_in_progress > 0)
2142					goto sleep;
2143			}
2144			if (sc->sc_flags & GJF_DEVICE_SWITCH) {
2145				mtx_unlock(&sc->sc_mtx);
2146				g_journal_switch(sc);
2147				wakeup(&sc->sc_journal_copying);
2148				continue;
2149			}
2150			if (sc->sc_flags & GJF_DEVICE_DESTROY) {
2151				GJ_DEBUG(1, "Shutting down worker "
2152				    "thread for %s.", gp->name);
2153				sc->sc_worker = NULL;
2154				wakeup(&sc->sc_worker);
2155				mtx_unlock(&sc->sc_mtx);
2156				kproc_exit(0);
2157			}
2158sleep:
2159			g_journal_wait(sc, last_write);
2160			continue;
2161		}
2162		/*
2163		 * If we're in the process of switching, we need to delay all new
2164		 * write requests until it's done.
2165		 */
2166		if ((sc->sc_flags & GJF_DEVICE_SWITCH) &&
2167		    type == GJ_BIO_REGULAR && bp->bio_cmd == BIO_WRITE) {
2168			GJ_LOGREQ(2, bp, "WRITE on SWITCH");
2169			goto try_switch;
2170		}
2171		if (type == GJ_BIO_REGULAR)
2172			bioq_remove(&sc->sc_regular_queue, bp);
2173		else
2174			bioq_remove(&sc->sc_back_queue, bp);
2175		mtx_unlock(&sc->sc_mtx);
2176		switch (type) {
2177		case GJ_BIO_REGULAR:
2178			/* Regular request. */
2179			switch (bp->bio_cmd) {
2180			case BIO_READ:
2181				g_journal_read(sc, bp, bp->bio_offset,
2182				    bp->bio_offset + bp->bio_length);
2183				break;
2184			case BIO_WRITE:
2185				last_write = time_second;
2186				g_journal_add_request(sc, bp);
2187				g_journal_flush_send(sc);
2188				break;
2189			default:
2190				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2191			}
2192			break;
2193		case GJ_BIO_COPY:
2194			switch (bp->bio_cmd) {
2195			case BIO_READ:
2196				if (g_journal_copy_read_done(bp))
2197					g_journal_copy_send(sc);
2198				break;
2199			case BIO_WRITE:
2200				g_journal_copy_write_done(bp);
2201				g_journal_copy_send(sc);
2202				break;
2203			default:
2204				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2205			}
2206			break;
2207		case GJ_BIO_JOURNAL:
2208			g_journal_flush_done(bp);
2209			g_journal_flush_send(sc);
2210			break;
2211		case GJ_BIO_READ:
2212		default:
2213			panic("Invalid bio (%d).", type);
2214		}
2215	}
2216}
2217
2218static void
2219g_journal_destroy_event(void *arg, int flags __unused)
2220{
2221	struct g_journal_softc *sc;
2222
2223	g_topology_assert();
2224	sc = arg;
2225	g_journal_destroy(sc);
2226}
2227
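/*
 * Called when the second provider of an incomplete journal configuration did
 * not show up in time; schedule destruction of the partly configured geom.
 */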
2228static void
2229g_journal_timeout(void *arg)
2230{
2231	struct g_journal_softc *sc;
2232
2233	sc = arg;
2234	GJ_DEBUG(0, "Timeout. Journal %s cannot be completed.",
2235	    sc->sc_geom->name);
2236	g_post_event(g_journal_destroy_event, sc, M_NOWAIT, NULL);
2237}
2238
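/*
 * Create a journal geom, or complete an existing one, based on the metadata
 * found on a provider. When data and journal live on separate providers the
 * geom becomes complete only when the second provider appears; the worker
 * thread is started once the configuration is complete.
 */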
2239static struct g_geom *
2240g_journal_create(struct g_class *mp, struct g_provider *pp,
2241    const struct g_journal_metadata *md)
2242{
2243	struct g_journal_softc *sc;
2244	struct g_geom *gp;
2245	struct g_consumer *cp;
2246	int error;
2247
2248	sc = NULL;	/* gcc */
2249
2250	g_topology_assert();
2251	/*
2252	 * There are two possibilities:
2253	 * 1. Data and both journals are on the same provider.
2254	 * 2. Data and journals are all on separate providers.
2255	 */
2256	/* Look for journal device with the same ID. */
2257	LIST_FOREACH(gp, &mp->geom, geom) {
2258		sc = gp->softc;
2259		if (sc == NULL)
2260			continue;
2261		if (sc->sc_id == md->md_id)
2262			break;
2263	}
2264	if (gp == NULL)
2265		sc = NULL;
2266	else if (sc != NULL && (sc->sc_type & md->md_type) != 0) {
2267		GJ_DEBUG(1, "Journal device %u already configured.", sc->sc_id);
2268		return (NULL);
2269	}
2270	if (md->md_type == 0 || (md->md_type & ~GJ_TYPE_COMPLETE) != 0) {
2271		GJ_DEBUG(0, "Invalid type on %s.", pp->name);
2272		return (NULL);
2273	}
2274	if (md->md_type & GJ_TYPE_DATA) {
2275		GJ_DEBUG(0, "Journal %u: %s contains data.", md->md_id,
2276		    pp->name);
2277	}
2278	if (md->md_type & GJ_TYPE_JOURNAL) {
2279		GJ_DEBUG(0, "Journal %u: %s contains journal.", md->md_id,
2280		    pp->name);
2281	}
2282
2283	if (sc == NULL) {
2284		/* Action geom. */
2285		sc = malloc(sizeof(*sc), M_JOURNAL, M_WAITOK | M_ZERO);
2286		sc->sc_id = md->md_id;
2287		sc->sc_type = 0;
2288		sc->sc_flags = 0;
2289		sc->sc_worker = NULL;
2290
2291		gp = g_new_geomf(mp, "gjournal %u", sc->sc_id);
2292		gp->start = g_journal_start;
2293		gp->orphan = g_journal_orphan;
2294		gp->access = g_journal_access;
2295		gp->softc = sc;
2296		gp->flags |= G_GEOM_VOLATILE_BIO;
2297		sc->sc_geom = gp;
2298
2299		mtx_init(&sc->sc_mtx, "gjournal", NULL, MTX_DEF);
2300
2301		bioq_init(&sc->sc_back_queue);
2302		bioq_init(&sc->sc_regular_queue);
2303		bioq_init(&sc->sc_delayed_queue);
2304		sc->sc_delayed_count = 0;
2305		sc->sc_current_queue = NULL;
2306		sc->sc_current_count = 0;
2307		sc->sc_flush_queue = NULL;
2308		sc->sc_flush_count = 0;
2309		sc->sc_flush_in_progress = 0;
2310		sc->sc_copy_queue = NULL;
2311		sc->sc_copy_in_progress = 0;
2312		sc->sc_inactive.jj_queue = NULL;
2313		sc->sc_active.jj_queue = NULL;
2314
2315		sc->sc_rootmount = root_mount_hold("GJOURNAL");
2316		GJ_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
2317
2318		callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
2319		if (md->md_type != GJ_TYPE_COMPLETE) {
2320			/*
2321			 * Journal and data are on separate providers.
2322			 * At this point we have only one of them.
2323			 * We set up a timeout in case the other part does not
2324			 * appear, so we won't wait forever.
2325			 */
2326			callout_reset(&sc->sc_callout, 5 * hz,
2327			    g_journal_timeout, sc);
2328		}
2329	}
2330
2331	/* Remember type of the data provider. */
2332	if (md->md_type & GJ_TYPE_DATA)
2333		sc->sc_orig_type = md->md_type;
2334	sc->sc_type |= md->md_type;
2335	cp = NULL;
2336
2337	if (md->md_type & GJ_TYPE_DATA) {
2338		if (md->md_flags & GJ_FLAG_CLEAN)
2339			sc->sc_flags |= GJF_DEVICE_CLEAN;
2340		if (md->md_flags & GJ_FLAG_CHECKSUM)
2341			sc->sc_flags |= GJF_DEVICE_CHECKSUM;
2342		cp = g_new_consumer(gp);
2343		error = g_attach(cp, pp);
2344		KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
2345		    pp->name, error));
2346		error = g_access(cp, 1, 1, 1);
2347		if (error != 0) {
2348			GJ_DEBUG(0, "Cannot access %s (error=%d).", pp->name,
2349			    error);
2350			g_journal_destroy(sc);
2351			return (NULL);
2352		}
2353		sc->sc_dconsumer = cp;
2354		sc->sc_mediasize = pp->mediasize - pp->sectorsize;
2355		sc->sc_sectorsize = pp->sectorsize;
2356		sc->sc_jstart = md->md_jstart;
2357		sc->sc_jend = md->md_jend;
2358		if (md->md_provider[0] != '\0')
2359			sc->sc_flags |= GJF_DEVICE_HARDCODED;
2360		sc->sc_journal_offset = md->md_joffset;
2361		sc->sc_journal_id = md->md_jid;
2362		sc->sc_journal_previous_id = md->md_jid;
2363	}
2364	if (md->md_type & GJ_TYPE_JOURNAL) {
2365		if (cp == NULL) {
2366			cp = g_new_consumer(gp);
2367			error = g_attach(cp, pp);
2368			KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
2369			    pp->name, error));
2370			error = g_access(cp, 1, 1, 1);
2371			if (error != 0) {
2372				GJ_DEBUG(0, "Cannot access %s (error=%d).",
2373				    pp->name, error);
2374				g_journal_destroy(sc);
2375				return (NULL);
2376			}
2377		} else {
2378			/*
2379			 * The journal is on the same provider as the data, which
2380			 * means that the data provider ends where the journal starts.
2381			 */
2382			sc->sc_mediasize = md->md_jstart;
2383		}
2384		sc->sc_jconsumer = cp;
2385	}
2386
2387	if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE) {
2388		/* Journal is not complete yet. */
2389		return (gp);
2390	} else {
2391		/* Journal complete, cancel timeout. */
2392		callout_drain(&sc->sc_callout);
2393	}
2394
2395	error = kproc_create(g_journal_worker, sc, &sc->sc_worker, 0, 0,
2396	    "g_journal %s", sc->sc_name);
2397	if (error != 0) {
2398		GJ_DEBUG(0, "Cannot create worker thread for %s.journal.",
2399		    sc->sc_name);
2400		g_journal_destroy(sc);
2401		return (NULL);
2402	}
2403
2404	return (gp);
2405}
2406
2407static void
2408g_journal_destroy_consumer(void *arg, int flags __unused)
2409{
2410	struct g_consumer *cp;
2411
2412	g_topology_assert();
2413	cp = arg;
2414	g_detach(cp);
2415	g_destroy_consumer(cp);
2416}
2417
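/*
 * Tear down a journal device: fail with EBUSY if the provider is still open,
 * otherwise flush and switch the journal, wait for the worker thread to exit,
 * mark the device as clean and wither the geom and its consumers.
 */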
2418static int
2419g_journal_destroy(struct g_journal_softc *sc)
2420{
2421	struct g_geom *gp;
2422	struct g_provider *pp;
2423	struct g_consumer *cp;
2424
2425	g_topology_assert();
2426
2427	if (sc == NULL)
2428		return (ENXIO);
2429
2430	gp = sc->sc_geom;
2431	pp = LIST_FIRST(&gp->provider);
2432	if (pp != NULL) {
2433		if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) {
2434			GJ_DEBUG(1, "Device %s is still open (r%dw%de%d).",
2435			    pp->name, pp->acr, pp->acw, pp->ace);
2436			return (EBUSY);
2437		}
2438		g_error_provider(pp, ENXIO);
2439
2440		g_journal_flush(sc);
2441		g_journal_flush_send(sc);
2442		g_journal_switch(sc);
2443	}
2444
2445	sc->sc_flags |= (GJF_DEVICE_DESTROY | GJF_DEVICE_CLEAN);
2446
2447	g_topology_unlock();
2448
2449	if (sc->sc_rootmount != NULL) {
2450		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
2451		root_mount_rel(sc->sc_rootmount);
2452		sc->sc_rootmount = NULL;
2453	}
2454
2455	callout_drain(&sc->sc_callout);
2456	mtx_lock(&sc->sc_mtx);
2457	wakeup(sc);
2458	while (sc->sc_worker != NULL)
2459		msleep(&sc->sc_worker, &sc->sc_mtx, PRIBIO, "gj:destroy", 0);
2460	mtx_unlock(&sc->sc_mtx);
2461
2462	if (pp != NULL) {
2463		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
2464		g_journal_metadata_update(sc);
2465		g_topology_lock();
2466		pp->flags |= G_PF_WITHER;
2467		g_orphan_provider(pp, ENXIO);
2468	} else {
2469		g_topology_lock();
2470	}
2471	mtx_destroy(&sc->sc_mtx);
2472
2473	if (sc->sc_current_count != 0) {
2474		GJ_DEBUG(0, "Warning! Number of current requests %d.",
2475		    sc->sc_current_count);
2476	}
2477
2478	LIST_FOREACH(cp, &gp->consumer, consumer) {
2479		if (cp->acr + cp->acw + cp->ace > 0)
2480			g_access(cp, -1, -1, -1);
2481		/*
2482		 * We keep all consumers open for writing, so if we detached
2483		 * and destroyed the consumer here, the providers would be offered
2484		 * for tasting and the journal would be started again.
2485		 * Sending an event here prevents this from happening.
2486		 */
2487		g_post_event(g_journal_destroy_consumer, cp, M_WAITOK, NULL);
2488	}
2489	gp->softc = NULL;
2490	g_wither_geom(gp, ENXIO);
2491	free(sc, M_JOURNAL);
2492	return (0);
2493}
2494
2495static void
2496g_journal_taste_orphan(struct g_consumer *cp)
2497{
2498
2499	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2500	    cp->provider->name));
2501}
2502
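/*
 * Taste callback: read the gjournal metadata from the provider, validate it
 * against the provider's name and size and, if it matches, create or complete
 * the journal geom.
 */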
2503static struct g_geom *
2504g_journal_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
2505{
2506	struct g_journal_metadata md;
2507	struct g_consumer *cp;
2508	struct g_geom *gp;
2509	int error;
2510
2511	g_topology_assert();
2512	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
2513	GJ_DEBUG(2, "Tasting %s.", pp->name);
2514	if (pp->geom->class == mp)
2515		return (NULL);
2516
2517	gp = g_new_geomf(mp, "journal:taste");
2518	/* This orphan function should never be called. */
2519	gp->orphan = g_journal_taste_orphan;
2520	cp = g_new_consumer(gp);
2521	g_attach(cp, pp);
2522	error = g_journal_metadata_read(cp, &md);
2523	g_detach(cp);
2524	g_destroy_consumer(cp);
2525	g_destroy_geom(gp);
2526	if (error != 0)
2527		return (NULL);
2528	gp = NULL;
2529
2530	if (md.md_provider[0] != '\0' &&
2531	    !g_compare_names(md.md_provider, pp->name))
2532		return (NULL);
2533	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
2534		return (NULL);
2535	if (g_journal_debug >= 2)
2536		journal_metadata_dump(&md);
2537
2538	gp = g_journal_create(mp, pp, &md);
2539	return (gp);
2540}
2541
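/*
 * Find a complete, not-being-destroyed journal device by name; both the
 * device name and the name of the provider it exposes are accepted, with an
 * optional /dev/ prefix.
 */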
2542static struct g_journal_softc *
2543g_journal_find_device(struct g_class *mp, const char *name)
2544{
2545	struct g_journal_softc *sc;
2546	struct g_geom *gp;
2547	struct g_provider *pp;
2548
2549	if (strncmp(name, "/dev/", 5) == 0)
2550		name += 5;
2551	LIST_FOREACH(gp, &mp->geom, geom) {
2552		sc = gp->softc;
2553		if (sc == NULL)
2554			continue;
2555		if (sc->sc_flags & GJF_DEVICE_DESTROY)
2556			continue;
2557		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
2558			continue;
2559		pp = LIST_FIRST(&gp->provider);
2560		if (strcmp(sc->sc_name, name) == 0)
2561			return (sc);
2562		if (pp != NULL && strcmp(pp->name, name) == 0)
2563			return (sc);
2564	}
2565	return (NULL);
2566}
2567
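/*
 * Handle the "destroy"/"stop" verbs: look up every device named in the
 * request and destroy it.
 */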
2568static void
2569g_journal_ctl_destroy(struct gctl_req *req, struct g_class *mp)
2570{
2571	struct g_journal_softc *sc;
2572	const char *name;
2573	char param[16];
2574	int *nargs;
2575	int error, i;
2576
2577	g_topology_assert();
2578
2579	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
2580	if (nargs == NULL) {
2581		gctl_error(req, "No '%s' argument.", "nargs");
2582		return;
2583	}
2584	if (*nargs <= 0) {
2585		gctl_error(req, "Missing device(s).");
2586		return;
2587	}
2588
2589	for (i = 0; i < *nargs; i++) {
2590		snprintf(param, sizeof(param), "arg%d", i);
2591		name = gctl_get_asciiparam(req, param);
2592		if (name == NULL) {
2593			gctl_error(req, "No 'arg%d' argument.", i);
2594			return;
2595		}
2596		sc = g_journal_find_device(mp, name);
2597		if (sc == NULL) {
2598			gctl_error(req, "No such device: %s.", name);
2599			return;
2600		}
2601		error = g_journal_destroy(sc);
2602		if (error != 0) {
2603			gctl_error(req, "Cannot destroy device %s (error=%d).",
2604			    LIST_FIRST(&sc->sc_geom->provider)->name, error);
2605			return;
2606		}
2607	}
2608}
2609
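/*
 * Handle the "sync" verb: ask the switcher thread for an immediate journal
 * switch and wait until it has been performed.
 */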
2610static void
2611g_journal_ctl_sync(struct gctl_req *req __unused, struct g_class *mp __unused)
2612{
2613
2614	g_topology_assert();
2615	g_topology_unlock();
2616	g_journal_sync_requested++;
2617	wakeup(&g_journal_switcher_state);
2618	while (g_journal_sync_requested > 0)
2619		tsleep(&g_journal_sync_requested, PRIBIO, "j:sreq", hz / 2);
2620	g_topology_lock();
2621}
2622
2623static void
2624g_journal_config(struct gctl_req *req, struct g_class *mp, const char *verb)
2625{
2626	uint32_t *version;
2627
2628	g_topology_assert();
2629
2630	version = gctl_get_paraml(req, "version", sizeof(*version));
2631	if (version == NULL) {
2632		gctl_error(req, "No '%s' argument.", "version");
2633		return;
2634	}
2635	if (*version != G_JOURNAL_VERSION) {
2636		gctl_error(req, "Userland and kernel parts are out of sync.");
2637		return;
2638	}
2639
2640	if (strcmp(verb, "destroy") == 0 || strcmp(verb, "stop") == 0) {
2641		g_journal_ctl_destroy(req, mp);
2642		return;
2643	} else if (strcmp(verb, "sync") == 0) {
2644		g_journal_ctl_sync(req, mp);
2645		return;
2646	}
2647
2648	gctl_error(req, "Unknown verb.");
2649}
2650
2651static void
2652g_journal_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2653    struct g_consumer *cp, struct g_provider *pp)
2654{
2655	struct g_journal_softc *sc;
2656
2657	g_topology_assert();
2658
2659	sc = gp->softc;
2660	if (sc == NULL)
2661		return;
2662	if (pp != NULL) {
2663		/* Nothing here. */
2664	} else if (cp != NULL) {
2665		int first = 1;
2666
2667		sbuf_printf(sb, "%s<Role>", indent);
2668		if (cp == sc->sc_dconsumer) {
2669			sbuf_printf(sb, "Data");
2670			first = 0;
2671		}
2672		if (cp == sc->sc_jconsumer) {
2673			if (!first)
2674				sbuf_printf(sb, ",");
2675			sbuf_printf(sb, "Journal");
2676		}
2677		sbuf_printf(sb, "</Role>\n");
2678		if (cp == sc->sc_jconsumer) {
2679			sbuf_printf(sb, "<Jstart>%jd</Jstart>\n",
2680			    (intmax_t)sc->sc_jstart);
2681			sbuf_printf(sb, "<Jend>%jd</Jend>\n",
2682			    (intmax_t)sc->sc_jend);
2683		}
2684	} else {
2685		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
2686	}
2687}
2688
2689static eventhandler_tag g_journal_event_shutdown = NULL;
2690static eventhandler_tag g_journal_event_lowmem = NULL;
2691
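/*
 * shutdown_post_sync event handler: destroy all journal geoms so that their
 * metadata is marked clean before the system goes down. Does nothing when
 * the system is panicking.
 */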
2692static void
2693g_journal_shutdown(void *arg, int howto __unused)
2694{
2695	struct g_class *mp;
2696	struct g_geom *gp, *gp2;
2697
2698	if (panicstr != NULL)
2699		return;
2700	mp = arg;
2701	DROP_GIANT();
2702	g_topology_lock();
2703	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
2704		if (gp->softc == NULL)
2705			continue;
2706		GJ_DEBUG(0, "Shutting down geom %s.", gp->name);
2707		g_journal_destroy(gp->softc);
2708	}
2709	g_topology_unlock();
2710	PICKUP_GIANT();
2711}
2712
2713/*
2714 * Free cached requests from the inactive queue in case of low memory.
2715 * We free GJ_FREE_AT_ONCE elements at once.
2716 */
2717#define	GJ_FREE_AT_ONCE	4
2718static void
2719g_journal_lowmem(void *arg, int howto __unused)
2720{
2721	struct g_journal_softc *sc;
2722	struct g_class *mp;
2723	struct g_geom *gp;
2724	struct bio *bp;
2725	u_int nfree = GJ_FREE_AT_ONCE;
2726
2727	g_journal_stats_low_mem++;
2728	mp = arg;
2729	DROP_GIANT();
2730	g_topology_lock();
2731	LIST_FOREACH(gp, &mp->geom, geom) {
2732		sc = gp->softc;
2733		if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY))
2734			continue;
2735		mtx_lock(&sc->sc_mtx);
2736		for (bp = sc->sc_inactive.jj_queue; nfree > 0 && bp != NULL;
2737		    nfree--, bp = bp->bio_next) {
2738			/*
2739			 * It is safe to free the bio_data here, because:
2740			 * 1. If bio_data is NULL it will be read from the
2741			 *    inactive journal.
2742			 * 2. If bp is sent down, it is first removed from the
2743			 *    inactive queue, so it's impossible to free the
2744			 *    data from under an in-flight bio.
2745			 * On the other hand, freeing elements from the active
2746			 * queue is not safe.
2747			 */
2748			if (bp->bio_data != NULL) {
2749				GJ_DEBUG(2, "Freeing data from %s.",
2750				    sc->sc_name);
2751				gj_free(bp->bio_data, bp->bio_length);
2752				bp->bio_data = NULL;
2753			}
2754		}
2755		mtx_unlock(&sc->sc_mtx);
2756		if (nfree == 0)
2757			break;
2758	}
2759	g_topology_unlock();
2760	PICKUP_GIANT();
2761}
2762
2763static void g_journal_switcher(void *arg);
2764
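/*
 * Class initialization: compute the cache limit from vm_kmem_size, register
 * the shutdown and lowmem event handlers and start the switcher thread.
 */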
2765static void
2766g_journal_init(struct g_class *mp)
2767{
2768	int error;
2769
2770	/* Pick a conservative value if provided value sucks. */
2771	if (g_journal_cache_divisor <= 0 ||
2772	    (vm_kmem_size / g_journal_cache_divisor == 0)) {
2773		g_journal_cache_divisor = 5;
2774	}
2775	if (g_journal_cache_limit > 0) {
2776		g_journal_cache_limit = vm_kmem_size / g_journal_cache_divisor;
2777		g_journal_cache_low =
2778		    (g_journal_cache_limit / 100) * g_journal_cache_switch;
2779	}
2780	g_journal_event_shutdown = EVENTHANDLER_REGISTER(shutdown_post_sync,
2781	    g_journal_shutdown, mp, EVENTHANDLER_PRI_FIRST);
2782	if (g_journal_event_shutdown == NULL)
2783		GJ_DEBUG(0, "Warning! Cannot register shutdown event.");
2784	g_journal_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
2785	    g_journal_lowmem, mp, EVENTHANDLER_PRI_FIRST);
2786	if (g_journal_event_lowmem == NULL)
2787		GJ_DEBUG(0, "Warning! Cannot register lowmem event.");
2788	error = kproc_create(g_journal_switcher, mp, NULL, 0, 0,
2789	    "g_journal switcher");
2790	KASSERT(error == 0, ("Cannot create switcher thread."));
2791}
2792
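/*
 * Class teardown: deregister the event handlers and tell the switcher thread
 * to exit, waiting until it has died.
 */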
2793static void
2794g_journal_fini(struct g_class *mp)
2795{
2796
2797	if (g_journal_event_shutdown != NULL) {
2798		EVENTHANDLER_DEREGISTER(shutdown_post_sync,
2799		    g_journal_event_shutdown);
2800	}
2801	if (g_journal_event_lowmem != NULL)
2802		EVENTHANDLER_DEREGISTER(vm_lowmem, g_journal_event_lowmem);
2803	g_journal_switcher_state = GJ_SWITCHER_DIE;
2804	wakeup(&g_journal_switcher_state);
2805	while (g_journal_switcher_state != GJ_SWITCHER_DIED)
2806		tsleep(&g_journal_switcher_state, PRIBIO, "jfini:wait", hz / 5);
2807	GJ_DEBUG(1, "Switcher died.");
2808}
2809
2810DECLARE_GEOM_CLASS(g_journal_class, g_journal);
2811
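/*
 * Return the g_journal_desc for the given file system type, or NULL if the
 * file system is not supported.
 */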
2812static const struct g_journal_desc *
2813g_journal_find_desc(const char *fstype)
2814{
2815	const struct g_journal_desc *desc;
2816	int i;
2817
2818	for (desc = g_journal_filesystems[i = 0]; desc != NULL;
2819	     desc = g_journal_filesystems[++i]) {
2820		if (strcmp(desc->jd_fstype, fstype) == 0)
2821			break;
2822	}
2823	return (desc);
2824}
2825
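/*
 * Request a journal switch from the worker thread and wait until the switch
 * completes. Must be called with the softc mutex held.
 */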
2826static void
2827g_journal_switch_wait(struct g_journal_softc *sc)
2828{
2829	struct bintime bt;
2830
2831	mtx_assert(&sc->sc_mtx, MA_OWNED);
2832	if (g_journal_debug >= 2) {
2833		if (sc->sc_flush_in_progress > 0) {
2834			GJ_DEBUG(2, "%d requests flushing.",
2835			    sc->sc_flush_in_progress);
2836		}
2837		if (sc->sc_copy_in_progress > 0) {
2838			GJ_DEBUG(2, "%d requests copying.",
2839			    sc->sc_copy_in_progress);
2840		}
2841		if (sc->sc_flush_count > 0) {
2842			GJ_DEBUG(2, "%d requests to flush.",
2843			    sc->sc_flush_count);
2844		}
2845		if (sc->sc_delayed_count > 0) {
2846			GJ_DEBUG(2, "%d requests delayed.",
2847			    sc->sc_delayed_count);
2848		}
2849	}
2850	g_journal_stats_switches++;
2851	if (sc->sc_copy_in_progress > 0)
2852		g_journal_stats_wait_for_copy++;
2853	GJ_TIMER_START(1, &bt);
2854	sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
2855	sc->sc_flags |= GJF_DEVICE_SWITCH;
2856	wakeup(sc);
2857	while (sc->sc_flags & GJF_DEVICE_SWITCH) {
2858		msleep(&sc->sc_journal_copying, &sc->sc_mtx, PRIBIO,
2859		    "gj:switch", 0);
2860	}
2861	GJ_TIMER_STOP(1, &bt, "Switch time of %s", sc->sc_name);
2862}
2863
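/*
 * Switch journals on all journaled file systems: for every mounted file
 * system backed by gjournal, sync and suspend it, let the file system mark
 * itself clean, perform the journal switch and resume writes. Devices that
 * were marked for a switch but have no mounted file system are switched
 * afterwards.
 */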
2864static void
2865g_journal_do_switch(struct g_class *classp)
2866{
2867	struct g_journal_softc *sc;
2868	const struct g_journal_desc *desc;
2869	struct g_geom *gp;
2870	struct mount *mp;
2871	struct bintime bt;
2872	char *mountpoint;
2873	int error, vfslocked;
2874
2875	DROP_GIANT();
2876	g_topology_lock();
2877	LIST_FOREACH(gp, &classp->geom, geom) {
2878		sc = gp->softc;
2879		if (sc == NULL)
2880			continue;
2881		if (sc->sc_flags & GJF_DEVICE_DESTROY)
2882			continue;
2883		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
2884			continue;
2885		mtx_lock(&sc->sc_mtx);
2886		sc->sc_flags |= GJF_DEVICE_BEFORE_SWITCH;
2887		mtx_unlock(&sc->sc_mtx);
2888	}
2889	g_topology_unlock();
2890	PICKUP_GIANT();
2891
2892	mtx_lock(&mountlist_mtx);
2893	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2894		if (mp->mnt_gjprovider == NULL)
2895			continue;
2896		if (mp->mnt_flag & MNT_RDONLY)
2897			continue;
2898		desc = g_journal_find_desc(mp->mnt_stat.f_fstypename);
2899		if (desc == NULL)
2900			continue;
2901		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
2902			continue;
2903		/* mtx_unlock(&mountlist_mtx) was done inside vfs_busy() */
2904
2905		DROP_GIANT();
2906		g_topology_lock();
2907		sc = g_journal_find_device(classp, mp->mnt_gjprovider);
2908		g_topology_unlock();
2909		PICKUP_GIANT();
2910
2911		if (sc == NULL) {
2912			GJ_DEBUG(0, "Cannot find journal geom for %s.",
2913			    mp->mnt_gjprovider);
2914			goto next;
2915		} else if (JEMPTY(sc)) {
2916			mtx_lock(&sc->sc_mtx);
2917			sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
2918			mtx_unlock(&sc->sc_mtx);
2919			GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
2920			goto next;
2921		}
2922
2923		mountpoint = mp->mnt_stat.f_mntonname;
2924
2925		vfslocked = VFS_LOCK_GIANT(mp);
2926
2927		error = vn_start_write(NULL, &mp, V_WAIT);
2928		if (error != 0) {
2929			VFS_UNLOCK_GIANT(vfslocked);
2930			GJ_DEBUG(0, "vn_start_write(%s) failed (error=%d).",
2931			    mountpoint, error);
2932			goto next;
2933		}
2934
2935		MNT_ILOCK(mp);
2936		mp->mnt_noasync++;
2937		mp->mnt_kern_flag &= ~MNTK_ASYNC;
2938		MNT_IUNLOCK(mp);
2939
2940		GJ_TIMER_START(1, &bt);
2941		vfs_msync(mp, MNT_NOWAIT);
2942		GJ_TIMER_STOP(1, &bt, "Msync time of %s", mountpoint);
2943
2944		GJ_TIMER_START(1, &bt);
2945		error = VFS_SYNC(mp, MNT_NOWAIT);
2946		if (error == 0)
2947			GJ_TIMER_STOP(1, &bt, "Sync time of %s", mountpoint);
2948		else {
2949			GJ_DEBUG(0, "Cannot sync file system %s (error=%d).",
2950			    mountpoint, error);
2951		}
2952
2953		MNT_ILOCK(mp);
2954		mp->mnt_noasync--;
2955		if ((mp->mnt_flag & MNT_ASYNC) != 0 && mp->mnt_noasync == 0)
2956			mp->mnt_kern_flag |= MNTK_ASYNC;
2957		MNT_IUNLOCK(mp);
2958
2959		vn_finished_write(mp);
2960
2961		if (error != 0) {
2962			VFS_UNLOCK_GIANT(vfslocked);
2963			goto next;
2964		}
2965
2966		/*
2967		 * Send BIO_FLUSH before freezing the file system, so it can be
2968		 * faster after the freeze.
2969		 */
2970		GJ_TIMER_START(1, &bt);
2971		g_journal_flush_cache(sc);
2972		GJ_TIMER_STOP(1, &bt, "BIO_FLUSH time of %s", sc->sc_name);
2973
2974		GJ_TIMER_START(1, &bt);
2975		error = vfs_write_suspend(mp);
2976		VFS_UNLOCK_GIANT(vfslocked);
2977		GJ_TIMER_STOP(1, &bt, "Suspend time of %s", mountpoint);
2978		if (error != 0) {
2979			GJ_DEBUG(0, "Cannot suspend file system %s (error=%d).",
2980			    mountpoint, error);
2981			goto next;
2982		}
2983
2984		error = desc->jd_clean(mp);
2985		if (error != 0)
2986			goto next;
2987
2988		mtx_lock(&sc->sc_mtx);
2989		g_journal_switch_wait(sc);
2990		mtx_unlock(&sc->sc_mtx);
2991
2992		vfs_write_resume(mp);
2993next:
2994		mtx_lock(&mountlist_mtx);
2995		vfs_unbusy(mp);
2996	}
2997	mtx_unlock(&mountlist_mtx);
2998
2999	sc = NULL;
3000	for (;;) {
3001		DROP_GIANT();
3002		g_topology_lock();
3003		LIST_FOREACH(gp, &g_journal_class.geom, geom) {
3004			sc = gp->softc;
3005			if (sc == NULL)
3006				continue;
3007			mtx_lock(&sc->sc_mtx);
3008			if ((sc->sc_type & GJ_TYPE_COMPLETE) == GJ_TYPE_COMPLETE &&
3009			    !(sc->sc_flags & GJF_DEVICE_DESTROY) &&
3010			    (sc->sc_flags & GJF_DEVICE_BEFORE_SWITCH)) {
3011				break;
3012			}
3013			mtx_unlock(&sc->sc_mtx);
3014			sc = NULL;
3015		}
3016		g_topology_unlock();
3017		PICKUP_GIANT();
3018		if (sc == NULL)
3019			break;
3020		mtx_assert(&sc->sc_mtx, MA_OWNED);
3021		g_journal_switch_wait(sc);
3022		mtx_unlock(&sc->sc_mtx);
3023	}
3024}
3025
3026/*
3027 * TODO: Switcher thread should be started on first geom creation and killed on
3028 * last geom destruction.
3029 */
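/*
 * The switcher thread sleeps for g_journal_switch_time seconds (or until it
 * is woken up explicitly) and then performs a switch on all journal devices.
 */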
3030static void
3031g_journal_switcher(void *arg)
3032{
3033	struct g_class *mp;
3034	struct bintime bt;
3035	int error;
3036
3037	mp = arg;
3038	curthread->td_pflags |= TDP_NORUNNINGBUF;
3039	for (;;) {
3040		g_journal_switcher_wokenup = 0;
3041		error = tsleep(&g_journal_switcher_state, PRIBIO, "jsw:wait",
3042		    g_journal_switch_time * hz);
3043		if (g_journal_switcher_state == GJ_SWITCHER_DIE) {
3044			g_journal_switcher_state = GJ_SWITCHER_DIED;
3045			GJ_DEBUG(1, "Switcher exiting.");
3046			wakeup(&g_journal_switcher_state);
3047			kproc_exit(0);
3048		}
3049		if (error == 0 && g_journal_sync_requested == 0) {
3050			GJ_DEBUG(1, "Out of cache, force switch (used=%u "
3051			    "limit=%u).", g_journal_cache_used,
3052			    g_journal_cache_limit);
3053		}
3054		GJ_TIMER_START(1, &bt);
3055		g_journal_do_switch(mp);
3056		GJ_TIMER_STOP(1, &bt, "Entire switch time");
3057		if (g_journal_sync_requested > 0) {
3058			g_journal_sync_requested = 0;
3059			wakeup(&g_journal_sync_requested);
3060		}
3061	}
3062}
3063