/*-
 * Copyright (c) 2005-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/geom/journal/g_journal.c 328947 2018-02-06 19:17:05Z mckusick $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/eventhandler.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/sbuf.h>
#ifdef GJ_MEMDEBUG
#include <sys/stack.h>
#include <sys/kdb.h>
#endif
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <geom/geom.h>

#include <geom/journal/g_journal.h>

FEATURE(geom_journal, "GEOM journaling support");
/*
 * On-disk journal format:
 *
 * JH - Journal header
 * RH - Record header
 *
 * %%%%%% ****** +------+ +------+     ****** +------+     %%%%%%
 * % JH % * RH * | Data | | Data | ... * RH * | Data | ... % JH % ...
 * %%%%%% ****** +------+ +------+     ****** +------+     %%%%%%
 *
 */

CTASSERT(sizeof(struct g_journal_header) <= 512);
CTASSERT(sizeof(struct g_journal_record_header) <= 512);

static MALLOC_DEFINE(M_JOURNAL, "journal_data", "GEOM_JOURNAL Data");
static struct mtx g_journal_cache_mtx;
MTX_SYSINIT(g_journal_cache, &g_journal_cache_mtx, "cache usage", MTX_DEF);

const struct g_journal_desc *g_journal_filesystems[] = {
	&g_journal_ufs,
	NULL
};

SYSCTL_DECL(_kern_geom);

int g_journal_debug = 0;
static u_int g_journal_switch_time = 10;
static u_int g_journal_force_switch = 70;
static u_int g_journal_parallel_flushes = 16;
static u_int g_journal_parallel_copies = 16;
static u_int g_journal_accept_immediately = 64;
static u_int g_journal_record_entries = GJ_RECORD_HEADER_NENTRIES;
static u_int g_journal_do_optimize = 1;

static SYSCTL_NODE(_kern_geom, OID_AUTO, journal, CTLFLAG_RW, 0,
    "GEOM_JOURNAL stuff");
SYSCTL_INT(_kern_geom_journal, OID_AUTO, debug, CTLFLAG_RWTUN, &g_journal_debug, 0,
    "Debug level");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, switch_time, CTLFLAG_RW,
    &g_journal_switch_time, 0, "Switch journals every N seconds");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, force_switch, CTLFLAG_RW,
    &g_journal_force_switch, 0, "Force switch when journal is N% full");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_flushes, CTLFLAG_RW,
    &g_journal_parallel_flushes, 0,
    "Number of flush I/O requests to send in parallel");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, accept_immediately, CTLFLAG_RW,
    &g_journal_accept_immediately, 0,
    "Number of I/O requests accepted immediately");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_copies, CTLFLAG_RW,
    &g_journal_parallel_copies, 0,
    "Number of copy I/O requests to send in parallel");
static int
g_journal_record_entries_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_int entries;
	int error;

	entries = g_journal_record_entries;
	error = sysctl_handle_int(oidp, &entries, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (entries < 1 || entries > GJ_RECORD_HEADER_NENTRIES)
		return (EINVAL);
	g_journal_record_entries = entries;
	return (0);
}
SYSCTL_PROC(_kern_geom_journal, OID_AUTO, record_entries,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_record_entries_sysctl, "I",
    "Maximum number of entries in one journal record");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, optimize, CTLFLAG_RW,
    &g_journal_do_optimize, 0, "Try to combine bios on flush and copy");

static u_long g_journal_cache_used = 0;
static u_long g_journal_cache_limit = 64 * 1024 * 1024;
static u_int g_journal_cache_divisor = 2;
static u_int g_journal_cache_switch = 90;
static u_int g_journal_cache_misses = 0;
static u_int g_journal_cache_alloc_failures = 0;
static u_long g_journal_cache_low = 0;

static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, cache, CTLFLAG_RW, 0,
    "GEOM_JOURNAL cache");
SYSCTL_ULONG(_kern_geom_journal_cache, OID_AUTO, used, CTLFLAG_RD,
    &g_journal_cache_used, 0, "Number of allocated bytes");
static int
g_journal_cache_limit_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_long limit;
	int error;

	limit = g_journal_cache_limit;
	error = sysctl_handle_long(oidp, &limit, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	g_journal_cache_limit = limit;
	g_journal_cache_low = (limit / 100) * g_journal_cache_switch;
	return (0);
}
SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, limit,
    CTLTYPE_ULONG | CTLFLAG_RWTUN, NULL, 0, g_journal_cache_limit_sysctl, "LU",
    "Maximum number of allocated bytes");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, divisor, CTLFLAG_RDTUN,
    &g_journal_cache_divisor, 0,
    "(kmem_size / kern.geom.journal.cache.divisor) == cache size");
static int
g_journal_cache_switch_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_int cswitch;
	int error;

	cswitch = g_journal_cache_switch;
	error = sysctl_handle_int(oidp, &cswitch, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (cswitch > 100)
		return (EINVAL);
	g_journal_cache_switch = cswitch;
	g_journal_cache_low = (g_journal_cache_limit / 100) * cswitch;
	return (0);
}
SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, switch,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_cache_switch_sysctl, "I",
    "Force switch when we hit this percent of cache use");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, misses, CTLFLAG_RW,
    &g_journal_cache_misses, 0, "Number of cache misses");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, alloc_failures, CTLFLAG_RW,
    &g_journal_cache_alloc_failures, 0, "Memory allocation failures");

static u_long g_journal_stats_bytes_skipped = 0;
static u_long g_journal_stats_combined_ios = 0;
static u_long g_journal_stats_switches = 0;
static u_long g_journal_stats_wait_for_copy = 0;
static u_long g_journal_stats_journal_full = 0;
static u_long g_journal_stats_low_mem = 0;

static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, stats, CTLFLAG_RW, 0,
    "GEOM_JOURNAL statistics");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, skipped_bytes, CTLFLAG_RW,
    &g_journal_stats_bytes_skipped, 0, "Number of skipped bytes");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, combined_ios, CTLFLAG_RW,
    &g_journal_stats_combined_ios, 0, "Number of combined I/O requests");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, switches, CTLFLAG_RW,
    &g_journal_stats_switches, 0, "Number of journal switches");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, wait_for_copy, CTLFLAG_RW,
    &g_journal_stats_wait_for_copy, 0, "Wait for journal copy on switch");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, journal_full, CTLFLAG_RW,
    &g_journal_stats_journal_full, 0,
    "Number of times journal was almost full.");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, low_mem, CTLFLAG_RW,
    &g_journal_stats_low_mem, 0, "Number of times low_mem hook was called.");

static g_taste_t g_journal_taste;
static g_ctl_req_t g_journal_config;
static g_dumpconf_t g_journal_dumpconf;
static g_init_t g_journal_init;
static g_fini_t g_journal_fini;

struct g_class g_journal_class = {
	.name = G_JOURNAL_CLASS_NAME,
	.version = G_VERSION,
	.taste = g_journal_taste,
	.ctlreq = g_journal_config,
	.dumpconf = g_journal_dumpconf,
	.init = g_journal_init,
	.fini = g_journal_fini
};

static int g_journal_destroy(struct g_journal_softc *sc);
static void g_journal_metadata_update(struct g_journal_softc *sc);
static void g_journal_start_switcher(struct g_class *mp);
static void g_journal_stop_switcher(void);
static void g_journal_switch_wait(struct g_journal_softc *sc);

#define	GJ_SWITCHER_WORKING	0
#define	GJ_SWITCHER_DIE		1
#define	GJ_SWITCHER_DIED	2
static struct proc *g_journal_switcher_proc = NULL;
static int g_journal_switcher_state = GJ_SWITCHER_WORKING;
static int g_journal_switcher_wokenup = 0;
static int g_journal_sync_requested = 0;

#ifdef GJ_MEMDEBUG
struct meminfo {
	size_t		mi_size;
	struct stack	mi_stack;
};
#endif
/*
 * We use our own malloc/realloc/free functions, so we can collect statistics
 * and force a journal switch when we're running out of cache.
 */
static void *
gj_malloc(size_t size, int flags)
{
	void *p;
#ifdef GJ_MEMDEBUG
	struct meminfo *mi;
#endif

	mtx_lock(&g_journal_cache_mtx);
	if (g_journal_cache_limit > 0 && !g_journal_switcher_wokenup &&
	    g_journal_cache_used + size > g_journal_cache_low) {
		GJ_DEBUG(1, "No cache, waking up the switcher.");
		g_journal_switcher_wokenup = 1;
		wakeup(&g_journal_switcher_state);
	}
	if ((flags & M_NOWAIT) && g_journal_cache_limit > 0 &&
	    g_journal_cache_used + size > g_journal_cache_limit) {
		mtx_unlock(&g_journal_cache_mtx);
		g_journal_cache_alloc_failures++;
		return (NULL);
	}
	g_journal_cache_used += size;
	mtx_unlock(&g_journal_cache_mtx);
	flags &= ~M_NOWAIT;
#ifndef GJ_MEMDEBUG
	p = malloc(size, M_JOURNAL, flags | M_WAITOK);
#else
	mi = malloc(sizeof(*mi) + size, M_JOURNAL, flags | M_WAITOK);
	p = (u_char *)mi + sizeof(*mi);
	mi->mi_size = size;
	stack_save(&mi->mi_stack);
#endif
	return (p);
}

static void
gj_free(void *p, size_t size)
{
#ifdef GJ_MEMDEBUG
	struct meminfo *mi;
#endif

	KASSERT(p != NULL, ("p=NULL"));
	KASSERT(size > 0, ("size=0"));
	mtx_lock(&g_journal_cache_mtx);
	KASSERT(g_journal_cache_used >= size, ("Freeing too much?"));
	g_journal_cache_used -= size;
	mtx_unlock(&g_journal_cache_mtx);
#ifdef GJ_MEMDEBUG
	mi = p = (void *)((u_char *)p - sizeof(*mi));
	if (mi->mi_size != size) {
		printf("GJOURNAL: Size mismatch! %zu != %zu\n", size,
		    mi->mi_size);
		printf("GJOURNAL: Alloc backtrace:\n");
		stack_print(&mi->mi_stack);
		printf("GJOURNAL: Free backtrace:\n");
		kdb_backtrace();
	}
#endif
	free(p, M_JOURNAL);
}

static void *
gj_realloc(void *p, size_t size, size_t oldsize)
{
	void *np;

#ifndef GJ_MEMDEBUG
	mtx_lock(&g_journal_cache_mtx);
	g_journal_cache_used -= oldsize;
	g_journal_cache_used += size;
	mtx_unlock(&g_journal_cache_mtx);
	np = realloc(p, size, M_JOURNAL, M_WAITOK);
#else
	np = gj_malloc(size, M_WAITOK);
	bcopy(p, np, MIN(oldsize, size));
	gj_free(p, oldsize);
#endif
	return (np);
}

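/*
 * Calculate how much of the free journal space the active journal consumes
 * and wake up the switcher thread once usage crosses the
 * g_journal_force_switch threshold. Panics if the journal offset has run
 * into the inactive journal.
 */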
static void
g_journal_check_overflow(struct g_journal_softc *sc)
{
	off_t length, used;

	if ((sc->sc_active.jj_offset < sc->sc_inactive.jj_offset &&
	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset) ||
	    (sc->sc_active.jj_offset > sc->sc_inactive.jj_offset &&
	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset &&
	     sc->sc_journal_offset < sc->sc_active.jj_offset)) {
		panic("Journal overflow "
		    "(id = %u joffset=%jd active=%jd inactive=%jd)",
		    (unsigned)sc->sc_id,
		    (intmax_t)sc->sc_journal_offset,
		    (intmax_t)sc->sc_active.jj_offset,
		    (intmax_t)sc->sc_inactive.jj_offset);
	}
	if (sc->sc_active.jj_offset < sc->sc_inactive.jj_offset) {
		length = sc->sc_inactive.jj_offset - sc->sc_active.jj_offset;
		used = sc->sc_journal_offset - sc->sc_active.jj_offset;
	} else {
		length = sc->sc_jend - sc->sc_active.jj_offset;
		length += sc->sc_inactive.jj_offset - sc->sc_jstart;
		if (sc->sc_journal_offset >= sc->sc_active.jj_offset)
			used = sc->sc_journal_offset - sc->sc_active.jj_offset;
		else {
			used = sc->sc_jend - sc->sc_active.jj_offset;
			used += sc->sc_journal_offset - sc->sc_jstart;
		}
	}
	/* Already woken up? */
	if (g_journal_switcher_wokenup)
		return;
	/*
	 * If the active journal takes more than g_journal_force_switch percent
	 * of the free journal space, we force a journal switch.
	 */
	KASSERT(length > 0,
	    ("length=%jd used=%jd active=%jd inactive=%jd joffset=%jd",
	    (intmax_t)length, (intmax_t)used,
	    (intmax_t)sc->sc_active.jj_offset,
	    (intmax_t)sc->sc_inactive.jj_offset,
	    (intmax_t)sc->sc_journal_offset));
	if ((used * 100) / length > g_journal_force_switch) {
		g_journal_stats_journal_full++;
		GJ_DEBUG(1, "Journal %s %jd%% full, forcing journal switch.",
		    sc->sc_name, (used * 100) / length);
		mtx_lock(&g_journal_cache_mtx);
		g_journal_switcher_wokenup = 1;
		wakeup(&g_journal_switcher_state);
		mtx_unlock(&g_journal_cache_mtx);
	}
}

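/*
 * Orphan method: the underlying provider has disappeared, so try to destroy
 * the journal device.
 */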
static void
g_journal_orphan(struct g_consumer *cp)
{
	struct g_journal_softc *sc;
	char name[256];
	int error;

	g_topology_assert();
	sc = cp->geom->softc;
	strlcpy(name, cp->provider->name, sizeof(name));
	GJ_DEBUG(0, "Lost provider %s.", name);
	if (sc == NULL)
		return;
	error = g_journal_destroy(sc);
	if (error == 0)
		GJ_DEBUG(0, "Journal %s destroyed.", name);
	else {
		GJ_DEBUG(0, "Cannot destroy journal %s (error=%d). "
		    "Destroy it manually after last close.", sc->sc_name,
		    error);
	}
}

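/*
 * Access method. On the first write open the device is marked as dirty
 * (the CLEAN flag is removed from the metadata), so that after a crash we
 * know the journal has to be replayed.
 */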
static int
g_journal_access(struct g_provider *pp, int acr, int acw, int ace)
{
	struct g_journal_softc *sc;
	int dcr, dcw, dce;

	g_topology_assert();
	GJ_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name,
	    acr, acw, ace);

	dcr = pp->acr + acr;
	dcw = pp->acw + acw;
	dce = pp->ace + ace;

	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY)) {
		if (acr <= 0 && acw <= 0 && ace <= 0)
			return (0);
		else
			return (ENXIO);
	}
	if (pp->acw == 0 && dcw > 0) {
		GJ_DEBUG(1, "Marking %s as dirty.", sc->sc_name);
		sc->sc_flags &= ~GJF_DEVICE_CLEAN;
		g_topology_unlock();
		g_journal_metadata_update(sc);
		g_topology_lock();
	} /* else if (pp->acw == 0 && dcw > 0 && JEMPTY(sc)) {
		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
		sc->sc_flags |= GJF_DEVICE_CLEAN;
		g_topology_unlock();
		g_journal_metadata_update(sc);
		g_topology_lock();
	} */
	return (0);
}

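/*
 * The two functions below encode and decode the journal header to and from
 * its little-endian on-disk representation.
 */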
static void
g_journal_header_encode(struct g_journal_header *hdr, u_char *data)
{

	bcopy(GJ_HEADER_MAGIC, data, sizeof(GJ_HEADER_MAGIC));
	data += sizeof(GJ_HEADER_MAGIC);
	le32enc(data, hdr->jh_journal_id);
	data += 4;
	le32enc(data, hdr->jh_journal_next_id);
}

static int
g_journal_header_decode(const u_char *data, struct g_journal_header *hdr)
{

	bcopy(data, hdr->jh_magic, sizeof(hdr->jh_magic));
	data += sizeof(hdr->jh_magic);
	if (bcmp(hdr->jh_magic, GJ_HEADER_MAGIC, sizeof(GJ_HEADER_MAGIC)) != 0)
		return (EINVAL);
	hdr->jh_journal_id = le32dec(data);
	data += 4;
	hdr->jh_journal_next_id = le32dec(data);
	return (0);
}

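/*
 * Send BIO_FLUSH to the journal and/or data providers, according to the
 * sc_bio_flush flags, so that previously written data reaches the media.
 */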
static void
g_journal_flush_cache(struct g_journal_softc *sc)
{
	struct bintime bt;
	int error;

	if (sc->sc_bio_flush == 0)
		return;
	GJ_TIMER_START(1, &bt);
	if (sc->sc_bio_flush & GJ_FLUSH_JOURNAL) {
		error = g_io_flush(sc->sc_jconsumer);
		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
		    sc->sc_jconsumer->provider->name, error);
	}
	if (sc->sc_bio_flush & GJ_FLUSH_DATA) {
		/*
		 * TODO: This could be called in parallel with the
		 *       previous call.
		 */
		error = g_io_flush(sc->sc_dconsumer);
		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
		    sc->sc_dconsumer->provider->name, error);
	}
	GJ_TIMER_STOP(1, &bt, "Cache flush time");
}

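/*
 * Write a journal header at the current journal offset and advance the
 * offset by one sector.
 */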
static int
g_journal_write_header(struct g_journal_softc *sc)
{
	struct g_journal_header hdr;
	struct g_consumer *cp;
	u_char *buf;
	int error;

	cp = sc->sc_jconsumer;
	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);

	strlcpy(hdr.jh_magic, GJ_HEADER_MAGIC, sizeof(hdr.jh_magic));
	hdr.jh_journal_id = sc->sc_journal_id;
	hdr.jh_journal_next_id = sc->sc_journal_next_id;
	g_journal_header_encode(&hdr, buf);
	error = g_write_data(cp, sc->sc_journal_offset, buf,
	    cp->provider->sectorsize);
	/* if (error == 0) */
	sc->sc_journal_offset += cp->provider->sectorsize;

	gj_free(buf, cp->provider->sectorsize);
	return (error);
}

/*
 * Every journal record has a header and data following it.
 * The functions below are used to encode the header into its little-endian
 * on-disk representation before storing it, and to decode it back to system
 * endianness after reading it.
 */
static void
g_journal_record_header_encode(struct g_journal_record_header *hdr,
    u_char *data)
{
	struct g_journal_entry *ent;
	u_int i;

	bcopy(GJ_RECORD_HEADER_MAGIC, data, sizeof(GJ_RECORD_HEADER_MAGIC));
	data += sizeof(GJ_RECORD_HEADER_MAGIC);
	le32enc(data, hdr->jrh_journal_id);
	data += 8;
	le16enc(data, hdr->jrh_nentries);
	data += 2;
	bcopy(hdr->jrh_sum, data, sizeof(hdr->jrh_sum));
	data += 8;
	for (i = 0; i < hdr->jrh_nentries; i++) {
		ent = &hdr->jrh_entries[i];
		le64enc(data, ent->je_joffset);
		data += 8;
		le64enc(data, ent->je_offset);
		data += 8;
		le64enc(data, ent->je_length);
		data += 8;
	}
}

static int
g_journal_record_header_decode(const u_char *data,
    struct g_journal_record_header *hdr)
{
	struct g_journal_entry *ent;
	u_int i;

	bcopy(data, hdr->jrh_magic, sizeof(hdr->jrh_magic));
	data += sizeof(hdr->jrh_magic);
	if (strcmp(hdr->jrh_magic, GJ_RECORD_HEADER_MAGIC) != 0)
		return (EINVAL);
	hdr->jrh_journal_id = le32dec(data);
	data += 8;
	hdr->jrh_nentries = le16dec(data);
	data += 2;
	if (hdr->jrh_nentries > GJ_RECORD_HEADER_NENTRIES)
		return (EINVAL);
	bcopy(data, hdr->jrh_sum, sizeof(hdr->jrh_sum));
	data += 8;
	for (i = 0; i < hdr->jrh_nentries; i++) {
		ent = &hdr->jrh_entries[i];
		ent->je_joffset = le64dec(data);
		data += 8;
		ent->je_offset = le64dec(data);
		data += 8;
		ent->je_length = le64dec(data);
		data += 8;
	}
	return (0);
}

/*
 * The function reads metadata from a provider (via the given consumer),
 * decodes it to system endianness and verifies its correctness.
 */
static int
g_journal_metadata_read(struct g_consumer *cp, struct g_journal_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	/* Metadata is stored in the last sector. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL) {
		GJ_DEBUG(1, "Cannot read metadata from %s (error=%d).",
		    cp->provider->name, error);
		return (error);
	}

	/* Decode metadata. */
	error = journal_metadata_decode(buf, md);
	g_free(buf);
	/* Is this a gjournal provider at all? */
	if (strcmp(md->md_magic, G_JOURNAL_MAGIC) != 0)
		return (EINVAL);
	/*
	 * Are we able to handle this version of metadata?
	 * We only maintain backward compatibility.
	 */
	if (md->md_version > G_JOURNAL_VERSION) {
		GJ_DEBUG(0,
		    "Kernel module is too old to handle metadata from %s.",
		    cp->provider->name);
		return (EINVAL);
	}
	/* Is checksum correct? */
	if (error != 0) {
		GJ_DEBUG(0, "MD5 metadata hash mismatch for provider %s.",
		    cp->provider->name);
		return (error);
	}
	return (0);
}

/*
 * The two functions below are responsible for updating metadata.
 * Only metadata on the data provider is updated (we need to keep
 * information about the active journal there).
 */
static void
g_journal_metadata_done(struct bio *bp)
{

	/*
	 * There is not much we can do on error except report it.
	 */
	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "Cannot update metadata (error=%d).",
		    bp->bio_error);
	} else {
		GJ_LOGREQ(2, bp, "Metadata updated.");
	}
	gj_free(bp->bio_data, bp->bio_length);
	g_destroy_bio(bp);
}

static void
g_journal_metadata_update(struct g_journal_softc *sc)
{
	struct g_journal_metadata md;
	struct g_consumer *cp;
	struct bio *bp;
	u_char *sector;

	cp = sc->sc_dconsumer;
	sector = gj_malloc(cp->provider->sectorsize, M_WAITOK);
	strlcpy(md.md_magic, G_JOURNAL_MAGIC, sizeof(md.md_magic));
	md.md_version = G_JOURNAL_VERSION;
	md.md_id = sc->sc_id;
	md.md_type = sc->sc_orig_type;
	md.md_jstart = sc->sc_jstart;
	md.md_jend = sc->sc_jend;
	md.md_joffset = sc->sc_inactive.jj_offset;
	md.md_jid = sc->sc_journal_previous_id;
	md.md_flags = 0;
	if (sc->sc_flags & GJF_DEVICE_CLEAN)
		md.md_flags |= GJ_FLAG_CLEAN;

	if (sc->sc_flags & GJF_DEVICE_HARDCODED)
		strlcpy(md.md_provider, sc->sc_name, sizeof(md.md_provider));
	else
		bzero(md.md_provider, sizeof(md.md_provider));
	md.md_provsize = cp->provider->mediasize;
	journal_metadata_encode(&md, sector);

	/*
	 * Flush the cache, so we know all data is on disk.
	 * We store information like "journal is consistent" here, so we need
	 * to be sure it is. Without BIO_FLUSH here, we could end up in a
	 * situation where the metadata is stored on disk, but not all of the
	 * data.
	 */
	g_journal_flush_cache(sc);

	bp = g_alloc_bio();
	bp->bio_offset = cp->provider->mediasize - cp->provider->sectorsize;
	bp->bio_length = cp->provider->sectorsize;
	bp->bio_data = sector;
	bp->bio_cmd = BIO_WRITE;
	if (!(sc->sc_flags & GJF_DEVICE_DESTROY)) {
		bp->bio_done = g_journal_metadata_done;
		g_io_request(bp, cp);
	} else {
		bp->bio_done = NULL;
		g_io_request(bp, cp);
		biowait(bp, "gjmdu");
		g_journal_metadata_done(bp);
	}

	/*
	 * Be sure metadata reached the disk.
	 */
	g_journal_flush_cache(sc);
}

/*
 * This is where I/O requests arrive from the GEOM framework.
 */
static void
g_journal_start(struct bio *bp)
{
	struct g_journal_softc *sc;

	sc = bp->bio_to->geom->softc;
	GJ_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
		mtx_lock(&sc->sc_mtx);
		bioq_insert_tail(&sc->sc_regular_queue, bp);
		wakeup(sc);
		mtx_unlock(&sc->sc_mtx);
		return;
	case BIO_GETATTR:
		if (strcmp(bp->bio_attribute, "GJOURNAL::provider") == 0) {
			strlcpy(bp->bio_data, bp->bio_to->name, bp->bio_length);
			bp->bio_completed = strlen(bp->bio_to->name) + 1;
			g_io_deliver(bp, 0);
			return;
		}
		/* FALLTHROUGH */
	case BIO_DELETE:
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
}

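/*
 * Standard done routine: queue the finished bio on the back queue and wake
 * up the worker thread.
 */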
static void
g_journal_std_done(struct bio *bp)
{
	struct g_journal_softc *sc;

	sc = bp->bio_from->geom->softc;
	mtx_lock(&sc->sc_mtx);
	bioq_insert_tail(&sc->sc_back_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_mtx);
}

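/*
 * Allocate a BIO_WRITE bio describing the given data range. If 'data' is not
 * NULL, a private copy of it is made (the copy may fail with M_NOWAIT).
 */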
static struct bio *
g_journal_new_bio(off_t start, off_t end, off_t joffset, u_char *data,
    int flags)
{
	struct bio *bp;

	bp = g_alloc_bio();
	bp->bio_offset = start;
	bp->bio_joffset = joffset;
	bp->bio_length = end - start;
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = g_journal_std_done;
	if (data == NULL)
		bp->bio_data = NULL;
	else {
		bp->bio_data = gj_malloc(bp->bio_length, flags);
		if (bp->bio_data != NULL)
			bcopy(data, bp->bio_data, bp->bio_length);
	}
	return (bp);
}

#define	g_journal_insert_bio(head, bp, flags)				\
	g_journal_insert((head), (bp)->bio_offset,			\
		(bp)->bio_offset + (bp)->bio_length, (bp)->bio_joffset,	\
		(bp)->bio_data, flags)
/*
 * The function below does a lot more than just inserting a bio into the
 * queue. It keeps the queue sorted by offset and ensures that there is no
 * duplicated data (it combines bios where the ranges overlap).
 *
 * The function returns the number of bios inserted (as a bio can be split).
 */
static int
g_journal_insert(struct bio **head, off_t nstart, off_t nend, off_t joffset,
    u_char *data, int flags)
{
	struct bio *nbp, *cbp, *pbp;
	off_t cstart, cend;
	u_char *tmpdata;
	int n;

	GJ_DEBUG(3, "INSERT(%p): (%jd, %jd, %jd)", *head, nstart, nend,
	    joffset);
	n = 0;
	pbp = NULL;
	GJQ_FOREACH(*head, cbp) {
		cstart = cbp->bio_offset;
		cend = cbp->bio_offset + cbp->bio_length;

		if (nstart >= cend) {
			/*
			 *  +-------------+
			 *  |             |
			 *  |   current   |  +-------------+
			 *  |     bio     |  |             |
			 *  |             |  |     new     |
			 *  +-------------+  |     bio     |
			 *                   |             |
			 *                   +-------------+
			 */
			GJ_DEBUG(3, "INSERT(%p): 1", *head);
		} else if (nend <= cstart) {
			/*
			 *                   +-------------+
			 *                   |             |
			 *  +-------------+  |   current   |
			 *  |             |  |     bio     |
			 *  |     new     |  |             |
			 *  |     bio     |  +-------------+
			 *  |             |
			 *  +-------------+
			 */
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			if (pbp == NULL)
				*head = nbp;
			else
				pbp->bio_next = nbp;
			nbp->bio_next = cbp;
			n++;
			GJ_DEBUG(3, "INSERT(%p): 2 (nbp=%p pbp=%p)", *head, nbp,
			    pbp);
			goto end;
		} else if (nstart <= cstart && nend >= cend) {
			/*
			 *      +-------------+      +-------------+
			 *      | current bio |      | current bio |
			 *  +---+-------------+---+  +-------------+---+
			 *  |   |             |   |  |             |   |
			 *  |   |             |   |  |             |   |
			 *  |   +-------------+   |  +-------------+   |
			 *  |       new bio       |  |     new bio     |
			 *  +---------------------+  +-----------------+
			 *
			 *      +-------------+  +-------------+
			 *      | current bio |  | current bio |
			 *  +---+-------------+  +-------------+
			 *  |   |             |  |             |
			 *  |   |             |  |             |
			 *  |   +-------------+  +-------------+
			 *  |     new bio     |  |   new bio   |
			 *  +-----------------+  +-------------+
			 */
			g_journal_stats_bytes_skipped += cbp->bio_length;
			cbp->bio_offset = nstart;
			cbp->bio_joffset = joffset;
			cbp->bio_length = cend - nstart;
			if (cbp->bio_data != NULL) {
				gj_free(cbp->bio_data, cend - cstart);
				cbp->bio_data = NULL;
			}
			if (data != NULL) {
				cbp->bio_data = gj_malloc(cbp->bio_length,
				    flags);
				if (cbp->bio_data != NULL) {
					bcopy(data, cbp->bio_data,
					    cbp->bio_length);
				}
				data += cend - nstart;
			}
			joffset += cend - nstart;
			nstart = cend;
			GJ_DEBUG(3, "INSERT(%p): 3 (cbp=%p)", *head, cbp);
		} else if (nstart > cstart && nend >= cend) {
			/*
			 *  +-----------------+  +-------------+
			 *  |   current bio   |  | current bio |
			 *  |   +-------------+  |   +---------+---+
			 *  |   |             |  |   |         |   |
			 *  |   |             |  |   |         |   |
			 *  +---+-------------+  +---+---------+   |
			 *      |   new bio   |      |   new bio   |
			 *      +-------------+      +-------------+
			 */
			g_journal_stats_bytes_skipped += cend - nstart;
			nbp = g_journal_new_bio(nstart, cend, joffset, data,
			    flags);
			nbp->bio_next = cbp->bio_next;
			cbp->bio_next = nbp;
			cbp->bio_length = nstart - cstart;
			if (cbp->bio_data != NULL) {
				cbp->bio_data = gj_realloc(cbp->bio_data,
				    cbp->bio_length, cend - cstart);
			}
			if (data != NULL)
				data += cend - nstart;
			joffset += cend - nstart;
			nstart = cend;
			n++;
			GJ_DEBUG(3, "INSERT(%p): 4 (cbp=%p)", *head, cbp);
		} else if (nstart > cstart && nend < cend) {
			/*
			 *  +---------------------+
			 *  |     current bio     |
			 *  |   +-------------+   |
			 *  |   |             |   |
			 *  |   |             |   |
			 *  +---+-------------+---+
			 *      |   new bio   |
			 *      +-------------+
			 */
			g_journal_stats_bytes_skipped += nend - nstart;
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			nbp->bio_next = cbp->bio_next;
			cbp->bio_next = nbp;
			if (cbp->bio_data == NULL)
				tmpdata = NULL;
			else
				tmpdata = cbp->bio_data + nend - cstart;
			nbp = g_journal_new_bio(nend, cend,
			    cbp->bio_joffset + nend - cstart, tmpdata, flags);
			nbp->bio_next = ((struct bio *)cbp->bio_next)->bio_next;
			((struct bio *)cbp->bio_next)->bio_next = nbp;
			cbp->bio_length = nstart - cstart;
			if (cbp->bio_data != NULL) {
				cbp->bio_data = gj_realloc(cbp->bio_data,
				    cbp->bio_length, cend - cstart);
			}
			n += 2;
			GJ_DEBUG(3, "INSERT(%p): 5 (cbp=%p)", *head, cbp);
			goto end;
		} else if (nstart <= cstart && nend < cend) {
			/*
			 *  +-----------------+      +-------------+
			 *  |   current bio   |      | current bio |
			 *  +-------------+   |  +---+---------+   |
			 *  |             |   |  |   |         |   |
			 *  |             |   |  |   |         |   |
			 *  +-------------+---+  |   +---------+---+
			 *  |   new bio   |      |   new bio   |
			 *  +-------------+      +-------------+
			 */
			g_journal_stats_bytes_skipped += nend - nstart;
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			if (pbp == NULL)
				*head = nbp;
			else
				pbp->bio_next = nbp;
			nbp->bio_next = cbp;
			cbp->bio_offset = nend;
			cbp->bio_length = cend - nend;
			cbp->bio_joffset += nend - cstart;
			tmpdata = cbp->bio_data;
			if (tmpdata != NULL) {
				cbp->bio_data = gj_malloc(cbp->bio_length,
				    flags);
				if (cbp->bio_data != NULL) {
					bcopy(tmpdata + nend - cstart,
					    cbp->bio_data, cbp->bio_length);
				}
				gj_free(tmpdata, cend - cstart);
			}
			n++;
			GJ_DEBUG(3, "INSERT(%p): 6 (cbp=%p)", *head, cbp);
			goto end;
		}
		if (nstart == nend)
			goto end;
		pbp = cbp;
	}
	nbp = g_journal_new_bio(nstart, nend, joffset, data, flags);
	if (pbp == NULL)
		*head = nbp;
	else
		pbp->bio_next = nbp;
	nbp->bio_next = NULL;
	n++;
	GJ_DEBUG(3, "INSERT(%p): 8 (nbp=%p pbp=%p)", *head, nbp, pbp);
end:
	if (g_journal_debug >= 3) {
		GJQ_FOREACH(*head, cbp) {
			GJ_DEBUG(3, "ELEMENT: %p (%jd, %jd, %jd, %p)", cbp,
			    (intmax_t)cbp->bio_offset,
			    (intmax_t)cbp->bio_length,
			    (intmax_t)cbp->bio_joffset, cbp->bio_data);
		}
		GJ_DEBUG(3, "INSERT(%p): DONE %d", *head, n);
	}
	return (n);
}

/*
 * The function combines neighbouring bios, trying to squeeze as much data as
 * possible into one bio.
 *
 * The function returns the number of bios combined (as a negative value).
 */
static int
g_journal_optimize(struct bio *head)
{
	struct bio *cbp, *pbp;
	int n;

	n = 0;
	pbp = NULL;
	GJQ_FOREACH(head, cbp) {
		/* Skip bios which have to be read first. */
		if (cbp->bio_data == NULL) {
			pbp = NULL;
			continue;
		}
		/* There is no previous bio yet. */
		if (pbp == NULL) {
			pbp = cbp;
			continue;
		}
		/* Is this a neighbour bio? */
		if (pbp->bio_offset + pbp->bio_length != cbp->bio_offset) {
			/* Be sure that the bio queue is sorted. */
			KASSERT(pbp->bio_offset + pbp->bio_length < cbp->bio_offset,
			    ("poffset=%jd plength=%jd coffset=%jd",
			    (intmax_t)pbp->bio_offset,
			    (intmax_t)pbp->bio_length,
			    (intmax_t)cbp->bio_offset));
			pbp = cbp;
			continue;
		}
		/* Be sure we don't end up with a bio that is too big. */
		if (pbp->bio_length + cbp->bio_length > MAXPHYS) {
			pbp = cbp;
			continue;
		}
		/* Ok, we can join bios. */
		GJ_LOGREQ(4, pbp, "Join: ");
		GJ_LOGREQ(4, cbp, "and: ");
		pbp->bio_data = gj_realloc(pbp->bio_data,
		    pbp->bio_length + cbp->bio_length, pbp->bio_length);
		bcopy(cbp->bio_data, pbp->bio_data + pbp->bio_length,
		    cbp->bio_length);
		gj_free(cbp->bio_data, cbp->bio_length);
		pbp->bio_length += cbp->bio_length;
		pbp->bio_next = cbp->bio_next;
		g_destroy_bio(cbp);
		cbp = pbp;
		g_journal_stats_combined_ios++;
		n--;
		GJ_LOGREQ(4, pbp, "Got: ");
	}
	return (n);
}

/*
 * TODO: Update comment.
 * These are the functions responsible for copying one portion of data from
 * the journal to the destination provider.
 * The order goes like this:
 * 1. Read the header, which contains information about the data blocks
 *    following it.
 * 2. Read the data blocks from the journal.
 * 3. Write the data blocks to the data provider.
 *
 * g_journal_copy_start()
 * g_journal_copy_done() - got a finished write request, logs potential errors.
 */

/*
 * When there is no data in cache, this function is used to read it.
 */
static void
g_journal_read_first(struct g_journal_softc *sc, struct bio *bp)
{
	struct bio *cbp;

	/*
	 * We were short on memory, so the data was freed.
	 * In that case we need to read it back from the journal.
	 */
	cbp = g_alloc_bio();
	cbp->bio_cflags = bp->bio_cflags;
	cbp->bio_parent = bp;
	cbp->bio_offset = bp->bio_joffset;
	cbp->bio_length = bp->bio_length;
	cbp->bio_data = gj_malloc(bp->bio_length, M_WAITOK);
	cbp->bio_cmd = BIO_READ;
	cbp->bio_done = g_journal_std_done;
	GJ_LOGREQ(4, cbp, "READ FIRST");
	g_io_request(cbp, sc->sc_jconsumer);
	g_journal_cache_misses++;
}

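/*
 * Take bios from the inactive journal queue (keeping up to
 * g_journal_parallel_copies in flight), optionally combine them, and send
 * them to the data provider. Bios whose data was reclaimed are first read
 * back from the journal.
 */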
static void
g_journal_copy_send(struct g_journal_softc *sc)
{
	struct bio *bioq, *bp, *lbp;

	bioq = lbp = NULL;
	mtx_lock(&sc->sc_mtx);
	for (; sc->sc_copy_in_progress < g_journal_parallel_copies;) {
		bp = GJQ_FIRST(sc->sc_inactive.jj_queue);
		if (bp == NULL)
			break;
		GJQ_REMOVE(sc->sc_inactive.jj_queue, bp);
		sc->sc_copy_in_progress++;
		GJQ_INSERT_AFTER(bioq, bp, lbp);
		lbp = bp;
	}
	mtx_unlock(&sc->sc_mtx);
	if (g_journal_do_optimize)
		sc->sc_copy_in_progress += g_journal_optimize(bioq);
	while ((bp = GJQ_FIRST(bioq)) != NULL) {
		GJQ_REMOVE(bioq, bp);
		GJQ_INSERT_HEAD(sc->sc_copy_queue, bp);
		bp->bio_cflags = GJ_BIO_COPY;
		if (bp->bio_data == NULL)
			g_journal_read_first(sc, bp);
		else {
			bp->bio_joffset = 0;
			GJ_LOGREQ(4, bp, "SEND");
			g_io_request(bp, sc->sc_dconsumer);
		}
	}
}

static void
g_journal_copy_start(struct g_journal_softc *sc)
{

	/*
	 * Remember in the metadata that we're starting to copy journaled data
	 * to the data provider.
	 * In case of power failure, this data will be copied again on boot.
	 */
	if (!sc->sc_journal_copying) {
		sc->sc_journal_copying = 1;
		GJ_DEBUG(1, "Starting copy of journal.");
		g_journal_metadata_update(sc);
	}
	g_journal_copy_send(sc);
}

/*
 * Data block has been read from the journal provider.
 */
static int
g_journal_copy_read_done(struct bio *bp)
{
	struct g_journal_softc *sc;
	struct g_consumer *cp;
	struct bio *pbp;

	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));

	sc = bp->bio_from->geom->softc;
	pbp = bp->bio_parent;

	if (bp->bio_error != 0) {
		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
		    bp->bio_to->name, bp->bio_error);
		/*
		 * We will not be able to deliver the WRITE request either.
		 */
		gj_free(bp->bio_data, bp->bio_length);
		g_destroy_bio(pbp);
		g_destroy_bio(bp);
		sc->sc_copy_in_progress--;
		return (1);
	}
	pbp->bio_data = bp->bio_data;
	cp = sc->sc_dconsumer;
	g_io_request(pbp, cp);
	GJ_LOGREQ(4, bp, "READ DONE");
	g_destroy_bio(bp);
	return (0);
}

/*
 * Data block has been written to the data provider.
 */
static void
g_journal_copy_write_done(struct bio *bp)
{
	struct g_journal_softc *sc;

	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));

	sc = bp->bio_from->geom->softc;
	sc->sc_copy_in_progress--;

	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "[copy] Error while writing data (error=%d)",
		    bp->bio_error);
	}
	GJQ_REMOVE(sc->sc_copy_queue, bp);
	gj_free(bp->bio_data, bp->bio_length);
	GJ_LOGREQ(4, bp, "DONE");
	g_destroy_bio(bp);

	if (sc->sc_copy_in_progress == 0) {
		/*
		 * This was the last write request for this journal.
		 */
		GJ_DEBUG(1, "Data has been copied.");
		sc->sc_journal_copying = 0;
	}
}

static void g_journal_flush_done(struct bio *bp);

/*
 * Flush one record onto the active journal provider.
 */
static void
g_journal_flush(struct g_journal_softc *sc)
{
	struct g_journal_record_header hdr;
	struct g_journal_entry *ent;
	struct g_provider *pp;
	struct bio **bioq;
	struct bio *bp, *fbp, *pbp;
	off_t joffset;
	u_char *data, hash[16];
	MD5_CTX ctx;
	u_int i;

	if (sc->sc_current_count == 0)
		return;

	pp = sc->sc_jprovider;
	GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
	joffset = sc->sc_journal_offset;

	GJ_DEBUG(2, "Storing %d journal entries on %s at %jd.",
	    sc->sc_current_count, pp->name, (intmax_t)joffset);

	/*
	 * Store 'journal id', so we know to which journal this record belongs.
	 */
	hdr.jrh_journal_id = sc->sc_journal_id;
	/* May be less than g_journal_record_entries if called due to a timeout. */
	hdr.jrh_nentries = MIN(sc->sc_current_count, g_journal_record_entries);
	strlcpy(hdr.jrh_magic, GJ_RECORD_HEADER_MAGIC, sizeof(hdr.jrh_magic));

	bioq = &sc->sc_active.jj_queue;
	GJQ_LAST(sc->sc_flush_queue, pbp);

	fbp = g_alloc_bio();
	fbp->bio_parent = NULL;
	fbp->bio_cflags = GJ_BIO_JOURNAL;
	fbp->bio_offset = -1;
	fbp->bio_joffset = joffset;
	fbp->bio_length = pp->sectorsize;
	fbp->bio_cmd = BIO_WRITE;
	fbp->bio_done = g_journal_std_done;
	GJQ_INSERT_AFTER(sc->sc_flush_queue, fbp, pbp);
	pbp = fbp;
	fbp->bio_to = pp;
	GJ_LOGREQ(4, fbp, "FLUSH_OUT");
	joffset += pp->sectorsize;
	sc->sc_flush_count++;
	if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
		MD5Init(&ctx);

	for (i = 0; i < hdr.jrh_nentries; i++) {
		bp = sc->sc_current_queue;
		KASSERT(bp != NULL, ("NULL bp"));
		bp->bio_to = pp;
		GJ_LOGREQ(4, bp, "FLUSHED");
		sc->sc_current_queue = bp->bio_next;
		bp->bio_next = NULL;
		sc->sc_current_count--;

		/* Add to the header. */
		ent = &hdr.jrh_entries[i];
		ent->je_offset = bp->bio_offset;
		ent->je_joffset = joffset;
		ent->je_length = bp->bio_length;

		data = bp->bio_data;
		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
			MD5Update(&ctx, data, ent->je_length);
		g_reset_bio(bp);
		bp->bio_cflags = GJ_BIO_JOURNAL;
		bp->bio_offset = ent->je_offset;
		bp->bio_joffset = ent->je_joffset;
		bp->bio_length = ent->je_length;
		bp->bio_data = data;
		bp->bio_cmd = BIO_WRITE;
		bp->bio_done = g_journal_std_done;
		GJQ_INSERT_AFTER(sc->sc_flush_queue, bp, pbp);
		pbp = bp;
		bp->bio_to = pp;
		GJ_LOGREQ(4, bp, "FLUSH_OUT");
		joffset += bp->bio_length;
		sc->sc_flush_count++;

		/*
		 * Add the request to the active journal queue
		 * (sc_active.jj_queue). This is our cache. After a journal
		 * switch we don't have to read the data from the inactive
		 * journal, because we keep it in memory.
		 */
		g_journal_insert(bioq, ent->je_offset,
		    ent->je_offset + ent->je_length, ent->je_joffset, data,
		    M_NOWAIT);
	}

	/*
	 * After all requests, store valid header.
	 */
	data = gj_malloc(pp->sectorsize, M_WAITOK);
	if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
		MD5Final(hash, &ctx);
		bcopy(hash, hdr.jrh_sum, sizeof(hdr.jrh_sum));
	}
	g_journal_record_header_encode(&hdr, data);
	fbp->bio_data = data;

	sc->sc_journal_offset = joffset;

	g_journal_check_overflow(sc);
}

/*
 * Flush request finished.
 */
static void
g_journal_flush_done(struct bio *bp)
{
	struct g_journal_softc *sc;
	struct g_consumer *cp;

	KASSERT((bp->bio_cflags & GJ_BIO_MASK) == GJ_BIO_JOURNAL,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_JOURNAL));

	cp = bp->bio_from;
	sc = cp->geom->softc;
	sc->sc_flush_in_progress--;

	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "[flush] Error while writing data (error=%d)",
		    bp->bio_error);
	}
	gj_free(bp->bio_data, bp->bio_length);
	GJ_LOGREQ(4, bp, "DONE");
	g_destroy_bio(bp);
}

static void g_journal_release_delayed(struct g_journal_softc *sc);

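/*
 * Send up to g_journal_parallel_flushes flush requests from the flush queue
 * to the active journal, releasing delayed requests along the way.
 */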
static void
g_journal_flush_send(struct g_journal_softc *sc)
{
	struct g_consumer *cp;
	struct bio *bioq, *bp, *lbp;

	cp = sc->sc_jconsumer;
	bioq = lbp = NULL;
	while (sc->sc_flush_in_progress < g_journal_parallel_flushes) {
		/* Send one flush request to the active journal. */
		bp = GJQ_FIRST(sc->sc_flush_queue);
		if (bp != NULL) {
			GJQ_REMOVE(sc->sc_flush_queue, bp);
			sc->sc_flush_count--;
			bp->bio_offset = bp->bio_joffset;
			bp->bio_joffset = 0;
			sc->sc_flush_in_progress++;
			GJQ_INSERT_AFTER(bioq, bp, lbp);
			lbp = bp;
		}
		/* Try to release delayed requests. */
		g_journal_release_delayed(sc);
		/* If there are no requests to flush, leave. */
		if (GJQ_FIRST(sc->sc_flush_queue) == NULL)
			break;
	}
	if (g_journal_do_optimize)
		sc->sc_flush_in_progress += g_journal_optimize(bioq);
	while ((bp = GJQ_FIRST(bioq)) != NULL) {
		GJQ_REMOVE(bioq, bp);
		GJ_LOGREQ(3, bp, "Flush request sent");
		g_io_request(bp, cp);
	}
}

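/*
 * Insert a bio into the current queue, deliver the response immediately and
 * flush one record onto the active journal when enough entries accumulate.
 */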
static void
g_journal_add_current(struct g_journal_softc *sc, struct bio *bp)
{
	int n;

	GJ_LOGREQ(4, bp, "CURRENT %d", sc->sc_current_count);
	n = g_journal_insert_bio(&sc->sc_current_queue, bp, M_WAITOK);
	sc->sc_current_count += n;
	n = g_journal_optimize(sc->sc_current_queue);
	sc->sc_current_count += n;
	/*
	 * For requests which are added to the current queue we deliver the
	 * response immediately.
	 */
	bp->bio_completed = bp->bio_length;
	g_io_deliver(bp, 0);
	if (sc->sc_current_count >= g_journal_record_entries) {
		/*
		 * Let's flush one record onto the active journal provider.
		 */
		g_journal_flush(sc);
	}
}

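/*
 * Move delayed requests to the current queue as long as the flush queue has
 * room for them.
 */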
static void
g_journal_release_delayed(struct g_journal_softc *sc)
{
	struct bio *bp;

	for (;;) {
		/* The flush queue is full, exit. */
		if (sc->sc_flush_count >= g_journal_accept_immediately)
			return;
		bp = bioq_takefirst(&sc->sc_delayed_queue);
		if (bp == NULL)
			return;
		sc->sc_delayed_count--;
		g_journal_add_current(sc, bp);
	}
}

/*
 * Add an I/O request to the current queue. If we have enough requests for one
 * journal record, we flush them onto the active journal provider.
 */
static void
g_journal_add_request(struct g_journal_softc *sc, struct bio *bp)
{

	/*
	 * The flush queue is full, we need to delay the request.
	 */
	if (sc->sc_delayed_count > 0 ||
	    sc->sc_flush_count >= g_journal_accept_immediately) {
		GJ_LOGREQ(4, bp, "DELAYED");
		bioq_insert_tail(&sc->sc_delayed_queue, bp);
		sc->sc_delayed_count++;
		return;
	}

	KASSERT(TAILQ_EMPTY(&sc->sc_delayed_queue.queue),
	    ("DELAYED queue not empty."));
	g_journal_add_current(sc, bp);
}

static void g_journal_read_done(struct bio *bp);

/*
 * Try to find requested data in cache.
 */
static struct bio *
g_journal_read_find(struct bio *head, int sorted, struct bio *pbp, off_t ostart,
    off_t oend)
{
	off_t cstart, cend;
	struct bio *bp;

	GJQ_FOREACH(head, bp) {
		if (bp->bio_offset == -1)
			continue;
		cstart = MAX(ostart, bp->bio_offset);
		cend = MIN(oend, bp->bio_offset + bp->bio_length);
		if (cend <= ostart)
			continue;
		else if (cstart >= oend) {
			if (!sorted)
				continue;
			else {
				bp = NULL;
				break;
			}
		}
		if (bp->bio_data == NULL)
			break;
		GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
		    bp);
		bcopy(bp->bio_data + cstart - bp->bio_offset,
		    pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
		pbp->bio_completed += cend - cstart;
		if (pbp->bio_completed == pbp->bio_length) {
			/*
			 * Cool, the whole request was in cache, deliver happy
			 * message.
			 */
			g_io_deliver(pbp, 0);
			return (pbp);
		}
		break;
	}
	return (bp);
}

/*
 * This function is used for collecting data on read.
 * The complexity comes from the fact that parts of the data can be stored in
 * four different places:
 * - in memory - the data not yet sent to the active journal provider
 * - in the active journal
 * - in the inactive journal
 * - in the data provider
 */
static void
g_journal_read(struct g_journal_softc *sc, struct bio *pbp, off_t ostart,
    off_t oend)
{
	struct bio *bp, *nbp, *head;
	off_t cstart, cend;
	u_int i, sorted = 0;

	GJ_DEBUG(3, "READ: (%jd, %jd)", ostart, oend);

	cstart = cend = -1;
	bp = NULL;
	head = NULL;
	for (i = 1; i <= 5; i++) {
		switch (i) {
		case 1:	/* Not-yet-sent data. */
			head = sc->sc_current_queue;
			sorted = 1;
			break;
		case 2: /* Skip the flush queue; its bios are in the active queue. */
			continue;
		case 3:	/* Active journal. */
			head = sc->sc_active.jj_queue;
			sorted = 1;
			break;
		case 4:	/* Inactive journal. */
			/*
			 * XXX: Here could be a race with g_journal_lowmem().
			 */
			head = sc->sc_inactive.jj_queue;
			sorted = 1;
			break;
		case 5:	/* In-flight to the data provider. */
			head = sc->sc_copy_queue;
			sorted = 0;
			break;
		default:
			panic("gjournal %s: i=%d", __func__, i);
		}
		bp = g_journal_read_find(head, sorted, pbp, ostart, oend);
		if (bp == pbp) { /* Got the whole request. */
			GJ_DEBUG(2, "Got the whole request from %u.", i);
			return;
		} else if (bp != NULL) {
			cstart = MAX(ostart, bp->bio_offset);
			cend = MIN(oend, bp->bio_offset + bp->bio_length);
			GJ_DEBUG(2, "Got part of the request from %u (%jd-%jd).",
			    i, (intmax_t)cstart, (intmax_t)cend);
			break;
		}
	}
	if (bp != NULL) {
		if (bp->bio_data == NULL) {
			nbp = g_duplicate_bio(pbp);
			nbp->bio_cflags = GJ_BIO_READ;
			nbp->bio_data =
			    pbp->bio_data + cstart - pbp->bio_offset;
			nbp->bio_offset =
			    bp->bio_joffset + cstart - bp->bio_offset;
			nbp->bio_length = cend - cstart;
			nbp->bio_done = g_journal_read_done;
			g_io_request(nbp, sc->sc_jconsumer);
		}
		/*
		 * If we don't have the whole request yet, call g_journal_read()
		 * recursively.
		 */
		if (ostart < cstart)
			g_journal_read(sc, pbp, ostart, cstart);
		if (oend > cend)
			g_journal_read(sc, pbp, cend, oend);
	} else {
		/*
		 * No data in memory, no data in the journal.
		 * It's time to ask the data provider.
		 */
		GJ_DEBUG(3, "READ(data): (%jd, %jd)", ostart, oend);
		nbp = g_duplicate_bio(pbp);
		nbp->bio_cflags = GJ_BIO_READ;
		nbp->bio_data = pbp->bio_data + ostart - pbp->bio_offset;
		nbp->bio_offset = ostart;
		nbp->bio_length = oend - ostart;
		nbp->bio_done = g_journal_read_done;
		g_io_request(nbp, sc->sc_dconsumer);
		/* We have the whole request, return here. */
		return;
	}
}

/*
 * Function responsible for handling finished READ requests.
 * Actually, g_std_done() could be used here; the only difference is that we
 * log the error.
 */
static void
g_journal_read_done(struct bio *bp)
{
	struct bio *pbp;

	KASSERT(bp->bio_cflags == GJ_BIO_READ,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_READ));

	pbp = bp->bio_parent;
	pbp->bio_inbed++;
	pbp->bio_completed += bp->bio_length;

	if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
		    bp->bio_to->name, bp->bio_error);
	}
	g_destroy_bio(bp);
	if (pbp->bio_children == pbp->bio_inbed &&
	    pbp->bio_completed == pbp->bio_length) {
		/* We're done. */
		g_io_deliver(pbp, 0);
	}
}

/*
 * Deactivate the current journal and activate the next one.
 */
static void
g_journal_switch(struct g_journal_softc *sc)
{
	struct g_provider *pp;

	if (JEMPTY(sc)) {
		GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
		pp = LIST_FIRST(&sc->sc_geom->provider);
		if (!(sc->sc_flags & GJF_DEVICE_CLEAN) && pp->acw == 0) {
			sc->sc_flags |= GJF_DEVICE_CLEAN;
			GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
			g_journal_metadata_update(sc);
		}
	} else {
		GJ_DEBUG(3, "Switching journal %s.", sc->sc_geom->name);

		pp = sc->sc_jprovider;

		sc->sc_journal_previous_id = sc->sc_journal_id;

		sc->sc_journal_id = sc->sc_journal_next_id;
		sc->sc_journal_next_id = arc4random();

		GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);

		g_journal_write_header(sc);

		sc->sc_inactive.jj_offset = sc->sc_active.jj_offset;
		sc->sc_inactive.jj_queue = sc->sc_active.jj_queue;

		sc->sc_active.jj_offset =
		    sc->sc_journal_offset - pp->sectorsize;
		sc->sc_active.jj_queue = NULL;

		/*
		 * Switch is done, start copying data from the (now) inactive
		 * journal to the data provider.
		 */
		g_journal_copy_start(sc);
	}
	mtx_lock(&sc->sc_mtx);
	sc->sc_flags &= ~GJF_DEVICE_SWITCH;
	mtx_unlock(&sc->sc_mtx);
}

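/*
 * Initialize a fresh journal: generate new journal IDs, write the first
 * journal header and reset all offsets to the start of the journal area.
 */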
static void
g_journal_initialize(struct g_journal_softc *sc)
{

	sc->sc_journal_id = arc4random();
	sc->sc_journal_next_id = arc4random();
	sc->sc_journal_previous_id = sc->sc_journal_id;
	sc->sc_journal_offset = sc->sc_jstart;
	sc->sc_inactive.jj_offset = sc->sc_jstart;
	g_journal_write_header(sc);
	sc->sc_active.jj_offset = sc->sc_jstart;
}

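/*
 * Ask every supported file system to mark the data provider as dirty.
 */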
static void
g_journal_mark_as_dirty(struct g_journal_softc *sc)
{
	const struct g_journal_desc *desc;
	int i;

	GJ_DEBUG(1, "Marking file system %s as dirty.", sc->sc_name);
	for (i = 0; (desc = g_journal_filesystems[i]) != NULL; i++)
		desc->jd_dirty(sc->sc_dconsumer);
}

/*
 * This function reads a record header from the given journal.
 * It is very similar to g_read_data(9), but it doesn't allocate memory for
 * the bio and data on every call.
 */
static int
g_journal_sync_read(struct g_consumer *cp, struct bio *bp, off_t offset,
    void *data)
{
	int error;

	g_reset_bio(bp);
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = cp->provider->sectorsize;
	bp->bio_data = data;
	g_io_request(bp, cp);
	error = biowait(bp, "gjs_read");
	return (error);
}

#if 0
/*
 * This function is called when we start the journal device and detect that
 * one of the journals was not fully copied.
 * The purpose of this function is to read all record headers from the journal
 * and place them in the inactive queue, so we can start the journal
 * synchronization process and the journal provider itself.
 * The design decision was to not synchronize the whole journal here, as it
 * can take too much time. Reading only the headers and delaying the
 * synchronization process until after the journal provider is started should
 * be the best choice.
 */
#endif

static void
g_journal_sync(struct g_journal_softc *sc)
{
	struct g_journal_record_header rhdr;
	struct g_journal_entry *ent;
	struct g_journal_header jhdr;
	struct g_consumer *cp;
	struct bio *bp, *fbp, *tbp;
	off_t joffset, offset;
	u_char *buf, sum[16];
	uint64_t id;
	MD5_CTX ctx;
	int error, found, i;

	found = 0;
	fbp = NULL;
	cp = sc->sc_jconsumer;
	bp = g_alloc_bio();
	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
	offset = joffset = sc->sc_inactive.jj_offset = sc->sc_journal_offset;

	GJ_DEBUG(2, "Looking for termination at %jd.", (intmax_t)joffset);

	/*
	 * Read and decode first journal header.
	 */
	error = g_journal_sync_read(cp, bp, offset, buf);
	if (error != 0) {
		GJ_DEBUG(0, "Error while reading journal header from %s.",
		    cp->provider->name);
		goto end;
	}
	error = g_journal_header_decode(buf, &jhdr);
	if (error != 0) {
		GJ_DEBUG(0, "Cannot decode journal header from %s.",
		    cp->provider->name);
		goto end;
	}
	id = sc->sc_journal_id;
	if (jhdr.jh_journal_id != sc->sc_journal_id) {
		GJ_DEBUG(1, "Journal ID mismatch at %jd (0x%08x != 0x%08x).",
		    (intmax_t)offset, (u_int)jhdr.jh_journal_id, (u_int)id);
		goto end;
	}
	offset += cp->provider->sectorsize;
	id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;

	for (;;) {
		/*
		 * If the biggest record won't fit, look for a record header or
		 * journal header from the beginning.
		 */
		GJ_VALIDATE_OFFSET(offset, sc);
		error = g_journal_sync_read(cp, bp, offset, buf);
		if (error != 0) {
1807			/*
1808			 * Not good. Having an error while reading header
1809			 * means, that we cannot read next headers and in
1810			 * consequence we cannot find termination.
1811			 */
1812			GJ_DEBUG(0,
1813			    "Error while reading record header from %s.",
1814			    cp->provider->name);
1815			break;
1816		}

		error = g_journal_record_header_decode(buf, &rhdr);
		if (error != 0) {
			GJ_DEBUG(2, "Not a record header at %jd (error=%d).",
			    (intmax_t)offset, error);
			/*
			 * This is not a record header.
			 * If we are lucky, it is the next journal header.
			 */
			error = g_journal_header_decode(buf, &jhdr);
			if (error != 0) {
				GJ_DEBUG(1, "Not a journal header at %jd (error=%d).",
				    (intmax_t)offset, error);
				/*
				 * Nope, this is not a journal header either,
				 * which basically means that the journal is
				 * not terminated properly.
				 */
				error = ENOENT;
				break;
			}
			/*
			 * Ok. This is the header of _some_ journal. Now we
			 * need to verify whether it is the header of the
			 * _next_ journal.
			 */
			if (jhdr.jh_journal_id != id) {
				GJ_DEBUG(1, "Journal ID mismatch at %jd "
				    "(0x%08x != 0x%08x).", (intmax_t)offset,
				    (u_int)jhdr.jh_journal_id, (u_int)id);
				error = ENOENT;
				break;
			}

			/* Found termination. */
			found++;
			GJ_DEBUG(1, "Found termination at %jd (id=0x%08x).",
			    (intmax_t)offset, (u_int)id);
			sc->sc_active.jj_offset = offset;
			sc->sc_journal_offset =
			    offset + cp->provider->sectorsize;
			sc->sc_journal_id = id;
			id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;

			while ((tbp = fbp) != NULL) {
				fbp = tbp->bio_next;
				GJ_LOGREQ(3, tbp, "Adding request.");
				g_journal_insert_bio(&sc->sc_inactive.jj_queue,
				    tbp, M_WAITOK);
			}

			/* Skip journal's header. */
			offset += cp->provider->sectorsize;
			continue;
		}

		/* Skip record's header. */
		offset += cp->provider->sectorsize;

		/*
		 * Add information about every record entry to the inactive
		 * queue.
		 */
		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
			MD5Init(&ctx);
		for (i = 0; i < rhdr.jrh_nentries; i++) {
			ent = &rhdr.jrh_entries[i];
			GJ_DEBUG(3, "Insert entry: %jd %jd.",
			    (intmax_t)ent->je_offset, (intmax_t)ent->je_length);
			g_journal_insert(&fbp, ent->je_offset,
			    ent->je_offset + ent->je_length, ent->je_joffset,
			    NULL, M_WAITOK);
			if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
				u_char *buf2;

				/*
				 * TODO: Should use a faster function (like
				 *       g_journal_sync_read()).
				 */
				buf2 = g_read_data(cp, offset, ent->je_length,
				    NULL);
				if (buf2 == NULL)
					GJ_DEBUG(0, "Cannot read data at %jd.",
					    (intmax_t)offset);
				else {
					MD5Update(&ctx, buf2, ent->je_length);
					g_free(buf2);
				}
			}
			/* Skip entry's data. */
			offset += ent->je_length;
		}
		if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
			MD5Final(sum, &ctx);
			if (bcmp(sum, rhdr.jrh_sum, sizeof(rhdr.jrh_sum)) != 0) {
				GJ_DEBUG(0, "MD5 hash mismatch at %jd!",
				    (intmax_t)offset);
			}
		}
	}
end:
	gj_free(bp->bio_data, cp->provider->sectorsize);
	g_destroy_bio(bp);

	/* Remove bios from unterminated journal. */
	while ((tbp = fbp) != NULL) {
		fbp = tbp->bio_next;
		g_destroy_bio(tbp);
	}

	if (found < 1 && joffset > 0) {
		GJ_DEBUG(0, "Journal on %s is broken/corrupted. Initializing.",
		    sc->sc_name);
		while ((tbp = sc->sc_inactive.jj_queue) != NULL) {
			sc->sc_inactive.jj_queue = tbp->bio_next;
			g_destroy_bio(tbp);
		}
		g_journal_initialize(sc);
		g_journal_mark_as_dirty(sc);
	} else {
		GJ_DEBUG(0, "Journal %s consistent.", sc->sc_name);
		g_journal_copy_start(sc);
	}
}

/*
 * Wait for requests.
 * If we have requests in the current queue, flush them after 3 seconds from
 * the last flush. This way we don't wait forever (or until a journal switch)
 * before storing partially filled records in the journal.
 */
static void
g_journal_wait(struct g_journal_softc *sc, time_t last_write)
{
	int error, timeout;

	GJ_DEBUG(3, "%s: enter", __func__);
	if (sc->sc_current_count == 0) {
		if (g_journal_debug < 2)
			msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", 0);
		else {
			/*
			 * If we have debug turned on, show the number of
			 * elements in various queues.
			 */
			for (;;) {
				error = msleep(sc, &sc->sc_mtx, PRIBIO,
				    "gj:work", hz * 3);
				if (error == 0) {
					mtx_unlock(&sc->sc_mtx);
					break;
				}
				GJ_DEBUG(3, "Report: current count=%d",
				    sc->sc_current_count);
				GJ_DEBUG(3, "Report: flush count=%d",
				    sc->sc_flush_count);
				GJ_DEBUG(3, "Report: flush in progress=%d",
				    sc->sc_flush_in_progress);
				GJ_DEBUG(3, "Report: copy in progress=%d",
				    sc->sc_copy_in_progress);
				GJ_DEBUG(3, "Report: delayed=%d",
				    sc->sc_delayed_count);
			}
		}
		GJ_DEBUG(3, "%s: exit 1", __func__);
		return;
	}

	/*
	 * Flush records every 3 seconds, even if they are not full.
	 */
	timeout = (last_write + 3 - time_second) * hz;
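	/*
	 * If 3 seconds have already passed since the last write, flush the
	 * current record immediately instead of sleeping.
	 */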
	if (timeout <= 0) {
		mtx_unlock(&sc->sc_mtx);
		g_journal_flush(sc);
		g_journal_flush_send(sc);
		GJ_DEBUG(3, "%s: exit 2", __func__);
		return;
	}
	error = msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", timeout);
	if (error == EWOULDBLOCK)
		g_journal_flush_send(sc);
	GJ_DEBUG(3, "%s: exit 3", __func__);
}

/*
 * Worker thread.
 */
static void
g_journal_worker(void *arg)
{
	struct g_journal_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;
	struct bio *bp;
	time_t last_write;
	int type;

	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sc = arg;
	type = 0;	/* gcc */

	if (sc->sc_flags & GJF_DEVICE_CLEAN) {
		GJ_DEBUG(0, "Journal %s clean.", sc->sc_name);
		g_journal_initialize(sc);
	} else {
		g_journal_sync(sc);
	}
	/*
	 * Check if we can use BIO_FLUSH.
	 */
	sc->sc_bio_flush = 0;
	if (g_io_flush(sc->sc_jconsumer) == 0) {
		sc->sc_bio_flush |= GJ_FLUSH_JOURNAL;
		GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
		    sc->sc_jconsumer->provider->name);
	} else {
		GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
		    sc->sc_jconsumer->provider->name);
	}
	if (sc->sc_jconsumer != sc->sc_dconsumer) {
		if (g_io_flush(sc->sc_dconsumer) == 0) {
			sc->sc_bio_flush |= GJ_FLUSH_DATA;
			GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
			    sc->sc_dconsumer->provider->name);
		} else {
			GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
			    sc->sc_dconsumer->provider->name);
		}
	}

	gp = sc->sc_geom;
	g_topology_lock();
	pp = g_new_providerf(gp, "%s.journal", sc->sc_name);
	pp->mediasize = sc->sc_mediasize;
	/*
	 * There could be a problem when the data and journal providers have
	 * different sector sizes, but such a scenario is prevented at journal
	 * creation time.
	 */
	pp->sectorsize = sc->sc_sectorsize;
	g_error_provider(pp, 0);
	g_topology_unlock();
	last_write = time_second;

	if (sc->sc_rootmount != NULL) {
		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}

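	/*
	 * Main loop: internal requests from the back queue (copy, journal
	 * and flush completions) take priority over new regular requests
	 * from the upper layers.
	 */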
	for (;;) {
		/* Get first request from the queue. */
		mtx_lock(&sc->sc_mtx);
		bp = bioq_first(&sc->sc_back_queue);
		if (bp != NULL)
			type = (bp->bio_cflags & GJ_BIO_MASK);
		if (bp == NULL) {
			bp = bioq_first(&sc->sc_regular_queue);
			if (bp != NULL)
				type = GJ_BIO_REGULAR;
		}
		if (bp == NULL) {
try_switch:
			if ((sc->sc_flags & GJF_DEVICE_SWITCH) ||
			    (sc->sc_flags & GJF_DEVICE_DESTROY)) {
				if (sc->sc_current_count > 0) {
					mtx_unlock(&sc->sc_mtx);
					g_journal_flush(sc);
					g_journal_flush_send(sc);
					continue;
				}
				if (sc->sc_flush_in_progress > 0)
					goto sleep;
				if (sc->sc_copy_in_progress > 0)
					goto sleep;
			}
			if (sc->sc_flags & GJF_DEVICE_SWITCH) {
				mtx_unlock(&sc->sc_mtx);
				g_journal_switch(sc);
				wakeup(&sc->sc_journal_copying);
				continue;
			}
			if (sc->sc_flags & GJF_DEVICE_DESTROY) {
				GJ_DEBUG(1, "Shutting down worker "
				    "thread for %s.", gp->name);
				sc->sc_worker = NULL;
				wakeup(&sc->sc_worker);
				mtx_unlock(&sc->sc_mtx);
				kproc_exit(0);
			}
sleep:
			g_journal_wait(sc, last_write);
			continue;
		}
		/*
		 * If we're in the process of switching, we need to delay all
		 * new write requests until it's done.
		 */
		if ((sc->sc_flags & GJF_DEVICE_SWITCH) &&
		    type == GJ_BIO_REGULAR && bp->bio_cmd == BIO_WRITE) {
			GJ_LOGREQ(2, bp, "WRITE on SWITCH");
			goto try_switch;
		}
		if (type == GJ_BIO_REGULAR)
			bioq_remove(&sc->sc_regular_queue, bp);
		else
			bioq_remove(&sc->sc_back_queue, bp);
		mtx_unlock(&sc->sc_mtx);
		switch (type) {
		case GJ_BIO_REGULAR:
			/* Regular request. */
			switch (bp->bio_cmd) {
			case BIO_READ:
				g_journal_read(sc, bp, bp->bio_offset,
				    bp->bio_offset + bp->bio_length);
				break;
			case BIO_WRITE:
				last_write = time_second;
				g_journal_add_request(sc, bp);
				g_journal_flush_send(sc);
				break;
			default:
				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
			}
			break;
		case GJ_BIO_COPY:
			switch (bp->bio_cmd) {
			case BIO_READ:
				if (g_journal_copy_read_done(bp))
					g_journal_copy_send(sc);
				break;
			case BIO_WRITE:
				g_journal_copy_write_done(bp);
				g_journal_copy_send(sc);
				break;
			default:
				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
			}
			break;
		case GJ_BIO_JOURNAL:
			g_journal_flush_done(bp);
			g_journal_flush_send(sc);
			break;
		case GJ_BIO_READ:
		default:
			panic("Invalid bio (%d).", type);
		}
	}
}

static void
g_journal_destroy_event(void *arg, int flags __unused)
{
	struct g_journal_softc *sc;

	g_topology_assert();
	sc = arg;
	g_journal_destroy(sc);
}

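/*
 * Called when the other part of a split data/journal configuration did not
 * appear within the timeout armed in g_journal_create().
 */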
static void
g_journal_timeout(void *arg)
{
	struct g_journal_softc *sc;

	sc = arg;
	GJ_DEBUG(0, "Timeout. Journal %s cannot be completed.",
	    sc->sc_geom->name);
	g_post_event(g_journal_destroy_event, sc, M_NOWAIT, NULL);
}

static struct g_geom *
g_journal_create(struct g_class *mp, struct g_provider *pp,
    const struct g_journal_metadata *md)
{
	struct g_journal_softc *sc;
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	sc = NULL;	/* gcc */

	g_topology_assert();
	/*
	 * There are two possibilities:
	 * 1. Data and both journals are on the same provider.
	 * 2. Data and journals are all on separate providers.
	 */
	/* Look for journal device with the same ID. */
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_id == md->md_id)
			break;
	}
	if (gp == NULL)
		sc = NULL;
	else if (sc != NULL && (sc->sc_type & md->md_type) != 0) {
		GJ_DEBUG(1, "Journal device %u already configured.", sc->sc_id);
		return (NULL);
	}
	if (md->md_type == 0 || (md->md_type & ~GJ_TYPE_COMPLETE) != 0) {
		GJ_DEBUG(0, "Invalid type on %s.", pp->name);
		return (NULL);
	}
	if (md->md_type & GJ_TYPE_DATA) {
		GJ_DEBUG(0, "Journal %u: %s contains data.", md->md_id,
		    pp->name);
	}
	if (md->md_type & GJ_TYPE_JOURNAL) {
		GJ_DEBUG(0, "Journal %u: %s contains journal.", md->md_id,
		    pp->name);
	}

	if (sc == NULL) {
		/* Action geom. */
		sc = malloc(sizeof(*sc), M_JOURNAL, M_WAITOK | M_ZERO);
		sc->sc_id = md->md_id;
		sc->sc_type = 0;
		sc->sc_flags = 0;
		sc->sc_worker = NULL;

		gp = g_new_geomf(mp, "gjournal %u", sc->sc_id);
		gp->start = g_journal_start;
		gp->orphan = g_journal_orphan;
		gp->access = g_journal_access;
		gp->softc = sc;
		gp->flags |= G_GEOM_VOLATILE_BIO;
		sc->sc_geom = gp;

		mtx_init(&sc->sc_mtx, "gjournal", NULL, MTX_DEF);

		bioq_init(&sc->sc_back_queue);
		bioq_init(&sc->sc_regular_queue);
		bioq_init(&sc->sc_delayed_queue);
		sc->sc_delayed_count = 0;
		sc->sc_current_queue = NULL;
		sc->sc_current_count = 0;
		sc->sc_flush_queue = NULL;
		sc->sc_flush_count = 0;
		sc->sc_flush_in_progress = 0;
		sc->sc_copy_queue = NULL;
		sc->sc_copy_in_progress = 0;
		sc->sc_inactive.jj_queue = NULL;
		sc->sc_active.jj_queue = NULL;

		sc->sc_rootmount = root_mount_hold("GJOURNAL");
		GJ_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);

		callout_init(&sc->sc_callout, 1);
		if (md->md_type != GJ_TYPE_COMPLETE) {
			/*
			 * Journal and data are on separate providers.
			 * At this point we have only one of them.
			 * We set up a timeout in case the other part does not
			 * appear, so we won't wait forever.
			 */
			callout_reset(&sc->sc_callout, 5 * hz,
			    g_journal_timeout, sc);
		}
	}

	/* Remember type of the data provider. */
	if (md->md_type & GJ_TYPE_DATA)
		sc->sc_orig_type = md->md_type;
	sc->sc_type |= md->md_type;
	cp = NULL;

	if (md->md_type & GJ_TYPE_DATA) {
		if (md->md_flags & GJ_FLAG_CLEAN)
			sc->sc_flags |= GJF_DEVICE_CLEAN;
		if (md->md_flags & GJ_FLAG_CHECKSUM)
			sc->sc_flags |= GJF_DEVICE_CHECKSUM;
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
		    pp->name, error));
		error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			GJ_DEBUG(0, "Cannot access %s (error=%d).", pp->name,
			    error);
			g_journal_destroy(sc);
			return (NULL);
		}
		sc->sc_dconsumer = cp;
		sc->sc_mediasize = pp->mediasize - pp->sectorsize;
		sc->sc_sectorsize = pp->sectorsize;
		sc->sc_jstart = md->md_jstart;
		sc->sc_jend = md->md_jend;
		if (md->md_provider[0] != '\0')
			sc->sc_flags |= GJF_DEVICE_HARDCODED;
		sc->sc_journal_offset = md->md_joffset;
		sc->sc_journal_id = md->md_jid;
		sc->sc_journal_previous_id = md->md_jid;
	}
	if (md->md_type & GJ_TYPE_JOURNAL) {
		if (cp == NULL) {
			cp = g_new_consumer(gp);
			error = g_attach(cp, pp);
			KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
			    pp->name, error));
			error = g_access(cp, 1, 1, 1);
			if (error != 0) {
				GJ_DEBUG(0, "Cannot access %s (error=%d).",
				    pp->name, error);
				g_journal_destroy(sc);
				return (NULL);
			}
		} else {
			/*
			 * The journal is on the same provider as the data,
			 * which means the data provider ends where the
			 * journal starts.
			 */
			sc->sc_mediasize = md->md_jstart;
		}
		sc->sc_jconsumer = cp;
	}

	/* Start switcher kproc if needed. */
	if (g_journal_switcher_proc == NULL)
		g_journal_start_switcher(mp);

	if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE) {
		/* Journal is not complete yet. */
		return (gp);
	} else {
		/* Journal complete, cancel timeout. */
		callout_drain(&sc->sc_callout);
	}

	error = kproc_create(g_journal_worker, sc, &sc->sc_worker, 0, 0,
	    "g_journal %s", sc->sc_name);
	if (error != 0) {
		GJ_DEBUG(0, "Cannot create worker thread for %s.journal.",
		    sc->sc_name);
		g_journal_destroy(sc);
		return (NULL);
	}

	return (gp);
}

static void
g_journal_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();
	cp = arg;
	g_detach(cp);
	g_destroy_consumer(cp);
}

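/*
 * Tear the device down: refuse if the provider is still open, flush and
 * switch the journal one last time, wait for the worker thread to exit,
 * mark the file system clean in the metadata and wither the geom.
 */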
static int
g_journal_destroy(struct g_journal_softc *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL) {
		if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) {
			GJ_DEBUG(1, "Device %s is still open (r%dw%de%d).",
			    pp->name, pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
		g_error_provider(pp, ENXIO);

		g_journal_flush(sc);
		g_journal_flush_send(sc);
		g_journal_switch(sc);
	}

	sc->sc_flags |= (GJF_DEVICE_DESTROY | GJF_DEVICE_CLEAN);

	g_topology_unlock();

	if (sc->sc_rootmount != NULL) {
		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}

	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_mtx);
	wakeup(sc);
	while (sc->sc_worker != NULL)
		msleep(&sc->sc_worker, &sc->sc_mtx, PRIBIO, "gj:destroy", 0);
	mtx_unlock(&sc->sc_mtx);

	if (pp != NULL) {
		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
		g_journal_metadata_update(sc);
		g_topology_lock();
		g_wither_provider(pp, ENXIO);
	} else {
		g_topology_lock();
	}
	mtx_destroy(&sc->sc_mtx);

	if (sc->sc_current_count != 0) {
		GJ_DEBUG(0, "Warning! Number of current requests %d.",
		    sc->sc_current_count);
	}

	gp->softc = NULL;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (cp->acr + cp->acw + cp->ace > 0)
			g_access(cp, -1, -1, -1);
		/*
		 * We keep all consumers open for writing, so if we detached
		 * and destroyed the consumers here, their providers would be
		 * offered for tasting and the journal would be started again.
		 * Sending an event here prevents this from happening.
		 */
		g_post_event(g_journal_destroy_consumer, cp, M_WAITOK, NULL);
	}
	g_wither_geom(gp, ENXIO);
	free(sc, M_JOURNAL);
	return (0);
}

static void
g_journal_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}

static struct g_geom *
g_journal_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_journal_metadata md;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	GJ_DEBUG(2, "Tasting %s.", pp->name);
	if (pp->geom->class == mp)
		return (NULL);

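	/*
	 * Create a temporary geom/consumer pair just to read the metadata
	 * from the provider; both are destroyed right after the read.
	 */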
	gp = g_new_geomf(mp, "journal:taste");
	/* This orphan function should never be called. */
	gp->orphan = g_journal_taste_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_journal_metadata_read(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (md.md_provider[0] != '\0' &&
	    !g_compare_names(md.md_provider, pp->name))
		return (NULL);
	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
		return (NULL);
	if (g_journal_debug >= 2)
		journal_metadata_dump(&md);

	gp = g_journal_create(mp, pp, &md);
	return (gp);
}

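/*
 * Look a journal device up by name. The name may be given with or without
 * the /dev/ prefix and may refer either to the device itself or to its
 * provider.
 */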
static struct g_journal_softc *
g_journal_find_device(struct g_class *mp, const char *name)
{
	struct g_journal_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;

	if (strncmp(name, "/dev/", 5) == 0)
		name += 5;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_flags & GJF_DEVICE_DESTROY)
			continue;
		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
			continue;
		pp = LIST_FIRST(&gp->provider);
		if (strcmp(sc->sc_name, name) == 0)
			return (sc);
		if (pp != NULL && strcmp(pp->name, name) == 0)
			return (sc);
	}
	return (NULL);
}

static void
g_journal_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	struct g_journal_softc *sc;
	const char *name;
	char param[16];
	int *nargs;
	int error, i;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument.", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument.", i);
			return;
		}
		sc = g_journal_find_device(mp, name);
		if (sc == NULL) {
			gctl_error(req, "No such device: %s.", name);
			return;
		}
		error = g_journal_destroy(sc);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    LIST_FIRST(&sc->sc_geom->provider)->name, error);
			return;
		}
	}
}

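/*
 * Request an immediate journal switch: wake the switcher thread up and wait
 * until it acknowledges that the requested synchronization is done.
 */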
static void
g_journal_ctl_sync(struct gctl_req *req __unused, struct g_class *mp __unused)
{

	g_topology_assert();
	g_topology_unlock();
	g_journal_sync_requested++;
	wakeup(&g_journal_switcher_state);
	while (g_journal_sync_requested > 0)
		tsleep(&g_journal_sync_requested, PRIBIO, "j:sreq", hz / 2);
	g_topology_lock();
}

static void
g_journal_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	uint32_t *version;

	g_topology_assert();

	version = gctl_get_paraml(req, "version", sizeof(*version));
	if (version == NULL) {
		gctl_error(req, "No '%s' argument.", "version");
		return;
	}
	if (*version != G_JOURNAL_VERSION) {
		gctl_error(req, "Userland and kernel parts are out of sync.");
		return;
	}

	if (strcmp(verb, "destroy") == 0 || strcmp(verb, "stop") == 0) {
		g_journal_ctl_destroy(req, mp);
		return;
	} else if (strcmp(verb, "sync") == 0) {
		g_journal_ctl_sync(req, mp);
		return;
	}

	gctl_error(req, "Unknown verb.");
}

static void
g_journal_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_journal_softc *sc;

	g_topology_assert();

	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL) {
		/* Nothing here. */
	} else if (cp != NULL) {
		int first = 1;

		sbuf_printf(sb, "%s<Role>", indent);
		if (cp == sc->sc_dconsumer) {
			sbuf_printf(sb, "Data");
			first = 0;
		}
		if (cp == sc->sc_jconsumer) {
			if (!first)
				sbuf_printf(sb, ",");
			sbuf_printf(sb, "Journal");
		}
		sbuf_printf(sb, "</Role>\n");
		if (cp == sc->sc_jconsumer) {
			sbuf_printf(sb, "<Jstart>%jd</Jstart>\n",
			    (intmax_t)sc->sc_jstart);
			sbuf_printf(sb, "<Jend>%jd</Jend>\n",
			    (intmax_t)sc->sc_jend);
		}
	} else {
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
	}
}

static eventhandler_tag g_journal_event_shutdown = NULL;
static eventhandler_tag g_journal_event_lowmem = NULL;

static void
g_journal_shutdown(void *arg, int howto __unused)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;

	if (panicstr != NULL)
		return;
	mp = arg;
	g_topology_lock();
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		if (gp->softc == NULL)
			continue;
		GJ_DEBUG(0, "Shutting down geom %s.", gp->name);
		g_journal_destroy(gp->softc);
	}
	g_topology_unlock();
}

/*
 * Free cached requests from the inactive queue in case of low memory.
 * We free GJ_FREE_AT_ONCE elements at once.
 */
#define	GJ_FREE_AT_ONCE	4
static void
g_journal_lowmem(void *arg, int howto __unused)
{
	struct g_journal_softc *sc;
	struct g_class *mp;
	struct g_geom *gp;
	struct bio *bp;
	u_int nfree = GJ_FREE_AT_ONCE;

	g_journal_stats_low_mem++;
	mp = arg;
	g_topology_lock();
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY))
			continue;
		mtx_lock(&sc->sc_mtx);
		for (bp = sc->sc_inactive.jj_queue; nfree > 0 && bp != NULL;
		    nfree--, bp = bp->bio_next) {
			/*
			 * It is safe to free the bio_data, because:
			 * 1. If bio_data is NULL it will be read from the
			 *    inactive journal.
			 * 2. If bp is sent down, it is first removed from the
			 *    inactive queue, so it's impossible to free the
			 *    data from under an in-flight bio.
			 * On the other hand, freeing elements from the active
			 * queue is not safe.
			 */
			if (bp->bio_data != NULL) {
				GJ_DEBUG(2, "Freeing data from %s.",
				    sc->sc_name);
				gj_free(bp->bio_data, bp->bio_length);
				bp->bio_data = NULL;
			}
		}
		mtx_unlock(&sc->sc_mtx);
		if (nfree == 0)
			break;
	}
	g_topology_unlock();
}

static void g_journal_switcher(void *arg);

static void
g_journal_init(struct g_class *mp)
{

	/* Pick a conservative value if provided value sucks. */
	if (g_journal_cache_divisor <= 0 ||
	    (vm_kmem_size / g_journal_cache_divisor == 0)) {
		g_journal_cache_divisor = 5;
	}
	if (g_journal_cache_limit > 0) {
		g_journal_cache_limit = vm_kmem_size / g_journal_cache_divisor;
		g_journal_cache_low =
		    (g_journal_cache_limit / 100) * g_journal_cache_switch;
	}
	g_journal_event_shutdown = EVENTHANDLER_REGISTER(shutdown_post_sync,
	    g_journal_shutdown, mp, EVENTHANDLER_PRI_FIRST);
	if (g_journal_event_shutdown == NULL)
		GJ_DEBUG(0, "Warning! Cannot register shutdown event.");
	g_journal_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
	    g_journal_lowmem, mp, EVENTHANDLER_PRI_FIRST);
	if (g_journal_event_lowmem == NULL)
		GJ_DEBUG(0, "Warning! Cannot register lowmem event.");
}

static void
g_journal_fini(struct g_class *mp)
{

	if (g_journal_event_shutdown != NULL) {
		EVENTHANDLER_DEREGISTER(shutdown_post_sync,
		    g_journal_event_shutdown);
	}
	if (g_journal_event_lowmem != NULL)
		EVENTHANDLER_DEREGISTER(vm_lowmem, g_journal_event_lowmem);
	g_journal_stop_switcher();
}

DECLARE_GEOM_CLASS(g_journal_class, g_journal);

static const struct g_journal_desc *
g_journal_find_desc(const char *fstype)
{
	const struct g_journal_desc *desc;
	int i;

	for (desc = g_journal_filesystems[i = 0]; desc != NULL;
	     desc = g_journal_filesystems[++i]) {
		if (strcmp(desc->jd_fstype, fstype) == 0)
			break;
	}
	return (desc);
}

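/*
 * Request a journal switch from the worker thread and wait until it is
 * complete. Called with the softc mutex held; the worker wakes us up once
 * the GJF_DEVICE_SWITCH flag has been cleared.
 */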
static void
g_journal_switch_wait(struct g_journal_softc *sc)
{
	struct bintime bt;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	if (g_journal_debug >= 2) {
		if (sc->sc_flush_in_progress > 0) {
			GJ_DEBUG(2, "%d requests flushing.",
			    sc->sc_flush_in_progress);
		}
		if (sc->sc_copy_in_progress > 0) {
			GJ_DEBUG(2, "%d requests copying.",
			    sc->sc_copy_in_progress);
		}
		if (sc->sc_flush_count > 0) {
			GJ_DEBUG(2, "%d requests to flush.",
			    sc->sc_flush_count);
		}
		if (sc->sc_delayed_count > 0) {
			GJ_DEBUG(2, "%d requests delayed.",
			    sc->sc_delayed_count);
		}
	}
	g_journal_stats_switches++;
	if (sc->sc_copy_in_progress > 0)
		g_journal_stats_wait_for_copy++;
	GJ_TIMER_START(1, &bt);
	sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
	sc->sc_flags |= GJF_DEVICE_SWITCH;
	wakeup(sc);
	while (sc->sc_flags & GJF_DEVICE_SWITCH) {
		msleep(&sc->sc_journal_copying, &sc->sc_mtx, PRIBIO,
		    "gj:switch", 0);
	}
	GJ_TIMER_STOP(1, &bt, "Switch time of %s", sc->sc_name);
}

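/*
 * Switch journals for all mounted gjournal-backed file systems: sync and
 * suspend each file system, let it mark itself clean, wait for the worker
 * thread to switch the journals and resume. Devices without a file system
 * mounted on top are handled afterwards.
 */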
static void
g_journal_do_switch(struct g_class *classp)
{
	struct g_journal_softc *sc;
	const struct g_journal_desc *desc;
	struct g_geom *gp;
	struct mount *mp;
	struct bintime bt;
	char *mountpoint;
	int error, save;

	g_topology_lock();
	LIST_FOREACH(gp, &classp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_flags & GJF_DEVICE_DESTROY)
			continue;
		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
			continue;
		mtx_lock(&sc->sc_mtx);
		sc->sc_flags |= GJF_DEVICE_BEFORE_SWITCH;
		mtx_unlock(&sc->sc_mtx);
	}
	g_topology_unlock();

	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_gjprovider == NULL)
			continue;
		if (mp->mnt_flag & MNT_RDONLY)
			continue;
		desc = g_journal_find_desc(mp->mnt_stat.f_fstypename);
		if (desc == NULL)
			continue;
		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
			continue;
		/* mtx_unlock(&mountlist_mtx) was done inside vfs_busy() */

		g_topology_lock();
		sc = g_journal_find_device(classp, mp->mnt_gjprovider);
		g_topology_unlock();

		if (sc == NULL) {
			GJ_DEBUG(0, "Cannot find journal geom for %s.",
			    mp->mnt_gjprovider);
			goto next;
		} else if (JEMPTY(sc)) {
			mtx_lock(&sc->sc_mtx);
			sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
			mtx_unlock(&sc->sc_mtx);
			GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
			goto next;
		}

		mountpoint = mp->mnt_stat.f_mntonname;

		error = vn_start_write(NULL, &mp, V_WAIT);
		if (error != 0) {
			GJ_DEBUG(0, "vn_start_write(%s) failed (error=%d).",
			    mountpoint, error);
			goto next;
		}

		save = curthread_pflags_set(TDP_SYNCIO);

		GJ_TIMER_START(1, &bt);
		vfs_msync(mp, MNT_NOWAIT);
		GJ_TIMER_STOP(1, &bt, "Msync time of %s", mountpoint);

		GJ_TIMER_START(1, &bt);
		error = VFS_SYNC(mp, MNT_NOWAIT);
		if (error == 0)
			GJ_TIMER_STOP(1, &bt, "Sync time of %s", mountpoint);
		else {
			GJ_DEBUG(0, "Cannot sync file system %s (error=%d).",
			    mountpoint, error);
		}

		curthread_pflags_restore(save);

		vn_finished_write(mp);

		if (error != 0)
			goto next;

		/*
		 * Send BIO_FLUSH before freezing the file system, so it can be
		 * faster after the freeze.
		 */
		GJ_TIMER_START(1, &bt);
		g_journal_flush_cache(sc);
		GJ_TIMER_STOP(1, &bt, "BIO_FLUSH time of %s", sc->sc_name);

		GJ_TIMER_START(1, &bt);
		error = vfs_write_suspend(mp, VS_SKIP_UNMOUNT);
		GJ_TIMER_STOP(1, &bt, "Suspend time of %s", mountpoint);
		if (error != 0) {
			GJ_DEBUG(0, "Cannot suspend file system %s (error=%d).",
			    mountpoint, error);
			goto next;
		}

		error = desc->jd_clean(mp);
		if (error != 0)
			goto next;

		mtx_lock(&sc->sc_mtx);
		g_journal_switch_wait(sc);
		mtx_unlock(&sc->sc_mtx);

		vfs_write_resume(mp, 0);
next:
		mtx_lock(&mountlist_mtx);
		vfs_unbusy(mp);
	}
	mtx_unlock(&mountlist_mtx);

	sc = NULL;
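	/*
	 * Handle the devices that still have the BEFORE_SWITCH flag set,
	 * i.e. those that were not switched via the mount list above.
	 */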
	for (;;) {
		g_topology_lock();
		LIST_FOREACH(gp, &g_journal_class.geom, geom) {
			sc = gp->softc;
			if (sc == NULL)
				continue;
			mtx_lock(&sc->sc_mtx);
			if ((sc->sc_type & GJ_TYPE_COMPLETE) == GJ_TYPE_COMPLETE &&
			    !(sc->sc_flags & GJF_DEVICE_DESTROY) &&
			    (sc->sc_flags & GJF_DEVICE_BEFORE_SWITCH)) {
				break;
			}
			mtx_unlock(&sc->sc_mtx);
			sc = NULL;
		}
		g_topology_unlock();
		if (sc == NULL)
			break;
		mtx_assert(&sc->sc_mtx, MA_OWNED);
		g_journal_switch_wait(sc);
		mtx_unlock(&sc->sc_mtx);
	}
}

static void
g_journal_start_switcher(struct g_class *mp)
{
	int error;

	g_topology_assert();
	MPASS(g_journal_switcher_proc == NULL);
	g_journal_switcher_state = GJ_SWITCHER_WORKING;
	error = kproc_create(g_journal_switcher, mp, &g_journal_switcher_proc,
	    0, 0, "g_journal switcher");
	KASSERT(error == 0, ("Cannot create switcher thread."));
}

static void
g_journal_stop_switcher(void)
{
	g_topology_assert();
	MPASS(g_journal_switcher_proc != NULL);
	g_journal_switcher_state = GJ_SWITCHER_DIE;
	wakeup(&g_journal_switcher_state);
	while (g_journal_switcher_state != GJ_SWITCHER_DIED)
		tsleep(&g_journal_switcher_state, PRIBIO, "jfini:wait", hz / 5);
	GJ_DEBUG(1, "Switcher died.");
	g_journal_switcher_proc = NULL;
}

/*
 * TODO: Kill switcher thread on last geom destruction?
 */
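/*
 * The switcher thread: wakes up every g_journal_switch_time seconds, or
 * earlier when woken up (cache pressure or an explicit sync request), and
 * performs a journal switch on all managed devices.
 */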
static void
g_journal_switcher(void *arg)
{
	struct g_class *mp;
	struct bintime bt;
	int error;

	mp = arg;
	curthread->td_pflags |= TDP_NORUNNINGBUF;
	for (;;) {
		g_journal_switcher_wokenup = 0;
		error = tsleep(&g_journal_switcher_state, PRIBIO, "jsw:wait",
		    g_journal_switch_time * hz);
		if (g_journal_switcher_state == GJ_SWITCHER_DIE) {
			g_journal_switcher_state = GJ_SWITCHER_DIED;
			GJ_DEBUG(1, "Switcher exiting.");
			wakeup(&g_journal_switcher_state);
			kproc_exit(0);
		}
		if (error == 0 && g_journal_sync_requested == 0) {
			GJ_DEBUG(1, "Out of cache, force switch (used=%jd "
			    "limit=%jd).", (intmax_t)g_journal_cache_used,
			    (intmax_t)g_journal_cache_limit);
		}
		GJ_TIMER_START(1, &bt);
		g_journal_do_switch(mp);
		GJ_TIMER_STOP(1, &bt, "Entire switch time");
		if (g_journal_sync_requested > 0) {
			g_journal_sync_requested = 0;
			wakeup(&g_journal_sync_requested);
		}
	}
}
