/*-
 * Copyright (c) 2005-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/geom/journal/g_journal.c 163886 2006-11-01 22:16:53Z pjd $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/eventhandler.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/sbuf.h>
#ifdef GJ_MEMDEBUG
#include <sys/stack.h>
#include <sys/kdb.h>
#endif
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <geom/geom.h>

#include <geom/journal/g_journal.h>

/*
 * On-disk journal format:
 *
 * JH - Journal header
 * RH - Record header
 *
 *  %%%%%%  ******  +------+  +------+      ******  +------+  %%%%%%
 *  % JH %  * RH *  | Data |  | Data |  ... * RH *  | Data |  ... % JH % ...
 *  %%%%%%  ******  +------+  +------+      ******  +------+  %%%%%%
 *
 */

CTASSERT(sizeof(struct g_journal_header) <= 512);
CTASSERT(sizeof(struct g_journal_record_header) <= 512);

static MALLOC_DEFINE(M_JOURNAL, "journal_data", "GEOM_JOURNAL Data");
static struct mtx g_journal_cache_mtx;
MTX_SYSINIT(g_journal_cache, &g_journal_cache_mtx, "cache usage", MTX_DEF);

const struct g_journal_desc *g_journal_filesystems[] = {
	&g_journal_ufs,
	NULL
};

SYSCTL_DECL(_kern_geom);

int g_journal_debug = 0;
TUNABLE_INT("kern.geom.journal.debug", &g_journal_debug);
static u_int g_journal_switch_time = 10;
static u_int g_journal_force_switch = 70;
static u_int g_journal_parallel_flushes = 16;
static u_int g_journal_parallel_copies = 16;
static u_int g_journal_accept_immediately = 64;
static u_int g_journal_record_entries = GJ_RECORD_HEADER_NENTRIES;
static u_int g_journal_do_optimize = 1;

SYSCTL_NODE(_kern_geom, OID_AUTO, journal, CTLFLAG_RW, 0, "GEOM_JOURNAL stuff");
SYSCTL_INT(_kern_geom_journal, OID_AUTO, debug, CTLFLAG_RW, &g_journal_debug, 0,
    "Debug level");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, switch_time, CTLFLAG_RW,
    &g_journal_switch_time, 0, "Switch journals every N seconds");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, force_switch, CTLFLAG_RW,
    &g_journal_force_switch, 0, "Force switch when journal is N%% full");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_flushes, CTLFLAG_RW,
    &g_journal_parallel_flushes, 0,
    "Number of flush I/O requests sent in parallel");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, accept_immediately, CTLFLAG_RW,
    &g_journal_accept_immediately, 0,
    "Number of I/O requests accepted immediately");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_copies, CTLFLAG_RW,
    &g_journal_parallel_copies, 0,
    "Number of copy I/O requests sent in parallel");
static int
g_journal_record_entries_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_int entries;
	int error;

	entries = g_journal_record_entries;
	error = sysctl_handle_int(oidp, &entries, sizeof(entries), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (entries < 1 || entries > GJ_RECORD_HEADER_NENTRIES)
		return (EINVAL);
	g_journal_record_entries = entries;
	return (0);
}
SYSCTL_PROC(_kern_geom_journal, OID_AUTO, record_entries,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_record_entries_sysctl, "I",
    "Maximum number of entries in one journal record");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, optimize, CTLFLAG_RW,
    &g_journal_do_optimize, 0, "Try to combine bios on flush and copy");

static u_int g_journal_cache_used = 0;
static u_int g_journal_cache_limit = 64 * 1024 * 1024;
TUNABLE_INT("kern.geom.journal.cache.limit", &g_journal_cache_limit);
static u_int g_journal_cache_divisor = 2;
TUNABLE_INT("kern.geom.journal.cache.divisor", &g_journal_cache_divisor);
static u_int g_journal_cache_switch = 90;
static u_int g_journal_cache_misses = 0;
static u_int g_journal_cache_alloc_failures = 0;
static u_int g_journal_cache_low = 0;

SYSCTL_NODE(_kern_geom_journal, OID_AUTO, cache, CTLFLAG_RW, 0,
    "GEOM_JOURNAL cache");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, used, CTLFLAG_RD,
    &g_journal_cache_used, 0, "Number of allocated bytes");
static int
g_journal_cache_limit_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_int limit;
	int error;

	limit = g_journal_cache_limit;
	error = sysctl_handle_int(oidp, &limit, sizeof(limit), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	g_journal_cache_limit = limit;
	g_journal_cache_low = (limit / 100) * g_journal_cache_switch;
	return (0);
}
SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, limit,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_cache_limit_sysctl, "I",
    "Maximum number of allocated bytes");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, divisor, CTLFLAG_RDTUN,
    &g_journal_cache_divisor, 0,
    "(kmem_size / kern.geom.journal.cache.divisor) == cache size");
static int
g_journal_cache_switch_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_int cswitch;
	int error;

	cswitch = g_journal_cache_switch;
	error = sysctl_handle_int(oidp, &cswitch, sizeof(cswitch), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (cswitch < 0 || cswitch > 100)
		return (EINVAL);
	g_journal_cache_switch = cswitch;
	g_journal_cache_low = (g_journal_cache_limit / 100) * cswitch;
	return (0);
}
SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, switch,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_cache_switch_sysctl, "I",
    "Force switch when we hit this percent of cache use");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, misses, CTLFLAG_RW,
    &g_journal_cache_misses, 0, "Number of cache misses");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, alloc_failures, CTLFLAG_RW,
    &g_journal_cache_alloc_failures, 0, "Memory allocation failures");

static u_long g_journal_stats_bytes_skipped = 0;
static u_long g_journal_stats_combined_ios = 0;
static u_long g_journal_stats_switches = 0;
static u_long g_journal_stats_wait_for_copy = 0;
static u_long g_journal_stats_journal_full = 0;
static u_long g_journal_stats_low_mem = 0;

SYSCTL_NODE(_kern_geom_journal, OID_AUTO, stats, CTLFLAG_RW, 0,
    "GEOM_JOURNAL statistics");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, skipped_bytes, CTLFLAG_RW,
    &g_journal_stats_bytes_skipped, 0, "Number of skipped bytes");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, combined_ios, CTLFLAG_RW,
    &g_journal_stats_combined_ios, 0, "Number of combined I/O requests");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, switches, CTLFLAG_RW,
    &g_journal_stats_switches, 0, "Number of journal switches");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, wait_for_copy, CTLFLAG_RW,
    &g_journal_stats_wait_for_copy, 0, "Wait for journal copy on switch");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, journal_full, CTLFLAG_RW,
    &g_journal_stats_journal_full, 0,
    "Number of times journal was almost full.");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, low_mem, CTLFLAG_RW,
    &g_journal_stats_low_mem, 0, "Number of times low_mem hook was called.");

static g_taste_t g_journal_taste;
static g_ctl_req_t g_journal_config;
static g_dumpconf_t g_journal_dumpconf;
static g_init_t g_journal_init;
static g_fini_t g_journal_fini;

struct g_class g_journal_class = {
	.name = G_JOURNAL_CLASS_NAME,
	.version = G_VERSION,
	.taste = g_journal_taste,
	.ctlreq = g_journal_config,
	.dumpconf = g_journal_dumpconf,
	.init = g_journal_init,
	.fini = g_journal_fini
};

static int g_journal_destroy(struct g_journal_softc *sc);
static void g_journal_metadata_update(struct g_journal_softc *sc);
static void g_journal_switch_wait(struct g_journal_softc *sc);

#define	GJ_SWITCHER_WORKING	0
#define	GJ_SWITCHER_DIE		1
#define	GJ_SWITCHER_DIED	2
static int g_journal_switcher_state = GJ_SWITCHER_WORKING;
static int g_journal_switcher_wokenup = 0;
static int g_journal_sync_requested = 0;

#ifdef GJ_MEMDEBUG
struct meminfo {
	size_t		mi_size;
	struct stack	mi_stack;
};
#endif

/*
 * We use our own malloc/realloc/free functions, so we can collect statistics
 * and force a journal switch when we're running out of cache.
 */
static void *
gj_malloc(size_t size, int flags)
{
	void *p;
#ifdef GJ_MEMDEBUG
	struct meminfo *mi;
#endif

	mtx_lock(&g_journal_cache_mtx);
	if (g_journal_cache_limit > 0 && !g_journal_switcher_wokenup &&
	    g_journal_cache_used + size > g_journal_cache_low) {
		GJ_DEBUG(1, "No cache, waking up the switcher.");
		g_journal_switcher_wokenup = 1;
		wakeup(&g_journal_switcher_state);
	}
	if ((flags & M_NOWAIT) && g_journal_cache_limit > 0 &&
	    g_journal_cache_used + size > g_journal_cache_limit) {
		mtx_unlock(&g_journal_cache_mtx);
		g_journal_cache_alloc_failures++;
		return (NULL);
	}
	g_journal_cache_used += size;
	mtx_unlock(&g_journal_cache_mtx);
	flags &= ~M_NOWAIT;
#ifndef GJ_MEMDEBUG
	p = malloc(size, M_JOURNAL, flags | M_WAITOK);
#else
	mi = malloc(sizeof(*mi) + size, M_JOURNAL, flags | M_WAITOK);
	p = (u_char *)mi + sizeof(*mi);
	mi->mi_size = size;
	stack_save(&mi->mi_stack);
#endif
	return (p);
}

static void
gj_free(void *p, size_t size)
{
#ifdef GJ_MEMDEBUG
	struct meminfo *mi;
#endif

	KASSERT(p != NULL, ("p=NULL"));
	KASSERT(size > 0, ("size=0"));
	mtx_lock(&g_journal_cache_mtx);
	KASSERT(g_journal_cache_used >= size, ("Freeing too much?"));
	g_journal_cache_used -= size;
	mtx_unlock(&g_journal_cache_mtx);
#ifdef GJ_MEMDEBUG
	mi = p = (void *)((u_char *)p - sizeof(*mi));
	if (mi->mi_size != size) {
		printf("GJOURNAL: Size mismatch! %zu != %zu\n", size,
		    mi->mi_size);
		printf("GJOURNAL: Alloc backtrace:\n");
		stack_print(&mi->mi_stack);
		printf("GJOURNAL: Free backtrace:\n");
		kdb_backtrace();
	}
#endif
	free(p, M_JOURNAL);
}

static void *
gj_realloc(void *p, size_t size, size_t oldsize)
{
	void *np;

#ifndef GJ_MEMDEBUG
	mtx_lock(&g_journal_cache_mtx);
	g_journal_cache_used -= oldsize;
	g_journal_cache_used += size;
	mtx_unlock(&g_journal_cache_mtx);
	np = realloc(p, size, M_JOURNAL, M_WAITOK);
#else
	np = gj_malloc(size, M_WAITOK);
	bcopy(p, np, MIN(oldsize, size));
	gj_free(p, oldsize);
#endif
	return (np);
}

static void
g_journal_check_overflow(struct g_journal_softc *sc)
{
	off_t length, used;

	if ((sc->sc_active.jj_offset < sc->sc_inactive.jj_offset &&
	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset) ||
	    (sc->sc_active.jj_offset > sc->sc_inactive.jj_offset &&
	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset &&
	     sc->sc_journal_offset < sc->sc_active.jj_offset)) {
		panic("Journal overflow (joffset=%jd active=%jd inactive=%jd)",
		    (intmax_t)sc->sc_journal_offset,
		    (intmax_t)sc->sc_active.jj_offset,
		    (intmax_t)sc->sc_inactive.jj_offset);
	}
	if (sc->sc_active.jj_offset < sc->sc_inactive.jj_offset) {
		length = sc->sc_inactive.jj_offset - sc->sc_active.jj_offset;
		used = sc->sc_journal_offset - sc->sc_active.jj_offset;
	} else {
		length = sc->sc_jend - sc->sc_active.jj_offset;
		length += sc->sc_inactive.jj_offset - sc->sc_jstart;
		if (sc->sc_journal_offset >= sc->sc_active.jj_offset)
			used = sc->sc_journal_offset - sc->sc_active.jj_offset;
		else {
			used = sc->sc_jend - sc->sc_active.jj_offset;
			used += sc->sc_journal_offset - sc->sc_jstart;
		}
	}
	/* Already woken up? */
	if (g_journal_switcher_wokenup)
		return;
	/*
	 * If the active journal takes more than g_journal_force_switch percent
	 * of free journal space, we force a journal switch.
	 */
	KASSERT(length > 0,
	    ("length=%jd used=%jd active=%jd inactive=%jd joffset=%jd",
	    (intmax_t)length, (intmax_t)used,
	    (intmax_t)sc->sc_active.jj_offset,
	    (intmax_t)sc->sc_inactive.jj_offset,
	    (intmax_t)sc->sc_journal_offset));
	if ((used * 100) / length > g_journal_force_switch) {
		g_journal_stats_journal_full++;
		GJ_DEBUG(1, "Journal %s %jd%% full, forcing journal switch.",
		    sc->sc_name, (used * 100) / length);
		mtx_lock(&g_journal_cache_mtx);
		g_journal_switcher_wokenup = 1;
		wakeup(&g_journal_switcher_state);
		mtx_unlock(&g_journal_cache_mtx);
	}
}
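
/*
 * Worked example of the check above: with a 1 GiB journal area
 * (length = 1073741824) and the default g_journal_force_switch of 70,
 * the switcher is woken up once used * 100 / length exceeds 70, i.e.
 * once the active journal holds a bit over 70% of the space, roughly
 * 717 MiB of records.
 */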

static void
g_journal_orphan(struct g_consumer *cp)
{
	struct g_journal_softc *sc;
	char name[256];
	int error;

	g_topology_assert();
	sc = cp->geom->softc;
	GJ_DEBUG(0, "Lost provider %s (journal=%s).", cp->provider->name,
	    sc->sc_name);
	strlcpy(name, sc->sc_name, sizeof(name));
	error = g_journal_destroy(sc);
	if (error == 0)
		GJ_DEBUG(0, "Journal %s destroyed.", name);
	else {
		GJ_DEBUG(0, "Cannot destroy journal %s (error=%d). "
		    "Destroy it manually after last close.", sc->sc_name,
		    error);
	}
}

static int
g_journal_access(struct g_provider *pp, int acr, int acw, int ace)
{
	struct g_journal_softc *sc;
	int dcr, dcw, dce;

	g_topology_assert();
	GJ_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name,
	    acr, acw, ace);

	dcr = pp->acr + acr;
	dcw = pp->acw + acw;
	dce = pp->ace + ace;

	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY)) {
		if (acr <= 0 && acw <= 0 && ace <= 0)
			return (0);
		else
			return (ENXIO);
	}
	if (pp->acw == 0 && dcw > 0) {
		GJ_DEBUG(1, "Marking %s as dirty.", sc->sc_name);
		sc->sc_flags &= ~GJF_DEVICE_CLEAN;
		g_topology_unlock();
		g_journal_metadata_update(sc);
		g_topology_lock();
	} /* else if (pp->acw == 0 && dcw > 0 && JEMPTY(sc)) {
		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
		sc->sc_flags |= GJF_DEVICE_CLEAN;
		g_topology_unlock();
		g_journal_metadata_update(sc);
		g_topology_lock();
	} */
	return (0);
}

static void
g_journal_header_encode(struct g_journal_header *hdr, u_char *data)
{

	bcopy(GJ_HEADER_MAGIC, data, sizeof(GJ_HEADER_MAGIC));
	data += sizeof(GJ_HEADER_MAGIC);
	le32enc(data, hdr->jh_journal_id);
	data += 4;
	le32enc(data, hdr->jh_journal_next_id);
}

static int
g_journal_header_decode(const u_char *data, struct g_journal_header *hdr)
{

	bcopy(data, hdr->jh_magic, sizeof(hdr->jh_magic));
	data += sizeof(hdr->jh_magic);
	if (bcmp(hdr->jh_magic, GJ_HEADER_MAGIC, sizeof(GJ_HEADER_MAGIC)) != 0)
		return (EINVAL);
	hdr->jh_journal_id = le32dec(data);
	data += 4;
	hdr->jh_journal_next_id = le32dec(data);
	return (0);
}
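
/*
 * On-disk layout implied by the two functions above (offsets relative to
 * the start of the header sector):
 *
 *	[0]                          GJ_HEADER_MAGIC
 *	[sizeof(GJ_HEADER_MAGIC)]    jh_journal_id      (32-bit LE)
 *	[sizeof(GJ_HEADER_MAGIC)+4]  jh_journal_next_id (32-bit LE)
 *
 * Everything is stored little-endian regardless of host byte order, which
 * is why le32enc()/le32dec() are used instead of plain assignments.
 */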

static void
g_journal_flush_cache(struct g_journal_softc *sc)
{
	struct bintime bt;
	int error;

	if (sc->sc_bio_flush == 0)
		return;
	GJ_TIMER_START(1, &bt);
	if (sc->sc_bio_flush & GJ_FLUSH_JOURNAL) {
		error = g_io_flush(sc->sc_jconsumer);
		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
		    sc->sc_jconsumer->provider->name, error);
	}
	if (sc->sc_bio_flush & GJ_FLUSH_DATA) {
		/*
		 * TODO: This could be called in parallel with the
		 * previous call.
		 */
		error = g_io_flush(sc->sc_dconsumer);
		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
		    sc->sc_dconsumer->provider->name, error);
	}
	GJ_TIMER_STOP(1, &bt, "Cache flush time");
}

static int
g_journal_write_header(struct g_journal_softc *sc)
{
	struct g_journal_header hdr;
	struct g_consumer *cp;
	u_char *buf;
	int error;

	cp = sc->sc_jconsumer;
	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);

	strlcpy(hdr.jh_magic, GJ_HEADER_MAGIC, sizeof(hdr.jh_magic));
	hdr.jh_journal_id = sc->sc_journal_id;
	hdr.jh_journal_next_id = sc->sc_journal_next_id;
	g_journal_header_encode(&hdr, buf);
	error = g_write_data(cp, sc->sc_journal_offset, buf,
	    cp->provider->sectorsize);
	/* if (error == 0) */
	sc->sc_journal_offset += cp->provider->sectorsize;

	gj_free(buf, cp->provider->sectorsize);
	return (error);
}

/*
 * Every journal record has a header and data following it.
 * The functions below are used to encode the header into little-endian form
 * before storing it on disk and to decode it back into the system's
 * endianness after reading it.
 */
static void
g_journal_record_header_encode(struct g_journal_record_header *hdr,
    u_char *data)
{
	struct g_journal_entry *ent;
	u_int i;

	bcopy(GJ_RECORD_HEADER_MAGIC, data, sizeof(GJ_RECORD_HEADER_MAGIC));
	data += sizeof(GJ_RECORD_HEADER_MAGIC);
	le32enc(data, hdr->jrh_journal_id);
	data += 8;
	le16enc(data, hdr->jrh_nentries);
	data += 2;
	bcopy(hdr->jrh_sum, data, sizeof(hdr->jrh_sum));
	data += 8;
	for (i = 0; i < hdr->jrh_nentries; i++) {
		ent = &hdr->jrh_entries[i];
		le64enc(data, ent->je_joffset);
		data += 8;
		le64enc(data, ent->je_offset);
		data += 8;
		le64enc(data, ent->je_length);
		data += 8;
	}
}

static int
g_journal_record_header_decode(const u_char *data,
    struct g_journal_record_header *hdr)
{
	struct g_journal_entry *ent;
	u_int i;

	bcopy(data, hdr->jrh_magic, sizeof(hdr->jrh_magic));
	data += sizeof(hdr->jrh_magic);
	if (strcmp(hdr->jrh_magic, GJ_RECORD_HEADER_MAGIC) != 0)
		return (EINVAL);
	hdr->jrh_journal_id = le32dec(data);
	data += 8;
	hdr->jrh_nentries = le16dec(data);
	data += 2;
	if (hdr->jrh_nentries > GJ_RECORD_HEADER_NENTRIES)
		return (EINVAL);
	bcopy(data, hdr->jrh_sum, sizeof(hdr->jrh_sum));
	data += 8;
	for (i = 0; i < hdr->jrh_nentries; i++) {
		ent = &hdr->jrh_entries[i];
		ent->je_joffset = le64dec(data);
		data += 8;
		ent->je_offset = le64dec(data);
		data += 8;
		ent->je_length = le64dec(data);
		data += 8;
	}
	return (0);
}
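
/*
 * Record header layout implied by the encode/decode pair above: the magic
 * string, then a 32-bit LE journal id (the cursor advances by 8, so padding
 * follows), a 16-bit LE entry count, the checksum bytes (cursor advances
 * by 8), and finally jrh_nentries entries of three 64-bit LE fields each:
 * je_joffset, je_offset, je_length.  The CTASSERTs near the top of the
 * file guarantee the whole header fits in a 512-byte sector.
 */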

/*
 * This function reads metadata from a provider (via the given consumer),
 * decodes it to the system's endianness and verifies its correctness.
 */
static int
g_journal_metadata_read(struct g_consumer *cp, struct g_journal_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	/* Metadata is stored in the last sector. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (error != 0) {
		GJ_DEBUG(1, "Cannot read metadata from %s (error=%d).",
		    cp->provider->name, error);
		if (buf != NULL)
			g_free(buf);
		return (error);
	}

	/* Decode metadata. */
	error = journal_metadata_decode(buf, md);
	g_free(buf);
	/* Is this a gjournal provider at all? */
	if (strcmp(md->md_magic, G_JOURNAL_MAGIC) != 0)
		return (EINVAL);
	/*
	 * Are we able to handle this version of metadata?
	 * We only maintain backward compatibility.
	 */
	if (md->md_version > G_JOURNAL_VERSION) {
		GJ_DEBUG(0,
		    "Kernel module is too old to handle metadata from %s.",
		    cp->provider->name);
		return (EINVAL);
	}
	/* Is checksum correct? */
	if (error != 0) {
		GJ_DEBUG(0, "MD5 metadata hash mismatch for provider %s.",
		    cp->provider->name);
		return (error);
	}
	return (0);
}

/*
 * The two functions below are responsible for updating metadata.
 * Only metadata on the data provider is updated (we need to update
 * information about the active journal there).
 */
static void
g_journal_metadata_done(struct bio *bp)
{

	/*
	 * There is not much we can do on error except report it.
	 */
	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "Cannot update metadata (error=%d).",
		    bp->bio_error);
	} else {
		GJ_LOGREQ(2, bp, "Metadata updated.");
	}
	gj_free(bp->bio_data, bp->bio_length);
	g_destroy_bio(bp);
}

static void
g_journal_metadata_update(struct g_journal_softc *sc)
{
	struct g_journal_metadata md;
	struct g_consumer *cp;
	struct bio *bp;
	u_char *sector;

	cp = sc->sc_dconsumer;
	sector = gj_malloc(cp->provider->sectorsize, M_WAITOK);
	strlcpy(md.md_magic, G_JOURNAL_MAGIC, sizeof(md.md_magic));
	md.md_version = G_JOURNAL_VERSION;
	md.md_id = sc->sc_id;
	md.md_type = sc->sc_orig_type;
	md.md_jstart = sc->sc_jstart;
	md.md_jend = sc->sc_jend;
	md.md_joffset = sc->sc_inactive.jj_offset;
	md.md_jid = sc->sc_journal_previous_id;
	md.md_flags = 0;
	if (sc->sc_flags & GJF_DEVICE_CLEAN)
		md.md_flags |= GJ_FLAG_CLEAN;

	if (sc->sc_flags & GJF_DEVICE_HARDCODED)
		strlcpy(md.md_provider, sc->sc_name, sizeof(md.md_provider));
	else
		bzero(md.md_provider, sizeof(md.md_provider));
	md.md_provsize = cp->provider->mediasize;
	journal_metadata_encode(&md, sector);

	/*
	 * Flush the cache, so we know all data are on disk.
	 * We write information like "journal is consistent" here, so we need
	 * to be sure it is.  Without BIO_FLUSH here, we could end up in a
	 * situation where the metadata is stored on disk, but not all data.
	 */
	g_journal_flush_cache(sc);

	bp = g_alloc_bio();
	bp->bio_offset = cp->provider->mediasize - cp->provider->sectorsize;
	bp->bio_length = cp->provider->sectorsize;
	bp->bio_data = sector;
	bp->bio_cmd = BIO_WRITE;
	if (!(sc->sc_flags & GJF_DEVICE_DESTROY)) {
		bp->bio_done = g_journal_metadata_done;
		g_io_request(bp, cp);
	} else {
		bp->bio_done = NULL;
		g_io_request(bp, cp);
		biowait(bp, "gjmdu");
		g_journal_metadata_done(bp);
	}

	/*
	 * Be sure metadata reached the disk.
	 */
	g_journal_flush_cache(sc);
}

/*
 * This is where I/O requests come from GEOM.
 */
static void
g_journal_start(struct bio *bp)
{
	struct g_journal_softc *sc;

	sc = bp->bio_to->geom->softc;
	GJ_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
		mtx_lock(&sc->sc_mtx);
		bioq_insert_tail(&sc->sc_regular_queue, bp);
		wakeup(sc);
		mtx_unlock(&sc->sc_mtx);
		return;
	case BIO_GETATTR:
		if (strcmp(bp->bio_attribute, "GJOURNAL::provider") == 0) {
			strlcpy(bp->bio_data, bp->bio_to->name, bp->bio_length);
			bp->bio_completed = strlen(bp->bio_to->name) + 1;
			g_io_deliver(bp, 0);
			return;
		}
		/* FALLTHROUGH */
	case BIO_DELETE:
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
}

static void
g_journal_std_done(struct bio *bp)
{
	struct g_journal_softc *sc;

	sc = bp->bio_from->geom->softc;
	mtx_lock(&sc->sc_mtx);
	bioq_insert_tail(&sc->sc_back_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_mtx);
}

static struct bio *
g_journal_new_bio(off_t start, off_t end, off_t joffset, u_char *data,
    int flags)
{
	struct bio *bp;

	bp = g_alloc_bio();
	bp->bio_offset = start;
	bp->bio_joffset = joffset;
	bp->bio_length = end - start;
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = g_journal_std_done;
	if (data == NULL)
		bp->bio_data = NULL;
	else {
		bp->bio_data = gj_malloc(bp->bio_length, flags);
		if (bp->bio_data != NULL)
			bcopy(data, bp->bio_data, bp->bio_length);
	}
	return (bp);
}

#define	g_journal_insert_bio(head, bp, flags)				\
	g_journal_insert((head), (bp)->bio_offset,			\
	    (bp)->bio_offset + (bp)->bio_length, (bp)->bio_joffset,	\
	    (bp)->bio_data, flags)
/*
 * The function below does a lot more than just insert a bio into the queue.
 * It keeps the queue sorted by offset and ensures that there is no
 * duplicated data (it combines bios where ranges overlap).
 *
 * The function returns the number of bios inserted (a bio can be split).
 */
static int
g_journal_insert(struct bio **head, off_t nstart, off_t nend, off_t joffset,
    u_char *data, int flags)
{
	struct bio *nbp, *cbp, *pbp;
	off_t cstart, cend;
	u_char *tmpdata;
	int n;

	GJ_DEBUG(3, "INSERT(%p): (%jd, %jd, %jd)", *head, nstart, nend,
	    joffset);
	n = 0;
	pbp = NULL;
	GJQ_FOREACH(*head, cbp) {
		cstart = cbp->bio_offset;
		cend = cbp->bio_offset + cbp->bio_length;

		if (nstart >= cend) {
			/*
			 *  +-------------+
			 *  |             |
			 *  |   current   |  +-------------+
			 *  |     bio     |  |             |
			 *  |             |  |     new     |
			 *  +-------------+  |     bio     |
			 *                   |             |
			 *                   +-------------+
			 */
			GJ_DEBUG(3, "INSERT(%p): 1", *head);
		} else if (nend <= cstart) {
			/*
			 *                   +-------------+
			 *                   |             |
			 *  +-------------+  |   current   |
			 *  |             |  |     bio     |
			 *  |     new     |  |             |
			 *  |     bio     |  +-------------+
			 *  |             |
			 *  +-------------+
			 */
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			if (pbp == NULL)
				*head = nbp;
			else
				pbp->bio_next = nbp;
			nbp->bio_next = cbp;
			n++;
			GJ_DEBUG(3, "INSERT(%p): 2 (nbp=%p pbp=%p)", *head, nbp,
			    pbp);
			goto end;
		} else if (nstart <= cstart && nend >= cend) {
			/*
			 *      +-------------+      +-------------+
			 *      | current bio |      | current bio |
			 *  +---+-------------+---+  +-------------+---+
			 *  |   |             |   |  |             |   |
			 *  |   |             |   |  |             |   |
			 *  |   +-------------+   |  +-------------+   |
			 *  |       new bio       |  |     new bio     |
			 *  +---------------------+  +-----------------+
			 *
			 *      +-------------+  +-------------+
			 *      | current bio |  | current bio |
			 *  +---+-------------+  +-------------+
			 *  |   |             |  |             |
			 *  |   |             |  |             |
			 *  |   +-------------+  +-------------+
			 *  |      new bio    |  |   new bio   |
			 *  +-----------------+  +-------------+
			 */
			g_journal_stats_bytes_skipped += cbp->bio_length;
			cbp->bio_offset = nstart;
			cbp->bio_joffset = joffset;
			cbp->bio_length = cend - nstart;
			if (cbp->bio_data != NULL) {
				gj_free(cbp->bio_data, cend - cstart);
				cbp->bio_data = NULL;
			}
			if (data != NULL) {
				cbp->bio_data = gj_malloc(cbp->bio_length,
				    flags);
				if (cbp->bio_data != NULL) {
					bcopy(data, cbp->bio_data,
					    cbp->bio_length);
				}
				data += cend - nstart;
			}
			joffset += cend - nstart;
			nstart = cend;
			GJ_DEBUG(3, "INSERT(%p): 3 (cbp=%p)", *head, cbp);
		} else if (nstart > cstart && nend >= cend) {
			/*
			 *  +-----------------+  +-------------+
			 *  |   current bio   |  | current bio |
			 *  |   +-------------+  |   +---------+---+
			 *  |   |             |  |   |         |   |
			 *  |   |             |  |   |         |   |
			 *  +---+-------------+  +---+---------+   |
			 *      |   new bio   |      |   new bio   |
			 *      +-------------+      +-------------+
			 */
			g_journal_stats_bytes_skipped += cend - nstart;
			nbp = g_journal_new_bio(nstart, cend, joffset, data,
			    flags);
			nbp->bio_next = cbp->bio_next;
			cbp->bio_next = nbp;
			cbp->bio_length = nstart - cstart;
			if (cbp->bio_data != NULL) {
				cbp->bio_data = gj_realloc(cbp->bio_data,
				    cbp->bio_length, cend - cstart);
			}
			if (data != NULL)
				data += cend - nstart;
			joffset += cend - nstart;
			nstart = cend;
			n++;
			GJ_DEBUG(3, "INSERT(%p): 4 (cbp=%p)", *head, cbp);
		} else if (nstart > cstart && nend < cend) {
			/*
			 *  +---------------------+
			 *  |     current bio     |
			 *  |   +-------------+   |
			 *  |   |             |   |
			 *  |   |             |   |
			 *  +---+-------------+---+
			 *      |   new bio   |
			 *      +-------------+
			 */
			g_journal_stats_bytes_skipped += nend - nstart;
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			nbp->bio_next = cbp->bio_next;
			cbp->bio_next = nbp;
			if (cbp->bio_data == NULL)
				tmpdata = NULL;
			else
				tmpdata = cbp->bio_data + nend - cstart;
			nbp = g_journal_new_bio(nend, cend,
			    cbp->bio_joffset + nend - cstart, tmpdata, flags);
			nbp->bio_next = ((struct bio *)cbp->bio_next)->bio_next;
			((struct bio *)cbp->bio_next)->bio_next = nbp;
			cbp->bio_length = nstart - cstart;
			if (cbp->bio_data != NULL) {
				cbp->bio_data = gj_realloc(cbp->bio_data,
				    cbp->bio_length, cend - cstart);
			}
			n += 2;
			GJ_DEBUG(3, "INSERT(%p): 5 (cbp=%p)", *head, cbp);
			goto end;
		} else if (nstart <= cstart && nend < cend) {
			/*
			 *  +-----------------+      +-------------+
			 *  |   current bio   |      | current bio |
			 *  +-------------+   |  +---+---------+   |
			 *  |             |   |  |   |         |   |
			 *  |             |   |  |   |         |   |
			 *  +-------------+---+  |   +---------+---+
			 *  |   new bio   |      |   new bio   |
			 *  +-------------+      +-------------+
			 */
			g_journal_stats_bytes_skipped += nend - nstart;
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			if (pbp == NULL)
				*head = nbp;
			else
				pbp->bio_next = nbp;
			nbp->bio_next = cbp;
			cbp->bio_offset = nend;
			cbp->bio_length = cend - nend;
			cbp->bio_joffset += nend - cstart;
			tmpdata = cbp->bio_data;
			if (tmpdata != NULL) {
				cbp->bio_data = gj_malloc(cbp->bio_length,
				    flags);
				if (cbp->bio_data != NULL) {
					bcopy(tmpdata + nend - cstart,
					    cbp->bio_data, cbp->bio_length);
				}
				gj_free(tmpdata, cend - cstart);
			}
			n++;
			GJ_DEBUG(3, "INSERT(%p): 6 (cbp=%p)", *head, cbp);
			goto end;
		}
		if (nstart == nend)
			goto end;
		pbp = cbp;
	}
	nbp = g_journal_new_bio(nstart, nend, joffset, data, flags);
	if (pbp == NULL)
		*head = nbp;
	else
		pbp->bio_next = nbp;
	nbp->bio_next = NULL;
	n++;
	GJ_DEBUG(3, "INSERT(%p): 8 (nbp=%p pbp=%p)", *head, nbp, pbp);
end:
	if (g_journal_debug >= 3) {
		GJQ_FOREACH(*head, cbp) {
			GJ_DEBUG(3, "ELEMENT: %p (%jd, %jd, %jd, %p)", cbp,
			    (intmax_t)cbp->bio_offset,
			    (intmax_t)cbp->bio_length,
			    (intmax_t)cbp->bio_joffset, cbp->bio_data);
		}
		GJ_DEBUG(3, "INSERT(%p): DONE %d", *head, n);
	}
	return (n);
}

/*
 * The function below combines neighbouring bios, trying to squeeze as much
 * data as possible into one bio.
 *
 * The function returns the number of bios combined (as a negative value).
 */
static int
g_journal_optimize(struct bio *head)
{
	struct bio *cbp, *pbp;
	int n;

	n = 0;
	pbp = NULL;
	GJQ_FOREACH(head, cbp) {
		/* Skip bios which have to be read first. */
		if (cbp->bio_data == NULL) {
			pbp = NULL;
			continue;
		}
		/* There is no previous bio yet. */
		if (pbp == NULL) {
			pbp = cbp;
			continue;
		}
		/* Is this a neighbouring bio? */
		if (pbp->bio_offset + pbp->bio_length != cbp->bio_offset) {
			/* Be sure that the bio queue is sorted. */
			KASSERT(pbp->bio_offset + pbp->bio_length < cbp->bio_offset,
			    ("poffset=%jd plength=%jd coffset=%jd",
			    (intmax_t)pbp->bio_offset,
			    (intmax_t)pbp->bio_length,
			    (intmax_t)cbp->bio_offset));
			pbp = cbp;
			continue;
		}
		/* Be sure we don't end up with a bio that is too big. */
		if (pbp->bio_length + cbp->bio_length > MAXPHYS) {
			pbp = cbp;
			continue;
		}
		/* Ok, we can join bios. */
		GJ_LOGREQ(4, pbp, "Join: ");
		GJ_LOGREQ(4, cbp, "and: ");
		pbp->bio_data = gj_realloc(pbp->bio_data,
		    pbp->bio_length + cbp->bio_length, pbp->bio_length);
		bcopy(cbp->bio_data, pbp->bio_data + pbp->bio_length,
		    cbp->bio_length);
		gj_free(cbp->bio_data, cbp->bio_length);
		pbp->bio_length += cbp->bio_length;
		pbp->bio_next = cbp->bio_next;
		g_destroy_bio(cbp);
		cbp = pbp;
		g_journal_stats_combined_ios++;
		n--;
		GJ_LOGREQ(4, pbp, "Got: ");
	}
	return (n);
}
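
/*
 * Example of the effect above: four adjacent 32 KiB writes queued at
 * offsets 0, 32K, 64K and 96K collapse into a single 128 KiB bio, as long
 * as the combined length stays within MAXPHYS; the function then returns
 * -3, which callers add to their in-progress counters.
 */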

/*
 * TODO: Update comment.
 * These are the functions responsible for copying one portion of data from
 * the journal to the destination provider.
 * The order goes like this:
 * 1. Read the header, which contains information about data blocks
 *    following it.
 * 2. Read the data blocks from the journal.
 * 3. Write the data blocks on the data provider.
 *
 * g_journal_copy_start()
 * g_journal_copy_done() - got finished write request, logs potential errors.
 */

/*
 * When there is no data in the cache, this function is used to read it.
 */
static void
g_journal_read_first(struct g_journal_softc *sc, struct bio *bp)
{
	struct bio *cbp;

	/*
	 * We were short on memory, so the data was freed.
	 * In that case we need to read it back from the journal.
	 */
	cbp = g_alloc_bio();
	cbp->bio_cflags = bp->bio_cflags;
	cbp->bio_parent = bp;
	cbp->bio_offset = bp->bio_joffset;
	cbp->bio_length = bp->bio_length;
	cbp->bio_data = gj_malloc(bp->bio_length, M_WAITOK);
	cbp->bio_cmd = BIO_READ;
	cbp->bio_done = g_journal_std_done;
	GJ_LOGREQ(4, cbp, "READ FIRST");
	g_io_request(cbp, sc->sc_jconsumer);
	g_journal_cache_misses++;
}

static void
g_journal_copy_send(struct g_journal_softc *sc)
{
	struct bio *bioq, *bp, *lbp;

	bioq = lbp = NULL;
	mtx_lock(&sc->sc_mtx);
	for (; sc->sc_copy_in_progress < g_journal_parallel_copies;) {
		bp = GJQ_FIRST(sc->sc_inactive.jj_queue);
		if (bp == NULL)
			break;
		GJQ_REMOVE(sc->sc_inactive.jj_queue, bp);
		sc->sc_copy_in_progress++;
		GJQ_INSERT_AFTER(bioq, bp, lbp);
		lbp = bp;
	}
	mtx_unlock(&sc->sc_mtx);
	if (g_journal_do_optimize)
		sc->sc_copy_in_progress += g_journal_optimize(bioq);
	while ((bp = GJQ_FIRST(bioq)) != NULL) {
		GJQ_REMOVE(bioq, bp);
		GJQ_INSERT_HEAD(sc->sc_copy_queue, bp);
		bp->bio_cflags = GJ_BIO_COPY;
		if (bp->bio_data == NULL)
			g_journal_read_first(sc, bp);
		else {
			bp->bio_joffset = 0;
			GJ_LOGREQ(4, bp, "SEND");
			g_io_request(bp, sc->sc_dconsumer);
		}
	}
}

static void
g_journal_copy_start(struct g_journal_softc *sc)
{

	/*
	 * Remember in the metadata that we're starting to copy journaled data
	 * to the data provider.
	 * In case of a power failure, we will copy this data again on boot.
	 */
	if (!sc->sc_journal_copying) {
		sc->sc_journal_copying = 1;
		GJ_DEBUG(1, "Starting copy of journal.");
		g_journal_metadata_update(sc);
	}
	g_journal_copy_send(sc);
}

/*
 * Data block has been read from the journal provider.
 */
static int
g_journal_copy_read_done(struct bio *bp)
{
	struct g_journal_softc *sc;
	struct g_consumer *cp;
	struct bio *pbp;

	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));

	sc = bp->bio_from->geom->softc;
	pbp = bp->bio_parent;

	if (bp->bio_error != 0) {
		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
		    bp->bio_to->name, bp->bio_error);
		/*
		 * We will not be able to deliver the WRITE request either.
		 */
		gj_free(bp->bio_data, bp->bio_length);
		g_destroy_bio(pbp);
		g_destroy_bio(bp);
		sc->sc_copy_in_progress--;
		return (1);
	}
	pbp->bio_data = bp->bio_data;
	cp = sc->sc_dconsumer;
	g_io_request(pbp, cp);
	GJ_LOGREQ(4, bp, "READ DONE");
	g_destroy_bio(bp);
	return (0);
}

/*
 * Data block has been written to the data provider.
 */
static void
g_journal_copy_write_done(struct bio *bp)
{
	struct g_journal_softc *sc;

	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));

	sc = bp->bio_from->geom->softc;
	sc->sc_copy_in_progress--;

	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "[copy] Error while writing data (error=%d)",
		    bp->bio_error);
	}
	GJQ_REMOVE(sc->sc_copy_queue, bp);
	gj_free(bp->bio_data, bp->bio_length);
	GJ_LOGREQ(4, bp, "DONE");
	g_destroy_bio(bp);

	if (sc->sc_copy_in_progress == 0) {
		/*
		 * This was the last write request for this journal.
		 */
		GJ_DEBUG(1, "Data has been copied.");
		sc->sc_journal_copying = 0;
	}
}

static void g_journal_flush_done(struct bio *bp);

/*
 * Flush one record onto the active journal provider.
 */
static void
g_journal_flush(struct g_journal_softc *sc)
{
	struct g_journal_record_header hdr;
	struct g_journal_entry *ent;
	struct g_provider *pp;
	struct bio **bioq;
	struct bio *bp, *fbp, *pbp;
	off_t joffset, size;
	u_char *data, hash[16];
	MD5_CTX ctx;
	u_int i;

	if (sc->sc_current_count == 0)
		return;

	size = 0;
	pp = sc->sc_jprovider;
	GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
	joffset = sc->sc_journal_offset;

	GJ_DEBUG(2, "Storing %d journal entries on %s at %jd.",
	    sc->sc_current_count, pp->name, (intmax_t)joffset);

	/*
	 * Store the 'journal id', so we know to which journal this record
	 * belongs.
	 */
	hdr.jrh_journal_id = sc->sc_journal_id;
	/* Could be less than g_journal_record_entries if called due to a timeout. */
	hdr.jrh_nentries = MIN(sc->sc_current_count, g_journal_record_entries);
	strlcpy(hdr.jrh_magic, GJ_RECORD_HEADER_MAGIC, sizeof(hdr.jrh_magic));

	bioq = &sc->sc_active.jj_queue;
	pbp = sc->sc_flush_queue;

	fbp = g_alloc_bio();
	fbp->bio_parent = NULL;
	fbp->bio_cflags = GJ_BIO_JOURNAL;
	fbp->bio_offset = -1;
	fbp->bio_joffset = joffset;
	fbp->bio_length = pp->sectorsize;
	fbp->bio_cmd = BIO_WRITE;
	fbp->bio_done = g_journal_std_done;
	GJQ_INSERT_AFTER(sc->sc_flush_queue, fbp, pbp);
	pbp = fbp;
	fbp->bio_to = pp;
	GJ_LOGREQ(4, fbp, "FLUSH_OUT");
	joffset += pp->sectorsize;
	sc->sc_flush_count++;
	if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
		MD5Init(&ctx);

	for (i = 0; i < hdr.jrh_nentries; i++) {
		bp = sc->sc_current_queue;
		KASSERT(bp != NULL, ("NULL bp"));
		bp->bio_to = pp;
		GJ_LOGREQ(4, bp, "FLUSHED");
		sc->sc_current_queue = bp->bio_next;
		bp->bio_next = NULL;
		sc->sc_current_count--;

		/* Add to the header. */
		ent = &hdr.jrh_entries[i];
		ent->je_offset = bp->bio_offset;
		ent->je_joffset = joffset;
		ent->je_length = bp->bio_length;
		size += ent->je_length;

		data = bp->bio_data;
		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
			MD5Update(&ctx, data, ent->je_length);
		bzero(bp, sizeof(*bp));
		bp->bio_cflags = GJ_BIO_JOURNAL;
		bp->bio_offset = ent->je_offset;
		bp->bio_joffset = ent->je_joffset;
		bp->bio_length = ent->je_length;
		bp->bio_data = data;
		bp->bio_cmd = BIO_WRITE;
		bp->bio_done = g_journal_std_done;
		GJQ_INSERT_AFTER(sc->sc_flush_queue, bp, pbp);
		pbp = bp;
		bp->bio_to = pp;
		GJ_LOGREQ(4, bp, "FLUSH_OUT");
		joffset += bp->bio_length;
		sc->sc_flush_count++;

		/*
		 * Add the request to the active journal's queue
		 * (sc_active.jj_queue).  This is our cache.  After a journal
		 * switch we don't have to read the data back from the
		 * inactive journal, because we keep it in memory.
		 */
		g_journal_insert(bioq, ent->je_offset,
		    ent->je_offset + ent->je_length, ent->je_joffset, data,
		    M_NOWAIT);
	}

	/*
	 * After all requests, store valid header.
	 */
	data = gj_malloc(pp->sectorsize, M_WAITOK);
	if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
		MD5Final(hash, &ctx);
		bcopy(hash, hdr.jrh_sum, sizeof(hdr.jrh_sum));
	}
	g_journal_record_header_encode(&hdr, data);
	fbp->bio_data = data;

	sc->sc_journal_offset = joffset;

	g_journal_check_overflow(sc);
}
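
/*
 * Example of the on-disk result of g_journal_flush() above: with a
 * 512-byte sector and three queued 16 KiB writes, the record occupies
 * one header sector followed by 48 KiB of data, so sc_journal_offset
 * advances by 512 + 3 * 16384 bytes.  The header bio is queued first,
 * but its data is only filled in at the end, once the checksum and all
 * entries are known.
 */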

/*
 * Flush request finished.
 */
static void
g_journal_flush_done(struct bio *bp)
{
	struct g_journal_softc *sc;
	struct g_consumer *cp;

	KASSERT((bp->bio_cflags & GJ_BIO_MASK) == GJ_BIO_JOURNAL,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_JOURNAL));

	cp = bp->bio_from;
	sc = cp->geom->softc;
	sc->sc_flush_in_progress--;

	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "[flush] Error while writing data (error=%d)",
		    bp->bio_error);
	}
	gj_free(bp->bio_data, bp->bio_length);
	GJ_LOGREQ(4, bp, "DONE");
	g_destroy_bio(bp);
}

static void g_journal_release_delayed(struct g_journal_softc *sc);

static void
g_journal_flush_send(struct g_journal_softc *sc)
{
	struct g_consumer *cp;
	struct bio *bioq, *bp, *lbp;

	cp = sc->sc_jconsumer;
	bioq = lbp = NULL;
	while (sc->sc_flush_in_progress < g_journal_parallel_flushes) {
		/* Send one flush request to the active journal. */
		bp = GJQ_FIRST(sc->sc_flush_queue);
		if (bp != NULL) {
			GJQ_REMOVE(sc->sc_flush_queue, bp);
			sc->sc_flush_count--;
			bp->bio_offset = bp->bio_joffset;
			bp->bio_joffset = 0;
			sc->sc_flush_in_progress++;
			GJQ_INSERT_AFTER(bioq, bp, lbp);
			lbp = bp;
		}
		/* Try to release delayed requests. */
		g_journal_release_delayed(sc);
		/* If there are no requests to flush, leave. */
		if (GJQ_FIRST(sc->sc_flush_queue) == NULL)
			break;
	}
	if (g_journal_do_optimize)
		sc->sc_flush_in_progress += g_journal_optimize(bioq);
	while ((bp = GJQ_FIRST(bioq)) != NULL) {
		GJQ_REMOVE(bioq, bp);
		GJ_LOGREQ(3, bp, "Flush request sent");
		g_io_request(bp, cp);
	}
}

static void
g_journal_add_current(struct g_journal_softc *sc, struct bio *bp)
{
	int n;

	GJ_LOGREQ(4, bp, "CURRENT %d", sc->sc_current_count);
	n = g_journal_insert_bio(&sc->sc_current_queue, bp, M_WAITOK);
	sc->sc_current_count += n;
	n = g_journal_optimize(sc->sc_current_queue);
	sc->sc_current_count += n;
	/*
	 * For requests which are added to the current queue we deliver
	 * the response immediately.
	 */
	bp->bio_completed = bp->bio_length;
	g_io_deliver(bp, 0);
	if (sc->sc_current_count >= g_journal_record_entries) {
		/*
		 * Let's flush one record onto the active journal provider.
		 */
		g_journal_flush(sc);
	}
}

static void
g_journal_release_delayed(struct g_journal_softc *sc)
{
	struct bio *bp;

	for (;;) {
		/* The flush queue is full, exit. */
		if (sc->sc_flush_count >= g_journal_accept_immediately)
			return;
		bp = bioq_takefirst(&sc->sc_delayed_queue);
		if (bp == NULL)
			return;
		sc->sc_delayed_count--;
		g_journal_add_current(sc, bp);
	}
}

/*
 * Add an I/O request to the current queue. If we have enough requests for one
 * journal record, we flush them onto the active journal provider.
 */
static void
g_journal_add_request(struct g_journal_softc *sc, struct bio *bp)
{

	/*
	 * The flush queue is full, we need to delay the request.
	 */
	if (sc->sc_delayed_count > 0 ||
	    sc->sc_flush_count >= g_journal_accept_immediately) {
		GJ_LOGREQ(4, bp, "DELAYED");
		bioq_insert_tail(&sc->sc_delayed_queue, bp);
		sc->sc_delayed_count++;
		return;
	}

	KASSERT(TAILQ_EMPTY(&sc->sc_delayed_queue.queue),
	    ("DELAYED queue not empty."));
	g_journal_add_current(sc, bp);
}

static void g_journal_read_done(struct bio *bp);

/*
 * Try to find the requested data in the cache.
 */
static struct bio *
g_journal_read_find(struct bio *head, int sorted, struct bio *pbp, off_t ostart,
    off_t oend)
{
	off_t cstart, cend;
	struct bio *bp;

	GJQ_FOREACH(head, bp) {
		if (bp->bio_offset == -1)
			continue;
		cstart = MAX(ostart, bp->bio_offset);
		cend = MIN(oend, bp->bio_offset + bp->bio_length);
		if (cend <= ostart)
			continue;
		else if (cstart >= oend) {
			if (!sorted)
				continue;
			else {
				bp = NULL;
				break;
			}
		}
		if (bp->bio_data == NULL)
			break;
		GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
		    bp);
		bcopy(bp->bio_data + cstart - bp->bio_offset,
		    pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
		pbp->bio_completed += cend - cstart;
		if (pbp->bio_completed == pbp->bio_length) {
			/*
			 * Cool, the whole request was in the cache, deliver
			 * the happy message.
			 */
			g_io_deliver(pbp, 0);
			return (pbp);
		}
		break;
	}
	return (bp);
}

/*
 * Try to find the requested data in the cache.
 */
static struct bio *
g_journal_read_queue_find(struct bio_queue *head, struct bio *pbp, off_t ostart,
    off_t oend)
{
	off_t cstart, cend;
	struct bio *bp;

	TAILQ_FOREACH(bp, head, bio_queue) {
		cstart = MAX(ostart, bp->bio_offset);
		cend = MIN(oend, bp->bio_offset + bp->bio_length);
		if (cend <= ostart)
			continue;
		else if (cstart >= oend)
			continue;
		KASSERT(bp->bio_data != NULL,
		    ("%s: bio_data == NULL", __func__));
		GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
		    bp);
		bcopy(bp->bio_data + cstart - bp->bio_offset,
		    pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
		pbp->bio_completed += cend - cstart;
		if (pbp->bio_completed == pbp->bio_length) {
			/*
			 * Cool, the whole request was in the cache, deliver
			 * the happy message.
			 */
			g_io_deliver(pbp, 0);
			return (pbp);
		}
		break;
	}
	return (bp);
}

/*
 * This function is used for collecting data on a read.
 * The complexity comes from the fact that parts of the data can be stored
 * in six different places:
 * - in delayed requests
 * - in memory - the data not yet sent to the active journal provider
 * - in requests which are going to be sent to the active journal
 * - in the active journal
 * - in the inactive journal
 * - in the data provider
 */
static void
g_journal_read(struct g_journal_softc *sc, struct bio *pbp, off_t ostart,
    off_t oend)
{
	struct bio *bp, *nbp, *head;
	off_t cstart, cend;
	u_int i, sorted = 0;

	GJ_DEBUG(3, "READ: (%jd, %jd)", ostart, oend);

	cstart = cend = -1;
	bp = NULL;
	head = NULL;
	for (i = 0; i <= 5; i++) {
		switch (i) {
		case 0: /* Delayed requests. */
			head = NULL;
			sorted = 0;
			break;
		case 1: /* Not-yet-sent data. */
			head = sc->sc_current_queue;
			sorted = 1;
			break;
		case 2: /* In-flight to the active journal. */
			head = sc->sc_flush_queue;
			sorted = 0;
			break;
		case 3: /* Active journal. */
			head = sc->sc_active.jj_queue;
			sorted = 1;
			break;
		case 4: /* Inactive journal. */
			/*
			 * XXX: Here could be a race with g_journal_lowmem().
			 */
			head = sc->sc_inactive.jj_queue;
			sorted = 1;
			break;
		case 5: /* In-flight to the data provider. */
			head = sc->sc_copy_queue;
			sorted = 0;
			break;
		default:
			panic("gjournal %s: i=%d", __func__, i);
		}
		if (i == 0)
			bp = g_journal_read_queue_find(&sc->sc_delayed_queue.queue, pbp, ostart, oend);
		else
			bp = g_journal_read_find(head, sorted, pbp, ostart, oend);
		if (bp == pbp) { /* Got the whole request. */
			GJ_DEBUG(2, "Got the whole request from %u.", i);
			return;
		} else if (bp != NULL) {
			cstart = MAX(ostart, bp->bio_offset);
			cend = MIN(oend, bp->bio_offset + bp->bio_length);
			GJ_DEBUG(2, "Got part of the request from %u (%jd-%jd).",
			    i, (intmax_t)cstart, (intmax_t)cend);
			break;
		}
	}
	if (bp != NULL) {
		if (bp->bio_data == NULL) {
			nbp = g_clone_bio(pbp);
			nbp->bio_cflags = GJ_BIO_READ;
			nbp->bio_data =
			    pbp->bio_data + cstart - pbp->bio_offset;
			nbp->bio_offset =
			    bp->bio_joffset + cstart - bp->bio_offset;
			nbp->bio_length = cend - cstart;
			nbp->bio_done = g_journal_read_done;
			g_io_request(nbp, sc->sc_jconsumer);
		}
		/*
		 * If we don't have the whole request yet, call g_journal_read()
		 * recursively.
		 */
		if (ostart < cstart)
			g_journal_read(sc, pbp, ostart, cstart);
		if (oend > cend)
			g_journal_read(sc, pbp, cend, oend);
	} else {
		/*
		 * No data in memory, no data in the journal.
		 * It's time to ask the data provider.
		 */
		GJ_DEBUG(3, "READ(data): (%jd, %jd)", ostart, oend);
		nbp = g_clone_bio(pbp);
		nbp->bio_cflags = GJ_BIO_READ;
		nbp->bio_data = pbp->bio_data + ostart - pbp->bio_offset;
		nbp->bio_offset = ostart;
		nbp->bio_length = oend - ostart;
		nbp->bio_done = g_journal_read_done;
		g_io_request(nbp, sc->sc_dconsumer);
		/* We have the whole request, return here. */
		return;
	}
}

/*
 * Function responsible for handling finished READ requests.
 * Actually, g_std_done() could be used here; the only difference is that we
 * log errors.
 */
static void
g_journal_read_done(struct bio *bp)
{
	struct bio *pbp;

	KASSERT(bp->bio_cflags == GJ_BIO_READ,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_READ));

	pbp = bp->bio_parent;
	pbp->bio_inbed++;
	pbp->bio_completed += bp->bio_length;

	if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
		    bp->bio_to->name, bp->bio_error);
	}
	g_destroy_bio(bp);
	if (pbp->bio_children == pbp->bio_inbed &&
	    pbp->bio_completed == pbp->bio_length) {
		/* We're done. */
		g_io_deliver(pbp, 0);
	}
}

/*
 * Deactivate the current journal and activate the next one.
 */
static void
g_journal_switch(struct g_journal_softc *sc)
{
	struct g_provider *pp;

	if (JEMPTY(sc)) {
		GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
		pp = LIST_FIRST(&sc->sc_geom->provider);
		if (!(sc->sc_flags & GJF_DEVICE_CLEAN) && pp->acw == 0) {
			sc->sc_flags |= GJF_DEVICE_CLEAN;
			GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
			g_journal_metadata_update(sc);
		}
	} else {
		GJ_DEBUG(3, "Switching journal %s.", sc->sc_geom->name);

		pp = sc->sc_jprovider;

		sc->sc_journal_previous_id = sc->sc_journal_id;

		sc->sc_journal_id = sc->sc_journal_next_id;
		sc->sc_journal_next_id = arc4random();

		GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);

		g_journal_write_header(sc);

		sc->sc_inactive.jj_offset = sc->sc_active.jj_offset;
		sc->sc_inactive.jj_queue = sc->sc_active.jj_queue;

		sc->sc_active.jj_offset =
		    sc->sc_journal_offset - pp->sectorsize;
		sc->sc_active.jj_queue = NULL;

		/*
		 * Switch is done, start copying data from the (now) inactive
		 * journal to the data provider.
		 */
		g_journal_copy_start(sc);
	}
	mtx_lock(&sc->sc_mtx);
	sc->sc_flags &= ~GJF_DEVICE_SWITCH;
	mtx_unlock(&sc->sc_mtx);
}

static void
g_journal_initialize(struct g_journal_softc *sc)
{

	sc->sc_journal_id = arc4random();
	sc->sc_journal_next_id = arc4random();
	sc->sc_journal_previous_id = sc->sc_journal_id;
	sc->sc_journal_offset = sc->sc_jstart;
	sc->sc_inactive.jj_offset = sc->sc_jstart;
	g_journal_write_header(sc);
	sc->sc_active.jj_offset = sc->sc_jstart;
}

static void
g_journal_mark_as_dirty(struct g_journal_softc *sc)
{
	const struct g_journal_desc *desc;
	int i;

	GJ_DEBUG(1, "Marking file system %s as dirty.", sc->sc_name);
	for (i = 0; (desc = g_journal_filesystems[i]) != NULL; i++)
		desc->jd_dirty(sc->sc_dconsumer);
}

/*
 * Read a record header from the given journal.
 * This is very similar to g_read_data(9), but it doesn't allocate memory for
 * the bio and data on every call.
 */
static int
g_journal_sync_read(struct g_consumer *cp, struct bio *bp, off_t offset,
    void *data)
{
	int error;

	bzero(bp, sizeof(*bp));
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = cp->provider->sectorsize;
	bp->bio_data = data;
	g_io_request(bp, cp);
	error = biowait(bp, "gjs_read");
	return (error);
}

#if 0
/*
 * This function is called when we start the journal device and detect that
 * one of the journals was not fully copied.
 * Its purpose is to read all record headers from the journal and place them
 * in the inactive queue, so that we can start the journal synchronization
 * process and the journal provider itself.
 * The design decision was made not to synchronize the whole journal here, as
 * that can take too much time.  Reading only the headers and delaying the
 * synchronization process until after the journal provider is started should
 * be the best choice.
 */
#endif
1798
1799static void
1800g_journal_sync(struct g_journal_softc *sc)
1801{
1802 struct g_journal_record_header rhdr;
1803 struct g_journal_entry *ent;
1804 struct g_journal_header jhdr;
1805 struct g_consumer *cp;
1806 struct bio *bp, *fbp, *tbp;
1807 off_t joffset, offset;
1808 u_char *buf, sum[16];
1809 uint64_t id;
1810 MD5_CTX ctx;
1811 int error, found, i;
1812
1813 found = 0;
1814 fbp = NULL;
1815 cp = sc->sc_jconsumer;
1816 bp = g_alloc_bio();
1817 buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
1818 offset = joffset = sc->sc_inactive.jj_offset = sc->sc_journal_offset;
1819
1820 GJ_DEBUG(2, "Looking for termination at %jd.", (intmax_t)joffset);
1821
1822 /*
1823 * Read and decode first journal header.
1824 */
1825 error = g_journal_sync_read(cp, bp, offset, buf);
1826 if (error != 0) {
1827 GJ_DEBUG(0, "Error while reading journal header from %s.",
1828 cp->provider->name);
1829 goto end;
1830 }
1831 error = g_journal_header_decode(buf, &jhdr);
1832 if (error != 0) {
1833 GJ_DEBUG(0, "Cannot decode journal header from %s.",
1834 cp->provider->name);
1835 goto end;
1836 }
1837 id = sc->sc_journal_id;
1838 if (jhdr.jh_journal_id != sc->sc_journal_id) {
1839 GJ_DEBUG(1, "Journal ID mismatch at %jd (0x%08x != 0x%08x).",
1840 (intmax_t)offset, (u_int)jhdr.jh_journal_id, (u_int)id);
1841 goto end;
1842 }
1843 offset += cp->provider->sectorsize;
1844 id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1845
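	/*
	 * Walk the journal sector by sector. Each iteration reads one
	 * header-sized block and classifies it either as a record header
	 * (whose entries are queued for later synchronization) or as the
	 * next journal header (which terminates the journal being scanned).
	 */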
1846 for (;;) {
1847 /*
1848 * If the biggest record won't fit, look for a record header or
1849		 * journal header from the beginning.
1850 */
1851 GJ_VALIDATE_OFFSET(offset, sc);
1852 error = g_journal_sync_read(cp, bp, offset, buf);
1853 if (error != 0) {
1854 /*
1855			 * Not good. An error while reading a header
1856			 * means that we cannot read the next headers and
1857			 * in consequence we cannot find the termination.
1858 */
1859 GJ_DEBUG(0,
1860 "Error while reading record header from %s.",
1861 cp->provider->name);
1862 break;
1863 }
1864
1865 error = g_journal_record_header_decode(buf, &rhdr);
1866 if (error != 0) {
1867 GJ_DEBUG(2, "Not a record header at %jd (error=%d).",
1868 (intmax_t)offset, error);
1869 /*
1870 * This is not a record header.
1871			 * If we are lucky, this is the next journal header.
1872 */
1873 error = g_journal_header_decode(buf, &jhdr);
1874 if (error != 0) {
1875 GJ_DEBUG(1, "Not a journal header at %jd (error=%d).",
1876 (intmax_t)offset, error);
1877 /*
1878				 * Nope, this is not a journal header, which
1879				 * basically means that the journal is not
1880				 * terminated properly.
1881 */
1882 error = ENOENT;
1883 break;
1884 }
1885 /*
1886			 * Ok. This is the header of _some_ journal. Now we need
1887			 * to verify that it is the header of the _next_ journal.
1888 */
1889 if (jhdr.jh_journal_id != id) {
1890 GJ_DEBUG(1, "Journal ID mismatch at %jd "
1891 "(0x%08x != 0x%08x).", (intmax_t)offset,
1892 (u_int)jhdr.jh_journal_id, (u_int)id);
1893 error = ENOENT;
1894 break;
1895 }
1896
1897 /* Found termination. */
1898 found++;
1899 GJ_DEBUG(1, "Found termination at %jd (id=0x%08x).",
1900 (intmax_t)offset, (u_int)id);
1901 sc->sc_active.jj_offset = offset;
1902 sc->sc_journal_offset =
1903 offset + cp->provider->sectorsize;
1904 sc->sc_journal_id = id;
1905 id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1906
1907 while ((tbp = fbp) != NULL) {
1908 fbp = tbp->bio_next;
1909 GJ_LOGREQ(3, tbp, "Adding request.");
1910 g_journal_insert_bio(&sc->sc_inactive.jj_queue,
1911 tbp, M_WAITOK);
1912 }
1913
1914 /* Skip journal's header. */
1915 offset += cp->provider->sectorsize;
1916 continue;
1917 }
1918
1919 /* Skip record's header. */
1920 offset += cp->provider->sectorsize;
1921
1922 /*
1923 * Add information about every record entry to the inactive
1924 * queue.
1925 */
1926 if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1927 MD5Init(&ctx);
1928 for (i = 0; i < rhdr.jrh_nentries; i++) {
1929 ent = &rhdr.jrh_entries[i];
1930 GJ_DEBUG(3, "Insert entry: %jd %jd.",
1931 (intmax_t)ent->je_offset, (intmax_t)ent->je_length);
1932 g_journal_insert(&fbp, ent->je_offset,
1933 ent->je_offset + ent->je_length, ent->je_joffset,
1934 NULL, M_WAITOK);
1935 if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1936 u_char *buf2;
1937
1938 /*
1939 * TODO: Should use faster function (like
1940 * g_journal_sync_read()).
1941 */
1942 buf2 = g_read_data(cp, offset, ent->je_length,
1943 NULL);
1944 if (buf2 == NULL)
1945 GJ_DEBUG(0, "Cannot read data at %jd.",
1946 (intmax_t)offset);
1947 else {
1948 MD5Update(&ctx, buf2, ent->je_length);
1949 g_free(buf2);
1950 }
1951 }
1952 /* Skip entry's data. */
1953 offset += ent->je_length;
1954 }
1955 if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1956 MD5Final(sum, &ctx);
1957 if (bcmp(sum, rhdr.jrh_sum, sizeof(rhdr.jrh_sum)) != 0) {
1958 GJ_DEBUG(0, "MD5 hash mismatch at %jd!",
1959 (intmax_t)offset);
1960 }
1961 }
1962 }
1963end:
1964 gj_free(bp->bio_data, cp->provider->sectorsize);
1965 g_destroy_bio(bp);
1966
1967 /* Remove bios from unterminated journal. */
1968 while ((tbp = fbp) != NULL) {
1969 fbp = tbp->bio_next;
1970 g_destroy_bio(tbp);
1971 }
1972
1973 if (found < 1 && joffset > 0) {
1974 GJ_DEBUG(0, "Journal on %s is broken/corrupted. Initializing.",
1975 sc->sc_name);
1976 while ((tbp = sc->sc_inactive.jj_queue) != NULL) {
1977 sc->sc_inactive.jj_queue = tbp->bio_next;
1978 g_destroy_bio(tbp);
1979 }
1980 g_journal_initialize(sc);
1981 g_journal_mark_as_dirty(sc);
1982 } else {
1983 GJ_DEBUG(0, "Journal %s consistent.", sc->sc_name);
1984 g_journal_copy_start(sc);
1985 }
1986}
1987
1988/*
1989 * Wait for requests.
1990 * If we have requests in the current queue, flush them 3 seconds after the
1991 * last flush. This way we don't wait forever (or for a journal switch) before
1992 * storing partially filled records in the journal.
1993 */
1994static void
1995g_journal_wait(struct g_journal_softc *sc, time_t last_write)
1996{
1997 int error, timeout;
1998
1999 GJ_DEBUG(3, "%s: enter", __func__);
2000 if (sc->sc_current_count == 0) {
2001 if (g_journal_debug < 2)
2002 msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", 0);
2003 else {
2004 /*
2005 * If we have debug turned on, show number of elements
2006 * in various queues.
2007 */
2008 for (;;) {
2009 error = msleep(sc, &sc->sc_mtx, PRIBIO,
2010 "gj:work", hz * 3);
2011 if (error == 0) {
2012 mtx_unlock(&sc->sc_mtx);
2013 break;
2014 }
2015 GJ_DEBUG(3, "Report: current count=%d",
2016 sc->sc_current_count);
2017 GJ_DEBUG(3, "Report: flush count=%d",
2018 sc->sc_flush_count);
2019 GJ_DEBUG(3, "Report: flush in progress=%d",
2020 sc->sc_flush_in_progress);
2021 GJ_DEBUG(3, "Report: copy in progress=%d",
2022 sc->sc_copy_in_progress);
2023 GJ_DEBUG(3, "Report: delayed=%d",
2024 sc->sc_delayed_count);
2025 }
2026 }
2027 GJ_DEBUG(3, "%s: exit 1", __func__);
2028 return;
2029 }
2030
2031 /*
2032	 * Flush even partially filled records every 3 seconds.
2033 */
2034 timeout = (last_write + 3 - time_second) * hz;
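	/*
	 * The deadline is 3 seconds after the last write; a non-positive
	 * number of ticks means it has already passed, so flush right away.
	 */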
2035 if (timeout <= 0) {
2036 mtx_unlock(&sc->sc_mtx);
2037 g_journal_flush(sc);
2038 g_journal_flush_send(sc);
2039 GJ_DEBUG(3, "%s: exit 2", __func__);
2040 return;
2041 }
2042 error = msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", timeout);
2043 if (error == EWOULDBLOCK)
2044 g_journal_flush_send(sc);
2045 GJ_DEBUG(3, "%s: exit 3", __func__);
2046}
2047
2048/*
2049 * Worker thread.
2050 */
2051static void
2052g_journal_worker(void *arg)
2053{
2054 struct g_journal_softc *sc;
2055 struct g_geom *gp;
2056 struct g_provider *pp;
2057 struct bio *bp;
2058 time_t last_write;
2059 int type;
2060
2061 mtx_lock_spin(&sched_lock);
2062 sched_prio(curthread, PRIBIO);
2063 mtx_unlock_spin(&sched_lock);
2064
2065 sc = arg;
2066
2067 if (sc->sc_flags & GJF_DEVICE_CLEAN) {
2068 GJ_DEBUG(0, "Journal %s clean.", sc->sc_name);
2069 g_journal_initialize(sc);
2070 } else {
2071 g_journal_sync(sc);
2072 }
2073 /*
2074 * Check if we can use BIO_FLUSH.
2075 */
2076 sc->sc_bio_flush = 0;
2077 if (g_io_flush(sc->sc_jconsumer) == 0) {
2078 sc->sc_bio_flush |= GJ_FLUSH_JOURNAL;
2079 GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
2080 sc->sc_jconsumer->provider->name);
2081 } else {
2082 GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
2083 sc->sc_jconsumer->provider->name);
2084 }
2085 if (sc->sc_jconsumer != sc->sc_dconsumer) {
2086 if (g_io_flush(sc->sc_dconsumer) == 0) {
2087 sc->sc_bio_flush |= GJ_FLUSH_DATA;
2088 GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
2089 sc->sc_dconsumer->provider->name);
2090 } else {
2091 GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
2092 sc->sc_dconsumer->provider->name);
2093 }
2094 }
2095
2096 gp = sc->sc_geom;
2097 g_topology_lock();
2098 pp = g_new_providerf(gp, "%s.journal", sc->sc_name);
2099 KASSERT(pp != NULL, ("Cannot create %s.journal.", sc->sc_name));
2100 pp->mediasize = sc->sc_mediasize;
2101 /*
2102	 * There could be a problem when the data and journal providers
2103	 * have different sector sizes, but such a scenario is prevented at
2104	 * journal creation time.
2105 */
2106 pp->sectorsize = sc->sc_sectorsize;
2107 g_error_provider(pp, 0);
2108 g_topology_unlock();
2109 last_write = time_second;
2110
2111 for (;;) {
2112 /* Get first request from the queue. */
2113 mtx_lock(&sc->sc_mtx);
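		/*
		 * Requests completed by g_journal_std_done() (the back queue)
		 * take precedence over new regular requests, so internal
		 * journal I/O is always finished first.
		 */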
2114 bp = bioq_first(&sc->sc_back_queue);
2115 if (bp != NULL)
2116 type = (bp->bio_cflags & GJ_BIO_MASK);
2117 if (bp == NULL) {
2118 bp = bioq_first(&sc->sc_regular_queue);
2119 if (bp != NULL)
2120 type = GJ_BIO_REGULAR;
2121 }
2122 if (bp == NULL) {
2123try_switch:
2124 if ((sc->sc_flags & GJF_DEVICE_SWITCH) ||
2125 (sc->sc_flags & GJF_DEVICE_DESTROY)) {
2126 if (sc->sc_current_count > 0) {
2127 mtx_unlock(&sc->sc_mtx);
2128 g_journal_flush(sc);
2129 g_journal_flush_send(sc);
2130 continue;
2131 }
2132 if (sc->sc_flush_in_progress > 0)
2133 goto sleep;
2134 if (sc->sc_copy_in_progress > 0)
2135 goto sleep;
2136 }
2137 if (sc->sc_flags & GJF_DEVICE_SWITCH) {
2138 mtx_unlock(&sc->sc_mtx);
2139 g_journal_switch(sc);
2140 wakeup(&sc->sc_journal_copying);
2141 continue;
2142 }
2143 if (sc->sc_flags & GJF_DEVICE_DESTROY) {
2144 GJ_DEBUG(1, "Shutting down worker "
2145 "thread for %s.", gp->name);
2146 sc->sc_worker = NULL;
2147 wakeup(&sc->sc_worker);
2148 mtx_unlock(&sc->sc_mtx);
2149 kthread_exit(0);
2150 }
2151sleep:
2152 g_journal_wait(sc, last_write);
2153 continue;
2154 }
2155 /*
2156		 * If we're in the middle of a journal switch, we need to
2157		 * delay all new write requests until it's done.
2158 */
2159 if ((sc->sc_flags & GJF_DEVICE_SWITCH) &&
2160 type == GJ_BIO_REGULAR && bp->bio_cmd == BIO_WRITE) {
2161 GJ_LOGREQ(2, bp, "WRITE on SWITCH");
2162 goto try_switch;
2163 }
2164 if (type == GJ_BIO_REGULAR)
2165 bioq_remove(&sc->sc_regular_queue, bp);
2166 else
2167 bioq_remove(&sc->sc_back_queue, bp);
2168 mtx_unlock(&sc->sc_mtx);
2169 switch (type) {
2170 case GJ_BIO_REGULAR:
2171 /* Regular request. */
2172 switch (bp->bio_cmd) {
2173 case BIO_READ:
2174 g_journal_read(sc, bp, bp->bio_offset,
2175 bp->bio_offset + bp->bio_length);
2176 break;
2177 case BIO_WRITE:
2178 last_write = time_second;
2179 g_journal_add_request(sc, bp);
2180 g_journal_flush_send(sc);
2181 break;
2182 default:
2183 panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2184 }
2185 break;
2186 case GJ_BIO_COPY:
2187 switch (bp->bio_cmd) {
2188 case BIO_READ:
2189 if (g_journal_copy_read_done(bp))
2190 g_journal_copy_send(sc);
2191 break;
2192 case BIO_WRITE:
2193 g_journal_copy_write_done(bp);
2194 g_journal_copy_send(sc);
2195 break;
2196 default:
2197 panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2198 }
2199 break;
2200 case GJ_BIO_JOURNAL:
2201 g_journal_flush_done(bp);
2202 g_journal_flush_send(sc);
2203 break;
2204 case GJ_BIO_READ:
2205 default:
2206 panic("Invalid bio (%d).", type);
2207 }
2208 }
2209}
2210
2211static void
2212g_journal_destroy_event(void *arg, int flags __unused)
2213{
2214 struct g_journal_softc *sc;
2215
2216 g_topology_assert();
2217 sc = arg;
2218 g_journal_destroy(sc);
2219}
2220
2221static void
2222g_journal_timeout(void *arg)
2223{
2224 struct g_journal_softc *sc;
2225
2226 sc = arg;
2227 GJ_DEBUG(0, "Timeout. Journal %s cannot be completed.",
2228 sc->sc_geom->name);
2229 g_post_event(g_journal_destroy_event, sc, M_NOWAIT, NULL);
2230}
2231
2232static struct g_geom *
2233g_journal_create(struct g_class *mp, struct g_provider *pp,
2234 const struct g_journal_metadata *md)
2235{
2236 struct g_journal_softc *sc;
2237 struct g_geom *gp;
2238 struct g_consumer *cp;
2239 int error;
2240
2241 g_topology_assert();
2242 /*
2243 * There are two possibilities:
2244 * 1. Data and both journals are on the same provider.
2245	 * 2. Data and journals are all on separate providers.
2246 */
2247 /* Look for journal device with the same ID. */
2248 LIST_FOREACH(gp, &mp->geom, geom) {
2249 sc = gp->softc;
2250 if (sc == NULL)
2251 continue;
2252 if (sc->sc_id == md->md_id)
2253 break;
2254 }
2255 if (gp == NULL)
2256 sc = NULL;
2257 else if (sc != NULL && (sc->sc_type & md->md_type) != 0) {
2258 GJ_DEBUG(1, "Journal device %u already configured.", sc->sc_id);
2259 return (NULL);
2260 }
2261 if (md->md_type == 0 || (md->md_type & ~GJ_TYPE_COMPLETE) != 0) {
2262 GJ_DEBUG(0, "Invalid type on %s.", pp->name);
2263 return (NULL);
2264 }
2265 if (md->md_type & GJ_TYPE_DATA) {
2266 GJ_DEBUG(0, "Journal %u: %s contains data.", md->md_id,
2267 pp->name);
2268 }
2269 if (md->md_type & GJ_TYPE_JOURNAL) {
2270 GJ_DEBUG(0, "Journal %u: %s contains journal.", md->md_id,
2271 pp->name);
2272 }
2273
2274 if (sc == NULL) {
2275 /* Action geom. */
2276 sc = malloc(sizeof(*sc), M_JOURNAL, M_WAITOK | M_ZERO);
2277 sc->sc_id = md->md_id;
2278 sc->sc_type = 0;
2279 sc->sc_flags = 0;
2280 sc->sc_worker = NULL;
2281
2282 gp = g_new_geomf(mp, "gjournal %u", sc->sc_id);
2283 gp->start = g_journal_start;
2284 gp->orphan = g_journal_orphan;
2285 gp->access = g_journal_access;
2286 gp->softc = sc;
2287 sc->sc_geom = gp;
2288
2289 mtx_init(&sc->sc_mtx, "gjournal", NULL, MTX_DEF);
2290
2291 bioq_init(&sc->sc_back_queue);
2292 bioq_init(&sc->sc_regular_queue);
2293 bioq_init(&sc->sc_delayed_queue);
2294 sc->sc_delayed_count = 0;
2295 sc->sc_current_queue = NULL;
2296 sc->sc_current_count = 0;
2297 sc->sc_flush_queue = NULL;
2298 sc->sc_flush_count = 0;
2299 sc->sc_flush_in_progress = 0;
2300 sc->sc_copy_queue = NULL;
2301 sc->sc_copy_in_progress = 0;
2302 sc->sc_inactive.jj_queue = NULL;
2303 sc->sc_active.jj_queue = NULL;
2304
2305 callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
2306 if (md->md_type != GJ_TYPE_COMPLETE) {
2307 /*
2308 * Journal and data are on separate providers.
2309 * At this point we have only one of them.
2310			 * We set up a timeout in case the other part does not
2311			 * appear, so we won't wait forever.
2312 */
2313 callout_reset(&sc->sc_callout, 5 * hz,
2314 g_journal_timeout, sc);
2315 }
2316 }
2317
2318	/* Remember the type of the data provider. */
2319 if (md->md_type & GJ_TYPE_DATA)
2320 sc->sc_orig_type = md->md_type;
2321 sc->sc_type |= md->md_type;
2322 cp = NULL;
2323
2324 if (md->md_type & GJ_TYPE_DATA) {
2325 if (md->md_flags & GJ_FLAG_CLEAN)
2326 sc->sc_flags |= GJF_DEVICE_CLEAN;
2327 if (md->md_flags & GJ_FLAG_CHECKSUM)
2328 sc->sc_flags |= GJF_DEVICE_CHECKSUM;
2329 cp = g_new_consumer(gp);
2330 error = g_attach(cp, pp);
2331 KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
2332 pp->name, error));
2333 error = g_access(cp, 1, 1, 1);
2334 if (error != 0) {
2335 GJ_DEBUG(0, "Cannot access %s (error=%d).", pp->name,
2336 error);
2337 g_journal_destroy(sc);
2338 return (NULL);
2339 }
2340 sc->sc_dconsumer = cp;
2341 sc->sc_mediasize = pp->mediasize - pp->sectorsize;
2342 sc->sc_sectorsize = pp->sectorsize;
2343 sc->sc_jstart = md->md_jstart;
2344 sc->sc_jend = md->md_jend;
2345 if (md->md_provider[0] != '\0')
2346 sc->sc_flags |= GJF_DEVICE_HARDCODED;
2347 sc->sc_journal_offset = md->md_joffset;
2348 sc->sc_journal_id = md->md_jid;
2349 sc->sc_journal_previous_id = md->md_jid;
2350 }
2351 if (md->md_type & GJ_TYPE_JOURNAL) {
2352 if (cp == NULL) {
2353 cp = g_new_consumer(gp);
2354 error = g_attach(cp, pp);
2355 KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
2356 pp->name, error));
2357 error = g_access(cp, 1, 1, 1);
2358 if (error != 0) {
2359 GJ_DEBUG(0, "Cannot access %s (error=%d).",
2360 pp->name, error);
2361 g_journal_destroy(sc);
2362 return (NULL);
2363 }
2364 } else {
2365 /*
2366			 * The journal is on the same provider as the data, which
2367			 * means that the data provider ends where the journal starts.
2368 */
2369 sc->sc_mediasize = md->md_jstart;
2370 }
2371 sc->sc_jconsumer = cp;
2372 }
2373
2374 if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE) {
2375 /* Journal is not complete yet. */
2376 return (gp);
2377 } else {
2378 /* Journal complete, cancel timeout. */
2379 callout_drain(&sc->sc_callout);
2380 }
2381
2382 error = kthread_create(g_journal_worker, sc, &sc->sc_worker, 0, 0,
2383 "g_journal %s", sc->sc_name);
2384 if (error != 0) {
2385 GJ_DEBUG(0, "Cannot create worker thread for %s.journal.",
2386 sc->sc_name);
2387 g_journal_destroy(sc);
2388 return (NULL);
2389 }
2390
2391 return (gp);
2392}
2393
2394static void
2395g_journal_destroy_consumer(void *arg, int flags __unused)
2396{
2397 struct g_consumer *cp;
2398
2399 g_topology_assert();
2400 cp = arg;
2401 g_detach(cp);
2402 g_destroy_consumer(cp);
2403}
2404
2405static int
2406g_journal_destroy(struct g_journal_softc *sc)
2407{
2408 struct g_geom *gp;
2409 struct g_provider *pp;
2410 struct g_consumer *cp;
2411
2412 g_topology_assert();
2413
2414 if (sc == NULL)
2415 return (ENXIO);
2416
2417 gp = sc->sc_geom;
2418 pp = LIST_FIRST(&gp->provider);
2419 if (pp != NULL) {
2420 if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) {
2421 GJ_DEBUG(1, "Device %s is still open (r%dw%de%d).",
2422 pp->name, pp->acr, pp->acw, pp->ace);
2423 return (EBUSY);
2424 }
2425 g_error_provider(pp, ENXIO);
2426
2427 g_journal_flush(sc);
2428 g_journal_flush_send(sc);
2429 g_journal_switch(sc);
2430 }
2431
2432 sc->sc_flags |= (GJF_DEVICE_DESTROY | GJF_DEVICE_CLEAN);
2433
2434 g_topology_unlock();
2435 callout_drain(&sc->sc_callout);
2436 mtx_lock(&sc->sc_mtx);
2437 wakeup(sc);
2438 while (sc->sc_worker != NULL)
2439 msleep(&sc->sc_worker, &sc->sc_mtx, PRIBIO, "gj:destroy", 0);
2440 mtx_unlock(&sc->sc_mtx);
2441
2442 if (pp != NULL) {
2443 GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
2444 g_journal_metadata_update(sc);
2445 g_topology_lock();
2446 pp->flags |= G_PF_WITHER;
2447 g_orphan_provider(pp, ENXIO);
2448 } else {
2449 g_topology_lock();
2450 }
2451 mtx_destroy(&sc->sc_mtx);
2452
2453 if (sc->sc_current_count != 0) {
2454 GJ_DEBUG(0, "Warning! Number of current requests %d.",
2455 sc->sc_current_count);
2456 }
2457
2458 LIST_FOREACH(cp, &gp->consumer, consumer) {
2459 if (cp->acr + cp->acw + cp->ace > 0)
2460 g_access(cp, -1, -1, -1);
2461 /*
2462		 * We keep all consumers open for writing, so if we detached
2463		 * and destroyed the consumer here, the provider would be offered
2464		 * for tasting again and the journal would be restarted.
2465		 * Sending an event here prevents this from happening.
2466 */
2467 g_post_event(g_journal_destroy_consumer, cp, M_WAITOK, NULL);
2468 }
2469 gp->softc = NULL;
2470 g_wither_geom(gp, ENXIO);
2471 free(sc, M_JOURNAL);
2472 return (0);
2473}
2474
2475static void
2476g_journal_taste_orphan(struct g_consumer *cp)
2477{
2478
2479 KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2480 cp->provider->name));
2481}
2482
2483static struct g_geom *
2484g_journal_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
2485{
2486 struct g_journal_metadata md;
2487 struct g_consumer *cp;
2488 struct g_geom *gp;
2489 int error;
2490
2491 g_topology_assert();
2492 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
2493 GJ_DEBUG(2, "Tasting %s.", pp->name);
2494 if (pp->geom->class == mp)
2495 return (NULL);
2496
2497 gp = g_new_geomf(mp, "journal:taste");
2498	/* This orphan function should never be called. */
2499 gp->orphan = g_journal_taste_orphan;
2500 cp = g_new_consumer(gp);
2501 g_attach(cp, pp);
2502 error = g_journal_metadata_read(cp, &md);
2503 g_detach(cp);
2504 g_destroy_consumer(cp);
2505 g_destroy_geom(gp);
2506 if (error != 0)
2507 return (NULL);
2508 gp = NULL;
2509
2510 if (md.md_provider[0] != '\0' && strcmp(md.md_provider, pp->name) != 0)
2511 return (NULL);
2512 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
2513 return (NULL);
2514 if (g_journal_debug >= 2)
2515 journal_metadata_dump(&md);
2516
2517 gp = g_journal_create(mp, pp, &md);
2518 return (gp);
2519}
2520
2521static struct g_journal_softc *
2522g_journal_find_device(struct g_class *mp, const char *name)
2523{
2524 struct g_journal_softc *sc;
2525 struct g_geom *gp;
2526 struct g_provider *pp;
2527
2528 if (strncmp(name, "/dev/", 5) == 0)
2529 name += 5;
2530 LIST_FOREACH(gp, &mp->geom, geom) {
2531 sc = gp->softc;
2532 if (sc == NULL)
2533 continue;
2534 if (sc->sc_flags & GJF_DEVICE_DESTROY)
2535 continue;
2536 if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
2537 continue;
2538 pp = LIST_FIRST(&gp->provider);
2539 if (strcmp(sc->sc_name, name) == 0)
2540 return (sc);
2541 if (pp != NULL && strcmp(pp->name, name) == 0)
2542 return (sc);
2543 }
2544 return (NULL);
2545}
2546
2547static void
2548g_journal_ctl_destroy(struct gctl_req *req, struct g_class *mp)
2549{
2550 struct g_journal_softc *sc;
2551 const char *name;
2552 char param[16];
2553 int *nargs;
2554 int error, i;
2555
2556 g_topology_assert();
2557
2558 nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
2559 if (nargs == NULL) {
2560 gctl_error(req, "No '%s' argument.", "nargs");
2561 return;
2562 }
2563 if (*nargs <= 0) {
2564 gctl_error(req, "Missing device(s).");
2565 return;
2566 }
2567
2568 for (i = 0; i < *nargs; i++) {
2569 snprintf(param, sizeof(param), "arg%d", i);
2570 name = gctl_get_asciiparam(req, param);
2571 if (name == NULL) {
2572 gctl_error(req, "No 'arg%d' argument.", i);
2573 return;
2574 }
2575 sc = g_journal_find_device(mp, name);
2576 if (sc == NULL) {
2577 gctl_error(req, "No such device: %s.", name);
2578 return;
2579 }
2580 error = g_journal_destroy(sc);
2581 if (error != 0) {
2582 gctl_error(req, "Cannot destroy device %s (error=%d).",
2583 LIST_FIRST(&sc->sc_geom->provider)->name, error);
2584 return;
2585 }
2586 }
2587}
2588
2589static void
2590g_journal_ctl_sync(struct gctl_req *req __unused, struct g_class *mp __unused)
2591{
2592
2593 g_topology_assert();
2594 g_topology_unlock();
2595 g_journal_sync_requested++;
2596 wakeup(&g_journal_switcher_state);
2597 while (g_journal_sync_requested > 0)
2598 tsleep(&g_journal_sync_requested, PRIBIO, "j:sreq", hz / 2);
2599 g_topology_lock();
2600}
2601
2602static void
2603g_journal_config(struct gctl_req *req, struct g_class *mp, const char *verb)
2604{
2605 uint32_t *version;
2606
2607 g_topology_assert();
2608
2609 version = gctl_get_paraml(req, "version", sizeof(*version));
2610 if (version == NULL) {
2611 gctl_error(req, "No '%s' argument.", "version");
2612 return;
2613 }
2614 if (*version != G_JOURNAL_VERSION) {
2615 gctl_error(req, "Userland and kernel parts are out of sync.");
2616 return;
2617 }
2618
2619 if (strcmp(verb, "destroy") == 0 || strcmp(verb, "stop") == 0) {
2620 g_journal_ctl_destroy(req, mp);
2621 return;
2622 } else if (strcmp(verb, "sync") == 0) {
2623 g_journal_ctl_sync(req, mp);
2624 return;
2625 }
2626
2627 gctl_error(req, "Unknown verb.");
2628}
2629
2630static void
2631g_journal_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2632 struct g_consumer *cp, struct g_provider *pp)
2633{
2634 struct g_journal_softc *sc;
2635
2636 g_topology_assert();
2637
2638 sc = gp->softc;
2639 if (sc == NULL)
2640 return;
2641 if (pp != NULL) {
2642 /* Nothing here. */
2643 } else if (cp != NULL) {
2644 int first = 1;
2645
2646 sbuf_printf(sb, "%s<Role>", indent);
2647 if (cp == sc->sc_dconsumer) {
2648 sbuf_printf(sb, "Data");
2649 first = 0;
2650 }
2651 if (cp == sc->sc_jconsumer) {
2652 if (!first)
2653 sbuf_printf(sb, ",");
2654 sbuf_printf(sb, "Journal");
2655 }
2656 sbuf_printf(sb, "</Role>\n");
2657 if (cp == sc->sc_jconsumer) {
2658 sbuf_printf(sb, "<Jstart>%jd</Jstart>",
2659 (intmax_t)sc->sc_jstart);
2660 sbuf_printf(sb, "<Jend>%jd</Jend>",
2661 (intmax_t)sc->sc_jend);
2662 }
2663 } else {
2664 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
2665 }
2666}
2667
2668static eventhandler_tag g_journal_event_shutdown = NULL;
2669static eventhandler_tag g_journal_event_lowmem = NULL;
2670
2671static void
2672g_journal_shutdown(void *arg, int howto __unused)
2673{
2674 struct g_class *mp;
2675 struct g_geom *gp, *gp2;
2676
2677 if (panicstr != NULL)
2678 return;
2679 mp = arg;
2680 DROP_GIANT();
2681 g_topology_lock();
2682 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
2683 if (gp->softc == NULL)
2684 continue;
2685 GJ_DEBUG(0, "Shutting down geom %s.", gp->name);
2686 g_journal_destroy(gp->softc);
2687 }
2688 g_topology_unlock();
2689 PICKUP_GIANT();
2690}
2691
2692/*
2693 * Free cached requests from the inactive queue in case of low memory.
2694 * We free GJ_FREE_AT_ONCE elements at once.
2695 */
2696#define GJ_FREE_AT_ONCE 4
2697static void
2698g_journal_lowmem(void *arg, int howto __unused)
2699{
2700 struct g_journal_softc *sc;
2701 struct g_class *mp;
2702 struct g_geom *gp;
2703 struct bio *bp;
2704 u_int nfree = GJ_FREE_AT_ONCE;
2705
2706 g_journal_stats_low_mem++;
2707 mp = arg;
2708 DROP_GIANT();
2709 g_topology_lock();
2710 LIST_FOREACH(gp, &mp->geom, geom) {
2711 sc = gp->softc;
2712 if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY))
2713 continue;
2714 mtx_lock(&sc->sc_mtx);
2715 for (bp = sc->sc_inactive.jj_queue; nfree > 0 && bp != NULL;
2716 nfree--, bp = bp->bio_next) {
2717 /*
2718			 * It is safe to free the bio_data here, because:
2719			 * 1. If bio_data is NULL it will be read from the
2720			 *    inactive journal.
2721			 * 2. If bp is sent down, it is first removed from the
2722			 *    inactive queue, so it's impossible to free the
2723			 *    data from under an in-flight bio.
2724			 * On the other hand, freeing elements from the active
2725			 * queue is not safe.
2726 */
2727 if (bp->bio_data != NULL) {
2728 GJ_DEBUG(2, "Freeing data from %s.",
2729 sc->sc_name);
2730 gj_free(bp->bio_data, bp->bio_length);
2731 bp->bio_data = NULL;
2732 }
2733 }
2734 mtx_unlock(&sc->sc_mtx);
2735 if (nfree == 0)
2736 break;
2737 }
2738 g_topology_unlock();
2739 PICKUP_GIANT();
2740}
2741
2742static void g_journal_switcher(void *arg);
2743
2744static void
2745g_journal_init(struct g_class *mp)
2746{
2747 int error;
2748
2749	/* Pick a conservative value if the provided value sucks. */
2750 if (g_journal_cache_divisor <= 0 ||
2751 (vm_kmem_size / g_journal_cache_divisor == 0)) {
2752 g_journal_cache_divisor = 5;
2753 }
2754 if (g_journal_cache_limit > 0) {
2755 g_journal_cache_limit = vm_kmem_size / g_journal_cache_divisor;
2756 g_journal_cache_low =
2757 (g_journal_cache_limit / 100) * g_journal_cache_switch;
2758 }
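	/*
	 * For example, with vm_kmem_size of 320MB the fallback divisor of 5
	 * yields a 64MB cache limit and, at the default 90% switch level,
	 * a low watermark of about 57.6MB at which the switcher is woken up.
	 */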
2759 g_journal_event_shutdown = EVENTHANDLER_REGISTER(shutdown_post_sync,
2760 g_journal_shutdown, mp, EVENTHANDLER_PRI_FIRST);
2761 if (g_journal_event_shutdown == NULL)
2762 GJ_DEBUG(0, "Warning! Cannot register shutdown event.");
2763 g_journal_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
2764 g_journal_lowmem, mp, EVENTHANDLER_PRI_FIRST);
2765 if (g_journal_event_lowmem == NULL)
2766 GJ_DEBUG(0, "Warning! Cannot register lowmem event.");
2767 error = kthread_create(g_journal_switcher, mp, NULL, 0, 0,
2768 "g_journal switcher");
2769 KASSERT(error == 0, ("Cannot create switcher thread."));
2770}
2771
2772static void
2773g_journal_fini(struct g_class *mp)
2774{
2775
2776 if (g_journal_event_shutdown != NULL) {
2777 EVENTHANDLER_DEREGISTER(shutdown_post_sync,
2778 g_journal_event_shutdown);
2779 }
2780 if (g_journal_event_lowmem != NULL)
2781 EVENTHANDLER_DEREGISTER(vm_lowmem, g_journal_event_lowmem);
2782 g_journal_switcher_state = GJ_SWITCHER_DIE;
2783 wakeup(&g_journal_switcher_state);
2784 while (g_journal_switcher_state != GJ_SWITCHER_DIED)
2785 tsleep(&g_journal_switcher_state, PRIBIO, "jfini:wait", hz / 5);
2786 GJ_DEBUG(1, "Switcher died.");
2787}
2788
2789DECLARE_GEOM_CLASS(g_journal_class, g_journal);
2790
2791static const struct g_journal_desc *
2792g_journal_find_desc(const char *fstype)
2793{
2794 const struct g_journal_desc *desc;
2795 int i;
2796
2797 for (desc = g_journal_filesystems[i = 0]; desc != NULL;
2798 desc = g_journal_filesystems[++i]) {
2799 if (strcmp(desc->jd_fstype, fstype) == 0)
2800 break;
2801 }
2802 return (desc);
2803}
2804
2805static void
2806g_journal_switch_wait(struct g_journal_softc *sc)
2807{
2808 struct bintime bt;
2809
2810 mtx_assert(&sc->sc_mtx, MA_OWNED);
2811 if (g_journal_debug >= 2) {
2812 if (sc->sc_flush_in_progress > 0) {
2813 GJ_DEBUG(2, "%d requests flushing.",
2814 sc->sc_flush_in_progress);
2815 }
2816 if (sc->sc_copy_in_progress > 0) {
2817 GJ_DEBUG(2, "%d requests copying.",
2818 sc->sc_copy_in_progress);
2819 }
2820 if (sc->sc_flush_count > 0) {
2821 GJ_DEBUG(2, "%d requests to flush.",
2822 sc->sc_flush_count);
2823 }
2824 if (sc->sc_delayed_count > 0) {
2825 GJ_DEBUG(2, "%d requests delayed.",
2826 sc->sc_delayed_count);
2827 }
2828 }
2829 g_journal_stats_switches++;
2830 if (sc->sc_copy_in_progress > 0)
2831 g_journal_stats_wait_for_copy++;
2832 GJ_TIMER_START(1, &bt);
2833 sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
2834 sc->sc_flags |= GJF_DEVICE_SWITCH;
2835 wakeup(sc);
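	/*
	 * The worker thread notices GJF_DEVICE_SWITCH, performs the actual
	 * switch and wakes us up through sc_journal_copying when it is done.
	 */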
2836 while (sc->sc_flags & GJF_DEVICE_SWITCH) {
2837 msleep(&sc->sc_journal_copying, &sc->sc_mtx, PRIBIO,
2838 "gj:switch", 0);
2839 }
2840 GJ_TIMER_STOP(1, &bt, "Switch time of %s", sc->sc_name);
2841}
2842
2843static void
2844g_journal_do_switch(struct g_class *classp, struct thread *td)
2845{
2846 struct g_journal_softc *sc;
2847 const struct g_journal_desc *desc;
2848 struct g_geom *gp;
2849 struct mount *mp;
2850 struct bintime bt;
2851 char *mountpoint;
2852 int error, vfslocked;
2853
2854 DROP_GIANT();
2855 g_topology_lock();
2856 LIST_FOREACH(gp, &classp->geom, geom) {
2857 sc = gp->softc;
2858 if (sc == NULL)
2859 continue;
2860 if (sc->sc_flags & GJF_DEVICE_DESTROY)
2861 continue;
2862 if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
2863 continue;
2864 mtx_lock(&sc->sc_mtx);
2865 sc->sc_flags |= GJF_DEVICE_BEFORE_SWITCH;
2866 mtx_unlock(&sc->sc_mtx);
2867 }
2868 g_topology_unlock();
2869 PICKUP_GIANT();
2870
2871 mtx_lock(&mountlist_mtx);
2872 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2873 if (mp->mnt_gjprovider == NULL)
2874 continue;
2875 if (mp->mnt_flag & MNT_RDONLY)
2876 continue;
2877 desc = g_journal_find_desc(mp->mnt_stat.f_fstypename);
2878 if (desc == NULL)
2879 continue;
2880 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
2881 continue;
2882 /* mtx_unlock(&mountlist_mtx) was done inside vfs_busy() */
2883
2884 DROP_GIANT();
2885 g_topology_lock();
2886 sc = g_journal_find_device(classp, mp->mnt_gjprovider);
2887 g_topology_unlock();
2888 PICKUP_GIANT();
2889
2890 if (sc == NULL) {
2891 GJ_DEBUG(0, "Cannot find journal geom for %s.",
2892 mp->mnt_gjprovider);
2893 goto next;
2894 } else if (JEMPTY(sc)) {
2895 mtx_lock(&sc->sc_mtx);
2896 sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
2897 mtx_unlock(&sc->sc_mtx);
2898 GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
2899 goto next;
2900 }
2901
2902 mountpoint = mp->mnt_stat.f_mntonname;
2903
2904 vfslocked = VFS_LOCK_GIANT(mp);
2905
2906 error = vn_start_write(NULL, &mp, V_WAIT);
2907 if (error != 0) {
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/module.h>
34#include <sys/limits.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/bio.h>
38#include <sys/sysctl.h>
39#include <sys/malloc.h>
40#include <sys/mount.h>
41#include <sys/eventhandler.h>
42#include <sys/proc.h>
43#include <sys/kthread.h>
44#include <sys/sched.h>
45#include <sys/taskqueue.h>
46#include <sys/vnode.h>
47#include <sys/sbuf.h>
48#ifdef GJ_MEMDEBUG
49#include <sys/stack.h>
50#include <sys/kdb.h>
51#endif
52#include <vm/vm.h>
53#include <vm/vm_kern.h>
54#include <geom/geom.h>
55
56#include <geom/journal/g_journal.h>
57
58
59/*
60 * On-disk journal format:
61 *
62 * JH - Journal header
63 * RH - Record header
64 *
65 * %%%%%% ****** +------+ +------+ ****** +------+ %%%%%%
66 * % JH % * RH * | Data | | Data | ... * RH * | Data | ... % JH % ...
67 * %%%%%% ****** +------+ +------+ ****** +------+ %%%%%%
68 *
69 */
70
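/*
 * Each header is written as a full provider sector (g_journal_write_header()
 * and the synchronization code operate on sectorsize units); the assertions
 * below only guarantee that both structures fit into a 512-byte sector.
 */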
71CTASSERT(sizeof(struct g_journal_header) <= 512);
72CTASSERT(sizeof(struct g_journal_record_header) <= 512);
73
74static MALLOC_DEFINE(M_JOURNAL, "journal_data", "GEOM_JOURNAL Data");
75static struct mtx g_journal_cache_mtx;
76MTX_SYSINIT(g_journal_cache, &g_journal_cache_mtx, "cache usage", MTX_DEF);
77
78const struct g_journal_desc *g_journal_filesystems[] = {
79 &g_journal_ufs,
80 NULL
81};
82
83SYSCTL_DECL(_kern_geom);
84
85int g_journal_debug = 0;
86TUNABLE_INT("kern.geom.journal.debug", &g_journal_debug);
87static u_int g_journal_switch_time = 10;
88static u_int g_journal_force_switch = 70;
89static u_int g_journal_parallel_flushes = 16;
90static u_int g_journal_parallel_copies = 16;
91static u_int g_journal_accept_immediately = 64;
92static u_int g_journal_record_entries = GJ_RECORD_HEADER_NENTRIES;
93static u_int g_journal_do_optimize = 1;
94
95SYSCTL_NODE(_kern_geom, OID_AUTO, journal, CTLFLAG_RW, 0, "GEOM_JOURNAL stuff");
96SYSCTL_INT(_kern_geom_journal, OID_AUTO, debug, CTLFLAG_RW, &g_journal_debug, 0,
97 "Debug level");
98SYSCTL_UINT(_kern_geom_journal, OID_AUTO, switch_time, CTLFLAG_RW,
99 &g_journal_switch_time, 0, "Switch journals every N seconds");
100SYSCTL_UINT(_kern_geom_journal, OID_AUTO, force_switch, CTLFLAG_RW,
101 &g_journal_force_switch, 0, "Force switch when journal is N%% full");
102SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_flushes, CTLFLAG_RW,
103 &g_journal_parallel_flushes, 0,
104    &g_journal_parallel_flushes, 0,
    "Number of flush I/O requests sent in parallel");
105SYSCTL_UINT(_kern_geom_journal, OID_AUTO, accept_immediately, CTLFLAG_RW,
106 &g_journal_accept_immediately, 0,
107    &g_journal_accept_immediately, 0,
    "Number of I/O requests accepted immediately");
108SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_copies, CTLFLAG_RW,
109 &g_journal_parallel_copies, 0,
110    "Number of copy I/O requests sent in parallel");
111static int
112g_journal_record_entries_sysctl(SYSCTL_HANDLER_ARGS)
113{
114 u_int entries;
115 int error;
116
117 entries = g_journal_record_entries;
118 error = sysctl_handle_int(oidp, &entries, sizeof(entries), req);
119 if (error != 0 || req->newptr == NULL)
120 return (error);
121 if (entries < 1 || entries > GJ_RECORD_HEADER_NENTRIES)
122 return (EINVAL);
123 g_journal_record_entries = entries;
124 return (0);
125}
126SYSCTL_PROC(_kern_geom_journal, OID_AUTO, record_entries,
127 CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_record_entries_sysctl, "I",
128    "Maximum number of entries in one journal record");
129SYSCTL_UINT(_kern_geom_journal, OID_AUTO, optimize, CTLFLAG_RW,
130 &g_journal_do_optimize, 0, "Try to combine bios on flush and copy");
131
132static u_int g_journal_cache_used = 0;
133static u_int g_journal_cache_limit = 64 * 1024 * 1024;
134TUNABLE_INT("kern.geom.journal.cache.limit", &g_journal_cache_limit);
135static u_int g_journal_cache_divisor = 2;
136TUNABLE_INT("kern.geom.journal.cache.divisor", &g_journal_cache_divisor);
137static u_int g_journal_cache_switch = 90;
138static u_int g_journal_cache_misses = 0;
139static u_int g_journal_cache_alloc_failures = 0;
140static u_int g_journal_cache_low = 0;
141
142SYSCTL_NODE(_kern_geom_journal, OID_AUTO, cache, CTLFLAG_RW, 0,
143 "GEOM_JOURNAL cache");
144SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, used, CTLFLAG_RD,
145 &g_journal_cache_used, 0, "Number of allocated bytes");
146static int
147g_journal_cache_limit_sysctl(SYSCTL_HANDLER_ARGS)
148{
149 u_int limit;
150 int error;
151
152 limit = g_journal_cache_limit;
153 error = sysctl_handle_int(oidp, &limit, sizeof(limit), req);
154 if (error != 0 || req->newptr == NULL)
155 return (error);
156 g_journal_cache_limit = limit;
157 g_journal_cache_low = (limit / 100) * g_journal_cache_switch;
158 return (0);
159}
160SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, limit,
161 CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_cache_limit_sysctl, "I",
162 "Maximum number of allocated bytes");
163SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, divisor, CTLFLAG_RDTUN,
164 &g_journal_cache_divisor, 0,
165 "(kmem_size / kern.geom.journal.cache.divisor) == cache size");
166static int
167g_journal_cache_switch_sysctl(SYSCTL_HANDLER_ARGS)
168{
169 u_int cswitch;
170 int error;
171
172 cswitch = g_journal_cache_switch;
173 error = sysctl_handle_int(oidp, &cswitch, sizeof(cswitch), req);
174 if (error != 0 || req->newptr == NULL)
175 return (error);
176 if (cswitch < 0 || cswitch > 100)
177 return (EINVAL);
178 g_journal_cache_switch = cswitch;
179 g_journal_cache_low = (g_journal_cache_limit / 100) * cswitch;
180 return (0);
181}
182SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, switch,
183 CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_cache_switch_sysctl, "I",
184 "Force switch when we hit this percent of cache use");
185SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, misses, CTLFLAG_RW,
186 &g_journal_cache_misses, 0, "Number of cache misses");
187SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, alloc_failures, CTLFLAG_RW,
188 &g_journal_cache_alloc_failures, 0, "Memory allocation failures");
189
190static u_long g_journal_stats_bytes_skipped = 0;
191static u_long g_journal_stats_combined_ios = 0;
192static u_long g_journal_stats_switches = 0;
193static u_long g_journal_stats_wait_for_copy = 0;
194static u_long g_journal_stats_journal_full = 0;
195static u_long g_journal_stats_low_mem = 0;
196
197SYSCTL_NODE(_kern_geom_journal, OID_AUTO, stats, CTLFLAG_RW, 0,
198 "GEOM_JOURNAL statistics");
199SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, skipped_bytes, CTLFLAG_RW,
200 &g_journal_stats_bytes_skipped, 0, "Number of skipped bytes");
201SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, combined_ios, CTLFLAG_RW,
202 &g_journal_stats_combined_ios, 0, "Number of combined I/O requests");
203SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, switches, CTLFLAG_RW,
204 &g_journal_stats_switches, 0, "Number of journal switches");
205SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, wait_for_copy, CTLFLAG_RW,
206 &g_journal_stats_wait_for_copy, 0, "Wait for journal copy on switch");
207SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, journal_full, CTLFLAG_RW,
208 &g_journal_stats_journal_full, 0,
209 "Number of times journal was almost full.");
210SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, low_mem, CTLFLAG_RW,
211 &g_journal_stats_low_mem, 0, "Number of times low_mem hook was called.");
212
213static g_taste_t g_journal_taste;
214static g_ctl_req_t g_journal_config;
215static g_dumpconf_t g_journal_dumpconf;
216static g_init_t g_journal_init;
217static g_fini_t g_journal_fini;
218
219struct g_class g_journal_class = {
220 .name = G_JOURNAL_CLASS_NAME,
221 .version = G_VERSION,
222 .taste = g_journal_taste,
223 .ctlreq = g_journal_config,
224 .dumpconf = g_journal_dumpconf,
225 .init = g_journal_init,
226 .fini = g_journal_fini
227};
228
229static int g_journal_destroy(struct g_journal_softc *sc);
230static void g_journal_metadata_update(struct g_journal_softc *sc);
231static void g_journal_switch_wait(struct g_journal_softc *sc);
232
233#define GJ_SWITCHER_WORKING 0
234#define GJ_SWITCHER_DIE 1
235#define GJ_SWITCHER_DIED 2
236static int g_journal_switcher_state = GJ_SWITCHER_WORKING;
237static int g_journal_switcher_wokenup = 0;
238static int g_journal_sync_requested = 0;
239
240#ifdef GJ_MEMDEBUG
241struct meminfo {
242 size_t mi_size;
243 struct stack mi_stack;
244};
245#endif
246
247/*
248 * We use our own malloc/realloc/free functions, so we can collect statistics
249 * and force a journal switch when we're running out of cache.
250 */
251static void *
252gj_malloc(size_t size, int flags)
253{
254 void *p;
255#ifdef GJ_MEMDEBUG
256 struct meminfo *mi;
257#endif
258
259 mtx_lock(&g_journal_cache_mtx);
260 if (g_journal_cache_limit > 0 && !g_journal_switcher_wokenup &&
261 g_journal_cache_used + size > g_journal_cache_low) {
262 GJ_DEBUG(1, "No cache, waking up the switcher.");
263 g_journal_switcher_wokenup = 1;
264 wakeup(&g_journal_switcher_state);
265 }
266 if ((flags & M_NOWAIT) && g_journal_cache_limit > 0 &&
267 g_journal_cache_used + size > g_journal_cache_limit) {
268 mtx_unlock(&g_journal_cache_mtx);
269 g_journal_cache_alloc_failures++;
270 return (NULL);
271 }
272 g_journal_cache_used += size;
273 mtx_unlock(&g_journal_cache_mtx);
274 flags &= ~M_NOWAIT;
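	/*
	 * Past this point the allocation is always allowed to sleep: the
	 * M_NOWAIT case has either failed above (over the hard limit) or
	 * been accounted for, so force M_WAITOK to guarantee success.
	 */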
275#ifndef GJ_MEMDEBUG
276 p = malloc(size, M_JOURNAL, flags | M_WAITOK);
277#else
278 mi = malloc(sizeof(*mi) + size, M_JOURNAL, flags | M_WAITOK);
279 p = (u_char *)mi + sizeof(*mi);
280 mi->mi_size = size;
281 stack_save(&mi->mi_stack);
282#endif
283 return (p);
284}
285
286static void
287gj_free(void *p, size_t size)
288{
289#ifdef GJ_MEMDEBUG
290 struct meminfo *mi;
291#endif
292
293 KASSERT(p != NULL, ("p=NULL"));
294 KASSERT(size > 0, ("size=0"));
295 mtx_lock(&g_journal_cache_mtx);
296 KASSERT(g_journal_cache_used >= size, ("Freeing too much?"));
297 g_journal_cache_used -= size;
298 mtx_unlock(&g_journal_cache_mtx);
299#ifdef GJ_MEMDEBUG
300 mi = p = (void *)((u_char *)p - sizeof(*mi));
301 if (mi->mi_size != size) {
302 printf("GJOURNAL: Size mismatch! %zu != %zu\n", size,
303 mi->mi_size);
304 printf("GJOURNAL: Alloc backtrace:\n");
305 stack_print(&mi->mi_stack);
306 printf("GJOURNAL: Free backtrace:\n");
307 kdb_backtrace();
308 }
309#endif
310 free(p, M_JOURNAL);
311}
312
313static void *
314gj_realloc(void *p, size_t size, size_t oldsize)
315{
316 void *np;
317
318#ifndef GJ_MEMDEBUG
319 mtx_lock(&g_journal_cache_mtx);
320 g_journal_cache_used -= oldsize;
321 g_journal_cache_used += size;
322 mtx_unlock(&g_journal_cache_mtx);
323 np = realloc(p, size, M_JOURNAL, M_WAITOK);
324#else
325 np = gj_malloc(size, M_WAITOK);
326 bcopy(p, np, MIN(oldsize, size));
327 gj_free(p, oldsize);
328#endif
329 return (np);
330}
331
332static void
333g_journal_check_overflow(struct g_journal_softc *sc)
334{
335 off_t length, used;
336
337 if ((sc->sc_active.jj_offset < sc->sc_inactive.jj_offset &&
338 sc->sc_journal_offset >= sc->sc_inactive.jj_offset) ||
339 (sc->sc_active.jj_offset > sc->sc_inactive.jj_offset &&
340 sc->sc_journal_offset >= sc->sc_inactive.jj_offset &&
341 sc->sc_journal_offset < sc->sc_active.jj_offset)) {
342 panic("Journal overflow (joffset=%jd active=%jd inactive=%jd)",
343 (intmax_t)sc->sc_journal_offset,
344 (intmax_t)sc->sc_active.jj_offset,
345 (intmax_t)sc->sc_inactive.jj_offset);
346 }
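	/*
	 * The journal area is circular: when the active journal starts
	 * after the inactive one, the free space wraps around sc_jend back
	 * to sc_jstart, so both the length and the used counters have to
	 * be computed in two pieces.
	 */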
347 if (sc->sc_active.jj_offset < sc->sc_inactive.jj_offset) {
348 length = sc->sc_inactive.jj_offset - sc->sc_active.jj_offset;
349 used = sc->sc_journal_offset - sc->sc_active.jj_offset;
350 } else {
351 length = sc->sc_jend - sc->sc_active.jj_offset;
352 length += sc->sc_inactive.jj_offset - sc->sc_jstart;
353 if (sc->sc_journal_offset >= sc->sc_active.jj_offset)
354 used = sc->sc_journal_offset - sc->sc_active.jj_offset;
355 else {
356 used = sc->sc_jend - sc->sc_active.jj_offset;
357 used += sc->sc_journal_offset - sc->sc_jstart;
358 }
359 }
360 /* Already woken up? */
361 if (g_journal_switcher_wokenup)
362 return;
363 /*
364	 * If the active journal takes more than g_journal_force_switch percent
365	 * of the free journal space, we force a journal switch.
366 */
367 KASSERT(length > 0,
368 ("length=%jd used=%jd active=%jd inactive=%jd joffset=%jd",
369 (intmax_t)length, (intmax_t)used,
370 (intmax_t)sc->sc_active.jj_offset,
371 (intmax_t)sc->sc_inactive.jj_offset,
372 (intmax_t)sc->sc_journal_offset));
373 if ((used * 100) / length > g_journal_force_switch) {
374 g_journal_stats_journal_full++;
375 GJ_DEBUG(1, "Journal %s %jd%% full, forcing journal switch.",
376 sc->sc_name, (used * 100) / length);
377 mtx_lock(&g_journal_cache_mtx);
378 g_journal_switcher_wokenup = 1;
379 wakeup(&g_journal_switcher_state);
380 mtx_unlock(&g_journal_cache_mtx);
381 }
382}
383
384static void
385g_journal_orphan(struct g_consumer *cp)
386{
387 struct g_journal_softc *sc;
388 char name[256];
389 int error;
390
391 g_topology_assert();
392 sc = cp->geom->softc;
393 GJ_DEBUG(0, "Lost provider %s (journal=%s).", cp->provider->name,
394 sc->sc_name);
395 strlcpy(name, sc->sc_name, sizeof(name));
396 error = g_journal_destroy(sc);
397 if (error == 0)
398 GJ_DEBUG(0, "Journal %s destroyed.", name);
399 else {
400 GJ_DEBUG(0, "Cannot destroy journal %s (error=%d). "
401 "Destroy it manually after last close.", sc->sc_name,
402 error);
403 }
404}
405
406static int
407g_journal_access(struct g_provider *pp, int acr, int acw, int ace)
408{
409 struct g_journal_softc *sc;
410 int dcr, dcw, dce;
411
412 g_topology_assert();
413 GJ_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name,
414 acr, acw, ace);
415
416 dcr = pp->acr + acr;
417 dcw = pp->acw + acw;
418 dce = pp->ace + ace;
419
420 sc = pp->geom->softc;
421 if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY)) {
422 if (acr <= 0 && acw <= 0 && ace <= 0)
423 return (0);
424 else
425 return (ENXIO);
426 }
427 if (pp->acw == 0 && dcw > 0) {
428 GJ_DEBUG(1, "Marking %s as dirty.", sc->sc_name);
429 sc->sc_flags &= ~GJF_DEVICE_CLEAN;
430 g_topology_unlock();
431 g_journal_metadata_update(sc);
432 g_topology_lock();
433 } /* else if (pp->acw == 0 && dcw > 0 && JEMPTY(sc)) {
434 GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
435 sc->sc_flags |= GJF_DEVICE_CLEAN;
436 g_topology_unlock();
437 g_journal_metadata_update(sc);
438 g_topology_lock();
439 } */
440 return (0);
441}
442
443static void
444g_journal_header_encode(struct g_journal_header *hdr, u_char *data)
445{
446
447 bcopy(GJ_HEADER_MAGIC, data, sizeof(GJ_HEADER_MAGIC));
448 data += sizeof(GJ_HEADER_MAGIC);
449 le32enc(data, hdr->jh_journal_id);
450 data += 4;
451 le32enc(data, hdr->jh_journal_next_id);
452}
453
454static int
455g_journal_header_decode(const u_char *data, struct g_journal_header *hdr)
456{
457
458 bcopy(data, hdr->jh_magic, sizeof(hdr->jh_magic));
459 data += sizeof(hdr->jh_magic);
460 if (bcmp(hdr->jh_magic, GJ_HEADER_MAGIC, sizeof(GJ_HEADER_MAGIC)) != 0)
461 return (EINVAL);
462 hdr->jh_journal_id = le32dec(data);
463 data += 4;
464 hdr->jh_journal_next_id = le32dec(data);
465 return (0);
466}
467
468static void
469g_journal_flush_cache(struct g_journal_softc *sc)
470{
471 struct bintime bt;
472 int error;
473
474 if (sc->sc_bio_flush == 0)
475 return;
476 GJ_TIMER_START(1, &bt);
477 if (sc->sc_bio_flush & GJ_FLUSH_JOURNAL) {
478 error = g_io_flush(sc->sc_jconsumer);
479 GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
480 sc->sc_jconsumer->provider->name, error);
481 }
482 if (sc->sc_bio_flush & GJ_FLUSH_DATA) {
483 /*
484 * TODO: This could be called in parallel with the
485 * previous call.
486 */
487 error = g_io_flush(sc->sc_dconsumer);
488 GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
489 sc->sc_dconsumer->provider->name, error);
490 }
491 GJ_TIMER_STOP(1, &bt, "Cache flush time");
492}
493
494static int
495g_journal_write_header(struct g_journal_softc *sc)
496{
497 struct g_journal_header hdr;
498 struct g_consumer *cp;
499 u_char *buf;
500 int error;
501
502 cp = sc->sc_jconsumer;
503 buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
504
505 strlcpy(hdr.jh_magic, GJ_HEADER_MAGIC, sizeof(hdr.jh_magic));
506 hdr.jh_journal_id = sc->sc_journal_id;
507 hdr.jh_journal_next_id = sc->sc_journal_next_id;
508 g_journal_header_encode(&hdr, buf);
509 error = g_write_data(cp, sc->sc_journal_offset, buf,
510 cp->provider->sectorsize);
511 /* if (error == 0) */
512 sc->sc_journal_offset += cp->provider->sectorsize;
513
514 gj_free(buf, cp->provider->sectorsize);
515 return (error);
516}
517
518/*
519 * Every journal record has a header and data following it.
520 * The functions below are used to encode the header to little endian before
521 * storing it, and to decode it to the system's endianness after reading it.
522 */
523static void
524g_journal_record_header_encode(struct g_journal_record_header *hdr,
525 u_char *data)
526{
527 struct g_journal_entry *ent;
528 u_int i;
529
530 bcopy(GJ_RECORD_HEADER_MAGIC, data, sizeof(GJ_RECORD_HEADER_MAGIC));
531 data += sizeof(GJ_RECORD_HEADER_MAGIC);
532 le32enc(data, hdr->jrh_journal_id);
533 data += 8;
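	/*
	 * Note: le32enc() stores only 4 bytes while the pointer advances
	 * by 8, so the on-disk journal ID field effectively occupies
	 * 8 bytes (the decode function below skips the same amount).
	 */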
534 le16enc(data, hdr->jrh_nentries);
535 data += 2;
536 bcopy(hdr->jrh_sum, data, sizeof(hdr->jrh_sum));
537 data += 8;
538 for (i = 0; i < hdr->jrh_nentries; i++) {
539 ent = &hdr->jrh_entries[i];
540 le64enc(data, ent->je_joffset);
541 data += 8;
542 le64enc(data, ent->je_offset);
543 data += 8;
544 le64enc(data, ent->je_length);
545 data += 8;
546 }
547}
548
549static int
550g_journal_record_header_decode(const u_char *data,
551 struct g_journal_record_header *hdr)
552{
553 struct g_journal_entry *ent;
554 u_int i;
555
556 bcopy(data, hdr->jrh_magic, sizeof(hdr->jrh_magic));
557 data += sizeof(hdr->jrh_magic);
558 if (strcmp(hdr->jrh_magic, GJ_RECORD_HEADER_MAGIC) != 0)
559 return (EINVAL);
560 hdr->jrh_journal_id = le32dec(data);
561 data += 8;
562 hdr->jrh_nentries = le16dec(data);
563 data += 2;
564 if (hdr->jrh_nentries > GJ_RECORD_HEADER_NENTRIES)
565 return (EINVAL);
566 bcopy(data, hdr->jrh_sum, sizeof(hdr->jrh_sum));
567 data += 8;
568 for (i = 0; i < hdr->jrh_nentries; i++) {
569 ent = &hdr->jrh_entries[i];
570 ent->je_joffset = le64dec(data);
571 data += 8;
572 ent->je_offset = le64dec(data);
573 data += 8;
574 ent->je_length = le64dec(data);
575 data += 8;
576 }
577 return (0);
578}
579
580/*
581 * Function reads metadata from a provider (via the given consumer), decodes
582 * it to the system's endianness and verifies its correctness.
583 */
584static int
585g_journal_metadata_read(struct g_consumer *cp, struct g_journal_metadata *md)
586{
587 struct g_provider *pp;
588 u_char *buf;
589 int error;
590
591 g_topology_assert();
592
593 error = g_access(cp, 1, 0, 0);
594 if (error != 0)
595 return (error);
596 pp = cp->provider;
597 g_topology_unlock();
598	/* Metadata is stored in the last sector. */
599 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
600 &error);
601 g_topology_lock();
602 g_access(cp, -1, 0, 0);
603 if (error != 0) {
604 GJ_DEBUG(1, "Cannot read metadata from %s (error=%d).",
605 cp->provider->name, error);
606 if (buf != NULL)
607 g_free(buf);
608 return (error);
609 }
610
611 /* Decode metadata. */
612 error = journal_metadata_decode(buf, md);
613 g_free(buf);
614	/* Is this a gjournal provider at all? */
615 if (strcmp(md->md_magic, G_JOURNAL_MAGIC) != 0)
616 return (EINVAL);
617 /*
618 * Are we able to handle this version of metadata?
619 * We only maintain backward compatibility.
620 */
621 if (md->md_version > G_JOURNAL_VERSION) {
622 GJ_DEBUG(0,
623 "Kernel module is too old to handle metadata from %s.",
624 cp->provider->name);
625 return (EINVAL);
626 }
627 /* Is checksum correct? */
628 if (error != 0) {
629 GJ_DEBUG(0, "MD5 metadata hash mismatch for provider %s.",
630 cp->provider->name);
631 return (error);
632 }
633 return (0);
634}
635
636/*
637 * The two functions below are responsible for updating metadata.
638 * Only metadata on the data provider is updated (we need to update
639 * information about the active journal there).
640 */
641static void
642g_journal_metadata_done(struct bio *bp)
643{
644
645 /*
646	 * There is not much we can do on error except inform about it.
647 */
648 if (bp->bio_error != 0) {
649 GJ_LOGREQ(0, bp, "Cannot update metadata (error=%d).",
650 bp->bio_error);
651 } else {
652 GJ_LOGREQ(2, bp, "Metadata updated.");
653 }
654 gj_free(bp->bio_data, bp->bio_length);
655 g_destroy_bio(bp);
656}
657
658static void
659g_journal_metadata_update(struct g_journal_softc *sc)
660{
661 struct g_journal_metadata md;
662 struct g_consumer *cp;
663 struct bio *bp;
664 u_char *sector;
665
666 cp = sc->sc_dconsumer;
667 sector = gj_malloc(cp->provider->sectorsize, M_WAITOK);
668 strlcpy(md.md_magic, G_JOURNAL_MAGIC, sizeof(md.md_magic));
669 md.md_version = G_JOURNAL_VERSION;
670 md.md_id = sc->sc_id;
671 md.md_type = sc->sc_orig_type;
672 md.md_jstart = sc->sc_jstart;
673 md.md_jend = sc->sc_jend;
674 md.md_joffset = sc->sc_inactive.jj_offset;
675 md.md_jid = sc->sc_journal_previous_id;
676 md.md_flags = 0;
677 if (sc->sc_flags & GJF_DEVICE_CLEAN)
678 md.md_flags |= GJ_FLAG_CLEAN;
679
680 if (sc->sc_flags & GJF_DEVICE_HARDCODED)
681 strlcpy(md.md_provider, sc->sc_name, sizeof(md.md_provider));
682 else
683 bzero(md.md_provider, sizeof(md.md_provider));
684 md.md_provsize = cp->provider->mediasize;
685 journal_metadata_encode(&md, sector);
686
687 /*
688 * Flush the cache, so we know all data are on disk.
689	 * We write information like "journal is consistent" here, so we need
690	 * to be sure it is. Without BIO_FLUSH here, we can end up in a situation
691	 * where the metadata is stored on disk, but not all the data.
692 */
693 g_journal_flush_cache(sc);
694
695 bp = g_alloc_bio();
696 bp->bio_offset = cp->provider->mediasize - cp->provider->sectorsize;
697 bp->bio_length = cp->provider->sectorsize;
698 bp->bio_data = sector;
699 bp->bio_cmd = BIO_WRITE;
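	/*
	 * On a regular update the write completes asynchronously, but while
	 * the device is being destroyed the worker thread is going away, so
	 * wait for the write synchronously and finalize the bio ourselves.
	 */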
700 if (!(sc->sc_flags & GJF_DEVICE_DESTROY)) {
701 bp->bio_done = g_journal_metadata_done;
702 g_io_request(bp, cp);
703 } else {
704 bp->bio_done = NULL;
705 g_io_request(bp, cp);
706 biowait(bp, "gjmdu");
707 g_journal_metadata_done(bp);
708 }
709
710 /*
711 * Be sure metadata reached the disk.
712 */
713 g_journal_flush_cache(sc);
714}
715
716/*
717 * This is where the I/O request comes from the GEOM.
718 */
719static void
720g_journal_start(struct bio *bp)
721{
722 struct g_journal_softc *sc;
723
724 sc = bp->bio_to->geom->softc;
725 GJ_LOGREQ(3, bp, "Request received.");
726
727 switch (bp->bio_cmd) {
728 case BIO_READ:
729 case BIO_WRITE:
730 mtx_lock(&sc->sc_mtx);
731 bioq_insert_tail(&sc->sc_regular_queue, bp);
732 wakeup(sc);
733 mtx_unlock(&sc->sc_mtx);
734 return;
735 case BIO_GETATTR:
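		/*
		 * Report the provider name, so upper layers (e.g. the file
		 * system code) can detect that they sit on top of gjournal.
		 */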
736 if (strcmp(bp->bio_attribute, "GJOURNAL::provider") == 0) {
737 strlcpy(bp->bio_data, bp->bio_to->name, bp->bio_length);
738 bp->bio_completed = strlen(bp->bio_to->name) + 1;
739 g_io_deliver(bp, 0);
740 return;
741 }
742 /* FALLTHROUGH */
743 case BIO_DELETE:
744 default:
745 g_io_deliver(bp, EOPNOTSUPP);
746 return;
747 }
748}
749
750static void
751g_journal_std_done(struct bio *bp)
752{
753 struct g_journal_softc *sc;
754
755 sc = bp->bio_from->geom->softc;
756 mtx_lock(&sc->sc_mtx);
757 bioq_insert_tail(&sc->sc_back_queue, bp);
758 wakeup(sc);
759 mtx_unlock(&sc->sc_mtx);
760}
761
762static struct bio *
763g_journal_new_bio(off_t start, off_t end, off_t joffset, u_char *data,
764 int flags)
765{
766 struct bio *bp;
767
768 bp = g_alloc_bio();
769 bp->bio_offset = start;
770 bp->bio_joffset = joffset;
771 bp->bio_length = end - start;
772 bp->bio_cmd = BIO_WRITE;
773 bp->bio_done = g_journal_std_done;
774 if (data == NULL)
775 bp->bio_data = NULL;
776 else {
777 bp->bio_data = gj_malloc(bp->bio_length, flags);
778 if (bp->bio_data != NULL)
779 bcopy(data, bp->bio_data, bp->bio_length);
780 }
781 return (bp);
782}
783
784#define g_journal_insert_bio(head, bp, flags) \
785 g_journal_insert((head), (bp)->bio_offset, \
786 (bp)->bio_offset + (bp)->bio_length, (bp)->bio_joffset, \
787 (bp)->bio_data, flags)
788/*
789 * The function below does a lot more than just insert a bio into the queue.
790 * It keeps the queue sorted by offset and ensures that there is no duplicated
791 * data (it combines bios whose ranges overlap).
792 *
793 * The function returns the number of bios inserted (as a bio can be split).
794 */
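/*
 * Example (offsets in bytes): inserting the range (100, 300) into a queue
 * holding a single bio covering (200, 400) matches the last case below, so
 * afterwards the queue holds (100, 300) with the fresh data followed by
 * (300, 400) of the old bio -- the overlapping part (200, 300) of the old
 * bio is dropped in favour of the newer data.
 */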
795static int
796g_journal_insert(struct bio **head, off_t nstart, off_t nend, off_t joffset,
797 u_char *data, int flags)
798{
799 struct bio *nbp, *cbp, *pbp;
800 off_t cstart, cend;
801 u_char *tmpdata;
802 int n;
803
804 GJ_DEBUG(3, "INSERT(%p): (%jd, %jd, %jd)", *head, nstart, nend,
805 joffset);
806 n = 0;
807 pbp = NULL;
808 GJQ_FOREACH(*head, cbp) {
809 cstart = cbp->bio_offset;
810 cend = cbp->bio_offset + cbp->bio_length;
811
812 if (nstart >= cend) {
813 /*
814 * +-------------+
815 * | |
816 * | current | +-------------+
817 * | bio | | |
818 * | | | new |
819 * +-------------+ | bio |
820 * | |
821 * +-------------+
822 */
823 GJ_DEBUG(3, "INSERT(%p): 1", *head);
824 } else if (nend <= cstart) {
825 /*
826 * +-------------+
827 * | |
828 * +-------------+ | current |
829 * | | | bio |
830 * | new | | |
831 * | bio | +-------------+
832 * | |
833 * +-------------+
834 */
835 nbp = g_journal_new_bio(nstart, nend, joffset, data,
836 flags);
837 if (pbp == NULL)
838 *head = nbp;
839 else
840 pbp->bio_next = nbp;
841 nbp->bio_next = cbp;
842 n++;
843 GJ_DEBUG(3, "INSERT(%p): 2 (nbp=%p pbp=%p)", *head, nbp,
844 pbp);
845 goto end;
846 } else if (nstart <= cstart && nend >= cend) {
847 /*
848 * +-------------+ +-------------+
849 * | current bio | | current bio |
850 * +---+-------------+---+ +-------------+---+
851 * | | | | | | |
852 * | | | | | | |
853 * | +-------------+ | +-------------+ |
854 * | new bio | | new bio |
855 * +---------------------+ +-----------------+
856 *
857 * +-------------+ +-------------+
858 * | current bio | | current bio |
859 * +---+-------------+ +-------------+
860 * | | | | |
861 * | | | | |
862 * | +-------------+ +-------------+
863 * | new bio | | new bio |
864 * +-----------------+ +-------------+
865 */
866 g_journal_stats_bytes_skipped += cbp->bio_length;
867 cbp->bio_offset = nstart;
868 cbp->bio_joffset = joffset;
869 cbp->bio_length = cend - nstart;
870 if (cbp->bio_data != NULL) {
871 gj_free(cbp->bio_data, cend - cstart);
872 cbp->bio_data = NULL;
873 }
874 if (data != NULL) {
875 cbp->bio_data = gj_malloc(cbp->bio_length,
876 flags);
877 if (cbp->bio_data != NULL) {
878 bcopy(data, cbp->bio_data,
879 cbp->bio_length);
880 }
881 data += cend - nstart;
882 }
883 joffset += cend - nstart;
884 nstart = cend;
885 GJ_DEBUG(3, "INSERT(%p): 3 (cbp=%p)", *head, cbp);
886 } else if (nstart > cstart && nend >= cend) {
887 /*
888 * +-----------------+ +-------------+
889 * | current bio | | current bio |
890 * | +-------------+ | +---------+---+
891 * | | | | | | |
892 * | | | | | | |
893 * +---+-------------+ +---+---------+ |
894 * | new bio | | new bio |
895 * +-------------+ +-------------+
896 */
897 g_journal_stats_bytes_skipped += cend - nstart;
898 nbp = g_journal_new_bio(nstart, cend, joffset, data,
899 flags);
900 nbp->bio_next = cbp->bio_next;
901 cbp->bio_next = nbp;
902 cbp->bio_length = nstart - cstart;
903 if (cbp->bio_data != NULL) {
904 cbp->bio_data = gj_realloc(cbp->bio_data,
905 cbp->bio_length, cend - cstart);
906 }
907 if (data != NULL)
908 data += cend - nstart;
909 joffset += cend - nstart;
910 nstart = cend;
911 n++;
912 GJ_DEBUG(3, "INSERT(%p): 4 (cbp=%p)", *head, cbp);
913 } else if (nstart > cstart && nend < cend) {
914 /*
915 * +---------------------+
916 * | current bio |
917 * | +-------------+ |
918 * | | | |
919 * | | | |
920 * +---+-------------+---+
921 * | new bio |
922 * +-------------+
923 */
924 g_journal_stats_bytes_skipped += nend - nstart;
925 nbp = g_journal_new_bio(nstart, nend, joffset, data,
926 flags);
927 nbp->bio_next = cbp->bio_next;
928 cbp->bio_next = nbp;
929 if (cbp->bio_data == NULL)
930 tmpdata = NULL;
931 else
932 tmpdata = cbp->bio_data + nend - cstart;
933 nbp = g_journal_new_bio(nend, cend,
934 cbp->bio_joffset + nend - cstart, tmpdata, flags);
935 nbp->bio_next = ((struct bio *)cbp->bio_next)->bio_next;
936 ((struct bio *)cbp->bio_next)->bio_next = nbp;
937 cbp->bio_length = nstart - cstart;
938 if (cbp->bio_data != NULL) {
939 cbp->bio_data = gj_realloc(cbp->bio_data,
940 cbp->bio_length, cend - cstart);
941 }
942 n += 2;
943 GJ_DEBUG(3, "INSERT(%p): 5 (cbp=%p)", *head, cbp);
944 goto end;
945 } else if (nstart <= cstart && nend < cend) {
946 /*
947 * +-----------------+ +-------------+
948 * | current bio | | current bio |
949 * +-------------+ | +---+---------+ |
950 * | | | | | | |
951 * | | | | | | |
952 * +-------------+---+ | +---------+---+
953 * | new bio | | new bio |
954 * +-------------+ +-------------+
955 */
956 g_journal_stats_bytes_skipped += nend - nstart;
957 nbp = g_journal_new_bio(nstart, nend, joffset, data,
958 flags);
959 if (pbp == NULL)
960 *head = nbp;
961 else
962 pbp->bio_next = nbp;
963 nbp->bio_next = cbp;
964 cbp->bio_offset = nend;
965 cbp->bio_length = cend - nend;
966 cbp->bio_joffset += nend - cstart;
967 tmpdata = cbp->bio_data;
968 if (tmpdata != NULL) {
969 cbp->bio_data = gj_malloc(cbp->bio_length,
970 flags);
971 if (cbp->bio_data != NULL) {
972 bcopy(tmpdata + nend - cstart,
973 cbp->bio_data, cbp->bio_length);
974 }
975 gj_free(tmpdata, cend - cstart);
976 }
977 n++;
978 GJ_DEBUG(3, "INSERT(%p): 6 (cbp=%p)", *head, cbp);
979 goto end;
980 }
981 if (nstart == nend)
982 goto end;
983 pbp = cbp;
984 }
985 nbp = g_journal_new_bio(nstart, nend, joffset, data, flags);
986 if (pbp == NULL)
987 *head = nbp;
988 else
989 pbp->bio_next = nbp;
990 nbp->bio_next = NULL;
991 n++;
992 GJ_DEBUG(3, "INSERT(%p): 8 (nbp=%p pbp=%p)", *head, nbp, pbp);
993end:
994 if (g_journal_debug >= 3) {
995 GJQ_FOREACH(*head, cbp) {
996 GJ_DEBUG(3, "ELEMENT: %p (%jd, %jd, %jd, %p)", cbp,
997 (intmax_t)cbp->bio_offset,
998 (intmax_t)cbp->bio_length,
999 (intmax_t)cbp->bio_joffset, cbp->bio_data);
1000 }
1001 GJ_DEBUG(3, "INSERT(%p): DONE %d", *head, n);
1002 }
1003 return (n);
1004}
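
/*
 * A minimal usage sketch of the merging behaviour above, with hypothetical
 * offsets and buffers (data0/data1 are not names from this file): inserting
 * a write that overlaps the tail of an already queued bio exercises case 4
 * and the final fall-through.
 */
#if 0
	struct bio *head = NULL;
	u_char *data0, *data1;	/* hypothetical 8kB buffers */
	int n;

	/* Queue one 8kB write at offset 0: queue holds [0,8192). */
	n = g_journal_insert(&head, 0, 8192, 0, data0, M_WAITOK);	/* n == 1 */
	/*
	 * Insert [4096,12288): the queued bio is truncated to [0,4096),
	 * a new bio covers [4096,8192) and the remainder [8192,12288) is
	 * appended, so two bios are added.
	 */
	n = g_journal_insert(&head, 4096, 12288, 8192, data1, M_WAITOK);	/* n == 2 */
	/* Queue now: [0,4096) -> [4096,8192) -> [8192,12288), sorted, no overlap. */
#endif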
1005
1006/*
1007 * The function combines neighbouring bios, trying to squeeze as much data
1008 * as possible into one bio.
1009 *
1010 * The function returns the number of bios combined (as a negative value).
1011 */
1012static int
1013g_journal_optimize(struct bio *head)
1014{
1015 struct bio *cbp, *pbp;
1016 int n;
1017
1018 n = 0;
1019 pbp = NULL;
1020 GJQ_FOREACH(head, cbp) {
1021		/* Skip bios which have to be read first. */
1022 if (cbp->bio_data == NULL) {
1023 pbp = NULL;
1024 continue;
1025 }
1026 /* There is no previous bio yet. */
1027 if (pbp == NULL) {
1028 pbp = cbp;
1029 continue;
1030 }
1031 /* Is this a neighbour bio? */
1032 if (pbp->bio_offset + pbp->bio_length != cbp->bio_offset) {
1033			/* Make sure the bio queue is sorted. */
1034 KASSERT(pbp->bio_offset + pbp->bio_length < cbp->bio_offset,
1035 ("poffset=%jd plength=%jd coffset=%jd",
1036 (intmax_t)pbp->bio_offset,
1037 (intmax_t)pbp->bio_length,
1038 (intmax_t)cbp->bio_offset));
1039 pbp = cbp;
1040 continue;
1041 }
1042		/* Make sure we don't end up with a too big bio. */
1043 if (pbp->bio_length + cbp->bio_length > MAXPHYS) {
1044 pbp = cbp;
1045 continue;
1046 }
1047 /* Ok, we can join bios. */
1048 GJ_LOGREQ(4, pbp, "Join: ");
1049 GJ_LOGREQ(4, cbp, "and: ");
1050 pbp->bio_data = gj_realloc(pbp->bio_data,
1051 pbp->bio_length + cbp->bio_length, pbp->bio_length);
1052 bcopy(cbp->bio_data, pbp->bio_data + pbp->bio_length,
1053 cbp->bio_length);
1054 gj_free(cbp->bio_data, cbp->bio_length);
1055 pbp->bio_length += cbp->bio_length;
1056 pbp->bio_next = cbp->bio_next;
1057 g_destroy_bio(cbp);
1058 cbp = pbp;
1059 g_journal_stats_combined_ios++;
1060 n--;
1061 GJ_LOGREQ(4, pbp, "Got: ");
1062 }
1063 return (n);
1064}
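
/*
 * An illustration of the negative return value, with hypothetical sizes:
 * a queue holding three contiguous 4kB bios at offsets 0, 4096 and 8192
 * collapses into a single 12kB bio, so g_journal_optimize() returns -2 and
 * a caller tracking the queue length adjusts it accordingly, e.g.:
 *
 *	sc->sc_flush_in_progress += g_journal_optimize(bioq);
 */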
1065
1066/*
1067 * TODO: Update comment.
1068 * These are the functions responsible for copying one portion of data from
1069 * the journal to the destination provider.
1070 * The order goes like this:
1071 * 1. Read the header, which contains information about the data blocks
1072 *    following it.
1073 * 2. Read the data blocks from the journal.
1074 * 3. Write the data blocks to the data provider.
1075 *
1076 * g_journal_copy_start()
1077 * g_journal_copy_done() - called when a write request finishes; logs errors.
1078 */
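
/*
 * A sketch of the resulting request flow for one journaled block (names as
 * in this file; the done callbacks run via g_journal_std_done() and the
 * worker thread):
 *
 *	g_journal_copy_start() -> g_journal_copy_send()
 *	    bio_data == NULL:  g_journal_read_first()  BIO_READ from journal
 *	                       -> g_journal_copy_read_done()
 *	    then:              g_io_request(bp, sc_dconsumer)  BIO_WRITE to data
 *	                       -> g_journal_copy_write_done()
 */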
1079
1080/*
1081 * When there is no data in cache, this function is used to read it.
1082 */
1083static void
1084g_journal_read_first(struct g_journal_softc *sc, struct bio *bp)
1085{
1086 struct bio *cbp;
1087
1088 /*
1089	 * We were short on memory, so the data was freed.
1090	 * In that case we need to read it back from the journal.
1091 */
1092 cbp = g_alloc_bio();
1093 cbp->bio_cflags = bp->bio_cflags;
1094 cbp->bio_parent = bp;
1095 cbp->bio_offset = bp->bio_joffset;
1096 cbp->bio_length = bp->bio_length;
1097 cbp->bio_data = gj_malloc(bp->bio_length, M_WAITOK);
1098 cbp->bio_cmd = BIO_READ;
1099 cbp->bio_done = g_journal_std_done;
1100 GJ_LOGREQ(4, cbp, "READ FIRST");
1101 g_io_request(cbp, sc->sc_jconsumer);
1102 g_journal_cache_misses++;
1103}
1104
1105static void
1106g_journal_copy_send(struct g_journal_softc *sc)
1107{
1108 struct bio *bioq, *bp, *lbp;
1109
1110 bioq = lbp = NULL;
1111 mtx_lock(&sc->sc_mtx);
1112 for (; sc->sc_copy_in_progress < g_journal_parallel_copies;) {
1113 bp = GJQ_FIRST(sc->sc_inactive.jj_queue);
1114 if (bp == NULL)
1115 break;
1116 GJQ_REMOVE(sc->sc_inactive.jj_queue, bp);
1117 sc->sc_copy_in_progress++;
1118 GJQ_INSERT_AFTER(bioq, bp, lbp);
1119 lbp = bp;
1120 }
1121 mtx_unlock(&sc->sc_mtx);
1122 if (g_journal_do_optimize)
1123 sc->sc_copy_in_progress += g_journal_optimize(bioq);
1124 while ((bp = GJQ_FIRST(bioq)) != NULL) {
1125 GJQ_REMOVE(bioq, bp);
1126 GJQ_INSERT_HEAD(sc->sc_copy_queue, bp);
1127 bp->bio_cflags = GJ_BIO_COPY;
1128 if (bp->bio_data == NULL)
1129 g_journal_read_first(sc, bp);
1130 else {
1131 bp->bio_joffset = 0;
1132 GJ_LOGREQ(4, bp, "SEND");
1133 g_io_request(bp, sc->sc_dconsumer);
1134 }
1135 }
1136}
1137
1138static void
1139g_journal_copy_start(struct g_journal_softc *sc)
1140{
1141
1142 /*
1143 * Remember in metadata that we're starting to copy journaled data
1144 * to the data provider.
1145	 * In case of power failure, we will copy this data once again on boot.
1146 */
1147 if (!sc->sc_journal_copying) {
1148 sc->sc_journal_copying = 1;
1149 GJ_DEBUG(1, "Starting copy of journal.");
1150 g_journal_metadata_update(sc);
1151 }
1152 g_journal_copy_send(sc);
1153}
1154
1155/*
1156 * Data block has been read from the journal provider.
1157 */
1158static int
1159g_journal_copy_read_done(struct bio *bp)
1160{
1161 struct g_journal_softc *sc;
1162 struct g_consumer *cp;
1163 struct bio *pbp;
1164
1165 KASSERT(bp->bio_cflags == GJ_BIO_COPY,
1166 ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));
1167
1168 sc = bp->bio_from->geom->softc;
1169 pbp = bp->bio_parent;
1170
1171 if (bp->bio_error != 0) {
1172 GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
1173 bp->bio_to->name, bp->bio_error);
1174 /*
1175		 * We will not be able to deliver the WRITE request either.
1176 */
1177 gj_free(bp->bio_data, bp->bio_length);
1178 g_destroy_bio(pbp);
1179 g_destroy_bio(bp);
1180 sc->sc_copy_in_progress--;
1181 return (1);
1182 }
1183 pbp->bio_data = bp->bio_data;
1184 cp = sc->sc_dconsumer;
1185 g_io_request(pbp, cp);
1186 GJ_LOGREQ(4, bp, "READ DONE");
1187 g_destroy_bio(bp);
1188 return (0);
1189}
1190
1191/*
1192 * Data block has been written to the data provider.
1193 */
1194static void
1195g_journal_copy_write_done(struct bio *bp)
1196{
1197 struct g_journal_softc *sc;
1198
1199 KASSERT(bp->bio_cflags == GJ_BIO_COPY,
1200 ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));
1201
1202 sc = bp->bio_from->geom->softc;
1203 sc->sc_copy_in_progress--;
1204
1205 if (bp->bio_error != 0) {
1206		GJ_LOGREQ(0, bp, "[copy] Error while writing data (error=%d)",
1207 bp->bio_error);
1208 }
1209 GJQ_REMOVE(sc->sc_copy_queue, bp);
1210 gj_free(bp->bio_data, bp->bio_length);
1211 GJ_LOGREQ(4, bp, "DONE");
1212 g_destroy_bio(bp);
1213
1214 if (sc->sc_copy_in_progress == 0) {
1215 /*
1216 * This was the last write request for this journal.
1217 */
1218 GJ_DEBUG(1, "Data has been copied.");
1219 sc->sc_journal_copying = 0;
1220 }
1221}
1222
1223static void g_journal_flush_done(struct bio *bp);
1224
1225/*
1226 * Flush one record onto active journal provider.
1227 */
1228static void
1229g_journal_flush(struct g_journal_softc *sc)
1230{
1231 struct g_journal_record_header hdr;
1232 struct g_journal_entry *ent;
1233 struct g_provider *pp;
1234 struct bio **bioq;
1235 struct bio *bp, *fbp, *pbp;
1236 off_t joffset, size;
1237 u_char *data, hash[16];
1238 MD5_CTX ctx;
1239 u_int i;
1240
1241 if (sc->sc_current_count == 0)
1242 return;
1243
1244 size = 0;
1245 pp = sc->sc_jprovider;
1246 GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
1247 joffset = sc->sc_journal_offset;
1248
1249 GJ_DEBUG(2, "Storing %d journal entries on %s at %jd.",
1250 sc->sc_current_count, pp->name, (intmax_t)joffset);
1251
1252 /*
1253 * Store 'journal id', so we know to which journal this record belongs.
1254 */
1255 hdr.jrh_journal_id = sc->sc_journal_id;
1256	/* Could be less than g_journal_record_entries if called due to a timeout. */
1257 hdr.jrh_nentries = MIN(sc->sc_current_count, g_journal_record_entries);
1258 strlcpy(hdr.jrh_magic, GJ_RECORD_HEADER_MAGIC, sizeof(hdr.jrh_magic));
1259
1260 bioq = &sc->sc_active.jj_queue;
1261 pbp = sc->sc_flush_queue;
1262
1263 fbp = g_alloc_bio();
1264 fbp->bio_parent = NULL;
1265 fbp->bio_cflags = GJ_BIO_JOURNAL;
1266 fbp->bio_offset = -1;
1267 fbp->bio_joffset = joffset;
1268 fbp->bio_length = pp->sectorsize;
1269 fbp->bio_cmd = BIO_WRITE;
1270 fbp->bio_done = g_journal_std_done;
1271 GJQ_INSERT_AFTER(sc->sc_flush_queue, fbp, pbp);
1272 pbp = fbp;
1273 fbp->bio_to = pp;
1274 GJ_LOGREQ(4, fbp, "FLUSH_OUT");
1275 joffset += pp->sectorsize;
1276 sc->sc_flush_count++;
1277 if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1278 MD5Init(&ctx);
1279
1280 for (i = 0; i < hdr.jrh_nentries; i++) {
1281 bp = sc->sc_current_queue;
1282 KASSERT(bp != NULL, ("NULL bp"));
1283 bp->bio_to = pp;
1284 GJ_LOGREQ(4, bp, "FLUSHED");
1285 sc->sc_current_queue = bp->bio_next;
1286 bp->bio_next = NULL;
1287 sc->sc_current_count--;
1288
1289 /* Add to the header. */
1290 ent = &hdr.jrh_entries[i];
1291 ent->je_offset = bp->bio_offset;
1292 ent->je_joffset = joffset;
1293 ent->je_length = bp->bio_length;
1294 size += ent->je_length;
1295
1296 data = bp->bio_data;
1297 if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1298 MD5Update(&ctx, data, ent->je_length);
1299 bzero(bp, sizeof(*bp));
1300 bp->bio_cflags = GJ_BIO_JOURNAL;
1301 bp->bio_offset = ent->je_offset;
1302 bp->bio_joffset = ent->je_joffset;
1303 bp->bio_length = ent->je_length;
1304 bp->bio_data = data;
1305 bp->bio_cmd = BIO_WRITE;
1306 bp->bio_done = g_journal_std_done;
1307 GJQ_INSERT_AFTER(sc->sc_flush_queue, bp, pbp);
1308 pbp = bp;
1309 bp->bio_to = pp;
1310 GJ_LOGREQ(4, bp, "FLUSH_OUT");
1311 joffset += bp->bio_length;
1312 sc->sc_flush_count++;
1313
1314 /*
1315		 * Add the request to the active journal's queue
1316		 * (sc_active.jj_queue). This is our cache. After a journal
1317		 * switch we don't have to read the data from the inactive
1318		 * journal, because we keep it in memory.
1319 */
1320 g_journal_insert(bioq, ent->je_offset,
1321 ent->je_offset + ent->je_length, ent->je_joffset, data,
1322 M_NOWAIT);
1323 }
1324
1325 /*
1326	 * After all requests, store a valid header.
1327 */
1328 data = gj_malloc(pp->sectorsize, M_WAITOK);
1329 if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1330 MD5Final(hash, &ctx);
1331 bcopy(hash, hdr.jrh_sum, sizeof(hdr.jrh_sum));
1332 }
1333 g_journal_record_header_encode(&hdr, data);
1334 fbp->bio_data = data;
1335
1336 sc->sc_journal_offset = joffset;
1337
1338 g_journal_check_overflow(sc);
1339}
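
/*
 * The resulting on-disk layout of one record, assuming a 512-byte sector
 * and two journaled 4kB entries (hypothetical sizes):
 *
 *	joffset + 0    : record header (one sector; MD5 of the entry data
 *	                 when GJF_DEVICE_CHECKSUM is set)
 *	joffset + 512  : data of entry 0 (je_joffset == joffset + 512)
 *	joffset + 4608 : data of entry 1 (je_joffset == joffset + 4608)
 *
 * sc_journal_offset then points past the last entry (joffset + 8704 here).
 */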
1340
1341/*
1342 * Flush request finished.
1343 */
1344static void
1345g_journal_flush_done(struct bio *bp)
1346{
1347 struct g_journal_softc *sc;
1348 struct g_consumer *cp;
1349
1350 KASSERT((bp->bio_cflags & GJ_BIO_MASK) == GJ_BIO_JOURNAL,
1351 ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_JOURNAL));
1352
1353 cp = bp->bio_from;
1354 sc = cp->geom->softc;
1355 sc->sc_flush_in_progress--;
1356
1357 if (bp->bio_error != 0) {
1358		GJ_LOGREQ(0, bp, "[flush] Error while writing data (error=%d)",
1359 bp->bio_error);
1360 }
1361 gj_free(bp->bio_data, bp->bio_length);
1362 GJ_LOGREQ(4, bp, "DONE");
1363 g_destroy_bio(bp);
1364}
1365
1366static void g_journal_release_delayed(struct g_journal_softc *sc);
1367
1368static void
1369g_journal_flush_send(struct g_journal_softc *sc)
1370{
1371 struct g_consumer *cp;
1372 struct bio *bioq, *bp, *lbp;
1373
1374 cp = sc->sc_jconsumer;
1375 bioq = lbp = NULL;
1376 while (sc->sc_flush_in_progress < g_journal_parallel_flushes) {
1377		/* Send one flush request to the active journal. */
1378 bp = GJQ_FIRST(sc->sc_flush_queue);
1379 if (bp != NULL) {
1380 GJQ_REMOVE(sc->sc_flush_queue, bp);
1381 sc->sc_flush_count--;
1382 bp->bio_offset = bp->bio_joffset;
1383 bp->bio_joffset = 0;
1384 sc->sc_flush_in_progress++;
1385 GJQ_INSERT_AFTER(bioq, bp, lbp);
1386 lbp = bp;
1387 }
1388 /* Try to release delayed requests. */
1389 g_journal_release_delayed(sc);
1390 /* If there are no requests to flush, leave. */
1391 if (GJQ_FIRST(sc->sc_flush_queue) == NULL)
1392 break;
1393 }
1394 if (g_journal_do_optimize)
1395 sc->sc_flush_in_progress += g_journal_optimize(bioq);
1396 while ((bp = GJQ_FIRST(bioq)) != NULL) {
1397 GJQ_REMOVE(bioq, bp);
1398 GJ_LOGREQ(3, bp, "Flush request send");
1399 g_io_request(bp, cp);
1400 }
1401}
1402
1403static void
1404g_journal_add_current(struct g_journal_softc *sc, struct bio *bp)
1405{
1406 int n;
1407
1408 GJ_LOGREQ(4, bp, "CURRENT %d", sc->sc_current_count);
1409 n = g_journal_insert_bio(&sc->sc_current_queue, bp, M_WAITOK);
1410 sc->sc_current_count += n;
1411 n = g_journal_optimize(sc->sc_current_queue);
1412 sc->sc_current_count += n;
1413 /*
1414	 * For requests which are added to the current queue we deliver the
1415	 * response immediately.
1416 */
1417 bp->bio_completed = bp->bio_length;
1418 g_io_deliver(bp, 0);
1419 if (sc->sc_current_count >= g_journal_record_entries) {
1420 /*
1421 * Let's flush one record onto active journal provider.
1422 */
1423 g_journal_flush(sc);
1424 }
1425}
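
/*
 * A note on the counter arithmetic above, with hypothetical numbers:
 * g_journal_insert_bio() returns how many bios were added (a split can add
 * more than one) and g_journal_optimize() returns a non-positive merge
 * delta, so sc_current_count tracks the real queue length; e.g. an insert
 * that splits into 2 followed by a merge of -1 grows the count by 1.
 */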
1426
1427static void
1428g_journal_release_delayed(struct g_journal_softc *sc)
1429{
1430 struct bio *bp;
1431
1432 for (;;) {
1433 /* The flush queue is full, exit. */
1434 if (sc->sc_flush_count >= g_journal_accept_immediately)
1435 return;
1436 bp = bioq_takefirst(&sc->sc_delayed_queue);
1437 if (bp == NULL)
1438 return;
1439 sc->sc_delayed_count--;
1440 g_journal_add_current(sc, bp);
1441 }
1442}
1443
1444/*
1445 * Add an I/O request to the current queue. If we have enough requests for
1446 * one journal record, we flush them onto the active journal provider.
1447 */
1448static void
1449g_journal_add_request(struct g_journal_softc *sc, struct bio *bp)
1450{
1451
1452 /*
1453 * The flush queue is full, we need to delay the request.
1454 */
1455 if (sc->sc_delayed_count > 0 ||
1456 sc->sc_flush_count >= g_journal_accept_immediately) {
1457 GJ_LOGREQ(4, bp, "DELAYED");
1458 bioq_insert_tail(&sc->sc_delayed_queue, bp);
1459 sc->sc_delayed_count++;
1460 return;
1461 }
1462
1463 KASSERT(TAILQ_EMPTY(&sc->sc_delayed_queue.queue),
1464 ("DELAYED queue not empty."));
1465 g_journal_add_current(sc, bp);
1466}
1467
1468static void g_journal_read_done(struct bio *bp);
1469
1470/*
1471 * Try to find requested data in cache.
1472 */
1473static struct bio *
1474g_journal_read_find(struct bio *head, int sorted, struct bio *pbp, off_t ostart,
1475 off_t oend)
1476{
1477 off_t cstart, cend;
1478 struct bio *bp;
1479
1480 GJQ_FOREACH(head, bp) {
1481 if (bp->bio_offset == -1)
1482 continue;
1483 cstart = MAX(ostart, bp->bio_offset);
1484 cend = MIN(oend, bp->bio_offset + bp->bio_length);
1485 if (cend <= ostart)
1486 continue;
1487 else if (cstart >= oend) {
1488 if (!sorted)
1489 continue;
1490 else {
1491 bp = NULL;
1492 break;
1493 }
1494 }
1495 if (bp->bio_data == NULL)
1496 break;
1497 GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
1498 bp);
1499 bcopy(bp->bio_data + cstart - bp->bio_offset,
1500 pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
1501 pbp->bio_completed += cend - cstart;
1502 if (pbp->bio_completed == pbp->bio_length) {
1503 /*
1504 * Cool, the whole request was in cache, deliver happy
1505 * message.
1506 */
1507 g_io_deliver(pbp, 0);
1508 return (pbp);
1509 }
1510 break;
1511 }
1512 return (bp);
1513}
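
/*
 * The overlap arithmetic above, with hypothetical values: for a request
 * covering [0,8192) and a cached bio covering [4096,16384),
 * cstart = MAX(0, 4096) = 4096 and cend = MIN(8192, 16384) = 8192, so 4kB
 * is copied into the parent bio at offset 4096 and bio_completed grows by
 * 4096; the caller still has to fetch [0,4096) from elsewhere.
 */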
1514
1515/*
1516 * Try to find requested data in cache.
1517 */
1518static struct bio *
1519g_journal_read_queue_find(struct bio_queue *head, struct bio *pbp, off_t ostart,
1520 off_t oend)
1521{
1522 off_t cstart, cend;
1523 struct bio *bp;
1524
1525 TAILQ_FOREACH(bp, head, bio_queue) {
1526 cstart = MAX(ostart, bp->bio_offset);
1527 cend = MIN(oend, bp->bio_offset + bp->bio_length);
1528 if (cend <= ostart)
1529 continue;
1530 else if (cstart >= oend)
1531 continue;
1532 KASSERT(bp->bio_data != NULL,
1533 ("%s: bio_data == NULL", __func__));
1534 GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
1535 bp);
1536 bcopy(bp->bio_data + cstart - bp->bio_offset,
1537 pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
1538 pbp->bio_completed += cend - cstart;
1539 if (pbp->bio_completed == pbp->bio_length) {
1540 /*
1541 * Cool, the whole request was in cache, deliver happy
1542 * message.
1543 */
1544 g_io_deliver(pbp, 0);
1545 return (pbp);
1546 }
1547 break;
1548 }
1549 return (bp);
1550}
1551
1552/*
1553 * This function is used for collecting data on read.
1554 * The complexity is because parts of the data can be stored in six different
1555 * places:
1556 * - in delayed requests
1557 * - in memory - the data not yet sent to the active journal provider
1558 * - in requests which are going to be sent to the active journal
1559 * - in the active journal
1560 * - in the inactive journal
1561 * - in the data provider
1562 */
1563static void
1564g_journal_read(struct g_journal_softc *sc, struct bio *pbp, off_t ostart,
1565 off_t oend)
1566{
1567 struct bio *bp, *nbp, *head;
1568 off_t cstart, cend;
1569 u_int i, sorted = 0;
1570
1571 GJ_DEBUG(3, "READ: (%jd, %jd)", ostart, oend);
1572
1573 cstart = cend = -1;
1574 bp = NULL;
1575 head = NULL;
1576 for (i = 0; i <= 5; i++) {
1577 switch (i) {
1578 case 0: /* Delayed requests. */
1579 head = NULL;
1580 sorted = 0;
1581 break;
1582		case 1: /* Not-yet-sent data. */
1583 head = sc->sc_current_queue;
1584 sorted = 1;
1585 break;
1586 case 2: /* In-flight to the active journal. */
1587 head = sc->sc_flush_queue;
1588 sorted = 0;
1589 break;
1590 case 3: /* Active journal. */
1591 head = sc->sc_active.jj_queue;
1592 sorted = 1;
1593 break;
1594 case 4: /* Inactive journal. */
1595 /*
1596			 * XXX: There could be a race here with g_journal_lowmem().
1597 */
1598 head = sc->sc_inactive.jj_queue;
1599 sorted = 1;
1600 break;
1601 case 5: /* In-flight to the data provider. */
1602 head = sc->sc_copy_queue;
1603 sorted = 0;
1604 break;
1605 default:
1606 panic("gjournal %s: i=%d", __func__, i);
1607 }
1608 if (i == 0)
1609 bp = g_journal_read_queue_find(&sc->sc_delayed_queue.queue, pbp, ostart, oend);
1610 else
1611 bp = g_journal_read_find(head, sorted, pbp, ostart, oend);
1612 if (bp == pbp) { /* Got the whole request. */
1613 GJ_DEBUG(2, "Got the whole request from %u.", i);
1614 return;
1615 } else if (bp != NULL) {
1616 cstart = MAX(ostart, bp->bio_offset);
1617 cend = MIN(oend, bp->bio_offset + bp->bio_length);
1618 GJ_DEBUG(2, "Got part of the request from %u (%jd-%jd).",
1619 i, (intmax_t)cstart, (intmax_t)cend);
1620 break;
1621 }
1622 }
1623 if (bp != NULL) {
1624 if (bp->bio_data == NULL) {
1625 nbp = g_clone_bio(pbp);
1626 nbp->bio_cflags = GJ_BIO_READ;
1627 nbp->bio_data =
1628 pbp->bio_data + cstart - pbp->bio_offset;
1629 nbp->bio_offset =
1630 bp->bio_joffset + cstart - bp->bio_offset;
1631 nbp->bio_length = cend - cstart;
1632 nbp->bio_done = g_journal_read_done;
1633 g_io_request(nbp, sc->sc_jconsumer);
1634 }
1635 /*
1636 * If we don't have the whole request yet, call g_journal_read()
1637 * recursively.
1638 */
1639 if (ostart < cstart)
1640 g_journal_read(sc, pbp, ostart, cstart);
1641 if (oend > cend)
1642 g_journal_read(sc, pbp, cend, oend);
1643 } else {
1644 /*
1645 * No data in memory, no data in journal.
1646		 * It's time to ask the data provider.
1647 */
1648 GJ_DEBUG(3, "READ(data): (%jd, %jd)", ostart, oend);
1649 nbp = g_clone_bio(pbp);
1650 nbp->bio_cflags = GJ_BIO_READ;
1651 nbp->bio_data = pbp->bio_data + ostart - pbp->bio_offset;
1652 nbp->bio_offset = ostart;
1653 nbp->bio_length = oend - ostart;
1654 nbp->bio_done = g_journal_read_done;
1655 g_io_request(nbp, sc->sc_dconsumer);
1656 /* We have the whole request, return here. */
1657 return;
1658 }
1659}
1660
1661/*
1662 * Function responsible for handling finished READ requests.
1663 * Actually, g_std_done() could be used here; the only difference is that
1664 * we log errors.
1665 */
1666static void
1667g_journal_read_done(struct bio *bp)
1668{
1669 struct bio *pbp;
1670
1671 KASSERT(bp->bio_cflags == GJ_BIO_READ,
1672 ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_READ));
1673
1674 pbp = bp->bio_parent;
1675 pbp->bio_inbed++;
1676 pbp->bio_completed += bp->bio_length;
1677
1678 if (bp->bio_error != 0) {
1679 if (pbp->bio_error == 0)
1680 pbp->bio_error = bp->bio_error;
1681 GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
1682 bp->bio_to->name, bp->bio_error);
1683 }
1684 g_destroy_bio(bp);
1685 if (pbp->bio_children == pbp->bio_inbed &&
1686 pbp->bio_completed == pbp->bio_length) {
1687 /* We're done. */
1688 g_io_deliver(pbp, 0);
1689 }
1690}
1691
1692/*
1693 * Deactivate the current journal and activate the next one.
1694 */
1695static void
1696g_journal_switch(struct g_journal_softc *sc)
1697{
1698 struct g_provider *pp;
1699
1700 if (JEMPTY(sc)) {
1701 GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
1702 pp = LIST_FIRST(&sc->sc_geom->provider);
1703 if (!(sc->sc_flags & GJF_DEVICE_CLEAN) && pp->acw == 0) {
1704 sc->sc_flags |= GJF_DEVICE_CLEAN;
1705 GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
1706 g_journal_metadata_update(sc);
1707 }
1708 } else {
1709 GJ_DEBUG(3, "Switching journal %s.", sc->sc_geom->name);
1710
1711 pp = sc->sc_jprovider;
1712
1713 sc->sc_journal_previous_id = sc->sc_journal_id;
1714
1715 sc->sc_journal_id = sc->sc_journal_next_id;
1716 sc->sc_journal_next_id = arc4random();
1717
1718 GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
1719
1720 g_journal_write_header(sc);
1721
1722 sc->sc_inactive.jj_offset = sc->sc_active.jj_offset;
1723 sc->sc_inactive.jj_queue = sc->sc_active.jj_queue;
1724
1725 sc->sc_active.jj_offset =
1726 sc->sc_journal_offset - pp->sectorsize;
1727 sc->sc_active.jj_queue = NULL;
1728
1729 /*
1730 * Switch is done, start copying data from the (now) inactive
1731 * journal to the data provider.
1732 */
1733 g_journal_copy_start(sc);
1734 }
1735 mtx_lock(&sc->sc_mtx);
1736 sc->sc_flags &= ~GJF_DEVICE_SWITCH;
1737 mtx_unlock(&sc->sc_mtx);
1738}
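
/*
 * Journal ID rotation during a switch, with hypothetical IDs A, B, C: if
 * sc_journal_id == A and sc_journal_next_id == B before the switch, then
 * afterwards sc_journal_previous_id == A, sc_journal_id == B and
 * sc_journal_next_id is a fresh arc4random() value C. The header written
 * for B also records C, which is what g_journal_sync() later matches when
 * looking for the termination.
 */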
1739
1740static void
1741g_journal_initialize(struct g_journal_softc *sc)
1742{
1743
1744 sc->sc_journal_id = arc4random();
1745 sc->sc_journal_next_id = arc4random();
1746 sc->sc_journal_previous_id = sc->sc_journal_id;
1747 sc->sc_journal_offset = sc->sc_jstart;
1748 sc->sc_inactive.jj_offset = sc->sc_jstart;
1749 g_journal_write_header(sc);
1750 sc->sc_active.jj_offset = sc->sc_jstart;
1751}
1752
1753static void
1754g_journal_mark_as_dirty(struct g_journal_softc *sc)
1755{
1756 const struct g_journal_desc *desc;
1757 int i;
1758
1759 GJ_DEBUG(1, "Marking file system %s as dirty.", sc->sc_name);
1760 for (i = 0; (desc = g_journal_filesystems[i]) != NULL; i++)
1761 desc->jd_dirty(sc->sc_dconsumer);
1762}
1763
1764/*
1765 * Function reads a record header from the given journal.
1766 * It is very similar to g_read_data(9), but it doesn't allocate memory for
1767 * the bio and data on every call.
1768 */
1769static int
1770g_journal_sync_read(struct g_consumer *cp, struct bio *bp, off_t offset,
1771 void *data)
1772{
1773 int error;
1774
1775 bzero(bp, sizeof(*bp));
1776 bp->bio_cmd = BIO_READ;
1777 bp->bio_done = NULL;
1778 bp->bio_offset = offset;
1779 bp->bio_length = cp->provider->sectorsize;
1780 bp->bio_data = data;
1781 g_io_request(bp, cp);
1782 error = biowait(bp, "gjs_read");
1783 return (error);
1784}
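
/*
 * A minimal usage sketch (the point is that bp and buf are reused across
 * calls; cp is a journal consumer as prepared by g_journal_sync(), and the
 * starting offset is hypothetical):
 */
#if 0
	struct bio *bp;
	u_char *buf;
	off_t offset = 0;	/* hypothetical starting journal offset */
	int error;

	bp = g_alloc_bio();
	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
	for (;;) {
		error = g_journal_sync_read(cp, bp, offset, buf);
		if (error != 0)
			break;
		/* Decode buf here, then advance to the next header. */
		offset += cp->provider->sectorsize;
	}
	gj_free(buf, cp->provider->sectorsize);
	g_destroy_bio(bp);
#endif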
1785
1786#if 0
1787/*
1788 * This function is called when we start the journal device and detect that
1789 * one of the journals was not fully copied.
1790 * Its purpose is to read all record headers from the journal and place them
1791 * in the inactive queue, so we can start the journal synchronization process
1792 * and the journal provider itself.
1793 * The design decision was to not synchronize the whole journal here, as it
1794 * can take too much time. Reading only the headers and delaying the sync
1795 * until after the journal provider is started should be the best choice.
1796 */
1797#endif
1798
1799static void
1800g_journal_sync(struct g_journal_softc *sc)
1801{
1802 struct g_journal_record_header rhdr;
1803 struct g_journal_entry *ent;
1804 struct g_journal_header jhdr;
1805 struct g_consumer *cp;
1806 struct bio *bp, *fbp, *tbp;
1807 off_t joffset, offset;
1808 u_char *buf, sum[16];
1809 uint64_t id;
1810 MD5_CTX ctx;
1811 int error, found, i;
1812
1813 found = 0;
1814 fbp = NULL;
1815 cp = sc->sc_jconsumer;
1816 bp = g_alloc_bio();
1817 buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
1818 offset = joffset = sc->sc_inactive.jj_offset = sc->sc_journal_offset;
1819
1820 GJ_DEBUG(2, "Looking for termination at %jd.", (intmax_t)joffset);
1821
1822 /*
1823 * Read and decode first journal header.
1824 */
1825 error = g_journal_sync_read(cp, bp, offset, buf);
1826 if (error != 0) {
1827 GJ_DEBUG(0, "Error while reading journal header from %s.",
1828 cp->provider->name);
1829 goto end;
1830 }
1831 error = g_journal_header_decode(buf, &jhdr);
1832 if (error != 0) {
1833 GJ_DEBUG(0, "Cannot decode journal header from %s.",
1834 cp->provider->name);
1835 goto end;
1836 }
1837 id = sc->sc_journal_id;
1838 if (jhdr.jh_journal_id != sc->sc_journal_id) {
1839 GJ_DEBUG(1, "Journal ID mismatch at %jd (0x%08x != 0x%08x).",
1840 (intmax_t)offset, (u_int)jhdr.jh_journal_id, (u_int)id);
1841 goto end;
1842 }
1843 offset += cp->provider->sectorsize;
1844 id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1845
1846 for (;;) {
1847 /*
1848 * If the biggest record won't fit, look for a record header or
1849		 * journal header from the beginning.
1850 */
1851 GJ_VALIDATE_OFFSET(offset, sc);
1852 error = g_journal_sync_read(cp, bp, offset, buf);
1853 if (error != 0) {
1854 /*
1855			 * Not good. An error while reading a header
1856			 * means that we cannot read the next headers and,
1857			 * in consequence, cannot find the termination.
1858 */
1859 GJ_DEBUG(0,
1860 "Error while reading record header from %s.",
1861 cp->provider->name);
1862 break;
1863 }
1864
1865 error = g_journal_record_header_decode(buf, &rhdr);
1866 if (error != 0) {
1867 GJ_DEBUG(2, "Not a record header at %jd (error=%d).",
1868 (intmax_t)offset, error);
1869 /*
1870 * This is not a record header.
1871			 * If we are lucky, this is the next journal header.
1872 */
1873 error = g_journal_header_decode(buf, &jhdr);
1874 if (error != 0) {
1875 GJ_DEBUG(1, "Not a journal header at %jd (error=%d).",
1876 (intmax_t)offset, error);
1877 /*
1878				 * Nope, this is not a journal header, which
1879				 * basically means that the journal is not
1880				 * terminated properly.
1881 */
1882 error = ENOENT;
1883 break;
1884 }
1885 /*
1886			 * OK. This is the header of _some_ journal. Now we need
1887			 * to verify that it is the header of the _next_ journal.
1888 */
1889 if (jhdr.jh_journal_id != id) {
1890 GJ_DEBUG(1, "Journal ID mismatch at %jd "
1891 "(0x%08x != 0x%08x).", (intmax_t)offset,
1892 (u_int)jhdr.jh_journal_id, (u_int)id);
1893 error = ENOENT;
1894 break;
1895 }
1896
1897 /* Found termination. */
1898 found++;
1899 GJ_DEBUG(1, "Found termination at %jd (id=0x%08x).",
1900 (intmax_t)offset, (u_int)id);
1901 sc->sc_active.jj_offset = offset;
1902 sc->sc_journal_offset =
1903 offset + cp->provider->sectorsize;
1904 sc->sc_journal_id = id;
1905 id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1906
1907 while ((tbp = fbp) != NULL) {
1908 fbp = tbp->bio_next;
1909 GJ_LOGREQ(3, tbp, "Adding request.");
1910 g_journal_insert_bio(&sc->sc_inactive.jj_queue,
1911 tbp, M_WAITOK);
1912 }
1913
1914 /* Skip journal's header. */
1915 offset += cp->provider->sectorsize;
1916 continue;
1917 }
1918
1919 /* Skip record's header. */
1920 offset += cp->provider->sectorsize;
1921
1922 /*
1923 * Add information about every record entry to the inactive
1924 * queue.
1925 */
1926 if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1927 MD5Init(&ctx);
1928 for (i = 0; i < rhdr.jrh_nentries; i++) {
1929 ent = &rhdr.jrh_entries[i];
1930 GJ_DEBUG(3, "Insert entry: %jd %jd.",
1931 (intmax_t)ent->je_offset, (intmax_t)ent->je_length);
1932 g_journal_insert(&fbp, ent->je_offset,
1933 ent->je_offset + ent->je_length, ent->je_joffset,
1934 NULL, M_WAITOK);
1935 if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1936 u_char *buf2;
1937
1938 /*
1939				 * TODO: Should use a faster function (like
1940				 * g_journal_sync_read()).
1941 */
1942 buf2 = g_read_data(cp, offset, ent->je_length,
1943 NULL);
1944 if (buf2 == NULL)
1945 GJ_DEBUG(0, "Cannot read data at %jd.",
1946 (intmax_t)offset);
1947 else {
1948 MD5Update(&ctx, buf2, ent->je_length);
1949 g_free(buf2);
1950 }
1951 }
1952 /* Skip entry's data. */
1953 offset += ent->je_length;
1954 }
1955 if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1956 MD5Final(sum, &ctx);
1957 if (bcmp(sum, rhdr.jrh_sum, sizeof(rhdr.jrh_sum)) != 0) {
1958 GJ_DEBUG(0, "MD5 hash mismatch at %jd!",
1959 (intmax_t)offset);
1960 }
1961 }
1962 }
1963end:
1964 gj_free(bp->bio_data, cp->provider->sectorsize);
1965 g_destroy_bio(bp);
1966
1967 /* Remove bios from unterminated journal. */
1968 while ((tbp = fbp) != NULL) {
1969 fbp = tbp->bio_next;
1970 g_destroy_bio(tbp);
1971 }
1972
1973 if (found < 1 && joffset > 0) {
1974 GJ_DEBUG(0, "Journal on %s is broken/corrupted. Initializing.",
1975 sc->sc_name);
1976 while ((tbp = sc->sc_inactive.jj_queue) != NULL) {
1977 sc->sc_inactive.jj_queue = tbp->bio_next;
1978 g_destroy_bio(tbp);
1979 }
1980 g_journal_initialize(sc);
1981 g_journal_mark_as_dirty(sc);
1982 } else {
1983 GJ_DEBUG(0, "Journal %s consistent.", sc->sc_name);
1984 g_journal_copy_start(sc);
1985 }
1986}
1987
1988/*
1989 * Wait for requests.
1990 * If we have requests in the current queue, flush them 3 seconds after the
1991 * last flush. This way we don't wait forever (or for a journal switch)
1992 * before storing non-full records in the journal.
1993 */
1994static void
1995g_journal_wait(struct g_journal_softc *sc, time_t last_write)
1996{
1997 int error, timeout;
1998
1999 GJ_DEBUG(3, "%s: enter", __func__);
2000 if (sc->sc_current_count == 0) {
2001 if (g_journal_debug < 2)
2002 msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", 0);
2003 else {
2004 /*
2005			 * If we have debug turned on, show the number of
2006			 * elements in the various queues.
2007 */
2008 for (;;) {
2009 error = msleep(sc, &sc->sc_mtx, PRIBIO,
2010 "gj:work", hz * 3);
2011 if (error == 0) {
2012 mtx_unlock(&sc->sc_mtx);
2013 break;
2014 }
2015 GJ_DEBUG(3, "Report: current count=%d",
2016 sc->sc_current_count);
2017 GJ_DEBUG(3, "Report: flush count=%d",
2018 sc->sc_flush_count);
2019 GJ_DEBUG(3, "Report: flush in progress=%d",
2020 sc->sc_flush_in_progress);
2021 GJ_DEBUG(3, "Report: copy in progress=%d",
2022 sc->sc_copy_in_progress);
2023 GJ_DEBUG(3, "Report: delayed=%d",
2024 sc->sc_delayed_count);
2025 }
2026 }
2027 GJ_DEBUG(3, "%s: exit 1", __func__);
2028 return;
2029 }
2030
2031 /*
2032	 * Flush even non-full records every 3 seconds.
2033 */
2034 timeout = (last_write + 3 - time_second) * hz;
2035 if (timeout <= 0) {
2036 mtx_unlock(&sc->sc_mtx);
2037 g_journal_flush(sc);
2038 g_journal_flush_send(sc);
2039 GJ_DEBUG(3, "%s: exit 2", __func__);
2040 return;
2041 }
2042 error = msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", timeout);
2043 if (error == EWOULDBLOCK)
2044 g_journal_flush_send(sc);
2045 GJ_DEBUG(3, "%s: exit 3", __func__);
2046}
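
/*
 * The timeout arithmetic, with hypothetical times: if the last write
 * happened at time_second == 100 and it is now 102, then
 * timeout = (100 + 3 - 102) * hz == 1 * hz, i.e. sleep at most one more
 * second before flushing a non-full record; at 103 or later the timeout
 * is <= 0 and the record is flushed immediately.
 */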
2047
2048/*
2049 * Worker thread.
2050 */
2051static void
2052g_journal_worker(void *arg)
2053{
2054 struct g_journal_softc *sc;
2055 struct g_geom *gp;
2056 struct g_provider *pp;
2057 struct bio *bp;
2058 time_t last_write;
2059 int type;
2060
2061 mtx_lock_spin(&sched_lock);
2062 sched_prio(curthread, PRIBIO);
2063 mtx_unlock_spin(&sched_lock);
2064
2065 sc = arg;
2066
2067 if (sc->sc_flags & GJF_DEVICE_CLEAN) {
2068 GJ_DEBUG(0, "Journal %s clean.", sc->sc_name);
2069 g_journal_initialize(sc);
2070 } else {
2071 g_journal_sync(sc);
2072 }
2073 /*
2074 * Check if we can use BIO_FLUSH.
2075 */
2076 sc->sc_bio_flush = 0;
2077 if (g_io_flush(sc->sc_jconsumer) == 0) {
2078 sc->sc_bio_flush |= GJ_FLUSH_JOURNAL;
2079 GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
2080 sc->sc_jconsumer->provider->name);
2081 } else {
2082 GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
2083 sc->sc_jconsumer->provider->name);
2084 }
2085 if (sc->sc_jconsumer != sc->sc_dconsumer) {
2086 if (g_io_flush(sc->sc_dconsumer) == 0) {
2087 sc->sc_bio_flush |= GJ_FLUSH_DATA;
2088 GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
2089 sc->sc_dconsumer->provider->name);
2090 } else {
2091 GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
2092 sc->sc_dconsumer->provider->name);
2093 }
2094 }
2095
2096 gp = sc->sc_geom;
2097 g_topology_lock();
2098 pp = g_new_providerf(gp, "%s.journal", sc->sc_name);
2099 KASSERT(pp != NULL, ("Cannot create %s.journal.", sc->sc_name));
2100 pp->mediasize = sc->sc_mediasize;
2101 /*
2102	 * There could be a problem when the data and journal providers have
2103	 * different sector sizes, but such a scenario is prevented on journal
2104	 * creation.
2105 */
2106 pp->sectorsize = sc->sc_sectorsize;
2107 g_error_provider(pp, 0);
2108 g_topology_unlock();
2109 last_write = time_second;
2110
2111 for (;;) {
2112 /* Get first request from the queue. */
2113 mtx_lock(&sc->sc_mtx);
2114 bp = bioq_first(&sc->sc_back_queue);
2115 if (bp != NULL)
2116 type = (bp->bio_cflags & GJ_BIO_MASK);
2117 if (bp == NULL) {
2118 bp = bioq_first(&sc->sc_regular_queue);
2119 if (bp != NULL)
2120 type = GJ_BIO_REGULAR;
2121 }
2122 if (bp == NULL) {
2123try_switch:
2124 if ((sc->sc_flags & GJF_DEVICE_SWITCH) ||
2125 (sc->sc_flags & GJF_DEVICE_DESTROY)) {
2126 if (sc->sc_current_count > 0) {
2127 mtx_unlock(&sc->sc_mtx);
2128 g_journal_flush(sc);
2129 g_journal_flush_send(sc);
2130 continue;
2131 }
2132 if (sc->sc_flush_in_progress > 0)
2133 goto sleep;
2134 if (sc->sc_copy_in_progress > 0)
2135 goto sleep;
2136 }
2137 if (sc->sc_flags & GJF_DEVICE_SWITCH) {
2138 mtx_unlock(&sc->sc_mtx);
2139 g_journal_switch(sc);
2140 wakeup(&sc->sc_journal_copying);
2141 continue;
2142 }
2143 if (sc->sc_flags & GJF_DEVICE_DESTROY) {
2144 GJ_DEBUG(1, "Shutting down worker "
2145 "thread for %s.", gp->name);
2146 sc->sc_worker = NULL;
2147 wakeup(&sc->sc_worker);
2148 mtx_unlock(&sc->sc_mtx);
2149 kthread_exit(0);
2150 }
2151sleep:
2152 g_journal_wait(sc, last_write);
2153 continue;
2154 }
2155 /*
2156		 * If we're in the switch process, we need to delay all new
2157		 * write requests until it's done.
2158 */
2159 if ((sc->sc_flags & GJF_DEVICE_SWITCH) &&
2160 type == GJ_BIO_REGULAR && bp->bio_cmd == BIO_WRITE) {
2161 GJ_LOGREQ(2, bp, "WRITE on SWITCH");
2162 goto try_switch;
2163 }
2164 if (type == GJ_BIO_REGULAR)
2165 bioq_remove(&sc->sc_regular_queue, bp);
2166 else
2167 bioq_remove(&sc->sc_back_queue, bp);
2168 mtx_unlock(&sc->sc_mtx);
2169 switch (type) {
2170 case GJ_BIO_REGULAR:
2171 /* Regular request. */
2172 switch (bp->bio_cmd) {
2173 case BIO_READ:
2174 g_journal_read(sc, bp, bp->bio_offset,
2175 bp->bio_offset + bp->bio_length);
2176 break;
2177 case BIO_WRITE:
2178 last_write = time_second;
2179 g_journal_add_request(sc, bp);
2180 g_journal_flush_send(sc);
2181 break;
2182 default:
2183 panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2184 }
2185 break;
2186 case GJ_BIO_COPY:
2187 switch (bp->bio_cmd) {
2188 case BIO_READ:
2189 if (g_journal_copy_read_done(bp))
2190 g_journal_copy_send(sc);
2191 break;
2192 case BIO_WRITE:
2193 g_journal_copy_write_done(bp);
2194 g_journal_copy_send(sc);
2195 break;
2196 default:
2197 panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2198 }
2199 break;
2200 case GJ_BIO_JOURNAL:
2201 g_journal_flush_done(bp);
2202 g_journal_flush_send(sc);
2203 break;
2204 case GJ_BIO_READ:
2205 default:
2206 panic("Invalid bio (%d).", type);
2207 }
2208 }
2209}
2210
2211static void
2212g_journal_destroy_event(void *arg, int flags __unused)
2213{
2214 struct g_journal_softc *sc;
2215
2216 g_topology_assert();
2217 sc = arg;
2218 g_journal_destroy(sc);
2219}
2220
2221static void
2222g_journal_timeout(void *arg)
2223{
2224 struct g_journal_softc *sc;
2225
2226 sc = arg;
2227 GJ_DEBUG(0, "Timeout. Journal %s cannot be completed.",
2228 sc->sc_geom->name);
2229 g_post_event(g_journal_destroy_event, sc, M_NOWAIT, NULL);
2230}
2231
2232static struct g_geom *
2233g_journal_create(struct g_class *mp, struct g_provider *pp,
2234 const struct g_journal_metadata *md)
2235{
2236 struct g_journal_softc *sc;
2237 struct g_geom *gp;
2238 struct g_consumer *cp;
2239 int error;
2240
2241 g_topology_assert();
2242 /*
2243 * There are two possibilities:
2244 * 1. Data and both journals are on the same provider.
2245	 * 2. Data and journals are on separate providers.
2246 */
2247 /* Look for journal device with the same ID. */
2248 LIST_FOREACH(gp, &mp->geom, geom) {
2249 sc = gp->softc;
2250 if (sc == NULL)
2251 continue;
2252 if (sc->sc_id == md->md_id)
2253 break;
2254 }
2255 if (gp == NULL)
2256 sc = NULL;
2257 else if (sc != NULL && (sc->sc_type & md->md_type) != 0) {
2258 GJ_DEBUG(1, "Journal device %u already configured.", sc->sc_id);
2259 return (NULL);
2260 }
2261 if (md->md_type == 0 || (md->md_type & ~GJ_TYPE_COMPLETE) != 0) {
2262 GJ_DEBUG(0, "Invalid type on %s.", pp->name);
2263 return (NULL);
2264 }
2265 if (md->md_type & GJ_TYPE_DATA) {
2266 GJ_DEBUG(0, "Journal %u: %s contains data.", md->md_id,
2267 pp->name);
2268 }
2269 if (md->md_type & GJ_TYPE_JOURNAL) {
2270 GJ_DEBUG(0, "Journal %u: %s contains journal.", md->md_id,
2271 pp->name);
2272 }
2273
2274 if (sc == NULL) {
2275 /* Action geom. */
2276 sc = malloc(sizeof(*sc), M_JOURNAL, M_WAITOK | M_ZERO);
2277 sc->sc_id = md->md_id;
2278 sc->sc_type = 0;
2279 sc->sc_flags = 0;
2280 sc->sc_worker = NULL;
2281
2282 gp = g_new_geomf(mp, "gjournal %u", sc->sc_id);
2283 gp->start = g_journal_start;
2284 gp->orphan = g_journal_orphan;
2285 gp->access = g_journal_access;
2286 gp->softc = sc;
2287 sc->sc_geom = gp;
2288
2289 mtx_init(&sc->sc_mtx, "gjournal", NULL, MTX_DEF);
2290
2291 bioq_init(&sc->sc_back_queue);
2292 bioq_init(&sc->sc_regular_queue);
2293 bioq_init(&sc->sc_delayed_queue);
2294 sc->sc_delayed_count = 0;
2295 sc->sc_current_queue = NULL;
2296 sc->sc_current_count = 0;
2297 sc->sc_flush_queue = NULL;
2298 sc->sc_flush_count = 0;
2299 sc->sc_flush_in_progress = 0;
2300 sc->sc_copy_queue = NULL;
2301 sc->sc_copy_in_progress = 0;
2302 sc->sc_inactive.jj_queue = NULL;
2303 sc->sc_active.jj_queue = NULL;
2304
2305 callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
2306 if (md->md_type != GJ_TYPE_COMPLETE) {
2307 /*
2308 * Journal and data are on separate providers.
2309 * At this point we have only one of them.
2310		 * We set up a timeout in case the other part does not
2311		 * appear, so we won't wait forever.
2312 */
2313 callout_reset(&sc->sc_callout, 5 * hz,
2314 g_journal_timeout, sc);
2315 }
2316 }
2317
2318 /* Remember type of the data provider. */
2319 if (md->md_type & GJ_TYPE_DATA)
2320 sc->sc_orig_type = md->md_type;
2321 sc->sc_type |= md->md_type;
2322 cp = NULL;
2323
2324 if (md->md_type & GJ_TYPE_DATA) {
2325 if (md->md_flags & GJ_FLAG_CLEAN)
2326 sc->sc_flags |= GJF_DEVICE_CLEAN;
2327 if (md->md_flags & GJ_FLAG_CHECKSUM)
2328 sc->sc_flags |= GJF_DEVICE_CHECKSUM;
2329 cp = g_new_consumer(gp);
2330 error = g_attach(cp, pp);
2331 KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
2332 pp->name, error));
2333 error = g_access(cp, 1, 1, 1);
2334 if (error != 0) {
2335 GJ_DEBUG(0, "Cannot access %s (error=%d).", pp->name,
2336 error);
2337 g_journal_destroy(sc);
2338 return (NULL);
2339 }
2340 sc->sc_dconsumer = cp;
2341 sc->sc_mediasize = pp->mediasize - pp->sectorsize;
2342 sc->sc_sectorsize = pp->sectorsize;
2343 sc->sc_jstart = md->md_jstart;
2344 sc->sc_jend = md->md_jend;
2345 if (md->md_provider[0] != '\0')
2346 sc->sc_flags |= GJF_DEVICE_HARDCODED;
2347 sc->sc_journal_offset = md->md_joffset;
2348 sc->sc_journal_id = md->md_jid;
2349 sc->sc_journal_previous_id = md->md_jid;
2350 }
2351 if (md->md_type & GJ_TYPE_JOURNAL) {
2352 if (cp == NULL) {
2353 cp = g_new_consumer(gp);
2354 error = g_attach(cp, pp);
2355 KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
2356 pp->name, error));
2357 error = g_access(cp, 1, 1, 1);
2358 if (error != 0) {
2359 GJ_DEBUG(0, "Cannot access %s (error=%d).",
2360 pp->name, error);
2361 g_journal_destroy(sc);
2362 return (NULL);
2363 }
2364 } else {
2365 /*
2366			 * The journal is on the same provider as the data, which
2367			 * means that the data area ends where the journal starts.
2368 */
2369 sc->sc_mediasize = md->md_jstart;
2370 }
2371 sc->sc_jconsumer = cp;
2372 }
2373
2374 if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE) {
2375 /* Journal is not complete yet. */
2376 return (gp);
2377 } else {
2378 /* Journal complete, cancel timeout. */
2379 callout_drain(&sc->sc_callout);
2380 }
2381
2382 error = kthread_create(g_journal_worker, sc, &sc->sc_worker, 0, 0,
2383 "g_journal %s", sc->sc_name);
2384 if (error != 0) {
2385 GJ_DEBUG(0, "Cannot create worker thread for %s.journal.",
2386 sc->sc_name);
2387 g_journal_destroy(sc);
2388 return (NULL);
2389 }
2390
2391 return (gp);
2392}
2393
2394static void
2395g_journal_destroy_consumer(void *arg, int flags __unused)
2396{
2397 struct g_consumer *cp;
2398
2399 g_topology_assert();
2400 cp = arg;
2401 g_detach(cp);
2402 g_destroy_consumer(cp);
2403}
2404
2405static int
2406g_journal_destroy(struct g_journal_softc *sc)
2407{
2408 struct g_geom *gp;
2409 struct g_provider *pp;
2410 struct g_consumer *cp;
2411
2412 g_topology_assert();
2413
2414 if (sc == NULL)
2415 return (ENXIO);
2416
2417 gp = sc->sc_geom;
2418 pp = LIST_FIRST(&gp->provider);
2419 if (pp != NULL) {
2420 if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) {
2421 GJ_DEBUG(1, "Device %s is still open (r%dw%de%d).",
2422 pp->name, pp->acr, pp->acw, pp->ace);
2423 return (EBUSY);
2424 }
2425 g_error_provider(pp, ENXIO);
2426
2427 g_journal_flush(sc);
2428 g_journal_flush_send(sc);
2429 g_journal_switch(sc);
2430 }
2431
2432 sc->sc_flags |= (GJF_DEVICE_DESTROY | GJF_DEVICE_CLEAN);
2433
2434 g_topology_unlock();
2435 callout_drain(&sc->sc_callout);
2436 mtx_lock(&sc->sc_mtx);
2437 wakeup(sc);
2438 while (sc->sc_worker != NULL)
2439 msleep(&sc->sc_worker, &sc->sc_mtx, PRIBIO, "gj:destroy", 0);
2440 mtx_unlock(&sc->sc_mtx);
2441
2442 if (pp != NULL) {
2443 GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
2444 g_journal_metadata_update(sc);
2445 g_topology_lock();
2446 pp->flags |= G_PF_WITHER;
2447 g_orphan_provider(pp, ENXIO);
2448 } else {
2449 g_topology_lock();
2450 }
2451 mtx_destroy(&sc->sc_mtx);
2452
2453 if (sc->sc_current_count != 0) {
2454 GJ_DEBUG(0, "Warning! Number of current requests %d.",
2455 sc->sc_current_count);
2456 }
2457
2458 LIST_FOREACH(cp, &gp->consumer, consumer) {
2459 if (cp->acr + cp->acw + cp->ace > 0)
2460 g_access(cp, -1, -1, -1);
2461 /*
2462		 * We keep all consumers open for writing, so if we detached
2463		 * and destroyed the consumer here, the provider would be
2464		 * offered for tasting and the journal would be started again.
2465		 * Sending an event here prevents this from happening.
2466 */
2467 g_post_event(g_journal_destroy_consumer, cp, M_WAITOK, NULL);
2468 }
2469 gp->softc = NULL;
2470 g_wither_geom(gp, ENXIO);
2471 free(sc, M_JOURNAL);
2472 return (0);
2473}
2474
2475static void
2476g_journal_taste_orphan(struct g_consumer *cp)
2477{
2478
2479 KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2480 cp->provider->name));
2481}
2482
2483static struct g_geom *
2484g_journal_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
2485{
2486 struct g_journal_metadata md;
2487 struct g_consumer *cp;
2488 struct g_geom *gp;
2489 int error;
2490
2491 g_topology_assert();
2492 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
2493 GJ_DEBUG(2, "Tasting %s.", pp->name);
2494 if (pp->geom->class == mp)
2495 return (NULL);
2496
2497 gp = g_new_geomf(mp, "journal:taste");
2498	/* This orphan function should never be called. */
2499 gp->orphan = g_journal_taste_orphan;
2500 cp = g_new_consumer(gp);
2501 g_attach(cp, pp);
2502 error = g_journal_metadata_read(cp, &md);
2503 g_detach(cp);
2504 g_destroy_consumer(cp);
2505 g_destroy_geom(gp);
2506 if (error != 0)
2507 return (NULL);
2508 gp = NULL;
2509
2510 if (md.md_provider[0] != '\0' && strcmp(md.md_provider, pp->name) != 0)
2511 return (NULL);
2512 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
2513 return (NULL);
2514 if (g_journal_debug >= 2)
2515 journal_metadata_dump(&md);
2516
2517 gp = g_journal_create(mp, pp, &md);
2518 return (gp);
2519}
2520
2521static struct g_journal_softc *
2522g_journal_find_device(struct g_class *mp, const char *name)
2523{
2524 struct g_journal_softc *sc;
2525 struct g_geom *gp;
2526 struct g_provider *pp;
2527
2528 if (strncmp(name, "/dev/", 5) == 0)
2529 name += 5;
2530 LIST_FOREACH(gp, &mp->geom, geom) {
2531 sc = gp->softc;
2532 if (sc == NULL)
2533 continue;
2534 if (sc->sc_flags & GJF_DEVICE_DESTROY)
2535 continue;
2536 if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
2537 continue;
2538 pp = LIST_FIRST(&gp->provider);
2539 if (strcmp(sc->sc_name, name) == 0)
2540 return (sc);
2541 if (pp != NULL && strcmp(pp->name, name) == 0)
2542 return (sc);
2543 }
2544 return (NULL);
2545}
2546
2547static void
2548g_journal_ctl_destroy(struct gctl_req *req, struct g_class *mp)
2549{
2550 struct g_journal_softc *sc;
2551 const char *name;
2552 char param[16];
2553 int *nargs;
2554 int error, i;
2555
2556 g_topology_assert();
2557
2558 nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
2559 if (nargs == NULL) {
2560 gctl_error(req, "No '%s' argument.", "nargs");
2561 return;
2562 }
2563 if (*nargs <= 0) {
2564 gctl_error(req, "Missing device(s).");
2565 return;
2566 }
2567
2568 for (i = 0; i < *nargs; i++) {
2569 snprintf(param, sizeof(param), "arg%d", i);
2570 name = gctl_get_asciiparam(req, param);
2571 if (name == NULL) {
2572 gctl_error(req, "No 'arg%d' argument.", i);
2573 return;
2574 }
2575 sc = g_journal_find_device(mp, name);
2576 if (sc == NULL) {
2577 gctl_error(req, "No such device: %s.", name);
2578 return;
2579 }
2580 error = g_journal_destroy(sc);
2581 if (error != 0) {
2582 gctl_error(req, "Cannot destroy device %s (error=%d).",
2583 LIST_FIRST(&sc->sc_geom->provider)->name, error);
2584 return;
2585 }
2586 }
2587}
2588
2589static void
2590g_journal_ctl_sync(struct gctl_req *req __unused, struct g_class *mp __unused)
2591{
2592
2593 g_topology_assert();
2594 g_topology_unlock();
2595 g_journal_sync_requested++;
2596 wakeup(&g_journal_switcher_state);
2597 while (g_journal_sync_requested > 0)
2598 tsleep(&g_journal_sync_requested, PRIBIO, "j:sreq", hz / 2);
2599 g_topology_lock();
2600}
2601
2602static void
2603g_journal_config(struct gctl_req *req, struct g_class *mp, const char *verb)
2604{
2605 uint32_t *version;
2606
2607 g_topology_assert();
2608
2609 version = gctl_get_paraml(req, "version", sizeof(*version));
2610 if (version == NULL) {
2611 gctl_error(req, "No '%s' argument.", "version");
2612 return;
2613 }
2614 if (*version != G_JOURNAL_VERSION) {
2615 gctl_error(req, "Userland and kernel parts are out of sync.");
2616 return;
2617 }
2618
2619 if (strcmp(verb, "destroy") == 0 || strcmp(verb, "stop") == 0) {
2620 g_journal_ctl_destroy(req, mp);
2621 return;
2622 } else if (strcmp(verb, "sync") == 0) {
2623 g_journal_ctl_sync(req, mp);
2624 return;
2625 }
2626
2627 gctl_error(req, "Unknown verb.");
2628}
2629
2630static void
2631g_journal_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2632 struct g_consumer *cp, struct g_provider *pp)
2633{
2634 struct g_journal_softc *sc;
2635
2636 g_topology_assert();
2637
2638 sc = gp->softc;
2639 if (sc == NULL)
2640 return;
2641 if (pp != NULL) {
2642 /* Nothing here. */
2643 } else if (cp != NULL) {
2644 int first = 1;
2645
2646 sbuf_printf(sb, "%s<Role>", indent);
2647 if (cp == sc->sc_dconsumer) {
2648 sbuf_printf(sb, "Data");
2649 first = 0;
2650 }
2651 if (cp == sc->sc_jconsumer) {
2652 if (!first)
2653 sbuf_printf(sb, ",");
2654 sbuf_printf(sb, "Journal");
2655 }
2656 sbuf_printf(sb, "</Role>\n");
2657 if (cp == sc->sc_jconsumer) {
2658 sbuf_printf(sb, "<Jstart>%jd</Jstart>",
2659 (intmax_t)sc->sc_jstart);
2660 sbuf_printf(sb, "<Jend>%jd</Jend>",
2661 (intmax_t)sc->sc_jend);
2662 }
2663 } else {
2664 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
2665 }
2666}
2667
2668static eventhandler_tag g_journal_event_shutdown = NULL;
2669static eventhandler_tag g_journal_event_lowmem = NULL;
2670
2671static void
2672g_journal_shutdown(void *arg, int howto __unused)
2673{
2674 struct g_class *mp;
2675 struct g_geom *gp, *gp2;
2676
2677 if (panicstr != NULL)
2678 return;
2679 mp = arg;
2680 DROP_GIANT();
2681 g_topology_lock();
2682 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
2683 if (gp->softc == NULL)
2684 continue;
2685 GJ_DEBUG(0, "Shutting down geom %s.", gp->name);
2686 g_journal_destroy(gp->softc);
2687 }
2688 g_topology_unlock();
2689 PICKUP_GIANT();
2690}
2691
2692/*
2693 * Free cached requests from the inactive queue in case of low memory.
2694 * We free GJ_FREE_AT_ONCE elements at once.
2695 */
2696#define GJ_FREE_AT_ONCE 4
2697static void
2698g_journal_lowmem(void *arg, int howto __unused)
2699{
2700 struct g_journal_softc *sc;
2701 struct g_class *mp;
2702 struct g_geom *gp;
2703 struct bio *bp;
2704 u_int nfree = GJ_FREE_AT_ONCE;
2705
2706 g_journal_stats_low_mem++;
2707 mp = arg;
2708 DROP_GIANT();
2709 g_topology_lock();
2710 LIST_FOREACH(gp, &mp->geom, geom) {
2711 sc = gp->softc;
2712 if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY))
2713 continue;
2714 mtx_lock(&sc->sc_mtx);
2715 for (bp = sc->sc_inactive.jj_queue; nfree > 0 && bp != NULL;
2716 nfree--, bp = bp->bio_next) {
2717 /*
2718			 * It is safe to free the bio_data, because:
2719			 * 1. If bio_data is NULL it will be read from the
2720			 *    inactive journal.
2721			 * 2. If bp is sent down, it is first removed from the
2722			 *    inactive queue, so it's impossible to free the
2723			 *    data from under an in-flight bio.
2724			 * On the other hand, freeing elements from the active
2725			 * queue is not safe.
2726 */
2727 if (bp->bio_data != NULL) {
2728 GJ_DEBUG(2, "Freeing data from %s.",
2729 sc->sc_name);
2730 gj_free(bp->bio_data, bp->bio_length);
2731 bp->bio_data = NULL;
2732 }
2733 }
2734 mtx_unlock(&sc->sc_mtx);
2735 if (nfree == 0)
2736 break;
2737 }
2738 g_topology_unlock();
2739 PICKUP_GIANT();
2740}
2741
2742static void g_journal_switcher(void *arg);
2743
2744static void
2745g_journal_init(struct g_class *mp)
2746{
2747 int error;
2748
2749	/* Pick a conservative value if the provided value is bogus. */
2750 if (g_journal_cache_divisor <= 0 ||
2751 (vm_kmem_size / g_journal_cache_divisor == 0)) {
2752 g_journal_cache_divisor = 5;
2753 }
2754 if (g_journal_cache_limit > 0) {
2755 g_journal_cache_limit = vm_kmem_size / g_journal_cache_divisor;
2756 g_journal_cache_low =
2757 (g_journal_cache_limit / 100) * g_journal_cache_switch;
2758 }
2759 g_journal_event_shutdown = EVENTHANDLER_REGISTER(shutdown_post_sync,
2760 g_journal_shutdown, mp, EVENTHANDLER_PRI_FIRST);
2761 if (g_journal_event_shutdown == NULL)
2762 GJ_DEBUG(0, "Warning! Cannot register shutdown event.");
2763 g_journal_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
2764 g_journal_lowmem, mp, EVENTHANDLER_PRI_FIRST);
2765 if (g_journal_event_lowmem == NULL)
2766 GJ_DEBUG(0, "Warning! Cannot register lowmem event.");
2767 error = kthread_create(g_journal_switcher, mp, NULL, 0, 0,
2768 "g_journal switcher");
2769 KASSERT(error == 0, ("Cannot create switcher thread."));
2770}
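
/*
 * The cache sizing arithmetic, with hypothetical values: with
 * vm_kmem_size == 400MB, g_journal_cache_divisor == 5 and
 * g_journal_cache_switch == 90, g_journal_cache_limit becomes 80MB and
 * g_journal_cache_low == (80MB / 100) * 90 == 72MB.
 */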
2771
2772static void
2773g_journal_fini(struct g_class *mp)
2774{
2775
2776 if (g_journal_event_shutdown != NULL) {
2777 EVENTHANDLER_DEREGISTER(shutdown_post_sync,
2778 g_journal_event_shutdown);
2779 }
2780 if (g_journal_event_lowmem != NULL)
2781 EVENTHANDLER_DEREGISTER(vm_lowmem, g_journal_event_lowmem);
2782 g_journal_switcher_state = GJ_SWITCHER_DIE;
2783 wakeup(&g_journal_switcher_state);
2784 while (g_journal_switcher_state != GJ_SWITCHER_DIED)
2785 tsleep(&g_journal_switcher_state, PRIBIO, "jfini:wait", hz / 5);
2786 GJ_DEBUG(1, "Switcher died.");
2787}
2788
2789DECLARE_GEOM_CLASS(g_journal_class, g_journal);
2790
2791static const struct g_journal_desc *
2792g_journal_find_desc(const char *fstype)
2793{
2794 const struct g_journal_desc *desc;
2795 int i;
2796
2797 for (desc = g_journal_filesystems[i = 0]; desc != NULL;
2798 desc = g_journal_filesystems[++i]) {
2799 if (strcmp(desc->jd_fstype, fstype) == 0)
2800 break;
2801 }
2802 return (desc);
2803}
2804
2805static void
2806g_journal_switch_wait(struct g_journal_softc *sc)
2807{
2808 struct bintime bt;
2809
2810 mtx_assert(&sc->sc_mtx, MA_OWNED);
2811 if (g_journal_debug >= 2) {
2812 if (sc->sc_flush_in_progress > 0) {
2813 GJ_DEBUG(2, "%d requests flushing.",
2814 sc->sc_flush_in_progress);
2815 }
2816 if (sc->sc_copy_in_progress > 0) {
2817 GJ_DEBUG(2, "%d requests copying.",
2818 sc->sc_copy_in_progress);
2819 }
2820 if (sc->sc_flush_count > 0) {
2821 GJ_DEBUG(2, "%d requests to flush.",
2822 sc->sc_flush_count);
2823 }
2824 if (sc->sc_delayed_count > 0) {
2825 GJ_DEBUG(2, "%d requests delayed.",
2826 sc->sc_delayed_count);
2827 }
2828 }
2829 g_journal_stats_switches++;
2830 if (sc->sc_copy_in_progress > 0)
2831 g_journal_stats_wait_for_copy++;
2832 GJ_TIMER_START(1, &bt);
2833 sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
2834 sc->sc_flags |= GJF_DEVICE_SWITCH;
2835 wakeup(sc);
2836 while (sc->sc_flags & GJF_DEVICE_SWITCH) {
2837 msleep(&sc->sc_journal_copying, &sc->sc_mtx, PRIBIO,
2838 "gj:switch", 0);
2839 }
2840 GJ_TIMER_STOP(1, &bt, "Switch time of %s", sc->sc_name);
2841}
2842
2843static void
2844g_journal_do_switch(struct g_class *classp, struct thread *td)
2845{
2846 struct g_journal_softc *sc;
2847 const struct g_journal_desc *desc;
2848 struct g_geom *gp;
2849 struct mount *mp;
2850 struct bintime bt;
2851 char *mountpoint;
2852 int error, vfslocked;
2853
2854 DROP_GIANT();
2855 g_topology_lock();
2856 LIST_FOREACH(gp, &classp->geom, geom) {
2857 sc = gp->softc;
2858 if (sc == NULL)
2859 continue;
2860 if (sc->sc_flags & GJF_DEVICE_DESTROY)
2861 continue;
2862 if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
2863 continue;
2864 mtx_lock(&sc->sc_mtx);
2865 sc->sc_flags |= GJF_DEVICE_BEFORE_SWITCH;
2866 mtx_unlock(&sc->sc_mtx);
2867 }
2868 g_topology_unlock();
2869 PICKUP_GIANT();
2870
        mtx_lock(&mountlist_mtx);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (mp->mnt_gjprovider == NULL)
                        continue;
                if (mp->mnt_flag & MNT_RDONLY)
                        continue;
                desc = g_journal_find_desc(mp->mnt_stat.f_fstypename);
                if (desc == NULL)
                        continue;
                if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
                        continue;
                /* mtx_unlock(&mountlist_mtx) was done inside vfs_busy() */

                DROP_GIANT();
                g_topology_lock();
                sc = g_journal_find_device(classp, mp->mnt_gjprovider);
                g_topology_unlock();
                PICKUP_GIANT();

                if (sc == NULL) {
                        GJ_DEBUG(0, "Cannot find journal geom for %s.",
                            mp->mnt_gjprovider);
                        goto next;
                } else if (JEMPTY(sc)) {
                        mtx_lock(&sc->sc_mtx);
                        sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
                        mtx_unlock(&sc->sc_mtx);
                        GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
                        goto next;
                }

                mountpoint = mp->mnt_stat.f_mntonname;

                vfslocked = VFS_LOCK_GIANT(mp);

                error = vn_start_write(NULL, &mp, V_WAIT);
                if (error != 0) {
                        VFS_UNLOCK_GIANT(vfslocked);
                        GJ_DEBUG(0, "vn_start_write(%s) failed (error=%d).",
                            mountpoint, error);
                        goto next;
                }

                MNT_ILOCK(mp);
                mp->mnt_noasync++;
                mp->mnt_kern_flag &= ~MNTK_ASYNC;
                MNT_IUNLOCK(mp);

                GJ_TIMER_START(1, &bt);
                vfs_msync(mp, MNT_NOWAIT);
                GJ_TIMER_STOP(1, &bt, "Msync time of %s", mountpoint);

                GJ_TIMER_START(1, &bt);
                error = VFS_SYNC(mp, MNT_NOWAIT, curthread);
                if (error == 0)
                        GJ_TIMER_STOP(1, &bt, "Sync time of %s", mountpoint);
                else {
                        GJ_DEBUG(0, "Cannot sync file system %s (error=%d).",
                            mountpoint, error);
                }

                MNT_ILOCK(mp);
                mp->mnt_noasync--;
                if ((mp->mnt_flag & MNT_ASYNC) != 0 && mp->mnt_noasync == 0)
                        mp->mnt_kern_flag |= MNTK_ASYNC;
                MNT_IUNLOCK(mp);

                vn_finished_write(mp);

                if (error != 0) {
                        VFS_UNLOCK_GIANT(vfslocked);
                        goto next;
                }

                /*
                 * Send BIO_FLUSH before freezing the file system, so it can be
                 * faster after the freeze.
                 */
                GJ_TIMER_START(1, &bt);
                g_journal_flush_cache(sc);
                GJ_TIMER_STOP(1, &bt, "BIO_FLUSH time of %s", sc->sc_name);

                GJ_TIMER_START(1, &bt);
                error = vfs_write_suspend(mp);
                VFS_UNLOCK_GIANT(vfslocked);
                GJ_TIMER_STOP(1, &bt, "Suspend time of %s", mountpoint);
                if (error != 0) {
                        GJ_DEBUG(0, "Cannot suspend file system %s (error=%d).",
                            mountpoint, error);
                        goto next;
                }

                error = desc->jd_clean(mp);
                if (error != 0)
                        goto next;

                mtx_lock(&sc->sc_mtx);
                g_journal_switch_wait(sc);
                mtx_unlock(&sc->sc_mtx);

                vfs_write_resume(mp);
next:
                mtx_lock(&mountlist_mtx);
                vfs_unbusy(mp, td);
        }
        mtx_unlock(&mountlist_mtx);

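        /*
         * Switch the remaining marked devices -- those without a mounted
         * file system, or ones the pass above had to skip.
         */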
        sc = NULL;
        for (;;) {
                DROP_GIANT();
                g_topology_lock();
                LIST_FOREACH(gp, &g_journal_class.geom, geom) {
                        sc = gp->softc;
                        if (sc == NULL)
                                continue;
                        mtx_lock(&sc->sc_mtx);
                        if ((sc->sc_type & GJ_TYPE_COMPLETE) == GJ_TYPE_COMPLETE &&
                            !(sc->sc_flags & GJF_DEVICE_DESTROY) &&
                            (sc->sc_flags & GJF_DEVICE_BEFORE_SWITCH)) {
                                break;
                        }
                        mtx_unlock(&sc->sc_mtx);
                        sc = NULL;
                }
                g_topology_unlock();
                PICKUP_GIANT();
                if (sc == NULL)
                        break;
                mtx_assert(&sc->sc_mtx, MA_OWNED);
                g_journal_switch_wait(sc);
                mtx_unlock(&sc->sc_mtx);
        }
}

/*
 * TODO: Switcher thread should be started on first geom creation and killed on
 * last geom destruction.
 */
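/*
 * Main loop of the switcher kthread: wake up every g_journal_switch_time
 * seconds (or earlier on a lowmem event or an explicit sync request),
 * switch the journals on all devices and wake up anyone waiting on
 * g_journal_sync_requested.
 */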
static void
g_journal_switcher(void *arg)
{
        struct thread *td = curthread;
        struct g_class *mp;
        struct bintime bt;
        int error;

        mp = arg;
        for (;;) {
                g_journal_switcher_wokenup = 0;
                error = tsleep(&g_journal_switcher_state, PRIBIO, "jsw:wait",
                    g_journal_switch_time * hz);
                if (g_journal_switcher_state == GJ_SWITCHER_DIE) {
                        g_journal_switcher_state = GJ_SWITCHER_DIED;
                        GJ_DEBUG(1, "Switcher exiting.");
                        wakeup(&g_journal_switcher_state);
                        kthread_exit(0);
                }
                if (error == 0 && g_journal_sync_requested == 0) {
                        GJ_DEBUG(1, "Out of cache, force switch (used=%u "
                            "limit=%u).", g_journal_cache_used,
                            g_journal_cache_limit);
                }
                GJ_TIMER_START(1, &bt);
                g_journal_do_switch(mp, td);
                GJ_TIMER_STOP(1, &bt, "Entire switch time");
                if (g_journal_sync_requested > 0) {
                        g_journal_sync_requested = 0;
                        wakeup(&g_journal_sync_requested);
                }
        }
}