tr_raid1.c (220209) vs. tr_raid1.c (220210)
1/*-
2 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/geom/raid/tr_raid1.c 220209 2011-03-31 16:14:35Z mav $");
28__FBSDID("$FreeBSD: head/sys/geom/raid/tr_raid1.c 220210 2011-03-31 16:19:53Z mav $");
29
30#include <sys/param.h>
31#include <sys/bio.h>
32#include <sys/endian.h>
33#include <sys/kernel.h>
34#include <sys/kobj.h>
35#include <sys/limits.h>
36#include <sys/lock.h>
37#include <sys/malloc.h>
38#include <sys/mutex.h>
39#include <sys/sysctl.h>
40#include <sys/systm.h>
41#include <geom/geom.h>
42#include "geom/raid/g_raid.h"
43#include "g_raid_tr_if.h"
44
45SYSCTL_DECL(_kern_geom_raid);
46SYSCTL_NODE(_kern_geom_raid, OID_AUTO, raid1, CTLFLAG_RW, 0,
47 "RAID1 parameters");
48
 49#define RAID1_REBUILD_SLAB (1 << 20) /* One transaction in a rebuild */
50static int g_raid1_rebuild_slab = RAID1_REBUILD_SLAB;
51TUNABLE_INT("kern.geom.raid.raid1.rebuild_slab_size",
52 &g_raid1_rebuild_slab);
53SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_slab_size, CTLFLAG_RW,
54 &g_raid1_rebuild_slab, 0,
55 "Amount of the disk to rebuild each read/write cycle of the rebuild.");
56
57#define RAID1_REBUILD_FAIR_IO 20 /* use 1/x of the available I/O */
58static int g_raid1_rebuild_fair_io = RAID1_REBUILD_FAIR_IO;
59TUNABLE_INT("kern.geom.raid.raid1.rebuild_fair_io",
60 &g_raid1_rebuild_fair_io);
61SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_fair_io, CTLFLAG_RW,
62 &g_raid1_rebuild_fair_io, 0,
63 "Fraction of the I/O bandwidth to use when disk busy for rebuild.");
64
65#define RAID1_REBUILD_CLUSTER_IDLE 100
66static int g_raid1_rebuild_cluster_idle = RAID1_REBUILD_CLUSTER_IDLE;
67TUNABLE_INT("kern.geom.raid.raid1.rebuild_cluster_idle",
68 &g_raid1_rebuild_cluster_idle);
69SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_cluster_idle, CTLFLAG_RW,
70 &g_raid1_rebuild_cluster_idle, 0,
71 "Number of slabs to do each time we trigger a rebuild cycle");
72
73#define RAID1_REBUILD_META_UPDATE 1024 /* update meta data every 1GB or so */
74static int g_raid1_rebuild_meta_update = RAID1_REBUILD_META_UPDATE;
75TUNABLE_INT("kern.geom.raid.raid1.rebuild_meta_update",
76 &g_raid1_rebuild_meta_update);
77SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_meta_update, CTLFLAG_RW,
78 &g_raid1_rebuild_meta_update, 0,
79 "When to update the meta data.");
80
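/*
 * Illustrative sketch, not part of the driver: the CTLFLAG_RW sysctls
 * declared above can also be read and written from userland at runtime
 * via sysctlbyname(3).  A minimal example, assuming the geom_raid module
 * is loaded so the kern.geom.raid.raid1.* OIDs exist; it is kept under
 * "#if 0" so this file still compiles unchanged.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int slab;
        size_t len = sizeof(slab);

        /* Read the current rebuild slab size (bytes per rebuild I/O). */
        if (sysctlbyname("kern.geom.raid.raid1.rebuild_slab_size",
            &slab, &len, NULL, 0) != 0)
                return (1);
        printf("rebuild_slab_size: %d\n", slab);

        /* Double it (requires root): 2MB per rebuild transaction. */
        slab *= 2;
        return (sysctlbyname("kern.geom.raid.raid1.rebuild_slab_size",
            NULL, NULL, &slab, sizeof(slab)) != 0);
}
#endif
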
81static MALLOC_DEFINE(M_TR_RAID1, "tr_raid1_data", "GEOM_RAID RAID1 data");
82
83#define TR_RAID1_NONE 0
84#define TR_RAID1_REBUILD 1
85#define TR_RAID1_RESYNC 2
86
87#define TR_RAID1_F_DOING_SOME 0x1
88#define TR_RAID1_F_LOCKED 0x2
89#define TR_RAID1_F_ABORT 0x4
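/*
 * How the flags are used by the code below: TR_RAID1_F_DOING_SOME means a
 * rebuild/resync slab is currently in flight, TR_RAID1_F_LOCKED means that
 * slab's LBA range is held via g_raid_lock_range(), and TR_RAID1_F_ABORT
 * asks the in-flight slab to stop the rebuild once it completes.
 */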
90
91struct g_raid_tr_raid1_object {
92 struct g_raid_tr_object trso_base;
93 int trso_starting;
94 int trso_stopping;
95 int trso_type;
96 int trso_recover_slabs; /* slabs before rest */
97 int trso_fair_io;
98 int trso_meta_update;
99 int trso_flags;
100 struct g_raid_subdisk *trso_failed_sd; /* like per volume */
101 void *trso_buffer; /* Buffer space */
102 struct bio trso_bio;
103};
104
105static g_raid_tr_taste_t g_raid_tr_taste_raid1;
106static g_raid_tr_event_t g_raid_tr_event_raid1;
107static g_raid_tr_start_t g_raid_tr_start_raid1;
108static g_raid_tr_stop_t g_raid_tr_stop_raid1;
109static g_raid_tr_iostart_t g_raid_tr_iostart_raid1;
110static g_raid_tr_iodone_t g_raid_tr_iodone_raid1;
111static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1;
112static g_raid_tr_locked_t g_raid_tr_locked_raid1;
113static g_raid_tr_idle_t g_raid_tr_idle_raid1;
114static g_raid_tr_free_t g_raid_tr_free_raid1;
115
116static kobj_method_t g_raid_tr_raid1_methods[] = {
117 KOBJMETHOD(g_raid_tr_taste, g_raid_tr_taste_raid1),
118 KOBJMETHOD(g_raid_tr_event, g_raid_tr_event_raid1),
119 KOBJMETHOD(g_raid_tr_start, g_raid_tr_start_raid1),
120 KOBJMETHOD(g_raid_tr_stop, g_raid_tr_stop_raid1),
121 KOBJMETHOD(g_raid_tr_iostart, g_raid_tr_iostart_raid1),
122 KOBJMETHOD(g_raid_tr_iodone, g_raid_tr_iodone_raid1),
123 KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1),
124 KOBJMETHOD(g_raid_tr_locked, g_raid_tr_locked_raid1),
125 KOBJMETHOD(g_raid_tr_idle, g_raid_tr_idle_raid1),
126 KOBJMETHOD(g_raid_tr_free, g_raid_tr_free_raid1),
127 { 0, 0 }
128};
129
130static struct g_raid_tr_class g_raid_tr_raid1_class = {
131 "RAID1",
132 g_raid_tr_raid1_methods,
133 sizeof(struct g_raid_tr_raid1_object),
134 .trc_priority = 100
135};
136
137static void g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr);
138static void g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
139 struct g_raid_subdisk *sd);
140
141static int
142g_raid_tr_taste_raid1(struct g_raid_tr_object *tr, struct g_raid_volume *vol)
143{
144 struct g_raid_tr_raid1_object *trs;
145
146 trs = (struct g_raid_tr_raid1_object *)tr;
147 if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_RAID1 ||
148 tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_NONE)
149 return (G_RAID_TR_TASTE_FAIL);
150 trs->trso_starting = 1;
151 return (G_RAID_TR_TASTE_SUCCEED);
152}
153
154static int
155g_raid_tr_update_state_raid1(struct g_raid_volume *vol,
156 struct g_raid_subdisk *sd)
157{
158 struct g_raid_tr_raid1_object *trs;
159 struct g_raid_softc *sc;
160 struct g_raid_subdisk *tsd, *bestsd;
161 u_int s;
162 int i, na, ns;
163
164 sc = vol->v_softc;
165 trs = (struct g_raid_tr_raid1_object *)vol->v_tr;
166 if (trs->trso_stopping &&
167 (trs->trso_flags & TR_RAID1_F_DOING_SOME) == 0)
168 s = G_RAID_VOLUME_S_STOPPED;
169 else if (trs->trso_starting)
170 s = G_RAID_VOLUME_S_STARTING;
171 else {
172 /* Make sure we have at least one ACTIVE disk. */
173 na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
174 if (na == 0) {
175 /*
 176 * Critical situation! We have no active disks!
177 * Choose the best disk we have to make it active.
178 */
179 bestsd = &vol->v_subdisks[0];
180 for (i = 1; i < vol->v_disks_count; i++) {
181 tsd = &vol->v_subdisks[i];
182 if (tsd->sd_state > bestsd->sd_state)
183 bestsd = tsd;
184 else if (tsd->sd_state == bestsd->sd_state &&
185 (tsd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
186 tsd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
187 tsd->sd_rebuild_pos > bestsd->sd_rebuild_pos)
188 bestsd = tsd;
189 }
190 if (bestsd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED) {
 191 /* We found a reasonable candidate. */
192 G_RAID_DEBUG1(1, sc,
193 "Promote subdisk %s:%d from %s to ACTIVE.",
194 vol->v_name, bestsd->sd_pos,
195 g_raid_subdisk_state2str(bestsd->sd_state));
196 g_raid_change_subdisk_state(bestsd,
197 G_RAID_SUBDISK_S_ACTIVE);
198 g_raid_write_metadata(sc,
199 vol, bestsd, bestsd->sd_disk);
200 }
201 }
202 na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
203 ns = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
204 g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
205 if (na == vol->v_disks_count)
206 s = G_RAID_VOLUME_S_OPTIMAL;
207 else if (na + ns == vol->v_disks_count)
208 s = G_RAID_VOLUME_S_SUBOPTIMAL;
209 else if (na > 0)
210 s = G_RAID_VOLUME_S_DEGRADED;
211 else
212 s = G_RAID_VOLUME_S_BROKEN;
213 g_raid_tr_raid1_maybe_rebuild(vol->v_tr, sd);
214 }
215 if (s != vol->v_state) {
216 g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
217 G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
218 G_RAID_EVENT_VOLUME);
219 g_raid_change_volume_state(vol, s);
220 if (!trs->trso_starting && !trs->trso_stopping)
221 g_raid_write_metadata(sc, vol, NULL, NULL);
222 }
223 return (0);
224}
225
226static void
227g_raid_tr_raid1_fail_disk(struct g_raid_softc *sc, struct g_raid_subdisk *sd,
228 struct g_raid_disk *disk)
229{
230 /*
231 * We don't fail the last disk in the pack, since it still has decent
232 * data on it and that's better than failing the disk if it is the root
233 * file system.
234 *
235 * XXX should this be controlled via a tunable? It makes sense for
236 * the volume that has / on it. I can't think of a case where we'd
237 * want the volume to go away on this kind of event.
238 */
239 if (g_raid_nsubdisks(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == 1 &&
240 g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == sd)
241 return;
242 g_raid_fail_disk(sc, sd, disk);
243}
244
245static void
246g_raid_tr_raid1_rebuild_some(struct g_raid_tr_object *tr)
247{
248 struct g_raid_tr_raid1_object *trs;
249 struct g_raid_subdisk *sd, *good_sd;
250 struct bio *bp;
251
252 trs = (struct g_raid_tr_raid1_object *)tr;
253 if (trs->trso_flags & TR_RAID1_F_DOING_SOME)
254 return;
255 sd = trs->trso_failed_sd;
256 good_sd = g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE);
257 if (good_sd == NULL) {
258 g_raid_tr_raid1_rebuild_abort(tr);
259 return;
260 }
261 bp = &trs->trso_bio;
262 memset(bp, 0, sizeof(*bp));
263 bp->bio_offset = sd->sd_rebuild_pos;
264 bp->bio_length = MIN(g_raid1_rebuild_slab,
265 sd->sd_size - sd->sd_rebuild_pos);
266 bp->bio_data = trs->trso_buffer;
267 bp->bio_cmd = BIO_READ;
268 bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
269 bp->bio_caller1 = good_sd;
270 trs->trso_flags |= TR_RAID1_F_DOING_SOME;
271 trs->trso_flags |= TR_RAID1_F_LOCKED;
272 g_raid_lock_range(sd->sd_volume, /* Lock callback starts I/O */
273 bp->bio_offset, bp->bio_length, NULL, bp);
274}
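/*
 * One rebuild cycle, as implemented above and completed in
 * g_raid_tr_iodone_raid1(): lock [sd_rebuild_pos, sd_rebuild_pos + slab),
 * issue a SYNC BIO_READ from an ACTIVE mirror into trso_buffer, turn the
 * completed bio into a BIO_WRITE to the rebuilding subdisk, then unlock
 * the range, advance sd_rebuild_pos and either schedule the next slab or
 * finish/abort.
 */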
275
276static void
277g_raid_tr_raid1_rebuild_done(struct g_raid_tr_raid1_object *trs)
278{
279 struct g_raid_volume *vol;
280 struct g_raid_subdisk *sd;
281
282 vol = trs->trso_base.tro_volume;
283 sd = trs->trso_failed_sd;
284 g_raid_write_metadata(vol->v_softc, vol, sd, sd->sd_disk);
285 free(trs->trso_buffer, M_TR_RAID1);
286 trs->trso_buffer = NULL;
287 trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
288 trs->trso_type = TR_RAID1_NONE;
289 trs->trso_recover_slabs = 0;
290 trs->trso_failed_sd = NULL;
291 g_raid_tr_update_state_raid1(vol, NULL);
292}
293
294static void
295g_raid_tr_raid1_rebuild_finish(struct g_raid_tr_object *tr)
296{
297 struct g_raid_tr_raid1_object *trs;
298 struct g_raid_subdisk *sd;
299
300 trs = (struct g_raid_tr_raid1_object *)tr;
301 sd = trs->trso_failed_sd;
302 G_RAID_DEBUG1(0, tr->tro_volume->v_softc,
303 "Subdisk %s:%d-%s rebuild completed.",
304 sd->sd_volume->v_name, sd->sd_pos,
305 sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
306 g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
307 sd->sd_rebuild_pos = 0;
308 g_raid_tr_raid1_rebuild_done(trs);
309}
310
311static void
312g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr)
313{
314 struct g_raid_tr_raid1_object *trs;
315 struct g_raid_subdisk *sd;
316 struct g_raid_volume *vol;
317 off_t len;
318
319 vol = tr->tro_volume;
320 trs = (struct g_raid_tr_raid1_object *)tr;
321 sd = trs->trso_failed_sd;
322 if (trs->trso_flags & TR_RAID1_F_DOING_SOME) {
323 G_RAID_DEBUG1(1, vol->v_softc,
324 "Subdisk %s:%d-%s rebuild is aborting.",
325 sd->sd_volume->v_name, sd->sd_pos,
326 sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
327 trs->trso_flags |= TR_RAID1_F_ABORT;
328 } else {
329 G_RAID_DEBUG1(0, vol->v_softc,
330 "Subdisk %s:%d-%s rebuild aborted.",
331 sd->sd_volume->v_name, sd->sd_pos,
332 sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
333 trs->trso_flags &= ~TR_RAID1_F_ABORT;
334 if (trs->trso_flags & TR_RAID1_F_LOCKED) {
335 trs->trso_flags &= ~TR_RAID1_F_LOCKED;
336 len = MIN(g_raid1_rebuild_slab,
337 sd->sd_size - sd->sd_rebuild_pos);
338 g_raid_unlock_range(tr->tro_volume,
339 sd->sd_rebuild_pos, len);
340 }
341 g_raid_tr_raid1_rebuild_done(trs);
342 }
343}
344
345static void
346g_raid_tr_raid1_rebuild_start(struct g_raid_tr_object *tr)
347{
348 struct g_raid_volume *vol;
349 struct g_raid_tr_raid1_object *trs;
350 struct g_raid_subdisk *sd, *fsd;
351
352 vol = tr->tro_volume;
353 trs = (struct g_raid_tr_raid1_object *)tr;
354 if (trs->trso_failed_sd) {
355 G_RAID_DEBUG1(1, vol->v_softc,
356 "Already rebuild in start rebuild. pos %jd\n",
357 (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
358 return;
359 }
360 sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_ACTIVE);
361 if (sd == NULL) {
362 G_RAID_DEBUG1(1, vol->v_softc,
363 "No active disk to rebuild. night night.");
364 return;
365 }
366 fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_RESYNC);
367 if (fsd == NULL)
368 fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_REBUILD);
369 if (fsd == NULL) {
370 fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_STALE);
371 if (fsd != NULL) {
372 fsd->sd_rebuild_pos = 0;
373 g_raid_change_subdisk_state(fsd,
374 G_RAID_SUBDISK_S_RESYNC);
375 g_raid_write_metadata(vol->v_softc, vol, fsd, NULL);
376 } else {
377 fsd = g_raid_get_subdisk(vol,
378 G_RAID_SUBDISK_S_UNINITIALIZED);
379 if (fsd == NULL)
380 fsd = g_raid_get_subdisk(vol,
381 G_RAID_SUBDISK_S_NEW);
382 if (fsd != NULL) {
383 fsd->sd_rebuild_pos = 0;
384 g_raid_change_subdisk_state(fsd,
385 G_RAID_SUBDISK_S_REBUILD);
386 g_raid_write_metadata(vol->v_softc,
387 vol, fsd, NULL);
388 }
389 }
390 }
391 if (fsd == NULL) {
392 G_RAID_DEBUG1(1, vol->v_softc,
393 "No failed disk to rebuild. night night.");
394 return;
395 }
396 trs->trso_failed_sd = fsd;
397 G_RAID_DEBUG1(0, vol->v_softc,
398 "Subdisk %s:%d-%s rebuild start at %jd.",
399 fsd->sd_volume->v_name, fsd->sd_pos,
400 fsd->sd_disk ? g_raid_get_diskname(fsd->sd_disk) : "[none]",
401 trs->trso_failed_sd->sd_rebuild_pos);
402 trs->trso_type = TR_RAID1_REBUILD;
403 trs->trso_buffer = malloc(g_raid1_rebuild_slab, M_TR_RAID1, M_WAITOK);
404 trs->trso_meta_update = g_raid1_rebuild_meta_update;
405 g_raid_tr_raid1_rebuild_some(tr);
406}
407
408
409static void
410g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
411 struct g_raid_subdisk *sd)
412{
413 struct g_raid_volume *vol;
414 struct g_raid_tr_raid1_object *trs;
415 int na, nr;
416
417 /*
418 * If we're stopping, don't do anything. If we don't have at least one
419 * good disk and one bad disk, we don't do anything. And if there's a
420 * 'good disk' stored in the trs, then we're in progress and we punt.
421 * If we make it past all these checks, we need to rebuild.
422 */
423 vol = tr->tro_volume;
424 trs = (struct g_raid_tr_raid1_object *)tr;
425 if (trs->trso_stopping)
426 return;
427 na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
428 nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_REBUILD) +
429 g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
430 switch(trs->trso_type) {
431 case TR_RAID1_NONE:
432 if (na == 0)
433 return;
434 if (nr == 0) {
435 nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_NEW) +
436 g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
437 g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_UNINITIALIZED);
438 if (nr == 0)
439 return;
440 }
441 g_raid_tr_raid1_rebuild_start(tr);
442 break;
443 case TR_RAID1_REBUILD:
444 if (na == 0 || nr == 0 || trs->trso_failed_sd == sd)
445 g_raid_tr_raid1_rebuild_abort(tr);
446 break;
447 case TR_RAID1_RESYNC:
448 break;
449 }
450}
451
452static int
453g_raid_tr_event_raid1(struct g_raid_tr_object *tr,
454 struct g_raid_subdisk *sd, u_int event)
455{
456
457 g_raid_tr_update_state_raid1(tr->tro_volume, sd);
458 return (0);
459}
460
461static int
462g_raid_tr_start_raid1(struct g_raid_tr_object *tr)
463{
464 struct g_raid_tr_raid1_object *trs;
465 struct g_raid_volume *vol;
466
467 trs = (struct g_raid_tr_raid1_object *)tr;
468 vol = tr->tro_volume;
469 trs->trso_starting = 0;
470 g_raid_tr_update_state_raid1(vol, NULL);
471 return (0);
472}
473
474static int
475g_raid_tr_stop_raid1(struct g_raid_tr_object *tr)
476{
477 struct g_raid_tr_raid1_object *trs;
478 struct g_raid_volume *vol;
479
480 trs = (struct g_raid_tr_raid1_object *)tr;
481 vol = tr->tro_volume;
482 trs->trso_starting = 0;
483 trs->trso_stopping = 1;
484 g_raid_tr_update_state_raid1(vol, NULL);
485 return (0);
486}
487
488/*
489 * Select the disk to read from. Take into account: subdisk state, running
490 * error recovery, average disk load, head position and possible cache hits.
491 */
492#define ABS(x) (((x) >= 0) ? (x) : (-(x)))
493static struct g_raid_subdisk *
494g_raid_tr_raid1_select_read_disk(struct g_raid_volume *vol, struct bio *bp,
495 u_int mask)
496{
497 struct g_raid_subdisk *sd, *best;
498 int i, prio, bestprio;
499
500 best = NULL;
501 bestprio = INT_MAX;
502 for (i = 0; i < vol->v_disks_count; i++) {
503 sd = &vol->v_subdisks[i];
504 if (sd->sd_state != G_RAID_SUBDISK_S_ACTIVE &&
505 ((sd->sd_state != G_RAID_SUBDISK_S_REBUILD &&
506 sd->sd_state != G_RAID_SUBDISK_S_RESYNC) ||
507 bp->bio_offset + bp->bio_length > sd->sd_rebuild_pos))
508 continue;
509 if ((mask & (1 << i)) != 0)
510 continue;
511 prio = G_RAID_SUBDISK_LOAD(sd);
512 prio += min(sd->sd_recovery, 255) << 22;
513 prio += (G_RAID_SUBDISK_S_ACTIVE - sd->sd_state) << 16;
514 /* If disk head is precisely in position - highly prefer it. */
515 if (G_RAID_SUBDISK_POS(sd) == bp->bio_offset)
516 prio -= 2 * G_RAID_SUBDISK_LOAD_SCALE;
517 else
518 /* If disk head is close to position - prefer it. */
519 if (ABS(G_RAID_SUBDISK_POS(sd) - bp->bio_offset) <
520 G_RAID_SUBDISK_TRACK_SIZE)
521 prio -= 1 * G_RAID_SUBDISK_LOAD_SCALE;
522 if (prio < bestprio) {
523 best = sd;
524 bestprio = prio;
525 }
526 }
527 return (best);
528}
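/*
 * How the priority computed above ranks candidates (lower prio wins):
 * each in-flight recovery operation adds 1 << 22 and each subdisk state
 * step below ACTIVE adds 1 << 16, offsets presumably chosen to dominate
 * the load term, so recovering or non-ACTIVE disks are picked only when
 * nothing better qualifies; among otherwise comparable disks the current
 * load and the head-position bonuses (2x or 1x G_RAID_SUBDISK_LOAD_SCALE)
 * break the tie, favoring disks already positioned for the request.
 */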
529
530static void
531g_raid_tr_iostart_raid1_read(struct g_raid_tr_object *tr, struct bio *bp)
532{
533 struct g_raid_subdisk *sd;
534 struct bio *cbp;
535
536 sd = g_raid_tr_raid1_select_read_disk(tr->tro_volume, bp, 0);
537 KASSERT(sd != NULL, ("No active disks in volume %s.",
538 tr->tro_volume->v_name));
539
540 cbp = g_clone_bio(bp);
541 if (cbp == NULL) {
542 g_raid_iodone(bp, ENOMEM);
543 return;
544 }
545
546 g_raid_subdisk_iostart(sd, cbp);
547}
548
549static void
550g_raid_tr_iostart_raid1_write(struct g_raid_tr_object *tr, struct bio *bp)
551{
 552	struct g_raid_volume *vol;
 553	struct g_raid_subdisk *sd;
 554	struct bio_queue_head queue;
 555	struct bio *cbp;
 556	int i;
 557
 558	vol = tr->tro_volume;
 552	struct g_raid_softc *sc;
 553	struct g_raid_volume *vol;
 554	struct g_raid_subdisk *sd;
 555	struct bio_queue_head queue;
 556	struct bio *cbp;
 557	int i;
 558
 559	vol = tr->tro_volume;
 560	sc = vol->v_softc;
561
562 /*
563 * Allocate all bios before sending any request, so we can return
 564 * ENOMEM in a nice and clean way.
565 */
566 bioq_init(&queue);
567 for (i = 0; i < vol->v_disks_count; i++) {
568 sd = &vol->v_subdisks[i];
569 switch (sd->sd_state) {
570 case G_RAID_SUBDISK_S_ACTIVE:
571 break;
572 case G_RAID_SUBDISK_S_REBUILD:
573 /*
574 * When rebuilding, only part of this subdisk is
 575 * writable; the rest will be written as part of
 576 * that process.
577 */
578 if (bp->bio_offset >= sd->sd_rebuild_pos)
579 continue;
580 break;
581 case G_RAID_SUBDISK_S_STALE:
582 case G_RAID_SUBDISK_S_RESYNC:
583 /*
 584 * Resyncing still writes, on the theory that the
 585 * resync'd disk is very close to in sync and writing
 586 * to it helps keep it that way, as long as we keep up
 587 * while resyncing.
588 */
589 break;
590 default:
591 continue;
592 }
593 cbp = g_clone_bio(bp);
594 if (cbp == NULL)
595 goto failure;
596 cbp->bio_caller1 = sd;
597 bioq_insert_tail(&queue, cbp);
598 }
599 for (cbp = bioq_first(&queue); cbp != NULL;
600 cbp = bioq_first(&queue)) {
601 bioq_remove(&queue, cbp);
602 sd = cbp->bio_caller1;
603 cbp->bio_caller1 = NULL;
604 g_raid_subdisk_iostart(sd, cbp);
605 }
606 return;
607failure:
608 for (cbp = bioq_first(&queue); cbp != NULL;
609 cbp = bioq_first(&queue)) {
610 bioq_remove(&queue, cbp);
611 g_destroy_bio(cbp);
612 }
613 if (bp->bio_error == 0)
614 bp->bio_error = ENOMEM;
615 g_raid_iodone(bp, bp->bio_error);
616}
617
618static void
619g_raid_tr_iostart_raid1(struct g_raid_tr_object *tr, struct bio *bp)
620{
621 struct g_raid_volume *vol;
622 struct g_raid_tr_raid1_object *trs;
623
624 vol = tr->tro_volume;
625 trs = (struct g_raid_tr_raid1_object *)tr;
626 if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
627 vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL &&
628 vol->v_state != G_RAID_VOLUME_S_DEGRADED) {
629 g_raid_iodone(bp, EIO);
630 return;
631 }
632 /*
633 * If we're rebuilding, squeeze in rebuild activity every so often,
634 * even when the disk is busy. Be sure to only count real I/O
635 * to the disk. All 'SPECIAL' I/O is traffic generated to the disk
636 * by this module.
637 */
638 if (trs->trso_failed_sd != NULL &&
639 !(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL)) {
 640 /* Make this new or already running round short. */
641 trs->trso_recover_slabs = 0;
642 if (--trs->trso_fair_io <= 0) {
643 trs->trso_fair_io = g_raid1_rebuild_fair_io;
644 g_raid_tr_raid1_rebuild_some(tr);
645 }
646 }
647 switch (bp->bio_cmd) {
648 case BIO_READ:
649 g_raid_tr_iostart_raid1_read(tr, bp);
650 break;
651 case BIO_WRITE:
652 g_raid_tr_iostart_raid1_write(tr, bp);
653 break;
654 case BIO_DELETE:
655 g_raid_iodone(bp, EIO);
656 break;
657 case BIO_FLUSH:
658 g_raid_tr_flush_common(tr, bp);
659 break;
660 default:
661 KASSERT(1 == 0, ("Invalid command here: %u (volume=%s)",
662 bp->bio_cmd, vol->v_name));
663 break;
664 }
665}
666
667static void
668g_raid_tr_iodone_raid1(struct g_raid_tr_object *tr,
669 struct g_raid_subdisk *sd, struct bio *bp)
670{
671 struct bio *cbp;
672 struct g_raid_subdisk *nsd;
673 struct g_raid_volume *vol;
674 struct bio *pbp;
675 struct g_raid_tr_raid1_object *trs;
676 uintptr_t *mask;
677 int error, do_write;
678
679 trs = (struct g_raid_tr_raid1_object *)tr;
680 vol = tr->tro_volume;
681 if (bp->bio_cflags & G_RAID_BIO_FLAG_SYNC) {
682 /*
683 * This operation is part of a rebuild or resync operation.
684 * See what work just got done, then schedule the next bit of
685 * work, if any. Rebuild/resync is done a little bit at a
 686 * time, either when a timeout happens or after we get a
687 * bunch of I/Os to the disk (to make sure an active system
688 * will complete in a sane amount of time).
689 *
 690 * We are set up to do differing amounts of work for each of
 691 * these cases. So long as the slab count is smallish (less than
692 * 50 or so, I'd guess, but that's just a WAG), we shouldn't
693 * have any bio starvation issues. For active disks, we do
694 * 5MB of data, for inactive ones, we do 50MB.
695 */
696 if (trs->trso_type == TR_RAID1_REBUILD) {
697 if (bp->bio_cmd == BIO_READ) {
698
699 /* Immediately abort rebuild, if requested. */
700 if (trs->trso_flags & TR_RAID1_F_ABORT) {
701 trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
702 g_raid_tr_raid1_rebuild_abort(tr);
703 return;
704 }
705
706 /* On read error, skip and cross fingers. */
707 if (bp->bio_error != 0) {
708 G_RAID_LOGREQ(0, bp,
709 "Read error during rebuild (%d), "
710 "possible data loss!",
711 bp->bio_error);
712 goto rebuild_round_done;
713 }
714
715 /*
716 * The read operation finished, queue the
717 * write and get out.
718 */
719 G_RAID_LOGREQ(4, bp, "rebuild read done. %d",
720 bp->bio_error);
721 bp->bio_cmd = BIO_WRITE;
722 bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
723 G_RAID_LOGREQ(4, bp, "Queueing rebuild write.");
724 g_raid_subdisk_iostart(trs->trso_failed_sd, bp);
725 } else {
726 /*
727 * The write operation just finished. Do
728 * another. We keep cloning the master bio
729 * since it has the right buffers allocated to
730 * it.
731 */
732 G_RAID_LOGREQ(4, bp,
733 "rebuild write done. Error %d",
734 bp->bio_error);
735 nsd = trs->trso_failed_sd;
736 if (bp->bio_error != 0 ||
737 trs->trso_flags & TR_RAID1_F_ABORT) {
738 if ((trs->trso_flags &
739 TR_RAID1_F_ABORT) == 0) {
740 g_raid_tr_raid1_fail_disk(sd->sd_softc,
741 nsd, nsd->sd_disk);
742 }
743 trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
744 g_raid_tr_raid1_rebuild_abort(tr);
745 return;
746 }
747rebuild_round_done:
748 nsd = trs->trso_failed_sd;
749 trs->trso_flags &= ~TR_RAID1_F_LOCKED;
750 g_raid_unlock_range(sd->sd_volume,
751 bp->bio_offset, bp->bio_length);
752 nsd->sd_rebuild_pos += bp->bio_length;
753 if (nsd->sd_rebuild_pos >= nsd->sd_size) {
754 g_raid_tr_raid1_rebuild_finish(tr);
755 return;
756 }
757
758 /* Abort rebuild if we are stopping */
759 if (trs->trso_stopping) {
760 trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
761 g_raid_tr_raid1_rebuild_abort(tr);
762 return;
763 }
764
765 if (--trs->trso_meta_update <= 0) {
766 g_raid_write_metadata(vol->v_softc,
767 vol, nsd, nsd->sd_disk);
768 trs->trso_meta_update =
769 g_raid1_rebuild_meta_update;
770 }
771 trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
772 if (--trs->trso_recover_slabs <= 0)
773 return;
774 g_raid_tr_raid1_rebuild_some(tr);
775 }
776 } else if (trs->trso_type == TR_RAID1_RESYNC) {
777 /*
 778 * Read the good sd and the bad sd in parallel. When both
 779 * are done, compare the buffers and write the good data to
 780 * the bad one if they differ. Then do the next bit of work.
781 */
782 panic("Somehow, we think we're doing a resync");
783 }
784 return;
785 }
786 pbp = bp->bio_parent;
787 pbp->bio_inbed++;
788 if (bp->bio_cmd == BIO_READ && bp->bio_error != 0) {
789 /*
 790 * Read failed on the first drive. Retry the read on
791 * another disk drive, if available, before erroring out the
792 * read.
793 */
794 sd->sd_disk->d_read_errs++;
795 G_RAID_LOGREQ(0, bp,
796 "Read error (%d), %d read errors total",
797 bp->bio_error, sd->sd_disk->d_read_errs);
798
799 /*
800 * If there are too many read errors, we move to degraded.
 801 * XXX Do we want to FAIL the drive (e.g., make the user redo
802 * everything to get it back in sync), or just degrade the
803 * drive, which kicks off a resync?
804 */
805 do_write = 1;
806 if (sd->sd_disk->d_read_errs > g_raid_read_err_thresh) {
807 g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
808 if (pbp->bio_children == 1)
809 do_write = 0;
810 }
811
812 /*
813 * Find the other disk, and try to do the I/O to it.
814 */
815 mask = (uintptr_t *)(&pbp->bio_driver2);
816 if (pbp->bio_children == 1) {
817 /* Save original subdisk. */
818 pbp->bio_driver1 = do_write ? sd : NULL;
819 *mask = 0;
820 }
821 *mask |= 1 << sd->sd_pos;
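		/*
		 * For example, after a failed read on subdisk 0 the mask is
		 * 0x1, so the retry below skips position 0 and reads from the
		 * next best remaining mirror, if any.
		 */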
822 nsd = g_raid_tr_raid1_select_read_disk(vol, pbp, *mask);
823 if (nsd != NULL && (cbp = g_clone_bio(pbp)) != NULL) {
824 g_destroy_bio(bp);
825 G_RAID_LOGREQ(2, cbp, "Retrying read from %d",
826 nsd->sd_pos);
827 if (pbp->bio_children == 2 && do_write) {
828 sd->sd_recovery++;
829 cbp->bio_caller1 = nsd;
830 pbp->bio_pflags = G_RAID_BIO_FLAG_LOCKED;
831 /* Lock callback starts I/O */
832 g_raid_lock_range(sd->sd_volume,
833 cbp->bio_offset, cbp->bio_length, pbp, cbp);
834 } else {
835 g_raid_subdisk_iostart(nsd, cbp);
836 }
837 return;
838 }
839 /*
840 * We can't retry. Return the original error by falling
841 * through. This will happen when there's only one good disk.
842 * We don't need to fail the raid, since its actual state is
843 * based on the state of the subdisks.
844 */
845 G_RAID_LOGREQ(2, bp, "Couldn't retry read, failing it");
846 }
847 if (bp->bio_cmd == BIO_READ &&
848 bp->bio_error == 0 &&
849 pbp->bio_children > 1 &&
850 pbp->bio_driver1 != NULL) {
851 /*
852 * If it was a read, and bio_children is >1, then we just
853 * recovered the data from the second drive. We should try to
854 * write that data to the first drive if sector remapping is
855 * enabled. A write should put the data in a new place on the
856 * disk, remapping the bad sector. Do we need to do that by
857 * queueing a request to the main worker thread? It doesn't
858 * affect the return code of this current read, and can be
 859 * done at our leisure. However, to make the code simpler, it
 860 * is done synchronously.
861 */
862 G_RAID_LOGREQ(3, bp, "Recovered data from other drive");
863 cbp = g_clone_bio(pbp);
864 if (cbp != NULL) {
865 g_destroy_bio(bp);
866 cbp->bio_cmd = BIO_WRITE;
867 cbp->bio_cflags = G_RAID_BIO_FLAG_REMAP;
868 G_RAID_LOGREQ(2, cbp,
869 "Attempting bad sector remap on failing drive.");
870 g_raid_subdisk_iostart(pbp->bio_driver1, cbp);
871 return;
872 }
873 }
874 if (pbp->bio_pflags & G_RAID_BIO_FLAG_LOCKED) {
875 /*
876 * We're done with a recovery, mark the range as unlocked.
 877 * For any write errors, we aggressively fail the disk since
 878 * there was both a READ and a WRITE error at this location.
 879 * Both types of errors generally indicate the drive is on
880 * the verge of total failure anyway. Better to stop trusting
881 * it now. However, we need to reset error to 0 in that case
882 * because we're not failing the original I/O which succeeded.
883 */
884 if (bp->bio_cmd == BIO_WRITE && bp->bio_error) {
885 G_RAID_LOGREQ(0, bp, "Remap write failed: "
886 "failing subdisk.");
887 g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
888 bp->bio_error = 0;
889 }
890 if (pbp->bio_driver1 != NULL) {
891 ((struct g_raid_subdisk *)pbp->bio_driver1)
892 ->sd_recovery--;
893 }
894 G_RAID_LOGREQ(2, bp, "REMAP done %d.", bp->bio_error);
895 g_raid_unlock_range(sd->sd_volume, bp->bio_offset,
896 bp->bio_length);
897 }
898 error = bp->bio_error;
899 g_destroy_bio(bp);
900 if (pbp->bio_children == pbp->bio_inbed) {
901 pbp->bio_completed = pbp->bio_length;
902 g_raid_iodone(pbp, error);
903 }
904}
905
906static int
907g_raid_tr_kerneldump_raid1(struct g_raid_tr_object *tr,
908 void *virtual, vm_offset_t physical, off_t offset, size_t length)
909{
910 struct g_raid_volume *vol;
911 struct g_raid_subdisk *sd;
912 int error, i, ok;
913
914 vol = tr->tro_volume;
915 error = 0;
916 ok = 0;
917 for (i = 0; i < vol->v_disks_count; i++) {
918 sd = &vol->v_subdisks[i];
919 switch (sd->sd_state) {
920 case G_RAID_SUBDISK_S_ACTIVE:
921 break;
922 case G_RAID_SUBDISK_S_REBUILD:
923 /*
924 * When rebuilding, only part of this subdisk is
 925 * writable; the rest will be written as part of
 926 * that process.
927 */
928 if (offset >= sd->sd_rebuild_pos)
929 continue;
930 break;
931 case G_RAID_SUBDISK_S_STALE:
932 case G_RAID_SUBDISK_S_RESYNC:
933 /*
 934 * Resyncing still writes, on the theory that the
 935 * resync'd disk is very close to in sync and writing
 936 * to it helps keep it that way, as long as we keep up
 937 * while resyncing.
938 */
939 break;
940 default:
941 continue;
942 }
943 error = g_raid_subdisk_kerneldump(sd,
944 virtual, physical, offset, length);
945 if (error == 0)
946 ok++;
947 }
948 return (ok > 0 ? 0 : error);
949}
950
951static int
952g_raid_tr_locked_raid1(struct g_raid_tr_object *tr, void *argp)
953{
954 struct bio *bp;
955 struct g_raid_subdisk *sd;
956
957 bp = (struct bio *)argp;
958 sd = (struct g_raid_subdisk *)bp->bio_caller1;
959 g_raid_subdisk_iostart(sd, bp);
960
961 return (0);
962}
963
964static int
965g_raid_tr_idle_raid1(struct g_raid_tr_object *tr)
966{
967 struct g_raid_tr_raid1_object *trs;
968
969 trs = (struct g_raid_tr_raid1_object *)tr;
970 trs->trso_fair_io = g_raid1_rebuild_fair_io;
971 trs->trso_recover_slabs = g_raid1_rebuild_cluster_idle;
972 if (trs->trso_type == TR_RAID1_REBUILD)
973 g_raid_tr_raid1_rebuild_some(tr);
974 return (0);
975}
976
977static int
978g_raid_tr_free_raid1(struct g_raid_tr_object *tr)
979{
980 struct g_raid_tr_raid1_object *trs;
981
982 trs = (struct g_raid_tr_raid1_object *)tr;
983
984 if (trs->trso_buffer != NULL) {
985 free(trs->trso_buffer, M_TR_RAID1);
986 trs->trso_buffer = NULL;
987 }
988 return (0);
989}
990
991G_RAID_TR_DECLARE(g_raid_tr_raid1);