/* g_mirror.h, revision 157630 (FreeBSD GEOM mirror class header). */
1/*- 2 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD: head/sys/geom/mirror/g_mirror.h 157630 2006-04-10 10:32:22Z pjd $ 27 */ 28 29#ifndef _G_MIRROR_H_ 30#define _G_MIRROR_H_ 31 32#include <sys/endian.h> 33#include <sys/md5.h> 34 35#define G_MIRROR_CLASS_NAME "MIRROR" 36 37#define G_MIRROR_MAGIC "GEOM::MIRROR" 38/* 39 * Version history: 40 * 0 - Initial version number. 41 * 1 - Added 'prefer' balance algorithm. 42 * 2 - Added md_genid field to metadata. 43 * 3 - Added md_provsize field to metadata. 
44 */ 45#define G_MIRROR_VERSION 3 46 47#define G_MIRROR_BALANCE_NONE 0 48#define G_MIRROR_BALANCE_ROUND_ROBIN 1 49#define G_MIRROR_BALANCE_LOAD 2 50#define G_MIRROR_BALANCE_SPLIT 3 51#define G_MIRROR_BALANCE_PREFER 4 52#define G_MIRROR_BALANCE_MIN G_MIRROR_BALANCE_NONE 53#define G_MIRROR_BALANCE_MAX G_MIRROR_BALANCE_PREFER 54 55#define G_MIRROR_DISK_FLAG_DIRTY 0x0000000000000001ULL 56#define G_MIRROR_DISK_FLAG_SYNCHRONIZING 0x0000000000000002ULL 57#define G_MIRROR_DISK_FLAG_FORCE_SYNC 0x0000000000000004ULL 58#define G_MIRROR_DISK_FLAG_INACTIVE 0x0000000000000008ULL 59#define G_MIRROR_DISK_FLAG_HARDCODED 0x0000000000000010ULL 60#define G_MIRROR_DISK_FLAG_BROKEN 0x0000000000000020ULL 61#define G_MIRROR_DISK_FLAG_MASK (G_MIRROR_DISK_FLAG_DIRTY | \ 62 G_MIRROR_DISK_FLAG_SYNCHRONIZING | \ 63 G_MIRROR_DISK_FLAG_FORCE_SYNC | \ 64 G_MIRROR_DISK_FLAG_INACTIVE) 65 66#define G_MIRROR_DEVICE_FLAG_NOAUTOSYNC 0x0000000000000001ULL 67#define G_MIRROR_DEVICE_FLAG_MASK (G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) 68 69#ifdef _KERNEL 70extern u_int g_mirror_debug; 71 72#define G_MIRROR_DEBUG(lvl, ...) do { \ 73 if (g_mirror_debug >= (lvl)) { \ 74 printf("GEOM_MIRROR"); \ 75 if (g_mirror_debug > 0) \ 76 printf("[%u]", lvl); \ 77 printf(": "); \ 78 printf(__VA_ARGS__); \ 79 printf("\n"); \ 80 } \ 81} while (0) 82#define G_MIRROR_LOGREQ(lvl, bp, ...) do { \ 83 if (g_mirror_debug >= (lvl)) { \ 84 printf("GEOM_MIRROR"); \ 85 if (g_mirror_debug > 0) \ 86 printf("[%u]", lvl); \ 87 printf(": "); \ 88 printf(__VA_ARGS__); \ 89 printf(" "); \ 90 g_print_bio(bp); \ 91 printf("\n"); \ 92 } \ 93} while (0) 94 95#define G_MIRROR_BIO_FLAG_REGULAR 0x01 96#define G_MIRROR_BIO_FLAG_SYNC 0x02 97 98/* 99 * Informations needed for synchronization. 100 */ 101struct g_mirror_disk_sync { 102 struct g_consumer *ds_consumer; /* Consumer connected to our mirror. */ 103 off_t ds_offset; /* Offset of next request to send. */ 104 off_t ds_offset_done; /* Offset of already synchronized 105 region. 
*/ 106 u_int ds_syncid; /* Disk's synchronization ID. */ 107 u_int ds_inflight; /* Number of in-flight sync requests. */ 108 struct bio **ds_bios; /* BIOs for synchronization I/O. */ 109}; 110 111/* 112 * Informations needed for synchronization. 113 */ 114struct g_mirror_device_sync { 115 struct g_geom *ds_geom; /* Synchronization geom. */ 116 u_int ds_ndisks; /* Number of disks in SYNCHRONIZING 117 state. */ 118}; 119 120#define G_MIRROR_DISK_STATE_NONE 0 121#define G_MIRROR_DISK_STATE_NEW 1 122#define G_MIRROR_DISK_STATE_ACTIVE 2 123#define G_MIRROR_DISK_STATE_STALE 3 124#define G_MIRROR_DISK_STATE_SYNCHRONIZING 4 125#define G_MIRROR_DISK_STATE_DISCONNECTED 5 126#define G_MIRROR_DISK_STATE_DESTROY 6 127struct g_mirror_disk { 128 uint32_t d_id; /* Disk ID. */ 129 struct g_consumer *d_consumer; /* Consumer. */ 130 struct g_mirror_softc *d_softc; /* Back-pointer to softc. */ 131 int d_state; /* Disk state. */ 132 u_int d_priority; /* Disk priority. */ 133 struct bintime d_delay; /* Disk delay. */ 134 struct bintime d_last_used; /* When disk was last used. */ 135 uint64_t d_flags; /* Additional flags. */ 136 u_int d_genid; /* Disk's generation ID. */ 137 struct g_mirror_disk_sync d_sync;/* Sync information. */ 138 LIST_ENTRY(g_mirror_disk) d_next; 139}; 140#define d_name d_consumer->provider->name 141 142#define G_MIRROR_EVENT_DONTWAIT 0x1 143#define G_MIRROR_EVENT_WAIT 0x2 144#define G_MIRROR_EVENT_DEVICE 0x4 145#define G_MIRROR_EVENT_DONE 0x8 146struct g_mirror_event { 147 struct g_mirror_disk *e_disk; 148 int e_state; 149 int e_flags; 150 int e_error; 151 TAILQ_ENTRY(g_mirror_event) e_next; 152}; 153 154#define G_MIRROR_DEVICE_FLAG_DESTROY 0x0100000000000000ULL 155#define G_MIRROR_DEVICE_FLAG_WAIT 0x0200000000000000ULL 156#define G_MIRROR_DEVICE_FLAG_DESTROYING 0x0400000000000000ULL 157 158#define G_MIRROR_DEVICE_STATE_STARTING 0 159#define G_MIRROR_DEVICE_STATE_RUNNING 1 160 161/* Bump syncid on first write. 
*/ 162#define G_MIRROR_BUMP_SYNCID 0x1 163/* Bump genid immediately. */ 164#define G_MIRROR_BUMP_GENID 0x2 165struct g_mirror_softc { 166 u_int sc_state; /* Device state. */ 167 uint32_t sc_slice; /* Slice size. */ 168 uint8_t sc_balance; /* Balance algorithm. */ 169 uint64_t sc_mediasize; /* Device size. */ 170 uint32_t sc_sectorsize; /* Sector size. */ 171 uint64_t sc_flags; /* Additional flags. */ 172 173 struct g_geom *sc_geom; 174 struct g_provider *sc_provider; 175 176 uint32_t sc_id; /* Mirror unique ID. */ 177 178 struct sx sc_lock; 179 struct bio_queue_head sc_queue; 180 struct mtx sc_queue_mtx; 181 struct proc *sc_worker; 182 struct bio_queue_head sc_regular_delayed; /* Delayed I/O requests due 183 collision with sync 184 requests. */ 185 struct bio_queue_head sc_inflight; /* In-flight regular write 186 requests. */ 187 struct bio_queue_head sc_sync_delayed; /* Delayed sync requests due 188 collision with regular 189 requests. */ 190 191 LIST_HEAD(, g_mirror_disk) sc_disks; 192 u_int sc_ndisks; /* Number of disks. */ 193 struct g_mirror_disk *sc_hint; 194 195 u_int sc_genid; /* Generation ID. */ 196 u_int sc_syncid; /* Synchronization ID. */ 197 int sc_bump_id; 198 struct g_mirror_device_sync sc_sync; 199 int sc_idle; /* DIRTY flags removed. 
*/ 200 time_t sc_last_write; 201 u_int sc_writes; 202 203 TAILQ_HEAD(, g_mirror_event) sc_events; 204 struct mtx sc_events_mtx; 205 206 struct callout sc_callout; 207 208 struct root_hold_token *sc_rootmount; 209}; 210#define sc_name sc_geom->name 211 212u_int g_mirror_ndisks(struct g_mirror_softc *sc, int state); 213#define G_MIRROR_DESTROY_SOFT 0 214#define G_MIRROR_DESTROY_DELAYED 1 215#define G_MIRROR_DESTROY_HARD 2 216int g_mirror_destroy(struct g_mirror_softc *sc, int how); 217int g_mirror_event_send(void *arg, int state, int flags); 218struct g_mirror_metadata; 219int g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp, 220 struct g_mirror_metadata *md); 221int g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md); 222void g_mirror_fill_metadata(struct g_mirror_softc *sc, 223 struct g_mirror_disk *disk, struct g_mirror_metadata *md); 224void g_mirror_update_metadata(struct g_mirror_disk *disk); 225 226g_ctl_req_t g_mirror_config; 227#endif /* _KERNEL */ 228 229struct g_mirror_metadata { 230 char md_magic[16]; /* Magic value. */ 231 uint32_t md_version; /* Version number. */ 232 char md_name[16]; /* Mirror name. */ 233 uint32_t md_mid; /* Mirror unique ID. */ 234 uint32_t md_did; /* Disk unique ID. */ 235 uint8_t md_all; /* Number of disks in mirror. */ 236 uint32_t md_genid; /* Generation ID. */ 237 uint32_t md_syncid; /* Synchronization ID. */ 238 uint8_t md_priority; /* Disk priority. */ 239 uint32_t md_slice; /* Slice size. */ 240 uint8_t md_balance; /* Balance type. */ 241 uint64_t md_mediasize; /* Size of the smallest 242 disk in mirror. */ 243 uint32_t md_sectorsize; /* Sector size. */ 244 uint64_t md_sync_offset; /* Synchronized offset. */ 245 uint64_t md_mflags; /* Additional mirror flags. */ 246 uint64_t md_dflags; /* Additional disk flags. */ 247 char md_provider[16]; /* Hardcoded provider. */ 248 uint64_t md_provsize; /* Provider's size. */ 249 u_char md_hash[16]; /* MD5 hash. 
*/ 250}; 251static __inline void 252mirror_metadata_encode(struct g_mirror_metadata *md, u_char *data) 253{ 254 MD5_CTX ctx; 255 256 bcopy(md->md_magic, data, 16); 257 le32enc(data + 16, md->md_version); 258 bcopy(md->md_name, data + 20, 16); 259 le32enc(data + 36, md->md_mid); 260 le32enc(data + 40, md->md_did); 261 *(data + 44) = md->md_all; 262 le32enc(data + 45, md->md_genid); 263 le32enc(data + 49, md->md_syncid); 264 *(data + 53) = md->md_priority; 265 le32enc(data + 54, md->md_slice); 266 *(data + 58) = md->md_balance; 267 le64enc(data + 59, md->md_mediasize); 268 le32enc(data + 67, md->md_sectorsize); 269 le64enc(data + 71, md->md_sync_offset); 270 le64enc(data + 79, md->md_mflags); 271 le64enc(data + 87, md->md_dflags); 272 bcopy(md->md_provider, data + 95, 16); 273 le64enc(data + 111, md->md_provsize); 274 MD5Init(&ctx); 275 MD5Update(&ctx, data, 119); 276 MD5Final(md->md_hash, &ctx); 277 bcopy(md->md_hash, data + 119, 16); 278} 279static __inline int 280mirror_metadata_decode_v0v1(const u_char *data, struct g_mirror_metadata *md) 281{ 282 MD5_CTX ctx; 283 284 bcopy(data + 20, md->md_name, 16); 285 md->md_mid = le32dec(data + 36); 286 md->md_did = le32dec(data + 40); 287 md->md_all = *(data + 44); 288 md->md_syncid = le32dec(data + 45); 289 md->md_priority = *(data + 49); 290 md->md_slice = le32dec(data + 50); 291 md->md_balance = *(data + 54); 292 md->md_mediasize = le64dec(data + 55); 293 md->md_sectorsize = le32dec(data + 63); 294 md->md_sync_offset = le64dec(data + 67); 295 md->md_mflags = le64dec(data + 75); 296 md->md_dflags = le64dec(data + 83); 297 bcopy(data + 91, md->md_provider, 16); 298 bcopy(data + 107, md->md_hash, 16); 299 MD5Init(&ctx); 300 MD5Update(&ctx, data, 107); 301 MD5Final(md->md_hash, &ctx); 302 if (bcmp(md->md_hash, data + 107, 16) != 0) 303 return (EINVAL); 304 305 /* New fields. 
*/ 306 md->md_genid = 0; 307 md->md_provsize = 0; 308 309 return (0); 310} 311static __inline int 312mirror_metadata_decode_v2(const u_char *data, struct g_mirror_metadata *md) 313{ 314 MD5_CTX ctx; 315 316 bcopy(data + 20, md->md_name, 16); 317 md->md_mid = le32dec(data + 36); 318 md->md_did = le32dec(data + 40); 319 md->md_all = *(data + 44); 320 md->md_genid = le32dec(data + 45); 321 md->md_syncid = le32dec(data + 49); 322 md->md_priority = *(data + 53); 323 md->md_slice = le32dec(data + 54); 324 md->md_balance = *(data + 58); 325 md->md_mediasize = le64dec(data + 59); 326 md->md_sectorsize = le32dec(data + 67); 327 md->md_sync_offset = le64dec(data + 71); 328 md->md_mflags = le64dec(data + 79); 329 md->md_dflags = le64dec(data + 87); 330 bcopy(data + 95, md->md_provider, 16); 331 bcopy(data + 111, md->md_hash, 16); 332 MD5Init(&ctx); 333 MD5Update(&ctx, data, 111); 334 MD5Final(md->md_hash, &ctx); 335 if (bcmp(md->md_hash, data + 111, 16) != 0) 336 return (EINVAL); 337 338 /* New fields. 
*/ 339 md->md_provsize = 0; 340 341 return (0); 342} 343static __inline int 344mirror_metadata_decode_v3(const u_char *data, struct g_mirror_metadata *md) 345{ 346 MD5_CTX ctx; 347 348 bcopy(data + 20, md->md_name, 16); 349 md->md_mid = le32dec(data + 36); 350 md->md_did = le32dec(data + 40); 351 md->md_all = *(data + 44); 352 md->md_genid = le32dec(data + 45); 353 md->md_syncid = le32dec(data + 49); 354 md->md_priority = *(data + 53); 355 md->md_slice = le32dec(data + 54); 356 md->md_balance = *(data + 58); 357 md->md_mediasize = le64dec(data + 59); 358 md->md_sectorsize = le32dec(data + 67); 359 md->md_sync_offset = le64dec(data + 71); 360 md->md_mflags = le64dec(data + 79); 361 md->md_dflags = le64dec(data + 87); 362 bcopy(data + 95, md->md_provider, 16); 363 md->md_provsize = le64dec(data + 111); 364 bcopy(data + 119, md->md_hash, 16); 365 MD5Init(&ctx); 366 MD5Update(&ctx, data, 119); 367 MD5Final(md->md_hash, &ctx); 368 if (bcmp(md->md_hash, data + 119, 16) != 0) 369 return (EINVAL); 370 return (0); 371} 372static __inline int 373mirror_metadata_decode(const u_char *data, struct g_mirror_metadata *md) 374{ 375 int error; 376 377 bcopy(data, md->md_magic, 16); 378 md->md_version = le32dec(data + 16); 379 switch (md->md_version) { 380 case 0: 381 case 1: 382 error = mirror_metadata_decode_v0v1(data, md); 383 break; 384 case 2: 385 error = mirror_metadata_decode_v2(data, md); 386 break; 387 case 3: 388 error = mirror_metadata_decode_v3(data, md); 389 break; 390 default: 391 error = EINVAL; 392 break; 393 } 394 return (error); 395} 396 397static __inline const char * 398balance_name(u_int balance) 399{ 400 static const char *algorithms[] = { 401 [G_MIRROR_BALANCE_NONE] = "none", 402 [G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin", 403 [G_MIRROR_BALANCE_LOAD] = "load", 404 [G_MIRROR_BALANCE_SPLIT] = "split", 405 [G_MIRROR_BALANCE_PREFER] = "prefer", 406 [G_MIRROR_BALANCE_MAX + 1] = "unknown" 407 }; 408 409 if (balance > G_MIRROR_BALANCE_MAX) 410 balance = 
G_MIRROR_BALANCE_MAX + 1; 411 412 return (algorithms[balance]); 413} 414 415static __inline int 416balance_id(const char *name) 417{ 418 static const char *algorithms[] = { 419 [G_MIRROR_BALANCE_NONE] = "none", 420 [G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin", 421 [G_MIRROR_BALANCE_LOAD] = "load", 422 [G_MIRROR_BALANCE_SPLIT] = "split", 423 [G_MIRROR_BALANCE_PREFER] = "prefer" 424 }; 425 int n; 426 427 for (n = G_MIRROR_BALANCE_MIN; n <= G_MIRROR_BALANCE_MAX; n++) { 428 if (strcmp(name, algorithms[n]) == 0) 429 return (n); 430 } 431 return (-1); 432} 433 434static __inline void 435mirror_metadata_dump(const struct g_mirror_metadata *md) 436{ 437 static const char hex[] = "0123456789abcdef"; 438 char hash[16 * 2 + 1]; 439 u_int i; 440 441 printf(" magic: %s\n", md->md_magic); 442 printf(" version: %u\n", (u_int)md->md_version); 443 printf(" name: %s\n", md->md_name); 444 printf(" mid: %u\n", (u_int)md->md_mid); 445 printf(" did: %u\n", (u_int)md->md_did); 446 printf(" all: %u\n", (u_int)md->md_all); 447 printf(" genid: %u\n", (u_int)md->md_genid); 448 printf(" syncid: %u\n", (u_int)md->md_syncid); 449 printf(" priority: %u\n", (u_int)md->md_priority); 450 printf(" slice: %u\n", (u_int)md->md_slice); 451 printf(" balance: %s\n", balance_name((u_int)md->md_balance)); 452 printf(" mediasize: %jd\n", (intmax_t)md->md_mediasize); 453 printf("sectorsize: %u\n", (u_int)md->md_sectorsize); 454 printf("syncoffset: %jd\n", (intmax_t)md->md_sync_offset); 455 printf(" mflags:"); 456 if (md->md_mflags == 0) 457 printf(" NONE"); 458 else { 459 if ((md->md_mflags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0) 460 printf(" NOAUTOSYNC"); 461 } 462 printf("\n"); 463 printf(" dflags:"); 464 if (md->md_dflags == 0) 465 printf(" NONE"); 466 else { 467 if ((md->md_dflags & G_MIRROR_DISK_FLAG_DIRTY) != 0) 468 printf(" DIRTY"); 469 if ((md->md_dflags & G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) 470 printf(" SYNCHRONIZING"); 471 if ((md->md_dflags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) 472 
printf(" FORCE_SYNC"); 473 if ((md->md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) 474 printf(" INACTIVE"); 475 } 476 printf("\n"); 477 printf("hcprovider: %s\n", md->md_provider); 478 printf(" provsize: %ju\n", (uintmax_t)md->md_provsize); 479 bzero(hash, sizeof(hash)); 480 for (i = 0; i < 16; i++) { 481 hash[i * 2] = hex[md->md_hash[i] >> 4]; 482 hash[i * 2 + 1] = hex[md->md_hash[i] & 0x0f]; 483 } 484 printf(" MD5 hash: %s\n", hash); 485} 486#endif /* !_G_MIRROR_H_ */ 487