/* primary.c revision 211977 */
/*-
 * Copyright (c) 2009 The FreeBSD Foundation
 * Copyright (c) 2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * This software was developed by Pawel Jakub Dawidek under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sbin/hastd/primary.c 211977 2010-08-29 21:41:53Z pjd $");

#include <sys/types.h>
#include <sys/time.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/refcount.h>
#include <sys/stat.h>

#include <geom/gate/g_gate.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libgeom.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>

#include <activemap.h>
#include <nv.h>
#include <rangelock.h>

#include "control.h"
#include "hast.h"
#include "hast_proto.h"
#include "hastd.h"
#include "hooks.h"
#include "metadata.h"
#include "proto.h"
#include "pjdlog.h"
#include "subr.h"
#include "synch.h"

/* There is only one remote component for now: component number 1. */
#define ISREMOTE(no)	((no) == 1)

/*
 * One I/O request as it travels between the GEOM Gate kernel interface and
 * the local/remote component threads.
 */
struct hio {
	/*
	 * Number of components we are still waiting for.
	 * When this field goes to 0, we can send the request back to the
	 * kernel. Each component has to decrease this counter by one
	 * even on failure.
	 */
	unsigned int hio_countdown;
	/*
	 * Each component has a place to store its own error.
	 * Once the request is handled by all components we can decide if the
	 * request overall is successful or not.
	 */
	int *hio_errors;
	/*
	 * Structure used to communicate with GEOM Gate class.
	 */
	struct g_gate_ctl_io hio_ggio;
	/* One TAILQ entry per component, allocated in init_environment(). */
	TAILQ_ENTRY(hio) *hio_next;
};
/*
 * The free and done lists are global (not per-component), so they simply
 * reuse the first per-component linkage slot.
 */
#define hio_free_next	hio_next[0]
#define hio_done_next	hio_next[0]

/*
 * Free list holds unused structures. When free list is empty, we have to wait
 * until some in-progress requests are freed.
 */
static TAILQ_HEAD(, hio) hio_free_list;
static pthread_mutex_t hio_free_list_lock;
static pthread_cond_t hio_free_list_cond;
/*
 * There is one send list for every component. One request is placed on all
 * send lists - each component gets the same request, but each component is
 * responsible for managing its own send list.
 */
static TAILQ_HEAD(, hio) *hio_send_list;
static pthread_mutex_t *hio_send_list_lock;
static pthread_cond_t *hio_send_list_cond;
/*
 * There is one recv list for every component, although local components don't
 * use recv lists as local requests are done synchronously.
 */
static TAILQ_HEAD(, hio) *hio_recv_list;
static pthread_mutex_t *hio_recv_list_lock;
static pthread_cond_t *hio_recv_list_cond;
/*
 * Request is placed on done list by the slowest component (the one that
 * decreased hio_countdown from 1 to 0).
 */
static TAILQ_HEAD(, hio) hio_done_list;
static pthread_mutex_t hio_done_list_lock;
static pthread_cond_t hio_done_list_cond;
/*
 * Structures below are for interaction with sync thread.
 */
static bool sync_inprogress;
static pthread_mutex_t sync_lock;
static pthread_cond_t sync_cond;
/*
 * The lock below allows to synchronize access to remote connections.
 */
static pthread_rwlock_t *hio_remote_lock;
static pthread_mutex_t hio_guard_lock;
static pthread_cond_t hio_guard_cond;

/*
 * Lock to synchronize metadata updates. Also synchronize access to
 * hr_primary_localcnt and hr_primary_remotecnt fields.
 */
static pthread_mutex_t metadata_lock;

/*
 * Maximum number of outstanding I/O requests.
 */
#define HAST_HIO_MAX	256
/*
 * Number of components. At this point there are only two components: local
 * and remote, but in the future it might be possible to use multiple local
 * and remote components.
 */
#define HAST_NCOMPONENTS	2
/*
 * Number of seconds to sleep between keepalive packets.
 */
#define KEEPALIVE_SLEEP		10
/*
 * Number of seconds to sleep between reconnect retries.
 */
#define RECONNECT_SLEEP		5

/*
 * NOTE(review): the 'no' argument is currently unused - connectivity is
 * tracked per-resource, not per-component.
 */
#define ISCONNECTED(res, no)	\
	((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL)

/* Append 'hio' to the per-component queue 'name', waking a sleeping taker. */
#define QUEUE_INSERT1(hio, name, ncomp)	do {			\
	bool _wakeup;						\
								\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);		\
	_wakeup = TAILQ_EMPTY(&hio_##name##_list[(ncomp)]);	\
	TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio),	\
	    hio_next[(ncomp)]);					\
	mtx_unlock(&hio_##name##_list_lock[ncomp]);		\
	if (_wakeup)						\
		cv_signal(&hio_##name##_list_cond[(ncomp)]);	\
} while (0)
/* Append 'hio' to a global (non-per-component) queue 'name'. */
#define QUEUE_INSERT2(hio, name)	do {			\
	bool _wakeup;						\
								\
	mtx_lock(&hio_##name##_list_lock);			\
	_wakeup = TAILQ_EMPTY(&hio_##name##_list);		\
	TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\
	mtx_unlock(&hio_##name##_list_lock);			\
	if (_wakeup)						\
		cv_signal(&hio_##name##_list_cond);		\
} while (0)
/* Block until a request appears on per-component queue 'name', then pop it. */
#define QUEUE_TAKE1(hio, name, ncomp)	do {			\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);		\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL) { \
		cv_wait(&hio_##name##_list_cond[(ncomp)],	\
		    &hio_##name##_list_lock[(ncomp)]);		\
	}							\
	TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio),	\
	    hio_next[(ncomp)]);					\
	mtx_unlock(&hio_##name##_list_lock[(ncomp)]);		\
} while (0)
/* Block until a request appears on global queue 'name', then pop it. */
#define QUEUE_TAKE2(hio, name)	do {				\
	mtx_lock(&hio_##name##_list_lock);			\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) { \
		cv_wait(&hio_##name##_list_cond,		\
		    &hio_##name##_list_lock);			\
	}							\
	TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next); \
	mtx_unlock(&hio_##name##_list_lock);			\
} while (0)

/*
 * Sync requests are distinguished from kernel requests by a negative
 * gctl_unit: -1 while in flight, -2 once completed.
 */
#define SYNCREQ(hio)		do {			\
	(hio)->hio_ggio.gctl_unit = -1;			\
	(hio)->hio_ggio.gctl_seq = 1;			\
} while (0)
#define ISSYNCREQ(hio)		((hio)->hio_ggio.gctl_unit == -1)
#define SYNCREQDONE(hio)	do { (hio)->hio_ggio.gctl_unit = -2; } while (0)
#define ISSYNCREQDONE(hio)	((hio)->hio_ggio.gctl_unit == -2)

static struct hast_resource *gres;

static pthread_mutex_t range_lock;
static struct rangelocks *range_regular;
static bool range_regular_wait;
static pthread_cond_t range_regular_cond;
static struct rangelocks *range_sync;
static bool range_sync_wait;
static pthread_cond_t range_sync_cond;

static void *ggate_recv_thread(void *arg);
static void *local_send_thread(void *arg);
static void *remote_send_thread(void *arg);
static void *remote_recv_thread(void *arg);
static void *ggate_send_thread(void *arg);
static void *sync_thread(void *arg);
static void *guard_thread(void *arg);

static void sighandler(int sig);

/*
 * Release kernel-side resources (local provider descriptor, ggate unit)
 * before the worker exits.  Preserves errno so callers can report the
 * original failure.
 */
static void
cleanup(struct hast_resource *res)
{
	int rerrno;

	/* Remember errno. */
	rerrno = errno;

	/*
	 * Close descriptor to /dev/hast/<name>
	 * to work-around race in the kernel.
	 */
	close(res->hr_localfd);

	/* Destroy ggate provider if we created one. */
	if (res->hr_ggateunit >= 0) {
		struct g_gate_ctl_destroy ggiod;

		ggiod.gctl_version = G_GATE_VERSION;
		ggiod.gctl_unit = res->hr_ggateunit;
		ggiod.gctl_force = 1;
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) < 0) {
			pjdlog_warning("Unable to destroy hast/%s device",
			    res->hr_provname);
		}
		res->hr_ggateunit = -1;
	}

	/* Restore errno. */
	errno = rerrno;
}

/*
 * Log the given message together with strerror(errno), clean up and exit.
 * Only for error exits (exitcode must not be EX_OK).
 */
static void
primary_exit(int exitcode, const char *fmt, ...)
{
	va_list ap;

	assert(exitcode != EX_OK);
	va_start(ap, fmt);
	pjdlogv_errno(LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

/*
 * Like primary_exit(), but without errno decoration; may also be used for
 * successful (EX_OK) termination, logged at LOG_INFO.
 */
static void
primary_exitx(int exitcode, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	pjdlogv(exitcode == EX_OK ? LOG_INFO : LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

/*
 * Write the in-memory activemap bitmap to its on-disk location (right after
 * the metadata).  Returns 0 on success, -1 on write failure (errno kept).
 */
static int
hast_activemap_flush(struct hast_resource *res)
{
	const unsigned char *buf;
	size_t size;

	buf = activemap_bitmap(res->hr_amp, &size);
	assert(buf != NULL);
	assert((size % res->hr_local_sectorsize) == 0);
	if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) !=
	    (ssize_t)size) {
		KEEP_ERRNO(pjdlog_errno(LOG_ERR,
		    "Unable to flush activemap to disk"));
		return (-1);
	}
	return (0);
}

/*
 * True if the resource is configured with an actual remote address
 * (i.e. not the literal "none").
 */
static bool
real_remote(const struct hast_resource *res)
{

	return (strcmp(res->hr_remoteaddr, "none") != 0);
}

/*
 * Allocate and initialize all global queues, locks and condition variables,
 * pre-allocate the fixed pool of HAST_HIO_MAX request structures, and install
 * the signal handlers.  Exits the process on allocation failure.
 */
static void
init_environment(struct hast_resource *res __unused)
{
	struct hio *hio;
	unsigned int ii, ncomps;

	/*
	 * In the future it might be per-resource value.
	 */
	ncomps = HAST_NCOMPONENTS;

	/*
	 * Allocate memory needed by lists.
	 */
	hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps);
	if (hio_send_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send lists.",
		    sizeof(hio_send_list[0]) * ncomps);
	}
	hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps);
	if (hio_send_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list locks.",
		    sizeof(hio_send_list_lock[0]) * ncomps);
	}
	hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps);
	if (hio_send_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list condition variables.",
		    sizeof(hio_send_list_cond[0]) * ncomps);
	}
	hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps);
	if (hio_recv_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv lists.",
		    sizeof(hio_recv_list[0]) * ncomps);
	}
	hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps);
	if (hio_recv_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list locks.",
		    sizeof(hio_recv_list_lock[0]) * ncomps);
	}
	hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps);
	if (hio_recv_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list condition variables.",
		    sizeof(hio_recv_list_cond[0]) * ncomps);
	}
	hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps);
	if (hio_remote_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for remote connections locks.",
		    sizeof(hio_remote_lock[0]) * ncomps);
	}

	/*
	 * Initialize lists, their locks and their condition variables.
	 */
	TAILQ_INIT(&hio_free_list);
	mtx_init(&hio_free_list_lock);
	cv_init(&hio_free_list_cond);
	for (ii = 0; ii < HAST_NCOMPONENTS; ii++) {
		TAILQ_INIT(&hio_send_list[ii]);
		mtx_init(&hio_send_list_lock[ii]);
		cv_init(&hio_send_list_cond[ii]);
		TAILQ_INIT(&hio_recv_list[ii]);
		mtx_init(&hio_recv_list_lock[ii]);
		cv_init(&hio_recv_list_cond[ii]);
		rw_init(&hio_remote_lock[ii]);
	}
	TAILQ_INIT(&hio_done_list);
	mtx_init(&hio_done_list_lock);
	cv_init(&hio_done_list_cond);
	mtx_init(&hio_guard_lock);
	cv_init(&hio_guard_cond);
	mtx_init(&metadata_lock);

	/*
	 * Allocate requests pool and initialize requests.
	 */
	for (ii = 0; ii < HAST_HIO_MAX; ii++) {
		hio = malloc(sizeof(*hio));
		if (hio == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio request.",
			    sizeof(*hio));
		}
		hio->hio_countdown = 0;
		hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps);
		if (hio->hio_errors == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable allocate %zu bytes of memory for hio errors.",
			    sizeof(hio->hio_errors[0]) * ncomps);
		}
		hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps);
		if (hio->hio_next == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable allocate %zu bytes of memory for hio_next field.",
			    sizeof(hio->hio_next[0]) * ncomps);
		}
		hio->hio_ggio.gctl_version = G_GATE_VERSION;
		/* Buffer is MAXPHYS: the largest request the kernel can send. */
		hio->hio_ggio.gctl_data = malloc(MAXPHYS);
		if (hio->hio_ggio.gctl_data == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for gctl_data.",
			    MAXPHYS);
		}
		hio->hio_ggio.gctl_length = MAXPHYS;
		hio->hio_ggio.gctl_error = 0;
		TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next);
	}

	/*
	 * Turn on signals handling.
	 */
	signal(SIGINT, sighandler);
	signal(SIGTERM, sighandler);
	signal(SIGHUP, sighandler);
	signal(SIGCHLD, sighandler);
}

/*
 * Read metadata, set up the activemap and range locks, and load the on-disk
 * activemap.  On first use of the provider (resuid == 0), generate a resource
 * unique id and initialize the generation counters.
 */
static void
init_local(struct hast_resource *res)
{
	unsigned char *buf;
	size_t mapsize;

	if (metadata_read(res, true) < 0)
		exit(EX_NOINPUT);
	mtx_init(&res->hr_amp_lock);
	if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize,
	    res->hr_local_sectorsize, res->hr_keepdirty) < 0) {
		primary_exit(EX_TEMPFAIL, "Unable to create activemap");
	}
	mtx_init(&range_lock);
	cv_init(&range_regular_cond);
	if (rangelock_init(&range_regular) < 0)
		primary_exit(EX_TEMPFAIL, "Unable to create regular range lock");
	cv_init(&range_sync_cond);
	if (rangelock_init(&range_sync) < 0)
		primary_exit(EX_TEMPFAIL, "Unable to create sync range lock");
	mapsize = activemap_ondisk_size(res->hr_amp);
	buf = calloc(1, mapsize);
	if (buf == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate buffer for activemap.");
	}
	if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) !=
	    (ssize_t)mapsize) {
		primary_exit(EX_NOINPUT, "Unable to read activemap");
	}
	activemap_copyin(res->hr_amp, buf, mapsize);
	free(buf);
	if (res->hr_resuid != 0)
		return;
	/*
	 * We're using provider for the first time, so we have to generate
	 * resource unique identifier and initialize local and remote counts.
477 */ 478 arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid)); 479 res->hr_primary_localcnt = 1; 480 res->hr_primary_remotecnt = 0; 481 if (metadata_write(res) < 0) 482 exit(EX_NOINPUT); 483} 484 485static bool 486init_remote(struct hast_resource *res, struct proto_conn **inp, 487 struct proto_conn **outp) 488{ 489 struct proto_conn *in, *out; 490 struct nv *nvout, *nvin; 491 const unsigned char *token; 492 unsigned char *map; 493 const char *errmsg; 494 int32_t extentsize; 495 int64_t datasize; 496 uint32_t mapsize; 497 size_t size; 498 499 assert((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL)); 500 assert(real_remote(res)); 501 502 in = out = NULL; 503 504 /* Prepare outgoing connection with remote node. */ 505 if (proto_client(res->hr_remoteaddr, &out) < 0) { 506 primary_exit(EX_TEMPFAIL, "Unable to create connection to %s", 507 res->hr_remoteaddr); 508 } 509 /* Try to connect, but accept failure. */ 510 if (proto_connect(out) < 0) { 511 pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 512 res->hr_remoteaddr); 513 goto close; 514 } 515 /* Error in setting timeout is not critical, but why should it fail? */ 516 if (proto_timeout(out, res->hr_timeout) < 0) 517 pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 518 /* 519 * First handshake step. 520 * Setup outgoing connection with remote node. 
521 */ 522 nvout = nv_alloc(); 523 nv_add_string(nvout, res->hr_name, "resource"); 524 if (nv_error(nvout) != 0) { 525 pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 526 "Unable to allocate header for connection with %s", 527 res->hr_remoteaddr); 528 nv_free(nvout); 529 goto close; 530 } 531 if (hast_proto_send(res, out, nvout, NULL, 0) < 0) { 532 pjdlog_errno(LOG_WARNING, 533 "Unable to send handshake header to %s", 534 res->hr_remoteaddr); 535 nv_free(nvout); 536 goto close; 537 } 538 nv_free(nvout); 539 if (hast_proto_recv_hdr(out, &nvin) < 0) { 540 pjdlog_errno(LOG_WARNING, 541 "Unable to receive handshake header from %s", 542 res->hr_remoteaddr); 543 goto close; 544 } 545 errmsg = nv_get_string(nvin, "errmsg"); 546 if (errmsg != NULL) { 547 pjdlog_warning("%s", errmsg); 548 nv_free(nvin); 549 goto close; 550 } 551 token = nv_get_uint8_array(nvin, &size, "token"); 552 if (token == NULL) { 553 pjdlog_warning("Handshake header from %s has no 'token' field.", 554 res->hr_remoteaddr); 555 nv_free(nvin); 556 goto close; 557 } 558 if (size != sizeof(res->hr_token)) { 559 pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).", 560 res->hr_remoteaddr, size, sizeof(res->hr_token)); 561 nv_free(nvin); 562 goto close; 563 } 564 bcopy(token, res->hr_token, sizeof(res->hr_token)); 565 nv_free(nvin); 566 567 /* 568 * Second handshake step. 569 * Setup incoming connection with remote node. 570 */ 571 if (proto_client(res->hr_remoteaddr, &in) < 0) { 572 pjdlog_errno(LOG_WARNING, "Unable to create connection to %s", 573 res->hr_remoteaddr); 574 } 575 /* Try to connect, but accept failure. */ 576 if (proto_connect(in) < 0) { 577 pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 578 res->hr_remoteaddr); 579 goto close; 580 } 581 /* Error in setting timeout is not critical, but why should it fail? 
*/ 582 if (proto_timeout(in, res->hr_timeout) < 0) 583 pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 584 nvout = nv_alloc(); 585 nv_add_string(nvout, res->hr_name, "resource"); 586 nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token), 587 "token"); 588 nv_add_uint64(nvout, res->hr_resuid, "resuid"); 589 nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt"); 590 nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt"); 591 if (nv_error(nvout) != 0) { 592 pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 593 "Unable to allocate header for connection with %s", 594 res->hr_remoteaddr); 595 nv_free(nvout); 596 goto close; 597 } 598 if (hast_proto_send(res, in, nvout, NULL, 0) < 0) { 599 pjdlog_errno(LOG_WARNING, 600 "Unable to send handshake header to %s", 601 res->hr_remoteaddr); 602 nv_free(nvout); 603 goto close; 604 } 605 nv_free(nvout); 606 if (hast_proto_recv_hdr(out, &nvin) < 0) { 607 pjdlog_errno(LOG_WARNING, 608 "Unable to receive handshake header from %s", 609 res->hr_remoteaddr); 610 goto close; 611 } 612 errmsg = nv_get_string(nvin, "errmsg"); 613 if (errmsg != NULL) { 614 pjdlog_warning("%s", errmsg); 615 nv_free(nvin); 616 goto close; 617 } 618 datasize = nv_get_int64(nvin, "datasize"); 619 if (datasize != res->hr_datasize) { 620 pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).", 621 (intmax_t)res->hr_datasize, (intmax_t)datasize); 622 nv_free(nvin); 623 goto close; 624 } 625 extentsize = nv_get_int32(nvin, "extentsize"); 626 if (extentsize != res->hr_extentsize) { 627 pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).", 628 (ssize_t)res->hr_extentsize, (ssize_t)extentsize); 629 nv_free(nvin); 630 goto close; 631 } 632 res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt"); 633 res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt"); 634 res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc"); 635 map = NULL; 636 mapsize = nv_get_uint32(nvin, "mapsize"); 637 if (mapsize > 
0) { 638 map = malloc(mapsize); 639 if (map == NULL) { 640 pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).", 641 (uintmax_t)mapsize); 642 nv_free(nvin); 643 goto close; 644 } 645 /* 646 * Remote node have some dirty extents on its own, lets 647 * download its activemap. 648 */ 649 if (hast_proto_recv_data(res, out, nvin, map, 650 mapsize) < 0) { 651 pjdlog_errno(LOG_ERR, 652 "Unable to receive remote activemap"); 653 nv_free(nvin); 654 free(map); 655 goto close; 656 } 657 /* 658 * Merge local and remote bitmaps. 659 */ 660 activemap_merge(res->hr_amp, map, mapsize); 661 free(map); 662 /* 663 * Now that we merged bitmaps from both nodes, flush it to the 664 * disk before we start to synchronize. 665 */ 666 (void)hast_activemap_flush(res); 667 } 668 pjdlog_info("Connected to %s.", res->hr_remoteaddr); 669 if (inp != NULL && outp != NULL) { 670 *inp = in; 671 *outp = out; 672 } else { 673 res->hr_remotein = in; 674 res->hr_remoteout = out; 675 } 676 return (true); 677close: 678 proto_close(out); 679 if (in != NULL) 680 proto_close(in); 681 return (false); 682} 683 684static void 685sync_start(void) 686{ 687 688 mtx_lock(&sync_lock); 689 sync_inprogress = true; 690 mtx_unlock(&sync_lock); 691 cv_signal(&sync_cond); 692} 693 694static void 695sync_stop(void) 696{ 697 698 mtx_lock(&sync_lock); 699 if (sync_inprogress) 700 sync_inprogress = false; 701 mtx_unlock(&sync_lock); 702} 703 704static void 705init_ggate(struct hast_resource *res) 706{ 707 struct g_gate_ctl_create ggiocreate; 708 struct g_gate_ctl_cancel ggiocancel; 709 710 /* 711 * We communicate with ggate via /dev/ggctl. Open it. 712 */ 713 res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR); 714 if (res->hr_ggatefd < 0) 715 primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME); 716 /* 717 * Create provider before trying to connect, as connection failure 718 * is not critical, but may take some time. 
719 */ 720 ggiocreate.gctl_version = G_GATE_VERSION; 721 ggiocreate.gctl_mediasize = res->hr_datasize; 722 ggiocreate.gctl_sectorsize = res->hr_local_sectorsize; 723 ggiocreate.gctl_flags = 0; 724 ggiocreate.gctl_maxcount = G_GATE_MAX_QUEUE_SIZE; 725 ggiocreate.gctl_timeout = 0; 726 ggiocreate.gctl_unit = G_GATE_NAME_GIVEN; 727 snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s", 728 res->hr_provname); 729 bzero(ggiocreate.gctl_info, sizeof(ggiocreate.gctl_info)); 730 if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) { 731 pjdlog_info("Device hast/%s created.", res->hr_provname); 732 res->hr_ggateunit = ggiocreate.gctl_unit; 733 return; 734 } 735 if (errno != EEXIST) { 736 primary_exit(EX_OSERR, "Unable to create hast/%s device", 737 res->hr_provname); 738 } 739 pjdlog_debug(1, 740 "Device hast/%s already exists, we will try to take it over.", 741 res->hr_provname); 742 /* 743 * If we received EEXIST, we assume that the process who created the 744 * provider died and didn't clean up. In that case we will start from 745 * where he left of. 746 */ 747 ggiocancel.gctl_version = G_GATE_VERSION; 748 ggiocancel.gctl_unit = G_GATE_NAME_GIVEN; 749 snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s", 750 res->hr_provname); 751 if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) { 752 pjdlog_info("Device hast/%s recovered.", res->hr_provname); 753 res->hr_ggateunit = ggiocancel.gctl_unit; 754 return; 755 } 756 primary_exit(EX_OSERR, "Unable to take over hast/%s device", 757 res->hr_provname); 758} 759 760void 761hastd_primary(struct hast_resource *res) 762{ 763 pthread_t td; 764 pid_t pid; 765 int error; 766 767 gres = res; 768 769 /* 770 * Create communication channel between parent and child. 
771 */ 772 if (proto_client("socketpair://", &res->hr_ctrl) < 0) { 773 KEEP_ERRNO((void)pidfile_remove(pfh)); 774 primary_exit(EX_OSERR, 775 "Unable to create control sockets between parent and child"); 776 } 777 778 pid = fork(); 779 if (pid < 0) { 780 KEEP_ERRNO((void)pidfile_remove(pfh)); 781 primary_exit(EX_TEMPFAIL, "Unable to fork"); 782 } 783 784 if (pid > 0) { 785 /* This is parent. */ 786 res->hr_workerpid = pid; 787 return; 788 } 789 790 (void)pidfile_close(pfh); 791 hook_fini(); 792 793 setproctitle("%s (primary)", res->hr_name); 794 795 signal(SIGHUP, SIG_DFL); 796 signal(SIGCHLD, SIG_DFL); 797 798 hook_init(); 799 init_local(res); 800 if (real_remote(res) && init_remote(res, NULL, NULL)) 801 sync_start(); 802 init_ggate(res); 803 init_environment(res); 804 error = pthread_create(&td, NULL, ggate_recv_thread, res); 805 assert(error == 0); 806 error = pthread_create(&td, NULL, local_send_thread, res); 807 assert(error == 0); 808 error = pthread_create(&td, NULL, remote_send_thread, res); 809 assert(error == 0); 810 error = pthread_create(&td, NULL, remote_recv_thread, res); 811 assert(error == 0); 812 error = pthread_create(&td, NULL, ggate_send_thread, res); 813 assert(error == 0); 814 error = pthread_create(&td, NULL, sync_thread, res); 815 assert(error == 0); 816 error = pthread_create(&td, NULL, ctrl_thread, res); 817 assert(error == 0); 818 (void)guard_thread(res); 819} 820 821static void 822reqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio, const char *fmt, ...) 
823{ 824 char msg[1024]; 825 va_list ap; 826 int len; 827 828 va_start(ap, fmt); 829 len = vsnprintf(msg, sizeof(msg), fmt, ap); 830 va_end(ap); 831 if ((size_t)len < sizeof(msg)) { 832 switch (ggio->gctl_cmd) { 833 case BIO_READ: 834 (void)snprintf(msg + len, sizeof(msg) - len, 835 "READ(%ju, %ju).", (uintmax_t)ggio->gctl_offset, 836 (uintmax_t)ggio->gctl_length); 837 break; 838 case BIO_DELETE: 839 (void)snprintf(msg + len, sizeof(msg) - len, 840 "DELETE(%ju, %ju).", (uintmax_t)ggio->gctl_offset, 841 (uintmax_t)ggio->gctl_length); 842 break; 843 case BIO_FLUSH: 844 (void)snprintf(msg + len, sizeof(msg) - len, "FLUSH."); 845 break; 846 case BIO_WRITE: 847 (void)snprintf(msg + len, sizeof(msg) - len, 848 "WRITE(%ju, %ju).", (uintmax_t)ggio->gctl_offset, 849 (uintmax_t)ggio->gctl_length); 850 break; 851 default: 852 (void)snprintf(msg + len, sizeof(msg) - len, 853 "UNKNOWN(%u).", (unsigned int)ggio->gctl_cmd); 854 break; 855 } 856 } 857 pjdlog_common(loglevel, debuglevel, -1, "%s", msg); 858} 859 860static void 861remote_close(struct hast_resource *res, int ncomp) 862{ 863 864 rw_wlock(&hio_remote_lock[ncomp]); 865 /* 866 * A race is possible between dropping rlock and acquiring wlock - 867 * another thread can close connection in-between. 868 */ 869 if (!ISCONNECTED(res, ncomp)) { 870 assert(res->hr_remotein == NULL); 871 assert(res->hr_remoteout == NULL); 872 rw_unlock(&hio_remote_lock[ncomp]); 873 return; 874 } 875 876 assert(res->hr_remotein != NULL); 877 assert(res->hr_remoteout != NULL); 878 879 pjdlog_debug(2, "Closing incoming connection to %s.", 880 res->hr_remoteaddr); 881 proto_close(res->hr_remotein); 882 res->hr_remotein = NULL; 883 pjdlog_debug(2, "Closing outgoing connection to %s.", 884 res->hr_remoteaddr); 885 proto_close(res->hr_remoteout); 886 res->hr_remoteout = NULL; 887 888 rw_unlock(&hio_remote_lock[ncomp]); 889 890 pjdlog_warning("Disconnected from %s.", res->hr_remoteaddr); 891 892 /* 893 * Stop synchronization if in-progress. 
894 */ 895 sync_stop(); 896 897 /* 898 * Wake up guard thread (if we are not called from within guard thread), 899 * so it can immediately start reconnect. 900 */ 901 if (!mtx_owned(&hio_guard_lock)) { 902 mtx_lock(&hio_guard_lock); 903 cv_signal(&hio_guard_cond); 904 mtx_unlock(&hio_guard_lock); 905 } 906} 907 908/* 909 * Thread receives ggate I/O requests from the kernel and passes them to 910 * appropriate threads: 911 * WRITE - always goes to both local_send and remote_send threads 912 * READ (when the block is up-to-date on local component) - 913 * only local_send thread 914 * READ (when the block isn't up-to-date on local component) - 915 * only remote_send thread 916 * DELETE - always goes to both local_send and remote_send threads 917 * FLUSH - always goes to both local_send and remote_send threads 918 */ 919static void * 920ggate_recv_thread(void *arg) 921{ 922 struct hast_resource *res = arg; 923 struct g_gate_ctl_io *ggio; 924 struct hio *hio; 925 unsigned int ii, ncomp, ncomps; 926 int error; 927 928 ncomps = HAST_NCOMPONENTS; 929 930 for (;;) { 931 pjdlog_debug(2, "ggate_recv: Taking free request."); 932 QUEUE_TAKE2(hio, free); 933 pjdlog_debug(2, "ggate_recv: (%p) Got free request.", hio); 934 ggio = &hio->hio_ggio; 935 ggio->gctl_unit = res->hr_ggateunit; 936 ggio->gctl_length = MAXPHYS; 937 ggio->gctl_error = 0; 938 pjdlog_debug(2, 939 "ggate_recv: (%p) Waiting for request from the kernel.", 940 hio); 941 if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) < 0) { 942 if (sigexit_received) 943 pthread_exit(NULL); 944 primary_exit(EX_OSERR, "G_GATE_CMD_START failed"); 945 } 946 error = ggio->gctl_error; 947 switch (error) { 948 case 0: 949 break; 950 case ECANCELED: 951 /* Exit gracefully. */ 952 if (!sigexit_received) { 953 pjdlog_debug(2, 954 "ggate_recv: (%p) Received cancel from the kernel.", 955 hio); 956 pjdlog_info("Received cancel from the kernel, exiting."); 957 } 958 pthread_exit(NULL); 959 case ENOMEM: 960 /* 961 * Buffer too small? 
Impossible, we allocate MAXPHYS
			 * bytes - request can't be bigger than that.
			 */
			/* FALLTHROUGH */
		case ENXIO:
		default:
			primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.",
			    strerror(error));
		}
		/*
		 * Prime every per-component error slot with EINVAL
		 * ("not answered yet"); each worker thread overwrites
		 * its own slot with the real result.
		 */
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio,
		    "ggate_recv: (%p) Request received from the kernel: ",
		    hio);
		/*
		 * Inform all components about new write request.
		 * For read request prefer local component unless the given
		 * range is out-of-date, then use remote component.
		 */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queue.",
			    hio);
			/*
			 * A read is served by exactly one component, so the
			 * countdown starts at 1 (writes below use ncomps).
			 */
			refcount_init(&hio->hio_countdown, 1);
			mtx_lock(&metadata_lock);
			if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF ||
			    res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
				/*
				 * This range is up-to-date on local component,
				 * so handle request locally.
				 */
				/* Local component is 0 for now. */
				ncomp = 0;
			} else /* if (res->hr_syncsrc ==
			    HAST_SYNCSRC_SECONDARY) */ {
				assert(res->hr_syncsrc ==
				    HAST_SYNCSRC_SECONDARY);
				/*
				 * This range is out-of-date on local component,
				 * so send request to the remote node.
				 */
				/* Remote component is 1 for now. */
				ncomp = 1;
			}
			mtx_unlock(&metadata_lock);
			QUEUE_INSERT1(hio, send, ncomp);
			break;
		case BIO_WRITE:
			/*
			 * Block the write while its range overlaps an
			 * in-progress synchronization request, then take
			 * the regular-range lock for it.
			 */
			for (;;) {
				mtx_lock(&range_lock);
				if (rangelock_islocked(range_sync,
				    ggio->gctl_offset, ggio->gctl_length)) {
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu locked.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					range_regular_wait = true;
					cv_wait(&range_regular_cond, &range_lock);
					range_regular_wait = false;
					mtx_unlock(&range_lock);
					continue;
				}
				/*
				 * NOTE(review): rangelock_add() failure is
				 * handled by polling with sleep(1) —
				 * presumably it fails on overlap with another
				 * in-flight regular request; confirm against
				 * rangelock(3) semantics.
				 */
				if (rangelock_add(range_regular,
				    ggio->gctl_offset, ggio->gctl_length) < 0) {
					mtx_unlock(&range_lock);
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu is already locked, waiting.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					sleep(1);
					continue;
				}
				mtx_unlock(&range_lock);
				break;
			}
			/*
			 * Mark the extent as dirty in the activemap; flush
			 * the map to disk first if this changed it.
			 */
			mtx_lock(&res->hr_amp_lock);
			if (activemap_write_start(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length)) {
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
			/* FALLTHROUGH */
		case BIO_DELETE:
		case BIO_FLUSH:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queues.",
			    hio);
			/* Writes/deletes/flushes go to every component. */
			refcount_init(&hio->hio_countdown, ncomps);
			for (ii = 0; ii < ncomps; ii++)
				QUEUE_INSERT1(hio, send, ii);
			break;
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread reads from or writes to local component.
 * If local read fails, it redirects it to remote_send thread.
 */
static void *
local_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ncomp, rncomp;
	ssize_t ret;

	/* Local component is 0 for now. */
	ncomp = 0;
	/* Remote component is 1 for now. */
	rncomp = 1;

	for (;;) {
		pjdlog_debug(2, "local_send: Taking request.");
		QUEUE_TAKE1(hio, send, ncomp);
		pjdlog_debug(2, "local_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			ret = pread(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret == ggio->gctl_length)
				hio->hio_errors[ncomp] = 0;
			else {
				/*
				 * If READ failed, try to read from remote node.
				 * The local error slot is deliberately left
				 * untouched; the remote component's result
				 * will decide the request's fate.
				 */
				QUEUE_INSERT1(hio, send, rncomp);
				continue;
			}
			break;
		case BIO_WRITE:
			ret = pwrite(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret < 0)
				hio->hio_errors[ncomp] = errno;
			else if (ret != ggio->gctl_length)
				/* Short write with no errno: report as EIO. */
				hio->hio_errors[ncomp] = EIO;
			else
				hio->hio_errors[ncomp] = 0;
			break;
		case BIO_DELETE:
			ret = g_delete(res->hr_localfd,
			    ggio->gctl_offset + res->hr_localoff,
			    ggio->gctl_length);
			if (ret < 0)
				hio->hio_errors[ncomp] = errno;
			else
				hio->hio_errors[ncomp] = 0;
			break;
		case BIO_FLUSH:
			ret = g_flush(res->hr_localfd);
			if (ret < 0)
				hio->hio_errors[ncomp] = errno;
			else
				hio->hio_errors[ncomp] = 0;
			break;
		}
		/*
		 * Last component to finish hands the request over: sync
		 * requests wake sync_thread, regular ones go to the done
		 * queue for ggate_send_thread.
		 */
		if (refcount_release(&hio->hio_countdown)) {
			if (ISSYNCREQ(hio)) {
				mtx_lock(&sync_lock);
				SYNCREQDONE(hio);
				mtx_unlock(&sync_lock);
				cv_signal(&sync_cond);
			} else {
				pjdlog_debug(2,
				    "local_send: (%p) Moving request to the done queue.",
				    hio);
				QUEUE_INSERT2(hio, done);
			}
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread sends request to secondary node.
 */
static void *
remote_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	bool wakeup;
	uint64_t offset, length;
	uint8_t cmd;
	void *data;

	/* Remote component is 1 for now. */
	ncomp = 1;

	for (;;) {
		pjdlog_debug(2, "remote_send: Taking request.");
		QUEUE_TAKE1(hio, send, ncomp);
		pjdlog_debug(2, "remote_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		/* Translate the GEOM Gate command into a HAST wire command. */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			cmd = HIO_READ;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_WRITE:
			cmd = HIO_WRITE;
			data = ggio->gctl_data;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_DELETE:
			cmd = HIO_DELETE;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_FLUSH:
			cmd = HIO_FLUSH;
			data = NULL;
			offset = 0;
			length = 0;
			break;
		default:
			assert(!"invalid condition");
			abort();
		}
		nv = nv_alloc();
		nv_add_uint8(nv, cmd, "cmd");
		nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq");
		nv_add_uint64(nv, offset, "offset");
		nv_add_uint64(nv, length, "length");
		if (nv_error(nv) != 0) {
			hio->hio_errors[ncomp] = nv_error(nv);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to prepare header to send.",
			    hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to prepare header to send (%s): ",
			    strerror(nv_error(nv)));
			/* Move failed request immediately to the done queue. */
			goto done_queue;
		}
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the recv queue.",
		    hio);
		/*
		 * Protect connection from disappearing.
		 */
		rw_rlock(&hio_remote_lock[ncomp]);
		if (!ISCONNECTED(res, ncomp)) {
			rw_unlock(&hio_remote_lock[ncomp]);
			hio->hio_errors[ncomp] = ENOTCONN;
			goto done_queue;
		}
		/*
		 * Move the request to recv queue before sending it, because
		 * in different order we can get reply before we move request
		 * to recv queue.
		 */
		mtx_lock(&hio_recv_list_lock[ncomp]);
		/* Only signal the receiver if the list was empty. */
		wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]);
		TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		if (hast_proto_send(res, res->hr_remoteout, nv, data,
		    data != NULL ? length : 0) < 0) {
			hio->hio_errors[ncomp] = errno;
			rw_unlock(&hio_remote_lock[ncomp]);
			remote_close(res, ncomp);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to send request.", hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to send request (%s): ",
			    strerror(hio->hio_errors[ncomp]));
			/*
			 * Take request back from the receive queue and move
			 * it immediately to the done queue.
			 */
			mtx_lock(&hio_recv_list_lock[ncomp]);
			TAILQ_REMOVE(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
			mtx_unlock(&hio_recv_list_lock[ncomp]);
			goto done_queue;
		}
		rw_unlock(&hio_remote_lock[ncomp]);
		nv_free(nv);
		if (wakeup)
			cv_signal(&hio_recv_list_cond[ncomp]);
		continue;
done_queue:
		nv_free(nv);
		/* Sync requests just wake sync_thread; no done queue. */
		if (ISSYNCREQ(hio)) {
			if (!refcount_release(&hio->hio_countdown))
				continue;
			mtx_lock(&sync_lock);
			SYNCREQDONE(hio);
			mtx_unlock(&sync_lock);
			cv_signal(&sync_cond);
			continue;
		}
		/*
		 * A write that failed to reach the secondary leaves the
		 * extent dirty; flush the activemap if it needs syncing.
		 */
		if (ggio->gctl_cmd == BIO_WRITE) {
			mtx_lock(&res->hr_amp_lock);
			if (activemap_need_sync(res->hr_amp, ggio->gctl_offset,
			    ggio->gctl_length)) {
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
		}
		if (!refcount_release(&hio->hio_countdown))
			continue;
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the done queue.",
		    hio);
		QUEUE_INSERT2(hio, done);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread receives answer from secondary node and passes it to ggate_send
 * thread.
 */
static void *
remote_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	uint64_t seq;
	int error;

	/* Remote component is 1 for now. */
	ncomp = 1;

	for (;;) {
		/* Wait until there is anything to receive.
*/ 1307 mtx_lock(&hio_recv_list_lock[ncomp]); 1308 while (TAILQ_EMPTY(&hio_recv_list[ncomp])) { 1309 pjdlog_debug(2, "remote_recv: No requests, waiting."); 1310 cv_wait(&hio_recv_list_cond[ncomp], 1311 &hio_recv_list_lock[ncomp]); 1312 } 1313 mtx_unlock(&hio_recv_list_lock[ncomp]); 1314 rw_rlock(&hio_remote_lock[ncomp]); 1315 if (!ISCONNECTED(res, ncomp)) { 1316 rw_unlock(&hio_remote_lock[ncomp]); 1317 /* 1318 * Connection is dead, so move all pending requests to 1319 * the done queue (one-by-one). 1320 */ 1321 mtx_lock(&hio_recv_list_lock[ncomp]); 1322 hio = TAILQ_FIRST(&hio_recv_list[ncomp]); 1323 assert(hio != NULL); 1324 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1325 hio_next[ncomp]); 1326 mtx_unlock(&hio_recv_list_lock[ncomp]); 1327 goto done_queue; 1328 } 1329 if (hast_proto_recv_hdr(res->hr_remotein, &nv) < 0) { 1330 pjdlog_errno(LOG_ERR, 1331 "Unable to receive reply header"); 1332 rw_unlock(&hio_remote_lock[ncomp]); 1333 remote_close(res, ncomp); 1334 continue; 1335 } 1336 rw_unlock(&hio_remote_lock[ncomp]); 1337 seq = nv_get_uint64(nv, "seq"); 1338 if (seq == 0) { 1339 pjdlog_error("Header contains no 'seq' field."); 1340 nv_free(nv); 1341 continue; 1342 } 1343 mtx_lock(&hio_recv_list_lock[ncomp]); 1344 TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) { 1345 if (hio->hio_ggio.gctl_seq == seq) { 1346 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1347 hio_next[ncomp]); 1348 break; 1349 } 1350 } 1351 mtx_unlock(&hio_recv_list_lock[ncomp]); 1352 if (hio == NULL) { 1353 pjdlog_error("Found no request matching received 'seq' field (%ju).", 1354 (uintmax_t)seq); 1355 nv_free(nv); 1356 continue; 1357 } 1358 error = nv_get_int16(nv, "error"); 1359 if (error != 0) { 1360 /* Request failed on remote side. 
*/ 1361 hio->hio_errors[ncomp] = 0; 1362 nv_free(nv); 1363 goto done_queue; 1364 } 1365 ggio = &hio->hio_ggio; 1366 switch (ggio->gctl_cmd) { 1367 case BIO_READ: 1368 rw_rlock(&hio_remote_lock[ncomp]); 1369 if (!ISCONNECTED(res, ncomp)) { 1370 rw_unlock(&hio_remote_lock[ncomp]); 1371 nv_free(nv); 1372 goto done_queue; 1373 } 1374 if (hast_proto_recv_data(res, res->hr_remotein, nv, 1375 ggio->gctl_data, ggio->gctl_length) < 0) { 1376 hio->hio_errors[ncomp] = errno; 1377 pjdlog_errno(LOG_ERR, 1378 "Unable to receive reply data"); 1379 rw_unlock(&hio_remote_lock[ncomp]); 1380 nv_free(nv); 1381 remote_close(res, ncomp); 1382 goto done_queue; 1383 } 1384 rw_unlock(&hio_remote_lock[ncomp]); 1385 break; 1386 case BIO_WRITE: 1387 case BIO_DELETE: 1388 case BIO_FLUSH: 1389 break; 1390 default: 1391 assert(!"invalid condition"); 1392 abort(); 1393 } 1394 hio->hio_errors[ncomp] = 0; 1395 nv_free(nv); 1396done_queue: 1397 if (refcount_release(&hio->hio_countdown)) { 1398 if (ISSYNCREQ(hio)) { 1399 mtx_lock(&sync_lock); 1400 SYNCREQDONE(hio); 1401 mtx_unlock(&sync_lock); 1402 cv_signal(&sync_cond); 1403 } else { 1404 pjdlog_debug(2, 1405 "remote_recv: (%p) Moving request to the done queue.", 1406 hio); 1407 QUEUE_INSERT2(hio, done); 1408 } 1409 } 1410 } 1411 /* NOTREACHED */ 1412 return (NULL); 1413} 1414 1415/* 1416 * Thread sends answer to the kernel. 1417 */ 1418static void * 1419ggate_send_thread(void *arg) 1420{ 1421 struct hast_resource *res = arg; 1422 struct g_gate_ctl_io *ggio; 1423 struct hio *hio; 1424 unsigned int ii, ncomp, ncomps; 1425 1426 ncomps = HAST_NCOMPONENTS; 1427 1428 for (;;) { 1429 pjdlog_debug(2, "ggate_send: Taking request."); 1430 QUEUE_TAKE2(hio, done); 1431 pjdlog_debug(2, "ggate_send: (%p) Got request.", hio); 1432 ggio = &hio->hio_ggio; 1433 for (ii = 0; ii < ncomps; ii++) { 1434 if (hio->hio_errors[ii] == 0) { 1435 /* 1436 * One successful request is enough to declare 1437 * success. 
1438 */ 1439 ggio->gctl_error = 0; 1440 break; 1441 } 1442 } 1443 if (ii == ncomps) { 1444 /* 1445 * None of the requests were successful. 1446 * Use first error. 1447 */ 1448 ggio->gctl_error = hio->hio_errors[0]; 1449 } 1450 if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) { 1451 mtx_lock(&res->hr_amp_lock); 1452 activemap_write_complete(res->hr_amp, 1453 ggio->gctl_offset, ggio->gctl_length); 1454 mtx_unlock(&res->hr_amp_lock); 1455 } 1456 if (ggio->gctl_cmd == BIO_WRITE) { 1457 /* 1458 * Unlock range we locked. 1459 */ 1460 mtx_lock(&range_lock); 1461 rangelock_del(range_regular, ggio->gctl_offset, 1462 ggio->gctl_length); 1463 if (range_sync_wait) 1464 cv_signal(&range_sync_cond); 1465 mtx_unlock(&range_lock); 1466 /* 1467 * Bump local count if this is first write after 1468 * connection failure with remote node. 1469 */ 1470 ncomp = 1; 1471 rw_rlock(&hio_remote_lock[ncomp]); 1472 if (!ISCONNECTED(res, ncomp)) { 1473 mtx_lock(&metadata_lock); 1474 if (res->hr_primary_localcnt == 1475 res->hr_secondary_remotecnt) { 1476 res->hr_primary_localcnt++; 1477 pjdlog_debug(1, 1478 "Increasing localcnt to %ju.", 1479 (uintmax_t)res->hr_primary_localcnt); 1480 (void)metadata_write(res); 1481 } 1482 mtx_unlock(&metadata_lock); 1483 } 1484 rw_unlock(&hio_remote_lock[ncomp]); 1485 } 1486 if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) < 0) 1487 primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed"); 1488 pjdlog_debug(2, 1489 "ggate_send: (%p) Moving request to the free queue.", hio); 1490 QUEUE_INSERT2(hio, free); 1491 } 1492 /* NOTREACHED */ 1493 return (NULL); 1494} 1495 1496/* 1497 * Thread synchronize local and remote components. 
1498 */ 1499static void * 1500sync_thread(void *arg __unused) 1501{ 1502 struct hast_resource *res = arg; 1503 struct hio *hio; 1504 struct g_gate_ctl_io *ggio; 1505 unsigned int ii, ncomp, ncomps; 1506 off_t offset, length, synced; 1507 bool dorewind; 1508 int syncext; 1509 1510 ncomps = HAST_NCOMPONENTS; 1511 dorewind = true; 1512 synced = 0; 1513 offset = -1; 1514 1515 for (;;) { 1516 mtx_lock(&sync_lock); 1517 if (offset >= 0 && !sync_inprogress) { 1518 pjdlog_info("Synchronization interrupted. " 1519 "%jd bytes synchronized so far.", 1520 (intmax_t)synced); 1521 hook_exec(res->hr_exec, "syncintr", res->hr_name, NULL); 1522 } 1523 while (!sync_inprogress) { 1524 dorewind = true; 1525 synced = 0; 1526 cv_wait(&sync_cond, &sync_lock); 1527 } 1528 mtx_unlock(&sync_lock); 1529 /* 1530 * Obtain offset at which we should synchronize. 1531 * Rewind synchronization if needed. 1532 */ 1533 mtx_lock(&res->hr_amp_lock); 1534 if (dorewind) 1535 activemap_sync_rewind(res->hr_amp); 1536 offset = activemap_sync_offset(res->hr_amp, &length, &syncext); 1537 if (syncext != -1) { 1538 /* 1539 * We synchronized entire syncext extent, we can mark 1540 * it as clean now. 1541 */ 1542 if (activemap_extent_complete(res->hr_amp, syncext)) 1543 (void)hast_activemap_flush(res); 1544 } 1545 mtx_unlock(&res->hr_amp_lock); 1546 if (dorewind) { 1547 dorewind = false; 1548 if (offset < 0) 1549 pjdlog_info("Nodes are in sync."); 1550 else { 1551 pjdlog_info("Synchronization started. %ju bytes to go.", 1552 (uintmax_t)(res->hr_extentsize * 1553 activemap_ndirty(res->hr_amp))); 1554 hook_exec(res->hr_exec, "syncstart", 1555 res->hr_name, NULL); 1556 } 1557 } 1558 if (offset < 0) { 1559 sync_stop(); 1560 pjdlog_debug(1, "Nothing to synchronize."); 1561 /* 1562 * Synchronization complete, make both localcnt and 1563 * remotecnt equal. 
1564 */ 1565 ncomp = 1; 1566 rw_rlock(&hio_remote_lock[ncomp]); 1567 if (ISCONNECTED(res, ncomp)) { 1568 if (synced > 0) { 1569 pjdlog_info("Synchronization complete. " 1570 "%jd bytes synchronized.", 1571 (intmax_t)synced); 1572 hook_exec(res->hr_exec, "syncdone", 1573 res->hr_name, NULL); 1574 } 1575 mtx_lock(&metadata_lock); 1576 res->hr_syncsrc = HAST_SYNCSRC_UNDEF; 1577 res->hr_primary_localcnt = 1578 res->hr_secondary_localcnt; 1579 res->hr_primary_remotecnt = 1580 res->hr_secondary_remotecnt; 1581 pjdlog_debug(1, 1582 "Setting localcnt to %ju and remotecnt to %ju.", 1583 (uintmax_t)res->hr_primary_localcnt, 1584 (uintmax_t)res->hr_secondary_localcnt); 1585 (void)metadata_write(res); 1586 mtx_unlock(&metadata_lock); 1587 } 1588 rw_unlock(&hio_remote_lock[ncomp]); 1589 continue; 1590 } 1591 pjdlog_debug(2, "sync: Taking free request."); 1592 QUEUE_TAKE2(hio, free); 1593 pjdlog_debug(2, "sync: (%p) Got free request.", hio); 1594 /* 1595 * Lock the range we are going to synchronize. We don't want 1596 * race where someone writes between our read and write. 1597 */ 1598 for (;;) { 1599 mtx_lock(&range_lock); 1600 if (rangelock_islocked(range_regular, offset, length)) { 1601 pjdlog_debug(2, 1602 "sync: Range offset=%jd length=%jd locked.", 1603 (intmax_t)offset, (intmax_t)length); 1604 range_sync_wait = true; 1605 cv_wait(&range_sync_cond, &range_lock); 1606 range_sync_wait = false; 1607 mtx_unlock(&range_lock); 1608 continue; 1609 } 1610 if (rangelock_add(range_sync, offset, length) < 0) { 1611 mtx_unlock(&range_lock); 1612 pjdlog_debug(2, 1613 "sync: Range offset=%jd length=%jd is already locked, waiting.", 1614 (intmax_t)offset, (intmax_t)length); 1615 sleep(1); 1616 continue; 1617 } 1618 mtx_unlock(&range_lock); 1619 break; 1620 } 1621 /* 1622 * First read the data from synchronization source. 
1623 */ 1624 SYNCREQ(hio); 1625 ggio = &hio->hio_ggio; 1626 ggio->gctl_cmd = BIO_READ; 1627 ggio->gctl_offset = offset; 1628 ggio->gctl_length = length; 1629 ggio->gctl_error = 0; 1630 for (ii = 0; ii < ncomps; ii++) 1631 hio->hio_errors[ii] = EINVAL; 1632 reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 1633 hio); 1634 pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 1635 hio); 1636 mtx_lock(&metadata_lock); 1637 if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1638 /* 1639 * This range is up-to-date on local component, 1640 * so handle request locally. 1641 */ 1642 /* Local component is 0 for now. */ 1643 ncomp = 0; 1644 } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 1645 assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 1646 /* 1647 * This range is out-of-date on local component, 1648 * so send request to the remote node. 1649 */ 1650 /* Remote component is 1 for now. */ 1651 ncomp = 1; 1652 } 1653 mtx_unlock(&metadata_lock); 1654 refcount_init(&hio->hio_countdown, 1); 1655 QUEUE_INSERT1(hio, send, ncomp); 1656 1657 /* 1658 * Let's wait for READ to finish. 1659 */ 1660 mtx_lock(&sync_lock); 1661 while (!ISSYNCREQDONE(hio)) 1662 cv_wait(&sync_cond, &sync_lock); 1663 mtx_unlock(&sync_lock); 1664 1665 if (hio->hio_errors[ncomp] != 0) { 1666 pjdlog_error("Unable to read synchronization data: %s.", 1667 strerror(hio->hio_errors[ncomp])); 1668 goto free_queue; 1669 } 1670 1671 /* 1672 * We read the data from synchronization source, now write it 1673 * to synchronization target. 
1674 */ 1675 SYNCREQ(hio); 1676 ggio->gctl_cmd = BIO_WRITE; 1677 for (ii = 0; ii < ncomps; ii++) 1678 hio->hio_errors[ii] = EINVAL; 1679 reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 1680 hio); 1681 pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 1682 hio); 1683 mtx_lock(&metadata_lock); 1684 if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1685 /* 1686 * This range is up-to-date on local component, 1687 * so we update remote component. 1688 */ 1689 /* Remote component is 1 for now. */ 1690 ncomp = 1; 1691 } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 1692 assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 1693 /* 1694 * This range is out-of-date on local component, 1695 * so we update it. 1696 */ 1697 /* Local component is 0 for now. */ 1698 ncomp = 0; 1699 } 1700 mtx_unlock(&metadata_lock); 1701 1702 pjdlog_debug(2, "sync: (%p) Moving request to the send queues.", 1703 hio); 1704 refcount_init(&hio->hio_countdown, 1); 1705 QUEUE_INSERT1(hio, send, ncomp); 1706 1707 /* 1708 * Let's wait for WRITE to finish. 
1709 */ 1710 mtx_lock(&sync_lock); 1711 while (!ISSYNCREQDONE(hio)) 1712 cv_wait(&sync_cond, &sync_lock); 1713 mtx_unlock(&sync_lock); 1714 1715 if (hio->hio_errors[ncomp] != 0) { 1716 pjdlog_error("Unable to write synchronization data: %s.", 1717 strerror(hio->hio_errors[ncomp])); 1718 goto free_queue; 1719 } 1720 1721 synced += length; 1722free_queue: 1723 mtx_lock(&range_lock); 1724 rangelock_del(range_sync, offset, length); 1725 if (range_regular_wait) 1726 cv_signal(&range_regular_cond); 1727 mtx_unlock(&range_lock); 1728 pjdlog_debug(2, "sync: (%p) Moving request to the free queue.", 1729 hio); 1730 QUEUE_INSERT2(hio, free); 1731 } 1732 /* NOTREACHED */ 1733 return (NULL); 1734} 1735 1736static void 1737sighandler(int sig) 1738{ 1739 bool unlock; 1740 1741 switch (sig) { 1742 case SIGINT: 1743 case SIGTERM: 1744 sigexit_received = true; 1745 break; 1746 case SIGHUP: 1747 sighup_received = true; 1748 break; 1749 case SIGCHLD: 1750 sigchld_received = true; 1751 break; 1752 default: 1753 assert(!"invalid condition"); 1754 } 1755 /* 1756 * Racy, but if we cannot obtain hio_guard_lock here, we don't 1757 * want to risk deadlock. 1758 */ 1759 unlock = mtx_trylock(&hio_guard_lock); 1760 cv_signal(&hio_guard_cond); 1761 if (unlock) 1762 mtx_unlock(&hio_guard_lock); 1763} 1764 1765static void 1766config_reload(void) 1767{ 1768 struct hastd_config *newcfg; 1769 struct hast_resource *res; 1770 unsigned int ii, ncomps; 1771 int modified; 1772 1773 pjdlog_info("Reloading configuration..."); 1774 1775 ncomps = HAST_NCOMPONENTS; 1776 1777 newcfg = yy_config_parse(cfgpath, false); 1778 if (newcfg == NULL) 1779 goto failed; 1780 1781 TAILQ_FOREACH(res, &newcfg->hc_resources, hr_next) { 1782 if (strcmp(res->hr_name, gres->hr_name) == 0) 1783 break; 1784 } 1785 /* 1786 * If resource was removed from the configuration file, resource 1787 * name, provider name or path to local component was modified we 1788 * shouldn't be here. 
This means that someone modified configuration
	 * file and send SIGHUP to us instead of main hastd process.
	 * Log advice and ignore the signal.
	 */
	if (res == NULL || strcmp(gres->hr_name, res->hr_name) != 0 ||
	    strcmp(gres->hr_provname, res->hr_provname) != 0 ||
	    strcmp(gres->hr_localpath, res->hr_localpath) != 0) {
		/* getppid(): the main hastd process is our parent. */
		pjdlog_warning("To reload configuration send SIGHUP to the main hastd process (pid %u).",
		    (unsigned int)getppid());
		goto failed;
	}

/* Bitmask of settings that changed; scoped to this function only. */
#define MODIFIED_REMOTEADDR	0x1
#define MODIFIED_REPLICATION	0x2
#define MODIFIED_TIMEOUT	0x4
#define MODIFIED_EXEC		0x8
	modified = 0;
	if (strcmp(gres->hr_remoteaddr, res->hr_remoteaddr) != 0) {
		/*
		 * Don't copy res->hr_remoteaddr to gres just yet.
		 * We want remote_close() to log disconnect from the old
		 * addresses, not from the new ones.
		 */
		modified |= MODIFIED_REMOTEADDR;
	}
	if (gres->hr_replication != res->hr_replication) {
		gres->hr_replication = res->hr_replication;
		modified |= MODIFIED_REPLICATION;
	}
	if (gres->hr_timeout != res->hr_timeout) {
		gres->hr_timeout = res->hr_timeout;
		modified |= MODIFIED_TIMEOUT;
	}
	if (strcmp(gres->hr_exec, res->hr_exec) != 0) {
		strlcpy(gres->hr_exec, res->hr_exec, sizeof(gres->hr_exec));
		modified |= MODIFIED_EXEC;
	}
	/*
	 * If only timeout was modified we only need to change it without
	 * reconnecting.
	 */
	if (modified == MODIFIED_TIMEOUT) {
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			rw_rlock(&hio_remote_lock[ii]);
			if (!ISCONNECTED(gres, ii)) {
				rw_unlock(&hio_remote_lock[ii]);
				continue;
			}
			rw_unlock(&hio_remote_lock[ii]);
			if (proto_timeout(gres->hr_remotein,
			    gres->hr_timeout) < 0) {
				pjdlog_errno(LOG_WARNING,
				    "Unable to set connection timeout");
			}
			if (proto_timeout(gres->hr_remoteout,
			    gres->hr_timeout) < 0) {
				pjdlog_errno(LOG_WARNING,
				    "Unable to set connection timeout");
			}
		}
	} else if ((modified &
	    (MODIFIED_REMOTEADDR | MODIFIED_REPLICATION)) != 0) {
		/*
		 * Address or replication mode changed: drop the remote
		 * connections; guard_thread will reconnect using the
		 * (possibly new) address copied below.
		 */
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			remote_close(gres, ii);
		}
		if (modified & MODIFIED_REMOTEADDR) {
			strlcpy(gres->hr_remoteaddr, res->hr_remoteaddr,
			    sizeof(gres->hr_remoteaddr));
		}
	}
#undef MODIFIED_REMOTEADDR
#undef MODIFIED_REPLICATION
#undef MODIFIED_TIMEOUT
#undef MODIFIED_EXEC

	pjdlog_info("Configuration reloaded successfully.");
	return;
failed:
	/* Release whatever the failed parse managed to allocate. */
	if (newcfg != NULL) {
		if (newcfg->hc_controlconn != NULL)
			proto_close(newcfg->hc_controlconn);
		if (newcfg->hc_listenconn != NULL)
			proto_close(newcfg->hc_listenconn);
		yy_config_free(newcfg);
	}
	pjdlog_warning("Configuration not reloaded.");
}

/*
 * Send a keepalive (HIO_KEEPALIVE) request to the secondary node.
 * Called by guard_thread() with hio_remote_lock[ncomp] read-held.
 */
static void
keepalive_send(struct hast_resource *res, unsigned int ncomp)
{
	struct nv *nv;

	nv = nv_alloc();
	nv_add_uint8(nv, HIO_KEEPALIVE, "cmd");
	if (nv_error(nv) != 0) {
		nv_free(nv);
		pjdlog_debug(1,
		    "keepalive_send: Unable to prepare header to send.");
		return;
	}
	if (hast_proto_send(res, res->hr_remoteout, nv, NULL, 0) < 0) {
		pjdlog_common(LOG_DEBUG, 1, errno,
		    "keepalive_send: Unable to send request");
		nv_free(nv);
		/*
		 * remote_close() needs the lock dropped; reacquire it
		 * afterwards because the caller still holds it logically.
		 */
		rw_unlock(&hio_remote_lock[ncomp]);
		remote_close(res, ncomp);
		rw_rlock(&hio_remote_lock[ncomp]);
		return;
	}
	nv_free(nv);
	pjdlog_debug(2, "keepalive_send: Request sent.");
}

/*
 * Thread guards remote connections and reconnects when needed, handles
 * signals, etc.
 */
static void *
guard_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct proto_conn *in, *out;
	unsigned int ii, ncomps;
	int timeout;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		/* Handle signals recorded by sighandler(). */
		if (sigexit_received) {
			primary_exitx(EX_OK,
			    "Termination signal received, exiting.");
		}
		if (sighup_received) {
			sighup_received = false;
			config_reload();
		}
		hook_check(sigchld_received);
		if (sigchld_received)
			sigchld_received = false;

		timeout = KEEPALIVE_SLEEP;
		pjdlog_debug(2, "remote_guard: Checking connections.");
		mtx_lock(&hio_guard_lock);
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			rw_rlock(&hio_remote_lock[ii]);
			if (ISCONNECTED(res, ii)) {
				assert(res->hr_remotein != NULL);
				assert(res->hr_remoteout != NULL);
				keepalive_send(res, ii);
			}
			/*
			 * Re-check: keepalive_send() may have dropped the
			 * connection (it closes it on send failure).
			 */
			if (ISCONNECTED(res, ii)) {
				assert(res->hr_remotein != NULL);
				assert(res->hr_remoteout != NULL);
				rw_unlock(&hio_remote_lock[ii]);
				pjdlog_debug(2,
				    "remote_guard: Connection to %s is ok.",
				    res->hr_remoteaddr);
			} else if (real_remote(res)) {
				assert(res->hr_remotein == NULL);
				assert(res->hr_remoteout == NULL);
				/*
				 * Upgrade the lock. It doesn't have to be
				 * atomic as no other thread can change
				 * connection status from disconnected to
				 * connected.
				 */
				rw_unlock(&hio_remote_lock[ii]);
				pjdlog_debug(2,
				    "remote_guard: Reconnecting to %s.",
				    res->hr_remoteaddr);
				in = out = NULL;
				if (init_remote(res, &in, &out)) {
					rw_wlock(&hio_remote_lock[ii]);
					assert(res->hr_remotein == NULL);
					assert(res->hr_remoteout == NULL);
					assert(in != NULL && out != NULL);
					res->hr_remotein = in;
					res->hr_remoteout = out;
					rw_unlock(&hio_remote_lock[ii]);
					pjdlog_info("Successfully reconnected to %s.",
					    res->hr_remoteaddr);
					sync_start();
				} else {
					/* Both connections should be NULL. */
					assert(res->hr_remotein == NULL);
					assert(res->hr_remoteout == NULL);
					assert(in == NULL && out == NULL);
					pjdlog_debug(2,
					    "remote_guard: Reconnect to %s failed.",
					    res->hr_remoteaddr);
					/* Retry sooner after a failure. */
					timeout = RECONNECT_SLEEP;
				}
			} else {
				rw_unlock(&hio_remote_lock[ii]);
			}
		}
		/* Sleep only if a signal wasn't delivered in the meantime. */
		if (!sigexit_received && !sighup_received && !sigchld_received)
			cv_timedwait(&hio_guard_cond, &hio_guard_lock, timeout);
		mtx_unlock(&hio_guard_lock);
	}
	/* NOTREACHED */
	return (NULL);
}