/* primary.c — hastd primary-mode worker (FreeBSD SVN revision 213007). */
/*-
 * Copyright (c) 2009 The FreeBSD Foundation
 * Copyright (c) 2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * This software was developed by Pawel Jakub Dawidek under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sbin/hastd/primary.c 213007 2010-09-22 19:03:11Z pjd $");

#include <sys/types.h>
#include <sys/time.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/refcount.h>
#include <sys/stat.h>

#include <geom/gate/g_gate.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libgeom.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>

#include <activemap.h>
#include <nv.h>
#include <rangelock.h>

#include "control.h"
#include "event.h"
#include "hast.h"
#include "hast_proto.h"
#include "hastd.h"
#include "hooks.h"
#include "metadata.h"
#include "proto.h"
#include "pjdlog.h"
#include "subr.h"
#include "synch.h"

/* There is only one remote component for now (component index 1). */
#define	ISREMOTE(no)	((no) == 1)

/*
 * One in-flight I/O request.  A single hio travels through the send/recv
 * queues of every component and is finally returned to the kernel (or to
 * the sync thread) by whichever component finishes last.
 */
struct hio {
	/*
	 * Number of components we are still waiting for.
	 * When this field goes to 0, we can send the request back to the
	 * kernel. Each component has to decrease this counter by one
	 * even on failure.
	 */
	unsigned int		 hio_countdown;
	/*
	 * Each component has a place to store its own error.
	 * Once the request is handled by all components we can decide if the
	 * request overall is successful or not.
	 */
	int			*hio_errors;
	/*
	 * Structure used to communicate with GEOM Gate class.
	 */
	struct g_gate_ctl_io	 hio_ggio;
	/* Per-component queue linkage (array of HAST_NCOMPONENTS entries). */
	TAILQ_ENTRY(hio)	*hio_next;
};
/* The free and done lists reuse the first per-component linkage slot. */
#define	hio_free_next	hio_next[0]
#define	hio_done_next	hio_next[0]

/*
 * Free list holds unused structures. When free list is empty, we have to wait
 * until some in-progress requests are freed.
 */
static TAILQ_HEAD(, hio) hio_free_list;
static pthread_mutex_t hio_free_list_lock;
static pthread_cond_t hio_free_list_cond;
/*
 * There is one send list for every component. One request is placed on all
 * send lists - each component gets the same request, but each component is
 * responsible for managing its own send list.
 */
static TAILQ_HEAD(, hio) *hio_send_list;
static pthread_mutex_t *hio_send_list_lock;
static pthread_cond_t *hio_send_list_cond;
/*
 * There is one recv list for every component, although local components don't
 * use recv lists as local requests are done synchronously.
 */
static TAILQ_HEAD(, hio) *hio_recv_list;
static pthread_mutex_t *hio_recv_list_lock;
static pthread_cond_t *hio_recv_list_cond;
/*
 * Request is placed on done list by the slowest component (the one that
 * decreased hio_countdown from 1 to 0).
 */
static TAILQ_HEAD(, hio) hio_done_list;
static pthread_mutex_t hio_done_list_lock;
static pthread_cond_t hio_done_list_cond;
/*
 * Structures below are for interaction with the sync thread.
 */
static bool sync_inprogress;
static pthread_mutex_t sync_lock;
static pthread_cond_t sync_cond;
/*
 * The lock below allows to synchronize access to remote connections.
 */
static pthread_rwlock_t *hio_remote_lock;

/*
 * Lock to synchronize metadata updates. Also synchronize access to
 * hr_primary_localcnt and hr_primary_remotecnt fields.
 */
static pthread_mutex_t metadata_lock;

/*
 * Maximum number of outstanding I/O requests.
 */
#define	HAST_HIO_MAX	256
/*
 * Number of components. At this point there are only two components: local
 * and remote, but in the future it might be possible to use multiple local
 * and remote components.
 */
#define	HAST_NCOMPONENTS	2
/*
 * Number of seconds to sleep between reconnect retries or keepalive packets.
 */
#define	RETRY_SLEEP		10

/*
 * NOTE(review): the 'no' argument is currently unused — with a single remote
 * component both descriptors describe the only remote connection.
 */
#define	ISCONNECTED(res, no)	\
	((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL)

/* Append 'hio' to component ncomp's 'name' queue; wake a waiter if it was empty. */
#define	QUEUE_INSERT1(hio, name, ncomp)	do {				\
	bool _wakeup;							\
									\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);			\
	_wakeup = TAILQ_EMPTY(&hio_##name##_list[(ncomp)]);		\
	TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio),		\
	    hio_next[(ncomp)]);						\
	mtx_unlock(&hio_##name##_list_lock[ncomp]);			\
	if (_wakeup)							\
		cv_signal(&hio_##name##_list_cond[(ncomp)]);		\
} while (0)
/* Append 'hio' to the single (free/done) 'name' queue; wake a waiter if empty. */
#define	QUEUE_INSERT2(hio, name)	do {				\
	bool _wakeup;							\
									\
	mtx_lock(&hio_##name##_list_lock);				\
	_wakeup = TAILQ_EMPTY(&hio_##name##_list);			\
	TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\
	mtx_unlock(&hio_##name##_list_lock);				\
	if (_wakeup)							\
		cv_signal(&hio_##name##_list_cond);			\
} while (0)
/* Block until component ncomp's 'name' queue is non-empty, then pop its head. */
#define	QUEUE_TAKE1(hio, name, ncomp)	do {				\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);			\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL) { \
		cv_wait(&hio_##name##_list_cond[(ncomp)],		\
		    &hio_##name##_list_lock[(ncomp)]);			\
	}								\
	TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio),		\
	    hio_next[(ncomp)]);						\
	mtx_unlock(&hio_##name##_list_lock[(ncomp)]);			\
} while (0)
/* Block until the single 'name' queue is non-empty, then pop its head. */
#define	QUEUE_TAKE2(hio, name)	do {					\
	mtx_lock(&hio_##name##_list_lock);				\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) {	\
		cv_wait(&hio_##name##_list_cond,			\
		    &hio_##name##_list_lock);				\
	}								\
	TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next);	\
	mtx_unlock(&hio_##name##_list_lock);				\
} while (0)

/*
 * Sync requests are distinguished from kernel requests by a negative
 * gctl_unit: -1 while in flight, -2 once completed.
 */
#define	SYNCREQ(hio)		do {					\
	(hio)->hio_ggio.gctl_unit = -1;					\
	(hio)->hio_ggio.gctl_seq = 1;					\
} while (0)
#define	ISSYNCREQ(hio)		((hio)->hio_ggio.gctl_unit == -1)
#define	SYNCREQDONE(hio)	do { (hio)->hio_ggio.gctl_unit = -2; } while (0)
#define	ISSYNCREQDONE(hio)	((hio)->hio_ggio.gctl_unit == -2)
/* Resource this worker serves; global so the exit helpers can clean it up. */
static struct hast_resource *gres;

/*
 * Range locks: regular (kernel) writes and sync-thread I/O must not overlap.
 * The *_wait flags note that someone is sleeping on the matching condvar.
 */
static pthread_mutex_t range_lock;
static struct rangelocks *range_regular;
static bool range_regular_wait;
static pthread_cond_t range_regular_cond;
static struct rangelocks *range_sync;
static bool range_sync_wait;
static pthread_cond_t range_sync_cond;

static void *ggate_recv_thread(void *arg);
static void *local_send_thread(void *arg);
static void *remote_send_thread(void *arg);
static void *remote_recv_thread(void *arg);
static void *ggate_send_thread(void *arg);
static void *sync_thread(void *arg);
static void *guard_thread(void *arg);

/*
 * Best-effort teardown before exiting: close the local provider descriptor
 * and destroy the ggate device if this process created it.  Preserves errno
 * for the caller (the exit helpers log with errno).
 */
static void
cleanup(struct hast_resource *res)
{
	int rerrno;

	/* Remember errno. */
	rerrno = errno;

	/*
	 * Close descriptor to /dev/hast/<name>
	 * to work-around race in the kernel.
	 */
	close(res->hr_localfd);

	/* Destroy ggate provider if we created one. */
	if (res->hr_ggateunit >= 0) {
		struct g_gate_ctl_destroy ggiod;

		ggiod.gctl_version = G_GATE_VERSION;
		ggiod.gctl_unit = res->hr_ggateunit;
		ggiod.gctl_force = 1;
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) < 0) {
			pjdlog_warning("Unable to destroy hast/%s device",
			    res->hr_provname);
		}
		res->hr_ggateunit = -1;
	}

	/* Restore errno. */
	errno = rerrno;
}

/* Log an errno-style error, clean up and exit with the given sysexits code. */
static __dead2 void
primary_exit(int exitcode, const char *fmt, ...)
{
	va_list ap;

	assert(exitcode != EX_OK);
	va_start(ap, fmt);
	pjdlogv_errno(LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

/* Like primary_exit(), but without errno; EX_OK exits log at INFO level. */
static __dead2 void
primary_exitx(int exitcode, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	pjdlogv(exitcode == EX_OK ? LOG_INFO : LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

/*
 * Write the in-memory activemap bitmap to its on-disk location (right after
 * the metadata).  Returns 0 on success, -1 on write failure (errno kept).
 */
static int
hast_activemap_flush(struct hast_resource *res)
{
	const unsigned char *buf;
	size_t size;

	buf = activemap_bitmap(res->hr_amp, &size);
	assert(buf != NULL);
	assert((size % res->hr_local_sectorsize) == 0);
	if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) !=
	    (ssize_t)size) {
		KEEP_ERRNO(pjdlog_errno(LOG_ERR,
		    "Unable to flush activemap to disk"));
		return (-1);
	}
	return (0);
}

/* True unless the resource is configured with remote address "none". */
static bool
real_remote(const struct hast_resource *res)
{

	return (strcmp(res->hr_remoteaddr, "none") != 0);
}

/*
 * Allocate and initialize all per-component queues, locks and condition
 * variables, pre-allocate the hio request pool, and block the signals that
 * the guard thread will handle synchronously.  Exits on allocation failure.
 */
static void
init_environment(struct hast_resource *res __unused)
{
	struct hio *hio;
	unsigned int ii, ncomps;
	sigset_t mask;

	/*
	 * In the future it might be per-resource value.
	 */
	ncomps = HAST_NCOMPONENTS;

	/*
	 * Allocate memory needed by lists.
	 */
	hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps);
	if (hio_send_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send lists.",
		    sizeof(hio_send_list[0]) * ncomps);
	}
	hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps);
	if (hio_send_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list locks.",
		    sizeof(hio_send_list_lock[0]) * ncomps);
	}
	hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps);
	if (hio_send_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list condition variables.",
		    sizeof(hio_send_list_cond[0]) * ncomps);
	}
	hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps);
	if (hio_recv_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv lists.",
		    sizeof(hio_recv_list[0]) * ncomps);
	}
	hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps);
	if (hio_recv_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list locks.",
		    sizeof(hio_recv_list_lock[0]) * ncomps);
	}
	hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps);
	if (hio_recv_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list condition variables.",
		    sizeof(hio_recv_list_cond[0]) * ncomps);
	}
	hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps);
	if (hio_remote_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for remote connections locks.",
		    sizeof(hio_remote_lock[0]) * ncomps);
	}

	/*
	 * Initialize lists, their locks and theirs condition variables.
	 */
	TAILQ_INIT(&hio_free_list);
	mtx_init(&hio_free_list_lock);
	cv_init(&hio_free_list_cond);
	for (ii = 0; ii < HAST_NCOMPONENTS; ii++) {
		TAILQ_INIT(&hio_send_list[ii]);
		mtx_init(&hio_send_list_lock[ii]);
		cv_init(&hio_send_list_cond[ii]);
		TAILQ_INIT(&hio_recv_list[ii]);
		mtx_init(&hio_recv_list_lock[ii]);
		cv_init(&hio_recv_list_cond[ii]);
		rw_init(&hio_remote_lock[ii]);
	}
	TAILQ_INIT(&hio_done_list);
	mtx_init(&hio_done_list_lock);
	cv_init(&hio_done_list_cond);
	mtx_init(&metadata_lock);

	/*
	 * Allocate requests pool and initialize requests.
	 */
	for (ii = 0; ii < HAST_HIO_MAX; ii++) {
		hio = malloc(sizeof(*hio));
		if (hio == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio request.",
			    sizeof(*hio));
		}
		hio->hio_countdown = 0;
		hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps);
		if (hio->hio_errors == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable allocate %zu bytes of memory for hio errors.",
			    sizeof(hio->hio_errors[0]) * ncomps);
		}
		hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps);
		if (hio->hio_next == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable allocate %zu bytes of memory for hio_next field.",
			    sizeof(hio->hio_next[0]) * ncomps);
		}
		hio->hio_ggio.gctl_version = G_GATE_VERSION;
		/* MAXPHYS is the largest request the kernel can hand us. */
		hio->hio_ggio.gctl_data = malloc(MAXPHYS);
		if (hio->hio_ggio.gctl_data == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for gctl_data.",
			    MAXPHYS);
		}
		hio->hio_ggio.gctl_length = MAXPHYS;
		hio->hio_ggio.gctl_error = 0;
		TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next);
	}

	/*
	 * Turn on signals handling.
	 */
	PJDLOG_VERIFY(sigemptyset(&mask) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGHUP) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGINT) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGTERM) == 0);
	PJDLOG_VERIFY(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
}

/*
 * Read metadata, create the activemap and range locks, and load the on-disk
 * activemap bitmap.  On first use (hr_resuid == 0) generate the resource UID
 * and initialize the local/remote generation counters.  Exits on failure.
 */
static void
init_local(struct hast_resource *res)
{
	unsigned char *buf;
	size_t mapsize;

	if (metadata_read(res, true) < 0)
		exit(EX_NOINPUT);
	mtx_init(&res->hr_amp_lock);
	if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize,
	    res->hr_local_sectorsize, res->hr_keepdirty) < 0) {
		primary_exit(EX_TEMPFAIL, "Unable to create activemap");
	}
	mtx_init(&range_lock);
	cv_init(&range_regular_cond);
	if (rangelock_init(&range_regular) < 0)
		primary_exit(EX_TEMPFAIL, "Unable to create regular range lock");
	cv_init(&range_sync_cond);
	if (rangelock_init(&range_sync) < 0)
		primary_exit(EX_TEMPFAIL, "Unable to create sync range lock");
	mapsize = activemap_ondisk_size(res->hr_amp);
	buf = calloc(1, mapsize);
	if (buf == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate buffer for activemap.");
	}
	if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) !=
	    (ssize_t)mapsize) {
		primary_exit(EX_NOINPUT, "Unable to read activemap");
	}
	activemap_copyin(res->hr_amp, buf, mapsize);
	free(buf);
	if (res->hr_resuid != 0)
		return;
	/*
	 * We're using provider for the first time, so we have to generate
	 * resource unique identifier and initialize local and remote counts.
	 */
	arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid));
	res->hr_primary_localcnt = 1;
	res->hr_primary_remotecnt = 0;
	if (metadata_write(res) < 0)
		exit(EX_NOINPUT);
}
471 */ 472 arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid)); 473 res->hr_primary_localcnt = 1; 474 res->hr_primary_remotecnt = 0; 475 if (metadata_write(res) < 0) 476 exit(EX_NOINPUT); 477} 478 479static bool 480init_remote(struct hast_resource *res, struct proto_conn **inp, 481 struct proto_conn **outp) 482{ 483 struct proto_conn *in, *out; 484 struct nv *nvout, *nvin; 485 const unsigned char *token; 486 unsigned char *map; 487 const char *errmsg; 488 int32_t extentsize; 489 int64_t datasize; 490 uint32_t mapsize; 491 size_t size; 492 493 assert((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL)); 494 assert(real_remote(res)); 495 496 in = out = NULL; 497 errmsg = NULL; 498 499 /* Prepare outgoing connection with remote node. */ 500 if (proto_client(res->hr_remoteaddr, &out) < 0) { 501 primary_exit(EX_TEMPFAIL, "Unable to create connection to %s", 502 res->hr_remoteaddr); 503 } 504 /* Try to connect, but accept failure. */ 505 if (proto_connect(out) < 0) { 506 pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 507 res->hr_remoteaddr); 508 goto close; 509 } 510 /* Error in setting timeout is not critical, but why should it fail? */ 511 if (proto_timeout(out, res->hr_timeout) < 0) 512 pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 513 /* 514 * First handshake step. 515 * Setup outgoing connection with remote node. 
516 */ 517 nvout = nv_alloc(); 518 nv_add_string(nvout, res->hr_name, "resource"); 519 if (nv_error(nvout) != 0) { 520 pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 521 "Unable to allocate header for connection with %s", 522 res->hr_remoteaddr); 523 nv_free(nvout); 524 goto close; 525 } 526 if (hast_proto_send(res, out, nvout, NULL, 0) < 0) { 527 pjdlog_errno(LOG_WARNING, 528 "Unable to send handshake header to %s", 529 res->hr_remoteaddr); 530 nv_free(nvout); 531 goto close; 532 } 533 nv_free(nvout); 534 if (hast_proto_recv_hdr(out, &nvin) < 0) { 535 pjdlog_errno(LOG_WARNING, 536 "Unable to receive handshake header from %s", 537 res->hr_remoteaddr); 538 goto close; 539 } 540 errmsg = nv_get_string(nvin, "errmsg"); 541 if (errmsg != NULL) { 542 pjdlog_warning("%s", errmsg); 543 nv_free(nvin); 544 goto close; 545 } 546 token = nv_get_uint8_array(nvin, &size, "token"); 547 if (token == NULL) { 548 pjdlog_warning("Handshake header from %s has no 'token' field.", 549 res->hr_remoteaddr); 550 nv_free(nvin); 551 goto close; 552 } 553 if (size != sizeof(res->hr_token)) { 554 pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).", 555 res->hr_remoteaddr, size, sizeof(res->hr_token)); 556 nv_free(nvin); 557 goto close; 558 } 559 bcopy(token, res->hr_token, sizeof(res->hr_token)); 560 nv_free(nvin); 561 562 /* 563 * Second handshake step. 564 * Setup incoming connection with remote node. 565 */ 566 if (proto_client(res->hr_remoteaddr, &in) < 0) { 567 pjdlog_errno(LOG_WARNING, "Unable to create connection to %s", 568 res->hr_remoteaddr); 569 } 570 /* Try to connect, but accept failure. */ 571 if (proto_connect(in) < 0) { 572 pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 573 res->hr_remoteaddr); 574 goto close; 575 } 576 /* Error in setting timeout is not critical, but why should it fail? 
*/ 577 if (proto_timeout(in, res->hr_timeout) < 0) 578 pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 579 nvout = nv_alloc(); 580 nv_add_string(nvout, res->hr_name, "resource"); 581 nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token), 582 "token"); 583 nv_add_uint64(nvout, res->hr_resuid, "resuid"); 584 nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt"); 585 nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt"); 586 if (nv_error(nvout) != 0) { 587 pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 588 "Unable to allocate header for connection with %s", 589 res->hr_remoteaddr); 590 nv_free(nvout); 591 goto close; 592 } 593 if (hast_proto_send(res, in, nvout, NULL, 0) < 0) { 594 pjdlog_errno(LOG_WARNING, 595 "Unable to send handshake header to %s", 596 res->hr_remoteaddr); 597 nv_free(nvout); 598 goto close; 599 } 600 nv_free(nvout); 601 if (hast_proto_recv_hdr(out, &nvin) < 0) { 602 pjdlog_errno(LOG_WARNING, 603 "Unable to receive handshake header from %s", 604 res->hr_remoteaddr); 605 goto close; 606 } 607 errmsg = nv_get_string(nvin, "errmsg"); 608 if (errmsg != NULL) { 609 pjdlog_warning("%s", errmsg); 610 nv_free(nvin); 611 goto close; 612 } 613 datasize = nv_get_int64(nvin, "datasize"); 614 if (datasize != res->hr_datasize) { 615 pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).", 616 (intmax_t)res->hr_datasize, (intmax_t)datasize); 617 nv_free(nvin); 618 goto close; 619 } 620 extentsize = nv_get_int32(nvin, "extentsize"); 621 if (extentsize != res->hr_extentsize) { 622 pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).", 623 (ssize_t)res->hr_extentsize, (ssize_t)extentsize); 624 nv_free(nvin); 625 goto close; 626 } 627 res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt"); 628 res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt"); 629 res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc"); 630 map = NULL; 631 mapsize = nv_get_uint32(nvin, "mapsize"); 632 if (mapsize > 
0) { 633 map = malloc(mapsize); 634 if (map == NULL) { 635 pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).", 636 (uintmax_t)mapsize); 637 nv_free(nvin); 638 goto close; 639 } 640 /* 641 * Remote node have some dirty extents on its own, lets 642 * download its activemap. 643 */ 644 if (hast_proto_recv_data(res, out, nvin, map, 645 mapsize) < 0) { 646 pjdlog_errno(LOG_ERR, 647 "Unable to receive remote activemap"); 648 nv_free(nvin); 649 free(map); 650 goto close; 651 } 652 /* 653 * Merge local and remote bitmaps. 654 */ 655 activemap_merge(res->hr_amp, map, mapsize); 656 free(map); 657 /* 658 * Now that we merged bitmaps from both nodes, flush it to the 659 * disk before we start to synchronize. 660 */ 661 (void)hast_activemap_flush(res); 662 } 663 pjdlog_info("Connected to %s.", res->hr_remoteaddr); 664 if (inp != NULL && outp != NULL) { 665 *inp = in; 666 *outp = out; 667 } else { 668 res->hr_remotein = in; 669 res->hr_remoteout = out; 670 } 671 event_send(res, EVENT_CONNECT); 672 return (true); 673close: 674 if (errmsg != NULL && strcmp(errmsg, "Split-brain condition!") == 0) 675 event_send(res, EVENT_SPLITBRAIN); 676 proto_close(out); 677 if (in != NULL) 678 proto_close(in); 679 return (false); 680} 681 682static void 683sync_start(void) 684{ 685 686 mtx_lock(&sync_lock); 687 sync_inprogress = true; 688 mtx_unlock(&sync_lock); 689 cv_signal(&sync_cond); 690} 691 692static void 693sync_stop(void) 694{ 695 696 mtx_lock(&sync_lock); 697 if (sync_inprogress) 698 sync_inprogress = false; 699 mtx_unlock(&sync_lock); 700} 701 702static void 703init_ggate(struct hast_resource *res) 704{ 705 struct g_gate_ctl_create ggiocreate; 706 struct g_gate_ctl_cancel ggiocancel; 707 708 /* 709 * We communicate with ggate via /dev/ggctl. Open it. 
/*
 * Create (or take over) the /dev/hast/<name> GEOM Gate provider.
 * If a provider with our name already exists we assume a previous worker
 * died without cleanup and cancel its pending queue to adopt the unit.
 * Exits on any unrecoverable ioctl failure.
 */
static void
init_ggate(struct hast_resource *res)
{
	struct g_gate_ctl_create ggiocreate;
	struct g_gate_ctl_cancel ggiocancel;

	/*
	 * We communicate with ggate via /dev/ggctl. Open it.
	 */
	res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR);
	if (res->hr_ggatefd < 0)
		primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME);
	/*
	 * Create provider before trying to connect, as connection failure
	 * is not critical, but may take some time.
	 */
	ggiocreate.gctl_version = G_GATE_VERSION;
	ggiocreate.gctl_mediasize = res->hr_datasize;
	ggiocreate.gctl_sectorsize = res->hr_local_sectorsize;
	ggiocreate.gctl_flags = 0;
	ggiocreate.gctl_maxcount = G_GATE_MAX_QUEUE_SIZE;
	ggiocreate.gctl_timeout = 0;
	ggiocreate.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s",
	    res->hr_provname);
	bzero(ggiocreate.gctl_info, sizeof(ggiocreate.gctl_info));
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) {
		pjdlog_info("Device hast/%s created.", res->hr_provname);
		res->hr_ggateunit = ggiocreate.gctl_unit;
		return;
	}
	if (errno != EEXIST) {
		primary_exit(EX_OSERR, "Unable to create hast/%s device",
		    res->hr_provname);
	}
	pjdlog_debug(1,
	    "Device hast/%s already exists, we will try to take it over.",
	    res->hr_provname);
	/*
	 * If we received EEXIST, we assume that the process who created the
	 * provider died and didn't clean up. In that case we will start from
	 * where he left off.
	 */
	ggiocancel.gctl_version = G_GATE_VERSION;
	ggiocancel.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s",
	    res->hr_provname);
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) {
		pjdlog_info("Device hast/%s recovered.", res->hr_provname);
		res->hr_ggateunit = ggiocancel.gctl_unit;
		return;
	}
	primary_exit(EX_OSERR, "Unable to take over hast/%s device",
	    res->hr_provname);
}

/*
 * Entry point for primary mode.  Sets up the control/event socketpairs,
 * forks a worker child and, in the child, initializes local state, the
 * ggate provider and all worker threads, finishing as the guard thread.
 * In the parent it records the worker PID and returns.
 */
void
hastd_primary(struct hast_resource *res)
{
	pthread_t td;
	pid_t pid;
	int error;

	/*
	 * Create communication channel between parent and child.
	 */
	if (proto_client("socketpair://", &res->hr_ctrl) < 0) {
		KEEP_ERRNO((void)pidfile_remove(pfh));
		pjdlog_exit(EX_OSERR,
		    "Unable to create control sockets between parent and child");
	}
	/*
	 * Create communication channel between child and parent.
	 */
	if (proto_client("socketpair://", &res->hr_event) < 0) {
		KEEP_ERRNO((void)pidfile_remove(pfh));
		pjdlog_exit(EX_OSERR,
		    "Unable to create event sockets between child and parent");
	}

	pid = fork();
	if (pid < 0) {
		KEEP_ERRNO((void)pidfile_remove(pfh));
		pjdlog_exit(EX_TEMPFAIL, "Unable to fork");
	}

	if (pid > 0) {
		/* This is parent. */
		/* Declare that we are receiver. */
		proto_recv(res->hr_event, NULL, 0);
		res->hr_workerpid = pid;
		return;
	}

	gres = res;

	(void)pidfile_close(pfh);
	hook_fini();

	setproctitle("%s (primary)", res->hr_name);

	/* Worker handles these via the guard thread, not the parent's handlers. */
	signal(SIGHUP, SIG_DFL);
	signal(SIGCHLD, SIG_DFL);

	/* Declare that we are sender. */
	proto_send(res->hr_event, NULL, 0);

	init_local(res);
	init_ggate(res);
	init_environment(res);
	/*
	 * Create the control thread before sending any event to the parent,
	 * as we can deadlock when parent sends control request to worker,
	 * but worker has no control thread started yet, so parent waits.
	 * In the meantime worker sends an event to the parent, but parent
	 * is unable to handle the event, because it waits for control
	 * request response.
	 */
	error = pthread_create(&td, NULL, ctrl_thread, res);
	assert(error == 0);
	if (real_remote(res) && init_remote(res, NULL, NULL))
		sync_start();
	error = pthread_create(&td, NULL, ggate_recv_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, local_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, remote_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, remote_recv_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, ggate_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, sync_thread, res);
	assert(error == 0);
	/* This thread becomes the guard thread; never returns normally. */
	(void)guard_thread(res);
}
/*
 * Log a message about a ggate request: the caller-supplied prefix (printf
 * style) followed by a human-readable description of the request (command,
 * offset, length).  The description is appended only if the prefix fit in
 * the local buffer.
 */
static void
reqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio, const char *fmt, ...)
{
	char msg[1024];
	va_list ap;
	int len;

	va_start(ap, fmt);
	len = vsnprintf(msg, sizeof(msg), fmt, ap);
	va_end(ap);
	if ((size_t)len < sizeof(msg)) {
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "READ(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		case BIO_DELETE:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "DELETE(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		case BIO_FLUSH:
			(void)snprintf(msg + len, sizeof(msg) - len, "FLUSH.");
			break;
		case BIO_WRITE:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "WRITE(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		default:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "UNKNOWN(%u).", (unsigned int)ggio->gctl_cmd);
			break;
		}
	}
	pjdlog_common(loglevel, debuglevel, -1, "%s", msg);
}

/*
 * Tear down both remote connections for component 'ncomp' under the write
 * lock, stop synchronization and notify the parent.  Safe to call when the
 * connection was already closed by a racing thread (checked under wlock).
 */
static void
remote_close(struct hast_resource *res, int ncomp)
{

	rw_wlock(&hio_remote_lock[ncomp]);
	/*
	 * A race is possible between dropping rlock and acquiring wlock -
	 * another thread can close connection in-between.
	 */
	if (!ISCONNECTED(res, ncomp)) {
		assert(res->hr_remotein == NULL);
		assert(res->hr_remoteout == NULL);
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	assert(res->hr_remotein != NULL);
	assert(res->hr_remoteout != NULL);

	pjdlog_debug(2, "Closing incoming connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remotein);
	res->hr_remotein = NULL;
	pjdlog_debug(2, "Closing outgoing connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remoteout);
	res->hr_remoteout = NULL;

	rw_unlock(&hio_remote_lock[ncomp]);

	pjdlog_warning("Disconnected from %s.", res->hr_remoteaddr);

	/*
	 * Stop synchronization if in-progress.
	 */
	sync_stop();

	event_send(res, EVENT_DISCONNECT);
}
/*
 * Thread receives ggate I/O requests from the kernel and passes them to
 * appropriate threads:
 * WRITE - always goes to both local_send and remote_send threads
 * READ (when the block is up-to-date on local component) -
 *	only local_send thread
 * READ (when the block isn't up-to-date on local component) -
 *	only remote_send thread
 * DELETE - always goes to both local_send and remote_send threads
 * FLUSH - always goes to both local_send and remote_send threads
 */
static void *
ggate_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomp, ncomps;
	int error;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		pjdlog_debug(2, "ggate_recv: Taking free request.");
		QUEUE_TAKE2(hio, free);
		pjdlog_debug(2, "ggate_recv: (%p) Got free request.", hio);
		ggio = &hio->hio_ggio;
		ggio->gctl_unit = res->hr_ggateunit;
		ggio->gctl_length = MAXPHYS;
		ggio->gctl_error = 0;
		pjdlog_debug(2,
		    "ggate_recv: (%p) Waiting for request from the kernel.",
		    hio);
		/* Blocks until the kernel has a request for us (or cancels). */
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) < 0) {
			if (sigexit_received)
				pthread_exit(NULL);
			primary_exit(EX_OSERR, "G_GATE_CMD_START failed");
		}
		error = ggio->gctl_error;
		switch (error) {
		case 0:
			break;
		case ECANCELED:
			/* Exit gracefully. */
			if (!sigexit_received) {
				pjdlog_debug(2,
				    "ggate_recv: (%p) Received cancel from the kernel.",
				    hio);
				pjdlog_info("Received cancel from the kernel, exiting.");
			}
			pthread_exit(NULL);
		case ENOMEM:
			/*
			 * Buffer too small? Impossible, we allocate MAXPHYS
			 * bytes - request can't be bigger than that.
			 */
			/* FALLTHROUGH */
		case ENXIO:
		default:
			primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.",
			    strerror(error));
		}
		/* Components overwrite EINVAL with their actual result. */
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio,
		    "ggate_recv: (%p) Request received from the kernel: ",
		    hio);
		/*
		 * Inform all components about new write request.
		 * For read request prefer local component unless the given
		 * range is out-of-date, then use remote component.
		 */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queue.",
			    hio);
			/* Reads go to exactly one component. */
			refcount_init(&hio->hio_countdown, 1);
			mtx_lock(&metadata_lock);
			if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF ||
			    res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
				/*
				 * This range is up-to-date on local component,
				 * so handle request locally.
				 */
				/* Local component is 0 for now. */
				ncomp = 0;
			} else /* if (res->hr_syncsrc ==
			    HAST_SYNCSRC_SECONDARY) */ {
				assert(res->hr_syncsrc ==
				    HAST_SYNCSRC_SECONDARY);
				/*
				 * This range is out-of-date on local component,
				 * so send request to the remote node.
				 */
				/* Remote component is 1 for now. */
				ncomp = 1;
			}
			mtx_unlock(&metadata_lock);
			QUEUE_INSERT1(hio, send, ncomp);
			break;
		case BIO_WRITE:
			/*
			 * Take a regular-range lock for the write; wait while
			 * the sync thread holds an overlapping sync range.
			 */
			for (;;) {
				mtx_lock(&range_lock);
				if (rangelock_islocked(range_sync,
				    ggio->gctl_offset, ggio->gctl_length)) {
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu locked.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					range_regular_wait = true;
					cv_wait(&range_regular_cond, &range_lock);
					range_regular_wait = false;
					mtx_unlock(&range_lock);
					continue;
				}
				if (rangelock_add(range_regular,
				    ggio->gctl_offset, ggio->gctl_length) < 0) {
					mtx_unlock(&range_lock);
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu is already locked, waiting.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					sleep(1);
					continue;
				}
				mtx_unlock(&range_lock);
				break;
			}
			/* Mark extents dirty; flush the map if it changed. */
			mtx_lock(&res->hr_amp_lock);
			if (activemap_write_start(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length)) {
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
			/* FALLTHROUGH */
		case BIO_DELETE:
		case BIO_FLUSH:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queues.",
			    hio);
			/* Writes/deletes/flushes go to every component. */
			refcount_init(&hio->hio_countdown, ncomps);
			for (ii = 0; ii < ncomps; ii++)
				QUEUE_INSERT1(hio, send, ii);
			break;
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread reads from or writes to local component.
 * If local read fails, it redirects it to remote_send thread.
 */
static void *
local_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ncomp, rncomp;
	ssize_t ret;

	/* Local component is 0 for now. */
	ncomp = 0;
	/* Remote component is 1 for now. */
	rncomp = 1;

	for (;;) {
		pjdlog_debug(2, "local_send: Taking request.");
		QUEUE_TAKE1(hio, send, ncomp);
		pjdlog_debug(2, "local_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			ret = pread(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret == ggio->gctl_length)
				hio->hio_errors[ncomp] = 0;
			else {
				/*
				 * If READ failed, try to read from remote node.
				 * (countdown is not released here - the remote
				 * component will do it when it finishes.)
				 */
				QUEUE_INSERT1(hio, send, rncomp);
				continue;
			}
			break;
		case BIO_WRITE:
			ret = pwrite(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret < 0)
				hio->hio_errors[ncomp] = errno;
			else if (ret != ggio->gctl_length)
				hio->hio_errors[ncomp] = EIO;
			else
				hio->hio_errors[ncomp] = 0;
			break;
		case BIO_DELETE:
			ret = g_delete(res->hr_localfd,
			    ggio->gctl_offset + res->hr_localoff,
			    ggio->gctl_length);
			if (ret < 0)
				hio->hio_errors[ncomp] = errno;
			else
				hio->hio_errors[ncomp] = 0;
			break;
		case BIO_FLUSH:
			ret = g_flush(res->hr_localfd);
			if (ret < 0)
				hio->hio_errors[ncomp] = errno;
			else
				hio->hio_errors[ncomp] = 0;
			break;
		}
		/* Last component to finish moves the request onward. */
		if (refcount_release(&hio->hio_countdown)) {
			if (ISSYNCREQ(hio)) {
				/* Sync requests go back to the sync thread. */
				mtx_lock(&sync_lock);
				SYNCREQDONE(hio);
				mtx_unlock(&sync_lock);
				cv_signal(&sync_cond);
			} else {
				pjdlog_debug(2,
				    "local_send: (%p) Moving request to the done queue.",
				    hio);
				QUEUE_INSERT2(hio, done);
			}
		}
	}
	/* NOTREACHED */
	return (NULL);
}
 */
static void *
remote_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	bool wakeup;
	uint64_t offset, length;
	uint8_t cmd;
	void *data;

	/* Remote component is 1 for now. */
	ncomp = 1;

	for (;;) {
		pjdlog_debug(2, "remote_send: Taking request.");
		QUEUE_TAKE1(hio, send, ncomp);
		pjdlog_debug(2, "remote_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		/* Translate the GEOM Gate command into a HAST command. */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			cmd = HIO_READ;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_WRITE:
			cmd = HIO_WRITE;
			data = ggio->gctl_data;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_DELETE:
			cmd = HIO_DELETE;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_FLUSH:
			cmd = HIO_FLUSH;
			data = NULL;
			offset = 0;
			length = 0;
			break;
		default:
			assert(!"invalid condition");
			abort();
		}
		nv = nv_alloc();
		nv_add_uint8(nv, cmd, "cmd");
		nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq");
		nv_add_uint64(nv, offset, "offset");
		nv_add_uint64(nv, length, "length");
		if (nv_error(nv) != 0) {
			hio->hio_errors[ncomp] = nv_error(nv);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to prepare header to send.",
			    hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to prepare header to send (%s): ",
			    strerror(nv_error(nv)));
			/* Move failed request immediately to the done queue. */
			goto done_queue;
		}
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the recv queue.",
		    hio);
		/*
		 * Protect connection from disappearing.
		 */
		rw_rlock(&hio_remote_lock[ncomp]);
		if (!ISCONNECTED(res, ncomp)) {
			rw_unlock(&hio_remote_lock[ncomp]);
			hio->hio_errors[ncomp] = ENOTCONN;
			goto done_queue;
		}
		/*
		 * Move the request to recv queue before sending it, because
		 * in different order we can get reply before we move request
		 * to recv queue.
		 */
		mtx_lock(&hio_recv_list_lock[ncomp]);
		/* Only signal remote_recv if the queue was empty. */
		wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]);
		TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		if (hast_proto_send(res, res->hr_remoteout, nv, data,
		    data != NULL ? length : 0) < 0) {
			hio->hio_errors[ncomp] = errno;
			rw_unlock(&hio_remote_lock[ncomp]);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to send request.", hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to send request (%s): ",
			    strerror(hio->hio_errors[ncomp]));
			remote_close(res, ncomp);
			/*
			 * Take request back from the receive queue and move
			 * it immediately to the done queue.
			 */
			mtx_lock(&hio_recv_list_lock[ncomp]);
			TAILQ_REMOVE(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
			mtx_unlock(&hio_recv_list_lock[ncomp]);
			goto done_queue;
		}
		rw_unlock(&hio_remote_lock[ncomp]);
		nv_free(nv);
		if (wakeup)
			cv_signal(&hio_recv_list_cond[ncomp]);
		continue;
done_queue:
		nv_free(nv);
		if (ISSYNCREQ(hio)) {
			if (!refcount_release(&hio->hio_countdown))
				continue;
			mtx_lock(&sync_lock);
			SYNCREQDONE(hio);
			mtx_unlock(&sync_lock);
			cv_signal(&sync_cond);
			continue;
		}
		if (ggio->gctl_cmd == BIO_WRITE) {
			/*
			 * A write that failed remotely may leave the extent
			 * in need of synchronization; flush the activemap
			 * to disk if so.
			 */
			mtx_lock(&res->hr_amp_lock);
			if (activemap_need_sync(res->hr_amp, ggio->gctl_offset,
			    ggio->gctl_length)) {
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
		}
		if (!refcount_release(&hio->hio_countdown))
			continue;
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the done queue.",
		    hio);
		QUEUE_INSERT2(hio, done);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread receives answer from secondary node and passes it to ggate_send
 * thread.
 */
static void *
remote_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	uint64_t seq;
	int error;

	/* Remote component is 1 for now. */
	ncomp = 1;

	for (;;) {
		/* Wait until there is anything to receive.
*/ 1317 mtx_lock(&hio_recv_list_lock[ncomp]); 1318 while (TAILQ_EMPTY(&hio_recv_list[ncomp])) { 1319 pjdlog_debug(2, "remote_recv: No requests, waiting."); 1320 cv_wait(&hio_recv_list_cond[ncomp], 1321 &hio_recv_list_lock[ncomp]); 1322 } 1323 mtx_unlock(&hio_recv_list_lock[ncomp]); 1324 rw_rlock(&hio_remote_lock[ncomp]); 1325 if (!ISCONNECTED(res, ncomp)) { 1326 rw_unlock(&hio_remote_lock[ncomp]); 1327 /* 1328 * Connection is dead, so move all pending requests to 1329 * the done queue (one-by-one). 1330 */ 1331 mtx_lock(&hio_recv_list_lock[ncomp]); 1332 hio = TAILQ_FIRST(&hio_recv_list[ncomp]); 1333 assert(hio != NULL); 1334 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1335 hio_next[ncomp]); 1336 mtx_unlock(&hio_recv_list_lock[ncomp]); 1337 goto done_queue; 1338 } 1339 if (hast_proto_recv_hdr(res->hr_remotein, &nv) < 0) { 1340 pjdlog_errno(LOG_ERR, 1341 "Unable to receive reply header"); 1342 rw_unlock(&hio_remote_lock[ncomp]); 1343 remote_close(res, ncomp); 1344 continue; 1345 } 1346 rw_unlock(&hio_remote_lock[ncomp]); 1347 seq = nv_get_uint64(nv, "seq"); 1348 if (seq == 0) { 1349 pjdlog_error("Header contains no 'seq' field."); 1350 nv_free(nv); 1351 continue; 1352 } 1353 mtx_lock(&hio_recv_list_lock[ncomp]); 1354 TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) { 1355 if (hio->hio_ggio.gctl_seq == seq) { 1356 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1357 hio_next[ncomp]); 1358 break; 1359 } 1360 } 1361 mtx_unlock(&hio_recv_list_lock[ncomp]); 1362 if (hio == NULL) { 1363 pjdlog_error("Found no request matching received 'seq' field (%ju).", 1364 (uintmax_t)seq); 1365 nv_free(nv); 1366 continue; 1367 } 1368 error = nv_get_int16(nv, "error"); 1369 if (error != 0) { 1370 /* Request failed on remote side. 
*/ 1371 hio->hio_errors[ncomp] = 0; 1372 nv_free(nv); 1373 goto done_queue; 1374 } 1375 ggio = &hio->hio_ggio; 1376 switch (ggio->gctl_cmd) { 1377 case BIO_READ: 1378 rw_rlock(&hio_remote_lock[ncomp]); 1379 if (!ISCONNECTED(res, ncomp)) { 1380 rw_unlock(&hio_remote_lock[ncomp]); 1381 nv_free(nv); 1382 goto done_queue; 1383 } 1384 if (hast_proto_recv_data(res, res->hr_remotein, nv, 1385 ggio->gctl_data, ggio->gctl_length) < 0) { 1386 hio->hio_errors[ncomp] = errno; 1387 pjdlog_errno(LOG_ERR, 1388 "Unable to receive reply data"); 1389 rw_unlock(&hio_remote_lock[ncomp]); 1390 nv_free(nv); 1391 remote_close(res, ncomp); 1392 goto done_queue; 1393 } 1394 rw_unlock(&hio_remote_lock[ncomp]); 1395 break; 1396 case BIO_WRITE: 1397 case BIO_DELETE: 1398 case BIO_FLUSH: 1399 break; 1400 default: 1401 assert(!"invalid condition"); 1402 abort(); 1403 } 1404 hio->hio_errors[ncomp] = 0; 1405 nv_free(nv); 1406done_queue: 1407 if (refcount_release(&hio->hio_countdown)) { 1408 if (ISSYNCREQ(hio)) { 1409 mtx_lock(&sync_lock); 1410 SYNCREQDONE(hio); 1411 mtx_unlock(&sync_lock); 1412 cv_signal(&sync_cond); 1413 } else { 1414 pjdlog_debug(2, 1415 "remote_recv: (%p) Moving request to the done queue.", 1416 hio); 1417 QUEUE_INSERT2(hio, done); 1418 } 1419 } 1420 } 1421 /* NOTREACHED */ 1422 return (NULL); 1423} 1424 1425/* 1426 * Thread sends answer to the kernel. 1427 */ 1428static void * 1429ggate_send_thread(void *arg) 1430{ 1431 struct hast_resource *res = arg; 1432 struct g_gate_ctl_io *ggio; 1433 struct hio *hio; 1434 unsigned int ii, ncomp, ncomps; 1435 1436 ncomps = HAST_NCOMPONENTS; 1437 1438 for (;;) { 1439 pjdlog_debug(2, "ggate_send: Taking request."); 1440 QUEUE_TAKE2(hio, done); 1441 pjdlog_debug(2, "ggate_send: (%p) Got request.", hio); 1442 ggio = &hio->hio_ggio; 1443 for (ii = 0; ii < ncomps; ii++) { 1444 if (hio->hio_errors[ii] == 0) { 1445 /* 1446 * One successful request is enough to declare 1447 * success. 
1448 */ 1449 ggio->gctl_error = 0; 1450 break; 1451 } 1452 } 1453 if (ii == ncomps) { 1454 /* 1455 * None of the requests were successful. 1456 * Use first error. 1457 */ 1458 ggio->gctl_error = hio->hio_errors[0]; 1459 } 1460 if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) { 1461 mtx_lock(&res->hr_amp_lock); 1462 activemap_write_complete(res->hr_amp, 1463 ggio->gctl_offset, ggio->gctl_length); 1464 mtx_unlock(&res->hr_amp_lock); 1465 } 1466 if (ggio->gctl_cmd == BIO_WRITE) { 1467 /* 1468 * Unlock range we locked. 1469 */ 1470 mtx_lock(&range_lock); 1471 rangelock_del(range_regular, ggio->gctl_offset, 1472 ggio->gctl_length); 1473 if (range_sync_wait) 1474 cv_signal(&range_sync_cond); 1475 mtx_unlock(&range_lock); 1476 /* 1477 * Bump local count if this is first write after 1478 * connection failure with remote node. 1479 */ 1480 ncomp = 1; 1481 rw_rlock(&hio_remote_lock[ncomp]); 1482 if (!ISCONNECTED(res, ncomp)) { 1483 mtx_lock(&metadata_lock); 1484 if (res->hr_primary_localcnt == 1485 res->hr_secondary_remotecnt) { 1486 res->hr_primary_localcnt++; 1487 pjdlog_debug(1, 1488 "Increasing localcnt to %ju.", 1489 (uintmax_t)res->hr_primary_localcnt); 1490 (void)metadata_write(res); 1491 } 1492 mtx_unlock(&metadata_lock); 1493 } 1494 rw_unlock(&hio_remote_lock[ncomp]); 1495 } 1496 if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) < 0) 1497 primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed"); 1498 pjdlog_debug(2, 1499 "ggate_send: (%p) Moving request to the free queue.", hio); 1500 QUEUE_INSERT2(hio, free); 1501 } 1502 /* NOTREACHED */ 1503 return (NULL); 1504} 1505 1506/* 1507 * Thread synchronize local and remote components. 
1508 */ 1509static void * 1510sync_thread(void *arg __unused) 1511{ 1512 struct hast_resource *res = arg; 1513 struct hio *hio; 1514 struct g_gate_ctl_io *ggio; 1515 unsigned int ii, ncomp, ncomps; 1516 off_t offset, length, synced; 1517 bool dorewind; 1518 int syncext; 1519 1520 ncomps = HAST_NCOMPONENTS; 1521 dorewind = true; 1522 synced = 0; 1523 offset = -1; 1524 1525 for (;;) { 1526 mtx_lock(&sync_lock); 1527 if (offset >= 0 && !sync_inprogress) { 1528 pjdlog_info("Synchronization interrupted. " 1529 "%jd bytes synchronized so far.", 1530 (intmax_t)synced); 1531 event_send(res, EVENT_SYNCINTR); 1532 } 1533 while (!sync_inprogress) { 1534 dorewind = true; 1535 synced = 0; 1536 cv_wait(&sync_cond, &sync_lock); 1537 } 1538 mtx_unlock(&sync_lock); 1539 /* 1540 * Obtain offset at which we should synchronize. 1541 * Rewind synchronization if needed. 1542 */ 1543 mtx_lock(&res->hr_amp_lock); 1544 if (dorewind) 1545 activemap_sync_rewind(res->hr_amp); 1546 offset = activemap_sync_offset(res->hr_amp, &length, &syncext); 1547 if (syncext != -1) { 1548 /* 1549 * We synchronized entire syncext extent, we can mark 1550 * it as clean now. 1551 */ 1552 if (activemap_extent_complete(res->hr_amp, syncext)) 1553 (void)hast_activemap_flush(res); 1554 } 1555 mtx_unlock(&res->hr_amp_lock); 1556 if (dorewind) { 1557 dorewind = false; 1558 if (offset < 0) 1559 pjdlog_info("Nodes are in sync."); 1560 else { 1561 pjdlog_info("Synchronization started. %ju bytes to go.", 1562 (uintmax_t)(res->hr_extentsize * 1563 activemap_ndirty(res->hr_amp))); 1564 event_send(res, EVENT_SYNCSTART); 1565 } 1566 } 1567 if (offset < 0) { 1568 sync_stop(); 1569 pjdlog_debug(1, "Nothing to synchronize."); 1570 /* 1571 * Synchronization complete, make both localcnt and 1572 * remotecnt equal. 1573 */ 1574 ncomp = 1; 1575 rw_rlock(&hio_remote_lock[ncomp]); 1576 if (ISCONNECTED(res, ncomp)) { 1577 if (synced > 0) { 1578 pjdlog_info("Synchronization complete. 
" 1579 "%jd bytes synchronized.", 1580 (intmax_t)synced); 1581 event_send(res, EVENT_SYNCDONE); 1582 } 1583 mtx_lock(&metadata_lock); 1584 res->hr_syncsrc = HAST_SYNCSRC_UNDEF; 1585 res->hr_primary_localcnt = 1586 res->hr_secondary_localcnt; 1587 res->hr_primary_remotecnt = 1588 res->hr_secondary_remotecnt; 1589 pjdlog_debug(1, 1590 "Setting localcnt to %ju and remotecnt to %ju.", 1591 (uintmax_t)res->hr_primary_localcnt, 1592 (uintmax_t)res->hr_secondary_localcnt); 1593 (void)metadata_write(res); 1594 mtx_unlock(&metadata_lock); 1595 } 1596 rw_unlock(&hio_remote_lock[ncomp]); 1597 continue; 1598 } 1599 pjdlog_debug(2, "sync: Taking free request."); 1600 QUEUE_TAKE2(hio, free); 1601 pjdlog_debug(2, "sync: (%p) Got free request.", hio); 1602 /* 1603 * Lock the range we are going to synchronize. We don't want 1604 * race where someone writes between our read and write. 1605 */ 1606 for (;;) { 1607 mtx_lock(&range_lock); 1608 if (rangelock_islocked(range_regular, offset, length)) { 1609 pjdlog_debug(2, 1610 "sync: Range offset=%jd length=%jd locked.", 1611 (intmax_t)offset, (intmax_t)length); 1612 range_sync_wait = true; 1613 cv_wait(&range_sync_cond, &range_lock); 1614 range_sync_wait = false; 1615 mtx_unlock(&range_lock); 1616 continue; 1617 } 1618 if (rangelock_add(range_sync, offset, length) < 0) { 1619 mtx_unlock(&range_lock); 1620 pjdlog_debug(2, 1621 "sync: Range offset=%jd length=%jd is already locked, waiting.", 1622 (intmax_t)offset, (intmax_t)length); 1623 sleep(1); 1624 continue; 1625 } 1626 mtx_unlock(&range_lock); 1627 break; 1628 } 1629 /* 1630 * First read the data from synchronization source. 
1631 */ 1632 SYNCREQ(hio); 1633 ggio = &hio->hio_ggio; 1634 ggio->gctl_cmd = BIO_READ; 1635 ggio->gctl_offset = offset; 1636 ggio->gctl_length = length; 1637 ggio->gctl_error = 0; 1638 for (ii = 0; ii < ncomps; ii++) 1639 hio->hio_errors[ii] = EINVAL; 1640 reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 1641 hio); 1642 pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 1643 hio); 1644 mtx_lock(&metadata_lock); 1645 if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1646 /* 1647 * This range is up-to-date on local component, 1648 * so handle request locally. 1649 */ 1650 /* Local component is 0 for now. */ 1651 ncomp = 0; 1652 } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 1653 assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 1654 /* 1655 * This range is out-of-date on local component, 1656 * so send request to the remote node. 1657 */ 1658 /* Remote component is 1 for now. */ 1659 ncomp = 1; 1660 } 1661 mtx_unlock(&metadata_lock); 1662 refcount_init(&hio->hio_countdown, 1); 1663 QUEUE_INSERT1(hio, send, ncomp); 1664 1665 /* 1666 * Let's wait for READ to finish. 1667 */ 1668 mtx_lock(&sync_lock); 1669 while (!ISSYNCREQDONE(hio)) 1670 cv_wait(&sync_cond, &sync_lock); 1671 mtx_unlock(&sync_lock); 1672 1673 if (hio->hio_errors[ncomp] != 0) { 1674 pjdlog_error("Unable to read synchronization data: %s.", 1675 strerror(hio->hio_errors[ncomp])); 1676 goto free_queue; 1677 } 1678 1679 /* 1680 * We read the data from synchronization source, now write it 1681 * to synchronization target. 
1682 */ 1683 SYNCREQ(hio); 1684 ggio->gctl_cmd = BIO_WRITE; 1685 for (ii = 0; ii < ncomps; ii++) 1686 hio->hio_errors[ii] = EINVAL; 1687 reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 1688 hio); 1689 pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 1690 hio); 1691 mtx_lock(&metadata_lock); 1692 if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1693 /* 1694 * This range is up-to-date on local component, 1695 * so we update remote component. 1696 */ 1697 /* Remote component is 1 for now. */ 1698 ncomp = 1; 1699 } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 1700 assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 1701 /* 1702 * This range is out-of-date on local component, 1703 * so we update it. 1704 */ 1705 /* Local component is 0 for now. */ 1706 ncomp = 0; 1707 } 1708 mtx_unlock(&metadata_lock); 1709 1710 pjdlog_debug(2, "sync: (%p) Moving request to the send queues.", 1711 hio); 1712 refcount_init(&hio->hio_countdown, 1); 1713 QUEUE_INSERT1(hio, send, ncomp); 1714 1715 /* 1716 * Let's wait for WRITE to finish. 
1717 */ 1718 mtx_lock(&sync_lock); 1719 while (!ISSYNCREQDONE(hio)) 1720 cv_wait(&sync_cond, &sync_lock); 1721 mtx_unlock(&sync_lock); 1722 1723 if (hio->hio_errors[ncomp] != 0) { 1724 pjdlog_error("Unable to write synchronization data: %s.", 1725 strerror(hio->hio_errors[ncomp])); 1726 goto free_queue; 1727 } 1728 1729 synced += length; 1730free_queue: 1731 mtx_lock(&range_lock); 1732 rangelock_del(range_sync, offset, length); 1733 if (range_regular_wait) 1734 cv_signal(&range_regular_cond); 1735 mtx_unlock(&range_lock); 1736 pjdlog_debug(2, "sync: (%p) Moving request to the free queue.", 1737 hio); 1738 QUEUE_INSERT2(hio, free); 1739 } 1740 /* NOTREACHED */ 1741 return (NULL); 1742} 1743 1744static void 1745config_reload(void) 1746{ 1747 struct hastd_config *newcfg; 1748 struct hast_resource *res; 1749 unsigned int ii, ncomps; 1750 int modified; 1751 1752 pjdlog_info("Reloading configuration..."); 1753 1754 ncomps = HAST_NCOMPONENTS; 1755 1756 newcfg = yy_config_parse(cfgpath, false); 1757 if (newcfg == NULL) 1758 goto failed; 1759 1760 TAILQ_FOREACH(res, &newcfg->hc_resources, hr_next) { 1761 if (strcmp(res->hr_name, gres->hr_name) == 0) 1762 break; 1763 } 1764 /* 1765 * If resource was removed from the configuration file, resource 1766 * name, provider name or path to local component was modified we 1767 * shouldn't be here. This means that someone modified configuration 1768 * file and send SIGHUP to us instead of main hastd process. 1769 * Log advice and ignore the signal. 
1770 */ 1771 if (res == NULL || strcmp(gres->hr_name, res->hr_name) != 0 || 1772 strcmp(gres->hr_provname, res->hr_provname) != 0 || 1773 strcmp(gres->hr_localpath, res->hr_localpath) != 0) { 1774 pjdlog_warning("To reload configuration send SIGHUP to the main hastd process (pid %u).", 1775 (unsigned int)getppid()); 1776 goto failed; 1777 } 1778 1779#define MODIFIED_REMOTEADDR 0x1 1780#define MODIFIED_REPLICATION 0x2 1781#define MODIFIED_TIMEOUT 0x4 1782#define MODIFIED_EXEC 0x8 1783 modified = 0; 1784 if (strcmp(gres->hr_remoteaddr, res->hr_remoteaddr) != 0) { 1785 /* 1786 * Don't copy res->hr_remoteaddr to gres just yet. 1787 * We want remote_close() to log disconnect from the old 1788 * addresses, not from the new ones. 1789 */ 1790 modified |= MODIFIED_REMOTEADDR; 1791 } 1792 if (gres->hr_replication != res->hr_replication) { 1793 gres->hr_replication = res->hr_replication; 1794 modified |= MODIFIED_REPLICATION; 1795 } 1796 if (gres->hr_timeout != res->hr_timeout) { 1797 gres->hr_timeout = res->hr_timeout; 1798 modified |= MODIFIED_TIMEOUT; 1799 } 1800 if (strcmp(gres->hr_exec, res->hr_exec) != 0) { 1801 strlcpy(gres->hr_exec, res->hr_exec, sizeof(gres->hr_exec)); 1802 modified |= MODIFIED_EXEC; 1803 } 1804 /* 1805 * If only timeout was modified we only need to change it without 1806 * reconnecting. 
1807 */ 1808 if (modified == MODIFIED_TIMEOUT) { 1809 for (ii = 0; ii < ncomps; ii++) { 1810 if (!ISREMOTE(ii)) 1811 continue; 1812 rw_rlock(&hio_remote_lock[ii]); 1813 if (!ISCONNECTED(gres, ii)) { 1814 rw_unlock(&hio_remote_lock[ii]); 1815 continue; 1816 } 1817 rw_unlock(&hio_remote_lock[ii]); 1818 if (proto_timeout(gres->hr_remotein, 1819 gres->hr_timeout) < 0) { 1820 pjdlog_errno(LOG_WARNING, 1821 "Unable to set connection timeout"); 1822 } 1823 if (proto_timeout(gres->hr_remoteout, 1824 gres->hr_timeout) < 0) { 1825 pjdlog_errno(LOG_WARNING, 1826 "Unable to set connection timeout"); 1827 } 1828 } 1829 } else if ((modified & 1830 (MODIFIED_REMOTEADDR | MODIFIED_REPLICATION)) != 0) { 1831 for (ii = 0; ii < ncomps; ii++) { 1832 if (!ISREMOTE(ii)) 1833 continue; 1834 remote_close(gres, ii); 1835 } 1836 if (modified & MODIFIED_REMOTEADDR) { 1837 strlcpy(gres->hr_remoteaddr, res->hr_remoteaddr, 1838 sizeof(gres->hr_remoteaddr)); 1839 } 1840 } 1841#undef MODIFIED_REMOTEADDR 1842#undef MODIFIED_REPLICATION 1843#undef MODIFIED_TIMEOUT 1844#undef MODIFIED_EXEC 1845 1846 pjdlog_info("Configuration reloaded successfully."); 1847 return; 1848failed: 1849 if (newcfg != NULL) { 1850 if (newcfg->hc_controlconn != NULL) 1851 proto_close(newcfg->hc_controlconn); 1852 if (newcfg->hc_listenconn != NULL) 1853 proto_close(newcfg->hc_listenconn); 1854 yy_config_free(newcfg); 1855 } 1856 pjdlog_warning("Configuration not reloaded."); 1857} 1858 1859static void 1860keepalive_send(struct hast_resource *res, unsigned int ncomp) 1861{ 1862 struct nv *nv; 1863 1864 nv = nv_alloc(); 1865 nv_add_uint8(nv, HIO_KEEPALIVE, "cmd"); 1866 if (nv_error(nv) != 0) { 1867 nv_free(nv); 1868 pjdlog_debug(1, 1869 "keepalive_send: Unable to prepare header to send."); 1870 return; 1871 } 1872 if (hast_proto_send(res, res->hr_remoteout, nv, NULL, 0) < 0) { 1873 pjdlog_common(LOG_DEBUG, 1, errno, 1874 "keepalive_send: Unable to send request"); 1875 nv_free(nv); 1876 rw_unlock(&hio_remote_lock[ncomp]); 1877 
		remote_close(res, ncomp);
		/* Restore the read lock the caller expects to still hold. */
		rw_rlock(&hio_remote_lock[ncomp]);
		return;
	}
	nv_free(nv);
	pjdlog_debug(2, "keepalive_send: Request sent.");
}

/*
 * Check one component: ping a live remote connection and reconnect a
 * dead one.  Local components need no guarding and are skipped.
 */
static void
guard_one(struct hast_resource *res, unsigned int ncomp)
{
	struct proto_conn *in, *out;

	if (!ISREMOTE(ncomp))
		return;

	rw_rlock(&hio_remote_lock[ncomp]);

	if (!real_remote(res)) {
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	if (ISCONNECTED(res, ncomp)) {
		assert(res->hr_remotein != NULL);
		assert(res->hr_remoteout != NULL);
		keepalive_send(res, ncomp);
	}

	/*
	 * Check again: keepalive_send() may have closed the connection
	 * if the send failed.
	 */
	if (ISCONNECTED(res, ncomp)) {
		assert(res->hr_remotein != NULL);
		assert(res->hr_remoteout != NULL);
		rw_unlock(&hio_remote_lock[ncomp]);
		pjdlog_debug(2, "remote_guard: Connection to %s is ok.",
		    res->hr_remoteaddr);
		return;
	}

	assert(res->hr_remotein == NULL);
	assert(res->hr_remoteout == NULL);
	/*
	 * Upgrade the lock. It doesn't have to be atomic as no other thread
	 * can change connection status from disconnected to connected.
	 */
	rw_unlock(&hio_remote_lock[ncomp]);
	pjdlog_debug(2, "remote_guard: Reconnecting to %s.",
	    res->hr_remoteaddr);
	in = out = NULL;
	if (init_remote(res, &in, &out)) {
		rw_wlock(&hio_remote_lock[ncomp]);
		assert(res->hr_remotein == NULL);
		assert(res->hr_remoteout == NULL);
		assert(in != NULL && out != NULL);
		res->hr_remotein = in;
		res->hr_remoteout = out;
		rw_unlock(&hio_remote_lock[ncomp]);
		pjdlog_info("Successfully reconnected to %s.",
		    res->hr_remoteaddr);
		/* Connection is back; restart synchronization. */
		sync_start();
	} else {
		/* Both connections should be NULL. */
		assert(res->hr_remotein == NULL);
		assert(res->hr_remoteout == NULL);
		assert(in == NULL && out == NULL);
		pjdlog_debug(2, "remote_guard: Reconnect to %s failed.",
		    res->hr_remoteaddr);
	}
}

/*
 * Thread guards remote connections and reconnects when needed, handles
 * signals, etc.
 */
static void *
guard_thread(void *arg)
{
	struct hast_resource *res = arg;
	unsigned int ii, ncomps;
	struct timespec timeout;
	time_t lastcheck, now;
	sigset_t mask;
	int signo;

	ncomps = HAST_NCOMPONENTS;
	lastcheck = time(NULL);

	/* Signals handled synchronously via sigtimedwait() below. */
	PJDLOG_VERIFY(sigemptyset(&mask) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGHUP) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGINT) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGTERM) == 0);

	timeout.tv_nsec = 0;
	/* No signal received yet on the first iteration. */
	signo = -1;

	for (;;) {
		switch (signo) {
		case SIGHUP:
			config_reload();
			break;
		case SIGINT:
		case SIGTERM:
			sigexit_received = true;
			primary_exitx(EX_OK,
			    "Termination signal received, exiting.");
			break;
		default:
			break;
		}

		pjdlog_debug(2, "remote_guard: Checking connections.");
		now = time(NULL);
		/* Rate-limit connection checks to once per RETRY_SLEEP. */
		if (lastcheck + RETRY_SLEEP <= now) {
			for (ii = 0; ii < ncomps; ii++)
				guard_one(res, ii);
			lastcheck = now;
		}
		timeout.tv_sec = RETRY_SLEEP;
		/* Sleep until a signal arrives or the timeout expires. */
		signo = sigtimedwait(&mask, NULL, &timeout);
	}
	/* NOTREACHED */
	return (NULL);
}