/* primary.c — FreeBSD head, revision 211878 */
/*-
 * Copyright (c) 2009 The FreeBSD Foundation
 * Copyright (c) 2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * This software was developed by Pawel Jakub Dawidek under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sbin/hastd/primary.c 211878 2010-08-27 14:06:00Z pjd $");

#include <sys/types.h>
#include <sys/time.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/refcount.h>
#include <sys/stat.h>

#include <geom/gate/g_gate.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libgeom.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>

#include <activemap.h>
#include <nv.h>
#include <rangelock.h>

#include "control.h"
#include "hast.h"
#include "hast_proto.h"
#include "hastd.h"
#include "metadata.h"
#include "proto.h"
#include "pjdlog.h"
#include "subr.h"
#include "synch.h"

/* There is only one remote component for now. */
#define	ISREMOTE(no)	((no) == 1)

/*
 * One in-flight I/O request. A single hio is shared by all components
 * (local disk, remote node); each component records its own error and
 * decrements hio_countdown when done with the request.
 */
struct hio {
	/*
	 * Number of components we are still waiting for.
	 * When this field goes to 0, we can send the request back to the
	 * kernel. Each component has to decrease this counter by one
	 * even on failure.
	 */
	unsigned int		 hio_countdown;
	/*
	 * Each component has a place to store its own error.
	 * Once the request is handled by all components we can decide if the
	 * request overall is successful or not.
	 */
	int			*hio_errors;
	/*
	 * Structure used to communicate with GEOM Gate class.
	 */
	struct g_gate_ctl_io	 hio_ggio;
	/* Per-component list linkage (one TAILQ_ENTRY per component). */
	TAILQ_ENTRY(hio)	*hio_next;
};
/* Free and done lists reuse the component-0 linkage. */
#define	hio_free_next	hio_next[0]
#define	hio_done_next	hio_next[0]

/*
 * Free list holds unused structures. When free list is empty, we have to wait
 * until some in-progress requests are freed.
 */
static TAILQ_HEAD(, hio) hio_free_list;
static pthread_mutex_t hio_free_list_lock;
static pthread_cond_t hio_free_list_cond;
/*
 * There is one send list for every component. One request is placed on all
 * send lists - each component gets the same request, but each component is
 * responsible for managing its own send list.
 */
static TAILQ_HEAD(, hio) *hio_send_list;
static pthread_mutex_t *hio_send_list_lock;
static pthread_cond_t *hio_send_list_cond;
/*
 * There is one recv list for every component, although local components don't
 * use recv lists as local requests are done synchronously.
 */
static TAILQ_HEAD(, hio) *hio_recv_list;
static pthread_mutex_t *hio_recv_list_lock;
static pthread_cond_t *hio_recv_list_cond;
/*
 * Request is placed on done list by the slowest component (the one that
 * decreased hio_countdown from 1 to 0).
 */
static TAILQ_HEAD(, hio) hio_done_list;
static pthread_mutex_t hio_done_list_lock;
static pthread_cond_t hio_done_list_cond;
/*
 * Structures below are for interaction with sync thread.
 */
static bool sync_inprogress;
static pthread_mutex_t sync_lock;
static pthread_cond_t sync_cond;
/*
 * The lock below allows to synchronize access to remote connections.
 */
static pthread_rwlock_t *hio_remote_lock;
static pthread_mutex_t hio_guard_lock;
static pthread_cond_t hio_guard_cond;

/*
 * Lock to synchronize metadata updates. Also synchronize access to
 * hr_primary_localcnt and hr_primary_remotecnt fields.
 */
static pthread_mutex_t metadata_lock;

/*
 * Maximum number of outstanding I/O requests.
 */
#define	HAST_HIO_MAX	256
/*
 * Number of components. At this point there are only two components: local
 * and remote, but in the future it might be possible to use multiple local
 * and remote components.
 */
#define	HAST_NCOMPONENTS	2
/*
 * Number of seconds to sleep before next reconnect try.
 */
#define	RECONNECT_SLEEP		5

/*
 * NOTE(review): the 'no' argument is currently unused — with a single
 * remote component, connected state is keyed off the one in/out pair.
 */
#define	ISCONNECTED(res, no)	\
	((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL)

/* Append 'hio' to the per-component list 'name'[ncomp]; wake one waiter
 * only if the list was empty (avoids needless signalling). */
#define	QUEUE_INSERT1(hio, name, ncomp)	do {				\
	bool _wakeup;							\
									\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);			\
	_wakeup = TAILQ_EMPTY(&hio_##name##_list[(ncomp)]);		\
	TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio),		\
	    hio_next[(ncomp)]);						\
	mtx_unlock(&hio_##name##_list_lock[ncomp]);			\
	if (_wakeup)							\
		cv_signal(&hio_##name##_list_cond[(ncomp)]);		\
} while (0)
/* As QUEUE_INSERT1, but for the global (non-per-component) lists. */
#define	QUEUE_INSERT2(hio, name)	do {				\
	bool _wakeup;							\
									\
	mtx_lock(&hio_##name##_list_lock);				\
	_wakeup = TAILQ_EMPTY(&hio_##name##_list);			\
	TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\
	mtx_unlock(&hio_##name##_list_lock);				\
	if (_wakeup)							\
		cv_signal(&hio_##name##_list_cond);			\
} while (0)
/* Block until a request is available on list 'name'[ncomp], remove it. */
#define	QUEUE_TAKE1(hio, name, ncomp)	do {				\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);			\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL) { \
		cv_wait(&hio_##name##_list_cond[(ncomp)],		\
		    &hio_##name##_list_lock[(ncomp)]);			\
	}								\
	TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio),		\
	    hio_next[(ncomp)]);						\
	mtx_unlock(&hio_##name##_list_lock[(ncomp)]);			\
} while (0)
/* Block until a request is available on the global list 'name', remove it. */
#define	QUEUE_TAKE2(hio, name)	do {					\
	mtx_lock(&hio_##name##_list_lock);				\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) {	\
		cv_wait(&hio_##name##_list_cond,			\
		    &hio_##name##_list_lock);				\
	}								\
	TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next);	\
	mtx_unlock(&hio_##name##_list_lock);				\
} while (0)

/*
 * Sync requests are distinguished from kernel requests by a negative
 * gctl_unit: -1 while pending, -2 once done.
 */
#define	SYNCREQ(hio)		do {					\
	(hio)->hio_ggio.gctl_unit = -1;					\
	(hio)->hio_ggio.gctl_seq = 1;					\
} while (0)
#define	ISSYNCREQ(hio)		((hio)->hio_ggio.gctl_unit == -1)
#define	SYNCREQDONE(hio)	do { (hio)->hio_ggio.gctl_unit = -2; } while (0)
#define	ISSYNCREQDONE(hio)	((hio)->hio_ggio.gctl_unit == -2)

/* Global resource pointer, used by the exit helpers below. */
static struct hast_resource *gres;

/* Range locks separating regular I/O from the synchronization stream. */
static pthread_mutex_t range_lock;
static struct rangelocks *range_regular;
static bool range_regular_wait;
static pthread_cond_t range_regular_cond;
static struct rangelocks *range_sync;
static bool range_sync_wait;
static pthread_cond_t range_sync_cond;

static void *ggate_recv_thread(void *arg);
static void *local_send_thread(void *arg);
static void *remote_send_thread(void *arg);
static void *remote_recv_thread(void *arg);
static void *ggate_send_thread(void *arg);
static void *sync_thread(void *arg);
static void *guard_thread(void *arg);

static void sighandler(int sig);

/*
 * Tear down worker resources: close the local provider descriptor and
 * destroy the ggate device if we created one. errno is preserved across
 * the cleanup so callers can still report the original failure.
 */
static void
cleanup(struct hast_resource *res)
{
	int rerrno;

	/* Remember errno. */
	rerrno = errno;

	/*
	 * Close descriptor to /dev/hast/<name>
	 * to work-around race in the kernel.
	 */
	close(res->hr_localfd);

	/* Destroy ggate provider if we created one. */
	if (res->hr_ggateunit >= 0) {
		struct g_gate_ctl_destroy ggiod;

		ggiod.gctl_version = G_GATE_VERSION;
		ggiod.gctl_unit = res->hr_ggateunit;
		ggiod.gctl_force = 1;
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) < 0) {
			pjdlog_warning("Unable to destroy hast/%s device",
			    res->hr_provname);
		}
		res->hr_ggateunit = -1;
	}

	/* Restore errno. */
	errno = rerrno;
}

/* Log an errno-based error, clean up and exit with a sysexits(3) code. */
static void
primary_exit(int exitcode, const char *fmt, ...)
{
	va_list ap;

	assert(exitcode != EX_OK);
	va_start(ap, fmt);
	pjdlogv_errno(LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

/* Like primary_exit(), but without errno; EX_OK logs at INFO level. */
static void
primary_exitx(int exitcode, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	pjdlogv(exitcode == EX_OK ? LOG_INFO : LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

/*
 * Write the in-memory activemap bitmap to the on-disk metadata area.
 * Returns 0 on success, -1 on short/failed write.
 */
static int
hast_activemap_flush(struct hast_resource *res)
{
	const unsigned char *buf;
	size_t size;

	buf = activemap_bitmap(res->hr_amp, &size);
	assert(buf != NULL);
	assert((size % res->hr_local_sectorsize) == 0);
	if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) !=
	    (ssize_t)size) {
		KEEP_ERRNO(pjdlog_errno(LOG_ERR,
		    "Unable to flush activemap to disk"));
		return (-1);
	}
	return (0);
}

/* True if a remote address is configured (i.e. not "none"). */
static bool
real_remote(const struct hast_resource *res)
{

	return (strcmp(res->hr_remoteaddr, "none") != 0);
}

/*
 * Allocate and initialize all lists, locks, condition variables and the
 * pool of HAST_HIO_MAX request structures, then install signal handlers.
 */
static void
init_environment(struct hast_resource *res __unused)
{
	struct hio *hio;
	unsigned int ii, ncomps;

	/*
	 * In the future it might be per-resource value.
	 */
	ncomps = HAST_NCOMPONENTS;

	/*
	 * Allocate memory needed by lists.
	 */
	hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps);
	if (hio_send_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send lists.",
		    sizeof(hio_send_list[0]) * ncomps);
	}
	hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps);
	if (hio_send_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list locks.",
		    sizeof(hio_send_list_lock[0]) * ncomps);
	}
	hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps);
	if (hio_send_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list condition variables.",
		    sizeof(hio_send_list_cond[0]) * ncomps);
	}
	hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps);
	if (hio_recv_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv lists.",
		    sizeof(hio_recv_list[0]) * ncomps);
	}
	hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps);
	if (hio_recv_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list locks.",
		    sizeof(hio_recv_list_lock[0]) * ncomps);
	}
	hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps);
	if (hio_recv_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list condition variables.",
		    sizeof(hio_recv_list_cond[0]) * ncomps);
	}
	hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps);
	if (hio_remote_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for remote connections locks.",
		    sizeof(hio_remote_lock[0]) * ncomps);
	}

	/*
	 * Initialize lists, their locks and their condition variables.
	 */
	TAILQ_INIT(&hio_free_list);
	mtx_init(&hio_free_list_lock);
	cv_init(&hio_free_list_cond);
	for (ii = 0; ii < HAST_NCOMPONENTS; ii++) {
		TAILQ_INIT(&hio_send_list[ii]);
		mtx_init(&hio_send_list_lock[ii]);
		cv_init(&hio_send_list_cond[ii]);
		TAILQ_INIT(&hio_recv_list[ii]);
		mtx_init(&hio_recv_list_lock[ii]);
		cv_init(&hio_recv_list_cond[ii]);
		rw_init(&hio_remote_lock[ii]);
	}
	TAILQ_INIT(&hio_done_list);
	mtx_init(&hio_done_list_lock);
	cv_init(&hio_done_list_cond);
	mtx_init(&hio_guard_lock);
	cv_init(&hio_guard_cond);
	mtx_init(&metadata_lock);

	/*
	 * Allocate requests pool and initialize requests.
	 */
	for (ii = 0; ii < HAST_HIO_MAX; ii++) {
		hio = malloc(sizeof(*hio));
		if (hio == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio request.",
			    sizeof(*hio));
		}
		hio->hio_countdown = 0;
		hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps);
		if (hio->hio_errors == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable allocate %zu bytes of memory for hio errors.",
			    sizeof(hio->hio_errors[0]) * ncomps);
		}
		hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps);
		if (hio->hio_next == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable allocate %zu bytes of memory for hio_next field.",
			    sizeof(hio->hio_next[0]) * ncomps);
		}
		hio->hio_ggio.gctl_version = G_GATE_VERSION;
		/* Buffers are MAXPHYS so any single request fits. */
		hio->hio_ggio.gctl_data = malloc(MAXPHYS);
		if (hio->hio_ggio.gctl_data == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for gctl_data.",
			    MAXPHYS);
		}
		hio->hio_ggio.gctl_length = MAXPHYS;
		hio->hio_ggio.gctl_error = 0;
		TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next);
	}

	/*
	 * Turn on signals handling.
	 */
	signal(SIGINT, sighandler);
	signal(SIGTERM, sighandler);
	signal(SIGHUP, sighandler);
}

/*
 * Read metadata, create the activemap and both range locks, then load
 * the on-disk activemap into memory. On first-ever use of the provider
 * (resuid == 0), generate the resource unique id and write metadata back.
 */
static void
init_local(struct hast_resource *res)
{
	unsigned char *buf;
	size_t mapsize;

	if (metadata_read(res, true) < 0)
		exit(EX_NOINPUT);
	mtx_init(&res->hr_amp_lock);
	if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize,
	    res->hr_local_sectorsize, res->hr_keepdirty) < 0) {
		primary_exit(EX_TEMPFAIL, "Unable to create activemap");
	}
	mtx_init(&range_lock);
	cv_init(&range_regular_cond);
	if (rangelock_init(&range_regular) < 0)
		primary_exit(EX_TEMPFAIL, "Unable to create regular range lock");
	cv_init(&range_sync_cond);
	if (rangelock_init(&range_sync) < 0)
		primary_exit(EX_TEMPFAIL, "Unable to create sync range lock");
	mapsize = activemap_ondisk_size(res->hr_amp);
	buf = calloc(1, mapsize);
	if (buf == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate buffer for activemap.");
	}
	if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) !=
	    (ssize_t)mapsize) {
		primary_exit(EX_NOINPUT, "Unable to read activemap");
	}
	activemap_copyin(res->hr_amp, buf, mapsize);
	free(buf);
	if (res->hr_resuid != 0)
		return;
	/*
	 * We're using provider for the first time, so we have to generate
	 * resource unique identifier and initialize local and remote counts.
	 */
	arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid));
	res->hr_primary_localcnt = 1;
	res->hr_primary_remotecnt = 0;
	if (metadata_write(res) < 0)
		exit(EX_NOINPUT);
}

/*
 * Perform the two-step handshake with the secondary: exchange a token on
 * the outgoing connection, then authenticate the incoming connection and
 * exchange counters/activemap. On success either stores the connections
 * in *inp/*outp (when both are non-NULL) or directly in the resource.
 * Returns true on success, false on any (non-fatal) failure.
 */
static bool
init_remote(struct hast_resource *res, struct proto_conn **inp,
    struct proto_conn **outp)
{
	struct proto_conn *in, *out;
	struct nv *nvout, *nvin;
	const unsigned char *token;
	unsigned char *map;
	const char *errmsg;
	int32_t extentsize;
	int64_t datasize;
	uint32_t mapsize;
	size_t size;

	/* Callers pass both output pointers or neither. */
	assert((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL));
	assert(real_remote(res));

	in = out = NULL;

	/* Prepare outgoing connection with remote node. */
	if (proto_client(res->hr_remoteaddr, &out) < 0) {
		primary_exit(EX_TEMPFAIL, "Unable to create connection to %s",
		    res->hr_remoteaddr);
	}
	/* Try to connect, but accept failure. */
	if (proto_connect(out) < 0) {
		pjdlog_errno(LOG_WARNING, "Unable to connect to %s",
		    res->hr_remoteaddr);
		goto close;
	}
	/* Error in setting timeout is not critical, but why should it fail? */
	if (proto_timeout(out, res->hr_timeout) < 0)
		pjdlog_errno(LOG_WARNING, "Unable to set connection timeout");
	/*
	 * First handshake step.
	 * Setup outgoing connection with remote node.
	 */
	nvout = nv_alloc();
	nv_add_string(nvout, res->hr_name, "resource");
	if (nv_error(nvout) != 0) {
		pjdlog_common(LOG_WARNING, 0, nv_error(nvout),
		    "Unable to allocate header for connection with %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	if (hast_proto_send(res, out, nvout, NULL, 0) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to send handshake header to %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	nv_free(nvout);
	if (hast_proto_recv_hdr(out, &nvin) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to receive handshake header from %s",
		    res->hr_remoteaddr);
		goto close;
	}
	errmsg = nv_get_string(nvin, "errmsg");
	if (errmsg != NULL) {
		pjdlog_warning("%s", errmsg);
		nv_free(nvin);
		goto close;
	}
	token = nv_get_uint8_array(nvin, &size, "token");
	if (token == NULL) {
		pjdlog_warning("Handshake header from %s has no 'token' field.",
		    res->hr_remoteaddr);
		nv_free(nvin);
		goto close;
	}
	if (size != sizeof(res->hr_token)) {
		pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).",
		    res->hr_remoteaddr, size, sizeof(res->hr_token));
		nv_free(nvin);
		goto close;
	}
	bcopy(token, res->hr_token, sizeof(res->hr_token));
	nv_free(nvin);

	/*
	 * Second handshake step.
	 * Setup incoming connection with remote node.
	 */
	if (proto_client(res->hr_remoteaddr, &in) < 0) {
		/*
		 * NOTE(review): unlike the 'out' connection above, failure
		 * here only warns and falls through to proto_connect(in)
		 * with 'in' possibly left NULL — confirm this against the
		 * proto_client() contract; other failure paths goto close.
		 */
		pjdlog_errno(LOG_WARNING, "Unable to create connection to %s",
		    res->hr_remoteaddr);
	}
	/* Try to connect, but accept failure. */
	if (proto_connect(in) < 0) {
		pjdlog_errno(LOG_WARNING, "Unable to connect to %s",
		    res->hr_remoteaddr);
		goto close;
	}
	/* Error in setting timeout is not critical, but why should it fail? */
	if (proto_timeout(in, res->hr_timeout) < 0)
		pjdlog_errno(LOG_WARNING, "Unable to set connection timeout");
	nvout = nv_alloc();
	nv_add_string(nvout, res->hr_name, "resource");
	nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token),
	    "token");
	nv_add_uint64(nvout, res->hr_resuid, "resuid");
	nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt");
	nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt");
	if (nv_error(nvout) != 0) {
		pjdlog_common(LOG_WARNING, 0, nv_error(nvout),
		    "Unable to allocate header for connection with %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	if (hast_proto_send(res, in, nvout, NULL, 0) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to send handshake header to %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	nv_free(nvout);
	/* Reply to the second step arrives on the outgoing connection. */
	if (hast_proto_recv_hdr(out, &nvin) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to receive handshake header from %s",
		    res->hr_remoteaddr);
		goto close;
	}
	errmsg = nv_get_string(nvin, "errmsg");
	if (errmsg != NULL) {
		pjdlog_warning("%s", errmsg);
		nv_free(nvin);
		goto close;
	}
	datasize = nv_get_int64(nvin, "datasize");
	if (datasize != res->hr_datasize) {
		pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).",
		    (intmax_t)res->hr_datasize, (intmax_t)datasize);
		nv_free(nvin);
		goto close;
	}
	extentsize = nv_get_int32(nvin, "extentsize");
	if (extentsize != res->hr_extentsize) {
		pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).",
		    (ssize_t)res->hr_extentsize, (ssize_t)extentsize);
		nv_free(nvin);
		goto close;
	}
	res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt");
	res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt");
	res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc");
	map = NULL;
	mapsize = nv_get_uint32(nvin, "mapsize");
	if (mapsize > 0) {
		map = malloc(mapsize);
		if (map == NULL) {
			pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).",
			    (uintmax_t)mapsize);
			nv_free(nvin);
			goto close;
		}
		/*
		 * Remote node has some dirty extents on its own, lets
		 * download its activemap.
		 */
		if (hast_proto_recv_data(res, out, nvin, map,
		    mapsize) < 0) {
			pjdlog_errno(LOG_ERR,
			    "Unable to receive remote activemap");
			nv_free(nvin);
			free(map);
			goto close;
		}
		/*
		 * Merge local and remote bitmaps.
		 */
		activemap_merge(res->hr_amp, map, mapsize);
		free(map);
		/*
		 * Now that we merged bitmaps from both nodes, flush it to the
		 * disk before we start to synchronize.
		 */
		(void)hast_activemap_flush(res);
	}
	/* NOTE(review): nvin does not appear to be freed on this success
	 * path — possible leak, verify. */
	pjdlog_info("Connected to %s.", res->hr_remoteaddr);
	if (inp != NULL && outp != NULL) {
		*inp = in;
		*outp = out;
	} else {
		res->hr_remotein = in;
		res->hr_remoteout = out;
	}
	return (true);
close:
	proto_close(out);
	if (in != NULL)
		proto_close(in);
	return (false);
}

/* Mark synchronization as in progress and wake the sync thread. */
static void
sync_start(void)
{

	mtx_lock(&sync_lock);
	sync_inprogress = true;
	mtx_unlock(&sync_lock);
	cv_signal(&sync_cond);
}

/* Clear the in-progress flag so the sync thread stops at next check. */
static void
sync_stop(void)
{

	mtx_lock(&sync_lock);
	if (sync_inprogress)
		sync_inprogress = false;
	mtx_unlock(&sync_lock);
}

/*
 * Create the GEOM Gate provider hast/<name>, or take over an existing
 * one left behind by a dead worker (EEXIST + G_GATE_CMD_CANCEL path).
 */
static void
init_ggate(struct hast_resource *res)
{
	struct g_gate_ctl_create ggiocreate;
	struct g_gate_ctl_cancel ggiocancel;

	/*
	 * We communicate with ggate via /dev/ggctl. Open it.
	 */
	res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR);
	if (res->hr_ggatefd < 0)
		primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME);
	/*
	 * Create provider before trying to connect, as connection failure
	 * is not critical, but may take some time.
	 */
	ggiocreate.gctl_version = G_GATE_VERSION;
	ggiocreate.gctl_mediasize = res->hr_datasize;
	ggiocreate.gctl_sectorsize = res->hr_local_sectorsize;
	ggiocreate.gctl_flags = 0;
	ggiocreate.gctl_maxcount = G_GATE_MAX_QUEUE_SIZE;
	ggiocreate.gctl_timeout = 0;
	ggiocreate.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s",
	    res->hr_provname);
	bzero(ggiocreate.gctl_info, sizeof(ggiocreate.gctl_info));
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) {
		pjdlog_info("Device hast/%s created.", res->hr_provname);
		res->hr_ggateunit = ggiocreate.gctl_unit;
		return;
	}
	if (errno != EEXIST) {
		primary_exit(EX_OSERR, "Unable to create hast/%s device",
		    res->hr_provname);
	}
	pjdlog_debug(1,
	    "Device hast/%s already exists, we will try to take it over.",
	    res->hr_provname);
	/*
	 * If we received EEXIST, we assume that the process who created the
	 * provider died and didn't clean up. In that case we will start from
	 * where it left off.
	 */
	ggiocancel.gctl_version = G_GATE_VERSION;
	ggiocancel.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s",
	    res->hr_provname);
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) {
		pjdlog_info("Device hast/%s recovered.", res->hr_provname);
		res->hr_ggateunit = ggiocancel.gctl_unit;
		return;
	}
	primary_exit(EX_OSERR, "Unable to take over hast/%s device",
	    res->hr_provname);
}

/*
 * Entry point for the primary role. The parent forks a worker child;
 * the parent returns after recording the worker pid, while the child
 * initializes everything and runs the worker threads (never returns).
 */
void
hastd_primary(struct hast_resource *res)
{
	pthread_t td;
	pid_t pid;
	int error;

	gres = res;

	/*
	 * Create communication channel between parent and child.
	 */
	if (proto_client("socketpair://", &res->hr_ctrl) < 0) {
		KEEP_ERRNO((void)pidfile_remove(pfh));
		primary_exit(EX_OSERR,
		    "Unable to create control sockets between parent and child");
	}

	pid = fork();
	if (pid < 0) {
		KEEP_ERRNO((void)pidfile_remove(pfh));
		primary_exit(EX_TEMPFAIL, "Unable to fork");
	}

	if (pid > 0) {
		/* This is parent. */
		res->hr_workerpid = pid;
		return;
	}
	(void)pidfile_close(pfh);

	setproctitle("%s (primary)", res->hr_name);

	/* Worker child: restore default dispositions inherited from hastd. */
	signal(SIGHUP, SIG_DFL);
	signal(SIGCHLD, SIG_DFL);

	init_local(res);
	if (real_remote(res) && init_remote(res, NULL, NULL))
		sync_start();
	init_ggate(res);
	init_environment(res);
	error = pthread_create(&td, NULL, ggate_recv_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, local_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, remote_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, remote_recv_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, ggate_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, sync_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, ctrl_thread, res);
	assert(error == 0);
	/* The main thread becomes the guard thread. */
	(void)guard_thread(res);
}

/*
 * Format and log a request description: the fmt prefix followed by the
 * ggate command name and its offset/length. Appends nothing if the
 * prefix alone already filled (or overflowed) the buffer.
 */
static void
reqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio, const char *fmt, ...)
{
	char msg[1024];
	va_list ap;
	int len;

	va_start(ap, fmt);
	len = vsnprintf(msg, sizeof(msg), fmt, ap);
	va_end(ap);
	if ((size_t)len < sizeof(msg)) {
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "READ(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		case BIO_DELETE:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "DELETE(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		case BIO_FLUSH:
			(void)snprintf(msg + len, sizeof(msg) - len, "FLUSH.");
			break;
		case BIO_WRITE:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "WRITE(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		default:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "UNKNOWN(%u).", (unsigned int)ggio->gctl_cmd);
			break;
		}
	}
	pjdlog_common(loglevel, debuglevel, -1, "%s", msg);
}

/*
 * Close both remote connections (if still open), stop any in-progress
 * synchronization and wake the guard thread so it starts reconnecting.
 */
static void
remote_close(struct hast_resource *res, int ncomp)
{

	rw_wlock(&hio_remote_lock[ncomp]);
	/*
	 * A race is possible between dropping rlock and acquiring wlock -
	 * another thread can close connection in-between.
	 */
	if (!ISCONNECTED(res, ncomp)) {
		assert(res->hr_remotein == NULL);
		assert(res->hr_remoteout == NULL);
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	assert(res->hr_remotein != NULL);
	assert(res->hr_remoteout != NULL);

	pjdlog_debug(2, "Closing old incoming connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remotein);
	res->hr_remotein = NULL;
	pjdlog_debug(2, "Closing old outgoing connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remoteout);
	res->hr_remoteout = NULL;

	rw_unlock(&hio_remote_lock[ncomp]);

	/*
	 * Stop synchronization if in-progress.
	 */
	sync_stop();

	/*
	 * Wake up guard thread, so it can immediately start reconnect.
	 */
	mtx_lock(&hio_guard_lock);
	cv_signal(&hio_guard_cond);
	mtx_unlock(&hio_guard_lock);
}

/*
 * Thread receives ggate I/O requests from the kernel and passes them to
 * appropriate threads:
 * WRITE - always goes to both local_send and remote_send threads
 * READ (when the block is up-to-date on local component) -
 *	only local_send thread
 * READ (when the block isn't up-to-date on local component) -
 *	only remote_send thread
 * DELETE - always goes to both local_send and remote_send threads
 * FLUSH - always goes to both local_send and remote_send threads
 */
static void *
ggate_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomp, ncomps;
	int error;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		pjdlog_debug(2, "ggate_recv: Taking free request.");
		QUEUE_TAKE2(hio, free);
		pjdlog_debug(2, "ggate_recv: (%p) Got free request.", hio);
		ggio = &hio->hio_ggio;
		ggio->gctl_unit = res->hr_ggateunit;
		ggio->gctl_length = MAXPHYS;
		ggio->gctl_error = 0;
		pjdlog_debug(2,
		    "ggate_recv: (%p) Waiting for request from the kernel.",
		    hio);
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) < 0) {
			if (sigexit_received)
				pthread_exit(NULL);
			primary_exit(EX_OSERR, "G_GATE_CMD_START failed");
		}
		error = ggio->gctl_error;
		switch (error) {
		case 0:
			break;
		case ECANCELED:
			/* Exit gracefully. */
			if (!sigexit_received) {
				pjdlog_debug(2,
				    "ggate_recv: (%p) Received cancel from the kernel.",
				    hio);
				pjdlog_info("Received cancel from the kernel, exiting.");
			}
			pthread_exit(NULL);
		case ENOMEM:
			/*
			 * Buffer too small? Impossible, we allocate MAXPHYS
			 * bytes - request can't be bigger than that.
			 */
			/* FALLTHROUGH */
		case ENXIO:
		default:
			primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.",
			    strerror(error));
		}
		/* Errors default to EINVAL until a component reports. */
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio,
		    "ggate_recv: (%p) Request received from the kernel: ",
		    hio);
		/*
		 * Inform all components about new write request.
		 * For read request prefer local component unless the given
		 * range is out-of-date, then use remote component.
		 */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queue.",
			    hio);
			/* Reads are served by exactly one component. */
			refcount_init(&hio->hio_countdown, 1);
			mtx_lock(&metadata_lock);
			if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF ||
			    res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
				/*
				 * This range is up-to-date on local component,
				 * so handle request locally.
				 */
				/* Local component is 0 for now. */
				ncomp = 0;
			} else /* if (res->hr_syncsrc ==
			    HAST_SYNCSRC_SECONDARY) */ {
				assert(res->hr_syncsrc ==
				    HAST_SYNCSRC_SECONDARY);
				/*
				 * This range is out-of-date on local component,
				 * so send request to the remote node.
				 */
				/* Remote component is 1 for now. */
				ncomp = 1;
			}
			mtx_unlock(&metadata_lock);
			QUEUE_INSERT1(hio, send, ncomp);
			break;
		case BIO_WRITE:
			/*
			 * Wait until the range is not covered by the sync
			 * thread, then register it in the regular range lock.
			 */
			for (;;) {
				mtx_lock(&range_lock);
				if (rangelock_islocked(range_sync,
				    ggio->gctl_offset, ggio->gctl_length)) {
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu locked.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					range_regular_wait = true;
					cv_wait(&range_regular_cond, &range_lock);
					range_regular_wait = false;
					mtx_unlock(&range_lock);
					continue;
				}
				if (rangelock_add(range_regular,
				    ggio->gctl_offset, ggio->gctl_length) < 0) {
					mtx_unlock(&range_lock);
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu is already locked, waiting.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					sleep(1);
					continue;
				}
				mtx_unlock(&range_lock);
				break;
			}
			/* Mark extents dirty; flush activemap if it changed. */
			mtx_lock(&res->hr_amp_lock);
			if (activemap_write_start(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length)) {
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
			/* FALLTHROUGH */
		case BIO_DELETE:
		case BIO_FLUSH:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queues.",
			    hio);
			/* Writes/deletes/flushes go to every component. */
			refcount_init(&hio->hio_countdown, ncomps);
			for (ii = 0; ii < ncomps; ii++)
				QUEUE_INSERT1(hio, send, ii);
			break;
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread reads from or writes to local component.
 * If local read fails, it redirects it to remote_send thread.
 */
static void *
local_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ncomp, rncomp;
	ssize_t ret;

	/* Local component is 0 for now. */
	ncomp = 0;
	/* Remote component is 1 for now.
*/ 1061 rncomp = 1; 1062 1063 for (;;) { 1064 pjdlog_debug(2, "local_send: Taking request."); 1065 QUEUE_TAKE1(hio, send, ncomp); 1066 pjdlog_debug(2, "local_send: (%p) Got request.", hio); 1067 ggio = &hio->hio_ggio; 1068 switch (ggio->gctl_cmd) { 1069 case BIO_READ: 1070 ret = pread(res->hr_localfd, ggio->gctl_data, 1071 ggio->gctl_length, 1072 ggio->gctl_offset + res->hr_localoff); 1073 if (ret == ggio->gctl_length) 1074 hio->hio_errors[ncomp] = 0; 1075 else { 1076 /* 1077 * If READ failed, try to read from remote node. 1078 */ 1079 QUEUE_INSERT1(hio, send, rncomp); 1080 continue; 1081 } 1082 break; 1083 case BIO_WRITE: 1084 ret = pwrite(res->hr_localfd, ggio->gctl_data, 1085 ggio->gctl_length, 1086 ggio->gctl_offset + res->hr_localoff); 1087 if (ret < 0) 1088 hio->hio_errors[ncomp] = errno; 1089 else if (ret != ggio->gctl_length) 1090 hio->hio_errors[ncomp] = EIO; 1091 else 1092 hio->hio_errors[ncomp] = 0; 1093 break; 1094 case BIO_DELETE: 1095 ret = g_delete(res->hr_localfd, 1096 ggio->gctl_offset + res->hr_localoff, 1097 ggio->gctl_length); 1098 if (ret < 0) 1099 hio->hio_errors[ncomp] = errno; 1100 else 1101 hio->hio_errors[ncomp] = 0; 1102 break; 1103 case BIO_FLUSH: 1104 ret = g_flush(res->hr_localfd); 1105 if (ret < 0) 1106 hio->hio_errors[ncomp] = errno; 1107 else 1108 hio->hio_errors[ncomp] = 0; 1109 break; 1110 } 1111 if (refcount_release(&hio->hio_countdown)) { 1112 if (ISSYNCREQ(hio)) { 1113 mtx_lock(&sync_lock); 1114 SYNCREQDONE(hio); 1115 mtx_unlock(&sync_lock); 1116 cv_signal(&sync_cond); 1117 } else { 1118 pjdlog_debug(2, 1119 "local_send: (%p) Moving request to the done queue.", 1120 hio); 1121 QUEUE_INSERT2(hio, done); 1122 } 1123 } 1124 } 1125 /* NOTREACHED */ 1126 return (NULL); 1127} 1128 1129/* 1130 * Thread sends request to secondary node. 
1131 */ 1132static void * 1133remote_send_thread(void *arg) 1134{ 1135 struct hast_resource *res = arg; 1136 struct g_gate_ctl_io *ggio; 1137 struct hio *hio; 1138 struct nv *nv; 1139 unsigned int ncomp; 1140 bool wakeup; 1141 uint64_t offset, length; 1142 uint8_t cmd; 1143 void *data; 1144 1145 /* Remote component is 1 for now. */ 1146 ncomp = 1; 1147 1148 for (;;) { 1149 pjdlog_debug(2, "remote_send: Taking request."); 1150 QUEUE_TAKE1(hio, send, ncomp); 1151 pjdlog_debug(2, "remote_send: (%p) Got request.", hio); 1152 ggio = &hio->hio_ggio; 1153 switch (ggio->gctl_cmd) { 1154 case BIO_READ: 1155 cmd = HIO_READ; 1156 data = NULL; 1157 offset = ggio->gctl_offset; 1158 length = ggio->gctl_length; 1159 break; 1160 case BIO_WRITE: 1161 cmd = HIO_WRITE; 1162 data = ggio->gctl_data; 1163 offset = ggio->gctl_offset; 1164 length = ggio->gctl_length; 1165 break; 1166 case BIO_DELETE: 1167 cmd = HIO_DELETE; 1168 data = NULL; 1169 offset = ggio->gctl_offset; 1170 length = ggio->gctl_length; 1171 break; 1172 case BIO_FLUSH: 1173 cmd = HIO_FLUSH; 1174 data = NULL; 1175 offset = 0; 1176 length = 0; 1177 break; 1178 default: 1179 assert(!"invalid condition"); 1180 abort(); 1181 } 1182 nv = nv_alloc(); 1183 nv_add_uint8(nv, cmd, "cmd"); 1184 nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq"); 1185 nv_add_uint64(nv, offset, "offset"); 1186 nv_add_uint64(nv, length, "length"); 1187 if (nv_error(nv) != 0) { 1188 hio->hio_errors[ncomp] = nv_error(nv); 1189 pjdlog_debug(2, 1190 "remote_send: (%p) Unable to prepare header to send.", 1191 hio); 1192 reqlog(LOG_ERR, 0, ggio, 1193 "Unable to prepare header to send (%s): ", 1194 strerror(nv_error(nv))); 1195 /* Move failed request immediately to the done queue. */ 1196 goto done_queue; 1197 } 1198 pjdlog_debug(2, 1199 "remote_send: (%p) Moving request to the recv queue.", 1200 hio); 1201 /* 1202 * Protect connection from disappearing. 
1203 */ 1204 rw_rlock(&hio_remote_lock[ncomp]); 1205 if (!ISCONNECTED(res, ncomp)) { 1206 rw_unlock(&hio_remote_lock[ncomp]); 1207 hio->hio_errors[ncomp] = ENOTCONN; 1208 goto done_queue; 1209 } 1210 /* 1211 * Move the request to recv queue before sending it, because 1212 * in different order we can get reply before we move request 1213 * to recv queue. 1214 */ 1215 mtx_lock(&hio_recv_list_lock[ncomp]); 1216 wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]); 1217 TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]); 1218 mtx_unlock(&hio_recv_list_lock[ncomp]); 1219 if (hast_proto_send(res, res->hr_remoteout, nv, data, 1220 data != NULL ? length : 0) < 0) { 1221 hio->hio_errors[ncomp] = errno; 1222 rw_unlock(&hio_remote_lock[ncomp]); 1223 remote_close(res, ncomp); 1224 pjdlog_debug(2, 1225 "remote_send: (%p) Unable to send request.", hio); 1226 reqlog(LOG_ERR, 0, ggio, 1227 "Unable to send request (%s): ", 1228 strerror(hio->hio_errors[ncomp])); 1229 /* 1230 * Take request back from the receive queue and move 1231 * it immediately to the done queue. 
1232 */ 1233 mtx_lock(&hio_recv_list_lock[ncomp]); 1234 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, hio_next[ncomp]); 1235 mtx_unlock(&hio_recv_list_lock[ncomp]); 1236 goto done_queue; 1237 } 1238 rw_unlock(&hio_remote_lock[ncomp]); 1239 nv_free(nv); 1240 if (wakeup) 1241 cv_signal(&hio_recv_list_cond[ncomp]); 1242 continue; 1243done_queue: 1244 nv_free(nv); 1245 if (ISSYNCREQ(hio)) { 1246 if (!refcount_release(&hio->hio_countdown)) 1247 continue; 1248 mtx_lock(&sync_lock); 1249 SYNCREQDONE(hio); 1250 mtx_unlock(&sync_lock); 1251 cv_signal(&sync_cond); 1252 continue; 1253 } 1254 if (ggio->gctl_cmd == BIO_WRITE) { 1255 mtx_lock(&res->hr_amp_lock); 1256 if (activemap_need_sync(res->hr_amp, ggio->gctl_offset, 1257 ggio->gctl_length)) { 1258 (void)hast_activemap_flush(res); 1259 } 1260 mtx_unlock(&res->hr_amp_lock); 1261 } 1262 if (!refcount_release(&hio->hio_countdown)) 1263 continue; 1264 pjdlog_debug(2, 1265 "remote_send: (%p) Moving request to the done queue.", 1266 hio); 1267 QUEUE_INSERT2(hio, done); 1268 } 1269 /* NOTREACHED */ 1270 return (NULL); 1271} 1272 1273/* 1274 * Thread receives answer from secondary node and passes it to ggate_send 1275 * thread. 1276 */ 1277static void * 1278remote_recv_thread(void *arg) 1279{ 1280 struct hast_resource *res = arg; 1281 struct g_gate_ctl_io *ggio; 1282 struct hio *hio; 1283 struct nv *nv; 1284 unsigned int ncomp; 1285 uint64_t seq; 1286 int error; 1287 1288 /* Remote component is 1 for now. */ 1289 ncomp = 1; 1290 1291 for (;;) { 1292 /* Wait until there is anything to receive. 
*/ 1293 mtx_lock(&hio_recv_list_lock[ncomp]); 1294 while (TAILQ_EMPTY(&hio_recv_list[ncomp])) { 1295 pjdlog_debug(2, "remote_recv: No requests, waiting."); 1296 cv_wait(&hio_recv_list_cond[ncomp], 1297 &hio_recv_list_lock[ncomp]); 1298 } 1299 mtx_unlock(&hio_recv_list_lock[ncomp]); 1300 rw_rlock(&hio_remote_lock[ncomp]); 1301 if (!ISCONNECTED(res, ncomp)) { 1302 rw_unlock(&hio_remote_lock[ncomp]); 1303 /* 1304 * Connection is dead, so move all pending requests to 1305 * the done queue (one-by-one). 1306 */ 1307 mtx_lock(&hio_recv_list_lock[ncomp]); 1308 hio = TAILQ_FIRST(&hio_recv_list[ncomp]); 1309 assert(hio != NULL); 1310 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1311 hio_next[ncomp]); 1312 mtx_unlock(&hio_recv_list_lock[ncomp]); 1313 goto done_queue; 1314 } 1315 if (hast_proto_recv_hdr(res->hr_remotein, &nv) < 0) { 1316 pjdlog_errno(LOG_ERR, 1317 "Unable to receive reply header"); 1318 rw_unlock(&hio_remote_lock[ncomp]); 1319 remote_close(res, ncomp); 1320 continue; 1321 } 1322 rw_unlock(&hio_remote_lock[ncomp]); 1323 seq = nv_get_uint64(nv, "seq"); 1324 if (seq == 0) { 1325 pjdlog_error("Header contains no 'seq' field."); 1326 nv_free(nv); 1327 continue; 1328 } 1329 mtx_lock(&hio_recv_list_lock[ncomp]); 1330 TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) { 1331 if (hio->hio_ggio.gctl_seq == seq) { 1332 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1333 hio_next[ncomp]); 1334 break; 1335 } 1336 } 1337 mtx_unlock(&hio_recv_list_lock[ncomp]); 1338 if (hio == NULL) { 1339 pjdlog_error("Found no request matching received 'seq' field (%ju).", 1340 (uintmax_t)seq); 1341 nv_free(nv); 1342 continue; 1343 } 1344 error = nv_get_int16(nv, "error"); 1345 if (error != 0) { 1346 /* Request failed on remote side. 
*/ 1347 hio->hio_errors[ncomp] = 0; 1348 nv_free(nv); 1349 goto done_queue; 1350 } 1351 ggio = &hio->hio_ggio; 1352 switch (ggio->gctl_cmd) { 1353 case BIO_READ: 1354 rw_rlock(&hio_remote_lock[ncomp]); 1355 if (!ISCONNECTED(res, ncomp)) { 1356 rw_unlock(&hio_remote_lock[ncomp]); 1357 nv_free(nv); 1358 goto done_queue; 1359 } 1360 if (hast_proto_recv_data(res, res->hr_remotein, nv, 1361 ggio->gctl_data, ggio->gctl_length) < 0) { 1362 hio->hio_errors[ncomp] = errno; 1363 pjdlog_errno(LOG_ERR, 1364 "Unable to receive reply data"); 1365 rw_unlock(&hio_remote_lock[ncomp]); 1366 nv_free(nv); 1367 remote_close(res, ncomp); 1368 goto done_queue; 1369 } 1370 rw_unlock(&hio_remote_lock[ncomp]); 1371 break; 1372 case BIO_WRITE: 1373 case BIO_DELETE: 1374 case BIO_FLUSH: 1375 break; 1376 default: 1377 assert(!"invalid condition"); 1378 abort(); 1379 } 1380 hio->hio_errors[ncomp] = 0; 1381 nv_free(nv); 1382done_queue: 1383 if (refcount_release(&hio->hio_countdown)) { 1384 if (ISSYNCREQ(hio)) { 1385 mtx_lock(&sync_lock); 1386 SYNCREQDONE(hio); 1387 mtx_unlock(&sync_lock); 1388 cv_signal(&sync_cond); 1389 } else { 1390 pjdlog_debug(2, 1391 "remote_recv: (%p) Moving request to the done queue.", 1392 hio); 1393 QUEUE_INSERT2(hio, done); 1394 } 1395 } 1396 } 1397 /* NOTREACHED */ 1398 return (NULL); 1399} 1400 1401/* 1402 * Thread sends answer to the kernel. 1403 */ 1404static void * 1405ggate_send_thread(void *arg) 1406{ 1407 struct hast_resource *res = arg; 1408 struct g_gate_ctl_io *ggio; 1409 struct hio *hio; 1410 unsigned int ii, ncomp, ncomps; 1411 1412 ncomps = HAST_NCOMPONENTS; 1413 1414 for (;;) { 1415 pjdlog_debug(2, "ggate_send: Taking request."); 1416 QUEUE_TAKE2(hio, done); 1417 pjdlog_debug(2, "ggate_send: (%p) Got request.", hio); 1418 ggio = &hio->hio_ggio; 1419 for (ii = 0; ii < ncomps; ii++) { 1420 if (hio->hio_errors[ii] == 0) { 1421 /* 1422 * One successful request is enough to declare 1423 * success. 
1424 */ 1425 ggio->gctl_error = 0; 1426 break; 1427 } 1428 } 1429 if (ii == ncomps) { 1430 /* 1431 * None of the requests were successful. 1432 * Use first error. 1433 */ 1434 ggio->gctl_error = hio->hio_errors[0]; 1435 } 1436 if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) { 1437 mtx_lock(&res->hr_amp_lock); 1438 activemap_write_complete(res->hr_amp, 1439 ggio->gctl_offset, ggio->gctl_length); 1440 mtx_unlock(&res->hr_amp_lock); 1441 } 1442 if (ggio->gctl_cmd == BIO_WRITE) { 1443 /* 1444 * Unlock range we locked. 1445 */ 1446 mtx_lock(&range_lock); 1447 rangelock_del(range_regular, ggio->gctl_offset, 1448 ggio->gctl_length); 1449 if (range_sync_wait) 1450 cv_signal(&range_sync_cond); 1451 mtx_unlock(&range_lock); 1452 /* 1453 * Bump local count if this is first write after 1454 * connection failure with remote node. 1455 */ 1456 ncomp = 1; 1457 rw_rlock(&hio_remote_lock[ncomp]); 1458 if (!ISCONNECTED(res, ncomp)) { 1459 mtx_lock(&metadata_lock); 1460 if (res->hr_primary_localcnt == 1461 res->hr_secondary_remotecnt) { 1462 res->hr_primary_localcnt++; 1463 pjdlog_debug(1, 1464 "Increasing localcnt to %ju.", 1465 (uintmax_t)res->hr_primary_localcnt); 1466 (void)metadata_write(res); 1467 } 1468 mtx_unlock(&metadata_lock); 1469 } 1470 rw_unlock(&hio_remote_lock[ncomp]); 1471 } 1472 if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) < 0) 1473 primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed"); 1474 pjdlog_debug(2, 1475 "ggate_send: (%p) Moving request to the free queue.", hio); 1476 QUEUE_INSERT2(hio, free); 1477 } 1478 /* NOTREACHED */ 1479 return (NULL); 1480} 1481 1482/* 1483 * Thread synchronize local and remote components. 
 */
static void *
sync_thread(void *arg __unused)
{
	/*
	 * NOTE(review): arg is marked __unused in the signature but is in
	 * fact dereferenced below - the annotation looks stale; confirm
	 * against how the thread is created.
	 */
	struct hast_resource *res = arg;
	struct hio *hio;
	struct g_gate_ctl_io *ggio;
	unsigned int ii, ncomp, ncomps;
	off_t offset, length, synced;
	bool dorewind;
	int syncext;

	ncomps = HAST_NCOMPONENTS;
	dorewind = true;
	synced = 0;

	for (;;) {
		/* Sleep until synchronization is (re)started. */
		mtx_lock(&sync_lock);
		while (!sync_inprogress) {
			dorewind = true;
			synced = 0;
			cv_wait(&sync_cond, &sync_lock);
		}
		mtx_unlock(&sync_lock);
		/*
		 * Obtain offset at which we should synchronize.
		 * Rewind synchronization if needed.
		 */
		mtx_lock(&res->hr_amp_lock);
		if (dorewind)
			activemap_sync_rewind(res->hr_amp);
		offset = activemap_sync_offset(res->hr_amp, &length, &syncext);
		if (syncext != -1) {
			/*
			 * We synchronized entire syncext extent, we can mark
			 * it as clean now.
			 */
			if (activemap_extent_complete(res->hr_amp, syncext))
				(void)hast_activemap_flush(res);
		}
		mtx_unlock(&res->hr_amp_lock);
		if (dorewind) {
			dorewind = false;
			if (offset < 0)
				pjdlog_info("Nodes are in sync.");
			else {
				pjdlog_info("Synchronization started. %ju bytes to go.",
				    (uintmax_t)(res->hr_extentsize *
				    activemap_ndirty(res->hr_amp)));
			}
		}
		if (offset < 0) {
			/* Negative offset means nothing left to synchronize. */
			sync_stop();
			pjdlog_debug(1, "Nothing to synchronize.");
			/*
			 * Synchronization complete, make both localcnt and
			 * remotecnt equal.
			 */
			ncomp = 1;
			rw_rlock(&hio_remote_lock[ncomp]);
			if (ISCONNECTED(res, ncomp)) {
				if (synced > 0) {
					pjdlog_info("Synchronization complete. "
					    "%jd bytes synchronized.",
					    (intmax_t)synced);
				}
				mtx_lock(&metadata_lock);
				res->hr_syncsrc = HAST_SYNCSRC_UNDEF;
				res->hr_primary_localcnt =
				    res->hr_secondary_localcnt;
				res->hr_primary_remotecnt =
				    res->hr_secondary_remotecnt;
				pjdlog_debug(1,
				    "Setting localcnt to %ju and remotecnt to %ju.",
				    (uintmax_t)res->hr_primary_localcnt,
				    (uintmax_t)res->hr_secondary_localcnt);
				(void)metadata_write(res);
				mtx_unlock(&metadata_lock);
			} else if (synced > 0) {
				pjdlog_info("Synchronization interrupted. "
				    "%jd bytes synchronized so far.",
				    (intmax_t)synced);
			}
			rw_unlock(&hio_remote_lock[ncomp]);
			continue;
		}
		pjdlog_debug(2, "sync: Taking free request.");
		QUEUE_TAKE2(hio, free);
		pjdlog_debug(2, "sync: (%p) Got free request.", hio);
		/*
		 * Lock the range we are going to synchronize. We don't want
		 * race where someone writes between our read and write.
		 */
		for (;;) {
			mtx_lock(&range_lock);
			if (rangelock_islocked(range_regular, offset, length)) {
				pjdlog_debug(2,
				    "sync: Range offset=%jd length=%jd locked.",
				    (intmax_t)offset, (intmax_t)length);
				range_sync_wait = true;
				cv_wait(&range_sync_cond, &range_lock);
				range_sync_wait = false;
				mtx_unlock(&range_lock);
				continue;
			}
			if (rangelock_add(range_sync, offset, length) < 0) {
				mtx_unlock(&range_lock);
				pjdlog_debug(2,
				    "sync: Range offset=%jd length=%jd is already locked, waiting.",
				    (intmax_t)offset, (intmax_t)length);
				sleep(1);
				continue;
			}
			mtx_unlock(&range_lock);
			break;
		}
		/*
		 * First read the data from synchronization source.
		 */
		SYNCREQ(hio);
		ggio = &hio->hio_ggio;
		ggio->gctl_cmd = BIO_READ;
		ggio->gctl_offset = offset;
		ggio->gctl_length = length;
		ggio->gctl_error = 0;
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ",
		    hio);
		pjdlog_debug(2, "sync: (%p) Moving request to the send queue.",
		    hio);
		mtx_lock(&metadata_lock);
		if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
			/*
			 * This range is up-to-date on local component,
			 * so handle request locally.
			 */
			/* Local component is 0 for now. */
			ncomp = 0;
		} else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ {
			assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY);
			/*
			 * This range is out-of-date on local component,
			 * so send request to the remote node.
			 */
			/* Remote component is 1 for now. */
			ncomp = 1;
		}
		mtx_unlock(&metadata_lock);
		refcount_init(&hio->hio_countdown, 1);
		QUEUE_INSERT1(hio, send, ncomp);

		/*
		 * Let's wait for READ to finish.
		 */
		mtx_lock(&sync_lock);
		while (!ISSYNCREQDONE(hio))
			cv_wait(&sync_cond, &sync_lock);
		mtx_unlock(&sync_lock);

		if (hio->hio_errors[ncomp] != 0) {
			pjdlog_error("Unable to read synchronization data: %s.",
			    strerror(hio->hio_errors[ncomp]));
			goto free_queue;
		}

		/*
		 * We read the data from synchronization source, now write it
		 * to synchronization target.
		 */
		SYNCREQ(hio);
		ggio->gctl_cmd = BIO_WRITE;
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ",
		    hio);
		pjdlog_debug(2, "sync: (%p) Moving request to the send queue.",
		    hio);
		mtx_lock(&metadata_lock);
		if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
			/*
			 * This range is up-to-date on local component,
			 * so we update remote component.
			 */
			/* Remote component is 1 for now. */
			ncomp = 1;
		} else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ {
			assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY);
			/*
			 * This range is out-of-date on local component,
			 * so we update it.
			 */
			/* Local component is 0 for now. */
			ncomp = 0;
		}
		mtx_unlock(&metadata_lock);

		pjdlog_debug(2, "sync: (%p) Moving request to the send queues.",
		    hio);
		refcount_init(&hio->hio_countdown, 1);
		QUEUE_INSERT1(hio, send, ncomp);

		/*
		 * Let's wait for WRITE to finish.
		 */
		mtx_lock(&sync_lock);
		while (!ISSYNCREQDONE(hio))
			cv_wait(&sync_cond, &sync_lock);
		mtx_unlock(&sync_lock);

		if (hio->hio_errors[ncomp] != 0) {
			pjdlog_error("Unable to write synchronization data: %s.",
			    strerror(hio->hio_errors[ncomp]));
			goto free_queue;
		}
free_queue:
		/* Release the sync range lock and wake blocked writers. */
		mtx_lock(&range_lock);
		rangelock_del(range_sync, offset, length);
		if (range_regular_wait)
			cv_signal(&range_regular_cond);
		mtx_unlock(&range_lock);

		synced += length;

		pjdlog_debug(2, "sync: (%p) Moving request to the free queue.",
		    hio);
		QUEUE_INSERT2(hio, free);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Record the received signal in a flag and poke the guard thread, which
 * performs the actual exit/reload work outside signal context.
 */
static void
sighandler(int sig)
{
	bool unlock;

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		sigexit_received = true;
		break;
	case SIGHUP:
		sighup_received = true;
		break;
	default:
		assert(!"invalid condition");
	}
	/*
	 * XXX: Racy, but if we cannot obtain hio_guard_lock here, we don't
	 * want to risk deadlock.
	 */
	unlock = mtx_trylock(&hio_guard_lock);
	cv_signal(&hio_guard_cond);
	if (unlock)
		mtx_unlock(&hio_guard_lock);
}

/*
 * Re-read the configuration file and apply the changes this worker can
 * handle: remote address, replication mode and timeout.  Any other change
 * (resource removed/renamed, provider or local path modified) must be
 * handled by the main hastd process, so it is refused with a warning.
 */
static void
config_reload(void)
{
	struct hastd_config *newcfg;
	struct hast_resource *res;
	unsigned int ii, ncomps;
	int modified;

	pjdlog_info("Reloading configuration...");

	ncomps = HAST_NCOMPONENTS;

	newcfg = yy_config_parse(cfgpath, false);
	if (newcfg == NULL)
		goto failed;

	/* Find our resource in the freshly parsed configuration. */
	TAILQ_FOREACH(res, &newcfg->hc_resources, hr_next) {
		if (strcmp(res->hr_name, gres->hr_name) == 0)
			break;
	}
	/*
	 * If resource was removed from the configuration file, resource
	 * name, provider name or path to local component was modified we
	 * shouldn't be here. This means that someone modified configuration
	 * file and send SIGHUP to us instead of main hastd process.
	 * Log advice and ignore the signal.
	 */
	if (res == NULL || strcmp(gres->hr_name, res->hr_name) != 0 ||
	    strcmp(gres->hr_provname, res->hr_provname) != 0 ||
	    strcmp(gres->hr_localpath, res->hr_localpath) != 0) {
		pjdlog_warning("To reload configuration send SIGHUP to the main hastd process (pid %u).",
		    (unsigned int)getppid());
		goto failed;
	}

#define MODIFIED_REMOTEADDR	0x1
#define MODIFIED_REPLICATION	0x2
#define MODIFIED_TIMEOUT	0x4
	modified = 0;
	if (strcmp(gres->hr_remoteaddr, res->hr_remoteaddr) != 0) {
		/*
		 * Don't copy res->hr_remoteaddr to gres just yet.
		 * We want remote_close() to log disconnect from the old
		 * addresses, not from the new ones.
		 */
		modified |= MODIFIED_REMOTEADDR;
	}
	if (gres->hr_replication != res->hr_replication) {
		gres->hr_replication = res->hr_replication;
		modified |= MODIFIED_REPLICATION;
	}
	if (gres->hr_timeout != res->hr_timeout) {
		gres->hr_timeout = res->hr_timeout;
		modified |= MODIFIED_TIMEOUT;
	}
	/*
	 * If only timeout was modified we only need to change it without
	 * reconnecting.
	 */
	if (modified == MODIFIED_TIMEOUT) {
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			rw_rlock(&hio_remote_lock[ii]);
			if (!ISCONNECTED(gres, ii)) {
				rw_unlock(&hio_remote_lock[ii]);
				continue;
			}
			rw_unlock(&hio_remote_lock[ii]);
			if (proto_timeout(gres->hr_remotein,
			    gres->hr_timeout) < 0) {
				pjdlog_errno(LOG_WARNING,
				    "Unable to set connection timeout");
			}
			if (proto_timeout(gres->hr_remoteout,
			    gres->hr_timeout) < 0) {
				pjdlog_errno(LOG_WARNING,
				    "Unable to set connection timeout");
			}
		}
	} else {
		/*
		 * Address or replication mode changed: drop the remote
		 * connections and let the guard thread reconnect using the
		 * new settings.
		 */
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			remote_close(gres, ii);
		}
		if (modified & MODIFIED_REMOTEADDR) {
			strlcpy(gres->hr_remoteaddr, res->hr_remoteaddr,
			    sizeof(gres->hr_remoteaddr));
		}
	}
#undef	MODIFIED_REMOTEADDR
#undef	MODIFIED_REPLICATION
#undef	MODIFIED_TIMEOUT

	pjdlog_info("Configuration reloaded successfully.");
	return;
failed:
	if (newcfg != NULL) {
		if (newcfg->hc_controlconn != NULL)
			proto_close(newcfg->hc_controlconn);
		if (newcfg->hc_listenconn != NULL)
			proto_close(newcfg->hc_listenconn);
		yy_config_free(newcfg);
	}
	pjdlog_warning("Configuration not reloaded.");
}

/*
 * Thread guards remote connections and reconnects when needed, handles
 * signals, etc.
 */
static void *
guard_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct proto_conn *in, *out;
	unsigned int ii, ncomps;
	int timeout;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		/* Deferred signal handling (flags set by sighandler()). */
		if (sigexit_received) {
			primary_exitx(EX_OK,
			    "Termination signal received, exiting.");
		}
		if (sighup_received) {
			sighup_received = false;
			config_reload();
		}
		/*
		 * If all the connection will be fine, we will sleep until
		 * someone wakes us up.
		 * If any of the connections will be broken and we won't be
		 * able to connect, we will sleep only for RECONNECT_SLEEP
		 * seconds so we can retry soon.
		 */
		timeout = 0;
		pjdlog_debug(2, "remote_guard: Checking connections.");
		mtx_lock(&hio_guard_lock);
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			rw_rlock(&hio_remote_lock[ii]);
			if (ISCONNECTED(res, ii)) {
				assert(res->hr_remotein != NULL);
				assert(res->hr_remoteout != NULL);
				rw_unlock(&hio_remote_lock[ii]);
				pjdlog_debug(2,
				    "remote_guard: Connection to %s is ok.",
				    res->hr_remoteaddr);
			} else if (real_remote(res)) {
				assert(res->hr_remotein == NULL);
				assert(res->hr_remoteout == NULL);
				/*
				 * Upgrade the lock. It doesn't have to be
				 * atomic as no other thread can change
				 * connection status from disconnected to
				 * connected.
				 */
				rw_unlock(&hio_remote_lock[ii]);
				pjdlog_debug(2,
				    "remote_guard: Reconnecting to %s.",
				    res->hr_remoteaddr);
				in = out = NULL;
				if (init_remote(res, &in, &out)) {
					rw_wlock(&hio_remote_lock[ii]);
					assert(res->hr_remotein == NULL);
					assert(res->hr_remoteout == NULL);
					assert(in != NULL && out != NULL);
					res->hr_remotein = in;
					res->hr_remoteout = out;
					rw_unlock(&hio_remote_lock[ii]);
					pjdlog_info("Successfully reconnected to %s.",
					    res->hr_remoteaddr);
					sync_start();
				} else {
					/* Both connections should be NULL. */
					assert(res->hr_remotein == NULL);
					assert(res->hr_remoteout == NULL);
					assert(in == NULL && out == NULL);
					pjdlog_debug(2,
					    "remote_guard: Reconnect to %s failed.",
					    res->hr_remoteaddr);
					timeout = RECONNECT_SLEEP;
				}
			} else {
				rw_unlock(&hio_remote_lock[ii]);
			}
		}
		/* timeout == 0 means wait until signalled. */
		(void)cv_timedwait(&hio_guard_cond, &hio_guard_lock, timeout);
		mtx_unlock(&hio_guard_lock);
	}
	/* NOTREACHED */
	return (NULL);
}