/* primary.c — HAST primary-node implementation (FreeBSD stable/9, r257470). */
1204076Spjd/*- 2204076Spjd * Copyright (c) 2009 The FreeBSD Foundation 3219351Spjd * Copyright (c) 2010-2011 Pawel Jakub Dawidek <pawel@dawidek.net> 4204076Spjd * All rights reserved. 5204076Spjd * 6204076Spjd * This software was developed by Pawel Jakub Dawidek under sponsorship from 7204076Spjd * the FreeBSD Foundation. 8204076Spjd * 9204076Spjd * Redistribution and use in source and binary forms, with or without 10204076Spjd * modification, are permitted provided that the following conditions 11204076Spjd * are met: 12204076Spjd * 1. Redistributions of source code must retain the above copyright 13204076Spjd * notice, this list of conditions and the following disclaimer. 14204076Spjd * 2. Redistributions in binary form must reproduce the above copyright 15204076Spjd * notice, this list of conditions and the following disclaimer in the 16204076Spjd * documentation and/or other materials provided with the distribution. 17204076Spjd * 18204076Spjd * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 19204076Spjd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20204076Spjd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21204076Spjd * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 22204076Spjd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23204076Spjd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24204076Spjd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25204076Spjd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26204076Spjd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27204076Spjd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28204076Spjd * SUCH DAMAGE. 
29204076Spjd */ 30204076Spjd 31204076Spjd#include <sys/cdefs.h> 32204076Spjd__FBSDID("$FreeBSD: stable/9/sbin/hastd/primary.c 257470 2013-10-31 20:31:16Z trociny $"); 33204076Spjd 34204076Spjd#include <sys/types.h> 35204076Spjd#include <sys/time.h> 36204076Spjd#include <sys/bio.h> 37204076Spjd#include <sys/disk.h> 38204076Spjd#include <sys/stat.h> 39204076Spjd 40204076Spjd#include <geom/gate/g_gate.h> 41204076Spjd 42204076Spjd#include <err.h> 43204076Spjd#include <errno.h> 44204076Spjd#include <fcntl.h> 45204076Spjd#include <libgeom.h> 46204076Spjd#include <pthread.h> 47211982Spjd#include <signal.h> 48204076Spjd#include <stdint.h> 49204076Spjd#include <stdio.h> 50204076Spjd#include <string.h> 51204076Spjd#include <sysexits.h> 52204076Spjd#include <unistd.h> 53204076Spjd 54204076Spjd#include <activemap.h> 55204076Spjd#include <nv.h> 56204076Spjd#include <rangelock.h> 57204076Spjd 58204076Spjd#include "control.h" 59212038Spjd#include "event.h" 60204076Spjd#include "hast.h" 61204076Spjd#include "hast_proto.h" 62204076Spjd#include "hastd.h" 63211886Spjd#include "hooks.h" 64204076Spjd#include "metadata.h" 65204076Spjd#include "proto.h" 66204076Spjd#include "pjdlog.h" 67249236Strociny#include "refcnt.h" 68204076Spjd#include "subr.h" 69204076Spjd#include "synch.h" 70204076Spjd 71210886Spjd/* The is only one remote component for now. */ 72210886Spjd#define ISREMOTE(no) ((no) == 1) 73210886Spjd 74204076Spjdstruct hio { 75204076Spjd /* 76204076Spjd * Number of components we are still waiting for. 77204076Spjd * When this field goes to 0, we can send the request back to the 78204076Spjd * kernel. Each component has to decrease this counter by one 79204076Spjd * even on failure. 80204076Spjd */ 81204076Spjd unsigned int hio_countdown; 82204076Spjd /* 83204076Spjd * Each component has a place to store its own error. 84204076Spjd * Once the request is handled by all components we can decide if the 85204076Spjd * request overall is successful or not. 
86204076Spjd */ 87204076Spjd int *hio_errors; 88204076Spjd /* 89219818Spjd * Structure used to communicate with GEOM Gate class. 90204076Spjd */ 91204076Spjd struct g_gate_ctl_io hio_ggio; 92229509Strociny /* 93229509Strociny * Request was already confirmed to GEOM Gate. 94229509Strociny */ 95229509Strociny bool hio_done; 96229509Strociny /* 97229509Strociny * Remember replication from the time the request was initiated, 98229509Strociny * so we won't get confused when replication changes on reload. 99229509Strociny */ 100229509Strociny int hio_replication; 101204076Spjd TAILQ_ENTRY(hio) *hio_next; 102204076Spjd}; 103204076Spjd#define hio_free_next hio_next[0] 104204076Spjd#define hio_done_next hio_next[0] 105204076Spjd 106204076Spjd/* 107204076Spjd * Free list holds unused structures. When free list is empty, we have to wait 108204076Spjd * until some in-progress requests are freed. 109204076Spjd */ 110204076Spjdstatic TAILQ_HEAD(, hio) hio_free_list; 111204076Spjdstatic pthread_mutex_t hio_free_list_lock; 112204076Spjdstatic pthread_cond_t hio_free_list_cond; 113204076Spjd/* 114204076Spjd * There is one send list for every component. One requests is placed on all 115204076Spjd * send lists - each component gets the same request, but each component is 116204076Spjd * responsible for managing his own send list. 117204076Spjd */ 118204076Spjdstatic TAILQ_HEAD(, hio) *hio_send_list; 119204076Spjdstatic pthread_mutex_t *hio_send_list_lock; 120204076Spjdstatic pthread_cond_t *hio_send_list_cond; 121204076Spjd/* 122204076Spjd * There is one recv list for every component, although local components don't 123204076Spjd * use recv lists as local requests are done synchronously. 
124204076Spjd */ 125204076Spjdstatic TAILQ_HEAD(, hio) *hio_recv_list; 126204076Spjdstatic pthread_mutex_t *hio_recv_list_lock; 127204076Spjdstatic pthread_cond_t *hio_recv_list_cond; 128204076Spjd/* 129204076Spjd * Request is placed on done list by the slowest component (the one that 130204076Spjd * decreased hio_countdown from 1 to 0). 131204076Spjd */ 132204076Spjdstatic TAILQ_HEAD(, hio) hio_done_list; 133204076Spjdstatic pthread_mutex_t hio_done_list_lock; 134204076Spjdstatic pthread_cond_t hio_done_list_cond; 135204076Spjd/* 136204076Spjd * Structure below are for interaction with sync thread. 137204076Spjd */ 138204076Spjdstatic bool sync_inprogress; 139204076Spjdstatic pthread_mutex_t sync_lock; 140204076Spjdstatic pthread_cond_t sync_cond; 141204076Spjd/* 142204076Spjd * The lock below allows to synchornize access to remote connections. 143204076Spjd */ 144204076Spjdstatic pthread_rwlock_t *hio_remote_lock; 145204076Spjd 146204076Spjd/* 147204076Spjd * Lock to synchronize metadata updates. Also synchronize access to 148204076Spjd * hr_primary_localcnt and hr_primary_remotecnt fields. 149204076Spjd */ 150204076Spjdstatic pthread_mutex_t metadata_lock; 151204076Spjd 152204076Spjd/* 153204076Spjd * Maximum number of outstanding I/O requests. 154204076Spjd */ 155204076Spjd#define HAST_HIO_MAX 256 156204076Spjd/* 157204076Spjd * Number of components. At this point there are only two components: local 158204076Spjd * and remote, but in the future it might be possible to use multiple local 159204076Spjd * and remote components. 
160204076Spjd */ 161204076Spjd#define HAST_NCOMPONENTS 2 162204076Spjd 163204076Spjd#define ISCONNECTED(res, no) \ 164204076Spjd ((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL) 165204076Spjd 166204076Spjd#define QUEUE_INSERT1(hio, name, ncomp) do { \ 167204076Spjd bool _wakeup; \ 168204076Spjd \ 169204076Spjd mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 170204076Spjd _wakeup = TAILQ_EMPTY(&hio_##name##_list[(ncomp)]); \ 171204076Spjd TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio), \ 172204076Spjd hio_next[(ncomp)]); \ 173204076Spjd mtx_unlock(&hio_##name##_list_lock[ncomp]); \ 174204076Spjd if (_wakeup) \ 175256027Strociny cv_broadcast(&hio_##name##_list_cond[(ncomp)]); \ 176204076Spjd} while (0) 177204076Spjd#define QUEUE_INSERT2(hio, name) do { \ 178204076Spjd bool _wakeup; \ 179204076Spjd \ 180204076Spjd mtx_lock(&hio_##name##_list_lock); \ 181204076Spjd _wakeup = TAILQ_EMPTY(&hio_##name##_list); \ 182204076Spjd TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\ 183204076Spjd mtx_unlock(&hio_##name##_list_lock); \ 184204076Spjd if (_wakeup) \ 185256027Strociny cv_broadcast(&hio_##name##_list_cond); \ 186204076Spjd} while (0) 187214692Spjd#define QUEUE_TAKE1(hio, name, ncomp, timeout) do { \ 188214692Spjd bool _last; \ 189214692Spjd \ 190204076Spjd mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 191214692Spjd _last = false; \ 192214692Spjd while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL && !_last) { \ 193214692Spjd cv_timedwait(&hio_##name##_list_cond[(ncomp)], \ 194214692Spjd &hio_##name##_list_lock[(ncomp)], (timeout)); \ 195219864Spjd if ((timeout) != 0) \ 196214692Spjd _last = true; \ 197204076Spjd } \ 198214692Spjd if (hio != NULL) { \ 199214692Spjd TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio), \ 200214692Spjd hio_next[(ncomp)]); \ 201214692Spjd } \ 202204076Spjd mtx_unlock(&hio_##name##_list_lock[(ncomp)]); \ 203204076Spjd} while (0) 204204076Spjd#define QUEUE_TAKE2(hio, name) do { \ 205204076Spjd 
mtx_lock(&hio_##name##_list_lock); \ 206204076Spjd while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) { \ 207204076Spjd cv_wait(&hio_##name##_list_cond, \ 208204076Spjd &hio_##name##_list_lock); \ 209204076Spjd } \ 210204076Spjd TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next); \ 211204076Spjd mtx_unlock(&hio_##name##_list_lock); \ 212204076Spjd} while (0) 213204076Spjd 214209183Spjd#define SYNCREQ(hio) do { \ 215209183Spjd (hio)->hio_ggio.gctl_unit = -1; \ 216209183Spjd (hio)->hio_ggio.gctl_seq = 1; \ 217209183Spjd} while (0) 218204076Spjd#define ISSYNCREQ(hio) ((hio)->hio_ggio.gctl_unit == -1) 219204076Spjd#define SYNCREQDONE(hio) do { (hio)->hio_ggio.gctl_unit = -2; } while (0) 220204076Spjd#define ISSYNCREQDONE(hio) ((hio)->hio_ggio.gctl_unit == -2) 221204076Spjd 222204076Spjdstatic struct hast_resource *gres; 223204076Spjd 224204076Spjdstatic pthread_mutex_t range_lock; 225204076Spjdstatic struct rangelocks *range_regular; 226204076Spjdstatic bool range_regular_wait; 227204076Spjdstatic pthread_cond_t range_regular_cond; 228204076Spjdstatic struct rangelocks *range_sync; 229204076Spjdstatic bool range_sync_wait; 230204076Spjdstatic pthread_cond_t range_sync_cond; 231220898Spjdstatic bool fullystarted; 232204076Spjd 233204076Spjdstatic void *ggate_recv_thread(void *arg); 234204076Spjdstatic void *local_send_thread(void *arg); 235204076Spjdstatic void *remote_send_thread(void *arg); 236204076Spjdstatic void *remote_recv_thread(void *arg); 237204076Spjdstatic void *ggate_send_thread(void *arg); 238204076Spjdstatic void *sync_thread(void *arg); 239204076Spjdstatic void *guard_thread(void *arg); 240204076Spjd 241211982Spjdstatic void 242204076Spjdcleanup(struct hast_resource *res) 243204076Spjd{ 244204076Spjd int rerrno; 245204076Spjd 246204076Spjd /* Remember errno. */ 247204076Spjd rerrno = errno; 248204076Spjd 249204076Spjd /* Destroy ggate provider if we created one. 
*/ 250204076Spjd if (res->hr_ggateunit >= 0) { 251204076Spjd struct g_gate_ctl_destroy ggiod; 252204076Spjd 253213533Spjd bzero(&ggiod, sizeof(ggiod)); 254204076Spjd ggiod.gctl_version = G_GATE_VERSION; 255204076Spjd ggiod.gctl_unit = res->hr_ggateunit; 256204076Spjd ggiod.gctl_force = 1; 257231017Strociny if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) == -1) { 258213531Spjd pjdlog_errno(LOG_WARNING, 259213531Spjd "Unable to destroy hast/%s device", 260204076Spjd res->hr_provname); 261204076Spjd } 262204076Spjd res->hr_ggateunit = -1; 263204076Spjd } 264204076Spjd 265204076Spjd /* Restore errno. */ 266204076Spjd errno = rerrno; 267204076Spjd} 268204076Spjd 269212899Spjdstatic __dead2 void 270204076Spjdprimary_exit(int exitcode, const char *fmt, ...) 271204076Spjd{ 272204076Spjd va_list ap; 273204076Spjd 274218138Spjd PJDLOG_ASSERT(exitcode != EX_OK); 275204076Spjd va_start(ap, fmt); 276204076Spjd pjdlogv_errno(LOG_ERR, fmt, ap); 277204076Spjd va_end(ap); 278204076Spjd cleanup(gres); 279204076Spjd exit(exitcode); 280204076Spjd} 281204076Spjd 282212899Spjdstatic __dead2 void 283204076Spjdprimary_exitx(int exitcode, const char *fmt, ...) 284204076Spjd{ 285204076Spjd va_list ap; 286204076Spjd 287204076Spjd va_start(ap, fmt); 288204076Spjd pjdlogv(exitcode == EX_OK ? LOG_INFO : LOG_ERR, fmt, ap); 289204076Spjd va_end(ap); 290204076Spjd cleanup(gres); 291204076Spjd exit(exitcode); 292204076Spjd} 293204076Spjd 294256027Strociny/* Expects res->hr_amp locked, returns unlocked. 
*/ 295204076Spjdstatic int 296204076Spjdhast_activemap_flush(struct hast_resource *res) 297204076Spjd{ 298204076Spjd const unsigned char *buf; 299204076Spjd size_t size; 300256027Strociny int ret; 301204076Spjd 302256027Strociny mtx_lock(&res->hr_amp_diskmap_lock); 303204076Spjd buf = activemap_bitmap(res->hr_amp, &size); 304256027Strociny mtx_unlock(&res->hr_amp_lock); 305218138Spjd PJDLOG_ASSERT(buf != NULL); 306218138Spjd PJDLOG_ASSERT((size % res->hr_local_sectorsize) == 0); 307256027Strociny ret = 0; 308204076Spjd if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) != 309204076Spjd (ssize_t)size) { 310229509Strociny pjdlog_errno(LOG_ERR, "Unable to flush activemap to disk"); 311247866Strociny res->hr_stat_activemap_write_error++; 312256027Strociny ret = -1; 313204076Spjd } 314256027Strociny if (ret == 0 && res->hr_metaflush == 1 && 315256027Strociny g_flush(res->hr_localfd) == -1) { 316229509Strociny if (errno == EOPNOTSUPP) { 317229509Strociny pjdlog_warning("The %s provider doesn't support flushing write cache. Disabling it.", 318229509Strociny res->hr_localpath); 319229509Strociny res->hr_metaflush = 0; 320229509Strociny } else { 321229509Strociny pjdlog_errno(LOG_ERR, 322229509Strociny "Unable to flush disk cache on activemap update"); 323247866Strociny res->hr_stat_activemap_flush_error++; 324256027Strociny ret = -1; 325229509Strociny } 326229509Strociny } 327256027Strociny mtx_unlock(&res->hr_amp_diskmap_lock); 328256027Strociny return (ret); 329204076Spjd} 330204076Spjd 331210881Spjdstatic bool 332210881Spjdreal_remote(const struct hast_resource *res) 333210881Spjd{ 334210881Spjd 335210881Spjd return (strcmp(res->hr_remoteaddr, "none") != 0); 336210881Spjd} 337210881Spjd 338204076Spjdstatic void 339204076Spjdinit_environment(struct hast_resource *res __unused) 340204076Spjd{ 341204076Spjd struct hio *hio; 342204076Spjd unsigned int ii, ncomps; 343204076Spjd 344204076Spjd /* 345204076Spjd * In the future it might be per-resource value. 
346204076Spjd */ 347204076Spjd ncomps = HAST_NCOMPONENTS; 348204076Spjd 349204076Spjd /* 350204076Spjd * Allocate memory needed by lists. 351204076Spjd */ 352204076Spjd hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps); 353204076Spjd if (hio_send_list == NULL) { 354204076Spjd primary_exitx(EX_TEMPFAIL, 355204076Spjd "Unable to allocate %zu bytes of memory for send lists.", 356204076Spjd sizeof(hio_send_list[0]) * ncomps); 357204076Spjd } 358204076Spjd hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps); 359204076Spjd if (hio_send_list_lock == NULL) { 360204076Spjd primary_exitx(EX_TEMPFAIL, 361204076Spjd "Unable to allocate %zu bytes of memory for send list locks.", 362204076Spjd sizeof(hio_send_list_lock[0]) * ncomps); 363204076Spjd } 364204076Spjd hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps); 365204076Spjd if (hio_send_list_cond == NULL) { 366204076Spjd primary_exitx(EX_TEMPFAIL, 367204076Spjd "Unable to allocate %zu bytes of memory for send list condition variables.", 368204076Spjd sizeof(hio_send_list_cond[0]) * ncomps); 369204076Spjd } 370204076Spjd hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps); 371204076Spjd if (hio_recv_list == NULL) { 372204076Spjd primary_exitx(EX_TEMPFAIL, 373204076Spjd "Unable to allocate %zu bytes of memory for recv lists.", 374204076Spjd sizeof(hio_recv_list[0]) * ncomps); 375204076Spjd } 376204076Spjd hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps); 377204076Spjd if (hio_recv_list_lock == NULL) { 378204076Spjd primary_exitx(EX_TEMPFAIL, 379204076Spjd "Unable to allocate %zu bytes of memory for recv list locks.", 380204076Spjd sizeof(hio_recv_list_lock[0]) * ncomps); 381204076Spjd } 382204076Spjd hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps); 383204076Spjd if (hio_recv_list_cond == NULL) { 384204076Spjd primary_exitx(EX_TEMPFAIL, 385204076Spjd "Unable to allocate %zu bytes of memory for recv list condition variables.", 386204076Spjd 
sizeof(hio_recv_list_cond[0]) * ncomps); 387204076Spjd } 388204076Spjd hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps); 389204076Spjd if (hio_remote_lock == NULL) { 390204076Spjd primary_exitx(EX_TEMPFAIL, 391204076Spjd "Unable to allocate %zu bytes of memory for remote connections locks.", 392204076Spjd sizeof(hio_remote_lock[0]) * ncomps); 393204076Spjd } 394204076Spjd 395204076Spjd /* 396204076Spjd * Initialize lists, their locks and theirs condition variables. 397204076Spjd */ 398204076Spjd TAILQ_INIT(&hio_free_list); 399204076Spjd mtx_init(&hio_free_list_lock); 400204076Spjd cv_init(&hio_free_list_cond); 401204076Spjd for (ii = 0; ii < HAST_NCOMPONENTS; ii++) { 402204076Spjd TAILQ_INIT(&hio_send_list[ii]); 403204076Spjd mtx_init(&hio_send_list_lock[ii]); 404204076Spjd cv_init(&hio_send_list_cond[ii]); 405204076Spjd TAILQ_INIT(&hio_recv_list[ii]); 406204076Spjd mtx_init(&hio_recv_list_lock[ii]); 407204076Spjd cv_init(&hio_recv_list_cond[ii]); 408204076Spjd rw_init(&hio_remote_lock[ii]); 409204076Spjd } 410204076Spjd TAILQ_INIT(&hio_done_list); 411204076Spjd mtx_init(&hio_done_list_lock); 412204076Spjd cv_init(&hio_done_list_cond); 413204076Spjd mtx_init(&metadata_lock); 414204076Spjd 415204076Spjd /* 416204076Spjd * Allocate requests pool and initialize requests. 
417204076Spjd */ 418204076Spjd for (ii = 0; ii < HAST_HIO_MAX; ii++) { 419204076Spjd hio = malloc(sizeof(*hio)); 420204076Spjd if (hio == NULL) { 421204076Spjd primary_exitx(EX_TEMPFAIL, 422204076Spjd "Unable to allocate %zu bytes of memory for hio request.", 423204076Spjd sizeof(*hio)); 424204076Spjd } 425204076Spjd hio->hio_countdown = 0; 426204076Spjd hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps); 427204076Spjd if (hio->hio_errors == NULL) { 428204076Spjd primary_exitx(EX_TEMPFAIL, 429204076Spjd "Unable allocate %zu bytes of memory for hio errors.", 430204076Spjd sizeof(hio->hio_errors[0]) * ncomps); 431204076Spjd } 432204076Spjd hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps); 433204076Spjd if (hio->hio_next == NULL) { 434204076Spjd primary_exitx(EX_TEMPFAIL, 435204076Spjd "Unable allocate %zu bytes of memory for hio_next field.", 436204076Spjd sizeof(hio->hio_next[0]) * ncomps); 437204076Spjd } 438204076Spjd hio->hio_ggio.gctl_version = G_GATE_VERSION; 439204076Spjd hio->hio_ggio.gctl_data = malloc(MAXPHYS); 440204076Spjd if (hio->hio_ggio.gctl_data == NULL) { 441204076Spjd primary_exitx(EX_TEMPFAIL, 442204076Spjd "Unable to allocate %zu bytes of memory for gctl_data.", 443204076Spjd MAXPHYS); 444204076Spjd } 445204076Spjd hio->hio_ggio.gctl_length = MAXPHYS; 446204076Spjd hio->hio_ggio.gctl_error = 0; 447204076Spjd TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next); 448204076Spjd } 449204076Spjd} 450204076Spjd 451214284Spjdstatic bool 452214284Spjdinit_resuid(struct hast_resource *res) 453214284Spjd{ 454214284Spjd 455214284Spjd mtx_lock(&metadata_lock); 456214284Spjd if (res->hr_resuid != 0) { 457214284Spjd mtx_unlock(&metadata_lock); 458214284Spjd return (false); 459214284Spjd } else { 460214284Spjd /* Initialize unique resource identifier. 
*/ 461214284Spjd arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid)); 462214284Spjd mtx_unlock(&metadata_lock); 463231017Strociny if (metadata_write(res) == -1) 464214284Spjd exit(EX_NOINPUT); 465214284Spjd return (true); 466214284Spjd } 467214284Spjd} 468214284Spjd 469204076Spjdstatic void 470204076Spjdinit_local(struct hast_resource *res) 471204076Spjd{ 472204076Spjd unsigned char *buf; 473204076Spjd size_t mapsize; 474204076Spjd 475231017Strociny if (metadata_read(res, true) == -1) 476204076Spjd exit(EX_NOINPUT); 477204076Spjd mtx_init(&res->hr_amp_lock); 478204076Spjd if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize, 479231017Strociny res->hr_local_sectorsize, res->hr_keepdirty) == -1) { 480204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create activemap"); 481204076Spjd } 482204076Spjd mtx_init(&range_lock); 483204076Spjd cv_init(&range_regular_cond); 484231017Strociny if (rangelock_init(&range_regular) == -1) 485204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create regular range lock"); 486204076Spjd cv_init(&range_sync_cond); 487231017Strociny if (rangelock_init(&range_sync) == -1) 488204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create sync range lock"); 489204076Spjd mapsize = activemap_ondisk_size(res->hr_amp); 490204076Spjd buf = calloc(1, mapsize); 491204076Spjd if (buf == NULL) { 492204076Spjd primary_exitx(EX_TEMPFAIL, 493204076Spjd "Unable to allocate buffer for activemap."); 494204076Spjd } 495204076Spjd if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) != 496204076Spjd (ssize_t)mapsize) { 497204076Spjd primary_exit(EX_NOINPUT, "Unable to read activemap"); 498204076Spjd } 499204076Spjd activemap_copyin(res->hr_amp, buf, mapsize); 500209181Spjd free(buf); 501204076Spjd if (res->hr_resuid != 0) 502204076Spjd return; 503204076Spjd /* 504214284Spjd * We're using provider for the first time. Initialize local and remote 505214284Spjd * counters. 
We don't initialize resuid here, as we want to do it just 506214284Spjd * in time. The reason for this is that we want to inform secondary 507214284Spjd * that there were no writes yet, so there is no need to synchronize 508214284Spjd * anything. 509204076Spjd */ 510219844Spjd res->hr_primary_localcnt = 0; 511204076Spjd res->hr_primary_remotecnt = 0; 512231017Strociny if (metadata_write(res) == -1) 513204076Spjd exit(EX_NOINPUT); 514204076Spjd} 515204076Spjd 516218218Spjdstatic int 517218218Spjdprimary_connect(struct hast_resource *res, struct proto_conn **connp) 518218218Spjd{ 519218218Spjd struct proto_conn *conn; 520218218Spjd int16_t val; 521218218Spjd 522218218Spjd val = 1; 523231017Strociny if (proto_send(res->hr_conn, &val, sizeof(val)) == -1) { 524218218Spjd primary_exit(EX_TEMPFAIL, 525218218Spjd "Unable to send connection request to parent"); 526218218Spjd } 527231017Strociny if (proto_recv(res->hr_conn, &val, sizeof(val)) == -1) { 528218218Spjd primary_exit(EX_TEMPFAIL, 529218218Spjd "Unable to receive reply to connection request from parent"); 530218218Spjd } 531218218Spjd if (val != 0) { 532218218Spjd errno = val; 533218218Spjd pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 534218218Spjd res->hr_remoteaddr); 535218218Spjd return (-1); 536218218Spjd } 537231017Strociny if (proto_connection_recv(res->hr_conn, true, &conn) == -1) { 538218218Spjd primary_exit(EX_TEMPFAIL, 539218218Spjd "Unable to receive connection from parent"); 540218218Spjd } 541231017Strociny if (proto_connect_wait(conn, res->hr_timeout) == -1) { 542218218Spjd pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 543218218Spjd res->hr_remoteaddr); 544218218Spjd proto_close(conn); 545218218Spjd return (-1); 546218218Spjd } 547218218Spjd /* Error in setting timeout is not critical, but why should it fail? 
*/ 548231017Strociny if (proto_timeout(conn, res->hr_timeout) == -1) 549218218Spjd pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 550218218Spjd 551218218Spjd *connp = conn; 552218218Spjd 553218218Spjd return (0); 554218218Spjd} 555249236Strociny 556240269Strociny/* 557240269Strociny * Function instructs GEOM_GATE to handle reads directly from within the kernel. 558240269Strociny */ 559240269Strocinystatic void 560240269Strocinyenable_direct_reads(struct hast_resource *res) 561240269Strociny{ 562240269Strociny struct g_gate_ctl_modify ggiomodify; 563218218Spjd 564240269Strociny bzero(&ggiomodify, sizeof(ggiomodify)); 565240269Strociny ggiomodify.gctl_version = G_GATE_VERSION; 566240269Strociny ggiomodify.gctl_unit = res->hr_ggateunit; 567240269Strociny ggiomodify.gctl_modify = GG_MODIFY_READPROV | GG_MODIFY_READOFFSET; 568240269Strociny strlcpy(ggiomodify.gctl_readprov, res->hr_localpath, 569240269Strociny sizeof(ggiomodify.gctl_readprov)); 570240269Strociny ggiomodify.gctl_readoffset = res->hr_localoff; 571240269Strociny if (ioctl(res->hr_ggatefd, G_GATE_CMD_MODIFY, &ggiomodify) == 0) 572240269Strociny pjdlog_debug(1, "Direct reads enabled."); 573240269Strociny else 574240269Strociny pjdlog_errno(LOG_WARNING, "Failed to enable direct reads"); 575240269Strociny} 576240269Strociny 577220898Spjdstatic int 578205738Spjdinit_remote(struct hast_resource *res, struct proto_conn **inp, 579205738Spjd struct proto_conn **outp) 580204076Spjd{ 581205738Spjd struct proto_conn *in, *out; 582204076Spjd struct nv *nvout, *nvin; 583204076Spjd const unsigned char *token; 584204076Spjd unsigned char *map; 585204076Spjd const char *errmsg; 586204076Spjd int32_t extentsize; 587204076Spjd int64_t datasize; 588204076Spjd uint32_t mapsize; 589249236Strociny uint8_t version; 590204076Spjd size_t size; 591220898Spjd int error; 592204076Spjd 593218138Spjd PJDLOG_ASSERT((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL)); 594218138Spjd 
PJDLOG_ASSERT(real_remote(res)); 595205738Spjd 596205738Spjd in = out = NULL; 597211983Spjd errmsg = NULL; 598205738Spjd 599218218Spjd if (primary_connect(res, &out) == -1) 600220898Spjd return (ECONNREFUSED); 601218218Spjd 602220898Spjd error = ECONNABORTED; 603220898Spjd 604204076Spjd /* 605204076Spjd * First handshake step. 606204076Spjd * Setup outgoing connection with remote node. 607204076Spjd */ 608204076Spjd nvout = nv_alloc(); 609204076Spjd nv_add_string(nvout, res->hr_name, "resource"); 610249236Strociny nv_add_uint8(nvout, HAST_PROTO_VERSION, "version"); 611204076Spjd if (nv_error(nvout) != 0) { 612204076Spjd pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 613204076Spjd "Unable to allocate header for connection with %s", 614204076Spjd res->hr_remoteaddr); 615204076Spjd nv_free(nvout); 616204076Spjd goto close; 617204076Spjd } 618231017Strociny if (hast_proto_send(res, out, nvout, NULL, 0) == -1) { 619204076Spjd pjdlog_errno(LOG_WARNING, 620204076Spjd "Unable to send handshake header to %s", 621204076Spjd res->hr_remoteaddr); 622204076Spjd nv_free(nvout); 623204076Spjd goto close; 624204076Spjd } 625204076Spjd nv_free(nvout); 626231017Strociny if (hast_proto_recv_hdr(out, &nvin) == -1) { 627204076Spjd pjdlog_errno(LOG_WARNING, 628204076Spjd "Unable to receive handshake header from %s", 629204076Spjd res->hr_remoteaddr); 630204076Spjd goto close; 631204076Spjd } 632204076Spjd errmsg = nv_get_string(nvin, "errmsg"); 633204076Spjd if (errmsg != NULL) { 634204076Spjd pjdlog_warning("%s", errmsg); 635220898Spjd if (nv_exists(nvin, "wait")) 636220898Spjd error = EBUSY; 637204076Spjd nv_free(nvin); 638204076Spjd goto close; 639204076Spjd } 640249236Strociny version = nv_get_uint8(nvin, "version"); 641249236Strociny if (version == 0) { 642249236Strociny /* 643249236Strociny * If no version is sent, it means this is protocol version 1. 
644249236Strociny */ 645249236Strociny version = 1; 646249236Strociny } 647249236Strociny if (version > HAST_PROTO_VERSION) { 648249236Strociny pjdlog_warning("Invalid version received (%hhu).", version); 649249236Strociny nv_free(nvin); 650249236Strociny goto close; 651249236Strociny } 652249236Strociny res->hr_version = version; 653249236Strociny pjdlog_debug(1, "Negotiated protocol version %d.", res->hr_version); 654204076Spjd token = nv_get_uint8_array(nvin, &size, "token"); 655204076Spjd if (token == NULL) { 656204076Spjd pjdlog_warning("Handshake header from %s has no 'token' field.", 657204076Spjd res->hr_remoteaddr); 658204076Spjd nv_free(nvin); 659204076Spjd goto close; 660204076Spjd } 661204076Spjd if (size != sizeof(res->hr_token)) { 662204076Spjd pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).", 663204076Spjd res->hr_remoteaddr, size, sizeof(res->hr_token)); 664204076Spjd nv_free(nvin); 665204076Spjd goto close; 666204076Spjd } 667204076Spjd bcopy(token, res->hr_token, sizeof(res->hr_token)); 668204076Spjd nv_free(nvin); 669204076Spjd 670204076Spjd /* 671204076Spjd * Second handshake step. 672204076Spjd * Setup incoming connection with remote node. 673204076Spjd */ 674218218Spjd if (primary_connect(res, &in) == -1) 675204076Spjd goto close; 676218218Spjd 677204076Spjd nvout = nv_alloc(); 678204076Spjd nv_add_string(nvout, res->hr_name, "resource"); 679204076Spjd nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token), 680204076Spjd "token"); 681214284Spjd if (res->hr_resuid == 0) { 682214284Spjd /* 683214284Spjd * The resuid field was not yet initialized. 684214284Spjd * Because we do synchronization inside init_resuid(), it is 685214284Spjd * possible that someone already initialized it, the function 686214284Spjd * will return false then, but if we successfully initialized 687214284Spjd * it, we will get true. 
True means that there were no writes 688214284Spjd * to this resource yet and we want to inform secondary that 689214284Spjd * synchronization is not needed by sending "virgin" argument. 690214284Spjd */ 691214284Spjd if (init_resuid(res)) 692214284Spjd nv_add_int8(nvout, 1, "virgin"); 693214284Spjd } 694204076Spjd nv_add_uint64(nvout, res->hr_resuid, "resuid"); 695204076Spjd nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt"); 696204076Spjd nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt"); 697204076Spjd if (nv_error(nvout) != 0) { 698204076Spjd pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 699204076Spjd "Unable to allocate header for connection with %s", 700204076Spjd res->hr_remoteaddr); 701204076Spjd nv_free(nvout); 702204076Spjd goto close; 703204076Spjd } 704231017Strociny if (hast_proto_send(res, in, nvout, NULL, 0) == -1) { 705204076Spjd pjdlog_errno(LOG_WARNING, 706204076Spjd "Unable to send handshake header to %s", 707204076Spjd res->hr_remoteaddr); 708204076Spjd nv_free(nvout); 709204076Spjd goto close; 710204076Spjd } 711204076Spjd nv_free(nvout); 712231017Strociny if (hast_proto_recv_hdr(out, &nvin) == -1) { 713204076Spjd pjdlog_errno(LOG_WARNING, 714204076Spjd "Unable to receive handshake header from %s", 715204076Spjd res->hr_remoteaddr); 716204076Spjd goto close; 717204076Spjd } 718204076Spjd errmsg = nv_get_string(nvin, "errmsg"); 719204076Spjd if (errmsg != NULL) { 720204076Spjd pjdlog_warning("%s", errmsg); 721204076Spjd nv_free(nvin); 722204076Spjd goto close; 723204076Spjd } 724204076Spjd datasize = nv_get_int64(nvin, "datasize"); 725204076Spjd if (datasize != res->hr_datasize) { 726204076Spjd pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).", 727204076Spjd (intmax_t)res->hr_datasize, (intmax_t)datasize); 728204076Spjd nv_free(nvin); 729204076Spjd goto close; 730204076Spjd } 731204076Spjd extentsize = nv_get_int32(nvin, "extentsize"); 732204076Spjd if (extentsize != res->hr_extentsize) { 733204076Spjd 
pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).", 734204076Spjd (ssize_t)res->hr_extentsize, (ssize_t)extentsize); 735204076Spjd nv_free(nvin); 736204076Spjd goto close; 737204076Spjd } 738204076Spjd res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt"); 739204076Spjd res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt"); 740204076Spjd res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc"); 741240269Strociny if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) 742240269Strociny enable_direct_reads(res); 743220865Spjd if (nv_exists(nvin, "virgin")) { 744220865Spjd /* 745220865Spjd * Secondary was reinitialized, bump localcnt if it is 0 as 746220865Spjd * only we have the data. 747220865Spjd */ 748220865Spjd PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_PRIMARY); 749220865Spjd PJDLOG_ASSERT(res->hr_secondary_localcnt == 0); 750220865Spjd 751220865Spjd if (res->hr_primary_localcnt == 0) { 752220865Spjd PJDLOG_ASSERT(res->hr_secondary_remotecnt == 0); 753220865Spjd 754220865Spjd mtx_lock(&metadata_lock); 755220865Spjd res->hr_primary_localcnt++; 756220865Spjd pjdlog_debug(1, "Increasing localcnt to %ju.", 757220865Spjd (uintmax_t)res->hr_primary_localcnt); 758220865Spjd (void)metadata_write(res); 759220865Spjd mtx_unlock(&metadata_lock); 760220865Spjd } 761220865Spjd } 762204076Spjd map = NULL; 763204076Spjd mapsize = nv_get_uint32(nvin, "mapsize"); 764204076Spjd if (mapsize > 0) { 765204076Spjd map = malloc(mapsize); 766204076Spjd if (map == NULL) { 767204076Spjd pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).", 768204076Spjd (uintmax_t)mapsize); 769204076Spjd nv_free(nvin); 770204076Spjd goto close; 771204076Spjd } 772204076Spjd /* 773204076Spjd * Remote node have some dirty extents on its own, lets 774204076Spjd * download its activemap. 
775204076Spjd */ 776205738Spjd if (hast_proto_recv_data(res, out, nvin, map, 777231017Strociny mapsize) == -1) { 778204076Spjd pjdlog_errno(LOG_ERR, 779204076Spjd "Unable to receive remote activemap"); 780204076Spjd nv_free(nvin); 781204076Spjd free(map); 782204076Spjd goto close; 783204076Spjd } 784257470Strociny mtx_lock(&res->hr_amp_lock); 785204076Spjd /* 786204076Spjd * Merge local and remote bitmaps. 787204076Spjd */ 788204076Spjd activemap_merge(res->hr_amp, map, mapsize); 789204076Spjd free(map); 790204076Spjd /* 791204076Spjd * Now that we merged bitmaps from both nodes, flush it to the 792204076Spjd * disk before we start to synchronize. 793204076Spjd */ 794204076Spjd (void)hast_activemap_flush(res); 795204076Spjd } 796214274Spjd nv_free(nvin); 797223181Strociny#ifdef notyet 798220271Spjd /* Setup directions. */ 799220271Spjd if (proto_send(out, NULL, 0) == -1) 800220271Spjd pjdlog_errno(LOG_WARNING, "Unable to set connection direction"); 801220271Spjd if (proto_recv(in, NULL, 0) == -1) 802220271Spjd pjdlog_errno(LOG_WARNING, "Unable to set connection direction"); 803223181Strociny#endif 804204076Spjd pjdlog_info("Connected to %s.", res->hr_remoteaddr); 805249236Strociny if (res->hr_original_replication == HAST_REPLICATION_MEMSYNC && 806249236Strociny res->hr_version < 2) { 807249236Strociny pjdlog_warning("The 'memsync' replication mode is not supported by the remote node, falling back to 'fullsync' mode."); 808249236Strociny res->hr_replication = HAST_REPLICATION_FULLSYNC; 809249236Strociny } else if (res->hr_replication != res->hr_original_replication) { 810249236Strociny /* 811249236Strociny * This is in case hastd disconnected and was upgraded. 
812249236Strociny */ 813249236Strociny res->hr_replication = res->hr_original_replication; 814249236Strociny } 815205738Spjd if (inp != NULL && outp != NULL) { 816205738Spjd *inp = in; 817205738Spjd *outp = out; 818205738Spjd } else { 819205738Spjd res->hr_remotein = in; 820205738Spjd res->hr_remoteout = out; 821205738Spjd } 822212038Spjd event_send(res, EVENT_CONNECT); 823220898Spjd return (0); 824205738Spjdclose: 825211983Spjd if (errmsg != NULL && strcmp(errmsg, "Split-brain condition!") == 0) 826212038Spjd event_send(res, EVENT_SPLITBRAIN); 827205738Spjd proto_close(out); 828205738Spjd if (in != NULL) 829205738Spjd proto_close(in); 830220898Spjd return (error); 831205738Spjd} 832205738Spjd 833205738Spjdstatic void 834205738Spjdsync_start(void) 835205738Spjd{ 836205738Spjd 837204076Spjd mtx_lock(&sync_lock); 838204076Spjd sync_inprogress = true; 839204076Spjd mtx_unlock(&sync_lock); 840204076Spjd cv_signal(&sync_cond); 841204076Spjd} 842204076Spjd 843204076Spjdstatic void 844211878Spjdsync_stop(void) 845211878Spjd{ 846211878Spjd 847211878Spjd mtx_lock(&sync_lock); 848211878Spjd if (sync_inprogress) 849211878Spjd sync_inprogress = false; 850211878Spjd mtx_unlock(&sync_lock); 851211878Spjd} 852211878Spjd 853211878Spjdstatic void 854204076Spjdinit_ggate(struct hast_resource *res) 855204076Spjd{ 856204076Spjd struct g_gate_ctl_create ggiocreate; 857204076Spjd struct g_gate_ctl_cancel ggiocancel; 858204076Spjd 859204076Spjd /* 860204076Spjd * We communicate with ggate via /dev/ggctl. Open it. 861204076Spjd */ 862204076Spjd res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR); 863231017Strociny if (res->hr_ggatefd == -1) 864204076Spjd primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME); 865204076Spjd /* 866204076Spjd * Create provider before trying to connect, as connection failure 867204076Spjd * is not critical, but may take some time. 
868204076Spjd */ 869213533Spjd bzero(&ggiocreate, sizeof(ggiocreate)); 870204076Spjd ggiocreate.gctl_version = G_GATE_VERSION; 871204076Spjd ggiocreate.gctl_mediasize = res->hr_datasize; 872204076Spjd ggiocreate.gctl_sectorsize = res->hr_local_sectorsize; 873204076Spjd ggiocreate.gctl_flags = 0; 874220266Spjd ggiocreate.gctl_maxcount = 0; 875204076Spjd ggiocreate.gctl_timeout = 0; 876204076Spjd ggiocreate.gctl_unit = G_GATE_NAME_GIVEN; 877204076Spjd snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s", 878204076Spjd res->hr_provname); 879204076Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) { 880204076Spjd pjdlog_info("Device hast/%s created.", res->hr_provname); 881204076Spjd res->hr_ggateunit = ggiocreate.gctl_unit; 882204076Spjd return; 883204076Spjd } 884204076Spjd if (errno != EEXIST) { 885204076Spjd primary_exit(EX_OSERR, "Unable to create hast/%s device", 886204076Spjd res->hr_provname); 887204076Spjd } 888204076Spjd pjdlog_debug(1, 889204076Spjd "Device hast/%s already exists, we will try to take it over.", 890204076Spjd res->hr_provname); 891204076Spjd /* 892204076Spjd * If we received EEXIST, we assume that the process who created the 893204076Spjd * provider died and didn't clean up. In that case we will start from 894204076Spjd * where he left of. 
895204076Spjd */ 896213533Spjd bzero(&ggiocancel, sizeof(ggiocancel)); 897204076Spjd ggiocancel.gctl_version = G_GATE_VERSION; 898204076Spjd ggiocancel.gctl_unit = G_GATE_NAME_GIVEN; 899204076Spjd snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s", 900204076Spjd res->hr_provname); 901204076Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) { 902204076Spjd pjdlog_info("Device hast/%s recovered.", res->hr_provname); 903204076Spjd res->hr_ggateunit = ggiocancel.gctl_unit; 904204076Spjd return; 905204076Spjd } 906204076Spjd primary_exit(EX_OSERR, "Unable to take over hast/%s device", 907204076Spjd res->hr_provname); 908204076Spjd} 909204076Spjd 910204076Spjdvoid 911204076Spjdhastd_primary(struct hast_resource *res) 912204076Spjd{ 913204076Spjd pthread_t td; 914204076Spjd pid_t pid; 915219482Strociny int error, mode, debuglevel; 916204076Spjd 917204076Spjd /* 918218218Spjd * Create communication channel for sending control commands from 919218218Spjd * parent to child. 920204076Spjd */ 921231017Strociny if (proto_client(NULL, "socketpair://", &res->hr_ctrl) == -1) { 922218042Spjd /* TODO: There's no need for this to be fatal error. */ 923204076Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 924212034Spjd pjdlog_exit(EX_OSERR, 925204076Spjd "Unable to create control sockets between parent and child"); 926204076Spjd } 927212038Spjd /* 928218218Spjd * Create communication channel for sending events from child to parent. 929212038Spjd */ 930231017Strociny if (proto_client(NULL, "socketpair://", &res->hr_event) == -1) { 931218042Spjd /* TODO: There's no need for this to be fatal error. */ 932212038Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 933212038Spjd pjdlog_exit(EX_OSERR, 934212038Spjd "Unable to create event sockets between child and parent"); 935212038Spjd } 936218218Spjd /* 937218218Spjd * Create communication channel for sending connection requests from 938218218Spjd * child to parent. 
939218218Spjd */ 940231017Strociny if (proto_client(NULL, "socketpair://", &res->hr_conn) == -1) { 941218218Spjd /* TODO: There's no need for this to be fatal error. */ 942218218Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 943218218Spjd pjdlog_exit(EX_OSERR, 944218218Spjd "Unable to create connection sockets between child and parent"); 945218218Spjd } 946204076Spjd 947204076Spjd pid = fork(); 948231017Strociny if (pid == -1) { 949218042Spjd /* TODO: There's no need for this to be fatal error. */ 950204076Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 951212034Spjd pjdlog_exit(EX_TEMPFAIL, "Unable to fork"); 952204076Spjd } 953204076Spjd 954204076Spjd if (pid > 0) { 955204076Spjd /* This is parent. */ 956212038Spjd /* Declare that we are receiver. */ 957212038Spjd proto_recv(res->hr_event, NULL, 0); 958218218Spjd proto_recv(res->hr_conn, NULL, 0); 959218043Spjd /* Declare that we are sender. */ 960218043Spjd proto_send(res->hr_ctrl, NULL, 0); 961204076Spjd res->hr_workerpid = pid; 962204076Spjd return; 963204076Spjd } 964211977Spjd 965211984Spjd gres = res; 966218043Spjd mode = pjdlog_mode_get(); 967219482Strociny debuglevel = pjdlog_debug_get(); 968211984Spjd 969218043Spjd /* Declare that we are sender. */ 970218043Spjd proto_send(res->hr_event, NULL, 0); 971218218Spjd proto_send(res->hr_conn, NULL, 0); 972218043Spjd /* Declare that we are receiver. 
*/ 973218043Spjd proto_recv(res->hr_ctrl, NULL, 0); 974218043Spjd descriptors_cleanup(res); 975204076Spjd 976218045Spjd descriptors_assert(res, mode); 977218045Spjd 978218043Spjd pjdlog_init(mode); 979219482Strociny pjdlog_debug_set(debuglevel); 980218043Spjd pjdlog_prefix_set("[%s] (%s) ", res->hr_name, role2str(res->hr_role)); 981220005Spjd setproctitle("%s (%s)", res->hr_name, role2str(res->hr_role)); 982204076Spjd 983204076Spjd init_local(res); 984213007Spjd init_ggate(res); 985213007Spjd init_environment(res); 986217784Spjd 987221899Spjd if (drop_privs(res) != 0) { 988218049Spjd cleanup(res); 989218049Spjd exit(EX_CONFIG); 990218049Spjd } 991218214Spjd pjdlog_info("Privileges successfully dropped."); 992218049Spjd 993213007Spjd /* 994213530Spjd * Create the guard thread first, so we can handle signals from the 995231017Strociny * very beginning. 996213530Spjd */ 997213530Spjd error = pthread_create(&td, NULL, guard_thread, res); 998218138Spjd PJDLOG_ASSERT(error == 0); 999213530Spjd /* 1000213007Spjd * Create the control thread before sending any event to the parent, 1001213007Spjd * as we can deadlock when parent sends control request to worker, 1002213007Spjd * but worker has no control thread started yet, so parent waits. 1003213007Spjd * In the meantime worker sends an event to the parent, but parent 1004213007Spjd * is unable to handle the event, because it waits for control 1005213007Spjd * request response. 
1006213007Spjd */ 1007213007Spjd error = pthread_create(&td, NULL, ctrl_thread, res); 1008218138Spjd PJDLOG_ASSERT(error == 0); 1009220898Spjd if (real_remote(res)) { 1010220898Spjd error = init_remote(res, NULL, NULL); 1011220898Spjd if (error == 0) { 1012220898Spjd sync_start(); 1013220898Spjd } else if (error == EBUSY) { 1014220898Spjd time_t start = time(NULL); 1015220898Spjd 1016220898Spjd pjdlog_warning("Waiting for remote node to become %s for %ds.", 1017220898Spjd role2str(HAST_ROLE_SECONDARY), 1018220898Spjd res->hr_timeout); 1019220898Spjd for (;;) { 1020220898Spjd sleep(1); 1021220898Spjd error = init_remote(res, NULL, NULL); 1022220898Spjd if (error != EBUSY) 1023220898Spjd break; 1024220898Spjd if (time(NULL) > start + res->hr_timeout) 1025220898Spjd break; 1026220898Spjd } 1027220898Spjd if (error == EBUSY) { 1028220898Spjd pjdlog_warning("Remote node is still %s, starting anyway.", 1029220898Spjd role2str(HAST_ROLE_PRIMARY)); 1030220898Spjd } 1031220898Spjd } 1032220898Spjd } 1033204076Spjd error = pthread_create(&td, NULL, ggate_recv_thread, res); 1034218138Spjd PJDLOG_ASSERT(error == 0); 1035204076Spjd error = pthread_create(&td, NULL, local_send_thread, res); 1036218138Spjd PJDLOG_ASSERT(error == 0); 1037204076Spjd error = pthread_create(&td, NULL, remote_send_thread, res); 1038218138Spjd PJDLOG_ASSERT(error == 0); 1039204076Spjd error = pthread_create(&td, NULL, remote_recv_thread, res); 1040218138Spjd PJDLOG_ASSERT(error == 0); 1041204076Spjd error = pthread_create(&td, NULL, ggate_send_thread, res); 1042218138Spjd PJDLOG_ASSERT(error == 0); 1043220898Spjd fullystarted = true; 1044213530Spjd (void)sync_thread(res); 1045204076Spjd} 1046204076Spjd 1047204076Spjdstatic void 1048249236Strocinyreqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio, 1049249236Strociny const char *fmt, ...) 
1050204076Spjd{ 1051204076Spjd char msg[1024]; 1052204076Spjd va_list ap; 1053204076Spjd 1054204076Spjd va_start(ap, fmt); 1055240269Strociny (void)vsnprintf(msg, sizeof(msg), fmt, ap); 1056204076Spjd va_end(ap); 1057240269Strociny switch (ggio->gctl_cmd) { 1058240269Strociny case BIO_READ: 1059240269Strociny (void)snprlcat(msg, sizeof(msg), "READ(%ju, %ju).", 1060249236Strociny (uintmax_t)ggio->gctl_offset, (uintmax_t)ggio->gctl_length); 1061240269Strociny break; 1062240269Strociny case BIO_DELETE: 1063240269Strociny (void)snprlcat(msg, sizeof(msg), "DELETE(%ju, %ju).", 1064249236Strociny (uintmax_t)ggio->gctl_offset, (uintmax_t)ggio->gctl_length); 1065240269Strociny break; 1066240269Strociny case BIO_FLUSH: 1067240269Strociny (void)snprlcat(msg, sizeof(msg), "FLUSH."); 1068240269Strociny break; 1069240269Strociny case BIO_WRITE: 1070240269Strociny (void)snprlcat(msg, sizeof(msg), "WRITE(%ju, %ju).", 1071249236Strociny (uintmax_t)ggio->gctl_offset, (uintmax_t)ggio->gctl_length); 1072240269Strociny break; 1073240269Strociny default: 1074240269Strociny (void)snprlcat(msg, sizeof(msg), "UNKNOWN(%u).", 1075240269Strociny (unsigned int)ggio->gctl_cmd); 1076240269Strociny break; 1077204076Spjd } 1078204076Spjd pjdlog_common(loglevel, debuglevel, -1, "%s", msg); 1079204076Spjd} 1080204076Spjd 1081204076Spjdstatic void 1082204076Spjdremote_close(struct hast_resource *res, int ncomp) 1083204076Spjd{ 1084204076Spjd 1085204076Spjd rw_wlock(&hio_remote_lock[ncomp]); 1086204076Spjd /* 1087229509Strociny * Check for a race between dropping rlock and acquiring wlock - 1088204076Spjd * another thread can close connection in-between. 
1089204076Spjd */ 1090204076Spjd if (!ISCONNECTED(res, ncomp)) { 1091218138Spjd PJDLOG_ASSERT(res->hr_remotein == NULL); 1092218138Spjd PJDLOG_ASSERT(res->hr_remoteout == NULL); 1093204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1094204076Spjd return; 1095204076Spjd } 1096204076Spjd 1097218138Spjd PJDLOG_ASSERT(res->hr_remotein != NULL); 1098218138Spjd PJDLOG_ASSERT(res->hr_remoteout != NULL); 1099204076Spjd 1100211881Spjd pjdlog_debug(2, "Closing incoming connection to %s.", 1101204076Spjd res->hr_remoteaddr); 1102204076Spjd proto_close(res->hr_remotein); 1103204076Spjd res->hr_remotein = NULL; 1104211881Spjd pjdlog_debug(2, "Closing outgoing connection to %s.", 1105204076Spjd res->hr_remoteaddr); 1106204076Spjd proto_close(res->hr_remoteout); 1107204076Spjd res->hr_remoteout = NULL; 1108204076Spjd 1109204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1110204076Spjd 1111211881Spjd pjdlog_warning("Disconnected from %s.", res->hr_remoteaddr); 1112211881Spjd 1113204076Spjd /* 1114204076Spjd * Stop synchronization if in-progress. 1115204076Spjd */ 1116211878Spjd sync_stop(); 1117211984Spjd 1118212038Spjd event_send(res, EVENT_DISCONNECT); 1119204076Spjd} 1120204076Spjd 1121204076Spjd/* 1122229509Strociny * Acknowledge write completion to the kernel, but don't update activemap yet. 1123229509Strociny */ 1124229509Strocinystatic void 1125229509Strocinywrite_complete(struct hast_resource *res, struct hio *hio) 1126229509Strociny{ 1127229509Strociny struct g_gate_ctl_io *ggio; 1128229509Strociny unsigned int ncomp; 1129229509Strociny 1130229509Strociny PJDLOG_ASSERT(!hio->hio_done); 1131229509Strociny 1132229509Strociny ggio = &hio->hio_ggio; 1133229509Strociny PJDLOG_ASSERT(ggio->gctl_cmd == BIO_WRITE); 1134229509Strociny 1135229509Strociny /* 1136229509Strociny * Bump local count if this is first write after 1137229509Strociny * connection failure with remote node. 
1138229509Strociny */ 1139229509Strociny ncomp = 1; 1140229509Strociny rw_rlock(&hio_remote_lock[ncomp]); 1141229509Strociny if (!ISCONNECTED(res, ncomp)) { 1142229509Strociny mtx_lock(&metadata_lock); 1143229509Strociny if (res->hr_primary_localcnt == res->hr_secondary_remotecnt) { 1144229509Strociny res->hr_primary_localcnt++; 1145229509Strociny pjdlog_debug(1, "Increasing localcnt to %ju.", 1146229509Strociny (uintmax_t)res->hr_primary_localcnt); 1147229509Strociny (void)metadata_write(res); 1148229509Strociny } 1149229509Strociny mtx_unlock(&metadata_lock); 1150229509Strociny } 1151229509Strociny rw_unlock(&hio_remote_lock[ncomp]); 1152231017Strociny if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) == -1) 1153229509Strociny primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed"); 1154229509Strociny hio->hio_done = true; 1155229509Strociny} 1156229509Strociny 1157229509Strociny/* 1158204076Spjd * Thread receives ggate I/O requests from the kernel and passes them to 1159204076Spjd * appropriate threads: 1160204076Spjd * WRITE - always goes to both local_send and remote_send threads 1161204076Spjd * READ (when the block is up-to-date on local component) - 1162204076Spjd * only local_send thread 1163204076Spjd * READ (when the block isn't up-to-date on local component) - 1164204076Spjd * only remote_send thread 1165204076Spjd * DELETE - always goes to both local_send and remote_send threads 1166204076Spjd * FLUSH - always goes to both local_send and remote_send threads 1167204076Spjd */ 1168204076Spjdstatic void * 1169204076Spjdggate_recv_thread(void *arg) 1170204076Spjd{ 1171204076Spjd struct hast_resource *res = arg; 1172204076Spjd struct g_gate_ctl_io *ggio; 1173204076Spjd struct hio *hio; 1174204076Spjd unsigned int ii, ncomp, ncomps; 1175204076Spjd int error; 1176204076Spjd 1177204076Spjd for (;;) { 1178204076Spjd pjdlog_debug(2, "ggate_recv: Taking free request."); 1179204076Spjd QUEUE_TAKE2(hio, free); 1180204076Spjd pjdlog_debug(2, "ggate_recv: (%p) Got free 
request.", hio); 1181204076Spjd ggio = &hio->hio_ggio; 1182204076Spjd ggio->gctl_unit = res->hr_ggateunit; 1183204076Spjd ggio->gctl_length = MAXPHYS; 1184204076Spjd ggio->gctl_error = 0; 1185229509Strociny hio->hio_done = false; 1186229509Strociny hio->hio_replication = res->hr_replication; 1187204076Spjd pjdlog_debug(2, 1188204076Spjd "ggate_recv: (%p) Waiting for request from the kernel.", 1189204076Spjd hio); 1190231017Strociny if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) == -1) { 1191204076Spjd if (sigexit_received) 1192204076Spjd pthread_exit(NULL); 1193204076Spjd primary_exit(EX_OSERR, "G_GATE_CMD_START failed"); 1194204076Spjd } 1195204076Spjd error = ggio->gctl_error; 1196204076Spjd switch (error) { 1197204076Spjd case 0: 1198204076Spjd break; 1199204076Spjd case ECANCELED: 1200204076Spjd /* Exit gracefully. */ 1201204076Spjd if (!sigexit_received) { 1202204076Spjd pjdlog_debug(2, 1203204076Spjd "ggate_recv: (%p) Received cancel from the kernel.", 1204204076Spjd hio); 1205204076Spjd pjdlog_info("Received cancel from the kernel, exiting."); 1206204076Spjd } 1207204076Spjd pthread_exit(NULL); 1208204076Spjd case ENOMEM: 1209204076Spjd /* 1210204076Spjd * Buffer too small? Impossible, we allocate MAXPHYS 1211204076Spjd * bytes - request can't be bigger than that. 1212204076Spjd */ 1213204076Spjd /* FALLTHROUGH */ 1214204076Spjd case ENXIO: 1215204076Spjd default: 1216204076Spjd primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.", 1217204076Spjd strerror(error)); 1218204076Spjd } 1219229509Strociny 1220229509Strociny ncomp = 0; 1221229509Strociny ncomps = HAST_NCOMPONENTS; 1222229509Strociny 1223204076Spjd for (ii = 0; ii < ncomps; ii++) 1224204076Spjd hio->hio_errors[ii] = EINVAL; 1225204076Spjd reqlog(LOG_DEBUG, 2, ggio, 1226204076Spjd "ggate_recv: (%p) Request received from the kernel: ", 1227204076Spjd hio); 1228229509Strociny 1229204076Spjd /* 1230204076Spjd * Inform all components about new write request. 
1231204076Spjd * For read request prefer local component unless the given 1232204076Spjd * range is out-of-date, then use remote component. 1233204076Spjd */ 1234204076Spjd switch (ggio->gctl_cmd) { 1235204076Spjd case BIO_READ: 1236222228Spjd res->hr_stat_read++; 1237229509Strociny ncomps = 1; 1238204076Spjd mtx_lock(&metadata_lock); 1239204076Spjd if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF || 1240204076Spjd res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1241204076Spjd /* 1242204076Spjd * This range is up-to-date on local component, 1243204076Spjd * so handle request locally. 1244204076Spjd */ 1245204076Spjd /* Local component is 0 for now. */ 1246204076Spjd ncomp = 0; 1247204076Spjd } else /* if (res->hr_syncsrc == 1248204076Spjd HAST_SYNCSRC_SECONDARY) */ { 1249218138Spjd PJDLOG_ASSERT(res->hr_syncsrc == 1250204076Spjd HAST_SYNCSRC_SECONDARY); 1251204076Spjd /* 1252204076Spjd * This range is out-of-date on local component, 1253204076Spjd * so send request to the remote node. 1254204076Spjd */ 1255204076Spjd /* Remote component is 1 for now. */ 1256204076Spjd ncomp = 1; 1257204076Spjd } 1258204076Spjd mtx_unlock(&metadata_lock); 1259204076Spjd break; 1260204076Spjd case BIO_WRITE: 1261222228Spjd res->hr_stat_write++; 1262229509Strociny if (res->hr_resuid == 0 && 1263229509Strociny res->hr_primary_localcnt == 0) { 1264229509Strociny /* This is first write. 
*/ 1265219844Spjd res->hr_primary_localcnt = 1; 1266214284Spjd } 1267204076Spjd for (;;) { 1268204076Spjd mtx_lock(&range_lock); 1269204076Spjd if (rangelock_islocked(range_sync, 1270204076Spjd ggio->gctl_offset, ggio->gctl_length)) { 1271204076Spjd pjdlog_debug(2, 1272204076Spjd "regular: Range offset=%jd length=%zu locked.", 1273204076Spjd (intmax_t)ggio->gctl_offset, 1274204076Spjd (size_t)ggio->gctl_length); 1275204076Spjd range_regular_wait = true; 1276204076Spjd cv_wait(&range_regular_cond, &range_lock); 1277204076Spjd range_regular_wait = false; 1278204076Spjd mtx_unlock(&range_lock); 1279204076Spjd continue; 1280204076Spjd } 1281204076Spjd if (rangelock_add(range_regular, 1282231017Strociny ggio->gctl_offset, ggio->gctl_length) == -1) { 1283204076Spjd mtx_unlock(&range_lock); 1284204076Spjd pjdlog_debug(2, 1285204076Spjd "regular: Range offset=%jd length=%zu is already locked, waiting.", 1286204076Spjd (intmax_t)ggio->gctl_offset, 1287204076Spjd (size_t)ggio->gctl_length); 1288204076Spjd sleep(1); 1289204076Spjd continue; 1290204076Spjd } 1291204076Spjd mtx_unlock(&range_lock); 1292204076Spjd break; 1293204076Spjd } 1294204076Spjd mtx_lock(&res->hr_amp_lock); 1295204076Spjd if (activemap_write_start(res->hr_amp, 1296204076Spjd ggio->gctl_offset, ggio->gctl_length)) { 1297222228Spjd res->hr_stat_activemap_update++; 1298204076Spjd (void)hast_activemap_flush(res); 1299256027Strociny } else { 1300256027Strociny mtx_unlock(&res->hr_amp_lock); 1301204076Spjd } 1302229509Strociny break; 1303204076Spjd case BIO_DELETE: 1304229509Strociny res->hr_stat_delete++; 1305229509Strociny break; 1306204076Spjd case BIO_FLUSH: 1307229509Strociny res->hr_stat_flush++; 1308204076Spjd break; 1309204076Spjd } 1310229509Strociny pjdlog_debug(2, 1311229509Strociny "ggate_recv: (%p) Moving request to the send queues.", hio); 1312249236Strociny hio->hio_countdown = ncomps; 1313249236Strociny if (hio->hio_replication == HAST_REPLICATION_MEMSYNC && 1314249236Strociny ggio->gctl_cmd == 
BIO_WRITE) { 1315249236Strociny /* Each remote request needs two responses in memsync. */ 1316249236Strociny hio->hio_countdown++; 1317249236Strociny } 1318249236Strociny for (ii = ncomp; ii < ncomps; ii++) 1319229509Strociny QUEUE_INSERT1(hio, send, ii); 1320204076Spjd } 1321204076Spjd /* NOTREACHED */ 1322204076Spjd return (NULL); 1323204076Spjd} 1324204076Spjd 1325204076Spjd/* 1326204076Spjd * Thread reads from or writes to local component. 1327204076Spjd * If local read fails, it redirects it to remote_send thread. 1328204076Spjd */ 1329204076Spjdstatic void * 1330204076Spjdlocal_send_thread(void *arg) 1331204076Spjd{ 1332204076Spjd struct hast_resource *res = arg; 1333204076Spjd struct g_gate_ctl_io *ggio; 1334204076Spjd struct hio *hio; 1335204076Spjd unsigned int ncomp, rncomp; 1336204076Spjd ssize_t ret; 1337204076Spjd 1338204076Spjd /* Local component is 0 for now. */ 1339204076Spjd ncomp = 0; 1340204076Spjd /* Remote component is 1 for now. */ 1341204076Spjd rncomp = 1; 1342204076Spjd 1343204076Spjd for (;;) { 1344204076Spjd pjdlog_debug(2, "local_send: Taking request."); 1345214692Spjd QUEUE_TAKE1(hio, send, ncomp, 0); 1346204076Spjd pjdlog_debug(2, "local_send: (%p) Got request.", hio); 1347204076Spjd ggio = &hio->hio_ggio; 1348204076Spjd switch (ggio->gctl_cmd) { 1349204076Spjd case BIO_READ: 1350204076Spjd ret = pread(res->hr_localfd, ggio->gctl_data, 1351204076Spjd ggio->gctl_length, 1352204076Spjd ggio->gctl_offset + res->hr_localoff); 1353204076Spjd if (ret == ggio->gctl_length) 1354204076Spjd hio->hio_errors[ncomp] = 0; 1355222467Strociny else if (!ISSYNCREQ(hio)) { 1356204076Spjd /* 1357204076Spjd * If READ failed, try to read from remote node. 1358204076Spjd */ 1359231017Strociny if (ret == -1) { 1360216479Spjd reqlog(LOG_WARNING, 0, ggio, 1361216479Spjd "Local request failed (%s), trying remote node. 
", 1362216479Spjd strerror(errno)); 1363216479Spjd } else if (ret != ggio->gctl_length) { 1364216479Spjd reqlog(LOG_WARNING, 0, ggio, 1365216479Spjd "Local request failed (%zd != %jd), trying remote node. ", 1366216494Spjd ret, (intmax_t)ggio->gctl_length); 1367216479Spjd } 1368204076Spjd QUEUE_INSERT1(hio, send, rncomp); 1369204076Spjd continue; 1370204076Spjd } 1371204076Spjd break; 1372204076Spjd case BIO_WRITE: 1373204076Spjd ret = pwrite(res->hr_localfd, ggio->gctl_data, 1374204076Spjd ggio->gctl_length, 1375204076Spjd ggio->gctl_offset + res->hr_localoff); 1376231017Strociny if (ret == -1) { 1377204076Spjd hio->hio_errors[ncomp] = errno; 1378216479Spjd reqlog(LOG_WARNING, 0, ggio, 1379216479Spjd "Local request failed (%s): ", 1380216479Spjd strerror(errno)); 1381216479Spjd } else if (ret != ggio->gctl_length) { 1382204076Spjd hio->hio_errors[ncomp] = EIO; 1383216479Spjd reqlog(LOG_WARNING, 0, ggio, 1384216479Spjd "Local request failed (%zd != %jd): ", 1385216494Spjd ret, (intmax_t)ggio->gctl_length); 1386216479Spjd } else { 1387204076Spjd hio->hio_errors[ncomp] = 0; 1388229509Strociny if (hio->hio_replication == 1389249236Strociny HAST_REPLICATION_ASYNC) { 1390229509Strociny ggio->gctl_error = 0; 1391229509Strociny write_complete(res, hio); 1392229509Strociny } 1393216479Spjd } 1394204076Spjd break; 1395204076Spjd case BIO_DELETE: 1396204076Spjd ret = g_delete(res->hr_localfd, 1397204076Spjd ggio->gctl_offset + res->hr_localoff, 1398204076Spjd ggio->gctl_length); 1399231017Strociny if (ret == -1) { 1400204076Spjd hio->hio_errors[ncomp] = errno; 1401216479Spjd reqlog(LOG_WARNING, 0, ggio, 1402216479Spjd "Local request failed (%s): ", 1403216479Spjd strerror(errno)); 1404216479Spjd } else { 1405204076Spjd hio->hio_errors[ncomp] = 0; 1406216479Spjd } 1407204076Spjd break; 1408204076Spjd case BIO_FLUSH: 1409229509Strociny if (!res->hr_localflush) { 1410229509Strociny ret = -1; 1411229509Strociny errno = EOPNOTSUPP; 1412229509Strociny break; 1413229509Strociny } 
1414204076Spjd ret = g_flush(res->hr_localfd); 1415231017Strociny if (ret == -1) { 1416229509Strociny if (errno == EOPNOTSUPP) 1417229509Strociny res->hr_localflush = false; 1418204076Spjd hio->hio_errors[ncomp] = errno; 1419216479Spjd reqlog(LOG_WARNING, 0, ggio, 1420216479Spjd "Local request failed (%s): ", 1421216479Spjd strerror(errno)); 1422216479Spjd } else { 1423204076Spjd hio->hio_errors[ncomp] = 0; 1424216479Spjd } 1425204076Spjd break; 1426204076Spjd } 1427249236Strociny 1428249236Strociny if (hio->hio_replication != HAST_REPLICATION_MEMSYNC || 1429249236Strociny ggio->gctl_cmd != BIO_WRITE || ISSYNCREQ(hio)) { 1430249236Strociny if (refcnt_release(&hio->hio_countdown) > 0) 1431249236Strociny continue; 1432249236Strociny } else { 1433249236Strociny /* 1434249236Strociny * Depending on hio_countdown value, requests finished 1435249236Strociny * in the following order: 1436249236Strociny * 0: remote memsync, remote final, local write 1437249236Strociny * 1: remote memsync, local write, (remote final) 1438249236Strociny * 2: local write, (remote memsync), (remote final) 1439249236Strociny */ 1440249236Strociny switch (refcnt_release(&hio->hio_countdown)) { 1441249236Strociny case 0: 1442249236Strociny /* 1443249236Strociny * Local write finished as last. 1444249236Strociny */ 1445249236Strociny break; 1446249236Strociny case 1: 1447249236Strociny /* 1448249236Strociny * Local write finished after remote memsync 1449249236Strociny * reply arrvied. We can complete the write now. 1450249236Strociny */ 1451249236Strociny if (hio->hio_errors[0] == 0) 1452249236Strociny write_complete(res, hio); 1453249236Strociny continue; 1454249236Strociny case 2: 1455249236Strociny /* 1456249236Strociny * Local write finished as first. 
1457249236Strociny */ 1458249236Strociny continue; 1459249236Strociny default: 1460249236Strociny PJDLOG_ABORT("Invalid hio_countdown."); 1461249236Strociny } 1462249236Strociny } 1463229509Strociny if (ISSYNCREQ(hio)) { 1464229509Strociny mtx_lock(&sync_lock); 1465229509Strociny SYNCREQDONE(hio); 1466229509Strociny mtx_unlock(&sync_lock); 1467229509Strociny cv_signal(&sync_cond); 1468229509Strociny } else { 1469229509Strociny pjdlog_debug(2, 1470229509Strociny "local_send: (%p) Moving request to the done queue.", 1471229509Strociny hio); 1472229509Strociny QUEUE_INSERT2(hio, done); 1473204076Spjd } 1474204076Spjd } 1475204076Spjd /* NOTREACHED */ 1476204076Spjd return (NULL); 1477204076Spjd} 1478204076Spjd 1479214692Spjdstatic void 1480214692Spjdkeepalive_send(struct hast_resource *res, unsigned int ncomp) 1481214692Spjd{ 1482214692Spjd struct nv *nv; 1483214692Spjd 1484218217Spjd rw_rlock(&hio_remote_lock[ncomp]); 1485218217Spjd 1486218217Spjd if (!ISCONNECTED(res, ncomp)) { 1487218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1488214692Spjd return; 1489218217Spjd } 1490219864Spjd 1491218138Spjd PJDLOG_ASSERT(res->hr_remotein != NULL); 1492218138Spjd PJDLOG_ASSERT(res->hr_remoteout != NULL); 1493214692Spjd 1494214692Spjd nv = nv_alloc(); 1495214692Spjd nv_add_uint8(nv, HIO_KEEPALIVE, "cmd"); 1496214692Spjd if (nv_error(nv) != 0) { 1497218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1498214692Spjd nv_free(nv); 1499214692Spjd pjdlog_debug(1, 1500214692Spjd "keepalive_send: Unable to prepare header to send."); 1501214692Spjd return; 1502214692Spjd } 1503231017Strociny if (hast_proto_send(res, res->hr_remoteout, nv, NULL, 0) == -1) { 1504218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1505214692Spjd pjdlog_common(LOG_DEBUG, 1, errno, 1506214692Spjd "keepalive_send: Unable to send request"); 1507214692Spjd nv_free(nv); 1508214692Spjd remote_close(res, ncomp); 1509214692Spjd return; 1510214692Spjd } 1511218217Spjd 1512218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 
/*
 * Thread sends request to secondary node.
 *
 * Takes requests from the remote-component send queue and ships them to the
 * secondary over hr_remoteout.  If the queue stays empty for HAST_KEEPALIVE
 * seconds, a keepalive packet is sent instead.  On any failure the request
 * is completed locally via the done queue.
 */
static void *
remote_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	time_t lastcheck, now;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	bool wakeup;
	uint64_t offset, length;
	uint8_t cmd;
	void *data;

	/* Remote component is 1 for now. */
	ncomp = 1;
	lastcheck = time(NULL);

	for (;;) {
		pjdlog_debug(2, "remote_send: Taking request.");
		/* NULL return means the HAST_KEEPALIVE timeout expired. */
		QUEUE_TAKE1(hio, send, ncomp, HAST_KEEPALIVE);
		if (hio == NULL) {
			now = time(NULL);
			if (lastcheck + HAST_KEEPALIVE <= now) {
				keepalive_send(res, ncomp);
				lastcheck = now;
			}
			continue;
		}
		pjdlog_debug(2, "remote_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		/* Translate the GEOM Gate command into a HAST protocol command. */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			cmd = HIO_READ;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_WRITE:
			cmd = HIO_WRITE;
			data = ggio->gctl_data;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_DELETE:
			cmd = HIO_DELETE;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_FLUSH:
			cmd = HIO_FLUSH;
			data = NULL;
			offset = 0;
			length = 0;
			break;
		default:
			PJDLOG_ABORT("invalid condition");
		}
		nv = nv_alloc();
		nv_add_uint8(nv, cmd, "cmd");
		nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq");
		nv_add_uint64(nv, offset, "offset");
		nv_add_uint64(nv, length, "length");
		/*
		 * For memsync writes (but not synchronization requests) ask
		 * the secondary for an early "received" acknowledgement.
		 */
		if (hio->hio_replication == HAST_REPLICATION_MEMSYNC &&
		    ggio->gctl_cmd == BIO_WRITE && !ISSYNCREQ(hio)) {
			nv_add_uint8(nv, 1, "memsync");
		}
		if (nv_error(nv) != 0) {
			hio->hio_errors[ncomp] = nv_error(nv);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to prepare header to send.",
			    hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to prepare header to send (%s): ",
			    strerror(nv_error(nv)));
			/* Move failed request immediately to the done queue. */
			goto done_queue;
		}
		/*
		 * Protect connection from disappearing.
		 */
		rw_rlock(&hio_remote_lock[ncomp]);
		if (!ISCONNECTED(res, ncomp)) {
			rw_unlock(&hio_remote_lock[ncomp]);
			hio->hio_errors[ncomp] = ENOTCONN;
			goto done_queue;
		}
		/*
		 * Move the request to recv queue before sending it, because
		 * in different order we can get reply before we move request
		 * to recv queue.
		 */
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the recv queue.",
		    hio);
		mtx_lock(&hio_recv_list_lock[ncomp]);
		/* Only signal the receiver if the list was empty. */
		wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]);
		TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		if (hast_proto_send(res, res->hr_remoteout, nv, data,
		    data != NULL ? length : 0) == -1) {
			hio->hio_errors[ncomp] = errno;
			rw_unlock(&hio_remote_lock[ncomp]);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to send request.", hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to send request (%s): ",
			    strerror(hio->hio_errors[ncomp]));
			remote_close(res, ncomp);
			/*
			 * Take request back from the receive queue and move
			 * it immediately to the done queue.
			 */
			mtx_lock(&hio_recv_list_lock[ncomp]);
			TAILQ_REMOVE(&hio_recv_list[ncomp], hio,
			    hio_next[ncomp]);
			mtx_unlock(&hio_recv_list_lock[ncomp]);
			goto done_queue;
		}
		rw_unlock(&hio_remote_lock[ncomp]);
		nv_free(nv);
		if (wakeup)
			cv_signal(&hio_recv_list_cond[ncomp]);
		continue;
done_queue:
		nv_free(nv);
		if (ISSYNCREQ(hio)) {
			/* Synchronization request: wake up sync_thread instead. */
			if (refcnt_release(&hio->hio_countdown) > 0)
				continue;
			mtx_lock(&sync_lock);
			SYNCREQDONE(hio);
			mtx_unlock(&sync_lock);
			cv_signal(&sync_cond);
			continue;
		}
		if (ggio->gctl_cmd == BIO_WRITE) {
			mtx_lock(&res->hr_amp_lock);
			if (activemap_need_sync(res->hr_amp, ggio->gctl_offset,
			    ggio->gctl_length)) {
				/*
				 * NOTE(review): hast_activemap_flush() appears
				 * to release hr_amp_lock itself -- only the
				 * else branch unlocks explicitly.  Confirm
				 * against its definition.
				 */
				(void)hast_activemap_flush(res);
			} else {
				mtx_unlock(&res->hr_amp_lock);
			}
			/* Memsync writes hold an extra countdown reference. */
			if (hio->hio_replication == HAST_REPLICATION_MEMSYNC)
				(void)refcnt_release(&hio->hio_countdown);
		}
		if (refcnt_release(&hio->hio_countdown) > 0)
			continue;
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the done queue.",
		    hio);
		QUEUE_INSERT2(hio, done);
	}
	/* NOTREACHED */
	return (NULL);
}
/*
 * Thread receives answer from secondary node and passes it to ggate_send
 * thread.
 *
 * Replies are matched to pending requests on the recv queue by the "seq"
 * field.  For memsync writes the secondary sends two replies: an early
 * "received" ack and a final completion; hio_countdown distinguishes the
 * possible arrival orders (see the comment in the memsync branch below).
 */
static void *
remote_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	uint64_t seq;
	bool memsyncack;
	int error;

	/* Remote component is 1 for now. */
	ncomp = 1;

	for (;;) {
		/* Wait until there is anything to receive. */
		mtx_lock(&hio_recv_list_lock[ncomp]);
		while (TAILQ_EMPTY(&hio_recv_list[ncomp])) {
			pjdlog_debug(2, "remote_recv: No requests, waiting.");
			cv_wait(&hio_recv_list_cond[ncomp],
			    &hio_recv_list_lock[ncomp]);
		}
		mtx_unlock(&hio_recv_list_lock[ncomp]);

		memsyncack = false;

		rw_rlock(&hio_remote_lock[ncomp]);
		if (!ISCONNECTED(res, ncomp)) {
			rw_unlock(&hio_remote_lock[ncomp]);
			/*
			 * Connection is dead, so move all pending requests to
			 * the done queue (one-by-one).
			 */
			mtx_lock(&hio_recv_list_lock[ncomp]);
			hio = TAILQ_FIRST(&hio_recv_list[ncomp]);
			PJDLOG_ASSERT(hio != NULL);
			TAILQ_REMOVE(&hio_recv_list[ncomp], hio,
			    hio_next[ncomp]);
			mtx_unlock(&hio_recv_list_lock[ncomp]);
			goto done_queue;
		}
		if (hast_proto_recv_hdr(res->hr_remotein, &nv) == -1) {
			pjdlog_errno(LOG_ERR,
			    "Unable to receive reply header");
			rw_unlock(&hio_remote_lock[ncomp]);
			remote_close(res, ncomp);
			continue;
		}
		rw_unlock(&hio_remote_lock[ncomp]);
		seq = nv_get_uint64(nv, "seq");
		if (seq == 0) {
			pjdlog_error("Header contains no 'seq' field.");
			nv_free(nv);
			continue;
		}
		/* "received" marks the early memsync acknowledgement. */
		memsyncack = nv_exists(nv, "received");
		/* Find and detach the request this reply belongs to. */
		mtx_lock(&hio_recv_list_lock[ncomp]);
		TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) {
			if (hio->hio_ggio.gctl_seq == seq) {
				TAILQ_REMOVE(&hio_recv_list[ncomp], hio,
				    hio_next[ncomp]);
				break;
			}
		}
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		if (hio == NULL) {
			pjdlog_error("Found no request matching received 'seq' field (%ju).",
			    (uintmax_t)seq);
			nv_free(nv);
			continue;
		}
		ggio = &hio->hio_ggio;
		error = nv_get_int16(nv, "error");
		if (error != 0) {
			/* Request failed on remote side. */
			hio->hio_errors[ncomp] = error;
			reqlog(LOG_WARNING, 0, ggio,
			    "Remote request failed (%s): ", strerror(error));
			nv_free(nv);
			goto done_queue;
		}
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			/* Reads carry a data payload after the header. */
			rw_rlock(&hio_remote_lock[ncomp]);
			if (!ISCONNECTED(res, ncomp)) {
				rw_unlock(&hio_remote_lock[ncomp]);
				nv_free(nv);
				goto done_queue;
			}
			if (hast_proto_recv_data(res, res->hr_remotein, nv,
			    ggio->gctl_data, ggio->gctl_length) == -1) {
				hio->hio_errors[ncomp] = errno;
				pjdlog_errno(LOG_ERR,
				    "Unable to receive reply data");
				rw_unlock(&hio_remote_lock[ncomp]);
				nv_free(nv);
				remote_close(res, ncomp);
				goto done_queue;
			}
			rw_unlock(&hio_remote_lock[ncomp]);
			break;
		case BIO_WRITE:
		case BIO_DELETE:
		case BIO_FLUSH:
			break;
		default:
			PJDLOG_ABORT("invalid condition");
		}
		hio->hio_errors[ncomp] = 0;
		nv_free(nv);
done_queue:
		if (hio->hio_replication != HAST_REPLICATION_MEMSYNC ||
		    hio->hio_ggio.gctl_cmd != BIO_WRITE || ISSYNCREQ(hio)) {
			/* Ordinary request: last releaser completes it below. */
			if (refcnt_release(&hio->hio_countdown) > 0)
				continue;
		} else {
			/*
			 * Depending on hio_countdown value, requests finished
			 * in the following order:
			 *
			 * 0: local write, remote memsync, remote final
			 * or
			 * 0: remote memsync, local write, remote final
			 *
			 * 1: local write, remote memsync, (remote final)
			 * or
			 * 1: remote memsync, remote final, (local write)
			 *
			 * 2: remote memsync, (local write), (remote final)
			 * or
			 * 2: remote memsync, (remote final), (local write)
			 */
			switch (refcnt_release(&hio->hio_countdown)) {
			case 0:
				/*
				 * Remote final reply arrived.
				 */
				PJDLOG_ASSERT(!memsyncack);
				break;
			case 1:
				if (memsyncack) {
					/*
					 * Local request already finished, so we
					 * can complete the write.
					 */
					if (hio->hio_errors[0] == 0)
						write_complete(res, hio);
					/*
					 * We still need to wait for final
					 * remote reply.
					 */
					pjdlog_debug(2,
					    "remote_recv: (%p) Moving request back to the recv queue.",
					    hio);
					mtx_lock(&hio_recv_list_lock[ncomp]);
					TAILQ_INSERT_TAIL(&hio_recv_list[ncomp],
					    hio, hio_next[ncomp]);
					mtx_unlock(&hio_recv_list_lock[ncomp]);
				} else {
					/*
					 * Remote final reply arrived before
					 * local write finished.
					 * Nothing to do in such case.
					 */
				}
				continue;
			case 2:
				/*
				 * We received remote memsync reply even before
				 * local write finished.
				 */
				PJDLOG_ASSERT(memsyncack);

				pjdlog_debug(2,
				    "remote_recv: (%p) Moving request back to the recv queue.",
				    hio);
				mtx_lock(&hio_recv_list_lock[ncomp]);
				TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio,
				    hio_next[ncomp]);
				mtx_unlock(&hio_recv_list_lock[ncomp]);
				continue;
			default:
				PJDLOG_ABORT("Invalid hio_countdown.");
			}
		}
		if (ISSYNCREQ(hio)) {
			/* Hand the finished sync request back to sync_thread. */
			mtx_lock(&sync_lock);
			SYNCREQDONE(hio);
			mtx_unlock(&sync_lock);
			cv_signal(&sync_cond);
		} else {
			pjdlog_debug(2,
			    "remote_recv: (%p) Moving request to the done queue.",
			    hio);
			QUEUE_INSERT2(hio, done);
		}
	}
	/* NOTREACHED */
	return (NULL);
}
/*
 * Thread sends answer to the kernel.
 *
 * Takes fully-processed requests from the done queue, folds the per-component
 * errors into a single gctl_error, updates the activemap and per-resource
 * error statistics, releases the range lock for writes and returns the ggio
 * to the kernel via G_GATE_CMD_DONE (or write_complete() for writes).
 */
static void *
ggate_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomps;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		pjdlog_debug(2, "ggate_send: Taking request.");
		QUEUE_TAKE2(hio, done);
		pjdlog_debug(2, "ggate_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		for (ii = 0; ii < ncomps; ii++) {
			if (hio->hio_errors[ii] == 0) {
				/*
				 * One successful request is enough to declare
				 * success.
				 */
				ggio->gctl_error = 0;
				break;
			}
		}
		if (ii == ncomps) {
			/*
			 * None of the requests were successful.
			 * Use the error from local component except the
			 * case when we did only remote request.
			 */
			if (ggio->gctl_cmd == BIO_READ &&
			    res->hr_syncsrc == HAST_SYNCSRC_SECONDARY)
				ggio->gctl_error = hio->hio_errors[1];
			else
				ggio->gctl_error = hio->hio_errors[0];
		}
		if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) {
			/* Successful write: mark the extent complete. */
			mtx_lock(&res->hr_amp_lock);
			if (activemap_write_complete(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length)) {
				res->hr_stat_activemap_update++;
				/*
				 * NOTE(review): hast_activemap_flush() appears
				 * to release hr_amp_lock itself -- only the
				 * else branch unlocks explicitly.  Confirm.
				 */
				(void)hast_activemap_flush(res);
			} else {
				mtx_unlock(&res->hr_amp_lock);
			}
		}
		if (ggio->gctl_cmd == BIO_WRITE) {
			/*
			 * Unlock range we locked.
			 */
			mtx_lock(&range_lock);
			rangelock_del(range_regular, ggio->gctl_offset,
			    ggio->gctl_length);
			if (range_sync_wait)
				cv_signal(&range_sync_cond);
			mtx_unlock(&range_lock);
			/* Writes may have been completed early (memsync). */
			if (!hio->hio_done)
				write_complete(res, hio);
		} else {
			if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) == -1) {
				primary_exit(EX_OSERR,
				    "G_GATE_CMD_DONE failed");
			}
		}
		/* Account local-component failures in resource statistics. */
		if (hio->hio_errors[0]) {
			switch (ggio->gctl_cmd) {
			case BIO_READ:
				res->hr_stat_read_error++;
				break;
			case BIO_WRITE:
				res->hr_stat_write_error++;
				break;
			case BIO_DELETE:
				res->hr_stat_delete_error++;
				break;
			case BIO_FLUSH:
				res->hr_stat_flush_error++;
				break;
			}
		}
		pjdlog_debug(2,
		    "ggate_send: (%p) Moving request to the free queue.", hio);
		QUEUE_INSERT2(hio, free);
	}
	/* NOTREACHED */
	return (NULL);
}
1978204076Spjd */ 1979204076Spjdstatic void * 1980204076Spjdsync_thread(void *arg __unused) 1981204076Spjd{ 1982204076Spjd struct hast_resource *res = arg; 1983204076Spjd struct hio *hio; 1984204076Spjd struct g_gate_ctl_io *ggio; 1985219372Spjd struct timeval tstart, tend, tdiff; 1986204076Spjd unsigned int ii, ncomp, ncomps; 1987204076Spjd off_t offset, length, synced; 1988240269Strociny bool dorewind, directreads; 1989204076Spjd int syncext; 1990204076Spjd 1991204076Spjd ncomps = HAST_NCOMPONENTS; 1992204076Spjd dorewind = true; 1993211897Spjd synced = 0; 1994211897Spjd offset = -1; 1995240269Strociny directreads = false; 1996204076Spjd 1997204076Spjd for (;;) { 1998204076Spjd mtx_lock(&sync_lock); 1999211897Spjd if (offset >= 0 && !sync_inprogress) { 2000219372Spjd gettimeofday(&tend, NULL); 2001219372Spjd timersub(&tend, &tstart, &tdiff); 2002219372Spjd pjdlog_info("Synchronization interrupted after %#.0T. " 2003219372Spjd "%NB synchronized so far.", &tdiff, 2004211879Spjd (intmax_t)synced); 2005212038Spjd event_send(res, EVENT_SYNCINTR); 2006211879Spjd } 2007204076Spjd while (!sync_inprogress) { 2008204076Spjd dorewind = true; 2009204076Spjd synced = 0; 2010204076Spjd cv_wait(&sync_cond, &sync_lock); 2011204076Spjd } 2012204076Spjd mtx_unlock(&sync_lock); 2013204076Spjd /* 2014204076Spjd * Obtain offset at which we should synchronize. 2015204076Spjd * Rewind synchronization if needed. 2016204076Spjd */ 2017204076Spjd mtx_lock(&res->hr_amp_lock); 2018204076Spjd if (dorewind) 2019204076Spjd activemap_sync_rewind(res->hr_amp); 2020204076Spjd offset = activemap_sync_offset(res->hr_amp, &length, &syncext); 2021204076Spjd if (syncext != -1) { 2022204076Spjd /* 2023204076Spjd * We synchronized entire syncext extent, we can mark 2024204076Spjd * it as clean now. 
2025204076Spjd */ 2026204076Spjd if (activemap_extent_complete(res->hr_amp, syncext)) 2027204076Spjd (void)hast_activemap_flush(res); 2028256027Strociny else 2029256027Strociny mtx_unlock(&res->hr_amp_lock); 2030256027Strociny } else { 2031256027Strociny mtx_unlock(&res->hr_amp_lock); 2032204076Spjd } 2033204076Spjd if (dorewind) { 2034204076Spjd dorewind = false; 2035231017Strociny if (offset == -1) 2036204076Spjd pjdlog_info("Nodes are in sync."); 2037204076Spjd else { 2038219372Spjd pjdlog_info("Synchronization started. %NB to go.", 2039219372Spjd (intmax_t)(res->hr_extentsize * 2040204076Spjd activemap_ndirty(res->hr_amp))); 2041212038Spjd event_send(res, EVENT_SYNCSTART); 2042219372Spjd gettimeofday(&tstart, NULL); 2043204076Spjd } 2044204076Spjd } 2045231017Strociny if (offset == -1) { 2046211878Spjd sync_stop(); 2047204076Spjd pjdlog_debug(1, "Nothing to synchronize."); 2048204076Spjd /* 2049204076Spjd * Synchronization complete, make both localcnt and 2050204076Spjd * remotecnt equal. 2051204076Spjd */ 2052204076Spjd ncomp = 1; 2053204076Spjd rw_rlock(&hio_remote_lock[ncomp]); 2054204076Spjd if (ISCONNECTED(res, ncomp)) { 2055204076Spjd if (synced > 0) { 2056219372Spjd int64_t bps; 2057219372Spjd 2058219372Spjd gettimeofday(&tend, NULL); 2059219372Spjd timersub(&tend, &tstart, &tdiff); 2060219372Spjd bps = (int64_t)((double)synced / 2061219372Spjd ((double)tdiff.tv_sec + 2062219372Spjd (double)tdiff.tv_usec / 1000000)); 2063204076Spjd pjdlog_info("Synchronization complete. 
" 2064219372Spjd "%NB synchronized in %#.0lT (%NB/sec).", 2065219372Spjd (intmax_t)synced, &tdiff, 2066219372Spjd (intmax_t)bps); 2067212038Spjd event_send(res, EVENT_SYNCDONE); 2068204076Spjd } 2069204076Spjd mtx_lock(&metadata_lock); 2070240269Strociny if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) 2071240269Strociny directreads = true; 2072204076Spjd res->hr_syncsrc = HAST_SYNCSRC_UNDEF; 2073204076Spjd res->hr_primary_localcnt = 2074219882Strociny res->hr_secondary_remotecnt; 2075219882Strociny res->hr_primary_remotecnt = 2076204076Spjd res->hr_secondary_localcnt; 2077204076Spjd pjdlog_debug(1, 2078204076Spjd "Setting localcnt to %ju and remotecnt to %ju.", 2079204076Spjd (uintmax_t)res->hr_primary_localcnt, 2080219882Strociny (uintmax_t)res->hr_primary_remotecnt); 2081204076Spjd (void)metadata_write(res); 2082204076Spjd mtx_unlock(&metadata_lock); 2083204076Spjd } 2084204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 2085240269Strociny if (directreads) { 2086240269Strociny directreads = false; 2087240269Strociny enable_direct_reads(res); 2088240269Strociny } 2089204076Spjd continue; 2090204076Spjd } 2091204076Spjd pjdlog_debug(2, "sync: Taking free request."); 2092204076Spjd QUEUE_TAKE2(hio, free); 2093204076Spjd pjdlog_debug(2, "sync: (%p) Got free request.", hio); 2094204076Spjd /* 2095204076Spjd * Lock the range we are going to synchronize. We don't want 2096204076Spjd * race where someone writes between our read and write. 
2097204076Spjd */ 2098204076Spjd for (;;) { 2099204076Spjd mtx_lock(&range_lock); 2100204076Spjd if (rangelock_islocked(range_regular, offset, length)) { 2101204076Spjd pjdlog_debug(2, 2102204076Spjd "sync: Range offset=%jd length=%jd locked.", 2103204076Spjd (intmax_t)offset, (intmax_t)length); 2104204076Spjd range_sync_wait = true; 2105204076Spjd cv_wait(&range_sync_cond, &range_lock); 2106204076Spjd range_sync_wait = false; 2107204076Spjd mtx_unlock(&range_lock); 2108204076Spjd continue; 2109204076Spjd } 2110231017Strociny if (rangelock_add(range_sync, offset, length) == -1) { 2111204076Spjd mtx_unlock(&range_lock); 2112204076Spjd pjdlog_debug(2, 2113204076Spjd "sync: Range offset=%jd length=%jd is already locked, waiting.", 2114204076Spjd (intmax_t)offset, (intmax_t)length); 2115204076Spjd sleep(1); 2116204076Spjd continue; 2117204076Spjd } 2118204076Spjd mtx_unlock(&range_lock); 2119204076Spjd break; 2120204076Spjd } 2121204076Spjd /* 2122204076Spjd * First read the data from synchronization source. 2123204076Spjd */ 2124204076Spjd SYNCREQ(hio); 2125204076Spjd ggio = &hio->hio_ggio; 2126204076Spjd ggio->gctl_cmd = BIO_READ; 2127204076Spjd ggio->gctl_offset = offset; 2128204076Spjd ggio->gctl_length = length; 2129204076Spjd ggio->gctl_error = 0; 2130229509Strociny hio->hio_done = false; 2131229509Strociny hio->hio_replication = res->hr_replication; 2132204076Spjd for (ii = 0; ii < ncomps; ii++) 2133204076Spjd hio->hio_errors[ii] = EINVAL; 2134204076Spjd reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 2135204076Spjd hio); 2136204076Spjd pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 2137204076Spjd hio); 2138204076Spjd mtx_lock(&metadata_lock); 2139204076Spjd if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 2140204076Spjd /* 2141204076Spjd * This range is up-to-date on local component, 2142204076Spjd * so handle request locally. 2143204076Spjd */ 2144204076Spjd /* Local component is 0 for now. 
*/ 2145204076Spjd ncomp = 0; 2146204076Spjd } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 2147218138Spjd PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 2148204076Spjd /* 2149204076Spjd * This range is out-of-date on local component, 2150204076Spjd * so send request to the remote node. 2151204076Spjd */ 2152204076Spjd /* Remote component is 1 for now. */ 2153204076Spjd ncomp = 1; 2154204076Spjd } 2155204076Spjd mtx_unlock(&metadata_lock); 2156249236Strociny hio->hio_countdown = 1; 2157204076Spjd QUEUE_INSERT1(hio, send, ncomp); 2158204076Spjd 2159204076Spjd /* 2160204076Spjd * Let's wait for READ to finish. 2161204076Spjd */ 2162204076Spjd mtx_lock(&sync_lock); 2163204076Spjd while (!ISSYNCREQDONE(hio)) 2164204076Spjd cv_wait(&sync_cond, &sync_lock); 2165204076Spjd mtx_unlock(&sync_lock); 2166204076Spjd 2167204076Spjd if (hio->hio_errors[ncomp] != 0) { 2168204076Spjd pjdlog_error("Unable to read synchronization data: %s.", 2169204076Spjd strerror(hio->hio_errors[ncomp])); 2170204076Spjd goto free_queue; 2171204076Spjd } 2172204076Spjd 2173204076Spjd /* 2174204076Spjd * We read the data from synchronization source, now write it 2175204076Spjd * to synchronization target. 2176204076Spjd */ 2177204076Spjd SYNCREQ(hio); 2178204076Spjd ggio->gctl_cmd = BIO_WRITE; 2179204076Spjd for (ii = 0; ii < ncomps; ii++) 2180204076Spjd hio->hio_errors[ii] = EINVAL; 2181204076Spjd reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 2182204076Spjd hio); 2183204076Spjd pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 2184204076Spjd hio); 2185204076Spjd mtx_lock(&metadata_lock); 2186204076Spjd if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 2187204076Spjd /* 2188204076Spjd * This range is up-to-date on local component, 2189204076Spjd * so we update remote component. 2190204076Spjd */ 2191204076Spjd /* Remote component is 1 for now. 
*/ 2192204076Spjd ncomp = 1; 2193204076Spjd } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 2194218138Spjd PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 2195204076Spjd /* 2196204076Spjd * This range is out-of-date on local component, 2197204076Spjd * so we update it. 2198204076Spjd */ 2199204076Spjd /* Local component is 0 for now. */ 2200204076Spjd ncomp = 0; 2201204076Spjd } 2202204076Spjd mtx_unlock(&metadata_lock); 2203204076Spjd 2204229509Strociny pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 2205204076Spjd hio); 2206249236Strociny hio->hio_countdown = 1; 2207204076Spjd QUEUE_INSERT1(hio, send, ncomp); 2208204076Spjd 2209204076Spjd /* 2210204076Spjd * Let's wait for WRITE to finish. 2211204076Spjd */ 2212204076Spjd mtx_lock(&sync_lock); 2213204076Spjd while (!ISSYNCREQDONE(hio)) 2214204076Spjd cv_wait(&sync_cond, &sync_lock); 2215204076Spjd mtx_unlock(&sync_lock); 2216204076Spjd 2217204076Spjd if (hio->hio_errors[ncomp] != 0) { 2218204076Spjd pjdlog_error("Unable to write synchronization data: %s.", 2219204076Spjd strerror(hio->hio_errors[ncomp])); 2220204076Spjd goto free_queue; 2221204076Spjd } 2222211880Spjd 2223211880Spjd synced += length; 2224204076Spjdfree_queue: 2225204076Spjd mtx_lock(&range_lock); 2226204076Spjd rangelock_del(range_sync, offset, length); 2227204076Spjd if (range_regular_wait) 2228204076Spjd cv_signal(&range_regular_cond); 2229204076Spjd mtx_unlock(&range_lock); 2230204076Spjd pjdlog_debug(2, "sync: (%p) Moving request to the free queue.", 2231204076Spjd hio); 2232204076Spjd QUEUE_INSERT2(hio, free); 2233204076Spjd } 2234204076Spjd /* NOTREACHED */ 2235204076Spjd return (NULL); 2236204076Spjd} 2237204076Spjd 2238217784Spjdvoid 2239217784Spjdprimary_config_reload(struct hast_resource *res, struct nv *nv) 2240210886Spjd{ 2241210886Spjd unsigned int ii, ncomps; 2242217784Spjd int modified, vint; 2243217784Spjd const char *vstr; 2244210886Spjd 2245210886Spjd pjdlog_info("Reloading 
configuration..."); 2246210886Spjd 2247218138Spjd PJDLOG_ASSERT(res->hr_role == HAST_ROLE_PRIMARY); 2248218138Spjd PJDLOG_ASSERT(gres == res); 2249217784Spjd nv_assert(nv, "remoteaddr"); 2250219818Spjd nv_assert(nv, "sourceaddr"); 2251217784Spjd nv_assert(nv, "replication"); 2252219351Spjd nv_assert(nv, "checksum"); 2253219354Spjd nv_assert(nv, "compression"); 2254217784Spjd nv_assert(nv, "timeout"); 2255217784Spjd nv_assert(nv, "exec"); 2256229509Strociny nv_assert(nv, "metaflush"); 2257217784Spjd 2258210886Spjd ncomps = HAST_NCOMPONENTS; 2259210886Spjd 2260219351Spjd#define MODIFIED_REMOTEADDR 0x01 2261219818Spjd#define MODIFIED_SOURCEADDR 0x02 2262219818Spjd#define MODIFIED_REPLICATION 0x04 2263219818Spjd#define MODIFIED_CHECKSUM 0x08 2264219818Spjd#define MODIFIED_COMPRESSION 0x10 2265219818Spjd#define MODIFIED_TIMEOUT 0x20 2266219818Spjd#define MODIFIED_EXEC 0x40 2267229509Strociny#define MODIFIED_METAFLUSH 0x80 2268210886Spjd modified = 0; 2269217784Spjd 2270217784Spjd vstr = nv_get_string(nv, "remoteaddr"); 2271217784Spjd if (strcmp(gres->hr_remoteaddr, vstr) != 0) { 2272210886Spjd /* 2273210886Spjd * Don't copy res->hr_remoteaddr to gres just yet. 2274210886Spjd * We want remote_close() to log disconnect from the old 2275210886Spjd * addresses, not from the new ones. 
2276210886Spjd */ 2277210886Spjd modified |= MODIFIED_REMOTEADDR; 2278210886Spjd } 2279219818Spjd vstr = nv_get_string(nv, "sourceaddr"); 2280219818Spjd if (strcmp(gres->hr_sourceaddr, vstr) != 0) { 2281219818Spjd strlcpy(gres->hr_sourceaddr, vstr, sizeof(gres->hr_sourceaddr)); 2282219818Spjd modified |= MODIFIED_SOURCEADDR; 2283219818Spjd } 2284217784Spjd vint = nv_get_int32(nv, "replication"); 2285217784Spjd if (gres->hr_replication != vint) { 2286217784Spjd gres->hr_replication = vint; 2287210886Spjd modified |= MODIFIED_REPLICATION; 2288210886Spjd } 2289219351Spjd vint = nv_get_int32(nv, "checksum"); 2290219351Spjd if (gres->hr_checksum != vint) { 2291219351Spjd gres->hr_checksum = vint; 2292219351Spjd modified |= MODIFIED_CHECKSUM; 2293219351Spjd } 2294219354Spjd vint = nv_get_int32(nv, "compression"); 2295219354Spjd if (gres->hr_compression != vint) { 2296219354Spjd gres->hr_compression = vint; 2297219354Spjd modified |= MODIFIED_COMPRESSION; 2298219354Spjd } 2299217784Spjd vint = nv_get_int32(nv, "timeout"); 2300217784Spjd if (gres->hr_timeout != vint) { 2301217784Spjd gres->hr_timeout = vint; 2302210886Spjd modified |= MODIFIED_TIMEOUT; 2303210886Spjd } 2304217784Spjd vstr = nv_get_string(nv, "exec"); 2305217784Spjd if (strcmp(gres->hr_exec, vstr) != 0) { 2306217784Spjd strlcpy(gres->hr_exec, vstr, sizeof(gres->hr_exec)); 2307211886Spjd modified |= MODIFIED_EXEC; 2308211886Spjd } 2309229509Strociny vint = nv_get_int32(nv, "metaflush"); 2310229509Strociny if (gres->hr_metaflush != vint) { 2311229509Strociny gres->hr_metaflush = vint; 2312229509Strociny modified |= MODIFIED_METAFLUSH; 2313229509Strociny } 2314217784Spjd 2315210886Spjd /* 2316219351Spjd * Change timeout for connected sockets. 2317219351Spjd * Don't bother if we need to reconnect. 
2318210886Spjd */ 2319219351Spjd if ((modified & MODIFIED_TIMEOUT) != 0 && 2320229509Strociny (modified & (MODIFIED_REMOTEADDR | MODIFIED_SOURCEADDR)) == 0) { 2321210886Spjd for (ii = 0; ii < ncomps; ii++) { 2322210886Spjd if (!ISREMOTE(ii)) 2323210886Spjd continue; 2324210886Spjd rw_rlock(&hio_remote_lock[ii]); 2325210886Spjd if (!ISCONNECTED(gres, ii)) { 2326210886Spjd rw_unlock(&hio_remote_lock[ii]); 2327210886Spjd continue; 2328210886Spjd } 2329210886Spjd rw_unlock(&hio_remote_lock[ii]); 2330210886Spjd if (proto_timeout(gres->hr_remotein, 2331231017Strociny gres->hr_timeout) == -1) { 2332210886Spjd pjdlog_errno(LOG_WARNING, 2333210886Spjd "Unable to set connection timeout"); 2334210886Spjd } 2335210886Spjd if (proto_timeout(gres->hr_remoteout, 2336231017Strociny gres->hr_timeout) == -1) { 2337210886Spjd pjdlog_errno(LOG_WARNING, 2338210886Spjd "Unable to set connection timeout"); 2339210886Spjd } 2340210886Spjd } 2341219351Spjd } 2342229509Strociny if ((modified & (MODIFIED_REMOTEADDR | MODIFIED_SOURCEADDR)) != 0) { 2343210886Spjd for (ii = 0; ii < ncomps; ii++) { 2344210886Spjd if (!ISREMOTE(ii)) 2345210886Spjd continue; 2346210886Spjd remote_close(gres, ii); 2347210886Spjd } 2348210886Spjd if (modified & MODIFIED_REMOTEADDR) { 2349217784Spjd vstr = nv_get_string(nv, "remoteaddr"); 2350217784Spjd strlcpy(gres->hr_remoteaddr, vstr, 2351210886Spjd sizeof(gres->hr_remoteaddr)); 2352210886Spjd } 2353210886Spjd } 2354210886Spjd#undef MODIFIED_REMOTEADDR 2355219818Spjd#undef MODIFIED_SOURCEADDR 2356210886Spjd#undef MODIFIED_REPLICATION 2357219351Spjd#undef MODIFIED_CHECKSUM 2358219354Spjd#undef MODIFIED_COMPRESSION 2359210886Spjd#undef MODIFIED_TIMEOUT 2360211886Spjd#undef MODIFIED_EXEC 2361229509Strociny#undef MODIFIED_METAFLUSH 2362210886Spjd 2363210886Spjd pjdlog_info("Configuration reloaded successfully."); 2364210886Spjd} 2365210886Spjd 2366211882Spjdstatic void 2367211981Spjdguard_one(struct hast_resource *res, unsigned int ncomp) 2368211981Spjd{ 
2369211981Spjd struct proto_conn *in, *out; 2370211981Spjd 2371211981Spjd if (!ISREMOTE(ncomp)) 2372211981Spjd return; 2373211981Spjd 2374211981Spjd rw_rlock(&hio_remote_lock[ncomp]); 2375211981Spjd 2376211981Spjd if (!real_remote(res)) { 2377211981Spjd rw_unlock(&hio_remote_lock[ncomp]); 2378211981Spjd return; 2379211981Spjd } 2380211981Spjd 2381211981Spjd if (ISCONNECTED(res, ncomp)) { 2382218138Spjd PJDLOG_ASSERT(res->hr_remotein != NULL); 2383218138Spjd PJDLOG_ASSERT(res->hr_remoteout != NULL); 2384211981Spjd rw_unlock(&hio_remote_lock[ncomp]); 2385211981Spjd pjdlog_debug(2, "remote_guard: Connection to %s is ok.", 2386211981Spjd res->hr_remoteaddr); 2387211981Spjd return; 2388211981Spjd } 2389211981Spjd 2390218138Spjd PJDLOG_ASSERT(res->hr_remotein == NULL); 2391218138Spjd PJDLOG_ASSERT(res->hr_remoteout == NULL); 2392211981Spjd /* 2393211981Spjd * Upgrade the lock. It doesn't have to be atomic as no other thread 2394211981Spjd * can change connection status from disconnected to connected. 2395211981Spjd */ 2396211981Spjd rw_unlock(&hio_remote_lock[ncomp]); 2397211981Spjd pjdlog_debug(2, "remote_guard: Reconnecting to %s.", 2398211981Spjd res->hr_remoteaddr); 2399211981Spjd in = out = NULL; 2400220898Spjd if (init_remote(res, &in, &out) == 0) { 2401211981Spjd rw_wlock(&hio_remote_lock[ncomp]); 2402218138Spjd PJDLOG_ASSERT(res->hr_remotein == NULL); 2403218138Spjd PJDLOG_ASSERT(res->hr_remoteout == NULL); 2404218138Spjd PJDLOG_ASSERT(in != NULL && out != NULL); 2405211981Spjd res->hr_remotein = in; 2406211981Spjd res->hr_remoteout = out; 2407211981Spjd rw_unlock(&hio_remote_lock[ncomp]); 2408211981Spjd pjdlog_info("Successfully reconnected to %s.", 2409211981Spjd res->hr_remoteaddr); 2410211981Spjd sync_start(); 2411211981Spjd } else { 2412211981Spjd /* Both connections should be NULL. 
*/ 2413218138Spjd PJDLOG_ASSERT(res->hr_remotein == NULL); 2414218138Spjd PJDLOG_ASSERT(res->hr_remoteout == NULL); 2415218138Spjd PJDLOG_ASSERT(in == NULL && out == NULL); 2416211981Spjd pjdlog_debug(2, "remote_guard: Reconnect to %s failed.", 2417211981Spjd res->hr_remoteaddr); 2418211981Spjd } 2419211981Spjd} 2420211981Spjd 2421204076Spjd/* 2422204076Spjd * Thread guards remote connections and reconnects when needed, handles 2423204076Spjd * signals, etc. 2424204076Spjd */ 2425204076Spjdstatic void * 2426204076Spjdguard_thread(void *arg) 2427204076Spjd{ 2428204076Spjd struct hast_resource *res = arg; 2429204076Spjd unsigned int ii, ncomps; 2430211982Spjd struct timespec timeout; 2431211981Spjd time_t lastcheck, now; 2432211982Spjd sigset_t mask; 2433211982Spjd int signo; 2434204076Spjd 2435204076Spjd ncomps = HAST_NCOMPONENTS; 2436211981Spjd lastcheck = time(NULL); 2437204076Spjd 2438211982Spjd PJDLOG_VERIFY(sigemptyset(&mask) == 0); 2439211982Spjd PJDLOG_VERIFY(sigaddset(&mask, SIGINT) == 0); 2440211982Spjd PJDLOG_VERIFY(sigaddset(&mask, SIGTERM) == 0); 2441211982Spjd 2442219721Strociny timeout.tv_sec = HAST_KEEPALIVE; 2443211982Spjd timeout.tv_nsec = 0; 2444211982Spjd signo = -1; 2445211982Spjd 2446204076Spjd for (;;) { 2447211982Spjd switch (signo) { 2448211982Spjd case SIGINT: 2449211982Spjd case SIGTERM: 2450211982Spjd sigexit_received = true; 2451204076Spjd primary_exitx(EX_OK, 2452204076Spjd "Termination signal received, exiting."); 2453211982Spjd break; 2454211982Spjd default: 2455211982Spjd break; 2456204076Spjd } 2457211882Spjd 2458220898Spjd /* 2459220898Spjd * Don't check connections until we fully started, 2460220898Spjd * as we may still be looping, waiting for remote node 2461220898Spjd * to switch from primary to secondary. 
2462220898Spjd */ 2463220898Spjd if (fullystarted) { 2464220898Spjd pjdlog_debug(2, "remote_guard: Checking connections."); 2465220898Spjd now = time(NULL); 2466220898Spjd if (lastcheck + HAST_KEEPALIVE <= now) { 2467220898Spjd for (ii = 0; ii < ncomps; ii++) 2468220898Spjd guard_one(res, ii); 2469220898Spjd lastcheck = now; 2470220898Spjd } 2471204076Spjd } 2472211982Spjd signo = sigtimedwait(&mask, NULL, &timeout); 2473204076Spjd } 2474204076Spjd /* NOTREACHED */ 2475204076Spjd return (NULL); 2476204076Spjd} 2477