primary.c revision 226855
1204076Spjd/*- 2204076Spjd * Copyright (c) 2009 The FreeBSD Foundation 3219351Spjd * Copyright (c) 2010-2011 Pawel Jakub Dawidek <pawel@dawidek.net> 4204076Spjd * All rights reserved. 5204076Spjd * 6204076Spjd * This software was developed by Pawel Jakub Dawidek under sponsorship from 7204076Spjd * the FreeBSD Foundation. 8204076Spjd * 9204076Spjd * Redistribution and use in source and binary forms, with or without 10204076Spjd * modification, are permitted provided that the following conditions 11204076Spjd * are met: 12204076Spjd * 1. Redistributions of source code must retain the above copyright 13204076Spjd * notice, this list of conditions and the following disclaimer. 14204076Spjd * 2. Redistributions in binary form must reproduce the above copyright 15204076Spjd * notice, this list of conditions and the following disclaimer in the 16204076Spjd * documentation and/or other materials provided with the distribution. 17204076Spjd * 18204076Spjd * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 19204076Spjd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20204076Spjd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21204076Spjd * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 22204076Spjd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23204076Spjd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24204076Spjd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25204076Spjd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26204076Spjd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27204076Spjd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28204076Spjd * SUCH DAMAGE. 
29204076Spjd */ 30204076Spjd 31204076Spjd#include <sys/cdefs.h> 32204076Spjd__FBSDID("$FreeBSD: head/sbin/hastd/primary.c 226855 2011-10-27 20:10:21Z pjd $"); 33204076Spjd 34204076Spjd#include <sys/types.h> 35204076Spjd#include <sys/time.h> 36204076Spjd#include <sys/bio.h> 37204076Spjd#include <sys/disk.h> 38204076Spjd#include <sys/refcount.h> 39204076Spjd#include <sys/stat.h> 40204076Spjd 41204076Spjd#include <geom/gate/g_gate.h> 42204076Spjd 43204076Spjd#include <err.h> 44204076Spjd#include <errno.h> 45204076Spjd#include <fcntl.h> 46204076Spjd#include <libgeom.h> 47204076Spjd#include <pthread.h> 48211982Spjd#include <signal.h> 49204076Spjd#include <stdint.h> 50204076Spjd#include <stdio.h> 51204076Spjd#include <string.h> 52204076Spjd#include <sysexits.h> 53204076Spjd#include <unistd.h> 54204076Spjd 55204076Spjd#include <activemap.h> 56204076Spjd#include <nv.h> 57204076Spjd#include <rangelock.h> 58204076Spjd 59204076Spjd#include "control.h" 60212038Spjd#include "event.h" 61204076Spjd#include "hast.h" 62204076Spjd#include "hast_proto.h" 63204076Spjd#include "hastd.h" 64211886Spjd#include "hooks.h" 65204076Spjd#include "metadata.h" 66204076Spjd#include "proto.h" 67204076Spjd#include "pjdlog.h" 68204076Spjd#include "subr.h" 69204076Spjd#include "synch.h" 70204076Spjd 71210886Spjd/* The is only one remote component for now. */ 72210886Spjd#define ISREMOTE(no) ((no) == 1) 73210886Spjd 74204076Spjdstruct hio { 75204076Spjd /* 76204076Spjd * Number of components we are still waiting for. 77204076Spjd * When this field goes to 0, we can send the request back to the 78204076Spjd * kernel. Each component has to decrease this counter by one 79204076Spjd * even on failure. 80204076Spjd */ 81204076Spjd unsigned int hio_countdown; 82204076Spjd /* 83204076Spjd * Each component has a place to store its own error. 84204076Spjd * Once the request is handled by all components we can decide if the 85204076Spjd * request overall is successful or not. 
86204076Spjd */ 87204076Spjd int *hio_errors; 88204076Spjd /* 89219818Spjd * Structure used to communicate with GEOM Gate class. 90204076Spjd */ 91204076Spjd struct g_gate_ctl_io hio_ggio; 92204076Spjd TAILQ_ENTRY(hio) *hio_next; 93204076Spjd}; 94204076Spjd#define hio_free_next hio_next[0] 95204076Spjd#define hio_done_next hio_next[0] 96204076Spjd 97204076Spjd/* 98204076Spjd * Free list holds unused structures. When free list is empty, we have to wait 99204076Spjd * until some in-progress requests are freed. 100204076Spjd */ 101204076Spjdstatic TAILQ_HEAD(, hio) hio_free_list; 102204076Spjdstatic pthread_mutex_t hio_free_list_lock; 103204076Spjdstatic pthread_cond_t hio_free_list_cond; 104204076Spjd/* 105204076Spjd * There is one send list for every component. One requests is placed on all 106204076Spjd * send lists - each component gets the same request, but each component is 107204076Spjd * responsible for managing his own send list. 108204076Spjd */ 109204076Spjdstatic TAILQ_HEAD(, hio) *hio_send_list; 110204076Spjdstatic pthread_mutex_t *hio_send_list_lock; 111204076Spjdstatic pthread_cond_t *hio_send_list_cond; 112204076Spjd/* 113204076Spjd * There is one recv list for every component, although local components don't 114204076Spjd * use recv lists as local requests are done synchronously. 115204076Spjd */ 116204076Spjdstatic TAILQ_HEAD(, hio) *hio_recv_list; 117204076Spjdstatic pthread_mutex_t *hio_recv_list_lock; 118204076Spjdstatic pthread_cond_t *hio_recv_list_cond; 119204076Spjd/* 120204076Spjd * Request is placed on done list by the slowest component (the one that 121204076Spjd * decreased hio_countdown from 1 to 0). 122204076Spjd */ 123204076Spjdstatic TAILQ_HEAD(, hio) hio_done_list; 124204076Spjdstatic pthread_mutex_t hio_done_list_lock; 125204076Spjdstatic pthread_cond_t hio_done_list_cond; 126204076Spjd/* 127204076Spjd * Structure below are for interaction with sync thread. 
128204076Spjd */ 129204076Spjdstatic bool sync_inprogress; 130204076Spjdstatic pthread_mutex_t sync_lock; 131204076Spjdstatic pthread_cond_t sync_cond; 132204076Spjd/* 133204076Spjd * The lock below allows to synchornize access to remote connections. 134204076Spjd */ 135204076Spjdstatic pthread_rwlock_t *hio_remote_lock; 136204076Spjd 137204076Spjd/* 138204076Spjd * Lock to synchronize metadata updates. Also synchronize access to 139204076Spjd * hr_primary_localcnt and hr_primary_remotecnt fields. 140204076Spjd */ 141204076Spjdstatic pthread_mutex_t metadata_lock; 142204076Spjd 143204076Spjd/* 144204076Spjd * Maximum number of outstanding I/O requests. 145204076Spjd */ 146204076Spjd#define HAST_HIO_MAX 256 147204076Spjd/* 148204076Spjd * Number of components. At this point there are only two components: local 149204076Spjd * and remote, but in the future it might be possible to use multiple local 150204076Spjd * and remote components. 151204076Spjd */ 152204076Spjd#define HAST_NCOMPONENTS 2 153204076Spjd 154204076Spjd#define ISCONNECTED(res, no) \ 155204076Spjd ((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL) 156204076Spjd 157204076Spjd#define QUEUE_INSERT1(hio, name, ncomp) do { \ 158204076Spjd bool _wakeup; \ 159204076Spjd \ 160204076Spjd mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 161204076Spjd _wakeup = TAILQ_EMPTY(&hio_##name##_list[(ncomp)]); \ 162204076Spjd TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio), \ 163204076Spjd hio_next[(ncomp)]); \ 164204076Spjd mtx_unlock(&hio_##name##_list_lock[ncomp]); \ 165204076Spjd if (_wakeup) \ 166204076Spjd cv_signal(&hio_##name##_list_cond[(ncomp)]); \ 167204076Spjd} while (0) 168204076Spjd#define QUEUE_INSERT2(hio, name) do { \ 169204076Spjd bool _wakeup; \ 170204076Spjd \ 171204076Spjd mtx_lock(&hio_##name##_list_lock); \ 172204076Spjd _wakeup = TAILQ_EMPTY(&hio_##name##_list); \ 173204076Spjd TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\ 174204076Spjd 
mtx_unlock(&hio_##name##_list_lock); \ 175204076Spjd if (_wakeup) \ 176204076Spjd cv_signal(&hio_##name##_list_cond); \ 177204076Spjd} while (0) 178214692Spjd#define QUEUE_TAKE1(hio, name, ncomp, timeout) do { \ 179214692Spjd bool _last; \ 180214692Spjd \ 181204076Spjd mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 182214692Spjd _last = false; \ 183214692Spjd while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL && !_last) { \ 184214692Spjd cv_timedwait(&hio_##name##_list_cond[(ncomp)], \ 185214692Spjd &hio_##name##_list_lock[(ncomp)], (timeout)); \ 186219864Spjd if ((timeout) != 0) \ 187214692Spjd _last = true; \ 188204076Spjd } \ 189214692Spjd if (hio != NULL) { \ 190214692Spjd TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio), \ 191214692Spjd hio_next[(ncomp)]); \ 192214692Spjd } \ 193204076Spjd mtx_unlock(&hio_##name##_list_lock[(ncomp)]); \ 194204076Spjd} while (0) 195204076Spjd#define QUEUE_TAKE2(hio, name) do { \ 196204076Spjd mtx_lock(&hio_##name##_list_lock); \ 197204076Spjd while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) { \ 198204076Spjd cv_wait(&hio_##name##_list_cond, \ 199204076Spjd &hio_##name##_list_lock); \ 200204076Spjd } \ 201204076Spjd TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next); \ 202204076Spjd mtx_unlock(&hio_##name##_list_lock); \ 203204076Spjd} while (0) 204204076Spjd 205209183Spjd#define SYNCREQ(hio) do { \ 206209183Spjd (hio)->hio_ggio.gctl_unit = -1; \ 207209183Spjd (hio)->hio_ggio.gctl_seq = 1; \ 208209183Spjd} while (0) 209204076Spjd#define ISSYNCREQ(hio) ((hio)->hio_ggio.gctl_unit == -1) 210204076Spjd#define SYNCREQDONE(hio) do { (hio)->hio_ggio.gctl_unit = -2; } while (0) 211204076Spjd#define ISSYNCREQDONE(hio) ((hio)->hio_ggio.gctl_unit == -2) 212204076Spjd 213204076Spjdstatic struct hast_resource *gres; 214204076Spjd 215204076Spjdstatic pthread_mutex_t range_lock; 216204076Spjdstatic struct rangelocks *range_regular; 217204076Spjdstatic bool range_regular_wait; 218204076Spjdstatic pthread_cond_t 
range_regular_cond; 219204076Spjdstatic struct rangelocks *range_sync; 220204076Spjdstatic bool range_sync_wait; 221204076Spjdstatic pthread_cond_t range_sync_cond; 222220898Spjdstatic bool fullystarted; 223204076Spjd 224204076Spjdstatic void *ggate_recv_thread(void *arg); 225204076Spjdstatic void *local_send_thread(void *arg); 226204076Spjdstatic void *remote_send_thread(void *arg); 227204076Spjdstatic void *remote_recv_thread(void *arg); 228204076Spjdstatic void *ggate_send_thread(void *arg); 229204076Spjdstatic void *sync_thread(void *arg); 230204076Spjdstatic void *guard_thread(void *arg); 231204076Spjd 232211982Spjdstatic void 233204076Spjdcleanup(struct hast_resource *res) 234204076Spjd{ 235204076Spjd int rerrno; 236204076Spjd 237204076Spjd /* Remember errno. */ 238204076Spjd rerrno = errno; 239204076Spjd 240204076Spjd /* Destroy ggate provider if we created one. */ 241204076Spjd if (res->hr_ggateunit >= 0) { 242204076Spjd struct g_gate_ctl_destroy ggiod; 243204076Spjd 244213533Spjd bzero(&ggiod, sizeof(ggiod)); 245204076Spjd ggiod.gctl_version = G_GATE_VERSION; 246204076Spjd ggiod.gctl_unit = res->hr_ggateunit; 247204076Spjd ggiod.gctl_force = 1; 248204076Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) < 0) { 249213531Spjd pjdlog_errno(LOG_WARNING, 250213531Spjd "Unable to destroy hast/%s device", 251204076Spjd res->hr_provname); 252204076Spjd } 253204076Spjd res->hr_ggateunit = -1; 254204076Spjd } 255204076Spjd 256204076Spjd /* Restore errno. */ 257204076Spjd errno = rerrno; 258204076Spjd} 259204076Spjd 260212899Spjdstatic __dead2 void 261204076Spjdprimary_exit(int exitcode, const char *fmt, ...) 
262204076Spjd{ 263204076Spjd va_list ap; 264204076Spjd 265218138Spjd PJDLOG_ASSERT(exitcode != EX_OK); 266204076Spjd va_start(ap, fmt); 267204076Spjd pjdlogv_errno(LOG_ERR, fmt, ap); 268204076Spjd va_end(ap); 269204076Spjd cleanup(gres); 270204076Spjd exit(exitcode); 271204076Spjd} 272204076Spjd 273212899Spjdstatic __dead2 void 274204076Spjdprimary_exitx(int exitcode, const char *fmt, ...) 275204076Spjd{ 276204076Spjd va_list ap; 277204076Spjd 278204076Spjd va_start(ap, fmt); 279204076Spjd pjdlogv(exitcode == EX_OK ? LOG_INFO : LOG_ERR, fmt, ap); 280204076Spjd va_end(ap); 281204076Spjd cleanup(gres); 282204076Spjd exit(exitcode); 283204076Spjd} 284204076Spjd 285204076Spjdstatic int 286204076Spjdhast_activemap_flush(struct hast_resource *res) 287204076Spjd{ 288204076Spjd const unsigned char *buf; 289204076Spjd size_t size; 290204076Spjd 291204076Spjd buf = activemap_bitmap(res->hr_amp, &size); 292218138Spjd PJDLOG_ASSERT(buf != NULL); 293218138Spjd PJDLOG_ASSERT((size % res->hr_local_sectorsize) == 0); 294204076Spjd if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) != 295204076Spjd (ssize_t)size) { 296225786Spjd pjdlog_errno(LOG_ERR, "Unable to flush activemap to disk"); 297204076Spjd return (-1); 298204076Spjd } 299225830Spjd if (res->hr_metaflush == 1 && g_flush(res->hr_localfd) == -1) { 300225830Spjd if (errno == EOPNOTSUPP) { 301225830Spjd pjdlog_warning("The %s provider doesn't support flushing write cache. 
Disabling it.", 302225830Spjd res->hr_localpath); 303225830Spjd res->hr_metaflush = 0; 304225830Spjd } else { 305225830Spjd pjdlog_errno(LOG_ERR, 306225830Spjd "Unable to flush disk cache on activemap update"); 307225830Spjd return (-1); 308225830Spjd } 309225830Spjd } 310204076Spjd return (0); 311204076Spjd} 312204076Spjd 313210881Spjdstatic bool 314210881Spjdreal_remote(const struct hast_resource *res) 315210881Spjd{ 316210881Spjd 317210881Spjd return (strcmp(res->hr_remoteaddr, "none") != 0); 318210881Spjd} 319210881Spjd 320204076Spjdstatic void 321204076Spjdinit_environment(struct hast_resource *res __unused) 322204076Spjd{ 323204076Spjd struct hio *hio; 324204076Spjd unsigned int ii, ncomps; 325204076Spjd 326204076Spjd /* 327204076Spjd * In the future it might be per-resource value. 328204076Spjd */ 329204076Spjd ncomps = HAST_NCOMPONENTS; 330204076Spjd 331204076Spjd /* 332204076Spjd * Allocate memory needed by lists. 333204076Spjd */ 334204076Spjd hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps); 335204076Spjd if (hio_send_list == NULL) { 336204076Spjd primary_exitx(EX_TEMPFAIL, 337204076Spjd "Unable to allocate %zu bytes of memory for send lists.", 338204076Spjd sizeof(hio_send_list[0]) * ncomps); 339204076Spjd } 340204076Spjd hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps); 341204076Spjd if (hio_send_list_lock == NULL) { 342204076Spjd primary_exitx(EX_TEMPFAIL, 343204076Spjd "Unable to allocate %zu bytes of memory for send list locks.", 344204076Spjd sizeof(hio_send_list_lock[0]) * ncomps); 345204076Spjd } 346204076Spjd hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps); 347204076Spjd if (hio_send_list_cond == NULL) { 348204076Spjd primary_exitx(EX_TEMPFAIL, 349204076Spjd "Unable to allocate %zu bytes of memory for send list condition variables.", 350204076Spjd sizeof(hio_send_list_cond[0]) * ncomps); 351204076Spjd } 352204076Spjd hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps); 353204076Spjd if 
(hio_recv_list == NULL) { 354204076Spjd primary_exitx(EX_TEMPFAIL, 355204076Spjd "Unable to allocate %zu bytes of memory for recv lists.", 356204076Spjd sizeof(hio_recv_list[0]) * ncomps); 357204076Spjd } 358204076Spjd hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps); 359204076Spjd if (hio_recv_list_lock == NULL) { 360204076Spjd primary_exitx(EX_TEMPFAIL, 361204076Spjd "Unable to allocate %zu bytes of memory for recv list locks.", 362204076Spjd sizeof(hio_recv_list_lock[0]) * ncomps); 363204076Spjd } 364204076Spjd hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps); 365204076Spjd if (hio_recv_list_cond == NULL) { 366204076Spjd primary_exitx(EX_TEMPFAIL, 367204076Spjd "Unable to allocate %zu bytes of memory for recv list condition variables.", 368204076Spjd sizeof(hio_recv_list_cond[0]) * ncomps); 369204076Spjd } 370204076Spjd hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps); 371204076Spjd if (hio_remote_lock == NULL) { 372204076Spjd primary_exitx(EX_TEMPFAIL, 373204076Spjd "Unable to allocate %zu bytes of memory for remote connections locks.", 374204076Spjd sizeof(hio_remote_lock[0]) * ncomps); 375204076Spjd } 376204076Spjd 377204076Spjd /* 378204076Spjd * Initialize lists, their locks and theirs condition variables. 
379204076Spjd */ 380204076Spjd TAILQ_INIT(&hio_free_list); 381204076Spjd mtx_init(&hio_free_list_lock); 382204076Spjd cv_init(&hio_free_list_cond); 383204076Spjd for (ii = 0; ii < HAST_NCOMPONENTS; ii++) { 384204076Spjd TAILQ_INIT(&hio_send_list[ii]); 385204076Spjd mtx_init(&hio_send_list_lock[ii]); 386204076Spjd cv_init(&hio_send_list_cond[ii]); 387204076Spjd TAILQ_INIT(&hio_recv_list[ii]); 388204076Spjd mtx_init(&hio_recv_list_lock[ii]); 389204076Spjd cv_init(&hio_recv_list_cond[ii]); 390204076Spjd rw_init(&hio_remote_lock[ii]); 391204076Spjd } 392204076Spjd TAILQ_INIT(&hio_done_list); 393204076Spjd mtx_init(&hio_done_list_lock); 394204076Spjd cv_init(&hio_done_list_cond); 395204076Spjd mtx_init(&metadata_lock); 396204076Spjd 397204076Spjd /* 398204076Spjd * Allocate requests pool and initialize requests. 399204076Spjd */ 400204076Spjd for (ii = 0; ii < HAST_HIO_MAX; ii++) { 401204076Spjd hio = malloc(sizeof(*hio)); 402204076Spjd if (hio == NULL) { 403204076Spjd primary_exitx(EX_TEMPFAIL, 404204076Spjd "Unable to allocate %zu bytes of memory for hio request.", 405204076Spjd sizeof(*hio)); 406204076Spjd } 407204076Spjd hio->hio_countdown = 0; 408204076Spjd hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps); 409204076Spjd if (hio->hio_errors == NULL) { 410204076Spjd primary_exitx(EX_TEMPFAIL, 411204076Spjd "Unable allocate %zu bytes of memory for hio errors.", 412204076Spjd sizeof(hio->hio_errors[0]) * ncomps); 413204076Spjd } 414204076Spjd hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps); 415204076Spjd if (hio->hio_next == NULL) { 416204076Spjd primary_exitx(EX_TEMPFAIL, 417204076Spjd "Unable allocate %zu bytes of memory for hio_next field.", 418204076Spjd sizeof(hio->hio_next[0]) * ncomps); 419204076Spjd } 420204076Spjd hio->hio_ggio.gctl_version = G_GATE_VERSION; 421204076Spjd hio->hio_ggio.gctl_data = malloc(MAXPHYS); 422204076Spjd if (hio->hio_ggio.gctl_data == NULL) { 423204076Spjd primary_exitx(EX_TEMPFAIL, 424204076Spjd "Unable to 
allocate %zu bytes of memory for gctl_data.", 425204076Spjd MAXPHYS); 426204076Spjd } 427204076Spjd hio->hio_ggio.gctl_length = MAXPHYS; 428204076Spjd hio->hio_ggio.gctl_error = 0; 429204076Spjd TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next); 430204076Spjd } 431204076Spjd} 432204076Spjd 433214284Spjdstatic bool 434214284Spjdinit_resuid(struct hast_resource *res) 435214284Spjd{ 436214284Spjd 437214284Spjd mtx_lock(&metadata_lock); 438214284Spjd if (res->hr_resuid != 0) { 439214284Spjd mtx_unlock(&metadata_lock); 440214284Spjd return (false); 441214284Spjd } else { 442214284Spjd /* Initialize unique resource identifier. */ 443214284Spjd arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid)); 444214284Spjd mtx_unlock(&metadata_lock); 445214284Spjd if (metadata_write(res) < 0) 446214284Spjd exit(EX_NOINPUT); 447214284Spjd return (true); 448214284Spjd } 449214284Spjd} 450214284Spjd 451204076Spjdstatic void 452204076Spjdinit_local(struct hast_resource *res) 453204076Spjd{ 454204076Spjd unsigned char *buf; 455204076Spjd size_t mapsize; 456204076Spjd 457204076Spjd if (metadata_read(res, true) < 0) 458204076Spjd exit(EX_NOINPUT); 459204076Spjd mtx_init(&res->hr_amp_lock); 460204076Spjd if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize, 461204076Spjd res->hr_local_sectorsize, res->hr_keepdirty) < 0) { 462204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create activemap"); 463204076Spjd } 464204076Spjd mtx_init(&range_lock); 465204076Spjd cv_init(&range_regular_cond); 466204076Spjd if (rangelock_init(&range_regular) < 0) 467204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create regular range lock"); 468204076Spjd cv_init(&range_sync_cond); 469204076Spjd if (rangelock_init(&range_sync) < 0) 470204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create sync range lock"); 471204076Spjd mapsize = activemap_ondisk_size(res->hr_amp); 472204076Spjd buf = calloc(1, mapsize); 473204076Spjd if (buf == NULL) { 474204076Spjd primary_exitx(EX_TEMPFAIL, 475204076Spjd 
"Unable to allocate buffer for activemap."); 476204076Spjd } 477204076Spjd if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) != 478204076Spjd (ssize_t)mapsize) { 479204076Spjd primary_exit(EX_NOINPUT, "Unable to read activemap"); 480204076Spjd } 481204076Spjd activemap_copyin(res->hr_amp, buf, mapsize); 482209181Spjd free(buf); 483204076Spjd if (res->hr_resuid != 0) 484204076Spjd return; 485204076Spjd /* 486214284Spjd * We're using provider for the first time. Initialize local and remote 487214284Spjd * counters. We don't initialize resuid here, as we want to do it just 488214284Spjd * in time. The reason for this is that we want to inform secondary 489214284Spjd * that there were no writes yet, so there is no need to synchronize 490214284Spjd * anything. 491204076Spjd */ 492219844Spjd res->hr_primary_localcnt = 0; 493204076Spjd res->hr_primary_remotecnt = 0; 494204076Spjd if (metadata_write(res) < 0) 495204076Spjd exit(EX_NOINPUT); 496204076Spjd} 497204076Spjd 498218218Spjdstatic int 499218218Spjdprimary_connect(struct hast_resource *res, struct proto_conn **connp) 500218218Spjd{ 501218218Spjd struct proto_conn *conn; 502218218Spjd int16_t val; 503218218Spjd 504218218Spjd val = 1; 505218218Spjd if (proto_send(res->hr_conn, &val, sizeof(val)) < 0) { 506218218Spjd primary_exit(EX_TEMPFAIL, 507218218Spjd "Unable to send connection request to parent"); 508218218Spjd } 509218218Spjd if (proto_recv(res->hr_conn, &val, sizeof(val)) < 0) { 510218218Spjd primary_exit(EX_TEMPFAIL, 511218218Spjd "Unable to receive reply to connection request from parent"); 512218218Spjd } 513218218Spjd if (val != 0) { 514218218Spjd errno = val; 515218218Spjd pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 516218218Spjd res->hr_remoteaddr); 517218218Spjd return (-1); 518218218Spjd } 519218218Spjd if (proto_connection_recv(res->hr_conn, true, &conn) < 0) { 520218218Spjd primary_exit(EX_TEMPFAIL, 521218218Spjd "Unable to receive connection from parent"); 522218218Spjd } 523220006Spjd 
if (proto_connect_wait(conn, res->hr_timeout) < 0) { 524218218Spjd pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 525218218Spjd res->hr_remoteaddr); 526218218Spjd proto_close(conn); 527218218Spjd return (-1); 528218218Spjd } 529218218Spjd /* Error in setting timeout is not critical, but why should it fail? */ 530218218Spjd if (proto_timeout(conn, res->hr_timeout) < 0) 531218218Spjd pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 532218218Spjd 533218218Spjd *connp = conn; 534218218Spjd 535218218Spjd return (0); 536218218Spjd} 537218218Spjd 538220898Spjdstatic int 539205738Spjdinit_remote(struct hast_resource *res, struct proto_conn **inp, 540205738Spjd struct proto_conn **outp) 541204076Spjd{ 542205738Spjd struct proto_conn *in, *out; 543204076Spjd struct nv *nvout, *nvin; 544204076Spjd const unsigned char *token; 545204076Spjd unsigned char *map; 546204076Spjd const char *errmsg; 547204076Spjd int32_t extentsize; 548204076Spjd int64_t datasize; 549204076Spjd uint32_t mapsize; 550204076Spjd size_t size; 551220898Spjd int error; 552204076Spjd 553218138Spjd PJDLOG_ASSERT((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL)); 554218138Spjd PJDLOG_ASSERT(real_remote(res)); 555205738Spjd 556205738Spjd in = out = NULL; 557211983Spjd errmsg = NULL; 558205738Spjd 559218218Spjd if (primary_connect(res, &out) == -1) 560220898Spjd return (ECONNREFUSED); 561218218Spjd 562220898Spjd error = ECONNABORTED; 563220898Spjd 564204076Spjd /* 565204076Spjd * First handshake step. 566204076Spjd * Setup outgoing connection with remote node. 
567204076Spjd */ 568204076Spjd nvout = nv_alloc(); 569204076Spjd nv_add_string(nvout, res->hr_name, "resource"); 570204076Spjd if (nv_error(nvout) != 0) { 571204076Spjd pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 572204076Spjd "Unable to allocate header for connection with %s", 573204076Spjd res->hr_remoteaddr); 574204076Spjd nv_free(nvout); 575204076Spjd goto close; 576204076Spjd } 577205738Spjd if (hast_proto_send(res, out, nvout, NULL, 0) < 0) { 578204076Spjd pjdlog_errno(LOG_WARNING, 579204076Spjd "Unable to send handshake header to %s", 580204076Spjd res->hr_remoteaddr); 581204076Spjd nv_free(nvout); 582204076Spjd goto close; 583204076Spjd } 584204076Spjd nv_free(nvout); 585205738Spjd if (hast_proto_recv_hdr(out, &nvin) < 0) { 586204076Spjd pjdlog_errno(LOG_WARNING, 587204076Spjd "Unable to receive handshake header from %s", 588204076Spjd res->hr_remoteaddr); 589204076Spjd goto close; 590204076Spjd } 591204076Spjd errmsg = nv_get_string(nvin, "errmsg"); 592204076Spjd if (errmsg != NULL) { 593204076Spjd pjdlog_warning("%s", errmsg); 594220898Spjd if (nv_exists(nvin, "wait")) 595220898Spjd error = EBUSY; 596204076Spjd nv_free(nvin); 597204076Spjd goto close; 598204076Spjd } 599204076Spjd token = nv_get_uint8_array(nvin, &size, "token"); 600204076Spjd if (token == NULL) { 601204076Spjd pjdlog_warning("Handshake header from %s has no 'token' field.", 602204076Spjd res->hr_remoteaddr); 603204076Spjd nv_free(nvin); 604204076Spjd goto close; 605204076Spjd } 606204076Spjd if (size != sizeof(res->hr_token)) { 607204076Spjd pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).", 608204076Spjd res->hr_remoteaddr, size, sizeof(res->hr_token)); 609204076Spjd nv_free(nvin); 610204076Spjd goto close; 611204076Spjd } 612204076Spjd bcopy(token, res->hr_token, sizeof(res->hr_token)); 613204076Spjd nv_free(nvin); 614204076Spjd 615204076Spjd /* 616204076Spjd * Second handshake step. 
617204076Spjd * Setup incoming connection with remote node. 618204076Spjd */ 619218218Spjd if (primary_connect(res, &in) == -1) 620204076Spjd goto close; 621218218Spjd 622204076Spjd nvout = nv_alloc(); 623204076Spjd nv_add_string(nvout, res->hr_name, "resource"); 624204076Spjd nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token), 625204076Spjd "token"); 626214284Spjd if (res->hr_resuid == 0) { 627214284Spjd /* 628214284Spjd * The resuid field was not yet initialized. 629214284Spjd * Because we do synchronization inside init_resuid(), it is 630214284Spjd * possible that someone already initialized it, the function 631214284Spjd * will return false then, but if we successfully initialized 632214284Spjd * it, we will get true. True means that there were no writes 633214284Spjd * to this resource yet and we want to inform secondary that 634214284Spjd * synchronization is not needed by sending "virgin" argument. 635214284Spjd */ 636214284Spjd if (init_resuid(res)) 637214284Spjd nv_add_int8(nvout, 1, "virgin"); 638214284Spjd } 639204076Spjd nv_add_uint64(nvout, res->hr_resuid, "resuid"); 640204076Spjd nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt"); 641204076Spjd nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt"); 642204076Spjd if (nv_error(nvout) != 0) { 643204076Spjd pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 644204076Spjd "Unable to allocate header for connection with %s", 645204076Spjd res->hr_remoteaddr); 646204076Spjd nv_free(nvout); 647204076Spjd goto close; 648204076Spjd } 649205738Spjd if (hast_proto_send(res, in, nvout, NULL, 0) < 0) { 650204076Spjd pjdlog_errno(LOG_WARNING, 651204076Spjd "Unable to send handshake header to %s", 652204076Spjd res->hr_remoteaddr); 653204076Spjd nv_free(nvout); 654204076Spjd goto close; 655204076Spjd } 656204076Spjd nv_free(nvout); 657205738Spjd if (hast_proto_recv_hdr(out, &nvin) < 0) { 658204076Spjd pjdlog_errno(LOG_WARNING, 659204076Spjd "Unable to receive handshake header from %s", 
660204076Spjd res->hr_remoteaddr); 661204076Spjd goto close; 662204076Spjd } 663204076Spjd errmsg = nv_get_string(nvin, "errmsg"); 664204076Spjd if (errmsg != NULL) { 665204076Spjd pjdlog_warning("%s", errmsg); 666204076Spjd nv_free(nvin); 667204076Spjd goto close; 668204076Spjd } 669204076Spjd datasize = nv_get_int64(nvin, "datasize"); 670204076Spjd if (datasize != res->hr_datasize) { 671204076Spjd pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).", 672204076Spjd (intmax_t)res->hr_datasize, (intmax_t)datasize); 673204076Spjd nv_free(nvin); 674204076Spjd goto close; 675204076Spjd } 676204076Spjd extentsize = nv_get_int32(nvin, "extentsize"); 677204076Spjd if (extentsize != res->hr_extentsize) { 678204076Spjd pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).", 679204076Spjd (ssize_t)res->hr_extentsize, (ssize_t)extentsize); 680204076Spjd nv_free(nvin); 681204076Spjd goto close; 682204076Spjd } 683204076Spjd res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt"); 684204076Spjd res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt"); 685204076Spjd res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc"); 686220865Spjd if (nv_exists(nvin, "virgin")) { 687220865Spjd /* 688220865Spjd * Secondary was reinitialized, bump localcnt if it is 0 as 689220865Spjd * only we have the data. 
690220865Spjd */ 691220865Spjd PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_PRIMARY); 692220865Spjd PJDLOG_ASSERT(res->hr_secondary_localcnt == 0); 693220865Spjd 694220865Spjd if (res->hr_primary_localcnt == 0) { 695220865Spjd PJDLOG_ASSERT(res->hr_secondary_remotecnt == 0); 696220865Spjd 697220865Spjd mtx_lock(&metadata_lock); 698220865Spjd res->hr_primary_localcnt++; 699220865Spjd pjdlog_debug(1, "Increasing localcnt to %ju.", 700220865Spjd (uintmax_t)res->hr_primary_localcnt); 701220865Spjd (void)metadata_write(res); 702220865Spjd mtx_unlock(&metadata_lock); 703220865Spjd } 704220865Spjd } 705204076Spjd map = NULL; 706204076Spjd mapsize = nv_get_uint32(nvin, "mapsize"); 707204076Spjd if (mapsize > 0) { 708204076Spjd map = malloc(mapsize); 709204076Spjd if (map == NULL) { 710204076Spjd pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).", 711204076Spjd (uintmax_t)mapsize); 712204076Spjd nv_free(nvin); 713204076Spjd goto close; 714204076Spjd } 715204076Spjd /* 716204076Spjd * Remote node have some dirty extents on its own, lets 717204076Spjd * download its activemap. 718204076Spjd */ 719205738Spjd if (hast_proto_recv_data(res, out, nvin, map, 720204076Spjd mapsize) < 0) { 721204076Spjd pjdlog_errno(LOG_ERR, 722204076Spjd "Unable to receive remote activemap"); 723204076Spjd nv_free(nvin); 724204076Spjd free(map); 725204076Spjd goto close; 726204076Spjd } 727204076Spjd /* 728204076Spjd * Merge local and remote bitmaps. 729204076Spjd */ 730204076Spjd activemap_merge(res->hr_amp, map, mapsize); 731204076Spjd free(map); 732204076Spjd /* 733204076Spjd * Now that we merged bitmaps from both nodes, flush it to the 734204076Spjd * disk before we start to synchronize. 735204076Spjd */ 736204076Spjd (void)hast_activemap_flush(res); 737204076Spjd } 738214274Spjd nv_free(nvin); 739223181Strociny#ifdef notyet 740220271Spjd /* Setup directions. 
*/ 741220271Spjd if (proto_send(out, NULL, 0) == -1) 742220271Spjd pjdlog_errno(LOG_WARNING, "Unable to set connection direction"); 743220271Spjd if (proto_recv(in, NULL, 0) == -1) 744220271Spjd pjdlog_errno(LOG_WARNING, "Unable to set connection direction"); 745223181Strociny#endif 746204076Spjd pjdlog_info("Connected to %s.", res->hr_remoteaddr); 747205738Spjd if (inp != NULL && outp != NULL) { 748205738Spjd *inp = in; 749205738Spjd *outp = out; 750205738Spjd } else { 751205738Spjd res->hr_remotein = in; 752205738Spjd res->hr_remoteout = out; 753205738Spjd } 754212038Spjd event_send(res, EVENT_CONNECT); 755220898Spjd return (0); 756205738Spjdclose: 757211983Spjd if (errmsg != NULL && strcmp(errmsg, "Split-brain condition!") == 0) 758212038Spjd event_send(res, EVENT_SPLITBRAIN); 759205738Spjd proto_close(out); 760205738Spjd if (in != NULL) 761205738Spjd proto_close(in); 762220898Spjd return (error); 763205738Spjd} 764205738Spjd 765205738Spjdstatic void 766205738Spjdsync_start(void) 767205738Spjd{ 768205738Spjd 769204076Spjd mtx_lock(&sync_lock); 770204076Spjd sync_inprogress = true; 771204076Spjd mtx_unlock(&sync_lock); 772204076Spjd cv_signal(&sync_cond); 773204076Spjd} 774204076Spjd 775204076Spjdstatic void 776211878Spjdsync_stop(void) 777211878Spjd{ 778211878Spjd 779211878Spjd mtx_lock(&sync_lock); 780211878Spjd if (sync_inprogress) 781211878Spjd sync_inprogress = false; 782211878Spjd mtx_unlock(&sync_lock); 783211878Spjd} 784211878Spjd 785211878Spjdstatic void 786204076Spjdinit_ggate(struct hast_resource *res) 787204076Spjd{ 788204076Spjd struct g_gate_ctl_create ggiocreate; 789204076Spjd struct g_gate_ctl_cancel ggiocancel; 790204076Spjd 791204076Spjd /* 792204076Spjd * We communicate with ggate via /dev/ggctl. Open it. 
793204076Spjd */ 794204076Spjd res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR); 795204076Spjd if (res->hr_ggatefd < 0) 796204076Spjd primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME); 797204076Spjd /* 798204076Spjd * Create provider before trying to connect, as connection failure 799204076Spjd * is not critical, but may take some time. 800204076Spjd */ 801213533Spjd bzero(&ggiocreate, sizeof(ggiocreate)); 802204076Spjd ggiocreate.gctl_version = G_GATE_VERSION; 803204076Spjd ggiocreate.gctl_mediasize = res->hr_datasize; 804204076Spjd ggiocreate.gctl_sectorsize = res->hr_local_sectorsize; 805204076Spjd ggiocreate.gctl_flags = 0; 806220266Spjd ggiocreate.gctl_maxcount = 0; 807204076Spjd ggiocreate.gctl_timeout = 0; 808204076Spjd ggiocreate.gctl_unit = G_GATE_NAME_GIVEN; 809204076Spjd snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s", 810204076Spjd res->hr_provname); 811204076Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) { 812204076Spjd pjdlog_info("Device hast/%s created.", res->hr_provname); 813204076Spjd res->hr_ggateunit = ggiocreate.gctl_unit; 814204076Spjd return; 815204076Spjd } 816204076Spjd if (errno != EEXIST) { 817204076Spjd primary_exit(EX_OSERR, "Unable to create hast/%s device", 818204076Spjd res->hr_provname); 819204076Spjd } 820204076Spjd pjdlog_debug(1, 821204076Spjd "Device hast/%s already exists, we will try to take it over.", 822204076Spjd res->hr_provname); 823204076Spjd /* 824204076Spjd * If we received EEXIST, we assume that the process who created the 825204076Spjd * provider died and didn't clean up. In that case we will start from 826204076Spjd * where he left of. 
827204076Spjd */ 828213533Spjd bzero(&ggiocancel, sizeof(ggiocancel)); 829204076Spjd ggiocancel.gctl_version = G_GATE_VERSION; 830204076Spjd ggiocancel.gctl_unit = G_GATE_NAME_GIVEN; 831204076Spjd snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s", 832204076Spjd res->hr_provname); 833204076Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) { 834204076Spjd pjdlog_info("Device hast/%s recovered.", res->hr_provname); 835204076Spjd res->hr_ggateunit = ggiocancel.gctl_unit; 836204076Spjd return; 837204076Spjd } 838204076Spjd primary_exit(EX_OSERR, "Unable to take over hast/%s device", 839204076Spjd res->hr_provname); 840204076Spjd} 841204076Spjd 842204076Spjdvoid 843204076Spjdhastd_primary(struct hast_resource *res) 844204076Spjd{ 845204076Spjd pthread_t td; 846204076Spjd pid_t pid; 847219482Strociny int error, mode, debuglevel; 848204076Spjd 849204076Spjd /* 850218218Spjd * Create communication channel for sending control commands from 851218218Spjd * parent to child. 852204076Spjd */ 853219818Spjd if (proto_client(NULL, "socketpair://", &res->hr_ctrl) < 0) { 854218042Spjd /* TODO: There's no need for this to be fatal error. */ 855204076Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 856212034Spjd pjdlog_exit(EX_OSERR, 857204076Spjd "Unable to create control sockets between parent and child"); 858204076Spjd } 859212038Spjd /* 860218218Spjd * Create communication channel for sending events from child to parent. 861212038Spjd */ 862219818Spjd if (proto_client(NULL, "socketpair://", &res->hr_event) < 0) { 863218042Spjd /* TODO: There's no need for this to be fatal error. */ 864212038Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 865212038Spjd pjdlog_exit(EX_OSERR, 866212038Spjd "Unable to create event sockets between child and parent"); 867212038Spjd } 868218218Spjd /* 869218218Spjd * Create communication channel for sending connection requests from 870218218Spjd * child to parent. 
871218218Spjd */ 872219818Spjd if (proto_client(NULL, "socketpair://", &res->hr_conn) < 0) { 873218218Spjd /* TODO: There's no need for this to be fatal error. */ 874218218Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 875218218Spjd pjdlog_exit(EX_OSERR, 876218218Spjd "Unable to create connection sockets between child and parent"); 877218218Spjd } 878204076Spjd 879204076Spjd pid = fork(); 880204076Spjd if (pid < 0) { 881218042Spjd /* TODO: There's no need for this to be fatal error. */ 882204076Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 883212034Spjd pjdlog_exit(EX_TEMPFAIL, "Unable to fork"); 884204076Spjd } 885204076Spjd 886204076Spjd if (pid > 0) { 887204076Spjd /* This is parent. */ 888212038Spjd /* Declare that we are receiver. */ 889212038Spjd proto_recv(res->hr_event, NULL, 0); 890218218Spjd proto_recv(res->hr_conn, NULL, 0); 891218043Spjd /* Declare that we are sender. */ 892218043Spjd proto_send(res->hr_ctrl, NULL, 0); 893204076Spjd res->hr_workerpid = pid; 894204076Spjd return; 895204076Spjd } 896211977Spjd 897211984Spjd gres = res; 898218043Spjd mode = pjdlog_mode_get(); 899219482Strociny debuglevel = pjdlog_debug_get(); 900211984Spjd 901218043Spjd /* Declare that we are sender. */ 902218043Spjd proto_send(res->hr_event, NULL, 0); 903218218Spjd proto_send(res->hr_conn, NULL, 0); 904218043Spjd /* Declare that we are receiver. 
*/ 905218043Spjd proto_recv(res->hr_ctrl, NULL, 0); 906218043Spjd descriptors_cleanup(res); 907204076Spjd 908218045Spjd descriptors_assert(res, mode); 909218045Spjd 910218043Spjd pjdlog_init(mode); 911219482Strociny pjdlog_debug_set(debuglevel); 912218043Spjd pjdlog_prefix_set("[%s] (%s) ", res->hr_name, role2str(res->hr_role)); 913220005Spjd setproctitle("%s (%s)", res->hr_name, role2str(res->hr_role)); 914204076Spjd 915204076Spjd init_local(res); 916213007Spjd init_ggate(res); 917213007Spjd init_environment(res); 918217784Spjd 919221899Spjd if (drop_privs(res) != 0) { 920218049Spjd cleanup(res); 921218049Spjd exit(EX_CONFIG); 922218049Spjd } 923218214Spjd pjdlog_info("Privileges successfully dropped."); 924218049Spjd 925213007Spjd /* 926213530Spjd * Create the guard thread first, so we can handle signals from the 927213530Spjd * very begining. 928213530Spjd */ 929213530Spjd error = pthread_create(&td, NULL, guard_thread, res); 930218138Spjd PJDLOG_ASSERT(error == 0); 931213530Spjd /* 932213007Spjd * Create the control thread before sending any event to the parent, 933213007Spjd * as we can deadlock when parent sends control request to worker, 934213007Spjd * but worker has no control thread started yet, so parent waits. 935213007Spjd * In the meantime worker sends an event to the parent, but parent 936213007Spjd * is unable to handle the event, because it waits for control 937213007Spjd * request response. 
938213007Spjd */ 939213007Spjd error = pthread_create(&td, NULL, ctrl_thread, res); 940218138Spjd PJDLOG_ASSERT(error == 0); 941220898Spjd if (real_remote(res)) { 942220898Spjd error = init_remote(res, NULL, NULL); 943220898Spjd if (error == 0) { 944220898Spjd sync_start(); 945220898Spjd } else if (error == EBUSY) { 946220898Spjd time_t start = time(NULL); 947220898Spjd 948220898Spjd pjdlog_warning("Waiting for remote node to become %s for %ds.", 949220898Spjd role2str(HAST_ROLE_SECONDARY), 950220898Spjd res->hr_timeout); 951220898Spjd for (;;) { 952220898Spjd sleep(1); 953220898Spjd error = init_remote(res, NULL, NULL); 954220898Spjd if (error != EBUSY) 955220898Spjd break; 956220898Spjd if (time(NULL) > start + res->hr_timeout) 957220898Spjd break; 958220898Spjd } 959220898Spjd if (error == EBUSY) { 960220898Spjd pjdlog_warning("Remote node is still %s, starting anyway.", 961220898Spjd role2str(HAST_ROLE_PRIMARY)); 962220898Spjd } 963220898Spjd } 964220898Spjd } 965204076Spjd error = pthread_create(&td, NULL, ggate_recv_thread, res); 966218138Spjd PJDLOG_ASSERT(error == 0); 967204076Spjd error = pthread_create(&td, NULL, local_send_thread, res); 968218138Spjd PJDLOG_ASSERT(error == 0); 969204076Spjd error = pthread_create(&td, NULL, remote_send_thread, res); 970218138Spjd PJDLOG_ASSERT(error == 0); 971204076Spjd error = pthread_create(&td, NULL, remote_recv_thread, res); 972218138Spjd PJDLOG_ASSERT(error == 0); 973204076Spjd error = pthread_create(&td, NULL, ggate_send_thread, res); 974218138Spjd PJDLOG_ASSERT(error == 0); 975220898Spjd fullystarted = true; 976213530Spjd (void)sync_thread(res); 977204076Spjd} 978204076Spjd 979204076Spjdstatic void 980204076Spjdreqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio, const char *fmt, ...) 
981204076Spjd{ 982204076Spjd char msg[1024]; 983204076Spjd va_list ap; 984204076Spjd int len; 985204076Spjd 986204076Spjd va_start(ap, fmt); 987204076Spjd len = vsnprintf(msg, sizeof(msg), fmt, ap); 988204076Spjd va_end(ap); 989204076Spjd if ((size_t)len < sizeof(msg)) { 990204076Spjd switch (ggio->gctl_cmd) { 991204076Spjd case BIO_READ: 992204076Spjd (void)snprintf(msg + len, sizeof(msg) - len, 993204076Spjd "READ(%ju, %ju).", (uintmax_t)ggio->gctl_offset, 994204076Spjd (uintmax_t)ggio->gctl_length); 995204076Spjd break; 996204076Spjd case BIO_DELETE: 997204076Spjd (void)snprintf(msg + len, sizeof(msg) - len, 998204076Spjd "DELETE(%ju, %ju).", (uintmax_t)ggio->gctl_offset, 999204076Spjd (uintmax_t)ggio->gctl_length); 1000204076Spjd break; 1001204076Spjd case BIO_FLUSH: 1002204076Spjd (void)snprintf(msg + len, sizeof(msg) - len, "FLUSH."); 1003204076Spjd break; 1004204076Spjd case BIO_WRITE: 1005204076Spjd (void)snprintf(msg + len, sizeof(msg) - len, 1006204076Spjd "WRITE(%ju, %ju).", (uintmax_t)ggio->gctl_offset, 1007204076Spjd (uintmax_t)ggio->gctl_length); 1008204076Spjd break; 1009204076Spjd default: 1010204076Spjd (void)snprintf(msg + len, sizeof(msg) - len, 1011204076Spjd "UNKNOWN(%u).", (unsigned int)ggio->gctl_cmd); 1012204076Spjd break; 1013204076Spjd } 1014204076Spjd } 1015204076Spjd pjdlog_common(loglevel, debuglevel, -1, "%s", msg); 1016204076Spjd} 1017204076Spjd 1018204076Spjdstatic void 1019204076Spjdremote_close(struct hast_resource *res, int ncomp) 1020204076Spjd{ 1021204076Spjd 1022204076Spjd rw_wlock(&hio_remote_lock[ncomp]); 1023204076Spjd /* 1024226855Spjd * Check for a race between dropping rlock and acquiring wlock - 1025204076Spjd * another thread can close connection in-between. 
1026204076Spjd */ 1027204076Spjd if (!ISCONNECTED(res, ncomp)) { 1028218138Spjd PJDLOG_ASSERT(res->hr_remotein == NULL); 1029218138Spjd PJDLOG_ASSERT(res->hr_remoteout == NULL); 1030204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1031204076Spjd return; 1032204076Spjd } 1033204076Spjd 1034218138Spjd PJDLOG_ASSERT(res->hr_remotein != NULL); 1035218138Spjd PJDLOG_ASSERT(res->hr_remoteout != NULL); 1036204076Spjd 1037211881Spjd pjdlog_debug(2, "Closing incoming connection to %s.", 1038204076Spjd res->hr_remoteaddr); 1039204076Spjd proto_close(res->hr_remotein); 1040204076Spjd res->hr_remotein = NULL; 1041211881Spjd pjdlog_debug(2, "Closing outgoing connection to %s.", 1042204076Spjd res->hr_remoteaddr); 1043204076Spjd proto_close(res->hr_remoteout); 1044204076Spjd res->hr_remoteout = NULL; 1045204076Spjd 1046204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1047204076Spjd 1048211881Spjd pjdlog_warning("Disconnected from %s.", res->hr_remoteaddr); 1049211881Spjd 1050204076Spjd /* 1051204076Spjd * Stop synchronization if in-progress. 
1052204076Spjd */ 1053211878Spjd sync_stop(); 1054211984Spjd 1055212038Spjd event_send(res, EVENT_DISCONNECT); 1056204076Spjd} 1057204076Spjd 1058204076Spjd/* 1059204076Spjd * Thread receives ggate I/O requests from the kernel and passes them to 1060204076Spjd * appropriate threads: 1061204076Spjd * WRITE - always goes to both local_send and remote_send threads 1062204076Spjd * READ (when the block is up-to-date on local component) - 1063204076Spjd * only local_send thread 1064204076Spjd * READ (when the block isn't up-to-date on local component) - 1065204076Spjd * only remote_send thread 1066204076Spjd * DELETE - always goes to both local_send and remote_send threads 1067204076Spjd * FLUSH - always goes to both local_send and remote_send threads 1068204076Spjd */ 1069204076Spjdstatic void * 1070204076Spjdggate_recv_thread(void *arg) 1071204076Spjd{ 1072204076Spjd struct hast_resource *res = arg; 1073204076Spjd struct g_gate_ctl_io *ggio; 1074204076Spjd struct hio *hio; 1075204076Spjd unsigned int ii, ncomp, ncomps; 1076204076Spjd int error; 1077204076Spjd 1078204076Spjd ncomps = HAST_NCOMPONENTS; 1079204076Spjd 1080204076Spjd for (;;) { 1081204076Spjd pjdlog_debug(2, "ggate_recv: Taking free request."); 1082204076Spjd QUEUE_TAKE2(hio, free); 1083204076Spjd pjdlog_debug(2, "ggate_recv: (%p) Got free request.", hio); 1084204076Spjd ggio = &hio->hio_ggio; 1085204076Spjd ggio->gctl_unit = res->hr_ggateunit; 1086204076Spjd ggio->gctl_length = MAXPHYS; 1087204076Spjd ggio->gctl_error = 0; 1088204076Spjd pjdlog_debug(2, 1089204076Spjd "ggate_recv: (%p) Waiting for request from the kernel.", 1090204076Spjd hio); 1091204076Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) < 0) { 1092204076Spjd if (sigexit_received) 1093204076Spjd pthread_exit(NULL); 1094204076Spjd primary_exit(EX_OSERR, "G_GATE_CMD_START failed"); 1095204076Spjd } 1096204076Spjd error = ggio->gctl_error; 1097204076Spjd switch (error) { 1098204076Spjd case 0: 1099204076Spjd break; 1100204076Spjd case 
ECANCELED: 1101204076Spjd /* Exit gracefully. */ 1102204076Spjd if (!sigexit_received) { 1103204076Spjd pjdlog_debug(2, 1104204076Spjd "ggate_recv: (%p) Received cancel from the kernel.", 1105204076Spjd hio); 1106204076Spjd pjdlog_info("Received cancel from the kernel, exiting."); 1107204076Spjd } 1108204076Spjd pthread_exit(NULL); 1109204076Spjd case ENOMEM: 1110204076Spjd /* 1111204076Spjd * Buffer too small? Impossible, we allocate MAXPHYS 1112204076Spjd * bytes - request can't be bigger than that. 1113204076Spjd */ 1114204076Spjd /* FALLTHROUGH */ 1115204076Spjd case ENXIO: 1116204076Spjd default: 1117204076Spjd primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.", 1118204076Spjd strerror(error)); 1119204076Spjd } 1120204076Spjd for (ii = 0; ii < ncomps; ii++) 1121204076Spjd hio->hio_errors[ii] = EINVAL; 1122204076Spjd reqlog(LOG_DEBUG, 2, ggio, 1123204076Spjd "ggate_recv: (%p) Request received from the kernel: ", 1124204076Spjd hio); 1125204076Spjd /* 1126204076Spjd * Inform all components about new write request. 1127204076Spjd * For read request prefer local component unless the given 1128204076Spjd * range is out-of-date, then use remote component. 1129204076Spjd */ 1130204076Spjd switch (ggio->gctl_cmd) { 1131204076Spjd case BIO_READ: 1132222228Spjd res->hr_stat_read++; 1133204076Spjd pjdlog_debug(2, 1134204076Spjd "ggate_recv: (%p) Moving request to the send queue.", 1135204076Spjd hio); 1136204076Spjd refcount_init(&hio->hio_countdown, 1); 1137204076Spjd mtx_lock(&metadata_lock); 1138204076Spjd if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF || 1139204076Spjd res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1140204076Spjd /* 1141204076Spjd * This range is up-to-date on local component, 1142204076Spjd * so handle request locally. 1143204076Spjd */ 1144204076Spjd /* Local component is 0 for now. 
*/ 1145204076Spjd ncomp = 0; 1146204076Spjd } else /* if (res->hr_syncsrc == 1147204076Spjd HAST_SYNCSRC_SECONDARY) */ { 1148218138Spjd PJDLOG_ASSERT(res->hr_syncsrc == 1149204076Spjd HAST_SYNCSRC_SECONDARY); 1150204076Spjd /* 1151204076Spjd * This range is out-of-date on local component, 1152204076Spjd * so send request to the remote node. 1153204076Spjd */ 1154204076Spjd /* Remote component is 1 for now. */ 1155204076Spjd ncomp = 1; 1156204076Spjd } 1157204076Spjd mtx_unlock(&metadata_lock); 1158204076Spjd QUEUE_INSERT1(hio, send, ncomp); 1159204076Spjd break; 1160204076Spjd case BIO_WRITE: 1161222228Spjd res->hr_stat_write++; 1162226851Spjd if (res->hr_resuid == 0 && 1163226851Spjd res->hr_primary_localcnt == 0) { 1164226851Spjd /* This is first write. */ 1165219844Spjd res->hr_primary_localcnt = 1; 1166214284Spjd } 1167204076Spjd for (;;) { 1168204076Spjd mtx_lock(&range_lock); 1169204076Spjd if (rangelock_islocked(range_sync, 1170204076Spjd ggio->gctl_offset, ggio->gctl_length)) { 1171204076Spjd pjdlog_debug(2, 1172204076Spjd "regular: Range offset=%jd length=%zu locked.", 1173204076Spjd (intmax_t)ggio->gctl_offset, 1174204076Spjd (size_t)ggio->gctl_length); 1175204076Spjd range_regular_wait = true; 1176204076Spjd cv_wait(&range_regular_cond, &range_lock); 1177204076Spjd range_regular_wait = false; 1178204076Spjd mtx_unlock(&range_lock); 1179204076Spjd continue; 1180204076Spjd } 1181204076Spjd if (rangelock_add(range_regular, 1182204076Spjd ggio->gctl_offset, ggio->gctl_length) < 0) { 1183204076Spjd mtx_unlock(&range_lock); 1184204076Spjd pjdlog_debug(2, 1185204076Spjd "regular: Range offset=%jd length=%zu is already locked, waiting.", 1186204076Spjd (intmax_t)ggio->gctl_offset, 1187204076Spjd (size_t)ggio->gctl_length); 1188204076Spjd sleep(1); 1189204076Spjd continue; 1190204076Spjd } 1191204076Spjd mtx_unlock(&range_lock); 1192204076Spjd break; 1193204076Spjd } 1194204076Spjd mtx_lock(&res->hr_amp_lock); 1195204076Spjd if (activemap_write_start(res->hr_amp, 
1196204076Spjd ggio->gctl_offset, ggio->gctl_length)) { 1197222228Spjd res->hr_stat_activemap_update++; 1198204076Spjd (void)hast_activemap_flush(res); 1199204076Spjd } 1200204076Spjd mtx_unlock(&res->hr_amp_lock); 1201204076Spjd /* FALLTHROUGH */ 1202204076Spjd case BIO_DELETE: 1203204076Spjd case BIO_FLUSH: 1204222228Spjd switch (ggio->gctl_cmd) { 1205222228Spjd case BIO_DELETE: 1206222228Spjd res->hr_stat_delete++; 1207222228Spjd break; 1208222228Spjd case BIO_FLUSH: 1209222228Spjd res->hr_stat_flush++; 1210222228Spjd break; 1211222228Spjd } 1212204076Spjd pjdlog_debug(2, 1213225835Spjd "ggate_recv: (%p) Moving request to the send queue.", 1214204076Spjd hio); 1215204076Spjd refcount_init(&hio->hio_countdown, ncomps); 1216204076Spjd for (ii = 0; ii < ncomps; ii++) 1217204076Spjd QUEUE_INSERT1(hio, send, ii); 1218204076Spjd break; 1219204076Spjd } 1220204076Spjd } 1221204076Spjd /* NOTREACHED */ 1222204076Spjd return (NULL); 1223204076Spjd} 1224204076Spjd 1225204076Spjd/* 1226204076Spjd * Thread reads from or writes to local component. 1227204076Spjd * If local read fails, it redirects it to remote_send thread. 1228204076Spjd */ 1229204076Spjdstatic void * 1230204076Spjdlocal_send_thread(void *arg) 1231204076Spjd{ 1232204076Spjd struct hast_resource *res = arg; 1233204076Spjd struct g_gate_ctl_io *ggio; 1234204076Spjd struct hio *hio; 1235204076Spjd unsigned int ncomp, rncomp; 1236204076Spjd ssize_t ret; 1237204076Spjd 1238204076Spjd /* Local component is 0 for now. */ 1239204076Spjd ncomp = 0; 1240204076Spjd /* Remote component is 1 for now. 
*/ 1241204076Spjd rncomp = 1; 1242204076Spjd 1243204076Spjd for (;;) { 1244204076Spjd pjdlog_debug(2, "local_send: Taking request."); 1245214692Spjd QUEUE_TAKE1(hio, send, ncomp, 0); 1246204076Spjd pjdlog_debug(2, "local_send: (%p) Got request.", hio); 1247204076Spjd ggio = &hio->hio_ggio; 1248204076Spjd switch (ggio->gctl_cmd) { 1249204076Spjd case BIO_READ: 1250204076Spjd ret = pread(res->hr_localfd, ggio->gctl_data, 1251204076Spjd ggio->gctl_length, 1252204076Spjd ggio->gctl_offset + res->hr_localoff); 1253204076Spjd if (ret == ggio->gctl_length) 1254204076Spjd hio->hio_errors[ncomp] = 0; 1255222467Strociny else if (!ISSYNCREQ(hio)) { 1256204076Spjd /* 1257204076Spjd * If READ failed, try to read from remote node. 1258204076Spjd */ 1259216479Spjd if (ret < 0) { 1260216479Spjd reqlog(LOG_WARNING, 0, ggio, 1261216479Spjd "Local request failed (%s), trying remote node. ", 1262216479Spjd strerror(errno)); 1263216479Spjd } else if (ret != ggio->gctl_length) { 1264216479Spjd reqlog(LOG_WARNING, 0, ggio, 1265216479Spjd "Local request failed (%zd != %jd), trying remote node. 
", 1266216494Spjd ret, (intmax_t)ggio->gctl_length); 1267216479Spjd } 1268204076Spjd QUEUE_INSERT1(hio, send, rncomp); 1269204076Spjd continue; 1270204076Spjd } 1271204076Spjd break; 1272204076Spjd case BIO_WRITE: 1273204076Spjd ret = pwrite(res->hr_localfd, ggio->gctl_data, 1274204076Spjd ggio->gctl_length, 1275204076Spjd ggio->gctl_offset + res->hr_localoff); 1276216479Spjd if (ret < 0) { 1277204076Spjd hio->hio_errors[ncomp] = errno; 1278216479Spjd reqlog(LOG_WARNING, 0, ggio, 1279216479Spjd "Local request failed (%s): ", 1280216479Spjd strerror(errno)); 1281216479Spjd } else if (ret != ggio->gctl_length) { 1282204076Spjd hio->hio_errors[ncomp] = EIO; 1283216479Spjd reqlog(LOG_WARNING, 0, ggio, 1284216479Spjd "Local request failed (%zd != %jd): ", 1285216494Spjd ret, (intmax_t)ggio->gctl_length); 1286216479Spjd } else { 1287204076Spjd hio->hio_errors[ncomp] = 0; 1288216479Spjd } 1289204076Spjd break; 1290204076Spjd case BIO_DELETE: 1291204076Spjd ret = g_delete(res->hr_localfd, 1292204076Spjd ggio->gctl_offset + res->hr_localoff, 1293204076Spjd ggio->gctl_length); 1294216479Spjd if (ret < 0) { 1295204076Spjd hio->hio_errors[ncomp] = errno; 1296216479Spjd reqlog(LOG_WARNING, 0, ggio, 1297216479Spjd "Local request failed (%s): ", 1298216479Spjd strerror(errno)); 1299216479Spjd } else { 1300204076Spjd hio->hio_errors[ncomp] = 0; 1301216479Spjd } 1302204076Spjd break; 1303204076Spjd case BIO_FLUSH: 1304225832Spjd if (!res->hr_localflush) { 1305225832Spjd ret = -1; 1306225832Spjd errno = EOPNOTSUPP; 1307225832Spjd break; 1308225832Spjd } 1309204076Spjd ret = g_flush(res->hr_localfd); 1310216479Spjd if (ret < 0) { 1311225832Spjd if (errno == EOPNOTSUPP) 1312225832Spjd res->hr_localflush = false; 1313204076Spjd hio->hio_errors[ncomp] = errno; 1314216479Spjd reqlog(LOG_WARNING, 0, ggio, 1315216479Spjd "Local request failed (%s): ", 1316216479Spjd strerror(errno)); 1317216479Spjd } else { 1318204076Spjd hio->hio_errors[ncomp] = 0; 1319216479Spjd } 1320204076Spjd break; 
1321204076Spjd } 1322204076Spjd if (refcount_release(&hio->hio_countdown)) { 1323204076Spjd if (ISSYNCREQ(hio)) { 1324204076Spjd mtx_lock(&sync_lock); 1325204076Spjd SYNCREQDONE(hio); 1326204076Spjd mtx_unlock(&sync_lock); 1327204076Spjd cv_signal(&sync_cond); 1328204076Spjd } else { 1329204076Spjd pjdlog_debug(2, 1330204076Spjd "local_send: (%p) Moving request to the done queue.", 1331204076Spjd hio); 1332204076Spjd QUEUE_INSERT2(hio, done); 1333204076Spjd } 1334204076Spjd } 1335204076Spjd } 1336204076Spjd /* NOTREACHED */ 1337204076Spjd return (NULL); 1338204076Spjd} 1339204076Spjd 1340214692Spjdstatic void 1341214692Spjdkeepalive_send(struct hast_resource *res, unsigned int ncomp) 1342214692Spjd{ 1343214692Spjd struct nv *nv; 1344214692Spjd 1345218217Spjd rw_rlock(&hio_remote_lock[ncomp]); 1346218217Spjd 1347218217Spjd if (!ISCONNECTED(res, ncomp)) { 1348218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1349214692Spjd return; 1350218217Spjd } 1351219864Spjd 1352218138Spjd PJDLOG_ASSERT(res->hr_remotein != NULL); 1353218138Spjd PJDLOG_ASSERT(res->hr_remoteout != NULL); 1354214692Spjd 1355214692Spjd nv = nv_alloc(); 1356214692Spjd nv_add_uint8(nv, HIO_KEEPALIVE, "cmd"); 1357214692Spjd if (nv_error(nv) != 0) { 1358218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1359214692Spjd nv_free(nv); 1360214692Spjd pjdlog_debug(1, 1361214692Spjd "keepalive_send: Unable to prepare header to send."); 1362214692Spjd return; 1363214692Spjd } 1364214692Spjd if (hast_proto_send(res, res->hr_remoteout, nv, NULL, 0) < 0) { 1365218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1366214692Spjd pjdlog_common(LOG_DEBUG, 1, errno, 1367214692Spjd "keepalive_send: Unable to send request"); 1368214692Spjd nv_free(nv); 1369214692Spjd remote_close(res, ncomp); 1370214692Spjd return; 1371214692Spjd } 1372218217Spjd 1373218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1374214692Spjd nv_free(nv); 1375214692Spjd pjdlog_debug(2, "keepalive_send: Request sent."); 1376214692Spjd} 1377214692Spjd 1378204076Spjd/* 
1379204076Spjd * Thread sends request to secondary node. 1380204076Spjd */ 1381204076Spjdstatic void * 1382204076Spjdremote_send_thread(void *arg) 1383204076Spjd{ 1384204076Spjd struct hast_resource *res = arg; 1385204076Spjd struct g_gate_ctl_io *ggio; 1386214692Spjd time_t lastcheck, now; 1387204076Spjd struct hio *hio; 1388204076Spjd struct nv *nv; 1389204076Spjd unsigned int ncomp; 1390204076Spjd bool wakeup; 1391204076Spjd uint64_t offset, length; 1392204076Spjd uint8_t cmd; 1393204076Spjd void *data; 1394204076Spjd 1395204076Spjd /* Remote component is 1 for now. */ 1396204076Spjd ncomp = 1; 1397219864Spjd lastcheck = time(NULL); 1398204076Spjd 1399204076Spjd for (;;) { 1400204076Spjd pjdlog_debug(2, "remote_send: Taking request."); 1401219721Strociny QUEUE_TAKE1(hio, send, ncomp, HAST_KEEPALIVE); 1402214692Spjd if (hio == NULL) { 1403214692Spjd now = time(NULL); 1404219721Strociny if (lastcheck + HAST_KEEPALIVE <= now) { 1405214692Spjd keepalive_send(res, ncomp); 1406214692Spjd lastcheck = now; 1407214692Spjd } 1408214692Spjd continue; 1409214692Spjd } 1410204076Spjd pjdlog_debug(2, "remote_send: (%p) Got request.", hio); 1411204076Spjd ggio = &hio->hio_ggio; 1412204076Spjd switch (ggio->gctl_cmd) { 1413204076Spjd case BIO_READ: 1414204076Spjd cmd = HIO_READ; 1415204076Spjd data = NULL; 1416204076Spjd offset = ggio->gctl_offset; 1417204076Spjd length = ggio->gctl_length; 1418204076Spjd break; 1419204076Spjd case BIO_WRITE: 1420204076Spjd cmd = HIO_WRITE; 1421204076Spjd data = ggio->gctl_data; 1422204076Spjd offset = ggio->gctl_offset; 1423204076Spjd length = ggio->gctl_length; 1424204076Spjd break; 1425204076Spjd case BIO_DELETE: 1426204076Spjd cmd = HIO_DELETE; 1427204076Spjd data = NULL; 1428204076Spjd offset = ggio->gctl_offset; 1429204076Spjd length = ggio->gctl_length; 1430204076Spjd break; 1431204076Spjd case BIO_FLUSH: 1432204076Spjd cmd = HIO_FLUSH; 1433204076Spjd data = NULL; 1434204076Spjd offset = 0; 1435204076Spjd length = 0; 1436204076Spjd 
break; 1437204076Spjd default: 1438225783Spjd PJDLOG_ABORT("invalid condition"); 1439204076Spjd } 1440204076Spjd nv = nv_alloc(); 1441204076Spjd nv_add_uint8(nv, cmd, "cmd"); 1442204076Spjd nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq"); 1443204076Spjd nv_add_uint64(nv, offset, "offset"); 1444204076Spjd nv_add_uint64(nv, length, "length"); 1445204076Spjd if (nv_error(nv) != 0) { 1446204076Spjd hio->hio_errors[ncomp] = nv_error(nv); 1447204076Spjd pjdlog_debug(2, 1448204076Spjd "remote_send: (%p) Unable to prepare header to send.", 1449204076Spjd hio); 1450204076Spjd reqlog(LOG_ERR, 0, ggio, 1451204076Spjd "Unable to prepare header to send (%s): ", 1452204076Spjd strerror(nv_error(nv))); 1453204076Spjd /* Move failed request immediately to the done queue. */ 1454204076Spjd goto done_queue; 1455204076Spjd } 1456204076Spjd /* 1457204076Spjd * Protect connection from disappearing. 1458204076Spjd */ 1459204076Spjd rw_rlock(&hio_remote_lock[ncomp]); 1460204076Spjd if (!ISCONNECTED(res, ncomp)) { 1461204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1462204076Spjd hio->hio_errors[ncomp] = ENOTCONN; 1463204076Spjd goto done_queue; 1464204076Spjd } 1465204076Spjd /* 1466204076Spjd * Move the request to recv queue before sending it, because 1467204076Spjd * in different order we can get reply before we move request 1468204076Spjd * to recv queue. 1469204076Spjd */ 1470226852Spjd pjdlog_debug(2, 1471226852Spjd "remote_send: (%p) Moving request to the recv queue.", 1472226852Spjd hio); 1473204076Spjd mtx_lock(&hio_recv_list_lock[ncomp]); 1474204076Spjd wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]); 1475204076Spjd TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]); 1476204076Spjd mtx_unlock(&hio_recv_list_lock[ncomp]); 1477204076Spjd if (hast_proto_send(res, res->hr_remoteout, nv, data, 1478204076Spjd data != NULL ? 
length : 0) < 0) { 1479204076Spjd hio->hio_errors[ncomp] = errno; 1480204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1481204076Spjd pjdlog_debug(2, 1482204076Spjd "remote_send: (%p) Unable to send request.", hio); 1483204076Spjd reqlog(LOG_ERR, 0, ggio, 1484204076Spjd "Unable to send request (%s): ", 1485204076Spjd strerror(hio->hio_errors[ncomp])); 1486211979Spjd remote_close(res, ncomp); 1487204076Spjd /* 1488204076Spjd * Take request back from the receive queue and move 1489204076Spjd * it immediately to the done queue. 1490204076Spjd */ 1491204076Spjd mtx_lock(&hio_recv_list_lock[ncomp]); 1492226852Spjd TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1493226852Spjd hio_next[ncomp]); 1494204076Spjd mtx_unlock(&hio_recv_list_lock[ncomp]); 1495204076Spjd goto done_queue; 1496204076Spjd } 1497204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1498204076Spjd nv_free(nv); 1499204076Spjd if (wakeup) 1500204076Spjd cv_signal(&hio_recv_list_cond[ncomp]); 1501204076Spjd continue; 1502204076Spjddone_queue: 1503204076Spjd nv_free(nv); 1504204076Spjd if (ISSYNCREQ(hio)) { 1505204076Spjd if (!refcount_release(&hio->hio_countdown)) 1506204076Spjd continue; 1507204076Spjd mtx_lock(&sync_lock); 1508204076Spjd SYNCREQDONE(hio); 1509204076Spjd mtx_unlock(&sync_lock); 1510204076Spjd cv_signal(&sync_cond); 1511204076Spjd continue; 1512204076Spjd } 1513204076Spjd if (ggio->gctl_cmd == BIO_WRITE) { 1514204076Spjd mtx_lock(&res->hr_amp_lock); 1515204076Spjd if (activemap_need_sync(res->hr_amp, ggio->gctl_offset, 1516204076Spjd ggio->gctl_length)) { 1517204076Spjd (void)hast_activemap_flush(res); 1518204076Spjd } 1519204076Spjd mtx_unlock(&res->hr_amp_lock); 1520204076Spjd } 1521204076Spjd if (!refcount_release(&hio->hio_countdown)) 1522204076Spjd continue; 1523204076Spjd pjdlog_debug(2, 1524204076Spjd "remote_send: (%p) Moving request to the done queue.", 1525204076Spjd hio); 1526204076Spjd QUEUE_INSERT2(hio, done); 1527204076Spjd } 1528204076Spjd /* NOTREACHED */ 1529204076Spjd return (NULL); 
}

/*
 * Thread receives answer from secondary node and passes it to ggate_send
 * thread.
 *
 * Replies are matched against requests parked on hio_recv_list[] by the
 * "seq" field carried in the reply header.  Each reply (or failure)
 * releases this component's reference on the request; the releaser of the
 * last reference completes the request.
 */
static void *
remote_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	uint64_t seq;
	int error;

	/* Remote component is 1 for now. */
	ncomp = 1;

	for (;;) {
		/* Wait until there is anything to receive. */
		mtx_lock(&hio_recv_list_lock[ncomp]);
		while (TAILQ_EMPTY(&hio_recv_list[ncomp])) {
			pjdlog_debug(2, "remote_recv: No requests, waiting.");
			cv_wait(&hio_recv_list_cond[ncomp],
			    &hio_recv_list_lock[ncomp]);
		}
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		rw_rlock(&hio_remote_lock[ncomp]);
		if (!ISCONNECTED(res, ncomp)) {
			rw_unlock(&hio_remote_lock[ncomp]);
			/*
			 * Connection is dead, so move all pending requests to
			 * the done queue (one-by-one).
			 */
			mtx_lock(&hio_recv_list_lock[ncomp]);
			hio = TAILQ_FIRST(&hio_recv_list[ncomp]);
			PJDLOG_ASSERT(hio != NULL);
			TAILQ_REMOVE(&hio_recv_list[ncomp], hio,
			    hio_next[ncomp]);
			mtx_unlock(&hio_recv_list_lock[ncomp]);
			goto done_queue;
		}
		/*
		 * Receive the reply header; a failure here means the
		 * connection to the secondary is broken.
		 */
		if (hast_proto_recv_hdr(res->hr_remotein, &nv) < 0) {
			pjdlog_errno(LOG_ERR,
			    "Unable to receive reply header");
			rw_unlock(&hio_remote_lock[ncomp]);
			remote_close(res, ncomp);
			continue;
		}
		rw_unlock(&hio_remote_lock[ncomp]);
		seq = nv_get_uint64(nv, "seq");
		/* A zero 'seq' means the field was missing from the header. */
		if (seq == 0) {
			pjdlog_error("Header contains no 'seq' field.");
			nv_free(nv);
			continue;
		}
		/* Find the pending request this reply answers. */
		mtx_lock(&hio_recv_list_lock[ncomp]);
		TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) {
			if (hio->hio_ggio.gctl_seq == seq) {
				TAILQ_REMOVE(&hio_recv_list[ncomp], hio,
				    hio_next[ncomp]);
				break;
			}
		}
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		if (hio == NULL) {
			pjdlog_error("Found no request matching received 'seq' field (%ju).",
			    (uintmax_t)seq);
			nv_free(nv);
			continue;
		}
		ggio = &hio->hio_ggio;
		error = nv_get_int16(nv, "error");
		if (error != 0) {
			/* Request failed on remote side. */
			hio->hio_errors[ncomp] = error;
			reqlog(LOG_WARNING, 0, ggio,
			    "Remote request failed (%s): ", strerror(error));
			nv_free(nv);
			goto done_queue;
		}
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			/* Reads additionally carry a data payload. */
			rw_rlock(&hio_remote_lock[ncomp]);
			if (!ISCONNECTED(res, ncomp)) {
				rw_unlock(&hio_remote_lock[ncomp]);
				nv_free(nv);
				goto done_queue;
			}
			if (hast_proto_recv_data(res, res->hr_remotein, nv,
			    ggio->gctl_data, ggio->gctl_length) < 0) {
				hio->hio_errors[ncomp] = errno;
				pjdlog_errno(LOG_ERR,
				    "Unable to receive reply data");
				rw_unlock(&hio_remote_lock[ncomp]);
				nv_free(nv);
				remote_close(res, ncomp);
				goto done_queue;
			}
			rw_unlock(&hio_remote_lock[ncomp]);
			break;
		case BIO_WRITE:
		case BIO_DELETE:
		case BIO_FLUSH:
			break;
		default:
			PJDLOG_ABORT("invalid condition");
		}
		hio->hio_errors[ncomp] = 0;
		nv_free(nv);
done_queue:
		/*
		 * Drop this component's reference.  Whoever releases the
		 * last reference completes the request: sync requests wake
		 * up sync_thread, regular requests move to the done queue
		 * for ggate_send_thread.
		 */
		if (refcount_release(&hio->hio_countdown)) {
			if (ISSYNCREQ(hio)) {
				mtx_lock(&sync_lock);
				SYNCREQDONE(hio);
				mtx_unlock(&sync_lock);
				cv_signal(&sync_cond);
			} else {
				pjdlog_debug(2,
				    "remote_recv: (%p) Moving request to the done queue.",
				    hio);
				QUEUE_INSERT2(hio, done);
			}
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread sends answer to the kernel.
 */
static void *
ggate_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomp, ncomps;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		pjdlog_debug(2, "ggate_send: Taking request.");
		QUEUE_TAKE2(hio, done);
		pjdlog_debug(2, "ggate_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		for (ii = 0; ii < ncomps; ii++) {
			if (hio->hio_errors[ii] == 0) {
				/*
				 * One successful request is enough to declare
				 * success.
				 */
				ggio->gctl_error = 0;
				break;
			}
		}
		if (ii == ncomps) {
			/*
			 * None of the requests were successful.
			 * Use the error from local component except the
			 * case when we did only remote request.
			 */
			if (ggio->gctl_cmd == BIO_READ &&
			    res->hr_syncsrc == HAST_SYNCSRC_SECONDARY)
				ggio->gctl_error = hio->hio_errors[1];
			else
				ggio->gctl_error = hio->hio_errors[0];
		}
		if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) {
			/*
			 * Successful write: report completion to the
			 * activemap and flush it when the call reports an
			 * on-disk update is needed.
			 */
			mtx_lock(&res->hr_amp_lock);
			if (activemap_write_complete(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length)) {
				res->hr_stat_activemap_update++;
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
		}
		if (ggio->gctl_cmd == BIO_WRITE) {
			/*
			 * Unlock range we locked.
			 */
			mtx_lock(&range_lock);
			rangelock_del(range_regular, ggio->gctl_offset,
			    ggio->gctl_length);
			if (range_sync_wait)
				cv_signal(&range_sync_cond);
			mtx_unlock(&range_lock);
			/*
			 * Bump local count if this is first write after
			 * connection failure with remote node.
			 */
			ncomp = 1;
			rw_rlock(&hio_remote_lock[ncomp]);
			if (!ISCONNECTED(res, ncomp)) {
				mtx_lock(&metadata_lock);
				if (res->hr_primary_localcnt ==
				    res->hr_secondary_remotecnt) {
					res->hr_primary_localcnt++;
					pjdlog_debug(1,
					    "Increasing localcnt to %ju.",
					    (uintmax_t)res->hr_primary_localcnt);
					(void)metadata_write(res);
				}
				mtx_unlock(&metadata_lock);
			}
			rw_unlock(&hio_remote_lock[ncomp]);
		}
		/* Hand the completed request back to the kernel. */
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) < 0)
			primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed");
		pjdlog_debug(2,
		    "ggate_send: (%p) Moving request to the free queue.", hio);
		QUEUE_INSERT2(hio, free);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread synchronizes local and remote components.
1752204076Spjd */ 1753204076Spjdstatic void * 1754204076Spjdsync_thread(void *arg __unused) 1755204076Spjd{ 1756204076Spjd struct hast_resource *res = arg; 1757204076Spjd struct hio *hio; 1758204076Spjd struct g_gate_ctl_io *ggio; 1759219372Spjd struct timeval tstart, tend, tdiff; 1760204076Spjd unsigned int ii, ncomp, ncomps; 1761204076Spjd off_t offset, length, synced; 1762204076Spjd bool dorewind; 1763204076Spjd int syncext; 1764204076Spjd 1765204076Spjd ncomps = HAST_NCOMPONENTS; 1766204076Spjd dorewind = true; 1767211897Spjd synced = 0; 1768211897Spjd offset = -1; 1769204076Spjd 1770204076Spjd for (;;) { 1771204076Spjd mtx_lock(&sync_lock); 1772211897Spjd if (offset >= 0 && !sync_inprogress) { 1773219372Spjd gettimeofday(&tend, NULL); 1774219372Spjd timersub(&tend, &tstart, &tdiff); 1775219372Spjd pjdlog_info("Synchronization interrupted after %#.0T. " 1776219372Spjd "%NB synchronized so far.", &tdiff, 1777211879Spjd (intmax_t)synced); 1778212038Spjd event_send(res, EVENT_SYNCINTR); 1779211879Spjd } 1780204076Spjd while (!sync_inprogress) { 1781204076Spjd dorewind = true; 1782204076Spjd synced = 0; 1783204076Spjd cv_wait(&sync_cond, &sync_lock); 1784204076Spjd } 1785204076Spjd mtx_unlock(&sync_lock); 1786204076Spjd /* 1787204076Spjd * Obtain offset at which we should synchronize. 1788204076Spjd * Rewind synchronization if needed. 1789204076Spjd */ 1790204076Spjd mtx_lock(&res->hr_amp_lock); 1791204076Spjd if (dorewind) 1792204076Spjd activemap_sync_rewind(res->hr_amp); 1793204076Spjd offset = activemap_sync_offset(res->hr_amp, &length, &syncext); 1794204076Spjd if (syncext != -1) { 1795204076Spjd /* 1796204076Spjd * We synchronized entire syncext extent, we can mark 1797204076Spjd * it as clean now. 
1798204076Spjd */ 1799204076Spjd if (activemap_extent_complete(res->hr_amp, syncext)) 1800204076Spjd (void)hast_activemap_flush(res); 1801204076Spjd } 1802204076Spjd mtx_unlock(&res->hr_amp_lock); 1803204076Spjd if (dorewind) { 1804204076Spjd dorewind = false; 1805204076Spjd if (offset < 0) 1806204076Spjd pjdlog_info("Nodes are in sync."); 1807204076Spjd else { 1808219372Spjd pjdlog_info("Synchronization started. %NB to go.", 1809219372Spjd (intmax_t)(res->hr_extentsize * 1810204076Spjd activemap_ndirty(res->hr_amp))); 1811212038Spjd event_send(res, EVENT_SYNCSTART); 1812219372Spjd gettimeofday(&tstart, NULL); 1813204076Spjd } 1814204076Spjd } 1815204076Spjd if (offset < 0) { 1816211878Spjd sync_stop(); 1817204076Spjd pjdlog_debug(1, "Nothing to synchronize."); 1818204076Spjd /* 1819204076Spjd * Synchronization complete, make both localcnt and 1820204076Spjd * remotecnt equal. 1821204076Spjd */ 1822204076Spjd ncomp = 1; 1823204076Spjd rw_rlock(&hio_remote_lock[ncomp]); 1824204076Spjd if (ISCONNECTED(res, ncomp)) { 1825204076Spjd if (synced > 0) { 1826219372Spjd int64_t bps; 1827219372Spjd 1828219372Spjd gettimeofday(&tend, NULL); 1829219372Spjd timersub(&tend, &tstart, &tdiff); 1830219372Spjd bps = (int64_t)((double)synced / 1831219372Spjd ((double)tdiff.tv_sec + 1832219372Spjd (double)tdiff.tv_usec / 1000000)); 1833204076Spjd pjdlog_info("Synchronization complete. 
" 1834219372Spjd "%NB synchronized in %#.0lT (%NB/sec).", 1835219372Spjd (intmax_t)synced, &tdiff, 1836219372Spjd (intmax_t)bps); 1837212038Spjd event_send(res, EVENT_SYNCDONE); 1838204076Spjd } 1839204076Spjd mtx_lock(&metadata_lock); 1840204076Spjd res->hr_syncsrc = HAST_SYNCSRC_UNDEF; 1841204076Spjd res->hr_primary_localcnt = 1842219882Strociny res->hr_secondary_remotecnt; 1843219882Strociny res->hr_primary_remotecnt = 1844204076Spjd res->hr_secondary_localcnt; 1845204076Spjd pjdlog_debug(1, 1846204076Spjd "Setting localcnt to %ju and remotecnt to %ju.", 1847204076Spjd (uintmax_t)res->hr_primary_localcnt, 1848219882Strociny (uintmax_t)res->hr_primary_remotecnt); 1849204076Spjd (void)metadata_write(res); 1850204076Spjd mtx_unlock(&metadata_lock); 1851204076Spjd } 1852204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1853204076Spjd continue; 1854204076Spjd } 1855204076Spjd pjdlog_debug(2, "sync: Taking free request."); 1856204076Spjd QUEUE_TAKE2(hio, free); 1857204076Spjd pjdlog_debug(2, "sync: (%p) Got free request.", hio); 1858204076Spjd /* 1859204076Spjd * Lock the range we are going to synchronize. We don't want 1860204076Spjd * race where someone writes between our read and write. 
1861204076Spjd */ 1862204076Spjd for (;;) { 1863204076Spjd mtx_lock(&range_lock); 1864204076Spjd if (rangelock_islocked(range_regular, offset, length)) { 1865204076Spjd pjdlog_debug(2, 1866204076Spjd "sync: Range offset=%jd length=%jd locked.", 1867204076Spjd (intmax_t)offset, (intmax_t)length); 1868204076Spjd range_sync_wait = true; 1869204076Spjd cv_wait(&range_sync_cond, &range_lock); 1870204076Spjd range_sync_wait = false; 1871204076Spjd mtx_unlock(&range_lock); 1872204076Spjd continue; 1873204076Spjd } 1874204076Spjd if (rangelock_add(range_sync, offset, length) < 0) { 1875204076Spjd mtx_unlock(&range_lock); 1876204076Spjd pjdlog_debug(2, 1877204076Spjd "sync: Range offset=%jd length=%jd is already locked, waiting.", 1878204076Spjd (intmax_t)offset, (intmax_t)length); 1879204076Spjd sleep(1); 1880204076Spjd continue; 1881204076Spjd } 1882204076Spjd mtx_unlock(&range_lock); 1883204076Spjd break; 1884204076Spjd } 1885204076Spjd /* 1886204076Spjd * First read the data from synchronization source. 1887204076Spjd */ 1888204076Spjd SYNCREQ(hio); 1889204076Spjd ggio = &hio->hio_ggio; 1890204076Spjd ggio->gctl_cmd = BIO_READ; 1891204076Spjd ggio->gctl_offset = offset; 1892204076Spjd ggio->gctl_length = length; 1893204076Spjd ggio->gctl_error = 0; 1894204076Spjd for (ii = 0; ii < ncomps; ii++) 1895204076Spjd hio->hio_errors[ii] = EINVAL; 1896204076Spjd reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 1897204076Spjd hio); 1898204076Spjd pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 1899204076Spjd hio); 1900204076Spjd mtx_lock(&metadata_lock); 1901204076Spjd if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1902204076Spjd /* 1903204076Spjd * This range is up-to-date on local component, 1904204076Spjd * so handle request locally. 1905204076Spjd */ 1906204076Spjd /* Local component is 0 for now. 
*/ 1907204076Spjd ncomp = 0; 1908204076Spjd } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 1909218138Spjd PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 1910204076Spjd /* 1911204076Spjd * This range is out-of-date on local component, 1912204076Spjd * so send request to the remote node. 1913204076Spjd */ 1914204076Spjd /* Remote component is 1 for now. */ 1915204076Spjd ncomp = 1; 1916204076Spjd } 1917204076Spjd mtx_unlock(&metadata_lock); 1918204076Spjd refcount_init(&hio->hio_countdown, 1); 1919204076Spjd QUEUE_INSERT1(hio, send, ncomp); 1920204076Spjd 1921204076Spjd /* 1922204076Spjd * Let's wait for READ to finish. 1923204076Spjd */ 1924204076Spjd mtx_lock(&sync_lock); 1925204076Spjd while (!ISSYNCREQDONE(hio)) 1926204076Spjd cv_wait(&sync_cond, &sync_lock); 1927204076Spjd mtx_unlock(&sync_lock); 1928204076Spjd 1929204076Spjd if (hio->hio_errors[ncomp] != 0) { 1930204076Spjd pjdlog_error("Unable to read synchronization data: %s.", 1931204076Spjd strerror(hio->hio_errors[ncomp])); 1932204076Spjd goto free_queue; 1933204076Spjd } 1934204076Spjd 1935204076Spjd /* 1936204076Spjd * We read the data from synchronization source, now write it 1937204076Spjd * to synchronization target. 1938204076Spjd */ 1939204076Spjd SYNCREQ(hio); 1940204076Spjd ggio->gctl_cmd = BIO_WRITE; 1941204076Spjd for (ii = 0; ii < ncomps; ii++) 1942204076Spjd hio->hio_errors[ii] = EINVAL; 1943204076Spjd reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 1944204076Spjd hio); 1945204076Spjd pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 1946204076Spjd hio); 1947204076Spjd mtx_lock(&metadata_lock); 1948204076Spjd if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1949204076Spjd /* 1950204076Spjd * This range is up-to-date on local component, 1951204076Spjd * so we update remote component. 1952204076Spjd */ 1953204076Spjd /* Remote component is 1 for now. 
*/ 1954204076Spjd ncomp = 1; 1955204076Spjd } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 1956218138Spjd PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 1957204076Spjd /* 1958204076Spjd * This range is out-of-date on local component, 1959204076Spjd * so we update it. 1960204076Spjd */ 1961204076Spjd /* Local component is 0 for now. */ 1962204076Spjd ncomp = 0; 1963204076Spjd } 1964204076Spjd mtx_unlock(&metadata_lock); 1965204076Spjd 1966204076Spjd pjdlog_debug(2, "sync: (%p) Moving request to the send queues.", 1967204076Spjd hio); 1968204076Spjd refcount_init(&hio->hio_countdown, 1); 1969204076Spjd QUEUE_INSERT1(hio, send, ncomp); 1970204076Spjd 1971204076Spjd /* 1972204076Spjd * Let's wait for WRITE to finish. 1973204076Spjd */ 1974204076Spjd mtx_lock(&sync_lock); 1975204076Spjd while (!ISSYNCREQDONE(hio)) 1976204076Spjd cv_wait(&sync_cond, &sync_lock); 1977204076Spjd mtx_unlock(&sync_lock); 1978204076Spjd 1979204076Spjd if (hio->hio_errors[ncomp] != 0) { 1980204076Spjd pjdlog_error("Unable to write synchronization data: %s.", 1981204076Spjd strerror(hio->hio_errors[ncomp])); 1982204076Spjd goto free_queue; 1983204076Spjd } 1984211880Spjd 1985211880Spjd synced += length; 1986204076Spjdfree_queue: 1987204076Spjd mtx_lock(&range_lock); 1988204076Spjd rangelock_del(range_sync, offset, length); 1989204076Spjd if (range_regular_wait) 1990204076Spjd cv_signal(&range_regular_cond); 1991204076Spjd mtx_unlock(&range_lock); 1992204076Spjd pjdlog_debug(2, "sync: (%p) Moving request to the free queue.", 1993204076Spjd hio); 1994204076Spjd QUEUE_INSERT2(hio, free); 1995204076Spjd } 1996204076Spjd /* NOTREACHED */ 1997204076Spjd return (NULL); 1998204076Spjd} 1999204076Spjd 2000217784Spjdvoid 2001217784Spjdprimary_config_reload(struct hast_resource *res, struct nv *nv) 2002210886Spjd{ 2003210886Spjd unsigned int ii, ncomps; 2004217784Spjd int modified, vint; 2005217784Spjd const char *vstr; 2006210886Spjd 2007210886Spjd pjdlog_info("Reloading 
configuration..."); 2008210886Spjd 2009218138Spjd PJDLOG_ASSERT(res->hr_role == HAST_ROLE_PRIMARY); 2010218138Spjd PJDLOG_ASSERT(gres == res); 2011217784Spjd nv_assert(nv, "remoteaddr"); 2012219818Spjd nv_assert(nv, "sourceaddr"); 2013217784Spjd nv_assert(nv, "replication"); 2014219351Spjd nv_assert(nv, "checksum"); 2015219354Spjd nv_assert(nv, "compression"); 2016217784Spjd nv_assert(nv, "timeout"); 2017217784Spjd nv_assert(nv, "exec"); 2018225830Spjd nv_assert(nv, "metaflush"); 2019217784Spjd 2020210886Spjd ncomps = HAST_NCOMPONENTS; 2021210886Spjd 2022219351Spjd#define MODIFIED_REMOTEADDR 0x01 2023219818Spjd#define MODIFIED_SOURCEADDR 0x02 2024219818Spjd#define MODIFIED_REPLICATION 0x04 2025219818Spjd#define MODIFIED_CHECKSUM 0x08 2026219818Spjd#define MODIFIED_COMPRESSION 0x10 2027219818Spjd#define MODIFIED_TIMEOUT 0x20 2028219818Spjd#define MODIFIED_EXEC 0x40 2029225830Spjd#define MODIFIED_METAFLUSH 0x80 2030210886Spjd modified = 0; 2031217784Spjd 2032217784Spjd vstr = nv_get_string(nv, "remoteaddr"); 2033217784Spjd if (strcmp(gres->hr_remoteaddr, vstr) != 0) { 2034210886Spjd /* 2035210886Spjd * Don't copy res->hr_remoteaddr to gres just yet. 2036210886Spjd * We want remote_close() to log disconnect from the old 2037210886Spjd * addresses, not from the new ones. 
2038210886Spjd */ 2039210886Spjd modified |= MODIFIED_REMOTEADDR; 2040210886Spjd } 2041219818Spjd vstr = nv_get_string(nv, "sourceaddr"); 2042219818Spjd if (strcmp(gres->hr_sourceaddr, vstr) != 0) { 2043219818Spjd strlcpy(gres->hr_sourceaddr, vstr, sizeof(gres->hr_sourceaddr)); 2044219818Spjd modified |= MODIFIED_SOURCEADDR; 2045219818Spjd } 2046217784Spjd vint = nv_get_int32(nv, "replication"); 2047217784Spjd if (gres->hr_replication != vint) { 2048217784Spjd gres->hr_replication = vint; 2049210886Spjd modified |= MODIFIED_REPLICATION; 2050210886Spjd } 2051219351Spjd vint = nv_get_int32(nv, "checksum"); 2052219351Spjd if (gres->hr_checksum != vint) { 2053219351Spjd gres->hr_checksum = vint; 2054219351Spjd modified |= MODIFIED_CHECKSUM; 2055219351Spjd } 2056219354Spjd vint = nv_get_int32(nv, "compression"); 2057219354Spjd if (gres->hr_compression != vint) { 2058219354Spjd gres->hr_compression = vint; 2059219354Spjd modified |= MODIFIED_COMPRESSION; 2060219354Spjd } 2061217784Spjd vint = nv_get_int32(nv, "timeout"); 2062217784Spjd if (gres->hr_timeout != vint) { 2063217784Spjd gres->hr_timeout = vint; 2064210886Spjd modified |= MODIFIED_TIMEOUT; 2065210886Spjd } 2066217784Spjd vstr = nv_get_string(nv, "exec"); 2067217784Spjd if (strcmp(gres->hr_exec, vstr) != 0) { 2068217784Spjd strlcpy(gres->hr_exec, vstr, sizeof(gres->hr_exec)); 2069211886Spjd modified |= MODIFIED_EXEC; 2070211886Spjd } 2071225830Spjd vint = nv_get_int32(nv, "metaflush"); 2072225830Spjd if (gres->hr_metaflush != vint) { 2073225830Spjd gres->hr_metaflush = vint; 2074225830Spjd modified |= MODIFIED_METAFLUSH; 2075225830Spjd } 2076217784Spjd 2077210886Spjd /* 2078219351Spjd * Change timeout for connected sockets. 2079219351Spjd * Don't bother if we need to reconnect. 
2080210886Spjd */ 2081219351Spjd if ((modified & MODIFIED_TIMEOUT) != 0 && 2082219818Spjd (modified & (MODIFIED_REMOTEADDR | MODIFIED_SOURCEADDR | 2083219818Spjd MODIFIED_REPLICATION)) == 0) { 2084210886Spjd for (ii = 0; ii < ncomps; ii++) { 2085210886Spjd if (!ISREMOTE(ii)) 2086210886Spjd continue; 2087210886Spjd rw_rlock(&hio_remote_lock[ii]); 2088210886Spjd if (!ISCONNECTED(gres, ii)) { 2089210886Spjd rw_unlock(&hio_remote_lock[ii]); 2090210886Spjd continue; 2091210886Spjd } 2092210886Spjd rw_unlock(&hio_remote_lock[ii]); 2093210886Spjd if (proto_timeout(gres->hr_remotein, 2094210886Spjd gres->hr_timeout) < 0) { 2095210886Spjd pjdlog_errno(LOG_WARNING, 2096210886Spjd "Unable to set connection timeout"); 2097210886Spjd } 2098210886Spjd if (proto_timeout(gres->hr_remoteout, 2099210886Spjd gres->hr_timeout) < 0) { 2100210886Spjd pjdlog_errno(LOG_WARNING, 2101210886Spjd "Unable to set connection timeout"); 2102210886Spjd } 2103210886Spjd } 2104219351Spjd } 2105219818Spjd if ((modified & (MODIFIED_REMOTEADDR | MODIFIED_SOURCEADDR | 2106219818Spjd MODIFIED_REPLICATION)) != 0) { 2107210886Spjd for (ii = 0; ii < ncomps; ii++) { 2108210886Spjd if (!ISREMOTE(ii)) 2109210886Spjd continue; 2110210886Spjd remote_close(gres, ii); 2111210886Spjd } 2112210886Spjd if (modified & MODIFIED_REMOTEADDR) { 2113217784Spjd vstr = nv_get_string(nv, "remoteaddr"); 2114217784Spjd strlcpy(gres->hr_remoteaddr, vstr, 2115210886Spjd sizeof(gres->hr_remoteaddr)); 2116210886Spjd } 2117210886Spjd } 2118210886Spjd#undef MODIFIED_REMOTEADDR 2119219818Spjd#undef MODIFIED_SOURCEADDR 2120210886Spjd#undef MODIFIED_REPLICATION 2121219351Spjd#undef MODIFIED_CHECKSUM 2122219354Spjd#undef MODIFIED_COMPRESSION 2123210886Spjd#undef MODIFIED_TIMEOUT 2124211886Spjd#undef MODIFIED_EXEC 2125225830Spjd#undef MODIFIED_METAFLUSH 2126210886Spjd 2127210886Spjd pjdlog_info("Configuration reloaded successfully."); 2128210886Spjd} 2129210886Spjd 2130211882Spjdstatic void 2131211981Spjdguard_one(struct hast_resource 
*res, unsigned int ncomp)
{
	struct proto_conn *in, *out;

	/* Only remote components need guarding. */
	if (!ISREMOTE(ncomp))
		return;

	rw_rlock(&hio_remote_lock[ncomp]);

	if (!real_remote(res)) {
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	if (ISCONNECTED(res, ncomp)) {
		PJDLOG_ASSERT(res->hr_remotein != NULL);
		PJDLOG_ASSERT(res->hr_remoteout != NULL);
		rw_unlock(&hio_remote_lock[ncomp]);
		pjdlog_debug(2, "remote_guard: Connection to %s is ok.",
		    res->hr_remoteaddr);
		return;
	}

	PJDLOG_ASSERT(res->hr_remotein == NULL);
	PJDLOG_ASSERT(res->hr_remoteout == NULL);
	/*
	 * Upgrade the lock. It doesn't have to be atomic as no other thread
	 * can change connection status from disconnected to connected.
	 */
	rw_unlock(&hio_remote_lock[ncomp]);
	pjdlog_debug(2, "remote_guard: Reconnecting to %s.",
	    res->hr_remoteaddr);
	in = out = NULL;
	if (init_remote(res, &in, &out) == 0) {
		/* Reconnected: publish the new connections. */
		rw_wlock(&hio_remote_lock[ncomp]);
		PJDLOG_ASSERT(res->hr_remotein == NULL);
		PJDLOG_ASSERT(res->hr_remoteout == NULL);
		PJDLOG_ASSERT(in != NULL && out != NULL);
		res->hr_remotein = in;
		res->hr_remoteout = out;
		rw_unlock(&hio_remote_lock[ncomp]);
		pjdlog_info("Successfully reconnected to %s.",
		    res->hr_remoteaddr);
		sync_start();
	} else {
		/* Both connections should be NULL. */
		PJDLOG_ASSERT(res->hr_remotein == NULL);
		PJDLOG_ASSERT(res->hr_remoteout == NULL);
		PJDLOG_ASSERT(in == NULL && out == NULL);
		pjdlog_debug(2, "remote_guard: Reconnect to %s failed.",
		    res->hr_remoteaddr);
	}
}

/*
 * Thread guards remote connections and reconnects when needed, handles
 * signals, etc.
 */
static void *
guard_thread(void *arg)
{
	struct hast_resource *res = arg;
	unsigned int ii, ncomps;
	struct timespec timeout;
	time_t lastcheck, now;
	sigset_t mask;
	int signo;

	ncomps = HAST_NCOMPONENTS;
	lastcheck = time(NULL);

	PJDLOG_VERIFY(sigemptyset(&mask) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGINT) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGTERM) == 0);

	timeout.tv_sec = HAST_KEEPALIVE;
	timeout.tv_nsec = 0;
	/* No signal received yet. */
	signo = -1;

	for (;;) {
		/* Act on a signal caught by the previous sigtimedwait(). */
		switch (signo) {
		case SIGINT:
		case SIGTERM:
			sigexit_received = true;
			primary_exitx(EX_OK,
			    "Termination signal received, exiting.");
			break;
		default:
			break;
		}

		/*
		 * Don't check connections until we fully started,
		 * as we may still be looping, waiting for remote node
		 * to switch from primary to secondary.
		 */
		if (fullystarted) {
			pjdlog_debug(2, "remote_guard: Checking connections.");
			now = time(NULL);
			if (lastcheck + HAST_KEEPALIVE <= now) {
				for (ii = 0; ii < ncomps; ii++)
					guard_one(res, ii);
				lastcheck = now;
			}
		}
		/*
		 * Sleep for up to HAST_KEEPALIVE seconds, returning early
		 * if SIGINT/SIGTERM arrives; the signal is handled at the
		 * top of the next iteration.
		 */
		signo = sigtimedwait(&mask, NULL, &timeout);
	}
	/* NOTREACHED */
	return (NULL);
}