primary.c revision 240269
/*-
 * Copyright (c) 2009 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * This software was developed by Pawel Jakub Dawidek under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/9/sbin/hastd/primary.c 240269 2012-09-09 08:39:41Z trociny $");

#include <sys/types.h>
#include <sys/time.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/refcount.h>
#include <sys/stat.h>

#include <geom/gate/g_gate.h>

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libgeom.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>

#include <activemap.h>
#include <nv.h>
#include <rangelock.h>

#include "control.h"
#include "event.h"
#include "hast.h"
#include "hast_proto.h"
#include "hastd.h"
#include "hooks.h"
#include "metadata.h"
#include "proto.h"
#include "pjdlog.h"
#include "subr.h"
#include "synch.h"

/* There is only one remote component for now. */
#define	ISREMOTE(no)	((no) == 1)

struct hio {
	/*
	 * Number of components we are still waiting for.
	 * When this field goes to 0, we can send the request back to the
	 * kernel. Each component has to decrease this counter by one
	 * even on failure.
	 */
	unsigned int		 hio_countdown;
	/*
	 * Each component has a place to store its own error.
	 * Once the request is handled by all components we can decide if the
	 * request overall is successful or not.
	 */
	int			*hio_errors;
	/*
	 * Structure used to communicate with GEOM Gate class.
	 */
	struct g_gate_ctl_io	 hio_ggio;
	/*
	 * Request was already confirmed to GEOM Gate.
	 */
	bool			 hio_done;
	/*
	 * Remember replication from the time the request was initiated,
	 * so we won't get confused when replication changes on reload.
	 */
	int			 hio_replication;
	TAILQ_ENTRY(hio)	*hio_next;
};
#define	hio_free_next	hio_next[0]
#define	hio_done_next	hio_next[0]

/*
 * Free list holds unused structures. When free list is empty, we have to wait
 * until some in-progress requests are freed.
 */
static TAILQ_HEAD(, hio) hio_free_list;
static pthread_mutex_t hio_free_list_lock;
static pthread_cond_t hio_free_list_cond;
/*
 * There is one send list for every component. One request is placed on all
 * send lists - each component gets the same request, but each component is
 * responsible for managing its own send list.
 */
static TAILQ_HEAD(, hio) *hio_send_list;
static pthread_mutex_t *hio_send_list_lock;
static pthread_cond_t *hio_send_list_cond;
/*
 * There is one recv list for every component, although local components don't
 * use recv lists as local requests are done synchronously.
 */
static TAILQ_HEAD(, hio) *hio_recv_list;
static pthread_mutex_t *hio_recv_list_lock;
static pthread_cond_t *hio_recv_list_cond;
/*
 * Request is placed on done list by the slowest component (the one that
 * decreased hio_countdown from 1 to 0).
 */
static TAILQ_HEAD(, hio) hio_done_list;
static pthread_mutex_t hio_done_list_lock;
static pthread_cond_t hio_done_list_cond;
/*
 * Structures below are for interaction with sync thread.
 */
static bool sync_inprogress;
static pthread_mutex_t sync_lock;
static pthread_cond_t sync_cond;
/*
 * The lock below allows us to synchronize access to remote connections.
 */
static pthread_rwlock_t *hio_remote_lock;

/*
 * Lock to synchronize metadata updates. Also synchronize access to
 * hr_primary_localcnt and hr_primary_remotecnt fields.
 */
static pthread_mutex_t metadata_lock;

/*
 * Maximum number of outstanding I/O requests.
 */
#define	HAST_HIO_MAX	256
/*
 * Number of components. At this point there are only two components: local
 * and remote, but in the future it might be possible to use multiple local
 * and remote components.
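 *
 * Component 0 is, by convention, the local disk and component 1 the remote
 * node (see ISREMOTE() above). For illustration, a request taken from the
 * kernel is fanned out roughly like this (simplified from
 * ggate_recv_thread() below):
 *
 *	refcount_init(&hio->hio_countdown, ncomps);
 *	for (ii = ncomp; ii < ncomp + ncomps; ii++)
 *		QUEUE_INSERT1(hio, send, ii);
 *
 * and whichever component drops hio_countdown to zero moves the request to
 * the done queue.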
160204076Spjd */ 161204076Spjd#define HAST_NCOMPONENTS 2 162204076Spjd 163204076Spjd#define ISCONNECTED(res, no) \ 164204076Spjd ((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL) 165204076Spjd 166204076Spjd#define QUEUE_INSERT1(hio, name, ncomp) do { \ 167204076Spjd bool _wakeup; \ 168204076Spjd \ 169204076Spjd mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 170204076Spjd _wakeup = TAILQ_EMPTY(&hio_##name##_list[(ncomp)]); \ 171204076Spjd TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio), \ 172204076Spjd hio_next[(ncomp)]); \ 173204076Spjd mtx_unlock(&hio_##name##_list_lock[ncomp]); \ 174204076Spjd if (_wakeup) \ 175204076Spjd cv_signal(&hio_##name##_list_cond[(ncomp)]); \ 176204076Spjd} while (0) 177204076Spjd#define QUEUE_INSERT2(hio, name) do { \ 178204076Spjd bool _wakeup; \ 179204076Spjd \ 180204076Spjd mtx_lock(&hio_##name##_list_lock); \ 181204076Spjd _wakeup = TAILQ_EMPTY(&hio_##name##_list); \ 182204076Spjd TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\ 183204076Spjd mtx_unlock(&hio_##name##_list_lock); \ 184204076Spjd if (_wakeup) \ 185204076Spjd cv_signal(&hio_##name##_list_cond); \ 186204076Spjd} while (0) 187214692Spjd#define QUEUE_TAKE1(hio, name, ncomp, timeout) do { \ 188214692Spjd bool _last; \ 189214692Spjd \ 190204076Spjd mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 191214692Spjd _last = false; \ 192214692Spjd while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL && !_last) { \ 193214692Spjd cv_timedwait(&hio_##name##_list_cond[(ncomp)], \ 194214692Spjd &hio_##name##_list_lock[(ncomp)], (timeout)); \ 195219864Spjd if ((timeout) != 0) \ 196214692Spjd _last = true; \ 197204076Spjd } \ 198214692Spjd if (hio != NULL) { \ 199214692Spjd TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio), \ 200214692Spjd hio_next[(ncomp)]); \ 201214692Spjd } \ 202204076Spjd mtx_unlock(&hio_##name##_list_lock[(ncomp)]); \ 203204076Spjd} while (0) 204204076Spjd#define QUEUE_TAKE2(hio, name) do { \ 205204076Spjd mtx_lock(&hio_##name##_list_lock); \ 206204076Spjd while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) { \ 207204076Spjd cv_wait(&hio_##name##_list_cond, \ 208204076Spjd &hio_##name##_list_lock); \ 209204076Spjd } \ 210204076Spjd TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next); \ 211204076Spjd mtx_unlock(&hio_##name##_list_lock); \ 212204076Spjd} while (0) 213204076Spjd 214209183Spjd#define SYNCREQ(hio) do { \ 215209183Spjd (hio)->hio_ggio.gctl_unit = -1; \ 216209183Spjd (hio)->hio_ggio.gctl_seq = 1; \ 217209183Spjd} while (0) 218204076Spjd#define ISSYNCREQ(hio) ((hio)->hio_ggio.gctl_unit == -1) 219204076Spjd#define SYNCREQDONE(hio) do { (hio)->hio_ggio.gctl_unit = -2; } while (0) 220204076Spjd#define ISSYNCREQDONE(hio) ((hio)->hio_ggio.gctl_unit == -2) 221204076Spjd 222204076Spjdstatic struct hast_resource *gres; 223204076Spjd 224204076Spjdstatic pthread_mutex_t range_lock; 225204076Spjdstatic struct rangelocks *range_regular; 226204076Spjdstatic bool range_regular_wait; 227204076Spjdstatic pthread_cond_t range_regular_cond; 228204076Spjdstatic struct rangelocks *range_sync; 229204076Spjdstatic bool range_sync_wait; 230204076Spjdstatic pthread_cond_t range_sync_cond; 231220898Spjdstatic bool fullystarted; 232204076Spjd 233204076Spjdstatic void *ggate_recv_thread(void *arg); 234204076Spjdstatic void *local_send_thread(void *arg); 235204076Spjdstatic void *remote_send_thread(void *arg); 236204076Spjdstatic void *remote_recv_thread(void *arg); 237204076Spjdstatic void *ggate_send_thread(void *arg); 238204076Spjdstatic void 
*sync_thread(void *arg); 239204076Spjdstatic void *guard_thread(void *arg); 240204076Spjd 241211982Spjdstatic void 242204076Spjdcleanup(struct hast_resource *res) 243204076Spjd{ 244204076Spjd int rerrno; 245204076Spjd 246204076Spjd /* Remember errno. */ 247204076Spjd rerrno = errno; 248204076Spjd 249204076Spjd /* Destroy ggate provider if we created one. */ 250204076Spjd if (res->hr_ggateunit >= 0) { 251204076Spjd struct g_gate_ctl_destroy ggiod; 252204076Spjd 253213533Spjd bzero(&ggiod, sizeof(ggiod)); 254204076Spjd ggiod.gctl_version = G_GATE_VERSION; 255204076Spjd ggiod.gctl_unit = res->hr_ggateunit; 256204076Spjd ggiod.gctl_force = 1; 257231017Strociny if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) == -1) { 258213531Spjd pjdlog_errno(LOG_WARNING, 259213531Spjd "Unable to destroy hast/%s device", 260204076Spjd res->hr_provname); 261204076Spjd } 262204076Spjd res->hr_ggateunit = -1; 263204076Spjd } 264204076Spjd 265204076Spjd /* Restore errno. */ 266204076Spjd errno = rerrno; 267204076Spjd} 268204076Spjd 269212899Spjdstatic __dead2 void 270204076Spjdprimary_exit(int exitcode, const char *fmt, ...) 271204076Spjd{ 272204076Spjd va_list ap; 273204076Spjd 274218138Spjd PJDLOG_ASSERT(exitcode != EX_OK); 275204076Spjd va_start(ap, fmt); 276204076Spjd pjdlogv_errno(LOG_ERR, fmt, ap); 277204076Spjd va_end(ap); 278204076Spjd cleanup(gres); 279204076Spjd exit(exitcode); 280204076Spjd} 281204076Spjd 282212899Spjdstatic __dead2 void 283204076Spjdprimary_exitx(int exitcode, const char *fmt, ...) 284204076Spjd{ 285204076Spjd va_list ap; 286204076Spjd 287204076Spjd va_start(ap, fmt); 288204076Spjd pjdlogv(exitcode == EX_OK ? LOG_INFO : LOG_ERR, fmt, ap); 289204076Spjd va_end(ap); 290204076Spjd cleanup(gres); 291204076Spjd exit(exitcode); 292204076Spjd} 293204076Spjd 294204076Spjdstatic int 295204076Spjdhast_activemap_flush(struct hast_resource *res) 296204076Spjd{ 297204076Spjd const unsigned char *buf; 298204076Spjd size_t size; 299204076Spjd 300204076Spjd buf = activemap_bitmap(res->hr_amp, &size); 301218138Spjd PJDLOG_ASSERT(buf != NULL); 302218138Spjd PJDLOG_ASSERT((size % res->hr_local_sectorsize) == 0); 303204076Spjd if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) != 304204076Spjd (ssize_t)size) { 305229509Strociny pjdlog_errno(LOG_ERR, "Unable to flush activemap to disk"); 306204076Spjd return (-1); 307204076Spjd } 308229509Strociny if (res->hr_metaflush == 1 && g_flush(res->hr_localfd) == -1) { 309229509Strociny if (errno == EOPNOTSUPP) { 310229509Strociny pjdlog_warning("The %s provider doesn't support flushing write cache. Disabling it.", 311229509Strociny res->hr_localpath); 312229509Strociny res->hr_metaflush = 0; 313229509Strociny } else { 314229509Strociny pjdlog_errno(LOG_ERR, 315229509Strociny "Unable to flush disk cache on activemap update"); 316229509Strociny return (-1); 317229509Strociny } 318229509Strociny } 319204076Spjd return (0); 320204076Spjd} 321204076Spjd 322210881Spjdstatic bool 323210881Spjdreal_remote(const struct hast_resource *res) 324210881Spjd{ 325210881Spjd 326210881Spjd return (strcmp(res->hr_remoteaddr, "none") != 0); 327210881Spjd} 328210881Spjd 329204076Spjdstatic void 330204076Spjdinit_environment(struct hast_resource *res __unused) 331204076Spjd{ 332204076Spjd struct hio *hio; 333204076Spjd unsigned int ii, ncomps; 334204076Spjd 335204076Spjd /* 336204076Spjd * In the future it might be per-resource value. 337204076Spjd */ 338204076Spjd ncomps = HAST_NCOMPONENTS; 339204076Spjd 340204076Spjd /* 341204076Spjd * Allocate memory needed by lists. 
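	 * Every per-component array below (send/recv lists, their locks and
	 * condition variables, and the remote connection rwlocks) is sized by
	 * ncomps, so supporting more components later is mostly a matter of
	 * growing that value.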
342204076Spjd */ 343204076Spjd hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps); 344204076Spjd if (hio_send_list == NULL) { 345204076Spjd primary_exitx(EX_TEMPFAIL, 346204076Spjd "Unable to allocate %zu bytes of memory for send lists.", 347204076Spjd sizeof(hio_send_list[0]) * ncomps); 348204076Spjd } 349204076Spjd hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps); 350204076Spjd if (hio_send_list_lock == NULL) { 351204076Spjd primary_exitx(EX_TEMPFAIL, 352204076Spjd "Unable to allocate %zu bytes of memory for send list locks.", 353204076Spjd sizeof(hio_send_list_lock[0]) * ncomps); 354204076Spjd } 355204076Spjd hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps); 356204076Spjd if (hio_send_list_cond == NULL) { 357204076Spjd primary_exitx(EX_TEMPFAIL, 358204076Spjd "Unable to allocate %zu bytes of memory for send list condition variables.", 359204076Spjd sizeof(hio_send_list_cond[0]) * ncomps); 360204076Spjd } 361204076Spjd hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps); 362204076Spjd if (hio_recv_list == NULL) { 363204076Spjd primary_exitx(EX_TEMPFAIL, 364204076Spjd "Unable to allocate %zu bytes of memory for recv lists.", 365204076Spjd sizeof(hio_recv_list[0]) * ncomps); 366204076Spjd } 367204076Spjd hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps); 368204076Spjd if (hio_recv_list_lock == NULL) { 369204076Spjd primary_exitx(EX_TEMPFAIL, 370204076Spjd "Unable to allocate %zu bytes of memory for recv list locks.", 371204076Spjd sizeof(hio_recv_list_lock[0]) * ncomps); 372204076Spjd } 373204076Spjd hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps); 374204076Spjd if (hio_recv_list_cond == NULL) { 375204076Spjd primary_exitx(EX_TEMPFAIL, 376204076Spjd "Unable to allocate %zu bytes of memory for recv list condition variables.", 377204076Spjd sizeof(hio_recv_list_cond[0]) * ncomps); 378204076Spjd } 379204076Spjd hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps); 380204076Spjd if (hio_remote_lock == NULL) { 381204076Spjd primary_exitx(EX_TEMPFAIL, 382204076Spjd "Unable to allocate %zu bytes of memory for remote connections locks.", 383204076Spjd sizeof(hio_remote_lock[0]) * ncomps); 384204076Spjd } 385204076Spjd 386204076Spjd /* 387204076Spjd * Initialize lists, their locks and theirs condition variables. 388204076Spjd */ 389204076Spjd TAILQ_INIT(&hio_free_list); 390204076Spjd mtx_init(&hio_free_list_lock); 391204076Spjd cv_init(&hio_free_list_cond); 392204076Spjd for (ii = 0; ii < HAST_NCOMPONENTS; ii++) { 393204076Spjd TAILQ_INIT(&hio_send_list[ii]); 394204076Spjd mtx_init(&hio_send_list_lock[ii]); 395204076Spjd cv_init(&hio_send_list_cond[ii]); 396204076Spjd TAILQ_INIT(&hio_recv_list[ii]); 397204076Spjd mtx_init(&hio_recv_list_lock[ii]); 398204076Spjd cv_init(&hio_recv_list_cond[ii]); 399204076Spjd rw_init(&hio_remote_lock[ii]); 400204076Spjd } 401204076Spjd TAILQ_INIT(&hio_done_list); 402204076Spjd mtx_init(&hio_done_list_lock); 403204076Spjd cv_init(&hio_done_list_cond); 404204076Spjd mtx_init(&metadata_lock); 405204076Spjd 406204076Spjd /* 407204076Spjd * Allocate requests pool and initialize requests. 
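	 * The pool is fixed at HAST_HIO_MAX (256) requests, each carrying a
	 * MAXPHYS-sized data buffer; with the usual 128kB MAXPHYS that is on
	 * the order of 32MB preallocated up front, which keeps the I/O path
	 * free of allocations.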
 */
	for (ii = 0; ii < HAST_HIO_MAX; ii++) {
		hio = malloc(sizeof(*hio));
		if (hio == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio request.",
			    sizeof(*hio));
		}
		hio->hio_countdown = 0;
		hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps);
		if (hio->hio_errors == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio errors.",
			    sizeof(hio->hio_errors[0]) * ncomps);
		}
		hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps);
		if (hio->hio_next == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio_next field.",
			    sizeof(hio->hio_next[0]) * ncomps);
		}
		hio->hio_ggio.gctl_version = G_GATE_VERSION;
		hio->hio_ggio.gctl_data = malloc(MAXPHYS);
		if (hio->hio_ggio.gctl_data == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for gctl_data.",
			    MAXPHYS);
		}
		hio->hio_ggio.gctl_length = MAXPHYS;
		hio->hio_ggio.gctl_error = 0;
		TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next);
	}
}

static bool
init_resuid(struct hast_resource *res)
{

	mtx_lock(&metadata_lock);
	if (res->hr_resuid != 0) {
		mtx_unlock(&metadata_lock);
		return (false);
	} else {
		/*
		 * Initialize unique resource identifier.
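		 * The identifier is just a random 64-bit value; it is
		 * exchanged during the handshake so both nodes can check
		 * they are talking about the same resource instance.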
*/ 452214284Spjd arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid)); 453214284Spjd mtx_unlock(&metadata_lock); 454231017Strociny if (metadata_write(res) == -1) 455214284Spjd exit(EX_NOINPUT); 456214284Spjd return (true); 457214284Spjd } 458214284Spjd} 459214284Spjd 460204076Spjdstatic void 461204076Spjdinit_local(struct hast_resource *res) 462204076Spjd{ 463204076Spjd unsigned char *buf; 464204076Spjd size_t mapsize; 465204076Spjd 466231017Strociny if (metadata_read(res, true) == -1) 467204076Spjd exit(EX_NOINPUT); 468204076Spjd mtx_init(&res->hr_amp_lock); 469204076Spjd if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize, 470231017Strociny res->hr_local_sectorsize, res->hr_keepdirty) == -1) { 471204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create activemap"); 472204076Spjd } 473204076Spjd mtx_init(&range_lock); 474204076Spjd cv_init(&range_regular_cond); 475231017Strociny if (rangelock_init(&range_regular) == -1) 476204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create regular range lock"); 477204076Spjd cv_init(&range_sync_cond); 478231017Strociny if (rangelock_init(&range_sync) == -1) 479204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create sync range lock"); 480204076Spjd mapsize = activemap_ondisk_size(res->hr_amp); 481204076Spjd buf = calloc(1, mapsize); 482204076Spjd if (buf == NULL) { 483204076Spjd primary_exitx(EX_TEMPFAIL, 484204076Spjd "Unable to allocate buffer for activemap."); 485204076Spjd } 486204076Spjd if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) != 487204076Spjd (ssize_t)mapsize) { 488204076Spjd primary_exit(EX_NOINPUT, "Unable to read activemap"); 489204076Spjd } 490204076Spjd activemap_copyin(res->hr_amp, buf, mapsize); 491209181Spjd free(buf); 492204076Spjd if (res->hr_resuid != 0) 493204076Spjd return; 494204076Spjd /* 495214284Spjd * We're using provider for the first time. Initialize local and remote 496214284Spjd * counters. We don't initialize resuid here, as we want to do it just 497214284Spjd * in time. The reason for this is that we want to inform secondary 498214284Spjd * that there were no writes yet, so there is no need to synchronize 499214284Spjd * anything. 
500204076Spjd */ 501219844Spjd res->hr_primary_localcnt = 0; 502204076Spjd res->hr_primary_remotecnt = 0; 503231017Strociny if (metadata_write(res) == -1) 504204076Spjd exit(EX_NOINPUT); 505204076Spjd} 506204076Spjd 507218218Spjdstatic int 508218218Spjdprimary_connect(struct hast_resource *res, struct proto_conn **connp) 509218218Spjd{ 510218218Spjd struct proto_conn *conn; 511218218Spjd int16_t val; 512218218Spjd 513218218Spjd val = 1; 514231017Strociny if (proto_send(res->hr_conn, &val, sizeof(val)) == -1) { 515218218Spjd primary_exit(EX_TEMPFAIL, 516218218Spjd "Unable to send connection request to parent"); 517218218Spjd } 518231017Strociny if (proto_recv(res->hr_conn, &val, sizeof(val)) == -1) { 519218218Spjd primary_exit(EX_TEMPFAIL, 520218218Spjd "Unable to receive reply to connection request from parent"); 521218218Spjd } 522218218Spjd if (val != 0) { 523218218Spjd errno = val; 524218218Spjd pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 525218218Spjd res->hr_remoteaddr); 526218218Spjd return (-1); 527218218Spjd } 528231017Strociny if (proto_connection_recv(res->hr_conn, true, &conn) == -1) { 529218218Spjd primary_exit(EX_TEMPFAIL, 530218218Spjd "Unable to receive connection from parent"); 531218218Spjd } 532231017Strociny if (proto_connect_wait(conn, res->hr_timeout) == -1) { 533218218Spjd pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 534218218Spjd res->hr_remoteaddr); 535218218Spjd proto_close(conn); 536218218Spjd return (-1); 537218218Spjd } 538218218Spjd /* Error in setting timeout is not critical, but why should it fail? */ 539231017Strociny if (proto_timeout(conn, res->hr_timeout) == -1) 540218218Spjd pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 541218218Spjd 542218218Spjd *connp = conn; 543218218Spjd 544218218Spjd return (0); 545218218Spjd} 546240269Strociny 547240269Strociny/* 548240269Strociny * Function instructs GEOM_GATE to handle reads directly from within the kernel. 
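 * When our local copy is known to be up to date (we are the synchronization
 * source), this lets the kernel serve BIO_READs straight from the local
 * provider and skip the round trip through hastd. The ioctl below is best
 * effort: on failure we only log a warning and reads keep going through the
 * daemon.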
549240269Strociny */ 550240269Strocinystatic void 551240269Strocinyenable_direct_reads(struct hast_resource *res) 552240269Strociny{ 553240269Strociny struct g_gate_ctl_modify ggiomodify; 554218218Spjd 555240269Strociny bzero(&ggiomodify, sizeof(ggiomodify)); 556240269Strociny ggiomodify.gctl_version = G_GATE_VERSION; 557240269Strociny ggiomodify.gctl_unit = res->hr_ggateunit; 558240269Strociny ggiomodify.gctl_modify = GG_MODIFY_READPROV | GG_MODIFY_READOFFSET; 559240269Strociny strlcpy(ggiomodify.gctl_readprov, res->hr_localpath, 560240269Strociny sizeof(ggiomodify.gctl_readprov)); 561240269Strociny ggiomodify.gctl_readoffset = res->hr_localoff; 562240269Strociny if (ioctl(res->hr_ggatefd, G_GATE_CMD_MODIFY, &ggiomodify) == 0) 563240269Strociny pjdlog_debug(1, "Direct reads enabled."); 564240269Strociny else 565240269Strociny pjdlog_errno(LOG_WARNING, "Failed to enable direct reads"); 566240269Strociny} 567240269Strociny 568220898Spjdstatic int 569205738Spjdinit_remote(struct hast_resource *res, struct proto_conn **inp, 570205738Spjd struct proto_conn **outp) 571204076Spjd{ 572205738Spjd struct proto_conn *in, *out; 573204076Spjd struct nv *nvout, *nvin; 574204076Spjd const unsigned char *token; 575204076Spjd unsigned char *map; 576204076Spjd const char *errmsg; 577204076Spjd int32_t extentsize; 578204076Spjd int64_t datasize; 579204076Spjd uint32_t mapsize; 580204076Spjd size_t size; 581220898Spjd int error; 582204076Spjd 583218138Spjd PJDLOG_ASSERT((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL)); 584218138Spjd PJDLOG_ASSERT(real_remote(res)); 585205738Spjd 586205738Spjd in = out = NULL; 587211983Spjd errmsg = NULL; 588205738Spjd 589218218Spjd if (primary_connect(res, &out) == -1) 590220898Spjd return (ECONNREFUSED); 591218218Spjd 592220898Spjd error = ECONNABORTED; 593220898Spjd 594204076Spjd /* 595204076Spjd * First handshake step. 596204076Spjd * Setup outgoing connection with remote node. 
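	 * The handshake is a plain nv header exchange; roughly, we send
	 * "resource" (our resource name) and expect either "token" back or
	 * "errmsg" on rejection. The token is echoed later on the incoming
	 * connection so the remote side can tie the two connections together.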
597204076Spjd */ 598204076Spjd nvout = nv_alloc(); 599204076Spjd nv_add_string(nvout, res->hr_name, "resource"); 600204076Spjd if (nv_error(nvout) != 0) { 601204076Spjd pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 602204076Spjd "Unable to allocate header for connection with %s", 603204076Spjd res->hr_remoteaddr); 604204076Spjd nv_free(nvout); 605204076Spjd goto close; 606204076Spjd } 607231017Strociny if (hast_proto_send(res, out, nvout, NULL, 0) == -1) { 608204076Spjd pjdlog_errno(LOG_WARNING, 609204076Spjd "Unable to send handshake header to %s", 610204076Spjd res->hr_remoteaddr); 611204076Spjd nv_free(nvout); 612204076Spjd goto close; 613204076Spjd } 614204076Spjd nv_free(nvout); 615231017Strociny if (hast_proto_recv_hdr(out, &nvin) == -1) { 616204076Spjd pjdlog_errno(LOG_WARNING, 617204076Spjd "Unable to receive handshake header from %s", 618204076Spjd res->hr_remoteaddr); 619204076Spjd goto close; 620204076Spjd } 621204076Spjd errmsg = nv_get_string(nvin, "errmsg"); 622204076Spjd if (errmsg != NULL) { 623204076Spjd pjdlog_warning("%s", errmsg); 624220898Spjd if (nv_exists(nvin, "wait")) 625220898Spjd error = EBUSY; 626204076Spjd nv_free(nvin); 627204076Spjd goto close; 628204076Spjd } 629204076Spjd token = nv_get_uint8_array(nvin, &size, "token"); 630204076Spjd if (token == NULL) { 631204076Spjd pjdlog_warning("Handshake header from %s has no 'token' field.", 632204076Spjd res->hr_remoteaddr); 633204076Spjd nv_free(nvin); 634204076Spjd goto close; 635204076Spjd } 636204076Spjd if (size != sizeof(res->hr_token)) { 637204076Spjd pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).", 638204076Spjd res->hr_remoteaddr, size, sizeof(res->hr_token)); 639204076Spjd nv_free(nvin); 640204076Spjd goto close; 641204076Spjd } 642204076Spjd bcopy(token, res->hr_token, sizeof(res->hr_token)); 643204076Spjd nv_free(nvin); 644204076Spjd 645204076Spjd /* 646204076Spjd * Second handshake step. 647204076Spjd * Setup incoming connection with remote node. 648204076Spjd */ 649218218Spjd if (primary_connect(res, &in) == -1) 650204076Spjd goto close; 651218218Spjd 652204076Spjd nvout = nv_alloc(); 653204076Spjd nv_add_string(nvout, res->hr_name, "resource"); 654204076Spjd nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token), 655204076Spjd "token"); 656214284Spjd if (res->hr_resuid == 0) { 657214284Spjd /* 658214284Spjd * The resuid field was not yet initialized. 659214284Spjd * Because we do synchronization inside init_resuid(), it is 660214284Spjd * possible that someone already initialized it, the function 661214284Spjd * will return false then, but if we successfully initialized 662214284Spjd * it, we will get true. True means that there were no writes 663214284Spjd * to this resource yet and we want to inform secondary that 664214284Spjd * synchronization is not needed by sending "virgin" argument. 
665214284Spjd */ 666214284Spjd if (init_resuid(res)) 667214284Spjd nv_add_int8(nvout, 1, "virgin"); 668214284Spjd } 669204076Spjd nv_add_uint64(nvout, res->hr_resuid, "resuid"); 670204076Spjd nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt"); 671204076Spjd nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt"); 672204076Spjd if (nv_error(nvout) != 0) { 673204076Spjd pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 674204076Spjd "Unable to allocate header for connection with %s", 675204076Spjd res->hr_remoteaddr); 676204076Spjd nv_free(nvout); 677204076Spjd goto close; 678204076Spjd } 679231017Strociny if (hast_proto_send(res, in, nvout, NULL, 0) == -1) { 680204076Spjd pjdlog_errno(LOG_WARNING, 681204076Spjd "Unable to send handshake header to %s", 682204076Spjd res->hr_remoteaddr); 683204076Spjd nv_free(nvout); 684204076Spjd goto close; 685204076Spjd } 686204076Spjd nv_free(nvout); 687231017Strociny if (hast_proto_recv_hdr(out, &nvin) == -1) { 688204076Spjd pjdlog_errno(LOG_WARNING, 689204076Spjd "Unable to receive handshake header from %s", 690204076Spjd res->hr_remoteaddr); 691204076Spjd goto close; 692204076Spjd } 693204076Spjd errmsg = nv_get_string(nvin, "errmsg"); 694204076Spjd if (errmsg != NULL) { 695204076Spjd pjdlog_warning("%s", errmsg); 696204076Spjd nv_free(nvin); 697204076Spjd goto close; 698204076Spjd } 699204076Spjd datasize = nv_get_int64(nvin, "datasize"); 700204076Spjd if (datasize != res->hr_datasize) { 701204076Spjd pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).", 702204076Spjd (intmax_t)res->hr_datasize, (intmax_t)datasize); 703204076Spjd nv_free(nvin); 704204076Spjd goto close; 705204076Spjd } 706204076Spjd extentsize = nv_get_int32(nvin, "extentsize"); 707204076Spjd if (extentsize != res->hr_extentsize) { 708204076Spjd pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).", 709204076Spjd (ssize_t)res->hr_extentsize, (ssize_t)extentsize); 710204076Spjd nv_free(nvin); 711204076Spjd goto close; 712204076Spjd } 713204076Spjd res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt"); 714204076Spjd res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt"); 715204076Spjd res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc"); 716240269Strociny if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) 717240269Strociny enable_direct_reads(res); 718220865Spjd if (nv_exists(nvin, "virgin")) { 719220865Spjd /* 720220865Spjd * Secondary was reinitialized, bump localcnt if it is 0 as 721220865Spjd * only we have the data. 
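		 *
		 * (localcnt/remotecnt are the generation counters the nodes
		 * compare to pick the synchronization source; bumping
		 * localcnt marks our copy as ahead of the freshly
		 * reinitialized secondary.)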
 */
		PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_PRIMARY);
		PJDLOG_ASSERT(res->hr_secondary_localcnt == 0);

		if (res->hr_primary_localcnt == 0) {
			PJDLOG_ASSERT(res->hr_secondary_remotecnt == 0);

			mtx_lock(&metadata_lock);
			res->hr_primary_localcnt++;
			pjdlog_debug(1, "Increasing localcnt to %ju.",
			    (uintmax_t)res->hr_primary_localcnt);
			(void)metadata_write(res);
			mtx_unlock(&metadata_lock);
		}
	}
	map = NULL;
	mapsize = nv_get_uint32(nvin, "mapsize");
	if (mapsize > 0) {
		map = malloc(mapsize);
		if (map == NULL) {
			pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).",
			    (uintmax_t)mapsize);
			nv_free(nvin);
			goto close;
		}
		/*
		 * Remote node has some dirty extents of its own, let's
		 * download its activemap.
		 */
		if (hast_proto_recv_data(res, out, nvin, map,
		    mapsize) == -1) {
			pjdlog_errno(LOG_ERR,
			    "Unable to receive remote activemap");
			nv_free(nvin);
			free(map);
			goto close;
		}
		/*
		 * Merge local and remote bitmaps.
		 */
		activemap_merge(res->hr_amp, map, mapsize);
		free(map);
		/*
		 * Now that we merged bitmaps from both nodes, flush it to the
		 * disk before we start to synchronize.
		 */
		(void)hast_activemap_flush(res);
	}
	nv_free(nvin);
#ifdef notyet
	/* Setup directions. */
	if (proto_send(out, NULL, 0) == -1)
		pjdlog_errno(LOG_WARNING, "Unable to set connection direction");
	if (proto_recv(in, NULL, 0) == -1)
		pjdlog_errno(LOG_WARNING, "Unable to set connection direction");
#endif
	pjdlog_info("Connected to %s.", res->hr_remoteaddr);
	if (inp != NULL && outp != NULL) {
		*inp = in;
		*outp = out;
	} else {
		res->hr_remotein = in;
		res->hr_remoteout = out;
	}
	event_send(res, EVENT_CONNECT);
	return (0);
close:
	if (errmsg != NULL && strcmp(errmsg, "Split-brain condition!") == 0)
		event_send(res, EVENT_SPLITBRAIN);
	proto_close(out);
	if (in != NULL)
		proto_close(in);
	return (error);
}

static void
sync_start(void)
{

	mtx_lock(&sync_lock);
	sync_inprogress = true;
	mtx_unlock(&sync_lock);
	cv_signal(&sync_cond);
}

static void
sync_stop(void)
{

	mtx_lock(&sync_lock);
	if (sync_inprogress)
		sync_inprogress = false;
	mtx_unlock(&sync_lock);
}

static void
init_ggate(struct hast_resource *res)
{
	struct g_gate_ctl_create ggiocreate;
	struct g_gate_ctl_cancel ggiocancel;

	/*
	 * We communicate with ggate via /dev/ggctl. Open it.
	 */
	res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR);
	if (res->hr_ggatefd == -1)
		primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME);
	/*
	 * Create provider before trying to connect, as connection failure
	 * is not critical, but may take some time.
	 */
	bzero(&ggiocreate, sizeof(ggiocreate));
	ggiocreate.gctl_version = G_GATE_VERSION;
	ggiocreate.gctl_mediasize = res->hr_datasize;
	ggiocreate.gctl_sectorsize = res->hr_local_sectorsize;
	ggiocreate.gctl_flags = 0;
	ggiocreate.gctl_maxcount = 0;
	ggiocreate.gctl_timeout = 0;
	ggiocreate.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s",
	    res->hr_provname);
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) {
		pjdlog_info("Device hast/%s created.", res->hr_provname);
		res->hr_ggateunit = ggiocreate.gctl_unit;
		return;
	}
	if (errno != EEXIST) {
		primary_exit(EX_OSERR, "Unable to create hast/%s device",
		    res->hr_provname);
	}
	pjdlog_debug(1,
	    "Device hast/%s already exists, we will try to take it over.",
	    res->hr_provname);
	/*
	 * If we received EEXIST, we assume that the process that created the
	 * provider died and didn't clean up. In that case we will start from
	 * where it left off.
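	 * The G_GATE_CMD_CANCEL ioctl below is what actually takes over the
	 * existing hast/<provname> unit left behind by the previous worker.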
859204076Spjd */ 860213533Spjd bzero(&ggiocancel, sizeof(ggiocancel)); 861204076Spjd ggiocancel.gctl_version = G_GATE_VERSION; 862204076Spjd ggiocancel.gctl_unit = G_GATE_NAME_GIVEN; 863204076Spjd snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s", 864204076Spjd res->hr_provname); 865204076Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) { 866204076Spjd pjdlog_info("Device hast/%s recovered.", res->hr_provname); 867204076Spjd res->hr_ggateunit = ggiocancel.gctl_unit; 868204076Spjd return; 869204076Spjd } 870204076Spjd primary_exit(EX_OSERR, "Unable to take over hast/%s device", 871204076Spjd res->hr_provname); 872204076Spjd} 873204076Spjd 874204076Spjdvoid 875204076Spjdhastd_primary(struct hast_resource *res) 876204076Spjd{ 877204076Spjd pthread_t td; 878204076Spjd pid_t pid; 879219482Strociny int error, mode, debuglevel; 880204076Spjd 881204076Spjd /* 882218218Spjd * Create communication channel for sending control commands from 883218218Spjd * parent to child. 884204076Spjd */ 885231017Strociny if (proto_client(NULL, "socketpair://", &res->hr_ctrl) == -1) { 886218042Spjd /* TODO: There's no need for this to be fatal error. */ 887204076Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 888212034Spjd pjdlog_exit(EX_OSERR, 889204076Spjd "Unable to create control sockets between parent and child"); 890204076Spjd } 891212038Spjd /* 892218218Spjd * Create communication channel for sending events from child to parent. 893212038Spjd */ 894231017Strociny if (proto_client(NULL, "socketpair://", &res->hr_event) == -1) { 895218042Spjd /* TODO: There's no need for this to be fatal error. */ 896212038Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 897212038Spjd pjdlog_exit(EX_OSERR, 898212038Spjd "Unable to create event sockets between child and parent"); 899212038Spjd } 900218218Spjd /* 901218218Spjd * Create communication channel for sending connection requests from 902218218Spjd * child to parent. 903218218Spjd */ 904231017Strociny if (proto_client(NULL, "socketpair://", &res->hr_conn) == -1) { 905218218Spjd /* TODO: There's no need for this to be fatal error. */ 906218218Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 907218218Spjd pjdlog_exit(EX_OSERR, 908218218Spjd "Unable to create connection sockets between child and parent"); 909218218Spjd } 910204076Spjd 911204076Spjd pid = fork(); 912231017Strociny if (pid == -1) { 913218042Spjd /* TODO: There's no need for this to be fatal error. */ 914204076Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 915212034Spjd pjdlog_exit(EX_TEMPFAIL, "Unable to fork"); 916204076Spjd } 917204076Spjd 918204076Spjd if (pid > 0) { 919204076Spjd /* This is parent. */ 920212038Spjd /* Declare that we are receiver. */ 921212038Spjd proto_recv(res->hr_event, NULL, 0); 922218218Spjd proto_recv(res->hr_conn, NULL, 0); 923218043Spjd /* Declare that we are sender. */ 924218043Spjd proto_send(res->hr_ctrl, NULL, 0); 925204076Spjd res->hr_workerpid = pid; 926204076Spjd return; 927204076Spjd } 928211977Spjd 929211984Spjd gres = res; 930218043Spjd mode = pjdlog_mode_get(); 931219482Strociny debuglevel = pjdlog_debug_get(); 932211984Spjd 933218043Spjd /* Declare that we are sender. */ 934218043Spjd proto_send(res->hr_event, NULL, 0); 935218218Spjd proto_send(res->hr_conn, NULL, 0); 936218043Spjd /* Declare that we are receiver. 
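	 * (proto_send()/proto_recv() with a NULL buffer and zero length move
	 * no data here; they only declare which direction each socketpair
	 * will be used in.)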
*/ 937218043Spjd proto_recv(res->hr_ctrl, NULL, 0); 938218043Spjd descriptors_cleanup(res); 939204076Spjd 940218045Spjd descriptors_assert(res, mode); 941218045Spjd 942218043Spjd pjdlog_init(mode); 943219482Strociny pjdlog_debug_set(debuglevel); 944218043Spjd pjdlog_prefix_set("[%s] (%s) ", res->hr_name, role2str(res->hr_role)); 945220005Spjd setproctitle("%s (%s)", res->hr_name, role2str(res->hr_role)); 946204076Spjd 947204076Spjd init_local(res); 948213007Spjd init_ggate(res); 949213007Spjd init_environment(res); 950217784Spjd 951221899Spjd if (drop_privs(res) != 0) { 952218049Spjd cleanup(res); 953218049Spjd exit(EX_CONFIG); 954218049Spjd } 955218214Spjd pjdlog_info("Privileges successfully dropped."); 956218049Spjd 957213007Spjd /* 958213530Spjd * Create the guard thread first, so we can handle signals from the 959231017Strociny * very beginning. 960213530Spjd */ 961213530Spjd error = pthread_create(&td, NULL, guard_thread, res); 962218138Spjd PJDLOG_ASSERT(error == 0); 963213530Spjd /* 964213007Spjd * Create the control thread before sending any event to the parent, 965213007Spjd * as we can deadlock when parent sends control request to worker, 966213007Spjd * but worker has no control thread started yet, so parent waits. 967213007Spjd * In the meantime worker sends an event to the parent, but parent 968213007Spjd * is unable to handle the event, because it waits for control 969213007Spjd * request response. 970213007Spjd */ 971213007Spjd error = pthread_create(&td, NULL, ctrl_thread, res); 972218138Spjd PJDLOG_ASSERT(error == 0); 973220898Spjd if (real_remote(res)) { 974220898Spjd error = init_remote(res, NULL, NULL); 975220898Spjd if (error == 0) { 976220898Spjd sync_start(); 977220898Spjd } else if (error == EBUSY) { 978220898Spjd time_t start = time(NULL); 979220898Spjd 980220898Spjd pjdlog_warning("Waiting for remote node to become %s for %ds.", 981220898Spjd role2str(HAST_ROLE_SECONDARY), 982220898Spjd res->hr_timeout); 983220898Spjd for (;;) { 984220898Spjd sleep(1); 985220898Spjd error = init_remote(res, NULL, NULL); 986220898Spjd if (error != EBUSY) 987220898Spjd break; 988220898Spjd if (time(NULL) > start + res->hr_timeout) 989220898Spjd break; 990220898Spjd } 991220898Spjd if (error == EBUSY) { 992220898Spjd pjdlog_warning("Remote node is still %s, starting anyway.", 993220898Spjd role2str(HAST_ROLE_PRIMARY)); 994220898Spjd } 995220898Spjd } 996220898Spjd } 997204076Spjd error = pthread_create(&td, NULL, ggate_recv_thread, res); 998218138Spjd PJDLOG_ASSERT(error == 0); 999204076Spjd error = pthread_create(&td, NULL, local_send_thread, res); 1000218138Spjd PJDLOG_ASSERT(error == 0); 1001204076Spjd error = pthread_create(&td, NULL, remote_send_thread, res); 1002218138Spjd PJDLOG_ASSERT(error == 0); 1003204076Spjd error = pthread_create(&td, NULL, remote_recv_thread, res); 1004218138Spjd PJDLOG_ASSERT(error == 0); 1005204076Spjd error = pthread_create(&td, NULL, ggate_send_thread, res); 1006218138Spjd PJDLOG_ASSERT(error == 0); 1007220898Spjd fullystarted = true; 1008213530Spjd (void)sync_thread(res); 1009204076Spjd} 1010204076Spjd 1011204076Spjdstatic void 1012204076Spjdreqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio, const char *fmt, ...) 
1013204076Spjd{ 1014204076Spjd char msg[1024]; 1015204076Spjd va_list ap; 1016204076Spjd 1017204076Spjd va_start(ap, fmt); 1018240269Strociny (void)vsnprintf(msg, sizeof(msg), fmt, ap); 1019204076Spjd va_end(ap); 1020240269Strociny switch (ggio->gctl_cmd) { 1021240269Strociny case BIO_READ: 1022240269Strociny (void)snprlcat(msg, sizeof(msg), "READ(%ju, %ju).", 1023240269Strociny (uintmax_t)ggio->gctl_offset, 1024240269Strociny (uintmax_t)ggio->gctl_length); 1025240269Strociny break; 1026240269Strociny case BIO_DELETE: 1027240269Strociny (void)snprlcat(msg, sizeof(msg), "DELETE(%ju, %ju).", 1028240269Strociny (uintmax_t)ggio->gctl_offset, 1029240269Strociny (uintmax_t)ggio->gctl_length); 1030240269Strociny break; 1031240269Strociny case BIO_FLUSH: 1032240269Strociny (void)snprlcat(msg, sizeof(msg), "FLUSH."); 1033240269Strociny break; 1034240269Strociny case BIO_WRITE: 1035240269Strociny (void)snprlcat(msg, sizeof(msg), "WRITE(%ju, %ju).", 1036240269Strociny (uintmax_t)ggio->gctl_offset, 1037240269Strociny (uintmax_t)ggio->gctl_length); 1038240269Strociny break; 1039240269Strociny default: 1040240269Strociny (void)snprlcat(msg, sizeof(msg), "UNKNOWN(%u).", 1041240269Strociny (unsigned int)ggio->gctl_cmd); 1042240269Strociny break; 1043204076Spjd } 1044204076Spjd pjdlog_common(loglevel, debuglevel, -1, "%s", msg); 1045204076Spjd} 1046204076Spjd 1047204076Spjdstatic void 1048204076Spjdremote_close(struct hast_resource *res, int ncomp) 1049204076Spjd{ 1050204076Spjd 1051204076Spjd rw_wlock(&hio_remote_lock[ncomp]); 1052204076Spjd /* 1053229509Strociny * Check for a race between dropping rlock and acquiring wlock - 1054204076Spjd * another thread can close connection in-between. 1055204076Spjd */ 1056204076Spjd if (!ISCONNECTED(res, ncomp)) { 1057218138Spjd PJDLOG_ASSERT(res->hr_remotein == NULL); 1058218138Spjd PJDLOG_ASSERT(res->hr_remoteout == NULL); 1059204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1060204076Spjd return; 1061204076Spjd } 1062204076Spjd 1063218138Spjd PJDLOG_ASSERT(res->hr_remotein != NULL); 1064218138Spjd PJDLOG_ASSERT(res->hr_remoteout != NULL); 1065204076Spjd 1066211881Spjd pjdlog_debug(2, "Closing incoming connection to %s.", 1067204076Spjd res->hr_remoteaddr); 1068204076Spjd proto_close(res->hr_remotein); 1069204076Spjd res->hr_remotein = NULL; 1070211881Spjd pjdlog_debug(2, "Closing outgoing connection to %s.", 1071204076Spjd res->hr_remoteaddr); 1072204076Spjd proto_close(res->hr_remoteout); 1073204076Spjd res->hr_remoteout = NULL; 1074204076Spjd 1075204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1076204076Spjd 1077211881Spjd pjdlog_warning("Disconnected from %s.", res->hr_remoteaddr); 1078211881Spjd 1079204076Spjd /* 1080204076Spjd * Stop synchronization if in-progress. 1081204076Spjd */ 1082211878Spjd sync_stop(); 1083211984Spjd 1084212038Spjd event_send(res, EVENT_DISCONNECT); 1085204076Spjd} 1086204076Spjd 1087204076Spjd/* 1088229509Strociny * Acknowledge write completion to the kernel, but don't update activemap yet. 
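 * The extent covering this write has to stay marked dirty in the activemap
 * until the remote component has confirmed the data as well; clearing it
 * here would throw away the very information needed to resynchronize after
 * a failure.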
1089229509Strociny */ 1090229509Strocinystatic void 1091229509Strocinywrite_complete(struct hast_resource *res, struct hio *hio) 1092229509Strociny{ 1093229509Strociny struct g_gate_ctl_io *ggio; 1094229509Strociny unsigned int ncomp; 1095229509Strociny 1096229509Strociny PJDLOG_ASSERT(!hio->hio_done); 1097229509Strociny 1098229509Strociny ggio = &hio->hio_ggio; 1099229509Strociny PJDLOG_ASSERT(ggio->gctl_cmd == BIO_WRITE); 1100229509Strociny 1101229509Strociny /* 1102229509Strociny * Bump local count if this is first write after 1103229509Strociny * connection failure with remote node. 1104229509Strociny */ 1105229509Strociny ncomp = 1; 1106229509Strociny rw_rlock(&hio_remote_lock[ncomp]); 1107229509Strociny if (!ISCONNECTED(res, ncomp)) { 1108229509Strociny mtx_lock(&metadata_lock); 1109229509Strociny if (res->hr_primary_localcnt == res->hr_secondary_remotecnt) { 1110229509Strociny res->hr_primary_localcnt++; 1111229509Strociny pjdlog_debug(1, "Increasing localcnt to %ju.", 1112229509Strociny (uintmax_t)res->hr_primary_localcnt); 1113229509Strociny (void)metadata_write(res); 1114229509Strociny } 1115229509Strociny mtx_unlock(&metadata_lock); 1116229509Strociny } 1117229509Strociny rw_unlock(&hio_remote_lock[ncomp]); 1118231017Strociny if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) == -1) 1119229509Strociny primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed"); 1120229509Strociny hio->hio_done = true; 1121229509Strociny} 1122229509Strociny 1123229509Strociny/* 1124204076Spjd * Thread receives ggate I/O requests from the kernel and passes them to 1125204076Spjd * appropriate threads: 1126204076Spjd * WRITE - always goes to both local_send and remote_send threads 1127204076Spjd * READ (when the block is up-to-date on local component) - 1128204076Spjd * only local_send thread 1129204076Spjd * READ (when the block isn't up-to-date on local component) - 1130204076Spjd * only remote_send thread 1131204076Spjd * DELETE - always goes to both local_send and remote_send threads 1132204076Spjd * FLUSH - always goes to both local_send and remote_send threads 1133204076Spjd */ 1134204076Spjdstatic void * 1135204076Spjdggate_recv_thread(void *arg) 1136204076Spjd{ 1137204076Spjd struct hast_resource *res = arg; 1138204076Spjd struct g_gate_ctl_io *ggio; 1139204076Spjd struct hio *hio; 1140204076Spjd unsigned int ii, ncomp, ncomps; 1141204076Spjd int error; 1142204076Spjd 1143204076Spjd for (;;) { 1144204076Spjd pjdlog_debug(2, "ggate_recv: Taking free request."); 1145204076Spjd QUEUE_TAKE2(hio, free); 1146204076Spjd pjdlog_debug(2, "ggate_recv: (%p) Got free request.", hio); 1147204076Spjd ggio = &hio->hio_ggio; 1148204076Spjd ggio->gctl_unit = res->hr_ggateunit; 1149204076Spjd ggio->gctl_length = MAXPHYS; 1150204076Spjd ggio->gctl_error = 0; 1151229509Strociny hio->hio_done = false; 1152229509Strociny hio->hio_replication = res->hr_replication; 1153204076Spjd pjdlog_debug(2, 1154204076Spjd "ggate_recv: (%p) Waiting for request from the kernel.", 1155204076Spjd hio); 1156231017Strociny if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) == -1) { 1157204076Spjd if (sigexit_received) 1158204076Spjd pthread_exit(NULL); 1159204076Spjd primary_exit(EX_OSERR, "G_GATE_CMD_START failed"); 1160204076Spjd } 1161204076Spjd error = ggio->gctl_error; 1162204076Spjd switch (error) { 1163204076Spjd case 0: 1164204076Spjd break; 1165204076Spjd case ECANCELED: 1166204076Spjd /* Exit gracefully. 
*/ 1167204076Spjd if (!sigexit_received) { 1168204076Spjd pjdlog_debug(2, 1169204076Spjd "ggate_recv: (%p) Received cancel from the kernel.", 1170204076Spjd hio); 1171204076Spjd pjdlog_info("Received cancel from the kernel, exiting."); 1172204076Spjd } 1173204076Spjd pthread_exit(NULL); 1174204076Spjd case ENOMEM: 1175204076Spjd /* 1176204076Spjd * Buffer too small? Impossible, we allocate MAXPHYS 1177204076Spjd * bytes - request can't be bigger than that. 1178204076Spjd */ 1179204076Spjd /* FALLTHROUGH */ 1180204076Spjd case ENXIO: 1181204076Spjd default: 1182204076Spjd primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.", 1183204076Spjd strerror(error)); 1184204076Spjd } 1185229509Strociny 1186229509Strociny ncomp = 0; 1187229509Strociny ncomps = HAST_NCOMPONENTS; 1188229509Strociny 1189204076Spjd for (ii = 0; ii < ncomps; ii++) 1190204076Spjd hio->hio_errors[ii] = EINVAL; 1191204076Spjd reqlog(LOG_DEBUG, 2, ggio, 1192204076Spjd "ggate_recv: (%p) Request received from the kernel: ", 1193204076Spjd hio); 1194229509Strociny 1195204076Spjd /* 1196204076Spjd * Inform all components about new write request. 1197204076Spjd * For read request prefer local component unless the given 1198204076Spjd * range is out-of-date, then use remote component. 1199204076Spjd */ 1200204076Spjd switch (ggio->gctl_cmd) { 1201204076Spjd case BIO_READ: 1202222228Spjd res->hr_stat_read++; 1203229509Strociny ncomps = 1; 1204204076Spjd mtx_lock(&metadata_lock); 1205204076Spjd if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF || 1206204076Spjd res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1207204076Spjd /* 1208204076Spjd * This range is up-to-date on local component, 1209204076Spjd * so handle request locally. 1210204076Spjd */ 1211204076Spjd /* Local component is 0 for now. */ 1212204076Spjd ncomp = 0; 1213204076Spjd } else /* if (res->hr_syncsrc == 1214204076Spjd HAST_SYNCSRC_SECONDARY) */ { 1215218138Spjd PJDLOG_ASSERT(res->hr_syncsrc == 1216204076Spjd HAST_SYNCSRC_SECONDARY); 1217204076Spjd /* 1218204076Spjd * This range is out-of-date on local component, 1219204076Spjd * so send request to the remote node. 1220204076Spjd */ 1221204076Spjd /* Remote component is 1 for now. */ 1222204076Spjd ncomp = 1; 1223204076Spjd } 1224204076Spjd mtx_unlock(&metadata_lock); 1225204076Spjd break; 1226204076Spjd case BIO_WRITE: 1227222228Spjd res->hr_stat_write++; 1228229509Strociny if (res->hr_resuid == 0 && 1229229509Strociny res->hr_primary_localcnt == 0) { 1230229509Strociny /* This is first write. 
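				 * (resuid == 0 with localcnt == 0 means the
				 * provider has never been written to; setting
				 * localcnt to 1 records that our copy is now
				 * ahead.)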
*/ 1231219844Spjd res->hr_primary_localcnt = 1; 1232214284Spjd } 1233204076Spjd for (;;) { 1234204076Spjd mtx_lock(&range_lock); 1235204076Spjd if (rangelock_islocked(range_sync, 1236204076Spjd ggio->gctl_offset, ggio->gctl_length)) { 1237204076Spjd pjdlog_debug(2, 1238204076Spjd "regular: Range offset=%jd length=%zu locked.", 1239204076Spjd (intmax_t)ggio->gctl_offset, 1240204076Spjd (size_t)ggio->gctl_length); 1241204076Spjd range_regular_wait = true; 1242204076Spjd cv_wait(&range_regular_cond, &range_lock); 1243204076Spjd range_regular_wait = false; 1244204076Spjd mtx_unlock(&range_lock); 1245204076Spjd continue; 1246204076Spjd } 1247204076Spjd if (rangelock_add(range_regular, 1248231017Strociny ggio->gctl_offset, ggio->gctl_length) == -1) { 1249204076Spjd mtx_unlock(&range_lock); 1250204076Spjd pjdlog_debug(2, 1251204076Spjd "regular: Range offset=%jd length=%zu is already locked, waiting.", 1252204076Spjd (intmax_t)ggio->gctl_offset, 1253204076Spjd (size_t)ggio->gctl_length); 1254204076Spjd sleep(1); 1255204076Spjd continue; 1256204076Spjd } 1257204076Spjd mtx_unlock(&range_lock); 1258204076Spjd break; 1259204076Spjd } 1260204076Spjd mtx_lock(&res->hr_amp_lock); 1261204076Spjd if (activemap_write_start(res->hr_amp, 1262204076Spjd ggio->gctl_offset, ggio->gctl_length)) { 1263222228Spjd res->hr_stat_activemap_update++; 1264204076Spjd (void)hast_activemap_flush(res); 1265204076Spjd } 1266204076Spjd mtx_unlock(&res->hr_amp_lock); 1267229509Strociny break; 1268204076Spjd case BIO_DELETE: 1269229509Strociny res->hr_stat_delete++; 1270229509Strociny break; 1271204076Spjd case BIO_FLUSH: 1272229509Strociny res->hr_stat_flush++; 1273204076Spjd break; 1274204076Spjd } 1275229509Strociny pjdlog_debug(2, 1276229509Strociny "ggate_recv: (%p) Moving request to the send queues.", hio); 1277229509Strociny refcount_init(&hio->hio_countdown, ncomps); 1278231556Strociny for (ii = ncomp; ii < ncomp + ncomps; ii++) 1279229509Strociny QUEUE_INSERT1(hio, send, ii); 1280204076Spjd } 1281204076Spjd /* NOTREACHED */ 1282204076Spjd return (NULL); 1283204076Spjd} 1284204076Spjd 1285204076Spjd/* 1286204076Spjd * Thread reads from or writes to local component. 1287204076Spjd * If local read fails, it redirects it to remote_send thread. 1288204076Spjd */ 1289204076Spjdstatic void * 1290204076Spjdlocal_send_thread(void *arg) 1291204076Spjd{ 1292204076Spjd struct hast_resource *res = arg; 1293204076Spjd struct g_gate_ctl_io *ggio; 1294204076Spjd struct hio *hio; 1295204076Spjd unsigned int ncomp, rncomp; 1296204076Spjd ssize_t ret; 1297204076Spjd 1298204076Spjd /* Local component is 0 for now. */ 1299204076Spjd ncomp = 0; 1300204076Spjd /* Remote component is 1 for now. */ 1301204076Spjd rncomp = 1; 1302204076Spjd 1303204076Spjd for (;;) { 1304204076Spjd pjdlog_debug(2, "local_send: Taking request."); 1305214692Spjd QUEUE_TAKE1(hio, send, ncomp, 0); 1306204076Spjd pjdlog_debug(2, "local_send: (%p) Got request.", hio); 1307204076Spjd ggio = &hio->hio_ggio; 1308204076Spjd switch (ggio->gctl_cmd) { 1309204076Spjd case BIO_READ: 1310204076Spjd ret = pread(res->hr_localfd, ggio->gctl_data, 1311204076Spjd ggio->gctl_length, 1312204076Spjd ggio->gctl_offset + res->hr_localoff); 1313204076Spjd if (ret == ggio->gctl_length) 1314204076Spjd hio->hio_errors[ncomp] = 0; 1315222467Strociny else if (!ISSYNCREQ(hio)) { 1316204076Spjd /* 1317204076Spjd * If READ failed, try to read from remote node. 
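				 * Only regular requests are redirected this
				 * way; synchronization requests (ISSYNCREQ)
				 * are not, since their whole purpose is to
				 * read one specific component.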
1318204076Spjd */ 1319231017Strociny if (ret == -1) { 1320216479Spjd reqlog(LOG_WARNING, 0, ggio, 1321216479Spjd "Local request failed (%s), trying remote node. ", 1322216479Spjd strerror(errno)); 1323216479Spjd } else if (ret != ggio->gctl_length) { 1324216479Spjd reqlog(LOG_WARNING, 0, ggio, 1325216479Spjd "Local request failed (%zd != %jd), trying remote node. ", 1326216494Spjd ret, (intmax_t)ggio->gctl_length); 1327216479Spjd } 1328204076Spjd QUEUE_INSERT1(hio, send, rncomp); 1329204076Spjd continue; 1330204076Spjd } 1331204076Spjd break; 1332204076Spjd case BIO_WRITE: 1333204076Spjd ret = pwrite(res->hr_localfd, ggio->gctl_data, 1334204076Spjd ggio->gctl_length, 1335204076Spjd ggio->gctl_offset + res->hr_localoff); 1336231017Strociny if (ret == -1) { 1337204076Spjd hio->hio_errors[ncomp] = errno; 1338216479Spjd reqlog(LOG_WARNING, 0, ggio, 1339216479Spjd "Local request failed (%s): ", 1340216479Spjd strerror(errno)); 1341216479Spjd } else if (ret != ggio->gctl_length) { 1342204076Spjd hio->hio_errors[ncomp] = EIO; 1343216479Spjd reqlog(LOG_WARNING, 0, ggio, 1344216479Spjd "Local request failed (%zd != %jd): ", 1345216494Spjd ret, (intmax_t)ggio->gctl_length); 1346216479Spjd } else { 1347204076Spjd hio->hio_errors[ncomp] = 0; 1348229509Strociny if (hio->hio_replication == 1349231556Strociny HAST_REPLICATION_ASYNC && 1350231556Strociny !ISSYNCREQ(hio)) { 1351229509Strociny ggio->gctl_error = 0; 1352229509Strociny write_complete(res, hio); 1353229509Strociny } 1354216479Spjd } 1355204076Spjd break; 1356204076Spjd case BIO_DELETE: 1357204076Spjd ret = g_delete(res->hr_localfd, 1358204076Spjd ggio->gctl_offset + res->hr_localoff, 1359204076Spjd ggio->gctl_length); 1360231017Strociny if (ret == -1) { 1361204076Spjd hio->hio_errors[ncomp] = errno; 1362216479Spjd reqlog(LOG_WARNING, 0, ggio, 1363216479Spjd "Local request failed (%s): ", 1364216479Spjd strerror(errno)); 1365216479Spjd } else { 1366204076Spjd hio->hio_errors[ncomp] = 0; 1367216479Spjd } 1368204076Spjd break; 1369204076Spjd case BIO_FLUSH: 1370229509Strociny if (!res->hr_localflush) { 1371229509Strociny ret = -1; 1372229509Strociny errno = EOPNOTSUPP; 1373229509Strociny break; 1374229509Strociny } 1375204076Spjd ret = g_flush(res->hr_localfd); 1376231017Strociny if (ret == -1) { 1377229509Strociny if (errno == EOPNOTSUPP) 1378229509Strociny res->hr_localflush = false; 1379204076Spjd hio->hio_errors[ncomp] = errno; 1380216479Spjd reqlog(LOG_WARNING, 0, ggio, 1381216479Spjd "Local request failed (%s): ", 1382216479Spjd strerror(errno)); 1383216479Spjd } else { 1384204076Spjd hio->hio_errors[ncomp] = 0; 1385216479Spjd } 1386204076Spjd break; 1387204076Spjd } 1388229509Strociny if (!refcount_release(&hio->hio_countdown)) 1389229509Strociny continue; 1390229509Strociny if (ISSYNCREQ(hio)) { 1391229509Strociny mtx_lock(&sync_lock); 1392229509Strociny SYNCREQDONE(hio); 1393229509Strociny mtx_unlock(&sync_lock); 1394229509Strociny cv_signal(&sync_cond); 1395229509Strociny } else { 1396229509Strociny pjdlog_debug(2, 1397229509Strociny "local_send: (%p) Moving request to the done queue.", 1398229509Strociny hio); 1399229509Strociny QUEUE_INSERT2(hio, done); 1400204076Spjd } 1401204076Spjd } 1402204076Spjd /* NOTREACHED */ 1403204076Spjd return (NULL); 1404204076Spjd} 1405204076Spjd 1406214692Spjdstatic void 1407214692Spjdkeepalive_send(struct hast_resource *res, unsigned int ncomp) 1408214692Spjd{ 1409214692Spjd struct nv *nv; 1410214692Spjd 1411218217Spjd rw_rlock(&hio_remote_lock[ncomp]); 1412218217Spjd 1413218217Spjd if 
(!ISCONNECTED(res, ncomp)) { 1414218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1415214692Spjd return; 1416218217Spjd } 1417219864Spjd 1418218138Spjd PJDLOG_ASSERT(res->hr_remotein != NULL); 1419218138Spjd PJDLOG_ASSERT(res->hr_remoteout != NULL); 1420214692Spjd 1421214692Spjd nv = nv_alloc(); 1422214692Spjd nv_add_uint8(nv, HIO_KEEPALIVE, "cmd"); 1423214692Spjd if (nv_error(nv) != 0) { 1424218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1425214692Spjd nv_free(nv); 1426214692Spjd pjdlog_debug(1, 1427214692Spjd "keepalive_send: Unable to prepare header to send."); 1428214692Spjd return; 1429214692Spjd } 1430231017Strociny if (hast_proto_send(res, res->hr_remoteout, nv, NULL, 0) == -1) { 1431218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1432214692Spjd pjdlog_common(LOG_DEBUG, 1, errno, 1433214692Spjd "keepalive_send: Unable to send request"); 1434214692Spjd nv_free(nv); 1435214692Spjd remote_close(res, ncomp); 1436214692Spjd return; 1437214692Spjd } 1438218217Spjd 1439218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1440214692Spjd nv_free(nv); 1441214692Spjd pjdlog_debug(2, "keepalive_send: Request sent."); 1442214692Spjd} 1443214692Spjd 1444204076Spjd/* 1445204076Spjd * Thread sends request to secondary node. 1446204076Spjd */ 1447204076Spjdstatic void * 1448204076Spjdremote_send_thread(void *arg) 1449204076Spjd{ 1450204076Spjd struct hast_resource *res = arg; 1451204076Spjd struct g_gate_ctl_io *ggio; 1452214692Spjd time_t lastcheck, now; 1453204076Spjd struct hio *hio; 1454204076Spjd struct nv *nv; 1455204076Spjd unsigned int ncomp; 1456204076Spjd bool wakeup; 1457204076Spjd uint64_t offset, length; 1458204076Spjd uint8_t cmd; 1459204076Spjd void *data; 1460204076Spjd 1461204076Spjd /* Remote component is 1 for now. */ 1462204076Spjd ncomp = 1; 1463219864Spjd lastcheck = time(NULL); 1464204076Spjd 1465204076Spjd for (;;) { 1466204076Spjd pjdlog_debug(2, "remote_send: Taking request."); 1467219721Strociny QUEUE_TAKE1(hio, send, ncomp, HAST_KEEPALIVE); 1468214692Spjd if (hio == NULL) { 1469214692Spjd now = time(NULL); 1470219721Strociny if (lastcheck + HAST_KEEPALIVE <= now) { 1471214692Spjd keepalive_send(res, ncomp); 1472214692Spjd lastcheck = now; 1473214692Spjd } 1474214692Spjd continue; 1475214692Spjd } 1476204076Spjd pjdlog_debug(2, "remote_send: (%p) Got request.", hio); 1477204076Spjd ggio = &hio->hio_ggio; 1478204076Spjd switch (ggio->gctl_cmd) { 1479204076Spjd case BIO_READ: 1480204076Spjd cmd = HIO_READ; 1481204076Spjd data = NULL; 1482204076Spjd offset = ggio->gctl_offset; 1483204076Spjd length = ggio->gctl_length; 1484204076Spjd break; 1485204076Spjd case BIO_WRITE: 1486204076Spjd cmd = HIO_WRITE; 1487204076Spjd data = ggio->gctl_data; 1488204076Spjd offset = ggio->gctl_offset; 1489204076Spjd length = ggio->gctl_length; 1490204076Spjd break; 1491204076Spjd case BIO_DELETE: 1492204076Spjd cmd = HIO_DELETE; 1493204076Spjd data = NULL; 1494204076Spjd offset = ggio->gctl_offset; 1495204076Spjd length = ggio->gctl_length; 1496204076Spjd break; 1497204076Spjd case BIO_FLUSH: 1498204076Spjd cmd = HIO_FLUSH; 1499204076Spjd data = NULL; 1500204076Spjd offset = 0; 1501204076Spjd length = 0; 1502204076Spjd break; 1503204076Spjd default: 1504229509Strociny PJDLOG_ABORT("invalid condition"); 1505204076Spjd } 1506204076Spjd nv = nv_alloc(); 1507204076Spjd nv_add_uint8(nv, cmd, "cmd"); 1508204076Spjd nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq"); 1509204076Spjd nv_add_uint64(nv, offset, "offset"); 1510204076Spjd nv_add_uint64(nv, length, "length"); 1511204076Spjd if (nv_error(nv) != 0) { 
1512204076Spjd hio->hio_errors[ncomp] = nv_error(nv);
1513204076Spjd pjdlog_debug(2,
1514204076Spjd "remote_send: (%p) Unable to prepare header to send.",
1515204076Spjd hio);
1516204076Spjd reqlog(LOG_ERR, 0, ggio,
1517204076Spjd "Unable to prepare header to send (%s): ",
1518204076Spjd strerror(nv_error(nv)));
1519204076Spjd /* Move failed request immediately to the done queue. */
1520204076Spjd goto done_queue;
1521204076Spjd }
1522204076Spjd /*
1523204076Spjd * Protect connection from disappearing.
1524204076Spjd */
1525204076Spjd rw_rlock(&hio_remote_lock[ncomp]);
1526204076Spjd if (!ISCONNECTED(res, ncomp)) {
1527204076Spjd rw_unlock(&hio_remote_lock[ncomp]);
1528204076Spjd hio->hio_errors[ncomp] = ENOTCONN;
1529204076Spjd goto done_queue;
1530204076Spjd }
1531204076Spjd /*
1532204076Spjd * Move the request to the recv queue before sending it; if it
1533204076Spjd * were sent first, the reply could arrive before the request
1534204076Spjd * is on the recv queue.
1535204076Spjd */
1536229509Strociny pjdlog_debug(2,
1537229509Strociny "remote_send: (%p) Moving request to the recv queue.",
1538229509Strociny hio);
1539204076Spjd mtx_lock(&hio_recv_list_lock[ncomp]);
1540204076Spjd wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]);
1541204076Spjd TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
1542204076Spjd mtx_unlock(&hio_recv_list_lock[ncomp]);
1543204076Spjd if (hast_proto_send(res, res->hr_remoteout, nv, data,
1544231017Strociny data != NULL ? length : 0) == -1) {
1545204076Spjd hio->hio_errors[ncomp] = errno;
1546204076Spjd rw_unlock(&hio_remote_lock[ncomp]);
1547204076Spjd pjdlog_debug(2,
1548204076Spjd "remote_send: (%p) Unable to send request.", hio);
1549204076Spjd reqlog(LOG_ERR, 0, ggio,
1550204076Spjd "Unable to send request (%s): ",
1551204076Spjd strerror(hio->hio_errors[ncomp]));
1552211979Spjd remote_close(res, ncomp);
1553204076Spjd /*
1554204076Spjd * Take request back from the receive queue and move
1555204076Spjd * it immediately to the done queue.
1556204076Spjd */ 1557204076Spjd mtx_lock(&hio_recv_list_lock[ncomp]); 1558229509Strociny TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1559229509Strociny hio_next[ncomp]); 1560204076Spjd mtx_unlock(&hio_recv_list_lock[ncomp]); 1561204076Spjd goto done_queue; 1562204076Spjd } 1563204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1564204076Spjd nv_free(nv); 1565204076Spjd if (wakeup) 1566204076Spjd cv_signal(&hio_recv_list_cond[ncomp]); 1567204076Spjd continue; 1568204076Spjddone_queue: 1569204076Spjd nv_free(nv); 1570204076Spjd if (ISSYNCREQ(hio)) { 1571204076Spjd if (!refcount_release(&hio->hio_countdown)) 1572204076Spjd continue; 1573204076Spjd mtx_lock(&sync_lock); 1574204076Spjd SYNCREQDONE(hio); 1575204076Spjd mtx_unlock(&sync_lock); 1576204076Spjd cv_signal(&sync_cond); 1577204076Spjd continue; 1578204076Spjd } 1579204076Spjd if (ggio->gctl_cmd == BIO_WRITE) { 1580204076Spjd mtx_lock(&res->hr_amp_lock); 1581204076Spjd if (activemap_need_sync(res->hr_amp, ggio->gctl_offset, 1582204076Spjd ggio->gctl_length)) { 1583204076Spjd (void)hast_activemap_flush(res); 1584204076Spjd } 1585204076Spjd mtx_unlock(&res->hr_amp_lock); 1586204076Spjd } 1587204076Spjd if (!refcount_release(&hio->hio_countdown)) 1588204076Spjd continue; 1589204076Spjd pjdlog_debug(2, 1590204076Spjd "remote_send: (%p) Moving request to the done queue.", 1591204076Spjd hio); 1592204076Spjd QUEUE_INSERT2(hio, done); 1593204076Spjd } 1594204076Spjd /* NOTREACHED */ 1595204076Spjd return (NULL); 1596204076Spjd} 1597204076Spjd 1598204076Spjd/* 1599204076Spjd * Thread receives answer from secondary node and passes it to ggate_send 1600204076Spjd * thread. 1601204076Spjd */ 1602204076Spjdstatic void * 1603204076Spjdremote_recv_thread(void *arg) 1604204076Spjd{ 1605204076Spjd struct hast_resource *res = arg; 1606204076Spjd struct g_gate_ctl_io *ggio; 1607204076Spjd struct hio *hio; 1608204076Spjd struct nv *nv; 1609204076Spjd unsigned int ncomp; 1610204076Spjd uint64_t seq; 1611204076Spjd int error; 1612204076Spjd 1613204076Spjd /* Remote component is 1 for now. */ 1614204076Spjd ncomp = 1; 1615204076Spjd 1616204076Spjd for (;;) { 1617204076Spjd /* Wait until there is anything to receive. */ 1618204076Spjd mtx_lock(&hio_recv_list_lock[ncomp]); 1619204076Spjd while (TAILQ_EMPTY(&hio_recv_list[ncomp])) { 1620204076Spjd pjdlog_debug(2, "remote_recv: No requests, waiting."); 1621204076Spjd cv_wait(&hio_recv_list_cond[ncomp], 1622204076Spjd &hio_recv_list_lock[ncomp]); 1623204076Spjd } 1624204076Spjd mtx_unlock(&hio_recv_list_lock[ncomp]); 1625229509Strociny 1626204076Spjd rw_rlock(&hio_remote_lock[ncomp]); 1627204076Spjd if (!ISCONNECTED(res, ncomp)) { 1628204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1629204076Spjd /* 1630204076Spjd * Connection is dead, so move all pending requests to 1631204076Spjd * the done queue (one-by-one). 
1632204076Spjd */ 1633204076Spjd mtx_lock(&hio_recv_list_lock[ncomp]); 1634204076Spjd hio = TAILQ_FIRST(&hio_recv_list[ncomp]); 1635218138Spjd PJDLOG_ASSERT(hio != NULL); 1636204076Spjd TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1637204076Spjd hio_next[ncomp]); 1638204076Spjd mtx_unlock(&hio_recv_list_lock[ncomp]); 1639204076Spjd goto done_queue; 1640204076Spjd } 1641231017Strociny if (hast_proto_recv_hdr(res->hr_remotein, &nv) == -1) { 1642204076Spjd pjdlog_errno(LOG_ERR, 1643204076Spjd "Unable to receive reply header"); 1644204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1645204076Spjd remote_close(res, ncomp); 1646204076Spjd continue; 1647204076Spjd } 1648204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1649204076Spjd seq = nv_get_uint64(nv, "seq"); 1650204076Spjd if (seq == 0) { 1651204076Spjd pjdlog_error("Header contains no 'seq' field."); 1652204076Spjd nv_free(nv); 1653204076Spjd continue; 1654204076Spjd } 1655204076Spjd mtx_lock(&hio_recv_list_lock[ncomp]); 1656204076Spjd TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) { 1657204076Spjd if (hio->hio_ggio.gctl_seq == seq) { 1658204076Spjd TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1659204076Spjd hio_next[ncomp]); 1660204076Spjd break; 1661204076Spjd } 1662204076Spjd } 1663204076Spjd mtx_unlock(&hio_recv_list_lock[ncomp]); 1664204076Spjd if (hio == NULL) { 1665204076Spjd pjdlog_error("Found no request matching received 'seq' field (%ju).", 1666204076Spjd (uintmax_t)seq); 1667204076Spjd nv_free(nv); 1668204076Spjd continue; 1669204076Spjd } 1670229509Strociny ggio = &hio->hio_ggio; 1671204076Spjd error = nv_get_int16(nv, "error"); 1672204076Spjd if (error != 0) { 1673204076Spjd /* Request failed on remote side. */ 1674216478Spjd hio->hio_errors[ncomp] = error; 1675229509Strociny reqlog(LOG_WARNING, 0, ggio, 1676216479Spjd "Remote request failed (%s): ", strerror(error)); 1677204076Spjd nv_free(nv); 1678204076Spjd goto done_queue; 1679204076Spjd } 1680204076Spjd switch (ggio->gctl_cmd) { 1681204076Spjd case BIO_READ: 1682204076Spjd rw_rlock(&hio_remote_lock[ncomp]); 1683204076Spjd if (!ISCONNECTED(res, ncomp)) { 1684204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1685204076Spjd nv_free(nv); 1686204076Spjd goto done_queue; 1687204076Spjd } 1688204076Spjd if (hast_proto_recv_data(res, res->hr_remotein, nv, 1689231017Strociny ggio->gctl_data, ggio->gctl_length) == -1) { 1690204076Spjd hio->hio_errors[ncomp] = errno; 1691204076Spjd pjdlog_errno(LOG_ERR, 1692204076Spjd "Unable to receive reply data"); 1693204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1694204076Spjd nv_free(nv); 1695204076Spjd remote_close(res, ncomp); 1696204076Spjd goto done_queue; 1697204076Spjd } 1698204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1699204076Spjd break; 1700204076Spjd case BIO_WRITE: 1701204076Spjd case BIO_DELETE: 1702204076Spjd case BIO_FLUSH: 1703204076Spjd break; 1704204076Spjd default: 1705229509Strociny PJDLOG_ABORT("invalid condition"); 1706204076Spjd } 1707204076Spjd hio->hio_errors[ncomp] = 0; 1708204076Spjd nv_free(nv); 1709204076Spjddone_queue: 1710229509Strociny if (!refcount_release(&hio->hio_countdown)) 1711229509Strociny continue; 1712229509Strociny if (ISSYNCREQ(hio)) { 1713229509Strociny mtx_lock(&sync_lock); 1714229509Strociny SYNCREQDONE(hio); 1715229509Strociny mtx_unlock(&sync_lock); 1716229509Strociny cv_signal(&sync_cond); 1717229509Strociny } else { 1718229509Strociny pjdlog_debug(2, 1719229509Strociny "remote_recv: (%p) Moving request to the done queue.", 1720229509Strociny hio); 1721229509Strociny QUEUE_INSERT2(hio, done); 
1722204076Spjd } 1723204076Spjd } 1724204076Spjd /* NOTREACHED */ 1725204076Spjd return (NULL); 1726204076Spjd} 1727204076Spjd 1728204076Spjd/* 1729204076Spjd * Thread sends answer to the kernel. 1730204076Spjd */ 1731204076Spjdstatic void * 1732204076Spjdggate_send_thread(void *arg) 1733204076Spjd{ 1734204076Spjd struct hast_resource *res = arg; 1735204076Spjd struct g_gate_ctl_io *ggio; 1736204076Spjd struct hio *hio; 1737229509Strociny unsigned int ii, ncomps; 1738204076Spjd 1739204076Spjd ncomps = HAST_NCOMPONENTS; 1740204076Spjd 1741204076Spjd for (;;) { 1742204076Spjd pjdlog_debug(2, "ggate_send: Taking request."); 1743204076Spjd QUEUE_TAKE2(hio, done); 1744204076Spjd pjdlog_debug(2, "ggate_send: (%p) Got request.", hio); 1745204076Spjd ggio = &hio->hio_ggio; 1746204076Spjd for (ii = 0; ii < ncomps; ii++) { 1747204076Spjd if (hio->hio_errors[ii] == 0) { 1748204076Spjd /* 1749204076Spjd * One successful request is enough to declare 1750204076Spjd * success. 1751204076Spjd */ 1752204076Spjd ggio->gctl_error = 0; 1753204076Spjd break; 1754204076Spjd } 1755204076Spjd } 1756204076Spjd if (ii == ncomps) { 1757204076Spjd /* 1758204076Spjd * None of the requests were successful. 1759219879Strociny * Use the error from local component except the 1760219879Strociny * case when we did only remote request. 1761204076Spjd */ 1762219879Strociny if (ggio->gctl_cmd == BIO_READ && 1763219879Strociny res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) 1764219879Strociny ggio->gctl_error = hio->hio_errors[1]; 1765219879Strociny else 1766219879Strociny ggio->gctl_error = hio->hio_errors[0]; 1767204076Spjd } 1768204076Spjd if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) { 1769204076Spjd mtx_lock(&res->hr_amp_lock); 1770223655Strociny if (activemap_write_complete(res->hr_amp, 1771223974Strociny ggio->gctl_offset, ggio->gctl_length)) { 1772223655Strociny res->hr_stat_activemap_update++; 1773223655Strociny (void)hast_activemap_flush(res); 1774223655Strociny } 1775204076Spjd mtx_unlock(&res->hr_amp_lock); 1776204076Spjd } 1777204076Spjd if (ggio->gctl_cmd == BIO_WRITE) { 1778204076Spjd /* 1779204076Spjd * Unlock range we locked. 1780204076Spjd */ 1781204076Spjd mtx_lock(&range_lock); 1782204076Spjd rangelock_del(range_regular, ggio->gctl_offset, 1783204076Spjd ggio->gctl_length); 1784204076Spjd if (range_sync_wait) 1785204076Spjd cv_signal(&range_sync_cond); 1786204076Spjd mtx_unlock(&range_lock); 1787229509Strociny if (!hio->hio_done) 1788229509Strociny write_complete(res, hio); 1789229509Strociny } else { 1790231017Strociny if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) == -1) { 1791229509Strociny primary_exit(EX_OSERR, 1792229509Strociny "G_GATE_CMD_DONE failed"); 1793204076Spjd } 1794204076Spjd } 1795204076Spjd pjdlog_debug(2, 1796204076Spjd "ggate_send: (%p) Moving request to the free queue.", hio); 1797204076Spjd QUEUE_INSERT2(hio, free); 1798204076Spjd } 1799204076Spjd /* NOTREACHED */ 1800204076Spjd return (NULL); 1801204076Spjd} 1802204076Spjd 1803204076Spjd/* 1804204076Spjd * Thread synchronize local and remote components. 
1805204076Spjd */ 1806204076Spjdstatic void * 1807204076Spjdsync_thread(void *arg __unused) 1808204076Spjd{ 1809204076Spjd struct hast_resource *res = arg; 1810204076Spjd struct hio *hio; 1811204076Spjd struct g_gate_ctl_io *ggio; 1812219372Spjd struct timeval tstart, tend, tdiff; 1813204076Spjd unsigned int ii, ncomp, ncomps; 1814204076Spjd off_t offset, length, synced; 1815240269Strociny bool dorewind, directreads; 1816204076Spjd int syncext; 1817204076Spjd 1818204076Spjd ncomps = HAST_NCOMPONENTS; 1819204076Spjd dorewind = true; 1820211897Spjd synced = 0; 1821211897Spjd offset = -1; 1822240269Strociny directreads = false; 1823204076Spjd 1824204076Spjd for (;;) { 1825204076Spjd mtx_lock(&sync_lock); 1826211897Spjd if (offset >= 0 && !sync_inprogress) { 1827219372Spjd gettimeofday(&tend, NULL); 1828219372Spjd timersub(&tend, &tstart, &tdiff); 1829219372Spjd pjdlog_info("Synchronization interrupted after %#.0T. " 1830219372Spjd "%NB synchronized so far.", &tdiff, 1831211879Spjd (intmax_t)synced); 1832212038Spjd event_send(res, EVENT_SYNCINTR); 1833211879Spjd } 1834204076Spjd while (!sync_inprogress) { 1835204076Spjd dorewind = true; 1836204076Spjd synced = 0; 1837204076Spjd cv_wait(&sync_cond, &sync_lock); 1838204076Spjd } 1839204076Spjd mtx_unlock(&sync_lock); 1840204076Spjd /* 1841204076Spjd * Obtain offset at which we should synchronize. 1842204076Spjd * Rewind synchronization if needed. 1843204076Spjd */ 1844204076Spjd mtx_lock(&res->hr_amp_lock); 1845204076Spjd if (dorewind) 1846204076Spjd activemap_sync_rewind(res->hr_amp); 1847204076Spjd offset = activemap_sync_offset(res->hr_amp, &length, &syncext); 1848204076Spjd if (syncext != -1) { 1849204076Spjd /* 1850204076Spjd * We synchronized entire syncext extent, we can mark 1851204076Spjd * it as clean now. 1852204076Spjd */ 1853204076Spjd if (activemap_extent_complete(res->hr_amp, syncext)) 1854204076Spjd (void)hast_activemap_flush(res); 1855204076Spjd } 1856204076Spjd mtx_unlock(&res->hr_amp_lock); 1857204076Spjd if (dorewind) { 1858204076Spjd dorewind = false; 1859231017Strociny if (offset == -1) 1860204076Spjd pjdlog_info("Nodes are in sync."); 1861204076Spjd else { 1862219372Spjd pjdlog_info("Synchronization started. %NB to go.", 1863219372Spjd (intmax_t)(res->hr_extentsize * 1864204076Spjd activemap_ndirty(res->hr_amp))); 1865212038Spjd event_send(res, EVENT_SYNCSTART); 1866219372Spjd gettimeofday(&tstart, NULL); 1867204076Spjd } 1868204076Spjd } 1869231017Strociny if (offset == -1) { 1870211878Spjd sync_stop(); 1871204076Spjd pjdlog_debug(1, "Nothing to synchronize."); 1872204076Spjd /* 1873204076Spjd * Synchronization complete, make both localcnt and 1874204076Spjd * remotecnt equal. 1875204076Spjd */ 1876204076Spjd ncomp = 1; 1877204076Spjd rw_rlock(&hio_remote_lock[ncomp]); 1878204076Spjd if (ISCONNECTED(res, ncomp)) { 1879204076Spjd if (synced > 0) { 1880219372Spjd int64_t bps; 1881219372Spjd 1882219372Spjd gettimeofday(&tend, NULL); 1883219372Spjd timersub(&tend, &tstart, &tdiff); 1884219372Spjd bps = (int64_t)((double)synced / 1885219372Spjd ((double)tdiff.tv_sec + 1886219372Spjd (double)tdiff.tv_usec / 1000000)); 1887204076Spjd pjdlog_info("Synchronization complete. 
" 1888219372Spjd "%NB synchronized in %#.0lT (%NB/sec).", 1889219372Spjd (intmax_t)synced, &tdiff, 1890219372Spjd (intmax_t)bps); 1891212038Spjd event_send(res, EVENT_SYNCDONE); 1892204076Spjd } 1893204076Spjd mtx_lock(&metadata_lock); 1894240269Strociny if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) 1895240269Strociny directreads = true; 1896204076Spjd res->hr_syncsrc = HAST_SYNCSRC_UNDEF; 1897204076Spjd res->hr_primary_localcnt = 1898219882Strociny res->hr_secondary_remotecnt; 1899219882Strociny res->hr_primary_remotecnt = 1900204076Spjd res->hr_secondary_localcnt; 1901204076Spjd pjdlog_debug(1, 1902204076Spjd "Setting localcnt to %ju and remotecnt to %ju.", 1903204076Spjd (uintmax_t)res->hr_primary_localcnt, 1904219882Strociny (uintmax_t)res->hr_primary_remotecnt); 1905204076Spjd (void)metadata_write(res); 1906204076Spjd mtx_unlock(&metadata_lock); 1907204076Spjd } 1908204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1909240269Strociny if (directreads) { 1910240269Strociny directreads = false; 1911240269Strociny enable_direct_reads(res); 1912240269Strociny } 1913204076Spjd continue; 1914204076Spjd } 1915204076Spjd pjdlog_debug(2, "sync: Taking free request."); 1916204076Spjd QUEUE_TAKE2(hio, free); 1917204076Spjd pjdlog_debug(2, "sync: (%p) Got free request.", hio); 1918204076Spjd /* 1919204076Spjd * Lock the range we are going to synchronize. We don't want 1920204076Spjd * race where someone writes between our read and write. 1921204076Spjd */ 1922204076Spjd for (;;) { 1923204076Spjd mtx_lock(&range_lock); 1924204076Spjd if (rangelock_islocked(range_regular, offset, length)) { 1925204076Spjd pjdlog_debug(2, 1926204076Spjd "sync: Range offset=%jd length=%jd locked.", 1927204076Spjd (intmax_t)offset, (intmax_t)length); 1928204076Spjd range_sync_wait = true; 1929204076Spjd cv_wait(&range_sync_cond, &range_lock); 1930204076Spjd range_sync_wait = false; 1931204076Spjd mtx_unlock(&range_lock); 1932204076Spjd continue; 1933204076Spjd } 1934231017Strociny if (rangelock_add(range_sync, offset, length) == -1) { 1935204076Spjd mtx_unlock(&range_lock); 1936204076Spjd pjdlog_debug(2, 1937204076Spjd "sync: Range offset=%jd length=%jd is already locked, waiting.", 1938204076Spjd (intmax_t)offset, (intmax_t)length); 1939204076Spjd sleep(1); 1940204076Spjd continue; 1941204076Spjd } 1942204076Spjd mtx_unlock(&range_lock); 1943204076Spjd break; 1944204076Spjd } 1945204076Spjd /* 1946204076Spjd * First read the data from synchronization source. 1947204076Spjd */ 1948204076Spjd SYNCREQ(hio); 1949204076Spjd ggio = &hio->hio_ggio; 1950204076Spjd ggio->gctl_cmd = BIO_READ; 1951204076Spjd ggio->gctl_offset = offset; 1952204076Spjd ggio->gctl_length = length; 1953204076Spjd ggio->gctl_error = 0; 1954229509Strociny hio->hio_done = false; 1955229509Strociny hio->hio_replication = res->hr_replication; 1956204076Spjd for (ii = 0; ii < ncomps; ii++) 1957204076Spjd hio->hio_errors[ii] = EINVAL; 1958204076Spjd reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 1959204076Spjd hio); 1960204076Spjd pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 1961204076Spjd hio); 1962204076Spjd mtx_lock(&metadata_lock); 1963204076Spjd if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1964204076Spjd /* 1965204076Spjd * This range is up-to-date on local component, 1966204076Spjd * so handle request locally. 1967204076Spjd */ 1968204076Spjd /* Local component is 0 for now. 
*/ 1969204076Spjd ncomp = 0; 1970204076Spjd } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 1971218138Spjd PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 1972204076Spjd /* 1973204076Spjd * This range is out-of-date on local component, 1974204076Spjd * so send request to the remote node. 1975204076Spjd */ 1976204076Spjd /* Remote component is 1 for now. */ 1977204076Spjd ncomp = 1; 1978204076Spjd } 1979204076Spjd mtx_unlock(&metadata_lock); 1980204076Spjd refcount_init(&hio->hio_countdown, 1); 1981204076Spjd QUEUE_INSERT1(hio, send, ncomp); 1982204076Spjd 1983204076Spjd /* 1984204076Spjd * Let's wait for READ to finish. 1985204076Spjd */ 1986204076Spjd mtx_lock(&sync_lock); 1987204076Spjd while (!ISSYNCREQDONE(hio)) 1988204076Spjd cv_wait(&sync_cond, &sync_lock); 1989204076Spjd mtx_unlock(&sync_lock); 1990204076Spjd 1991204076Spjd if (hio->hio_errors[ncomp] != 0) { 1992204076Spjd pjdlog_error("Unable to read synchronization data: %s.", 1993204076Spjd strerror(hio->hio_errors[ncomp])); 1994204076Spjd goto free_queue; 1995204076Spjd } 1996204076Spjd 1997204076Spjd /* 1998204076Spjd * We read the data from synchronization source, now write it 1999204076Spjd * to synchronization target. 2000204076Spjd */ 2001204076Spjd SYNCREQ(hio); 2002204076Spjd ggio->gctl_cmd = BIO_WRITE; 2003204076Spjd for (ii = 0; ii < ncomps; ii++) 2004204076Spjd hio->hio_errors[ii] = EINVAL; 2005204076Spjd reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 2006204076Spjd hio); 2007204076Spjd pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 2008204076Spjd hio); 2009204076Spjd mtx_lock(&metadata_lock); 2010204076Spjd if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 2011204076Spjd /* 2012204076Spjd * This range is up-to-date on local component, 2013204076Spjd * so we update remote component. 2014204076Spjd */ 2015204076Spjd /* Remote component is 1 for now. */ 2016204076Spjd ncomp = 1; 2017204076Spjd } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 2018218138Spjd PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 2019204076Spjd /* 2020204076Spjd * This range is out-of-date on local component, 2021204076Spjd * so we update it. 2022204076Spjd */ 2023204076Spjd /* Local component is 0 for now. */ 2024204076Spjd ncomp = 0; 2025204076Spjd } 2026204076Spjd mtx_unlock(&metadata_lock); 2027204076Spjd 2028229509Strociny pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 2029204076Spjd hio); 2030204076Spjd refcount_init(&hio->hio_countdown, 1); 2031204076Spjd QUEUE_INSERT1(hio, send, ncomp); 2032204076Spjd 2033204076Spjd /* 2034204076Spjd * Let's wait for WRITE to finish. 
2035204076Spjd */ 2036204076Spjd mtx_lock(&sync_lock); 2037204076Spjd while (!ISSYNCREQDONE(hio)) 2038204076Spjd cv_wait(&sync_cond, &sync_lock); 2039204076Spjd mtx_unlock(&sync_lock); 2040204076Spjd 2041204076Spjd if (hio->hio_errors[ncomp] != 0) { 2042204076Spjd pjdlog_error("Unable to write synchronization data: %s.", 2043204076Spjd strerror(hio->hio_errors[ncomp])); 2044204076Spjd goto free_queue; 2045204076Spjd } 2046211880Spjd 2047211880Spjd synced += length; 2048204076Spjdfree_queue: 2049204076Spjd mtx_lock(&range_lock); 2050204076Spjd rangelock_del(range_sync, offset, length); 2051204076Spjd if (range_regular_wait) 2052204076Spjd cv_signal(&range_regular_cond); 2053204076Spjd mtx_unlock(&range_lock); 2054204076Spjd pjdlog_debug(2, "sync: (%p) Moving request to the free queue.", 2055204076Spjd hio); 2056204076Spjd QUEUE_INSERT2(hio, free); 2057204076Spjd } 2058204076Spjd /* NOTREACHED */ 2059204076Spjd return (NULL); 2060204076Spjd} 2061204076Spjd 2062217784Spjdvoid 2063217784Spjdprimary_config_reload(struct hast_resource *res, struct nv *nv) 2064210886Spjd{ 2065210886Spjd unsigned int ii, ncomps; 2066217784Spjd int modified, vint; 2067217784Spjd const char *vstr; 2068210886Spjd 2069210886Spjd pjdlog_info("Reloading configuration..."); 2070210886Spjd 2071218138Spjd PJDLOG_ASSERT(res->hr_role == HAST_ROLE_PRIMARY); 2072218138Spjd PJDLOG_ASSERT(gres == res); 2073217784Spjd nv_assert(nv, "remoteaddr"); 2074219818Spjd nv_assert(nv, "sourceaddr"); 2075217784Spjd nv_assert(nv, "replication"); 2076219351Spjd nv_assert(nv, "checksum"); 2077219354Spjd nv_assert(nv, "compression"); 2078217784Spjd nv_assert(nv, "timeout"); 2079217784Spjd nv_assert(nv, "exec"); 2080229509Strociny nv_assert(nv, "metaflush"); 2081217784Spjd 2082210886Spjd ncomps = HAST_NCOMPONENTS; 2083210886Spjd 2084219351Spjd#define MODIFIED_REMOTEADDR 0x01 2085219818Spjd#define MODIFIED_SOURCEADDR 0x02 2086219818Spjd#define MODIFIED_REPLICATION 0x04 2087219818Spjd#define MODIFIED_CHECKSUM 0x08 2088219818Spjd#define MODIFIED_COMPRESSION 0x10 2089219818Spjd#define MODIFIED_TIMEOUT 0x20 2090219818Spjd#define MODIFIED_EXEC 0x40 2091229509Strociny#define MODIFIED_METAFLUSH 0x80 2092210886Spjd modified = 0; 2093217784Spjd 2094217784Spjd vstr = nv_get_string(nv, "remoteaddr"); 2095217784Spjd if (strcmp(gres->hr_remoteaddr, vstr) != 0) { 2096210886Spjd /* 2097210886Spjd * Don't copy res->hr_remoteaddr to gres just yet. 2098210886Spjd * We want remote_close() to log disconnect from the old 2099210886Spjd * addresses, not from the new ones. 
2100210886Spjd */
2101210886Spjd modified |= MODIFIED_REMOTEADDR;
2102210886Spjd }
2103219818Spjd vstr = nv_get_string(nv, "sourceaddr");
2104219818Spjd if (strcmp(gres->hr_sourceaddr, vstr) != 0) {
2105219818Spjd strlcpy(gres->hr_sourceaddr, vstr, sizeof(gres->hr_sourceaddr));
2106219818Spjd modified |= MODIFIED_SOURCEADDR;
2107219818Spjd }
2108217784Spjd vint = nv_get_int32(nv, "replication");
2109217784Spjd if (gres->hr_replication != vint) {
2110217784Spjd gres->hr_replication = vint;
2111210886Spjd modified |= MODIFIED_REPLICATION;
2112210886Spjd }
2113219351Spjd vint = nv_get_int32(nv, "checksum");
2114219351Spjd if (gres->hr_checksum != vint) {
2115219351Spjd gres->hr_checksum = vint;
2116219351Spjd modified |= MODIFIED_CHECKSUM;
2117219351Spjd }
2118219354Spjd vint = nv_get_int32(nv, "compression");
2119219354Spjd if (gres->hr_compression != vint) {
2120219354Spjd gres->hr_compression = vint;
2121219354Spjd modified |= MODIFIED_COMPRESSION;
2122219354Spjd }
2123217784Spjd vint = nv_get_int32(nv, "timeout");
2124217784Spjd if (gres->hr_timeout != vint) {
2125217784Spjd gres->hr_timeout = vint;
2126210886Spjd modified |= MODIFIED_TIMEOUT;
2127210886Spjd }
2128217784Spjd vstr = nv_get_string(nv, "exec");
2129217784Spjd if (strcmp(gres->hr_exec, vstr) != 0) {
2130217784Spjd strlcpy(gres->hr_exec, vstr, sizeof(gres->hr_exec));
2131211886Spjd modified |= MODIFIED_EXEC;
2132211886Spjd }
2133229509Strociny vint = nv_get_int32(nv, "metaflush");
2134229509Strociny if (gres->hr_metaflush != vint) {
2135229509Strociny gres->hr_metaflush = vint;
2136229509Strociny modified |= MODIFIED_METAFLUSH;
2137229509Strociny }
2138217784Spjd
2139210886Spjd /*
2140219351Spjd * Change timeout for connected sockets.
2141219351Spjd * Don't bother if we need to reconnect.
2142210886Spjd */ 2143219351Spjd if ((modified & MODIFIED_TIMEOUT) != 0 && 2144229509Strociny (modified & (MODIFIED_REMOTEADDR | MODIFIED_SOURCEADDR)) == 0) { 2145210886Spjd for (ii = 0; ii < ncomps; ii++) { 2146210886Spjd if (!ISREMOTE(ii)) 2147210886Spjd continue; 2148210886Spjd rw_rlock(&hio_remote_lock[ii]); 2149210886Spjd if (!ISCONNECTED(gres, ii)) { 2150210886Spjd rw_unlock(&hio_remote_lock[ii]); 2151210886Spjd continue; 2152210886Spjd } 2153210886Spjd rw_unlock(&hio_remote_lock[ii]); 2154210886Spjd if (proto_timeout(gres->hr_remotein, 2155231017Strociny gres->hr_timeout) == -1) { 2156210886Spjd pjdlog_errno(LOG_WARNING, 2157210886Spjd "Unable to set connection timeout"); 2158210886Spjd } 2159210886Spjd if (proto_timeout(gres->hr_remoteout, 2160231017Strociny gres->hr_timeout) == -1) { 2161210886Spjd pjdlog_errno(LOG_WARNING, 2162210886Spjd "Unable to set connection timeout"); 2163210886Spjd } 2164210886Spjd } 2165219351Spjd } 2166229509Strociny if ((modified & (MODIFIED_REMOTEADDR | MODIFIED_SOURCEADDR)) != 0) { 2167210886Spjd for (ii = 0; ii < ncomps; ii++) { 2168210886Spjd if (!ISREMOTE(ii)) 2169210886Spjd continue; 2170210886Spjd remote_close(gres, ii); 2171210886Spjd } 2172210886Spjd if (modified & MODIFIED_REMOTEADDR) { 2173217784Spjd vstr = nv_get_string(nv, "remoteaddr"); 2174217784Spjd strlcpy(gres->hr_remoteaddr, vstr, 2175210886Spjd sizeof(gres->hr_remoteaddr)); 2176210886Spjd } 2177210886Spjd } 2178210886Spjd#undef MODIFIED_REMOTEADDR 2179219818Spjd#undef MODIFIED_SOURCEADDR 2180210886Spjd#undef MODIFIED_REPLICATION 2181219351Spjd#undef MODIFIED_CHECKSUM 2182219354Spjd#undef MODIFIED_COMPRESSION 2183210886Spjd#undef MODIFIED_TIMEOUT 2184211886Spjd#undef MODIFIED_EXEC 2185229509Strociny#undef MODIFIED_METAFLUSH 2186210886Spjd 2187210886Spjd pjdlog_info("Configuration reloaded successfully."); 2188210886Spjd} 2189210886Spjd 2190211882Spjdstatic void 2191211981Spjdguard_one(struct hast_resource *res, unsigned int ncomp) 2192211981Spjd{ 2193211981Spjd struct proto_conn *in, *out; 2194211981Spjd 2195211981Spjd if (!ISREMOTE(ncomp)) 2196211981Spjd return; 2197211981Spjd 2198211981Spjd rw_rlock(&hio_remote_lock[ncomp]); 2199211981Spjd 2200211981Spjd if (!real_remote(res)) { 2201211981Spjd rw_unlock(&hio_remote_lock[ncomp]); 2202211981Spjd return; 2203211981Spjd } 2204211981Spjd 2205211981Spjd if (ISCONNECTED(res, ncomp)) { 2206218138Spjd PJDLOG_ASSERT(res->hr_remotein != NULL); 2207218138Spjd PJDLOG_ASSERT(res->hr_remoteout != NULL); 2208211981Spjd rw_unlock(&hio_remote_lock[ncomp]); 2209211981Spjd pjdlog_debug(2, "remote_guard: Connection to %s is ok.", 2210211981Spjd res->hr_remoteaddr); 2211211981Spjd return; 2212211981Spjd } 2213211981Spjd 2214218138Spjd PJDLOG_ASSERT(res->hr_remotein == NULL); 2215218138Spjd PJDLOG_ASSERT(res->hr_remoteout == NULL); 2216211981Spjd /* 2217211981Spjd * Upgrade the lock. It doesn't have to be atomic as no other thread 2218211981Spjd * can change connection status from disconnected to connected. 
2219211981Spjd */ 2220211981Spjd rw_unlock(&hio_remote_lock[ncomp]); 2221211981Spjd pjdlog_debug(2, "remote_guard: Reconnecting to %s.", 2222211981Spjd res->hr_remoteaddr); 2223211981Spjd in = out = NULL; 2224220898Spjd if (init_remote(res, &in, &out) == 0) { 2225211981Spjd rw_wlock(&hio_remote_lock[ncomp]); 2226218138Spjd PJDLOG_ASSERT(res->hr_remotein == NULL); 2227218138Spjd PJDLOG_ASSERT(res->hr_remoteout == NULL); 2228218138Spjd PJDLOG_ASSERT(in != NULL && out != NULL); 2229211981Spjd res->hr_remotein = in; 2230211981Spjd res->hr_remoteout = out; 2231211981Spjd rw_unlock(&hio_remote_lock[ncomp]); 2232211981Spjd pjdlog_info("Successfully reconnected to %s.", 2233211981Spjd res->hr_remoteaddr); 2234211981Spjd sync_start(); 2235211981Spjd } else { 2236211981Spjd /* Both connections should be NULL. */ 2237218138Spjd PJDLOG_ASSERT(res->hr_remotein == NULL); 2238218138Spjd PJDLOG_ASSERT(res->hr_remoteout == NULL); 2239218138Spjd PJDLOG_ASSERT(in == NULL && out == NULL); 2240211981Spjd pjdlog_debug(2, "remote_guard: Reconnect to %s failed.", 2241211981Spjd res->hr_remoteaddr); 2242211981Spjd } 2243211981Spjd} 2244211981Spjd 2245204076Spjd/* 2246204076Spjd * Thread guards remote connections and reconnects when needed, handles 2247204076Spjd * signals, etc. 2248204076Spjd */ 2249204076Spjdstatic void * 2250204076Spjdguard_thread(void *arg) 2251204076Spjd{ 2252204076Spjd struct hast_resource *res = arg; 2253204076Spjd unsigned int ii, ncomps; 2254211982Spjd struct timespec timeout; 2255211981Spjd time_t lastcheck, now; 2256211982Spjd sigset_t mask; 2257211982Spjd int signo; 2258204076Spjd 2259204076Spjd ncomps = HAST_NCOMPONENTS; 2260211981Spjd lastcheck = time(NULL); 2261204076Spjd 2262211982Spjd PJDLOG_VERIFY(sigemptyset(&mask) == 0); 2263211982Spjd PJDLOG_VERIFY(sigaddset(&mask, SIGINT) == 0); 2264211982Spjd PJDLOG_VERIFY(sigaddset(&mask, SIGTERM) == 0); 2265211982Spjd 2266219721Strociny timeout.tv_sec = HAST_KEEPALIVE; 2267211982Spjd timeout.tv_nsec = 0; 2268211982Spjd signo = -1; 2269211982Spjd 2270204076Spjd for (;;) { 2271211982Spjd switch (signo) { 2272211982Spjd case SIGINT: 2273211982Spjd case SIGTERM: 2274211982Spjd sigexit_received = true; 2275204076Spjd primary_exitx(EX_OK, 2276204076Spjd "Termination signal received, exiting."); 2277211982Spjd break; 2278211982Spjd default: 2279211982Spjd break; 2280204076Spjd } 2281211882Spjd 2282220898Spjd /* 2283220898Spjd * Don't check connections until we fully started, 2284220898Spjd * as we may still be looping, waiting for remote node 2285220898Spjd * to switch from primary to secondary. 2286220898Spjd */ 2287220898Spjd if (fullystarted) { 2288220898Spjd pjdlog_debug(2, "remote_guard: Checking connections."); 2289220898Spjd now = time(NULL); 2290220898Spjd if (lastcheck + HAST_KEEPALIVE <= now) { 2291220898Spjd for (ii = 0; ii < ncomps; ii++) 2292220898Spjd guard_one(res, ii); 2293220898Spjd lastcheck = now; 2294220898Spjd } 2295204076Spjd } 2296211982Spjd signo = sigtimedwait(&mask, NULL, &timeout); 2297204076Spjd } 2298204076Spjd /* NOTREACHED */ 2299204076Spjd return (NULL); 2300204076Spjd} 2301
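/*
 * Editor's illustrative sketch (not part of primary.c): the worker threads
 * above coordinate each request through a per-request countdown that is
 * initialized to the number of components the request goes to; every
 * component path (local_send, remote_send, remote_recv) drops one reference
 * even on failure, and only the thread that releases the last reference
 * moves the request to the done queue.  The minimal, self-contained program
 * below models that pattern with C11 atomics instead of the refcount(9)-style
 * helpers used by hastd; all names (sketch_hio, sketch_release, ...) are
 * hypothetical and chosen for illustration only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct sketch_hio {
	atomic_uint countdown;	/* outstanding component completions */
	int errors[2];		/* per-component status: [0] local, [1] remote */
};

/* Stand-in for moving the request to the done queue. */
static void
sketch_complete(struct sketch_hio *hio)
{
	printf("request done: local=%d remote=%d\n",
	    hio->errors[0], hio->errors[1]);
}

/*
 * Called once per component, success or failure.  Returns true only for the
 * caller that dropped the last reference, mirroring how exactly one hastd
 * thread queues the request on the done queue.
 */
static bool
sketch_release(struct sketch_hio *hio)
{
	if (atomic_fetch_sub(&hio->countdown, 1) != 1)
		return (false);
	sketch_complete(hio);
	return (true);
}

int
main(void)
{
	struct sketch_hio hio;

	atomic_init(&hio.countdown, 2);	/* local + remote component */
	hio.errors[0] = 0;		/* local write succeeded */
	hio.errors[1] = 0;		/* remote reply arrived without error */
	(void)sketch_release(&hio);	/* e.g. local_send path */
	(void)sketch_release(&hio);	/* e.g. remote_recv path: last, completes */
	return (0);
}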
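/*
 * Editor's illustrative sketch (not part of primary.c): primary_config_reload()
 * compares every reloadable setting against the running configuration,
 * accumulates the differences in a MODIFIED_* bit mask, and only then acts on
 * the mask (adjusting timeouts in place, or closing the remote connection when
 * an address changed).  The standalone fragment below shows the same
 * compare-and-accumulate idiom for two hypothetical settings; the names
 * (cfg_running, cfg_diff, SK_MODIFIED_*) are invented for the example.
 */
#include <stdio.h>
#include <string.h>

#define	SK_MODIFIED_ADDR	0x01
#define	SK_MODIFIED_TIMEOUT	0x02

struct cfg_running {
	char addr[64];
	int timeout;
};

static unsigned
cfg_diff(struct cfg_running *run, const char *newaddr, int newtimeout)
{
	unsigned modified = 0;

	if (strcmp(run->addr, newaddr) != 0) {
		/* Like hr_remoteaddr: defer the copy until after the close. */
		modified |= SK_MODIFIED_ADDR;
	}
	if (run->timeout != newtimeout) {
		run->timeout = newtimeout;
		modified |= SK_MODIFIED_TIMEOUT;
	}
	return (modified);
}

int
main(void)
{
	struct cfg_running run = { .addr = "tcp://10.0.0.2", .timeout = 20 };
	unsigned modified;

	modified = cfg_diff(&run, "tcp://10.0.0.3", 20);
	if ((modified & SK_MODIFIED_ADDR) != 0)
		printf("address changed, reconnect needed\n");
	if ((modified & SK_MODIFIED_TIMEOUT) != 0)
		printf("timeout changed in place\n");
	return (0);
}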
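/*
 * Editor's illustrative sketch (not part of primary.c): remote_send_thread()
 * (via the HAST_KEEPALIVE queue timeout) and guard_thread() (via
 * sigtimedwait()) both wake up at least every HAST_KEEPALIVE seconds and act
 * only when "lastcheck + HAST_KEEPALIVE <= now".  The tiny loop below
 * reproduces just that timing idiom with sleep(3); SKETCH_KEEPALIVE and
 * ping() are stand-ins for HAST_KEEPALIVE and keepalive_send()/guard_one(),
 * and the interval value is arbitrary for the example.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define	SKETCH_KEEPALIVE	2	/* seconds; stand-in for HAST_KEEPALIVE */

static void
ping(void)
{
	printf("keepalive/guard check at %jd\n", (intmax_t)time(NULL));
}

int
main(void)
{
	time_t lastcheck, now;
	int iterations;

	lastcheck = time(NULL);
	for (iterations = 0; iterations < 3; iterations++) {
		/* The real threads block on a queue or on sigtimedwait(). */
		sleep(SKETCH_KEEPALIVE);
		now = time(NULL);
		if (lastcheck + SKETCH_KEEPALIVE <= now) {
			ping();
			lastcheck = now;
		}
	}
	return (0);
}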