/*-
 * Copyright (c) 2009 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * This software was developed by Pawel Jakub Dawidek under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sbin/hastd/primary.c 223974 2011-07-13 05:32:55Z trociny $");

#include <sys/types.h>
#include <sys/time.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/refcount.h>
#include <sys/stat.h>

#include <geom/gate/g_gate.h>

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libgeom.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>

#include <activemap.h>
#include <nv.h>
#include <rangelock.h>

#include "control.h"
#include "event.h"
#include "hast.h"
#include "hast_proto.h"
#include "hastd.h"
#include "hooks.h"
#include "metadata.h"
#include "proto.h"
#include "pjdlog.h"
#include "subr.h"
#include "synch.h"

/* There is only one remote component for now. */
#define	ISREMOTE(no)	((no) == 1)

struct hio {
	/*
	 * Number of components we are still waiting for.
	 * When this field goes to 0, we can send the request back to the
	 * kernel. Each component has to decrease this counter by one
	 * even on failure.
	 */
	unsigned int		 hio_countdown;
	/*
	 * Each component has a place to store its own error.
	 * Once the request is handled by all components we can decide if the
	 * request overall is successful or not.
	 */
	int			*hio_errors;
	/*
	 * Structure used to communicate with GEOM Gate class.
	 */
	struct g_gate_ctl_io	 hio_ggio;
	TAILQ_ENTRY(hio)	*hio_next;
};
#define	hio_free_next	hio_next[0]
#define	hio_done_next	hio_next[0]
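
/*
 * In short, a request (hio) cycles through the lists declared below:
 * free -> send (one list per component) -> recv (remote component only)
 * -> done, and back to free. The hio_next array provides one list
 * linkage per component, so the same request can sit on several queues
 * at once.
 */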

/*
 * Free list holds unused structures. When free list is empty, we have to wait
 * until some in-progress requests are freed.
 */
static TAILQ_HEAD(, hio) hio_free_list;
static pthread_mutex_t hio_free_list_lock;
static pthread_cond_t hio_free_list_cond;
/*
 * There is one send list for every component. One request is placed on all
 * send lists - each component gets the same request, but each component is
 * responsible for managing its own send list.
 */
static TAILQ_HEAD(, hio) *hio_send_list;
static pthread_mutex_t *hio_send_list_lock;
static pthread_cond_t *hio_send_list_cond;
/*
 * There is one recv list for every component, although local components don't
 * use recv lists as local requests are done synchronously.
 */
static TAILQ_HEAD(, hio) *hio_recv_list;
static pthread_mutex_t *hio_recv_list_lock;
static pthread_cond_t *hio_recv_list_cond;
/*
 * Request is placed on done list by the slowest component (the one that
 * decreased hio_countdown from 1 to 0).
 */
static TAILQ_HEAD(, hio) hio_done_list;
static pthread_mutex_t hio_done_list_lock;
static pthread_cond_t hio_done_list_cond;
/*
 * Structures below are for interaction with sync thread.
 */
static bool sync_inprogress;
static pthread_mutex_t sync_lock;
static pthread_cond_t sync_cond;
/*
 * The lock below allows us to synchronize access to remote connections.
 */
static pthread_rwlock_t *hio_remote_lock;

/*
 * Lock to synchronize metadata updates. Also synchronize access to
 * hr_primary_localcnt and hr_primary_remotecnt fields.
 */
static pthread_mutex_t metadata_lock;

/*
 * Maximum number of outstanding I/O requests.
 */
#define	HAST_HIO_MAX	256
/*
 * Number of components. At this point there are only two components: local
 * and remote, but in the future it might be possible to use multiple local
 * and remote components.
 */
#define	HAST_NCOMPONENTS	2

#define	ISCONNECTED(res, no)	\
	((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL)

#define	QUEUE_INSERT1(hio, name, ncomp)	do {				\
	bool _wakeup;							\
									\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);			\
	_wakeup = TAILQ_EMPTY(&hio_##name##_list[(ncomp)]);		\
	TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio),		\
	    hio_next[(ncomp)]);						\
	mtx_unlock(&hio_##name##_list_lock[ncomp]);			\
	if (_wakeup)							\
		cv_signal(&hio_##name##_list_cond[(ncomp)]);		\
} while (0)
#define	QUEUE_INSERT2(hio, name)	do {				\
	bool _wakeup;							\
									\
	mtx_lock(&hio_##name##_list_lock);				\
	_wakeup = TAILQ_EMPTY(&hio_##name##_list);			\
	TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\
	mtx_unlock(&hio_##name##_list_lock);				\
	if (_wakeup)							\
		cv_signal(&hio_##name##_list_cond);			\
} while (0)
#define	QUEUE_TAKE1(hio, name, ncomp, timeout)	do {			\
	bool _last;							\
									\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);			\
	_last = false;							\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL && !_last) { \
		cv_timedwait(&hio_##name##_list_cond[(ncomp)],		\
		    &hio_##name##_list_lock[(ncomp)], (timeout));	\
		if ((timeout) != 0)					\
			_last = true;					\
	}								\
	if (hio != NULL) {						\
		TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio),	\
		    hio_next[(ncomp)]);					\
	}								\
	mtx_unlock(&hio_##name##_list_lock[(ncomp)]);			\
} while (0)
#define	QUEUE_TAKE2(hio, name)	do {					\
	mtx_lock(&hio_##name##_list_lock);				\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) {	\
		cv_wait(&hio_##name##_list_cond,			\
		    &hio_##name##_list_lock);				\
	}								\
	TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next);	\
	mtx_unlock(&hio_##name##_list_lock);				\
} while (0)
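
/*
 * For illustration, a call such as QUEUE_INSERT1(hio, send, ncomp)
 * expands roughly to:
 *
 *	mtx_lock(&hio_send_list_lock[ncomp]);
 *	_wakeup = TAILQ_EMPTY(&hio_send_list[ncomp]);
 *	TAILQ_INSERT_TAIL(&hio_send_list[ncomp], hio, hio_next[ncomp]);
 *	mtx_unlock(&hio_send_list_lock[ncomp]);
 *	if (_wakeup)
 *		cv_signal(&hio_send_list_cond[ncomp]);
 *
 * i.e. the 'name' argument is pasted into the list/lock/cond identifiers
 * and the condition variable is only signalled when the list was empty.
 */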

#define	SYNCREQ(hio)		do {					\
	(hio)->hio_ggio.gctl_unit = -1;					\
	(hio)->hio_ggio.gctl_seq = 1;					\
} while (0)
#define	ISSYNCREQ(hio)		((hio)->hio_ggio.gctl_unit == -1)
#define	SYNCREQDONE(hio)	do { (hio)->hio_ggio.gctl_unit = -2; } while (0)
#define	ISSYNCREQDONE(hio)	((hio)->hio_ggio.gctl_unit == -2)

static struct hast_resource *gres;

static pthread_mutex_t range_lock;
static struct rangelocks *range_regular;
static bool range_regular_wait;
static pthread_cond_t range_regular_cond;
static struct rangelocks *range_sync;
static bool range_sync_wait;
static pthread_cond_t range_sync_cond;
static bool fullystarted;

static void *ggate_recv_thread(void *arg);
static void *local_send_thread(void *arg);
static void *remote_send_thread(void *arg);
static void *remote_recv_thread(void *arg);
static void *ggate_send_thread(void *arg);
static void *sync_thread(void *arg);
static void *guard_thread(void *arg);

static void
cleanup(struct hast_resource *res)
{
	int rerrno;

	/* Remember errno. */
	rerrno = errno;

	/* Destroy ggate provider if we created one. */
	if (res->hr_ggateunit >= 0) {
		struct g_gate_ctl_destroy ggiod;

		bzero(&ggiod, sizeof(ggiod));
		ggiod.gctl_version = G_GATE_VERSION;
		ggiod.gctl_unit = res->hr_ggateunit;
		ggiod.gctl_force = 1;
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) < 0) {
			pjdlog_errno(LOG_WARNING,
			    "Unable to destroy hast/%s device",
			    res->hr_provname);
		}
		res->hr_ggateunit = -1;
	}

	/* Restore errno. */
	errno = rerrno;
}

static __dead2 void
primary_exit(int exitcode, const char *fmt, ...)
{
	va_list ap;

	PJDLOG_ASSERT(exitcode != EX_OK);
	va_start(ap, fmt);
	pjdlogv_errno(LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

static __dead2 void
primary_exitx(int exitcode, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	pjdlogv(exitcode == EX_OK ? LOG_INFO : LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

static int
hast_activemap_flush(struct hast_resource *res)
{
	const unsigned char *buf;
	size_t size;

	buf = activemap_bitmap(res->hr_amp, &size);
	PJDLOG_ASSERT(buf != NULL);
	PJDLOG_ASSERT((size % res->hr_local_sectorsize) == 0);
	if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) !=
	    (ssize_t)size) {
		KEEP_ERRNO(pjdlog_errno(LOG_ERR,
		    "Unable to flush activemap to disk"));
		return (-1);
	}
	return (0);
}

static bool
real_remote(const struct hast_resource *res)
{

	return (strcmp(res->hr_remoteaddr, "none") != 0);
}
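
/*
 * The activemap bitmap is kept on the provider itself, directly after
 * the metadata block, which is why the flush above writes at offset
 * METADATA_SIZE. init_local() below reads it back from the same offset.
 */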

static void
init_environment(struct hast_resource *res __unused)
{
	struct hio *hio;
	unsigned int ii, ncomps;

	/*
	 * In the future it might be a per-resource value.
	 */
	ncomps = HAST_NCOMPONENTS;

	/*
	 * Allocate memory needed by lists.
	 */
	hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps);
	if (hio_send_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send lists.",
		    sizeof(hio_send_list[0]) * ncomps);
	}
	hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps);
	if (hio_send_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list locks.",
		    sizeof(hio_send_list_lock[0]) * ncomps);
	}
	hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps);
	if (hio_send_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list condition variables.",
		    sizeof(hio_send_list_cond[0]) * ncomps);
	}
	hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps);
	if (hio_recv_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv lists.",
		    sizeof(hio_recv_list[0]) * ncomps);
	}
	hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps);
	if (hio_recv_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list locks.",
		    sizeof(hio_recv_list_lock[0]) * ncomps);
	}
	hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps);
	if (hio_recv_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list condition variables.",
		    sizeof(hio_recv_list_cond[0]) * ncomps);
	}
	hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps);
	if (hio_remote_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for remote connections locks.",
		    sizeof(hio_remote_lock[0]) * ncomps);
	}

	/*
	 * Initialize lists, their locks and their condition variables.
	 */
	TAILQ_INIT(&hio_free_list);
	mtx_init(&hio_free_list_lock);
	cv_init(&hio_free_list_cond);
	for (ii = 0; ii < HAST_NCOMPONENTS; ii++) {
		TAILQ_INIT(&hio_send_list[ii]);
		mtx_init(&hio_send_list_lock[ii]);
		cv_init(&hio_send_list_cond[ii]);
		TAILQ_INIT(&hio_recv_list[ii]);
		mtx_init(&hio_recv_list_lock[ii]);
		cv_init(&hio_recv_list_cond[ii]);
		rw_init(&hio_remote_lock[ii]);
	}
	TAILQ_INIT(&hio_done_list);
	mtx_init(&hio_done_list_lock);
	cv_init(&hio_done_list_cond);
	mtx_init(&metadata_lock);

	/*
	 * Allocate requests pool and initialize requests.
	 */
	for (ii = 0; ii < HAST_HIO_MAX; ii++) {
		hio = malloc(sizeof(*hio));
		if (hio == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio request.",
			    sizeof(*hio));
		}
		hio->hio_countdown = 0;
		hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps);
		if (hio->hio_errors == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio errors.",
			    sizeof(hio->hio_errors[0]) * ncomps);
		}
		hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps);
		if (hio->hio_next == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio_next field.",
			    sizeof(hio->hio_next[0]) * ncomps);
		}
		hio->hio_ggio.gctl_version = G_GATE_VERSION;
		hio->hio_ggio.gctl_data = malloc(MAXPHYS);
		if (hio->hio_ggio.gctl_data == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for gctl_data.",
			    MAXPHYS);
		}
		hio->hio_ggio.gctl_length = MAXPHYS;
		hio->hio_ggio.gctl_error = 0;
		TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next);
	}
}
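
/*
 * init_resuid() below initializes hr_resuid lazily: the check and the
 * assignment are both done under metadata_lock, so when two paths race
 * (the handshake in init_remote() and the first write in
 * ggate_recv_thread()), exactly one of them sees 0 and generates the
 * identifier; the return value tells the caller whether it was the one.
 */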

static bool
init_resuid(struct hast_resource *res)
{

	mtx_lock(&metadata_lock);
	if (res->hr_resuid != 0) {
		mtx_unlock(&metadata_lock);
		return (false);
	} else {
		/* Initialize unique resource identifier. */
		arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid));
		mtx_unlock(&metadata_lock);
		if (metadata_write(res) < 0)
			exit(EX_NOINPUT);
		return (true);
	}
}

static void
init_local(struct hast_resource *res)
{
	unsigned char *buf;
	size_t mapsize;

	if (metadata_read(res, true) < 0)
		exit(EX_NOINPUT);
	mtx_init(&res->hr_amp_lock);
	if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize,
	    res->hr_local_sectorsize, res->hr_keepdirty) < 0) {
		primary_exit(EX_TEMPFAIL, "Unable to create activemap");
	}
	mtx_init(&range_lock);
	cv_init(&range_regular_cond);
	if (rangelock_init(&range_regular) < 0)
		primary_exit(EX_TEMPFAIL, "Unable to create regular range lock");
	cv_init(&range_sync_cond);
	if (rangelock_init(&range_sync) < 0)
		primary_exit(EX_TEMPFAIL, "Unable to create sync range lock");
	mapsize = activemap_ondisk_size(res->hr_amp);
	buf = calloc(1, mapsize);
	if (buf == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate buffer for activemap.");
	}
	if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) !=
	    (ssize_t)mapsize) {
		primary_exit(EX_NOINPUT, "Unable to read activemap");
	}
	activemap_copyin(res->hr_amp, buf, mapsize);
	free(buf);
	if (res->hr_resuid != 0)
		return;
	/*
	 * We're using the provider for the first time. Initialize local and
	 * remote counters. We don't initialize resuid here, as we want to do
	 * it just in time. The reason for this is that we want to inform
	 * secondary that there were no writes yet, so there is no need to
	 * synchronize anything.
	 */
	res->hr_primary_localcnt = 0;
	res->hr_primary_remotecnt = 0;
	if (metadata_write(res) < 0)
		exit(EX_NOINPUT);
}

static int
primary_connect(struct hast_resource *res, struct proto_conn **connp)
{
	struct proto_conn *conn;
	int16_t val;

	val = 1;
	if (proto_send(res->hr_conn, &val, sizeof(val)) < 0) {
		primary_exit(EX_TEMPFAIL,
		    "Unable to send connection request to parent");
	}
	if (proto_recv(res->hr_conn, &val, sizeof(val)) < 0) {
		primary_exit(EX_TEMPFAIL,
		    "Unable to receive reply to connection request from parent");
	}
	if (val != 0) {
		errno = val;
		pjdlog_errno(LOG_WARNING, "Unable to connect to %s",
		    res->hr_remoteaddr);
		return (-1);
	}
	if (proto_connection_recv(res->hr_conn, true, &conn) < 0) {
		primary_exit(EX_TEMPFAIL,
		    "Unable to receive connection from parent");
	}
	if (proto_connect_wait(conn, res->hr_timeout) < 0) {
		pjdlog_errno(LOG_WARNING, "Unable to connect to %s",
		    res->hr_remoteaddr);
		proto_close(conn);
		return (-1);
	}
	/* Error in setting timeout is not critical, but why should it fail? */
	if (proto_timeout(conn, res->hr_timeout) < 0)
		pjdlog_errno(LOG_WARNING, "Unable to set connection timeout");

	*connp = conn;

	return (0);
}
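
/*
 * The worker never connects on its own: it asks the parent over hr_conn
 * (the int16_t request/reply above), receives the descriptor with
 * proto_connection_recv() and only then waits for the connection to be
 * established. Both handshake connections in init_remote() below are
 * obtained this way.
 */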

static int
init_remote(struct hast_resource *res, struct proto_conn **inp,
    struct proto_conn **outp)
{
	struct proto_conn *in, *out;
	struct nv *nvout, *nvin;
	const unsigned char *token;
	unsigned char *map;
	const char *errmsg;
	int32_t extentsize;
	int64_t datasize;
	uint32_t mapsize;
	size_t size;
	int error;

	PJDLOG_ASSERT((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL));
	PJDLOG_ASSERT(real_remote(res));

	in = out = NULL;
	errmsg = NULL;

	if (primary_connect(res, &out) == -1)
		return (ECONNREFUSED);

	error = ECONNABORTED;

	/*
	 * First handshake step.
	 * Set up outgoing connection with remote node.
	 */
	nvout = nv_alloc();
	nv_add_string(nvout, res->hr_name, "resource");
	if (nv_error(nvout) != 0) {
		pjdlog_common(LOG_WARNING, 0, nv_error(nvout),
		    "Unable to allocate header for connection with %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	if (hast_proto_send(res, out, nvout, NULL, 0) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to send handshake header to %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	nv_free(nvout);
	if (hast_proto_recv_hdr(out, &nvin) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to receive handshake header from %s",
		    res->hr_remoteaddr);
		goto close;
	}
	errmsg = nv_get_string(nvin, "errmsg");
	if (errmsg != NULL) {
		pjdlog_warning("%s", errmsg);
		if (nv_exists(nvin, "wait"))
			error = EBUSY;
		nv_free(nvin);
		goto close;
	}
	token = nv_get_uint8_array(nvin, &size, "token");
	if (token == NULL) {
		pjdlog_warning("Handshake header from %s has no 'token' field.",
		    res->hr_remoteaddr);
		nv_free(nvin);
		goto close;
	}
	if (size != sizeof(res->hr_token)) {
		pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).",
		    res->hr_remoteaddr, size, sizeof(res->hr_token));
		nv_free(nvin);
		goto close;
	}
	bcopy(token, res->hr_token, sizeof(res->hr_token));
	nv_free(nvin);

	/*
	 * Second handshake step.
	 * Set up incoming connection with remote node.
	 */
	if (primary_connect(res, &in) == -1)
		goto close;

	nvout = nv_alloc();
	nv_add_string(nvout, res->hr_name, "resource");
	nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token),
	    "token");
	if (res->hr_resuid == 0) {
		/*
		 * The resuid field was not yet initialized.
		 * Because we do synchronization inside init_resuid(), it is
		 * possible that someone already initialized it, the function
		 * will return false then, but if we successfully initialized
		 * it, we will get true. True means that there were no writes
		 * to this resource yet and we want to inform secondary that
		 * synchronization is not needed by sending "virgin" argument.
		 */
		if (init_resuid(res))
			nv_add_int8(nvout, 1, "virgin");
	}
	nv_add_uint64(nvout, res->hr_resuid, "resuid");
	nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt");
	nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt");
	if (nv_error(nvout) != 0) {
		pjdlog_common(LOG_WARNING, 0, nv_error(nvout),
		    "Unable to allocate header for connection with %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	if (hast_proto_send(res, in, nvout, NULL, 0) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to send handshake header to %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	nv_free(nvout);
	if (hast_proto_recv_hdr(out, &nvin) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to receive handshake header from %s",
		    res->hr_remoteaddr);
		goto close;
	}
	errmsg = nv_get_string(nvin, "errmsg");
	if (errmsg != NULL) {
		pjdlog_warning("%s", errmsg);
		nv_free(nvin);
		goto close;
	}
	datasize = nv_get_int64(nvin, "datasize");
	if (datasize != res->hr_datasize) {
		pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).",
		    (intmax_t)res->hr_datasize, (intmax_t)datasize);
		nv_free(nvin);
		goto close;
	}
	extentsize = nv_get_int32(nvin, "extentsize");
	if (extentsize != res->hr_extentsize) {
		pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).",
		    (ssize_t)res->hr_extentsize, (ssize_t)extentsize);
		nv_free(nvin);
		goto close;
	}
	res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt");
	res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt");
	res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc");
	if (nv_exists(nvin, "virgin")) {
		/*
		 * Secondary was reinitialized, bump localcnt if it is 0 as
		 * only we have the data.
		 */
		PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_PRIMARY);
		PJDLOG_ASSERT(res->hr_secondary_localcnt == 0);

		if (res->hr_primary_localcnt == 0) {
			PJDLOG_ASSERT(res->hr_secondary_remotecnt == 0);

			mtx_lock(&metadata_lock);
			res->hr_primary_localcnt++;
			pjdlog_debug(1, "Increasing localcnt to %ju.",
			    (uintmax_t)res->hr_primary_localcnt);
			(void)metadata_write(res);
			mtx_unlock(&metadata_lock);
		}
	}
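	/*
	 * To summarize the exchange above: we send "resource", "token",
	 * optionally "virgin", plus "resuid"/"localcnt"/"remotecnt"; the
	 * secondary answers with "datasize", "extentsize", its own
	 * counters, "syncsrc" and, if it has dirty extents, a "mapsize"
	 * and the activemap itself, which is handled below.
	 */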
	map = NULL;
	mapsize = nv_get_uint32(nvin, "mapsize");
	if (mapsize > 0) {
		map = malloc(mapsize);
		if (map == NULL) {
			pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).",
			    (uintmax_t)mapsize);
			nv_free(nvin);
			goto close;
		}
		/*
		 * Remote node has some dirty extents of its own, let's
		 * download its activemap.
		 */
		if (hast_proto_recv_data(res, out, nvin, map,
		    mapsize) < 0) {
			pjdlog_errno(LOG_ERR,
			    "Unable to receive remote activemap");
			nv_free(nvin);
			free(map);
			goto close;
		}
		/*
		 * Merge local and remote bitmaps.
		 */
		activemap_merge(res->hr_amp, map, mapsize);
		free(map);
		/*
		 * Now that we merged bitmaps from both nodes, flush it to the
		 * disk before we start to synchronize.
		 */
		(void)hast_activemap_flush(res);
	}
	nv_free(nvin);
#ifdef notyet
	/* Set up directions. */
	if (proto_send(out, NULL, 0) == -1)
		pjdlog_errno(LOG_WARNING, "Unable to set connection direction");
	if (proto_recv(in, NULL, 0) == -1)
		pjdlog_errno(LOG_WARNING, "Unable to set connection direction");
#endif
	pjdlog_info("Connected to %s.", res->hr_remoteaddr);
	if (inp != NULL && outp != NULL) {
		*inp = in;
		*outp = out;
	} else {
		res->hr_remotein = in;
		res->hr_remoteout = out;
	}
	event_send(res, EVENT_CONNECT);
	return (0);
close:
	if (errmsg != NULL && strcmp(errmsg, "Split-brain condition!") == 0)
		event_send(res, EVENT_SPLITBRAIN);
	proto_close(out);
	if (in != NULL)
		proto_close(in);
	return (error);
}

static void
sync_start(void)
{

	mtx_lock(&sync_lock);
	sync_inprogress = true;
	mtx_unlock(&sync_lock);
	cv_signal(&sync_cond);
}

static void
sync_stop(void)
{

	mtx_lock(&sync_lock);
	if (sync_inprogress)
		sync_inprogress = false;
	mtx_unlock(&sync_lock);
}
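
/*
 * sync_start()/sync_stop() merely flip sync_inprogress under sync_lock;
 * the sync thread is expected to sleep on sync_cond and re-check the
 * flag, so signalling after dropping the lock is enough to wake it.
 */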

static void
init_ggate(struct hast_resource *res)
{
	struct g_gate_ctl_create ggiocreate;
	struct g_gate_ctl_cancel ggiocancel;

	/*
	 * We communicate with ggate via /dev/ggctl. Open it.
	 */
	res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR);
	if (res->hr_ggatefd < 0)
		primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME);
	/*
	 * Create provider before trying to connect, as connection failure
	 * is not critical, but may take some time.
	 */
	bzero(&ggiocreate, sizeof(ggiocreate));
	ggiocreate.gctl_version = G_GATE_VERSION;
	ggiocreate.gctl_mediasize = res->hr_datasize;
	ggiocreate.gctl_sectorsize = res->hr_local_sectorsize;
	ggiocreate.gctl_flags = 0;
	ggiocreate.gctl_maxcount = 0;
	ggiocreate.gctl_timeout = 0;
	ggiocreate.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s",
	    res->hr_provname);
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) {
		pjdlog_info("Device hast/%s created.", res->hr_provname);
		res->hr_ggateunit = ggiocreate.gctl_unit;
		return;
	}
	if (errno != EEXIST) {
		primary_exit(EX_OSERR, "Unable to create hast/%s device",
		    res->hr_provname);
	}
	pjdlog_debug(1,
	    "Device hast/%s already exists, we will try to take it over.",
	    res->hr_provname);
	/*
	 * If we received EEXIST, we assume that the process that created the
	 * provider died and didn't clean up. In that case we will start from
	 * where it left off.
	 */
	bzero(&ggiocancel, sizeof(ggiocancel));
	ggiocancel.gctl_version = G_GATE_VERSION;
	ggiocancel.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s",
	    res->hr_provname);
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) {
		pjdlog_info("Device hast/%s recovered.", res->hr_provname);
		res->hr_ggateunit = ggiocancel.gctl_unit;
		return;
	}
	primary_exit(EX_OSERR, "Unable to take over hast/%s device",
	    res->hr_provname);
}
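
/*
 * hastd_primary() below sets up three socketpair channels before
 * forking: hr_ctrl carries control commands from parent to worker,
 * hr_event carries events from worker to parent, and hr_conn carries
 * the worker's outgoing connection requests (see primary_connect()
 * above). Each side declares its direction with an empty
 * proto_send()/proto_recv() call.
 */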

void
hastd_primary(struct hast_resource *res)
{
	pthread_t td;
	pid_t pid;
	int error, mode, debuglevel;

	/*
	 * Create communication channel for sending control commands from
	 * parent to child.
	 */
	if (proto_client(NULL, "socketpair://", &res->hr_ctrl) < 0) {
		/* TODO: There's no need for this to be a fatal error. */
		KEEP_ERRNO((void)pidfile_remove(pfh));
		pjdlog_exit(EX_OSERR,
		    "Unable to create control sockets between parent and child");
	}
	/*
	 * Create communication channel for sending events from child to parent.
	 */
	if (proto_client(NULL, "socketpair://", &res->hr_event) < 0) {
		/* TODO: There's no need for this to be a fatal error. */
		KEEP_ERRNO((void)pidfile_remove(pfh));
		pjdlog_exit(EX_OSERR,
		    "Unable to create event sockets between child and parent");
	}
	/*
	 * Create communication channel for sending connection requests from
	 * child to parent.
	 */
	if (proto_client(NULL, "socketpair://", &res->hr_conn) < 0) {
		/* TODO: There's no need for this to be a fatal error. */
		KEEP_ERRNO((void)pidfile_remove(pfh));
		pjdlog_exit(EX_OSERR,
		    "Unable to create connection sockets between child and parent");
	}

	pid = fork();
	if (pid < 0) {
		/* TODO: There's no need for this to be a fatal error. */
		KEEP_ERRNO((void)pidfile_remove(pfh));
		pjdlog_exit(EX_TEMPFAIL, "Unable to fork");
	}

	if (pid > 0) {
		/* This is parent. */
		/* Declare that we are receiver. */
		proto_recv(res->hr_event, NULL, 0);
		proto_recv(res->hr_conn, NULL, 0);
		/* Declare that we are sender. */
		proto_send(res->hr_ctrl, NULL, 0);
		res->hr_workerpid = pid;
		return;
	}

	gres = res;
	mode = pjdlog_mode_get();
	debuglevel = pjdlog_debug_get();

	/* Declare that we are sender. */
	proto_send(res->hr_event, NULL, 0);
	proto_send(res->hr_conn, NULL, 0);
	/* Declare that we are receiver. */
	proto_recv(res->hr_ctrl, NULL, 0);
	descriptors_cleanup(res);

	descriptors_assert(res, mode);

	pjdlog_init(mode);
	pjdlog_debug_set(debuglevel);
	pjdlog_prefix_set("[%s] (%s) ", res->hr_name, role2str(res->hr_role));
	setproctitle("%s (%s)", res->hr_name, role2str(res->hr_role));

	init_local(res);
	init_ggate(res);
	init_environment(res);

	if (drop_privs(res) != 0) {
		cleanup(res);
		exit(EX_CONFIG);
	}
	pjdlog_info("Privileges successfully dropped.");

	/*
	 * Create the guard thread first, so we can handle signals from the
	 * very beginning.
	 */
	error = pthread_create(&td, NULL, guard_thread, res);
	PJDLOG_ASSERT(error == 0);
	/*
	 * Create the control thread before sending any event to the parent,
	 * as we can deadlock when parent sends control request to worker,
	 * but worker has no control thread started yet, so parent waits.
	 * In the meantime worker sends an event to the parent, but parent
	 * is unable to handle the event, because it waits for control
	 * request response.
	 */
	error = pthread_create(&td, NULL, ctrl_thread, res);
	PJDLOG_ASSERT(error == 0);
	if (real_remote(res)) {
		error = init_remote(res, NULL, NULL);
		if (error == 0) {
			sync_start();
		} else if (error == EBUSY) {
			time_t start = time(NULL);

			pjdlog_warning("Waiting for remote node to become %s for %ds.",
			    role2str(HAST_ROLE_SECONDARY),
			    res->hr_timeout);
			for (;;) {
				sleep(1);
				error = init_remote(res, NULL, NULL);
				if (error != EBUSY)
					break;
				if (time(NULL) > start + res->hr_timeout)
					break;
			}
			if (error == EBUSY) {
				pjdlog_warning("Remote node is still %s, starting anyway.",
				    role2str(HAST_ROLE_PRIMARY));
			}
		}
	}
	error = pthread_create(&td, NULL, ggate_recv_thread, res);
	PJDLOG_ASSERT(error == 0);
	error = pthread_create(&td, NULL, local_send_thread, res);
	PJDLOG_ASSERT(error == 0);
	error = pthread_create(&td, NULL, remote_send_thread, res);
	PJDLOG_ASSERT(error == 0);
	error = pthread_create(&td, NULL, remote_recv_thread, res);
	PJDLOG_ASSERT(error == 0);
	error = pthread_create(&td, NULL, ggate_send_thread, res);
	PJDLOG_ASSERT(error == 0);
	fullystarted = true;
	(void)sync_thread(res);
}
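
/*
 * At this point the worker runs seven service threads (guard, ctrl,
 * ggate_recv, local_send, remote_send, remote_recv, ggate_send), while
 * the main thread itself becomes the sync thread via the sync_thread()
 * call above.
 */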

static void
reqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio, const char *fmt, ...)
{
	char msg[1024];
	va_list ap;
	int len;

	va_start(ap, fmt);
	len = vsnprintf(msg, sizeof(msg), fmt, ap);
	va_end(ap);
	if ((size_t)len < sizeof(msg)) {
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "READ(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		case BIO_DELETE:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "DELETE(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		case BIO_FLUSH:
			(void)snprintf(msg + len, sizeof(msg) - len, "FLUSH.");
			break;
		case BIO_WRITE:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "WRITE(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		default:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "UNKNOWN(%u).", (unsigned int)ggio->gctl_cmd);
			break;
		}
	}
	pjdlog_common(loglevel, debuglevel, -1, "%s", msg);
}

static void
remote_close(struct hast_resource *res, int ncomp)
{

	rw_wlock(&hio_remote_lock[ncomp]);
	/*
	 * A race is possible between dropping rlock and acquiring wlock -
	 * another thread can close connection in-between.
	 */
	if (!ISCONNECTED(res, ncomp)) {
		PJDLOG_ASSERT(res->hr_remotein == NULL);
		PJDLOG_ASSERT(res->hr_remoteout == NULL);
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	PJDLOG_ASSERT(res->hr_remotein != NULL);
	PJDLOG_ASSERT(res->hr_remoteout != NULL);

	pjdlog_debug(2, "Closing incoming connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remotein);
	res->hr_remotein = NULL;
	pjdlog_debug(2, "Closing outgoing connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remoteout);
	res->hr_remoteout = NULL;

	rw_unlock(&hio_remote_lock[ncomp]);

	pjdlog_warning("Disconnected from %s.", res->hr_remoteaddr);

	/*
	 * Stop synchronization if in-progress.
	 */
	sync_stop();

	event_send(res, EVENT_DISCONNECT);
}

/*
 * Thread receives ggate I/O requests from the kernel and passes them to
 * appropriate threads:
 * WRITE - always goes to both local_send and remote_send threads
 * READ (when the block is up-to-date on local component) -
 *	only local_send thread
 * READ (when the block isn't up-to-date on local component) -
 *	only remote_send thread
 * DELETE - always goes to both local_send and remote_send threads
 * FLUSH - always goes to both local_send and remote_send threads
 */
static void *
ggate_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomp, ncomps;
	int error;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		pjdlog_debug(2, "ggate_recv: Taking free request.");
		QUEUE_TAKE2(hio, free);
		pjdlog_debug(2, "ggate_recv: (%p) Got free request.", hio);
		ggio = &hio->hio_ggio;
		ggio->gctl_unit = res->hr_ggateunit;
		ggio->gctl_length = MAXPHYS;
		ggio->gctl_error = 0;
		pjdlog_debug(2,
		    "ggate_recv: (%p) Waiting for request from the kernel.",
		    hio);
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) < 0) {
			if (sigexit_received)
				pthread_exit(NULL);
			primary_exit(EX_OSERR, "G_GATE_CMD_START failed");
		}
		error = ggio->gctl_error;
		switch (error) {
		case 0:
			break;
		case ECANCELED:
			/* Exit gracefully. */
			if (!sigexit_received) {
				pjdlog_debug(2,
				    "ggate_recv: (%p) Received cancel from the kernel.",
				    hio);
				pjdlog_info("Received cancel from the kernel, exiting.");
			}
			pthread_exit(NULL);
		case ENOMEM:
			/*
			 * Buffer too small? Impossible, we allocate MAXPHYS
			 * bytes - request can't be bigger than that.
			 */
			/* FALLTHROUGH */
		case ENXIO:
		default:
			primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.",
			    strerror(error));
		}
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio,
		    "ggate_recv: (%p) Request received from the kernel: ",
		    hio);
		/*
		 * Inform all components about new write request.
		 * For read request prefer local component unless the given
		 * range is out-of-date, then use remote component.
		 */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			res->hr_stat_read++;
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queue.",
			    hio);
			refcount_init(&hio->hio_countdown, 1);
			mtx_lock(&metadata_lock);
			if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF ||
			    res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
				/*
				 * This range is up-to-date on local component,
				 * so handle request locally.
				 */
				/* Local component is 0 for now. */
				ncomp = 0;
			} else /* if (res->hr_syncsrc ==
			    HAST_SYNCSRC_SECONDARY) */ {
				PJDLOG_ASSERT(res->hr_syncsrc ==
				    HAST_SYNCSRC_SECONDARY);
				/*
				 * This range is out-of-date on local component,
				 * so send request to the remote node.
				 */
				/* Remote component is 1 for now. */
				ncomp = 1;
			}
			mtx_unlock(&metadata_lock);
			QUEUE_INSERT1(hio, send, ncomp);
			break;
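		/*
		 * Note the countdown values: a READ is served by exactly
		 * one component, so hio_countdown starts at 1, while WRITE,
		 * DELETE and FLUSH below go to all components and start at
		 * ncomps.
		 */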
		case BIO_WRITE:
			res->hr_stat_write++;
			if (res->hr_resuid == 0) {
				/*
				 * This is the first write, initialize
				 * localcnt and resuid.
				 */
				res->hr_primary_localcnt = 1;
				(void)init_resuid(res);
			}
			for (;;) {
				mtx_lock(&range_lock);
				if (rangelock_islocked(range_sync,
				    ggio->gctl_offset, ggio->gctl_length)) {
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu locked.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					range_regular_wait = true;
					cv_wait(&range_regular_cond, &range_lock);
					range_regular_wait = false;
					mtx_unlock(&range_lock);
					continue;
				}
				if (rangelock_add(range_regular,
				    ggio->gctl_offset, ggio->gctl_length) < 0) {
					mtx_unlock(&range_lock);
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu is already locked, waiting.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					sleep(1);
					continue;
				}
				mtx_unlock(&range_lock);
				break;
			}
			mtx_lock(&res->hr_amp_lock);
			if (activemap_write_start(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length)) {
				res->hr_stat_activemap_update++;
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
			/* FALLTHROUGH */
		case BIO_DELETE:
		case BIO_FLUSH:
			switch (ggio->gctl_cmd) {
			case BIO_DELETE:
				res->hr_stat_delete++;
				break;
			case BIO_FLUSH:
				res->hr_stat_flush++;
				break;
			}
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queues.",
			    hio);
			refcount_init(&hio->hio_countdown, ncomps);
			for (ii = 0; ii < ncomps; ii++)
				QUEUE_INSERT1(hio, send, ii);
			break;
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread reads from or writes to local component.
 * If local read fails, it redirects it to remote_send thread.
 */
static void *
local_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ncomp, rncomp;
	ssize_t ret;

	/* Local component is 0 for now. */
	ncomp = 0;
	/* Remote component is 1 for now. */
	rncomp = 1;

	for (;;) {
		pjdlog_debug(2, "local_send: Taking request.");
		QUEUE_TAKE1(hio, send, ncomp, 0);
		pjdlog_debug(2, "local_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			ret = pread(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret == ggio->gctl_length)
				hio->hio_errors[ncomp] = 0;
			else if (!ISSYNCREQ(hio)) {
				/*
				 * If READ failed, try to read from remote node.
				 */
				if (ret < 0) {
					reqlog(LOG_WARNING, 0, ggio,
					    "Local request failed (%s), trying remote node. ",
					    strerror(errno));
				} else if (ret != ggio->gctl_length) {
					reqlog(LOG_WARNING, 0, ggio,
					    "Local request failed (%zd != %jd), trying remote node. ",
					    ret, (intmax_t)ggio->gctl_length);
				}
				QUEUE_INSERT1(hio, send, rncomp);
				continue;
			}
			break;
		case BIO_WRITE:
			ret = pwrite(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret < 0) {
				hio->hio_errors[ncomp] = errno;
				reqlog(LOG_WARNING, 0, ggio,
				    "Local request failed (%s): ",
				    strerror(errno));
			} else if (ret != ggio->gctl_length) {
				hio->hio_errors[ncomp] = EIO;
				reqlog(LOG_WARNING, 0, ggio,
				    "Local request failed (%zd != %jd): ",
				    ret, (intmax_t)ggio->gctl_length);
			} else {
				hio->hio_errors[ncomp] = 0;
			}
			break;
		case BIO_DELETE:
			ret = g_delete(res->hr_localfd,
			    ggio->gctl_offset + res->hr_localoff,
			    ggio->gctl_length);
			if (ret < 0) {
				hio->hio_errors[ncomp] = errno;
				reqlog(LOG_WARNING, 0, ggio,
				    "Local request failed (%s): ",
				    strerror(errno));
			} else {
				hio->hio_errors[ncomp] = 0;
			}
			break;
		case BIO_FLUSH:
			ret = g_flush(res->hr_localfd);
			if (ret < 0) {
				hio->hio_errors[ncomp] = errno;
				reqlog(LOG_WARNING, 0, ggio,
				    "Local request failed (%s): ",
				    strerror(errno));
			} else {
				hio->hio_errors[ncomp] = 0;
			}
			break;
		}
		if (refcount_release(&hio->hio_countdown)) {
			if (ISSYNCREQ(hio)) {
				mtx_lock(&sync_lock);
				SYNCREQDONE(hio);
				mtx_unlock(&sync_lock);
				cv_signal(&sync_cond);
			} else {
				pjdlog_debug(2,
				    "local_send: (%p) Moving request to the done queue.",
				    hio);
				QUEUE_INSERT2(hio, done);
			}
		}
	}
	/* NOTREACHED */
	return (NULL);
}
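
/*
 * A failed local READ above is simply re-queued to the remote component
 * without releasing hio_countdown, so the request stays alive until the
 * remote side answers; only sync requests skip this fallback.
 */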

static void
keepalive_send(struct hast_resource *res, unsigned int ncomp)
{
	struct nv *nv;

	rw_rlock(&hio_remote_lock[ncomp]);

	if (!ISCONNECTED(res, ncomp)) {
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	PJDLOG_ASSERT(res->hr_remotein != NULL);
	PJDLOG_ASSERT(res->hr_remoteout != NULL);

	nv = nv_alloc();
	nv_add_uint8(nv, HIO_KEEPALIVE, "cmd");
	if (nv_error(nv) != 0) {
		rw_unlock(&hio_remote_lock[ncomp]);
		nv_free(nv);
		pjdlog_debug(1,
		    "keepalive_send: Unable to prepare header to send.");
		return;
	}
	if (hast_proto_send(res, res->hr_remoteout, nv, NULL, 0) < 0) {
		rw_unlock(&hio_remote_lock[ncomp]);
		pjdlog_common(LOG_DEBUG, 1, errno,
		    "keepalive_send: Unable to send request");
		nv_free(nv);
		remote_close(res, ncomp);
		return;
	}

	rw_unlock(&hio_remote_lock[ncomp]);
	nv_free(nv);
	pjdlog_debug(2, "keepalive_send: Request sent.");
}

/*
 * Thread sends request to secondary node.
 */
static void *
remote_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	time_t lastcheck, now;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	bool wakeup;
	uint64_t offset, length;
	uint8_t cmd;
	void *data;

	/* Remote component is 1 for now. */
	ncomp = 1;
	lastcheck = time(NULL);

	for (;;) {
		pjdlog_debug(2, "remote_send: Taking request.");
		QUEUE_TAKE1(hio, send, ncomp, HAST_KEEPALIVE);
		if (hio == NULL) {
			now = time(NULL);
			if (lastcheck + HAST_KEEPALIVE <= now) {
				keepalive_send(res, ncomp);
				lastcheck = now;
			}
			continue;
		}
		pjdlog_debug(2, "remote_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			cmd = HIO_READ;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_WRITE:
			cmd = HIO_WRITE;
			data = ggio->gctl_data;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_DELETE:
			cmd = HIO_DELETE;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_FLUSH:
			cmd = HIO_FLUSH;
			data = NULL;
			offset = 0;
			length = 0;
			break;
		default:
			PJDLOG_ASSERT(!"invalid condition");
			abort();
		}
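		/*
		 * Every request sent to the secondary carries four header
		 * fields - "cmd", "seq", "offset" and "length" - and, for
		 * writes only, a data payload; "seq" is what remote_recv
		 * below uses to match the reply to this request.
		 */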
		if (hast_proto_send(res, res->hr_remoteout, nv, data,
		    data != NULL ? length : 0) < 0) {
			hio->hio_errors[ncomp] = errno;
			rw_unlock(&hio_remote_lock[ncomp]);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to send request.", hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to send request (%s): ",
			    strerror(hio->hio_errors[ncomp]));
			remote_close(res, ncomp);
			/*
			 * Take the request back from the receive queue and
			 * move it immediately to the done queue.
			 */
			mtx_lock(&hio_recv_list_lock[ncomp]);
			TAILQ_REMOVE(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
			mtx_unlock(&hio_recv_list_lock[ncomp]);
			goto done_queue;
		}
		rw_unlock(&hio_remote_lock[ncomp]);
		nv_free(nv);
		if (wakeup)
			cv_signal(&hio_recv_list_cond[ncomp]);
		continue;
done_queue:
		nv_free(nv);
		if (ISSYNCREQ(hio)) {
			if (!refcount_release(&hio->hio_countdown))
				continue;
			mtx_lock(&sync_lock);
			SYNCREQDONE(hio);
			mtx_unlock(&sync_lock);
			cv_signal(&sync_cond);
			continue;
		}
		if (ggio->gctl_cmd == BIO_WRITE) {
			mtx_lock(&res->hr_amp_lock);
			if (activemap_need_sync(res->hr_amp, ggio->gctl_offset,
			    ggio->gctl_length)) {
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
		}
		if (!refcount_release(&hio->hio_countdown))
			continue;
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the done queue.",
		    hio);
		QUEUE_INSERT2(hio, done);
	}
	/* NOTREACHED */
	return (NULL);
}

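/*
 * Replies are matched to requests by sequence number: remote_send_thread()
 * tags every request with "seq" and parks it on the recv list, and the
 * thread below looks the request up by that tag when a reply arrives.
 * A rough picture of one round trip:
 *
 *	request:  { cmd, seq = N, offset, length [, data] }  -> secondary
 *	reply:    { seq = N, error [, data] }                <- secondary
 */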
/*
 * Thread receives answers from the secondary node and passes them to the
 * ggate_send thread.
 */
static void *
remote_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	uint64_t seq;
	int error;

	/* Remote component is 1 for now. */
	ncomp = 1;

	for (;;) {
		/* Wait until there is anything to receive. */
		mtx_lock(&hio_recv_list_lock[ncomp]);
		while (TAILQ_EMPTY(&hio_recv_list[ncomp])) {
			pjdlog_debug(2, "remote_recv: No requests, waiting.");
			cv_wait(&hio_recv_list_cond[ncomp],
			    &hio_recv_list_lock[ncomp]);
		}
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		rw_rlock(&hio_remote_lock[ncomp]);
		if (!ISCONNECTED(res, ncomp)) {
			rw_unlock(&hio_remote_lock[ncomp]);
			/*
			 * Connection is dead, so move all pending requests to
			 * the done queue (one-by-one).
			 */
			mtx_lock(&hio_recv_list_lock[ncomp]);
			hio = TAILQ_FIRST(&hio_recv_list[ncomp]);
			PJDLOG_ASSERT(hio != NULL);
			TAILQ_REMOVE(&hio_recv_list[ncomp], hio,
			    hio_next[ncomp]);
			mtx_unlock(&hio_recv_list_lock[ncomp]);
			goto done_queue;
		}
		if (hast_proto_recv_hdr(res->hr_remotein, &nv) < 0) {
			pjdlog_errno(LOG_ERR,
			    "Unable to receive reply header");
			rw_unlock(&hio_remote_lock[ncomp]);
			remote_close(res, ncomp);
			continue;
		}
		rw_unlock(&hio_remote_lock[ncomp]);
		seq = nv_get_uint64(nv, "seq");
		if (seq == 0) {
			pjdlog_error("Header contains no 'seq' field.");
			nv_free(nv);
			continue;
		}
		mtx_lock(&hio_recv_list_lock[ncomp]);
		TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) {
			if (hio->hio_ggio.gctl_seq == seq) {
				TAILQ_REMOVE(&hio_recv_list[ncomp], hio,
				    hio_next[ncomp]);
				break;
			}
		}
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		if (hio == NULL) {
			pjdlog_error("Found no request matching received 'seq' field (%ju).",
			    (uintmax_t)seq);
			nv_free(nv);
			continue;
		}
		error = nv_get_int16(nv, "error");
		if (error != 0) {
			/* Request failed on the remote side. */
			hio->hio_errors[ncomp] = error;
			reqlog(LOG_WARNING, 0, &hio->hio_ggio,
			    "Remote request failed (%s): ", strerror(error));
			nv_free(nv);
			goto done_queue;
		}
		ggio = &hio->hio_ggio;
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			rw_rlock(&hio_remote_lock[ncomp]);
			if (!ISCONNECTED(res, ncomp)) {
				rw_unlock(&hio_remote_lock[ncomp]);
				nv_free(nv);
				goto done_queue;
			}
			if (hast_proto_recv_data(res, res->hr_remotein, nv,
			    ggio->gctl_data, ggio->gctl_length) < 0) {
				hio->hio_errors[ncomp] = errno;
				pjdlog_errno(LOG_ERR,
				    "Unable to receive reply data");
				rw_unlock(&hio_remote_lock[ncomp]);
				nv_free(nv);
				remote_close(res, ncomp);
				goto done_queue;
			}
			rw_unlock(&hio_remote_lock[ncomp]);
			break;
		case BIO_WRITE:
		case BIO_DELETE:
		case BIO_FLUSH:
			break;
		default:
			PJDLOG_ASSERT(!"invalid condition");
			abort();
		}
		hio->hio_errors[ncomp] = 0;
		nv_free(nv);
done_queue:
		if (refcount_release(&hio->hio_countdown)) {
			if (ISSYNCREQ(hio)) {
				mtx_lock(&sync_lock);
				SYNCREQDONE(hio);
				mtx_unlock(&sync_lock);
				cv_signal(&sync_cond);
			} else {
				pjdlog_debug(2,
				    "remote_recv: (%p) Moving request to the done queue.",
				    hio);
				QUEUE_INSERT2(hio, done);
			}
		}
	}
	/* NOTREACHED */
	return (NULL);
}

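/*
 * A note on hio_countdown: every component that handles a request drops the
 * counter by one, success or failure, and whichever thread releases the last
 * reference completes the request: it wakes the sync thread for sync
 * requests and queues to "done" for everything else.
 */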
/*
 * Thread sends answers to the kernel.
 */
static void *
ggate_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomp, ncomps;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		pjdlog_debug(2, "ggate_send: Taking request.");
		QUEUE_TAKE2(hio, done);
		pjdlog_debug(2, "ggate_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		for (ii = 0; ii < ncomps; ii++) {
			if (hio->hio_errors[ii] == 0) {
				/*
				 * One successful request is enough to declare
				 * success.
				 */
				ggio->gctl_error = 0;
				break;
			}
		}
		if (ii == ncomps) {
			/*
			 * None of the requests were successful.
			 * Use the error from the local component, except
			 * when the request went only to the remote one.
			 */
			if (ggio->gctl_cmd == BIO_READ &&
			    res->hr_syncsrc == HAST_SYNCSRC_SECONDARY)
				ggio->gctl_error = hio->hio_errors[1];
			else
				ggio->gctl_error = hio->hio_errors[0];
		}
		if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) {
			mtx_lock(&res->hr_amp_lock);
			if (activemap_write_complete(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length)) {
				res->hr_stat_activemap_update++;
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
		}
		if (ggio->gctl_cmd == BIO_WRITE) {
			/*
			 * Unlock the range we locked.
			 */
			mtx_lock(&range_lock);
			rangelock_del(range_regular, ggio->gctl_offset,
			    ggio->gctl_length);
			if (range_sync_wait)
				cv_signal(&range_sync_cond);
			mtx_unlock(&range_lock);
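			/*
			 * The localcnt bump below records that local data has
			 * moved ahead of the disconnected remote node.
			 * Judging by sync_thread(), which sets the counters
			 * equal again once synchronization completes, unequal
			 * counters are presumably how the nodes choose the
			 * synchronization source after a reconnect.
			 */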
			/*
			 * Bump the local count if this is the first write
			 * after a connection failure with the remote node.
			 */
			ncomp = 1;
			rw_rlock(&hio_remote_lock[ncomp]);
			if (!ISCONNECTED(res, ncomp)) {
				mtx_lock(&metadata_lock);
				if (res->hr_primary_localcnt ==
				    res->hr_secondary_remotecnt) {
					res->hr_primary_localcnt++;
					pjdlog_debug(1,
					    "Increasing localcnt to %ju.",
					    (uintmax_t)res->hr_primary_localcnt);
					(void)metadata_write(res);
				}
				mtx_unlock(&metadata_lock);
			}
			rw_unlock(&hio_remote_lock[ncomp]);
		}
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) < 0)
			primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed");
		pjdlog_debug(2,
		    "ggate_send: (%p) Moving request to the free queue.", hio);
		QUEUE_INSERT2(hio, free);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread synchronizes the local and remote components.
 */
static void *
sync_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct hio *hio;
	struct g_gate_ctl_io *ggio;
	struct timeval tstart, tend, tdiff;
	unsigned int ii, ncomp, ncomps;
	off_t offset, length, synced;
	bool dorewind;
	int syncext;

	ncomps = HAST_NCOMPONENTS;
	dorewind = true;
	synced = 0;
	offset = -1;

	for (;;) {
		mtx_lock(&sync_lock);
		if (offset >= 0 && !sync_inprogress) {
			gettimeofday(&tend, NULL);
			timersub(&tend, &tstart, &tdiff);
			pjdlog_info("Synchronization interrupted after %#.0T. "
			    "%NB synchronized so far.", &tdiff,
			    (intmax_t)synced);
			event_send(res, EVENT_SYNCINTR);
		}
		while (!sync_inprogress) {
			dorewind = true;
			synced = 0;
			cv_wait(&sync_cond, &sync_lock);
		}
		mtx_unlock(&sync_lock);
		/*
		 * Obtain the offset at which we should synchronize.
		 * Rewind synchronization if needed.
		 */
		mtx_lock(&res->hr_amp_lock);
		if (dorewind)
			activemap_sync_rewind(res->hr_amp);
		offset = activemap_sync_offset(res->hr_amp, &length, &syncext);
		if (syncext != -1) {
			/*
			 * We have synchronized the entire syncext extent, so
			 * we can mark it as clean now.
			 */
			if (activemap_extent_complete(res->hr_amp, syncext))
				(void)hast_activemap_flush(res);
		}
		mtx_unlock(&res->hr_amp_lock);
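		/*
		 * activemap_sync_offset() appears to return a negative offset
		 * once no dirty extents remain; the offset < 0 checks below
		 * rely on that to detect that synchronization is complete.
		 */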
		if (dorewind) {
			dorewind = false;
			if (offset < 0)
				pjdlog_info("Nodes are in sync.");
			else {
				pjdlog_info("Synchronization started. %NB to go.",
				    (intmax_t)(res->hr_extentsize *
				    activemap_ndirty(res->hr_amp)));
				event_send(res, EVENT_SYNCSTART);
				gettimeofday(&tstart, NULL);
			}
		}
		if (offset < 0) {
			sync_stop();
			pjdlog_debug(1, "Nothing to synchronize.");
			/*
			 * Synchronization complete, make both localcnt and
			 * remotecnt equal.
			 */
			ncomp = 1;
			rw_rlock(&hio_remote_lock[ncomp]);
			if (ISCONNECTED(res, ncomp)) {
				if (synced > 0) {
					int64_t bps;

					gettimeofday(&tend, NULL);
					timersub(&tend, &tstart, &tdiff);
					bps = (int64_t)((double)synced /
					    ((double)tdiff.tv_sec +
					    (double)tdiff.tv_usec / 1000000));
					pjdlog_info("Synchronization complete. "
					    "%NB synchronized in %#.0lT (%NB/sec).",
					    (intmax_t)synced, &tdiff,
					    (intmax_t)bps);
					event_send(res, EVENT_SYNCDONE);
				}
				mtx_lock(&metadata_lock);
				res->hr_syncsrc = HAST_SYNCSRC_UNDEF;
				res->hr_primary_localcnt =
				    res->hr_secondary_remotecnt;
				res->hr_primary_remotecnt =
				    res->hr_secondary_localcnt;
				pjdlog_debug(1,
				    "Setting localcnt to %ju and remotecnt to %ju.",
				    (uintmax_t)res->hr_primary_localcnt,
				    (uintmax_t)res->hr_primary_remotecnt);
				(void)metadata_write(res);
				mtx_unlock(&metadata_lock);
			}
			rw_unlock(&hio_remote_lock[ncomp]);
			continue;
		}
		pjdlog_debug(2, "sync: Taking free request.");
		QUEUE_TAKE2(hio, free);
		pjdlog_debug(2, "sync: (%p) Got free request.", hio);
		/*
		 * Lock the range we are going to synchronize.  We don't want
		 * a race where someone writes between our read and write.
		 */
		for (;;) {
			mtx_lock(&range_lock);
			if (rangelock_islocked(range_regular, offset, length)) {
				pjdlog_debug(2,
				    "sync: Range offset=%jd length=%jd locked.",
				    (intmax_t)offset, (intmax_t)length);
				range_sync_wait = true;
				cv_wait(&range_sync_cond, &range_lock);
				range_sync_wait = false;
				mtx_unlock(&range_lock);
				continue;
			}
			if (rangelock_add(range_sync, offset, length) < 0) {
				mtx_unlock(&range_lock);
				pjdlog_debug(2,
				    "sync: Range offset=%jd length=%jd is already locked, waiting.",
				    (intmax_t)offset, (intmax_t)length);
				sleep(1);
				continue;
			}
			mtx_unlock(&range_lock);
			break;
		}
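		/*
		 * Regular writes hold their ranges in range_regular
		 * (ggate_send_thread() drops them on completion), while the
		 * synchronizer claims ranges in range_sync; the loop above
		 * waits until no regular write holds the range, so nothing
		 * can write between the read and the write issued below.
		 */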
		/*
		 * First read the data from the synchronization source.
		 */
		SYNCREQ(hio);
		ggio = &hio->hio_ggio;
		ggio->gctl_cmd = BIO_READ;
		ggio->gctl_offset = offset;
		ggio->gctl_length = length;
		ggio->gctl_error = 0;
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ",
		    hio);
		pjdlog_debug(2, "sync: (%p) Moving request to the send queue.",
		    hio);
		mtx_lock(&metadata_lock);
		if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
			/*
			 * This range is up-to-date on the local component,
			 * so handle the request locally.
			 */
			/* Local component is 0 for now. */
			ncomp = 0;
		} else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ {
			PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY);
			/*
			 * This range is out-of-date on the local component,
			 * so send the request to the remote node.
			 */
			/* Remote component is 1 for now. */
			ncomp = 1;
		}
		mtx_unlock(&metadata_lock);
		refcount_init(&hio->hio_countdown, 1);
		QUEUE_INSERT1(hio, send, ncomp);

		/*
		 * Let's wait for READ to finish.
		 */
		mtx_lock(&sync_lock);
		while (!ISSYNCREQDONE(hio))
			cv_wait(&sync_cond, &sync_lock);
		mtx_unlock(&sync_lock);

		if (hio->hio_errors[ncomp] != 0) {
			pjdlog_error("Unable to read synchronization data: %s.",
			    strerror(hio->hio_errors[ncomp]));
			goto free_queue;
		}

		/*
		 * We have read the data from the synchronization source; now
		 * write it to the synchronization target.
		 */
		SYNCREQ(hio);
		ggio->gctl_cmd = BIO_WRITE;
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ",
		    hio);
		pjdlog_debug(2, "sync: (%p) Moving request to the send queue.",
		    hio);
		mtx_lock(&metadata_lock);
		if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
			/*
			 * This range is up-to-date on the local component,
			 * so we update the remote component.
			 */
			/* Remote component is 1 for now. */
			ncomp = 1;
		} else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ {
			PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY);
			/*
			 * This range is out-of-date on the local component,
			 * so we update it.
			 */
			/* Local component is 0 for now. */
			ncomp = 0;
		}
		mtx_unlock(&metadata_lock);

		refcount_init(&hio->hio_countdown, 1);
		QUEUE_INSERT1(hio, send, ncomp);

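		/*
		 * As with the READ above, the countdown starts at one: a sync
		 * request is handed to exactly one component at a time, so a
		 * single refcount_release() in the worker threads marks it
		 * done.
		 */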
		/*
		 * Let's wait for WRITE to finish.
		 */
		mtx_lock(&sync_lock);
		while (!ISSYNCREQDONE(hio))
			cv_wait(&sync_cond, &sync_lock);
		mtx_unlock(&sync_lock);

		if (hio->hio_errors[ncomp] != 0) {
			pjdlog_error("Unable to write synchronization data: %s.",
			    strerror(hio->hio_errors[ncomp]));
			goto free_queue;
		}

		synced += length;
free_queue:
		mtx_lock(&range_lock);
		rangelock_del(range_sync, offset, length);
		if (range_regular_wait)
			cv_signal(&range_regular_cond);
		mtx_unlock(&range_lock);
		pjdlog_debug(2, "sync: (%p) Moving request to the free queue.",
		    hio);
		QUEUE_INSERT2(hio, free);
	}
	/* NOTREACHED */
	return (NULL);
}

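/*
 * Of the settings reloaded below, only a timeout change can be applied to
 * live sockets.  Changing the remote address, the source address or the
 * replication mode forces the remote connections to be closed and
 * re-established.
 */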
void
primary_config_reload(struct hast_resource *res, struct nv *nv)
{
	unsigned int ii, ncomps;
	int modified, vint;
	const char *vstr;

	pjdlog_info("Reloading configuration...");

	PJDLOG_ASSERT(res->hr_role == HAST_ROLE_PRIMARY);
	PJDLOG_ASSERT(gres == res);
	nv_assert(nv, "remoteaddr");
	nv_assert(nv, "sourceaddr");
	nv_assert(nv, "replication");
	nv_assert(nv, "checksum");
	nv_assert(nv, "compression");
	nv_assert(nv, "timeout");
	nv_assert(nv, "exec");

	ncomps = HAST_NCOMPONENTS;

#define	MODIFIED_REMOTEADDR	0x01
#define	MODIFIED_SOURCEADDR	0x02
#define	MODIFIED_REPLICATION	0x04
#define	MODIFIED_CHECKSUM	0x08
#define	MODIFIED_COMPRESSION	0x10
#define	MODIFIED_TIMEOUT	0x20
#define	MODIFIED_EXEC		0x40
	modified = 0;

	vstr = nv_get_string(nv, "remoteaddr");
	if (strcmp(gres->hr_remoteaddr, vstr) != 0) {
		/*
		 * Don't copy res->hr_remoteaddr to gres just yet.
		 * We want remote_close() to log the disconnect from the old
		 * addresses, not from the new ones.
		 */
		modified |= MODIFIED_REMOTEADDR;
	}
	vstr = nv_get_string(nv, "sourceaddr");
	if (strcmp(gres->hr_sourceaddr, vstr) != 0) {
		strlcpy(gres->hr_sourceaddr, vstr, sizeof(gres->hr_sourceaddr));
		modified |= MODIFIED_SOURCEADDR;
	}
	vint = nv_get_int32(nv, "replication");
	if (gres->hr_replication != vint) {
		gres->hr_replication = vint;
		modified |= MODIFIED_REPLICATION;
	}
	vint = nv_get_int32(nv, "checksum");
	if (gres->hr_checksum != vint) {
		gres->hr_checksum = vint;
		modified |= MODIFIED_CHECKSUM;
	}
	vint = nv_get_int32(nv, "compression");
	if (gres->hr_compression != vint) {
		gres->hr_compression = vint;
		modified |= MODIFIED_COMPRESSION;
	}
	vint = nv_get_int32(nv, "timeout");
	if (gres->hr_timeout != vint) {
		gres->hr_timeout = vint;
		modified |= MODIFIED_TIMEOUT;
	}
	vstr = nv_get_string(nv, "exec");
	if (strcmp(gres->hr_exec, vstr) != 0) {
		strlcpy(gres->hr_exec, vstr, sizeof(gres->hr_exec));
		modified |= MODIFIED_EXEC;
	}

	/*
	 * Change the timeout for connected sockets.
	 * Don't bother if we need to reconnect anyway.
	 */
	if ((modified & MODIFIED_TIMEOUT) != 0 &&
	    (modified & (MODIFIED_REMOTEADDR | MODIFIED_SOURCEADDR |
	    MODIFIED_REPLICATION)) == 0) {
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			rw_rlock(&hio_remote_lock[ii]);
			if (!ISCONNECTED(gres, ii)) {
				rw_unlock(&hio_remote_lock[ii]);
				continue;
			}
			rw_unlock(&hio_remote_lock[ii]);
			if (proto_timeout(gres->hr_remotein,
			    gres->hr_timeout) < 0) {
				pjdlog_errno(LOG_WARNING,
				    "Unable to set connection timeout");
			}
			if (proto_timeout(gres->hr_remoteout,
			    gres->hr_timeout) < 0) {
				pjdlog_errno(LOG_WARNING,
				    "Unable to set connection timeout");
			}
		}
	}
	if ((modified & (MODIFIED_REMOTEADDR | MODIFIED_SOURCEADDR |
	    MODIFIED_REPLICATION)) != 0) {
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			remote_close(gres, ii);
		}
		if (modified & MODIFIED_REMOTEADDR) {
			vstr = nv_get_string(nv, "remoteaddr");
			strlcpy(gres->hr_remoteaddr, vstr,
			    sizeof(gres->hr_remoteaddr));
		}
	}
#undef	MODIFIED_REMOTEADDR
#undef	MODIFIED_SOURCEADDR
#undef	MODIFIED_REPLICATION
#undef	MODIFIED_CHECKSUM
#undef	MODIFIED_COMPRESSION
#undef	MODIFIED_TIMEOUT
#undef	MODIFIED_EXEC

	pjdlog_info("Configuration reloaded successfully.");
}

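/*
 * guard_one() checks a single component's connection.  Only remote
 * components need guarding, and there is currently just one of those
 * (see ISREMOTE()).
 */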
static void
guard_one(struct hast_resource *res, unsigned int ncomp)
{
	struct proto_conn *in, *out;

	if (!ISREMOTE(ncomp))
		return;

	rw_rlock(&hio_remote_lock[ncomp]);

	if (!real_remote(res)) {
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	if (ISCONNECTED(res, ncomp)) {
		PJDLOG_ASSERT(res->hr_remotein != NULL);
		PJDLOG_ASSERT(res->hr_remoteout != NULL);
		rw_unlock(&hio_remote_lock[ncomp]);
		pjdlog_debug(2, "remote_guard: Connection to %s is ok.",
		    res->hr_remoteaddr);
		return;
	}

	PJDLOG_ASSERT(res->hr_remotein == NULL);
	PJDLOG_ASSERT(res->hr_remoteout == NULL);
	/*
	 * Upgrade the lock.  It doesn't have to be atomic as no other thread
	 * can change connection status from disconnected to connected.
	 */
	rw_unlock(&hio_remote_lock[ncomp]);
	pjdlog_debug(2, "remote_guard: Reconnecting to %s.",
	    res->hr_remoteaddr);
	in = out = NULL;
	if (init_remote(res, &in, &out) == 0) {
		rw_wlock(&hio_remote_lock[ncomp]);
		PJDLOG_ASSERT(res->hr_remotein == NULL);
		PJDLOG_ASSERT(res->hr_remoteout == NULL);
		PJDLOG_ASSERT(in != NULL && out != NULL);
		res->hr_remotein = in;
		res->hr_remoteout = out;
		rw_unlock(&hio_remote_lock[ncomp]);
		pjdlog_info("Successfully reconnected to %s.",
		    res->hr_remoteaddr);
		sync_start();
	} else {
		/* Both connections should be NULL. */
		PJDLOG_ASSERT(res->hr_remotein == NULL);
		PJDLOG_ASSERT(res->hr_remoteout == NULL);
		PJDLOG_ASSERT(in == NULL && out == NULL);
		pjdlog_debug(2, "remote_guard: Reconnect to %s failed.",
		    res->hr_remoteaddr);
	}
}

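/*
 * The guard thread below uses sigtimedwait() both as its signal handler and
 * as its timer: SIGINT and SIGTERM are delivered synchronously, and the
 * HAST_KEEPALIVE timeout paces the periodic connection checks.
 */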
/*
 * Thread guards remote connections, reconnects when needed, and handles
 * signals, etc.
 */
static void *
guard_thread(void *arg)
{
	struct hast_resource *res = arg;
	unsigned int ii, ncomps;
	struct timespec timeout;
	time_t lastcheck, now;
	sigset_t mask;
	int signo;

	ncomps = HAST_NCOMPONENTS;
	lastcheck = time(NULL);

	PJDLOG_VERIFY(sigemptyset(&mask) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGINT) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGTERM) == 0);

	timeout.tv_sec = HAST_KEEPALIVE;
	timeout.tv_nsec = 0;
	signo = -1;

	for (;;) {
		switch (signo) {
		case SIGINT:
		case SIGTERM:
			sigexit_received = true;
			primary_exitx(EX_OK,
			    "Termination signal received, exiting.");
			break;
		default:
			break;
		}

		/*
		 * Don't check connections until we have fully started,
		 * as we may still be looping, waiting for the remote node
		 * to switch from primary to secondary.
		 */
		if (fullystarted) {
			pjdlog_debug(2, "remote_guard: Checking connections.");
			now = time(NULL);
			if (lastcheck + HAST_KEEPALIVE <= now) {
				for (ii = 0; ii < ncomps; ii++)
					guard_one(res, ii);
				lastcheck = now;
			}
		}
		signo = sigtimedwait(&mask, NULL, &timeout);
	}
	/* NOTREACHED */
	return (NULL);
}