primary.c — FreeBSD hastd primary-role worker (SVN-annotated dump of revision 219482; per-line revision/author prefixes below are annotation artifacts, not code)
1204076Spjd/*- 2204076Spjd * Copyright (c) 2009 The FreeBSD Foundation 3219351Spjd * Copyright (c) 2010-2011 Pawel Jakub Dawidek <pawel@dawidek.net> 4204076Spjd * All rights reserved. 5204076Spjd * 6204076Spjd * This software was developed by Pawel Jakub Dawidek under sponsorship from 7204076Spjd * the FreeBSD Foundation. 8204076Spjd * 9204076Spjd * Redistribution and use in source and binary forms, with or without 10204076Spjd * modification, are permitted provided that the following conditions 11204076Spjd * are met: 12204076Spjd * 1. Redistributions of source code must retain the above copyright 13204076Spjd * notice, this list of conditions and the following disclaimer. 14204076Spjd * 2. Redistributions in binary form must reproduce the above copyright 15204076Spjd * notice, this list of conditions and the following disclaimer in the 16204076Spjd * documentation and/or other materials provided with the distribution. 17204076Spjd * 18204076Spjd * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 19204076Spjd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20204076Spjd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21204076Spjd * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 22204076Spjd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23204076Spjd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24204076Spjd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25204076Spjd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26204076Spjd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27204076Spjd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28204076Spjd * SUCH DAMAGE. 
29204076Spjd */ 30204076Spjd 31204076Spjd#include <sys/cdefs.h> 32204076Spjd__FBSDID("$FreeBSD: head/sbin/hastd/primary.c 219482 2011-03-11 12:12:35Z trociny $"); 33204076Spjd 34204076Spjd#include <sys/types.h> 35204076Spjd#include <sys/time.h> 36204076Spjd#include <sys/bio.h> 37204076Spjd#include <sys/disk.h> 38204076Spjd#include <sys/refcount.h> 39204076Spjd#include <sys/stat.h> 40204076Spjd 41204076Spjd#include <geom/gate/g_gate.h> 42204076Spjd 43204076Spjd#include <err.h> 44204076Spjd#include <errno.h> 45204076Spjd#include <fcntl.h> 46204076Spjd#include <libgeom.h> 47204076Spjd#include <pthread.h> 48211982Spjd#include <signal.h> 49204076Spjd#include <stdint.h> 50204076Spjd#include <stdio.h> 51204076Spjd#include <string.h> 52204076Spjd#include <sysexits.h> 53204076Spjd#include <unistd.h> 54204076Spjd 55204076Spjd#include <activemap.h> 56204076Spjd#include <nv.h> 57204076Spjd#include <rangelock.h> 58204076Spjd 59204076Spjd#include "control.h" 60212038Spjd#include "event.h" 61204076Spjd#include "hast.h" 62204076Spjd#include "hast_proto.h" 63204076Spjd#include "hastd.h" 64211886Spjd#include "hooks.h" 65204076Spjd#include "metadata.h" 66204076Spjd#include "proto.h" 67204076Spjd#include "pjdlog.h" 68204076Spjd#include "subr.h" 69204076Spjd#include "synch.h" 70204076Spjd 71210886Spjd/* The is only one remote component for now. */ 72210886Spjd#define ISREMOTE(no) ((no) == 1) 73210886Spjd 74204076Spjdstruct hio { 75204076Spjd /* 76204076Spjd * Number of components we are still waiting for. 77204076Spjd * When this field goes to 0, we can send the request back to the 78204076Spjd * kernel. Each component has to decrease this counter by one 79204076Spjd * even on failure. 80204076Spjd */ 81204076Spjd unsigned int hio_countdown; 82204076Spjd /* 83204076Spjd * Each component has a place to store its own error. 84204076Spjd * Once the request is handled by all components we can decide if the 85204076Spjd * request overall is successful or not. 
86204076Spjd */ 87204076Spjd int *hio_errors; 88204076Spjd /* 89204076Spjd * Structure used to comunicate with GEOM Gate class. 90204076Spjd */ 91204076Spjd struct g_gate_ctl_io hio_ggio; 92204076Spjd TAILQ_ENTRY(hio) *hio_next; 93204076Spjd}; 94204076Spjd#define hio_free_next hio_next[0] 95204076Spjd#define hio_done_next hio_next[0] 96204076Spjd 97204076Spjd/* 98204076Spjd * Free list holds unused structures. When free list is empty, we have to wait 99204076Spjd * until some in-progress requests are freed. 100204076Spjd */ 101204076Spjdstatic TAILQ_HEAD(, hio) hio_free_list; 102204076Spjdstatic pthread_mutex_t hio_free_list_lock; 103204076Spjdstatic pthread_cond_t hio_free_list_cond; 104204076Spjd/* 105204076Spjd * There is one send list for every component. One requests is placed on all 106204076Spjd * send lists - each component gets the same request, but each component is 107204076Spjd * responsible for managing his own send list. 108204076Spjd */ 109204076Spjdstatic TAILQ_HEAD(, hio) *hio_send_list; 110204076Spjdstatic pthread_mutex_t *hio_send_list_lock; 111204076Spjdstatic pthread_cond_t *hio_send_list_cond; 112204076Spjd/* 113204076Spjd * There is one recv list for every component, although local components don't 114204076Spjd * use recv lists as local requests are done synchronously. 115204076Spjd */ 116204076Spjdstatic TAILQ_HEAD(, hio) *hio_recv_list; 117204076Spjdstatic pthread_mutex_t *hio_recv_list_lock; 118204076Spjdstatic pthread_cond_t *hio_recv_list_cond; 119204076Spjd/* 120204076Spjd * Request is placed on done list by the slowest component (the one that 121204076Spjd * decreased hio_countdown from 1 to 0). 122204076Spjd */ 123204076Spjdstatic TAILQ_HEAD(, hio) hio_done_list; 124204076Spjdstatic pthread_mutex_t hio_done_list_lock; 125204076Spjdstatic pthread_cond_t hio_done_list_cond; 126204076Spjd/* 127204076Spjd * Structure below are for interaction with sync thread. 
128204076Spjd */ 129204076Spjdstatic bool sync_inprogress; 130204076Spjdstatic pthread_mutex_t sync_lock; 131204076Spjdstatic pthread_cond_t sync_cond; 132204076Spjd/* 133204076Spjd * The lock below allows to synchornize access to remote connections. 134204076Spjd */ 135204076Spjdstatic pthread_rwlock_t *hio_remote_lock; 136204076Spjd 137204076Spjd/* 138204076Spjd * Lock to synchronize metadata updates. Also synchronize access to 139204076Spjd * hr_primary_localcnt and hr_primary_remotecnt fields. 140204076Spjd */ 141204076Spjdstatic pthread_mutex_t metadata_lock; 142204076Spjd 143204076Spjd/* 144204076Spjd * Maximum number of outstanding I/O requests. 145204076Spjd */ 146204076Spjd#define HAST_HIO_MAX 256 147204076Spjd/* 148204076Spjd * Number of components. At this point there are only two components: local 149204076Spjd * and remote, but in the future it might be possible to use multiple local 150204076Spjd * and remote components. 151204076Spjd */ 152204076Spjd#define HAST_NCOMPONENTS 2 153204076Spjd/* 154211982Spjd * Number of seconds to sleep between reconnect retries or keepalive packets. 
155204076Spjd */ 156211982Spjd#define RETRY_SLEEP 10 157204076Spjd 158204076Spjd#define ISCONNECTED(res, no) \ 159204076Spjd ((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL) 160204076Spjd 161204076Spjd#define QUEUE_INSERT1(hio, name, ncomp) do { \ 162204076Spjd bool _wakeup; \ 163204076Spjd \ 164204076Spjd mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 165204076Spjd _wakeup = TAILQ_EMPTY(&hio_##name##_list[(ncomp)]); \ 166204076Spjd TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio), \ 167204076Spjd hio_next[(ncomp)]); \ 168204076Spjd mtx_unlock(&hio_##name##_list_lock[ncomp]); \ 169204076Spjd if (_wakeup) \ 170204076Spjd cv_signal(&hio_##name##_list_cond[(ncomp)]); \ 171204076Spjd} while (0) 172204076Spjd#define QUEUE_INSERT2(hio, name) do { \ 173204076Spjd bool _wakeup; \ 174204076Spjd \ 175204076Spjd mtx_lock(&hio_##name##_list_lock); \ 176204076Spjd _wakeup = TAILQ_EMPTY(&hio_##name##_list); \ 177204076Spjd TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\ 178204076Spjd mtx_unlock(&hio_##name##_list_lock); \ 179204076Spjd if (_wakeup) \ 180204076Spjd cv_signal(&hio_##name##_list_cond); \ 181204076Spjd} while (0) 182214692Spjd#define QUEUE_TAKE1(hio, name, ncomp, timeout) do { \ 183214692Spjd bool _last; \ 184214692Spjd \ 185204076Spjd mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 186214692Spjd _last = false; \ 187214692Spjd while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL && !_last) { \ 188214692Spjd cv_timedwait(&hio_##name##_list_cond[(ncomp)], \ 189214692Spjd &hio_##name##_list_lock[(ncomp)], (timeout)); \ 190214692Spjd if ((timeout) != 0) \ 191214692Spjd _last = true; \ 192204076Spjd } \ 193214692Spjd if (hio != NULL) { \ 194214692Spjd TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio), \ 195214692Spjd hio_next[(ncomp)]); \ 196214692Spjd } \ 197204076Spjd mtx_unlock(&hio_##name##_list_lock[(ncomp)]); \ 198204076Spjd} while (0) 199204076Spjd#define QUEUE_TAKE2(hio, name) do { \ 200204076Spjd 
mtx_lock(&hio_##name##_list_lock); \ 201204076Spjd while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) { \ 202204076Spjd cv_wait(&hio_##name##_list_cond, \ 203204076Spjd &hio_##name##_list_lock); \ 204204076Spjd } \ 205204076Spjd TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next); \ 206204076Spjd mtx_unlock(&hio_##name##_list_lock); \ 207204076Spjd} while (0) 208204076Spjd 209209183Spjd#define SYNCREQ(hio) do { \ 210209183Spjd (hio)->hio_ggio.gctl_unit = -1; \ 211209183Spjd (hio)->hio_ggio.gctl_seq = 1; \ 212209183Spjd} while (0) 213204076Spjd#define ISSYNCREQ(hio) ((hio)->hio_ggio.gctl_unit == -1) 214204076Spjd#define SYNCREQDONE(hio) do { (hio)->hio_ggio.gctl_unit = -2; } while (0) 215204076Spjd#define ISSYNCREQDONE(hio) ((hio)->hio_ggio.gctl_unit == -2) 216204076Spjd 217204076Spjdstatic struct hast_resource *gres; 218204076Spjd 219204076Spjdstatic pthread_mutex_t range_lock; 220204076Spjdstatic struct rangelocks *range_regular; 221204076Spjdstatic bool range_regular_wait; 222204076Spjdstatic pthread_cond_t range_regular_cond; 223204076Spjdstatic struct rangelocks *range_sync; 224204076Spjdstatic bool range_sync_wait; 225204076Spjdstatic pthread_cond_t range_sync_cond; 226204076Spjd 227204076Spjdstatic void *ggate_recv_thread(void *arg); 228204076Spjdstatic void *local_send_thread(void *arg); 229204076Spjdstatic void *remote_send_thread(void *arg); 230204076Spjdstatic void *remote_recv_thread(void *arg); 231204076Spjdstatic void *ggate_send_thread(void *arg); 232204076Spjdstatic void *sync_thread(void *arg); 233204076Spjdstatic void *guard_thread(void *arg); 234204076Spjd 235211982Spjdstatic void 236204076Spjdcleanup(struct hast_resource *res) 237204076Spjd{ 238204076Spjd int rerrno; 239204076Spjd 240204076Spjd /* Remember errno. */ 241204076Spjd rerrno = errno; 242204076Spjd 243204076Spjd /* Destroy ggate provider if we created one. 
*/ 244204076Spjd if (res->hr_ggateunit >= 0) { 245204076Spjd struct g_gate_ctl_destroy ggiod; 246204076Spjd 247213533Spjd bzero(&ggiod, sizeof(ggiod)); 248204076Spjd ggiod.gctl_version = G_GATE_VERSION; 249204076Spjd ggiod.gctl_unit = res->hr_ggateunit; 250204076Spjd ggiod.gctl_force = 1; 251204076Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) < 0) { 252213531Spjd pjdlog_errno(LOG_WARNING, 253213531Spjd "Unable to destroy hast/%s device", 254204076Spjd res->hr_provname); 255204076Spjd } 256204076Spjd res->hr_ggateunit = -1; 257204076Spjd } 258204076Spjd 259204076Spjd /* Restore errno. */ 260204076Spjd errno = rerrno; 261204076Spjd} 262204076Spjd 263212899Spjdstatic __dead2 void 264204076Spjdprimary_exit(int exitcode, const char *fmt, ...) 265204076Spjd{ 266204076Spjd va_list ap; 267204076Spjd 268218138Spjd PJDLOG_ASSERT(exitcode != EX_OK); 269204076Spjd va_start(ap, fmt); 270204076Spjd pjdlogv_errno(LOG_ERR, fmt, ap); 271204076Spjd va_end(ap); 272204076Spjd cleanup(gres); 273204076Spjd exit(exitcode); 274204076Spjd} 275204076Spjd 276212899Spjdstatic __dead2 void 277204076Spjdprimary_exitx(int exitcode, const char *fmt, ...) 278204076Spjd{ 279204076Spjd va_list ap; 280204076Spjd 281204076Spjd va_start(ap, fmt); 282204076Spjd pjdlogv(exitcode == EX_OK ? 
LOG_INFO : LOG_ERR, fmt, ap); 283204076Spjd va_end(ap); 284204076Spjd cleanup(gres); 285204076Spjd exit(exitcode); 286204076Spjd} 287204076Spjd 288204076Spjdstatic int 289204076Spjdhast_activemap_flush(struct hast_resource *res) 290204076Spjd{ 291204076Spjd const unsigned char *buf; 292204076Spjd size_t size; 293204076Spjd 294204076Spjd buf = activemap_bitmap(res->hr_amp, &size); 295218138Spjd PJDLOG_ASSERT(buf != NULL); 296218138Spjd PJDLOG_ASSERT((size % res->hr_local_sectorsize) == 0); 297204076Spjd if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) != 298204076Spjd (ssize_t)size) { 299204076Spjd KEEP_ERRNO(pjdlog_errno(LOG_ERR, 300204076Spjd "Unable to flush activemap to disk")); 301204076Spjd return (-1); 302204076Spjd } 303204076Spjd return (0); 304204076Spjd} 305204076Spjd 306210881Spjdstatic bool 307210881Spjdreal_remote(const struct hast_resource *res) 308210881Spjd{ 309210881Spjd 310210881Spjd return (strcmp(res->hr_remoteaddr, "none") != 0); 311210881Spjd} 312210881Spjd 313204076Spjdstatic void 314204076Spjdinit_environment(struct hast_resource *res __unused) 315204076Spjd{ 316204076Spjd struct hio *hio; 317204076Spjd unsigned int ii, ncomps; 318204076Spjd 319204076Spjd /* 320204076Spjd * In the future it might be per-resource value. 321204076Spjd */ 322204076Spjd ncomps = HAST_NCOMPONENTS; 323204076Spjd 324204076Spjd /* 325204076Spjd * Allocate memory needed by lists. 
326204076Spjd */ 327204076Spjd hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps); 328204076Spjd if (hio_send_list == NULL) { 329204076Spjd primary_exitx(EX_TEMPFAIL, 330204076Spjd "Unable to allocate %zu bytes of memory for send lists.", 331204076Spjd sizeof(hio_send_list[0]) * ncomps); 332204076Spjd } 333204076Spjd hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps); 334204076Spjd if (hio_send_list_lock == NULL) { 335204076Spjd primary_exitx(EX_TEMPFAIL, 336204076Spjd "Unable to allocate %zu bytes of memory for send list locks.", 337204076Spjd sizeof(hio_send_list_lock[0]) * ncomps); 338204076Spjd } 339204076Spjd hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps); 340204076Spjd if (hio_send_list_cond == NULL) { 341204076Spjd primary_exitx(EX_TEMPFAIL, 342204076Spjd "Unable to allocate %zu bytes of memory for send list condition variables.", 343204076Spjd sizeof(hio_send_list_cond[0]) * ncomps); 344204076Spjd } 345204076Spjd hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps); 346204076Spjd if (hio_recv_list == NULL) { 347204076Spjd primary_exitx(EX_TEMPFAIL, 348204076Spjd "Unable to allocate %zu bytes of memory for recv lists.", 349204076Spjd sizeof(hio_recv_list[0]) * ncomps); 350204076Spjd } 351204076Spjd hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps); 352204076Spjd if (hio_recv_list_lock == NULL) { 353204076Spjd primary_exitx(EX_TEMPFAIL, 354204076Spjd "Unable to allocate %zu bytes of memory for recv list locks.", 355204076Spjd sizeof(hio_recv_list_lock[0]) * ncomps); 356204076Spjd } 357204076Spjd hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps); 358204076Spjd if (hio_recv_list_cond == NULL) { 359204076Spjd primary_exitx(EX_TEMPFAIL, 360204076Spjd "Unable to allocate %zu bytes of memory for recv list condition variables.", 361204076Spjd sizeof(hio_recv_list_cond[0]) * ncomps); 362204076Spjd } 363204076Spjd hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps); 
364204076Spjd if (hio_remote_lock == NULL) { 365204076Spjd primary_exitx(EX_TEMPFAIL, 366204076Spjd "Unable to allocate %zu bytes of memory for remote connections locks.", 367204076Spjd sizeof(hio_remote_lock[0]) * ncomps); 368204076Spjd } 369204076Spjd 370204076Spjd /* 371204076Spjd * Initialize lists, their locks and theirs condition variables. 372204076Spjd */ 373204076Spjd TAILQ_INIT(&hio_free_list); 374204076Spjd mtx_init(&hio_free_list_lock); 375204076Spjd cv_init(&hio_free_list_cond); 376204076Spjd for (ii = 0; ii < HAST_NCOMPONENTS; ii++) { 377204076Spjd TAILQ_INIT(&hio_send_list[ii]); 378204076Spjd mtx_init(&hio_send_list_lock[ii]); 379204076Spjd cv_init(&hio_send_list_cond[ii]); 380204076Spjd TAILQ_INIT(&hio_recv_list[ii]); 381204076Spjd mtx_init(&hio_recv_list_lock[ii]); 382204076Spjd cv_init(&hio_recv_list_cond[ii]); 383204076Spjd rw_init(&hio_remote_lock[ii]); 384204076Spjd } 385204076Spjd TAILQ_INIT(&hio_done_list); 386204076Spjd mtx_init(&hio_done_list_lock); 387204076Spjd cv_init(&hio_done_list_cond); 388204076Spjd mtx_init(&metadata_lock); 389204076Spjd 390204076Spjd /* 391204076Spjd * Allocate requests pool and initialize requests. 
392204076Spjd */ 393204076Spjd for (ii = 0; ii < HAST_HIO_MAX; ii++) { 394204076Spjd hio = malloc(sizeof(*hio)); 395204076Spjd if (hio == NULL) { 396204076Spjd primary_exitx(EX_TEMPFAIL, 397204076Spjd "Unable to allocate %zu bytes of memory for hio request.", 398204076Spjd sizeof(*hio)); 399204076Spjd } 400204076Spjd hio->hio_countdown = 0; 401204076Spjd hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps); 402204076Spjd if (hio->hio_errors == NULL) { 403204076Spjd primary_exitx(EX_TEMPFAIL, 404204076Spjd "Unable allocate %zu bytes of memory for hio errors.", 405204076Spjd sizeof(hio->hio_errors[0]) * ncomps); 406204076Spjd } 407204076Spjd hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps); 408204076Spjd if (hio->hio_next == NULL) { 409204076Spjd primary_exitx(EX_TEMPFAIL, 410204076Spjd "Unable allocate %zu bytes of memory for hio_next field.", 411204076Spjd sizeof(hio->hio_next[0]) * ncomps); 412204076Spjd } 413204076Spjd hio->hio_ggio.gctl_version = G_GATE_VERSION; 414204076Spjd hio->hio_ggio.gctl_data = malloc(MAXPHYS); 415204076Spjd if (hio->hio_ggio.gctl_data == NULL) { 416204076Spjd primary_exitx(EX_TEMPFAIL, 417204076Spjd "Unable to allocate %zu bytes of memory for gctl_data.", 418204076Spjd MAXPHYS); 419204076Spjd } 420204076Spjd hio->hio_ggio.gctl_length = MAXPHYS; 421204076Spjd hio->hio_ggio.gctl_error = 0; 422204076Spjd TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next); 423204076Spjd } 424204076Spjd} 425204076Spjd 426214284Spjdstatic bool 427214284Spjdinit_resuid(struct hast_resource *res) 428214284Spjd{ 429214284Spjd 430214284Spjd mtx_lock(&metadata_lock); 431214284Spjd if (res->hr_resuid != 0) { 432214284Spjd mtx_unlock(&metadata_lock); 433214284Spjd return (false); 434214284Spjd } else { 435214284Spjd /* Initialize unique resource identifier. 
*/ 436214284Spjd arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid)); 437214284Spjd mtx_unlock(&metadata_lock); 438214284Spjd if (metadata_write(res) < 0) 439214284Spjd exit(EX_NOINPUT); 440214284Spjd return (true); 441214284Spjd } 442214284Spjd} 443214284Spjd 444204076Spjdstatic void 445204076Spjdinit_local(struct hast_resource *res) 446204076Spjd{ 447204076Spjd unsigned char *buf; 448204076Spjd size_t mapsize; 449204076Spjd 450204076Spjd if (metadata_read(res, true) < 0) 451204076Spjd exit(EX_NOINPUT); 452204076Spjd mtx_init(&res->hr_amp_lock); 453204076Spjd if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize, 454204076Spjd res->hr_local_sectorsize, res->hr_keepdirty) < 0) { 455204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create activemap"); 456204076Spjd } 457204076Spjd mtx_init(&range_lock); 458204076Spjd cv_init(&range_regular_cond); 459204076Spjd if (rangelock_init(&range_regular) < 0) 460204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create regular range lock"); 461204076Spjd cv_init(&range_sync_cond); 462204076Spjd if (rangelock_init(&range_sync) < 0) 463204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create sync range lock"); 464204076Spjd mapsize = activemap_ondisk_size(res->hr_amp); 465204076Spjd buf = calloc(1, mapsize); 466204076Spjd if (buf == NULL) { 467204076Spjd primary_exitx(EX_TEMPFAIL, 468204076Spjd "Unable to allocate buffer for activemap."); 469204076Spjd } 470204076Spjd if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) != 471204076Spjd (ssize_t)mapsize) { 472204076Spjd primary_exit(EX_NOINPUT, "Unable to read activemap"); 473204076Spjd } 474204076Spjd activemap_copyin(res->hr_amp, buf, mapsize); 475209181Spjd free(buf); 476204076Spjd if (res->hr_resuid != 0) 477204076Spjd return; 478204076Spjd /* 479214284Spjd * We're using provider for the first time. Initialize local and remote 480214284Spjd * counters. We don't initialize resuid here, as we want to do it just 481214284Spjd * in time. 
The reason for this is that we want to inform secondary 482214284Spjd * that there were no writes yet, so there is no need to synchronize 483214284Spjd * anything. 484204076Spjd */ 485204076Spjd res->hr_primary_localcnt = 1; 486204076Spjd res->hr_primary_remotecnt = 0; 487204076Spjd if (metadata_write(res) < 0) 488204076Spjd exit(EX_NOINPUT); 489204076Spjd} 490204076Spjd 491218218Spjdstatic int 492218218Spjdprimary_connect(struct hast_resource *res, struct proto_conn **connp) 493218218Spjd{ 494218218Spjd struct proto_conn *conn; 495218218Spjd int16_t val; 496218218Spjd 497218218Spjd val = 1; 498218218Spjd if (proto_send(res->hr_conn, &val, sizeof(val)) < 0) { 499218218Spjd primary_exit(EX_TEMPFAIL, 500218218Spjd "Unable to send connection request to parent"); 501218218Spjd } 502218218Spjd if (proto_recv(res->hr_conn, &val, sizeof(val)) < 0) { 503218218Spjd primary_exit(EX_TEMPFAIL, 504218218Spjd "Unable to receive reply to connection request from parent"); 505218218Spjd } 506218218Spjd if (val != 0) { 507218218Spjd errno = val; 508218218Spjd pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 509218218Spjd res->hr_remoteaddr); 510218218Spjd return (-1); 511218218Spjd } 512218218Spjd if (proto_connection_recv(res->hr_conn, true, &conn) < 0) { 513218218Spjd primary_exit(EX_TEMPFAIL, 514218218Spjd "Unable to receive connection from parent"); 515218218Spjd } 516218218Spjd if (proto_connect_wait(conn, HAST_TIMEOUT) < 0) { 517218218Spjd pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 518218218Spjd res->hr_remoteaddr); 519218218Spjd proto_close(conn); 520218218Spjd return (-1); 521218218Spjd } 522218218Spjd /* Error in setting timeout is not critical, but why should it fail? 
*/ 523218218Spjd if (proto_timeout(conn, res->hr_timeout) < 0) 524218218Spjd pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 525218218Spjd 526218218Spjd *connp = conn; 527218218Spjd 528218218Spjd return (0); 529218218Spjd} 530218218Spjd 531205738Spjdstatic bool 532205738Spjdinit_remote(struct hast_resource *res, struct proto_conn **inp, 533205738Spjd struct proto_conn **outp) 534204076Spjd{ 535205738Spjd struct proto_conn *in, *out; 536204076Spjd struct nv *nvout, *nvin; 537204076Spjd const unsigned char *token; 538204076Spjd unsigned char *map; 539204076Spjd const char *errmsg; 540204076Spjd int32_t extentsize; 541204076Spjd int64_t datasize; 542204076Spjd uint32_t mapsize; 543204076Spjd size_t size; 544204076Spjd 545218138Spjd PJDLOG_ASSERT((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL)); 546218138Spjd PJDLOG_ASSERT(real_remote(res)); 547205738Spjd 548205738Spjd in = out = NULL; 549211983Spjd errmsg = NULL; 550205738Spjd 551218218Spjd if (primary_connect(res, &out) == -1) 552218218Spjd return (false); 553218218Spjd 554204076Spjd /* 555204076Spjd * First handshake step. 556204076Spjd * Setup outgoing connection with remote node. 
557204076Spjd */ 558204076Spjd nvout = nv_alloc(); 559204076Spjd nv_add_string(nvout, res->hr_name, "resource"); 560204076Spjd if (nv_error(nvout) != 0) { 561204076Spjd pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 562204076Spjd "Unable to allocate header for connection with %s", 563204076Spjd res->hr_remoteaddr); 564204076Spjd nv_free(nvout); 565204076Spjd goto close; 566204076Spjd } 567205738Spjd if (hast_proto_send(res, out, nvout, NULL, 0) < 0) { 568204076Spjd pjdlog_errno(LOG_WARNING, 569204076Spjd "Unable to send handshake header to %s", 570204076Spjd res->hr_remoteaddr); 571204076Spjd nv_free(nvout); 572204076Spjd goto close; 573204076Spjd } 574204076Spjd nv_free(nvout); 575205738Spjd if (hast_proto_recv_hdr(out, &nvin) < 0) { 576204076Spjd pjdlog_errno(LOG_WARNING, 577204076Spjd "Unable to receive handshake header from %s", 578204076Spjd res->hr_remoteaddr); 579204076Spjd goto close; 580204076Spjd } 581204076Spjd errmsg = nv_get_string(nvin, "errmsg"); 582204076Spjd if (errmsg != NULL) { 583204076Spjd pjdlog_warning("%s", errmsg); 584204076Spjd nv_free(nvin); 585204076Spjd goto close; 586204076Spjd } 587204076Spjd token = nv_get_uint8_array(nvin, &size, "token"); 588204076Spjd if (token == NULL) { 589204076Spjd pjdlog_warning("Handshake header from %s has no 'token' field.", 590204076Spjd res->hr_remoteaddr); 591204076Spjd nv_free(nvin); 592204076Spjd goto close; 593204076Spjd } 594204076Spjd if (size != sizeof(res->hr_token)) { 595204076Spjd pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).", 596204076Spjd res->hr_remoteaddr, size, sizeof(res->hr_token)); 597204076Spjd nv_free(nvin); 598204076Spjd goto close; 599204076Spjd } 600204076Spjd bcopy(token, res->hr_token, sizeof(res->hr_token)); 601204076Spjd nv_free(nvin); 602204076Spjd 603204076Spjd /* 604204076Spjd * Second handshake step. 605204076Spjd * Setup incoming connection with remote node. 
606204076Spjd */ 607218218Spjd if (primary_connect(res, &in) == -1) 608204076Spjd goto close; 609218218Spjd 610204076Spjd nvout = nv_alloc(); 611204076Spjd nv_add_string(nvout, res->hr_name, "resource"); 612204076Spjd nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token), 613204076Spjd "token"); 614214284Spjd if (res->hr_resuid == 0) { 615214284Spjd /* 616214284Spjd * The resuid field was not yet initialized. 617214284Spjd * Because we do synchronization inside init_resuid(), it is 618214284Spjd * possible that someone already initialized it, the function 619214284Spjd * will return false then, but if we successfully initialized 620214284Spjd * it, we will get true. True means that there were no writes 621214284Spjd * to this resource yet and we want to inform secondary that 622214284Spjd * synchronization is not needed by sending "virgin" argument. 623214284Spjd */ 624214284Spjd if (init_resuid(res)) 625214284Spjd nv_add_int8(nvout, 1, "virgin"); 626214284Spjd } 627204076Spjd nv_add_uint64(nvout, res->hr_resuid, "resuid"); 628204076Spjd nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt"); 629204076Spjd nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt"); 630204076Spjd if (nv_error(nvout) != 0) { 631204076Spjd pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 632204076Spjd "Unable to allocate header for connection with %s", 633204076Spjd res->hr_remoteaddr); 634204076Spjd nv_free(nvout); 635204076Spjd goto close; 636204076Spjd } 637205738Spjd if (hast_proto_send(res, in, nvout, NULL, 0) < 0) { 638204076Spjd pjdlog_errno(LOG_WARNING, 639204076Spjd "Unable to send handshake header to %s", 640204076Spjd res->hr_remoteaddr); 641204076Spjd nv_free(nvout); 642204076Spjd goto close; 643204076Spjd } 644204076Spjd nv_free(nvout); 645205738Spjd if (hast_proto_recv_hdr(out, &nvin) < 0) { 646204076Spjd pjdlog_errno(LOG_WARNING, 647204076Spjd "Unable to receive handshake header from %s", 648204076Spjd res->hr_remoteaddr); 649204076Spjd goto close; 
650204076Spjd } 651204076Spjd errmsg = nv_get_string(nvin, "errmsg"); 652204076Spjd if (errmsg != NULL) { 653204076Spjd pjdlog_warning("%s", errmsg); 654204076Spjd nv_free(nvin); 655204076Spjd goto close; 656204076Spjd } 657204076Spjd datasize = nv_get_int64(nvin, "datasize"); 658204076Spjd if (datasize != res->hr_datasize) { 659204076Spjd pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).", 660204076Spjd (intmax_t)res->hr_datasize, (intmax_t)datasize); 661204076Spjd nv_free(nvin); 662204076Spjd goto close; 663204076Spjd } 664204076Spjd extentsize = nv_get_int32(nvin, "extentsize"); 665204076Spjd if (extentsize != res->hr_extentsize) { 666204076Spjd pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).", 667204076Spjd (ssize_t)res->hr_extentsize, (ssize_t)extentsize); 668204076Spjd nv_free(nvin); 669204076Spjd goto close; 670204076Spjd } 671204076Spjd res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt"); 672204076Spjd res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt"); 673204076Spjd res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc"); 674204076Spjd map = NULL; 675204076Spjd mapsize = nv_get_uint32(nvin, "mapsize"); 676204076Spjd if (mapsize > 0) { 677204076Spjd map = malloc(mapsize); 678204076Spjd if (map == NULL) { 679204076Spjd pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).", 680204076Spjd (uintmax_t)mapsize); 681204076Spjd nv_free(nvin); 682204076Spjd goto close; 683204076Spjd } 684204076Spjd /* 685204076Spjd * Remote node have some dirty extents on its own, lets 686204076Spjd * download its activemap. 687204076Spjd */ 688205738Spjd if (hast_proto_recv_data(res, out, nvin, map, 689204076Spjd mapsize) < 0) { 690204076Spjd pjdlog_errno(LOG_ERR, 691204076Spjd "Unable to receive remote activemap"); 692204076Spjd nv_free(nvin); 693204076Spjd free(map); 694204076Spjd goto close; 695204076Spjd } 696204076Spjd /* 697204076Spjd * Merge local and remote bitmaps. 
698204076Spjd */ 699204076Spjd activemap_merge(res->hr_amp, map, mapsize); 700204076Spjd free(map); 701204076Spjd /* 702204076Spjd * Now that we merged bitmaps from both nodes, flush it to the 703204076Spjd * disk before we start to synchronize. 704204076Spjd */ 705204076Spjd (void)hast_activemap_flush(res); 706204076Spjd } 707214274Spjd nv_free(nvin); 708204076Spjd pjdlog_info("Connected to %s.", res->hr_remoteaddr); 709205738Spjd if (inp != NULL && outp != NULL) { 710205738Spjd *inp = in; 711205738Spjd *outp = out; 712205738Spjd } else { 713205738Spjd res->hr_remotein = in; 714205738Spjd res->hr_remoteout = out; 715205738Spjd } 716212038Spjd event_send(res, EVENT_CONNECT); 717205738Spjd return (true); 718205738Spjdclose: 719211983Spjd if (errmsg != NULL && strcmp(errmsg, "Split-brain condition!") == 0) 720212038Spjd event_send(res, EVENT_SPLITBRAIN); 721205738Spjd proto_close(out); 722205738Spjd if (in != NULL) 723205738Spjd proto_close(in); 724205738Spjd return (false); 725205738Spjd} 726205738Spjd 727205738Spjdstatic void 728205738Spjdsync_start(void) 729205738Spjd{ 730205738Spjd 731204076Spjd mtx_lock(&sync_lock); 732204076Spjd sync_inprogress = true; 733204076Spjd mtx_unlock(&sync_lock); 734204076Spjd cv_signal(&sync_cond); 735204076Spjd} 736204076Spjd 737204076Spjdstatic void 738211878Spjdsync_stop(void) 739211878Spjd{ 740211878Spjd 741211878Spjd mtx_lock(&sync_lock); 742211878Spjd if (sync_inprogress) 743211878Spjd sync_inprogress = false; 744211878Spjd mtx_unlock(&sync_lock); 745211878Spjd} 746211878Spjd 747211878Spjdstatic void 748204076Spjdinit_ggate(struct hast_resource *res) 749204076Spjd{ 750204076Spjd struct g_gate_ctl_create ggiocreate; 751204076Spjd struct g_gate_ctl_cancel ggiocancel; 752204076Spjd 753204076Spjd /* 754204076Spjd * We communicate with ggate via /dev/ggctl. Open it. 
	 */
	res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR);
	if (res->hr_ggatefd < 0)
		primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME);
	/*
	 * Create provider before trying to connect, as connection failure
	 * is not critical, but may take some time.
	 */
	bzero(&ggiocreate, sizeof(ggiocreate));
	ggiocreate.gctl_version = G_GATE_VERSION;
	ggiocreate.gctl_mediasize = res->hr_datasize;
	ggiocreate.gctl_sectorsize = res->hr_local_sectorsize;
	ggiocreate.gctl_flags = 0;
	ggiocreate.gctl_maxcount = G_GATE_MAX_QUEUE_SIZE;
	ggiocreate.gctl_timeout = 0;
	ggiocreate.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s",
	    res->hr_provname);
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) {
		pjdlog_info("Device hast/%s created.", res->hr_provname);
		res->hr_ggateunit = ggiocreate.gctl_unit;
		return;
	}
	if (errno != EEXIST) {
		primary_exit(EX_OSERR, "Unable to create hast/%s device",
		    res->hr_provname);
	}
	pjdlog_debug(1,
	    "Device hast/%s already exists, we will try to take it over.",
	    res->hr_provname);
	/*
	 * If we received EEXIST, we assume that the process who created the
	 * provider died and didn't clean up. In that case we will start from
	 * where he left off.
	 */
	bzero(&ggiocancel, sizeof(ggiocancel));
	ggiocancel.gctl_version = G_GATE_VERSION;
	ggiocancel.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s",
	    res->hr_provname);
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) {
		pjdlog_info("Device hast/%s recovered.", res->hr_provname);
		res->hr_ggateunit = ggiocancel.gctl_unit;
		return;
	}
	primary_exit(EX_OSERR, "Unable to take over hast/%s device",
	    res->hr_provname);
}

/*
 * Entry point for the primary role: set up parent/child socketpairs,
 * fork the worker process and (in the child) start all worker threads.
 * The parent returns after recording the worker pid; the child never
 * returns (it ends up in sync_thread).
 */
void
hastd_primary(struct hast_resource *res)
{
	pthread_t td;
	pid_t pid;
	int error, mode, debuglevel;

	/*
	 * Create communication channel for sending control commands from
	 * parent to child.
	 */
	if (proto_client("socketpair://", &res->hr_ctrl) < 0) {
		/* TODO: There's no need for this to be fatal error. */
		KEEP_ERRNO((void)pidfile_remove(pfh));
		pjdlog_exit(EX_OSERR,
		    "Unable to create control sockets between parent and child");
	}
	/*
	 * Create communication channel for sending events from child to parent.
	 */
	if (proto_client("socketpair://", &res->hr_event) < 0) {
		/* TODO: There's no need for this to be fatal error. */
		KEEP_ERRNO((void)pidfile_remove(pfh));
		pjdlog_exit(EX_OSERR,
		    "Unable to create event sockets between child and parent");
	}
	/*
	 * Create communication channel for sending connection requests from
	 * child to parent.
	 */
	if (proto_client("socketpair://", &res->hr_conn) < 0) {
		/* TODO: There's no need for this to be fatal error. */
		KEEP_ERRNO((void)pidfile_remove(pfh));
		pjdlog_exit(EX_OSERR,
		    "Unable to create connection sockets between child and parent");
	}

	pid = fork();
	if (pid < 0) {
		/* TODO: There's no need for this to be fatal error. */
		KEEP_ERRNO((void)pidfile_remove(pfh));
		pjdlog_exit(EX_TEMPFAIL, "Unable to fork");
	}

	if (pid > 0) {
		/* This is parent. */
		/* Declare that we are receiver. */
		proto_recv(res->hr_event, NULL, 0);
		proto_recv(res->hr_conn, NULL, 0);
		/* Declare that we are sender. */
		proto_send(res->hr_ctrl, NULL, 0);
		res->hr_workerpid = pid;
		return;
	}

	/* This is the child (worker) from here on. */
	gres = res;
	mode = pjdlog_mode_get();
	debuglevel = pjdlog_debug_get();

	/* Declare that we are sender. */
	proto_send(res->hr_event, NULL, 0);
	proto_send(res->hr_conn, NULL, 0);
	/* Declare that we are receiver. */
	proto_recv(res->hr_ctrl, NULL, 0);
	descriptors_cleanup(res);

	descriptors_assert(res, mode);

	/* Re-initialize logging in the child with the parent's settings. */
	pjdlog_init(mode);
	pjdlog_debug_set(debuglevel);
	pjdlog_prefix_set("[%s] (%s) ", res->hr_name, role2str(res->hr_role));
	setproctitle("%s (primary)", res->hr_name);

	init_local(res);
	init_ggate(res);
	init_environment(res);

	if (drop_privs() != 0) {
		cleanup(res);
		exit(EX_CONFIG);
	}
	pjdlog_info("Privileges successfully dropped.");

	/*
	 * Create the guard thread first, so we can handle signals from the
	 * very beginning.
	 */
	error = pthread_create(&td, NULL, guard_thread, res);
	PJDLOG_ASSERT(error == 0);
	/*
	 * Create the control thread before sending any event to the parent,
	 * as we can deadlock when parent sends control request to worker,
	 * but worker has no control thread started yet, so parent waits.
	 * In the meantime worker sends an event to the parent, but parent
	 * is unable to handle the event, because it waits for control
	 * request response.
900213007Spjd */ 901213007Spjd error = pthread_create(&td, NULL, ctrl_thread, res); 902218138Spjd PJDLOG_ASSERT(error == 0); 903210881Spjd if (real_remote(res) && init_remote(res, NULL, NULL)) 904205738Spjd sync_start(); 905204076Spjd error = pthread_create(&td, NULL, ggate_recv_thread, res); 906218138Spjd PJDLOG_ASSERT(error == 0); 907204076Spjd error = pthread_create(&td, NULL, local_send_thread, res); 908218138Spjd PJDLOG_ASSERT(error == 0); 909204076Spjd error = pthread_create(&td, NULL, remote_send_thread, res); 910218138Spjd PJDLOG_ASSERT(error == 0); 911204076Spjd error = pthread_create(&td, NULL, remote_recv_thread, res); 912218138Spjd PJDLOG_ASSERT(error == 0); 913204076Spjd error = pthread_create(&td, NULL, ggate_send_thread, res); 914218138Spjd PJDLOG_ASSERT(error == 0); 915213530Spjd (void)sync_thread(res); 916204076Spjd} 917204076Spjd 918204076Spjdstatic void 919204076Spjdreqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio, const char *fmt, ...) 920204076Spjd{ 921204076Spjd char msg[1024]; 922204076Spjd va_list ap; 923204076Spjd int len; 924204076Spjd 925204076Spjd va_start(ap, fmt); 926204076Spjd len = vsnprintf(msg, sizeof(msg), fmt, ap); 927204076Spjd va_end(ap); 928204076Spjd if ((size_t)len < sizeof(msg)) { 929204076Spjd switch (ggio->gctl_cmd) { 930204076Spjd case BIO_READ: 931204076Spjd (void)snprintf(msg + len, sizeof(msg) - len, 932204076Spjd "READ(%ju, %ju).", (uintmax_t)ggio->gctl_offset, 933204076Spjd (uintmax_t)ggio->gctl_length); 934204076Spjd break; 935204076Spjd case BIO_DELETE: 936204076Spjd (void)snprintf(msg + len, sizeof(msg) - len, 937204076Spjd "DELETE(%ju, %ju).", (uintmax_t)ggio->gctl_offset, 938204076Spjd (uintmax_t)ggio->gctl_length); 939204076Spjd break; 940204076Spjd case BIO_FLUSH: 941204076Spjd (void)snprintf(msg + len, sizeof(msg) - len, "FLUSH."); 942204076Spjd break; 943204076Spjd case BIO_WRITE: 944204076Spjd (void)snprintf(msg + len, sizeof(msg) - len, 945204076Spjd "WRITE(%ju, %ju).", 
(uintmax_t)ggio->gctl_offset, 946204076Spjd (uintmax_t)ggio->gctl_length); 947204076Spjd break; 948204076Spjd default: 949204076Spjd (void)snprintf(msg + len, sizeof(msg) - len, 950204076Spjd "UNKNOWN(%u).", (unsigned int)ggio->gctl_cmd); 951204076Spjd break; 952204076Spjd } 953204076Spjd } 954204076Spjd pjdlog_common(loglevel, debuglevel, -1, "%s", msg); 955204076Spjd} 956204076Spjd 957204076Spjdstatic void 958204076Spjdremote_close(struct hast_resource *res, int ncomp) 959204076Spjd{ 960204076Spjd 961204076Spjd rw_wlock(&hio_remote_lock[ncomp]); 962204076Spjd /* 963204076Spjd * A race is possible between dropping rlock and acquiring wlock - 964204076Spjd * another thread can close connection in-between. 965204076Spjd */ 966204076Spjd if (!ISCONNECTED(res, ncomp)) { 967218138Spjd PJDLOG_ASSERT(res->hr_remotein == NULL); 968218138Spjd PJDLOG_ASSERT(res->hr_remoteout == NULL); 969204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 970204076Spjd return; 971204076Spjd } 972204076Spjd 973218138Spjd PJDLOG_ASSERT(res->hr_remotein != NULL); 974218138Spjd PJDLOG_ASSERT(res->hr_remoteout != NULL); 975204076Spjd 976211881Spjd pjdlog_debug(2, "Closing incoming connection to %s.", 977204076Spjd res->hr_remoteaddr); 978204076Spjd proto_close(res->hr_remotein); 979204076Spjd res->hr_remotein = NULL; 980211881Spjd pjdlog_debug(2, "Closing outgoing connection to %s.", 981204076Spjd res->hr_remoteaddr); 982204076Spjd proto_close(res->hr_remoteout); 983204076Spjd res->hr_remoteout = NULL; 984204076Spjd 985204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 986204076Spjd 987211881Spjd pjdlog_warning("Disconnected from %s.", res->hr_remoteaddr); 988211881Spjd 989204076Spjd /* 990204076Spjd * Stop synchronization if in-progress. 
	 */
	sync_stop();

	event_send(res, EVENT_DISCONNECT);
}

/*
 * Thread receives ggate I/O requests from the kernel and passes them to
 * appropriate threads:
 * WRITE - always goes to both local_send and remote_send threads
 * READ (when the block is up-to-date on local component) -
 *     only local_send thread
 * READ (when the block isn't up-to-date on local component) -
 *     only remote_send thread
 * DELETE - always goes to both local_send and remote_send threads
 * FLUSH - always goes to both local_send and remote_send threads
 */
static void *
ggate_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomp, ncomps;
	int error;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		pjdlog_debug(2, "ggate_recv: Taking free request.");
		QUEUE_TAKE2(hio, free);
		pjdlog_debug(2, "ggate_recv: (%p) Got free request.", hio);
		ggio = &hio->hio_ggio;
		ggio->gctl_unit = res->hr_ggateunit;
		ggio->gctl_length = MAXPHYS;
		ggio->gctl_error = 0;
		pjdlog_debug(2,
		    "ggate_recv: (%p) Waiting for request from the kernel.",
		    hio);
		/* Blocks in the kernel until an I/O request arrives. */
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) < 0) {
			if (sigexit_received)
				pthread_exit(NULL);
			primary_exit(EX_OSERR, "G_GATE_CMD_START failed");
		}
		error = ggio->gctl_error;
		switch (error) {
		case 0:
			break;
		case ECANCELED:
			/* Exit gracefully. */
			if (!sigexit_received) {
				pjdlog_debug(2,
				    "ggate_recv: (%p) Received cancel from the kernel.",
				    hio);
				pjdlog_info("Received cancel from the kernel, exiting.");
			}
			pthread_exit(NULL);
		case ENOMEM:
			/*
			 * Buffer too small? Impossible, we allocate MAXPHYS
			 * bytes - request can't be bigger than that.
			 */
			/* FALLTHROUGH */
		case ENXIO:
		default:
			primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.",
			    strerror(error));
		}
		/* Components overwrite EINVAL as they handle the request. */
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio,
		    "ggate_recv: (%p) Request received from the kernel: ",
		    hio);
		/*
		 * Inform all components about new write request.
		 * For read request prefer local component unless the given
		 * range is out-of-date, then use remote component.
		 */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queue.",
			    hio);
			refcount_init(&hio->hio_countdown, 1);
			mtx_lock(&metadata_lock);
			if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF ||
			    res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
				/*
				 * This range is up-to-date on local component,
				 * so handle request locally.
				 */
				/* Local component is 0 for now. */
				ncomp = 0;
			} else /* if (res->hr_syncsrc ==
			    HAST_SYNCSRC_SECONDARY) */ {
				PJDLOG_ASSERT(res->hr_syncsrc ==
				    HAST_SYNCSRC_SECONDARY);
				/*
				 * This range is out-of-date on local component,
				 * so send request to the remote node.
				 */
				/* Remote component is 1 for now. */
				ncomp = 1;
			}
			mtx_unlock(&metadata_lock);
			QUEUE_INSERT1(hio, send, ncomp);
			break;
		case BIO_WRITE:
			if (res->hr_resuid == 0) {
				/* This is first write, initialize resuid. */
				(void)init_resuid(res);
			}
			/* Wait until the range is not locked by sync code. */
			for (;;) {
				mtx_lock(&range_lock);
				if (rangelock_islocked(range_sync,
				    ggio->gctl_offset, ggio->gctl_length)) {
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu locked.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					range_regular_wait = true;
					cv_wait(&range_regular_cond, &range_lock);
					range_regular_wait = false;
					mtx_unlock(&range_lock);
					continue;
				}
				if (rangelock_add(range_regular,
				    ggio->gctl_offset, ggio->gctl_length) < 0) {
					mtx_unlock(&range_lock);
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu is already locked, waiting.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					sleep(1);
					continue;
				}
				mtx_unlock(&range_lock);
				break;
			}
			mtx_lock(&res->hr_amp_lock);
			if (activemap_write_start(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length)) {
				/* Keep the on-disk activemap in sync. */
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
			/* FALLTHROUGH */
		case BIO_DELETE:
		case BIO_FLUSH:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queues.",
			    hio);
			refcount_init(&hio->hio_countdown, ncomps);
			for (ii = 0; ii < ncomps; ii++)
				QUEUE_INSERT1(hio, send, ii);
			break;
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread reads from or writes to local component.
 * If local read fails, it redirects it to remote_send thread.
 */
static void *
local_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ncomp, rncomp;
	ssize_t ret;

	/* Local component is 0 for now. */
	ncomp = 0;
	/* Remote component is 1 for now. */
	rncomp = 1;

	for (;;) {
		pjdlog_debug(2, "local_send: Taking request.");
		QUEUE_TAKE1(hio, send, ncomp, 0);
		pjdlog_debug(2, "local_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			ret = pread(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret == ggio->gctl_length)
				hio->hio_errors[ncomp] = 0;
			else {
				/*
				 * If READ failed, try to read from remote node.
				 */
				if (ret < 0) {
					reqlog(LOG_WARNING, 0, ggio,
					    "Local request failed (%s), trying remote node. ",
					    strerror(errno));
				} else if (ret != ggio->gctl_length) {
					reqlog(LOG_WARNING, 0, ggio,
					    "Local request failed (%zd != %jd), trying remote node. ",
					    ret, (intmax_t)ggio->gctl_length);
				}
				QUEUE_INSERT1(hio, send, rncomp);
				continue;
			}
			break;
		case BIO_WRITE:
			ret = pwrite(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret < 0) {
				hio->hio_errors[ncomp] = errno;
				reqlog(LOG_WARNING, 0, ggio,
				    "Local request failed (%s): ",
				    strerror(errno));
			} else if (ret != ggio->gctl_length) {
				/* Short write counts as an I/O error. */
				hio->hio_errors[ncomp] = EIO;
				reqlog(LOG_WARNING, 0, ggio,
				    "Local request failed (%zd != %jd): ",
				    ret, (intmax_t)ggio->gctl_length);
			} else {
				hio->hio_errors[ncomp] = 0;
			}
			break;
		case BIO_DELETE:
			ret = g_delete(res->hr_localfd,
			    ggio->gctl_offset + res->hr_localoff,
			    ggio->gctl_length);
			if (ret < 0) {
				hio->hio_errors[ncomp] = errno;
				reqlog(LOG_WARNING, 0, ggio,
				    "Local request failed (%s): ",
				    strerror(errno));
			} else {
				hio->hio_errors[ncomp] = 0;
			}
			break;
		case BIO_FLUSH:
			ret = g_flush(res->hr_localfd);
			if (ret < 0) {
				hio->hio_errors[ncomp] = errno;
				reqlog(LOG_WARNING, 0, ggio,
				    "Local request failed (%s): ",
				    strerror(errno));
			} else {
				hio->hio_errors[ncomp] = 0;
			}
			break;
		}
		/* Last component to finish moves the request onward. */
		if (refcount_release(&hio->hio_countdown)) {
			if (ISSYNCREQ(hio)) {
				mtx_lock(&sync_lock);
				SYNCREQDONE(hio);
				mtx_unlock(&sync_lock);
				cv_signal(&sync_cond);
			} else {
				pjdlog_debug(2,
				    "local_send: (%p) Moving request to the done queue.",
				    hio);
				QUEUE_INSERT2(hio, done);
			}
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Send a keepalive request to the remote node for the given component.
 * On send failure the connection is closed via remote_close().
 */
static void
keepalive_send(struct hast_resource *res, unsigned int ncomp)
{
	struct nv *nv;

	rw_rlock(&hio_remote_lock[ncomp]);

	if (!ISCONNECTED(res, ncomp)) {
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	PJDLOG_ASSERT(res->hr_remotein != NULL);
	PJDLOG_ASSERT(res->hr_remoteout != NULL);

	nv = nv_alloc();
	nv_add_uint8(nv, HIO_KEEPALIVE, "cmd");
	if (nv_error(nv) != 0) {
		rw_unlock(&hio_remote_lock[ncomp]);
		nv_free(nv);
		pjdlog_debug(1,
		    "keepalive_send: Unable to prepare header to send.");
		return;
	}
	if (hast_proto_send(res, res->hr_remoteout, nv, NULL, 0) < 0) {
		rw_unlock(&hio_remote_lock[ncomp]);
		pjdlog_common(LOG_DEBUG, 1, errno,
		    "keepalive_send: Unable to send request");
		nv_free(nv);
		remote_close(res, ncomp);
		return;
	}

	rw_unlock(&hio_remote_lock[ncomp]);
	nv_free(nv);
	pjdlog_debug(2, "keepalive_send: Request sent.");
}

/*
 * Thread sends request to secondary node.
 */
static void *
remote_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	time_t lastcheck, now;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	bool wakeup;
	uint64_t offset, length;
	uint8_t cmd;
	void *data;

	/* Remote component is 1 for now. */
	ncomp = 1;
	lastcheck = time(NULL);

	for (;;) {
		pjdlog_debug(2, "remote_send: Taking request.");
		/* Wake up at least every RETRY_SLEEP seconds for keepalive. */
		QUEUE_TAKE1(hio, send, ncomp, RETRY_SLEEP);
		if (hio == NULL) {
			now = time(NULL);
			if (lastcheck + RETRY_SLEEP <= now) {
				keepalive_send(res, ncomp);
				lastcheck = now;
			}
			continue;
		}
		pjdlog_debug(2, "remote_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		/* Translate the ggate command into the HAST protocol. */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			cmd = HIO_READ;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_WRITE:
			cmd = HIO_WRITE;
			data = ggio->gctl_data;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_DELETE:
			cmd = HIO_DELETE;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_FLUSH:
			cmd = HIO_FLUSH;
			data = NULL;
			offset = 0;
			length = 0;
			break;
		default:
			PJDLOG_ASSERT(!"invalid condition");
			abort();
		}
		nv = nv_alloc();
		nv_add_uint8(nv, cmd, "cmd");
		nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq");
		nv_add_uint64(nv, offset, "offset");
		nv_add_uint64(nv, length, "length");
		if (nv_error(nv) != 0) {
			hio->hio_errors[ncomp] = nv_error(nv);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to prepare header to send.",
			    hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to prepare header to send (%s): ",
			    strerror(nv_error(nv)));
			/* Move failed request immediately to the done queue. */
			goto done_queue;
		}
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the recv queue.",
		    hio);
		/*
		 * Protect connection from disappearing.
		 */
		rw_rlock(&hio_remote_lock[ncomp]);
		if (!ISCONNECTED(res, ncomp)) {
			rw_unlock(&hio_remote_lock[ncomp]);
			hio->hio_errors[ncomp] = ENOTCONN;
			goto done_queue;
		}
		/*
		 * Move the request to recv queue before sending it, because
		 * in different order we can get reply before we move request
		 * to recv queue.
		 */
		mtx_lock(&hio_recv_list_lock[ncomp]);
		wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]);
		TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		if (hast_proto_send(res, res->hr_remoteout, nv, data,
		    data != NULL ? length : 0) < 0) {
			hio->hio_errors[ncomp] = errno;
			rw_unlock(&hio_remote_lock[ncomp]);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to send request.", hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to send request (%s): ",
			    strerror(hio->hio_errors[ncomp]));
			remote_close(res, ncomp);
			/*
			 * Take request back from the receive queue and move
			 * it immediately to the done queue.
			 */
			mtx_lock(&hio_recv_list_lock[ncomp]);
			TAILQ_REMOVE(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
			mtx_unlock(&hio_recv_list_lock[ncomp]);
			goto done_queue;
		}
		rw_unlock(&hio_remote_lock[ncomp]);
		nv_free(nv);
		if (wakeup)
			cv_signal(&hio_recv_list_cond[ncomp]);
		continue;
done_queue:
		nv_free(nv);
		if (ISSYNCREQ(hio)) {
			if (!refcount_release(&hio->hio_countdown))
				continue;
			mtx_lock(&sync_lock);
			SYNCREQDONE(hio);
			mtx_unlock(&sync_lock);
			cv_signal(&sync_cond);
			continue;
		}
		if (ggio->gctl_cmd == BIO_WRITE) {
			mtx_lock(&res->hr_amp_lock);
			if (activemap_need_sync(res->hr_amp, ggio->gctl_offset,
			    ggio->gctl_length)) {
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
		}
		if (!refcount_release(&hio->hio_countdown))
			continue;
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the done queue.",
		    hio);
		QUEUE_INSERT2(hio, done);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread receives answer from secondary node and passes it to ggate_send
 * thread.
 */
static void *
remote_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	uint64_t seq;
	int error;

	/* Remote component is 1 for now. */
	ncomp = 1;

	for (;;) {
		/* Wait until there is anything to receive. */
		mtx_lock(&hio_recv_list_lock[ncomp]);
		while (TAILQ_EMPTY(&hio_recv_list[ncomp])) {
			pjdlog_debug(2, "remote_recv: No requests, waiting.");
			cv_wait(&hio_recv_list_cond[ncomp],
			    &hio_recv_list_lock[ncomp]);
		}
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		rw_rlock(&hio_remote_lock[ncomp]);
		if (!ISCONNECTED(res, ncomp)) {
			rw_unlock(&hio_remote_lock[ncomp]);
			/*
			 * Connection is dead, so move all pending requests to
			 * the done queue (one-by-one).
			 */
			mtx_lock(&hio_recv_list_lock[ncomp]);
			hio = TAILQ_FIRST(&hio_recv_list[ncomp]);
			PJDLOG_ASSERT(hio != NULL);
			TAILQ_REMOVE(&hio_recv_list[ncomp], hio,
			    hio_next[ncomp]);
			mtx_unlock(&hio_recv_list_lock[ncomp]);
			goto done_queue;
		}
		if (hast_proto_recv_hdr(res->hr_remotein, &nv) < 0) {
			pjdlog_errno(LOG_ERR,
			    "Unable to receive reply header");
			rw_unlock(&hio_remote_lock[ncomp]);
			remote_close(res, ncomp);
			continue;
		}
		rw_unlock(&hio_remote_lock[ncomp]);
		seq = nv_get_uint64(nv, "seq");
		if (seq == 0) {
			pjdlog_error("Header contains no 'seq' field.");
			nv_free(nv);
			continue;
		}
		/* Match the reply to the in-flight request by sequence number. */
		mtx_lock(&hio_recv_list_lock[ncomp]);
		TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) {
			if (hio->hio_ggio.gctl_seq == seq) {
				TAILQ_REMOVE(&hio_recv_list[ncomp], hio,
				    hio_next[ncomp]);
				break;
			}
		}
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		if (hio == NULL) {
			pjdlog_error("Found no request matching received 'seq' field (%ju).",
			    (uintmax_t)seq);
			nv_free(nv);
			continue;
		}
		error = nv_get_int16(nv, "error");
		if (error != 0) {
			/* Request failed on remote side. */
			hio->hio_errors[ncomp] = error;
			reqlog(LOG_WARNING, 0, &hio->hio_ggio,
			    "Remote request failed (%s): ", strerror(error));
			nv_free(nv);
			goto done_queue;
		}
		ggio = &hio->hio_ggio;
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			rw_rlock(&hio_remote_lock[ncomp]);
			if (!ISCONNECTED(res, ncomp)) {
				rw_unlock(&hio_remote_lock[ncomp]);
				nv_free(nv);
				goto done_queue;
			}
			if (hast_proto_recv_data(res, res->hr_remotein, nv,
			    ggio->gctl_data, ggio->gctl_length) < 0) {
				hio->hio_errors[ncomp] = errno;
				pjdlog_errno(LOG_ERR,
				    "Unable to receive reply data");
				rw_unlock(&hio_remote_lock[ncomp]);
				nv_free(nv);
				remote_close(res, ncomp);
				goto done_queue;
			}
			rw_unlock(&hio_remote_lock[ncomp]);
			break;
		case BIO_WRITE:
		case BIO_DELETE:
		case BIO_FLUSH:
			break;
		default:
			PJDLOG_ASSERT(!"invalid condition");
			abort();
		}
		hio->hio_errors[ncomp] = 0;
		nv_free(nv);
done_queue:
		if (refcount_release(&hio->hio_countdown)) {
			if (ISSYNCREQ(hio)) {
				mtx_lock(&sync_lock);
				SYNCREQDONE(hio);
				mtx_unlock(&sync_lock);
				cv_signal(&sync_cond);
			} else {
				pjdlog_debug(2,
				    "remote_recv: (%p) Moving request to the done queue.",
				    hio);
				QUEUE_INSERT2(hio, done);
			}
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread sends answer to the kernel.
 */
static void *
ggate_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomp, ncomps;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		pjdlog_debug(2, "ggate_send: Taking request.");
		QUEUE_TAKE2(hio, done);
		pjdlog_debug(2, "ggate_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		for (ii = 0; ii < ncomps; ii++) {
			if (hio->hio_errors[ii] == 0) {
				/*
				 * One successful request is enough to declare
				 * success.
				 */
				ggio->gctl_error = 0;
				break;
			}
		}
		if (ii == ncomps) {
			/*
			 * None of the requests were successful.
			 * Use first error.
			 */
			ggio->gctl_error = hio->hio_errors[0];
		}
		if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) {
			mtx_lock(&res->hr_amp_lock);
			activemap_write_complete(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length);
			mtx_unlock(&res->hr_amp_lock);
		}
		if (ggio->gctl_cmd == BIO_WRITE) {
			/*
			 * Unlock range we locked.
			 */
			mtx_lock(&range_lock);
			rangelock_del(range_regular, ggio->gctl_offset,
			    ggio->gctl_length);
			if (range_sync_wait)
				cv_signal(&range_sync_cond);
			mtx_unlock(&range_lock);
			/*
			 * Bump local count if this is first write after
			 * connection failure with remote node.
			 */
			ncomp = 1;
			rw_rlock(&hio_remote_lock[ncomp]);
			if (!ISCONNECTED(res, ncomp)) {
				mtx_lock(&metadata_lock);
				if (res->hr_primary_localcnt ==
				    res->hr_secondary_remotecnt) {
					res->hr_primary_localcnt++;
					pjdlog_debug(1,
					    "Increasing localcnt to %ju.",
					    (uintmax_t)res->hr_primary_localcnt);
					(void)metadata_write(res);
				}
				mtx_unlock(&metadata_lock);
			}
			rw_unlock(&hio_remote_lock[ncomp]);
		}
		/* Complete the request back to the kernel. */
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) < 0)
			primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed");
		pjdlog_debug(2,
		    "ggate_send: (%p) Moving request to the free queue.", hio);
		QUEUE_INSERT2(hio, free);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread synchronize local and remote components.
 */
/*
 * sync_thread -- background worker that brings the out-of-date component up
 * to date: it walks the activemap's dirty extents and, for each range, reads
 * from the synchronization source and writes to the synchronization target.
 */
static void *
sync_thread(void *arg __unused)
{
	struct hast_resource *res = arg;
	struct hio *hio;
	struct g_gate_ctl_io *ggio;
	struct timeval tstart, tend, tdiff;
	unsigned int ii, ncomp, ncomps;
	off_t offset, length, synced;
	bool dorewind;
	int syncext;

	ncomps = HAST_NCOMPONENTS;
	dorewind = true;
	/* synced counts bytes copied in the current pass. */
	synced = 0;
	/* offset < 0 means no synchronization pass is in progress yet. */
	offset = -1;

	for (;;) {
		mtx_lock(&sync_lock);
		/*
		 * A pass was running (offset >= 0) but synchronization was
		 * stopped from outside -- report the interruption.
		 * NOTE(review): %#.0T and %NB look like pjdlog-specific
		 * conversion specifiers -- confirm against pjdlog.
		 */
		if (offset >= 0 && !sync_inprogress) {
			gettimeofday(&tend, NULL);
			timersub(&tend, &tstart, &tdiff);
			pjdlog_info("Synchronization interrupted after %#.0T. "
			    "%NB synchronized so far.", &tdiff,
			    (intmax_t)synced);
			event_send(res, EVENT_SYNCINTR);
		}
		/* Sleep until synchronization is (re)started. */
		while (!sync_inprogress) {
			dorewind = true;
			synced = 0;
			cv_wait(&sync_cond, &sync_lock);
		}
		mtx_unlock(&sync_lock);
		/*
		 * Obtain offset at which we should synchronize.
		 * Rewind synchronization if needed.
		 */
		mtx_lock(&res->hr_amp_lock);
		if (dorewind)
			activemap_sync_rewind(res->hr_amp);
		offset = activemap_sync_offset(res->hr_amp, &length, &syncext);
		if (syncext != -1) {
			/*
			 * We synchronized entire syncext extent, we can mark
			 * it as clean now.
			 */
			if (activemap_extent_complete(res->hr_amp, syncext))
				(void)hast_activemap_flush(res);
		}
		mtx_unlock(&res->hr_amp_lock);
		if (dorewind) {
			dorewind = false;
			if (offset < 0)
				pjdlog_info("Nodes are in sync.");
			else {
				pjdlog_info("Synchronization started. %NB to go.",
				    (intmax_t)(res->hr_extentsize *
				    activemap_ndirty(res->hr_amp)));
				event_send(res, EVENT_SYNCSTART);
				gettimeofday(&tstart, NULL);
			}
		}
		if (offset < 0) {
			/* Nothing left to copy -- the pass is complete. */
			sync_stop();
			pjdlog_debug(1, "Nothing to synchronize.");
			/*
			 * Synchronization complete, make both localcnt and
			 * remotecnt equal.
			 */
			ncomp = 1;
			rw_rlock(&hio_remote_lock[ncomp]);
			if (ISCONNECTED(res, ncomp)) {
				if (synced > 0) {
					int64_t bps;

					gettimeofday(&tend, NULL);
					timersub(&tend, &tstart, &tdiff);
					bps = (int64_t)((double)synced /
					    ((double)tdiff.tv_sec +
					    (double)tdiff.tv_usec / 1000000));
					pjdlog_info("Synchronization complete. "
					    "%NB synchronized in %#.0lT (%NB/sec).",
					    (intmax_t)synced, &tdiff,
					    (intmax_t)bps);
					event_send(res, EVENT_SYNCDONE);
				}
				mtx_lock(&metadata_lock);
				res->hr_syncsrc = HAST_SYNCSRC_UNDEF;
				res->hr_primary_localcnt =
				    res->hr_secondary_localcnt;
				res->hr_primary_remotecnt =
				    res->hr_secondary_remotecnt;
				pjdlog_debug(1,
				    "Setting localcnt to %ju and remotecnt to %ju.",
				    (uintmax_t)res->hr_primary_localcnt,
				    (uintmax_t)res->hr_secondary_localcnt);
				(void)metadata_write(res);
				mtx_unlock(&metadata_lock);
			}
			rw_unlock(&hio_remote_lock[ncomp]);
			continue;
		}
		pjdlog_debug(2, "sync: Taking free request.");
		QUEUE_TAKE2(hio, free);
		pjdlog_debug(2, "sync: (%p) Got free request.", hio);
		/*
		 * Lock the range we are going to synchronize. We don't want
		 * race where someone writes between our read and write.
		 */
		for (;;) {
			mtx_lock(&range_lock);
			/* A regular write holds the range: wait for a signal. */
			if (rangelock_islocked(range_regular, offset, length)) {
				pjdlog_debug(2,
				    "sync: Range offset=%jd length=%jd locked.",
				    (intmax_t)offset, (intmax_t)length);
				range_sync_wait = true;
				cv_wait(&range_sync_cond, &range_lock);
				range_sync_wait = false;
				mtx_unlock(&range_lock);
				continue;
			}
			if (rangelock_add(range_sync, offset, length) < 0) {
				mtx_unlock(&range_lock);
				pjdlog_debug(2,
				    "sync: Range offset=%jd length=%jd is already locked, waiting.",
				    (intmax_t)offset, (intmax_t)length);
				sleep(1);
				continue;
			}
			mtx_unlock(&range_lock);
			break;
		}
		/*
		 * First read the data from synchronization source.
		 */
		SYNCREQ(hio);
		ggio = &hio->hio_ggio;
		ggio->gctl_cmd = BIO_READ;
		ggio->gctl_offset = offset;
		ggio->gctl_length = length;
		ggio->gctl_error = 0;
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ",
		    hio);
		pjdlog_debug(2, "sync: (%p) Moving request to the send queue.",
		    hio);
		mtx_lock(&metadata_lock);
		if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
			/*
			 * This range is up-to-date on local component,
			 * so handle request locally.
			 */
			/* Local component is 0 for now. */
			ncomp = 0;
		} else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ {
			PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY);
			/*
			 * This range is out-of-date on local component,
			 * so send request to the remote node.
			 */
			/* Remote component is 1 for now. */
			ncomp = 1;
		}
		mtx_unlock(&metadata_lock);
		/* Exactly one component serves a sync request. */
		refcount_init(&hio->hio_countdown, 1);
		QUEUE_INSERT1(hio, send, ncomp);

		/*
		 * Let's wait for READ to finish.
		 */
		mtx_lock(&sync_lock);
		while (!ISSYNCREQDONE(hio))
			cv_wait(&sync_cond, &sync_lock);
		mtx_unlock(&sync_lock);

		if (hio->hio_errors[ncomp] != 0) {
			pjdlog_error("Unable to read synchronization data: %s.",
			    strerror(hio->hio_errors[ncomp]));
			goto free_queue;
		}

		/*
		 * We read the data from synchronization source, now write it
		 * to synchronization target.
		 */
		SYNCREQ(hio);
		ggio->gctl_cmd = BIO_WRITE;
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ",
		    hio);
		pjdlog_debug(2, "sync: (%p) Moving request to the send queue.",
		    hio);
		mtx_lock(&metadata_lock);
		if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
			/*
			 * This range is up-to-date on local component,
			 * so we update remote component.
			 */
			/* Remote component is 1 for now. */
			ncomp = 1;
		} else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ {
			PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY);
			/*
			 * This range is out-of-date on local component,
			 * so we update it.
			 */
			/* Local component is 0 for now. */
			ncomp = 0;
		}
		mtx_unlock(&metadata_lock);

		pjdlog_debug(2, "sync: (%p) Moving request to the send queues.",
		    hio);
		refcount_init(&hio->hio_countdown, 1);
		QUEUE_INSERT1(hio, send, ncomp);

		/*
		 * Let's wait for WRITE to finish.
		 */
		mtx_lock(&sync_lock);
		while (!ISSYNCREQDONE(hio))
			cv_wait(&sync_cond, &sync_lock);
		mtx_unlock(&sync_lock);

		if (hio->hio_errors[ncomp] != 0) {
			pjdlog_error("Unable to write synchronization data: %s.",
			    strerror(hio->hio_errors[ncomp]));
			goto free_queue;
		}

		synced += length;
free_queue:
		/* Drop the sync range lock and wake any waiting writer. */
		mtx_lock(&range_lock);
		rangelock_del(range_sync, offset, length);
		if (range_regular_wait)
			cv_signal(&range_regular_cond);
		mtx_unlock(&range_lock);
		pjdlog_debug(2, "sync: (%p) Moving request to the free queue.",
		    hio);
		QUEUE_INSERT2(hio, free);
	}
	/* NOTREACHED */
	return (NULL);
}

void
primary_config_reload(struct hast_resource *res, struct nv *nv)
{
	unsigned int ii, ncomps;
	int modified, vint;
	const char *vstr;

	pjdlog_info("Reloading 
configuration...");

	PJDLOG_ASSERT(res->hr_role == HAST_ROLE_PRIMARY);
	PJDLOG_ASSERT(gres == res);
	/* The caller must provide every reloadable setting in the nv set. */
	nv_assert(nv, "remoteaddr");
	nv_assert(nv, "replication");
	nv_assert(nv, "checksum");
	nv_assert(nv, "compression");
	nv_assert(nv, "timeout");
	nv_assert(nv, "exec");

	ncomps = HAST_NCOMPONENTS;

	/* Bit flags recording which settings actually changed. */
#define MODIFIED_REMOTEADDR	0x01
#define MODIFIED_REPLICATION	0x02
#define MODIFIED_CHECKSUM	0x04
#define MODIFIED_COMPRESSION	0x08
#define MODIFIED_TIMEOUT	0x10
#define MODIFIED_EXEC		0x20
	modified = 0;

	vstr = nv_get_string(nv, "remoteaddr");
	if (strcmp(gres->hr_remoteaddr, vstr) != 0) {
		/*
		 * Don't copy res->hr_remoteaddr to gres just yet.
		 * We want remote_close() to log disconnect from the old
		 * addresses, not from the new ones.
		 */
		modified |= MODIFIED_REMOTEADDR;
	}
	vint = nv_get_int32(nv, "replication");
	if (gres->hr_replication != vint) {
		gres->hr_replication = vint;
		modified |= MODIFIED_REPLICATION;
	}
	vint = nv_get_int32(nv, "checksum");
	if (gres->hr_checksum != vint) {
		gres->hr_checksum = vint;
		modified |= MODIFIED_CHECKSUM;
	}
	vint = nv_get_int32(nv, "compression");
	if (gres->hr_compression != vint) {
		gres->hr_compression = vint;
		modified |= MODIFIED_COMPRESSION;
	}
	vint = nv_get_int32(nv, "timeout");
	if (gres->hr_timeout != vint) {
		gres->hr_timeout = vint;
		modified |= MODIFIED_TIMEOUT;
	}
	vstr = nv_get_string(nv, "exec");
	if (strcmp(gres->hr_exec, vstr) != 0) {
		strlcpy(gres->hr_exec, vstr, sizeof(gres->hr_exec));
		modified |= MODIFIED_EXEC;
	}

	/*
	 * Change timeout for connected sockets.
	 * Don't bother if we need to reconnect.
	 */
	if ((modified & MODIFIED_TIMEOUT) != 0 &&
	    (modified & (MODIFIED_REMOTEADDR | MODIFIED_REPLICATION)) == 0) {
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			rw_rlock(&hio_remote_lock[ii]);
			if (!ISCONNECTED(gres, ii)) {
				rw_unlock(&hio_remote_lock[ii]);
				continue;
			}
			rw_unlock(&hio_remote_lock[ii]);
			if (proto_timeout(gres->hr_remotein,
			    gres->hr_timeout) < 0) {
				pjdlog_errno(LOG_WARNING,
				    "Unable to set connection timeout");
			}
			if (proto_timeout(gres->hr_remoteout,
			    gres->hr_timeout) < 0) {
				pjdlog_errno(LOG_WARNING,
				    "Unable to set connection timeout");
			}
		}
	}
	/*
	 * A changed remote address or replication mode requires dropping the
	 * remote connections; the guard thread re-establishes them later
	 * (see guard_one()).  The new address is copied in only after
	 * remote_close() has logged the disconnect from the old one.
	 */
	if ((modified & (MODIFIED_REMOTEADDR | MODIFIED_REPLICATION)) != 0) {
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			remote_close(gres, ii);
		}
		if (modified & MODIFIED_REMOTEADDR) {
			vstr = nv_get_string(nv, "remoteaddr");
			strlcpy(gres->hr_remoteaddr, vstr,
			    sizeof(gres->hr_remoteaddr));
		}
	}
#undef MODIFIED_REMOTEADDR
#undef MODIFIED_REPLICATION
#undef MODIFIED_CHECKSUM
#undef MODIFIED_COMPRESSION
#undef MODIFIED_TIMEOUT
#undef MODIFIED_EXEC

	pjdlog_info("Configuration reloaded successfully.");
}

/*
 * Check one component's remote connection and try to reconnect if it is
 * down.  No-op for the local component.
 */
static void
guard_one(struct hast_resource *res, unsigned int ncomp)
{
	struct proto_conn *in, *out;

	if (!ISREMOTE(ncomp))
		return;

	rw_rlock(&hio_remote_lock[ncomp]);

	if (!real_remote(res)) {
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	if (ISCONNECTED(res, ncomp)) {
		PJDLOG_ASSERT(res->hr_remotein != NULL);
		PJDLOG_ASSERT(res->hr_remoteout != NULL);
		rw_unlock(&hio_remote_lock[ncomp]);
		pjdlog_debug(2, "remote_guard: Connection to %s is ok.",
		    res->hr_remoteaddr);
		return;
	}

	PJDLOG_ASSERT(res->hr_remotein == NULL);
	PJDLOG_ASSERT(res->hr_remoteout == NULL);
	/*
	 * Upgrade the lock. It doesn't have to be atomic as no other thread
	 * can change connection status from disconnected to connected.
	 */
	rw_unlock(&hio_remote_lock[ncomp]);
	pjdlog_debug(2, "remote_guard: Reconnecting to %s.",
	    res->hr_remoteaddr);
	in = out = NULL;
	if (init_remote(res, &in, &out)) {
		/* Publish the fresh connections under the write lock. */
		rw_wlock(&hio_remote_lock[ncomp]);
		PJDLOG_ASSERT(res->hr_remotein == NULL);
		PJDLOG_ASSERT(res->hr_remoteout == NULL);
		PJDLOG_ASSERT(in != NULL && out != NULL);
		res->hr_remotein = in;
		res->hr_remoteout = out;
		rw_unlock(&hio_remote_lock[ncomp]);
		pjdlog_info("Successfully reconnected to %s.",
		    res->hr_remoteaddr);
		/* Connection is back -- kick off synchronization. */
		sync_start();
	} else {
		/* Both connections should be NULL. */
		PJDLOG_ASSERT(res->hr_remotein == NULL);
		PJDLOG_ASSERT(res->hr_remoteout == NULL);
		PJDLOG_ASSERT(in == NULL && out == NULL);
		pjdlog_debug(2, "remote_guard: Reconnect to %s failed.",
		    res->hr_remoteaddr);
	}
}

/*
 * Thread guards remote connections and reconnects when needed, handles
 * signals, etc.
 */
static void *
guard_thread(void *arg)
{
	struct hast_resource *res = arg;
	unsigned int ii, ncomps;
	struct timespec timeout;
	time_t lastcheck, now;
	sigset_t mask;
	int signo;

	ncomps = HAST_NCOMPONENTS;
	lastcheck = time(NULL);

	/* We only care about termination signals here. */
	PJDLOG_VERIFY(sigemptyset(&mask) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGINT) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGTERM) == 0);

	timeout.tv_sec = RETRY_SLEEP;
	timeout.tv_nsec = 0;
	/* No signal received yet. */
	signo = -1;

	for (;;) {
		/* Handle the signal picked up by the previous iteration. */
		switch (signo) {
		case SIGINT:
		case SIGTERM:
			sigexit_received = true;
			primary_exitx(EX_OK,
			    "Termination signal received, exiting.");
			break;
		default:
			break;
		}

		pjdlog_debug(2, "remote_guard: Checking connections.");
		now = time(NULL);
		/* Probe connections at most once per RETRY_SLEEP seconds. */
		if (lastcheck + RETRY_SLEEP <= now) {
			for (ii = 0; ii < ncomps; ii++)
				guard_one(res, ii);
			lastcheck = now;
		}
		/*
		 * Doubles as the retry sleep: block for up to RETRY_SLEEP
		 * seconds, waking early if SIGINT/SIGTERM arrives.  Returns
		 * the signal number, or -1 on timeout.
		 */
		signo = sigtimedwait(&mask, NULL, &timeout);
	}
	/* NOTREACHED */
	return (NULL);
}