1204076Spjd/*- 2204076Spjd * Copyright (c) 2009 The FreeBSD Foundation 3219351Spjd * Copyright (c) 2010-2011 Pawel Jakub Dawidek <pawel@dawidek.net> 4204076Spjd * All rights reserved. 5204076Spjd * 6204076Spjd * This software was developed by Pawel Jakub Dawidek under sponsorship from 7204076Spjd * the FreeBSD Foundation. 8204076Spjd * 9204076Spjd * Redistribution and use in source and binary forms, with or without 10204076Spjd * modification, are permitted provided that the following conditions 11204076Spjd * are met: 12204076Spjd * 1. Redistributions of source code must retain the above copyright 13204076Spjd * notice, this list of conditions and the following disclaimer. 14204076Spjd * 2. Redistributions in binary form must reproduce the above copyright 15204076Spjd * notice, this list of conditions and the following disclaimer in the 16204076Spjd * documentation and/or other materials provided with the distribution. 17204076Spjd * 18204076Spjd * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 19204076Spjd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20204076Spjd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21204076Spjd * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 22204076Spjd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23204076Spjd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24204076Spjd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25204076Spjd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26204076Spjd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27204076Spjd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28204076Spjd * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/time.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/stat.h>

#include <geom/gate/g_gate.h>

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libgeom.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>

#include <activemap.h>
#include <nv.h>
#include <rangelock.h>

#include "control.h"
#include "event.h"
#include "hast.h"
#include "hast_proto.h"
#include "hastd.h"
#include "hooks.h"
#include "metadata.h"
#include "proto.h"
#include "pjdlog.h"
#include "refcnt.h"
#include "subr.h"
#include "synch.h"

/* There is only one remote component for now. */
#define	ISREMOTE(no)	((no) == 1)

/*
 * A single I/O request as it travels between the GEOM Gate kernel provider
 * and the local/remote components.  One hio is shared by all components;
 * each component has its own slot in the hio_errors[] and hio_next[] arrays.
 */
struct hio {
	/*
	 * Number of components we are still waiting for.
	 * When this field goes to 0, we can send the request back to the
	 * kernel. Each component has to decrease this counter by one
	 * even on failure.
	 */
	unsigned int		 hio_countdown;
	/*
	 * Each component has a place to store its own error.
	 * Once the request is handled by all components we can decide if the
	 * request overall is successful or not.
	 */
	int			*hio_errors;
	/*
	 * Structure used to communicate with GEOM Gate class.
	 */
	struct g_gate_ctl_io	 hio_ggio;
	/*
	 * Request was already confirmed to GEOM Gate.
	 */
	bool			 hio_done;
	/*
	 * Number of components we are still waiting before sending write
	 * completion ack to GEOM Gate. Used for memsync.
	 */
	unsigned int		 hio_writecount;
	/*
	 * Memsync request was acknowledged by remote.
	 */
	bool			 hio_memsyncacked;
	/*
	 * Remember replication from the time the request was initiated,
	 * so we won't get confused when replication changes on reload.
	 */
	int			 hio_replication;
	/* Per-component queue linkage; one TAILQ_ENTRY per component. */
	TAILQ_ENTRY(hio)	*hio_next;
};
/* The free and done lists reuse component 0's linkage. */
#define	hio_free_next	hio_next[0]
#define	hio_done_next	hio_next[0]

/*
 * Free list holds unused structures. When free list is empty, we have to wait
 * until some in-progress requests are freed.
 */
static TAILQ_HEAD(, hio) hio_free_list;
static size_t hio_free_list_size;
static pthread_mutex_t hio_free_list_lock;
static pthread_cond_t hio_free_list_cond;
/*
 * There is one send list for every component. One request is placed on all
 * send lists - each component gets the same request, but each component is
 * responsible for managing its own send list.
 */
static TAILQ_HEAD(, hio) *hio_send_list;
static size_t *hio_send_list_size;
static pthread_mutex_t *hio_send_list_lock;
static pthread_cond_t *hio_send_list_cond;
#define	hio_send_local_list_size	hio_send_list_size[0]
#define	hio_send_remote_list_size	hio_send_list_size[1]
/*
 * There is one recv list for every component, although local components don't
 * use recv lists as local requests are done synchronously.
 */
static TAILQ_HEAD(, hio) *hio_recv_list;
static size_t *hio_recv_list_size;
static pthread_mutex_t *hio_recv_list_lock;
static pthread_cond_t *hio_recv_list_cond;
#define	hio_recv_remote_list_size	hio_recv_list_size[1]
/*
 * Request is placed on done list by the slowest component (the one that
 * decreased hio_countdown from 1 to 0).
 */
static TAILQ_HEAD(, hio) hio_done_list;
static size_t hio_done_list_size;
static pthread_mutex_t hio_done_list_lock;
static pthread_cond_t hio_done_list_cond;
/*
 * Structures below are for interaction with sync thread.
 */
static bool sync_inprogress;
static pthread_mutex_t sync_lock;
static pthread_cond_t sync_cond;
/*
 * The lock below allows one to synchronize access to remote connections.
 */
static pthread_rwlock_t *hio_remote_lock;

/*
 * Lock to synchronize metadata updates. Also synchronize access to
 * hr_primary_localcnt and hr_primary_remotecnt fields.
 */
static pthread_mutex_t metadata_lock;

/*
 * Maximum number of outstanding I/O requests.
 */
#define	HAST_HIO_MAX	256
/*
 * Number of components. At this point there are only two components: local
 * and remote, but in the future it might be possible to use multiple local
 * and remote components.
 */
#define	HAST_NCOMPONENTS	2

#define	ISCONNECTED(res, no)	\
	((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL)

/*
 * Queue helpers.  The "1" variants operate on the per-component arrays of
 * lists (send/recv); the "2" variants operate on the single free/done lists.
 * Each insert wakes waiters only when the list transitions from empty, and
 * each helper maintains the matching *_list_size counter under the list lock.
 */
#define	QUEUE_INSERT1(hio, name, ncomp)	do {				\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);			\
	if (TAILQ_EMPTY(&hio_##name##_list[(ncomp)]))			\
		cv_broadcast(&hio_##name##_list_cond[(ncomp)]);		\
	TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio),		\
	    hio_next[(ncomp)]);						\
	hio_##name##_list_size[(ncomp)]++;				\
	mtx_unlock(&hio_##name##_list_lock[(ncomp)]);			\
} while (0)
#define	QUEUE_INSERT2(hio, name)	do {				\
	mtx_lock(&hio_##name##_list_lock);				\
	if (TAILQ_EMPTY(&hio_##name##_list))				\
		cv_broadcast(&hio_##name##_list_cond);			\
	TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\
	hio_##name##_list_size++;					\
	mtx_unlock(&hio_##name##_list_lock);				\
} while (0)
/*
 * Take a request from a per-component list, optionally waiting at most
 * one timed wait (timeout != 0 means: give up after a single cv_timedwait,
 * possibly leaving (hio) == NULL).
 */
#define	QUEUE_TAKE1(hio, name, ncomp, timeout)	do {			\
	bool _last;							\
									\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);			\
	_last = false;							\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL && !_last) { \
		cv_timedwait(&hio_##name##_list_cond[(ncomp)],		\
		    &hio_##name##_list_lock[(ncomp)], (timeout));	\
		if ((timeout) != 0)					\
			_last = true;					\
	}								\
	if (hio != NULL) {						\
		PJDLOG_ASSERT(hio_##name##_list_size[(ncomp)] != 0);	\
		hio_##name##_list_size[(ncomp)]--;			\
		TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio),	\
		    hio_next[(ncomp)]);					\
	}								\
	mtx_unlock(&hio_##name##_list_lock[(ncomp)]);			\
} while (0)
/* Take a request from a single (free/done) list, blocking until available. */
#define	QUEUE_TAKE2(hio, name)	do {					\
	mtx_lock(&hio_##name##_list_lock);				\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) {	\
		cv_wait(&hio_##name##_list_cond,			\
		    &hio_##name##_list_lock);				\
	}								\
	PJDLOG_ASSERT(hio_##name##_list_size != 0);			\
	hio_##name##_list_size--;					\
	TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next);	\
	mtx_unlock(&hio_##name##_list_lock);				\
} while (0)

#define	ISFULLSYNC(hio)	((hio)->hio_replication == HAST_REPLICATION_FULLSYNC)
#define	ISMEMSYNC(hio)	((hio)->hio_replication == HAST_REPLICATION_MEMSYNC)
#define	ISASYNC(hio)	((hio)->hio_replication == HAST_REPLICATION_ASYNC)

/*
 * Synchronization requests are distinguished from regular GEOM Gate requests
 * by special gctl_unit values (-1 in progress, -2 done).
 */
#define	SYNCREQ(hio)		do {					\
	(hio)->hio_ggio.gctl_unit = -1;					\
	(hio)->hio_ggio.gctl_seq = 1;					\
} while (0)
#define	ISSYNCREQ(hio)		((hio)->hio_ggio.gctl_unit == -1)
#define	SYNCREQDONE(hio)	do { (hio)->hio_ggio.gctl_unit = -2; } while (0)
#define	ISSYNCREQDONE(hio)	((hio)->hio_ggio.gctl_unit == -2)

/* True for a regular (non-sync) write request under memsync replication. */
#define	ISMEMSYNCWRITE(hio)	(ISMEMSYNC(hio) &&			\
	    (hio)->hio_ggio.gctl_cmd == BIO_WRITE && !ISSYNCREQ(hio))

/* Resource being served by this primary process; used by exit paths. */
static struct hast_resource *gres;

static pthread_mutex_t range_lock;
static struct rangelocks *range_regular;
static bool range_regular_wait;
static pthread_cond_t range_regular_cond;
static struct rangelocks *range_sync;
static bool range_sync_wait;
static pthread_cond_t range_sync_cond;
static bool fullystarted;

static void *ggate_recv_thread(void *arg);
static void *local_send_thread(void *arg);
static void *remote_send_thread(void *arg);
static void *remote_recv_thread(void *arg);
static void *ggate_send_thread(void *arg);
static void *sync_thread(void *arg);
static void *guard_thread(void *arg);

/*
 * Export current queue depths into the given nv list for status reporting.
 * Counters are read without their list locks; values are advisory only.
 */
static void
output_status_aux(struct nv *nvout)
{

	nv_add_uint64(nvout, (uint64_t)hio_free_list_size,
	    "idle_queue_size");
	nv_add_uint64(nvout, (uint64_t)hio_send_local_list_size,
	    "local_queue_size");
	nv_add_uint64(nvout, (uint64_t)hio_send_remote_list_size,
	    "send_queue_size");
	nv_add_uint64(nvout, (uint64_t)hio_recv_remote_list_size,
	    "recv_queue_size");
	nv_add_uint64(nvout, (uint64_t)hio_done_list_size,
	    "done_queue_size");
}

/*
 * Release resources on exit.  Preserves the caller's errno so it can be
 * safely called from error paths.
 */
static void
cleanup(struct hast_resource *res)
{
	int rerrno;

	/* Remember errno. */
	rerrno = errno;

	/* Destroy ggate provider if we created one. */
	if (res->hr_ggateunit >= 0) {
		struct g_gate_ctl_destroy ggiod;

		bzero(&ggiod, sizeof(ggiod));
		ggiod.gctl_version = G_GATE_VERSION;
		ggiod.gctl_unit = res->hr_ggateunit;
		ggiod.gctl_force = 1;
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) == -1) {
			pjdlog_errno(LOG_WARNING,
			    "Unable to destroy hast/%s device",
			    res->hr_provname);
		}
		res->hr_ggateunit = -1;
	}

	/* Restore errno. */
	errno = rerrno;
}

/* Log the formatted message with errno, clean up and exit.  Never returns. */
static __dead2 void
primary_exit(int exitcode, const char *fmt, ...)
{
	va_list ap;

	PJDLOG_ASSERT(exitcode != EX_OK);
	va_start(ap, fmt);
	pjdlogv_errno(LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

/*
 * Like primary_exit(), but without appending errno; EX_OK exits log at
 * LOG_INFO level instead of LOG_ERR.  Never returns.
 */
static __dead2 void
primary_exitx(int exitcode, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	pjdlogv(exitcode == EX_OK ? LOG_INFO : LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

/* Expects res->hr_amp locked, returns unlocked.
 */
static int
hast_activemap_flush(struct hast_resource *res)
{
	const unsigned char *buf;
	size_t size;
	int ret;

	/*
	 * hr_amp_diskmap_lock serializes on-disk map updates; hr_amp_lock
	 * (held by the caller) can be dropped as soon as the bitmap snapshot
	 * is taken.
	 */
	mtx_lock(&res->hr_amp_diskmap_lock);
	buf = activemap_bitmap(res->hr_amp, &size);
	mtx_unlock(&res->hr_amp_lock);
	PJDLOG_ASSERT(buf != NULL);
	PJDLOG_ASSERT((size % res->hr_local_sectorsize) == 0);
	ret = 0;
	if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) !=
	    (ssize_t)size) {
		pjdlog_errno(LOG_ERR, "Unable to flush activemap to disk");
		res->hr_stat_activemap_write_error++;
		ret = -1;
	}
	if (ret == 0 && res->hr_metaflush == 1 &&
	    g_flush(res->hr_localfd) == -1) {
		if (errno == EOPNOTSUPP) {
			/* Provider cannot flush; stop trying from now on. */
			pjdlog_warning("The %s provider doesn't support flushing write cache. Disabling it.",
			    res->hr_localpath);
			res->hr_metaflush = 0;
		} else {
			pjdlog_errno(LOG_ERR,
			    "Unable to flush disk cache on activemap update");
			res->hr_stat_activemap_flush_error++;
			ret = -1;
		}
	}
	mtx_unlock(&res->hr_amp_diskmap_lock);
	return (ret);
}

/* True when a real remote address is configured (i.e. not "none"). */
static bool
real_remote(const struct hast_resource *res)
{

	return (strcmp(res->hr_remoteaddr, "none") != 0);
}

/*
 * Allocate and initialize all per-component queues, their locks/condvars and
 * the pool of HAST_HIO_MAX preallocated hio requests.  Exits the process on
 * allocation failure.
 */
static void
init_environment(struct hast_resource *res __unused)
{
	struct hio *hio;
	unsigned int ii, ncomps;

	/*
	 * In the future it might be per-resource value.
	 */
	ncomps = HAST_NCOMPONENTS;

	/*
	 * Allocate memory needed by lists.
	 */
	hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps);
	if (hio_send_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send lists.",
		    sizeof(hio_send_list[0]) * ncomps);
	}
	hio_send_list_size = malloc(sizeof(hio_send_list_size[0]) * ncomps);
	if (hio_send_list_size == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list counters.",
		    sizeof(hio_send_list_size[0]) * ncomps);
	}
	hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps);
	if (hio_send_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list locks.",
		    sizeof(hio_send_list_lock[0]) * ncomps);
	}
	hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps);
	if (hio_send_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list condition variables.",
		    sizeof(hio_send_list_cond[0]) * ncomps);
	}
	hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps);
	if (hio_recv_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv lists.",
		    sizeof(hio_recv_list[0]) * ncomps);
	}
	hio_recv_list_size = malloc(sizeof(hio_recv_list_size[0]) * ncomps);
	if (hio_recv_list_size == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list counters.",
		    sizeof(hio_recv_list_size[0]) * ncomps);
	}
	hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps);
	if (hio_recv_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list locks.",
		    sizeof(hio_recv_list_lock[0]) * ncomps);
	}
	hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps);
	if (hio_recv_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list condition variables.",
		    sizeof(hio_recv_list_cond[0]) * ncomps);
	}
	hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps);
	if (hio_remote_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for remote connections locks.",
		    sizeof(hio_remote_lock[0]) * ncomps);
	}

	/*
	 * Initialize lists, their counters, locks and condition variables.
	 */
	TAILQ_INIT(&hio_free_list);
	mtx_init(&hio_free_list_lock);
	cv_init(&hio_free_list_cond);
	for (ii = 0; ii < HAST_NCOMPONENTS; ii++) {
		TAILQ_INIT(&hio_send_list[ii]);
		hio_send_list_size[ii] = 0;
		mtx_init(&hio_send_list_lock[ii]);
		cv_init(&hio_send_list_cond[ii]);
		TAILQ_INIT(&hio_recv_list[ii]);
		hio_recv_list_size[ii] = 0;
		mtx_init(&hio_recv_list_lock[ii]);
		cv_init(&hio_recv_list_cond[ii]);
		rw_init(&hio_remote_lock[ii]);
	}
	TAILQ_INIT(&hio_done_list);
	mtx_init(&hio_done_list_lock);
	cv_init(&hio_done_list_cond);
	mtx_init(&metadata_lock);

	/*
	 * Allocate requests pool and initialize requests.
	 */
	for (ii = 0; ii < HAST_HIO_MAX; ii++) {
		hio = malloc(sizeof(*hio));
		if (hio == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio request.",
			    sizeof(*hio));
		}
		hio->hio_countdown = 0;
		hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps);
		if (hio->hio_errors == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable allocate %zu bytes of memory for hio errors.",
			    sizeof(hio->hio_errors[0]) * ncomps);
		}
		hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps);
		if (hio->hio_next == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable allocate %zu bytes of memory for hio_next field.",
			    sizeof(hio->hio_next[0]) * ncomps);
		}
		hio->hio_ggio.gctl_version = G_GATE_VERSION;
		/* MAXPHYS is the largest single transfer the kernel issues. */
		hio->hio_ggio.gctl_data = malloc(MAXPHYS);
		if (hio->hio_ggio.gctl_data == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for gctl_data.",
			    MAXPHYS);
		}
		hio->hio_ggio.gctl_length = MAXPHYS;
		hio->hio_ggio.gctl_error = 0;
		TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next);
		hio_free_list_size++;
	}
}

/*
 * Initialize hr_resuid exactly once (under metadata_lock) and persist it.
 * Returns true if this call performed the initialization, false if the
 * field was already set (possibly by a concurrent caller).
 */
static bool
init_resuid(struct hast_resource *res)
{

	mtx_lock(&metadata_lock);
	if (res->hr_resuid != 0) {
		mtx_unlock(&metadata_lock);
		return (false);
	} else {
		/* Initialize unique resource identifier. */
		arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid));
		mtx_unlock(&metadata_lock);
		if (metadata_write(res) == -1)
			exit(EX_NOINPUT);
		return (true);
	}
}

/*
 * Read metadata, create the activemap and range locks, and load the on-disk
 * activemap for the local component.  Exits the process on failure.
 */
static void
init_local(struct hast_resource *res)
{
	unsigned char *buf;
	size_t mapsize;

	if (metadata_read(res, true) == -1)
		exit(EX_NOINPUT);
	mtx_init(&res->hr_amp_lock);
	if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize,
	    res->hr_local_sectorsize, res->hr_keepdirty) == -1) {
		primary_exit(EX_TEMPFAIL, "Unable to create activemap");
	}
	mtx_init(&range_lock);
	cv_init(&range_regular_cond);
	if (rangelock_init(&range_regular) == -1)
		primary_exit(EX_TEMPFAIL, "Unable to create regular range lock");
	cv_init(&range_sync_cond);
	if (rangelock_init(&range_sync) == -1)
		primary_exit(EX_TEMPFAIL, "Unable to create sync range lock");

	mapsize = activemap_ondisk_size(res->hr_amp);
	buf = calloc(1, mapsize);
	if (buf == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate buffer for activemap.");
	}
	if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) !=
	    (ssize_t)mapsize) {
		primary_exit(EX_NOINPUT, "Unable to read activemap");
	}
	activemap_copyin(res->hr_amp, buf, mapsize);
	free(buf);
	if (res->hr_resuid != 0)
		return;
	/*
	 * We're using provider for the first time. Initialize local and remote
	 * counters. We don't initialize resuid here, as we want to do it just
	 * in time. The reason for this is that we want to inform secondary
	 * that there were no writes yet, so there is no need to synchronize
	 * anything.
	 */
	res->hr_primary_localcnt = 0;
	res->hr_primary_remotecnt = 0;
	if (metadata_write(res) == -1)
		exit(EX_NOINPUT);
}

/*
 * Ask the parent process for a connection to the remote node and wait for
 * it to be established.  On success stores the connection in *connp and
 * returns 0; returns -1 if the remote cannot be reached.  Exits the process
 * if communication with the parent itself fails.
 */
static int
primary_connect(struct hast_resource *res, struct proto_conn **connp)
{
	struct proto_conn *conn;
	int16_t val;

	val = 1;
	if (proto_send(res->hr_conn, &val, sizeof(val)) == -1) {
		primary_exit(EX_TEMPFAIL,
		    "Unable to send connection request to parent");
	}
	if (proto_recv(res->hr_conn, &val, sizeof(val)) == -1) {
		primary_exit(EX_TEMPFAIL,
		    "Unable to receive reply to connection request from parent");
	}
	if (val != 0) {
		/* Parent reported a connect error; val carries the errno. */
		errno = val;
		pjdlog_errno(LOG_WARNING, "Unable to connect to %s",
		    res->hr_remoteaddr);
		return (-1);
	}
	if (proto_connection_recv(res->hr_conn, true, &conn) == -1) {
		primary_exit(EX_TEMPFAIL,
		    "Unable to receive connection from parent");
	}
	if (proto_connect_wait(conn, res->hr_timeout) == -1) {
		pjdlog_errno(LOG_WARNING, "Unable to connect to %s",
		    res->hr_remoteaddr);
		proto_close(conn);
		return (-1);
	}
	/* Error in setting timeout is not critical, but why should it fail? */
	if (proto_timeout(conn, res->hr_timeout) == -1)
		pjdlog_errno(LOG_WARNING, "Unable to set connection timeout");

	*connp = conn;

	return (0);
}

/*
 * Function instructs GEOM_GATE to handle reads directly from within the kernel.
 */
static void
enable_direct_reads(struct hast_resource *res)
{
	struct g_gate_ctl_modify ggiomodify;

	bzero(&ggiomodify, sizeof(ggiomodify));
	ggiomodify.gctl_version = G_GATE_VERSION;
	ggiomodify.gctl_unit = res->hr_ggateunit;
	ggiomodify.gctl_modify = GG_MODIFY_READPROV | GG_MODIFY_READOFFSET;
	strlcpy(ggiomodify.gctl_readprov, res->hr_localpath,
	    sizeof(ggiomodify.gctl_readprov));
	ggiomodify.gctl_readoffset = res->hr_localoff;
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_MODIFY, &ggiomodify) == 0)
		pjdlog_debug(1, "Direct reads enabled.");
	else
		pjdlog_errno(LOG_WARNING, "Failed to enable direct reads");
}

static int
init_remote(struct hast_resource *res, struct proto_conn **inp,
    struct proto_conn **outp)
634204076Spjd{ 635205738Spjd struct proto_conn *in, *out; 636204076Spjd struct nv *nvout, *nvin; 637204076Spjd const unsigned char *token; 638204076Spjd unsigned char *map; 639204076Spjd const char *errmsg; 640204076Spjd int32_t extentsize; 641204076Spjd int64_t datasize; 642204076Spjd uint32_t mapsize; 643249236Strociny uint8_t version; 644204076Spjd size_t size; 645220898Spjd int error; 646204076Spjd 647218138Spjd PJDLOG_ASSERT((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL)); 648218138Spjd PJDLOG_ASSERT(real_remote(res)); 649205738Spjd 650205738Spjd in = out = NULL; 651211983Spjd errmsg = NULL; 652205738Spjd 653218218Spjd if (primary_connect(res, &out) == -1) 654220898Spjd return (ECONNREFUSED); 655218218Spjd 656220898Spjd error = ECONNABORTED; 657220898Spjd 658204076Spjd /* 659204076Spjd * First handshake step. 660204076Spjd * Setup outgoing connection with remote node. 661204076Spjd */ 662204076Spjd nvout = nv_alloc(); 663204076Spjd nv_add_string(nvout, res->hr_name, "resource"); 664249236Strociny nv_add_uint8(nvout, HAST_PROTO_VERSION, "version"); 665204076Spjd if (nv_error(nvout) != 0) { 666204076Spjd pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 667204076Spjd "Unable to allocate header for connection with %s", 668204076Spjd res->hr_remoteaddr); 669204076Spjd nv_free(nvout); 670204076Spjd goto close; 671204076Spjd } 672231017Strociny if (hast_proto_send(res, out, nvout, NULL, 0) == -1) { 673204076Spjd pjdlog_errno(LOG_WARNING, 674204076Spjd "Unable to send handshake header to %s", 675204076Spjd res->hr_remoteaddr); 676204076Spjd nv_free(nvout); 677204076Spjd goto close; 678204076Spjd } 679204076Spjd nv_free(nvout); 680231017Strociny if (hast_proto_recv_hdr(out, &nvin) == -1) { 681204076Spjd pjdlog_errno(LOG_WARNING, 682204076Spjd "Unable to receive handshake header from %s", 683204076Spjd res->hr_remoteaddr); 684204076Spjd goto close; 685204076Spjd } 686204076Spjd errmsg = nv_get_string(nvin, "errmsg"); 687204076Spjd if (errmsg != NULL) { 
688204076Spjd pjdlog_warning("%s", errmsg); 689220898Spjd if (nv_exists(nvin, "wait")) 690220898Spjd error = EBUSY; 691204076Spjd nv_free(nvin); 692204076Spjd goto close; 693204076Spjd } 694249236Strociny version = nv_get_uint8(nvin, "version"); 695249236Strociny if (version == 0) { 696249236Strociny /* 697249236Strociny * If no version is sent, it means this is protocol version 1. 698249236Strociny */ 699249236Strociny version = 1; 700249236Strociny } 701249236Strociny if (version > HAST_PROTO_VERSION) { 702249236Strociny pjdlog_warning("Invalid version received (%hhu).", version); 703249236Strociny nv_free(nvin); 704249236Strociny goto close; 705249236Strociny } 706249236Strociny res->hr_version = version; 707249236Strociny pjdlog_debug(1, "Negotiated protocol version %d.", res->hr_version); 708204076Spjd token = nv_get_uint8_array(nvin, &size, "token"); 709204076Spjd if (token == NULL) { 710204076Spjd pjdlog_warning("Handshake header from %s has no 'token' field.", 711204076Spjd res->hr_remoteaddr); 712204076Spjd nv_free(nvin); 713204076Spjd goto close; 714204076Spjd } 715204076Spjd if (size != sizeof(res->hr_token)) { 716204076Spjd pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).", 717204076Spjd res->hr_remoteaddr, size, sizeof(res->hr_token)); 718204076Spjd nv_free(nvin); 719204076Spjd goto close; 720204076Spjd } 721204076Spjd bcopy(token, res->hr_token, sizeof(res->hr_token)); 722204076Spjd nv_free(nvin); 723204076Spjd 724204076Spjd /* 725204076Spjd * Second handshake step. 726204076Spjd * Setup incoming connection with remote node. 
727204076Spjd */ 728218218Spjd if (primary_connect(res, &in) == -1) 729204076Spjd goto close; 730218218Spjd 731204076Spjd nvout = nv_alloc(); 732204076Spjd nv_add_string(nvout, res->hr_name, "resource"); 733204076Spjd nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token), 734204076Spjd "token"); 735214284Spjd if (res->hr_resuid == 0) { 736214284Spjd /* 737214284Spjd * The resuid field was not yet initialized. 738214284Spjd * Because we do synchronization inside init_resuid(), it is 739214284Spjd * possible that someone already initialized it, the function 740214284Spjd * will return false then, but if we successfully initialized 741214284Spjd * it, we will get true. True means that there were no writes 742214284Spjd * to this resource yet and we want to inform secondary that 743214284Spjd * synchronization is not needed by sending "virgin" argument. 744214284Spjd */ 745214284Spjd if (init_resuid(res)) 746214284Spjd nv_add_int8(nvout, 1, "virgin"); 747214284Spjd } 748204076Spjd nv_add_uint64(nvout, res->hr_resuid, "resuid"); 749204076Spjd nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt"); 750204076Spjd nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt"); 751204076Spjd if (nv_error(nvout) != 0) { 752204076Spjd pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 753204076Spjd "Unable to allocate header for connection with %s", 754204076Spjd res->hr_remoteaddr); 755204076Spjd nv_free(nvout); 756204076Spjd goto close; 757204076Spjd } 758231017Strociny if (hast_proto_send(res, in, nvout, NULL, 0) == -1) { 759204076Spjd pjdlog_errno(LOG_WARNING, 760204076Spjd "Unable to send handshake header to %s", 761204076Spjd res->hr_remoteaddr); 762204076Spjd nv_free(nvout); 763204076Spjd goto close; 764204076Spjd } 765204076Spjd nv_free(nvout); 766231017Strociny if (hast_proto_recv_hdr(out, &nvin) == -1) { 767204076Spjd pjdlog_errno(LOG_WARNING, 768204076Spjd "Unable to receive handshake header from %s", 769204076Spjd res->hr_remoteaddr); 770204076Spjd goto 
close; 771204076Spjd } 772204076Spjd errmsg = nv_get_string(nvin, "errmsg"); 773204076Spjd if (errmsg != NULL) { 774204076Spjd pjdlog_warning("%s", errmsg); 775204076Spjd nv_free(nvin); 776204076Spjd goto close; 777204076Spjd } 778204076Spjd datasize = nv_get_int64(nvin, "datasize"); 779204076Spjd if (datasize != res->hr_datasize) { 780204076Spjd pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).", 781204076Spjd (intmax_t)res->hr_datasize, (intmax_t)datasize); 782204076Spjd nv_free(nvin); 783204076Spjd goto close; 784204076Spjd } 785204076Spjd extentsize = nv_get_int32(nvin, "extentsize"); 786204076Spjd if (extentsize != res->hr_extentsize) { 787204076Spjd pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).", 788204076Spjd (ssize_t)res->hr_extentsize, (ssize_t)extentsize); 789204076Spjd nv_free(nvin); 790204076Spjd goto close; 791204076Spjd } 792204076Spjd res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt"); 793204076Spjd res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt"); 794204076Spjd res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc"); 795240269Strociny if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) 796240269Strociny enable_direct_reads(res); 797220865Spjd if (nv_exists(nvin, "virgin")) { 798220865Spjd /* 799220865Spjd * Secondary was reinitialized, bump localcnt if it is 0 as 800220865Spjd * only we have the data. 
801220865Spjd */ 802220865Spjd PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_PRIMARY); 803220865Spjd PJDLOG_ASSERT(res->hr_secondary_localcnt == 0); 804220865Spjd 805220865Spjd if (res->hr_primary_localcnt == 0) { 806220865Spjd PJDLOG_ASSERT(res->hr_secondary_remotecnt == 0); 807220865Spjd 808220865Spjd mtx_lock(&metadata_lock); 809220865Spjd res->hr_primary_localcnt++; 810220865Spjd pjdlog_debug(1, "Increasing localcnt to %ju.", 811220865Spjd (uintmax_t)res->hr_primary_localcnt); 812220865Spjd (void)metadata_write(res); 813220865Spjd mtx_unlock(&metadata_lock); 814220865Spjd } 815220865Spjd } 816204076Spjd map = NULL; 817204076Spjd mapsize = nv_get_uint32(nvin, "mapsize"); 818204076Spjd if (mapsize > 0) { 819204076Spjd map = malloc(mapsize); 820204076Spjd if (map == NULL) { 821204076Spjd pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).", 822204076Spjd (uintmax_t)mapsize); 823204076Spjd nv_free(nvin); 824204076Spjd goto close; 825204076Spjd } 826204076Spjd /* 827204076Spjd * Remote node have some dirty extents on its own, lets 828204076Spjd * download its activemap. 829204076Spjd */ 830205738Spjd if (hast_proto_recv_data(res, out, nvin, map, 831231017Strociny mapsize) == -1) { 832204076Spjd pjdlog_errno(LOG_ERR, 833204076Spjd "Unable to receive remote activemap"); 834204076Spjd nv_free(nvin); 835204076Spjd free(map); 836204076Spjd goto close; 837204076Spjd } 838257470Strociny mtx_lock(&res->hr_amp_lock); 839204076Spjd /* 840204076Spjd * Merge local and remote bitmaps. 841204076Spjd */ 842204076Spjd activemap_merge(res->hr_amp, map, mapsize); 843204076Spjd free(map); 844204076Spjd /* 845204076Spjd * Now that we merged bitmaps from both nodes, flush it to the 846204076Spjd * disk before we start to synchronize. 847204076Spjd */ 848204076Spjd (void)hast_activemap_flush(res); 849204076Spjd } 850214274Spjd nv_free(nvin); 851223181Strociny#ifdef notyet 852220271Spjd /* Setup directions. 
*/ 853220271Spjd if (proto_send(out, NULL, 0) == -1) 854220271Spjd pjdlog_errno(LOG_WARNING, "Unable to set connection direction"); 855220271Spjd if (proto_recv(in, NULL, 0) == -1) 856220271Spjd pjdlog_errno(LOG_WARNING, "Unable to set connection direction"); 857223181Strociny#endif 858204076Spjd pjdlog_info("Connected to %s.", res->hr_remoteaddr); 859249236Strociny if (res->hr_original_replication == HAST_REPLICATION_MEMSYNC && 860249236Strociny res->hr_version < 2) { 861249236Strociny pjdlog_warning("The 'memsync' replication mode is not supported by the remote node, falling back to 'fullsync' mode."); 862249236Strociny res->hr_replication = HAST_REPLICATION_FULLSYNC; 863249236Strociny } else if (res->hr_replication != res->hr_original_replication) { 864249236Strociny /* 865249236Strociny * This is in case hastd disconnected and was upgraded. 866249236Strociny */ 867249236Strociny res->hr_replication = res->hr_original_replication; 868249236Strociny } 869205738Spjd if (inp != NULL && outp != NULL) { 870205738Spjd *inp = in; 871205738Spjd *outp = out; 872205738Spjd } else { 873205738Spjd res->hr_remotein = in; 874205738Spjd res->hr_remoteout = out; 875205738Spjd } 876212038Spjd event_send(res, EVENT_CONNECT); 877220898Spjd return (0); 878205738Spjdclose: 879211983Spjd if (errmsg != NULL && strcmp(errmsg, "Split-brain condition!") == 0) 880212038Spjd event_send(res, EVENT_SPLITBRAIN); 881205738Spjd proto_close(out); 882205738Spjd if (in != NULL) 883205738Spjd proto_close(in); 884220898Spjd return (error); 885205738Spjd} 886205738Spjd 887205738Spjdstatic void 888205738Spjdsync_start(void) 889205738Spjd{ 890205738Spjd 891204076Spjd mtx_lock(&sync_lock); 892204076Spjd sync_inprogress = true; 893204076Spjd mtx_unlock(&sync_lock); 894204076Spjd cv_signal(&sync_cond); 895204076Spjd} 896204076Spjd 897204076Spjdstatic void 898211878Spjdsync_stop(void) 899211878Spjd{ 900211878Spjd 901211878Spjd mtx_lock(&sync_lock); 902211878Spjd if (sync_inprogress) 903211878Spjd 
sync_inprogress = false; 904211878Spjd mtx_unlock(&sync_lock); 905211878Spjd} 906211878Spjd 907211878Spjdstatic void 908204076Spjdinit_ggate(struct hast_resource *res) 909204076Spjd{ 910204076Spjd struct g_gate_ctl_create ggiocreate; 911204076Spjd struct g_gate_ctl_cancel ggiocancel; 912204076Spjd 913204076Spjd /* 914204076Spjd * We communicate with ggate via /dev/ggctl. Open it. 915204076Spjd */ 916204076Spjd res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR); 917231017Strociny if (res->hr_ggatefd == -1) 918204076Spjd primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME); 919204076Spjd /* 920204076Spjd * Create provider before trying to connect, as connection failure 921204076Spjd * is not critical, but may take some time. 922204076Spjd */ 923213533Spjd bzero(&ggiocreate, sizeof(ggiocreate)); 924204076Spjd ggiocreate.gctl_version = G_GATE_VERSION; 925204076Spjd ggiocreate.gctl_mediasize = res->hr_datasize; 926204076Spjd ggiocreate.gctl_sectorsize = res->hr_local_sectorsize; 927204076Spjd ggiocreate.gctl_flags = 0; 928220266Spjd ggiocreate.gctl_maxcount = 0; 929204076Spjd ggiocreate.gctl_timeout = 0; 930204076Spjd ggiocreate.gctl_unit = G_GATE_NAME_GIVEN; 931204076Spjd snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s", 932204076Spjd res->hr_provname); 933204076Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) { 934204076Spjd pjdlog_info("Device hast/%s created.", res->hr_provname); 935204076Spjd res->hr_ggateunit = ggiocreate.gctl_unit; 936204076Spjd return; 937204076Spjd } 938204076Spjd if (errno != EEXIST) { 939204076Spjd primary_exit(EX_OSERR, "Unable to create hast/%s device", 940204076Spjd res->hr_provname); 941204076Spjd } 942204076Spjd pjdlog_debug(1, 943204076Spjd "Device hast/%s already exists, we will try to take it over.", 944204076Spjd res->hr_provname); 945204076Spjd /* 946204076Spjd * If we received EEXIST, we assume that the process who created the 947204076Spjd * provider died and didn't clean 
up. In that case we will start from 948204076Spjd * where he left of. 949204076Spjd */ 950213533Spjd bzero(&ggiocancel, sizeof(ggiocancel)); 951204076Spjd ggiocancel.gctl_version = G_GATE_VERSION; 952204076Spjd ggiocancel.gctl_unit = G_GATE_NAME_GIVEN; 953204076Spjd snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s", 954204076Spjd res->hr_provname); 955204076Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) { 956204076Spjd pjdlog_info("Device hast/%s recovered.", res->hr_provname); 957204076Spjd res->hr_ggateunit = ggiocancel.gctl_unit; 958204076Spjd return; 959204076Spjd } 960204076Spjd primary_exit(EX_OSERR, "Unable to take over hast/%s device", 961204076Spjd res->hr_provname); 962204076Spjd} 963204076Spjd 964204076Spjdvoid 965204076Spjdhastd_primary(struct hast_resource *res) 966204076Spjd{ 967204076Spjd pthread_t td; 968204076Spjd pid_t pid; 969219482Strociny int error, mode, debuglevel; 970204076Spjd 971204076Spjd /* 972218218Spjd * Create communication channel for sending control commands from 973218218Spjd * parent to child. 974204076Spjd */ 975231017Strociny if (proto_client(NULL, "socketpair://", &res->hr_ctrl) == -1) { 976218042Spjd /* TODO: There's no need for this to be fatal error. */ 977204076Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 978212034Spjd pjdlog_exit(EX_OSERR, 979204076Spjd "Unable to create control sockets between parent and child"); 980204076Spjd } 981212038Spjd /* 982218218Spjd * Create communication channel for sending events from child to parent. 983212038Spjd */ 984231017Strociny if (proto_client(NULL, "socketpair://", &res->hr_event) == -1) { 985218042Spjd /* TODO: There's no need for this to be fatal error. 
*/ 986212038Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 987212038Spjd pjdlog_exit(EX_OSERR, 988212038Spjd "Unable to create event sockets between child and parent"); 989212038Spjd } 990218218Spjd /* 991218218Spjd * Create communication channel for sending connection requests from 992218218Spjd * child to parent. 993218218Spjd */ 994231017Strociny if (proto_client(NULL, "socketpair://", &res->hr_conn) == -1) { 995218218Spjd /* TODO: There's no need for this to be fatal error. */ 996218218Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 997218218Spjd pjdlog_exit(EX_OSERR, 998218218Spjd "Unable to create connection sockets between child and parent"); 999218218Spjd } 1000204076Spjd 1001204076Spjd pid = fork(); 1002231017Strociny if (pid == -1) { 1003218042Spjd /* TODO: There's no need for this to be fatal error. */ 1004204076Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 1005212034Spjd pjdlog_exit(EX_TEMPFAIL, "Unable to fork"); 1006204076Spjd } 1007204076Spjd 1008204076Spjd if (pid > 0) { 1009204076Spjd /* This is parent. */ 1010212038Spjd /* Declare that we are receiver. */ 1011212038Spjd proto_recv(res->hr_event, NULL, 0); 1012218218Spjd proto_recv(res->hr_conn, NULL, 0); 1013218043Spjd /* Declare that we are sender. */ 1014218043Spjd proto_send(res->hr_ctrl, NULL, 0); 1015204076Spjd res->hr_workerpid = pid; 1016204076Spjd return; 1017204076Spjd } 1018211977Spjd 1019211984Spjd gres = res; 1020260007Strociny res->output_status_aux = output_status_aux; 1021218043Spjd mode = pjdlog_mode_get(); 1022219482Strociny debuglevel = pjdlog_debug_get(); 1023211984Spjd 1024218043Spjd /* Declare that we are sender. */ 1025218043Spjd proto_send(res->hr_event, NULL, 0); 1026218218Spjd proto_send(res->hr_conn, NULL, 0); 1027218043Spjd /* Declare that we are receiver. 
*/ 1028218043Spjd proto_recv(res->hr_ctrl, NULL, 0); 1029218043Spjd descriptors_cleanup(res); 1030204076Spjd 1031218045Spjd descriptors_assert(res, mode); 1032218045Spjd 1033218043Spjd pjdlog_init(mode); 1034219482Strociny pjdlog_debug_set(debuglevel); 1035218043Spjd pjdlog_prefix_set("[%s] (%s) ", res->hr_name, role2str(res->hr_role)); 1036220005Spjd setproctitle("%s (%s)", res->hr_name, role2str(res->hr_role)); 1037204076Spjd 1038204076Spjd init_local(res); 1039213007Spjd init_ggate(res); 1040213007Spjd init_environment(res); 1041217784Spjd 1042221899Spjd if (drop_privs(res) != 0) { 1043218049Spjd cleanup(res); 1044218049Spjd exit(EX_CONFIG); 1045218049Spjd } 1046218214Spjd pjdlog_info("Privileges successfully dropped."); 1047218049Spjd 1048213007Spjd /* 1049213530Spjd * Create the guard thread first, so we can handle signals from the 1050231017Strociny * very beginning. 1051213530Spjd */ 1052213530Spjd error = pthread_create(&td, NULL, guard_thread, res); 1053218138Spjd PJDLOG_ASSERT(error == 0); 1054213530Spjd /* 1055213007Spjd * Create the control thread before sending any event to the parent, 1056213007Spjd * as we can deadlock when parent sends control request to worker, 1057213007Spjd * but worker has no control thread started yet, so parent waits. 1058213007Spjd * In the meantime worker sends an event to the parent, but parent 1059213007Spjd * is unable to handle the event, because it waits for control 1060213007Spjd * request response. 
1061213007Spjd */ 1062213007Spjd error = pthread_create(&td, NULL, ctrl_thread, res); 1063218138Spjd PJDLOG_ASSERT(error == 0); 1064220898Spjd if (real_remote(res)) { 1065220898Spjd error = init_remote(res, NULL, NULL); 1066220898Spjd if (error == 0) { 1067220898Spjd sync_start(); 1068220898Spjd } else if (error == EBUSY) { 1069220898Spjd time_t start = time(NULL); 1070220898Spjd 1071220898Spjd pjdlog_warning("Waiting for remote node to become %s for %ds.", 1072220898Spjd role2str(HAST_ROLE_SECONDARY), 1073220898Spjd res->hr_timeout); 1074220898Spjd for (;;) { 1075220898Spjd sleep(1); 1076220898Spjd error = init_remote(res, NULL, NULL); 1077220898Spjd if (error != EBUSY) 1078220898Spjd break; 1079220898Spjd if (time(NULL) > start + res->hr_timeout) 1080220898Spjd break; 1081220898Spjd } 1082220898Spjd if (error == EBUSY) { 1083220898Spjd pjdlog_warning("Remote node is still %s, starting anyway.", 1084220898Spjd role2str(HAST_ROLE_PRIMARY)); 1085220898Spjd } 1086220898Spjd } 1087220898Spjd } 1088204076Spjd error = pthread_create(&td, NULL, ggate_recv_thread, res); 1089218138Spjd PJDLOG_ASSERT(error == 0); 1090204076Spjd error = pthread_create(&td, NULL, local_send_thread, res); 1091218138Spjd PJDLOG_ASSERT(error == 0); 1092204076Spjd error = pthread_create(&td, NULL, remote_send_thread, res); 1093218138Spjd PJDLOG_ASSERT(error == 0); 1094204076Spjd error = pthread_create(&td, NULL, remote_recv_thread, res); 1095218138Spjd PJDLOG_ASSERT(error == 0); 1096204076Spjd error = pthread_create(&td, NULL, ggate_send_thread, res); 1097218138Spjd PJDLOG_ASSERT(error == 0); 1098220898Spjd fullystarted = true; 1099213530Spjd (void)sync_thread(res); 1100204076Spjd} 1101204076Spjd 1102204076Spjdstatic void 1103249236Strocinyreqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio, 1104249236Strociny const char *fmt, ...) 
1105204076Spjd{ 1106204076Spjd char msg[1024]; 1107204076Spjd va_list ap; 1108204076Spjd 1109204076Spjd va_start(ap, fmt); 1110240269Strociny (void)vsnprintf(msg, sizeof(msg), fmt, ap); 1111204076Spjd va_end(ap); 1112240269Strociny switch (ggio->gctl_cmd) { 1113240269Strociny case BIO_READ: 1114240269Strociny (void)snprlcat(msg, sizeof(msg), "READ(%ju, %ju).", 1115249236Strociny (uintmax_t)ggio->gctl_offset, (uintmax_t)ggio->gctl_length); 1116240269Strociny break; 1117240269Strociny case BIO_DELETE: 1118240269Strociny (void)snprlcat(msg, sizeof(msg), "DELETE(%ju, %ju).", 1119249236Strociny (uintmax_t)ggio->gctl_offset, (uintmax_t)ggio->gctl_length); 1120240269Strociny break; 1121240269Strociny case BIO_FLUSH: 1122240269Strociny (void)snprlcat(msg, sizeof(msg), "FLUSH."); 1123240269Strociny break; 1124240269Strociny case BIO_WRITE: 1125240269Strociny (void)snprlcat(msg, sizeof(msg), "WRITE(%ju, %ju).", 1126249236Strociny (uintmax_t)ggio->gctl_offset, (uintmax_t)ggio->gctl_length); 1127240269Strociny break; 1128240269Strociny default: 1129240269Strociny (void)snprlcat(msg, sizeof(msg), "UNKNOWN(%u).", 1130240269Strociny (unsigned int)ggio->gctl_cmd); 1131240269Strociny break; 1132204076Spjd } 1133204076Spjd pjdlog_common(loglevel, debuglevel, -1, "%s", msg); 1134204076Spjd} 1135204076Spjd 1136204076Spjdstatic void 1137204076Spjdremote_close(struct hast_resource *res, int ncomp) 1138204076Spjd{ 1139204076Spjd 1140204076Spjd rw_wlock(&hio_remote_lock[ncomp]); 1141204076Spjd /* 1142229509Strociny * Check for a race between dropping rlock and acquiring wlock - 1143204076Spjd * another thread can close connection in-between. 
1144204076Spjd */ 1145204076Spjd if (!ISCONNECTED(res, ncomp)) { 1146218138Spjd PJDLOG_ASSERT(res->hr_remotein == NULL); 1147218138Spjd PJDLOG_ASSERT(res->hr_remoteout == NULL); 1148204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1149204076Spjd return; 1150204076Spjd } 1151204076Spjd 1152218138Spjd PJDLOG_ASSERT(res->hr_remotein != NULL); 1153218138Spjd PJDLOG_ASSERT(res->hr_remoteout != NULL); 1154204076Spjd 1155211881Spjd pjdlog_debug(2, "Closing incoming connection to %s.", 1156204076Spjd res->hr_remoteaddr); 1157204076Spjd proto_close(res->hr_remotein); 1158204076Spjd res->hr_remotein = NULL; 1159211881Spjd pjdlog_debug(2, "Closing outgoing connection to %s.", 1160204076Spjd res->hr_remoteaddr); 1161204076Spjd proto_close(res->hr_remoteout); 1162204076Spjd res->hr_remoteout = NULL; 1163204076Spjd 1164204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1165204076Spjd 1166211881Spjd pjdlog_warning("Disconnected from %s.", res->hr_remoteaddr); 1167211881Spjd 1168204076Spjd /* 1169204076Spjd * Stop synchronization if in-progress. 1170204076Spjd */ 1171211878Spjd sync_stop(); 1172211984Spjd 1173212038Spjd event_send(res, EVENT_DISCONNECT); 1174204076Spjd} 1175204076Spjd 1176204076Spjd/* 1177229509Strociny * Acknowledge write completion to the kernel, but don't update activemap yet. 1178229509Strociny */ 1179229509Strocinystatic void 1180229509Strocinywrite_complete(struct hast_resource *res, struct hio *hio) 1181229509Strociny{ 1182229509Strociny struct g_gate_ctl_io *ggio; 1183229509Strociny unsigned int ncomp; 1184229509Strociny 1185229509Strociny PJDLOG_ASSERT(!hio->hio_done); 1186229509Strociny 1187229509Strociny ggio = &hio->hio_ggio; 1188229509Strociny PJDLOG_ASSERT(ggio->gctl_cmd == BIO_WRITE); 1189229509Strociny 1190229509Strociny /* 1191229509Strociny * Bump local count if this is first write after 1192229509Strociny * connection failure with remote node. 
1193229509Strociny */ 1194229509Strociny ncomp = 1; 1195229509Strociny rw_rlock(&hio_remote_lock[ncomp]); 1196229509Strociny if (!ISCONNECTED(res, ncomp)) { 1197229509Strociny mtx_lock(&metadata_lock); 1198229509Strociny if (res->hr_primary_localcnt == res->hr_secondary_remotecnt) { 1199229509Strociny res->hr_primary_localcnt++; 1200229509Strociny pjdlog_debug(1, "Increasing localcnt to %ju.", 1201229509Strociny (uintmax_t)res->hr_primary_localcnt); 1202229509Strociny (void)metadata_write(res); 1203229509Strociny } 1204229509Strociny mtx_unlock(&metadata_lock); 1205229509Strociny } 1206229509Strociny rw_unlock(&hio_remote_lock[ncomp]); 1207231017Strociny if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) == -1) 1208229509Strociny primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed"); 1209229509Strociny hio->hio_done = true; 1210229509Strociny} 1211229509Strociny 1212229509Strociny/* 1213204076Spjd * Thread receives ggate I/O requests from the kernel and passes them to 1214204076Spjd * appropriate threads: 1215204076Spjd * WRITE - always goes to both local_send and remote_send threads 1216204076Spjd * READ (when the block is up-to-date on local component) - 1217204076Spjd * only local_send thread 1218204076Spjd * READ (when the block isn't up-to-date on local component) - 1219204076Spjd * only remote_send thread 1220204076Spjd * DELETE - always goes to both local_send and remote_send threads 1221204076Spjd * FLUSH - always goes to both local_send and remote_send threads 1222204076Spjd */ 1223204076Spjdstatic void * 1224204076Spjdggate_recv_thread(void *arg) 1225204076Spjd{ 1226204076Spjd struct hast_resource *res = arg; 1227204076Spjd struct g_gate_ctl_io *ggio; 1228204076Spjd struct hio *hio; 1229204076Spjd unsigned int ii, ncomp, ncomps; 1230204076Spjd int error; 1231204076Spjd 1232204076Spjd for (;;) { 1233204076Spjd pjdlog_debug(2, "ggate_recv: Taking free request."); 1234204076Spjd QUEUE_TAKE2(hio, free); 1235204076Spjd pjdlog_debug(2, "ggate_recv: (%p) Got free 
request.", hio); 1236204076Spjd ggio = &hio->hio_ggio; 1237204076Spjd ggio->gctl_unit = res->hr_ggateunit; 1238204076Spjd ggio->gctl_length = MAXPHYS; 1239204076Spjd ggio->gctl_error = 0; 1240229509Strociny hio->hio_done = false; 1241229509Strociny hio->hio_replication = res->hr_replication; 1242204076Spjd pjdlog_debug(2, 1243204076Spjd "ggate_recv: (%p) Waiting for request from the kernel.", 1244204076Spjd hio); 1245231017Strociny if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) == -1) { 1246204076Spjd if (sigexit_received) 1247204076Spjd pthread_exit(NULL); 1248204076Spjd primary_exit(EX_OSERR, "G_GATE_CMD_START failed"); 1249204076Spjd } 1250204076Spjd error = ggio->gctl_error; 1251204076Spjd switch (error) { 1252204076Spjd case 0: 1253204076Spjd break; 1254204076Spjd case ECANCELED: 1255204076Spjd /* Exit gracefully. */ 1256204076Spjd if (!sigexit_received) { 1257204076Spjd pjdlog_debug(2, 1258204076Spjd "ggate_recv: (%p) Received cancel from the kernel.", 1259204076Spjd hio); 1260204076Spjd pjdlog_info("Received cancel from the kernel, exiting."); 1261204076Spjd } 1262204076Spjd pthread_exit(NULL); 1263204076Spjd case ENOMEM: 1264204076Spjd /* 1265204076Spjd * Buffer too small? Impossible, we allocate MAXPHYS 1266204076Spjd * bytes - request can't be bigger than that. 1267204076Spjd */ 1268204076Spjd /* FALLTHROUGH */ 1269204076Spjd case ENXIO: 1270204076Spjd default: 1271204076Spjd primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.", 1272204076Spjd strerror(error)); 1273204076Spjd } 1274229509Strociny 1275229509Strociny ncomp = 0; 1276229509Strociny ncomps = HAST_NCOMPONENTS; 1277229509Strociny 1278204076Spjd for (ii = 0; ii < ncomps; ii++) 1279204076Spjd hio->hio_errors[ii] = EINVAL; 1280204076Spjd reqlog(LOG_DEBUG, 2, ggio, 1281204076Spjd "ggate_recv: (%p) Request received from the kernel: ", 1282204076Spjd hio); 1283229509Strociny 1284204076Spjd /* 1285204076Spjd * Inform all components about new write request. 
1286204076Spjd * For read request prefer local component unless the given 1287204076Spjd * range is out-of-date, then use remote component. 1288204076Spjd */ 1289204076Spjd switch (ggio->gctl_cmd) { 1290204076Spjd case BIO_READ: 1291222228Spjd res->hr_stat_read++; 1292229509Strociny ncomps = 1; 1293204076Spjd mtx_lock(&metadata_lock); 1294204076Spjd if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF || 1295204076Spjd res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1296204076Spjd /* 1297204076Spjd * This range is up-to-date on local component, 1298204076Spjd * so handle request locally. 1299204076Spjd */ 1300204076Spjd /* Local component is 0 for now. */ 1301204076Spjd ncomp = 0; 1302204076Spjd } else /* if (res->hr_syncsrc == 1303204076Spjd HAST_SYNCSRC_SECONDARY) */ { 1304218138Spjd PJDLOG_ASSERT(res->hr_syncsrc == 1305204076Spjd HAST_SYNCSRC_SECONDARY); 1306204076Spjd /* 1307204076Spjd * This range is out-of-date on local component, 1308204076Spjd * so send request to the remote node. 1309204076Spjd */ 1310204076Spjd /* Remote component is 1 for now. */ 1311204076Spjd ncomp = 1; 1312204076Spjd } 1313204076Spjd mtx_unlock(&metadata_lock); 1314204076Spjd break; 1315204076Spjd case BIO_WRITE: 1316222228Spjd res->hr_stat_write++; 1317229509Strociny if (res->hr_resuid == 0 && 1318229509Strociny res->hr_primary_localcnt == 0) { 1319229509Strociny /* This is first write. 
*/ 1320219844Spjd res->hr_primary_localcnt = 1; 1321214284Spjd } 1322204076Spjd for (;;) { 1323204076Spjd mtx_lock(&range_lock); 1324204076Spjd if (rangelock_islocked(range_sync, 1325204076Spjd ggio->gctl_offset, ggio->gctl_length)) { 1326204076Spjd pjdlog_debug(2, 1327204076Spjd "regular: Range offset=%jd length=%zu locked.", 1328204076Spjd (intmax_t)ggio->gctl_offset, 1329204076Spjd (size_t)ggio->gctl_length); 1330204076Spjd range_regular_wait = true; 1331204076Spjd cv_wait(&range_regular_cond, &range_lock); 1332204076Spjd range_regular_wait = false; 1333204076Spjd mtx_unlock(&range_lock); 1334204076Spjd continue; 1335204076Spjd } 1336204076Spjd if (rangelock_add(range_regular, 1337231017Strociny ggio->gctl_offset, ggio->gctl_length) == -1) { 1338204076Spjd mtx_unlock(&range_lock); 1339204076Spjd pjdlog_debug(2, 1340204076Spjd "regular: Range offset=%jd length=%zu is already locked, waiting.", 1341204076Spjd (intmax_t)ggio->gctl_offset, 1342204076Spjd (size_t)ggio->gctl_length); 1343204076Spjd sleep(1); 1344204076Spjd continue; 1345204076Spjd } 1346204076Spjd mtx_unlock(&range_lock); 1347204076Spjd break; 1348204076Spjd } 1349204076Spjd mtx_lock(&res->hr_amp_lock); 1350204076Spjd if (activemap_write_start(res->hr_amp, 1351204076Spjd ggio->gctl_offset, ggio->gctl_length)) { 1352222228Spjd res->hr_stat_activemap_update++; 1353204076Spjd (void)hast_activemap_flush(res); 1354256027Strociny } else { 1355256027Strociny mtx_unlock(&res->hr_amp_lock); 1356204076Spjd } 1357260007Strociny if (ISMEMSYNC(hio)) { 1358260007Strociny hio->hio_memsyncacked = false; 1359260007Strociny hio->hio_writecount = ncomps; 1360260007Strociny } 1361229509Strociny break; 1362204076Spjd case BIO_DELETE: 1363229509Strociny res->hr_stat_delete++; 1364229509Strociny break; 1365204076Spjd case BIO_FLUSH: 1366229509Strociny res->hr_stat_flush++; 1367204076Spjd break; 1368204076Spjd } 1369229509Strociny pjdlog_debug(2, 1370229509Strociny "ggate_recv: (%p) Moving request to the send queues.", hio); 
1371249236Strociny hio->hio_countdown = ncomps; 1372249236Strociny for (ii = ncomp; ii < ncomps; ii++) 1373229509Strociny QUEUE_INSERT1(hio, send, ii); 1374204076Spjd } 1375204076Spjd /* NOTREACHED */ 1376204076Spjd return (NULL); 1377204076Spjd} 1378204076Spjd 1379204076Spjd/* 1380204076Spjd * Thread reads from or writes to local component. 1381204076Spjd * If local read fails, it redirects it to remote_send thread. 1382204076Spjd */ 1383204076Spjdstatic void * 1384204076Spjdlocal_send_thread(void *arg) 1385204076Spjd{ 1386204076Spjd struct hast_resource *res = arg; 1387204076Spjd struct g_gate_ctl_io *ggio; 1388204076Spjd struct hio *hio; 1389204076Spjd unsigned int ncomp, rncomp; 1390204076Spjd ssize_t ret; 1391204076Spjd 1392204076Spjd /* Local component is 0 for now. */ 1393204076Spjd ncomp = 0; 1394204076Spjd /* Remote component is 1 for now. */ 1395204076Spjd rncomp = 1; 1396204076Spjd 1397204076Spjd for (;;) { 1398204076Spjd pjdlog_debug(2, "local_send: Taking request."); 1399214692Spjd QUEUE_TAKE1(hio, send, ncomp, 0); 1400204076Spjd pjdlog_debug(2, "local_send: (%p) Got request.", hio); 1401204076Spjd ggio = &hio->hio_ggio; 1402204076Spjd switch (ggio->gctl_cmd) { 1403204076Spjd case BIO_READ: 1404204076Spjd ret = pread(res->hr_localfd, ggio->gctl_data, 1405204076Spjd ggio->gctl_length, 1406204076Spjd ggio->gctl_offset + res->hr_localoff); 1407204076Spjd if (ret == ggio->gctl_length) 1408204076Spjd hio->hio_errors[ncomp] = 0; 1409222467Strociny else if (!ISSYNCREQ(hio)) { 1410204076Spjd /* 1411204076Spjd * If READ failed, try to read from remote node. 1412204076Spjd */ 1413231017Strociny if (ret == -1) { 1414216479Spjd reqlog(LOG_WARNING, 0, ggio, 1415216479Spjd "Local request failed (%s), trying remote node. ", 1416216479Spjd strerror(errno)); 1417216479Spjd } else if (ret != ggio->gctl_length) { 1418216479Spjd reqlog(LOG_WARNING, 0, ggio, 1419216479Spjd "Local request failed (%zd != %jd), trying remote node. 
", 1420216494Spjd ret, (intmax_t)ggio->gctl_length); 1421216479Spjd } 1422204076Spjd QUEUE_INSERT1(hio, send, rncomp); 1423204076Spjd continue; 1424204076Spjd } 1425204076Spjd break; 1426204076Spjd case BIO_WRITE: 1427204076Spjd ret = pwrite(res->hr_localfd, ggio->gctl_data, 1428204076Spjd ggio->gctl_length, 1429204076Spjd ggio->gctl_offset + res->hr_localoff); 1430231017Strociny if (ret == -1) { 1431204076Spjd hio->hio_errors[ncomp] = errno; 1432216479Spjd reqlog(LOG_WARNING, 0, ggio, 1433216479Spjd "Local request failed (%s): ", 1434216479Spjd strerror(errno)); 1435216479Spjd } else if (ret != ggio->gctl_length) { 1436204076Spjd hio->hio_errors[ncomp] = EIO; 1437216479Spjd reqlog(LOG_WARNING, 0, ggio, 1438216479Spjd "Local request failed (%zd != %jd): ", 1439216494Spjd ret, (intmax_t)ggio->gctl_length); 1440216479Spjd } else { 1441204076Spjd hio->hio_errors[ncomp] = 0; 1442260007Strociny if (ISASYNC(hio)) { 1443229509Strociny ggio->gctl_error = 0; 1444229509Strociny write_complete(res, hio); 1445229509Strociny } 1446216479Spjd } 1447204076Spjd break; 1448204076Spjd case BIO_DELETE: 1449204076Spjd ret = g_delete(res->hr_localfd, 1450204076Spjd ggio->gctl_offset + res->hr_localoff, 1451204076Spjd ggio->gctl_length); 1452231017Strociny if (ret == -1) { 1453204076Spjd hio->hio_errors[ncomp] = errno; 1454216479Spjd reqlog(LOG_WARNING, 0, ggio, 1455216479Spjd "Local request failed (%s): ", 1456216479Spjd strerror(errno)); 1457216479Spjd } else { 1458204076Spjd hio->hio_errors[ncomp] = 0; 1459216479Spjd } 1460204076Spjd break; 1461204076Spjd case BIO_FLUSH: 1462229509Strociny if (!res->hr_localflush) { 1463229509Strociny ret = -1; 1464229509Strociny errno = EOPNOTSUPP; 1465229509Strociny break; 1466229509Strociny } 1467204076Spjd ret = g_flush(res->hr_localfd); 1468231017Strociny if (ret == -1) { 1469229509Strociny if (errno == EOPNOTSUPP) 1470229509Strociny res->hr_localflush = false; 1471204076Spjd hio->hio_errors[ncomp] = errno; 1472216479Spjd reqlog(LOG_WARNING, 0, 
ggio, 1473216479Spjd "Local request failed (%s): ", 1474216479Spjd strerror(errno)); 1475216479Spjd } else { 1476204076Spjd hio->hio_errors[ncomp] = 0; 1477216479Spjd } 1478204076Spjd break; 1479204076Spjd } 1480260007Strociny if (ISMEMSYNCWRITE(hio)) { 1481260007Strociny if (refcnt_release(&hio->hio_writecount) == 0) { 1482260007Strociny write_complete(res, hio); 1483249236Strociny } 1484249236Strociny } 1485260007Strociny if (refcnt_release(&hio->hio_countdown) > 0) 1486260007Strociny continue; 1487229509Strociny if (ISSYNCREQ(hio)) { 1488229509Strociny mtx_lock(&sync_lock); 1489229509Strociny SYNCREQDONE(hio); 1490229509Strociny mtx_unlock(&sync_lock); 1491229509Strociny cv_signal(&sync_cond); 1492229509Strociny } else { 1493229509Strociny pjdlog_debug(2, 1494229509Strociny "local_send: (%p) Moving request to the done queue.", 1495229509Strociny hio); 1496229509Strociny QUEUE_INSERT2(hio, done); 1497204076Spjd } 1498204076Spjd } 1499204076Spjd /* NOTREACHED */ 1500204076Spjd return (NULL); 1501204076Spjd} 1502204076Spjd 1503214692Spjdstatic void 1504214692Spjdkeepalive_send(struct hast_resource *res, unsigned int ncomp) 1505214692Spjd{ 1506214692Spjd struct nv *nv; 1507214692Spjd 1508218217Spjd rw_rlock(&hio_remote_lock[ncomp]); 1509218217Spjd 1510218217Spjd if (!ISCONNECTED(res, ncomp)) { 1511218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1512214692Spjd return; 1513218217Spjd } 1514219864Spjd 1515218138Spjd PJDLOG_ASSERT(res->hr_remotein != NULL); 1516218138Spjd PJDLOG_ASSERT(res->hr_remoteout != NULL); 1517214692Spjd 1518214692Spjd nv = nv_alloc(); 1519214692Spjd nv_add_uint8(nv, HIO_KEEPALIVE, "cmd"); 1520214692Spjd if (nv_error(nv) != 0) { 1521218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1522214692Spjd nv_free(nv); 1523214692Spjd pjdlog_debug(1, 1524214692Spjd "keepalive_send: Unable to prepare header to send."); 1525214692Spjd return; 1526214692Spjd } 1527231017Strociny if (hast_proto_send(res, res->hr_remoteout, nv, NULL, 0) == -1) { 1528218217Spjd 
rw_unlock(&hio_remote_lock[ncomp]); 1529214692Spjd pjdlog_common(LOG_DEBUG, 1, errno, 1530214692Spjd "keepalive_send: Unable to send request"); 1531214692Spjd nv_free(nv); 1532214692Spjd remote_close(res, ncomp); 1533214692Spjd return; 1534214692Spjd } 1535218217Spjd 1536218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1537214692Spjd nv_free(nv); 1538214692Spjd pjdlog_debug(2, "keepalive_send: Request sent."); 1539214692Spjd} 1540214692Spjd 1541204076Spjd/* 1542204076Spjd * Thread sends request to secondary node. 1543204076Spjd */ 1544204076Spjdstatic void * 1545204076Spjdremote_send_thread(void *arg) 1546204076Spjd{ 1547204076Spjd struct hast_resource *res = arg; 1548204076Spjd struct g_gate_ctl_io *ggio; 1549214692Spjd time_t lastcheck, now; 1550204076Spjd struct hio *hio; 1551204076Spjd struct nv *nv; 1552204076Spjd unsigned int ncomp; 1553204076Spjd bool wakeup; 1554204076Spjd uint64_t offset, length; 1555204076Spjd uint8_t cmd; 1556204076Spjd void *data; 1557204076Spjd 1558204076Spjd /* Remote component is 1 for now. 
*/ 1559204076Spjd ncomp = 1; 1560219864Spjd lastcheck = time(NULL); 1561204076Spjd 1562204076Spjd for (;;) { 1563204076Spjd pjdlog_debug(2, "remote_send: Taking request."); 1564219721Strociny QUEUE_TAKE1(hio, send, ncomp, HAST_KEEPALIVE); 1565214692Spjd if (hio == NULL) { 1566214692Spjd now = time(NULL); 1567219721Strociny if (lastcheck + HAST_KEEPALIVE <= now) { 1568214692Spjd keepalive_send(res, ncomp); 1569214692Spjd lastcheck = now; 1570214692Spjd } 1571214692Spjd continue; 1572214692Spjd } 1573204076Spjd pjdlog_debug(2, "remote_send: (%p) Got request.", hio); 1574204076Spjd ggio = &hio->hio_ggio; 1575204076Spjd switch (ggio->gctl_cmd) { 1576204076Spjd case BIO_READ: 1577204076Spjd cmd = HIO_READ; 1578204076Spjd data = NULL; 1579204076Spjd offset = ggio->gctl_offset; 1580204076Spjd length = ggio->gctl_length; 1581204076Spjd break; 1582204076Spjd case BIO_WRITE: 1583204076Spjd cmd = HIO_WRITE; 1584204076Spjd data = ggio->gctl_data; 1585204076Spjd offset = ggio->gctl_offset; 1586204076Spjd length = ggio->gctl_length; 1587204076Spjd break; 1588204076Spjd case BIO_DELETE: 1589204076Spjd cmd = HIO_DELETE; 1590204076Spjd data = NULL; 1591204076Spjd offset = ggio->gctl_offset; 1592204076Spjd length = ggio->gctl_length; 1593204076Spjd break; 1594204076Spjd case BIO_FLUSH: 1595204076Spjd cmd = HIO_FLUSH; 1596204076Spjd data = NULL; 1597204076Spjd offset = 0; 1598204076Spjd length = 0; 1599204076Spjd break; 1600204076Spjd default: 1601229509Strociny PJDLOG_ABORT("invalid condition"); 1602204076Spjd } 1603204076Spjd nv = nv_alloc(); 1604204076Spjd nv_add_uint8(nv, cmd, "cmd"); 1605204076Spjd nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq"); 1606204076Spjd nv_add_uint64(nv, offset, "offset"); 1607204076Spjd nv_add_uint64(nv, length, "length"); 1608260007Strociny if (ISMEMSYNCWRITE(hio)) 1609249236Strociny nv_add_uint8(nv, 1, "memsync"); 1610204076Spjd if (nv_error(nv) != 0) { 1611204076Spjd hio->hio_errors[ncomp] = nv_error(nv); 1612204076Spjd pjdlog_debug(2, 
1613204076Spjd "remote_send: (%p) Unable to prepare header to send.", 1614204076Spjd hio); 1615204076Spjd reqlog(LOG_ERR, 0, ggio, 1616204076Spjd "Unable to prepare header to send (%s): ", 1617204076Spjd strerror(nv_error(nv))); 1618204076Spjd /* Move failed request immediately to the done queue. */ 1619204076Spjd goto done_queue; 1620204076Spjd } 1621204076Spjd /* 1622204076Spjd * Protect connection from disappearing. 1623204076Spjd */ 1624204076Spjd rw_rlock(&hio_remote_lock[ncomp]); 1625204076Spjd if (!ISCONNECTED(res, ncomp)) { 1626204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1627204076Spjd hio->hio_errors[ncomp] = ENOTCONN; 1628204076Spjd goto done_queue; 1629204076Spjd } 1630204076Spjd /* 1631204076Spjd * Move the request to recv queue before sending it, because 1632204076Spjd * in different order we can get reply before we move request 1633204076Spjd * to recv queue. 1634204076Spjd */ 1635229509Strociny pjdlog_debug(2, 1636229509Strociny "remote_send: (%p) Moving request to the recv queue.", 1637229509Strociny hio); 1638204076Spjd mtx_lock(&hio_recv_list_lock[ncomp]); 1639204076Spjd wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]); 1640204076Spjd TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]); 1641260007Strociny hio_recv_list_size[ncomp]++; 1642204076Spjd mtx_unlock(&hio_recv_list_lock[ncomp]); 1643204076Spjd if (hast_proto_send(res, res->hr_remoteout, nv, data, 1644231017Strociny data != NULL ? 
length : 0) == -1) { 1645204076Spjd hio->hio_errors[ncomp] = errno; 1646204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1647204076Spjd pjdlog_debug(2, 1648204076Spjd "remote_send: (%p) Unable to send request.", hio); 1649204076Spjd reqlog(LOG_ERR, 0, ggio, 1650204076Spjd "Unable to send request (%s): ", 1651204076Spjd strerror(hio->hio_errors[ncomp])); 1652211979Spjd remote_close(res, ncomp); 1653260007Strociny } else { 1654260007Strociny rw_unlock(&hio_remote_lock[ncomp]); 1655204076Spjd } 1656204076Spjd nv_free(nv); 1657204076Spjd if (wakeup) 1658204076Spjd cv_signal(&hio_recv_list_cond[ncomp]); 1659204076Spjd continue; 1660204076Spjddone_queue: 1661204076Spjd nv_free(nv); 1662204076Spjd if (ISSYNCREQ(hio)) { 1663249236Strociny if (refcnt_release(&hio->hio_countdown) > 0) 1664204076Spjd continue; 1665204076Spjd mtx_lock(&sync_lock); 1666204076Spjd SYNCREQDONE(hio); 1667204076Spjd mtx_unlock(&sync_lock); 1668204076Spjd cv_signal(&sync_cond); 1669204076Spjd continue; 1670204076Spjd } 1671204076Spjd if (ggio->gctl_cmd == BIO_WRITE) { 1672204076Spjd mtx_lock(&res->hr_amp_lock); 1673204076Spjd if (activemap_need_sync(res->hr_amp, ggio->gctl_offset, 1674204076Spjd ggio->gctl_length)) { 1675204076Spjd (void)hast_activemap_flush(res); 1676256027Strociny } else { 1677256027Strociny mtx_unlock(&res->hr_amp_lock); 1678204076Spjd } 1679260007Strociny if (ISMEMSYNCWRITE(hio)) { 1680260007Strociny if (refcnt_release(&hio->hio_writecount) == 0) { 1681260007Strociny if (hio->hio_errors[0] == 0) 1682260007Strociny write_complete(res, hio); 1683260007Strociny } 1684260007Strociny } 1685204076Spjd } 1686249236Strociny if (refcnt_release(&hio->hio_countdown) > 0) 1687204076Spjd continue; 1688204076Spjd pjdlog_debug(2, 1689204076Spjd "remote_send: (%p) Moving request to the done queue.", 1690204076Spjd hio); 1691204076Spjd QUEUE_INSERT2(hio, done); 1692204076Spjd } 1693204076Spjd /* NOTREACHED */ 1694204076Spjd return (NULL); 1695204076Spjd} 1696204076Spjd 1697204076Spjd/* 
1698204076Spjd * Thread receives answer from secondary node and passes it to ggate_send 1699204076Spjd * thread. 1700204076Spjd */ 1701204076Spjdstatic void * 1702204076Spjdremote_recv_thread(void *arg) 1703204076Spjd{ 1704204076Spjd struct hast_resource *res = arg; 1705204076Spjd struct g_gate_ctl_io *ggio; 1706204076Spjd struct hio *hio; 1707204076Spjd struct nv *nv; 1708204076Spjd unsigned int ncomp; 1709204076Spjd uint64_t seq; 1710249236Strociny bool memsyncack; 1711204076Spjd int error; 1712204076Spjd 1713204076Spjd /* Remote component is 1 for now. */ 1714204076Spjd ncomp = 1; 1715204076Spjd 1716204076Spjd for (;;) { 1717204076Spjd /* Wait until there is anything to receive. */ 1718204076Spjd mtx_lock(&hio_recv_list_lock[ncomp]); 1719204076Spjd while (TAILQ_EMPTY(&hio_recv_list[ncomp])) { 1720204076Spjd pjdlog_debug(2, "remote_recv: No requests, waiting."); 1721204076Spjd cv_wait(&hio_recv_list_cond[ncomp], 1722204076Spjd &hio_recv_list_lock[ncomp]); 1723204076Spjd } 1724204076Spjd mtx_unlock(&hio_recv_list_lock[ncomp]); 1725229509Strociny 1726249236Strociny memsyncack = false; 1727249236Strociny 1728204076Spjd rw_rlock(&hio_remote_lock[ncomp]); 1729204076Spjd if (!ISCONNECTED(res, ncomp)) { 1730204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1731204076Spjd /* 1732204076Spjd * Connection is dead, so move all pending requests to 1733204076Spjd * the done queue (one-by-one). 
1734204076Spjd */ 1735204076Spjd mtx_lock(&hio_recv_list_lock[ncomp]); 1736204076Spjd hio = TAILQ_FIRST(&hio_recv_list[ncomp]); 1737218138Spjd PJDLOG_ASSERT(hio != NULL); 1738204076Spjd TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1739204076Spjd hio_next[ncomp]); 1740260007Strociny hio_recv_list_size[ncomp]--; 1741204076Spjd mtx_unlock(&hio_recv_list_lock[ncomp]); 1742260007Strociny hio->hio_errors[ncomp] = ENOTCONN; 1743204076Spjd goto done_queue; 1744204076Spjd } 1745231017Strociny if (hast_proto_recv_hdr(res->hr_remotein, &nv) == -1) { 1746204076Spjd pjdlog_errno(LOG_ERR, 1747204076Spjd "Unable to receive reply header"); 1748204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1749204076Spjd remote_close(res, ncomp); 1750204076Spjd continue; 1751204076Spjd } 1752204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1753204076Spjd seq = nv_get_uint64(nv, "seq"); 1754204076Spjd if (seq == 0) { 1755204076Spjd pjdlog_error("Header contains no 'seq' field."); 1756204076Spjd nv_free(nv); 1757204076Spjd continue; 1758204076Spjd } 1759249236Strociny memsyncack = nv_exists(nv, "received"); 1760204076Spjd mtx_lock(&hio_recv_list_lock[ncomp]); 1761204076Spjd TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) { 1762204076Spjd if (hio->hio_ggio.gctl_seq == seq) { 1763204076Spjd TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1764204076Spjd hio_next[ncomp]); 1765260007Strociny hio_recv_list_size[ncomp]--; 1766204076Spjd break; 1767204076Spjd } 1768204076Spjd } 1769204076Spjd mtx_unlock(&hio_recv_list_lock[ncomp]); 1770204076Spjd if (hio == NULL) { 1771204076Spjd pjdlog_error("Found no request matching received 'seq' field (%ju).", 1772204076Spjd (uintmax_t)seq); 1773204076Spjd nv_free(nv); 1774204076Spjd continue; 1775204076Spjd } 1776229509Strociny ggio = &hio->hio_ggio; 1777204076Spjd error = nv_get_int16(nv, "error"); 1778204076Spjd if (error != 0) { 1779204076Spjd /* Request failed on remote side. 
*/ 1780216478Spjd hio->hio_errors[ncomp] = error; 1781229509Strociny reqlog(LOG_WARNING, 0, ggio, 1782216479Spjd "Remote request failed (%s): ", strerror(error)); 1783204076Spjd nv_free(nv); 1784204076Spjd goto done_queue; 1785204076Spjd } 1786204076Spjd switch (ggio->gctl_cmd) { 1787204076Spjd case BIO_READ: 1788204076Spjd rw_rlock(&hio_remote_lock[ncomp]); 1789204076Spjd if (!ISCONNECTED(res, ncomp)) { 1790204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1791204076Spjd nv_free(nv); 1792204076Spjd goto done_queue; 1793204076Spjd } 1794204076Spjd if (hast_proto_recv_data(res, res->hr_remotein, nv, 1795231017Strociny ggio->gctl_data, ggio->gctl_length) == -1) { 1796204076Spjd hio->hio_errors[ncomp] = errno; 1797204076Spjd pjdlog_errno(LOG_ERR, 1798204076Spjd "Unable to receive reply data"); 1799204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1800204076Spjd nv_free(nv); 1801204076Spjd remote_close(res, ncomp); 1802204076Spjd goto done_queue; 1803204076Spjd } 1804204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1805204076Spjd break; 1806204076Spjd case BIO_WRITE: 1807204076Spjd case BIO_DELETE: 1808204076Spjd case BIO_FLUSH: 1809204076Spjd break; 1810204076Spjd default: 1811229509Strociny PJDLOG_ABORT("invalid condition"); 1812204076Spjd } 1813204076Spjd hio->hio_errors[ncomp] = 0; 1814204076Spjd nv_free(nv); 1815204076Spjddone_queue: 1816260007Strociny if (ISMEMSYNCWRITE(hio)) { 1817260007Strociny if (!hio->hio_memsyncacked) { 1818260007Strociny PJDLOG_ASSERT(memsyncack || 1819260007Strociny hio->hio_errors[ncomp] != 0); 1820260007Strociny /* Remote ack arrived. 
*/ 1821260007Strociny if (refcnt_release(&hio->hio_writecount) == 0) { 1822249236Strociny if (hio->hio_errors[0] == 0) 1823249236Strociny write_complete(res, hio); 1824260007Strociny } 1825260007Strociny hio->hio_memsyncacked = true; 1826260007Strociny if (hio->hio_errors[ncomp] == 0) { 1827249236Strociny pjdlog_debug(2, 1828260007Strociny "remote_recv: (%p) Moving request " 1829260007Strociny "back to the recv queue.", hio); 1830249236Strociny mtx_lock(&hio_recv_list_lock[ncomp]); 1831249236Strociny TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], 1832249236Strociny hio, hio_next[ncomp]); 1833260007Strociny hio_recv_list_size[ncomp]++; 1834249236Strociny mtx_unlock(&hio_recv_list_lock[ncomp]); 1835260007Strociny continue; 1836249236Strociny } 1837260007Strociny } else { 1838260007Strociny PJDLOG_ASSERT(!memsyncack); 1839260007Strociny /* Remote final reply arrived. */ 1840249236Strociny } 1841249236Strociny } 1842260007Strociny if (refcnt_release(&hio->hio_countdown) > 0) 1843260007Strociny continue; 1844229509Strociny if (ISSYNCREQ(hio)) { 1845229509Strociny mtx_lock(&sync_lock); 1846229509Strociny SYNCREQDONE(hio); 1847229509Strociny mtx_unlock(&sync_lock); 1848229509Strociny cv_signal(&sync_cond); 1849229509Strociny } else { 1850229509Strociny pjdlog_debug(2, 1851229509Strociny "remote_recv: (%p) Moving request to the done queue.", 1852229509Strociny hio); 1853229509Strociny QUEUE_INSERT2(hio, done); 1854204076Spjd } 1855204076Spjd } 1856204076Spjd /* NOTREACHED */ 1857204076Spjd return (NULL); 1858204076Spjd} 1859204076Spjd 1860204076Spjd/* 1861204076Spjd * Thread sends answer to the kernel. 
1862204076Spjd */ 1863204076Spjdstatic void * 1864204076Spjdggate_send_thread(void *arg) 1865204076Spjd{ 1866204076Spjd struct hast_resource *res = arg; 1867204076Spjd struct g_gate_ctl_io *ggio; 1868204076Spjd struct hio *hio; 1869229509Strociny unsigned int ii, ncomps; 1870204076Spjd 1871204076Spjd ncomps = HAST_NCOMPONENTS; 1872204076Spjd 1873204076Spjd for (;;) { 1874204076Spjd pjdlog_debug(2, "ggate_send: Taking request."); 1875204076Spjd QUEUE_TAKE2(hio, done); 1876204076Spjd pjdlog_debug(2, "ggate_send: (%p) Got request.", hio); 1877204076Spjd ggio = &hio->hio_ggio; 1878204076Spjd for (ii = 0; ii < ncomps; ii++) { 1879204076Spjd if (hio->hio_errors[ii] == 0) { 1880204076Spjd /* 1881204076Spjd * One successful request is enough to declare 1882204076Spjd * success. 1883204076Spjd */ 1884204076Spjd ggio->gctl_error = 0; 1885204076Spjd break; 1886204076Spjd } 1887204076Spjd } 1888204076Spjd if (ii == ncomps) { 1889204076Spjd /* 1890204076Spjd * None of the requests were successful. 1891219879Strociny * Use the error from local component except the 1892219879Strociny * case when we did only remote request. 1893204076Spjd */ 1894219879Strociny if (ggio->gctl_cmd == BIO_READ && 1895219879Strociny res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) 1896219879Strociny ggio->gctl_error = hio->hio_errors[1]; 1897219879Strociny else 1898219879Strociny ggio->gctl_error = hio->hio_errors[0]; 1899204076Spjd } 1900204076Spjd if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) { 1901204076Spjd mtx_lock(&res->hr_amp_lock); 1902223655Strociny if (activemap_write_complete(res->hr_amp, 1903223974Strociny ggio->gctl_offset, ggio->gctl_length)) { 1904223655Strociny res->hr_stat_activemap_update++; 1905223655Strociny (void)hast_activemap_flush(res); 1906256027Strociny } else { 1907256027Strociny mtx_unlock(&res->hr_amp_lock); 1908223655Strociny } 1909204076Spjd } 1910204076Spjd if (ggio->gctl_cmd == BIO_WRITE) { 1911204076Spjd /* 1912204076Spjd * Unlock range we locked. 
1913204076Spjd */ 1914204076Spjd mtx_lock(&range_lock); 1915204076Spjd rangelock_del(range_regular, ggio->gctl_offset, 1916204076Spjd ggio->gctl_length); 1917204076Spjd if (range_sync_wait) 1918204076Spjd cv_signal(&range_sync_cond); 1919204076Spjd mtx_unlock(&range_lock); 1920229509Strociny if (!hio->hio_done) 1921229509Strociny write_complete(res, hio); 1922229509Strociny } else { 1923231017Strociny if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) == -1) { 1924229509Strociny primary_exit(EX_OSERR, 1925229509Strociny "G_GATE_CMD_DONE failed"); 1926204076Spjd } 1927204076Spjd } 1928247866Strociny if (hio->hio_errors[0]) { 1929247866Strociny switch (ggio->gctl_cmd) { 1930247866Strociny case BIO_READ: 1931247866Strociny res->hr_stat_read_error++; 1932247866Strociny break; 1933247866Strociny case BIO_WRITE: 1934247866Strociny res->hr_stat_write_error++; 1935247866Strociny break; 1936247866Strociny case BIO_DELETE: 1937247866Strociny res->hr_stat_delete_error++; 1938247866Strociny break; 1939247866Strociny case BIO_FLUSH: 1940247866Strociny res->hr_stat_flush_error++; 1941247866Strociny break; 1942247866Strociny } 1943247866Strociny } 1944204076Spjd pjdlog_debug(2, 1945204076Spjd "ggate_send: (%p) Moving request to the free queue.", hio); 1946204076Spjd QUEUE_INSERT2(hio, free); 1947204076Spjd } 1948204076Spjd /* NOTREACHED */ 1949204076Spjd return (NULL); 1950204076Spjd} 1951204076Spjd 1952204076Spjd/* 1953204076Spjd * Thread synchronize local and remote components. 
1954204076Spjd */ 1955204076Spjdstatic void * 1956204076Spjdsync_thread(void *arg __unused) 1957204076Spjd{ 1958204076Spjd struct hast_resource *res = arg; 1959204076Spjd struct hio *hio; 1960204076Spjd struct g_gate_ctl_io *ggio; 1961219372Spjd struct timeval tstart, tend, tdiff; 1962204076Spjd unsigned int ii, ncomp, ncomps; 1963204076Spjd off_t offset, length, synced; 1964240269Strociny bool dorewind, directreads; 1965204076Spjd int syncext; 1966204076Spjd 1967204076Spjd ncomps = HAST_NCOMPONENTS; 1968204076Spjd dorewind = true; 1969211897Spjd synced = 0; 1970211897Spjd offset = -1; 1971240269Strociny directreads = false; 1972204076Spjd 1973204076Spjd for (;;) { 1974204076Spjd mtx_lock(&sync_lock); 1975211897Spjd if (offset >= 0 && !sync_inprogress) { 1976219372Spjd gettimeofday(&tend, NULL); 1977219372Spjd timersub(&tend, &tstart, &tdiff); 1978219372Spjd pjdlog_info("Synchronization interrupted after %#.0T. " 1979219372Spjd "%NB synchronized so far.", &tdiff, 1980211879Spjd (intmax_t)synced); 1981212038Spjd event_send(res, EVENT_SYNCINTR); 1982211879Spjd } 1983204076Spjd while (!sync_inprogress) { 1984204076Spjd dorewind = true; 1985204076Spjd synced = 0; 1986204076Spjd cv_wait(&sync_cond, &sync_lock); 1987204076Spjd } 1988204076Spjd mtx_unlock(&sync_lock); 1989204076Spjd /* 1990204076Spjd * Obtain offset at which we should synchronize. 1991204076Spjd * Rewind synchronization if needed. 1992204076Spjd */ 1993204076Spjd mtx_lock(&res->hr_amp_lock); 1994204076Spjd if (dorewind) 1995204076Spjd activemap_sync_rewind(res->hr_amp); 1996204076Spjd offset = activemap_sync_offset(res->hr_amp, &length, &syncext); 1997204076Spjd if (syncext != -1) { 1998204076Spjd /* 1999204076Spjd * We synchronized entire syncext extent, we can mark 2000204076Spjd * it as clean now. 
2001204076Spjd */ 2002204076Spjd if (activemap_extent_complete(res->hr_amp, syncext)) 2003204076Spjd (void)hast_activemap_flush(res); 2004256027Strociny else 2005256027Strociny mtx_unlock(&res->hr_amp_lock); 2006256027Strociny } else { 2007256027Strociny mtx_unlock(&res->hr_amp_lock); 2008204076Spjd } 2009204076Spjd if (dorewind) { 2010204076Spjd dorewind = false; 2011231017Strociny if (offset == -1) 2012204076Spjd pjdlog_info("Nodes are in sync."); 2013204076Spjd else { 2014219372Spjd pjdlog_info("Synchronization started. %NB to go.", 2015219372Spjd (intmax_t)(res->hr_extentsize * 2016204076Spjd activemap_ndirty(res->hr_amp))); 2017212038Spjd event_send(res, EVENT_SYNCSTART); 2018219372Spjd gettimeofday(&tstart, NULL); 2019204076Spjd } 2020204076Spjd } 2021231017Strociny if (offset == -1) { 2022211878Spjd sync_stop(); 2023204076Spjd pjdlog_debug(1, "Nothing to synchronize."); 2024204076Spjd /* 2025204076Spjd * Synchronization complete, make both localcnt and 2026204076Spjd * remotecnt equal. 2027204076Spjd */ 2028204076Spjd ncomp = 1; 2029204076Spjd rw_rlock(&hio_remote_lock[ncomp]); 2030204076Spjd if (ISCONNECTED(res, ncomp)) { 2031204076Spjd if (synced > 0) { 2032219372Spjd int64_t bps; 2033219372Spjd 2034219372Spjd gettimeofday(&tend, NULL); 2035219372Spjd timersub(&tend, &tstart, &tdiff); 2036219372Spjd bps = (int64_t)((double)synced / 2037219372Spjd ((double)tdiff.tv_sec + 2038219372Spjd (double)tdiff.tv_usec / 1000000)); 2039204076Spjd pjdlog_info("Synchronization complete. 
" 2040219372Spjd "%NB synchronized in %#.0lT (%NB/sec).", 2041219372Spjd (intmax_t)synced, &tdiff, 2042219372Spjd (intmax_t)bps); 2043212038Spjd event_send(res, EVENT_SYNCDONE); 2044204076Spjd } 2045204076Spjd mtx_lock(&metadata_lock); 2046240269Strociny if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) 2047240269Strociny directreads = true; 2048204076Spjd res->hr_syncsrc = HAST_SYNCSRC_UNDEF; 2049204076Spjd res->hr_primary_localcnt = 2050219882Strociny res->hr_secondary_remotecnt; 2051219882Strociny res->hr_primary_remotecnt = 2052204076Spjd res->hr_secondary_localcnt; 2053204076Spjd pjdlog_debug(1, 2054204076Spjd "Setting localcnt to %ju and remotecnt to %ju.", 2055204076Spjd (uintmax_t)res->hr_primary_localcnt, 2056219882Strociny (uintmax_t)res->hr_primary_remotecnt); 2057204076Spjd (void)metadata_write(res); 2058204076Spjd mtx_unlock(&metadata_lock); 2059204076Spjd } 2060204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 2061240269Strociny if (directreads) { 2062240269Strociny directreads = false; 2063240269Strociny enable_direct_reads(res); 2064240269Strociny } 2065204076Spjd continue; 2066204076Spjd } 2067204076Spjd pjdlog_debug(2, "sync: Taking free request."); 2068204076Spjd QUEUE_TAKE2(hio, free); 2069204076Spjd pjdlog_debug(2, "sync: (%p) Got free request.", hio); 2070204076Spjd /* 2071204076Spjd * Lock the range we are going to synchronize. We don't want 2072204076Spjd * race where someone writes between our read and write. 
2073204076Spjd */ 2074204076Spjd for (;;) { 2075204076Spjd mtx_lock(&range_lock); 2076204076Spjd if (rangelock_islocked(range_regular, offset, length)) { 2077204076Spjd pjdlog_debug(2, 2078204076Spjd "sync: Range offset=%jd length=%jd locked.", 2079204076Spjd (intmax_t)offset, (intmax_t)length); 2080204076Spjd range_sync_wait = true; 2081204076Spjd cv_wait(&range_sync_cond, &range_lock); 2082204076Spjd range_sync_wait = false; 2083204076Spjd mtx_unlock(&range_lock); 2084204076Spjd continue; 2085204076Spjd } 2086231017Strociny if (rangelock_add(range_sync, offset, length) == -1) { 2087204076Spjd mtx_unlock(&range_lock); 2088204076Spjd pjdlog_debug(2, 2089204076Spjd "sync: Range offset=%jd length=%jd is already locked, waiting.", 2090204076Spjd (intmax_t)offset, (intmax_t)length); 2091204076Spjd sleep(1); 2092204076Spjd continue; 2093204076Spjd } 2094204076Spjd mtx_unlock(&range_lock); 2095204076Spjd break; 2096204076Spjd } 2097204076Spjd /* 2098204076Spjd * First read the data from synchronization source. 2099204076Spjd */ 2100204076Spjd SYNCREQ(hio); 2101204076Spjd ggio = &hio->hio_ggio; 2102204076Spjd ggio->gctl_cmd = BIO_READ; 2103204076Spjd ggio->gctl_offset = offset; 2104204076Spjd ggio->gctl_length = length; 2105204076Spjd ggio->gctl_error = 0; 2106229509Strociny hio->hio_done = false; 2107229509Strociny hio->hio_replication = res->hr_replication; 2108204076Spjd for (ii = 0; ii < ncomps; ii++) 2109204076Spjd hio->hio_errors[ii] = EINVAL; 2110204076Spjd reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 2111204076Spjd hio); 2112204076Spjd pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 2113204076Spjd hio); 2114204076Spjd mtx_lock(&metadata_lock); 2115204076Spjd if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 2116204076Spjd /* 2117204076Spjd * This range is up-to-date on local component, 2118204076Spjd * so handle request locally. 2119204076Spjd */ 2120204076Spjd /* Local component is 0 for now. 
*/ 2121204076Spjd ncomp = 0; 2122204076Spjd } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 2123218138Spjd PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 2124204076Spjd /* 2125204076Spjd * This range is out-of-date on local component, 2126204076Spjd * so send request to the remote node. 2127204076Spjd */ 2128204076Spjd /* Remote component is 1 for now. */ 2129204076Spjd ncomp = 1; 2130204076Spjd } 2131204076Spjd mtx_unlock(&metadata_lock); 2132249236Strociny hio->hio_countdown = 1; 2133204076Spjd QUEUE_INSERT1(hio, send, ncomp); 2134204076Spjd 2135204076Spjd /* 2136204076Spjd * Let's wait for READ to finish. 2137204076Spjd */ 2138204076Spjd mtx_lock(&sync_lock); 2139204076Spjd while (!ISSYNCREQDONE(hio)) 2140204076Spjd cv_wait(&sync_cond, &sync_lock); 2141204076Spjd mtx_unlock(&sync_lock); 2142204076Spjd 2143204076Spjd if (hio->hio_errors[ncomp] != 0) { 2144204076Spjd pjdlog_error("Unable to read synchronization data: %s.", 2145204076Spjd strerror(hio->hio_errors[ncomp])); 2146204076Spjd goto free_queue; 2147204076Spjd } 2148204076Spjd 2149204076Spjd /* 2150204076Spjd * We read the data from synchronization source, now write it 2151204076Spjd * to synchronization target. 2152204076Spjd */ 2153204076Spjd SYNCREQ(hio); 2154204076Spjd ggio->gctl_cmd = BIO_WRITE; 2155204076Spjd for (ii = 0; ii < ncomps; ii++) 2156204076Spjd hio->hio_errors[ii] = EINVAL; 2157204076Spjd reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 2158204076Spjd hio); 2159204076Spjd pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 2160204076Spjd hio); 2161204076Spjd mtx_lock(&metadata_lock); 2162204076Spjd if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 2163204076Spjd /* 2164204076Spjd * This range is up-to-date on local component, 2165204076Spjd * so we update remote component. 2166204076Spjd */ 2167204076Spjd /* Remote component is 1 for now. 
*/ 2168204076Spjd ncomp = 1; 2169204076Spjd } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 2170218138Spjd PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 2171204076Spjd /* 2172204076Spjd * This range is out-of-date on local component, 2173204076Spjd * so we update it. 2174204076Spjd */ 2175204076Spjd /* Local component is 0 for now. */ 2176204076Spjd ncomp = 0; 2177204076Spjd } 2178204076Spjd mtx_unlock(&metadata_lock); 2179204076Spjd 2180229509Strociny pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 2181204076Spjd hio); 2182249236Strociny hio->hio_countdown = 1; 2183204076Spjd QUEUE_INSERT1(hio, send, ncomp); 2184204076Spjd 2185204076Spjd /* 2186204076Spjd * Let's wait for WRITE to finish. 2187204076Spjd */ 2188204076Spjd mtx_lock(&sync_lock); 2189204076Spjd while (!ISSYNCREQDONE(hio)) 2190204076Spjd cv_wait(&sync_cond, &sync_lock); 2191204076Spjd mtx_unlock(&sync_lock); 2192204076Spjd 2193204076Spjd if (hio->hio_errors[ncomp] != 0) { 2194204076Spjd pjdlog_error("Unable to write synchronization data: %s.", 2195204076Spjd strerror(hio->hio_errors[ncomp])); 2196204076Spjd goto free_queue; 2197204076Spjd } 2198211880Spjd 2199211880Spjd synced += length; 2200204076Spjdfree_queue: 2201204076Spjd mtx_lock(&range_lock); 2202204076Spjd rangelock_del(range_sync, offset, length); 2203204076Spjd if (range_regular_wait) 2204204076Spjd cv_signal(&range_regular_cond); 2205204076Spjd mtx_unlock(&range_lock); 2206204076Spjd pjdlog_debug(2, "sync: (%p) Moving request to the free queue.", 2207204076Spjd hio); 2208204076Spjd QUEUE_INSERT2(hio, free); 2209204076Spjd } 2210204076Spjd /* NOTREACHED */ 2211204076Spjd return (NULL); 2212204076Spjd} 2213204076Spjd 2214217784Spjdvoid 2215217784Spjdprimary_config_reload(struct hast_resource *res, struct nv *nv) 2216210886Spjd{ 2217210886Spjd unsigned int ii, ncomps; 2218217784Spjd int modified, vint; 2219217784Spjd const char *vstr; 2220210886Spjd 2221210886Spjd pjdlog_info("Reloading 
configuration..."); 2222210886Spjd 2223218138Spjd PJDLOG_ASSERT(res->hr_role == HAST_ROLE_PRIMARY); 2224218138Spjd PJDLOG_ASSERT(gres == res); 2225217784Spjd nv_assert(nv, "remoteaddr"); 2226219818Spjd nv_assert(nv, "sourceaddr"); 2227217784Spjd nv_assert(nv, "replication"); 2228219351Spjd nv_assert(nv, "checksum"); 2229219354Spjd nv_assert(nv, "compression"); 2230217784Spjd nv_assert(nv, "timeout"); 2231217784Spjd nv_assert(nv, "exec"); 2232229509Strociny nv_assert(nv, "metaflush"); 2233217784Spjd 2234210886Spjd ncomps = HAST_NCOMPONENTS; 2235210886Spjd 2236219351Spjd#define MODIFIED_REMOTEADDR 0x01 2237219818Spjd#define MODIFIED_SOURCEADDR 0x02 2238219818Spjd#define MODIFIED_REPLICATION 0x04 2239219818Spjd#define MODIFIED_CHECKSUM 0x08 2240219818Spjd#define MODIFIED_COMPRESSION 0x10 2241219818Spjd#define MODIFIED_TIMEOUT 0x20 2242219818Spjd#define MODIFIED_EXEC 0x40 2243229509Strociny#define MODIFIED_METAFLUSH 0x80 2244210886Spjd modified = 0; 2245217784Spjd 2246217784Spjd vstr = nv_get_string(nv, "remoteaddr"); 2247217784Spjd if (strcmp(gres->hr_remoteaddr, vstr) != 0) { 2248210886Spjd /* 2249210886Spjd * Don't copy res->hr_remoteaddr to gres just yet. 2250210886Spjd * We want remote_close() to log disconnect from the old 2251210886Spjd * addresses, not from the new ones. 
2252210886Spjd */ 2253210886Spjd modified |= MODIFIED_REMOTEADDR; 2254210886Spjd } 2255219818Spjd vstr = nv_get_string(nv, "sourceaddr"); 2256219818Spjd if (strcmp(gres->hr_sourceaddr, vstr) != 0) { 2257219818Spjd strlcpy(gres->hr_sourceaddr, vstr, sizeof(gres->hr_sourceaddr)); 2258219818Spjd modified |= MODIFIED_SOURCEADDR; 2259219818Spjd } 2260217784Spjd vint = nv_get_int32(nv, "replication"); 2261217784Spjd if (gres->hr_replication != vint) { 2262217784Spjd gres->hr_replication = vint; 2263210886Spjd modified |= MODIFIED_REPLICATION; 2264210886Spjd } 2265219351Spjd vint = nv_get_int32(nv, "checksum"); 2266219351Spjd if (gres->hr_checksum != vint) { 2267219351Spjd gres->hr_checksum = vint; 2268219351Spjd modified |= MODIFIED_CHECKSUM; 2269219351Spjd } 2270219354Spjd vint = nv_get_int32(nv, "compression"); 2271219354Spjd if (gres->hr_compression != vint) { 2272219354Spjd gres->hr_compression = vint; 2273219354Spjd modified |= MODIFIED_COMPRESSION; 2274219354Spjd } 2275217784Spjd vint = nv_get_int32(nv, "timeout"); 2276217784Spjd if (gres->hr_timeout != vint) { 2277217784Spjd gres->hr_timeout = vint; 2278210886Spjd modified |= MODIFIED_TIMEOUT; 2279210886Spjd } 2280217784Spjd vstr = nv_get_string(nv, "exec"); 2281217784Spjd if (strcmp(gres->hr_exec, vstr) != 0) { 2282217784Spjd strlcpy(gres->hr_exec, vstr, sizeof(gres->hr_exec)); 2283211886Spjd modified |= MODIFIED_EXEC; 2284211886Spjd } 2285229509Strociny vint = nv_get_int32(nv, "metaflush"); 2286229509Strociny if (gres->hr_metaflush != vint) { 2287229509Strociny gres->hr_metaflush = vint; 2288229509Strociny modified |= MODIFIED_METAFLUSH; 2289229509Strociny } 2290217784Spjd 2291210886Spjd /* 2292219351Spjd * Change timeout for connected sockets. 2293219351Spjd * Don't bother if we need to reconnect. 
2294210886Spjd */ 2295219351Spjd if ((modified & MODIFIED_TIMEOUT) != 0 && 2296229509Strociny (modified & (MODIFIED_REMOTEADDR | MODIFIED_SOURCEADDR)) == 0) { 2297210886Spjd for (ii = 0; ii < ncomps; ii++) { 2298210886Spjd if (!ISREMOTE(ii)) 2299210886Spjd continue; 2300210886Spjd rw_rlock(&hio_remote_lock[ii]); 2301210886Spjd if (!ISCONNECTED(gres, ii)) { 2302210886Spjd rw_unlock(&hio_remote_lock[ii]); 2303210886Spjd continue; 2304210886Spjd } 2305210886Spjd rw_unlock(&hio_remote_lock[ii]); 2306210886Spjd if (proto_timeout(gres->hr_remotein, 2307231017Strociny gres->hr_timeout) == -1) { 2308210886Spjd pjdlog_errno(LOG_WARNING, 2309210886Spjd "Unable to set connection timeout"); 2310210886Spjd } 2311210886Spjd if (proto_timeout(gres->hr_remoteout, 2312231017Strociny gres->hr_timeout) == -1) { 2313210886Spjd pjdlog_errno(LOG_WARNING, 2314210886Spjd "Unable to set connection timeout"); 2315210886Spjd } 2316210886Spjd } 2317219351Spjd } 2318229509Strociny if ((modified & (MODIFIED_REMOTEADDR | MODIFIED_SOURCEADDR)) != 0) { 2319210886Spjd for (ii = 0; ii < ncomps; ii++) { 2320210886Spjd if (!ISREMOTE(ii)) 2321210886Spjd continue; 2322210886Spjd remote_close(gres, ii); 2323210886Spjd } 2324210886Spjd if (modified & MODIFIED_REMOTEADDR) { 2325217784Spjd vstr = nv_get_string(nv, "remoteaddr"); 2326217784Spjd strlcpy(gres->hr_remoteaddr, vstr, 2327210886Spjd sizeof(gres->hr_remoteaddr)); 2328210886Spjd } 2329210886Spjd } 2330210886Spjd#undef MODIFIED_REMOTEADDR 2331219818Spjd#undef MODIFIED_SOURCEADDR 2332210886Spjd#undef MODIFIED_REPLICATION 2333219351Spjd#undef MODIFIED_CHECKSUM 2334219354Spjd#undef MODIFIED_COMPRESSION 2335210886Spjd#undef MODIFIED_TIMEOUT 2336211886Spjd#undef MODIFIED_EXEC 2337229509Strociny#undef MODIFIED_METAFLUSH 2338210886Spjd 2339210886Spjd pjdlog_info("Configuration reloaded successfully."); 2340210886Spjd} 2341210886Spjd 2342211882Spjdstatic void 2343211981Spjdguard_one(struct hast_resource *res, unsigned int ncomp) 2344211981Spjd{ 
2345211981Spjd struct proto_conn *in, *out; 2346211981Spjd 2347211981Spjd if (!ISREMOTE(ncomp)) 2348211981Spjd return; 2349211981Spjd 2350211981Spjd rw_rlock(&hio_remote_lock[ncomp]); 2351211981Spjd 2352211981Spjd if (!real_remote(res)) { 2353211981Spjd rw_unlock(&hio_remote_lock[ncomp]); 2354211981Spjd return; 2355211981Spjd } 2356211981Spjd 2357211981Spjd if (ISCONNECTED(res, ncomp)) { 2358218138Spjd PJDLOG_ASSERT(res->hr_remotein != NULL); 2359218138Spjd PJDLOG_ASSERT(res->hr_remoteout != NULL); 2360211981Spjd rw_unlock(&hio_remote_lock[ncomp]); 2361211981Spjd pjdlog_debug(2, "remote_guard: Connection to %s is ok.", 2362211981Spjd res->hr_remoteaddr); 2363211981Spjd return; 2364211981Spjd } 2365211981Spjd 2366218138Spjd PJDLOG_ASSERT(res->hr_remotein == NULL); 2367218138Spjd PJDLOG_ASSERT(res->hr_remoteout == NULL); 2368211981Spjd /* 2369211981Spjd * Upgrade the lock. It doesn't have to be atomic as no other thread 2370211981Spjd * can change connection status from disconnected to connected. 2371211981Spjd */ 2372211981Spjd rw_unlock(&hio_remote_lock[ncomp]); 2373211981Spjd pjdlog_debug(2, "remote_guard: Reconnecting to %s.", 2374211981Spjd res->hr_remoteaddr); 2375211981Spjd in = out = NULL; 2376220898Spjd if (init_remote(res, &in, &out) == 0) { 2377211981Spjd rw_wlock(&hio_remote_lock[ncomp]); 2378218138Spjd PJDLOG_ASSERT(res->hr_remotein == NULL); 2379218138Spjd PJDLOG_ASSERT(res->hr_remoteout == NULL); 2380218138Spjd PJDLOG_ASSERT(in != NULL && out != NULL); 2381211981Spjd res->hr_remotein = in; 2382211981Spjd res->hr_remoteout = out; 2383211981Spjd rw_unlock(&hio_remote_lock[ncomp]); 2384211981Spjd pjdlog_info("Successfully reconnected to %s.", 2385211981Spjd res->hr_remoteaddr); 2386211981Spjd sync_start(); 2387211981Spjd } else { 2388211981Spjd /* Both connections should be NULL. 
*/ 2389218138Spjd PJDLOG_ASSERT(res->hr_remotein == NULL); 2390218138Spjd PJDLOG_ASSERT(res->hr_remoteout == NULL); 2391218138Spjd PJDLOG_ASSERT(in == NULL && out == NULL); 2392211981Spjd pjdlog_debug(2, "remote_guard: Reconnect to %s failed.", 2393211981Spjd res->hr_remoteaddr); 2394211981Spjd } 2395211981Spjd} 2396211981Spjd 2397204076Spjd/* 2398204076Spjd * Thread guards remote connections and reconnects when needed, handles 2399204076Spjd * signals, etc. 2400204076Spjd */ 2401204076Spjdstatic void * 2402204076Spjdguard_thread(void *arg) 2403204076Spjd{ 2404204076Spjd struct hast_resource *res = arg; 2405204076Spjd unsigned int ii, ncomps; 2406211982Spjd struct timespec timeout; 2407211981Spjd time_t lastcheck, now; 2408211982Spjd sigset_t mask; 2409211982Spjd int signo; 2410204076Spjd 2411204076Spjd ncomps = HAST_NCOMPONENTS; 2412211981Spjd lastcheck = time(NULL); 2413204076Spjd 2414211982Spjd PJDLOG_VERIFY(sigemptyset(&mask) == 0); 2415211982Spjd PJDLOG_VERIFY(sigaddset(&mask, SIGINT) == 0); 2416211982Spjd PJDLOG_VERIFY(sigaddset(&mask, SIGTERM) == 0); 2417211982Spjd 2418219721Strociny timeout.tv_sec = HAST_KEEPALIVE; 2419211982Spjd timeout.tv_nsec = 0; 2420211982Spjd signo = -1; 2421211982Spjd 2422204076Spjd for (;;) { 2423211982Spjd switch (signo) { 2424211982Spjd case SIGINT: 2425211982Spjd case SIGTERM: 2426211982Spjd sigexit_received = true; 2427204076Spjd primary_exitx(EX_OK, 2428204076Spjd "Termination signal received, exiting."); 2429211982Spjd break; 2430211982Spjd default: 2431211982Spjd break; 2432204076Spjd } 2433211882Spjd 2434220898Spjd /* 2435220898Spjd * Don't check connections until we fully started, 2436220898Spjd * as we may still be looping, waiting for remote node 2437220898Spjd * to switch from primary to secondary. 
2438220898Spjd */ 2439220898Spjd if (fullystarted) { 2440220898Spjd pjdlog_debug(2, "remote_guard: Checking connections."); 2441220898Spjd now = time(NULL); 2442220898Spjd if (lastcheck + HAST_KEEPALIVE <= now) { 2443220898Spjd for (ii = 0; ii < ncomps; ii++) 2444220898Spjd guard_one(res, ii); 2445220898Spjd lastcheck = now; 2446220898Spjd } 2447204076Spjd } 2448211982Spjd signo = sigtimedwait(&mask, NULL, &timeout); 2449204076Spjd } 2450204076Spjd /* NOTREACHED */ 2451204076Spjd return (NULL); 2452204076Spjd} 2453