1204076Spjd/*- 2330449Seadler * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3330449Seadler * 4204076Spjd * Copyright (c) 2009 The FreeBSD Foundation 5219351Spjd * Copyright (c) 2010-2011 Pawel Jakub Dawidek <pawel@dawidek.net> 6204076Spjd * All rights reserved. 7204076Spjd * 8204076Spjd * This software was developed by Pawel Jakub Dawidek under sponsorship from 9204076Spjd * the FreeBSD Foundation. 10204076Spjd * 11204076Spjd * Redistribution and use in source and binary forms, with or without 12204076Spjd * modification, are permitted provided that the following conditions 13204076Spjd * are met: 14204076Spjd * 1. Redistributions of source code must retain the above copyright 15204076Spjd * notice, this list of conditions and the following disclaimer. 16204076Spjd * 2. Redistributions in binary form must reproduce the above copyright 17204076Spjd * notice, this list of conditions and the following disclaimer in the 18204076Spjd * documentation and/or other materials provided with the distribution. 19204076Spjd * 20204076Spjd * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 21204076Spjd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22204076Spjd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23204076Spjd * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 24204076Spjd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25204076Spjd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26204076Spjd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27204076Spjd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28204076Spjd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29204076Spjd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30204076Spjd * SUCH DAMAGE. 
31204076Spjd */ 32204076Spjd 33204076Spjd#include <sys/cdefs.h> 34204076Spjd__FBSDID("$FreeBSD: stable/11/sbin/hastd/primary.c 330449 2018-03-05 07:26:05Z eadler $"); 35204076Spjd 36204076Spjd#include <sys/types.h> 37204076Spjd#include <sys/time.h> 38204076Spjd#include <sys/bio.h> 39204076Spjd#include <sys/disk.h> 40204076Spjd#include <sys/stat.h> 41204076Spjd 42204076Spjd#include <geom/gate/g_gate.h> 43204076Spjd 44204076Spjd#include <err.h> 45204076Spjd#include <errno.h> 46204076Spjd#include <fcntl.h> 47204076Spjd#include <libgeom.h> 48204076Spjd#include <pthread.h> 49211982Spjd#include <signal.h> 50204076Spjd#include <stdint.h> 51204076Spjd#include <stdio.h> 52204076Spjd#include <string.h> 53204076Spjd#include <sysexits.h> 54204076Spjd#include <unistd.h> 55204076Spjd 56204076Spjd#include <activemap.h> 57204076Spjd#include <nv.h> 58204076Spjd#include <rangelock.h> 59204076Spjd 60204076Spjd#include "control.h" 61212038Spjd#include "event.h" 62204076Spjd#include "hast.h" 63204076Spjd#include "hast_proto.h" 64204076Spjd#include "hastd.h" 65211886Spjd#include "hooks.h" 66204076Spjd#include "metadata.h" 67204076Spjd#include "proto.h" 68204076Spjd#include "pjdlog.h" 69246922Spjd#include "refcnt.h" 70204076Spjd#include "subr.h" 71204076Spjd#include "synch.h" 72204076Spjd 73210886Spjd/* The is only one remote component for now. */ 74210886Spjd#define ISREMOTE(no) ((no) == 1) 75210886Spjd 76204076Spjdstruct hio { 77204076Spjd /* 78204076Spjd * Number of components we are still waiting for. 79204076Spjd * When this field goes to 0, we can send the request back to the 80204076Spjd * kernel. Each component has to decrease this counter by one 81204076Spjd * even on failure. 82204076Spjd */ 83249969Sed refcnt_t hio_countdown; 84204076Spjd /* 85204076Spjd * Each component has a place to store its own error. 86204076Spjd * Once the request is handled by all components we can decide if the 87204076Spjd * request overall is successful or not. 
88204076Spjd */ 89204076Spjd int *hio_errors; 90204076Spjd /* 91219818Spjd * Structure used to communicate with GEOM Gate class. 92204076Spjd */ 93204076Spjd struct g_gate_ctl_io hio_ggio; 94226859Spjd /* 95226859Spjd * Request was already confirmed to GEOM Gate. 96226859Spjd */ 97226859Spjd bool hio_done; 98226859Spjd /* 99259191Strociny * Number of components we are still waiting before sending write 100259191Strociny * completion ack to GEOM Gate. Used for memsync. 101259191Strociny */ 102259191Strociny refcnt_t hio_writecount; 103259191Strociny /* 104259191Strociny * Memsync request was acknowleged by remote. 105259191Strociny */ 106259191Strociny bool hio_memsyncacked; 107259191Strociny /* 108226859Spjd * Remember replication from the time the request was initiated, 109226859Spjd * so we won't get confused when replication changes on reload. 110226859Spjd */ 111226859Spjd int hio_replication; 112204076Spjd TAILQ_ENTRY(hio) *hio_next; 113204076Spjd}; 114204076Spjd#define hio_free_next hio_next[0] 115204076Spjd#define hio_done_next hio_next[0] 116204076Spjd 117204076Spjd/* 118204076Spjd * Free list holds unused structures. When free list is empty, we have to wait 119204076Spjd * until some in-progress requests are freed. 120204076Spjd */ 121204076Spjdstatic TAILQ_HEAD(, hio) hio_free_list; 122257155Strocinystatic size_t hio_free_list_size; 123204076Spjdstatic pthread_mutex_t hio_free_list_lock; 124204076Spjdstatic pthread_cond_t hio_free_list_cond; 125204076Spjd/* 126204076Spjd * There is one send list for every component. One requests is placed on all 127204076Spjd * send lists - each component gets the same request, but each component is 128204076Spjd * responsible for managing his own send list. 
129204076Spjd */ 130204076Spjdstatic TAILQ_HEAD(, hio) *hio_send_list; 131257155Strocinystatic size_t *hio_send_list_size; 132204076Spjdstatic pthread_mutex_t *hio_send_list_lock; 133204076Spjdstatic pthread_cond_t *hio_send_list_cond; 134257155Strociny#define hio_send_local_list_size hio_send_list_size[0] 135257155Strociny#define hio_send_remote_list_size hio_send_list_size[1] 136204076Spjd/* 137204076Spjd * There is one recv list for every component, although local components don't 138204076Spjd * use recv lists as local requests are done synchronously. 139204076Spjd */ 140204076Spjdstatic TAILQ_HEAD(, hio) *hio_recv_list; 141257155Strocinystatic size_t *hio_recv_list_size; 142204076Spjdstatic pthread_mutex_t *hio_recv_list_lock; 143204076Spjdstatic pthread_cond_t *hio_recv_list_cond; 144257155Strociny#define hio_recv_remote_list_size hio_recv_list_size[1] 145204076Spjd/* 146204076Spjd * Request is placed on done list by the slowest component (the one that 147204076Spjd * decreased hio_countdown from 1 to 0). 148204076Spjd */ 149204076Spjdstatic TAILQ_HEAD(, hio) hio_done_list; 150257155Strocinystatic size_t hio_done_list_size; 151204076Spjdstatic pthread_mutex_t hio_done_list_lock; 152204076Spjdstatic pthread_cond_t hio_done_list_cond; 153204076Spjd/* 154204076Spjd * Structure below are for interaction with sync thread. 155204076Spjd */ 156204076Spjdstatic bool sync_inprogress; 157204076Spjdstatic pthread_mutex_t sync_lock; 158204076Spjdstatic pthread_cond_t sync_cond; 159204076Spjd/* 160204076Spjd * The lock below allows to synchornize access to remote connections. 161204076Spjd */ 162204076Spjdstatic pthread_rwlock_t *hio_remote_lock; 163204076Spjd 164204076Spjd/* 165204076Spjd * Lock to synchronize metadata updates. Also synchronize access to 166204076Spjd * hr_primary_localcnt and hr_primary_remotecnt fields. 
167204076Spjd */ 168204076Spjdstatic pthread_mutex_t metadata_lock; 169204076Spjd 170204076Spjd/* 171204076Spjd * Maximum number of outstanding I/O requests. 172204076Spjd */ 173204076Spjd#define HAST_HIO_MAX 256 174204076Spjd/* 175204076Spjd * Number of components. At this point there are only two components: local 176204076Spjd * and remote, but in the future it might be possible to use multiple local 177204076Spjd * and remote components. 178204076Spjd */ 179204076Spjd#define HAST_NCOMPONENTS 2 180204076Spjd 181204076Spjd#define ISCONNECTED(res, no) \ 182204076Spjd ((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL) 183204076Spjd 184204076Spjd#define QUEUE_INSERT1(hio, name, ncomp) do { \ 185204076Spjd mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 186259195Strociny if (TAILQ_EMPTY(&hio_##name##_list[(ncomp)])) \ 187259195Strociny cv_broadcast(&hio_##name##_list_cond[(ncomp)]); \ 188204076Spjd TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio), \ 189204076Spjd hio_next[(ncomp)]); \ 190257155Strociny hio_##name##_list_size[(ncomp)]++; \ 191259195Strociny mtx_unlock(&hio_##name##_list_lock[(ncomp)]); \ 192204076Spjd} while (0) 193204076Spjd#define QUEUE_INSERT2(hio, name) do { \ 194204076Spjd mtx_lock(&hio_##name##_list_lock); \ 195259195Strociny if (TAILQ_EMPTY(&hio_##name##_list)) \ 196259195Strociny cv_broadcast(&hio_##name##_list_cond); \ 197204076Spjd TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\ 198257155Strociny hio_##name##_list_size++; \ 199204076Spjd mtx_unlock(&hio_##name##_list_lock); \ 200204076Spjd} while (0) 201214692Spjd#define QUEUE_TAKE1(hio, name, ncomp, timeout) do { \ 202214692Spjd bool _last; \ 203214692Spjd \ 204204076Spjd mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 205214692Spjd _last = false; \ 206214692Spjd while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL && !_last) { \ 207214692Spjd cv_timedwait(&hio_##name##_list_cond[(ncomp)], \ 208214692Spjd &hio_##name##_list_lock[(ncomp)], (timeout)); 
\ 209219864Spjd if ((timeout) != 0) \ 210214692Spjd _last = true; \ 211204076Spjd } \ 212214692Spjd if (hio != NULL) { \ 213257155Strociny PJDLOG_ASSERT(hio_##name##_list_size[(ncomp)] != 0); \ 214257155Strociny hio_##name##_list_size[(ncomp)]--; \ 215214692Spjd TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio), \ 216214692Spjd hio_next[(ncomp)]); \ 217214692Spjd } \ 218204076Spjd mtx_unlock(&hio_##name##_list_lock[(ncomp)]); \ 219204076Spjd} while (0) 220204076Spjd#define QUEUE_TAKE2(hio, name) do { \ 221204076Spjd mtx_lock(&hio_##name##_list_lock); \ 222204076Spjd while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) { \ 223204076Spjd cv_wait(&hio_##name##_list_cond, \ 224204076Spjd &hio_##name##_list_lock); \ 225204076Spjd } \ 226257155Strociny PJDLOG_ASSERT(hio_##name##_list_size != 0); \ 227257155Strociny hio_##name##_list_size--; \ 228204076Spjd TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next); \ 229204076Spjd mtx_unlock(&hio_##name##_list_lock); \ 230204076Spjd} while (0) 231204076Spjd 232259192Strociny#define ISFULLSYNC(hio) ((hio)->hio_replication == HAST_REPLICATION_FULLSYNC) 233259192Strociny#define ISMEMSYNC(hio) ((hio)->hio_replication == HAST_REPLICATION_MEMSYNC) 234259192Strociny#define ISASYNC(hio) ((hio)->hio_replication == HAST_REPLICATION_ASYNC) 235259192Strociny 236209183Spjd#define SYNCREQ(hio) do { \ 237209183Spjd (hio)->hio_ggio.gctl_unit = -1; \ 238209183Spjd (hio)->hio_ggio.gctl_seq = 1; \ 239209183Spjd} while (0) 240204076Spjd#define ISSYNCREQ(hio) ((hio)->hio_ggio.gctl_unit == -1) 241204076Spjd#define SYNCREQDONE(hio) do { (hio)->hio_ggio.gctl_unit = -2; } while (0) 242204076Spjd#define ISSYNCREQDONE(hio) ((hio)->hio_ggio.gctl_unit == -2) 243204076Spjd 244259192Strociny#define ISMEMSYNCWRITE(hio) (ISMEMSYNC(hio) && \ 245259192Strociny (hio)->hio_ggio.gctl_cmd == BIO_WRITE && !ISSYNCREQ(hio)) 246259192Strociny 247204076Spjdstatic struct hast_resource *gres; 248204076Spjd 249204076Spjdstatic pthread_mutex_t range_lock; 
250204076Spjdstatic struct rangelocks *range_regular; 251204076Spjdstatic bool range_regular_wait; 252204076Spjdstatic pthread_cond_t range_regular_cond; 253204076Spjdstatic struct rangelocks *range_sync; 254204076Spjdstatic bool range_sync_wait; 255204076Spjdstatic pthread_cond_t range_sync_cond; 256220898Spjdstatic bool fullystarted; 257204076Spjd 258204076Spjdstatic void *ggate_recv_thread(void *arg); 259204076Spjdstatic void *local_send_thread(void *arg); 260204076Spjdstatic void *remote_send_thread(void *arg); 261204076Spjdstatic void *remote_recv_thread(void *arg); 262204076Spjdstatic void *ggate_send_thread(void *arg); 263204076Spjdstatic void *sync_thread(void *arg); 264204076Spjdstatic void *guard_thread(void *arg); 265204076Spjd 266211982Spjdstatic void 267257155Strocinyoutput_status_aux(struct nv *nvout) 268257155Strociny{ 269257155Strociny 270257155Strociny nv_add_uint64(nvout, (uint64_t)hio_free_list_size, 271257155Strociny "idle_queue_size"); 272257155Strociny nv_add_uint64(nvout, (uint64_t)hio_send_local_list_size, 273257155Strociny "local_queue_size"); 274257155Strociny nv_add_uint64(nvout, (uint64_t)hio_send_remote_list_size, 275257155Strociny "send_queue_size"); 276257155Strociny nv_add_uint64(nvout, (uint64_t)hio_recv_remote_list_size, 277257155Strociny "recv_queue_size"); 278257155Strociny nv_add_uint64(nvout, (uint64_t)hio_done_list_size, 279257155Strociny "done_queue_size"); 280257155Strociny} 281257155Strociny 282257155Strocinystatic void 283204076Spjdcleanup(struct hast_resource *res) 284204076Spjd{ 285204076Spjd int rerrno; 286204076Spjd 287204076Spjd /* Remember errno. */ 288204076Spjd rerrno = errno; 289204076Spjd 290204076Spjd /* Destroy ggate provider if we created one. 
*/ 291204076Spjd if (res->hr_ggateunit >= 0) { 292204076Spjd struct g_gate_ctl_destroy ggiod; 293204076Spjd 294213533Spjd bzero(&ggiod, sizeof(ggiod)); 295204076Spjd ggiod.gctl_version = G_GATE_VERSION; 296204076Spjd ggiod.gctl_unit = res->hr_ggateunit; 297204076Spjd ggiod.gctl_force = 1; 298229945Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) == -1) { 299213531Spjd pjdlog_errno(LOG_WARNING, 300213531Spjd "Unable to destroy hast/%s device", 301204076Spjd res->hr_provname); 302204076Spjd } 303204076Spjd res->hr_ggateunit = -1; 304204076Spjd } 305204076Spjd 306204076Spjd /* Restore errno. */ 307204076Spjd errno = rerrno; 308204076Spjd} 309204076Spjd 310212899Spjdstatic __dead2 void 311204076Spjdprimary_exit(int exitcode, const char *fmt, ...) 312204076Spjd{ 313204076Spjd va_list ap; 314204076Spjd 315218138Spjd PJDLOG_ASSERT(exitcode != EX_OK); 316204076Spjd va_start(ap, fmt); 317204076Spjd pjdlogv_errno(LOG_ERR, fmt, ap); 318204076Spjd va_end(ap); 319204076Spjd cleanup(gres); 320204076Spjd exit(exitcode); 321204076Spjd} 322204076Spjd 323212899Spjdstatic __dead2 void 324204076Spjdprimary_exitx(int exitcode, const char *fmt, ...) 325204076Spjd{ 326204076Spjd va_list ap; 327204076Spjd 328204076Spjd va_start(ap, fmt); 329204076Spjd pjdlogv(exitcode == EX_OK ? 
LOG_INFO : LOG_ERR, fmt, ap); 330204076Spjd va_end(ap); 331204076Spjd cleanup(gres); 332204076Spjd exit(exitcode); 333204076Spjd} 334204076Spjd 335204076Spjdstatic int 336270944Sedhast_activemap_flush(struct hast_resource *res) __unlocks(res->hr_amp_lock) 337204076Spjd{ 338204076Spjd const unsigned char *buf; 339204076Spjd size_t size; 340255716Strociny int ret; 341204076Spjd 342255716Strociny mtx_lock(&res->hr_amp_diskmap_lock); 343204076Spjd buf = activemap_bitmap(res->hr_amp, &size); 344255716Strociny mtx_unlock(&res->hr_amp_lock); 345218138Spjd PJDLOG_ASSERT(buf != NULL); 346218138Spjd PJDLOG_ASSERT((size % res->hr_local_sectorsize) == 0); 347255716Strociny ret = 0; 348204076Spjd if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) != 349204076Spjd (ssize_t)size) { 350225786Spjd pjdlog_errno(LOG_ERR, "Unable to flush activemap to disk"); 351247281Strociny res->hr_stat_activemap_write_error++; 352255716Strociny ret = -1; 353204076Spjd } 354255716Strociny if (ret == 0 && res->hr_metaflush == 1 && 355255716Strociny g_flush(res->hr_localfd) == -1) { 356225830Spjd if (errno == EOPNOTSUPP) { 357225830Spjd pjdlog_warning("The %s provider doesn't support flushing write cache. 
Disabling it.", 358225830Spjd res->hr_localpath); 359225830Spjd res->hr_metaflush = 0; 360225830Spjd } else { 361225830Spjd pjdlog_errno(LOG_ERR, 362225830Spjd "Unable to flush disk cache on activemap update"); 363247281Strociny res->hr_stat_activemap_flush_error++; 364255716Strociny ret = -1; 365225830Spjd } 366225830Spjd } 367255716Strociny mtx_unlock(&res->hr_amp_diskmap_lock); 368255716Strociny return (ret); 369204076Spjd} 370204076Spjd 371210881Spjdstatic bool 372210881Spjdreal_remote(const struct hast_resource *res) 373210881Spjd{ 374210881Spjd 375210881Spjd return (strcmp(res->hr_remoteaddr, "none") != 0); 376210881Spjd} 377210881Spjd 378204076Spjdstatic void 379204076Spjdinit_environment(struct hast_resource *res __unused) 380204076Spjd{ 381204076Spjd struct hio *hio; 382204076Spjd unsigned int ii, ncomps; 383204076Spjd 384204076Spjd /* 385204076Spjd * In the future it might be per-resource value. 386204076Spjd */ 387204076Spjd ncomps = HAST_NCOMPONENTS; 388204076Spjd 389204076Spjd /* 390204076Spjd * Allocate memory needed by lists. 
391204076Spjd */ 392204076Spjd hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps); 393204076Spjd if (hio_send_list == NULL) { 394204076Spjd primary_exitx(EX_TEMPFAIL, 395204076Spjd "Unable to allocate %zu bytes of memory for send lists.", 396204076Spjd sizeof(hio_send_list[0]) * ncomps); 397204076Spjd } 398257155Strociny hio_send_list_size = malloc(sizeof(hio_send_list_size[0]) * ncomps); 399257155Strociny if (hio_send_list_size == NULL) { 400257155Strociny primary_exitx(EX_TEMPFAIL, 401257155Strociny "Unable to allocate %zu bytes of memory for send list counters.", 402257155Strociny sizeof(hio_send_list_size[0]) * ncomps); 403257155Strociny } 404204076Spjd hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps); 405204076Spjd if (hio_send_list_lock == NULL) { 406204076Spjd primary_exitx(EX_TEMPFAIL, 407204076Spjd "Unable to allocate %zu bytes of memory for send list locks.", 408204076Spjd sizeof(hio_send_list_lock[0]) * ncomps); 409204076Spjd } 410204076Spjd hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps); 411204076Spjd if (hio_send_list_cond == NULL) { 412204076Spjd primary_exitx(EX_TEMPFAIL, 413204076Spjd "Unable to allocate %zu bytes of memory for send list condition variables.", 414204076Spjd sizeof(hio_send_list_cond[0]) * ncomps); 415204076Spjd } 416204076Spjd hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps); 417204076Spjd if (hio_recv_list == NULL) { 418204076Spjd primary_exitx(EX_TEMPFAIL, 419204076Spjd "Unable to allocate %zu bytes of memory for recv lists.", 420204076Spjd sizeof(hio_recv_list[0]) * ncomps); 421204076Spjd } 422257155Strociny hio_recv_list_size = malloc(sizeof(hio_recv_list_size[0]) * ncomps); 423257155Strociny if (hio_recv_list_size == NULL) { 424257155Strociny primary_exitx(EX_TEMPFAIL, 425257155Strociny "Unable to allocate %zu bytes of memory for recv list counters.", 426257155Strociny sizeof(hio_recv_list_size[0]) * ncomps); 427257155Strociny } 428204076Spjd hio_recv_list_lock = 
malloc(sizeof(hio_recv_list_lock[0]) * ncomps); 429204076Spjd if (hio_recv_list_lock == NULL) { 430204076Spjd primary_exitx(EX_TEMPFAIL, 431204076Spjd "Unable to allocate %zu bytes of memory for recv list locks.", 432204076Spjd sizeof(hio_recv_list_lock[0]) * ncomps); 433204076Spjd } 434204076Spjd hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps); 435204076Spjd if (hio_recv_list_cond == NULL) { 436204076Spjd primary_exitx(EX_TEMPFAIL, 437204076Spjd "Unable to allocate %zu bytes of memory for recv list condition variables.", 438204076Spjd sizeof(hio_recv_list_cond[0]) * ncomps); 439204076Spjd } 440204076Spjd hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps); 441204076Spjd if (hio_remote_lock == NULL) { 442204076Spjd primary_exitx(EX_TEMPFAIL, 443204076Spjd "Unable to allocate %zu bytes of memory for remote connections locks.", 444204076Spjd sizeof(hio_remote_lock[0]) * ncomps); 445204076Spjd } 446204076Spjd 447204076Spjd /* 448257155Strociny * Initialize lists, their counters, locks and condition variables. 449204076Spjd */ 450204076Spjd TAILQ_INIT(&hio_free_list); 451204076Spjd mtx_init(&hio_free_list_lock); 452204076Spjd cv_init(&hio_free_list_cond); 453204076Spjd for (ii = 0; ii < HAST_NCOMPONENTS; ii++) { 454204076Spjd TAILQ_INIT(&hio_send_list[ii]); 455257155Strociny hio_send_list_size[ii] = 0; 456204076Spjd mtx_init(&hio_send_list_lock[ii]); 457204076Spjd cv_init(&hio_send_list_cond[ii]); 458204076Spjd TAILQ_INIT(&hio_recv_list[ii]); 459257155Strociny hio_recv_list_size[ii] = 0; 460204076Spjd mtx_init(&hio_recv_list_lock[ii]); 461204076Spjd cv_init(&hio_recv_list_cond[ii]); 462204076Spjd rw_init(&hio_remote_lock[ii]); 463204076Spjd } 464204076Spjd TAILQ_INIT(&hio_done_list); 465204076Spjd mtx_init(&hio_done_list_lock); 466204076Spjd cv_init(&hio_done_list_cond); 467204076Spjd mtx_init(&metadata_lock); 468204076Spjd 469204076Spjd /* 470204076Spjd * Allocate requests pool and initialize requests. 
471204076Spjd */ 472204076Spjd for (ii = 0; ii < HAST_HIO_MAX; ii++) { 473204076Spjd hio = malloc(sizeof(*hio)); 474204076Spjd if (hio == NULL) { 475204076Spjd primary_exitx(EX_TEMPFAIL, 476204076Spjd "Unable to allocate %zu bytes of memory for hio request.", 477204076Spjd sizeof(*hio)); 478204076Spjd } 479249969Sed refcnt_init(&hio->hio_countdown, 0); 480204076Spjd hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps); 481204076Spjd if (hio->hio_errors == NULL) { 482204076Spjd primary_exitx(EX_TEMPFAIL, 483204076Spjd "Unable allocate %zu bytes of memory for hio errors.", 484204076Spjd sizeof(hio->hio_errors[0]) * ncomps); 485204076Spjd } 486204076Spjd hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps); 487204076Spjd if (hio->hio_next == NULL) { 488204076Spjd primary_exitx(EX_TEMPFAIL, 489204076Spjd "Unable allocate %zu bytes of memory for hio_next field.", 490204076Spjd sizeof(hio->hio_next[0]) * ncomps); 491204076Spjd } 492204076Spjd hio->hio_ggio.gctl_version = G_GATE_VERSION; 493204076Spjd hio->hio_ggio.gctl_data = malloc(MAXPHYS); 494204076Spjd if (hio->hio_ggio.gctl_data == NULL) { 495204076Spjd primary_exitx(EX_TEMPFAIL, 496204076Spjd "Unable to allocate %zu bytes of memory for gctl_data.", 497204076Spjd MAXPHYS); 498204076Spjd } 499204076Spjd hio->hio_ggio.gctl_length = MAXPHYS; 500204076Spjd hio->hio_ggio.gctl_error = 0; 501204076Spjd TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next); 502257155Strociny hio_free_list_size++; 503204076Spjd } 504204076Spjd} 505204076Spjd 506214284Spjdstatic bool 507214284Spjdinit_resuid(struct hast_resource *res) 508214284Spjd{ 509214284Spjd 510214284Spjd mtx_lock(&metadata_lock); 511214284Spjd if (res->hr_resuid != 0) { 512214284Spjd mtx_unlock(&metadata_lock); 513214284Spjd return (false); 514214284Spjd } else { 515214284Spjd /* Initialize unique resource identifier. 
*/ 516214284Spjd arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid)); 517214284Spjd mtx_unlock(&metadata_lock); 518229945Spjd if (metadata_write(res) == -1) 519214284Spjd exit(EX_NOINPUT); 520214284Spjd return (true); 521214284Spjd } 522214284Spjd} 523214284Spjd 524204076Spjdstatic void 525204076Spjdinit_local(struct hast_resource *res) 526204076Spjd{ 527204076Spjd unsigned char *buf; 528204076Spjd size_t mapsize; 529204076Spjd 530229945Spjd if (metadata_read(res, true) == -1) 531204076Spjd exit(EX_NOINPUT); 532204076Spjd mtx_init(&res->hr_amp_lock); 533204076Spjd if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize, 534229945Spjd res->hr_local_sectorsize, res->hr_keepdirty) == -1) { 535204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create activemap"); 536204076Spjd } 537204076Spjd mtx_init(&range_lock); 538204076Spjd cv_init(&range_regular_cond); 539229945Spjd if (rangelock_init(&range_regular) == -1) 540204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create regular range lock"); 541204076Spjd cv_init(&range_sync_cond); 542229945Spjd if (rangelock_init(&range_sync) == -1) 543204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create sync range lock"); 544204076Spjd mapsize = activemap_ondisk_size(res->hr_amp); 545204076Spjd buf = calloc(1, mapsize); 546204076Spjd if (buf == NULL) { 547204076Spjd primary_exitx(EX_TEMPFAIL, 548204076Spjd "Unable to allocate buffer for activemap."); 549204076Spjd } 550204076Spjd if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) != 551204076Spjd (ssize_t)mapsize) { 552204076Spjd primary_exit(EX_NOINPUT, "Unable to read activemap"); 553204076Spjd } 554204076Spjd activemap_copyin(res->hr_amp, buf, mapsize); 555209181Spjd free(buf); 556204076Spjd if (res->hr_resuid != 0) 557204076Spjd return; 558204076Spjd /* 559214284Spjd * We're using provider for the first time. Initialize local and remote 560214284Spjd * counters. We don't initialize resuid here, as we want to do it just 561214284Spjd * in time. 
The reason for this is that we want to inform secondary 562214284Spjd * that there were no writes yet, so there is no need to synchronize 563214284Spjd * anything. 564204076Spjd */ 565219844Spjd res->hr_primary_localcnt = 0; 566204076Spjd res->hr_primary_remotecnt = 0; 567229945Spjd if (metadata_write(res) == -1) 568204076Spjd exit(EX_NOINPUT); 569204076Spjd} 570204076Spjd 571218218Spjdstatic int 572218218Spjdprimary_connect(struct hast_resource *res, struct proto_conn **connp) 573218218Spjd{ 574218218Spjd struct proto_conn *conn; 575218218Spjd int16_t val; 576218218Spjd 577218218Spjd val = 1; 578229945Spjd if (proto_send(res->hr_conn, &val, sizeof(val)) == -1) { 579218218Spjd primary_exit(EX_TEMPFAIL, 580218218Spjd "Unable to send connection request to parent"); 581218218Spjd } 582229945Spjd if (proto_recv(res->hr_conn, &val, sizeof(val)) == -1) { 583218218Spjd primary_exit(EX_TEMPFAIL, 584218218Spjd "Unable to receive reply to connection request from parent"); 585218218Spjd } 586218218Spjd if (val != 0) { 587218218Spjd errno = val; 588218218Spjd pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 589218218Spjd res->hr_remoteaddr); 590218218Spjd return (-1); 591218218Spjd } 592229945Spjd if (proto_connection_recv(res->hr_conn, true, &conn) == -1) { 593218218Spjd primary_exit(EX_TEMPFAIL, 594218218Spjd "Unable to receive connection from parent"); 595218218Spjd } 596229945Spjd if (proto_connect_wait(conn, res->hr_timeout) == -1) { 597218218Spjd pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 598218218Spjd res->hr_remoteaddr); 599218218Spjd proto_close(conn); 600218218Spjd return (-1); 601218218Spjd } 602218218Spjd /* Error in setting timeout is not critical, but why should it fail? 
*/ 603229945Spjd if (proto_timeout(conn, res->hr_timeout) == -1) 604218218Spjd pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 605218218Spjd 606218218Spjd *connp = conn; 607218218Spjd 608218218Spjd return (0); 609218218Spjd} 610246922Spjd 611238120Spjd/* 612238120Spjd * Function instructs GEOM_GATE to handle reads directly from within the kernel. 613238120Spjd */ 614238120Spjdstatic void 615238120Spjdenable_direct_reads(struct hast_resource *res) 616238120Spjd{ 617238120Spjd struct g_gate_ctl_modify ggiomodify; 618218218Spjd 619238120Spjd bzero(&ggiomodify, sizeof(ggiomodify)); 620238120Spjd ggiomodify.gctl_version = G_GATE_VERSION; 621238120Spjd ggiomodify.gctl_unit = res->hr_ggateunit; 622238120Spjd ggiomodify.gctl_modify = GG_MODIFY_READPROV | GG_MODIFY_READOFFSET; 623238120Spjd strlcpy(ggiomodify.gctl_readprov, res->hr_localpath, 624238120Spjd sizeof(ggiomodify.gctl_readprov)); 625238120Spjd ggiomodify.gctl_readoffset = res->hr_localoff; 626238120Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_MODIFY, &ggiomodify) == 0) 627238120Spjd pjdlog_debug(1, "Direct reads enabled."); 628238120Spjd else 629238120Spjd pjdlog_errno(LOG_WARNING, "Failed to enable direct reads"); 630238120Spjd} 631238120Spjd 632220898Spjdstatic int 633205738Spjdinit_remote(struct hast_resource *res, struct proto_conn **inp, 634205738Spjd struct proto_conn **outp) 635204076Spjd{ 636205738Spjd struct proto_conn *in, *out; 637204076Spjd struct nv *nvout, *nvin; 638204076Spjd const unsigned char *token; 639204076Spjd unsigned char *map; 640204076Spjd const char *errmsg; 641204076Spjd int32_t extentsize; 642204076Spjd int64_t datasize; 643204076Spjd uint32_t mapsize; 644246922Spjd uint8_t version; 645204076Spjd size_t size; 646220898Spjd int error; 647204076Spjd 648218138Spjd PJDLOG_ASSERT((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL)); 649218138Spjd PJDLOG_ASSERT(real_remote(res)); 650205738Spjd 651205738Spjd in = out = NULL; 652211983Spjd errmsg = NULL; 653205738Spjd 
654218218Spjd if (primary_connect(res, &out) == -1) 655220898Spjd return (ECONNREFUSED); 656218218Spjd 657220898Spjd error = ECONNABORTED; 658220898Spjd 659204076Spjd /* 660204076Spjd * First handshake step. 661204076Spjd * Setup outgoing connection with remote node. 662204076Spjd */ 663204076Spjd nvout = nv_alloc(); 664204076Spjd nv_add_string(nvout, res->hr_name, "resource"); 665246922Spjd nv_add_uint8(nvout, HAST_PROTO_VERSION, "version"); 666204076Spjd if (nv_error(nvout) != 0) { 667204076Spjd pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 668204076Spjd "Unable to allocate header for connection with %s", 669204076Spjd res->hr_remoteaddr); 670204076Spjd nv_free(nvout); 671204076Spjd goto close; 672204076Spjd } 673229945Spjd if (hast_proto_send(res, out, nvout, NULL, 0) == -1) { 674204076Spjd pjdlog_errno(LOG_WARNING, 675204076Spjd "Unable to send handshake header to %s", 676204076Spjd res->hr_remoteaddr); 677204076Spjd nv_free(nvout); 678204076Spjd goto close; 679204076Spjd } 680204076Spjd nv_free(nvout); 681229945Spjd if (hast_proto_recv_hdr(out, &nvin) == -1) { 682204076Spjd pjdlog_errno(LOG_WARNING, 683204076Spjd "Unable to receive handshake header from %s", 684204076Spjd res->hr_remoteaddr); 685204076Spjd goto close; 686204076Spjd } 687204076Spjd errmsg = nv_get_string(nvin, "errmsg"); 688204076Spjd if (errmsg != NULL) { 689204076Spjd pjdlog_warning("%s", errmsg); 690220898Spjd if (nv_exists(nvin, "wait")) 691220898Spjd error = EBUSY; 692204076Spjd nv_free(nvin); 693204076Spjd goto close; 694204076Spjd } 695246922Spjd version = nv_get_uint8(nvin, "version"); 696246922Spjd if (version == 0) { 697246922Spjd /* 698246922Spjd * If no version is sent, it means this is protocol version 1. 
699246922Spjd */ 700246922Spjd version = 1; 701246922Spjd } 702246922Spjd if (version > HAST_PROTO_VERSION) { 703246922Spjd pjdlog_warning("Invalid version received (%hhu).", version); 704246922Spjd nv_free(nvin); 705246922Spjd goto close; 706246922Spjd } 707246922Spjd res->hr_version = version; 708246922Spjd pjdlog_debug(1, "Negotiated protocol version %d.", res->hr_version); 709204076Spjd token = nv_get_uint8_array(nvin, &size, "token"); 710204076Spjd if (token == NULL) { 711204076Spjd pjdlog_warning("Handshake header from %s has no 'token' field.", 712204076Spjd res->hr_remoteaddr); 713204076Spjd nv_free(nvin); 714204076Spjd goto close; 715204076Spjd } 716204076Spjd if (size != sizeof(res->hr_token)) { 717204076Spjd pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).", 718204076Spjd res->hr_remoteaddr, size, sizeof(res->hr_token)); 719204076Spjd nv_free(nvin); 720204076Spjd goto close; 721204076Spjd } 722204076Spjd bcopy(token, res->hr_token, sizeof(res->hr_token)); 723204076Spjd nv_free(nvin); 724204076Spjd 725204076Spjd /* 726204076Spjd * Second handshake step. 727204076Spjd * Setup incoming connection with remote node. 728204076Spjd */ 729218218Spjd if (primary_connect(res, &in) == -1) 730204076Spjd goto close; 731218218Spjd 732204076Spjd nvout = nv_alloc(); 733204076Spjd nv_add_string(nvout, res->hr_name, "resource"); 734204076Spjd nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token), 735204076Spjd "token"); 736214284Spjd if (res->hr_resuid == 0) { 737214284Spjd /* 738214284Spjd * The resuid field was not yet initialized. 739214284Spjd * Because we do synchronization inside init_resuid(), it is 740214284Spjd * possible that someone already initialized it, the function 741214284Spjd * will return false then, but if we successfully initialized 742214284Spjd * it, we will get true. 
True means that there were no writes 743214284Spjd * to this resource yet and we want to inform secondary that 744214284Spjd * synchronization is not needed by sending "virgin" argument. 745214284Spjd */ 746214284Spjd if (init_resuid(res)) 747214284Spjd nv_add_int8(nvout, 1, "virgin"); 748214284Spjd } 749204076Spjd nv_add_uint64(nvout, res->hr_resuid, "resuid"); 750204076Spjd nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt"); 751204076Spjd nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt"); 752204076Spjd if (nv_error(nvout) != 0) { 753204076Spjd pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 754204076Spjd "Unable to allocate header for connection with %s", 755204076Spjd res->hr_remoteaddr); 756204076Spjd nv_free(nvout); 757204076Spjd goto close; 758204076Spjd } 759229945Spjd if (hast_proto_send(res, in, nvout, NULL, 0) == -1) { 760204076Spjd pjdlog_errno(LOG_WARNING, 761204076Spjd "Unable to send handshake header to %s", 762204076Spjd res->hr_remoteaddr); 763204076Spjd nv_free(nvout); 764204076Spjd goto close; 765204076Spjd } 766204076Spjd nv_free(nvout); 767229945Spjd if (hast_proto_recv_hdr(out, &nvin) == -1) { 768204076Spjd pjdlog_errno(LOG_WARNING, 769204076Spjd "Unable to receive handshake header from %s", 770204076Spjd res->hr_remoteaddr); 771204076Spjd goto close; 772204076Spjd } 773204076Spjd errmsg = nv_get_string(nvin, "errmsg"); 774204076Spjd if (errmsg != NULL) { 775204076Spjd pjdlog_warning("%s", errmsg); 776204076Spjd nv_free(nvin); 777204076Spjd goto close; 778204076Spjd } 779204076Spjd datasize = nv_get_int64(nvin, "datasize"); 780204076Spjd if (datasize != res->hr_datasize) { 781204076Spjd pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).", 782204076Spjd (intmax_t)res->hr_datasize, (intmax_t)datasize); 783204076Spjd nv_free(nvin); 784204076Spjd goto close; 785204076Spjd } 786204076Spjd extentsize = nv_get_int32(nvin, "extentsize"); 787204076Spjd if (extentsize != res->hr_extentsize) { 788204076Spjd 
pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).", 789204076Spjd (ssize_t)res->hr_extentsize, (ssize_t)extentsize); 790204076Spjd nv_free(nvin); 791204076Spjd goto close; 792204076Spjd } 793204076Spjd res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt"); 794204076Spjd res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt"); 795204076Spjd res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc"); 796238120Spjd if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) 797238120Spjd enable_direct_reads(res); 798220865Spjd if (nv_exists(nvin, "virgin")) { 799220865Spjd /* 800220865Spjd * Secondary was reinitialized, bump localcnt if it is 0 as 801220865Spjd * only we have the data. 802220865Spjd */ 803220865Spjd PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_PRIMARY); 804220865Spjd PJDLOG_ASSERT(res->hr_secondary_localcnt == 0); 805220865Spjd 806220865Spjd if (res->hr_primary_localcnt == 0) { 807220865Spjd PJDLOG_ASSERT(res->hr_secondary_remotecnt == 0); 808220865Spjd 809220865Spjd mtx_lock(&metadata_lock); 810220865Spjd res->hr_primary_localcnt++; 811220865Spjd pjdlog_debug(1, "Increasing localcnt to %ju.", 812220865Spjd (uintmax_t)res->hr_primary_localcnt); 813220865Spjd (void)metadata_write(res); 814220865Spjd mtx_unlock(&metadata_lock); 815220865Spjd } 816220865Spjd } 817204076Spjd map = NULL; 818204076Spjd mapsize = nv_get_uint32(nvin, "mapsize"); 819204076Spjd if (mapsize > 0) { 820204076Spjd map = malloc(mapsize); 821204076Spjd if (map == NULL) { 822204076Spjd pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).", 823204076Spjd (uintmax_t)mapsize); 824204076Spjd nv_free(nvin); 825204076Spjd goto close; 826204076Spjd } 827204076Spjd /* 828204076Spjd * Remote node have some dirty extents on its own, lets 829204076Spjd * download its activemap. 
830204076Spjd */ 831205738Spjd if (hast_proto_recv_data(res, out, nvin, map, 832229945Spjd mapsize) == -1) { 833204076Spjd pjdlog_errno(LOG_ERR, 834204076Spjd "Unable to receive remote activemap"); 835204076Spjd nv_free(nvin); 836204076Spjd free(map); 837204076Spjd goto close; 838204076Spjd } 839257154Strociny mtx_lock(&res->hr_amp_lock); 840204076Spjd /* 841204076Spjd * Merge local and remote bitmaps. 842204076Spjd */ 843204076Spjd activemap_merge(res->hr_amp, map, mapsize); 844204076Spjd free(map); 845204076Spjd /* 846204076Spjd * Now that we merged bitmaps from both nodes, flush it to the 847204076Spjd * disk before we start to synchronize. 848204076Spjd */ 849204076Spjd (void)hast_activemap_flush(res); 850204076Spjd } 851214274Spjd nv_free(nvin); 852223181Strociny#ifdef notyet 853220271Spjd /* Setup directions. */ 854220271Spjd if (proto_send(out, NULL, 0) == -1) 855220271Spjd pjdlog_errno(LOG_WARNING, "Unable to set connection direction"); 856220271Spjd if (proto_recv(in, NULL, 0) == -1) 857220271Spjd pjdlog_errno(LOG_WARNING, "Unable to set connection direction"); 858223181Strociny#endif 859204076Spjd pjdlog_info("Connected to %s.", res->hr_remoteaddr); 860246922Spjd if (res->hr_original_replication == HAST_REPLICATION_MEMSYNC && 861246922Spjd res->hr_version < 2) { 862246922Spjd pjdlog_warning("The 'memsync' replication mode is not supported by the remote node, falling back to 'fullsync' mode."); 863246922Spjd res->hr_replication = HAST_REPLICATION_FULLSYNC; 864246922Spjd } else if (res->hr_replication != res->hr_original_replication) { 865246922Spjd /* 866246922Spjd * This is in case hastd disconnected and was upgraded. 
867246922Spjd */ 868246922Spjd res->hr_replication = res->hr_original_replication; 869246922Spjd } 870205738Spjd if (inp != NULL && outp != NULL) { 871205738Spjd *inp = in; 872205738Spjd *outp = out; 873205738Spjd } else { 874205738Spjd res->hr_remotein = in; 875205738Spjd res->hr_remoteout = out; 876205738Spjd } 877212038Spjd event_send(res, EVENT_CONNECT); 878220898Spjd return (0); 879205738Spjdclose: 880211983Spjd if (errmsg != NULL && strcmp(errmsg, "Split-brain condition!") == 0) 881212038Spjd event_send(res, EVENT_SPLITBRAIN); 882205738Spjd proto_close(out); 883205738Spjd if (in != NULL) 884205738Spjd proto_close(in); 885220898Spjd return (error); 886205738Spjd} 887205738Spjd 888205738Spjdstatic void 889205738Spjdsync_start(void) 890205738Spjd{ 891205738Spjd 892204076Spjd mtx_lock(&sync_lock); 893204076Spjd sync_inprogress = true; 894204076Spjd mtx_unlock(&sync_lock); 895204076Spjd cv_signal(&sync_cond); 896204076Spjd} 897204076Spjd 898204076Spjdstatic void 899211878Spjdsync_stop(void) 900211878Spjd{ 901211878Spjd 902211878Spjd mtx_lock(&sync_lock); 903211878Spjd if (sync_inprogress) 904211878Spjd sync_inprogress = false; 905211878Spjd mtx_unlock(&sync_lock); 906211878Spjd} 907211878Spjd 908211878Spjdstatic void 909204076Spjdinit_ggate(struct hast_resource *res) 910204076Spjd{ 911204076Spjd struct g_gate_ctl_create ggiocreate; 912204076Spjd struct g_gate_ctl_cancel ggiocancel; 913204076Spjd 914204076Spjd /* 915204076Spjd * We communicate with ggate via /dev/ggctl. Open it. 916204076Spjd */ 917204076Spjd res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR); 918229945Spjd if (res->hr_ggatefd == -1) 919204076Spjd primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME); 920204076Spjd /* 921204076Spjd * Create provider before trying to connect, as connection failure 922204076Spjd * is not critical, but may take some time. 
923204076Spjd */ 924213533Spjd bzero(&ggiocreate, sizeof(ggiocreate)); 925204076Spjd ggiocreate.gctl_version = G_GATE_VERSION; 926204076Spjd ggiocreate.gctl_mediasize = res->hr_datasize; 927204076Spjd ggiocreate.gctl_sectorsize = res->hr_local_sectorsize; 928204076Spjd ggiocreate.gctl_flags = 0; 929220266Spjd ggiocreate.gctl_maxcount = 0; 930204076Spjd ggiocreate.gctl_timeout = 0; 931204076Spjd ggiocreate.gctl_unit = G_GATE_NAME_GIVEN; 932204076Spjd snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s", 933204076Spjd res->hr_provname); 934204076Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) { 935204076Spjd pjdlog_info("Device hast/%s created.", res->hr_provname); 936204076Spjd res->hr_ggateunit = ggiocreate.gctl_unit; 937204076Spjd return; 938204076Spjd } 939204076Spjd if (errno != EEXIST) { 940204076Spjd primary_exit(EX_OSERR, "Unable to create hast/%s device", 941204076Spjd res->hr_provname); 942204076Spjd } 943204076Spjd pjdlog_debug(1, 944204076Spjd "Device hast/%s already exists, we will try to take it over.", 945204076Spjd res->hr_provname); 946204076Spjd /* 947204076Spjd * If we received EEXIST, we assume that the process who created the 948204076Spjd * provider died and didn't clean up. In that case we will start from 949204076Spjd * where he left of. 
950204076Spjd */ 951213533Spjd bzero(&ggiocancel, sizeof(ggiocancel)); 952204076Spjd ggiocancel.gctl_version = G_GATE_VERSION; 953204076Spjd ggiocancel.gctl_unit = G_GATE_NAME_GIVEN; 954204076Spjd snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s", 955204076Spjd res->hr_provname); 956204076Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) { 957204076Spjd pjdlog_info("Device hast/%s recovered.", res->hr_provname); 958204076Spjd res->hr_ggateunit = ggiocancel.gctl_unit; 959204076Spjd return; 960204076Spjd } 961204076Spjd primary_exit(EX_OSERR, "Unable to take over hast/%s device", 962204076Spjd res->hr_provname); 963204076Spjd} 964204076Spjd 965204076Spjdvoid 966204076Spjdhastd_primary(struct hast_resource *res) 967204076Spjd{ 968204076Spjd pthread_t td; 969204076Spjd pid_t pid; 970219482Strociny int error, mode, debuglevel; 971204076Spjd 972204076Spjd /* 973218218Spjd * Create communication channel for sending control commands from 974218218Spjd * parent to child. 975204076Spjd */ 976229945Spjd if (proto_client(NULL, "socketpair://", &res->hr_ctrl) == -1) { 977218042Spjd /* TODO: There's no need for this to be fatal error. */ 978204076Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 979212034Spjd pjdlog_exit(EX_OSERR, 980204076Spjd "Unable to create control sockets between parent and child"); 981204076Spjd } 982212038Spjd /* 983218218Spjd * Create communication channel for sending events from child to parent. 984212038Spjd */ 985229945Spjd if (proto_client(NULL, "socketpair://", &res->hr_event) == -1) { 986218042Spjd /* TODO: There's no need for this to be fatal error. */ 987212038Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 988212038Spjd pjdlog_exit(EX_OSERR, 989212038Spjd "Unable to create event sockets between child and parent"); 990212038Spjd } 991218218Spjd /* 992218218Spjd * Create communication channel for sending connection requests from 993218218Spjd * child to parent. 
994218218Spjd */ 995229945Spjd if (proto_client(NULL, "socketpair://", &res->hr_conn) == -1) { 996218218Spjd /* TODO: There's no need for this to be fatal error. */ 997218218Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 998218218Spjd pjdlog_exit(EX_OSERR, 999218218Spjd "Unable to create connection sockets between child and parent"); 1000218218Spjd } 1001204076Spjd 1002204076Spjd pid = fork(); 1003229744Spjd if (pid == -1) { 1004218042Spjd /* TODO: There's no need for this to be fatal error. */ 1005204076Spjd KEEP_ERRNO((void)pidfile_remove(pfh)); 1006212034Spjd pjdlog_exit(EX_TEMPFAIL, "Unable to fork"); 1007204076Spjd } 1008204076Spjd 1009204076Spjd if (pid > 0) { 1010204076Spjd /* This is parent. */ 1011212038Spjd /* Declare that we are receiver. */ 1012212038Spjd proto_recv(res->hr_event, NULL, 0); 1013218218Spjd proto_recv(res->hr_conn, NULL, 0); 1014218043Spjd /* Declare that we are sender. */ 1015218043Spjd proto_send(res->hr_ctrl, NULL, 0); 1016204076Spjd res->hr_workerpid = pid; 1017204076Spjd return; 1018204076Spjd } 1019211977Spjd 1020211984Spjd gres = res; 1021257155Strociny res->output_status_aux = output_status_aux; 1022218043Spjd mode = pjdlog_mode_get(); 1023219482Strociny debuglevel = pjdlog_debug_get(); 1024211984Spjd 1025218043Spjd /* Declare that we are sender. */ 1026218043Spjd proto_send(res->hr_event, NULL, 0); 1027218218Spjd proto_send(res->hr_conn, NULL, 0); 1028218043Spjd /* Declare that we are receiver. 
*/ 1029218043Spjd proto_recv(res->hr_ctrl, NULL, 0); 1030218043Spjd descriptors_cleanup(res); 1031204076Spjd 1032218045Spjd descriptors_assert(res, mode); 1033218045Spjd 1034218043Spjd pjdlog_init(mode); 1035219482Strociny pjdlog_debug_set(debuglevel); 1036218043Spjd pjdlog_prefix_set("[%s] (%s) ", res->hr_name, role2str(res->hr_role)); 1037220005Spjd setproctitle("%s (%s)", res->hr_name, role2str(res->hr_role)); 1038204076Spjd 1039204076Spjd init_local(res); 1040213007Spjd init_ggate(res); 1041213007Spjd init_environment(res); 1042217784Spjd 1043221899Spjd if (drop_privs(res) != 0) { 1044218049Spjd cleanup(res); 1045218049Spjd exit(EX_CONFIG); 1046218049Spjd } 1047218214Spjd pjdlog_info("Privileges successfully dropped."); 1048218049Spjd 1049213007Spjd /* 1050213530Spjd * Create the guard thread first, so we can handle signals from the 1051229778Suqs * very beginning. 1052213530Spjd */ 1053213530Spjd error = pthread_create(&td, NULL, guard_thread, res); 1054218138Spjd PJDLOG_ASSERT(error == 0); 1055213530Spjd /* 1056213007Spjd * Create the control thread before sending any event to the parent, 1057213007Spjd * as we can deadlock when parent sends control request to worker, 1058213007Spjd * but worker has no control thread started yet, so parent waits. 1059213007Spjd * In the meantime worker sends an event to the parent, but parent 1060213007Spjd * is unable to handle the event, because it waits for control 1061213007Spjd * request response. 
1062213007Spjd */ 1063213007Spjd error = pthread_create(&td, NULL, ctrl_thread, res); 1064218138Spjd PJDLOG_ASSERT(error == 0); 1065220898Spjd if (real_remote(res)) { 1066220898Spjd error = init_remote(res, NULL, NULL); 1067220898Spjd if (error == 0) { 1068220898Spjd sync_start(); 1069220898Spjd } else if (error == EBUSY) { 1070220898Spjd time_t start = time(NULL); 1071220898Spjd 1072220898Spjd pjdlog_warning("Waiting for remote node to become %s for %ds.", 1073220898Spjd role2str(HAST_ROLE_SECONDARY), 1074220898Spjd res->hr_timeout); 1075220898Spjd for (;;) { 1076220898Spjd sleep(1); 1077220898Spjd error = init_remote(res, NULL, NULL); 1078220898Spjd if (error != EBUSY) 1079220898Spjd break; 1080220898Spjd if (time(NULL) > start + res->hr_timeout) 1081220898Spjd break; 1082220898Spjd } 1083220898Spjd if (error == EBUSY) { 1084220898Spjd pjdlog_warning("Remote node is still %s, starting anyway.", 1085220898Spjd role2str(HAST_ROLE_PRIMARY)); 1086220898Spjd } 1087220898Spjd } 1088220898Spjd } 1089204076Spjd error = pthread_create(&td, NULL, ggate_recv_thread, res); 1090218138Spjd PJDLOG_ASSERT(error == 0); 1091204076Spjd error = pthread_create(&td, NULL, local_send_thread, res); 1092218138Spjd PJDLOG_ASSERT(error == 0); 1093204076Spjd error = pthread_create(&td, NULL, remote_send_thread, res); 1094218138Spjd PJDLOG_ASSERT(error == 0); 1095204076Spjd error = pthread_create(&td, NULL, remote_recv_thread, res); 1096218138Spjd PJDLOG_ASSERT(error == 0); 1097204076Spjd error = pthread_create(&td, NULL, ggate_send_thread, res); 1098218138Spjd PJDLOG_ASSERT(error == 0); 1099220898Spjd fullystarted = true; 1100213530Spjd (void)sync_thread(res); 1101204076Spjd} 1102204076Spjd 1103204076Spjdstatic void 1104246922Spjdreqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio, 1105246922Spjd const char *fmt, ...) 
1106204076Spjd{ 1107204076Spjd char msg[1024]; 1108204076Spjd va_list ap; 1109204076Spjd 1110204076Spjd va_start(ap, fmt); 1111236507Spjd (void)vsnprintf(msg, sizeof(msg), fmt, ap); 1112204076Spjd va_end(ap); 1113236507Spjd switch (ggio->gctl_cmd) { 1114236507Spjd case BIO_READ: 1115236507Spjd (void)snprlcat(msg, sizeof(msg), "READ(%ju, %ju).", 1116246922Spjd (uintmax_t)ggio->gctl_offset, (uintmax_t)ggio->gctl_length); 1117236507Spjd break; 1118236507Spjd case BIO_DELETE: 1119236507Spjd (void)snprlcat(msg, sizeof(msg), "DELETE(%ju, %ju).", 1120246922Spjd (uintmax_t)ggio->gctl_offset, (uintmax_t)ggio->gctl_length); 1121236507Spjd break; 1122236507Spjd case BIO_FLUSH: 1123236507Spjd (void)snprlcat(msg, sizeof(msg), "FLUSH."); 1124236507Spjd break; 1125236507Spjd case BIO_WRITE: 1126236507Spjd (void)snprlcat(msg, sizeof(msg), "WRITE(%ju, %ju).", 1127246922Spjd (uintmax_t)ggio->gctl_offset, (uintmax_t)ggio->gctl_length); 1128236507Spjd break; 1129236507Spjd default: 1130236507Spjd (void)snprlcat(msg, sizeof(msg), "UNKNOWN(%u).", 1131236507Spjd (unsigned int)ggio->gctl_cmd); 1132236507Spjd break; 1133204076Spjd } 1134204076Spjd pjdlog_common(loglevel, debuglevel, -1, "%s", msg); 1135204076Spjd} 1136204076Spjd 1137204076Spjdstatic void 1138204076Spjdremote_close(struct hast_resource *res, int ncomp) 1139204076Spjd{ 1140204076Spjd 1141204076Spjd rw_wlock(&hio_remote_lock[ncomp]); 1142204076Spjd /* 1143226855Spjd * Check for a race between dropping rlock and acquiring wlock - 1144204076Spjd * another thread can close connection in-between. 
1145204076Spjd */ 1146204076Spjd if (!ISCONNECTED(res, ncomp)) { 1147218138Spjd PJDLOG_ASSERT(res->hr_remotein == NULL); 1148218138Spjd PJDLOG_ASSERT(res->hr_remoteout == NULL); 1149204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1150204076Spjd return; 1151204076Spjd } 1152204076Spjd 1153218138Spjd PJDLOG_ASSERT(res->hr_remotein != NULL); 1154218138Spjd PJDLOG_ASSERT(res->hr_remoteout != NULL); 1155204076Spjd 1156211881Spjd pjdlog_debug(2, "Closing incoming connection to %s.", 1157204076Spjd res->hr_remoteaddr); 1158204076Spjd proto_close(res->hr_remotein); 1159204076Spjd res->hr_remotein = NULL; 1160211881Spjd pjdlog_debug(2, "Closing outgoing connection to %s.", 1161204076Spjd res->hr_remoteaddr); 1162204076Spjd proto_close(res->hr_remoteout); 1163204076Spjd res->hr_remoteout = NULL; 1164204076Spjd 1165204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1166204076Spjd 1167211881Spjd pjdlog_warning("Disconnected from %s.", res->hr_remoteaddr); 1168211881Spjd 1169204076Spjd /* 1170204076Spjd * Stop synchronization if in-progress. 1171204076Spjd */ 1172211878Spjd sync_stop(); 1173211984Spjd 1174212038Spjd event_send(res, EVENT_DISCONNECT); 1175204076Spjd} 1176204076Spjd 1177204076Spjd/* 1178226859Spjd * Acknowledge write completion to the kernel, but don't update activemap yet. 1179226859Spjd */ 1180226859Spjdstatic void 1181226859Spjdwrite_complete(struct hast_resource *res, struct hio *hio) 1182226859Spjd{ 1183226859Spjd struct g_gate_ctl_io *ggio; 1184226859Spjd unsigned int ncomp; 1185226859Spjd 1186226859Spjd PJDLOG_ASSERT(!hio->hio_done); 1187226859Spjd 1188226859Spjd ggio = &hio->hio_ggio; 1189226859Spjd PJDLOG_ASSERT(ggio->gctl_cmd == BIO_WRITE); 1190226859Spjd 1191226859Spjd /* 1192226859Spjd * Bump local count if this is first write after 1193226859Spjd * connection failure with remote node. 
1194226859Spjd */ 1195226859Spjd ncomp = 1; 1196226859Spjd rw_rlock(&hio_remote_lock[ncomp]); 1197226859Spjd if (!ISCONNECTED(res, ncomp)) { 1198226859Spjd mtx_lock(&metadata_lock); 1199226859Spjd if (res->hr_primary_localcnt == res->hr_secondary_remotecnt) { 1200226859Spjd res->hr_primary_localcnt++; 1201226859Spjd pjdlog_debug(1, "Increasing localcnt to %ju.", 1202226859Spjd (uintmax_t)res->hr_primary_localcnt); 1203226859Spjd (void)metadata_write(res); 1204226859Spjd } 1205226859Spjd mtx_unlock(&metadata_lock); 1206226859Spjd } 1207226859Spjd rw_unlock(&hio_remote_lock[ncomp]); 1208229945Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) == -1) 1209226859Spjd primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed"); 1210226859Spjd hio->hio_done = true; 1211226859Spjd} 1212226859Spjd 1213226859Spjd/* 1214204076Spjd * Thread receives ggate I/O requests from the kernel and passes them to 1215204076Spjd * appropriate threads: 1216204076Spjd * WRITE - always goes to both local_send and remote_send threads 1217204076Spjd * READ (when the block is up-to-date on local component) - 1218204076Spjd * only local_send thread 1219204076Spjd * READ (when the block isn't up-to-date on local component) - 1220204076Spjd * only remote_send thread 1221204076Spjd * DELETE - always goes to both local_send and remote_send threads 1222204076Spjd * FLUSH - always goes to both local_send and remote_send threads 1223204076Spjd */ 1224204076Spjdstatic void * 1225204076Spjdggate_recv_thread(void *arg) 1226204076Spjd{ 1227204076Spjd struct hast_resource *res = arg; 1228204076Spjd struct g_gate_ctl_io *ggio; 1229204076Spjd struct hio *hio; 1230204076Spjd unsigned int ii, ncomp, ncomps; 1231204076Spjd int error; 1232204076Spjd 1233204076Spjd for (;;) { 1234204076Spjd pjdlog_debug(2, "ggate_recv: Taking free request."); 1235204076Spjd QUEUE_TAKE2(hio, free); 1236204076Spjd pjdlog_debug(2, "ggate_recv: (%p) Got free request.", hio); 1237204076Spjd ggio = &hio->hio_ggio; 1238204076Spjd 
ggio->gctl_unit = res->hr_ggateunit; 1239204076Spjd ggio->gctl_length = MAXPHYS; 1240204076Spjd ggio->gctl_error = 0; 1241226859Spjd hio->hio_done = false; 1242226859Spjd hio->hio_replication = res->hr_replication; 1243204076Spjd pjdlog_debug(2, 1244204076Spjd "ggate_recv: (%p) Waiting for request from the kernel.", 1245204076Spjd hio); 1246229945Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) == -1) { 1247204076Spjd if (sigexit_received) 1248204076Spjd pthread_exit(NULL); 1249204076Spjd primary_exit(EX_OSERR, "G_GATE_CMD_START failed"); 1250204076Spjd } 1251204076Spjd error = ggio->gctl_error; 1252204076Spjd switch (error) { 1253204076Spjd case 0: 1254204076Spjd break; 1255204076Spjd case ECANCELED: 1256204076Spjd /* Exit gracefully. */ 1257204076Spjd if (!sigexit_received) { 1258204076Spjd pjdlog_debug(2, 1259204076Spjd "ggate_recv: (%p) Received cancel from the kernel.", 1260204076Spjd hio); 1261204076Spjd pjdlog_info("Received cancel from the kernel, exiting."); 1262204076Spjd } 1263204076Spjd pthread_exit(NULL); 1264204076Spjd case ENOMEM: 1265204076Spjd /* 1266204076Spjd * Buffer too small? Impossible, we allocate MAXPHYS 1267204076Spjd * bytes - request can't be bigger than that. 1268204076Spjd */ 1269204076Spjd /* FALLTHROUGH */ 1270204076Spjd case ENXIO: 1271204076Spjd default: 1272204076Spjd primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.", 1273204076Spjd strerror(error)); 1274204076Spjd } 1275226859Spjd 1276226859Spjd ncomp = 0; 1277226859Spjd ncomps = HAST_NCOMPONENTS; 1278226859Spjd 1279204076Spjd for (ii = 0; ii < ncomps; ii++) 1280204076Spjd hio->hio_errors[ii] = EINVAL; 1281204076Spjd reqlog(LOG_DEBUG, 2, ggio, 1282204076Spjd "ggate_recv: (%p) Request received from the kernel: ", 1283204076Spjd hio); 1284226859Spjd 1285204076Spjd /* 1286204076Spjd * Inform all components about new write request. 1287204076Spjd * For read request prefer local component unless the given 1288204076Spjd * range is out-of-date, then use remote component. 
1289204076Spjd */ 1290204076Spjd switch (ggio->gctl_cmd) { 1291204076Spjd case BIO_READ: 1292222228Spjd res->hr_stat_read++; 1293226859Spjd ncomps = 1; 1294204076Spjd mtx_lock(&metadata_lock); 1295204076Spjd if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF || 1296204076Spjd res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1297204076Spjd /* 1298204076Spjd * This range is up-to-date on local component, 1299204076Spjd * so handle request locally. 1300204076Spjd */ 1301204076Spjd /* Local component is 0 for now. */ 1302204076Spjd ncomp = 0; 1303204076Spjd } else /* if (res->hr_syncsrc == 1304204076Spjd HAST_SYNCSRC_SECONDARY) */ { 1305218138Spjd PJDLOG_ASSERT(res->hr_syncsrc == 1306204076Spjd HAST_SYNCSRC_SECONDARY); 1307204076Spjd /* 1308204076Spjd * This range is out-of-date on local component, 1309204076Spjd * so send request to the remote node. 1310204076Spjd */ 1311204076Spjd /* Remote component is 1 for now. */ 1312204076Spjd ncomp = 1; 1313204076Spjd } 1314204076Spjd mtx_unlock(&metadata_lock); 1315204076Spjd break; 1316204076Spjd case BIO_WRITE: 1317222228Spjd res->hr_stat_write++; 1318226851Spjd if (res->hr_resuid == 0 && 1319226851Spjd res->hr_primary_localcnt == 0) { 1320226851Spjd /* This is first write. 
*/ 1321219844Spjd res->hr_primary_localcnt = 1; 1322214284Spjd } 1323204076Spjd for (;;) { 1324204076Spjd mtx_lock(&range_lock); 1325204076Spjd if (rangelock_islocked(range_sync, 1326204076Spjd ggio->gctl_offset, ggio->gctl_length)) { 1327204076Spjd pjdlog_debug(2, 1328204076Spjd "regular: Range offset=%jd length=%zu locked.", 1329204076Spjd (intmax_t)ggio->gctl_offset, 1330204076Spjd (size_t)ggio->gctl_length); 1331204076Spjd range_regular_wait = true; 1332204076Spjd cv_wait(&range_regular_cond, &range_lock); 1333204076Spjd range_regular_wait = false; 1334204076Spjd mtx_unlock(&range_lock); 1335204076Spjd continue; 1336204076Spjd } 1337204076Spjd if (rangelock_add(range_regular, 1338229945Spjd ggio->gctl_offset, ggio->gctl_length) == -1) { 1339204076Spjd mtx_unlock(&range_lock); 1340204076Spjd pjdlog_debug(2, 1341204076Spjd "regular: Range offset=%jd length=%zu is already locked, waiting.", 1342204076Spjd (intmax_t)ggio->gctl_offset, 1343204076Spjd (size_t)ggio->gctl_length); 1344204076Spjd sleep(1); 1345204076Spjd continue; 1346204076Spjd } 1347204076Spjd mtx_unlock(&range_lock); 1348204076Spjd break; 1349204076Spjd } 1350204076Spjd mtx_lock(&res->hr_amp_lock); 1351204076Spjd if (activemap_write_start(res->hr_amp, 1352204076Spjd ggio->gctl_offset, ggio->gctl_length)) { 1353222228Spjd res->hr_stat_activemap_update++; 1354204076Spjd (void)hast_activemap_flush(res); 1355255716Strociny } else { 1356255716Strociny mtx_unlock(&res->hr_amp_lock); 1357204076Spjd } 1358259192Strociny if (ISMEMSYNC(hio)) { 1359259191Strociny hio->hio_memsyncacked = false; 1360259191Strociny refcnt_init(&hio->hio_writecount, ncomps); 1361259191Strociny } 1362226859Spjd break; 1363204076Spjd case BIO_DELETE: 1364226859Spjd res->hr_stat_delete++; 1365226859Spjd break; 1366204076Spjd case BIO_FLUSH: 1367226859Spjd res->hr_stat_flush++; 1368204076Spjd break; 1369204076Spjd } 1370226859Spjd pjdlog_debug(2, 1371226859Spjd "ggate_recv: (%p) Moving request to the send queues.", hio); 
1372259191Strociny refcnt_init(&hio->hio_countdown, ncomps); 1373246922Spjd for (ii = ncomp; ii < ncomps; ii++) 1374226859Spjd QUEUE_INSERT1(hio, send, ii); 1375204076Spjd } 1376204076Spjd /* NOTREACHED */ 1377204076Spjd return (NULL); 1378204076Spjd} 1379204076Spjd 1380204076Spjd/* 1381204076Spjd * Thread reads from or writes to local component. 1382204076Spjd * If local read fails, it redirects it to remote_send thread. 1383204076Spjd */ 1384204076Spjdstatic void * 1385204076Spjdlocal_send_thread(void *arg) 1386204076Spjd{ 1387204076Spjd struct hast_resource *res = arg; 1388204076Spjd struct g_gate_ctl_io *ggio; 1389204076Spjd struct hio *hio; 1390204076Spjd unsigned int ncomp, rncomp; 1391204076Spjd ssize_t ret; 1392204076Spjd 1393204076Spjd /* Local component is 0 for now. */ 1394204076Spjd ncomp = 0; 1395204076Spjd /* Remote component is 1 for now. */ 1396204076Spjd rncomp = 1; 1397204076Spjd 1398204076Spjd for (;;) { 1399204076Spjd pjdlog_debug(2, "local_send: Taking request."); 1400214692Spjd QUEUE_TAKE1(hio, send, ncomp, 0); 1401204076Spjd pjdlog_debug(2, "local_send: (%p) Got request.", hio); 1402204076Spjd ggio = &hio->hio_ggio; 1403204076Spjd switch (ggio->gctl_cmd) { 1404204076Spjd case BIO_READ: 1405204076Spjd ret = pread(res->hr_localfd, ggio->gctl_data, 1406204076Spjd ggio->gctl_length, 1407204076Spjd ggio->gctl_offset + res->hr_localoff); 1408204076Spjd if (ret == ggio->gctl_length) 1409204076Spjd hio->hio_errors[ncomp] = 0; 1410222467Strociny else if (!ISSYNCREQ(hio)) { 1411204076Spjd /* 1412204076Spjd * If READ failed, try to read from remote node. 1413204076Spjd */ 1414229945Spjd if (ret == -1) { 1415216479Spjd reqlog(LOG_WARNING, 0, ggio, 1416216479Spjd "Local request failed (%s), trying remote node. ", 1417216479Spjd strerror(errno)); 1418216479Spjd } else if (ret != ggio->gctl_length) { 1419216479Spjd reqlog(LOG_WARNING, 0, ggio, 1420216479Spjd "Local request failed (%zd != %jd), trying remote node. 
", 1421216494Spjd ret, (intmax_t)ggio->gctl_length); 1422216479Spjd } 1423204076Spjd QUEUE_INSERT1(hio, send, rncomp); 1424204076Spjd continue; 1425204076Spjd } 1426204076Spjd break; 1427204076Spjd case BIO_WRITE: 1428204076Spjd ret = pwrite(res->hr_localfd, ggio->gctl_data, 1429204076Spjd ggio->gctl_length, 1430204076Spjd ggio->gctl_offset + res->hr_localoff); 1431229945Spjd if (ret == -1) { 1432204076Spjd hio->hio_errors[ncomp] = errno; 1433216479Spjd reqlog(LOG_WARNING, 0, ggio, 1434216479Spjd "Local request failed (%s): ", 1435216479Spjd strerror(errno)); 1436216479Spjd } else if (ret != ggio->gctl_length) { 1437204076Spjd hio->hio_errors[ncomp] = EIO; 1438216479Spjd reqlog(LOG_WARNING, 0, ggio, 1439216479Spjd "Local request failed (%zd != %jd): ", 1440216494Spjd ret, (intmax_t)ggio->gctl_length); 1441216479Spjd } else { 1442204076Spjd hio->hio_errors[ncomp] = 0; 1443259192Strociny if (ISASYNC(hio)) { 1444226859Spjd ggio->gctl_error = 0; 1445226859Spjd write_complete(res, hio); 1446226859Spjd } 1447216479Spjd } 1448204076Spjd break; 1449204076Spjd case BIO_DELETE: 1450204076Spjd ret = g_delete(res->hr_localfd, 1451204076Spjd ggio->gctl_offset + res->hr_localoff, 1452204076Spjd ggio->gctl_length); 1453229945Spjd if (ret == -1) { 1454204076Spjd hio->hio_errors[ncomp] = errno; 1455216479Spjd reqlog(LOG_WARNING, 0, ggio, 1456216479Spjd "Local request failed (%s): ", 1457216479Spjd strerror(errno)); 1458216479Spjd } else { 1459204076Spjd hio->hio_errors[ncomp] = 0; 1460216479Spjd } 1461204076Spjd break; 1462204076Spjd case BIO_FLUSH: 1463225832Spjd if (!res->hr_localflush) { 1464225832Spjd ret = -1; 1465225832Spjd errno = EOPNOTSUPP; 1466225832Spjd break; 1467225832Spjd } 1468204076Spjd ret = g_flush(res->hr_localfd); 1469229945Spjd if (ret == -1) { 1470225832Spjd if (errno == EOPNOTSUPP) 1471225832Spjd res->hr_localflush = false; 1472204076Spjd hio->hio_errors[ncomp] = errno; 1473216479Spjd reqlog(LOG_WARNING, 0, ggio, 1474216479Spjd "Local request failed (%s): ", 
1475216479Spjd strerror(errno)); 1476216479Spjd } else { 1477204076Spjd hio->hio_errors[ncomp] = 0; 1478216479Spjd } 1479204076Spjd break; 1480204076Spjd } 1481259191Strociny if (ISMEMSYNCWRITE(hio)) { 1482259191Strociny if (refcnt_release(&hio->hio_writecount) == 0) { 1483259191Strociny write_complete(res, hio); 1484246922Spjd } 1485246922Spjd } 1486259191Strociny if (refcnt_release(&hio->hio_countdown) > 0) 1487259191Strociny continue; 1488226856Spjd if (ISSYNCREQ(hio)) { 1489226856Spjd mtx_lock(&sync_lock); 1490226856Spjd SYNCREQDONE(hio); 1491226856Spjd mtx_unlock(&sync_lock); 1492226856Spjd cv_signal(&sync_cond); 1493226856Spjd } else { 1494226856Spjd pjdlog_debug(2, 1495226856Spjd "local_send: (%p) Moving request to the done queue.", 1496226856Spjd hio); 1497226856Spjd QUEUE_INSERT2(hio, done); 1498204076Spjd } 1499204076Spjd } 1500204076Spjd /* NOTREACHED */ 1501204076Spjd return (NULL); 1502204076Spjd} 1503204076Spjd 1504214692Spjdstatic void 1505214692Spjdkeepalive_send(struct hast_resource *res, unsigned int ncomp) 1506214692Spjd{ 1507214692Spjd struct nv *nv; 1508214692Spjd 1509218217Spjd rw_rlock(&hio_remote_lock[ncomp]); 1510218217Spjd 1511218217Spjd if (!ISCONNECTED(res, ncomp)) { 1512218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1513214692Spjd return; 1514218217Spjd } 1515219864Spjd 1516218138Spjd PJDLOG_ASSERT(res->hr_remotein != NULL); 1517218138Spjd PJDLOG_ASSERT(res->hr_remoteout != NULL); 1518214692Spjd 1519214692Spjd nv = nv_alloc(); 1520214692Spjd nv_add_uint8(nv, HIO_KEEPALIVE, "cmd"); 1521214692Spjd if (nv_error(nv) != 0) { 1522218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1523214692Spjd nv_free(nv); 1524214692Spjd pjdlog_debug(1, 1525214692Spjd "keepalive_send: Unable to prepare header to send."); 1526214692Spjd return; 1527214692Spjd } 1528229945Spjd if (hast_proto_send(res, res->hr_remoteout, nv, NULL, 0) == -1) { 1529218217Spjd rw_unlock(&hio_remote_lock[ncomp]); 1530214692Spjd pjdlog_common(LOG_DEBUG, 1, errno, 1531214692Spjd 
	    "keepalive_send: Unable to send request");
		nv_free(nv);
		remote_close(res, ncomp);
		return;
	}

	rw_unlock(&hio_remote_lock[ncomp]);
	nv_free(nv);
	pjdlog_debug(2, "keepalive_send: Request sent.");
}

/*
 * Thread sends request to secondary node.
 *
 * Takes requests destined for the remote component off the per-component
 * send queue, serializes a header (cmd/seq/offset/length) into an nv list
 * and ships it over hr_remoteout.  When the timed dequeue returns NULL
 * (idle), a keepalive is sent at most once per HAST_KEEPALIVE seconds.
 */
static void *
remote_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	time_t lastcheck, now;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	bool wakeup;
	uint64_t offset, length;
	uint8_t cmd;
	void *data;

	/* Remote component is 1 for now. */
	ncomp = 1;
	lastcheck = time(NULL);

	for (;;) {
		pjdlog_debug(2, "remote_send: Taking request.");
		QUEUE_TAKE1(hio, send, ncomp, HAST_KEEPALIVE);
		if (hio == NULL) {
			/*
			 * Presumably the timed take expired with nothing
			 * queued; use the idle period to keep the
			 * connection alive.
			 */
			now = time(NULL);
			if (lastcheck + HAST_KEEPALIVE <= now) {
				keepalive_send(res, ncomp);
				lastcheck = now;
			}
			continue;
		}
		pjdlog_debug(2, "remote_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		/* Translate the GEOM gate command into a HAST protocol cmd. */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			cmd = HIO_READ;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_WRITE:
			cmd = HIO_WRITE;
			data = ggio->gctl_data;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_DELETE:
			cmd = HIO_DELETE;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_FLUSH:
			cmd = HIO_FLUSH;
			data = NULL;
			offset = 0;
			length = 0;
			break;
		default:
			PJDLOG_ABORT("invalid condition");
		}
		nv = nv_alloc();
		nv_add_uint8(nv, cmd, "cmd");
		nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq");
		nv_add_uint64(nv, offset, "offset");
		nv_add_uint64(nv, length, "length");
		/*
		 * Tag memsync writes so the secondary can send an early
		 * "received" acknowledgement (consumed by remote_recv_thread).
		 */
		if (ISMEMSYNCWRITE(hio))
			nv_add_uint8(nv, 1, "memsync");
		if (nv_error(nv) != 0) {
			hio->hio_errors[ncomp] = nv_error(nv);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to prepare header to send.",
			    hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to prepare header to send (%s): ",
			    strerror(nv_error(nv)));
			/* Move failed request immediately to the done queue. */
			goto done_queue;
		}
		/*
		 * Protect connection from disappearing.
		 */
		rw_rlock(&hio_remote_lock[ncomp]);
		if (!ISCONNECTED(res, ncomp)) {
			rw_unlock(&hio_remote_lock[ncomp]);
			hio->hio_errors[ncomp] = ENOTCONN;
			goto done_queue;
		}
		/*
		 * Move the request to recv queue before sending it, because
		 * in different order we can get reply before we move request
		 * to recv queue.
		 */
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the recv queue.",
		    hio);
		mtx_lock(&hio_recv_list_lock[ncomp]);
		wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]);
		TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
		hio_recv_list_size[ncomp]++;
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		if (hast_proto_send(res, res->hr_remoteout, nv, data,
		    data != NULL ? length : 0) == -1) {
			hio->hio_errors[ncomp] = errno;
			rw_unlock(&hio_remote_lock[ncomp]);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to send request.", hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to send request (%s): ",
			    strerror(hio->hio_errors[ncomp]));
			remote_close(res, ncomp);
		} else {
			rw_unlock(&hio_remote_lock[ncomp]);
		}
		nv_free(nv);
		/* Signal only when the list transitioned from empty. */
		if (wakeup)
			cv_signal(&hio_recv_list_cond[ncomp]);
		continue;
done_queue:
		nv_free(nv);
		if (ISSYNCREQ(hio)) {
			/* A positive count means another component still owns it. */
			if (refcnt_release(&hio->hio_countdown) > 0)
				continue;
			mtx_lock(&sync_lock);
			SYNCREQDONE(hio);
			mtx_unlock(&sync_lock);
			cv_signal(&sync_cond);
			continue;
		}
		if (ggio->gctl_cmd == BIO_WRITE) {
			mtx_lock(&res->hr_amp_lock);
			/*
			 * NOTE(review): on the flush path hr_amp_lock appears
			 * to be released inside hast_activemap_flush() —
			 * confirm before touching this locking pattern.
			 */
			if (activemap_need_sync(res->hr_amp, ggio->gctl_offset,
			    ggio->gctl_length)) {
				(void)hast_activemap_flush(res);
			} else {
				mtx_unlock(&res->hr_amp_lock);
			}
			if (ISMEMSYNCWRITE(hio)) {
				if (refcnt_release(&hio->hio_writecount) == 0) {
					if (hio->hio_errors[0] == 0)
						write_complete(res, hio);
				}
			}
		}
		/*
		 * hio_countdown counts components still working on the
		 * request (see struct hio); the last one to release moves
		 * the request to the done queue.
		 */
		if (refcnt_release(&hio->hio_countdown) > 0)
			continue;
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the done queue.",
		    hio);
		QUEUE_INSERT2(hio, done);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread receives answer from secondary node and passes it to ggate_send
 * thread.
 */
static void *
remote_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	uint64_t seq;
	bool memsyncack;
	int error;

	/* Remote component is 1 for now. */
	ncomp = 1;

	for (;;) {
		/* Wait until there is anything to receive. */
		mtx_lock(&hio_recv_list_lock[ncomp]);
		while (TAILQ_EMPTY(&hio_recv_list[ncomp])) {
			pjdlog_debug(2, "remote_recv: No requests, waiting.");
			cv_wait(&hio_recv_list_cond[ncomp],
			    &hio_recv_list_lock[ncomp]);
		}
		mtx_unlock(&hio_recv_list_lock[ncomp]);

		memsyncack = false;

		rw_rlock(&hio_remote_lock[ncomp]);
		if (!ISCONNECTED(res, ncomp)) {
			rw_unlock(&hio_remote_lock[ncomp]);
			/*
			 * Connection is dead, so move all pending requests to
			 * the done queue (one-by-one).
			 */
			mtx_lock(&hio_recv_list_lock[ncomp]);
			hio = TAILQ_FIRST(&hio_recv_list[ncomp]);
			PJDLOG_ASSERT(hio != NULL);
			TAILQ_REMOVE(&hio_recv_list[ncomp], hio,
			    hio_next[ncomp]);
			hio_recv_list_size[ncomp]--;
			mtx_unlock(&hio_recv_list_lock[ncomp]);
			hio->hio_errors[ncomp] = ENOTCONN;
			goto done_queue;
		}
		if (hast_proto_recv_hdr(res->hr_remotein, &nv) == -1) {
			pjdlog_errno(LOG_ERR,
			    "Unable to receive reply header");
			rw_unlock(&hio_remote_lock[ncomp]);
			remote_close(res, ncomp);
			continue;
		}
		rw_unlock(&hio_remote_lock[ncomp]);
		seq = nv_get_uint64(nv, "seq");
		if (seq == 0) {
			pjdlog_error("Header contains no 'seq' field.");
			nv_free(nv);
			continue;
		}
		/* "received" marks an early memsync ack, not the final reply. */
		memsyncack = nv_exists(nv, "received");
		/* Match the reply to a pending request by sequence number. */
		mtx_lock(&hio_recv_list_lock[ncomp]);
		TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) {
			if (hio->hio_ggio.gctl_seq == seq) {
				TAILQ_REMOVE(&hio_recv_list[ncomp], hio,
				    hio_next[ncomp]);
				hio_recv_list_size[ncomp]--;
				break;
			}
		}
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		if (hio == NULL) {
			pjdlog_error("Found no request matching received 'seq' field (%ju).",
			    (uintmax_t)seq);
			nv_free(nv);
			continue;
		}
		ggio = &hio->hio_ggio;
		error = nv_get_int16(nv, "error");
		if (error != 0) {
			/* Request failed on remote side. */
			hio->hio_errors[ncomp] = error;
			reqlog(LOG_WARNING, 0, ggio,
			    "Remote request failed (%s): ", strerror(error));
			nv_free(nv);
			goto done_queue;
		}
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			rw_rlock(&hio_remote_lock[ncomp]);
			if (!ISCONNECTED(res, ncomp)) {
				rw_unlock(&hio_remote_lock[ncomp]);
				nv_free(nv);
				goto done_queue;
			}
			if (hast_proto_recv_data(res, res->hr_remotein, nv,
			    ggio->gctl_data, ggio->gctl_length) == -1) {
				hio->hio_errors[ncomp] = errno;
				pjdlog_errno(LOG_ERR,
				    "Unable to receive reply data");
				rw_unlock(&hio_remote_lock[ncomp]);
				nv_free(nv);
				remote_close(res, ncomp);
				goto done_queue;
			}
			rw_unlock(&hio_remote_lock[ncomp]);
			break;
		case BIO_WRITE:
		case BIO_DELETE:
		case BIO_FLUSH:
			/* No payload to receive for these replies. */
			break;
		default:
			PJDLOG_ABORT("invalid condition");
		}
		hio->hio_errors[ncomp] = 0;
		nv_free(nv);
done_queue:
		if (ISMEMSYNCWRITE(hio)) {
			if (!hio->hio_memsyncacked) {
				PJDLOG_ASSERT(memsyncack ||
				    hio->hio_errors[ncomp] != 0);
				/* Remote ack arrived. */
				if (refcnt_release(&hio->hio_writecount) == 0) {
					if (hio->hio_errors[0] == 0)
						write_complete(res, hio);
				}
				hio->hio_memsyncacked = true;
				if (hio->hio_errors[ncomp] == 0) {
					/*
					 * The final reply is still outstanding,
					 * so requeue the request and keep
					 * waiting for it.
					 */
					pjdlog_debug(2,
					    "remote_recv: (%p) Moving request "
					    "back to the recv queue.", hio);
					mtx_lock(&hio_recv_list_lock[ncomp]);
					TAILQ_INSERT_TAIL(&hio_recv_list[ncomp],
					    hio, hio_next[ncomp]);
					hio_recv_list_size[ncomp]++;
					mtx_unlock(&hio_recv_list_lock[ncomp]);
					continue;
				}
			} else {
				PJDLOG_ASSERT(!memsyncack);
				/* Remote final reply arrived. */
			}
		}
		/* Last component to finish hands the request over. */
		if (refcnt_release(&hio->hio_countdown) > 0)
			continue;
		if (ISSYNCREQ(hio)) {
			mtx_lock(&sync_lock);
			SYNCREQDONE(hio);
			mtx_unlock(&sync_lock);
			cv_signal(&sync_cond);
		} else {
			pjdlog_debug(2,
			    "remote_recv: (%p) Moving request to the done queue.",
			    hio);
			QUEUE_INSERT2(hio, done);
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread sends answer to the kernel.
 */
static void *
ggate_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomps;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		pjdlog_debug(2, "ggate_send: Taking request.");
		QUEUE_TAKE2(hio, done);
		pjdlog_debug(2, "ggate_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		for (ii = 0; ii < ncomps; ii++) {
			if (hio->hio_errors[ii] == 0) {
				/*
				 * One successful request is enough to declare
				 * success.
				 */
				ggio->gctl_error = 0;
				break;
			}
		}
		if (ii == ncomps) {
			/*
			 * None of the requests were successful.
			 * Use the error from local component except the
			 * case when we did only remote request.
			 */
			if (ggio->gctl_cmd == BIO_READ &&
			    res->hr_syncsrc == HAST_SYNCSRC_SECONDARY)
				ggio->gctl_error = hio->hio_errors[1];
			else
				ggio->gctl_error = hio->hio_errors[0];
		}
		if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) {
			mtx_lock(&res->hr_amp_lock);
			/*
			 * NOTE(review): only the else-branch unlocks here;
			 * hast_activemap_flush() presumably releases
			 * hr_amp_lock itself — confirm before changing.
			 */
			if (activemap_write_complete(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length)) {
				res->hr_stat_activemap_update++;
				(void)hast_activemap_flush(res);
			} else {
				mtx_unlock(&res->hr_amp_lock);
			}
		}
		if (ggio->gctl_cmd == BIO_WRITE) {
			/*
			 * Unlock range we locked.
			 */
			mtx_lock(&range_lock);
			rangelock_del(range_regular, ggio->gctl_offset,
			    ggio->gctl_length);
			/* Wake sync_thread if it is waiting for this range. */
			if (range_sync_wait)
				cv_signal(&range_sync_cond);
			mtx_unlock(&range_lock);
			/* Complete the write unless it was completed earlier. */
			if (!hio->hio_done)
				write_complete(res, hio);
		} else {
			/* Non-write requests are acknowledged directly. */
			if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) == -1) {
				primary_exit(EX_OSERR,
				    "G_GATE_CMD_DONE failed");
			}
		}
		/* Account local-component failures in per-command stats. */
		if (hio->hio_errors[0]) {
			switch (ggio->gctl_cmd) {
			case BIO_READ:
				res->hr_stat_read_error++;
				break;
			case BIO_WRITE:
				res->hr_stat_write_error++;
				break;
			case BIO_DELETE:
				res->hr_stat_delete_error++;
				break;
			case BIO_FLUSH:
				res->hr_stat_flush_error++;
				break;
			}
		}
		pjdlog_debug(2,
		    "ggate_send: (%p) Moving request to the free queue.", hio);
		QUEUE_INSERT2(hio, free);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread synchronize local and remote components.
 */
static void *
sync_thread(void *arg __unused)
{
	struct hast_resource *res = arg;
	struct hio *hio;
	struct g_gate_ctl_io *ggio;
	struct timeval tstart, tend, tdiff;
	unsigned int ii, ncomp, ncomps;
	off_t offset, length, synced;
	bool dorewind, directreads;
	int syncext;

	ncomps = HAST_NCOMPONENTS;
	dorewind = true;
	synced = 0;
	/* offset == -1 means "no extent to synchronize". */
	offset = -1;
	directreads = false;

	for (;;) {
		mtx_lock(&sync_lock);
		/* Report an interrupted (not finished) synchronization run. */
		if (offset >= 0 && !sync_inprogress) {
			gettimeofday(&tend, NULL);
			timersub(&tend, &tstart, &tdiff);
			pjdlog_info("Synchronization interrupted after %#.0T. "
			    "%NB synchronized so far.", &tdiff,
			    (intmax_t)synced);
			event_send(res, EVENT_SYNCINTR);
		}
		while (!sync_inprogress) {
			dorewind = true;
			synced = 0;
			cv_wait(&sync_cond, &sync_lock);
		}
		mtx_unlock(&sync_lock);
		/*
		 * Obtain offset at which we should synchronize.
		 * Rewind synchronization if needed.
		 */
		mtx_lock(&res->hr_amp_lock);
		if (dorewind)
			activemap_sync_rewind(res->hr_amp);
		offset = activemap_sync_offset(res->hr_amp, &length, &syncext);
		if (syncext != -1) {
			/*
			 * We synchronized entire syncext extent, we can mark
			 * it as clean now.
			 */
			/*
			 * NOTE(review): hast_activemap_flush() appears to drop
			 * hr_amp_lock itself (only the non-flush paths unlock
			 * here) — confirm before restructuring.
			 */
			if (activemap_extent_complete(res->hr_amp, syncext))
				(void)hast_activemap_flush(res);
			else
				mtx_unlock(&res->hr_amp_lock);
		} else {
			mtx_unlock(&res->hr_amp_lock);
		}
		if (dorewind) {
			dorewind = false;
			if (offset == -1)
				pjdlog_info("Nodes are in sync.");
			else {
				pjdlog_info("Synchronization started. %NB to go.",
				    (intmax_t)(res->hr_extentsize *
				    activemap_ndirty(res->hr_amp)));
				event_send(res, EVENT_SYNCSTART);
				gettimeofday(&tstart, NULL);
			}
		}
		if (offset == -1) {
			sync_stop();
			pjdlog_debug(1, "Nothing to synchronize.");
			/*
			 * Synchronization complete, make both localcnt and
			 * remotecnt equal.
			 */
			ncomp = 1;
			rw_rlock(&hio_remote_lock[ncomp]);
			if (ISCONNECTED(res, ncomp)) {
				if (synced > 0) {
					int64_t bps;

					gettimeofday(&tend, NULL);
					timersub(&tend, &tstart, &tdiff);
					bps = (int64_t)((double)synced /
					    ((double)tdiff.tv_sec +
					    (double)tdiff.tv_usec / 1000000));
					pjdlog_info("Synchronization complete. "
					    "%NB synchronized in %#.0lT (%NB/sec).",
					    (intmax_t)synced, &tdiff,
					    (intmax_t)bps);
					event_send(res, EVENT_SYNCDONE);
				}
				mtx_lock(&metadata_lock);
				/* Defer enabling direct reads until locks are dropped. */
				if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY)
					directreads = true;
				res->hr_syncsrc = HAST_SYNCSRC_UNDEF;
				res->hr_primary_localcnt =
				    res->hr_secondary_remotecnt;
				res->hr_primary_remotecnt =
				    res->hr_secondary_localcnt;
				pjdlog_debug(1,
				    "Setting localcnt to %ju and remotecnt to %ju.",
				    (uintmax_t)res->hr_primary_localcnt,
				    (uintmax_t)res->hr_primary_remotecnt);
				(void)metadata_write(res);
				mtx_unlock(&metadata_lock);
			}
			rw_unlock(&hio_remote_lock[ncomp]);
			if (directreads) {
				directreads = false;
				enable_direct_reads(res);
			}
			continue;
		}
		pjdlog_debug(2, "sync: Taking free request.");
		QUEUE_TAKE2(hio, free);
		pjdlog_debug(2, "sync: (%p) Got free request.", hio);
		/*
		 * Lock the range we are going to synchronize. We don't want
		 * race where someone writes between our read and write.
		 */
		for (;;) {
			mtx_lock(&range_lock);
			if (rangelock_islocked(range_regular, offset, length)) {
				pjdlog_debug(2,
				    "sync: Range offset=%jd length=%jd locked.",
				    (intmax_t)offset, (intmax_t)length);
				range_sync_wait = true;
				cv_wait(&range_sync_cond, &range_lock);
				range_sync_wait = false;
				mtx_unlock(&range_lock);
				continue;
			}
			if (rangelock_add(range_sync, offset, length) == -1) {
				mtx_unlock(&range_lock);
				pjdlog_debug(2,
				    "sync: Range offset=%jd length=%jd is already locked, waiting.",
				    (intmax_t)offset, (intmax_t)length);
				sleep(1);
				continue;
			}
			mtx_unlock(&range_lock);
			break;
		}
		/*
		 * First read the data from synchronization source.
		 */
		SYNCREQ(hio);
		ggio = &hio->hio_ggio;
		ggio->gctl_cmd = BIO_READ;
		ggio->gctl_offset = offset;
		ggio->gctl_length = length;
		ggio->gctl_error = 0;
		hio->hio_done = false;
		hio->hio_replication = res->hr_replication;
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ",
		    hio);
		pjdlog_debug(2, "sync: (%p) Moving request to the send queue.",
		    hio);
		mtx_lock(&metadata_lock);
		if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
			/*
			 * This range is up-to-date on local component,
			 * so handle request locally.
			 */
			/* Local component is 0 for now. */
			ncomp = 0;
		} else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ {
			PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY);
			/*
			 * This range is out-of-date on local component,
			 * so send request to the remote node.
			 */
			/* Remote component is 1 for now. */
			ncomp = 1;
		}
		mtx_unlock(&metadata_lock);
		/* Sync requests go to exactly one component. */
		refcnt_init(&hio->hio_countdown, 1);
		QUEUE_INSERT1(hio, send, ncomp);

		/*
		 * Let's wait for READ to finish.
		 */
		mtx_lock(&sync_lock);
		while (!ISSYNCREQDONE(hio))
			cv_wait(&sync_cond, &sync_lock);
		mtx_unlock(&sync_lock);

		if (hio->hio_errors[ncomp] != 0) {
			pjdlog_error("Unable to read synchronization data: %s.",
			    strerror(hio->hio_errors[ncomp]));
			goto free_queue;
		}

		/*
		 * We read the data from synchronization source, now write it
		 * to synchronization target.
		 */
		SYNCREQ(hio);
		ggio->gctl_cmd = BIO_WRITE;
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ",
		    hio);
		pjdlog_debug(2, "sync: (%p) Moving request to the send queue.",
		    hio);
		mtx_lock(&metadata_lock);
		if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
			/*
			 * This range is up-to-date on local component,
			 * so we update remote component.
			 */
			/* Remote component is 1 for now. */
			ncomp = 1;
		} else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ {
			PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY);
			/*
			 * This range is out-of-date on local component,
			 * so we update it.
			 */
			/* Local component is 0 for now. */
			ncomp = 0;
		}
		mtx_unlock(&metadata_lock);

		pjdlog_debug(2, "sync: (%p) Moving request to the send queue.",
		    hio);
		refcnt_init(&hio->hio_countdown, 1);
		QUEUE_INSERT1(hio, send, ncomp);

		/*
		 * Let's wait for WRITE to finish.
		 */
		mtx_lock(&sync_lock);
		while (!ISSYNCREQDONE(hio))
			cv_wait(&sync_cond, &sync_lock);
		mtx_unlock(&sync_lock);

		if (hio->hio_errors[ncomp] != 0) {
			pjdlog_error("Unable to write synchronization data: %s.",
			    strerror(hio->hio_errors[ncomp]));
			goto free_queue;
		}

		synced += length;
free_queue:
		mtx_lock(&range_lock);
		rangelock_del(range_sync, offset, length);
		/* Wake regular writers blocked on this sync range. */
		if (range_regular_wait)
			cv_signal(&range_regular_cond);
		mtx_unlock(&range_lock);
		pjdlog_debug(2, "sync: (%p) Moving request to the free queue.",
		    hio);
		QUEUE_INSERT2(hio, free);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Apply a reloaded configuration (from nv) to the running primary.
 * Collects which knobs changed, adjusts socket timeouts in place when
 * possible, and forces a reconnect when addresses changed.
 */
void
primary_config_reload(struct hast_resource *res, struct nv *nv)
{
	unsigned int ii, ncomps;
	int modified, vint;
	const char *vstr;

	pjdlog_info("Reloading configuration...");

	PJDLOG_ASSERT(res->hr_role == HAST_ROLE_PRIMARY);
	PJDLOG_ASSERT(gres == res);
	nv_assert(nv, "remoteaddr");
	nv_assert(nv, "sourceaddr");
	nv_assert(nv, "replication");
	nv_assert(nv, "checksum");
	nv_assert(nv, "compression");
	nv_assert(nv, "timeout");
	nv_assert(nv, "exec");
	nv_assert(nv, "metaflush");

	ncomps = HAST_NCOMPONENTS;

#define MODIFIED_REMOTEADDR	0x01
#define MODIFIED_SOURCEADDR	0x02
#define MODIFIED_REPLICATION	0x04
#define MODIFIED_CHECKSUM	0x08
#define MODIFIED_COMPRESSION	0x10
#define MODIFIED_TIMEOUT	0x20
#define MODIFIED_EXEC		0x40
#define MODIFIED_METAFLUSH	0x80
	modified = 0;

	vstr = nv_get_string(nv, "remoteaddr");
	if (strcmp(gres->hr_remoteaddr, vstr) != 0) {
		/*
		 * Don't copy res->hr_remoteaddr to gres just yet.
		 * We want remote_close() to log disconnect from the old
		 * addresses, not from the new ones.
		 */
		modified |= MODIFIED_REMOTEADDR;
	}
	vstr = nv_get_string(nv, "sourceaddr");
	if (strcmp(gres->hr_sourceaddr, vstr) != 0) {
		strlcpy(gres->hr_sourceaddr, vstr, sizeof(gres->hr_sourceaddr));
		modified |= MODIFIED_SOURCEADDR;
	}
	vint = nv_get_int32(nv, "replication");
	if (gres->hr_replication != vint) {
		gres->hr_replication = vint;
		modified |= MODIFIED_REPLICATION;
	}
	vint = nv_get_int32(nv, "checksum");
	if (gres->hr_checksum != vint) {
		gres->hr_checksum = vint;
		modified |= MODIFIED_CHECKSUM;
	}
	vint = nv_get_int32(nv, "compression");
	if (gres->hr_compression != vint) {
		gres->hr_compression = vint;
		modified |= MODIFIED_COMPRESSION;
	}
	vint = nv_get_int32(nv, "timeout");
	if (gres->hr_timeout != vint) {
		gres->hr_timeout = vint;
		modified |= MODIFIED_TIMEOUT;
	}
	vstr = nv_get_string(nv, "exec");
	if (strcmp(gres->hr_exec, vstr) != 0) {
		strlcpy(gres->hr_exec, vstr, sizeof(gres->hr_exec));
		modified |= MODIFIED_EXEC;
	}
	vint = nv_get_int32(nv, "metaflush");
	if (gres->hr_metaflush != vint) {
		gres->hr_metaflush = vint;
		modified |= MODIFIED_METAFLUSH;
	}

	/*
	 * Change timeout for connected sockets.
	 * Don't bother if we need to reconnect.
	 */
	if ((modified & MODIFIED_TIMEOUT) != 0 &&
	    (modified & (MODIFIED_REMOTEADDR | MODIFIED_SOURCEADDR)) == 0) {
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			rw_rlock(&hio_remote_lock[ii]);
			if (!ISCONNECTED(gres, ii)) {
				rw_unlock(&hio_remote_lock[ii]);
				continue;
			}
			rw_unlock(&hio_remote_lock[ii]);
			if (proto_timeout(gres->hr_remotein,
			    gres->hr_timeout) == -1) {
				pjdlog_errno(LOG_WARNING,
				    "Unable to set connection timeout");
			}
			if (proto_timeout(gres->hr_remoteout,
			    gres->hr_timeout) == -1) {
				pjdlog_errno(LOG_WARNING,
				    "Unable to set connection timeout");
			}
		}
	}
	/* Address changes require dropping the connections; guard_thread reconnects. */
	if ((modified & (MODIFIED_REMOTEADDR | MODIFIED_SOURCEADDR)) != 0) {
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			remote_close(gres, ii);
		}
		if (modified & MODIFIED_REMOTEADDR) {
			vstr = nv_get_string(nv, "remoteaddr");
			strlcpy(gres->hr_remoteaddr, vstr,
			    sizeof(gres->hr_remoteaddr));
		}
	}
#undef	MODIFIED_REMOTEADDR
#undef	MODIFIED_SOURCEADDR
#undef	MODIFIED_REPLICATION
#undef	MODIFIED_CHECKSUM
#undef	MODIFIED_COMPRESSION
#undef	MODIFIED_TIMEOUT
#undef	MODIFIED_EXEC
#undef	MODIFIED_METAFLUSH

	pjdlog_info("Configuration reloaded successfully.");
}

/*
 * Check one component's connection and reconnect if it is a remote
 * component that went down.
 */
static void
guard_one(struct hast_resource *res, unsigned int ncomp)
{
	struct proto_conn
 *in, *out;

	if (!ISREMOTE(ncomp))
		return;

	rw_rlock(&hio_remote_lock[ncomp]);

	if (!real_remote(res)) {
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	if (ISCONNECTED(res, ncomp)) {
		PJDLOG_ASSERT(res->hr_remotein != NULL);
		PJDLOG_ASSERT(res->hr_remoteout != NULL);
		rw_unlock(&hio_remote_lock[ncomp]);
		pjdlog_debug(2, "remote_guard: Connection to %s is ok.",
		    res->hr_remoteaddr);
		return;
	}

	PJDLOG_ASSERT(res->hr_remotein == NULL);
	PJDLOG_ASSERT(res->hr_remoteout == NULL);
	/*
	 * Upgrade the lock. It doesn't have to be atomic as no other thread
	 * can change connection status from disconnected to connected.
	 */
	rw_unlock(&hio_remote_lock[ncomp]);
	pjdlog_debug(2, "remote_guard: Reconnecting to %s.",
	    res->hr_remoteaddr);
	in = out = NULL;
	if (init_remote(res, &in, &out) == 0) {
		/* Publish the new connections under the write lock. */
		rw_wlock(&hio_remote_lock[ncomp]);
		PJDLOG_ASSERT(res->hr_remotein == NULL);
		PJDLOG_ASSERT(res->hr_remoteout == NULL);
		PJDLOG_ASSERT(in != NULL && out != NULL);
		res->hr_remotein = in;
		res->hr_remoteout = out;
		rw_unlock(&hio_remote_lock[ncomp]);
		pjdlog_info("Successfully reconnected to %s.",
		    res->hr_remoteaddr);
		sync_start();
	} else {
		/* Both connections should be NULL. */
		PJDLOG_ASSERT(res->hr_remotein == NULL);
		PJDLOG_ASSERT(res->hr_remoteout == NULL);
		PJDLOG_ASSERT(in == NULL && out == NULL);
		pjdlog_debug(2, "remote_guard: Reconnect to %s failed.",
		    res->hr_remoteaddr);
	}
}

/*
 * Thread guards remote connections and reconnects when needed, handles
 * signals, etc.
 */
static void *
guard_thread(void *arg)
{
	struct hast_resource *res = arg;
	unsigned int ii, ncomps;
	struct timespec timeout;
	time_t lastcheck, now;
	sigset_t mask;
	int signo;

	ncomps = HAST_NCOMPONENTS;
	lastcheck = time(NULL);

	PJDLOG_VERIFY(sigemptyset(&mask) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGINT) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGTERM) == 0);

	timeout.tv_sec = HAST_KEEPALIVE;
	timeout.tv_nsec = 0;
	signo = -1;

	for (;;) {
		/* Handle the signal (if any) picked up at the bottom of the loop. */
		switch (signo) {
		case SIGINT:
		case SIGTERM:
			sigexit_received = true;
			primary_exitx(EX_OK,
			    "Termination signal received, exiting.");
			break;
		default:
			break;
		}

		/*
		 * Don't check connections until we fully started,
		 * as we may still be looping, waiting for remote node
		 * to switch from primary to secondary.
		 */
		if (fullystarted) {
			pjdlog_debug(2, "remote_guard: Checking connections.");
			now = time(NULL);
			if (lastcheck + HAST_KEEPALIVE <= now) {
				for (ii = 0; ii < ncomps; ii++)
					guard_one(res, ii);
				lastcheck = now;
			}
		}
		/*
		 * sigtimedwait() doubles as the periodic sleep (HAST_KEEPALIVE
		 * seconds) and as the synchronous signal receiver.
		 */
		signo = sigtimedwait(&mask, NULL, &timeout);
	}
	/* NOTREACHED */
	return (NULL);
}