/* primary.c — FreeBSD head/sbin/hastd/primary.c, revision 210881 */
1204076Spjd/*- 2204076Spjd * Copyright (c) 2009 The FreeBSD Foundation 3204076Spjd * All rights reserved. 4204076Spjd * 5204076Spjd * This software was developed by Pawel Jakub Dawidek under sponsorship from 6204076Spjd * the FreeBSD Foundation. 7204076Spjd * 8204076Spjd * Redistribution and use in source and binary forms, with or without 9204076Spjd * modification, are permitted provided that the following conditions 10204076Spjd * are met: 11204076Spjd * 1. Redistributions of source code must retain the above copyright 12204076Spjd * notice, this list of conditions and the following disclaimer. 13204076Spjd * 2. Redistributions in binary form must reproduce the above copyright 14204076Spjd * notice, this list of conditions and the following disclaimer in the 15204076Spjd * documentation and/or other materials provided with the distribution. 16204076Spjd * 17204076Spjd * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 18204076Spjd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19204076Spjd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20204076Spjd * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 21204076Spjd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22204076Spjd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23204076Spjd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24204076Spjd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25204076Spjd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26204076Spjd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27204076Spjd * SUCH DAMAGE. 
28204076Spjd */ 29204076Spjd 30204076Spjd#include <sys/cdefs.h> 31204076Spjd__FBSDID("$FreeBSD: head/sbin/hastd/primary.c 210881 2010-08-05 19:01:57Z pjd $"); 32204076Spjd 33204076Spjd#include <sys/types.h> 34204076Spjd#include <sys/time.h> 35204076Spjd#include <sys/bio.h> 36204076Spjd#include <sys/disk.h> 37204076Spjd#include <sys/refcount.h> 38204076Spjd#include <sys/stat.h> 39204076Spjd 40204076Spjd#include <geom/gate/g_gate.h> 41204076Spjd 42204076Spjd#include <assert.h> 43204076Spjd#include <err.h> 44204076Spjd#include <errno.h> 45204076Spjd#include <fcntl.h> 46204076Spjd#include <libgeom.h> 47204076Spjd#include <pthread.h> 48204076Spjd#include <stdint.h> 49204076Spjd#include <stdio.h> 50204076Spjd#include <string.h> 51204076Spjd#include <sysexits.h> 52204076Spjd#include <unistd.h> 53204076Spjd 54204076Spjd#include <activemap.h> 55204076Spjd#include <nv.h> 56204076Spjd#include <rangelock.h> 57204076Spjd 58204076Spjd#include "control.h" 59204076Spjd#include "hast.h" 60204076Spjd#include "hast_proto.h" 61204076Spjd#include "hastd.h" 62204076Spjd#include "metadata.h" 63204076Spjd#include "proto.h" 64204076Spjd#include "pjdlog.h" 65204076Spjd#include "subr.h" 66204076Spjd#include "synch.h" 67204076Spjd 68204076Spjdstruct hio { 69204076Spjd /* 70204076Spjd * Number of components we are still waiting for. 71204076Spjd * When this field goes to 0, we can send the request back to the 72204076Spjd * kernel. Each component has to decrease this counter by one 73204076Spjd * even on failure. 74204076Spjd */ 75204076Spjd unsigned int hio_countdown; 76204076Spjd /* 77204076Spjd * Each component has a place to store its own error. 78204076Spjd * Once the request is handled by all components we can decide if the 79204076Spjd * request overall is successful or not. 80204076Spjd */ 81204076Spjd int *hio_errors; 82204076Spjd /* 83204076Spjd * Structure used to comunicate with GEOM Gate class. 
84204076Spjd */ 85204076Spjd struct g_gate_ctl_io hio_ggio; 86204076Spjd TAILQ_ENTRY(hio) *hio_next; 87204076Spjd}; 88204076Spjd#define hio_free_next hio_next[0] 89204076Spjd#define hio_done_next hio_next[0] 90204076Spjd 91204076Spjd/* 92204076Spjd * Free list holds unused structures. When free list is empty, we have to wait 93204076Spjd * until some in-progress requests are freed. 94204076Spjd */ 95204076Spjdstatic TAILQ_HEAD(, hio) hio_free_list; 96204076Spjdstatic pthread_mutex_t hio_free_list_lock; 97204076Spjdstatic pthread_cond_t hio_free_list_cond; 98204076Spjd/* 99204076Spjd * There is one send list for every component. One requests is placed on all 100204076Spjd * send lists - each component gets the same request, but each component is 101204076Spjd * responsible for managing his own send list. 102204076Spjd */ 103204076Spjdstatic TAILQ_HEAD(, hio) *hio_send_list; 104204076Spjdstatic pthread_mutex_t *hio_send_list_lock; 105204076Spjdstatic pthread_cond_t *hio_send_list_cond; 106204076Spjd/* 107204076Spjd * There is one recv list for every component, although local components don't 108204076Spjd * use recv lists as local requests are done synchronously. 109204076Spjd */ 110204076Spjdstatic TAILQ_HEAD(, hio) *hio_recv_list; 111204076Spjdstatic pthread_mutex_t *hio_recv_list_lock; 112204076Spjdstatic pthread_cond_t *hio_recv_list_cond; 113204076Spjd/* 114204076Spjd * Request is placed on done list by the slowest component (the one that 115204076Spjd * decreased hio_countdown from 1 to 0). 116204076Spjd */ 117204076Spjdstatic TAILQ_HEAD(, hio) hio_done_list; 118204076Spjdstatic pthread_mutex_t hio_done_list_lock; 119204076Spjdstatic pthread_cond_t hio_done_list_cond; 120204076Spjd/* 121204076Spjd * Structure below are for interaction with sync thread. 
122204076Spjd */ 123204076Spjdstatic bool sync_inprogress; 124204076Spjdstatic pthread_mutex_t sync_lock; 125204076Spjdstatic pthread_cond_t sync_cond; 126204076Spjd/* 127204076Spjd * The lock below allows to synchornize access to remote connections. 128204076Spjd */ 129204076Spjdstatic pthread_rwlock_t *hio_remote_lock; 130204076Spjdstatic pthread_mutex_t hio_guard_lock; 131204076Spjdstatic pthread_cond_t hio_guard_cond; 132204076Spjd 133204076Spjd/* 134204076Spjd * Lock to synchronize metadata updates. Also synchronize access to 135204076Spjd * hr_primary_localcnt and hr_primary_remotecnt fields. 136204076Spjd */ 137204076Spjdstatic pthread_mutex_t metadata_lock; 138204076Spjd 139204076Spjd/* 140204076Spjd * Maximum number of outstanding I/O requests. 141204076Spjd */ 142204076Spjd#define HAST_HIO_MAX 256 143204076Spjd/* 144204076Spjd * Number of components. At this point there are only two components: local 145204076Spjd * and remote, but in the future it might be possible to use multiple local 146204076Spjd * and remote components. 147204076Spjd */ 148204076Spjd#define HAST_NCOMPONENTS 2 149204076Spjd/* 150204076Spjd * Number of seconds to sleep before next reconnect try. 
151204076Spjd */ 152204076Spjd#define RECONNECT_SLEEP 5 153204076Spjd 154204076Spjd#define ISCONNECTED(res, no) \ 155204076Spjd ((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL) 156204076Spjd 157204076Spjd#define QUEUE_INSERT1(hio, name, ncomp) do { \ 158204076Spjd bool _wakeup; \ 159204076Spjd \ 160204076Spjd mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 161204076Spjd _wakeup = TAILQ_EMPTY(&hio_##name##_list[(ncomp)]); \ 162204076Spjd TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio), \ 163204076Spjd hio_next[(ncomp)]); \ 164204076Spjd mtx_unlock(&hio_##name##_list_lock[ncomp]); \ 165204076Spjd if (_wakeup) \ 166204076Spjd cv_signal(&hio_##name##_list_cond[(ncomp)]); \ 167204076Spjd} while (0) 168204076Spjd#define QUEUE_INSERT2(hio, name) do { \ 169204076Spjd bool _wakeup; \ 170204076Spjd \ 171204076Spjd mtx_lock(&hio_##name##_list_lock); \ 172204076Spjd _wakeup = TAILQ_EMPTY(&hio_##name##_list); \ 173204076Spjd TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\ 174204076Spjd mtx_unlock(&hio_##name##_list_lock); \ 175204076Spjd if (_wakeup) \ 176204076Spjd cv_signal(&hio_##name##_list_cond); \ 177204076Spjd} while (0) 178204076Spjd#define QUEUE_TAKE1(hio, name, ncomp) do { \ 179204076Spjd mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 180204076Spjd while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL) { \ 181204076Spjd cv_wait(&hio_##name##_list_cond[(ncomp)], \ 182204076Spjd &hio_##name##_list_lock[(ncomp)]); \ 183204076Spjd } \ 184204076Spjd TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio), \ 185204076Spjd hio_next[(ncomp)]); \ 186204076Spjd mtx_unlock(&hio_##name##_list_lock[(ncomp)]); \ 187204076Spjd} while (0) 188204076Spjd#define QUEUE_TAKE2(hio, name) do { \ 189204076Spjd mtx_lock(&hio_##name##_list_lock); \ 190204076Spjd while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) { \ 191204076Spjd cv_wait(&hio_##name##_list_cond, \ 192204076Spjd &hio_##name##_list_lock); \ 193204076Spjd } \ 194204076Spjd 
TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next); \ 195204076Spjd mtx_unlock(&hio_##name##_list_lock); \ 196204076Spjd} while (0) 197204076Spjd 198209183Spjd#define SYNCREQ(hio) do { \ 199209183Spjd (hio)->hio_ggio.gctl_unit = -1; \ 200209183Spjd (hio)->hio_ggio.gctl_seq = 1; \ 201209183Spjd} while (0) 202204076Spjd#define ISSYNCREQ(hio) ((hio)->hio_ggio.gctl_unit == -1) 203204076Spjd#define SYNCREQDONE(hio) do { (hio)->hio_ggio.gctl_unit = -2; } while (0) 204204076Spjd#define ISSYNCREQDONE(hio) ((hio)->hio_ggio.gctl_unit == -2) 205204076Spjd 206204076Spjdstatic struct hast_resource *gres; 207204076Spjd 208204076Spjdstatic pthread_mutex_t range_lock; 209204076Spjdstatic struct rangelocks *range_regular; 210204076Spjdstatic bool range_regular_wait; 211204076Spjdstatic pthread_cond_t range_regular_cond; 212204076Spjdstatic struct rangelocks *range_sync; 213204076Spjdstatic bool range_sync_wait; 214204076Spjdstatic pthread_cond_t range_sync_cond; 215204076Spjd 216204076Spjdstatic void *ggate_recv_thread(void *arg); 217204076Spjdstatic void *local_send_thread(void *arg); 218204076Spjdstatic void *remote_send_thread(void *arg); 219204076Spjdstatic void *remote_recv_thread(void *arg); 220204076Spjdstatic void *ggate_send_thread(void *arg); 221204076Spjdstatic void *sync_thread(void *arg); 222204076Spjdstatic void *guard_thread(void *arg); 223204076Spjd 224204076Spjdstatic void sighandler(int sig); 225204076Spjd 226204076Spjdstatic void 227204076Spjdcleanup(struct hast_resource *res) 228204076Spjd{ 229204076Spjd int rerrno; 230204076Spjd 231204076Spjd /* Remember errno. */ 232204076Spjd rerrno = errno; 233204076Spjd 234204076Spjd /* 235204076Spjd * Close descriptor to /dev/hast/<name> 236204076Spjd * to work-around race in the kernel. 237204076Spjd */ 238204076Spjd close(res->hr_localfd); 239204076Spjd 240204076Spjd /* Destroy ggate provider if we created one. 
*/ 241204076Spjd if (res->hr_ggateunit >= 0) { 242204076Spjd struct g_gate_ctl_destroy ggiod; 243204076Spjd 244204076Spjd ggiod.gctl_version = G_GATE_VERSION; 245204076Spjd ggiod.gctl_unit = res->hr_ggateunit; 246204076Spjd ggiod.gctl_force = 1; 247204076Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) < 0) { 248204076Spjd pjdlog_warning("Unable to destroy hast/%s device", 249204076Spjd res->hr_provname); 250204076Spjd } 251204076Spjd res->hr_ggateunit = -1; 252204076Spjd } 253204076Spjd 254204076Spjd /* Restore errno. */ 255204076Spjd errno = rerrno; 256204076Spjd} 257204076Spjd 258204076Spjdstatic void 259204076Spjdprimary_exit(int exitcode, const char *fmt, ...) 260204076Spjd{ 261204076Spjd va_list ap; 262204076Spjd 263204076Spjd assert(exitcode != EX_OK); 264204076Spjd va_start(ap, fmt); 265204076Spjd pjdlogv_errno(LOG_ERR, fmt, ap); 266204076Spjd va_end(ap); 267204076Spjd cleanup(gres); 268204076Spjd exit(exitcode); 269204076Spjd} 270204076Spjd 271204076Spjdstatic void 272204076Spjdprimary_exitx(int exitcode, const char *fmt, ...) 273204076Spjd{ 274204076Spjd va_list ap; 275204076Spjd 276204076Spjd va_start(ap, fmt); 277204076Spjd pjdlogv(exitcode == EX_OK ? 
LOG_INFO : LOG_ERR, fmt, ap); 278204076Spjd va_end(ap); 279204076Spjd cleanup(gres); 280204076Spjd exit(exitcode); 281204076Spjd} 282204076Spjd 283204076Spjdstatic int 284204076Spjdhast_activemap_flush(struct hast_resource *res) 285204076Spjd{ 286204076Spjd const unsigned char *buf; 287204076Spjd size_t size; 288204076Spjd 289204076Spjd buf = activemap_bitmap(res->hr_amp, &size); 290204076Spjd assert(buf != NULL); 291204076Spjd assert((size % res->hr_local_sectorsize) == 0); 292204076Spjd if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) != 293204076Spjd (ssize_t)size) { 294204076Spjd KEEP_ERRNO(pjdlog_errno(LOG_ERR, 295204076Spjd "Unable to flush activemap to disk")); 296204076Spjd return (-1); 297204076Spjd } 298204076Spjd return (0); 299204076Spjd} 300204076Spjd 301210881Spjdstatic bool 302210881Spjdreal_remote(const struct hast_resource *res) 303210881Spjd{ 304210881Spjd 305210881Spjd return (strcmp(res->hr_remoteaddr, "none") != 0); 306210881Spjd} 307210881Spjd 308204076Spjdstatic void 309204076Spjdinit_environment(struct hast_resource *res __unused) 310204076Spjd{ 311204076Spjd struct hio *hio; 312204076Spjd unsigned int ii, ncomps; 313204076Spjd 314204076Spjd /* 315204076Spjd * In the future it might be per-resource value. 316204076Spjd */ 317204076Spjd ncomps = HAST_NCOMPONENTS; 318204076Spjd 319204076Spjd /* 320204076Spjd * Allocate memory needed by lists. 
321204076Spjd */ 322204076Spjd hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps); 323204076Spjd if (hio_send_list == NULL) { 324204076Spjd primary_exitx(EX_TEMPFAIL, 325204076Spjd "Unable to allocate %zu bytes of memory for send lists.", 326204076Spjd sizeof(hio_send_list[0]) * ncomps); 327204076Spjd } 328204076Spjd hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps); 329204076Spjd if (hio_send_list_lock == NULL) { 330204076Spjd primary_exitx(EX_TEMPFAIL, 331204076Spjd "Unable to allocate %zu bytes of memory for send list locks.", 332204076Spjd sizeof(hio_send_list_lock[0]) * ncomps); 333204076Spjd } 334204076Spjd hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps); 335204076Spjd if (hio_send_list_cond == NULL) { 336204076Spjd primary_exitx(EX_TEMPFAIL, 337204076Spjd "Unable to allocate %zu bytes of memory for send list condition variables.", 338204076Spjd sizeof(hio_send_list_cond[0]) * ncomps); 339204076Spjd } 340204076Spjd hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps); 341204076Spjd if (hio_recv_list == NULL) { 342204076Spjd primary_exitx(EX_TEMPFAIL, 343204076Spjd "Unable to allocate %zu bytes of memory for recv lists.", 344204076Spjd sizeof(hio_recv_list[0]) * ncomps); 345204076Spjd } 346204076Spjd hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps); 347204076Spjd if (hio_recv_list_lock == NULL) { 348204076Spjd primary_exitx(EX_TEMPFAIL, 349204076Spjd "Unable to allocate %zu bytes of memory for recv list locks.", 350204076Spjd sizeof(hio_recv_list_lock[0]) * ncomps); 351204076Spjd } 352204076Spjd hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps); 353204076Spjd if (hio_recv_list_cond == NULL) { 354204076Spjd primary_exitx(EX_TEMPFAIL, 355204076Spjd "Unable to allocate %zu bytes of memory for recv list condition variables.", 356204076Spjd sizeof(hio_recv_list_cond[0]) * ncomps); 357204076Spjd } 358204076Spjd hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps); 
359204076Spjd if (hio_remote_lock == NULL) { 360204076Spjd primary_exitx(EX_TEMPFAIL, 361204076Spjd "Unable to allocate %zu bytes of memory for remote connections locks.", 362204076Spjd sizeof(hio_remote_lock[0]) * ncomps); 363204076Spjd } 364204076Spjd 365204076Spjd /* 366204076Spjd * Initialize lists, their locks and theirs condition variables. 367204076Spjd */ 368204076Spjd TAILQ_INIT(&hio_free_list); 369204076Spjd mtx_init(&hio_free_list_lock); 370204076Spjd cv_init(&hio_free_list_cond); 371204076Spjd for (ii = 0; ii < HAST_NCOMPONENTS; ii++) { 372204076Spjd TAILQ_INIT(&hio_send_list[ii]); 373204076Spjd mtx_init(&hio_send_list_lock[ii]); 374204076Spjd cv_init(&hio_send_list_cond[ii]); 375204076Spjd TAILQ_INIT(&hio_recv_list[ii]); 376204076Spjd mtx_init(&hio_recv_list_lock[ii]); 377204076Spjd cv_init(&hio_recv_list_cond[ii]); 378204076Spjd rw_init(&hio_remote_lock[ii]); 379204076Spjd } 380204076Spjd TAILQ_INIT(&hio_done_list); 381204076Spjd mtx_init(&hio_done_list_lock); 382204076Spjd cv_init(&hio_done_list_cond); 383204076Spjd mtx_init(&hio_guard_lock); 384204076Spjd cv_init(&hio_guard_cond); 385204076Spjd mtx_init(&metadata_lock); 386204076Spjd 387204076Spjd /* 388204076Spjd * Allocate requests pool and initialize requests. 
389204076Spjd */ 390204076Spjd for (ii = 0; ii < HAST_HIO_MAX; ii++) { 391204076Spjd hio = malloc(sizeof(*hio)); 392204076Spjd if (hio == NULL) { 393204076Spjd primary_exitx(EX_TEMPFAIL, 394204076Spjd "Unable to allocate %zu bytes of memory for hio request.", 395204076Spjd sizeof(*hio)); 396204076Spjd } 397204076Spjd hio->hio_countdown = 0; 398204076Spjd hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps); 399204076Spjd if (hio->hio_errors == NULL) { 400204076Spjd primary_exitx(EX_TEMPFAIL, 401204076Spjd "Unable allocate %zu bytes of memory for hio errors.", 402204076Spjd sizeof(hio->hio_errors[0]) * ncomps); 403204076Spjd } 404204076Spjd hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps); 405204076Spjd if (hio->hio_next == NULL) { 406204076Spjd primary_exitx(EX_TEMPFAIL, 407204076Spjd "Unable allocate %zu bytes of memory for hio_next field.", 408204076Spjd sizeof(hio->hio_next[0]) * ncomps); 409204076Spjd } 410204076Spjd hio->hio_ggio.gctl_version = G_GATE_VERSION; 411204076Spjd hio->hio_ggio.gctl_data = malloc(MAXPHYS); 412204076Spjd if (hio->hio_ggio.gctl_data == NULL) { 413204076Spjd primary_exitx(EX_TEMPFAIL, 414204076Spjd "Unable to allocate %zu bytes of memory for gctl_data.", 415204076Spjd MAXPHYS); 416204076Spjd } 417204076Spjd hio->hio_ggio.gctl_length = MAXPHYS; 418204076Spjd hio->hio_ggio.gctl_error = 0; 419204076Spjd TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next); 420204076Spjd } 421204076Spjd 422204076Spjd /* 423204076Spjd * Turn on signals handling. 
424204076Spjd */ 425204076Spjd signal(SIGINT, sighandler); 426204076Spjd signal(SIGTERM, sighandler); 427204076Spjd} 428204076Spjd 429204076Spjdstatic void 430204076Spjdinit_local(struct hast_resource *res) 431204076Spjd{ 432204076Spjd unsigned char *buf; 433204076Spjd size_t mapsize; 434204076Spjd 435204076Spjd if (metadata_read(res, true) < 0) 436204076Spjd exit(EX_NOINPUT); 437204076Spjd mtx_init(&res->hr_amp_lock); 438204076Spjd if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize, 439204076Spjd res->hr_local_sectorsize, res->hr_keepdirty) < 0) { 440204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create activemap"); 441204076Spjd } 442204076Spjd mtx_init(&range_lock); 443204076Spjd cv_init(&range_regular_cond); 444204076Spjd if (rangelock_init(&range_regular) < 0) 445204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create regular range lock"); 446204076Spjd cv_init(&range_sync_cond); 447204076Spjd if (rangelock_init(&range_sync) < 0) 448204076Spjd primary_exit(EX_TEMPFAIL, "Unable to create sync range lock"); 449204076Spjd mapsize = activemap_ondisk_size(res->hr_amp); 450204076Spjd buf = calloc(1, mapsize); 451204076Spjd if (buf == NULL) { 452204076Spjd primary_exitx(EX_TEMPFAIL, 453204076Spjd "Unable to allocate buffer for activemap."); 454204076Spjd } 455204076Spjd if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) != 456204076Spjd (ssize_t)mapsize) { 457204076Spjd primary_exit(EX_NOINPUT, "Unable to read activemap"); 458204076Spjd } 459204076Spjd activemap_copyin(res->hr_amp, buf, mapsize); 460209181Spjd free(buf); 461204076Spjd if (res->hr_resuid != 0) 462204076Spjd return; 463204076Spjd /* 464204076Spjd * We're using provider for the first time, so we have to generate 465204076Spjd * resource unique identifier and initialize local and remote counts. 
466204076Spjd */ 467204076Spjd arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid)); 468204076Spjd res->hr_primary_localcnt = 1; 469204076Spjd res->hr_primary_remotecnt = 0; 470204076Spjd if (metadata_write(res) < 0) 471204076Spjd exit(EX_NOINPUT); 472204076Spjd} 473204076Spjd 474205738Spjdstatic bool 475205738Spjdinit_remote(struct hast_resource *res, struct proto_conn **inp, 476205738Spjd struct proto_conn **outp) 477204076Spjd{ 478205738Spjd struct proto_conn *in, *out; 479204076Spjd struct nv *nvout, *nvin; 480204076Spjd const unsigned char *token; 481204076Spjd unsigned char *map; 482204076Spjd const char *errmsg; 483204076Spjd int32_t extentsize; 484204076Spjd int64_t datasize; 485204076Spjd uint32_t mapsize; 486204076Spjd size_t size; 487204076Spjd 488205738Spjd assert((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL)); 489210881Spjd assert(real_remote(res)); 490205738Spjd 491205738Spjd in = out = NULL; 492205738Spjd 493204076Spjd /* Prepare outgoing connection with remote node. */ 494205738Spjd if (proto_client(res->hr_remoteaddr, &out) < 0) { 495207347Spjd primary_exit(EX_TEMPFAIL, "Unable to create connection to %s", 496204076Spjd res->hr_remoteaddr); 497204076Spjd } 498204076Spjd /* Try to connect, but accept failure. */ 499205738Spjd if (proto_connect(out) < 0) { 500204076Spjd pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 501204076Spjd res->hr_remoteaddr); 502204076Spjd goto close; 503204076Spjd } 504207371Spjd /* Error in setting timeout is not critical, but why should it fail? */ 505207371Spjd if (proto_timeout(out, res->hr_timeout) < 0) 506207371Spjd pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 507204076Spjd /* 508204076Spjd * First handshake step. 509204076Spjd * Setup outgoing connection with remote node. 
510204076Spjd */ 511204076Spjd nvout = nv_alloc(); 512204076Spjd nv_add_string(nvout, res->hr_name, "resource"); 513204076Spjd if (nv_error(nvout) != 0) { 514204076Spjd pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 515204076Spjd "Unable to allocate header for connection with %s", 516204076Spjd res->hr_remoteaddr); 517204076Spjd nv_free(nvout); 518204076Spjd goto close; 519204076Spjd } 520205738Spjd if (hast_proto_send(res, out, nvout, NULL, 0) < 0) { 521204076Spjd pjdlog_errno(LOG_WARNING, 522204076Spjd "Unable to send handshake header to %s", 523204076Spjd res->hr_remoteaddr); 524204076Spjd nv_free(nvout); 525204076Spjd goto close; 526204076Spjd } 527204076Spjd nv_free(nvout); 528205738Spjd if (hast_proto_recv_hdr(out, &nvin) < 0) { 529204076Spjd pjdlog_errno(LOG_WARNING, 530204076Spjd "Unable to receive handshake header from %s", 531204076Spjd res->hr_remoteaddr); 532204076Spjd goto close; 533204076Spjd } 534204076Spjd errmsg = nv_get_string(nvin, "errmsg"); 535204076Spjd if (errmsg != NULL) { 536204076Spjd pjdlog_warning("%s", errmsg); 537204076Spjd nv_free(nvin); 538204076Spjd goto close; 539204076Spjd } 540204076Spjd token = nv_get_uint8_array(nvin, &size, "token"); 541204076Spjd if (token == NULL) { 542204076Spjd pjdlog_warning("Handshake header from %s has no 'token' field.", 543204076Spjd res->hr_remoteaddr); 544204076Spjd nv_free(nvin); 545204076Spjd goto close; 546204076Spjd } 547204076Spjd if (size != sizeof(res->hr_token)) { 548204076Spjd pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).", 549204076Spjd res->hr_remoteaddr, size, sizeof(res->hr_token)); 550204076Spjd nv_free(nvin); 551204076Spjd goto close; 552204076Spjd } 553204076Spjd bcopy(token, res->hr_token, sizeof(res->hr_token)); 554204076Spjd nv_free(nvin); 555204076Spjd 556204076Spjd /* 557204076Spjd * Second handshake step. 558204076Spjd * Setup incoming connection with remote node. 
559204076Spjd */ 560205738Spjd if (proto_client(res->hr_remoteaddr, &in) < 0) { 561204076Spjd pjdlog_errno(LOG_WARNING, "Unable to create connection to %s", 562204076Spjd res->hr_remoteaddr); 563204076Spjd } 564204076Spjd /* Try to connect, but accept failure. */ 565205738Spjd if (proto_connect(in) < 0) { 566204076Spjd pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 567204076Spjd res->hr_remoteaddr); 568204076Spjd goto close; 569204076Spjd } 570207371Spjd /* Error in setting timeout is not critical, but why should it fail? */ 571207371Spjd if (proto_timeout(in, res->hr_timeout) < 0) 572207371Spjd pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 573204076Spjd nvout = nv_alloc(); 574204076Spjd nv_add_string(nvout, res->hr_name, "resource"); 575204076Spjd nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token), 576204076Spjd "token"); 577204076Spjd nv_add_uint64(nvout, res->hr_resuid, "resuid"); 578204076Spjd nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt"); 579204076Spjd nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt"); 580204076Spjd if (nv_error(nvout) != 0) { 581204076Spjd pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 582204076Spjd "Unable to allocate header for connection with %s", 583204076Spjd res->hr_remoteaddr); 584204076Spjd nv_free(nvout); 585204076Spjd goto close; 586204076Spjd } 587205738Spjd if (hast_proto_send(res, in, nvout, NULL, 0) < 0) { 588204076Spjd pjdlog_errno(LOG_WARNING, 589204076Spjd "Unable to send handshake header to %s", 590204076Spjd res->hr_remoteaddr); 591204076Spjd nv_free(nvout); 592204076Spjd goto close; 593204076Spjd } 594204076Spjd nv_free(nvout); 595205738Spjd if (hast_proto_recv_hdr(out, &nvin) < 0) { 596204076Spjd pjdlog_errno(LOG_WARNING, 597204076Spjd "Unable to receive handshake header from %s", 598204076Spjd res->hr_remoteaddr); 599204076Spjd goto close; 600204076Spjd } 601204076Spjd errmsg = nv_get_string(nvin, "errmsg"); 602204076Spjd if (errmsg != NULL) { 603204076Spjd 
pjdlog_warning("%s", errmsg); 604204076Spjd nv_free(nvin); 605204076Spjd goto close; 606204076Spjd } 607204076Spjd datasize = nv_get_int64(nvin, "datasize"); 608204076Spjd if (datasize != res->hr_datasize) { 609204076Spjd pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).", 610204076Spjd (intmax_t)res->hr_datasize, (intmax_t)datasize); 611204076Spjd nv_free(nvin); 612204076Spjd goto close; 613204076Spjd } 614204076Spjd extentsize = nv_get_int32(nvin, "extentsize"); 615204076Spjd if (extentsize != res->hr_extentsize) { 616204076Spjd pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).", 617204076Spjd (ssize_t)res->hr_extentsize, (ssize_t)extentsize); 618204076Spjd nv_free(nvin); 619204076Spjd goto close; 620204076Spjd } 621204076Spjd res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt"); 622204076Spjd res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt"); 623204076Spjd res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc"); 624204076Spjd map = NULL; 625204076Spjd mapsize = nv_get_uint32(nvin, "mapsize"); 626204076Spjd if (mapsize > 0) { 627204076Spjd map = malloc(mapsize); 628204076Spjd if (map == NULL) { 629204076Spjd pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).", 630204076Spjd (uintmax_t)mapsize); 631204076Spjd nv_free(nvin); 632204076Spjd goto close; 633204076Spjd } 634204076Spjd /* 635204076Spjd * Remote node have some dirty extents on its own, lets 636204076Spjd * download its activemap. 637204076Spjd */ 638205738Spjd if (hast_proto_recv_data(res, out, nvin, map, 639204076Spjd mapsize) < 0) { 640204076Spjd pjdlog_errno(LOG_ERR, 641204076Spjd "Unable to receive remote activemap"); 642204076Spjd nv_free(nvin); 643204076Spjd free(map); 644204076Spjd goto close; 645204076Spjd } 646204076Spjd /* 647204076Spjd * Merge local and remote bitmaps. 
648204076Spjd */ 649204076Spjd activemap_merge(res->hr_amp, map, mapsize); 650204076Spjd free(map); 651204076Spjd /* 652204076Spjd * Now that we merged bitmaps from both nodes, flush it to the 653204076Spjd * disk before we start to synchronize. 654204076Spjd */ 655204076Spjd (void)hast_activemap_flush(res); 656204076Spjd } 657204076Spjd pjdlog_info("Connected to %s.", res->hr_remoteaddr); 658205738Spjd if (inp != NULL && outp != NULL) { 659205738Spjd *inp = in; 660205738Spjd *outp = out; 661205738Spjd } else { 662205738Spjd res->hr_remotein = in; 663205738Spjd res->hr_remoteout = out; 664205738Spjd } 665205738Spjd return (true); 666205738Spjdclose: 667205738Spjd proto_close(out); 668205738Spjd if (in != NULL) 669205738Spjd proto_close(in); 670205738Spjd return (false); 671205738Spjd} 672205738Spjd 673205738Spjdstatic void 674205738Spjdsync_start(void) 675205738Spjd{ 676205738Spjd 677204076Spjd mtx_lock(&sync_lock); 678204076Spjd sync_inprogress = true; 679204076Spjd mtx_unlock(&sync_lock); 680204076Spjd cv_signal(&sync_cond); 681204076Spjd} 682204076Spjd 683204076Spjdstatic void 684204076Spjdinit_ggate(struct hast_resource *res) 685204076Spjd{ 686204076Spjd struct g_gate_ctl_create ggiocreate; 687204076Spjd struct g_gate_ctl_cancel ggiocancel; 688204076Spjd 689204076Spjd /* 690204076Spjd * We communicate with ggate via /dev/ggctl. Open it. 691204076Spjd */ 692204076Spjd res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR); 693204076Spjd if (res->hr_ggatefd < 0) 694204076Spjd primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME); 695204076Spjd /* 696204076Spjd * Create provider before trying to connect, as connection failure 697204076Spjd * is not critical, but may take some time. 
698204076Spjd */ 699204076Spjd ggiocreate.gctl_version = G_GATE_VERSION; 700204076Spjd ggiocreate.gctl_mediasize = res->hr_datasize; 701204076Spjd ggiocreate.gctl_sectorsize = res->hr_local_sectorsize; 702204076Spjd ggiocreate.gctl_flags = 0; 703206669Spjd ggiocreate.gctl_maxcount = G_GATE_MAX_QUEUE_SIZE; 704204076Spjd ggiocreate.gctl_timeout = 0; 705204076Spjd ggiocreate.gctl_unit = G_GATE_NAME_GIVEN; 706204076Spjd snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s", 707204076Spjd res->hr_provname); 708204076Spjd bzero(ggiocreate.gctl_info, sizeof(ggiocreate.gctl_info)); 709204076Spjd if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) { 710204076Spjd pjdlog_info("Device hast/%s created.", res->hr_provname); 711204076Spjd res->hr_ggateunit = ggiocreate.gctl_unit; 712204076Spjd return; 713204076Spjd } 714204076Spjd if (errno != EEXIST) { 715204076Spjd primary_exit(EX_OSERR, "Unable to create hast/%s device", 716204076Spjd res->hr_provname); 717204076Spjd } 718204076Spjd pjdlog_debug(1, 719204076Spjd "Device hast/%s already exists, we will try to take it over.", 720204076Spjd res->hr_provname); 721204076Spjd /* 722204076Spjd * If we received EEXIST, we assume that the process who created the 723204076Spjd * provider died and didn't clean up. In that case we will start from 724204076Spjd * where he left of. 
 */
	ggiocancel.gctl_version = G_GATE_VERSION;
	ggiocancel.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s",
	    res->hr_provname);
	/*
	 * Try to take over a ggate device left behind by a previous worker;
	 * on success remember the unit number the kernel returned.
	 */
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) {
		pjdlog_info("Device hast/%s recovered.", res->hr_provname);
		res->hr_ggateunit = ggiocancel.gctl_unit;
		return;
	}
	primary_exit(EX_OSERR, "Unable to take over hast/%s device",
	    res->hr_provname);
}

/*
 * Entry point of the primary role.  Forks a worker child: the parent only
 * records the worker PID and returns; the child initializes the local and
 * remote components, the ggate device and the whole thread pool, then runs
 * guard_thread() in its main thread (never returns normally).
 */
void
hastd_primary(struct hast_resource *res)
{
	pthread_t td;
	pid_t pid;
	int error;

	gres = res;

	/*
	 * Create communication channel between parent and child.
	 */
	if (proto_client("socketpair://", &res->hr_ctrl) < 0) {
		KEEP_ERRNO((void)pidfile_remove(pfh));
		primary_exit(EX_OSERR,
		    "Unable to create control sockets between parent and child");
	}

	pid = fork();
	if (pid < 0) {
		KEEP_ERRNO((void)pidfile_remove(pfh));
		primary_exit(EX_TEMPFAIL, "Unable to fork");
	}

	if (pid > 0) {
		/* This is parent. */
		res->hr_workerpid = pid;
		return;
	}
	/* Child: the pidfile belongs to the parent, just close our copy. */
	(void)pidfile_close(pfh);

	setproctitle("%s (primary)", res->hr_name);

	/* Restore default dispositions inherited from the master process. */
	signal(SIGHUP, SIG_DFL);
	signal(SIGCHLD, SIG_DFL);

	init_local(res);
	/* Start synchronization only if the remote side is really reachable. */
	if (real_remote(res) && init_remote(res, NULL, NULL))
		sync_start();
	init_ggate(res);
	init_environment(res);
	error = pthread_create(&td, NULL, ggate_recv_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, local_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, remote_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, remote_recv_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, ggate_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, sync_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, ctrl_thread, res);
	assert(error == 0);
	(void)guard_thread(res);
}

/*
 * Log a request description.  The caller-provided prefix (printf-style) is
 * followed by a textual rendering of the ggate command; on vsnprintf
 * truncation the command suffix is silently omitted.
 */
static void
reqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio, const char *fmt, ...)
{
	char msg[1024];
	va_list ap;
	int len;

	va_start(ap, fmt);
	len = vsnprintf(msg, sizeof(msg), fmt, ap);
	va_end(ap);
	if ((size_t)len < sizeof(msg)) {
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "READ(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		case BIO_DELETE:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "DELETE(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		case BIO_FLUSH:
			(void)snprintf(msg + len, sizeof(msg) - len, "FLUSH.");
			break;
		case BIO_WRITE:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "WRITE(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		default:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "UNKNOWN(%u).", (unsigned int)ggio->gctl_cmd);
			break;
		}
	}
	pjdlog_common(loglevel, debuglevel, -1, "%s", msg);
}

/*
 * Close both directions of the remote connection (if still open), abort an
 * in-progress synchronization and kick the guard thread so it starts
 * reconnecting immediately.
 */
static void
remote_close(struct hast_resource *res, int ncomp)
{

	rw_wlock(&hio_remote_lock[ncomp]);
	/*
	 * A race is possible between dropping rlock and acquiring wlock -
	 * another thread can close connection in-between.
	 */
	if (!ISCONNECTED(res, ncomp)) {
		assert(res->hr_remotein == NULL);
		assert(res->hr_remoteout == NULL);
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	assert(res->hr_remotein != NULL);
	assert(res->hr_remoteout != NULL);

	pjdlog_debug(2, "Closing old incoming connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remotein);
	res->hr_remotein = NULL;
	pjdlog_debug(2, "Closing old outgoing connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remoteout);
	res->hr_remoteout = NULL;

	rw_unlock(&hio_remote_lock[ncomp]);

	/*
	 * Stop synchronization if in-progress.
	 */
	mtx_lock(&sync_lock);
	if (sync_inprogress)
		sync_inprogress = false;
	mtx_unlock(&sync_lock);

	/*
	 * Wake up guard thread, so it can immediately start reconnect.
	 */
	mtx_lock(&hio_guard_lock);
	cv_signal(&hio_guard_cond);
	mtx_unlock(&hio_guard_lock);
}

/*
 * Thread receives ggate I/O requests from the kernel and passes them to
 * appropriate threads:
 * WRITE - always goes to both local_send and remote_send threads
 * READ (when the block is up-to-date on local component) -
 *	only local_send thread
 * READ (when the block isn't up-to-date on local component) -
 *	only remote_send thread
 * DELETE - always goes to both local_send and remote_send threads
 * FLUSH - always goes to both local_send and remote_send threads
 */
static void *
ggate_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomp, ncomps;
	int error;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		pjdlog_debug(2, "ggate_recv: Taking free request.");
		QUEUE_TAKE2(hio, free);
		pjdlog_debug(2, "ggate_recv: (%p) Got free request.", hio);
		ggio = &hio->hio_ggio;
		ggio->gctl_unit = res->hr_ggateunit;
		ggio->gctl_length = MAXPHYS;
		ggio->gctl_error = 0;
		pjdlog_debug(2,
		    "ggate_recv: (%p) Waiting for request from the kernel.",
		    hio);
		/* Blocks until the kernel has an I/O request for us. */
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) < 0) {
			if (sigexit_received)
				pthread_exit(NULL);
			primary_exit(EX_OSERR, "G_GATE_CMD_START failed");
		}
		error = ggio->gctl_error;
		switch (error) {
		case 0:
			break;
		case ECANCELED:
			/* Exit gracefully. */
			if (!sigexit_received) {
				pjdlog_debug(2,
				    "ggate_recv: (%p) Received cancel from the kernel.",
				    hio);
				pjdlog_info("Received cancel from the kernel, exiting.");
			}
			pthread_exit(NULL);
		case ENOMEM:
			/*
			 * Buffer too small? Impossible, we allocate MAXPHYS
			 * bytes - request can't be bigger than that.
			 */
			/* FALLTHROUGH */
		case ENXIO:
		default:
			primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.",
			    strerror(error));
		}
		/* EINVAL marks components that never processed the request. */
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio,
		    "ggate_recv: (%p) Request received from the kernel: ",
		    hio);
		/*
		 * Inform all components about new write request.
		 * For read request prefer local component unless the given
		 * range is out-of-date, then use remote component.
		 */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queue.",
			    hio);
			/* Reads are handled by exactly one component. */
			refcount_init(&hio->hio_countdown, 1);
			mtx_lock(&metadata_lock);
			if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF ||
			    res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
				/*
				 * This range is up-to-date on local component,
				 * so handle request locally.
				 */
				/* Local component is 0 for now. */
				ncomp = 0;
			} else /* if (res->hr_syncsrc ==
			    HAST_SYNCSRC_SECONDARY) */ {
				assert(res->hr_syncsrc ==
				    HAST_SYNCSRC_SECONDARY);
				/*
				 * This range is out-of-date on local component,
				 * so send request to the remote node.
				 */
				/* Remote component is 1 for now. */
				ncomp = 1;
			}
			mtx_unlock(&metadata_lock);
			QUEUE_INSERT1(hio, send, ncomp);
			break;
		case BIO_WRITE:
			/*
			 * Exclude the written range from concurrent
			 * synchronization; retry until the range lock is ours.
			 */
			for (;;) {
				mtx_lock(&range_lock);
				if (rangelock_islocked(range_sync,
				    ggio->gctl_offset, ggio->gctl_length)) {
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu locked.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					range_regular_wait = true;
					cv_wait(&range_regular_cond, &range_lock);
					range_regular_wait = false;
					mtx_unlock(&range_lock);
					continue;
				}
				if (rangelock_add(range_regular,
				    ggio->gctl_offset, ggio->gctl_length) < 0) {
					mtx_unlock(&range_lock);
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu is already locked, waiting.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					sleep(1);
					continue;
				}
				mtx_unlock(&range_lock);
				break;
			}
			/* Mark the extent dirty; flush activemap if changed. */
			mtx_lock(&res->hr_amp_lock);
			if (activemap_write_start(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length)) {
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
			/* FALLTHROUGH */
		case BIO_DELETE:
		case BIO_FLUSH:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queues.",
			    hio);
			/* These go to every component. */
			refcount_init(&hio->hio_countdown, ncomps);
			for (ii = 0; ii < ncomps; ii++)
				QUEUE_INSERT1(hio, send, ii);
			break;
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread reads from or writes to local component.
 * If local read fails, it redirects it to remote_send thread.
 */
static void *
local_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ncomp, rncomp;
	ssize_t ret;

	/* Local component is 0 for now. */
	ncomp = 0;
	/* Remote component is 1 for now. */
	rncomp = 1;

	for (;;) {
		pjdlog_debug(2, "local_send: Taking request.");
		QUEUE_TAKE1(hio, send, ncomp);
		pjdlog_debug(2, "local_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			ret = pread(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret == ggio->gctl_length)
				hio->hio_errors[ncomp] = 0;
			else {
				/*
				 * If READ failed, try to read from remote node.
				 * Note: countdown is NOT released here - the
				 * remote component takes over this request.
				 */
				QUEUE_INSERT1(hio, send, rncomp);
				continue;
			}
			break;
		case BIO_WRITE:
			ret = pwrite(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret < 0)
				hio->hio_errors[ncomp] = errno;
			else if (ret != ggio->gctl_length)
				/* Short write: report as I/O error. */
				hio->hio_errors[ncomp] = EIO;
			else
				hio->hio_errors[ncomp] = 0;
			break;
		case BIO_DELETE:
			ret = g_delete(res->hr_localfd,
			    ggio->gctl_offset + res->hr_localoff,
			    ggio->gctl_length);
			if (ret < 0)
				hio->hio_errors[ncomp] = errno;
			else
				hio->hio_errors[ncomp] = 0;
			break;
		case BIO_FLUSH:
			ret = g_flush(res->hr_localfd);
			if (ret < 0)
				hio->hio_errors[ncomp] = errno;
			else
				hio->hio_errors[ncomp] = 0;
			break;
		}
		/* Last component to finish passes the request on. */
		if (refcount_release(&hio->hio_countdown)) {
			if (ISSYNCREQ(hio)) {
				mtx_lock(&sync_lock);
				SYNCREQDONE(hio);
				mtx_unlock(&sync_lock);
				cv_signal(&sync_cond);
			} else {
				pjdlog_debug(2,
				    "local_send: (%p) Moving request to the done queue.",
				    hio);
				QUEUE_INSERT2(hio, done);
			}
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread sends request to secondary node.
 */
static void *
remote_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	bool wakeup;
	uint64_t offset, length;
	uint8_t cmd;
	void *data;

	/* Remote component is 1 for now. */
	ncomp = 1;

	for (;;) {
		pjdlog_debug(2, "remote_send: Taking request.");
		QUEUE_TAKE1(hio, send, ncomp);
		pjdlog_debug(2, "remote_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		/* Translate the ggate command into the HAST wire protocol. */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			cmd = HIO_READ;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_WRITE:
			cmd = HIO_WRITE;
			data = ggio->gctl_data;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_DELETE:
			cmd = HIO_DELETE;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_FLUSH:
			cmd = HIO_FLUSH;
			data = NULL;
			offset = 0;
			length = 0;
			break;
		default:
			assert(!"invalid condition");
			abort();
		}
		nv = nv_alloc();
		nv_add_uint8(nv, cmd, "cmd");
		nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq");
		nv_add_uint64(nv, offset, "offset");
		nv_add_uint64(nv, length, "length");
		if (nv_error(nv) != 0) {
			hio->hio_errors[ncomp] = nv_error(nv);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to prepare header to send.",
			    hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to prepare header to send (%s): ",
			    strerror(nv_error(nv)));
			/* Move failed request immediately to the done queue. */
			goto done_queue;
		}
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the recv queue.",
		    hio);
		/*
		 * Protect connection from disappearing.
		 */
		rw_rlock(&hio_remote_lock[ncomp]);
		if (!ISCONNECTED(res, ncomp)) {
			rw_unlock(&hio_remote_lock[ncomp]);
			hio->hio_errors[ncomp] = ENOTCONN;
			goto done_queue;
		}
		/*
		 * Move the request to recv queue before sending it, because
		 * in different order we can get reply before we move request
		 * to recv queue.
		 */
		mtx_lock(&hio_recv_list_lock[ncomp]);
		wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]);
		TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		if (hast_proto_send(res, res->hr_remoteout, nv, data,
		    data != NULL ? length : 0) < 0) {
			/* Save errno before any call can overwrite it. */
			hio->hio_errors[ncomp] = errno;
			rw_unlock(&hio_remote_lock[ncomp]);
			remote_close(res, ncomp);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to send request.", hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to send request (%s): ",
			    strerror(hio->hio_errors[ncomp]));
			/*
			 * Take request back from the receive queue and move
			 * it immediately to the done queue.
			 */
			mtx_lock(&hio_recv_list_lock[ncomp]);
			TAILQ_REMOVE(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
			mtx_unlock(&hio_recv_list_lock[ncomp]);
			goto done_queue;
		}
		rw_unlock(&hio_remote_lock[ncomp]);
		nv_free(nv);
		/* Wake remote_recv only on empty-to-nonempty transition. */
		if (wakeup)
			cv_signal(&hio_recv_list_cond[ncomp]);
		continue;
done_queue:
		nv_free(nv);
		if (ISSYNCREQ(hio)) {
			if (!refcount_release(&hio->hio_countdown))
				continue;
			mtx_lock(&sync_lock);
			SYNCREQDONE(hio);
			mtx_unlock(&sync_lock);
			cv_signal(&sync_cond);
			continue;
		}
		if (ggio->gctl_cmd == BIO_WRITE) {
			/*
			 * A write failed remotely - remember that this extent
			 * needs synchronization once the connection is back.
			 */
			mtx_lock(&res->hr_amp_lock);
			if (activemap_need_sync(res->hr_amp, ggio->gctl_offset,
			    ggio->gctl_length)) {
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
		}
		if (!refcount_release(&hio->hio_countdown))
			continue;
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the done queue.",
		    hio);
		QUEUE_INSERT2(hio, done);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread receives answer from secondary node and passes it to ggate_send
 * thread.
1264204076Spjd */ 1265204076Spjdstatic void * 1266204076Spjdremote_recv_thread(void *arg) 1267204076Spjd{ 1268204076Spjd struct hast_resource *res = arg; 1269204076Spjd struct g_gate_ctl_io *ggio; 1270204076Spjd struct hio *hio; 1271204076Spjd struct nv *nv; 1272204076Spjd unsigned int ncomp; 1273204076Spjd uint64_t seq; 1274204076Spjd int error; 1275204076Spjd 1276204076Spjd /* Remote component is 1 for now. */ 1277204076Spjd ncomp = 1; 1278204076Spjd 1279204076Spjd for (;;) { 1280204076Spjd /* Wait until there is anything to receive. */ 1281204076Spjd mtx_lock(&hio_recv_list_lock[ncomp]); 1282204076Spjd while (TAILQ_EMPTY(&hio_recv_list[ncomp])) { 1283204076Spjd pjdlog_debug(2, "remote_recv: No requests, waiting."); 1284204076Spjd cv_wait(&hio_recv_list_cond[ncomp], 1285204076Spjd &hio_recv_list_lock[ncomp]); 1286204076Spjd } 1287204076Spjd mtx_unlock(&hio_recv_list_lock[ncomp]); 1288204076Spjd rw_rlock(&hio_remote_lock[ncomp]); 1289204076Spjd if (!ISCONNECTED(res, ncomp)) { 1290204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1291204076Spjd /* 1292204076Spjd * Connection is dead, so move all pending requests to 1293204076Spjd * the done queue (one-by-one). 
1294204076Spjd */ 1295204076Spjd mtx_lock(&hio_recv_list_lock[ncomp]); 1296204076Spjd hio = TAILQ_FIRST(&hio_recv_list[ncomp]); 1297204076Spjd assert(hio != NULL); 1298204076Spjd TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1299204076Spjd hio_next[ncomp]); 1300204076Spjd mtx_unlock(&hio_recv_list_lock[ncomp]); 1301204076Spjd goto done_queue; 1302204076Spjd } 1303204076Spjd if (hast_proto_recv_hdr(res->hr_remotein, &nv) < 0) { 1304204076Spjd pjdlog_errno(LOG_ERR, 1305204076Spjd "Unable to receive reply header"); 1306204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1307204076Spjd remote_close(res, ncomp); 1308204076Spjd continue; 1309204076Spjd } 1310204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1311204076Spjd seq = nv_get_uint64(nv, "seq"); 1312204076Spjd if (seq == 0) { 1313204076Spjd pjdlog_error("Header contains no 'seq' field."); 1314204076Spjd nv_free(nv); 1315204076Spjd continue; 1316204076Spjd } 1317204076Spjd mtx_lock(&hio_recv_list_lock[ncomp]); 1318204076Spjd TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) { 1319204076Spjd if (hio->hio_ggio.gctl_seq == seq) { 1320204076Spjd TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1321204076Spjd hio_next[ncomp]); 1322204076Spjd break; 1323204076Spjd } 1324204076Spjd } 1325204076Spjd mtx_unlock(&hio_recv_list_lock[ncomp]); 1326204076Spjd if (hio == NULL) { 1327204076Spjd pjdlog_error("Found no request matching received 'seq' field (%ju).", 1328204076Spjd (uintmax_t)seq); 1329204076Spjd nv_free(nv); 1330204076Spjd continue; 1331204076Spjd } 1332204076Spjd error = nv_get_int16(nv, "error"); 1333204076Spjd if (error != 0) { 1334204076Spjd /* Request failed on remote side. 
*/ 1335204076Spjd hio->hio_errors[ncomp] = 0; 1336204076Spjd nv_free(nv); 1337204076Spjd goto done_queue; 1338204076Spjd } 1339204076Spjd ggio = &hio->hio_ggio; 1340204076Spjd switch (ggio->gctl_cmd) { 1341204076Spjd case BIO_READ: 1342204076Spjd rw_rlock(&hio_remote_lock[ncomp]); 1343204076Spjd if (!ISCONNECTED(res, ncomp)) { 1344204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1345204076Spjd nv_free(nv); 1346204076Spjd goto done_queue; 1347204076Spjd } 1348204076Spjd if (hast_proto_recv_data(res, res->hr_remotein, nv, 1349204076Spjd ggio->gctl_data, ggio->gctl_length) < 0) { 1350204076Spjd hio->hio_errors[ncomp] = errno; 1351204076Spjd pjdlog_errno(LOG_ERR, 1352204076Spjd "Unable to receive reply data"); 1353204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1354204076Spjd nv_free(nv); 1355204076Spjd remote_close(res, ncomp); 1356204076Spjd goto done_queue; 1357204076Spjd } 1358204076Spjd rw_unlock(&hio_remote_lock[ncomp]); 1359204076Spjd break; 1360204076Spjd case BIO_WRITE: 1361204076Spjd case BIO_DELETE: 1362204076Spjd case BIO_FLUSH: 1363204076Spjd break; 1364204076Spjd default: 1365204076Spjd assert(!"invalid condition"); 1366204076Spjd abort(); 1367204076Spjd } 1368204076Spjd hio->hio_errors[ncomp] = 0; 1369204076Spjd nv_free(nv); 1370204076Spjddone_queue: 1371204076Spjd if (refcount_release(&hio->hio_countdown)) { 1372204076Spjd if (ISSYNCREQ(hio)) { 1373204076Spjd mtx_lock(&sync_lock); 1374204076Spjd SYNCREQDONE(hio); 1375204076Spjd mtx_unlock(&sync_lock); 1376204076Spjd cv_signal(&sync_cond); 1377204076Spjd } else { 1378204076Spjd pjdlog_debug(2, 1379204076Spjd "remote_recv: (%p) Moving request to the done queue.", 1380204076Spjd hio); 1381204076Spjd QUEUE_INSERT2(hio, done); 1382204076Spjd } 1383204076Spjd } 1384204076Spjd } 1385204076Spjd /* NOTREACHED */ 1386204076Spjd return (NULL); 1387204076Spjd} 1388204076Spjd 1389204076Spjd/* 1390204076Spjd * Thread sends answer to the kernel. 
 */
static void *
ggate_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomp, ncomps;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		pjdlog_debug(2, "ggate_send: Taking request.");
		QUEUE_TAKE2(hio, done);
		pjdlog_debug(2, "ggate_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		for (ii = 0; ii < ncomps; ii++) {
			if (hio->hio_errors[ii] == 0) {
				/*
				 * One successful request is enough to declare
				 * success.
				 */
				ggio->gctl_error = 0;
				break;
			}
		}
		if (ii == ncomps) {
			/*
			 * None of the requests were successful.
			 * Use first error.
			 */
			ggio->gctl_error = hio->hio_errors[0];
		}
		if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) {
			/* Write done everywhere; mark the extent clean. */
			mtx_lock(&res->hr_amp_lock);
			activemap_write_complete(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length);
			mtx_unlock(&res->hr_amp_lock);
		}
		if (ggio->gctl_cmd == BIO_WRITE) {
			/*
			 * Unlock range we locked.
			 */
			mtx_lock(&range_lock);
			rangelock_del(range_regular, ggio->gctl_offset,
			    ggio->gctl_length);
			if (range_sync_wait)
				cv_signal(&range_sync_cond);
			mtx_unlock(&range_lock);
			/*
			 * Bump local count if this is first write after
			 * connection failure with remote node.
			 */
			ncomp = 1;
			rw_rlock(&hio_remote_lock[ncomp]);
			if (!ISCONNECTED(res, ncomp)) {
				mtx_lock(&metadata_lock);
				if (res->hr_primary_localcnt ==
				    res->hr_secondary_remotecnt) {
					res->hr_primary_localcnt++;
					pjdlog_debug(1,
					    "Increasing localcnt to %ju.",
					    (uintmax_t)res->hr_primary_localcnt);
					(void)metadata_write(res);
				}
				mtx_unlock(&metadata_lock);
			}
			rw_unlock(&hio_remote_lock[ncomp]);
		}
		/* Hand the completed request back to the kernel. */
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) < 0)
			primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed");
		pjdlog_debug(2,
		    "ggate_send: (%p) Moving request to the free queue.", hio);
		QUEUE_INSERT2(hio, free);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread synchronize local and remote components.
 */
static void *
sync_thread(void *arg __unused)
{
	/*
	 * NOTE(review): arg is used below despite the __unused annotation
	 * on the parameter - the annotation looks stale; confirm and drop.
	 */
	struct hast_resource *res = arg;
	struct hio *hio;
	struct g_gate_ctl_io *ggio;
	unsigned int ii, ncomp, ncomps;
	off_t offset, length, synced;
	bool dorewind;
	int syncext;

	ncomps = HAST_NCOMPONENTS;
	dorewind = true;
	synced = 0;

	for (;;) {
		/* Sleep until synchronization is requested (sync_start()). */
		mtx_lock(&sync_lock);
		while (!sync_inprogress) {
			dorewind = true;
			synced = 0;
			cv_wait(&sync_cond, &sync_lock);
		}
		mtx_unlock(&sync_lock);
		/*
		 * Obtain offset at which we should synchronize.
		 * Rewind synchronization if needed.
		 */
		mtx_lock(&res->hr_amp_lock);
		if (dorewind)
			activemap_sync_rewind(res->hr_amp);
		offset = activemap_sync_offset(res->hr_amp, &length, &syncext);
		if (syncext != -1) {
			/*
			 * We synchronized entire syncext extent, we can mark
			 * it as clean now.
			 */
			if (activemap_extent_complete(res->hr_amp, syncext))
				(void)hast_activemap_flush(res);
		}
		mtx_unlock(&res->hr_amp_lock);
		if (dorewind) {
			dorewind = false;
			if (offset < 0)
				pjdlog_info("Nodes are in sync.");
			else {
				pjdlog_info("Synchronization started. %ju bytes to go.",
				    (uintmax_t)(res->hr_extentsize *
				    activemap_ndirty(res->hr_amp)));
			}
		}
		if (offset < 0) {
			/* Negative offset means there is nothing dirty left. */
			mtx_lock(&sync_lock);
			sync_inprogress = false;
			mtx_unlock(&sync_lock);
			pjdlog_debug(1, "Nothing to synchronize.");
			/*
			 * Synchronization complete, make both localcnt and
			 * remotecnt equal.
			 */
			ncomp = 1;
			rw_rlock(&hio_remote_lock[ncomp]);
			if (ISCONNECTED(res, ncomp)) {
				if (synced > 0) {
					pjdlog_info("Synchronization complete. "
					    "%jd bytes synchronized.",
					    (intmax_t)synced);
				}
				mtx_lock(&metadata_lock);
				res->hr_syncsrc = HAST_SYNCSRC_UNDEF;
				res->hr_primary_localcnt =
				    res->hr_secondary_localcnt;
				res->hr_primary_remotecnt =
				    res->hr_secondary_remotecnt;
				/*
				 * NOTE(review): the second argument below is
				 * hr_secondary_localcnt, not the remotecnt
				 * just assigned - the debug message may print
				 * a misleading "remotecnt" value; verify.
				 */
				pjdlog_debug(1,
				    "Setting localcnt to %ju and remotecnt to %ju.",
				    (uintmax_t)res->hr_primary_localcnt,
				    (uintmax_t)res->hr_secondary_localcnt);
				(void)metadata_write(res);
				mtx_unlock(&metadata_lock);
			} else if (synced > 0) {
				pjdlog_info("Synchronization interrupted. "
				    "%jd bytes synchronized so far.",
				    (intmax_t)synced);
			}
			rw_unlock(&hio_remote_lock[ncomp]);
			continue;
		}
		pjdlog_debug(2, "sync: Taking free request.");
		QUEUE_TAKE2(hio, free);
		pjdlog_debug(2, "sync: (%p) Got free request.", hio);
		/*
		 * Lock the range we are going to synchronize. We don't want
		 * race where someone writes between our read and write.
		 */
		for (;;) {
			mtx_lock(&range_lock);
			if (rangelock_islocked(range_regular, offset, length)) {
				pjdlog_debug(2,
				    "sync: Range offset=%jd length=%jd locked.",
				    (intmax_t)offset, (intmax_t)length);
				range_sync_wait = true;
				cv_wait(&range_sync_cond, &range_lock);
				range_sync_wait = false;
				mtx_unlock(&range_lock);
				continue;
			}
			if (rangelock_add(range_sync, offset, length) < 0) {
				mtx_unlock(&range_lock);
				pjdlog_debug(2,
				    "sync: Range offset=%jd length=%jd is already locked, waiting.",
				    (intmax_t)offset, (intmax_t)length);
				sleep(1);
				continue;
			}
			mtx_unlock(&range_lock);
			break;
		}
		/*
		 * First read the data from synchronization source.
		 */
		SYNCREQ(hio);
		ggio = &hio->hio_ggio;
		ggio->gctl_cmd = BIO_READ;
		ggio->gctl_offset = offset;
		ggio->gctl_length = length;
		ggio->gctl_error = 0;
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ",
		    hio);
		pjdlog_debug(2, "sync: (%p) Moving request to the send queue.",
		    hio);
		mtx_lock(&metadata_lock);
		if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
			/*
			 * This range is up-to-date on local component,
			 * so handle request locally.
			 */
			/* Local component is 0 for now. */
			ncomp = 0;
		} else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ {
			assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY);
			/*
			 * This range is out-of-date on local component,
			 * so send request to the remote node.
			 */
			/* Remote component is 1 for now. */
			ncomp = 1;
		}
		mtx_unlock(&metadata_lock);
		refcount_init(&hio->hio_countdown, 1);
		QUEUE_INSERT1(hio, send, ncomp);

		/*
		 * Let's wait for READ to finish.
		 */
		mtx_lock(&sync_lock);
		while (!ISSYNCREQDONE(hio))
			cv_wait(&sync_cond, &sync_lock);
		mtx_unlock(&sync_lock);

		if (hio->hio_errors[ncomp] != 0) {
			pjdlog_error("Unable to read synchronization data: %s.",
			    strerror(hio->hio_errors[ncomp]));
			goto free_queue;
		}

		/*
		 * We read the data from synchronization source, now write it
		 * to synchronization target.
		 */
		SYNCREQ(hio);
		ggio->gctl_cmd = BIO_WRITE;
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ",
		    hio);
		pjdlog_debug(2, "sync: (%p) Moving request to the send queue.",
		    hio);
		mtx_lock(&metadata_lock);
		if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
			/*
			 * This range is up-to-date on local component,
			 * so we update remote component.
			 */
			/* Remote component is 1 for now. */
			ncomp = 1;
		} else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ {
			assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY);
			/*
			 * This range is out-of-date on local component,
			 * so we update it.
			 */
			/* Local component is 0 for now. */
			ncomp = 0;
		}
		mtx_unlock(&metadata_lock);

		pjdlog_debug(2, "sync: (%p) Moving request to the send queues.",
		    hio);
		refcount_init(&hio->hio_countdown, 1);
		QUEUE_INSERT1(hio, send, ncomp);

		/*
		 * Let's wait for WRITE to finish.
		 */
		mtx_lock(&sync_lock);
		while (!ISSYNCREQDONE(hio))
			cv_wait(&sync_cond, &sync_lock);
		mtx_unlock(&sync_lock);

		if (hio->hio_errors[ncomp] != 0) {
			pjdlog_error("Unable to write synchronization data: %s.",
			    strerror(hio->hio_errors[ncomp]));
			goto free_queue;
		}
free_queue:
		/* Drop the sync range lock and wake waiting writers. */
		mtx_lock(&range_lock);
		rangelock_del(range_sync, offset, length);
		if (range_regular_wait)
			cv_signal(&range_regular_cond);
		mtx_unlock(&range_lock);

		/*
		 * NOTE(review): synced is bumped even when the read or write
		 * above failed, so the reported byte count can overstate
		 * actual progress; verify whether that is intended.
		 */
		synced += length;

		pjdlog_debug(2, "sync: (%p) Moving request to the free queue.",
		    hio);
		QUEUE_INSERT2(hio, free);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Signal handler for SIGINT/SIGTERM: request graceful exit and poke the
 * guard thread so it notices sigexit_received promptly.
 */
static void
sighandler(int sig)
{
	bool unlock;

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		sigexit_received = true;
		break;
	default:
		assert(!"invalid condition");
	}
	/*
	 * XXX: Racy, but if we cannot obtain hio_guard_lock here, we don't
	 * want to risk deadlock.
	 */
	unlock = mtx_trylock(&hio_guard_lock);
	cv_signal(&hio_guard_cond);
	if (unlock)
		mtx_unlock(&hio_guard_lock);
}

/*
 * Thread guards remote connections and reconnects when needed, handles
 * signals, etc.
 */
static void *
guard_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct proto_conn *in, *out;
	unsigned int ii, ncomps;
	int timeout;

	ncomps = HAST_NCOMPONENTS;
	/* There is only one remote component for now. */
#define	ISREMOTE(no)	((no) == 1)

	for (;;) {
		if (sigexit_received) {
			primary_exitx(EX_OK,
			    "Termination signal received, exiting.");
		}
		/*
		 * If all the connection will be fine, we will sleep until
		 * someone wakes us up.
		 * If any of the connections will be broken and we won't be
		 * able to connect, we will sleep only for RECONNECT_SLEEP
		 * seconds so we can retry soon.
1756204076Spjd */ 1757204076Spjd timeout = 0; 1758204076Spjd pjdlog_debug(2, "remote_guard: Checking connections."); 1759204076Spjd mtx_lock(&hio_guard_lock); 1760204076Spjd for (ii = 0; ii < ncomps; ii++) { 1761204076Spjd if (!ISREMOTE(ii)) 1762204076Spjd continue; 1763204076Spjd rw_rlock(&hio_remote_lock[ii]); 1764204076Spjd if (ISCONNECTED(res, ii)) { 1765204076Spjd assert(res->hr_remotein != NULL); 1766204076Spjd assert(res->hr_remoteout != NULL); 1767204076Spjd rw_unlock(&hio_remote_lock[ii]); 1768204076Spjd pjdlog_debug(2, 1769204076Spjd "remote_guard: Connection to %s is ok.", 1770204076Spjd res->hr_remoteaddr); 1771210881Spjd } else if (real_remote(res)) { 1772204076Spjd assert(res->hr_remotein == NULL); 1773204076Spjd assert(res->hr_remoteout == NULL); 1774204076Spjd /* 1775204076Spjd * Upgrade the lock. It doesn't have to be 1776204076Spjd * atomic as no other thread can change 1777204076Spjd * connection status from disconnected to 1778204076Spjd * connected. 1779204076Spjd */ 1780204076Spjd rw_unlock(&hio_remote_lock[ii]); 1781204076Spjd pjdlog_debug(2, 1782204076Spjd "remote_guard: Reconnecting to %s.", 1783204076Spjd res->hr_remoteaddr); 1784205738Spjd in = out = NULL; 1785205738Spjd if (init_remote(res, &in, &out)) { 1786205738Spjd rw_wlock(&hio_remote_lock[ii]); 1787205738Spjd assert(res->hr_remotein == NULL); 1788205738Spjd assert(res->hr_remoteout == NULL); 1789205738Spjd assert(in != NULL && out != NULL); 1790205738Spjd res->hr_remotein = in; 1791205738Spjd res->hr_remoteout = out; 1792205738Spjd rw_unlock(&hio_remote_lock[ii]); 1793204076Spjd pjdlog_info("Successfully reconnected to %s.", 1794204076Spjd res->hr_remoteaddr); 1795205738Spjd sync_start(); 1796204076Spjd } else { 1797204076Spjd /* Both connections should be NULL. 
*/ 1798204076Spjd assert(res->hr_remotein == NULL); 1799204076Spjd assert(res->hr_remoteout == NULL); 1800205738Spjd assert(in == NULL && out == NULL); 1801204076Spjd pjdlog_debug(2, 1802204076Spjd "remote_guard: Reconnect to %s failed.", 1803204076Spjd res->hr_remoteaddr); 1804204076Spjd timeout = RECONNECT_SLEEP; 1805204076Spjd } 1806210881Spjd } else { 1807210881Spjd rw_unlock(&hio_remote_lock[ii]); 1808204076Spjd } 1809204076Spjd } 1810204076Spjd (void)cv_timedwait(&hio_guard_cond, &hio_guard_lock, timeout); 1811204076Spjd mtx_unlock(&hio_guard_lock); 1812204076Spjd } 1813204076Spjd#undef ISREMOTE 1814204076Spjd /* NOTREACHED */ 1815204076Spjd return (NULL); 1816204076Spjd} 1817