1/* 2 drbd_nl.c 3 4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg. 5 6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH. 7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>. 8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. 9 10 drbd is free software; you can redistribute it and/or modify 11 it under the terms of the GNU General Public License as published by 12 the Free Software Foundation; either version 2, or (at your option) 13 any later version. 14 15 drbd is distributed in the hope that it will be useful, 16 but WITHOUT ANY WARRANTY; without even the implied warranty of 17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 GNU General Public License for more details. 19 20 You should have received a copy of the GNU General Public License 21 along with drbd; see the file COPYING. If not, write to 22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 23 24 */ 25 26#include <linux/module.h> 27#include <linux/drbd.h> 28#include <linux/in.h> 29#include <linux/fs.h> 30#include <linux/file.h> 31#include <linux/slab.h> 32#include <linux/connector.h> 33#include <linux/blkpg.h> 34#include <linux/cpumask.h> 35#include "drbd_int.h" 36#include "drbd_wrappers.h" 37#include <asm/unaligned.h> 38#include <linux/drbd_tag_magic.h> 39#include <linux/drbd_limits.h> 40 41static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int); 42static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *); 43static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *); 44 45/* see get_sb_bdev and bd_claim */ 46static char *drbd_m_holder = "Hands off! this is DRBD's meta data device."; 47 48/* Generate the tag_list to struct functions */ 49#define NL_PACKET(name, number, fields) \ 50static int name ## _from_tags(struct drbd_conf *mdev, \ 51 unsigned short *tags, struct name *arg) __attribute__ ((unused)); \ 52static int name ## _from_tags(struct drbd_conf *mdev, \ 53 unsigned short *tags, struct name *arg) \ 54{ \ 55 int tag; \ 56 int dlen; \ 57 \ 58 while ((tag = get_unaligned(tags++)) != TT_END) { \ 59 dlen = get_unaligned(tags++); \ 60 switch (tag_number(tag)) { \ 61 fields \ 62 default: \ 63 if (tag & T_MANDATORY) { \ 64 dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \ 65 return 0; \ 66 } \ 67 } \ 68 tags = (unsigned short *)((char *)tags + dlen); \ 69 } \ 70 return 1; \ 71} 72#define NL_INTEGER(pn, pr, member) \ 73 case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \ 74 arg->member = get_unaligned((int *)(tags)); \ 75 break; 76#define NL_INT64(pn, pr, member) \ 77 case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \ 78 arg->member = get_unaligned((u64 *)(tags)); \ 79 break; 80#define NL_BIT(pn, pr, member) \ 81 case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \ 82 arg->member = *(char *)(tags) ? 1 : 0; \ 83 break; 84#define NL_STRING(pn, pr, member, len) \ 85 case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \ 86 if (dlen > len) { \ 87 dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \ 88 #member, dlen, (unsigned int)len); \ 89 return 0; \ 90 } \ 91 arg->member ## _len = dlen; \ 92 memcpy(arg->member, tags, min_t(size_t, dlen, len)); \ 93 break; 94#include "linux/drbd_nl.h" 95 96/* Generate the struct to tag_list functions */ 97#define NL_PACKET(name, number, fields) \ 98static unsigned short* \ 99name ## _to_tags(struct drbd_conf *mdev, \ 100 struct name *arg, unsigned short *tags) __attribute__ ((unused)); \ 101static unsigned short* \ 102name ## _to_tags(struct drbd_conf *mdev, \ 103 struct name *arg, unsigned short *tags) \ 104{ \ 105 fields \ 106 return tags; \ 107} 108 109#define NL_INTEGER(pn, pr, member) \ 110 put_unaligned(pn | pr | TT_INTEGER, tags++); \ 111 put_unaligned(sizeof(int), tags++); \ 112 put_unaligned(arg->member, (int *)tags); \ 113 tags = (unsigned short *)((char *)tags+sizeof(int)); 114#define NL_INT64(pn, pr, member) \ 115 put_unaligned(pn | pr | TT_INT64, tags++); \ 116 put_unaligned(sizeof(u64), tags++); \ 117 put_unaligned(arg->member, (u64 *)tags); \ 118 tags = (unsigned short *)((char *)tags+sizeof(u64)); 119#define NL_BIT(pn, pr, member) \ 120 put_unaligned(pn | pr | TT_BIT, tags++); \ 121 put_unaligned(sizeof(char), tags++); \ 122 *(char *)tags = arg->member; \ 123 tags = (unsigned short *)((char *)tags+sizeof(char)); 124#define NL_STRING(pn, pr, member, len) \ 125 put_unaligned(pn | pr | TT_STRING, tags++); \ 126 put_unaligned(arg->member ## _len, tags++); \ 127 memcpy(tags, arg->member, arg->member ## _len); \ 128 tags = (unsigned short *)((char *)tags + arg->member ## _len); 129#include "linux/drbd_nl.h" 130 131void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name); 132void drbd_nl_send_reply(struct cn_msg *, int); 133 134int drbd_khelper(struct drbd_conf *mdev, char *cmd) 135{ 136 char *envp[] = { "HOME=/", 137 "TERM=linux", 138 "PATH=/sbin:/usr/sbin:/bin:/usr/bin", 139 NULL, /* Will be set to address family */ 140 NULL, /* Will be set to address */ 141 NULL }; 142 143 char mb[12], af[20], ad[60], *afs; 144 char *argv[] = {usermode_helper, cmd, mb, NULL }; 145 int ret; 146 147 snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev)); 148 149 if (get_net_conf(mdev)) { 150 switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) { 151 case AF_INET6: 152 afs = "ipv6"; 153 snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6", 154 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr); 155 break; 156 case AF_INET: 157 afs = "ipv4"; 158 snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4", 159 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr); 160 break; 161 default: 162 afs = "ssocks"; 163 snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4", 164 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr); 165 } 166 snprintf(af, 20, "DRBD_PEER_AF=%s", afs); 167 envp[3]=af; 168 envp[4]=ad; 169 put_net_conf(mdev); 170 } 171 172 dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb); 173 174 drbd_bcast_ev_helper(mdev, cmd); 175 ret = call_usermodehelper(usermode_helper, argv, envp, 1); 176 if (ret) 177 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n", 178 usermode_helper, cmd, mb, 179 (ret >> 8) & 0xff, ret); 180 else 181 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n", 182 usermode_helper, cmd, mb, 183 (ret >> 8) & 0xff, ret); 184 185 if (ret < 0) /* Ignore any ERRNOs we got. */ 186 ret = 0; 187 188 return ret; 189} 190 191enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev) 192{ 193 char *ex_to_string; 194 int r; 195 enum drbd_disk_state nps; 196 enum drbd_fencing_p fp; 197 198 D_ASSERT(mdev->state.pdsk == D_UNKNOWN); 199 200 if (get_ldev_if_state(mdev, D_CONSISTENT)) { 201 fp = mdev->ldev->dc.fencing; 202 put_ldev(mdev); 203 } else { 204 dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n"); 205 return mdev->state.pdsk; 206 } 207 208 if (fp == FP_STONITH) 209 _drbd_request_state(mdev, NS(susp, 1), CS_WAIT_COMPLETE); 210 211 r = drbd_khelper(mdev, "fence-peer"); 212 213 switch ((r>>8) & 0xff) { 214 case 3: /* peer is inconsistent */ 215 ex_to_string = "peer is inconsistent or worse"; 216 nps = D_INCONSISTENT; 217 break; 218 case 4: /* peer got outdated, or was already outdated */ 219 ex_to_string = "peer was fenced"; 220 nps = D_OUTDATED; 221 break; 222 case 5: /* peer was down */ 223 if (mdev->state.disk == D_UP_TO_DATE) { 224 /* we will(have) create(d) a new UUID anyways... */ 225 ex_to_string = "peer is unreachable, assumed to be dead"; 226 nps = D_OUTDATED; 227 } else { 228 ex_to_string = "peer unreachable, doing nothing since disk != UpToDate"; 229 nps = mdev->state.pdsk; 230 } 231 break; 232 case 6: /* Peer is primary, voluntarily outdate myself. 233 * This is useful when an unconnected R_SECONDARY is asked to 234 * become R_PRIMARY, but finds the other peer being active. */ 235 ex_to_string = "peer is active"; 236 dev_warn(DEV, "Peer is primary, outdating myself.\n"); 237 nps = D_UNKNOWN; 238 _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE); 239 break; 240 case 7: 241 if (fp != FP_STONITH) 242 dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n"); 243 ex_to_string = "peer was stonithed"; 244 nps = D_OUTDATED; 245 break; 246 default: 247 /* The script is broken ... */ 248 nps = D_UNKNOWN; 249 dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff); 250 return nps; 251 } 252 253 dev_info(DEV, "fence-peer helper returned %d (%s)\n", 254 (r>>8) & 0xff, ex_to_string); 255 return nps; 256} 257 258 259int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) 260{ 261 const int max_tries = 4; 262 int r = 0; 263 int try = 0; 264 int forced = 0; 265 union drbd_state mask, val; 266 enum drbd_disk_state nps; 267 268 if (new_role == R_PRIMARY) 269 request_ping(mdev); /* Detect a dead peer ASAP */ 270 271 mutex_lock(&mdev->state_mutex); 272 273 mask.i = 0; mask.role = R_MASK; 274 val.i = 0; val.role = new_role; 275 276 while (try++ < max_tries) { 277 r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); 278 279 /* in case we first succeeded to outdate, 280 * but now suddenly could establish a connection */ 281 if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) { 282 val.pdsk = 0; 283 mask.pdsk = 0; 284 continue; 285 } 286 287 if (r == SS_NO_UP_TO_DATE_DISK && force && 288 (mdev->state.disk < D_UP_TO_DATE && 289 mdev->state.disk >= D_INCONSISTENT)) { 290 mask.disk = D_MASK; 291 val.disk = D_UP_TO_DATE; 292 forced = 1; 293 continue; 294 } 295 296 if (r == SS_NO_UP_TO_DATE_DISK && 297 mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) { 298 D_ASSERT(mdev->state.pdsk == D_UNKNOWN); 299 nps = drbd_try_outdate_peer(mdev); 300 301 if (nps == D_OUTDATED || nps == D_INCONSISTENT) { 302 val.disk = D_UP_TO_DATE; 303 mask.disk = D_MASK; 304 } 305 306 val.pdsk = nps; 307 mask.pdsk = D_MASK; 308 309 continue; 310 } 311 312 if (r == SS_NOTHING_TO_DO) 313 goto fail; 314 if (r == SS_PRIMARY_NOP && mask.pdsk == 0) { 315 nps = drbd_try_outdate_peer(mdev); 316 317 if (force && nps > D_OUTDATED) { 318 dev_warn(DEV, "Forced into split brain situation!\n"); 319 nps = D_OUTDATED; 320 } 321 322 mask.pdsk = D_MASK; 323 val.pdsk = nps; 324 325 continue; 326 } 327 if (r == SS_TWO_PRIMARIES) { 328 /* Maybe the peer is detected as dead very soon... 329 retry at most once more in this case. */ 330 __set_current_state(TASK_INTERRUPTIBLE); 331 schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10); 332 if (try < max_tries) 333 try = max_tries - 1; 334 continue; 335 } 336 if (r < SS_SUCCESS) { 337 r = _drbd_request_state(mdev, mask, val, 338 CS_VERBOSE + CS_WAIT_COMPLETE); 339 if (r < SS_SUCCESS) 340 goto fail; 341 } 342 break; 343 } 344 345 if (r < SS_SUCCESS) 346 goto fail; 347 348 if (forced) 349 dev_warn(DEV, "Forced to consider local data as UpToDate!\n"); 350 351 /* Wait until nothing is on the fly :) */ 352 wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0); 353 354 if (new_role == R_SECONDARY) { 355 set_disk_ro(mdev->vdisk, TRUE); 356 if (get_ldev(mdev)) { 357 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; 358 put_ldev(mdev); 359 } 360 } else { 361 if (get_net_conf(mdev)) { 362 mdev->net_conf->want_lose = 0; 363 put_net_conf(mdev); 364 } 365 set_disk_ro(mdev->vdisk, FALSE); 366 if (get_ldev(mdev)) { 367 if (((mdev->state.conn < C_CONNECTED || 368 mdev->state.pdsk <= D_FAILED) 369 && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced) 370 drbd_uuid_new_current(mdev); 371 372 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1; 373 put_ldev(mdev); 374 } 375 } 376 377 if ((new_role == R_SECONDARY) && get_ldev(mdev)) { 378 drbd_al_to_on_disk_bm(mdev); 379 put_ldev(mdev); 380 } 381 382 if (mdev->state.conn >= C_WF_REPORT_PARAMS) { 383 /* if this was forced, we should consider sync */ 384 if (forced) 385 drbd_send_uuids(mdev); 386 drbd_send_state(mdev); 387 } 388 389 drbd_md_sync(mdev); 390 391 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 392 fail: 393 mutex_unlock(&mdev->state_mutex); 394 return r; 395} 396 397 398static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 399 struct drbd_nl_cfg_reply *reply) 400{ 401 struct primary primary_args; 402 403 memset(&primary_args, 0, sizeof(struct primary)); 404 if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) { 405 reply->ret_code = ERR_MANDATORY_TAG; 406 return 0; 407 } 408 409 reply->ret_code = 410 drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force); 411 412 return 0; 413} 414 415static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 416 struct drbd_nl_cfg_reply *reply) 417{ 418 reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0); 419 420 return 0; 421} 422 423/* initializes the md.*_offset members, so we are able to find 424 * the on disk meta data */ 425static void drbd_md_set_sector_offsets(struct drbd_conf *mdev, 426 struct drbd_backing_dev *bdev) 427{ 428 sector_t md_size_sect = 0; 429 switch (bdev->dc.meta_dev_idx) { 430 default: 431 /* v07 style fixed size indexed meta data */ 432 bdev->md.md_size_sect = MD_RESERVED_SECT; 433 bdev->md.md_offset = drbd_md_ss__(mdev, bdev); 434 bdev->md.al_offset = MD_AL_OFFSET; 435 bdev->md.bm_offset = MD_BM_OFFSET; 436 break; 437 case DRBD_MD_INDEX_FLEX_EXT: 438 /* just occupy the full device; unit: sectors */ 439 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev); 440 bdev->md.md_offset = 0; 441 bdev->md.al_offset = MD_AL_OFFSET; 442 bdev->md.bm_offset = MD_BM_OFFSET; 443 break; 444 case DRBD_MD_INDEX_INTERNAL: 445 case DRBD_MD_INDEX_FLEX_INT: 446 bdev->md.md_offset = drbd_md_ss__(mdev, bdev); 447 /* al size is still fixed */ 448 bdev->md.al_offset = -MD_AL_MAX_SIZE; 449 /* we need (slightly less than) ~ this much bitmap sectors: */ 450 md_size_sect = drbd_get_capacity(bdev->backing_bdev); 451 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT); 452 md_size_sect = BM_SECT_TO_EXT(md_size_sect); 453 md_size_sect = ALIGN(md_size_sect, 8); 454 455 /* plus the "drbd meta data super block", 456 * and the activity log; */ 457 md_size_sect += MD_BM_OFFSET; 458 459 bdev->md.md_size_sect = md_size_sect; 460 /* bitmap offset is adjusted by 'super' block size */ 461 bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET; 462 break; 463 } 464} 465 466char *ppsize(char *buf, unsigned long long size) 467{ 468 /* Needs 9 bytes at max. */ 469 static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' }; 470 int base = 0; 471 while (size >= 10000) { 472 /* shift + round */ 473 size = (size >> 10) + !!(size & (1<<9)); 474 base++; 475 } 476 sprintf(buf, "%lu %cB", (long)size, units[base]); 477 478 return buf; 479} 480 481/* there is still a theoretical deadlock when called from receiver 482 * on an D_INCONSISTENT R_PRIMARY: 483 * remote READ does inc_ap_bio, receiver would need to receive answer 484 * packet from remote to dec_ap_bio again. 485 * receiver receive_sizes(), comes here, 486 * waits for ap_bio_cnt == 0. -> deadlock. 487 * but this cannot happen, actually, because: 488 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable 489 * (not connected, or bad/no disk on peer): 490 * see drbd_fail_request_early, ap_bio_cnt is zero. 491 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET: 492 * peer may not initiate a resize. 493 */ 494void drbd_suspend_io(struct drbd_conf *mdev) 495{ 496 set_bit(SUSPEND_IO, &mdev->flags); 497 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); 498} 499 500void drbd_resume_io(struct drbd_conf *mdev) 501{ 502 clear_bit(SUSPEND_IO, &mdev->flags); 503 wake_up(&mdev->misc_wait); 504} 505 506/** 507 * drbd_determine_dev_size() - Sets the right device size obeying all constraints 508 * @mdev: DRBD device. 509 * 510 * Returns 0 on success, negative return values indicate errors. 511 * You should call drbd_md_sync() after calling this function. 512 */ 513enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local) 514{ 515 sector_t prev_first_sect, prev_size; /* previous meta location */ 516 sector_t la_size; 517 sector_t size; 518 char ppb[10]; 519 520 int md_moved, la_size_changed; 521 enum determine_dev_size rv = unchanged; 522 523 /* race: 524 * application request passes inc_ap_bio, 525 * but then cannot get an AL-reference. 526 * this function later may wait on ap_bio_cnt == 0. -> deadlock. 527 * 528 * to avoid that: 529 * Suspend IO right here. 530 * still lock the act_log to not trigger ASSERTs there. 531 */ 532 drbd_suspend_io(mdev); 533 534 /* no wait necessary anymore, actually we could assert that */ 535 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); 536 537 prev_first_sect = drbd_md_first_sector(mdev->ldev); 538 prev_size = mdev->ldev->md.md_size_sect; 539 la_size = mdev->ldev->md.la_size_sect; 540 541 /* TODO: should only be some assert here, not (re)init... */ 542 drbd_md_set_sector_offsets(mdev, mdev->ldev); 543 544 size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED); 545 546 if (drbd_get_capacity(mdev->this_bdev) != size || 547 drbd_bm_capacity(mdev) != size) { 548 int err; 549 err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC)); 550 if (unlikely(err)) { 551 /* currently there is only one error: ENOMEM! */ 552 size = drbd_bm_capacity(mdev)>>1; 553 if (size == 0) { 554 dev_err(DEV, "OUT OF MEMORY! " 555 "Could not allocate bitmap!\n"); 556 } else { 557 dev_err(DEV, "BM resizing failed. " 558 "Leaving size unchanged at size = %lu KB\n", 559 (unsigned long)size); 560 } 561 rv = dev_size_error; 562 } 563 /* racy, see comments above. */ 564 drbd_set_my_capacity(mdev, size); 565 mdev->ldev->md.la_size_sect = size; 566 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1), 567 (unsigned long long)size>>1); 568 } 569 if (rv == dev_size_error) 570 goto out; 571 572 la_size_changed = (la_size != mdev->ldev->md.la_size_sect); 573 574 md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev) 575 || prev_size != mdev->ldev->md.md_size_sect; 576 577 if (la_size_changed || md_moved) { 578 drbd_al_shrink(mdev); /* All extents inactive. */ 579 dev_info(DEV, "Writing the whole bitmap, %s\n", 580 la_size_changed && md_moved ? "size changed and md moved" : 581 la_size_changed ? "size changed" : "md moved"); 582 rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */ 583 drbd_md_mark_dirty(mdev); 584 } 585 586 if (size > la_size) 587 rv = grew; 588 if (size < la_size) 589 rv = shrunk; 590out: 591 lc_unlock(mdev->act_log); 592 wake_up(&mdev->al_wait); 593 drbd_resume_io(mdev); 594 595 return rv; 596} 597 598sector_t 599drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space) 600{ 601 sector_t p_size = mdev->p_size; /* partner's disk size. */ 602 sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */ 603 sector_t m_size; /* my size */ 604 sector_t u_size = bdev->dc.disk_size; /* size requested by user. */ 605 sector_t size = 0; 606 607 m_size = drbd_get_max_capacity(bdev); 608 609 if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) { 610 dev_warn(DEV, "Resize while not connected was forced by the user!\n"); 611 p_size = m_size; 612 } 613 614 if (p_size && m_size) { 615 size = min_t(sector_t, p_size, m_size); 616 } else { 617 if (la_size) { 618 size = la_size; 619 if (m_size && m_size < size) 620 size = m_size; 621 if (p_size && p_size < size) 622 size = p_size; 623 } else { 624 if (m_size) 625 size = m_size; 626 if (p_size) 627 size = p_size; 628 } 629 } 630 631 if (size == 0) 632 dev_err(DEV, "Both nodes diskless!\n"); 633 634 if (u_size) { 635 if (u_size > size) 636 dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n", 637 (unsigned long)u_size>>1, (unsigned long)size>>1); 638 else 639 size = u_size; 640 } 641 642 return size; 643} 644 645/** 646 * drbd_check_al_size() - Ensures that the AL is of the right size 647 * @mdev: DRBD device. 648 * 649 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation 650 * failed, and 0 on success. You should call drbd_md_sync() after you called 651 * this function. 652 */ 653static int drbd_check_al_size(struct drbd_conf *mdev) 654{ 655 struct lru_cache *n, *t; 656 struct lc_element *e; 657 unsigned int in_use; 658 int i; 659 660 ERR_IF(mdev->sync_conf.al_extents < 7) 661 mdev->sync_conf.al_extents = 127; 662 663 if (mdev->act_log && 664 mdev->act_log->nr_elements == mdev->sync_conf.al_extents) 665 return 0; 666 667 in_use = 0; 668 t = mdev->act_log; 669 n = lc_create("act_log", drbd_al_ext_cache, 670 mdev->sync_conf.al_extents, sizeof(struct lc_element), 0); 671 672 if (n == NULL) { 673 dev_err(DEV, "Cannot allocate act_log lru!\n"); 674 return -ENOMEM; 675 } 676 spin_lock_irq(&mdev->al_lock); 677 if (t) { 678 for (i = 0; i < t->nr_elements; i++) { 679 e = lc_element_by_index(t, i); 680 if (e->refcnt) 681 dev_err(DEV, "refcnt(%d)==%d\n", 682 e->lc_number, e->refcnt); 683 in_use += e->refcnt; 684 } 685 } 686 if (!in_use) 687 mdev->act_log = n; 688 spin_unlock_irq(&mdev->al_lock); 689 if (in_use) { 690 dev_err(DEV, "Activity log still in use!\n"); 691 lc_destroy(n); 692 return -EBUSY; 693 } else { 694 if (t) 695 lc_destroy(t); 696 } 697 drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elemens */ 698 return 0; 699} 700 701void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local) 702{ 703 struct request_queue * const q = mdev->rq_queue; 704 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; 705 int max_segments = mdev->ldev->dc.max_bio_bvecs; 706 707 max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s); 708 709 blk_queue_max_hw_sectors(q, max_seg_s >> 9); 710 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); 711 blk_queue_max_segment_size(q, max_seg_s); 712 blk_queue_logical_block_size(q, 512); 713 blk_queue_segment_boundary(q, PAGE_SIZE-1); 714 blk_stack_limits(&q->limits, &b->limits, 0); 715 716 if (b->merge_bvec_fn) 717 dev_warn(DEV, "Backing device's merge_bvec_fn() = %p\n", 718 b->merge_bvec_fn); 719 dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q)); 720 721 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { 722 dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", 723 q->backing_dev_info.ra_pages, 724 b->backing_dev_info.ra_pages); 725 q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; 726 } 727} 728 729/* serialize deconfig (worker exiting, doing cleanup) 730 * and reconfig (drbdsetup disk, drbdsetup net) 731 * 732 * wait for a potentially exiting worker, then restart it, 733 * or start a new one. 734 */ 735static void drbd_reconfig_start(struct drbd_conf *mdev) 736{ 737 wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags)); 738 wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags)); 739 drbd_thread_start(&mdev->worker); 740} 741 742/* if still unconfigured, stops worker again. 743 * if configured now, clears CONFIG_PENDING. 744 * wakes potential waiters */ 745static void drbd_reconfig_done(struct drbd_conf *mdev) 746{ 747 spin_lock_irq(&mdev->req_lock); 748 if (mdev->state.disk == D_DISKLESS && 749 mdev->state.conn == C_STANDALONE && 750 mdev->state.role == R_SECONDARY) { 751 set_bit(DEVICE_DYING, &mdev->flags); 752 drbd_thread_stop_nowait(&mdev->worker); 753 } else 754 clear_bit(CONFIG_PENDING, &mdev->flags); 755 spin_unlock_irq(&mdev->req_lock); 756 wake_up(&mdev->state_wait); 757} 758 759/* does always return 0; 760 * interesting return code is in reply->ret_code */ 761static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 762 struct drbd_nl_cfg_reply *reply) 763{ 764 enum drbd_ret_codes retcode; 765 enum determine_dev_size dd; 766 sector_t max_possible_sectors; 767 sector_t min_md_device_sectors; 768 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */ 769 struct inode *inode, *inode2; 770 struct lru_cache *resync_lru = NULL; 771 union drbd_state ns, os; 772 int rv; 773 int cp_discovered = 0; 774 int logical_block_size; 775 776 drbd_reconfig_start(mdev); 777 778 /* if you want to reconfigure, please tear down first */ 779 if (mdev->state.disk > D_DISKLESS) { 780 retcode = ERR_DISK_CONFIGURED; 781 goto fail; 782 } 783 784 /* allocation not in the IO path, cqueue thread context */ 785 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); 786 if (!nbc) { 787 retcode = ERR_NOMEM; 788 goto fail; 789 } 790 791 nbc->dc.disk_size = DRBD_DISK_SIZE_SECT_DEF; 792 nbc->dc.on_io_error = DRBD_ON_IO_ERROR_DEF; 793 nbc->dc.fencing = DRBD_FENCING_DEF; 794 nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF; 795 796 if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) { 797 retcode = ERR_MANDATORY_TAG; 798 goto fail; 799 } 800 801 if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) { 802 retcode = ERR_MD_IDX_INVALID; 803 goto fail; 804 } 805 806 nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0); 807 if (IS_ERR(nbc->lo_file)) { 808 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev, 809 PTR_ERR(nbc->lo_file)); 810 nbc->lo_file = NULL; 811 retcode = ERR_OPEN_DISK; 812 goto fail; 813 } 814 815 inode = nbc->lo_file->f_dentry->d_inode; 816 817 if (!S_ISBLK(inode->i_mode)) { 818 retcode = ERR_DISK_NOT_BDEV; 819 goto fail; 820 } 821 822 nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0); 823 if (IS_ERR(nbc->md_file)) { 824 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev, 825 PTR_ERR(nbc->md_file)); 826 nbc->md_file = NULL; 827 retcode = ERR_OPEN_MD_DISK; 828 goto fail; 829 } 830 831 inode2 = nbc->md_file->f_dentry->d_inode; 832 833 if (!S_ISBLK(inode2->i_mode)) { 834 retcode = ERR_MD_NOT_BDEV; 835 goto fail; 836 } 837 838 nbc->backing_bdev = inode->i_bdev; 839 if (bd_claim(nbc->backing_bdev, mdev)) { 840 printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n", 841 nbc->backing_bdev, mdev, 842 nbc->backing_bdev->bd_holder, 843 nbc->backing_bdev->bd_contains->bd_holder, 844 nbc->backing_bdev->bd_holders); 845 retcode = ERR_BDCLAIM_DISK; 846 goto fail; 847 } 848 849 resync_lru = lc_create("resync", drbd_bm_ext_cache, 850 61, sizeof(struct bm_extent), 851 offsetof(struct bm_extent, lce)); 852 if (!resync_lru) { 853 retcode = ERR_NOMEM; 854 goto release_bdev_fail; 855 } 856 857 /* meta_dev_idx >= 0: external fixed size, 858 * possibly multiple drbd sharing one meta device. 859 * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is 860 * not yet used by some other drbd minor! 861 * (if you use drbd.conf + drbdadm, 862 * that should check it for you already; but if you don't, or someone 863 * fooled it, we need to double check here) */ 864 nbc->md_bdev = inode2->i_bdev; 865 if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev 866 : (void *) drbd_m_holder)) { 867 retcode = ERR_BDCLAIM_MD_DISK; 868 goto release_bdev_fail; 869 } 870 871 if ((nbc->backing_bdev == nbc->md_bdev) != 872 (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL || 873 nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) { 874 retcode = ERR_MD_IDX_INVALID; 875 goto release_bdev2_fail; 876 } 877 878 /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */ 879 drbd_md_set_sector_offsets(mdev, nbc); 880 881 if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) { 882 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n", 883 (unsigned long long) drbd_get_max_capacity(nbc), 884 (unsigned long long) nbc->dc.disk_size); 885 retcode = ERR_DISK_TO_SMALL; 886 goto release_bdev2_fail; 887 } 888 889 if (nbc->dc.meta_dev_idx < 0) { 890 max_possible_sectors = DRBD_MAX_SECTORS_FLEX; 891 /* at least one MB, otherwise it does not make sense */ 892 min_md_device_sectors = (2<<10); 893 } else { 894 max_possible_sectors = DRBD_MAX_SECTORS; 895 min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1); 896 } 897 898 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) { 899 retcode = ERR_MD_DISK_TO_SMALL; 900 dev_warn(DEV, "refusing attach: md-device too small, " 901 "at least %llu sectors needed for this meta-disk type\n", 902 (unsigned long long) min_md_device_sectors); 903 goto release_bdev2_fail; 904 } 905 906 /* Make sure the new disk is big enough 907 * (we may currently be R_PRIMARY with no local disk...) */ 908 if (drbd_get_max_capacity(nbc) < 909 drbd_get_capacity(mdev->this_bdev)) { 910 retcode = ERR_DISK_TO_SMALL; 911 goto release_bdev2_fail; 912 } 913 914 nbc->known_size = drbd_get_capacity(nbc->backing_bdev); 915 916 if (nbc->known_size > max_possible_sectors) { 917 dev_warn(DEV, "==> truncating very big lower level device " 918 "to currently maximum possible %llu sectors <==\n", 919 (unsigned long long) max_possible_sectors); 920 if (nbc->dc.meta_dev_idx >= 0) 921 dev_warn(DEV, "==>> using internal or flexible " 922 "meta data may help <<==\n"); 923 } 924 925 drbd_suspend_io(mdev); 926 /* also wait for the last barrier ack. */ 927 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt)); 928 /* and for any other previously queued work */ 929 drbd_flush_workqueue(mdev); 930 931 retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE); 932 drbd_resume_io(mdev); 933 if (retcode < SS_SUCCESS) 934 goto release_bdev2_fail; 935 936 if (!get_ldev_if_state(mdev, D_ATTACHING)) 937 goto force_diskless; 938 939 drbd_md_set_sector_offsets(mdev, nbc); 940 941 /* allocate a second IO page if logical_block_size != 512 */ 942 logical_block_size = bdev_logical_block_size(nbc->md_bdev); 943 if (logical_block_size == 0) 944 logical_block_size = MD_SECTOR_SIZE; 945 946 if (logical_block_size != MD_SECTOR_SIZE) { 947 if (!mdev->md_io_tmpp) { 948 struct page *page = alloc_page(GFP_NOIO); 949 if (!page) 950 goto force_diskless_dec; 951 952 dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n", 953 logical_block_size, MD_SECTOR_SIZE); 954 dev_warn(DEV, "Workaround engaged (has performance impact).\n"); 955 956 mdev->md_io_tmpp = page; 957 } 958 } 959 960 if (!mdev->bitmap) { 961 if (drbd_bm_init(mdev)) { 962 retcode = ERR_NOMEM; 963 goto force_diskless_dec; 964 } 965 } 966 967 retcode = drbd_md_read(mdev, nbc); 968 if (retcode != NO_ERROR) 969 goto force_diskless_dec; 970 971 if (mdev->state.conn < C_CONNECTED && 972 mdev->state.role == R_PRIMARY && 973 (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) { 974 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n", 975 (unsigned long long)mdev->ed_uuid); 976 retcode = ERR_DATA_NOT_CURRENT; 977 goto force_diskless_dec; 978 } 979 980 /* Since we are diskless, fix the activity log first... */ 981 if (drbd_check_al_size(mdev)) { 982 retcode = ERR_NOMEM; 983 goto force_diskless_dec; 984 } 985 986 /* Prevent shrinking of consistent devices ! */ 987 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && 988 drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) { 989 dev_warn(DEV, "refusing to truncate a consistent device\n"); 990 retcode = ERR_DISK_TO_SMALL; 991 goto force_diskless_dec; 992 } 993 994 if (!drbd_al_read_log(mdev, nbc)) { 995 retcode = ERR_IO_MD_DISK; 996 goto force_diskless_dec; 997 } 998 999 /* Reset the "barriers don't work" bits here, then force meta data to 1000 * be written, to ensure we determine if barriers are supported. */ 1001 if (nbc->dc.no_md_flush) 1002 set_bit(MD_NO_BARRIER, &mdev->flags); 1003 else 1004 clear_bit(MD_NO_BARRIER, &mdev->flags); 1005 1006 /* Point of no return reached. 1007 * Devices and memory are no longer released by error cleanup below. 1008 * now mdev takes over responsibility, and the state engine should 1009 * clean it up somewhere. */ 1010 D_ASSERT(mdev->ldev == NULL); 1011 mdev->ldev = nbc; 1012 mdev->resync = resync_lru; 1013 nbc = NULL; 1014 resync_lru = NULL; 1015 1016 mdev->write_ordering = WO_bio_barrier; 1017 drbd_bump_write_ordering(mdev, WO_bio_barrier); 1018 1019 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY)) 1020 set_bit(CRASHED_PRIMARY, &mdev->flags); 1021 else 1022 clear_bit(CRASHED_PRIMARY, &mdev->flags); 1023 1024 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND)) { 1025 set_bit(CRASHED_PRIMARY, &mdev->flags); 1026 cp_discovered = 1; 1027 } 1028 1029 mdev->send_cnt = 0; 1030 mdev->recv_cnt = 0; 1031 mdev->read_cnt = 0; 1032 mdev->writ_cnt = 0; 1033 1034 drbd_setup_queue_param(mdev, DRBD_MAX_SEGMENT_SIZE); 1035 1036 /* If I am currently not R_PRIMARY, 1037 * but meta data primary indicator is set, 1038 * I just now recover from a hard crash, 1039 * and have been R_PRIMARY before that crash. 1040 * 1041 * Now, if I had no connection before that crash 1042 * (have been degraded R_PRIMARY), chances are that 1043 * I won't find my peer now either. 1044 * 1045 * In that case, and _only_ in that case, 1046 * we use the degr-wfc-timeout instead of the default, 1047 * so we can automatically recover from a crash of a 1048 * degraded but active "cluster" after a certain timeout. 1049 */ 1050 clear_bit(USE_DEGR_WFC_T, &mdev->flags); 1051 if (mdev->state.role != R_PRIMARY && 1052 drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) && 1053 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) 1054 set_bit(USE_DEGR_WFC_T, &mdev->flags); 1055 1056 dd = drbd_determin_dev_size(mdev, 0); 1057 if (dd == dev_size_error) { 1058 retcode = ERR_NOMEM_BITMAP; 1059 goto force_diskless_dec; 1060 } else if (dd == grew) 1061 set_bit(RESYNC_AFTER_NEG, &mdev->flags); 1062 1063 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) { 1064 dev_info(DEV, "Assuming that all blocks are out of sync " 1065 "(aka FullSync)\n"); 1066 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) { 1067 retcode = ERR_IO_MD_DISK; 1068 goto force_diskless_dec; 1069 } 1070 } else { 1071 if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) { 1072 retcode = ERR_IO_MD_DISK; 1073 goto force_diskless_dec; 1074 } 1075 } 1076 1077 if (cp_discovered) { 1078 drbd_al_apply_to_bm(mdev); 1079 drbd_al_to_on_disk_bm(mdev); 1080 } 1081 1082 spin_lock_irq(&mdev->req_lock); 1083 os = mdev->state; 1084 ns.i = os.i; 1085 /* If MDF_CONSISTENT is not set go into inconsistent state, 1086 otherwise investigate MDF_WasUpToDate... 1087 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state, 1088 otherwise into D_CONSISTENT state. 1089 */ 1090 if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) { 1091 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE)) 1092 ns.disk = D_CONSISTENT; 1093 else 1094 ns.disk = D_OUTDATED; 1095 } else { 1096 ns.disk = D_INCONSISTENT; 1097 } 1098 1099 if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED)) 1100 ns.pdsk = D_OUTDATED; 1101 1102 if ( ns.disk == D_CONSISTENT && 1103 (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE)) 1104 ns.disk = D_UP_TO_DATE; 1105 1106 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND, 1107 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before 1108 this point, because drbd_request_state() modifies these 1109 flags. */ 1110 1111 /* In case we are C_CONNECTED postpone any decision on the new disk 1112 state after the negotiation phase. */ 1113 if (mdev->state.conn == C_CONNECTED) { 1114 mdev->new_state_tmp.i = ns.i; 1115 ns.i = os.i; 1116 ns.disk = D_NEGOTIATING; 1117 1118 /* We expect to receive up-to-date UUIDs soon. 1119 To avoid a race in receive_state, free p_uuid while 1120 holding req_lock. I.e. atomic with the state change */ 1121 kfree(mdev->p_uuid); 1122 mdev->p_uuid = NULL; 1123 } 1124 1125 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); 1126 ns = mdev->state; 1127 spin_unlock_irq(&mdev->req_lock); 1128 1129 if (rv < SS_SUCCESS) 1130 goto force_diskless_dec; 1131 1132 if (mdev->state.role == R_PRIMARY) 1133 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1; 1134 else 1135 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; 1136 1137 drbd_md_mark_dirty(mdev); 1138 drbd_md_sync(mdev); 1139 1140 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 1141 put_ldev(mdev); 1142 reply->ret_code = retcode; 1143 drbd_reconfig_done(mdev); 1144 return 0; 1145 1146 force_diskless_dec: 1147 put_ldev(mdev); 1148 force_diskless: 1149 drbd_force_state(mdev, NS(disk, D_DISKLESS)); 1150 drbd_md_sync(mdev); 1151 release_bdev2_fail: 1152 if (nbc) 1153 bd_release(nbc->md_bdev); 1154 release_bdev_fail: 1155 if (nbc) 1156 bd_release(nbc->backing_bdev); 1157 fail: 1158 if (nbc) { 1159 if (nbc->lo_file) 1160 fput(nbc->lo_file); 1161 if (nbc->md_file) 1162 fput(nbc->md_file); 1163 kfree(nbc); 1164 } 1165 lc_destroy(resync_lru); 1166 1167 reply->ret_code = retcode; 1168 drbd_reconfig_done(mdev); 1169 return 0; 1170} 1171 1172static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1173 struct drbd_nl_cfg_reply *reply) 1174{ 1175 reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS)); 1176 return 0; 1177} 1178 1179static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1180 struct drbd_nl_cfg_reply *reply) 1181{ 1182 int i, ns; 1183 enum drbd_ret_codes retcode; 1184 struct net_conf *new_conf = NULL; 1185 struct crypto_hash *tfm = NULL; 1186 struct crypto_hash *integrity_w_tfm = NULL; 1187 struct crypto_hash *integrity_r_tfm = NULL; 1188 struct hlist_head *new_tl_hash = NULL; 1189 struct hlist_head *new_ee_hash = NULL; 1190 struct drbd_conf *odev; 1191 char hmac_name[CRYPTO_MAX_ALG_NAME]; 1192 void *int_dig_out = NULL; 1193 void *int_dig_in = NULL; 1194 void *int_dig_vv = NULL; 1195 struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr; 1196 1197 drbd_reconfig_start(mdev); 1198 1199 if (mdev->state.conn > C_STANDALONE) { 1200 retcode = ERR_NET_CONFIGURED; 1201 goto fail; 1202 } 1203 1204 /* allocation not in the IO path, cqueue thread context */ 1205 new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); 1206 if (!new_conf) { 1207 retcode = ERR_NOMEM; 1208 goto fail; 1209 } 1210 1211 new_conf->timeout = DRBD_TIMEOUT_DEF; 1212 new_conf->try_connect_int = DRBD_CONNECT_INT_DEF; 1213 new_conf->ping_int = DRBD_PING_INT_DEF; 1214 new_conf->max_epoch_size = DRBD_MAX_EPOCH_SIZE_DEF; 1215 new_conf->max_buffers = DRBD_MAX_BUFFERS_DEF; 1216 new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF; 1217 new_conf->sndbuf_size = DRBD_SNDBUF_SIZE_DEF; 1218 new_conf->rcvbuf_size = DRBD_RCVBUF_SIZE_DEF; 1219 new_conf->ko_count = DRBD_KO_COUNT_DEF; 1220 new_conf->after_sb_0p = DRBD_AFTER_SB_0P_DEF; 1221 new_conf->after_sb_1p = DRBD_AFTER_SB_1P_DEF; 1222 new_conf->after_sb_2p = DRBD_AFTER_SB_2P_DEF; 1223 new_conf->want_lose = 0; 1224 new_conf->two_primaries = 0; 1225 new_conf->wire_protocol = DRBD_PROT_C; 1226 new_conf->ping_timeo = DRBD_PING_TIMEO_DEF; 1227 new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF; 1228 1229 if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) { 1230 retcode = ERR_MANDATORY_TAG; 1231 goto fail; 1232 } 1233 1234 if (new_conf->two_primaries 1235 && (new_conf->wire_protocol != DRBD_PROT_C)) { 1236 retcode = ERR_NOT_PROTO_C; 1237 goto fail; 1238 }; 1239 1240 if (mdev->state.role == R_PRIMARY && new_conf->want_lose) { 1241 retcode = ERR_DISCARD; 1242 goto fail; 1243 } 1244 1245 retcode = NO_ERROR; 1246 1247 new_my_addr = (struct sockaddr *)&new_conf->my_addr; 1248 new_peer_addr = (struct sockaddr *)&new_conf->peer_addr; 1249 for (i = 0; i < minor_count; i++) { 1250 odev = minor_to_mdev(i); 1251 if (!odev || odev == mdev) 1252 continue; 1253 if (get_net_conf(odev)) { 1254 taken_addr = (struct sockaddr *)&odev->net_conf->my_addr; 1255 if (new_conf->my_addr_len == odev->net_conf->my_addr_len && 1256 !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len)) 1257 retcode = ERR_LOCAL_ADDR; 1258 1259 taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr; 1260 if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len && 1261 !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len)) 1262 retcode = ERR_PEER_ADDR; 1263 1264 put_net_conf(odev); 1265 if (retcode != NO_ERROR) 1266 goto fail; 1267 } 1268 } 1269 1270 if (new_conf->cram_hmac_alg[0] != 0) { 1271 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", 1272 new_conf->cram_hmac_alg); 1273 tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC); 1274 if (IS_ERR(tfm)) { 1275 tfm = NULL; 1276 retcode = ERR_AUTH_ALG; 1277 goto fail; 1278 } 1279 1280 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) { 1281 retcode = ERR_AUTH_ALG_ND; 1282 goto fail; 1283 } 1284 } 1285 1286 if (new_conf->integrity_alg[0]) { 1287 integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC); 1288 if (IS_ERR(integrity_w_tfm)) { 1289 integrity_w_tfm = NULL; 1290 retcode=ERR_INTEGRITY_ALG; 1291 goto fail; 1292 } 1293 1294 if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) { 1295 retcode=ERR_INTEGRITY_ALG_ND; 1296 goto fail; 1297 } 1298 1299 integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC); 1300 if (IS_ERR(integrity_r_tfm)) { 1301 integrity_r_tfm = NULL; 1302 retcode=ERR_INTEGRITY_ALG; 1303 goto fail; 1304 } 1305 } 1306 1307 ns = new_conf->max_epoch_size/8; 1308 if (mdev->tl_hash_s != ns) { 1309 new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL); 1310 if (!new_tl_hash) { 1311 retcode = ERR_NOMEM; 1312 goto fail; 1313 } 1314 } 1315 1316 ns = new_conf->max_buffers/8; 1317 if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) { 1318 new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL); 1319 if (!new_ee_hash) { 1320 retcode = ERR_NOMEM; 1321 goto fail; 1322 } 1323 } 1324 1325 ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0; 1326 1327 if (integrity_w_tfm) { 1328 i = crypto_hash_digestsize(integrity_w_tfm); 1329 int_dig_out = kmalloc(i, GFP_KERNEL); 1330 if (!int_dig_out) { 1331 retcode = ERR_NOMEM; 1332 goto fail; 1333 } 1334 int_dig_in = kmalloc(i, GFP_KERNEL); 1335 if (!int_dig_in) { 1336 retcode = ERR_NOMEM; 1337 goto fail; 1338 } 1339 int_dig_vv = kmalloc(i, GFP_KERNEL); 1340 if (!int_dig_vv) { 1341 retcode = ERR_NOMEM; 1342 goto fail; 1343 } 1344 } 1345 1346 if (!mdev->bitmap) { 1347 if(drbd_bm_init(mdev)) { 1348 retcode = ERR_NOMEM; 1349 goto fail; 1350 } 1351 } 1352 1353 spin_lock_irq(&mdev->req_lock); 1354 if (mdev->net_conf != NULL) { 1355 retcode = ERR_NET_CONFIGURED; 1356 spin_unlock_irq(&mdev->req_lock); 1357 goto fail; 1358 } 1359 mdev->net_conf = new_conf; 1360 1361 mdev->send_cnt = 0; 1362 mdev->recv_cnt = 0; 1363 1364 if (new_tl_hash) { 1365 kfree(mdev->tl_hash); 1366 mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8; 1367 mdev->tl_hash = new_tl_hash; 1368 } 1369 1370 if (new_ee_hash) { 1371 kfree(mdev->ee_hash); 1372 mdev->ee_hash_s = mdev->net_conf->max_buffers/8; 1373 mdev->ee_hash = new_ee_hash; 1374 } 1375 1376 crypto_free_hash(mdev->cram_hmac_tfm); 1377 mdev->cram_hmac_tfm = tfm; 1378 1379 crypto_free_hash(mdev->integrity_w_tfm); 1380 mdev->integrity_w_tfm = integrity_w_tfm; 1381 1382 crypto_free_hash(mdev->integrity_r_tfm); 1383 mdev->integrity_r_tfm = integrity_r_tfm; 1384 1385 kfree(mdev->int_dig_out); 1386 kfree(mdev->int_dig_in); 1387 kfree(mdev->int_dig_vv); 1388 mdev->int_dig_out=int_dig_out; 1389 mdev->int_dig_in=int_dig_in; 1390 mdev->int_dig_vv=int_dig_vv; 1391 spin_unlock_irq(&mdev->req_lock); 1392 1393 retcode = _drbd_request_state(mdev, NS(conn, C_UNCONNECTED), CS_VERBOSE); 1394 1395 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 1396 reply->ret_code = retcode; 1397 drbd_reconfig_done(mdev); 1398 return 0; 1399 1400fail: 1401 kfree(int_dig_out); 1402 kfree(int_dig_in); 1403 kfree(int_dig_vv); 1404 crypto_free_hash(tfm); 1405 crypto_free_hash(integrity_w_tfm); 1406 crypto_free_hash(integrity_r_tfm); 1407 kfree(new_tl_hash); 1408 kfree(new_ee_hash); 1409 kfree(new_conf); 1410 1411 reply->ret_code = retcode; 1412 drbd_reconfig_done(mdev); 1413 return 0; 1414} 1415 1416static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1417 struct drbd_nl_cfg_reply *reply) 1418{ 1419 int retcode; 1420 1421 retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED); 1422 1423 if (retcode == SS_NOTHING_TO_DO) 1424 goto done; 1425 else if (retcode == SS_ALREADY_STANDALONE) 1426 goto done; 1427 else if (retcode == SS_PRIMARY_NOP) { 1428 /* Our statche checking code wants to see the peer outdated. */ 1429 retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING, 1430 pdsk, D_OUTDATED)); 1431 } else if (retcode == SS_CW_FAILED_BY_PEER) { 1432 /* The peer probably wants to see us outdated. */ 1433 retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING, 1434 disk, D_OUTDATED), 1435 CS_ORDERED); 1436 if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) { 1437 drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 1438 retcode = SS_SUCCESS; 1439 } 1440 } 1441 1442 if (retcode < SS_SUCCESS) 1443 goto fail; 1444 1445 if (wait_event_interruptible(mdev->state_wait, 1446 mdev->state.conn != C_DISCONNECTING)) { 1447 /* Do not test for mdev->state.conn == C_STANDALONE, since 1448 someone else might connect us in the mean time! */ 1449 retcode = ERR_INTR; 1450 goto fail; 1451 } 1452 1453 done: 1454 retcode = NO_ERROR; 1455 fail: 1456 drbd_md_sync(mdev); 1457 reply->ret_code = retcode; 1458 return 0; 1459} 1460 1461void resync_after_online_grow(struct drbd_conf *mdev) 1462{ 1463 int iass; /* I am sync source */ 1464 1465 dev_info(DEV, "Resync of new storage after online grow\n"); 1466 if (mdev->state.role != mdev->state.peer) 1467 iass = (mdev->state.role == R_PRIMARY); 1468 else 1469 iass = test_bit(DISCARD_CONCURRENT, &mdev->flags); 1470 1471 if (iass) 1472 drbd_start_resync(mdev, C_SYNC_SOURCE); 1473 else 1474 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE); 1475} 1476 1477static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1478 struct drbd_nl_cfg_reply *reply) 1479{ 1480 struct resize rs; 1481 int retcode = NO_ERROR; 1482 enum determine_dev_size dd; 1483 enum dds_flags ddsf; 1484 1485 memset(&rs, 0, sizeof(struct resize)); 1486 if (!resize_from_tags(mdev, nlp->tag_list, &rs)) { 1487 retcode = ERR_MANDATORY_TAG; 1488 goto fail; 1489 } 1490 1491 if (mdev->state.conn > C_CONNECTED) { 1492 retcode = ERR_RESIZE_RESYNC; 1493 goto fail; 1494 } 1495 1496 if (mdev->state.role == R_SECONDARY && 1497 mdev->state.peer == R_SECONDARY) { 1498 retcode = ERR_NO_PRIMARY; 1499 goto fail; 1500 } 1501 1502 if (!get_ldev(mdev)) { 1503 retcode = ERR_NO_DISK; 1504 goto fail; 1505 } 1506 1507 if (rs.no_resync && mdev->agreed_pro_version < 93) { 1508 retcode = ERR_NEED_APV_93; 1509 goto fail; 1510 } 1511 1512 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) 1513 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); 1514 1515 mdev->ldev->dc.disk_size = (sector_t)rs.resize_size; 1516 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0); 1517 dd = drbd_determin_dev_size(mdev, ddsf); 1518 drbd_md_sync(mdev); 1519 put_ldev(mdev); 1520 if (dd == dev_size_error) { 1521 retcode = ERR_NOMEM_BITMAP; 1522 goto fail; 1523 } 1524 1525 if (mdev->state.conn == C_CONNECTED) { 1526 if (dd == grew) 1527 set_bit(RESIZE_PENDING, &mdev->flags); 1528 1529 drbd_send_uuids(mdev); 1530 drbd_send_sizes(mdev, 1, ddsf); 1531 } 1532 1533 fail: 1534 reply->ret_code = retcode; 1535 return 0; 1536} 1537 1538static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1539 struct drbd_nl_cfg_reply *reply) 1540{ 1541 int retcode = NO_ERROR; 1542 int err; 1543 int ovr; /* online verify running */ 1544 int rsr; /* re-sync running */ 1545 struct crypto_hash *verify_tfm = NULL; 1546 struct crypto_hash *csums_tfm = NULL; 1547 struct syncer_conf sc; 1548 cpumask_var_t new_cpu_mask; 1549 1550 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) { 1551 retcode = ERR_NOMEM; 1552 goto fail; 1553 } 1554 1555 if (nlp->flags & DRBD_NL_SET_DEFAULTS) { 1556 memset(&sc, 0, sizeof(struct syncer_conf)); 1557 sc.rate = DRBD_RATE_DEF; 1558 sc.after = DRBD_AFTER_DEF; 1559 sc.al_extents = DRBD_AL_EXTENTS_DEF; 1560 } else 1561 memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf)); 1562 1563 if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) { 1564 retcode = ERR_MANDATORY_TAG; 1565 goto fail; 1566 } 1567 1568 /* re-sync running */ 1569 rsr = ( mdev->state.conn == C_SYNC_SOURCE || 1570 mdev->state.conn == C_SYNC_TARGET || 1571 mdev->state.conn == C_PAUSED_SYNC_S || 1572 mdev->state.conn == C_PAUSED_SYNC_T ); 1573 1574 if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) { 1575 retcode = ERR_CSUMS_RESYNC_RUNNING; 1576 goto fail; 1577 } 1578 1579 if (!rsr && sc.csums_alg[0]) { 1580 csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC); 1581 if (IS_ERR(csums_tfm)) { 1582 csums_tfm = NULL; 1583 retcode = ERR_CSUMS_ALG; 1584 goto fail; 1585 } 1586 1587 if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) { 1588 retcode = ERR_CSUMS_ALG_ND; 1589 goto fail; 1590 } 1591 } 1592 1593 /* online verify running */ 1594 ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T); 1595 1596 if (ovr) { 1597 if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) { 1598 retcode = ERR_VERIFY_RUNNING; 1599 goto fail; 1600 } 1601 } 1602 1603 if (!ovr && sc.verify_alg[0]) { 1604 verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC); 1605 if (IS_ERR(verify_tfm)) { 1606 verify_tfm = NULL; 1607 retcode = ERR_VERIFY_ALG; 1608 goto fail; 1609 } 1610 1611 if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) { 1612 retcode = ERR_VERIFY_ALG_ND; 1613 goto fail; 1614 } 1615 } 1616 1617 /* silently ignore cpu mask on UP kernel */ 1618 if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) { 1619 err = __bitmap_parse(sc.cpu_mask, 32, 0, 1620 cpumask_bits(new_cpu_mask), nr_cpu_ids); 1621 if (err) { 1622 dev_warn(DEV, "__bitmap_parse() failed with %d\n", err); 1623 retcode = ERR_CPU_MASK_PARSE; 1624 goto fail; 1625 } 1626 } 1627 1628 ERR_IF (sc.rate < 1) sc.rate = 1; 1629 ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */ 1630#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT) 1631 if (sc.al_extents > AL_MAX) { 1632 dev_err(DEV, "sc.al_extents > %d\n", AL_MAX); 1633 sc.al_extents = AL_MAX; 1634 } 1635#undef AL_MAX 1636 1637 /* most sanity checks done, try to assign the new sync-after 1638 * dependency. need to hold the global lock in there, 1639 * to avoid a race in the dependency loop check. */ 1640 retcode = drbd_alter_sa(mdev, sc.after); 1641 if (retcode != NO_ERROR) 1642 goto fail; 1643 1644 /* ok, assign the rest of it as well. 1645 * lock against receive_SyncParam() */ 1646 spin_lock(&mdev->peer_seq_lock); 1647 mdev->sync_conf = sc; 1648 1649 if (!rsr) { 1650 crypto_free_hash(mdev->csums_tfm); 1651 mdev->csums_tfm = csums_tfm; 1652 csums_tfm = NULL; 1653 } 1654 1655 if (!ovr) { 1656 crypto_free_hash(mdev->verify_tfm); 1657 mdev->verify_tfm = verify_tfm; 1658 verify_tfm = NULL; 1659 } 1660 spin_unlock(&mdev->peer_seq_lock); 1661 1662 if (get_ldev(mdev)) { 1663 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); 1664 drbd_al_shrink(mdev); 1665 err = drbd_check_al_size(mdev); 1666 lc_unlock(mdev->act_log); 1667 wake_up(&mdev->al_wait); 1668 1669 put_ldev(mdev); 1670 drbd_md_sync(mdev); 1671 1672 if (err) { 1673 retcode = ERR_NOMEM; 1674 goto fail; 1675 } 1676 } 1677 1678 if (mdev->state.conn >= C_CONNECTED) 1679 drbd_send_sync_param(mdev, &sc); 1680 1681 if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) { 1682 cpumask_copy(mdev->cpu_mask, new_cpu_mask); 1683 drbd_calc_cpu_mask(mdev); 1684 mdev->receiver.reset_cpu_mask = 1; 1685 mdev->asender.reset_cpu_mask = 1; 1686 mdev->worker.reset_cpu_mask = 1; 1687 } 1688 1689 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 1690fail: 1691 free_cpumask_var(new_cpu_mask); 1692 crypto_free_hash(csums_tfm); 1693 crypto_free_hash(verify_tfm); 1694 reply->ret_code = retcode; 1695 return 0; 1696} 1697 1698static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1699 struct drbd_nl_cfg_reply *reply) 1700{ 1701 int retcode; 1702 1703 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED); 1704 1705 if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION) 1706 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T)); 1707 1708 while (retcode == SS_NEED_CONNECTION) { 1709 spin_lock_irq(&mdev->req_lock); 1710 if (mdev->state.conn < C_CONNECTED) 1711 retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL); 1712 spin_unlock_irq(&mdev->req_lock); 1713 1714 if (retcode != SS_NEED_CONNECTION) 1715 break; 1716 1717 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T)); 1718 } 1719 1720 reply->ret_code = retcode; 1721 return 0; 1722} 1723 1724static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1725 struct drbd_nl_cfg_reply *reply) 1726{ 1727 1728 reply->ret_code = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S)); 1729 1730 return 0; 1731} 1732 1733static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1734 struct drbd_nl_cfg_reply *reply) 1735{ 1736 int retcode = NO_ERROR; 1737 1738 if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO) 1739 retcode = ERR_PAUSE_IS_SET; 1740 1741 reply->ret_code = retcode; 1742 return 0; 1743} 1744 1745static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1746 struct drbd_nl_cfg_reply *reply) 1747{ 1748 int retcode = NO_ERROR; 1749 1750 if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) 1751 retcode = ERR_PAUSE_IS_CLEAR; 1752 1753 reply->ret_code = retcode; 1754 return 0; 1755} 1756 1757static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1758 struct drbd_nl_cfg_reply *reply) 1759{ 1760 reply->ret_code = drbd_request_state(mdev, NS(susp, 1)); 1761 1762 return 0; 1763} 1764 1765static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1766 struct drbd_nl_cfg_reply *reply) 1767{ 1768 reply->ret_code = drbd_request_state(mdev, NS(susp, 0)); 1769 return 0; 1770} 1771 1772static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1773 struct drbd_nl_cfg_reply *reply) 1774{ 1775 reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED)); 1776 return 0; 1777} 1778 1779static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1780 struct drbd_nl_cfg_reply *reply) 1781{ 1782 unsigned short *tl; 1783 1784 tl = reply->tag_list; 1785 1786 if (get_ldev(mdev)) { 1787 tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl); 1788 put_ldev(mdev); 1789 } 1790 1791 if (get_net_conf(mdev)) { 1792 tl = net_conf_to_tags(mdev, mdev->net_conf, tl); 1793 put_net_conf(mdev); 1794 } 1795 tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl); 1796 1797 put_unaligned(TT_END, tl++); /* Close the tag list */ 1798 1799 return (int)((char *)tl - (char *)reply->tag_list); 1800} 1801 1802static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1803 struct drbd_nl_cfg_reply *reply) 1804{ 1805 unsigned short *tl = reply->tag_list; 1806 union drbd_state s = mdev->state; 1807 unsigned long rs_left; 1808 unsigned int res; 1809 1810 tl = get_state_to_tags(mdev, (struct get_state *)&s, tl); 1811 1812 /* no local ref, no bitmap, no syncer progress. */ 1813 if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) { 1814 if (get_ldev(mdev)) { 1815 drbd_get_syncer_progress(mdev, &rs_left, &res); 1816 tl = tl_add_int(tl, T_sync_progress, &res); 1817 put_ldev(mdev); 1818 } 1819 } 1820 put_unaligned(TT_END, tl++); /* Close the tag list */ 1821 1822 return (int)((char *)tl - (char *)reply->tag_list); 1823} 1824 1825static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1826 struct drbd_nl_cfg_reply *reply) 1827{ 1828 unsigned short *tl; 1829 1830 tl = reply->tag_list; 1831 1832 if (get_ldev(mdev)) { 1833 tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64)); 1834 tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags); 1835 put_ldev(mdev); 1836 } 1837 put_unaligned(TT_END, tl++); /* Close the tag list */ 1838 1839 return (int)((char *)tl - (char *)reply->tag_list); 1840} 1841 1842/** 1843 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use 1844 * @mdev: DRBD device. 1845 * @nlp: Netlink/connector packet from drbdsetup 1846 * @reply: Reply packet for drbdsetup 1847 */ 1848static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1849 struct drbd_nl_cfg_reply *reply) 1850{ 1851 unsigned short *tl; 1852 char rv; 1853 1854 tl = reply->tag_list; 1855 1856 rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED : 1857 test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT; 1858 1859 tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv)); 1860 put_unaligned(TT_END, tl++); /* Close the tag list */ 1861 1862 return (int)((char *)tl - (char *)reply->tag_list); 1863} 1864 1865static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1866 struct drbd_nl_cfg_reply *reply) 1867{ 1868 /* default to resume from last known position, if possible */ 1869 struct start_ov args = 1870 { .start_sector = mdev->ov_start_sector }; 1871 1872 if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) { 1873 reply->ret_code = ERR_MANDATORY_TAG; 1874 return 0; 1875 } 1876 /* w_make_ov_request expects position to be aligned */ 1877 mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT; 1878 reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S)); 1879 return 0; 1880} 1881 1882 1883static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1884 struct drbd_nl_cfg_reply *reply) 1885{ 1886 int retcode = NO_ERROR; 1887 int skip_initial_sync = 0; 1888 int err; 1889 1890 struct new_c_uuid args; 1891 1892 memset(&args, 0, sizeof(struct new_c_uuid)); 1893 if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) { 1894 reply->ret_code = ERR_MANDATORY_TAG; 1895 return 0; 1896 } 1897 1898 mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */ 1899 1900 if (!get_ldev(mdev)) { 1901 retcode = ERR_NO_DISK; 1902 goto out; 1903 } 1904 1905 /* this is "skip initial sync", assume to be clean */ 1906 if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 && 1907 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) { 1908 dev_info(DEV, "Preparing to skip initial sync\n"); 1909 skip_initial_sync = 1; 1910 } else if (mdev->state.conn != C_STANDALONE) { 1911 retcode = ERR_CONNECTED; 1912 goto out_dec; 1913 } 1914 1915 drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */ 1916 drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */ 1917 1918 if (args.clear_bm) { 1919 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid"); 1920 if (err) { 1921 dev_err(DEV, "Writing bitmap failed with %d\n",err); 1922 retcode = ERR_IO_MD_DISK; 1923 } 1924 if (skip_initial_sync) { 1925 drbd_send_uuids_skip_initial_sync(mdev); 1926 _drbd_uuid_set(mdev, UI_BITMAP, 0); 1927 spin_lock_irq(&mdev->req_lock); 1928 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), 1929 CS_VERBOSE, NULL); 1930 spin_unlock_irq(&mdev->req_lock); 1931 } 1932 } 1933 1934 drbd_md_sync(mdev); 1935out_dec: 1936 put_ldev(mdev); 1937out: 1938 mutex_unlock(&mdev->state_mutex); 1939 1940 reply->ret_code = retcode; 1941 return 0; 1942} 1943 1944static struct drbd_conf *ensure_mdev(struct drbd_nl_cfg_req *nlp) 1945{ 1946 struct drbd_conf *mdev; 1947 1948 if (nlp->drbd_minor >= minor_count) 1949 return NULL; 1950 1951 mdev = minor_to_mdev(nlp->drbd_minor); 1952 1953 if (!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) { 1954 struct gendisk *disk = NULL; 1955 mdev = drbd_new_device(nlp->drbd_minor); 1956 1957 spin_lock_irq(&drbd_pp_lock); 1958 if (minor_table[nlp->drbd_minor] == NULL) { 1959 minor_table[nlp->drbd_minor] = mdev; 1960 disk = mdev->vdisk; 1961 mdev = NULL; 1962 } /* else: we lost the race */ 1963 spin_unlock_irq(&drbd_pp_lock); 1964 1965 if (disk) /* we won the race above */ 1966 /* in case we ever add a drbd_delete_device(), 1967 * don't forget the del_gendisk! */ 1968 add_disk(disk); 1969 else /* we lost the race above */ 1970 drbd_free_mdev(mdev); 1971 1972 mdev = minor_to_mdev(nlp->drbd_minor); 1973 } 1974 1975 return mdev; 1976} 1977 1978struct cn_handler_struct { 1979 int (*function)(struct drbd_conf *, 1980 struct drbd_nl_cfg_req *, 1981 struct drbd_nl_cfg_reply *); 1982 int reply_body_size; 1983}; 1984 1985static struct cn_handler_struct cnd_table[] = { 1986 [ P_primary ] = { &drbd_nl_primary, 0 }, 1987 [ P_secondary ] = { &drbd_nl_secondary, 0 }, 1988 [ P_disk_conf ] = { &drbd_nl_disk_conf, 0 }, 1989 [ P_detach ] = { &drbd_nl_detach, 0 }, 1990 [ P_net_conf ] = { &drbd_nl_net_conf, 0 }, 1991 [ P_disconnect ] = { &drbd_nl_disconnect, 0 }, 1992 [ P_resize ] = { &drbd_nl_resize, 0 }, 1993 [ P_syncer_conf ] = { &drbd_nl_syncer_conf, 0 }, 1994 [ P_invalidate ] = { &drbd_nl_invalidate, 0 }, 1995 [ P_invalidate_peer ] = { &drbd_nl_invalidate_peer, 0 }, 1996 [ P_pause_sync ] = { &drbd_nl_pause_sync, 0 }, 1997 [ P_resume_sync ] = { &drbd_nl_resume_sync, 0 }, 1998 [ P_suspend_io ] = { &drbd_nl_suspend_io, 0 }, 1999 [ P_resume_io ] = { &drbd_nl_resume_io, 0 }, 2000 [ P_outdate ] = { &drbd_nl_outdate, 0 }, 2001 [ P_get_config ] = { &drbd_nl_get_config, 2002 sizeof(struct syncer_conf_tag_len_struct) + 2003 sizeof(struct disk_conf_tag_len_struct) + 2004 sizeof(struct net_conf_tag_len_struct) }, 2005 [ P_get_state ] = { &drbd_nl_get_state, 2006 sizeof(struct get_state_tag_len_struct) + 2007 sizeof(struct sync_progress_tag_len_struct) }, 2008 [ P_get_uuids ] = { &drbd_nl_get_uuids, 2009 sizeof(struct get_uuids_tag_len_struct) }, 2010 [ P_get_timeout_flag ] = { &drbd_nl_get_timeout_flag, 2011 sizeof(struct get_timeout_flag_tag_len_struct)}, 2012 [ P_start_ov ] = { &drbd_nl_start_ov, 0 }, 2013 [ P_new_c_uuid ] = { &drbd_nl_new_c_uuid, 0 }, 2014}; 2015 2016static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp) 2017{ 2018 struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data; 2019 struct cn_handler_struct *cm; 2020 struct cn_msg *cn_reply; 2021 struct drbd_nl_cfg_reply *reply; 2022 struct drbd_conf *mdev; 2023 int retcode, rr; 2024 int reply_size = sizeof(struct cn_msg) 2025 + sizeof(struct drbd_nl_cfg_reply) 2026 + sizeof(short int); 2027 2028 if (!try_module_get(THIS_MODULE)) { 2029 printk(KERN_ERR "drbd: try_module_get() failed!\n"); 2030 return; 2031 } 2032 2033 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) { 2034 retcode = ERR_PERM; 2035 goto fail; 2036 } 2037 2038 mdev = ensure_mdev(nlp); 2039 if (!mdev) { 2040 retcode = ERR_MINOR_INVALID; 2041 goto fail; 2042 } 2043 2044 if (nlp->packet_type >= P_nl_after_last_packet) { 2045 retcode = ERR_PACKET_NR; 2046 goto fail; 2047 } 2048 2049 cm = cnd_table + nlp->packet_type; 2050 2051 /* This may happen if packet number is 0: */ 2052 if (cm->function == NULL) { 2053 retcode = ERR_PACKET_NR; 2054 goto fail; 2055 } 2056 2057 reply_size += cm->reply_body_size; 2058 2059 /* allocation not in the IO path, cqueue thread context */ 2060 cn_reply = kmalloc(reply_size, GFP_KERNEL); 2061 if (!cn_reply) { 2062 retcode = ERR_NOMEM; 2063 goto fail; 2064 } 2065 reply = (struct drbd_nl_cfg_reply *) cn_reply->data; 2066 2067 reply->packet_type = 2068 cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet; 2069 reply->minor = nlp->drbd_minor; 2070 reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */ 2071 /* reply->tag_list; might be modified by cm->function. */ 2072 2073 rr = cm->function(mdev, nlp, reply); 2074 2075 cn_reply->id = req->id; 2076 cn_reply->seq = req->seq; 2077 cn_reply->ack = req->ack + 1; 2078 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr; 2079 cn_reply->flags = 0; 2080 2081 rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL); 2082 if (rr && rr != -ESRCH) 2083 printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr); 2084 2085 kfree(cn_reply); 2086 module_put(THIS_MODULE); 2087 return; 2088 fail: 2089 drbd_nl_send_reply(req, retcode); 2090 module_put(THIS_MODULE); 2091} 2092 2093static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */ 2094 2095static unsigned short * 2096__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, 2097 unsigned short len, int nul_terminated) 2098{ 2099 unsigned short l = tag_descriptions[tag_number(tag)].max_len; 2100 len = (len < l) ? len : l; 2101 put_unaligned(tag, tl++); 2102 put_unaligned(len, tl++); 2103 memcpy(tl, data, len); 2104 tl = (unsigned short*)((char*)tl + len); 2105 if (nul_terminated) 2106 *((char*)tl - 1) = 0; 2107 return tl; 2108} 2109 2110static unsigned short * 2111tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len) 2112{ 2113 return __tl_add_blob(tl, tag, data, len, 0); 2114} 2115 2116static unsigned short * 2117tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str) 2118{ 2119 return __tl_add_blob(tl, tag, str, strlen(str)+1, 0); 2120} 2121 2122static unsigned short * 2123tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val) 2124{ 2125 put_unaligned(tag, tl++); 2126 switch(tag_type(tag)) { 2127 case TT_INTEGER: 2128 put_unaligned(sizeof(int), tl++); 2129 put_unaligned(*(int *)val, (int *)tl); 2130 tl = (unsigned short*)((char*)tl+sizeof(int)); 2131 break; 2132 case TT_INT64: 2133 put_unaligned(sizeof(u64), tl++); 2134 put_unaligned(*(u64 *)val, (u64 *)tl); 2135 tl = (unsigned short*)((char*)tl+sizeof(u64)); 2136 break; 2137 default: 2138 /* someone did something stupid. */ 2139 ; 2140 } 2141 return tl; 2142} 2143 2144void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state) 2145{ 2146 char buffer[sizeof(struct cn_msg)+ 2147 sizeof(struct drbd_nl_cfg_reply)+ 2148 sizeof(struct get_state_tag_len_struct)+ 2149 sizeof(short int)]; 2150 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2151 struct drbd_nl_cfg_reply *reply = 2152 (struct drbd_nl_cfg_reply *)cn_reply->data; 2153 unsigned short *tl = reply->tag_list; 2154 2155 /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */ 2156 2157 tl = get_state_to_tags(mdev, (struct get_state *)&state, tl); 2158 2159 put_unaligned(TT_END, tl++); /* Close the tag list */ 2160 2161 cn_reply->id.idx = CN_IDX_DRBD; 2162 cn_reply->id.val = CN_VAL_DRBD; 2163 2164 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); 2165 cn_reply->ack = 0; /* not used here. */ 2166 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2167 (int)((char *)tl - (char *)reply->tag_list); 2168 cn_reply->flags = 0; 2169 2170 reply->packet_type = P_get_state; 2171 reply->minor = mdev_to_minor(mdev); 2172 reply->ret_code = NO_ERROR; 2173 2174 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2175} 2176 2177void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name) 2178{ 2179 char buffer[sizeof(struct cn_msg)+ 2180 sizeof(struct drbd_nl_cfg_reply)+ 2181 sizeof(struct call_helper_tag_len_struct)+ 2182 sizeof(short int)]; 2183 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2184 struct drbd_nl_cfg_reply *reply = 2185 (struct drbd_nl_cfg_reply *)cn_reply->data; 2186 unsigned short *tl = reply->tag_list; 2187 2188 /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */ 2189 2190 tl = tl_add_str(tl, T_helper, helper_name); 2191 put_unaligned(TT_END, tl++); /* Close the tag list */ 2192 2193 cn_reply->id.idx = CN_IDX_DRBD; 2194 cn_reply->id.val = CN_VAL_DRBD; 2195 2196 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); 2197 cn_reply->ack = 0; /* not used here. */ 2198 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2199 (int)((char *)tl - (char *)reply->tag_list); 2200 cn_reply->flags = 0; 2201 2202 reply->packet_type = P_call_helper; 2203 reply->minor = mdev_to_minor(mdev); 2204 reply->ret_code = NO_ERROR; 2205 2206 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2207} 2208 2209void drbd_bcast_ee(struct drbd_conf *mdev, 2210 const char *reason, const int dgs, 2211 const char* seen_hash, const char* calc_hash, 2212 const struct drbd_epoch_entry* e) 2213{ 2214 struct cn_msg *cn_reply; 2215 struct drbd_nl_cfg_reply *reply; 2216 unsigned short *tl; 2217 struct page *page; 2218 unsigned len; 2219 2220 if (!e) 2221 return; 2222 if (!reason || !reason[0]) 2223 return; 2224 2225 /* apparently we have to memcpy twice, first to prepare the data for the 2226 * struct cn_msg, then within cn_netlink_send from the cn_msg to the 2227 * netlink skb. */ 2228 /* receiver thread context, which is not in the writeout path (of this node), 2229 * but may be in the writeout path of the _other_ node. 2230 * GFP_NOIO to avoid potential "distributed deadlock". */ 2231 cn_reply = kmalloc( 2232 sizeof(struct cn_msg)+ 2233 sizeof(struct drbd_nl_cfg_reply)+ 2234 sizeof(struct dump_ee_tag_len_struct)+ 2235 sizeof(short int), 2236 GFP_NOIO); 2237 2238 if (!cn_reply) { 2239 dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n", 2240 (unsigned long long)e->sector, e->size); 2241 return; 2242 } 2243 2244 reply = (struct drbd_nl_cfg_reply*)cn_reply->data; 2245 tl = reply->tag_list; 2246 2247 tl = tl_add_str(tl, T_dump_ee_reason, reason); 2248 tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs); 2249 tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs); 2250 tl = tl_add_int(tl, T_ee_sector, &e->sector); 2251 tl = tl_add_int(tl, T_ee_block_id, &e->block_id); 2252 2253 put_unaligned(T_ee_data, tl++); 2254 put_unaligned(e->size, tl++); 2255 2256 len = e->size; 2257 page = e->pages; 2258 page_chain_for_each(page) { 2259 void *d = kmap_atomic(page, KM_USER0); 2260 unsigned l = min_t(unsigned, len, PAGE_SIZE); 2261 memcpy(tl, d, l); 2262 kunmap_atomic(d, KM_USER0); 2263 tl = (unsigned short*)((char*)tl + l); 2264 len -= l; 2265 } 2266 put_unaligned(TT_END, tl++); /* Close the tag list */ 2267 2268 cn_reply->id.idx = CN_IDX_DRBD; 2269 cn_reply->id.val = CN_VAL_DRBD; 2270 2271 cn_reply->seq = atomic_add_return(1,&drbd_nl_seq); 2272 cn_reply->ack = 0; // not used here. 2273 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2274 (int)((char*)tl - (char*)reply->tag_list); 2275 cn_reply->flags = 0; 2276 2277 reply->packet_type = P_dump_ee; 2278 reply->minor = mdev_to_minor(mdev); 2279 reply->ret_code = NO_ERROR; 2280 2281 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2282 kfree(cn_reply); 2283} 2284 2285void drbd_bcast_sync_progress(struct drbd_conf *mdev) 2286{ 2287 char buffer[sizeof(struct cn_msg)+ 2288 sizeof(struct drbd_nl_cfg_reply)+ 2289 sizeof(struct sync_progress_tag_len_struct)+ 2290 sizeof(short int)]; 2291 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2292 struct drbd_nl_cfg_reply *reply = 2293 (struct drbd_nl_cfg_reply *)cn_reply->data; 2294 unsigned short *tl = reply->tag_list; 2295 unsigned long rs_left; 2296 unsigned int res; 2297 2298 /* no local ref, no bitmap, no syncer progress, no broadcast. */ 2299 if (!get_ldev(mdev)) 2300 return; 2301 drbd_get_syncer_progress(mdev, &rs_left, &res); 2302 put_ldev(mdev); 2303 2304 tl = tl_add_int(tl, T_sync_progress, &res); 2305 put_unaligned(TT_END, tl++); /* Close the tag list */ 2306 2307 cn_reply->id.idx = CN_IDX_DRBD; 2308 cn_reply->id.val = CN_VAL_DRBD; 2309 2310 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); 2311 cn_reply->ack = 0; /* not used here. */ 2312 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2313 (int)((char *)tl - (char *)reply->tag_list); 2314 cn_reply->flags = 0; 2315 2316 reply->packet_type = P_sync_progress; 2317 reply->minor = mdev_to_minor(mdev); 2318 reply->ret_code = NO_ERROR; 2319 2320 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2321} 2322 2323int __init drbd_nl_init(void) 2324{ 2325 static struct cb_id cn_id_drbd; 2326 int err, try=10; 2327 2328 cn_id_drbd.val = CN_VAL_DRBD; 2329 do { 2330 cn_id_drbd.idx = cn_idx; 2331 err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback); 2332 if (!err) 2333 break; 2334 cn_idx = (cn_idx + CN_IDX_STEP); 2335 } while (try--); 2336 2337 if (err) { 2338 printk(KERN_ERR "drbd: cn_drbd failed to register\n"); 2339 return err; 2340 } 2341 2342 return 0; 2343} 2344 2345void drbd_nl_cleanup(void) 2346{ 2347 static struct cb_id cn_id_drbd; 2348 2349 cn_id_drbd.idx = cn_idx; 2350 cn_id_drbd.val = CN_VAL_DRBD; 2351 2352 cn_del_callback(&cn_id_drbd); 2353} 2354 2355void drbd_nl_send_reply(struct cn_msg *req, int ret_code) 2356{ 2357 char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)]; 2358 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2359 struct drbd_nl_cfg_reply *reply = 2360 (struct drbd_nl_cfg_reply *)cn_reply->data; 2361 int rr; 2362 2363 cn_reply->id = req->id; 2364 2365 cn_reply->seq = req->seq; 2366 cn_reply->ack = req->ack + 1; 2367 cn_reply->len = sizeof(struct drbd_nl_cfg_reply); 2368 cn_reply->flags = 0; 2369 2370 reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor; 2371 reply->ret_code = ret_code; 2372 2373 rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2374 if (rr && rr != -ESRCH) 2375 printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr); 2376} 2377