Lines matching refs: device

147 static struct page *__drbd_alloc_pages(struct drbd_device *device,
193 static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
203 list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
210 static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
215 spin_lock_irq(&device->resource->req_lock);
216 reclaim_finished_net_peer_reqs(device, &reclaimed);
217 spin_unlock_irq(&device->resource->req_lock);
219 drbd_free_net_peer_req(device, peer_req);
229 struct drbd_device *device = peer_device->device;
230 if (!atomic_read(&device->pp_in_use_by_net))
233 kref_get(&device->kref);
235 drbd_reclaim_net_peer_reqs(device);
236 kref_put(&device->kref, drbd_destroy_device);
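
The kref_get()/kref_put() pair above pins a device while reclaim work runs outside the RCU read section. A minimal sketch of that pattern, assuming these hits come from a conn_reclaim_net_peer_reqs()-style walk over the connection's peer devices:

static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		/* skip devices with nothing to reclaim */
		if (!atomic_read(&device->pp_in_use_by_net))
			continue;

		/* pin the device and leave the RCU section while working on it */
		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_reclaim_net_peer_reqs(device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}
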
244 * @peer_device: DRBD peer device.
265 struct drbd_device *device = peer_device->device;
276 if (atomic_read(&device->pp_in_use) < mxb)
277 page = __drbd_alloc_pages(device, number);
281 if (page && atomic_read(&device->pp_in_use_by_net) > 512)
282 drbd_reclaim_net_peer_reqs(device);
287 drbd_reclaim_net_peer_reqs(device);
289 if (atomic_read(&device->pp_in_use) < mxb) {
290 page = __drbd_alloc_pages(device, number);
299 drbd_warn(device, "drbd_alloc_pages interrupted!\n");
309 atomic_add(number, &device->pp_in_use);
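
Taken together, the drbd_alloc_pages() hits above describe a bounded page pool: allocate while pp_in_use stays under max_buffers, kick network-side reclaim when too many pages linger on net_ee, otherwise sleep interruptibly. A condensed sketch under those assumptions (drbd_pp_wait is taken to be the module-global pool wait queue; mxb fallback shown as in the listing's era of the driver):

struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device,
			      unsigned int number, bool retry)
{
	struct drbd_device *device = peer_device->device;
	struct page *page = NULL;
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	unsigned int mxb;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	if (atomic_read(&device->pp_in_use) < mxb)
		page = __drbd_alloc_pages(device, number);

	/* many pages still pinned by the network? try to reclaim them */
	if (page && atomic_read(&device->pp_in_use_by_net) > 512)
		drbd_reclaim_net_peer_reqs(device);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
		drbd_reclaim_net_peer_reqs(device);
		if (atomic_read(&device->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(device, number);
			if (page)
				break;
		}
		if (!retry)
			break;
		if (signal_pending(current)) {
			drbd_warn(device, "drbd_alloc_pages interrupted!\n");
			break;
		}
		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &device->pp_in_use);
	return page;
}
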
317 static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
319 atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
337 drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
363 struct drbd_device *device = peer_device->device;
368 if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
374 drbd_err(device, "%s: allocation failed\n", __func__);
406 void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
412 drbd_free_pages(device, peer_req->pages, is_net);
413 D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
414 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
415 if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
417 drbd_al_complete_io(device, &peer_req->i);
422 int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
427 int is_net = list == &device->net_ee;
429 spin_lock_irq(&device->resource->req_lock);
431 spin_unlock_irq(&device->resource->req_lock);
434 __drbd_free_peer_req(device, peer_req, is_net);
443 static int drbd_finish_peer_reqs(struct drbd_device *device)
450 spin_lock_irq(&device->resource->req_lock);
451 reclaim_finished_net_peer_reqs(device, &reclaimed);
452 list_splice_init(&device->done_ee, &work_list);
453 spin_unlock_irq(&device->resource->req_lock);
456 drbd_free_net_peer_req(device, peer_req);
469 drbd_free_peer_req(device, peer_req);
471 wake_up(&device->ee_wait);
476 static void _drbd_wait_ee_list_empty(struct drbd_device *device,
484 prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
485 spin_unlock_irq(&device->resource->req_lock);
487 finish_wait(&device->ee_wait, &wait);
488 spin_lock_irq(&device->resource->req_lock);
492 static void drbd_wait_ee_list_empty(struct drbd_device *device,
495 spin_lock_irq(&device->resource->req_lock);
496 _drbd_wait_ee_list_empty(device, head);
497 spin_unlock_irq(&device->resource->req_lock);
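
_drbd_wait_ee_list_empty() above is the classic drop-the-lock-and-sleep loop: the caller holds req_lock, so the lock must be released before scheduling and re-taken before re-testing the list. A sketch, assuming io_schedule() between the prepare/finish pair as in similar kernel wait loops:

static void _drbd_wait_ee_list_empty(struct drbd_device *device,
				     struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* never sleep with req_lock held */
	while (!list_empty(head)) {
		prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&device->resource->req_lock);
		io_schedule();
		finish_wait(&device->ee_wait, &wait);
		spin_lock_irq(&device->resource->req_lock);
	}
}
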
891 struct drbd_device *device = peer_device->device;
894 atomic_set(&device->packet_seq, 0);
895 device->peer_seq = 0;
897 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
899 &device->own_state_mutex;
908 clear_bit(USE_DEGR_WFC_T, &device->flags);
909 clear_bit(RESIZE_PENDING, &device->flags);
910 atomic_set(&device->ap_in_flight, 0);
911 mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
1070 /* drbd_request_state(device, NS(conn, WFAuth)); */
1095 mutex_lock(peer_device->device->state_mutex);
1103 mutex_unlock(peer_device->device->state_mutex);
1107 struct drbd_device *device = peer_device->device;
1108 kref_get(&device->kref);
1112 set_bit(DISCARD_MY_DATA, &device->flags);
1114 clear_bit(DISCARD_MY_DATA, &device->flags);
1117 kref_put(&device->kref, drbd_destroy_device);
1257 struct drbd_device *device;
1264 struct drbd_device *device = octx->device;
1269 drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
1274 clear_bit(FLUSH_PENDING, &device->flags);
1275 put_ldev(device);
1276 kref_put(&device->kref, drbd_destroy_device);
1282 static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
1284 struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
1289 drbd_warn(device, "Could not allocate an octx, CANNOT ISSUE FLUSH\n");
1296 put_ldev(device);
1297 kref_put(&device->kref, drbd_destroy_device);
1301 octx->device = device;
1306 device->flush_jif = jiffies;
1307 set_bit(FLUSH_PENDING, &device->flags);
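
submit_one_flush() above issues an empty bio that carries only flush semantics: zero payload, REQ_PREFLUSH, completed asynchronously via one_flush_endio(). A sketch under those assumptions (error propagation into the issue_flush_context is elided):

static void submit_one_flush(struct drbd_device *device,
			     struct issue_flush_context *ctx)
{
	/* zero-payload bio: nothing to write, just the flush */
	struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
				    REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO);
	struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);

	if (!octx) {
		drbd_warn(device, "Could not allocate an octx, CANNOT ISSUE FLUSH\n");
		/* error propagation to ctx elided in this sketch */
		bio_put(bio);
		put_ldev(device);
		kref_put(&device->kref, drbd_destroy_device);
		return;
	}

	octx->device = device;
	octx->ctx = ctx;
	bio->bi_private = octx;
	bio->bi_end_io = one_flush_endio;	/* clears FLUSH_PENDING, drops refs */

	device->flush_jif = jiffies;
	set_bit(FLUSH_PENDING, &device->flags);
	atomic_inc(&ctx->pending);
	submit_bio(bio);
}
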
1325 struct drbd_device *device = peer_device->device;
1327 if (!get_ldev(device))
1329 kref_get(&device->kref);
1332 submit_one_flush(device, &ctx);
1451 struct drbd_device *device;
1464 idr_for_each_entry(&resource->devices, device, vnr) {
1465 if (get_ldev(device)) {
1466 wo = max_allowed_wo(device->ldev, wo);
1467 if (device->ldev == bdev)
1469 put_ldev(device);
1513 int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags)
1515 struct block_device *bdev = device->ldev->backing_bdev;
1578 static bool can_do_reliable_discards(struct drbd_device *device)
1583 if (!bdev_max_discard_sectors(device->ldev->backing_bdev))
1587 dc = rcu_dereference(device->ldev->disk_conf);
1593 static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, struct drbd_peer_request *peer_req)
1599 if (!can_do_reliable_discards(device))
1602 if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector,
1636 struct drbd_device *device = peer_req->peer_device->device;
1663 spin_lock_irq(&device->resource->req_lock);
1664 list_add_tail(&peer_req->w.list, &device->active_ee);
1665 spin_unlock_irq(&device->resource->req_lock);
1668 drbd_issue_peer_discard_or_zero_out(device, peer_req);
1688 drbd_err(device, "Invalid bio op received: 0x%x\n", peer_req->opf);
1692 bio = bio_alloc(device->ldev->backing_bdev, nr_pages, peer_req->opf, GFP_NOIO);
1710 D_ASSERT(device, data_size == 0);
1711 D_ASSERT(device, page == NULL);
1722 drbd_submit_bio_noacct(device, peer_request_fault_type(peer_req), bio);
1727 static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
1732 drbd_remove_interval(&device->write_requests, i);
1737 wake_up(&device->misc_wait);
1747 struct drbd_device *device = peer_device->device;
1749 kref_get(&device->kref);
1751 drbd_wait_ee_list_empty(device, &device->active_ee);
1752 kref_put(&device->kref, drbd_destroy_device);
1765 * not a specific (peer)device.
1850 struct drbd_device *device = peer_device->device;
1851 const sector_t capacity = get_capacity(device->vdisk);
1898 drbd_err(device, "request from peer beyond end of local disk: "
1929 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
1930 drbd_err(device, "Fault injection: Corrupting data on receive\n");
1935 drbd_free_peer_req(device, peer_req);
1944 drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
1946 drbd_free_peer_req(device, peer_req);
1950 device->recv_cnt += data_size >> 9;
1978 drbd_free_pages(peer_device->device, page, 0);
2003 peer_device->device->recv_cnt += data_size>>9;
2006 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
2026 D_ASSERT(peer_device->device, data_size == 0);
2039 struct drbd_device *device = peer_device->device;
2043 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
2054 dec_unacked(device);
2062 struct drbd_device *device = peer_device->device;
2071 inc_unacked(device);
2079 spin_lock_irq(&device->resource->req_lock);
2080 list_add_tail(&peer_req->w.list, &device->sync_ee);
2081 spin_unlock_irq(&device->resource->req_lock);
2083 atomic_add(pi->size >> 9, &device->rs_sect_ev);
2088 drbd_err(device, "submit failed, triggering re-connect\n");
2089 spin_lock_irq(&device->resource->req_lock);
2091 spin_unlock_irq(&device->resource->req_lock);
2093 drbd_free_peer_req(device, peer_req);
2095 put_ldev(device);
2100 find_request(struct drbd_device *device, struct rb_root *root, u64 id,
2110 drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
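
find_request() above validates a request pointer echoed back by the peer rather than trusting it blindly: the block_id is accepted only if it still sits in the interval tree at the expected sector. A sketch along those lines (drbd_contains_interval() assumed from the driver's interval-tree code):

static struct drbd_request *
find_request(struct drbd_device *device, struct rb_root *root, u64 id,
	     sector_t sector, bool missing_ok, const char *func)
{
	/* the peer echoes our request pointer back as an opaque id */
	struct drbd_request *req = (struct drbd_request *)(unsigned long)id;

	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
		return req;
	if (!missing_ok)
		drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n",
			 func, (unsigned long)id, (unsigned long long)sector);
	return NULL;
}
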
2119 struct drbd_device *device;
2128 device = peer_device->device;
2132 spin_lock_irq(&device->resource->req_lock);
2133 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
2134 spin_unlock_irq(&device->resource->req_lock);
2151 struct drbd_device *device;
2159 device = peer_device->device;
2162 D_ASSERT(device, p->block_id == ID_SYNCER);
2164 if (get_ldev(device)) {
2171 drbd_err(device, "Cannot write resync data to local disk.\n");
2178 atomic_add(pi->size >> 9, &device->rs_sect_in);
2183 static void restart_conflicting_writes(struct drbd_device *device,
2189 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2210 struct drbd_device *device = peer_device->device;
2216 pcmd = (device->state.conn >= C_SYNC_SOURCE &&
2217 device->state.conn <= C_PAUSED_SYNC_T &&
2228 dec_unacked(device);
2234 spin_lock_irq(&device->resource->req_lock);
2235 D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
2236 drbd_remove_epoch_entry_interval(device, peer_req);
2238 restart_conflicting_writes(device, sector, peer_req->i.size);
2239 spin_unlock_irq(&device->resource->req_lock);
2241 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
2256 dec_unacked(peer_device->device);
2293 struct drbd_device *device = peer_device->device;
2297 spin_lock(&device->peer_seq_lock);
2298 newest_peer_seq = seq_max(device->peer_seq, peer_seq);
2299 device->peer_seq = newest_peer_seq;
2300 spin_unlock(&device->peer_seq_lock);
2301 /* wake up only if we actually changed device->peer_seq */
2303 wake_up(&device->seq_wait);
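
The wake_up() above fires only when the stored sequence number actually advanced, so waiters are not scheduled needlessly. A sketch of update_peer_seq() assembled from these hits (in the driver this runs only when the connection resolves conflicts; that gate is elided here):

static void update_peer_seq(struct drbd_peer_device *peer_device,
			    unsigned int peer_seq)
{
	struct drbd_device *device = peer_device->device;
	unsigned int newest_peer_seq;

	spin_lock(&device->peer_seq_lock);
	newest_peer_seq = seq_max(device->peer_seq, peer_seq);
	device->peer_seq = newest_peer_seq;
	spin_unlock(&device->peer_seq_lock);
	/* wake up only if we actually changed device->peer_seq */
	if (peer_seq == newest_peer_seq)
		wake_up(&device->seq_wait);
}
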
2313 static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
2318 spin_lock_irq(&device->resource->req_lock);
2319 list_for_each_entry(rs_req, &device->sync_ee, w.list) {
2326 spin_unlock_irq(&device->resource->req_lock);
2340 * In case packet_seq is larger than the device->peer_seq number, there are
2342 * In case we are the logically next packet, we update device->peer_seq
2354 struct drbd_device *device = peer_device->device;
2362 spin_lock(&device->peer_seq_lock);
2364 if (!seq_greater(peer_seq - 1, device->peer_seq)) {
2365 device->peer_seq = seq_max(device->peer_seq, peer_seq);
2382 prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
2383 spin_unlock(&device->peer_seq_lock);
2388 spin_lock(&device->peer_seq_lock);
2391 drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n");
2395 spin_unlock(&device->peer_seq_lock);
2396 finish_wait(&device->seq_wait, &wait);
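
The seq_greater()/seq_max() comparisons used above must survive wrap-around of the 32-bit sequence counter. The standard serial-number-arithmetic trick, as a sketch:

static bool seq_greater(u32 a, u32 b)
{
	/* signed difference handles 32-bit wrap-around:
	 * the result is positive iff a is logically newer than b */
	return (s32)a - (s32)b > 0;
}

static u32 seq_max(u32 a, u32 b)
{
	return seq_greater(a, b) ? a : b;
}
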
2419 static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
2422 struct drbd_peer_device *peer_device = first_peer_device(device);
2426 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2437 spin_unlock_irq(&device->resource->req_lock);
2439 complete_master_bio(device, &m);
2440 spin_lock_irq(&device->resource->req_lock);
2445 static int handle_write_conflicts(struct drbd_device *device,
2460 drbd_insert_interval(&device->write_requests, &peer_req->i);
2463 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2475 err = drbd_wait_misc(device, i);
2493 drbd_alert(device, "Concurrent writes detected: "
2502 list_add_tail(&peer_req->w.list, &device->done_ee);
2512 drbd_alert(device, "Concurrent writes detected: "
2530 err = drbd_wait_misc(device, &req->i);
2533 fail_postponed_requests(device, sector, size);
2549 drbd_remove_epoch_entry_interval(device, peer_req);
2557 struct drbd_device *device;
2569 device = peer_device->device;
2571 if (!get_ldev(device)) {
2592 put_ldev(device);
2618 D_ASSERT(device, peer_req->i.size == 0);
2619 D_ASSERT(device, dp_flags & DP_FLUSH);
2648 inc_unacked(device);
2661 D_ASSERT(device, dp_flags & DP_SEND_WRITE_ACK);
2666 spin_lock_irq(&device->resource->req_lock);
2667 err = handle_write_conflicts(device, peer_req);
2669 spin_unlock_irq(&device->resource->req_lock);
2671 put_ldev(device);
2678 spin_lock_irq(&device->resource->req_lock);
2685 list_add_tail(&peer_req->w.list, &device->active_ee);
2686 spin_unlock_irq(&device->resource->req_lock);
2688 if (device->state.conn == C_SYNC_TARGET)
2689 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
2691 if (device->state.pdsk < D_INCONSISTENT) {
2695 drbd_al_begin_io(device, &peer_req->i);
2704 drbd_err(device, "submit failed, triggering re-connect\n");
2705 spin_lock_irq(&device->resource->req_lock);
2707 drbd_remove_epoch_entry_interval(device, peer_req);
2708 spin_unlock_irq(&device->resource->req_lock);
2711 drbd_al_complete_io(device, &peer_req->i);
2716 put_ldev(device);
2717 drbd_free_peer_req(device, peer_req);
2721 /* We may throttle resync if the lower device seems to be busy,
2724 * To decide whether the lower device is busy, we use a scheme similar
2735 struct drbd_device *device = peer_device->device;
2737 bool throttle = drbd_rs_c_min_rate_throttle(device);
2742 spin_lock_irq(&device->al_lock);
2743 tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
2751 spin_unlock_irq(&device->al_lock);
2756 bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
2758 struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
2764 c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
2772 atomic_read(&device->rs_sect_ev);
2774 if (atomic_read(&device->ap_actlog_cnt)
2775 || curr_events - device->rs_last_events > 64) {
2779 device->rs_last_events = curr_events;
2783 i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2785 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
2786 rs_left = device->ov_left;
2788 rs_left = drbd_bm_total_weight(device) - device->rs_failed;
2790 dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
2793 db = device->rs_mark_left[i] - rs_left;
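
The rate computation above turns bitmap-bit deltas between sync marks into a throughput figure and throttles only when resync outpaces the configured floor. A condensed sketch (Bit2KB() and the sync-mark bookkeeping as in the driver; framing simplified):

	dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
	if (!dt)
		dt++;					/* avoid division by zero */
	db = device->rs_mark_left[i] - rs_left;		/* bits resynced since mark i */
	dbdt = Bit2KB(db / dt);				/* achieved rate, KiB/s */

	if (dbdt > c_min_rate)				/* faster than the floor: */
		throttle = true;			/* yield to application I/O */
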
2805 struct drbd_device *device;
2816 device = peer_device->device;
2817 capacity = get_capacity(device->vdisk);
2823 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2828 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2833 if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
2854 drbd_err(device, "Cannot satisfy peer's read request, "
2867 put_ldev(device);
2889 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2908 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
2911 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2913 device->use_csums = true;
2916 atomic_add(size >> 9, &device->rs_sect_in);
2926 if (device->ov_start_sector == ~(sector_t)0 &&
2930 device->ov_start_sector = sector;
2931 device->ov_position = sector;
2932 device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
2933 device->rs_total = device->ov_left;
2935 device->rs_mark_left[i] = device->ov_left;
2936 device->rs_mark_time[i] = now;
2938 drbd_info(device, "Online Verify start sector: %llu\n",
2975 spin_lock_irq(&device->resource->req_lock);
2976 list_add_tail(&peer_req->w.list, &device->read_ee);
2977 spin_unlock_irq(&device->resource->req_lock);
2980 if (device->state.peer != R_PRIMARY
2984 if (drbd_rs_begin_io(device, sector))
2988 atomic_add(size >> 9, &device->rs_sect_ev);
2992 inc_unacked(device);
2997 drbd_err(device, "submit failed, triggering re-connect\n");
3000 spin_lock_irq(&device->resource->req_lock);
3002 spin_unlock_irq(&device->resource->req_lock);
3005 put_ldev(device);
3006 drbd_free_peer_req(device, peer_req);
3015 struct drbd_device *device = peer_device->device;
3020 self = device->ldev->md.uuid[UI_BITMAP] & 1;
3021 peer = device->p_uuid[UI_BITMAP] & 1;
3023 ch_peer = device->p_uuid[UI_SIZE];
3024 ch_self = device->comm_bm_set;
3034 drbd_err(device, "Configuration error.\n");
3058 drbd_warn(device, "Discard younger/older primary did not find a decision\n"
3098 struct drbd_device *device = peer_device->device;
3112 drbd_err(device, "Configuration error.\n");
3118 if (hg == -1 && device->state.role == R_SECONDARY)
3120 if (hg == 1 && device->state.role == R_PRIMARY)
3127 return device->state.role == R_PRIMARY ? 1 : -1;
3130 if (hg == -1 && device->state.role == R_PRIMARY) {
3136 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
3138 drbd_khelper(device, "pri-lost-after-sb");
3140 drbd_warn(device, "Successfully gave up primary role.\n");
3155 struct drbd_device *device = peer_device->device;
3171 drbd_err(device, "Configuration error.\n");
3186 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
3188 drbd_khelper(device, "pri-lost-after-sb");
3190 drbd_warn(device, "Successfully gave up primary role.\n");
3200 static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
3204 drbd_info(device, "%s uuid info vanished while I was looking!\n", text);
3207 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
3234 struct drbd_device *device = peer_device->device;
3238 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
3239 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
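
From here on, UUIDs are compared with the lowest bit masked off via & ~((u64)1): bit 0 carries a flag rather than identity. A hypothetical helper naming that convention (the driver open-codes the mask everywhere):

static inline u64 uuid_val(u64 uuid)
{
	return uuid & ~((u64)1);	/* strip the flag bit before comparing */
}
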
3258 if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
3263 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
3264 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
3265 drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n");
3266 drbd_uuid_move_history(device);
3267 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3268 device->ldev->md.uuid[UI_BITMAP] = 0;
3270 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3271 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
3274 drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n");
3281 if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
3286 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
3287 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
3288 drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
3290 device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
3291 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
3292 device->p_uuid[UI_BITMAP] = 0UL;
3294 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3297 drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n");
3305 rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) +
3306 (device->p_uuid[UI_FLAGS] & 2);
3321 if (device->state.role == R_PRIMARY || peer_role == R_PRIMARY) {
3327 if (device->state.role == R_PRIMARY && peer_role == R_PRIMARY) {
3334 if (device->state.role == R_PRIMARY)
3353 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
3358 peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
3361 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
3362 (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
3363 peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
3370 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
3371 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];
3373 drbd_info(device, "Lost last syncUUID packet, corrected:\n");
3374 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3381 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
3383 peer = device->p_uuid[i] & ~((u64)1);
3389 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3390 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
3395 self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
3398 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
3399 (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
3400 self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
3407 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
3408 __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
3410 drbd_info(device, "Last syncUUID did not get through, corrected:\n");
3411 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3412 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
3420 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
3422 self = device->ldev->md.uuid[i] & ~((u64)1);
3428 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3429 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
3435 self = device->ldev->md.uuid[i] & ~((u64)1);
3437 peer = device->p_uuid[j] & ~((u64)1);
3453 struct drbd_device *device = peer_device->device;
3459 mydisk = device->state.disk;
3461 mydisk = device->new_state_tmp.disk;
3463 drbd_info(device, "drbd_sync_handshake:\n");
3465 spin_lock_irq(&device->ldev->md.uuid_lock);
3466 drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
3467 drbd_uuid_dump(device, "peer", device->p_uuid,
3468 device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3471 spin_unlock_irq(&device->ldev->md.uuid_lock);
3473 drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
3476 drbd_alert(device, "Unrelated data, aborting!\n");
3484 drbd_alert(device, "To resolve this both sides have to support at least protocol %d and feature flags 0x%x\n",
3489 drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
3499 drbd_info(device, "Becoming sync %s due to disk states.\n",
3504 drbd_khelper(device, "initial-split-brain");
3514 int pcount = (device->state.role == R_PRIMARY)
3530 drbd_warn(device, "Split-Brain detected, %d primaries, "
3534 drbd_warn(device, "Doing a full sync, since"
3542 if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1))
3544 if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1))
3548 drbd_warn(device, "Split-Brain detected, manually solved. "
3558 drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n");
3559 drbd_khelper(device, "split-brain");
3564 drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n");
3569 device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) {
3572 drbd_khelper(device, "pri-lost");
3575 drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
3578 drbd_warn(device, "Becoming SyncTarget, violating the stable-data"
3585 drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n");
3587 drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.",
3594 drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
3595 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3606 if (drbd_bm_total_weight(device)) {
3607 drbd_info(device, "No resync, but %lu bits in bitmap!\n",
3608 drbd_bm_total_weight(device));
3783 const struct drbd_device *device,
3793 drbd_err(device, "Cannot allocate \"%s\" as %s (reason: %ld)\n",
3821 * config_unknown_volume - device configuration command for unknown volume
3823 * When a device is added to an existing connection, the node on which the
3824 * device is added first will send configuration commands to its peer but the
3825 * peer will not know about the device yet. It will warn and ignore these
3826 * commands. Once the device is added on the second node, the second node will
3827 * send the same device configuration commands, but in the other direction.
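
The kernel-doc above describes a warn-and-ignore policy: configuration packets for a volume this node does not (yet) have are logged and their payload drained so the stream stays in sync. A sketch of such a handler (cmdname() and ignore_remaining_packet() assumed from the surrounding receiver code):

static int config_unknown_volume(struct drbd_connection *connection,
				 struct packet_info *pi)
{
	drbd_warn(connection, "%s packet received for volume %d, which is not configured locally\n",
		  cmdname(pi->cmd), pi->vnr);
	return ignore_remaining_packet(connection, pi);
}
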
3841 struct drbd_device *device;
3856 device = peer_device->device;
3865 drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n",
3876 D_ASSERT(device, data_size == 0);
3880 D_ASSERT(device, data_size == 0);
3894 if (get_ldev(device)) {
3897 put_ldev(device);
3899 drbd_err(device, "Allocation of new disk_conf failed\n");
3903 old_disk_conf = device->ldev->disk_conf;
3912 drbd_err(device, "verify-alg of wrong size, "
3923 D_ASSERT(device, p->verify_alg[data_size-1] == 0);
3929 D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3930 D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3936 if (device->state.conn == C_WF_REPORT_PARAMS) {
3937 drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
3941 verify_tfm = drbd_crypto_alloc_digest_safe(device,
3950 if (device->state.conn == C_WF_REPORT_PARAMS) {
3951 drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
3955 csums_tfm = drbd_crypto_alloc_digest_safe(device,
3970 if (fifo_size != device->rs_plan_s->size) {
3973 drbd_err(device, "kmalloc of fifo_buffer failed");
3974 put_ldev(device);
3992 drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
3999 drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
4006 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
4007 put_ldev(device);
4011 old_plan = device->rs_plan_s;
4012 rcu_assign_pointer(device->rs_plan_s, new_plan);
4026 put_ldev(device);
4035 put_ldev(device);
4049 static void warn_if_differ_considerably(struct drbd_device *device,
4057 drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s,
4064 struct drbd_device *device;
4076 device = peer_device->device;
4077 cur_size = get_capacity(device->vdisk);
4085 device->p_size = p_size;
4087 if (get_ldev(device)) {
4089 my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
4092 warn_if_differ_considerably(device, "lower level device sizes",
4093 p_size, drbd_get_max_capacity(device->ldev));
4094 warn_if_differ_considerably(device, "user requested size",
4099 if (device->state.conn == C_WF_REPORT_PARAMS)
4102 /* Never shrink a device with usable data during connect,
4105 new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0);
4107 device->state.disk >= D_OUTDATED &&
4108 (device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS)) {
4109 drbd_err(device, "The peer's disk size is too small! (%llu < %llu sectors)\n",
4112 put_ldev(device);
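
The guard above refuses to shrink a device that still holds usable data while (re)connecting: if the negotiated size would drop below the current capacity and our disk is at least Outdated, the connect is aborted instead. A sketch of that check (the return/disconnect plumbing is an assumption based on the surrounding handler):

	new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0);
	if (new_size < cur_size &&
	    device->state.disk >= D_OUTDATED &&
	    (device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS)) {
		drbd_err(device, "The peer's disk size is too small! (%llu < %llu sectors)\n",
			 (unsigned long long)new_size,
			 (unsigned long long)cur_size);
		put_ldev(device);
		return -EIO;	/* triggers disconnect in the caller */
	}
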
4121 put_ldev(device);
4126 old_disk_conf = device->ldev->disk_conf;
4130 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
4134 drbd_info(device, "Peer sets u_size to %lu sectors (old: %lu)\n",
4138 put_ldev(device);
4141 device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
4148 if (get_ldev(device)) {
4149 drbd_reconsider_queue_parameters(device, device->ldev, o);
4150 dd = drbd_determine_dev_size(device, ddsf, NULL);
4151 put_ldev(device);
4154 drbd_md_sync(device);
4173 drbd_reconsider_queue_parameters(device, NULL, o);
4179 drbd_warn(device, "Ignored diskless peer device size (peer:%llu != me:%llu sectors)!\n",
4181 } else if (new_size < cur_size && device->state.role == R_PRIMARY) {
4182 drbd_err(device, "The peer's device size is too small! (%llu < %llu sectors); demote me first!\n",
4196 drbd_set_my_capacity(device, new_size);
4200 if (get_ldev(device)) {
4201 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) {
4202 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
4206 put_ldev(device);
4209 if (device->state.conn > C_WF_REPORT_PARAMS) {
4210 if (be64_to_cpu(p->c_size) != get_capacity(device->vdisk) ||
4216 if (test_and_clear_bit(RESIZE_PENDING, &device->flags) ||
4217 (dd == DS_GREW && device->state.conn == C_CONNECTED)) {
4218 if (device->state.pdsk >= D_INCONSISTENT &&
4219 device->state.disk >= D_INCONSISTENT) {
4221 drbd_info(device, "Resync of new storage suppressed with --assume-clean\n");
4223 resync_after_online_grow(device);
4225 set_bit(RESYNC_AFTER_NEG, &device->flags);
4235 struct drbd_device *device;
4243 device = peer_device->device;
4252 kfree(device->p_uuid);
4253 device->p_uuid = p_uuid;
4255 if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) &&
4256 device->state.disk < D_INCONSISTENT &&
4257 device->state.role == R_PRIMARY &&
4258 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
4259 drbd_err(device, "Can only connect to data with current UUID=%016llX\n",
4260 (unsigned long long)device->ed_uuid);
4265 if (get_ldev(device)) {
4267 device->state.conn == C_CONNECTED &&
4269 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
4272 drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n");
4273 drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
4276 _drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]);
4277 _drbd_uuid_set(device, UI_BITMAP, 0);
4278 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
4280 drbd_md_sync(device);
4283 put_ldev(device);
4284 } else if (device->state.disk < D_INCONSISTENT &&
4285 device->state.role == R_PRIMARY) {
4288 updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
4295 mutex_lock(device->state_mutex);
4296 mutex_unlock(device->state_mutex);
4297 if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT)
4298 updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
4301 drbd_print_uuids(device, "receiver updated UUIDs to");
4340 struct drbd_device *device;
4348 device = peer_device->device;
4354 mutex_is_locked(device->state_mutex)) {
4362 rv = drbd_change_state(device, CS_VERBOSE, mask, val);
4365 drbd_md_sync(device);
4397 struct drbd_device *device;
4407 device = peer_device->device;
4413 real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
4414 drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
4417 spin_lock_irq(&device->resource->req_lock);
4419 os = ns = drbd_read_state(device);
4420 spin_unlock_irq(&device->resource->req_lock);
4452 if (drbd_bm_total_weight(device) <= device->rs_failed)
4500 if (device->p_uuid && peer_state.disk >= D_NEGOTIATING &&
4501 get_ldev_if_state(device, D_NEGOTIATING)) {
4513 cr |= test_bit(CONSIDER_RESYNC, &device->flags);
4523 put_ldev(device);
4526 if (device->state.disk == D_NEGOTIATING) {
4527 drbd_force_state(device, NS(disk, D_FAILED));
4529 drbd_err(device, "Disk attach process on the peer node was aborted.\n");
4535 D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS);
4542 spin_lock_irq(&device->resource->req_lock);
4543 if (os.i != drbd_read_state(device).i)
4545 clear_bit(CONSIDER_RESYNC, &device->flags);
4550 ns.disk = device->new_state_tmp.disk;
4552 if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
4553 test_bit(NEW_CUR_UUID, &device->flags)) {
4556 spin_unlock_irq(&device->resource->req_lock);
4557 drbd_err(device, "Aborting Connect: cannot thaw IO with an only-Consistent peer\n");
4559 drbd_uuid_new_current(device);
4560 clear_bit(NEW_CUR_UUID, &device->flags);
4564 rv = _drbd_set_state(device, ns, cs_flags, NULL);
4565 ns = drbd_read_state(device);
4566 spin_unlock_irq(&device->resource->req_lock);
4584 clear_bit(DISCARD_MY_DATA, &device->flags);
4586 drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */
4594 struct drbd_device *device;
4600 device = peer_device->device;
4602 wait_event(device->misc_wait,
4603 device->state.conn == C_WF_SYNC_UUID ||
4604 device->state.conn == C_BEHIND ||
4605 device->state.conn < C_CONNECTED ||
4606 device->state.disk < D_NEGOTIATING);
4608 /* D_ASSERT(device, device->state.conn == C_WF_SYNC_UUID ); */
4612 if (get_ldev_if_state(device, D_NEGOTIATING)) {
4613 _drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid));
4614 _drbd_uuid_set(device, UI_BITMAP, 0UL);
4616 drbd_print_uuids(device, "updated sync uuid");
4617 drbd_start_resync(device, C_SYNC_TARGET);
4619 put_ldev(device);
4621 drbd_err(device, "Ignoring SyncUUID packet!\n");
4653 drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p);
4717 _drbd_bm_set_bits(peer_device->device, s, e);
4818 struct drbd_device *device;
4825 device = peer_device->device;
4827 drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4832 .bm_bits = drbd_bm_bits(device),
4833 .bm_words = drbd_bm_words(device),
4845 drbd_err(device, "ReportCBitmap packet too large\n");
4850 drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size);
4859 drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
4879 if (device->state.conn == C_WF_BITMAP_T) {
4882 err = drbd_send_bitmap(device, peer_device);
4886 rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4887 D_ASSERT(device, rv == SS_SUCCESS);
4888 } else if (device->state.conn != C_WF_BITMAP_S) {
4891 drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n",
4892 drbd_conn_str(device->state.conn));
4897 drbd_bm_unlock(device);
4898 if (!err && device->state.conn == C_WF_BITMAP_S)
4899 drbd_start_resync(device, C_SYNC_SOURCE);
4922 struct drbd_device *device;
4928 device = peer_device->device;
4930 switch (device->state.conn) {
4936 drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4937 drbd_conn_str(device->state.conn));
4949 struct drbd_device *device;
4956 device = peer_device->device;
4963 if (get_ldev(device)) {
4969 put_ldev(device);
4978 spin_lock_irq(&device->resource->req_lock);
4979 list_add_tail(&peer_req->w.list, &device->sync_ee);
4980 spin_unlock_irq(&device->resource->req_lock);
4982 atomic_add(pi->size >> 9, &device->rs_sect_ev);
4986 spin_lock_irq(&device->resource->req_lock);
4988 spin_unlock_irq(&device->resource->req_lock);
4990 drbd_free_peer_req(device, peer_req);
4991 put_ldev(device);
4996 inc_unacked(device);
5002 drbd_rs_complete_io(device, sector);
5006 atomic_add(size >> 9, &device->rs_sect_in);
5131 struct drbd_device *device = peer_device->device;
5132 kref_get(&device->kref);
5135 kref_put(&device->kref, drbd_destroy_device);
5164 struct drbd_device *device = peer_device->device;
5168 spin_lock_irq(&device->resource->req_lock);
5169 _drbd_wait_ee_list_empty(device, &device->active_ee);
5170 _drbd_wait_ee_list_empty(device, &device->sync_ee);
5171 _drbd_wait_ee_list_empty(device, &device->read_ee);
5172 spin_unlock_irq(&device->resource->req_lock);
5184 drbd_rs_cancel_all(device);
5185 device->rs_total = 0;
5186 device->rs_failed = 0;
5187 atomic_set(&device->rs_pending_cnt, 0);
5188 wake_up(&device->misc_wait);
5190 del_timer_sync(&device->resync_timer);
5191 resync_timer_fn(&device->resync_timer);
5198 drbd_finish_peer_reqs(device);
5207 drbd_rs_cancel_all(device);
5209 kfree(device->p_uuid);
5210 device->p_uuid = NULL;
5212 if (!drbd_suspended(device))
5215 drbd_md_sync(device);
5217 if (get_ldev(device)) {
5218 drbd_bitmap_io(device, &drbd_bm_write_copy_pages,
5220 put_ldev(device);
5230 i = drbd_free_peer_reqs(device, &device->net_ee);
5232 drbd_info(device, "net_ee not empty, killed %u entries\n", i);
5233 i = atomic_read(&device->pp_in_use_by_net);
5235 drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
5236 i = atomic_read(&device->pp_in_use);
5238 drbd_info(device, "pp_in_use = %d, expected 0\n", i);
5240 D_ASSERT(device, list_empty(&device->read_ee));
5241 D_ASSERT(device, list_empty(&device->active_ee));
5242 D_ASSERT(device, list_empty(&device->sync_ee));
5243 D_ASSERT(device, list_empty(&device->done_ee));
5592 struct drbd_device *device;
5599 device = peer_device->device;
5602 D_ASSERT(device, connection->agreed_pro_version < 100);
5607 set_bit(CL_ST_CHG_SUCCESS, &device->flags);
5609 set_bit(CL_ST_CHG_FAIL, &device->flags);
5610 drbd_err(device, "Requested state change failed by peer: %s (%d)\n",
5613 wake_up(&device->state_wait);
5637 struct drbd_device *device;
5645 device = peer_device->device;
5647 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
5651 if (get_ldev(device)) {
5652 drbd_rs_complete_io(device, sector);
5655 device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
5656 put_ldev(device);
5659 atomic_add(blksize >> 9, &device->rs_sect_in);
5669 struct drbd_device *device = peer_device->device;
5673 spin_lock_irq(&device->resource->req_lock);
5674 req = find_request(device, root, id, sector, missing_ok, func);
5676 spin_unlock_irq(&device->resource->req_lock);
5680 spin_unlock_irq(&device->resource->req_lock);
5683 complete_master_bio(device, &m);
5690 struct drbd_device *device;
5699 device = peer_device->device;
5729 &device->write_requests, __func__,
5736 struct drbd_device *device;
5745 device = peer_device->device;
5756 &device->write_requests, __func__,
5772 struct drbd_device *device;
5779 device = peer_device->device;
5783 drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n",
5787 &device->read_requests, __func__,
5794 struct drbd_device *device;
5802 device = peer_device->device;
5811 if (get_ldev_if_state(device, D_FAILED)) {
5812 drbd_rs_complete_io(device, sector);
5822 put_ldev(device);
5838 struct drbd_device *device = peer_device->device;
5840 if (device->state.conn == C_AHEAD &&
5841 atomic_read(&device->ap_in_flight) == 0 &&
5842 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) {
5843 device->start_resync_timer.expires = jiffies + HZ;
5844 add_timer(&device->start_resync_timer);
5855 struct drbd_device *device;
5864 device = peer_device->device;
5876 if (!get_ldev(device))
5879 drbd_rs_complete_io(device, sector);
5882 --device->ov_left;
5885 if ((device->ov_left & 0x200) == 0x200)
5886 drbd_advance_rs_marks(peer_device, device->ov_left);
5888 if (device->ov_left == 0) {
5892 dw->device = device;
5895 drbd_err(device, "kmalloc(dw) failed.");
5900 put_ldev(device);
6102 struct drbd_device *device = peer_device->device;
6114 err = drbd_finish_peer_reqs(device);
6115 kref_put(&device->kref, drbd_destroy_device);