/*
   drbd_req.h

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2006-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2006-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
   Copyright (C) 2006-2008, Philipp Reisner <philipp.reisner@linbit.com>.

   DRBD is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   DRBD is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef _DRBD_REQ_H
#define _DRBD_REQ_H

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"

/* The request callbacks will be called in irq context by the IDE drivers,
   and in Softirqs/Tasklets/BH context by the SCSI drivers,
   and by the receiver and worker in kernel-thread context.
   Try to get the locking right :) */

/*
 * Objects of type struct drbd_request only exist on an R_PRIMARY node, and are
 * associated with IO requests originating from the block layer above us.
 *
 * There are quite a few things that may happen to a drbd request
 * during its lifetime.
 *
 * It will be created.
 * It will be marked with the intention to be
 *	submitted to local disk and/or
 *	sent via the network.
 *
 * It has to be placed on the transfer log and other housekeeping lists,
 * in case we have a network connection.
 *
 * It may be identified as a concurrent (write) request
 *	and be handled accordingly.
 *
 * It may be handed over to the local disk subsystem.
 * It may be completed by the local disk subsystem,
 *	either successfully or with io-error.
 * In case it is a READ request, and it failed locally,
 *	it may be retried remotely.
 *
 * It may be queued for sending.
 * It may be handed over to the network stack,
 *	which may fail.
 * It may be acknowledged by the "peer" according to the wire_protocol in use.
 *	this may be a negative ack.
 * It may receive a faked ack when the network connection is lost and the
 * transfer log is cleaned up.
 * Sending may be canceled due to network connection loss.
 * When it finally has outlived its time,
 *	corresponding dirty bits in the resync-bitmap may be cleared or set,
 *	it will be destroyed,
 *	and completion will be signalled to the originator,
 *	with or without "success".
 */
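
/* The events below drive that lifecycle: each of the transitions above is
 * triggered by feeding one of these events into __req_mod() (declared
 * further down), which implements the actual state machine. */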

enum drbd_req_event {
	created,
	to_be_send,
	to_be_submitted,

	queue_for_net_write,
	queue_for_net_read,

	send_canceled,
	send_failed,
	handed_over_to_network,
	connection_lost_while_pending,
	read_retry_remote_canceled,
	recv_acked_by_peer,
	write_acked_by_peer,
	write_acked_by_peer_and_sis, /* and set_in_sync */
	conflict_discarded_by_peer,
	neg_acked,
	barrier_acked, /* in protocol A and B */
	data_received, /* (remote read) */

	read_completed_with_error,
	read_ahead_completed_with_error,
	write_completed_with_error,
	completed_ok,
	nothing, /* for tracing only */
};

/* encoding of request states for now.  we don't actually need that many bits.
 * we don't need to do atomic bit operations either, since most of the time we
 * need to look at the connection state and/or manipulate some lists at the
 * same time, so we should hold the request lock anyway.
 */
enum drbd_req_state_bits {
	/* 210
	 * 000: no local possible
	 * 001: to be submitted
	 *    UNUSED, we could map: 011: submitted, completion still pending
	 * 110: completed ok
	 * 010: completed with error
	 */
	__RQ_LOCAL_PENDING,
	__RQ_LOCAL_COMPLETED,
	__RQ_LOCAL_OK,

	/* 76543
	 * 00000: no network possible
	 * 00001: to be sent
	 * 00011: to be sent, on worker queue
	 * 00101: sent, expecting recv_ack (B) or write_ack (C)
	 * 11101: sent,
	 *        recv_ack (B) or implicit "ack" (A),
	 *        still waiting for the barrier ack.
	 *        master_bio may already be completed and invalidated.
	 * 11100: write_acked (C),
	 *        data_received (for remote read, any protocol)
	 *        or finally the barrier ack has arrived (B,A)...
	 *        request can be freed
	 * 01100: neg-acked (write, protocol C)
	 *        or neg-d-acked (read, any protocol)
	 *        or killed from the transfer log
	 *        during cleanup after connection loss
	 *        request can be freed
	 * 01000: canceled or send failed...
	 *        request can be freed
	 */

	/* if "SENT" is not set yet, this can still fail or be canceled.
	 * if "SENT" is set already, we still wait for an Ack packet.
	 * when cleared, the master_bio may be completed.
	 * in (B,A) the request object may still linger on the transfer log
	 * until the corresponding barrier ack comes in */
	__RQ_NET_PENDING,

	/* If it is QUEUED, and it is a WRITE, it is also registered in the
	 * transfer log.  Currently we need this flag to avoid conflicts between
	 * the worker canceling the request and tl_clear_barrier killing it from
	 * the transfer log.  We should restructure the code so this conflict
	 * no longer occurs. */
	__RQ_NET_QUEUED,

	/* well, actually only "handed over to the network stack".
	 *
	 * TODO can potentially be dropped because of the similar meaning
	 * of RQ_NET_SENT and ~RQ_NET_QUEUED.
	 * however it is not exactly the same.  before we drop it
	 * we must ensure that we can tell a request with a network part
	 * from a request without, regardless of what happens to it. */
	__RQ_NET_SENT,

	/* when set, the request may be freed (if RQ_NET_QUEUED is clear).
	 * basically this means the corresponding P_BARRIER_ACK was received */
	__RQ_NET_DONE,

	/* whether or not we know (C) or pretend (B,A) that the write
	 * was successfully written on the peer.
	 */
	__RQ_NET_OK,

	/* peer called drbd_set_in_sync() for this write */
	__RQ_NET_SIS,

	/* keep this last, it's for the RQ_NET_MASK */
	__RQ_NET_MAX,
};
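
/* For example, according to the tables above, a protocol C write that has
 * been sent and whose local disk write already completed successfully
 * carries RQ_LOCAL_COMPLETED|RQ_LOCAL_OK|RQ_NET_PENDING|RQ_NET_SENT in its
 * rq_state until the write_ack arrives. */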

#define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK        (1UL << __RQ_LOCAL_OK)

#define RQ_LOCAL_MASK      ((RQ_LOCAL_OK << 1)-1) /* 0x07 */

#define RQ_NET_PENDING     (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED      (1UL << __RQ_NET_QUEUED)
#define RQ_NET_SENT        (1UL << __RQ_NET_SENT)
#define RQ_NET_DONE        (1UL << __RQ_NET_DONE)
#define RQ_NET_OK          (1UL << __RQ_NET_OK)
#define RQ_NET_SIS         (1UL << __RQ_NET_SIS)

/* 0x1f8 */
#define RQ_NET_MASK        (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)

/* epoch entries */
static inline
struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->ee_hash_s == 0);
	return mdev->ee_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
}

/* transfer log (drbd_request objects) */
static inline
struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->tl_hash_s == 0);
	return mdev->tl_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->tl_hash_s);
}

/* application reads (drbd_request objects) */
static inline
struct hlist_head *ar_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	return mdev->app_reads_hash
		+ ((unsigned int)(sector) % APP_R_HSIZE);
}

/* when we receive the answer for a read request,
 * verify that we actually know about it */
static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = ar_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, colision) {
		if ((unsigned long)req == (unsigned long)id) {
			D_ASSERT(req->sector == sector);
			return req;
		}
	}
	return NULL;
}

static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
	struct bio *bio_src)
{
	struct bio *bio;
	struct drbd_request *req =
		mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (likely(req)) {
		/* both allocations are mempool backed and use GFP_NOIO,
		 * so they block rather than fail under memory pressure */
		bio = bio_clone(bio_src, GFP_NOIO);

		req->rq_state    = 0;
		req->mdev        = mdev;
		req->master_bio  = bio_src;
		req->private_bio = bio;
		req->epoch       = 0;
		req->sector      = bio->bi_sector;
		req->size        = bio->bi_size;
		req->start_time  = jiffies;
		INIT_HLIST_NODE(&req->colision);
		INIT_LIST_HEAD(&req->tl_requests);
		INIT_LIST_HEAD(&req->w.list);

		bio->bi_private  = req;
		bio->bi_end_io   = drbd_endio_pri;
		bio->bi_next     = NULL;
	}
	return req;
}

static inline void drbd_req_free(struct drbd_request *req)
{
	mempool_free(req, drbd_request_mempool);
}
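
/* Two requests overlap unless one of them ends at or before the sector
 * where the other one starts.  s1/s2 are start sectors, l1/l2 are lengths
 * in bytes; ">>9" converts bytes to 512-byte sectors.  For example,
 * overlaps(0, 4096, 4, 4096) is true: the ranges cover sectors [0,8)
 * and [4,12). */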
static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
}

/* Short lived temporary struct on the stack.
 * We could squirrel the error to be returned into
 * bio->bi_size, or similar.  But that would be too ugly. */
struct bio_and_error {
	struct bio *bio;
	int error;
};

extern void _req_may_be_done(struct drbd_request *req,
		struct bio_and_error *m);
extern void __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m);
extern void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m);

/* use this if you don't want to deal with calling complete_master_bio()
 * outside the spinlock, e.g. when walking some list on cleanup. */
static inline void _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;

	/* __req_mod possibly frees req, do not touch req after that! */
	__req_mod(req, what, &m);
	if (m.bio)
		complete_master_bio(mdev, &m);
}

/* completion of the master bio is outside of the spinlock.
 * If you need it irqsave, do it yourself! */
static inline void req_mod(struct drbd_request *req,
		enum drbd_req_event what)
{
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;

	spin_lock_irq(&mdev->req_lock);
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
}
#endif