Lines matching refs: drbd_request (source line number, followed by the matching line)

24 static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
26 struct drbd_request *req;
59 struct drbd_request *req)
73 struct drbd_request *req = container_of(kref, struct drbd_request, kref);
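The container_of(kref, struct drbd_request, kref) match above is the standard kref release-callback pattern: the refcount lives inside the object, and the release function recovers the containing object before freeing it. A minimal generic sketch of that pattern (struct foo, foo_destroy() and foo_put() are illustrative stand-ins, not DRBD code):

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
        struct kref kref;
        /* ... payload ... */
};

/* kref release callback: recover the containing object, then free it. */
static void foo_destroy(struct kref *kref)
{
        struct foo *f = container_of(kref, struct foo, kref);

        kfree(f);
}

/* Drop one reference; foo_destroy() runs when the count reaches zero. */
static void foo_put(struct foo *f)
{
        kref_put(&f->kref, foo_destroy);
}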
193 void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
292 static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
320 static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
329 static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
332 struct drbd_request *iter = req;
350 static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
359 static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
362 struct drbd_request *iter = req;
380 static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
389 static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
392 struct drbd_request *iter = req;
412 static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
520 static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
538 static inline bool is_pending_write_protocol_A(struct drbd_request *req)
560 int __req_mod(struct drbd_request *req, enum drbd_req_event what,
953 static void complete_conflicting_writes(struct drbd_request *req)
1041 static bool do_remote_read(struct drbd_request *req)
1104 static int drbd_process_write_request(struct drbd_request *req)
1141 static void drbd_process_discard_or_zeroes_req(struct drbd_request *req, int flags)
1151 drbd_submit_req_private_bio(struct drbd_request *req)
1184 static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
1196 /* returns the new drbd_request pointer, if the caller is expected to
1199 * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
1201 static struct drbd_request *
1205 struct drbd_request *req;
1268 struct drbd_request *most_recent_req;
1276 struct drbd_request *req = plug->most_recent_req;
1306 static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req)
1308 struct drbd_request *tmp = plug->most_recent_req;
1317 static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
1436 struct drbd_request *req = drbd_request_prepare(device, bio);
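The comment above drbd_request_prepare() (the two comment matches earlier in this list) spells out its return contract: a request the caller is expected to submit, ERR_PTR(-ENOMEM) on allocation failure, or NULL (the elided middle of that comment covers this case). A simplified caller sketch under that contract, assuming DRBD's internal declarations are in scope; the bio_io_error() fallback for the error case is an assumption for illustration, not necessarily what the driver does:

#include <linux/err.h>
#include <linux/bio.h>

/* Assumes struct drbd_device, struct drbd_request, drbd_request_prepare()
 * and drbd_send_and_submit() are declared by DRBD's internal headers. */
static void submit_one_bio(struct drbd_device *device, struct bio *bio)
{
        struct drbd_request *req = drbd_request_prepare(device, bio);

        if (IS_ERR(req)) {
                bio_io_error(bio);      /* assumed handling of -ENOMEM */
                return;
        }
        if (!req)
                return;                 /* nothing further for this caller to do */

        drbd_send_and_submit(device, req);
}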
1445 struct drbd_request *req, *tmp;
1473 struct drbd_request *req;
1478 while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) {
1498 struct drbd_request *req;
1501 while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) {
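The two while loops above use the same drain idiom: pop the first drbd_request off a list head with list_first_entry_or_null() until the list is empty. A generic sketch of that idiom, assuming the caller already holds whatever lock (or exclusive ownership) protects the list; the per-request work is a placeholder:

#include <linux/list.h>

static void drain_tl_requests(struct list_head *head)
{
        struct drbd_request *req;

        while ((req = list_first_entry_or_null(head, struct drbd_request,
                                               tl_requests))) {
                /* Unlink first so the loop makes progress, then handle it. */
                list_del_init(&req->tl_requests);
                /* ... process req (placeholder) ... */
        }
}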
1628 static bool net_timeout_reached(struct drbd_request *net_req,
1704 struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
1735 req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local);
1736 req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local);
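The last two matches pick up the oldest pending local read (pending_completion[0]) and write (pending_completion[1]) for the request timer. A hedged sketch of how the older of the two could be checked against a timeout; the start_jif timestamp field and the helper itself are assumptions for illustration, not necessarily the fields or logic the driver uses:

#include <linux/jiffies.h>

static bool oldest_local_request_timed_out(struct drbd_request *req_read,
                                           struct drbd_request *req_write,
                                           unsigned long timeout)
{
        struct drbd_request *oldest = req_read;

        /* Prefer whichever request started earlier (start_jif is assumed). */
        if (!oldest ||
            (req_write && time_before(req_write->start_jif, oldest->start_jif)))
                oldest = req_write;

        return oldest && time_after(jiffies, oldest->start_jif + timeout);
}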