/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: stable/10/sys/dev/xen/blkfront/block.h 285738 2015-07-21 07:22:18Z royger $
 */

#ifndef __XEN_BLKFRONT_BLOCK_H__
#define __XEN_BLKFRONT_BLOCK_H__

#include <xen/blkif.h>

/**
 * Given a number of blkif segments, compute the maximum I/O size supported.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment from the maximum supported by the transport to
 *       guarantee we can handle an unaligned transfer without the need to
 *       use a bounce buffer.
 */
#define XBD_SEGS_TO_SIZE(segs)						\
	(((segs) - 1) * PAGE_SIZE)

/**
 * Compute the maximum number of blkif segments required to represent
 * an I/O of the given size.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment to guarantee we can handle an unaligned
 *       transfer without the need to use a bounce buffer.
 */
#define XBD_SIZE_TO_SEGS(size)						\
	(((size) / PAGE_SIZE) + 1)
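/*
 * Worked example (an editorial note, not driver code): assuming the usual
 * 4KB PAGE_SIZE and the classic blkif transport limit of
 * BLKIF_MAX_SEGMENTS_PER_REQUEST == 11, XBD_SEGS_TO_SIZE(11) yields
 * (11 - 1) * 4096 = 40960 bytes (40KB) of I/O per request, and
 * XBD_SIZE_TO_SEGS(40960) = (40960 / 4096) + 1 = 11 segments round-trips
 * back.  The one reserved segment is what absorbs a transfer whose start
 * and end are not page aligned.
 */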
/**
 * The maximum number of shared memory ring pages we will allow in a
 * negotiated block-front/back communication channel.  Allow enough
 * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd.
 */
#define XBD_MAX_RING_PAGES	32

/**
 * The maximum number of outstanding request blocks (request headers plus
 * additional segment blocks) we will allow in a negotiated block-front/back
 * communication channel.
 */
#define XBD_MAX_REQUESTS						\
	__CONST_RING_SIZE(blkif, PAGE_SIZE * XBD_MAX_RING_PAGES)

/**
 * The maximum mapped region size per request we will allow in a negotiated
 * block-front/back communication channel.
 */
#define XBD_MAX_REQUEST_SIZE						\
	MIN(MAXPHYS, XBD_SEGS_TO_SIZE(BLKIF_MAX_SEGMENTS_PER_REQUEST))

/**
 * The maximum number of segments (within a request header and accompanying
 * segment blocks) per request we will allow in a negotiated block-front/back
 * communication channel.
 */
#define XBD_MAX_SEGMENTS_PER_REQUEST					\
	(MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST,				\
	     XBD_SIZE_TO_SEGS(XBD_MAX_REQUEST_SIZE)))

typedef enum {
	XBDCF_Q_MASK		= 0xFF,
	/* This command has contributed to xbd_qfrozen_cnt. */
	XBDCF_FROZEN		= 1<<8,
	/* Freeze the command queue on dispatch (i.e. single step command). */
	XBDCF_Q_FREEZE		= 1<<9,
	/* Bus DMA returned EINPROGRESS for this command. */
	XBDCF_ASYNC_MAPPING	= 1<<10,
	XBDCF_INITIALIZER	= XBDCF_Q_MASK
} xbdc_flag_t;

struct xbd_command;
typedef void xbd_cbcf_t(struct xbd_command *);

struct xbd_command {
	TAILQ_ENTRY(xbd_command) cm_link;
	struct xbd_softc	*cm_sc;
	xbdc_flag_t		 cm_flags;
	bus_dmamap_t		 cm_map;
	uint64_t		 cm_id;
	grant_ref_t		*cm_sg_refs;
	struct bio		*cm_bp;
	grant_ref_t		 cm_gref_head;
	void			*cm_data;
	size_t			 cm_datalen;
	u_int			 cm_nseg;
	int			 cm_operation;
	blkif_sector_t		 cm_sector_number;
	int			 cm_status;
	xbd_cbcf_t		*cm_complete;
};

typedef enum {
	XBD_Q_FREE,
	XBD_Q_READY,
	XBD_Q_BUSY,
	XBD_Q_COMPLETE,
	XBD_Q_BIO,
	XBD_Q_COUNT,
	XBD_Q_NONE = XBDCF_Q_MASK
} xbd_q_index_t;
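/*
 * Editorial note: the low byte of a command's cm_flags doubles as its
 * current queue index, which is why XBD_Q_NONE aliases XBDCF_Q_MASK.
 * Moving a command between queues is a mask-and-or on cm_flags:
 *
 *	cm->cm_flags &= ~XBDCF_Q_MASK;	(forget the old queue)
 *	cm->cm_flags |= XBD_Q_READY;	(record the new one)
 *
 * The queue accessors below perform exactly this bookkeeping.
 */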
typedef struct xbd_cm_q {
	TAILQ_HEAD(, xbd_command) q_tailq;
	uint32_t		  q_length;
	uint32_t		  q_max;
} xbd_cm_q_t;

typedef enum {
	XBD_STATE_DISCONNECTED,
	XBD_STATE_CONNECTED,
	XBD_STATE_SUSPENDED
} xbd_state_t;

typedef enum {
	XBDF_NONE	  = 0,
	XBDF_OPEN	  = 1 << 0, /* drive is open (can't shut down) */
	XBDF_BARRIER	  = 1 << 1, /* backend supports barriers */
	XBDF_FLUSH	  = 1 << 2, /* backend supports flush */
	XBDF_READY	  = 1 << 3, /* Is ready */
	XBDF_CM_SHORTAGE  = 1 << 4, /* Free cm resource shortage active. */
	XBDF_GNT_SHORTAGE = 1 << 5, /* Grant ref resource shortage active. */
	XBDF_WAIT_IDLE	  = 1 << 6  /*
				     * No new work until outstanding work
				     * completes.
				     */
} xbd_flag_t;

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.
 */
struct xbd_softc {
	device_t			 xbd_dev;
	struct disk			*xbd_disk;	/* disk params */
	struct bio_queue_head		 xbd_bioq;	/* sort queue */
	int				 xbd_unit;
	xbd_flag_t			 xbd_flags;
	int				 xbd_qfrozen_cnt;
	int				 xbd_vdevice;
	xbd_state_t			 xbd_state;
	u_int				 xbd_ring_pages;
	uint32_t			 xbd_max_requests;
	uint32_t			 xbd_max_request_segments;
	uint32_t			 xbd_max_request_size;
	grant_ref_t			 xbd_ring_ref[XBD_MAX_RING_PAGES];
	blkif_front_ring_t		 xbd_ring;
	xen_intr_handle_t		 xen_intr_handle;
	struct gnttab_free_callback	 xbd_callback;
	xbd_cm_q_t			 xbd_cm_q[XBD_Q_COUNT];
	bus_dma_tag_t			 xbd_io_dmat;

	/**
	 * The number of people holding this device open.  We won't allow a
	 * hot-unplug unless this is 0.
	 */
	int				 xbd_users;
	struct mtx			 xbd_io_lock;

	struct xbd_command		*xbd_shadow;
};

int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors, int device,
			uint16_t vdisk_info, unsigned long sector_size);

static inline void
xbd_added_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_cm_q *cmq;

	cmq = &sc->xbd_cm_q[index];
	cmq->q_length++;
	if (cmq->q_length > cmq->q_max)
		cmq->q_max = cmq->q_length;
}

static inline void
xbd_removed_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
	sc->xbd_cm_q[index].q_length--;
}

static inline uint32_t
xbd_queue_length(struct xbd_softc *sc, xbd_q_index_t index)
{
	return (sc->xbd_cm_q[index].q_length);
}

static inline void
xbd_initq_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_cm_q *cmq;

	cmq = &sc->xbd_cm_q[index];
	TAILQ_INIT(&cmq->q_tailq);
	cmq->q_length = 0;
	cmq->q_max = 0;
}

static inline void
xbd_enqueue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));
	if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
		panic("%s: command %p is already on queue %d.",
		    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
	TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= index;
	xbd_added_qentry(cm->cm_sc, index);
}

static inline void
xbd_requeue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));
	if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
		panic("%s: command %p is already on queue %d.",
		    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
	TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= index;
	xbd_added_qentry(cm->cm_sc, index);
}

static inline struct xbd_command *
xbd_dequeue_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_command *cm;

	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));

	if ((cm = TAILQ_FIRST(&sc->xbd_cm_q[index].q_tailq)) != NULL) {
		if ((cm->cm_flags & XBDCF_Q_MASK) != index) {
			panic("%s: command %p is on queue %d, "
			    "not specified queue %d",
			    __func__, cm,
			    cm->cm_flags & XBDCF_Q_MASK,
			    index);
		}
		TAILQ_REMOVE(&sc->xbd_cm_q[index].q_tailq, cm, cm_link);
		cm->cm_flags &= ~XBDCF_Q_MASK;
		cm->cm_flags |= XBD_Q_NONE;
		xbd_removed_qentry(cm->cm_sc, index);
	}
	return (cm);
}

static inline void
xbd_remove_cm(struct xbd_command *cm, xbd_q_index_t expected_index)
{
	xbd_q_index_t index;

	index = cm->cm_flags & XBDCF_Q_MASK;

	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));

	if (index != expected_index) {
		panic("%s: command %p is on queue %d, not specified queue %d",
		    __func__, cm, index, expected_index);
	}
	TAILQ_REMOVE(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= XBD_Q_NONE;
	xbd_removed_qentry(cm->cm_sc, index);
}
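/*
 * Illustrative sketch of a command's life cycle through the queues above,
 * inferred from these accessors (blkfront.c is the authoritative source):
 *
 *	cm = xbd_dequeue_cm(sc, XBD_Q_FREE);	(claim an unused command)
 *	... fill in cm_bp, cm_data, cm_datalen ...
 *	xbd_enqueue_cm(cm, XBD_Q_READY);	(stage for dispatch)
 *	cm = xbd_dequeue_cm(sc, XBD_Q_READY);	(push onto the shared ring)
 *	xbd_enqueue_cm(cm, XBD_Q_BUSY);		(in flight at the backend)
 *	xbd_remove_cm(cm, XBD_Q_BUSY);		(on completion interrupt)
 *	xbd_enqueue_cm(cm, XBD_Q_COMPLETE);	(await final processing)
 */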
static inline void
xbd_initq_bio(struct xbd_softc *sc)
{
	bioq_init(&sc->xbd_bioq);
}

static inline void
xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp)
{
	bioq_insert_tail(&sc->xbd_bioq, bp);
	xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline void
xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp)
{
	bioq_insert_head(&sc->xbd_bioq, bp);
	xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline struct bio *
xbd_dequeue_bio(struct xbd_softc *sc)
{
	struct bio *bp;

	if ((bp = bioq_first(&sc->xbd_bioq)) != NULL) {
		bioq_remove(&sc->xbd_bioq, bp);
		xbd_removed_qentry(sc, XBD_Q_BIO);
	}
	return (bp);
}

static inline void
xbd_initqs(struct xbd_softc *sc)
{
	u_int index;

	for (index = 0; index < XBD_Q_COUNT; index++)
		xbd_initq_cm(sc, index);

	xbd_initq_bio(sc);
}

#endif /* __XEN_BLKFRONT_BLOCK_H__ */