Deleted Added
full compact
hv_storvsc_drv_freebsd.c (263065) hv_storvsc_drv_freebsd.c (266794)
1/*-
2 * Copyright (c) 2009-2012 Microsoft Corp.
3 * Copyright (c) 2012 NetApp Inc.
4 * Copyright (c) 2012 Citrix Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/**
30 * StorVSC driver for Hyper-V. This driver presents a SCSI HBA interface
31 * to the Comman Access Method (CAM) layer. CAM control blocks (CCBs) are
32 * converted into VSCSI protocol messages which are delivered to the parent
33 * partition StorVSP driver over the Hyper-V VMBUS.
34 */
35#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2009-2012 Microsoft Corp.
3 * Copyright (c) 2012 NetApp Inc.
4 * Copyright (c) 2012 Citrix Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/**
30 * StorVSC driver for Hyper-V. This driver presents a SCSI HBA interface
31 * to the Comman Access Method (CAM) layer. CAM control blocks (CCBs) are
32 * converted into VSCSI protocol messages which are delivered to the parent
33 * partition StorVSP driver over the Hyper-V VMBUS.
34 */
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: stable/10/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c 263065 2014-03-12 07:27:05Z mav $");
36__FBSDID("$FreeBSD: stable/10/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c 266794 2014-05-28 09:06:36Z marius $");
37
38#include <sys/param.h>
39#include <sys/proc.h>
40#include <sys/condvar.h>
41#include <sys/systm.h>
42#include <sys/sockio.h>
43#include <sys/mbuf.h>
44#include <sys/malloc.h>
45#include <sys/module.h>
46#include <sys/kernel.h>
47#include <sys/queue.h>
48#include <sys/lock.h>
49#include <sys/sx.h>
50#include <sys/taskqueue.h>
51#include <sys/bus.h>
52#include <sys/mutex.h>
53#include <sys/callout.h>
54#include <vm/vm.h>
55#include <vm/pmap.h>
56#include <sys/lock.h>
57#include <sys/sema.h>
58
59#include <cam/cam.h>
60#include <cam/cam_ccb.h>
61#include <cam/cam_periph.h>
62#include <cam/cam_sim.h>
63#include <cam/cam_xpt_sim.h>
64#include <cam/cam_xpt_internal.h>
65#include <cam/cam_debug.h>
66#include <cam/scsi/scsi_all.h>
67#include <cam/scsi/scsi_message.h>
68
69
70#include <dev/hyperv/include/hyperv.h>
71#include "hv_vstorage.h"
72
73#define STORVSC_RINGBUFFER_SIZE (20*PAGE_SIZE)
74#define STORVSC_MAX_LUNS_PER_TARGET (64)
75#define STORVSC_MAX_IO_REQUESTS (STORVSC_MAX_LUNS_PER_TARGET * 2)
76#define BLKVSC_MAX_IDE_DISKS_PER_TARGET (1)
77#define BLKVSC_MAX_IO_REQUESTS STORVSC_MAX_IO_REQUESTS
78#define STORVSC_MAX_TARGETS (1)
79
80struct storvsc_softc;
81
82enum storvsc_request_type {
83 WRITE_TYPE,
84 READ_TYPE,
85 UNKNOWN_TYPE
86};
87
88struct hv_storvsc_request {
89 LIST_ENTRY(hv_storvsc_request) link;
90 struct vstor_packet vstor_packet;
91 hv_vmbus_multipage_buffer data_buf;
92 void *sense_data;
93 uint8_t sense_info_len;
94 uint8_t retries;
95 union ccb *ccb;
96 struct storvsc_softc *softc;
97 struct callout callout;
98 struct sema synch_sema; /*Synchronize the request/response if needed */
99};
100
101struct storvsc_softc {
102 struct hv_device *hs_dev;
103 LIST_HEAD(, hv_storvsc_request) hs_free_list;
104 struct mtx hs_lock;
105 struct storvsc_driver_props *hs_drv_props;
106 int hs_unit;
107 uint32_t hs_frozen;
108 struct cam_sim *hs_sim;
109 struct cam_path *hs_path;
110 uint32_t hs_num_out_reqs;
111 boolean_t hs_destroy;
112 boolean_t hs_drain_notify;
113 struct sema hs_drain_sema;
114 struct hv_storvsc_request hs_init_req;
115 struct hv_storvsc_request hs_reset_req;
116};
117
118
119/**
120 * HyperV storvsc timeout testing cases:
121 * a. IO returned after first timeout;
122 * b. IO returned after second timeout and queue freeze;
123 * c. IO returned while timer handler is running
124 * The first can be tested by "sg_senddiag -vv /dev/daX",
125 * and the second and third can be done by
126 * "sg_wr_mode -v -p 08 -c 0,1a -m 0,ff /dev/daX".
127 */
128#define HVS_TIMEOUT_TEST 0
129
130/*
131 * Bus/adapter reset functionality on the Hyper-V host is
132 * buggy and it will be disabled until
133 * it can be further tested.
134 */
135#define HVS_HOST_RESET 0
136
137struct storvsc_driver_props {
138 char *drv_name;
139 char *drv_desc;
140 uint8_t drv_max_luns_per_target;
141 uint8_t drv_max_ios_per_target;
142 uint32_t drv_ringbuffer_size;
143};
144
145enum hv_storage_type {
146 DRIVER_BLKVSC,
147 DRIVER_STORVSC,
148 DRIVER_UNKNOWN
149};
150
151#define HS_MAX_ADAPTERS 10
152
153/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
154static const hv_guid gStorVscDeviceType={
155 .data = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
156 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f}
157};
158
159/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
160static const hv_guid gBlkVscDeviceType={
161 .data = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
162 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5}
163};
164
165static struct storvsc_driver_props g_drv_props_table[] = {
166 {"blkvsc", "Hyper-V IDE Storage Interface",
167 BLKVSC_MAX_IDE_DISKS_PER_TARGET, BLKVSC_MAX_IO_REQUESTS,
168 STORVSC_RINGBUFFER_SIZE},
169 {"storvsc", "Hyper-V SCSI Storage Interface",
170 STORVSC_MAX_LUNS_PER_TARGET, STORVSC_MAX_IO_REQUESTS,
171 STORVSC_RINGBUFFER_SIZE}
172};
173
174/* static functions */
175static int storvsc_probe(device_t dev);
176static int storvsc_attach(device_t dev);
177static int storvsc_detach(device_t dev);
178static void storvsc_poll(struct cam_sim * sim);
179static void storvsc_action(struct cam_sim * sim, union ccb * ccb);
180static void create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp);
181static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp);
182static enum hv_storage_type storvsc_get_storage_type(device_t dev);
183static void hv_storvsc_on_channel_callback(void *context);
184static void hv_storvsc_on_iocompletion( struct storvsc_softc *sc,
185 struct vstor_packet *vstor_packet,
186 struct hv_storvsc_request *request);
187static int hv_storvsc_connect_vsp(struct hv_device *device);
188static void storvsc_io_done(struct hv_storvsc_request *reqp);
189
190static device_method_t storvsc_methods[] = {
191 /* Device interface */
192 DEVMETHOD(device_probe, storvsc_probe),
193 DEVMETHOD(device_attach, storvsc_attach),
194 DEVMETHOD(device_detach, storvsc_detach),
195 DEVMETHOD(device_shutdown, bus_generic_shutdown),
196 DEVMETHOD_END
197};
198
199static driver_t storvsc_driver = {
200 "storvsc", storvsc_methods, sizeof(struct storvsc_softc),
201};
202
203static devclass_t storvsc_devclass;
204DRIVER_MODULE(storvsc, vmbus, storvsc_driver, storvsc_devclass, 0, 0);
205MODULE_VERSION(storvsc, 1);
206MODULE_DEPEND(storvsc, vmbus, 1, 1, 1);
207
208
209/**
210 * The host is capable of sending messages to us that are
211 * completely unsolicited. So, we need to address the race
212 * condition where we may be in the process of unloading the
213 * driver when the host may send us an unsolicited message.
214 * We address this issue by implementing a sequentially
215 * consistent protocol:
216 *
217 * 1. Channel callback is invoked while holding the the channel lock
218 * and an unloading driver will reset the channel callback under
219 * the protection of this channel lock.
220 *
221 * 2. To ensure bounded wait time for unloading a driver, we don't
222 * permit outgoing traffic once the device is marked as being
223 * destroyed.
224 *
225 * 3. Once the device is marked as being destroyed, we only
226 * permit incoming traffic to properly account for
227 * packets already sent out.
228 */
229static inline struct storvsc_softc *
230get_stor_device(struct hv_device *device,
231 boolean_t outbound)
232{
233 struct storvsc_softc *sc;
234
235 sc = device_get_softc(device->device);
236 if (sc == NULL) {
237 return NULL;
238 }
239
240 if (outbound) {
241 /*
242 * Here we permit outgoing I/O only
243 * if the device is not being destroyed.
244 */
245
246 if (sc->hs_destroy) {
247 sc = NULL;
248 }
249 } else {
250 /*
251 * inbound case; if being destroyed
252 * only permit to account for
253 * messages already sent out.
254 */
255 if (sc->hs_destroy && (sc->hs_num_out_reqs == 0)) {
256 sc = NULL;
257 }
258 }
259 return sc;
260}
261
262/**
263 * @brief initialize channel connection to parent partition
264 *
265 * @param dev a Hyper-V device pointer
266 * @returns 0 on success, non-zero error on failure
267 */
268static int
269hv_storvsc_channel_init(struct hv_device *dev)
270{
271 int ret = 0;
272 struct hv_storvsc_request *request;
273 struct vstor_packet *vstor_packet;
274 struct storvsc_softc *sc;
275
276 sc = get_stor_device(dev, TRUE);
277 if (sc == NULL) {
278 return ENODEV;
279 }
280
281 request = &sc->hs_init_req;
282 memset(request, 0, sizeof(struct hv_storvsc_request));
283 vstor_packet = &request->vstor_packet;
284 request->softc = sc;
285
286 /**
287 * Initiate the vsc/vsp initialization protocol on the open channel
288 */
289 sema_init(&request->synch_sema, 0, ("stor_synch_sema"));
290
291 vstor_packet->operation = VSTOR_OPERATION_BEGININITIALIZATION;
292 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
293
294
295 ret = hv_vmbus_channel_send_packet(
296 dev->channel,
297 vstor_packet,
298 sizeof(struct vstor_packet),
37
38#include <sys/param.h>
39#include <sys/proc.h>
40#include <sys/condvar.h>
41#include <sys/systm.h>
42#include <sys/sockio.h>
43#include <sys/mbuf.h>
44#include <sys/malloc.h>
45#include <sys/module.h>
46#include <sys/kernel.h>
47#include <sys/queue.h>
48#include <sys/lock.h>
49#include <sys/sx.h>
50#include <sys/taskqueue.h>
51#include <sys/bus.h>
52#include <sys/mutex.h>
53#include <sys/callout.h>
54#include <vm/vm.h>
55#include <vm/pmap.h>
56#include <sys/lock.h>
57#include <sys/sema.h>
58
59#include <cam/cam.h>
60#include <cam/cam_ccb.h>
61#include <cam/cam_periph.h>
62#include <cam/cam_sim.h>
63#include <cam/cam_xpt_sim.h>
64#include <cam/cam_xpt_internal.h>
65#include <cam/cam_debug.h>
66#include <cam/scsi/scsi_all.h>
67#include <cam/scsi/scsi_message.h>
68
69
70#include <dev/hyperv/include/hyperv.h>
71#include "hv_vstorage.h"
72
73#define STORVSC_RINGBUFFER_SIZE (20*PAGE_SIZE)
74#define STORVSC_MAX_LUNS_PER_TARGET (64)
75#define STORVSC_MAX_IO_REQUESTS (STORVSC_MAX_LUNS_PER_TARGET * 2)
76#define BLKVSC_MAX_IDE_DISKS_PER_TARGET (1)
77#define BLKVSC_MAX_IO_REQUESTS STORVSC_MAX_IO_REQUESTS
78#define STORVSC_MAX_TARGETS (1)
79
80struct storvsc_softc;
81
82enum storvsc_request_type {
83 WRITE_TYPE,
84 READ_TYPE,
85 UNKNOWN_TYPE
86};
87
88struct hv_storvsc_request {
89 LIST_ENTRY(hv_storvsc_request) link;
90 struct vstor_packet vstor_packet;
91 hv_vmbus_multipage_buffer data_buf;
92 void *sense_data;
93 uint8_t sense_info_len;
94 uint8_t retries;
95 union ccb *ccb;
96 struct storvsc_softc *softc;
97 struct callout callout;
98 struct sema synch_sema; /*Synchronize the request/response if needed */
99};
100
101struct storvsc_softc {
102 struct hv_device *hs_dev;
103 LIST_HEAD(, hv_storvsc_request) hs_free_list;
104 struct mtx hs_lock;
105 struct storvsc_driver_props *hs_drv_props;
106 int hs_unit;
107 uint32_t hs_frozen;
108 struct cam_sim *hs_sim;
109 struct cam_path *hs_path;
110 uint32_t hs_num_out_reqs;
111 boolean_t hs_destroy;
112 boolean_t hs_drain_notify;
113 struct sema hs_drain_sema;
114 struct hv_storvsc_request hs_init_req;
115 struct hv_storvsc_request hs_reset_req;
116};
117
118
119/**
120 * HyperV storvsc timeout testing cases:
121 * a. IO returned after first timeout;
122 * b. IO returned after second timeout and queue freeze;
123 * c. IO returned while timer handler is running
124 * The first can be tested by "sg_senddiag -vv /dev/daX",
125 * and the second and third can be done by
126 * "sg_wr_mode -v -p 08 -c 0,1a -m 0,ff /dev/daX".
127 */
128#define HVS_TIMEOUT_TEST 0
129
130/*
131 * Bus/adapter reset functionality on the Hyper-V host is
132 * buggy and it will be disabled until
133 * it can be further tested.
134 */
135#define HVS_HOST_RESET 0
136
137struct storvsc_driver_props {
138 char *drv_name;
139 char *drv_desc;
140 uint8_t drv_max_luns_per_target;
141 uint8_t drv_max_ios_per_target;
142 uint32_t drv_ringbuffer_size;
143};
144
145enum hv_storage_type {
146 DRIVER_BLKVSC,
147 DRIVER_STORVSC,
148 DRIVER_UNKNOWN
149};
150
151#define HS_MAX_ADAPTERS 10
152
153/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
154static const hv_guid gStorVscDeviceType={
155 .data = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
156 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f}
157};
158
159/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
160static const hv_guid gBlkVscDeviceType={
161 .data = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
162 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5}
163};
164
165static struct storvsc_driver_props g_drv_props_table[] = {
166 {"blkvsc", "Hyper-V IDE Storage Interface",
167 BLKVSC_MAX_IDE_DISKS_PER_TARGET, BLKVSC_MAX_IO_REQUESTS,
168 STORVSC_RINGBUFFER_SIZE},
169 {"storvsc", "Hyper-V SCSI Storage Interface",
170 STORVSC_MAX_LUNS_PER_TARGET, STORVSC_MAX_IO_REQUESTS,
171 STORVSC_RINGBUFFER_SIZE}
172};
173
174/* static functions */
175static int storvsc_probe(device_t dev);
176static int storvsc_attach(device_t dev);
177static int storvsc_detach(device_t dev);
178static void storvsc_poll(struct cam_sim * sim);
179static void storvsc_action(struct cam_sim * sim, union ccb * ccb);
180static void create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp);
181static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp);
182static enum hv_storage_type storvsc_get_storage_type(device_t dev);
183static void hv_storvsc_on_channel_callback(void *context);
184static void hv_storvsc_on_iocompletion( struct storvsc_softc *sc,
185 struct vstor_packet *vstor_packet,
186 struct hv_storvsc_request *request);
187static int hv_storvsc_connect_vsp(struct hv_device *device);
188static void storvsc_io_done(struct hv_storvsc_request *reqp);
189
190static device_method_t storvsc_methods[] = {
191 /* Device interface */
192 DEVMETHOD(device_probe, storvsc_probe),
193 DEVMETHOD(device_attach, storvsc_attach),
194 DEVMETHOD(device_detach, storvsc_detach),
195 DEVMETHOD(device_shutdown, bus_generic_shutdown),
196 DEVMETHOD_END
197};
198
199static driver_t storvsc_driver = {
200 "storvsc", storvsc_methods, sizeof(struct storvsc_softc),
201};
202
203static devclass_t storvsc_devclass;
204DRIVER_MODULE(storvsc, vmbus, storvsc_driver, storvsc_devclass, 0, 0);
205MODULE_VERSION(storvsc, 1);
206MODULE_DEPEND(storvsc, vmbus, 1, 1, 1);
207
208
209/**
210 * The host is capable of sending messages to us that are
211 * completely unsolicited. So, we need to address the race
212 * condition where we may be in the process of unloading the
213 * driver when the host may send us an unsolicited message.
214 * We address this issue by implementing a sequentially
215 * consistent protocol:
216 *
217 * 1. Channel callback is invoked while holding the the channel lock
218 * and an unloading driver will reset the channel callback under
219 * the protection of this channel lock.
220 *
221 * 2. To ensure bounded wait time for unloading a driver, we don't
222 * permit outgoing traffic once the device is marked as being
223 * destroyed.
224 *
225 * 3. Once the device is marked as being destroyed, we only
226 * permit incoming traffic to properly account for
227 * packets already sent out.
228 */
229static inline struct storvsc_softc *
230get_stor_device(struct hv_device *device,
231 boolean_t outbound)
232{
233 struct storvsc_softc *sc;
234
235 sc = device_get_softc(device->device);
236 if (sc == NULL) {
237 return NULL;
238 }
239
240 if (outbound) {
241 /*
242 * Here we permit outgoing I/O only
243 * if the device is not being destroyed.
244 */
245
246 if (sc->hs_destroy) {
247 sc = NULL;
248 }
249 } else {
250 /*
251 * inbound case; if being destroyed
252 * only permit to account for
253 * messages already sent out.
254 */
255 if (sc->hs_destroy && (sc->hs_num_out_reqs == 0)) {
256 sc = NULL;
257 }
258 }
259 return sc;
260}
261
262/**
263 * @brief initialize channel connection to parent partition
264 *
265 * @param dev a Hyper-V device pointer
266 * @returns 0 on success, non-zero error on failure
267 */
268static int
269hv_storvsc_channel_init(struct hv_device *dev)
270{
271 int ret = 0;
272 struct hv_storvsc_request *request;
273 struct vstor_packet *vstor_packet;
274 struct storvsc_softc *sc;
275
276 sc = get_stor_device(dev, TRUE);
277 if (sc == NULL) {
278 return ENODEV;
279 }
280
281 request = &sc->hs_init_req;
282 memset(request, 0, sizeof(struct hv_storvsc_request));
283 vstor_packet = &request->vstor_packet;
284 request->softc = sc;
285
286 /**
287 * Initiate the vsc/vsp initialization protocol on the open channel
288 */
289 sema_init(&request->synch_sema, 0, ("stor_synch_sema"));
290
291 vstor_packet->operation = VSTOR_OPERATION_BEGININITIALIZATION;
292 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
293
294
295 ret = hv_vmbus_channel_send_packet(
296 dev->channel,
297 vstor_packet,
298 sizeof(struct vstor_packet),
299 (uint64_t)request,
299 (uint64_t)(uintptr_t)request,
300 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
301 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
302
303 if (ret != 0) {
304 goto cleanup;
305 }
306
307 ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
308
309 if (ret != 0) {
310 goto cleanup;
311 }
312
313 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
314 vstor_packet->status != 0) {
315 goto cleanup;
316 }
317
318 /* reuse the packet for version range supported */
319
320 memset(vstor_packet, 0, sizeof(struct vstor_packet));
321 vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION;
322 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
323
324 vstor_packet->u.version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
325
326 /* revision is only significant for Windows guests */
327 vstor_packet->u.version.revision = 0;
328
329 ret = hv_vmbus_channel_send_packet(
330 dev->channel,
331 vstor_packet,
332 sizeof(struct vstor_packet),
300 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
301 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
302
303 if (ret != 0) {
304 goto cleanup;
305 }
306
307 ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
308
309 if (ret != 0) {
310 goto cleanup;
311 }
312
313 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
314 vstor_packet->status != 0) {
315 goto cleanup;
316 }
317
318 /* reuse the packet for version range supported */
319
320 memset(vstor_packet, 0, sizeof(struct vstor_packet));
321 vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION;
322 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
323
324 vstor_packet->u.version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
325
326 /* revision is only significant for Windows guests */
327 vstor_packet->u.version.revision = 0;
328
329 ret = hv_vmbus_channel_send_packet(
330 dev->channel,
331 vstor_packet,
332 sizeof(struct vstor_packet),
333 (uint64_t)request,
333 (uint64_t)(uintptr_t)request,
334 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
335 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
336
337 if (ret != 0) {
338 goto cleanup;
339 }
340
341 ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
342
343 if (ret) {
344 goto cleanup;
345 }
346
347 /* TODO: Check returned version */
348 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
349 vstor_packet->status != 0) {
350 goto cleanup;
351 }
352
353 /**
354 * Query channel properties
355 */
356 memset(vstor_packet, 0, sizeof(struct vstor_packet));
357 vstor_packet->operation = VSTOR_OPERATION_QUERYPROPERTIES;
358 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
359
360 ret = hv_vmbus_channel_send_packet(
361 dev->channel,
362 vstor_packet,
363 sizeof(struct vstor_packet),
334 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
335 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
336
337 if (ret != 0) {
338 goto cleanup;
339 }
340
341 ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
342
343 if (ret) {
344 goto cleanup;
345 }
346
347 /* TODO: Check returned version */
348 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
349 vstor_packet->status != 0) {
350 goto cleanup;
351 }
352
353 /**
354 * Query channel properties
355 */
356 memset(vstor_packet, 0, sizeof(struct vstor_packet));
357 vstor_packet->operation = VSTOR_OPERATION_QUERYPROPERTIES;
358 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
359
360 ret = hv_vmbus_channel_send_packet(
361 dev->channel,
362 vstor_packet,
363 sizeof(struct vstor_packet),
364 (uint64_t)request,
364 (uint64_t)(uintptr_t)request,
365 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
366 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
367
368 if ( ret != 0) {
369 goto cleanup;
370 }
371
372 ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
373
374 if (ret != 0) {
375 goto cleanup;
376 }
377
378 /* TODO: Check returned version */
379 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
380 vstor_packet->status != 0) {
381 goto cleanup;
382 }
383
384 memset(vstor_packet, 0, sizeof(struct vstor_packet));
385 vstor_packet->operation = VSTOR_OPERATION_ENDINITIALIZATION;
386 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
387
388 ret = hv_vmbus_channel_send_packet(
389 dev->channel,
390 vstor_packet,
391 sizeof(struct vstor_packet),
365 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
366 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
367
368 if ( ret != 0) {
369 goto cleanup;
370 }
371
372 ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
373
374 if (ret != 0) {
375 goto cleanup;
376 }
377
378 /* TODO: Check returned version */
379 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
380 vstor_packet->status != 0) {
381 goto cleanup;
382 }
383
384 memset(vstor_packet, 0, sizeof(struct vstor_packet));
385 vstor_packet->operation = VSTOR_OPERATION_ENDINITIALIZATION;
386 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
387
388 ret = hv_vmbus_channel_send_packet(
389 dev->channel,
390 vstor_packet,
391 sizeof(struct vstor_packet),
392 (uint64_t)request,
392 (uint64_t)(uintptr_t)request,
393 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
394 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
395
396 if (ret != 0) {
397 goto cleanup;
398 }
399
400 ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
401
402 if (ret != 0) {
403 goto cleanup;
404 }
405
406 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
407 vstor_packet->status != 0) {
408 goto cleanup;
409 }
410
411cleanup:
412 sema_destroy(&request->synch_sema);
413 return (ret);
414}
415
416/**
417 * @brief Open channel connection to paraent partition StorVSP driver
418 *
419 * Open and initialize channel connection to parent partition StorVSP driver.
420 *
421 * @param pointer to a Hyper-V device
422 * @returns 0 on success, non-zero error on failure
423 */
424static int
425hv_storvsc_connect_vsp(struct hv_device *dev)
426{
427 int ret = 0;
428 struct vmstor_chan_props props;
429 struct storvsc_softc *sc;
430
431 sc = device_get_softc(dev->device);
432
433 memset(&props, 0, sizeof(struct vmstor_chan_props));
434
435 /*
436 * Open the channel
437 */
438
439 ret = hv_vmbus_channel_open(
440 dev->channel,
441 sc->hs_drv_props->drv_ringbuffer_size,
442 sc->hs_drv_props->drv_ringbuffer_size,
443 (void *)&props,
444 sizeof(struct vmstor_chan_props),
445 hv_storvsc_on_channel_callback,
446 dev);
447
448
449 if (ret != 0) {
450 return ret;
451 }
452
453 ret = hv_storvsc_channel_init(dev);
454
455 return (ret);
456}
457
458#if HVS_HOST_RESET
459static int
460hv_storvsc_host_reset(struct hv_device *dev)
461{
462 int ret = 0;
463 struct storvsc_softc *sc;
464
465 struct hv_storvsc_request *request;
466 struct vstor_packet *vstor_packet;
467
468 sc = get_stor_device(dev, TRUE);
469 if (sc == NULL) {
470 return ENODEV;
471 }
472
473 request = &sc->hs_reset_req;
474 request->softc = sc;
475 vstor_packet = &request->vstor_packet;
476
477 sema_init(&request->synch_sema, 0, "stor synch sema");
478
479 vstor_packet->operation = VSTOR_OPERATION_RESETBUS;
480 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
481
482 ret = hv_vmbus_channel_send_packet(dev->channel,
483 vstor_packet,
484 sizeof(struct vstor_packet),
393 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
394 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
395
396 if (ret != 0) {
397 goto cleanup;
398 }
399
400 ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
401
402 if (ret != 0) {
403 goto cleanup;
404 }
405
406 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
407 vstor_packet->status != 0) {
408 goto cleanup;
409 }
410
411cleanup:
412 sema_destroy(&request->synch_sema);
413 return (ret);
414}
415
416/**
417 * @brief Open channel connection to paraent partition StorVSP driver
418 *
419 * Open and initialize channel connection to parent partition StorVSP driver.
420 *
421 * @param pointer to a Hyper-V device
422 * @returns 0 on success, non-zero error on failure
423 */
424static int
425hv_storvsc_connect_vsp(struct hv_device *dev)
426{
427 int ret = 0;
428 struct vmstor_chan_props props;
429 struct storvsc_softc *sc;
430
431 sc = device_get_softc(dev->device);
432
433 memset(&props, 0, sizeof(struct vmstor_chan_props));
434
435 /*
436 * Open the channel
437 */
438
439 ret = hv_vmbus_channel_open(
440 dev->channel,
441 sc->hs_drv_props->drv_ringbuffer_size,
442 sc->hs_drv_props->drv_ringbuffer_size,
443 (void *)&props,
444 sizeof(struct vmstor_chan_props),
445 hv_storvsc_on_channel_callback,
446 dev);
447
448
449 if (ret != 0) {
450 return ret;
451 }
452
453 ret = hv_storvsc_channel_init(dev);
454
455 return (ret);
456}
457
458#if HVS_HOST_RESET
459static int
460hv_storvsc_host_reset(struct hv_device *dev)
461{
462 int ret = 0;
463 struct storvsc_softc *sc;
464
465 struct hv_storvsc_request *request;
466 struct vstor_packet *vstor_packet;
467
468 sc = get_stor_device(dev, TRUE);
469 if (sc == NULL) {
470 return ENODEV;
471 }
472
473 request = &sc->hs_reset_req;
474 request->softc = sc;
475 vstor_packet = &request->vstor_packet;
476
477 sema_init(&request->synch_sema, 0, "stor synch sema");
478
479 vstor_packet->operation = VSTOR_OPERATION_RESETBUS;
480 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
481
482 ret = hv_vmbus_channel_send_packet(dev->channel,
483 vstor_packet,
484 sizeof(struct vstor_packet),
485 (uint64_t)&sc->hs_reset_req,
485 (uint64_t)(uintptr_t)&sc->hs_reset_req,
486 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
487 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
488
489 if (ret != 0) {
490 goto cleanup;
491 }
492
493 ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
494
495 if (ret) {
496 goto cleanup;
497 }
498
499
500 /*
501 * At this point, all outstanding requests in the adapter
502 * should have been flushed out and return to us
503 */
504
505cleanup:
506 sema_destroy(&request->synch_sema);
507 return (ret);
508}
509#endif /* HVS_HOST_RESET */
510
511/**
512 * @brief Function to initiate an I/O request
513 *
514 * @param device Hyper-V device pointer
515 * @param request pointer to a request structure
516 * @returns 0 on success, non-zero error on failure
517 */
518static int
519hv_storvsc_io_request(struct hv_device *device,
520 struct hv_storvsc_request *request)
521{
522 struct storvsc_softc *sc;
523 struct vstor_packet *vstor_packet = &request->vstor_packet;
524 int ret = 0;
525
526 sc = get_stor_device(device, TRUE);
527
528 if (sc == NULL) {
529 return ENODEV;
530 }
531
532 vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
533
534 vstor_packet->u.vm_srb.length = sizeof(struct vmscsi_req);
535
536 vstor_packet->u.vm_srb.sense_info_len = SENSE_BUFFER_SIZE;
537
538 vstor_packet->u.vm_srb.transfer_len = request->data_buf.length;
539
540 vstor_packet->operation = VSTOR_OPERATION_EXECUTESRB;
541
542
543 mtx_unlock(&request->softc->hs_lock);
544 if (request->data_buf.length) {
545 ret = hv_vmbus_channel_send_packet_multipagebuffer(
546 device->channel,
547 &request->data_buf,
548 vstor_packet,
549 sizeof(struct vstor_packet),
486 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
487 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
488
489 if (ret != 0) {
490 goto cleanup;
491 }
492
493 ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
494
495 if (ret) {
496 goto cleanup;
497 }
498
499
500 /*
501 * At this point, all outstanding requests in the adapter
502 * should have been flushed out and return to us
503 */
504
505cleanup:
506 sema_destroy(&request->synch_sema);
507 return (ret);
508}
509#endif /* HVS_HOST_RESET */
510
511/**
512 * @brief Function to initiate an I/O request
513 *
514 * @param device Hyper-V device pointer
515 * @param request pointer to a request structure
516 * @returns 0 on success, non-zero error on failure
517 */
518static int
519hv_storvsc_io_request(struct hv_device *device,
520 struct hv_storvsc_request *request)
521{
522 struct storvsc_softc *sc;
523 struct vstor_packet *vstor_packet = &request->vstor_packet;
524 int ret = 0;
525
526 sc = get_stor_device(device, TRUE);
527
528 if (sc == NULL) {
529 return ENODEV;
530 }
531
532 vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
533
534 vstor_packet->u.vm_srb.length = sizeof(struct vmscsi_req);
535
536 vstor_packet->u.vm_srb.sense_info_len = SENSE_BUFFER_SIZE;
537
538 vstor_packet->u.vm_srb.transfer_len = request->data_buf.length;
539
540 vstor_packet->operation = VSTOR_OPERATION_EXECUTESRB;
541
542
543 mtx_unlock(&request->softc->hs_lock);
544 if (request->data_buf.length) {
545 ret = hv_vmbus_channel_send_packet_multipagebuffer(
546 device->channel,
547 &request->data_buf,
548 vstor_packet,
549 sizeof(struct vstor_packet),
550 (uint64_t)request);
550 (uint64_t)(uintptr_t)request);
551
552 } else {
553 ret = hv_vmbus_channel_send_packet(
554 device->channel,
555 vstor_packet,
556 sizeof(struct vstor_packet),
551
552 } else {
553 ret = hv_vmbus_channel_send_packet(
554 device->channel,
555 vstor_packet,
556 sizeof(struct vstor_packet),
557 (uint64_t)request,
557 (uint64_t)(uintptr_t)request,
558 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
559 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
560 }
561 mtx_lock(&request->softc->hs_lock);
562
563 if (ret != 0) {
564 printf("Unable to send packet %p ret %d", vstor_packet, ret);
565 } else {
566 atomic_add_int(&sc->hs_num_out_reqs, 1);
567 }
568
569 return (ret);
570}
571
572
573/**
574 * Process IO_COMPLETION_OPERATION and ready
575 * the result to be completed for upper layer
576 * processing by the CAM layer.
577 */
578static void
579hv_storvsc_on_iocompletion(struct storvsc_softc *sc,
580 struct vstor_packet *vstor_packet,
581 struct hv_storvsc_request *request)
582{
583 struct vmscsi_req *vm_srb;
584
585 vm_srb = &vstor_packet->u.vm_srb;
586
587 request->sense_info_len = 0;
588 if (((vm_srb->scsi_status & 0xFF) == SCSI_STATUS_CHECK_COND) &&
589 (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)) {
590 /* Autosense data available */
591
592 KASSERT(vm_srb->sense_info_len <= request->sense_info_len,
593 ("vm_srb->sense_info_len <= "
594 "request->sense_info_len"));
595
596 memcpy(request->sense_data, vm_srb->u.sense_data,
597 vm_srb->sense_info_len);
598
599 request->sense_info_len = vm_srb->sense_info_len;
600 }
601
602 /* Complete request by passing to the CAM layer */
603 storvsc_io_done(request);
604 atomic_subtract_int(&sc->hs_num_out_reqs, 1);
605 if (sc->hs_drain_notify && (sc->hs_num_out_reqs == 0)) {
606 sema_post(&sc->hs_drain_sema);
607 }
608}
609
610static void
611hv_storvsc_on_channel_callback(void *context)
612{
613 int ret = 0;
614 struct hv_device *device = (struct hv_device *)context;
615 struct storvsc_softc *sc;
616 uint32_t bytes_recvd;
617 uint64_t request_id;
618 uint8_t packet[roundup2(sizeof(struct vstor_packet), 8)];
619 struct hv_storvsc_request *request;
620 struct vstor_packet *vstor_packet;
621
622 sc = get_stor_device(device, FALSE);
623 if (sc == NULL) {
624 return;
625 }
626
627 KASSERT(device, ("device"));
628
629 ret = hv_vmbus_channel_recv_packet(
630 device->channel,
631 packet,
632 roundup2(sizeof(struct vstor_packet), 8),
633 &bytes_recvd,
634 &request_id);
635
636 while ((ret == 0) && (bytes_recvd > 0)) {
558 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
559 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
560 }
561 mtx_lock(&request->softc->hs_lock);
562
563 if (ret != 0) {
564 printf("Unable to send packet %p ret %d", vstor_packet, ret);
565 } else {
566 atomic_add_int(&sc->hs_num_out_reqs, 1);
567 }
568
569 return (ret);
570}
571
572
573/**
574 * Process IO_COMPLETION_OPERATION and ready
575 * the result to be completed for upper layer
576 * processing by the CAM layer.
577 */
578static void
579hv_storvsc_on_iocompletion(struct storvsc_softc *sc,
580 struct vstor_packet *vstor_packet,
581 struct hv_storvsc_request *request)
582{
583 struct vmscsi_req *vm_srb;
584
585 vm_srb = &vstor_packet->u.vm_srb;
586
587 request->sense_info_len = 0;
588 if (((vm_srb->scsi_status & 0xFF) == SCSI_STATUS_CHECK_COND) &&
589 (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)) {
590 /* Autosense data available */
591
592 KASSERT(vm_srb->sense_info_len <= request->sense_info_len,
593 ("vm_srb->sense_info_len <= "
594 "request->sense_info_len"));
595
596 memcpy(request->sense_data, vm_srb->u.sense_data,
597 vm_srb->sense_info_len);
598
599 request->sense_info_len = vm_srb->sense_info_len;
600 }
601
602 /* Complete request by passing to the CAM layer */
603 storvsc_io_done(request);
604 atomic_subtract_int(&sc->hs_num_out_reqs, 1);
605 if (sc->hs_drain_notify && (sc->hs_num_out_reqs == 0)) {
606 sema_post(&sc->hs_drain_sema);
607 }
608}
609
610static void
611hv_storvsc_on_channel_callback(void *context)
612{
613 int ret = 0;
614 struct hv_device *device = (struct hv_device *)context;
615 struct storvsc_softc *sc;
616 uint32_t bytes_recvd;
617 uint64_t request_id;
618 uint8_t packet[roundup2(sizeof(struct vstor_packet), 8)];
619 struct hv_storvsc_request *request;
620 struct vstor_packet *vstor_packet;
621
622 sc = get_stor_device(device, FALSE);
623 if (sc == NULL) {
624 return;
625 }
626
627 KASSERT(device, ("device"));
628
629 ret = hv_vmbus_channel_recv_packet(
630 device->channel,
631 packet,
632 roundup2(sizeof(struct vstor_packet), 8),
633 &bytes_recvd,
634 &request_id);
635
636 while ((ret == 0) && (bytes_recvd > 0)) {
637 request = (struct hv_storvsc_request *)request_id;
637 request = (struct hv_storvsc_request *)(uintptr_t)request_id;
638 KASSERT(request, ("request"));
639
640 if ((request == &sc->hs_init_req) ||
641 (request == &sc->hs_reset_req)) {
642 memcpy(&request->vstor_packet, packet,
643 sizeof(struct vstor_packet));
644 sema_post(&request->synch_sema);
645 } else {
646 vstor_packet = (struct vstor_packet *)packet;
647 switch(vstor_packet->operation) {
648 case VSTOR_OPERATION_COMPLETEIO:
649 hv_storvsc_on_iocompletion(sc,
650 vstor_packet, request);
651 break;
652 case VSTOR_OPERATION_REMOVEDEVICE:
653 /* TODO: implement */
654 break;
655 default:
656 break;
657 }
658 }
659 ret = hv_vmbus_channel_recv_packet(
660 device->channel,
661 packet,
662 roundup2(sizeof(struct vstor_packet), 8),
663 &bytes_recvd,
664 &request_id);
665 }
666}
667
668/**
669 * @brief StorVSC probe function
670 *
671 * Device probe function. Returns 0 if the input device is a StorVSC
672 * device. Otherwise, a ENXIO is returned. If the input device is
673 * for BlkVSC (paravirtual IDE) device and this support is disabled in
674 * favor of the emulated ATA/IDE device, return ENXIO.
675 *
676 * @param a device
677 * @returns 0 on success, ENXIO if not a matcing StorVSC device
678 */
679static int
680storvsc_probe(device_t dev)
681{
682 int ata_disk_enable = 0;
683 int ret = ENXIO;
684
685 switch (storvsc_get_storage_type(dev)) {
686 case DRIVER_BLKVSC:
687 if(bootverbose)
688 device_printf(dev, "DRIVER_BLKVSC-Emulated ATA/IDE probe\n");
689 if (!getenv_int("hw.ata.disk_enable", &ata_disk_enable)) {
690 if(bootverbose)
691 device_printf(dev,
692 "Enlightened ATA/IDE detected\n");
693 ret = 0;
694 } else if(bootverbose)
695 device_printf(dev, "Emulated ATA/IDE set (hw.ata.disk_enable set)\n");
696 break;
697 case DRIVER_STORVSC:
698 if(bootverbose)
699 device_printf(dev, "Enlightened SCSI device detected\n");
700 ret = 0;
701 break;
702 default:
703 ret = ENXIO;
704 }
705 return (ret);
706}
707
708/**
709 * @brief StorVSC attach function
710 *
711 * Function responsible for allocating per-device structures,
712 * setting up CAM interfaces and scanning for available LUNs to
713 * be used for SCSI device peripherals.
714 *
715 * @param a device
716 * @returns 0 on success or an error on failure
717 */
718static int
719storvsc_attach(device_t dev)
720{
721 struct hv_device *hv_dev = vmbus_get_devctx(dev);
722 enum hv_storage_type stor_type;
723 struct storvsc_softc *sc;
724 struct cam_devq *devq;
725 int ret, i;
726 struct hv_storvsc_request *reqp;
727 struct root_hold_token *root_mount_token = NULL;
728
729 /*
730 * We need to serialize storvsc attach calls.
731 */
732 root_mount_token = root_mount_hold("storvsc");
733
734 sc = device_get_softc(dev);
735 if (sc == NULL) {
736 ret = ENOMEM;
737 goto cleanup;
738 }
739
740 stor_type = storvsc_get_storage_type(dev);
741
742 if (stor_type == DRIVER_UNKNOWN) {
743 ret = ENODEV;
744 goto cleanup;
745 }
746
747 bzero(sc, sizeof(struct storvsc_softc));
748
749 /* fill in driver specific properties */
750 sc->hs_drv_props = &g_drv_props_table[stor_type];
751
752 /* fill in device specific properties */
753 sc->hs_unit = device_get_unit(dev);
754 sc->hs_dev = hv_dev;
755 device_set_desc(dev, g_drv_props_table[stor_type].drv_desc);
756
757 LIST_INIT(&sc->hs_free_list);
758 mtx_init(&sc->hs_lock, "hvslck", NULL, MTX_DEF);
759
760 for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) {
761 reqp = malloc(sizeof(struct hv_storvsc_request),
762 M_DEVBUF, M_WAITOK|M_ZERO);
763 reqp->softc = sc;
764
765 LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
766 }
767
768 sc->hs_destroy = FALSE;
769 sc->hs_drain_notify = FALSE;
770 sema_init(&sc->hs_drain_sema, 0, "Store Drain Sema");
771
772 ret = hv_storvsc_connect_vsp(hv_dev);
773 if (ret != 0) {
774 goto cleanup;
775 }
776
777 /*
778 * Create the device queue.
779 * Hyper-V maps each target to one SCSI HBA
780 */
781 devq = cam_simq_alloc(sc->hs_drv_props->drv_max_ios_per_target);
782 if (devq == NULL) {
783 device_printf(dev, "Failed to alloc device queue\n");
784 ret = ENOMEM;
785 goto cleanup;
786 }
787
788 sc->hs_sim = cam_sim_alloc(storvsc_action,
789 storvsc_poll,
790 sc->hs_drv_props->drv_name,
791 sc,
792 sc->hs_unit,
793 &sc->hs_lock, 1,
794 sc->hs_drv_props->drv_max_ios_per_target,
795 devq);
796
797 if (sc->hs_sim == NULL) {
798 device_printf(dev, "Failed to alloc sim\n");
799 cam_simq_free(devq);
800 ret = ENOMEM;
801 goto cleanup;
802 }
803
804 mtx_lock(&sc->hs_lock);
805 /* bus_id is set to 0, need to get it from VMBUS channel query? */
806 if (xpt_bus_register(sc->hs_sim, dev, 0) != CAM_SUCCESS) {
807 cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
808 mtx_unlock(&sc->hs_lock);
809 device_printf(dev, "Unable to register SCSI bus\n");
810 ret = ENXIO;
811 goto cleanup;
812 }
813
814 if (xpt_create_path(&sc->hs_path, /*periph*/NULL,
815 cam_sim_path(sc->hs_sim),
816 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
817 xpt_bus_deregister(cam_sim_path(sc->hs_sim));
818 cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
819 mtx_unlock(&sc->hs_lock);
820 device_printf(dev, "Unable to create path\n");
821 ret = ENXIO;
822 goto cleanup;
823 }
824
825 mtx_unlock(&sc->hs_lock);
826
827 root_mount_rel(root_mount_token);
828 return (0);
829
830
831cleanup:
832 root_mount_rel(root_mount_token);
833 while (!LIST_EMPTY(&sc->hs_free_list)) {
834 reqp = LIST_FIRST(&sc->hs_free_list);
835 LIST_REMOVE(reqp, link);
836 free(reqp, M_DEVBUF);
837 }
838 return (ret);
839}
840
841/**
842 * @brief StorVSC device detach function
843 *
844 * This function is responsible for safely detaching a
845 * StorVSC device. This includes waiting for inbound responses
846 * to complete and freeing associated per-device structures.
847 *
848 * @param dev a device
849 * returns 0 on success
850 */
static int
storvsc_detach(device_t dev)
{
	struct storvsc_softc *sc = device_get_softc(dev);
	struct hv_storvsc_request *reqp = NULL;
	struct hv_device *hv_device = vmbus_get_devctx(dev);

	/*
	 * Flag the softc as going away under the channel's inbound lock so
	 * the channel callback (which runs under that same lock) sees a
	 * consistent hs_destroy value.
	 */
	mtx_lock(&hv_device->channel->inbound_lock);
	sc->hs_destroy = TRUE;
	mtx_unlock(&hv_device->channel->inbound_lock);

	/*
	 * At this point, all outbound traffic should be disabled. We
	 * only allow inbound traffic (responses) to proceed so that
	 * outstanding requests can be completed.
	 */

	/*
	 * Block until hv_storvsc_on_iocompletion() posts hs_drain_sema
	 * when hs_num_out_reqs drops to zero.
	 * NOTE(review): if no requests are outstanding when we get here,
	 * no completion will ever post the semaphore and this wait could
	 * block forever — confirm detach is only reached with I/O in
	 * flight, or consider checking hs_num_out_reqs first.
	 */
	sc->hs_drain_notify = TRUE;
	sema_wait(&sc->hs_drain_sema);
	sc->hs_drain_notify = FALSE;

	/*
	 * Since we have already drained, we don't need to busy wait.
	 * The call to close the channel will reset the callback
	 * under the protection of the incoming channel lock.
	 */

	hv_vmbus_channel_close(hv_device->channel);

	/* Release every pre-allocated request structure. */
	mtx_lock(&sc->hs_lock);
	while (!LIST_EMPTY(&sc->hs_free_list)) {
		reqp = LIST_FIRST(&sc->hs_free_list);
		LIST_REMOVE(reqp, link);

		free(reqp, M_DEVBUF);
	}
	mtx_unlock(&sc->hs_lock);
	return (0);
}
890
891#if HVS_TIMEOUT_TEST
892/**
893 * @brief unit test for timed out operations
894 *
895 * This function provides unit testing capability to simulate
896 * timed out operations. Recompilation with HV_TIMEOUT_TEST=1
897 * is required.
898 *
899 * @param reqp pointer to a request structure
900 * @param opcode SCSI operation being performed
901 * @param wait if 1, wait for I/O to complete
902 */
static void
storvsc_timeout_test(struct hv_storvsc_request *reqp,
		uint8_t opcode, int wait)
{
	int ret;
	union ccb *ccb = reqp->ccb;
	struct storvsc_softc *sc = reqp->softc;

	/* Only exercise the request whose CDB opcode is under test. */
	if (reqp->vstor_packet.vm_srb.cdb[0] != opcode) {
		return;
	}

	/* Take the event mutex before submitting so the completion path
	 * cannot signal the cv before we start waiting on it. */
	if (wait) {
		mtx_lock(&reqp->event.mtx);
	}
	ret = hv_storvsc_io_request(sc->hs_dev, reqp);
	if (ret != 0) {
		if (wait) {
			mtx_unlock(&reqp->event.mtx);
		}
		printf("%s: io_request failed with %d.\n",
				__func__, ret);
		/* Fail the CCB and recycle the request under the SIM lock. */
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		mtx_lock(&sc->hs_lock);
		storvsc_free_request(sc, reqp);
		xpt_done(ccb);
		mtx_unlock(&sc->hs_lock);
		return;
	}

	if (wait) {
		xpt_print(ccb->ccb_h.path,
			"%u: %s: waiting for IO return.\n",
			ticks, __func__);
		/* Wait up to 60s for storvsc_io_done() to signal the cv. */
		ret = cv_timedwait(&reqp->event.cv, &reqp->event.mtx, 60*hz);
		mtx_unlock(&reqp->event.mtx);
		xpt_print(ccb->ccb_h.path, "%u: %s: %s.\n",
			ticks, __func__, (ret == 0)?
			"IO return detected" :
			"IO return not detected");
		/*
		 * Now both the timer handler and io done are running
		 * simultaneously. We want to confirm the io done always
		 * finishes after the timer handler exits. So reqp used by
		 * timer handler is not freed or stale. Do busy loop for
		 * another 1/10 second to make sure io done does
		 * wait for the timer handler to complete.
		 */
		DELAY(100*1000);
		mtx_lock(&sc->hs_lock);
		xpt_print(ccb->ccb_h.path,
			"%u: %s: finishing, queue frozen %d, "
			"ccb status 0x%x scsi_status 0x%x.\n",
			ticks, __func__, sc->hs_frozen,
			ccb->ccb_h.status,
			ccb->csio.scsi_status);
		mtx_unlock(&sc->hs_lock);
	}
}
962#endif /* HVS_TIMEOUT_TEST */
963
964/**
965 * @brief timeout handler for requests
966 *
967 * This function is called as a result of a callout expiring.
968 *
969 * @param arg pointer to a request
970 */
static void
storvsc_timeout(void *arg)
{
	struct hv_storvsc_request *reqp = arg;
	struct storvsc_softc *sc = reqp->softc;
	union ccb *ccb = reqp->ccb;

	/*
	 * First expiry: log the stall and re-arm the callout to give the
	 * host one more full timeout period before taking action.
	 */
	if (reqp->retries == 0) {
		mtx_lock(&sc->hs_lock);
		xpt_print(ccb->ccb_h.path,
			"%u: IO timed out (req=0x%p), wait for another %u secs.\n",
			ticks, reqp, ccb->ccb_h.timeout / 1000);
		cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL);
		mtx_unlock(&sc->hs_lock);

		reqp->retries++;
		callout_reset(&reqp->callout,
			(ccb->ccb_h.timeout * hz) / 1000,
			storvsc_timeout, reqp);
#if HVS_TIMEOUT_TEST
		storvsc_timeout_test(reqp, SEND_DIAGNOSTIC, 0);
#endif
		return;
	}

	/*
	 * Second expiry: the request is badly overdue; freeze the SIM
	 * queue (once) so no further I/O is dispatched until the stuck
	 * request finally completes (storvsc_io_done() unfreezes).
	 */
	mtx_lock(&sc->hs_lock);
	xpt_print(ccb->ccb_h.path,
		"%u: IO (reqp = 0x%p) did not return for %u seconds, %s.\n",
		ticks, reqp, ccb->ccb_h.timeout * (reqp->retries+1) / 1000,
		(sc->hs_frozen == 0)?
		"freezing the queue" : "the queue is already frozen");
	if (sc->hs_frozen == 0) {
		sc->hs_frozen = 1;
		xpt_freeze_simq(xpt_path_sim(ccb->ccb_h.path), 1);
	}
	mtx_unlock(&sc->hs_lock);

#if HVS_TIMEOUT_TEST
	storvsc_timeout_test(reqp, MODE_SELECT_10, 1);
#endif
}
1012
1013/**
1014 * @brief StorVSC device poll function
1015 *
1016 * This function is responsible for servicing requests when
1017 * interrupts are disabled (i.e when we are dumping core.)
1018 *
1019 * @param sim a pointer to a CAM SCSI interface module
1020 */
static void
storvsc_poll(struct cam_sim *sim)
{
	struct storvsc_softc *sc = cam_sim_softc(sim);

	/*
	 * CAM invokes us with the SIM lock held.  The channel callback
	 * takes its own locks, so drop ours for the duration of the
	 * manual ring-buffer service and retake it before returning.
	 */
	mtx_assert(&sc->hs_lock, MA_OWNED);
	mtx_unlock(&sc->hs_lock);
	hv_storvsc_on_channel_callback(sc->hs_dev);
	mtx_lock(&sc->hs_lock);
}
1031
1032/**
1033 * @brief StorVSC device action function
1034 *
1035 * This function is responsible for handling SCSI operations which
1036 * are passed from the CAM layer. The requests are in the form of
1037 * CAM control blocks which indicate the action being performed.
1038 * Not all actions require converting the request to a VSCSI protocol
1039 * message - these actions can be responded to by this driver.
1040 * Requests which are destined for a backend storage device are converted
1041 * to a VSCSI protocol message and sent on the channel connection associated
1042 * with this device.
1043 *
1044 * @param sim pointer to a CAM SCSI interface module
1045 * @param ccb pointer to a CAM control block
1046 */
static void
storvsc_action(struct cam_sim *sim, union ccb *ccb)
{
	struct storvsc_softc *sc = cam_sim_softc(sim);
	int res;

	/* CAM calls us with the SIM lock held. */
	mtx_assert(&sc->hs_lock, MA_OWNED);
	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ: {
		/* Describe the (virtual) HBA's capabilities to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_TAG_ABLE|PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = STORVSC_MAX_TARGETS;
		cpi->max_lun = sc->hs_drv_props->drv_max_luns_per_target;
		cpi->initiator_id = cpi->max_target;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 300000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC2;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, sc->hs_drv_props->drv_name, HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_GET_TRAN_SETTINGS: {
		/* Report fixed transport settings; nothing is negotiable. */
		struct ccb_trans_settings *cts = &ccb->cts;

		cts->transport = XPORT_SAS;
		cts->transport_version = 0;
		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_SPC2;

		/* enable tag queuing and disconnected mode */
		cts->proto_specific.valid = CTS_SCSI_VALID_TQ;
		cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
		cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
		cts->xport_specific.valid = CTS_SPI_VALID_DISC;
		cts->xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_SET_TRAN_SETTINGS: {
		/* Settings are fixed; accept and ignore. */
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_CALC_GEOMETRY: {
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		return;
	}
	case XPT_RESET_BUS:
	case XPT_RESET_DEV: {
#if HVS_HOST_RESET
		if ((res = hv_storvsc_host_reset(sc->hs_dev)) != 0) {
			xpt_print(ccb->ccb_h.path,
				"hv_storvsc_host_reset failed with %d\n", res);
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			xpt_done(ccb);
			return;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
#else
		xpt_print(ccb->ccb_h.path,
			"%s reset not supported.\n",
			(ccb->ccb_h.func_code == XPT_RESET_BUS)?
			"bus" : "dev");
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
#endif	/* HVS_HOST_RESET */
	}
	/* NOTE(review): XPT_IMMED_NOTIFY shares the SCSI-I/O path here —
	 * confirm that is intentional and not a leftover. */
	case XPT_SCSI_IO:
	case XPT_IMMED_NOTIFY: {
		struct hv_storvsc_request *reqp = NULL;

		/* NOTE(review): message says "cdl_len"; likely "cdb_len". */
		if (ccb->csio.cdb_len == 0) {
			panic("cdl_len is 0\n");
		}

		/* No free request slots: ask CAM to requeue and freeze the
		 * SIM queue until a completion releases a slot. */
		if (LIST_EMPTY(&sc->hs_free_list)) {
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			if (sc->hs_frozen == 0) {
				sc->hs_frozen = 1;
				xpt_freeze_simq(sim, /* count*/1);
			}
			xpt_done(ccb);
			return;
		}

		reqp = LIST_FIRST(&sc->hs_free_list);
		LIST_REMOVE(reqp, link);

		bzero(reqp, sizeof(struct hv_storvsc_request));
		reqp->softc = sc;

		/* Translate the CCB into a VSCSI request packet. */
		ccb->ccb_h.status |= CAM_SIM_QUEUED;
		create_storvsc_request(ccb, reqp);

		/* Arm a per-request timeout unless the caller disabled it. */
		if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
			callout_init(&reqp->callout, CALLOUT_MPSAFE);
			callout_reset(&reqp->callout,
				(ccb->ccb_h.timeout * hz) / 1000,
				storvsc_timeout, reqp);
#if HVS_TIMEOUT_TEST
			cv_init(&reqp->event.cv, "storvsc timeout cv");
			mtx_init(&reqp->event.mtx, "storvsc timeout mutex",
					NULL, MTX_DEF);
			switch (reqp->vstor_packet.vm_srb.cdb[0]) {
				case MODE_SELECT_10:
				case SEND_DIAGNOSTIC:
					/* To have timer send the request. */
					return;
				default:
					break;
			}
#endif /* HVS_TIMEOUT_TEST */
		}

		if ((res = hv_storvsc_io_request(sc->hs_dev, reqp)) != 0) {
			xpt_print(ccb->ccb_h.path,
				"hv_storvsc_io_request failed with %d\n", res);
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			storvsc_free_request(sc, reqp);
			xpt_done(ccb);
			return;
		}
		return;
	}

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}
}
1197
1198/**
1199 * @brief Fill in a request structure based on a CAM control block
1200 *
1201 * Fills in a request structure based on the contents of a CAM control
1202 * block. The request structure holds the payload information for
1203 * VSCSI protocol request.
1204 *
1205 * @param ccb pointer to a CAM contorl block
1206 * @param reqp pointer to a request structure
1207 */
static void
create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp)
{
	struct ccb_scsiio *csio = &ccb->csio;
	uint64_t phys_addr;
	uint32_t bytes_to_copy = 0;
	uint32_t pfn_num = 0;
	uint32_t pfn;

	/* refer to struct vmscsi_req for meanings of these two fields */
	reqp->vstor_packet.u.vm_srb.port =
		cam_sim_unit(xpt_path_sim(ccb->ccb_h.path));
	reqp->vstor_packet.u.vm_srb.path_id =
		cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));

	reqp->vstor_packet.u.vm_srb.target_id = ccb->ccb_h.target_id;
	reqp->vstor_packet.u.vm_srb.lun = ccb->ccb_h.target_lun;

	/* Copy the CDB; CAM stores it either inline or via pointer. */
	reqp->vstor_packet.u.vm_srb.cdb_len = csio->cdb_len;
	if(ccb->ccb_h.flags & CAM_CDB_POINTER) {
		memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_ptr,
			csio->cdb_len);
	} else {
		memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_bytes,
			csio->cdb_len);
	}

	/* Map CAM transfer direction onto the VSCSI data_in encoding. */
	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
    	case CAM_DIR_OUT:
    		reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE;
    		break;
    	case CAM_DIR_IN:
    		reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE;
    		break;
    	case CAM_DIR_NONE:
    		reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
    		break;
    	default:
    		reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
    		break;
	}

	reqp->sense_data = &csio->sense_data;
	reqp->sense_info_len = csio->sense_len;

	reqp->ccb = ccb;
	/*
	KASSERT((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0,
			("ccb is scatter gather valid\n"));
	*/
	/*
	 * Record the transfer length and the offset of the buffer start
	 * within its first page; the buffer is assumed to be virtually
	 * contiguous (no S/G list — see the disabled KASSERT above).
	 */
	if (csio->dxfer_len != 0) {
		reqp->data_buf.length = csio->dxfer_len;
		bytes_to_copy = csio->dxfer_len;
		phys_addr = vtophys(csio->data_ptr);
		reqp->data_buf.offset = phys_addr - trunc_page(phys_addr);
	}

	/*
	 * Walk the buffer page by page, resolving each page's physical
	 * frame number into data_buf.pfn_array for the multipage VMBUS
	 * packet.  NOTE(review): no bound check against the pfn_array
	 * capacity here — presumably dxfer_len is limited elsewhere so
	 * pfn_num cannot overflow the array; confirm.
	 */
	while (bytes_to_copy != 0) {
		int bytes, page_offset;
		phys_addr = vtophys(&csio->data_ptr[reqp->data_buf.length -
				bytes_to_copy]);
		pfn = phys_addr >> PAGE_SHIFT;
		reqp->data_buf.pfn_array[pfn_num] = pfn;
		page_offset = phys_addr - trunc_page(phys_addr);

		/* Consume at most the remainder of the current page. */
		bytes = min(PAGE_SIZE - page_offset, bytes_to_copy);

		bytes_to_copy -= bytes;
		pfn_num++;
	}
}
1279
1280/**
1281 * @brief completion function before returning to CAM
1282 *
1283 * I/O process has been completed and the result needs
1284 * to be passed to the CAM layer.
1285 * Free resources related to this request.
1286 *
1287 * @param reqp pointer to a request structure
1288 */
1289static void
1290storvsc_io_done(struct hv_storvsc_request *reqp)
1291{
1292 union ccb *ccb = reqp->ccb;
1293 struct ccb_scsiio *csio = &ccb->csio;
1294 struct storvsc_softc *sc = reqp->softc;
1295 struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb;
1296
1297 if (reqp->retries > 0) {
1298 mtx_lock(&sc->hs_lock);
1299#if HVS_TIMEOUT_TEST
1300 xpt_print(ccb->ccb_h.path,
1301 "%u: IO returned after timeout, "
1302 "waking up timer handler if any.\n", ticks);
1303 mtx_lock(&reqp->event.mtx);
1304 cv_signal(&reqp->event.cv);
1305 mtx_unlock(&reqp->event.mtx);
1306#endif
1307 reqp->retries = 0;
1308 xpt_print(ccb->ccb_h.path,
1309 "%u: IO returned after timeout, "
1310 "stopping timer if any.\n", ticks);
1311 mtx_unlock(&sc->hs_lock);
1312 }
1313
1314 /*
1315 * callout_drain() will wait for the timer handler to finish
1316 * if it is running. So we don't need any lock to synchronize
1317 * between this routine and the timer handler.
1318 * Note that we need to make sure reqp is not freed when timer
1319 * handler is using or will use it.
1320 */
1321 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1322 callout_drain(&reqp->callout);
1323 }
1324
1325 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1326 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1327 if (vm_srb->scsi_status == SCSI_STATUS_OK) {
1328 ccb->ccb_h.status |= CAM_REQ_CMP;
1329 } else {
1330 mtx_lock(&sc->hs_lock);
1331 xpt_print(ccb->ccb_h.path,
1332 "srovsc scsi_status = %d\n",
1333 vm_srb->scsi_status);
1334 mtx_unlock(&sc->hs_lock);
1335 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1336 }
1337
1338 ccb->csio.scsi_status = (vm_srb->scsi_status & 0xFF);
1339 ccb->csio.resid = ccb->csio.dxfer_len - vm_srb->transfer_len;
1340
1341 if (reqp->sense_info_len != 0) {
1342 csio->sense_resid = csio->sense_len - reqp->sense_info_len;
1343 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1344 }
1345
1346 mtx_lock(&sc->hs_lock);
1347 if (reqp->softc->hs_frozen == 1) {
1348 xpt_print(ccb->ccb_h.path,
1349 "%u: storvsc unfreezing softc 0x%p.\n",
1350 ticks, reqp->softc);
1351 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1352 reqp->softc->hs_frozen = 0;
1353 }
1354 storvsc_free_request(sc, reqp);
1355 xpt_done(ccb);
1356 mtx_unlock(&sc->hs_lock);
1357}
1358
1359/**
1360 * @brief Free a request structure
1361 *
1362 * Free a request structure by returning it to the free list
1363 *
1364 * @param sc pointer to a softc
1365 * @param reqp pointer to a request structure
1366 */
static void
storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp)
{

	/*
	 * Push reqp back onto the softc's free list.  Callers appear to
	 * hold sc->hs_lock (storvsc_action asserts it; storvsc_io_done
	 * locks it) — confirm before adding new call sites.
	 */
	LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
}
1373
1374/**
1375 * @brief Determine type of storage device from GUID
1376 *
1377 * Using the type GUID, determine if this is a StorVSC (paravirtual
1378 * SCSI or BlkVSC (paravirtual IDE) device.
1379 *
1380 * @param dev a device
1381 * returns an enum
1382 */
1383static enum hv_storage_type
1384storvsc_get_storage_type(device_t dev)
1385{
1386 const char *p = vmbus_get_type(dev);
1387
1388 if (!memcmp(p, &gBlkVscDeviceType, sizeof(hv_guid))) {
1389 return DRIVER_BLKVSC;
1390 } else if (!memcmp(p, &gStorVscDeviceType, sizeof(hv_guid))) {
1391 return DRIVER_STORVSC;
1392 }
1393 return (DRIVER_UNKNOWN);
1394}
1395
638 KASSERT(request, ("request"));
639
640 if ((request == &sc->hs_init_req) ||
641 (request == &sc->hs_reset_req)) {
642 memcpy(&request->vstor_packet, packet,
643 sizeof(struct vstor_packet));
644 sema_post(&request->synch_sema);
645 } else {
646 vstor_packet = (struct vstor_packet *)packet;
647 switch(vstor_packet->operation) {
648 case VSTOR_OPERATION_COMPLETEIO:
649 hv_storvsc_on_iocompletion(sc,
650 vstor_packet, request);
651 break;
652 case VSTOR_OPERATION_REMOVEDEVICE:
653 /* TODO: implement */
654 break;
655 default:
656 break;
657 }
658 }
659 ret = hv_vmbus_channel_recv_packet(
660 device->channel,
661 packet,
662 roundup2(sizeof(struct vstor_packet), 8),
663 &bytes_recvd,
664 &request_id);
665 }
666}
667
668/**
669 * @brief StorVSC probe function
670 *
671 * Device probe function. Returns 0 if the input device is a StorVSC
672 * device. Otherwise, a ENXIO is returned. If the input device is
673 * for BlkVSC (paravirtual IDE) device and this support is disabled in
674 * favor of the emulated ATA/IDE device, return ENXIO.
675 *
676 * @param a device
677 * @returns 0 on success, ENXIO if not a matcing StorVSC device
678 */
679static int
680storvsc_probe(device_t dev)
681{
682 int ata_disk_enable = 0;
683 int ret = ENXIO;
684
685 switch (storvsc_get_storage_type(dev)) {
686 case DRIVER_BLKVSC:
687 if(bootverbose)
688 device_printf(dev, "DRIVER_BLKVSC-Emulated ATA/IDE probe\n");
689 if (!getenv_int("hw.ata.disk_enable", &ata_disk_enable)) {
690 if(bootverbose)
691 device_printf(dev,
692 "Enlightened ATA/IDE detected\n");
693 ret = 0;
694 } else if(bootverbose)
695 device_printf(dev, "Emulated ATA/IDE set (hw.ata.disk_enable set)\n");
696 break;
697 case DRIVER_STORVSC:
698 if(bootverbose)
699 device_printf(dev, "Enlightened SCSI device detected\n");
700 ret = 0;
701 break;
702 default:
703 ret = ENXIO;
704 }
705 return (ret);
706}
707
708/**
709 * @brief StorVSC attach function
710 *
711 * Function responsible for allocating per-device structures,
712 * setting up CAM interfaces and scanning for available LUNs to
713 * be used for SCSI device peripherals.
714 *
715 * @param a device
716 * @returns 0 on success or an error on failure
717 */
718static int
719storvsc_attach(device_t dev)
720{
721 struct hv_device *hv_dev = vmbus_get_devctx(dev);
722 enum hv_storage_type stor_type;
723 struct storvsc_softc *sc;
724 struct cam_devq *devq;
725 int ret, i;
726 struct hv_storvsc_request *reqp;
727 struct root_hold_token *root_mount_token = NULL;
728
729 /*
730 * We need to serialize storvsc attach calls.
731 */
732 root_mount_token = root_mount_hold("storvsc");
733
734 sc = device_get_softc(dev);
735 if (sc == NULL) {
736 ret = ENOMEM;
737 goto cleanup;
738 }
739
740 stor_type = storvsc_get_storage_type(dev);
741
742 if (stor_type == DRIVER_UNKNOWN) {
743 ret = ENODEV;
744 goto cleanup;
745 }
746
747 bzero(sc, sizeof(struct storvsc_softc));
748
749 /* fill in driver specific properties */
750 sc->hs_drv_props = &g_drv_props_table[stor_type];
751
752 /* fill in device specific properties */
753 sc->hs_unit = device_get_unit(dev);
754 sc->hs_dev = hv_dev;
755 device_set_desc(dev, g_drv_props_table[stor_type].drv_desc);
756
757 LIST_INIT(&sc->hs_free_list);
758 mtx_init(&sc->hs_lock, "hvslck", NULL, MTX_DEF);
759
760 for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) {
761 reqp = malloc(sizeof(struct hv_storvsc_request),
762 M_DEVBUF, M_WAITOK|M_ZERO);
763 reqp->softc = sc;
764
765 LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
766 }
767
768 sc->hs_destroy = FALSE;
769 sc->hs_drain_notify = FALSE;
770 sema_init(&sc->hs_drain_sema, 0, "Store Drain Sema");
771
772 ret = hv_storvsc_connect_vsp(hv_dev);
773 if (ret != 0) {
774 goto cleanup;
775 }
776
777 /*
778 * Create the device queue.
779 * Hyper-V maps each target to one SCSI HBA
780 */
781 devq = cam_simq_alloc(sc->hs_drv_props->drv_max_ios_per_target);
782 if (devq == NULL) {
783 device_printf(dev, "Failed to alloc device queue\n");
784 ret = ENOMEM;
785 goto cleanup;
786 }
787
788 sc->hs_sim = cam_sim_alloc(storvsc_action,
789 storvsc_poll,
790 sc->hs_drv_props->drv_name,
791 sc,
792 sc->hs_unit,
793 &sc->hs_lock, 1,
794 sc->hs_drv_props->drv_max_ios_per_target,
795 devq);
796
797 if (sc->hs_sim == NULL) {
798 device_printf(dev, "Failed to alloc sim\n");
799 cam_simq_free(devq);
800 ret = ENOMEM;
801 goto cleanup;
802 }
803
804 mtx_lock(&sc->hs_lock);
805 /* bus_id is set to 0, need to get it from VMBUS channel query? */
806 if (xpt_bus_register(sc->hs_sim, dev, 0) != CAM_SUCCESS) {
807 cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
808 mtx_unlock(&sc->hs_lock);
809 device_printf(dev, "Unable to register SCSI bus\n");
810 ret = ENXIO;
811 goto cleanup;
812 }
813
814 if (xpt_create_path(&sc->hs_path, /*periph*/NULL,
815 cam_sim_path(sc->hs_sim),
816 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
817 xpt_bus_deregister(cam_sim_path(sc->hs_sim));
818 cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
819 mtx_unlock(&sc->hs_lock);
820 device_printf(dev, "Unable to create path\n");
821 ret = ENXIO;
822 goto cleanup;
823 }
824
825 mtx_unlock(&sc->hs_lock);
826
827 root_mount_rel(root_mount_token);
828 return (0);
829
830
831cleanup:
832 root_mount_rel(root_mount_token);
833 while (!LIST_EMPTY(&sc->hs_free_list)) {
834 reqp = LIST_FIRST(&sc->hs_free_list);
835 LIST_REMOVE(reqp, link);
836 free(reqp, M_DEVBUF);
837 }
838 return (ret);
839}
840
841/**
842 * @brief StorVSC device detach function
843 *
844 * This function is responsible for safely detaching a
845 * StorVSC device. This includes waiting for inbound responses
846 * to complete and freeing associated per-device structures.
847 *
848 * @param dev a device
849 * returns 0 on success
850 */
851static int
852storvsc_detach(device_t dev)
853{
854 struct storvsc_softc *sc = device_get_softc(dev);
855 struct hv_storvsc_request *reqp = NULL;
856 struct hv_device *hv_device = vmbus_get_devctx(dev);
857
858 mtx_lock(&hv_device->channel->inbound_lock);
859 sc->hs_destroy = TRUE;
860 mtx_unlock(&hv_device->channel->inbound_lock);
861
862 /*
863 * At this point, all outbound traffic should be disabled. We
864 * only allow inbound traffic (responses) to proceed so that
865 * outstanding requests can be completed.
866 */
867
868 sc->hs_drain_notify = TRUE;
869 sema_wait(&sc->hs_drain_sema);
870 sc->hs_drain_notify = FALSE;
871
872 /*
873 * Since we have already drained, we don't need to busy wait.
874 * The call to close the channel will reset the callback
875 * under the protection of the incoming channel lock.
876 */
877
878 hv_vmbus_channel_close(hv_device->channel);
879
880 mtx_lock(&sc->hs_lock);
881 while (!LIST_EMPTY(&sc->hs_free_list)) {
882 reqp = LIST_FIRST(&sc->hs_free_list);
883 LIST_REMOVE(reqp, link);
884
885 free(reqp, M_DEVBUF);
886 }
887 mtx_unlock(&sc->hs_lock);
888 return (0);
889}
890
#if HVS_TIMEOUT_TEST
/**
 * @brief unit test for timed out operations
 *
 * This function provides unit testing capability to simulate
 * timed out operations. Recompilation with HV_TIMEOUT_TEST=1
 * is required.
 *
 * @param reqp pointer to a request structure
 * @param opcode SCSI operation being performed
 * @param wait if 1, wait for I/O to complete
 */
static void
storvsc_timeout_test(struct hv_storvsc_request *reqp,
		uint8_t opcode, int wait)
{
	int ret;
	union ccb *ccb = reqp->ccb;
	struct storvsc_softc *sc = reqp->softc;

	/*
	 * Only the opcode selected by the caller participates in the
	 * simulation; any other request is left untouched.
	 *
	 * NOTE(review): other code in this file reaches the SRB via
	 * vstor_packet.u.vm_srb, but this test-only path uses
	 * vstor_packet.vm_srb -- verify it still compiles when
	 * HVS_TIMEOUT_TEST is enabled.
	 */
	if (reqp->vstor_packet.vm_srb.cdb[0] != opcode) {
		return;
	}

	/*
	 * Take the event mutex before issuing the I/O so the completion
	 * side cannot signal the cv before we reach cv_timedwait().
	 */
	if (wait) {
		mtx_lock(&reqp->event.mtx);
	}
	ret = hv_storvsc_io_request(sc->hs_dev, reqp);
	if (ret != 0) {
		/* Issue failed: drop the event mutex (if held), fail the CCB. */
		if (wait) {
			mtx_unlock(&reqp->event.mtx);
		}
		printf("%s: io_request failed with %d.\n",
				__func__, ret);
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		mtx_lock(&sc->hs_lock);
		storvsc_free_request(sc, reqp);
		xpt_done(ccb);
		mtx_unlock(&sc->hs_lock);
		return;
	}

	if (wait) {
		xpt_print(ccb->ccb_h.path,
				"%u: %s: waiting for IO return.\n",
				ticks, __func__);
		/* Wait up to 60 seconds for the io-done path to signal us. */
		ret = cv_timedwait(&reqp->event.cv, &reqp->event.mtx, 60*hz);
		mtx_unlock(&reqp->event.mtx);
		xpt_print(ccb->ccb_h.path, "%u: %s: %s.\n",
				ticks, __func__, (ret == 0)?
				"IO return detected" :
				"IO return not detected");
		/*
		 * Now both the timer handler and io done are running
		 * simultaneously. We want to confirm the io done always
		 * finishes after the timer handler exits. So reqp used by
		 * timer handler is not freed or stale. Do busy loop for
		 * another 1/10 second to make sure io done does
		 * wait for the timer handler to complete.
		 */
		DELAY(100*1000);
		mtx_lock(&sc->hs_lock);
		xpt_print(ccb->ccb_h.path,
				"%u: %s: finishing, queue frozen %d, "
				"ccb status 0x%x scsi_status 0x%x.\n",
				ticks, __func__, sc->hs_frozen,
				ccb->ccb_h.status,
				ccb->csio.scsi_status);
		mtx_unlock(&sc->hs_lock);
	}
}
#endif /* HVS_TIMEOUT_TEST */
963
964/**
965 * @brief timeout handler for requests
966 *
967 * This function is called as a result of a callout expiring.
968 *
969 * @param arg pointer to a request
970 */
971static void
972storvsc_timeout(void *arg)
973{
974 struct hv_storvsc_request *reqp = arg;
975 struct storvsc_softc *sc = reqp->softc;
976 union ccb *ccb = reqp->ccb;
977
978 if (reqp->retries == 0) {
979 mtx_lock(&sc->hs_lock);
980 xpt_print(ccb->ccb_h.path,
981 "%u: IO timed out (req=0x%p), wait for another %u secs.\n",
982 ticks, reqp, ccb->ccb_h.timeout / 1000);
983 cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL);
984 mtx_unlock(&sc->hs_lock);
985
986 reqp->retries++;
987 callout_reset(&reqp->callout,
988 (ccb->ccb_h.timeout * hz) / 1000,
989 storvsc_timeout, reqp);
990#if HVS_TIMEOUT_TEST
991 storvsc_timeout_test(reqp, SEND_DIAGNOSTIC, 0);
992#endif
993 return;
994 }
995
996 mtx_lock(&sc->hs_lock);
997 xpt_print(ccb->ccb_h.path,
998 "%u: IO (reqp = 0x%p) did not return for %u seconds, %s.\n",
999 ticks, reqp, ccb->ccb_h.timeout * (reqp->retries+1) / 1000,
1000 (sc->hs_frozen == 0)?
1001 "freezing the queue" : "the queue is already frozen");
1002 if (sc->hs_frozen == 0) {
1003 sc->hs_frozen = 1;
1004 xpt_freeze_simq(xpt_path_sim(ccb->ccb_h.path), 1);
1005 }
1006 mtx_unlock(&sc->hs_lock);
1007
1008#if HVS_TIMEOUT_TEST
1009 storvsc_timeout_test(reqp, MODE_SELECT_10, 1);
1010#endif
1011}
1012
1013/**
1014 * @brief StorVSC device poll function
1015 *
1016 * This function is responsible for servicing requests when
1017 * interrupts are disabled (i.e when we are dumping core.)
1018 *
1019 * @param sim a pointer to a CAM SCSI interface module
1020 */
1021static void
1022storvsc_poll(struct cam_sim *sim)
1023{
1024 struct storvsc_softc *sc = cam_sim_softc(sim);
1025
1026 mtx_assert(&sc->hs_lock, MA_OWNED);
1027 mtx_unlock(&sc->hs_lock);
1028 hv_storvsc_on_channel_callback(sc->hs_dev);
1029 mtx_lock(&sc->hs_lock);
1030}
1031
1032/**
1033 * @brief StorVSC device action function
1034 *
1035 * This function is responsible for handling SCSI operations which
1036 * are passed from the CAM layer. The requests are in the form of
1037 * CAM control blocks which indicate the action being performed.
1038 * Not all actions require converting the request to a VSCSI protocol
1039 * message - these actions can be responded to by this driver.
1040 * Requests which are destined for a backend storage device are converted
1041 * to a VSCSI protocol message and sent on the channel connection associated
1042 * with this device.
1043 *
1044 * @param sim pointer to a CAM SCSI interface module
1045 * @param ccb pointer to a CAM control block
1046 */
1047static void
1048storvsc_action(struct cam_sim *sim, union ccb *ccb)
1049{
1050 struct storvsc_softc *sc = cam_sim_softc(sim);
1051 int res;
1052
1053 mtx_assert(&sc->hs_lock, MA_OWNED);
1054 switch (ccb->ccb_h.func_code) {
1055 case XPT_PATH_INQ: {
1056 struct ccb_pathinq *cpi = &ccb->cpi;
1057
1058 cpi->version_num = 1;
1059 cpi->hba_inquiry = PI_TAG_ABLE|PI_SDTR_ABLE;
1060 cpi->target_sprt = 0;
1061 cpi->hba_misc = PIM_NOBUSRESET;
1062 cpi->hba_eng_cnt = 0;
1063 cpi->max_target = STORVSC_MAX_TARGETS;
1064 cpi->max_lun = sc->hs_drv_props->drv_max_luns_per_target;
1065 cpi->initiator_id = cpi->max_target;
1066 cpi->bus_id = cam_sim_bus(sim);
1067 cpi->base_transfer_speed = 300000;
1068 cpi->transport = XPORT_SAS;
1069 cpi->transport_version = 0;
1070 cpi->protocol = PROTO_SCSI;
1071 cpi->protocol_version = SCSI_REV_SPC2;
1072 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1073 strncpy(cpi->hba_vid, sc->hs_drv_props->drv_name, HBA_IDLEN);
1074 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1075 cpi->unit_number = cam_sim_unit(sim);
1076
1077 ccb->ccb_h.status = CAM_REQ_CMP;
1078 xpt_done(ccb);
1079 return;
1080 }
1081 case XPT_GET_TRAN_SETTINGS: {
1082 struct ccb_trans_settings *cts = &ccb->cts;
1083
1084 cts->transport = XPORT_SAS;
1085 cts->transport_version = 0;
1086 cts->protocol = PROTO_SCSI;
1087 cts->protocol_version = SCSI_REV_SPC2;
1088
1089 /* enable tag queuing and disconnected mode */
1090 cts->proto_specific.valid = CTS_SCSI_VALID_TQ;
1091 cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
1092 cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
1093 cts->xport_specific.valid = CTS_SPI_VALID_DISC;
1094 cts->xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
1095
1096 ccb->ccb_h.status = CAM_REQ_CMP;
1097 xpt_done(ccb);
1098 return;
1099 }
1100 case XPT_SET_TRAN_SETTINGS: {
1101 ccb->ccb_h.status = CAM_REQ_CMP;
1102 xpt_done(ccb);
1103 return;
1104 }
1105 case XPT_CALC_GEOMETRY:{
1106 cam_calc_geometry(&ccb->ccg, 1);
1107 xpt_done(ccb);
1108 return;
1109 }
1110 case XPT_RESET_BUS:
1111 case XPT_RESET_DEV:{
1112#if HVS_HOST_RESET
1113 if ((res = hv_storvsc_host_reset(sc->hs_dev)) != 0) {
1114 xpt_print(ccb->ccb_h.path,
1115 "hv_storvsc_host_reset failed with %d\n", res);
1116 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1117 xpt_done(ccb);
1118 return;
1119 }
1120 ccb->ccb_h.status = CAM_REQ_CMP;
1121 xpt_done(ccb);
1122 return;
1123#else
1124 xpt_print(ccb->ccb_h.path,
1125 "%s reset not supported.\n",
1126 (ccb->ccb_h.func_code == XPT_RESET_BUS)?
1127 "bus" : "dev");
1128 ccb->ccb_h.status = CAM_REQ_INVALID;
1129 xpt_done(ccb);
1130 return;
1131#endif /* HVS_HOST_RESET */
1132 }
1133 case XPT_SCSI_IO:
1134 case XPT_IMMED_NOTIFY: {
1135 struct hv_storvsc_request *reqp = NULL;
1136
1137 if (ccb->csio.cdb_len == 0) {
1138 panic("cdl_len is 0\n");
1139 }
1140
1141 if (LIST_EMPTY(&sc->hs_free_list)) {
1142 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1143 if (sc->hs_frozen == 0) {
1144 sc->hs_frozen = 1;
1145 xpt_freeze_simq(sim, /* count*/1);
1146 }
1147 xpt_done(ccb);
1148 return;
1149 }
1150
1151 reqp = LIST_FIRST(&sc->hs_free_list);
1152 LIST_REMOVE(reqp, link);
1153
1154 bzero(reqp, sizeof(struct hv_storvsc_request));
1155 reqp->softc = sc;
1156
1157 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1158 create_storvsc_request(ccb, reqp);
1159
1160 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1161 callout_init(&reqp->callout, CALLOUT_MPSAFE);
1162 callout_reset(&reqp->callout,
1163 (ccb->ccb_h.timeout * hz) / 1000,
1164 storvsc_timeout, reqp);
1165#if HVS_TIMEOUT_TEST
1166 cv_init(&reqp->event.cv, "storvsc timeout cv");
1167 mtx_init(&reqp->event.mtx, "storvsc timeout mutex",
1168 NULL, MTX_DEF);
1169 switch (reqp->vstor_packet.vm_srb.cdb[0]) {
1170 case MODE_SELECT_10:
1171 case SEND_DIAGNOSTIC:
1172 /* To have timer send the request. */
1173 return;
1174 default:
1175 break;
1176 }
1177#endif /* HVS_TIMEOUT_TEST */
1178 }
1179
1180 if ((res = hv_storvsc_io_request(sc->hs_dev, reqp)) != 0) {
1181 xpt_print(ccb->ccb_h.path,
1182 "hv_storvsc_io_request failed with %d\n", res);
1183 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1184 storvsc_free_request(sc, reqp);
1185 xpt_done(ccb);
1186 return;
1187 }
1188 return;
1189 }
1190
1191 default:
1192 ccb->ccb_h.status = CAM_REQ_INVALID;
1193 xpt_done(ccb);
1194 return;
1195 }
1196}
1197
1198/**
1199 * @brief Fill in a request structure based on a CAM control block
1200 *
1201 * Fills in a request structure based on the contents of a CAM control
1202 * block. The request structure holds the payload information for
1203 * VSCSI protocol request.
1204 *
1205 * @param ccb pointer to a CAM contorl block
1206 * @param reqp pointer to a request structure
1207 */
1208static void
1209create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp)
1210{
1211 struct ccb_scsiio *csio = &ccb->csio;
1212 uint64_t phys_addr;
1213 uint32_t bytes_to_copy = 0;
1214 uint32_t pfn_num = 0;
1215 uint32_t pfn;
1216
1217 /* refer to struct vmscsi_req for meanings of these two fields */
1218 reqp->vstor_packet.u.vm_srb.port =
1219 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path));
1220 reqp->vstor_packet.u.vm_srb.path_id =
1221 cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1222
1223 reqp->vstor_packet.u.vm_srb.target_id = ccb->ccb_h.target_id;
1224 reqp->vstor_packet.u.vm_srb.lun = ccb->ccb_h.target_lun;
1225
1226 reqp->vstor_packet.u.vm_srb.cdb_len = csio->cdb_len;
1227 if(ccb->ccb_h.flags & CAM_CDB_POINTER) {
1228 memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_ptr,
1229 csio->cdb_len);
1230 } else {
1231 memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_bytes,
1232 csio->cdb_len);
1233 }
1234
1235 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
1236 case CAM_DIR_OUT:
1237 reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE;
1238 break;
1239 case CAM_DIR_IN:
1240 reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE;
1241 break;
1242 case CAM_DIR_NONE:
1243 reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
1244 break;
1245 default:
1246 reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
1247 break;
1248 }
1249
1250 reqp->sense_data = &csio->sense_data;
1251 reqp->sense_info_len = csio->sense_len;
1252
1253 reqp->ccb = ccb;
1254 /*
1255 KASSERT((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0,
1256 ("ccb is scatter gather valid\n"));
1257 */
1258 if (csio->dxfer_len != 0) {
1259 reqp->data_buf.length = csio->dxfer_len;
1260 bytes_to_copy = csio->dxfer_len;
1261 phys_addr = vtophys(csio->data_ptr);
1262 reqp->data_buf.offset = phys_addr - trunc_page(phys_addr);
1263 }
1264
1265 while (bytes_to_copy != 0) {
1266 int bytes, page_offset;
1267 phys_addr = vtophys(&csio->data_ptr[reqp->data_buf.length -
1268 bytes_to_copy]);
1269 pfn = phys_addr >> PAGE_SHIFT;
1270 reqp->data_buf.pfn_array[pfn_num] = pfn;
1271 page_offset = phys_addr - trunc_page(phys_addr);
1272
1273 bytes = min(PAGE_SIZE - page_offset, bytes_to_copy);
1274
1275 bytes_to_copy -= bytes;
1276 pfn_num++;
1277 }
1278}
1279
1280/**
1281 * @brief completion function before returning to CAM
1282 *
1283 * I/O process has been completed and the result needs
1284 * to be passed to the CAM layer.
1285 * Free resources related to this request.
1286 *
1287 * @param reqp pointer to a request structure
1288 */
1289static void
1290storvsc_io_done(struct hv_storvsc_request *reqp)
1291{
1292 union ccb *ccb = reqp->ccb;
1293 struct ccb_scsiio *csio = &ccb->csio;
1294 struct storvsc_softc *sc = reqp->softc;
1295 struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb;
1296
1297 if (reqp->retries > 0) {
1298 mtx_lock(&sc->hs_lock);
1299#if HVS_TIMEOUT_TEST
1300 xpt_print(ccb->ccb_h.path,
1301 "%u: IO returned after timeout, "
1302 "waking up timer handler if any.\n", ticks);
1303 mtx_lock(&reqp->event.mtx);
1304 cv_signal(&reqp->event.cv);
1305 mtx_unlock(&reqp->event.mtx);
1306#endif
1307 reqp->retries = 0;
1308 xpt_print(ccb->ccb_h.path,
1309 "%u: IO returned after timeout, "
1310 "stopping timer if any.\n", ticks);
1311 mtx_unlock(&sc->hs_lock);
1312 }
1313
1314 /*
1315 * callout_drain() will wait for the timer handler to finish
1316 * if it is running. So we don't need any lock to synchronize
1317 * between this routine and the timer handler.
1318 * Note that we need to make sure reqp is not freed when timer
1319 * handler is using or will use it.
1320 */
1321 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1322 callout_drain(&reqp->callout);
1323 }
1324
1325 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1326 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1327 if (vm_srb->scsi_status == SCSI_STATUS_OK) {
1328 ccb->ccb_h.status |= CAM_REQ_CMP;
1329 } else {
1330 mtx_lock(&sc->hs_lock);
1331 xpt_print(ccb->ccb_h.path,
1332 "srovsc scsi_status = %d\n",
1333 vm_srb->scsi_status);
1334 mtx_unlock(&sc->hs_lock);
1335 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1336 }
1337
1338 ccb->csio.scsi_status = (vm_srb->scsi_status & 0xFF);
1339 ccb->csio.resid = ccb->csio.dxfer_len - vm_srb->transfer_len;
1340
1341 if (reqp->sense_info_len != 0) {
1342 csio->sense_resid = csio->sense_len - reqp->sense_info_len;
1343 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1344 }
1345
1346 mtx_lock(&sc->hs_lock);
1347 if (reqp->softc->hs_frozen == 1) {
1348 xpt_print(ccb->ccb_h.path,
1349 "%u: storvsc unfreezing softc 0x%p.\n",
1350 ticks, reqp->softc);
1351 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1352 reqp->softc->hs_frozen = 0;
1353 }
1354 storvsc_free_request(sc, reqp);
1355 xpt_done(ccb);
1356 mtx_unlock(&sc->hs_lock);
1357}
1358
1359/**
1360 * @brief Free a request structure
1361 *
1362 * Free a request structure by returning it to the free list
1363 *
1364 * @param sc pointer to a softc
1365 * @param reqp pointer to a request structure
1366 */
1367static void
1368storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp)
1369{
1370
1371 LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
1372}
1373
1374/**
1375 * @brief Determine type of storage device from GUID
1376 *
1377 * Using the type GUID, determine if this is a StorVSC (paravirtual
1378 * SCSI or BlkVSC (paravirtual IDE) device.
1379 *
1380 * @param dev a device
1381 * returns an enum
1382 */
1383static enum hv_storage_type
1384storvsc_get_storage_type(device_t dev)
1385{
1386 const char *p = vmbus_get_type(dev);
1387
1388 if (!memcmp(p, &gBlkVscDeviceType, sizeof(hv_guid))) {
1389 return DRIVER_BLKVSC;
1390 } else if (!memcmp(p, &gStorVscDeviceType, sizeof(hv_guid))) {
1391 return DRIVER_STORVSC;
1392 }
1393 return (DRIVER_UNKNOWN);
1394}
1395