hv_storvsc_drv_freebsd.c revision 275982
1/*-
2 * Copyright (c) 2009-2012 Microsoft Corp.
3 * Copyright (c) 2012 NetApp Inc.
4 * Copyright (c) 2012 Citrix Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice unmodified, this list of conditions, and the following
12 *    disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/**
30 * StorVSC driver for Hyper-V.  This driver presents a SCSI HBA interface
31 * to the Common Access Method (CAM) layer.  CAM control blocks (CCBs) are
32 * converted into VSCSI protocol messages which are delivered to the parent
33 * partition StorVSP driver over the Hyper-V VMBUS.
34 */
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: stable/10/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c 275982 2014-12-21 03:06:11Z smh $");
37
38#include <sys/param.h>
39#include <sys/proc.h>
40#include <sys/condvar.h>
41#include <sys/systm.h>
42#include <sys/sockio.h>
43#include <sys/mbuf.h>
44#include <sys/malloc.h>
45#include <sys/module.h>
46#include <sys/kernel.h>
47#include <sys/queue.h>
48#include <sys/lock.h>
49#include <sys/sx.h>
50#include <sys/taskqueue.h>
51#include <sys/bus.h>
52#include <sys/mutex.h>
53#include <sys/callout.h>
54#include <vm/vm.h>
55#include <vm/pmap.h>
56#include <sys/lock.h>
57#include <sys/sema.h>
58
59#include <cam/cam.h>
60#include <cam/cam_ccb.h>
61#include <cam/cam_periph.h>
62#include <cam/cam_sim.h>
63#include <cam/cam_xpt_sim.h>
64#include <cam/cam_xpt_internal.h>
65#include <cam/cam_debug.h>
66#include <cam/scsi/scsi_all.h>
67#include <cam/scsi/scsi_message.h>
68
69
70#include <dev/hyperv/include/hyperv.h>
71#include "hv_vstorage.h"
72
73#define STORVSC_RINGBUFFER_SIZE		(20*PAGE_SIZE)
74#define STORVSC_MAX_LUNS_PER_TARGET	(64)
75#define STORVSC_MAX_IO_REQUESTS		(STORVSC_MAX_LUNS_PER_TARGET * 2)
76#define BLKVSC_MAX_IDE_DISKS_PER_TARGET	(1)
77#define BLKVSC_MAX_IO_REQUESTS		STORVSC_MAX_IO_REQUESTS
78#define STORVSC_MAX_TARGETS		(2)
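/*
 * Note: STORVSC_MAX_IO_REQUESTS (64 LUNs x 2 = 128) bounds both the number
 * of pre-allocated hv_storvsc_request structures and the CAM SIM queue
 * depth set up in storvsc_attach() below.
 */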
79
80struct storvsc_softc;
81
82enum storvsc_request_type {
83	WRITE_TYPE,
84	READ_TYPE,
85	UNKNOWN_TYPE
86};
87
88struct hv_storvsc_request {
89	LIST_ENTRY(hv_storvsc_request) link;
90	struct vstor_packet	vstor_packet;
91	hv_vmbus_multipage_buffer data_buf;
92	void *sense_data;
93	uint8_t sense_info_len;
94	uint8_t retries;
95	union ccb *ccb;
96	struct storvsc_softc *softc;
97	struct callout callout;
98	struct sema synch_sema; /* Synchronize the request/response if needed */
99};
100
101struct storvsc_softc {
102	struct hv_device		*hs_dev;
103	LIST_HEAD(, hv_storvsc_request) hs_free_list;
104	struct mtx			hs_lock;
105	struct storvsc_driver_props	*hs_drv_props;
106	int				hs_unit;
107	uint32_t			hs_frozen;
108	struct cam_sim			*hs_sim;
109	struct cam_path			*hs_path;
110	uint32_t			hs_num_out_reqs;
111	boolean_t			hs_destroy;
112	boolean_t			hs_drain_notify;
113	struct sema 			hs_drain_sema;
114	struct hv_storvsc_request	hs_init_req;
115	struct hv_storvsc_request	hs_reset_req;
116};
117
118
119/**
120 * Hyper-V storvsc timeout test cases:
121 * a. IO returned after first timeout;
122 * b. IO returned after second timeout and queue freeze;
123 * c. IO returned while timer handler is running.
124 * Case (a) can be exercised with "sg_senddiag -vv /dev/daX", and
125 * cases (b) and (c) with
126 * "sg_wr_mode -v -p 08 -c 0,1a -m 0,ff /dev/daX".
127 */
128#define HVS_TIMEOUT_TEST 0
129
130/*
131 * Bus/adapter reset functionality on the Hyper-V host is
132 * buggy and is disabled until it can be
133 * further tested.
134 */
135#define HVS_HOST_RESET 0
136
137struct storvsc_driver_props {
138	char		*drv_name;
139	char		*drv_desc;
140	uint8_t		drv_max_luns_per_target;
141	uint8_t		drv_max_ios_per_target;
142	uint32_t	drv_ringbuffer_size;
143};
144
145enum hv_storage_type {
146	DRIVER_BLKVSC,
147	DRIVER_STORVSC,
148	DRIVER_UNKNOWN
149};
150
151#define HS_MAX_ADAPTERS 10
152
153/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
154static const hv_guid gStorVscDeviceType={
155	.data = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
156		 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f}
157};
158
159/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
160static const hv_guid gBlkVscDeviceType={
161	.data = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
162		 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5}
163};
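/*
 * Driver properties, indexed by enum hv_storage_type: the entry order must
 * match the enum (DRIVER_BLKVSC, DRIVER_STORVSC), since storvsc_attach()
 * uses the probed type directly as an index into this table.
 */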
164
165static struct storvsc_driver_props g_drv_props_table[] = {
166	{"blkvsc", "Hyper-V IDE Storage Interface",
167	 BLKVSC_MAX_IDE_DISKS_PER_TARGET, BLKVSC_MAX_IO_REQUESTS,
168	 STORVSC_RINGBUFFER_SIZE},
169	{"storvsc", "Hyper-V SCSI Storage Interface",
170	 STORVSC_MAX_LUNS_PER_TARGET, STORVSC_MAX_IO_REQUESTS,
171	 STORVSC_RINGBUFFER_SIZE}
172};
173
174/* static functions */
175static int storvsc_probe(device_t dev);
176static int storvsc_attach(device_t dev);
177static int storvsc_detach(device_t dev);
178static void storvsc_poll(struct cam_sim * sim);
179static void storvsc_action(struct cam_sim * sim, union ccb * ccb);
180static void create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp);
181static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp);
182static enum hv_storage_type storvsc_get_storage_type(device_t dev);
183static void hv_storvsc_on_channel_callback(void *context);
184static void hv_storvsc_on_iocompletion( struct storvsc_softc *sc,
185					struct vstor_packet *vstor_packet,
186					struct hv_storvsc_request *request);
187static int hv_storvsc_connect_vsp(struct hv_device *device);
188static void storvsc_io_done(struct hv_storvsc_request *reqp);
189
190static device_method_t storvsc_methods[] = {
191	/* Device interface */
192	DEVMETHOD(device_probe,		storvsc_probe),
193	DEVMETHOD(device_attach,	storvsc_attach),
194	DEVMETHOD(device_detach,	storvsc_detach),
195	DEVMETHOD(device_shutdown,      bus_generic_shutdown),
196	DEVMETHOD_END
197};
198
199static driver_t storvsc_driver = {
200	"storvsc", storvsc_methods, sizeof(struct storvsc_softc),
201};
202
203static devclass_t storvsc_devclass;
204DRIVER_MODULE(storvsc, vmbus, storvsc_driver, storvsc_devclass, 0, 0);
205MODULE_VERSION(storvsc, 1);
206MODULE_DEPEND(storvsc, vmbus, 1, 1, 1);
207
208
209/**
210 * The host is capable of sending messages to us that are
211 * completely unsolicited. So, we need to address the race
212 * condition where we may be in the process of unloading the
213 * driver when the host may send us an unsolicited message.
214 * We address this issue by implementing a sequentially
215 * consistent protocol:
216 *
217 * 1. Channel callback is invoked while holding the channel lock
218 *    and an unloading driver will reset the channel callback under
219 *    the protection of this channel lock.
220 *
221 * 2. To ensure bounded wait time for unloading a driver, we don't
222 *    permit outgoing traffic once the device is marked as being
223 *    destroyed.
224 *
225 * 3. Once the device is marked as being destroyed, we only
226 *    permit incoming traffic to properly account for
227 *    packets already sent out.
228 */
229static inline struct storvsc_softc *
230get_stor_device(struct hv_device *device,
231				boolean_t outbound)
232{
233	struct storvsc_softc *sc;
234
235	sc = device_get_softc(device->device);
236	if (sc == NULL) {
237		return NULL;
238	}
239
240	if (outbound) {
241		/*
242		 * Here we permit outgoing I/O only
243		 * if the device is not being destroyed.
244		 */
245
246		if (sc->hs_destroy) {
247			sc = NULL;
248		}
249	} else {
250		/*
251		 * inbound case; if being destroyed
252		 * only permit to account for
253		 * messages already sent out.
254		 */
255		if (sc->hs_destroy && (sc->hs_num_out_reqs == 0)) {
256			sc = NULL;
257		}
258	}
259	return sc;
260}
261
262/**
263 * @brief initialize channel connection to parent partition
264 *
265 * @param dev  a Hyper-V device pointer
266 * @returns  0 on success, non-zero error on failure
267 */
268static int
269hv_storvsc_channel_init(struct hv_device *dev)
270{
271	int ret = 0;
272	struct hv_storvsc_request *request;
273	struct vstor_packet *vstor_packet;
274	struct storvsc_softc *sc;
275
276	sc = get_stor_device(dev, TRUE);
277	if (sc == NULL) {
278		return ENODEV;
279	}
280
281	request = &sc->hs_init_req;
282	memset(request, 0, sizeof(struct hv_storvsc_request));
283	vstor_packet = &request->vstor_packet;
284	request->softc = sc;
285
286	/**
287	 * Initiate the vsc/vsp initialization protocol on the open channel
288	 */
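	/*
	 * The handshake below is four request/response exchanges, each sent
	 * with REQUEST_COMPLETION_FLAG and waited on via synch_sema:
	 * BEGININITIALIZATION, QUERYPROTOCOLVERSION, QUERYPROPERTIES and
	 * ENDINITIALIZATION.
	 */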
289	sema_init(&request->synch_sema, 0, ("stor_synch_sema"));
290
291	vstor_packet->operation = VSTOR_OPERATION_BEGININITIALIZATION;
292	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
293
294
295	ret = hv_vmbus_channel_send_packet(
296			dev->channel,
297			vstor_packet,
298			sizeof(struct vstor_packet),
299			(uint64_t)(uintptr_t)request,
300			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
301			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
302
303	if (ret != 0) {
304		goto cleanup;
305	}
306
307	ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
308
309	if (ret != 0) {
310		goto cleanup;
311	}
312
313	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
314		vstor_packet->status != 0) {
315		goto cleanup;
316	}
317
318	/* reuse the packet for version range supported */
319
320	memset(vstor_packet, 0, sizeof(struct vstor_packet));
321	vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION;
322	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
323
324	vstor_packet->u.version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
325
326	/* revision is only significant for Windows guests */
327	vstor_packet->u.version.revision = 0;
328
329	ret = hv_vmbus_channel_send_packet(
330			dev->channel,
331			vstor_packet,
332			sizeof(struct vstor_packet),
333			(uint64_t)(uintptr_t)request,
334			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
335			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
336
337	if (ret != 0) {
338		goto cleanup;
339	}
340
341	ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
342
343	if (ret) {
344		goto cleanup;
345	}
346
347	/* TODO: Check returned version */
348	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
349		vstor_packet->status != 0) {
350		goto cleanup;
351	}
352
353	/**
354	 * Query channel properties
355	 */
356	memset(vstor_packet, 0, sizeof(struct vstor_packet));
357	vstor_packet->operation = VSTOR_OPERATION_QUERYPROPERTIES;
358	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
359
360	ret = hv_vmbus_channel_send_packet(
361				dev->channel,
362				vstor_packet,
363				sizeof(struct vstor_packet),
364				(uint64_t)(uintptr_t)request,
365				HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
366				HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
367
368	if (ret != 0) {
369		goto cleanup;
370	}
371
372	ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
373
374	if (ret != 0) {
375		goto cleanup;
376	}
377
378	/* TODO: Check returned version */
379	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
380		vstor_packet->status != 0) {
381		goto cleanup;
382	}
383
384	memset(vstor_packet, 0, sizeof(struct vstor_packet));
385	vstor_packet->operation = VSTOR_OPERATION_ENDINITIALIZATION;
386	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
387
388	ret = hv_vmbus_channel_send_packet(
389			dev->channel,
390			vstor_packet,
391			sizeof(struct vstor_packet),
392			(uint64_t)(uintptr_t)request,
393			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
394			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
395
396	if (ret != 0) {
397		goto cleanup;
398	}
399
400	ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
401
402	if (ret != 0) {
403		goto cleanup;
404	}
405
406	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
407		vstor_packet->status != 0) {
408		goto cleanup;
409	}
410
411cleanup:
412	sema_destroy(&request->synch_sema);
413	return (ret);
414}
415
416/**
417 * @brief Open channel connection to parent partition StorVSP driver
418 *
419 * Open and initialize channel connection to parent partition StorVSP driver.
420 *
421 * @param dev pointer to a Hyper-V device
422 * @returns 0 on success, non-zero error on failure
423 */
424static int
425hv_storvsc_connect_vsp(struct hv_device *dev)
426{
427	int ret = 0;
428	struct vmstor_chan_props props;
429	struct storvsc_softc *sc;
430
431	sc = device_get_softc(dev->device);
432
433	memset(&props, 0, sizeof(struct vmstor_chan_props));
434
435	/*
436	 * Open the channel
437	 */
438
439	ret = hv_vmbus_channel_open(
440		dev->channel,
441		sc->hs_drv_props->drv_ringbuffer_size,
442		sc->hs_drv_props->drv_ringbuffer_size,
443		(void *)&props,
444		sizeof(struct vmstor_chan_props),
445		hv_storvsc_on_channel_callback,
446		dev);
447
448
449	if (ret != 0) {
450		return ret;
451	}
452
453	ret = hv_storvsc_channel_init(dev);
454
455	return (ret);
456}
457
458#if HVS_HOST_RESET
459static int
460hv_storvsc_host_reset(struct hv_device *dev)
461{
462	int ret = 0;
463	struct storvsc_softc *sc;
464
465	struct hv_storvsc_request *request;
466	struct vstor_packet *vstor_packet;
467
468	sc = get_stor_device(dev, TRUE);
469	if (sc == NULL) {
470		return ENODEV;
471	}
472
473	request = &sc->hs_reset_req;
474	request->softc = sc;
475	vstor_packet = &request->vstor_packet;
476
477	sema_init(&request->synch_sema, 0, "stor synch sema");
478
479	vstor_packet->operation = VSTOR_OPERATION_RESETBUS;
480	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
481
482	ret = hv_vmbus_channel_send_packet(dev->channel,
483			vstor_packet,
484			sizeof(struct vstor_packet),
485			(uint64_t)(uintptr_t)&sc->hs_reset_req,
486			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
487			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
488
489	if (ret != 0) {
490		goto cleanup;
491	}
492
493	ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
494
495	if (ret) {
496		goto cleanup;
497	}
498
499
500	/*
501	 * At this point, all outstanding requests in the adapter
502	 * should have been flushed out and returned to us.
503	 */
504
505cleanup:
506	sema_destroy(&request->synch_sema);
507	return (ret);
508}
509#endif /* HVS_HOST_RESET */
510
511/**
512 * @brief Function to initiate an I/O request
513 *
514 * @param device Hyper-V device pointer
515 * @param request pointer to a request structure
516 * @returns 0 on success, non-zero error on failure
517 */
518static int
519hv_storvsc_io_request(struct hv_device *device,
520					  struct hv_storvsc_request *request)
521{
522	struct storvsc_softc *sc;
523	struct vstor_packet *vstor_packet = &request->vstor_packet;
524	int ret = 0;
525
526	sc = get_stor_device(device, TRUE);
527
528	if (sc == NULL) {
529		return ENODEV;
530	}
531
532	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
533
534	vstor_packet->u.vm_srb.length = sizeof(struct vmscsi_req);
535
536	vstor_packet->u.vm_srb.sense_info_len = SENSE_BUFFER_SIZE;
537
538	vstor_packet->u.vm_srb.transfer_len = request->data_buf.length;
539
540	vstor_packet->operation = VSTOR_OPERATION_EXECUTESRB;
541
542
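	/*
	 * The softc lock is dropped across the VMBUS send, presumably so the
	 * channel send path does not run with hs_lock held; the lock is
	 * re-acquired immediately after the send.
	 */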
543	mtx_unlock(&request->softc->hs_lock);
544	if (request->data_buf.length) {
545		ret = hv_vmbus_channel_send_packet_multipagebuffer(
546				device->channel,
547				&request->data_buf,
548				vstor_packet,
549				sizeof(struct vstor_packet),
550				(uint64_t)(uintptr_t)request);
551
552	} else {
553		ret = hv_vmbus_channel_send_packet(
554			device->channel,
555			vstor_packet,
556			sizeof(struct vstor_packet),
557			(uint64_t)(uintptr_t)request,
558			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
559			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
560	}
561	mtx_lock(&request->softc->hs_lock);
562
563	if (ret != 0) {
564		printf("Unable to send packet %p ret %d\n", vstor_packet, ret);
565	} else {
566		atomic_add_int(&sc->hs_num_out_reqs, 1);
567	}
568
569	return (ret);
570}
571
572
573/**
574 * Process a VSTOR_OPERATION_COMPLETEIO response and ready
575 * the result for upper layer processing by the
576 * CAM layer.
577 */
578static void
579hv_storvsc_on_iocompletion(struct storvsc_softc *sc,
580			   struct vstor_packet *vstor_packet,
581			   struct hv_storvsc_request *request)
582{
583	struct vmscsi_req *vm_srb;
584
585	vm_srb = &vstor_packet->u.vm_srb;
586
587	if (((vm_srb->scsi_status & 0xFF) == SCSI_STATUS_CHECK_COND) &&
588			(vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)) {
589		/* Autosense data available */
590
591		KASSERT(vm_srb->sense_info_len <= request->sense_info_len,
592				("vm_srb->sense_info_len <= "
593				 "request->sense_info_len"));
594
595		memcpy(request->sense_data, vm_srb->u.sense_data,
596			vm_srb->sense_info_len);
597
598		request->sense_info_len = vm_srb->sense_info_len;
599	}
600
601	/* Complete request by passing to the CAM layer */
602	storvsc_io_done(request);
603	atomic_subtract_int(&sc->hs_num_out_reqs, 1);
604	if (sc->hs_drain_notify && (sc->hs_num_out_reqs == 0)) {
605		sema_post(&sc->hs_drain_sema);
606	}
607}
608
609static void
610hv_storvsc_on_channel_callback(void *context)
611{
612	int ret = 0;
613	struct hv_device *device = (struct hv_device *)context;
614	struct storvsc_softc *sc;
615	uint32_t bytes_recvd;
616	uint64_t request_id;
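	/* VMBUS ring buffer packets are 8-byte aligned, hence roundup2(). */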
617	uint8_t packet[roundup2(sizeof(struct vstor_packet), 8)];
618	struct hv_storvsc_request *request;
619	struct vstor_packet *vstor_packet;
620
621	sc = get_stor_device(device, FALSE);
622	if (sc == NULL) {
623		return;
624	}
625
626	KASSERT(device, ("device"));
627
628	ret = hv_vmbus_channel_recv_packet(
629			device->channel,
630			packet,
631			roundup2(sizeof(struct vstor_packet), 8),
632			&bytes_recvd,
633			&request_id);
634
635	while ((ret == 0) && (bytes_recvd > 0)) {
636		request = (struct hv_storvsc_request *)(uintptr_t)request_id;
637		KASSERT(request, ("request"));
638
639		if ((request == &sc->hs_init_req) ||
640			(request == &sc->hs_reset_req)) {
641			memcpy(&request->vstor_packet, packet,
642				   sizeof(struct vstor_packet));
643			sema_post(&request->synch_sema);
644		} else {
645			vstor_packet = (struct vstor_packet *)packet;
646			switch(vstor_packet->operation) {
647			case VSTOR_OPERATION_COMPLETEIO:
648				hv_storvsc_on_iocompletion(sc,
649							vstor_packet, request);
650				break;
651			case VSTOR_OPERATION_REMOVEDEVICE:
652				/* TODO: implement */
653				break;
654			default:
655				break;
656			}
657		}
658		ret = hv_vmbus_channel_recv_packet(
659				device->channel,
660				packet,
661				roundup2(sizeof(struct vstor_packet), 8),
662				&bytes_recvd,
663				&request_id);
664	}
665}
666
667/**
668 * @brief StorVSC probe function
669 *
670 * Device probe function.  Returns 0 if the input device is a StorVSC
671 * device.  Otherwise, ENXIO is returned.  If the input device is a
672 * BlkVSC (paravirtual IDE) device and this support is disabled in
673 * favor of the emulated ATA/IDE device, ENXIO is returned.
674 *
675 * @param dev a device
676 * @returns 0 on success, ENXIO if not a matching StorVSC device
677 */
678static int
679storvsc_probe(device_t dev)
680{
681	int ata_disk_enable = 0;
682	int ret	= ENXIO;
683
684	switch (storvsc_get_storage_type(dev)) {
685	case DRIVER_BLKVSC:
686		if(bootverbose)
687			device_printf(dev, "DRIVER_BLKVSC-Emulated ATA/IDE probe\n");
688		if (!getenv_int("hw.ata.disk_enable", &ata_disk_enable)) {
689			if(bootverbose)
690				device_printf(dev,
691					"Enlightened ATA/IDE detected\n");
692			ret = BUS_PROBE_DEFAULT;
693		} else if(bootverbose)
694			device_printf(dev, "Emulated ATA/IDE set (hw.ata.disk_enable set)\n");
695		break;
696	case DRIVER_STORVSC:
697		if(bootverbose)
698			device_printf(dev, "Enlightened SCSI device detected\n");
699		ret = BUS_PROBE_DEFAULT;
700		break;
701	default:
702		ret = ENXIO;
703	}
704	return (ret);
705}
706
707/**
708 * @brief StorVSC attach function
709 *
710 * Function responsible for allocating per-device structures,
711 * setting up CAM interfaces and scanning for available LUNs to
712 * be used for SCSI device peripherals.
713 *
714 * @param dev a device
715 * @returns 0 on success or an error on failure
716 */
717static int
718storvsc_attach(device_t dev)
719{
720	struct hv_device *hv_dev = vmbus_get_devctx(dev);
721	enum hv_storage_type stor_type;
722	struct storvsc_softc *sc;
723	struct cam_devq *devq;
724	int ret, i;
725	struct hv_storvsc_request *reqp;
726	struct root_hold_token *root_mount_token = NULL;
727
728	/*
729	 * We need to serialize storvsc attach calls.
730	 */
731	root_mount_token = root_mount_hold("storvsc");
732
733	sc = device_get_softc(dev);
734	if (sc == NULL) {
735		ret = ENOMEM;
736		goto cleanup;
737	}
738
739	stor_type = storvsc_get_storage_type(dev);
740
741	if (stor_type == DRIVER_UNKNOWN) {
742		ret = ENODEV;
743		goto cleanup;
744	}
745
746	bzero(sc, sizeof(struct storvsc_softc));
747
748	/* fill in driver specific properties */
749	sc->hs_drv_props = &g_drv_props_table[stor_type];
750
751	/* fill in device specific properties */
752	sc->hs_unit	= device_get_unit(dev);
753	sc->hs_dev	= hv_dev;
754	device_set_desc(dev, g_drv_props_table[stor_type].drv_desc);
755
756	LIST_INIT(&sc->hs_free_list);
757	mtx_init(&sc->hs_lock, "hvslck", NULL, MTX_DEF);
758
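	/*
	 * Pre-allocate the request structures onto a per-adapter free list,
	 * presumably so the I/O path never has to call malloc().
	 */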
759	for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) {
760		reqp = malloc(sizeof(struct hv_storvsc_request),
761				 M_DEVBUF, M_WAITOK|M_ZERO);
762		reqp->softc = sc;
763
764		LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
765	}
766
767	sc->hs_destroy = FALSE;
768	sc->hs_drain_notify = FALSE;
769	sema_init(&sc->hs_drain_sema, 0, "Store Drain Sema");
770
771	ret = hv_storvsc_connect_vsp(hv_dev);
772	if (ret != 0) {
773		goto cleanup;
774	}
775
776	/*
777	 * Create the device queue.
778	 * Hyper-V maps each target to one SCSI HBA
779	 */
780	devq = cam_simq_alloc(sc->hs_drv_props->drv_max_ios_per_target);
781	if (devq == NULL) {
782		device_printf(dev, "Failed to alloc device queue\n");
783		ret = ENOMEM;
784		goto cleanup;
785	}
786
787	sc->hs_sim = cam_sim_alloc(storvsc_action,
788				storvsc_poll,
789				sc->hs_drv_props->drv_name,
790				sc,
791				sc->hs_unit,
792				&sc->hs_lock, 1,
793				sc->hs_drv_props->drv_max_ios_per_target,
794				devq);
795
796	if (sc->hs_sim == NULL) {
797		device_printf(dev, "Failed to alloc sim\n");
798		cam_simq_free(devq);
799		ret = ENOMEM;
800		goto cleanup;
801	}
802
803	mtx_lock(&sc->hs_lock);
804	/* bus_id is set to 0, need to get it from VMBUS channel query? */
805	if (xpt_bus_register(sc->hs_sim, dev, 0) != CAM_SUCCESS) {
806		cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
807		mtx_unlock(&sc->hs_lock);
808		device_printf(dev, "Unable to register SCSI bus\n");
809		ret = ENXIO;
810		goto cleanup;
811	}
812
813	if (xpt_create_path(&sc->hs_path, /*periph*/NULL,
814		 cam_sim_path(sc->hs_sim),
815		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
816		xpt_bus_deregister(cam_sim_path(sc->hs_sim));
817		cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
818		mtx_unlock(&sc->hs_lock);
819		device_printf(dev, "Unable to create path\n");
820		ret = ENXIO;
821		goto cleanup;
822	}
823
824	mtx_unlock(&sc->hs_lock);
825
826	root_mount_rel(root_mount_token);
827	return (0);
828
829
830cleanup:
831	root_mount_rel(root_mount_token);
832	while (!LIST_EMPTY(&sc->hs_free_list)) {
833		reqp = LIST_FIRST(&sc->hs_free_list);
834		LIST_REMOVE(reqp, link);
835		free(reqp, M_DEVBUF);
836	}
837	return (ret);
838}
839
840/**
841 * @brief StorVSC device detach function
842 *
843 * This function is responsible for safely detaching a
844 * StorVSC device.  This includes waiting for inbound responses
845 * to complete and freeing associated per-device structures.
846 *
847 * @param dev a device
848 * @returns 0 on success
849 */
850static int
851storvsc_detach(device_t dev)
852{
853	struct storvsc_softc *sc = device_get_softc(dev);
854	struct hv_storvsc_request *reqp = NULL;
855	struct hv_device *hv_device = vmbus_get_devctx(dev);
856
857	mtx_lock(&hv_device->channel->inbound_lock);
858	sc->hs_destroy = TRUE;
859	mtx_unlock(&hv_device->channel->inbound_lock);
860
861	/*
862	 * At this point, all outbound traffic should be disabled. We
863	 * only allow inbound traffic (responses) to proceed so that
864	 * outstanding requests can be completed.
865	 */
866
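	/*
	 * hs_drain_sema is posted by hv_storvsc_on_iocompletion() once
	 * hs_num_out_reqs drops to zero, unblocking the wait below.
	 */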
867	sc->hs_drain_notify = TRUE;
868	sema_wait(&sc->hs_drain_sema);
869	sc->hs_drain_notify = FALSE;
870
871	/*
872	 * Since we have already drained, we don't need to busy wait.
873	 * The call to close the channel will reset the callback
874	 * under the protection of the incoming channel lock.
875	 */
876
877	hv_vmbus_channel_close(hv_device->channel);
878
879	mtx_lock(&sc->hs_lock);
880	while (!LIST_EMPTY(&sc->hs_free_list)) {
881		reqp = LIST_FIRST(&sc->hs_free_list);
882		LIST_REMOVE(reqp, link);
883
884		free(reqp, M_DEVBUF);
885	}
886	mtx_unlock(&sc->hs_lock);
887	return (0);
888}
889
890#if HVS_TIMEOUT_TEST
891/**
892 * @brief unit test for timed out operations
893 *
894 * This function provides unit testing capability to simulate
895 * timed out operations.  Recompilation with HV_TIMEOUT_TEST=1
896 * is required.
897 *
898 * @param reqp pointer to a request structure
899 * @param opcode SCSI operation being performed
900 * @param wait if 1, wait for I/O to complete
901 */
902static void
903storvsc_timeout_test(struct hv_storvsc_request *reqp,
904		uint8_t opcode, int wait)
905{
906	int ret;
907	union ccb *ccb = reqp->ccb;
908	struct storvsc_softc *sc = reqp->softc;
909
910	if (reqp->vstor_packet.vm_srb.cdb[0] != opcode) {
911		return;
912	}
913
914	if (wait) {
915		mtx_lock(&reqp->event.mtx);
916	}
917	ret = hv_storvsc_io_request(sc->hs_dev, reqp);
918	if (ret != 0) {
919		if (wait) {
920			mtx_unlock(&reqp->event.mtx);
921		}
922		printf("%s: io_request failed with %d.\n",
923				__func__, ret);
924		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
925		mtx_lock(&sc->hs_lock);
926		storvsc_free_request(sc, reqp);
927		xpt_done(ccb);
928		mtx_unlock(&sc->hs_lock);
929		return;
930	}
931
932	if (wait) {
933		xpt_print(ccb->ccb_h.path,
934				"%u: %s: waiting for IO return.\n",
935				ticks, __func__);
936		ret = cv_timedwait(&reqp->event.cv, &reqp->event.mtx, 60*hz);
937		mtx_unlock(&reqp->event.mtx);
938		xpt_print(ccb->ccb_h.path, "%u: %s: %s.\n",
939				ticks, __func__, (ret == 0)?
940				"IO return detected" :
941				"IO return not detected");
942		/*
943		 * Now both the timer handler and io done are running
944		 * simultaneously. We want to confirm the io done always
945		 * finishes after the timer handler exits. So reqp used by
946		 * timer handler is not freed or stale. Do busy loop for
947		 * another 1/10 second to make sure io done does
948		 * wait for the timer handler to complete.
949		 */
950		DELAY(100*1000);
951		mtx_lock(&sc->hs_lock);
952		xpt_print(ccb->ccb_h.path,
953				"%u: %s: finishing, queue frozen %d, "
954				"ccb status 0x%x scsi_status 0x%x.\n",
955				ticks, __func__, sc->hs_frozen,
956				ccb->ccb_h.status,
957				ccb->csio.scsi_status);
958		mtx_unlock(&sc->hs_lock);
959	}
960}
961#endif /* HVS_TIMEOUT_TEST */
962
963/**
964 * @brief timeout handler for requests
965 *
966 * This function is called as a result of a callout expiring.
967 *
968 * @param arg pointer to a request
969 */
970static void
971storvsc_timeout(void *arg)
972{
973	struct hv_storvsc_request *reqp = arg;
974	struct storvsc_softc *sc = reqp->softc;
975	union ccb *ccb = reqp->ccb;
976
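	/*
	 * Two-stage timeout: on the first expiry just log and re-arm the
	 * callout for another timeout period; on the second expiry freeze
	 * the SIM queue.  The I/O itself is never aborted here.
	 */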
977	if (reqp->retries == 0) {
978		mtx_lock(&sc->hs_lock);
979		xpt_print(ccb->ccb_h.path,
980		    "%u: IO timed out (req=0x%p), wait for another %u secs.\n",
981		    ticks, reqp, ccb->ccb_h.timeout / 1000);
982		cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL);
983		mtx_unlock(&sc->hs_lock);
984
985		reqp->retries++;
986		callout_reset_sbt(&reqp->callout, SBT_1MS * ccb->ccb_h.timeout,
987		    0, storvsc_timeout, reqp, 0);
988#if HVS_TIMEOUT_TEST
989		storvsc_timeout_test(reqp, SEND_DIAGNOSTIC, 0);
990#endif
991		return;
992	}
993
994	mtx_lock(&sc->hs_lock);
995	xpt_print(ccb->ccb_h.path,
996		"%u: IO (reqp = 0x%p) did not return for %u seconds, %s.\n",
997		ticks, reqp, ccb->ccb_h.timeout * (reqp->retries+1) / 1000,
998		(sc->hs_frozen == 0)?
999		"freezing the queue" : "the queue is already frozen");
1000	if (sc->hs_frozen == 0) {
1001		sc->hs_frozen = 1;
1002		xpt_freeze_simq(xpt_path_sim(ccb->ccb_h.path), 1);
1003	}
1004	mtx_unlock(&sc->hs_lock);
1005
1006#if HVS_TIMEOUT_TEST
1007	storvsc_timeout_test(reqp, MODE_SELECT_10, 1);
1008#endif
1009}
1010
1011/**
1012 * @brief StorVSC device poll function
1013 *
1014 * This function is responsible for servicing requests when
1015 * interrupts are disabled (i.e., when we are dumping core).
1016 *
1017 * @param sim a pointer to a CAM SCSI interface module
1018 */
1019static void
1020storvsc_poll(struct cam_sim *sim)
1021{
1022	struct storvsc_softc *sc = cam_sim_softc(sim);
1023
1024	mtx_assert(&sc->hs_lock, MA_OWNED);
1025	mtx_unlock(&sc->hs_lock);
1026	hv_storvsc_on_channel_callback(sc->hs_dev);
1027	mtx_lock(&sc->hs_lock);
1028}
1029
1030/**
1031 * @brief StorVSC device action function
1032 *
1033 * This function is responsible for handling SCSI operations which
1034 * are passed from the CAM layer.  The requests are in the form of
1035 * CAM control blocks which indicate the action being performed.
1036 * Not all actions require converting the request to a VSCSI protocol
1037 * message - these actions can be responded to by this driver.
1038 * Requests which are destined for a backend storage device are converted
1039 * to a VSCSI protocol message and sent on the channel connection associated
1040 * with this device.
1041 *
1042 * @param sim pointer to a CAM SCSI interface module
1043 * @param ccb pointer to a CAM control block
1044 */
1045static void
1046storvsc_action(struct cam_sim *sim, union ccb *ccb)
1047{
1048	struct storvsc_softc *sc = cam_sim_softc(sim);
1049	int res;
1050
1051	mtx_assert(&sc->hs_lock, MA_OWNED);
1052	switch (ccb->ccb_h.func_code) {
1053	case XPT_PATH_INQ: {
1054		struct ccb_pathinq *cpi = &ccb->cpi;
1055
1056		cpi->version_num = 1;
1057		cpi->hba_inquiry = PI_TAG_ABLE|PI_SDTR_ABLE;
1058		cpi->target_sprt = 0;
1059		cpi->hba_misc = PIM_NOBUSRESET;
1060		cpi->hba_eng_cnt = 0;
1061		cpi->max_target = STORVSC_MAX_TARGETS;
1062		cpi->max_lun = sc->hs_drv_props->drv_max_luns_per_target;
1063		cpi->initiator_id = cpi->max_target;
1064		cpi->bus_id = cam_sim_bus(sim);
1065		cpi->base_transfer_speed = 300000;
1066		cpi->transport = XPORT_SAS;
1067		cpi->transport_version = 0;
1068		cpi->protocol = PROTO_SCSI;
1069		cpi->protocol_version = SCSI_REV_SPC2;
1070		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1071		strncpy(cpi->hba_vid, sc->hs_drv_props->drv_name, HBA_IDLEN);
1072		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1073		cpi->unit_number = cam_sim_unit(sim);
1074
1075		ccb->ccb_h.status = CAM_REQ_CMP;
1076		xpt_done(ccb);
1077		return;
1078	}
1079	case XPT_GET_TRAN_SETTINGS: {
1080		struct  ccb_trans_settings *cts = &ccb->cts;
1081
1082		cts->transport = XPORT_SAS;
1083		cts->transport_version = 0;
1084		cts->protocol = PROTO_SCSI;
1085		cts->protocol_version = SCSI_REV_SPC2;
1086
1087		/* enable tag queuing and disconnected mode */
1088		cts->proto_specific.valid = CTS_SCSI_VALID_TQ;
1089		cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
1090		cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
1091		cts->xport_specific.valid = CTS_SPI_VALID_DISC;
1092		cts->xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
1093
1094		ccb->ccb_h.status = CAM_REQ_CMP;
1095		xpt_done(ccb);
1096		return;
1097	}
1098	case XPT_SET_TRAN_SETTINGS:	{
1099		ccb->ccb_h.status = CAM_REQ_CMP;
1100		xpt_done(ccb);
1101		return;
1102	}
1103	case XPT_CALC_GEOMETRY:{
1104		cam_calc_geometry(&ccb->ccg, 1);
1105		xpt_done(ccb);
1106		return;
1107	}
1108	case  XPT_RESET_BUS:
1109	case  XPT_RESET_DEV:{
1110#if HVS_HOST_RESET
1111		if ((res = hv_storvsc_host_reset(sc->hs_dev)) != 0) {
1112			xpt_print(ccb->ccb_h.path,
1113				"hv_storvsc_host_reset failed with %d\n", res);
1114			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1115			xpt_done(ccb);
1116			return;
1117		}
1118		ccb->ccb_h.status = CAM_REQ_CMP;
1119		xpt_done(ccb);
1120		return;
1121#else
1122		xpt_print(ccb->ccb_h.path,
1123				  "%s reset not supported.\n",
1124				  (ccb->ccb_h.func_code == XPT_RESET_BUS)?
1125				  "bus" : "dev");
1126		ccb->ccb_h.status = CAM_REQ_INVALID;
1127		xpt_done(ccb);
1128		return;
1129#endif	/* HVS_HOST_RESET */
1130	}
1131	case XPT_SCSI_IO:
1132	case XPT_IMMED_NOTIFY: {
1133		struct hv_storvsc_request *reqp = NULL;
1134
1135		if (ccb->csio.cdb_len == 0) {
1136			panic("cdb_len is 0\n");
1137		}
1138
1139		if (LIST_EMPTY(&sc->hs_free_list)) {
1140			ccb->ccb_h.status = CAM_REQUEUE_REQ;
1141			if (sc->hs_frozen == 0) {
1142				sc->hs_frozen = 1;
1143				xpt_freeze_simq(sim, /* count*/1);
1144			}
1145			xpt_done(ccb);
1146			return;
1147		}
1148
1149		reqp = LIST_FIRST(&sc->hs_free_list);
1150		LIST_REMOVE(reqp, link);
1151
1152		bzero(reqp, sizeof(struct hv_storvsc_request));
1153		reqp->softc = sc;
1154
1155		ccb->ccb_h.status |= CAM_SIM_QUEUED;
1156		create_storvsc_request(ccb, reqp);
1157
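		/* ccb_h.timeout is in milliseconds, hence the SBT_1MS scaling. */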
1158		if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1159			callout_init(&reqp->callout, CALLOUT_MPSAFE);
1160			callout_reset_sbt(&reqp->callout,
1161			    SBT_1MS * ccb->ccb_h.timeout, 0,
1162			    storvsc_timeout, reqp, 0);
1163#if HVS_TIMEOUT_TEST
1164			cv_init(&reqp->event.cv, "storvsc timeout cv");
1165			mtx_init(&reqp->event.mtx, "storvsc timeout mutex",
1166					NULL, MTX_DEF);
1167			switch (reqp->vstor_packet.vm_srb.cdb[0]) {
1168				case MODE_SELECT_10:
1169				case SEND_DIAGNOSTIC:
1170					/* To have timer send the request. */
1171					return;
1172				default:
1173					break;
1174			}
1175#endif /* HVS_TIMEOUT_TEST */
1176		}
1177
1178		if ((res = hv_storvsc_io_request(sc->hs_dev, reqp)) != 0) {
1179			xpt_print(ccb->ccb_h.path,
1180				"hv_storvsc_io_request failed with %d\n", res);
1181			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1182			storvsc_free_request(sc, reqp);
1183			xpt_done(ccb);
1184			return;
1185		}
1186		return;
1187	}
1188
1189	default:
1190		ccb->ccb_h.status = CAM_REQ_INVALID;
1191		xpt_done(ccb);
1192		return;
1193	}
1194}
1195
1196/**
1197 * @brief Fill in a request structure based on a CAM control block
1198 *
1199 * Fills in a request structure based on the contents of a CAM control
1200 * block.  The request structure holds the payload information for
1201 * VSCSI protocol request.
1202 *
1203 * @param ccb pointer to a CAM control block
1204 * @param reqp pointer to a request structure
1205 */
1206static void
1207create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp)
1208{
1209	struct ccb_scsiio *csio = &ccb->csio;
1210	uint64_t phys_addr;
1211	uint32_t bytes_to_copy = 0;
1212	uint32_t pfn_num = 0;
1213	uint32_t pfn;
1214
1215	/* refer to struct vmscsi_req for meanings of these two fields */
1216	reqp->vstor_packet.u.vm_srb.port =
1217		cam_sim_unit(xpt_path_sim(ccb->ccb_h.path));
1218	reqp->vstor_packet.u.vm_srb.path_id =
1219		cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1220
1221	reqp->vstor_packet.u.vm_srb.target_id = ccb->ccb_h.target_id;
1222	reqp->vstor_packet.u.vm_srb.lun = ccb->ccb_h.target_lun;
1223
1224	reqp->vstor_packet.u.vm_srb.cdb_len = csio->cdb_len;
1225	if(ccb->ccb_h.flags & CAM_CDB_POINTER) {
1226		memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_ptr,
1227			csio->cdb_len);
1228	} else {
1229		memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_bytes,
1230			csio->cdb_len);
1231	}
1232
1233	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
1234	case CAM_DIR_OUT:
1235		reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE;
1236		break;
1237	case CAM_DIR_IN:
1238		reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE;
1239		break;
1240	case CAM_DIR_NONE:
1241		reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
1242		break;
1243	default:
1244		reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
1245		break;
1246	}
1247
1248	reqp->sense_data     = &csio->sense_data;
1249	reqp->sense_info_len = csio->sense_len;
1250
1251	reqp->ccb = ccb;
1252	/*
1253	KASSERT((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0,
1254			("ccb is scatter gather valid\n"));
1255	*/
1256	if (csio->dxfer_len != 0) {
1257		reqp->data_buf.length = csio->dxfer_len;
1258		bytes_to_copy = csio->dxfer_len;
1259		phys_addr = vtophys(csio->data_ptr);
1260		reqp->data_buf.offset = phys_addr - trunc_page(phys_addr);
1261	}
1262
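	/*
	 * Populate the multi-page buffer's PFN array with one entry per
	 * guest physical page spanned by the data buffer.  For example, a
	 * 4KB transfer that starts 1KB into a page crosses into the next
	 * page and therefore needs two PFN entries.
	 */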
1263	while (bytes_to_copy != 0) {
1264		int bytes, page_offset;
1265		phys_addr = vtophys(&csio->data_ptr[reqp->data_buf.length -
1266		                                    bytes_to_copy]);
1267		pfn = phys_addr >> PAGE_SHIFT;
1268		reqp->data_buf.pfn_array[pfn_num] = pfn;
1269		page_offset = phys_addr - trunc_page(phys_addr);
1270
1271		bytes = min(PAGE_SIZE - page_offset, bytes_to_copy);
1272
1273		bytes_to_copy -= bytes;
1274		pfn_num++;
1275	}
1276}
1277
1278/**
1279 * @brief completion function before returning to CAM
1280 *
1281 * I/O process has been completed and the result needs
1282 * to be passed to the CAM layer.
1283 * Free resources related to this request.
1284 *
1285 * @param reqp pointer to a request structure
1286 */
1287static void
1288storvsc_io_done(struct hv_storvsc_request *reqp)
1289{
1290	union ccb *ccb = reqp->ccb;
1291	struct ccb_scsiio *csio = &ccb->csio;
1292	struct storvsc_softc *sc = reqp->softc;
1293	struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb;
1294
1295	if (reqp->retries > 0) {
1296		mtx_lock(&sc->hs_lock);
1297#if HVS_TIMEOUT_TEST
1298		xpt_print(ccb->ccb_h.path,
1299			"%u: IO returned after timeout, "
1300			"waking up timer handler if any.\n", ticks);
1301		mtx_lock(&reqp->event.mtx);
1302		cv_signal(&reqp->event.cv);
1303		mtx_unlock(&reqp->event.mtx);
1304#endif
1305		reqp->retries = 0;
1306		xpt_print(ccb->ccb_h.path,
1307			"%u: IO returned after timeout, "
1308			"stopping timer if any.\n", ticks);
1309		mtx_unlock(&sc->hs_lock);
1310	}
1311
1312	/*
1313	 * callout_drain() will wait for the timer handler to finish
1314	 * if it is running. So we don't need any lock to synchronize
1315	 * between this routine and the timer handler.
1316	 * Note that we need to make sure reqp is not freed when timer
1317	 * handler is using or will use it.
1318	 */
1319	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1320		callout_drain(&reqp->callout);
1321	}
1322
1323	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1324	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1325	if (vm_srb->scsi_status == SCSI_STATUS_OK) {
1326		ccb->ccb_h.status |= CAM_REQ_CMP;
1327	 } else {
1328		mtx_lock(&sc->hs_lock);
1329		xpt_print(ccb->ccb_h.path,
1330			"storvsc scsi_status = %d\n",
1331			vm_srb->scsi_status);
1332		mtx_unlock(&sc->hs_lock);
1333		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1334	}
1335
1336	ccb->csio.scsi_status = (vm_srb->scsi_status & 0xFF);
1337	ccb->csio.resid = ccb->csio.dxfer_len - vm_srb->transfer_len;
1338
1339	if (reqp->sense_info_len != 0) {
1340		csio->sense_resid = csio->sense_len - reqp->sense_info_len;
1341		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1342	}
1343
1344	mtx_lock(&sc->hs_lock);
1345	if (reqp->softc->hs_frozen == 1) {
1346		xpt_print(ccb->ccb_h.path,
1347			"%u: storvsc unfreezing softc 0x%p.\n",
1348			ticks, reqp->softc);
1349		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1350		reqp->softc->hs_frozen = 0;
1351	}
1352	storvsc_free_request(sc, reqp);
1353	xpt_done(ccb);
1354	mtx_unlock(&sc->hs_lock);
1355}
1356
1357/**
1358 * @brief Free a request structure
1359 *
1360 * Free a request structure by returning it to the free list
1361 *
1362 * @param sc pointer to a softc
1363 * @param reqp pointer to a request structure
1364 */
1365static void
1366storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp)
1367{
1368
1369	LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
1370}
1371
1372/**
1373 * @brief Determine type of storage device from GUID
1374 *
1375 * Using the type GUID, determine if this is a StorVSC (paravirtual
1376 * SCSI) or BlkVSC (paravirtual IDE) device.
1377 *
1378 * @param dev a device
1379 * @returns an enum hv_storage_type identifying the device
1380 */
1381static enum hv_storage_type
1382storvsc_get_storage_type(device_t dev)
1383{
1384	const char *p = vmbus_get_type(dev);
1385
1386	if (!memcmp(p, &gBlkVscDeviceType, sizeof(hv_guid))) {
1387		return DRIVER_BLKVSC;
1388	} else if (!memcmp(p, &gStorVscDeviceType, sizeof(hv_guid))) {
1389		return DRIVER_STORVSC;
1390	}
1391	return (DRIVER_UNKNOWN);
1392}
1393
1394