hv_storvsc_drv_freebsd.c revision 307459
1/*-
2 * Copyright (c) 2009-2012,2016 Microsoft Corp.
3 * Copyright (c) 2012 NetApp Inc.
4 * Copyright (c) 2012 Citrix Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice unmodified, this list of conditions, and the following
12 *    disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/**
30 * StorVSC driver for Hyper-V.  This driver presents a SCSI HBA interface
 * to the Common Access Method (CAM) layer.  CAM control blocks (CCBs) are
32 * converted into VSCSI protocol messages which are delivered to the parent
33 * partition StorVSP driver over the Hyper-V VMBUS.
34 */
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: stable/11/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c 307459 2016-10-17 02:44:27Z sephe $");
37
38#include <sys/param.h>
39#include <sys/proc.h>
40#include <sys/condvar.h>
41#include <sys/time.h>
42#include <sys/systm.h>
43#include <sys/sockio.h>
44#include <sys/mbuf.h>
45#include <sys/malloc.h>
46#include <sys/module.h>
47#include <sys/kernel.h>
48#include <sys/queue.h>
49#include <sys/lock.h>
50#include <sys/sx.h>
51#include <sys/taskqueue.h>
52#include <sys/bus.h>
53#include <sys/mutex.h>
54#include <sys/callout.h>
55#include <vm/vm.h>
56#include <vm/pmap.h>
57#include <vm/uma.h>
58#include <sys/lock.h>
59#include <sys/sema.h>
60#include <sys/sglist.h>
61#include <machine/bus.h>
62#include <sys/bus_dma.h>
63
64#include <cam/cam.h>
65#include <cam/cam_ccb.h>
66#include <cam/cam_periph.h>
67#include <cam/cam_sim.h>
68#include <cam/cam_xpt_sim.h>
69#include <cam/cam_xpt_internal.h>
70#include <cam/cam_debug.h>
71#include <cam/scsi/scsi_all.h>
72#include <cam/scsi/scsi_message.h>
73
74#include <dev/hyperv/include/hyperv.h>
75#include <dev/hyperv/include/vmbus.h>
76
77#include "hv_vstorage.h"
78#include "vmbus_if.h"
79
/*
 * Per-channel ring buffer size and per-device I/O limits.  Each SCSI
 * target exposes up to 64 LUNs; two outstanding requests are allowed
 * per LUN.
 */
#define STORVSC_RINGBUFFER_SIZE		(20*PAGE_SIZE)
#define STORVSC_MAX_LUNS_PER_TARGET	(64)
#define STORVSC_MAX_IO_REQUESTS		(STORVSC_MAX_LUNS_PER_TARGET * 2)
#define BLKVSC_MAX_IDE_DISKS_PER_TARGET	(1)
#define BLKVSC_MAX_IO_REQUESTS		STORVSC_MAX_IO_REQUESTS
#define STORVSC_MAX_TARGETS		(2)

/*
 * On-the-wire packet size; vmscsi_size_delta is non-zero only when
 * talking to pre-Win8 hosts (see vmstor_proto_list below).
 */
#define VSTOR_PKT_SIZE	(sizeof(struct vstor_packet) - vmscsi_size_delta)

#define HV_ALIGN(x, a) roundup2(x, a)

struct storvsc_softc;

/* One pre-allocated scatter/gather list, linked into the global pool. */
struct hv_sgl_node {
	LIST_ENTRY(hv_sgl_node) link;
	struct sglist *sgl_data;
};

/*
 * Global pool of bounce-buffer sg-lists shared by all storvsc
 * instances; populated once, on the first storvsc_attach().
 */
struct hv_sgl_page_pool{
	LIST_HEAD(, hv_sgl_node) in_use_sgl_list;
	LIST_HEAD(, hv_sgl_node) free_sgl_list;
	boolean_t                is_init;
} g_hv_sgl_page_pool;
103
/*
 * Total number of page-sized bounce buffers in the global sg-list pool.
 * Parenthesized so the macro expands safely inside larger expressions
 * (the unparenthesized form would mis-bind around '/', '%', etc.).
 */
#define STORVSC_MAX_SG_PAGE_CNT	(STORVSC_MAX_IO_REQUESTS * VMBUS_CHAN_PRPLIST_MAX)
105
/* Direction of an I/O request, derived from the CCB flags. */
enum storvsc_request_type {
	WRITE_TYPE,
	READ_TYPE,
	UNKNOWN_TYPE
};

/* Guest-physical-address range header followed by its page-number list. */
struct hvs_gpa_range {
	struct vmbus_gpa_range	gpa_range;
	uint64_t		gpa_page[VMBUS_CHAN_PRPLIST_MAX];
} __packed;

/*
 * Per-I/O request state: the VSCSI packet exchanged with the host,
 * the PRP (page) list describing the data buffer, the owning CCB and
 * softc, plus optional bounce-buffer bookkeeping for unaligned sgs.
 */
struct hv_storvsc_request {
	LIST_ENTRY(hv_storvsc_request) link;
	struct vstor_packet	vstor_packet;
	int prp_cnt;
	struct hvs_gpa_range prp_list;
	void *sense_data;
	uint8_t sense_info_len;
	uint8_t retries;
	union ccb *ccb;
	struct storvsc_softc *softc;
	struct callout callout;
	struct sema synch_sema; /*Synchronize the request/response if needed */
	struct sglist *bounce_sgl;
	unsigned int bounce_sgl_count;
	uint64_t not_aligned_seg_bits;
};

/* Per-adapter software state. */
struct storvsc_softc {
	struct hv_vmbus_channel		*hs_chan;
	LIST_HEAD(, hv_storvsc_request)	hs_free_list;
	struct mtx			hs_lock;
	struct storvsc_driver_props	*hs_drv_props;
	int 				hs_unit;
	uint32_t			hs_frozen;
	struct cam_sim			*hs_sim;
	struct cam_path 		*hs_path;
	uint32_t			hs_num_out_reqs;
	boolean_t			hs_destroy;
	boolean_t			hs_drain_notify;
	struct sema 			hs_drain_sema;
	struct hv_storvsc_request	hs_init_req;
	struct hv_storvsc_request	hs_reset_req;
	device_t			hs_dev;

	/* cpu -> outgoing channel map; built by storvsc_create_cpu2chan() */
	struct hv_vmbus_channel		*hs_cpu2chan[MAXCPU];
};
153
154
/**
 * HyperV storvsc timeout testing cases:
 * a. IO returned after first timeout;
 * b. IO returned after second timeout and queue freeze;
 * c. IO returned while timer handler is running
 * The first can be tested by "sg_senddiag -vv /dev/daX",
 * and the second and third can be done by
 * "sg_wr_mode -v -p 08 -c 0,1a -m 0,ff /dev/daX".
 */
#define HVS_TIMEOUT_TEST 0

/*
 * Bus/adapter reset functionality on the Hyper-V host is
 * buggy and it will be disabled until
 * it can be further tested.
 */
#define HVS_HOST_RESET 0

/* Static per-driver-flavor properties; see g_drv_props_table below. */
struct storvsc_driver_props {
	char		*drv_name;
	char		*drv_desc;
	uint8_t		drv_max_luns_per_target;
	uint8_t		drv_max_ios_per_target;
	uint32_t	drv_ringbuffer_size;
};

/* Device flavor; also used as an index into g_drv_props_table. */
enum hv_storage_type {
	DRIVER_BLKVSC,
	DRIVER_STORVSC,
	DRIVER_UNKNOWN
};

#define HS_MAX_ADAPTERS 10

/* Host channel-properties flag: host supports sub-channels (Win8+). */
#define HV_STORAGE_SUPPORTS_MULTI_CHANNEL 0x1
190
/* VMBUS device-type GUID for the SCSI (storvsc) interface. */
/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
static const struct hyperv_guid gStorVscDeviceType={
	.hv_guid = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
		 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f}
};

/* VMBUS device-type GUID for the IDE (blkvsc) interface. */
/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
static const struct hyperv_guid gBlkVscDeviceType={
	.hv_guid = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
		 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5}
};

/* Indexed by enum hv_storage_type (DRIVER_BLKVSC, DRIVER_STORVSC). */
static struct storvsc_driver_props g_drv_props_table[] = {
	{"blkvsc", "Hyper-V IDE Storage Interface",
	 BLKVSC_MAX_IDE_DISKS_PER_TARGET, BLKVSC_MAX_IO_REQUESTS,
	 STORVSC_RINGBUFFER_SIZE},
	{"storvsc", "Hyper-V SCSI Storage Interface",
	 STORVSC_MAX_LUNS_PER_TARGET, STORVSC_MAX_IO_REQUESTS,
	 STORVSC_RINGBUFFER_SIZE}
};
211
/*
 * Sense buffer size changed in win8; have a run-time
 * variable to track the size we should use.
 */
static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;

/*
 * The size of the vmscsi_request has changed in win8. The
 * additional size is for the newly added elements in the
 * structure. These elements are valid only when we are talking
 * to a win8 host.
 * Track the correct size we need to apply.
 */
static int vmscsi_size_delta;
/*
 * The storage protocol version is determined during the
 * initial exchange with the host.  It will indicate which
 * storage functionality is available in the host.
 */
static int vmstor_proto_version;

/* Wire parameters that vary per negotiated protocol version. */
struct vmstor_proto {
        int proto_version;
        int sense_buffer_size;
        int vmscsi_size_delta;
};

/*
 * Versions tried in order (newest first) by hv_storvsc_channel_init();
 * the first one the host accepts wins.
 */
static const struct vmstor_proto vmstor_proto_list[] = {
        {
                VMSTOR_PROTOCOL_VERSION_WIN10,
                POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
                0
        },
        {
                VMSTOR_PROTOCOL_VERSION_WIN8_1,
                POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
                0
        },
        {
                VMSTOR_PROTOCOL_VERSION_WIN8,
                POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
                0
        },
        {
                VMSTOR_PROTOCOL_VERSION_WIN7,
                PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
                sizeof(struct vmscsi_win8_extension),
        },
        {
                VMSTOR_PROTOCOL_VERSION_WIN6,
                PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
                sizeof(struct vmscsi_win8_extension),
        }
};
266
/* static functions */
static int storvsc_probe(device_t dev);
static int storvsc_attach(device_t dev);
static int storvsc_detach(device_t dev);
static void storvsc_poll(struct cam_sim * sim);
static void storvsc_action(struct cam_sim * sim, union ccb * ccb);
static int create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp);
static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp);
static enum hv_storage_type storvsc_get_storage_type(device_t dev);
static void hv_storvsc_rescan_target(struct storvsc_softc *sc);
static void hv_storvsc_on_channel_callback(void *xchan);
static void hv_storvsc_on_iocompletion( struct storvsc_softc *sc,
					struct vstor_packet *vstor_packet,
					struct hv_storvsc_request *request);
static int hv_storvsc_connect_vsp(struct storvsc_softc *);
static void storvsc_io_done(struct hv_storvsc_request *reqp);
static void storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl,
				bus_dma_segment_t *orig_sgl,
				unsigned int orig_sgl_count,
				uint64_t seg_bits);
/*
 * NOTE(review): not declared static despite the "static functions"
 * banner above — confirm no external caller exists before tightening.
 */
void storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl,
				unsigned int dest_sgl_count,
				struct sglist* src_sgl,
				uint64_t seg_bits);

/* newbus glue: probe/attach/detach plumbed into the vmbus parent. */
static device_method_t storvsc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		storvsc_probe),
	DEVMETHOD(device_attach,	storvsc_attach),
	DEVMETHOD(device_detach,	storvsc_detach),
	DEVMETHOD(device_shutdown,      bus_generic_shutdown),
	DEVMETHOD_END
};

static driver_t storvsc_driver = {
	"storvsc", storvsc_methods, sizeof(struct storvsc_softc),
};

static devclass_t storvsc_devclass;
DRIVER_MODULE(storvsc, vmbus, storvsc_driver, storvsc_devclass, 0, 0);
MODULE_VERSION(storvsc, 1);
MODULE_DEPEND(storvsc, vmbus, 1, 1, 1);
309
310static void
311storvsc_subchan_attach(struct storvsc_softc *sc,
312    struct hv_vmbus_channel *new_channel)
313{
314	struct vmstor_chan_props props;
315	int ret = 0;
316
317	memset(&props, 0, sizeof(props));
318
319	new_channel->hv_chan_priv1 = sc;
320	vmbus_chan_cpu_rr(new_channel);
321	ret = vmbus_chan_open(new_channel,
322	    sc->hs_drv_props->drv_ringbuffer_size,
323  	    sc->hs_drv_props->drv_ringbuffer_size,
324	    (void *)&props,
325	    sizeof(struct vmstor_chan_props),
326	    hv_storvsc_on_channel_callback,
327	    new_channel);
328}
329
330/**
331 * @brief Send multi-channel creation request to host
332 *
333 * @param device  a Hyper-V device pointer
334 * @param max_chans  the max channels supported by vmbus
335 */
336static void
337storvsc_send_multichannel_request(struct storvsc_softc *sc, int max_chans)
338{
339	struct hv_vmbus_channel **subchan;
340	struct hv_storvsc_request *request;
341	struct vstor_packet *vstor_packet;
342	int request_channels_cnt = 0;
343	int ret, i;
344
345	/* get multichannels count that need to create */
346	request_channels_cnt = MIN(max_chans, mp_ncpus);
347
348	request = &sc->hs_init_req;
349
350	/* request the host to create multi-channel */
351	memset(request, 0, sizeof(struct hv_storvsc_request));
352
353	sema_init(&request->synch_sema, 0, ("stor_synch_sema"));
354
355	vstor_packet = &request->vstor_packet;
356
357	vstor_packet->operation = VSTOR_OPERATION_CREATE_MULTI_CHANNELS;
358	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
359	vstor_packet->u.multi_channels_cnt = request_channels_cnt;
360
361	ret = vmbus_chan_send(sc->hs_chan,
362	    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
363	    vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request);
364
365	/* wait for 5 seconds */
366	ret = sema_timedwait(&request->synch_sema, 5 * hz);
367	if (ret != 0) {
368		printf("Storvsc_error: create multi-channel timeout, %d\n",
369		    ret);
370		return;
371	}
372
373	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
374	    vstor_packet->status != 0) {
375		printf("Storvsc_error: create multi-channel invalid operation "
376		    "(%d) or statue (%u)\n",
377		    vstor_packet->operation, vstor_packet->status);
378		return;
379	}
380
381	/* Wait for sub-channels setup to complete. */
382	subchan = vmbus_subchan_get(sc->hs_chan, request_channels_cnt);
383
384	/* Attach the sub-channels. */
385	for (i = 0; i < request_channels_cnt; ++i)
386		storvsc_subchan_attach(sc, subchan[i]);
387
388	/* Release the sub-channels. */
389	vmbus_subchan_rel(subchan, request_channels_cnt);
390
391	if (bootverbose)
392		printf("Storvsc create multi-channel success!\n");
393}
394
/**
 * @brief initialize channel connection to parent partition
 *
 * Runs the VSC/VSP handshake on the already-open primary channel:
 * BEGIN_INITIALIZATION, protocol-version negotiation, channel-property
 * query, END_INITIALIZATION, and (when the host supports it) the
 * multi-channel creation request.  Each step sends one packet and
 * blocks up to 5 seconds for the host's completion.
 *
 * @param sc  the storvsc softc (channel already opened)
 * @returns  0 on success, non-zero error on failure
 */
static int
hv_storvsc_channel_init(struct storvsc_softc *sc)
{
	int ret = 0, i;
	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;
	uint16_t max_chans = 0;
	boolean_t support_multichannel = FALSE;
	uint32_t version;

	max_chans = 0;
	support_multichannel = FALSE;

	request = &sc->hs_init_req;
	memset(request, 0, sizeof(struct hv_storvsc_request));
	vstor_packet = &request->vstor_packet;
	request->softc = sc;

	/**
	 * Initiate the vsc/vsp initialization protocol on the open channel
	 */
	sema_init(&request->synch_sema, 0, ("stor_synch_sema"));

	vstor_packet->operation = VSTOR_OPERATION_BEGININITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;


	ret = vmbus_chan_send(sc->hs_chan,
	    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
	    vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request);

	if (ret != 0)
		goto cleanup;

	/* wait 5 seconds */
	ret = sema_timedwait(&request->synch_sema, 5 * hz);
	if (ret != 0)
		goto cleanup;

	/* NOTE: on a bad host reply, ret is still 0 here and is returned. */
	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
		vstor_packet->status != 0) {
		goto cleanup;
	}

	/*
	 * Try each protocol version from vmstor_proto_list (newest
	 * first) until the host accepts one.
	 */
	for (i = 0; i < nitems(vmstor_proto_list); i++) {
		/* reuse the packet for version range supported */

		memset(vstor_packet, 0, sizeof(struct vstor_packet));
		vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION;
		vstor_packet->flags = REQUEST_COMPLETION_FLAG;

		vstor_packet->u.version.major_minor =
			vmstor_proto_list[i].proto_version;

		/* revision is only significant for Windows guests */
		vstor_packet->u.version.revision = 0;

		ret = vmbus_chan_send(sc->hs_chan,
		    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
		    vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request);

		if (ret != 0)
			goto cleanup;

		/* wait 5 seconds */
		ret = sema_timedwait(&request->synch_sema, 5 * hz);

		if (ret)
			goto cleanup;

		if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO) {
			ret = EINVAL;
			goto cleanup;
		}
		if (vstor_packet->status == 0) {
			/* Host accepted: latch the matching wire parameters. */
			vmstor_proto_version =
				vmstor_proto_list[i].proto_version;
			sense_buffer_size =
				vmstor_proto_list[i].sense_buffer_size;
			vmscsi_size_delta =
				vmstor_proto_list[i].vmscsi_size_delta;
			break;
		}
	}

	/* No version was accepted. */
	if (vstor_packet->status != 0) {
		ret = EINVAL;
		goto cleanup;
	}
	/**
	 * Query channel properties
	 */
	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERYPROPERTIES;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = vmbus_chan_send(sc->hs_chan,
	    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
	    vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request);

	if ( ret != 0)
		goto cleanup;

	/* wait 5 seconds */
	ret = sema_timedwait(&request->synch_sema, 5 * hz);

	if (ret != 0)
		goto cleanup;

	/* TODO: Check returned version */
	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
	    vstor_packet->status != 0) {
		goto cleanup;
	}

	/* multi-channels feature is supported by WIN8 and above version */
	max_chans = vstor_packet->u.chan_props.max_channel_cnt;
	version = VMBUS_GET_VERSION(device_get_parent(sc->hs_dev), sc->hs_dev);
	if (version != VMBUS_VERSION_WIN7 && version != VMBUS_VERSION_WS2008 &&
	    (vstor_packet->u.chan_props.flags &
	     HV_STORAGE_SUPPORTS_MULTI_CHANNEL)) {
		support_multichannel = TRUE;
	}

	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_ENDINITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = vmbus_chan_send(sc->hs_chan,
	    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
	    vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request);

	if (ret != 0) {
		goto cleanup;
	}

	/* wait 5 seconds */
	ret = sema_timedwait(&request->synch_sema, 5 * hz);

	if (ret != 0)
		goto cleanup;

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
	    vstor_packet->status != 0)
		goto cleanup;

	/*
	 * If multi-channel is supported, send multichannel create
	 * request to host.
	 */
	if (support_multichannel)
		storvsc_send_multichannel_request(sc, max_chans);

cleanup:
	sema_destroy(&request->synch_sema);
	return (ret);
}
558
559/**
560 * @brief Open channel connection to paraent partition StorVSP driver
561 *
562 * Open and initialize channel connection to parent partition StorVSP driver.
563 *
564 * @param pointer to a Hyper-V device
565 * @returns 0 on success, non-zero error on failure
566 */
567static int
568hv_storvsc_connect_vsp(struct storvsc_softc *sc)
569{
570	int ret = 0;
571	struct vmstor_chan_props props;
572
573	memset(&props, 0, sizeof(struct vmstor_chan_props));
574
575	/*
576	 * Open the channel
577	 */
578	KASSERT(sc->hs_chan->hv_chan_priv1 == sc, ("invalid chan priv1"));
579	vmbus_chan_cpu_rr(sc->hs_chan);
580	ret = vmbus_chan_open(
581		sc->hs_chan,
582		sc->hs_drv_props->drv_ringbuffer_size,
583		sc->hs_drv_props->drv_ringbuffer_size,
584		(void *)&props,
585		sizeof(struct vmstor_chan_props),
586		hv_storvsc_on_channel_callback,
587		sc->hs_chan);
588
589	if (ret != 0) {
590		return ret;
591	}
592
593	ret = hv_storvsc_channel_init(sc);
594
595	return (ret);
596}
597
#if HVS_HOST_RESET
/**
 * @brief Issue a bus (adapter) reset request to the host
 *
 * Compiled out by default (HVS_HOST_RESET == 0) because host-side
 * reset support is buggy; see the HVS_HOST_RESET comment above.
 *
 * @param sc  the storvsc softc
 * @returns 0 on success, non-zero error on failure
 */
static int
hv_storvsc_host_reset(struct storvsc_softc *sc)
{
	int ret = 0;

	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;

	request = &sc->hs_reset_req;
	request->softc = sc;
	vstor_packet = &request->vstor_packet;

	sema_init(&request->synch_sema, 0, "stor synch sema");

	vstor_packet->operation = VSTOR_OPERATION_RESETBUS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	/*
	 * Fix: the original referenced the undeclared 'dev->channel'
	 * (a compile error whenever HVS_HOST_RESET is enabled); send on
	 * the primary channel tracked in the softc instead.
	 */
	ret = vmbus_chan_send(sc->hs_chan,
	    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
	    vstor_packet, VSTOR_PKT_SIZE,
	    (uint64_t)(uintptr_t)&sc->hs_reset_req);

	if (ret != 0) {
		goto cleanup;
	}

	ret = sema_timedwait(&request->synch_sema, 5 * hz); /* KYS 5 seconds */

	if (ret) {
		goto cleanup;
	}


	/*
	 * At this point, all outstanding requests in the adapter
	 * should have been flushed out and return to us
	 */

cleanup:
	sema_destroy(&request->synch_sema);
	return (ret);
}
#endif /* HVS_HOST_RESET */
642
/**
 * @brief Function to initiate an I/O request
 *
 * Fills in the EXECUTE_SRB wire fields and sends the request on the
 * channel mapped to the current CPU.  Called with sc->hs_lock held;
 * the lock is dropped across the channel send and re-acquired.
 *
 * @param sc       the storvsc softc
 * @param request  pointer to a request structure
 * @returns 0 on success, non-zero error on failure
 */
static int
hv_storvsc_io_request(struct storvsc_softc *sc,
					  struct hv_storvsc_request *request)
{
	struct vstor_packet *vstor_packet = &request->vstor_packet;
	struct hv_vmbus_channel* outgoing_channel = NULL;
	int ret = 0;

	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;

	vstor_packet->u.vm_srb.length =
	    sizeof(struct vmscsi_req) - vmscsi_size_delta;

	vstor_packet->u.vm_srb.sense_info_len = sense_buffer_size;

	vstor_packet->u.vm_srb.transfer_len =
	    request->prp_list.gpa_range.gpa_len;

	vstor_packet->operation = VSTOR_OPERATION_EXECUTESRB;

	/* Use the channel bound to the current CPU to spread the load. */
	outgoing_channel = sc->hs_cpu2chan[curcpu];

	/* Drop the softc lock across the channel send. */
	mtx_unlock(&request->softc->hs_lock);
	if (request->prp_list.gpa_range.gpa_len) {
		/* Data transfer: ship the PRP (page) list with the SRB. */
		ret = vmbus_chan_send_prplist(outgoing_channel,
		    &request->prp_list.gpa_range, request->prp_cnt,
		    vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request);
	} else {
		/* No data payload: plain in-band packet. */
		ret = vmbus_chan_send(outgoing_channel,
		    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
		    vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request);
	}
	mtx_lock(&request->softc->hs_lock);

	if (ret != 0) {
		printf("Unable to send packet %p ret %d", vstor_packet, ret);
	} else {
		/* Counted for the drain logic in detach/iocompletion. */
		atomic_add_int(&sc->hs_num_out_reqs, 1);
	}

	return (ret);
}
692
693
/**
 * Process IO_COMPLETION_OPERATION and ready
 * the result to be completed for upper layer
 * processing by the CAM layer.
 */
static void
hv_storvsc_on_iocompletion(struct storvsc_softc *sc,
			   struct vstor_packet *vstor_packet,
			   struct hv_storvsc_request *request)
{
	struct vmscsi_req *vm_srb;

	vm_srb = &vstor_packet->u.vm_srb;

	/*
	 * Copy some fields of the host's response into the request structure,
	 * because the fields will be used later in storvsc_io_done().
	 */
	request->vstor_packet.u.vm_srb.scsi_status = vm_srb->scsi_status;
	request->vstor_packet.u.vm_srb.srb_status = vm_srb->srb_status;
	request->vstor_packet.u.vm_srb.transfer_len = vm_srb->transfer_len;

	if (((vm_srb->scsi_status & 0xFF) == SCSI_STATUS_CHECK_COND) &&
			(vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)) {
		/* Autosense data available */

		KASSERT(vm_srb->sense_info_len <= request->sense_info_len,
				("vm_srb->sense_info_len <= "
				 "request->sense_info_len"));

		/* Copy the host-supplied sense bytes for the CCB. */
		memcpy(request->sense_data, vm_srb->u.sense_data,
			vm_srb->sense_info_len);

		request->sense_info_len = vm_srb->sense_info_len;
	}

	/* Complete request by passing to the CAM layer */
	storvsc_io_done(request);
	/* Wake a drain waiter (detach) once the last request completes. */
	atomic_subtract_int(&sc->hs_num_out_reqs, 1);
	if (sc->hs_drain_notify && (sc->hs_num_out_reqs == 0)) {
		sema_post(&sc->hs_drain_sema);
	}
}
737
738static void
739hv_storvsc_rescan_target(struct storvsc_softc *sc)
740{
741	path_id_t pathid;
742	target_id_t targetid;
743	union ccb *ccb;
744
745	pathid = cam_sim_path(sc->hs_sim);
746	targetid = CAM_TARGET_WILDCARD;
747
748	/*
749	 * Allocate a CCB and schedule a rescan.
750	 */
751	ccb = xpt_alloc_ccb_nowait();
752	if (ccb == NULL) {
753		printf("unable to alloc CCB for rescan\n");
754		return;
755	}
756
757	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
758	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
759		printf("unable to create path for rescan, pathid: %u,"
760		    "targetid: %u\n", pathid, targetid);
761		xpt_free_ccb(ccb);
762		return;
763	}
764
765	if (targetid == CAM_TARGET_WILDCARD)
766		ccb->ccb_h.func_code = XPT_SCAN_BUS;
767	else
768		ccb->ccb_h.func_code = XPT_SCAN_TGT;
769
770	xpt_rescan(ccb);
771}
772
773static void
774hv_storvsc_on_channel_callback(void *xchan)
775{
776	int ret = 0;
777	hv_vmbus_channel *channel = xchan;
778	struct storvsc_softc *sc = channel->hv_chan_priv1;
779	uint32_t bytes_recvd;
780	uint64_t request_id;
781	uint8_t packet[roundup2(sizeof(struct vstor_packet), 8)];
782	struct hv_storvsc_request *request;
783	struct vstor_packet *vstor_packet;
784
785	bytes_recvd = roundup2(VSTOR_PKT_SIZE, 8);
786	ret = vmbus_chan_recv(channel, packet, &bytes_recvd, &request_id);
787	KASSERT(ret != ENOBUFS, ("storvsc recvbuf is not large enough"));
788	/* XXX check bytes_recvd to make sure that it contains enough data */
789
790	while ((ret == 0) && (bytes_recvd > 0)) {
791		request = (struct hv_storvsc_request *)(uintptr_t)request_id;
792
793		if ((request == &sc->hs_init_req) ||
794			(request == &sc->hs_reset_req)) {
795			memcpy(&request->vstor_packet, packet,
796				   sizeof(struct vstor_packet));
797			sema_post(&request->synch_sema);
798		} else {
799			vstor_packet = (struct vstor_packet *)packet;
800			switch(vstor_packet->operation) {
801			case VSTOR_OPERATION_COMPLETEIO:
802				if (request == NULL)
803					panic("VMBUS: storvsc received a "
804					    "packet with NULL request id in "
805					    "COMPLETEIO operation.");
806
807				hv_storvsc_on_iocompletion(sc,
808							vstor_packet, request);
809				break;
810			case VSTOR_OPERATION_REMOVEDEVICE:
811				printf("VMBUS: storvsc operation %d not "
812				    "implemented.\n", vstor_packet->operation);
813				/* TODO: implement */
814				break;
815			case VSTOR_OPERATION_ENUMERATE_BUS:
816				hv_storvsc_rescan_target(sc);
817				break;
818			default:
819				break;
820			}
821		}
822
823		bytes_recvd = roundup2(VSTOR_PKT_SIZE, 8),
824		ret = vmbus_chan_recv(channel, packet, &bytes_recvd,
825		    &request_id);
826		KASSERT(ret != ENOBUFS,
827		    ("storvsc recvbuf is not large enough"));
828		/*
829		 * XXX check bytes_recvd to make sure that it contains
830		 * enough data
831		 */
832	}
833}
834
835/**
836 * @brief StorVSC probe function
837 *
838 * Device probe function.  Returns 0 if the input device is a StorVSC
839 * device.  Otherwise, a ENXIO is returned.  If the input device is
840 * for BlkVSC (paravirtual IDE) device and this support is disabled in
841 * favor of the emulated ATA/IDE device, return ENXIO.
842 *
843 * @param a device
844 * @returns 0 on success, ENXIO if not a matcing StorVSC device
845 */
846static int
847storvsc_probe(device_t dev)
848{
849	int ata_disk_enable = 0;
850	int ret	= ENXIO;
851
852	switch (storvsc_get_storage_type(dev)) {
853	case DRIVER_BLKVSC:
854		if(bootverbose)
855			device_printf(dev, "DRIVER_BLKVSC-Emulated ATA/IDE probe\n");
856		if (!getenv_int("hw.ata.disk_enable", &ata_disk_enable)) {
857			if(bootverbose)
858				device_printf(dev,
859					"Enlightened ATA/IDE detected\n");
860			device_set_desc(dev, g_drv_props_table[DRIVER_BLKVSC].drv_desc);
861			ret = BUS_PROBE_DEFAULT;
862		} else if(bootverbose)
863			device_printf(dev, "Emulated ATA/IDE set (hw.ata.disk_enable set)\n");
864		break;
865	case DRIVER_STORVSC:
866		if(bootverbose)
867			device_printf(dev, "Enlightened SCSI device detected\n");
868		device_set_desc(dev, g_drv_props_table[DRIVER_STORVSC].drv_desc);
869		ret = BUS_PROBE_DEFAULT;
870		break;
871	default:
872		ret = ENXIO;
873	}
874	return (ret);
875}
876
877static void
878storvsc_create_cpu2chan(struct storvsc_softc *sc)
879{
880	int cpu;
881
882	CPU_FOREACH(cpu) {
883		sc->hs_cpu2chan[cpu] = vmbus_chan_cpu2chan(sc->hs_chan, cpu);
884		if (bootverbose) {
885			device_printf(sc->hs_dev, "cpu%d -> chan%u\n",
886			    cpu, sc->hs_cpu2chan[cpu]->ch_id);
887		}
888	}
889}
890
/**
 * @brief StorVSC attach function
 *
 * Function responsible for allocating per-device structures,
 * setting up CAM interfaces and scanning for available LUNs to
 * be used for SCSI device peripherals.
 *
 * @param dev a device
 * @returns 0 on success or an error on failure
 */
static int
storvsc_attach(device_t dev)
{
	enum hv_storage_type stor_type;
	struct storvsc_softc *sc;
	struct cam_devq *devq;
	int ret, i, j;
	struct hv_storvsc_request *reqp;
	struct root_hold_token *root_mount_token = NULL;
	struct hv_sgl_node *sgl_node = NULL;
	void *tmp_buff = NULL;

	/*
	 * We need to serialize storvsc attach calls.
	 */
	root_mount_token = root_mount_hold("storvsc");

	sc = device_get_softc(dev);
	sc->hs_chan = vmbus_get_channel(dev);
	sc->hs_chan->hv_chan_priv1 = sc;

	stor_type = storvsc_get_storage_type(dev);

	if (stor_type == DRIVER_UNKNOWN) {
		ret = ENODEV;
		goto cleanup;
	}

	/* fill in driver specific properties */
	sc->hs_drv_props = &g_drv_props_table[stor_type];

	/* fill in device specific properties */
	sc->hs_unit	= device_get_unit(dev);
	sc->hs_dev	= dev;

	/* Pre-allocate the per-device free-request pool. */
	LIST_INIT(&sc->hs_free_list);
	mtx_init(&sc->hs_lock, "hvslck", NULL, MTX_DEF);

	for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) {
		reqp = malloc(sizeof(struct hv_storvsc_request),
				 M_DEVBUF, M_WAITOK|M_ZERO);
		reqp->softc = sc;

		LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
	}

	/* create sg-list page pool (global; first attach only) */
	if (FALSE == g_hv_sgl_page_pool.is_init) {
		g_hv_sgl_page_pool.is_init = TRUE;
		LIST_INIT(&g_hv_sgl_page_pool.in_use_sgl_list);
		LIST_INIT(&g_hv_sgl_page_pool.free_sgl_list);

		/*
		 * Pre-create SG list, each SG list with
		 * VMBUS_CHAN_PRPLIST_MAX segments, each
		 * segment has one page buffer
		 */
		for (i = 0; i < STORVSC_MAX_IO_REQUESTS; i++) {
	        	sgl_node = malloc(sizeof(struct hv_sgl_node),
			    M_DEVBUF, M_WAITOK|M_ZERO);

			sgl_node->sgl_data =
			    sglist_alloc(VMBUS_CHAN_PRPLIST_MAX,
			    M_WAITOK|M_ZERO);

			for (j = 0; j < VMBUS_CHAN_PRPLIST_MAX; j++) {
				tmp_buff = malloc(PAGE_SIZE,
				    M_DEVBUF, M_WAITOK|M_ZERO);

				/*
				 * NOTE(review): a kernel VA is stored in
				 * the ss_paddr field (cast to vm_paddr_t);
				 * the cleanup path below casts it back to
				 * void* to free it.
				 */
				sgl_node->sgl_data->sg_segs[j].ss_paddr =
				    (vm_paddr_t)tmp_buff;
			}

			LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list,
			    sgl_node, link);
		}
	}

	sc->hs_destroy = FALSE;
	sc->hs_drain_notify = FALSE;
	sema_init(&sc->hs_drain_sema, 0, "Store Drain Sema");

	/* Open the channel(s) and run the VSC/VSP handshake. */
	ret = hv_storvsc_connect_vsp(sc);
	if (ret != 0) {
		goto cleanup;
	}

	/* Construct cpu to channel mapping */
	storvsc_create_cpu2chan(sc);

	/*
	 * Create the device queue.
	 * Hyper-V maps each target to one SCSI HBA
	 */
	devq = cam_simq_alloc(sc->hs_drv_props->drv_max_ios_per_target);
	if (devq == NULL) {
		device_printf(dev, "Failed to alloc device queue\n");
		ret = ENOMEM;
		goto cleanup;
	}

	sc->hs_sim = cam_sim_alloc(storvsc_action,
				storvsc_poll,
				sc->hs_drv_props->drv_name,
				sc,
				sc->hs_unit,
				&sc->hs_lock, 1,
				sc->hs_drv_props->drv_max_ios_per_target,
				devq);

	if (sc->hs_sim == NULL) {
		device_printf(dev, "Failed to alloc sim\n");
		cam_simq_free(devq);
		ret = ENOMEM;
		goto cleanup;
	}

	mtx_lock(&sc->hs_lock);
	/* bus_id is set to 0, need to get it from VMBUS channel query? */
	if (xpt_bus_register(sc->hs_sim, dev, 0) != CAM_SUCCESS) {
		cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
		mtx_unlock(&sc->hs_lock);
		device_printf(dev, "Unable to register SCSI bus\n");
		ret = ENXIO;
		goto cleanup;
	}

	if (xpt_create_path(&sc->hs_path, /*periph*/NULL,
		 cam_sim_path(sc->hs_sim),
		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sc->hs_sim));
		cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
		mtx_unlock(&sc->hs_lock);
		device_printf(dev, "Unable to create path\n");
		ret = ENXIO;
		goto cleanup;
	}

	mtx_unlock(&sc->hs_lock);

	root_mount_rel(root_mount_token);
	return (0);


cleanup:
	/* Failure path: release the request pool and global sg-list pool. */
	root_mount_rel(root_mount_token);
	while (!LIST_EMPTY(&sc->hs_free_list)) {
		reqp = LIST_FIRST(&sc->hs_free_list);
		LIST_REMOVE(reqp, link);
		free(reqp, M_DEVBUF);
	}

	while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
		sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
		LIST_REMOVE(sgl_node, link);
		for (j = 0; j < VMBUS_CHAN_PRPLIST_MAX; j++) {
			if (NULL !=
			    (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) {
				free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF);
			}
		}
		sglist_free(sgl_node->sgl_data);
		free(sgl_node, M_DEVBUF);
	}

	return (ret);
}
1068
1069/**
1070 * @brief StorVSC device detach function
1071 *
1072 * This function is responsible for safely detaching a
1073 * StorVSC device.  This includes waiting for inbound responses
1074 * to complete and freeing associated per-device structures.
1075 *
1076 * @param dev a device
1077 * returns 0 on success
1078 */
1079static int
1080storvsc_detach(device_t dev)
1081{
1082	struct storvsc_softc *sc = device_get_softc(dev);
1083	struct hv_storvsc_request *reqp = NULL;
1084	struct hv_sgl_node *sgl_node = NULL;
1085	int j = 0;
1086
1087	sc->hs_destroy = TRUE;
1088
1089	/*
1090	 * At this point, all outbound traffic should be disabled. We
1091	 * only allow inbound traffic (responses) to proceed so that
1092	 * outstanding requests can be completed.
1093	 */
1094
1095	sc->hs_drain_notify = TRUE;
1096	sema_wait(&sc->hs_drain_sema);
1097	sc->hs_drain_notify = FALSE;
1098
1099	/*
1100	 * Since we have already drained, we don't need to busy wait.
1101	 * The call to close the channel will reset the callback
1102	 * under the protection of the incoming channel lock.
1103	 */
1104
1105	vmbus_chan_close(sc->hs_chan);
1106
1107	mtx_lock(&sc->hs_lock);
1108	while (!LIST_EMPTY(&sc->hs_free_list)) {
1109		reqp = LIST_FIRST(&sc->hs_free_list);
1110		LIST_REMOVE(reqp, link);
1111
1112		free(reqp, M_DEVBUF);
1113	}
1114	mtx_unlock(&sc->hs_lock);
1115
1116	while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
1117		sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
1118		LIST_REMOVE(sgl_node, link);
1119		for (j = 0; j < VMBUS_CHAN_PRPLIST_MAX; j++){
1120			if (NULL !=
1121			    (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) {
1122				free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF);
1123			}
1124		}
1125		sglist_free(sgl_node->sgl_data);
1126		free(sgl_node, M_DEVBUF);
1127	}
1128
1129	return (0);
1130}
1131
#if HVS_TIMEOUT_TEST
/**
 * @brief unit test for timed out operations
 *
 * This function provides unit testing capability to simulate
 * timed out operations.  Recompilation with HV_TIMEOUT_TEST=1
 * is required.
 *
 * NOTE(review): this block accesses reqp->vstor_packet.vm_srb while the
 * rest of the file uses reqp->vstor_packet.u.vm_srb — this code likely
 * no longer compiles when HVS_TIMEOUT_TEST is enabled; verify before use.
 *
 * @param reqp pointer to a request structure
 * @param opcode SCSI operation being performed
 * @param wait if 1, wait for I/O to complete
 */
static void
storvsc_timeout_test(struct hv_storvsc_request *reqp,
		uint8_t opcode, int wait)
{
	int ret;
	union ccb *ccb = reqp->ccb;
	struct storvsc_softc *sc = reqp->softc;

	/* Only exercise the request whose CDB matches the chosen opcode. */
	if (reqp->vstor_packet.vm_srb.cdb[0] != opcode) {
		return;
	}

	/* Hold the event mutex across issue + cv_timedwait below. */
	if (wait) {
		mtx_lock(&reqp->event.mtx);
	}
	ret = hv_storvsc_io_request(sc, reqp);
	if (ret != 0) {
		if (wait) {
			mtx_unlock(&reqp->event.mtx);
		}
		printf("%s: io_request failed with %d.\n",
				__func__, ret);
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		mtx_lock(&sc->hs_lock);
		storvsc_free_request(sc, reqp);
		xpt_done(ccb);
		mtx_unlock(&sc->hs_lock);
		return;
	}

	if (wait) {
		xpt_print(ccb->ccb_h.path,
				"%u: %s: waiting for IO return.\n",
				ticks, __func__);
		/* Wait up to 60 seconds for storvsc_io_done() to signal us. */
		ret = cv_timedwait(&reqp->event.cv, &reqp->event.mtx, 60*hz);
		mtx_unlock(&reqp->event.mtx);
		xpt_print(ccb->ccb_h.path, "%u: %s: %s.\n",
				ticks, __func__, (ret == 0)?
				"IO return detected" :
				"IO return not detected");
		/*
		 * Now both the timer handler and io done are running
		 * simultaneously. We want to confirm the io done always
		 * finishes after the timer handler exits. So reqp used by
		 * timer handler is not freed or stale. Do busy loop for
		 * another 1/10 second to make sure io done does
		 * wait for the timer handler to complete.
		 */
		DELAY(100*1000);
		mtx_lock(&sc->hs_lock);
		xpt_print(ccb->ccb_h.path,
				"%u: %s: finishing, queue frozen %d, "
				"ccb status 0x%x scsi_status 0x%x.\n",
				ticks, __func__, sc->hs_frozen,
				ccb->ccb_h.status,
				ccb->csio.scsi_status);
		mtx_unlock(&sc->hs_lock);
	}
}
#endif /* HVS_TIMEOUT_TEST */
1204
#ifdef notyet
/**
 * @brief timeout handler for requests
 *
 * This function is called as a result of a callout expiring.
 * On the first expiry it logs and re-arms the callout for one more
 * timeout period; on the second it freezes the SIM queue.
 * (Currently compiled out via "notyet".)
 *
 * @param arg pointer to a request
 */
static void
storvsc_timeout(void *arg)
{
	struct hv_storvsc_request *reqp = arg;
	struct storvsc_softc *sc = reqp->softc;
	union ccb *ccb = reqp->ccb;

	if (reqp->retries == 0) {
		/* First expiry: log it and grant one more timeout period. */
		mtx_lock(&sc->hs_lock);
		xpt_print(ccb->ccb_h.path,
		    "%u: IO timed out (req=0x%p), wait for another %u secs.\n",
		    ticks, reqp, ccb->ccb_h.timeout / 1000);
		cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL);
		mtx_unlock(&sc->hs_lock);

		reqp->retries++;
		callout_reset_sbt(&reqp->callout, SBT_1MS * ccb->ccb_h.timeout,
		    0, storvsc_timeout, reqp, 0);
#if HVS_TIMEOUT_TEST
		storvsc_timeout_test(reqp, SEND_DIAGNOSTIC, 0);
#endif
		return;
	}

	/* Second expiry: freeze the SIM queue until the I/O returns. */
	mtx_lock(&sc->hs_lock);
	xpt_print(ccb->ccb_h.path,
		"%u: IO (reqp = 0x%p) did not return for %u seconds, %s.\n",
		ticks, reqp, ccb->ccb_h.timeout * (reqp->retries+1) / 1000,
		(sc->hs_frozen == 0)?
		"freezing the queue" : "the queue is already frozen");
	if (sc->hs_frozen == 0) {
		sc->hs_frozen = 1;
		xpt_freeze_simq(xpt_path_sim(ccb->ccb_h.path), 1);
	}
	mtx_unlock(&sc->hs_lock);

#if HVS_TIMEOUT_TEST
	storvsc_timeout_test(reqp, MODE_SELECT_10, 1);
#endif
}
#endif
1254
1255/**
1256 * @brief StorVSC device poll function
1257 *
1258 * This function is responsible for servicing requests when
1259 * interrupts are disabled (i.e when we are dumping core.)
1260 *
1261 * @param sim a pointer to a CAM SCSI interface module
1262 */
1263static void
1264storvsc_poll(struct cam_sim *sim)
1265{
1266	struct storvsc_softc *sc = cam_sim_softc(sim);
1267
1268	mtx_assert(&sc->hs_lock, MA_OWNED);
1269	mtx_unlock(&sc->hs_lock);
1270	hv_storvsc_on_channel_callback(sc->hs_chan);
1271	mtx_lock(&sc->hs_lock);
1272}
1273
1274/**
1275 * @brief StorVSC device action function
1276 *
1277 * This function is responsible for handling SCSI operations which
1278 * are passed from the CAM layer.  The requests are in the form of
1279 * CAM control blocks which indicate the action being performed.
1280 * Not all actions require converting the request to a VSCSI protocol
1281 * message - these actions can be responded to by this driver.
1282 * Requests which are destined for a backend storage device are converted
1283 * to a VSCSI protocol message and sent on the channel connection associated
1284 * with this device.
1285 *
1286 * @param sim pointer to a CAM SCSI interface module
1287 * @param ccb pointer to a CAM control block
1288 */
1289static void
1290storvsc_action(struct cam_sim *sim, union ccb *ccb)
1291{
1292	struct storvsc_softc *sc = cam_sim_softc(sim);
1293	int res;
1294
1295	mtx_assert(&sc->hs_lock, MA_OWNED);
1296	switch (ccb->ccb_h.func_code) {
1297	case XPT_PATH_INQ: {
1298		struct ccb_pathinq *cpi = &ccb->cpi;
1299
1300		cpi->version_num = 1;
1301		cpi->hba_inquiry = PI_TAG_ABLE|PI_SDTR_ABLE;
1302		cpi->target_sprt = 0;
1303		cpi->hba_misc = PIM_NOBUSRESET;
1304		cpi->hba_eng_cnt = 0;
1305		cpi->max_target = STORVSC_MAX_TARGETS;
1306		cpi->max_lun = sc->hs_drv_props->drv_max_luns_per_target;
1307		cpi->initiator_id = cpi->max_target;
1308		cpi->bus_id = cam_sim_bus(sim);
1309		cpi->base_transfer_speed = 300000;
1310		cpi->transport = XPORT_SAS;
1311		cpi->transport_version = 0;
1312		cpi->protocol = PROTO_SCSI;
1313		cpi->protocol_version = SCSI_REV_SPC2;
1314		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1315		strncpy(cpi->hba_vid, sc->hs_drv_props->drv_name, HBA_IDLEN);
1316		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1317		cpi->unit_number = cam_sim_unit(sim);
1318
1319		ccb->ccb_h.status = CAM_REQ_CMP;
1320		xpt_done(ccb);
1321		return;
1322	}
1323	case XPT_GET_TRAN_SETTINGS: {
1324		struct  ccb_trans_settings *cts = &ccb->cts;
1325
1326		cts->transport = XPORT_SAS;
1327		cts->transport_version = 0;
1328		cts->protocol = PROTO_SCSI;
1329		cts->protocol_version = SCSI_REV_SPC2;
1330
1331		/* enable tag queuing and disconnected mode */
1332		cts->proto_specific.valid = CTS_SCSI_VALID_TQ;
1333		cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
1334		cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
1335		cts->xport_specific.valid = CTS_SPI_VALID_DISC;
1336		cts->xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
1337
1338		ccb->ccb_h.status = CAM_REQ_CMP;
1339		xpt_done(ccb);
1340		return;
1341	}
1342	case XPT_SET_TRAN_SETTINGS:	{
1343		ccb->ccb_h.status = CAM_REQ_CMP;
1344		xpt_done(ccb);
1345		return;
1346	}
1347	case XPT_CALC_GEOMETRY:{
1348		cam_calc_geometry(&ccb->ccg, 1);
1349		xpt_done(ccb);
1350		return;
1351	}
1352	case  XPT_RESET_BUS:
1353	case  XPT_RESET_DEV:{
1354#if HVS_HOST_RESET
1355		if ((res = hv_storvsc_host_reset(sc)) != 0) {
1356			xpt_print(ccb->ccb_h.path,
1357				"hv_storvsc_host_reset failed with %d\n", res);
1358			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1359			xpt_done(ccb);
1360			return;
1361		}
1362		ccb->ccb_h.status = CAM_REQ_CMP;
1363		xpt_done(ccb);
1364		return;
1365#else
1366		xpt_print(ccb->ccb_h.path,
1367				  "%s reset not supported.\n",
1368				  (ccb->ccb_h.func_code == XPT_RESET_BUS)?
1369				  "bus" : "dev");
1370		ccb->ccb_h.status = CAM_REQ_INVALID;
1371		xpt_done(ccb);
1372		return;
1373#endif	/* HVS_HOST_RESET */
1374	}
1375	case XPT_SCSI_IO:
1376	case XPT_IMMED_NOTIFY: {
1377		struct hv_storvsc_request *reqp = NULL;
1378
1379		if (ccb->csio.cdb_len == 0) {
1380			panic("cdl_len is 0\n");
1381		}
1382
1383		if (LIST_EMPTY(&sc->hs_free_list)) {
1384			ccb->ccb_h.status = CAM_REQUEUE_REQ;
1385			if (sc->hs_frozen == 0) {
1386				sc->hs_frozen = 1;
1387				xpt_freeze_simq(sim, /* count*/1);
1388			}
1389			xpt_done(ccb);
1390			return;
1391		}
1392
1393		reqp = LIST_FIRST(&sc->hs_free_list);
1394		LIST_REMOVE(reqp, link);
1395
1396		bzero(reqp, sizeof(struct hv_storvsc_request));
1397		reqp->softc = sc;
1398
1399		ccb->ccb_h.status |= CAM_SIM_QUEUED;
1400		if ((res = create_storvsc_request(ccb, reqp)) != 0) {
1401			ccb->ccb_h.status = CAM_REQ_INVALID;
1402			xpt_done(ccb);
1403			return;
1404		}
1405
1406#ifdef notyet
1407		if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1408			callout_init(&reqp->callout, 1);
1409			callout_reset_sbt(&reqp->callout,
1410			    SBT_1MS * ccb->ccb_h.timeout, 0,
1411			    storvsc_timeout, reqp, 0);
1412#if HVS_TIMEOUT_TEST
1413			cv_init(&reqp->event.cv, "storvsc timeout cv");
1414			mtx_init(&reqp->event.mtx, "storvsc timeout mutex",
1415					NULL, MTX_DEF);
1416			switch (reqp->vstor_packet.vm_srb.cdb[0]) {
1417				case MODE_SELECT_10:
1418				case SEND_DIAGNOSTIC:
1419					/* To have timer send the request. */
1420					return;
1421				default:
1422					break;
1423			}
1424#endif /* HVS_TIMEOUT_TEST */
1425		}
1426#endif
1427
1428		if ((res = hv_storvsc_io_request(sc, reqp)) != 0) {
1429			xpt_print(ccb->ccb_h.path,
1430				"hv_storvsc_io_request failed with %d\n", res);
1431			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1432			storvsc_free_request(sc, reqp);
1433			xpt_done(ccb);
1434			return;
1435		}
1436		return;
1437	}
1438
1439	default:
1440		ccb->ccb_h.status = CAM_REQ_INVALID;
1441		xpt_done(ccb);
1442		return;
1443	}
1444}
1445
1446/**
1447 * @brief destroy bounce buffer
1448 *
1449 * This function is responsible for destroy a Scatter/Gather list
1450 * that create by storvsc_create_bounce_buffer()
1451 *
1452 * @param sgl- the Scatter/Gather need be destroy
1453 * @param sg_count- page count of the SG list.
1454 *
1455 */
1456static void
1457storvsc_destroy_bounce_buffer(struct sglist *sgl)
1458{
1459	struct hv_sgl_node *sgl_node = NULL;
1460	if (LIST_EMPTY(&g_hv_sgl_page_pool.in_use_sgl_list)) {
1461		printf("storvsc error: not enough in use sgl\n");
1462		return;
1463	}
1464	sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.in_use_sgl_list);
1465	LIST_REMOVE(sgl_node, link);
1466	sgl_node->sgl_data = sgl;
1467	LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link);
1468}
1469
1470/**
1471 * @brief create bounce buffer
1472 *
1473 * This function is responsible for create a Scatter/Gather list,
1474 * which hold several pages that can be aligned with page size.
1475 *
1476 * @param seg_count- SG-list segments count
1477 * @param write - if WRITE_TYPE, set SG list page used size to 0,
1478 * otherwise set used size to page size.
1479 *
1480 * return NULL if create failed
1481 */
1482static struct sglist *
1483storvsc_create_bounce_buffer(uint16_t seg_count, int write)
1484{
1485	int i = 0;
1486	struct sglist *bounce_sgl = NULL;
1487	unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
1488	struct hv_sgl_node *sgl_node = NULL;
1489
1490	/* get struct sglist from free_sgl_list */
1491	if (LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
1492		printf("storvsc error: not enough free sgl\n");
1493		return NULL;
1494	}
1495	sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
1496	LIST_REMOVE(sgl_node, link);
1497	bounce_sgl = sgl_node->sgl_data;
1498	LIST_INSERT_HEAD(&g_hv_sgl_page_pool.in_use_sgl_list, sgl_node, link);
1499
1500	bounce_sgl->sg_maxseg = seg_count;
1501
1502	if (write == WRITE_TYPE)
1503		bounce_sgl->sg_nseg = 0;
1504	else
1505		bounce_sgl->sg_nseg = seg_count;
1506
1507	for (i = 0; i < seg_count; i++)
1508	        bounce_sgl->sg_segs[i].ss_len = buf_len;
1509
1510	return bounce_sgl;
1511}
1512
1513/**
1514 * @brief copy data from SG list to bounce buffer
1515 *
1516 * This function is responsible for copy data from one SG list's segments
1517 * to another SG list which used as bounce buffer.
1518 *
1519 * @param bounce_sgl - the destination SG list
1520 * @param orig_sgl - the segment of the source SG list.
1521 * @param orig_sgl_count - the count of segments.
1522 * @param orig_sgl_count - indicate which segment need bounce buffer,
1523 *  set 1 means need.
1524 *
1525 */
1526static void
1527storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl,
1528			       bus_dma_segment_t *orig_sgl,
1529			       unsigned int orig_sgl_count,
1530			       uint64_t seg_bits)
1531{
1532	int src_sgl_idx = 0;
1533
1534	for (src_sgl_idx = 0; src_sgl_idx < orig_sgl_count; src_sgl_idx++) {
1535		if (seg_bits & (1 << src_sgl_idx)) {
1536			memcpy((void*)bounce_sgl->sg_segs[src_sgl_idx].ss_paddr,
1537			    (void*)orig_sgl[src_sgl_idx].ds_addr,
1538			    orig_sgl[src_sgl_idx].ds_len);
1539
1540			bounce_sgl->sg_segs[src_sgl_idx].ss_len =
1541			    orig_sgl[src_sgl_idx].ds_len;
1542		}
1543	}
1544}
1545
1546/**
1547 * @brief copy data from SG list which used as bounce to another SG list
1548 *
1549 * This function is responsible for copy data from one SG list with bounce
1550 * buffer to another SG list's segments.
1551 *
1552 * @param dest_sgl - the destination SG list's segments
1553 * @param dest_sgl_count - the count of destination SG list's segment.
1554 * @param src_sgl - the source SG list.
1555 * @param seg_bits - indicate which segment used bounce buffer of src SG-list.
1556 *
1557 */
1558void
1559storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl,
1560				    unsigned int dest_sgl_count,
1561				    struct sglist* src_sgl,
1562				    uint64_t seg_bits)
1563{
1564	int sgl_idx = 0;
1565
1566	for (sgl_idx = 0; sgl_idx < dest_sgl_count; sgl_idx++) {
1567		if (seg_bits & (1 << sgl_idx)) {
1568			memcpy((void*)(dest_sgl[sgl_idx].ds_addr),
1569			    (void*)(src_sgl->sg_segs[sgl_idx].ss_paddr),
1570			    src_sgl->sg_segs[sgl_idx].ss_len);
1571		}
1572	}
1573}
1574
1575/**
1576 * @brief check SG list with bounce buffer or not
1577 *
1578 * This function is responsible for check if need bounce buffer for SG list.
1579 *
1580 * @param sgl - the SG list's segments
1581 * @param sg_count - the count of SG list's segment.
1582 * @param bits - segmengs number that need bounce buffer
1583 *
1584 * return -1 if SG list needless bounce buffer
1585 */
1586static int
1587storvsc_check_bounce_buffer_sgl(bus_dma_segment_t *sgl,
1588				unsigned int sg_count,
1589				uint64_t *bits)
1590{
1591	int i = 0;
1592	int offset = 0;
1593	uint64_t phys_addr = 0;
1594	uint64_t tmp_bits = 0;
1595	boolean_t found_hole = FALSE;
1596	boolean_t pre_aligned = TRUE;
1597
1598	if (sg_count < 2){
1599		return -1;
1600	}
1601
1602	*bits = 0;
1603
1604	phys_addr = vtophys(sgl[0].ds_addr);
1605	offset =  phys_addr - trunc_page(phys_addr);
1606
1607	if (offset != 0) {
1608		pre_aligned = FALSE;
1609		tmp_bits |= 1;
1610	}
1611
1612	for (i = 1; i < sg_count; i++) {
1613		phys_addr = vtophys(sgl[i].ds_addr);
1614		offset =  phys_addr - trunc_page(phys_addr);
1615
1616		if (offset == 0) {
1617			if (FALSE == pre_aligned){
1618				/*
1619				 * This segment is aligned, if the previous
1620				 * one is not aligned, find a hole
1621				 */
1622				found_hole = TRUE;
1623			}
1624			pre_aligned = TRUE;
1625		} else {
1626			tmp_bits |= 1 << i;
1627			if (!pre_aligned) {
1628				if (phys_addr != vtophys(sgl[i-1].ds_addr +
1629				    sgl[i-1].ds_len)) {
1630					/*
1631					 * Check whether connect to previous
1632					 * segment,if not, find the hole
1633					 */
1634					found_hole = TRUE;
1635				}
1636			} else {
1637				found_hole = TRUE;
1638			}
1639			pre_aligned = FALSE;
1640		}
1641	}
1642
1643	if (!found_hole) {
1644		return (-1);
1645	} else {
1646		*bits = tmp_bits;
1647		return 0;
1648	}
1649}
1650
1651/**
1652 * @brief Fill in a request structure based on a CAM control block
1653 *
1654 * Fills in a request structure based on the contents of a CAM control
1655 * block.  The request structure holds the payload information for
1656 * VSCSI protocol request.
1657 *
1658 * @param ccb pointer to a CAM contorl block
1659 * @param reqp pointer to a request structure
1660 */
1661static int
1662create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp)
1663{
1664	struct ccb_scsiio *csio = &ccb->csio;
1665	uint64_t phys_addr;
1666	uint32_t bytes_to_copy = 0;
1667	uint32_t pfn_num = 0;
1668	uint32_t pfn;
1669	uint64_t not_aligned_seg_bits = 0;
1670	struct hvs_gpa_range *prplist;
1671
1672	/* refer to struct vmscsi_req for meanings of these two fields */
1673	reqp->vstor_packet.u.vm_srb.port =
1674		cam_sim_unit(xpt_path_sim(ccb->ccb_h.path));
1675	reqp->vstor_packet.u.vm_srb.path_id =
1676		cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1677
1678	reqp->vstor_packet.u.vm_srb.target_id = ccb->ccb_h.target_id;
1679	reqp->vstor_packet.u.vm_srb.lun = ccb->ccb_h.target_lun;
1680
1681	reqp->vstor_packet.u.vm_srb.cdb_len = csio->cdb_len;
1682	if(ccb->ccb_h.flags & CAM_CDB_POINTER) {
1683		memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_ptr,
1684			csio->cdb_len);
1685	} else {
1686		memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_bytes,
1687			csio->cdb_len);
1688	}
1689
1690	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
1691	case CAM_DIR_OUT:
1692		reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE;
1693		break;
1694	case CAM_DIR_IN:
1695		reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE;
1696		break;
1697	case CAM_DIR_NONE:
1698		reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
1699		break;
1700	default:
1701		reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
1702		break;
1703	}
1704
1705	reqp->sense_data     = &csio->sense_data;
1706	reqp->sense_info_len = csio->sense_len;
1707
1708	reqp->ccb = ccb;
1709
1710	if (0 == csio->dxfer_len) {
1711		return (0);
1712	}
1713
1714	prplist = &reqp->prp_list;
1715	prplist->gpa_range.gpa_len = csio->dxfer_len;
1716
1717	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
1718	case CAM_DATA_VADDR:
1719	{
1720		bytes_to_copy = csio->dxfer_len;
1721		phys_addr = vtophys(csio->data_ptr);
1722		prplist->gpa_range.gpa_ofs = phys_addr & PAGE_MASK;
1723
1724		while (bytes_to_copy != 0) {
1725			int bytes, page_offset;
1726			phys_addr =
1727			    vtophys(&csio->data_ptr[prplist->gpa_range.gpa_len -
1728			    bytes_to_copy]);
1729			pfn = phys_addr >> PAGE_SHIFT;
1730			prplist->gpa_page[pfn_num] = pfn;
1731			page_offset = phys_addr & PAGE_MASK;
1732
1733			bytes = min(PAGE_SIZE - page_offset, bytes_to_copy);
1734
1735			bytes_to_copy -= bytes;
1736			pfn_num++;
1737		}
1738		reqp->prp_cnt = pfn_num;
1739		break;
1740	}
1741
1742	case CAM_DATA_SG:
1743	{
1744		int i = 0;
1745		int offset = 0;
1746		int ret;
1747
1748		bus_dma_segment_t *storvsc_sglist =
1749		    (bus_dma_segment_t *)ccb->csio.data_ptr;
1750		u_int16_t storvsc_sg_count = ccb->csio.sglist_cnt;
1751
1752		printf("Storvsc: get SG I/O operation, %d\n",
1753		    reqp->vstor_packet.u.vm_srb.data_in);
1754
1755		if (storvsc_sg_count > VMBUS_CHAN_PRPLIST_MAX){
1756			printf("Storvsc: %d segments is too much, "
1757			    "only support %d segments\n",
1758			    storvsc_sg_count, VMBUS_CHAN_PRPLIST_MAX);
1759			return (EINVAL);
1760		}
1761
1762		/*
1763		 * We create our own bounce buffer function currently. Idealy
1764		 * we should use BUS_DMA(9) framework. But with current BUS_DMA
1765		 * code there is no callback API to check the page alignment of
1766		 * middle segments before busdma can decide if a bounce buffer
1767		 * is needed for particular segment. There is callback,
1768		 * "bus_dma_filter_t *filter", but the parrameters are not
1769		 * sufficient for storvsc driver.
1770		 * TODO:
1771		 *	Add page alignment check in BUS_DMA(9) callback. Once
1772		 *	this is complete, switch the following code to use
1773		 *	BUS_DMA(9) for storvsc bounce buffer support.
1774		 */
1775		/* check if we need to create bounce buffer */
1776		ret = storvsc_check_bounce_buffer_sgl(storvsc_sglist,
1777		    storvsc_sg_count, &not_aligned_seg_bits);
1778		if (ret != -1) {
1779			reqp->bounce_sgl =
1780			    storvsc_create_bounce_buffer(storvsc_sg_count,
1781			    reqp->vstor_packet.u.vm_srb.data_in);
1782			if (NULL == reqp->bounce_sgl) {
1783				printf("Storvsc_error: "
1784				    "create bounce buffer failed.\n");
1785				return (ENOMEM);
1786			}
1787
1788			reqp->bounce_sgl_count = storvsc_sg_count;
1789			reqp->not_aligned_seg_bits = not_aligned_seg_bits;
1790
1791			/*
1792			 * if it is write, we need copy the original data
1793			 *to bounce buffer
1794			 */
1795			if (WRITE_TYPE == reqp->vstor_packet.u.vm_srb.data_in) {
1796				storvsc_copy_sgl_to_bounce_buf(
1797				    reqp->bounce_sgl,
1798				    storvsc_sglist,
1799				    storvsc_sg_count,
1800				    reqp->not_aligned_seg_bits);
1801			}
1802
1803			/* transfer virtual address to physical frame number */
1804			if (reqp->not_aligned_seg_bits & 0x1){
1805 				phys_addr =
1806				    vtophys(reqp->bounce_sgl->sg_segs[0].ss_paddr);
1807			}else{
1808 				phys_addr =
1809					vtophys(storvsc_sglist[0].ds_addr);
1810			}
1811			prplist->gpa_range.gpa_ofs = phys_addr & PAGE_MASK;
1812
1813			pfn = phys_addr >> PAGE_SHIFT;
1814			prplist->gpa_page[0] = pfn;
1815
1816			for (i = 1; i < storvsc_sg_count; i++) {
1817				if (reqp->not_aligned_seg_bits & (1 << i)) {
1818					phys_addr =
1819					    vtophys(reqp->bounce_sgl->sg_segs[i].ss_paddr);
1820				} else {
1821					phys_addr =
1822					    vtophys(storvsc_sglist[i].ds_addr);
1823				}
1824
1825				pfn = phys_addr >> PAGE_SHIFT;
1826				prplist->gpa_page[i] = pfn;
1827			}
1828			reqp->prp_cnt = i;
1829		} else {
1830			phys_addr = vtophys(storvsc_sglist[0].ds_addr);
1831
1832			prplist->gpa_range.gpa_ofs = phys_addr & PAGE_MASK;
1833
1834			for (i = 0; i < storvsc_sg_count; i++) {
1835				phys_addr = vtophys(storvsc_sglist[i].ds_addr);
1836				pfn = phys_addr >> PAGE_SHIFT;
1837				prplist->gpa_page[i] = pfn;
1838			}
1839			reqp->prp_cnt = i;
1840
1841			/* check the last segment cross boundary or not */
1842			offset = phys_addr & PAGE_MASK;
1843			if (offset) {
1844				/* Add one more PRP entry */
1845				phys_addr =
1846				    vtophys(storvsc_sglist[i-1].ds_addr +
1847				    PAGE_SIZE - offset);
1848				pfn = phys_addr >> PAGE_SHIFT;
1849				prplist->gpa_page[i] = pfn;
1850				reqp->prp_cnt++;
1851			}
1852
1853			reqp->bounce_sgl_count = 0;
1854		}
1855		break;
1856	}
1857	default:
1858		printf("Unknow flags: %d\n", ccb->ccb_h.flags);
1859		return(EINVAL);
1860	}
1861
1862	return(0);
1863}
1864
1865/**
1866 * @brief completion function before returning to CAM
1867 *
1868 * I/O process has been completed and the result needs
1869 * to be passed to the CAM layer.
1870 * Free resources related to this request.
1871 *
1872 * @param reqp pointer to a request structure
1873 */
1874static void
1875storvsc_io_done(struct hv_storvsc_request *reqp)
1876{
1877	union ccb *ccb = reqp->ccb;
1878	struct ccb_scsiio *csio = &ccb->csio;
1879	struct storvsc_softc *sc = reqp->softc;
1880	struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb;
1881	bus_dma_segment_t *ori_sglist = NULL;
1882	int ori_sg_count = 0;
1883	/* destroy bounce buffer if it is used */
1884	if (reqp->bounce_sgl_count) {
1885		ori_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr;
1886		ori_sg_count = ccb->csio.sglist_cnt;
1887
1888		/*
1889		 * If it is READ operation, we should copy back the data
1890		 * to original SG list.
1891		 */
1892		if (READ_TYPE == reqp->vstor_packet.u.vm_srb.data_in) {
1893			storvsc_copy_from_bounce_buf_to_sgl(ori_sglist,
1894			    ori_sg_count,
1895			    reqp->bounce_sgl,
1896			    reqp->not_aligned_seg_bits);
1897		}
1898
1899		storvsc_destroy_bounce_buffer(reqp->bounce_sgl);
1900		reqp->bounce_sgl_count = 0;
1901	}
1902
1903	if (reqp->retries > 0) {
1904		mtx_lock(&sc->hs_lock);
1905#if HVS_TIMEOUT_TEST
1906		xpt_print(ccb->ccb_h.path,
1907			"%u: IO returned after timeout, "
1908			"waking up timer handler if any.\n", ticks);
1909		mtx_lock(&reqp->event.mtx);
1910		cv_signal(&reqp->event.cv);
1911		mtx_unlock(&reqp->event.mtx);
1912#endif
1913		reqp->retries = 0;
1914		xpt_print(ccb->ccb_h.path,
1915			"%u: IO returned after timeout, "
1916			"stopping timer if any.\n", ticks);
1917		mtx_unlock(&sc->hs_lock);
1918	}
1919
1920#ifdef notyet
1921	/*
1922	 * callout_drain() will wait for the timer handler to finish
1923	 * if it is running. So we don't need any lock to synchronize
1924	 * between this routine and the timer handler.
1925	 * Note that we need to make sure reqp is not freed when timer
1926	 * handler is using or will use it.
1927	 */
1928	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1929		callout_drain(&reqp->callout);
1930	}
1931#endif
1932
1933	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1934	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1935	if (vm_srb->scsi_status == SCSI_STATUS_OK) {
1936		const struct scsi_generic *cmd;
1937
1938		if (vm_srb->srb_status != SRB_STATUS_SUCCESS) {
1939			if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
1940				xpt_print(ccb->ccb_h.path, "invalid LUN %d\n",
1941				    vm_srb->lun);
1942			} else {
1943				xpt_print(ccb->ccb_h.path, "Unknown SRB flag: %d\n",
1944				    vm_srb->srb_status);
1945			}
1946			/*
1947			 * If there are errors, for example, invalid LUN,
1948			 * host will inform VM through SRB status.
1949			 */
1950			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
1951		} else {
1952			ccb->ccb_h.status |= CAM_REQ_CMP;
1953		}
1954
1955		cmd = (const struct scsi_generic *)
1956		    ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
1957		     csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes);
1958		if (cmd->opcode == INQUIRY) {
1959			struct scsi_inquiry_data *inq_data =
1960			    (struct scsi_inquiry_data *)csio->data_ptr;
1961			uint8_t *resp_buf = (uint8_t *)csio->data_ptr;
1962			int resp_xfer_len, resp_buf_len, data_len;
1963
1964			/* Get the buffer length reported by host */
1965			resp_xfer_len = vm_srb->transfer_len;
1966			/* Get the available buffer length */
1967			resp_buf_len = resp_xfer_len >= 5 ? resp_buf[4] + 5 : 0;
1968			data_len = (resp_buf_len < resp_xfer_len) ?
1969			    resp_buf_len : resp_xfer_len;
1970
1971			if (bootverbose && data_len >= 5) {
1972				xpt_print(ccb->ccb_h.path, "storvsc inquiry "
1973				    "(%d) [%x %x %x %x %x ... ]\n", data_len,
1974				    resp_buf[0], resp_buf[1], resp_buf[2],
1975				    resp_buf[3], resp_buf[4]);
1976			}
1977			if (vm_srb->srb_status == SRB_STATUS_SUCCESS &&
1978			    data_len > SHORT_INQUIRY_LENGTH) {
1979				char vendor[16];
1980
1981				cam_strvis(vendor, inq_data->vendor,
1982				    sizeof(inq_data->vendor), sizeof(vendor));
1983
1984				/*
1985				 * XXX: Upgrade SPC2 to SPC3 if host is WIN8 or
1986				 * WIN2012 R2 in order to support UNMAP feature.
1987				 */
1988				if (!strncmp(vendor, "Msft", 4) &&
1989				    SID_ANSI_REV(inq_data) == SCSI_REV_SPC2 &&
1990				    (vmstor_proto_version ==
1991				     VMSTOR_PROTOCOL_VERSION_WIN8_1 ||
1992				     vmstor_proto_version ==
1993				     VMSTOR_PROTOCOL_VERSION_WIN8)) {
1994					inq_data->version = SCSI_REV_SPC3;
1995					if (bootverbose) {
1996						xpt_print(ccb->ccb_h.path,
1997						    "storvsc upgrades "
1998						    "SPC2 to SPC3\n");
1999					}
2000				}
2001			}
2002		}
2003	} else {
2004		mtx_lock(&sc->hs_lock);
2005		xpt_print(ccb->ccb_h.path,
2006			"storvsc scsi_status = %d\n",
2007			vm_srb->scsi_status);
2008		mtx_unlock(&sc->hs_lock);
2009		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2010	}
2011
2012	ccb->csio.scsi_status = (vm_srb->scsi_status & 0xFF);
2013	ccb->csio.resid = ccb->csio.dxfer_len - vm_srb->transfer_len;
2014
2015	if (reqp->sense_info_len != 0) {
2016		csio->sense_resid = csio->sense_len - reqp->sense_info_len;
2017		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2018	}
2019
2020	mtx_lock(&sc->hs_lock);
2021	if (reqp->softc->hs_frozen == 1) {
2022		xpt_print(ccb->ccb_h.path,
2023			"%u: storvsc unfreezing softc 0x%p.\n",
2024			ticks, reqp->softc);
2025		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2026		reqp->softc->hs_frozen = 0;
2027	}
2028	storvsc_free_request(sc, reqp);
2029	mtx_unlock(&sc->hs_lock);
2030
2031	xpt_done_direct(ccb);
2032}
2033
2034/**
2035 * @brief Free a request structure
2036 *
2037 * Free a request structure by returning it to the free list
2038 *
2039 * @param sc pointer to a softc
2040 * @param reqp pointer to a request structure
2041 */
2042static void
2043storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp)
2044{
2045
2046	LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
2047}
2048
2049/**
2050 * @brief Determine type of storage device from GUID
2051 *
2052 * Using the type GUID, determine if this is a StorVSC (paravirtual
2053 * SCSI or BlkVSC (paravirtual IDE) device.
2054 *
2055 * @param dev a device
2056 * returns an enum
2057 */
2058static enum hv_storage_type
2059storvsc_get_storage_type(device_t dev)
2060{
2061	device_t parent = device_get_parent(dev);
2062
2063	if (VMBUS_PROBE_GUID(parent, dev, &gBlkVscDeviceType) == 0)
2064		return DRIVER_BLKVSC;
2065	if (VMBUS_PROBE_GUID(parent, dev, &gStorVscDeviceType) == 0)
2066		return DRIVER_STORVSC;
2067	return DRIVER_UNKNOWN;
2068}
2069