hyperv.h revision 251775
/*-
 * Copyright (c) 2009-2012 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * HyperV definitions for messages that are sent between instances of the
 * Channel Management Library in separate partitions, or in some cases,
 * back to itself.
 */

#ifndef __HYPERV_H__
#define __HYPERV_H__

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/sema.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <amd64/include/xen/synch_bitops.h>
#include <amd64/include/atomic.h>

typedef uint8_t	hv_bool_uint8_t;

#define HV_S_OK			0x00000000
#define HV_E_FAIL		0x80004005
#define HV_ERROR_NOT_SUPPORTED	0x80070032
#define HV_ERROR_MACHINE_LOCKED	0x800704F7
/*
 * The VMBus protocol revision number, used to ensure that both ends of a
 * partition connection are using compatible versions.
 */

#define HV_VMBUS_REVISION_NUMBER	13

/*
 * The maximum size of a pipe payload (16 KB).
 */

#define HV_MAX_PIPE_DATA_PAYLOAD	(sizeof(BYTE) * 16384)

/*
 * Define pipe_mode values
 */

#define HV_VMBUS_PIPE_TYPE_BYTE		0x00000000
#define HV_VMBUS_PIPE_TYPE_MESSAGE	0x00000004

/*
 * The size of the user defined data buffer for non-pipe offers
 */

#define HV_MAX_USER_DEFINED_BYTES	120

/*
 * The size of the user defined data buffer for pipe offers
 */

#define HV_MAX_PIPE_USER_DEFINED_BYTES	116


#define HV_MAX_PAGE_BUFFER_COUNT	16
#define HV_MAX_MULTIPAGE_BUFFER_COUNT	32

#define HV_ALIGN_UP(value, align)					\
		(((value) & (align-1)) ?				\
		    (((value) + (align-1)) & ~(align-1) ) : (value))

#define HV_ALIGN_DOWN(value, align) ( (value) & ~(align-1) )

#define HV_NUM_PAGES_SPANNED(addr, len)					\
		((HV_ALIGN_UP(addr+len, PAGE_SIZE) -			\
		    HV_ALIGN_DOWN(addr, PAGE_SIZE)) >> PAGE_SHIFT )
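
/*
 * Illustrative example (not part of the original header): with the usual
 * 4 KB pages (PAGE_SIZE = 4096, PAGE_SHIFT = 12), a buffer that starts at
 * address 0x1ff0 and is 0x30 bytes long straddles a page boundary, so
 *
 *	HV_ALIGN_DOWN(0x1ff0, PAGE_SIZE)        = 0x1000
 *	HV_ALIGN_UP(0x1ff0 + 0x30, PAGE_SIZE)   = 0x3000
 *	HV_NUM_PAGES_SPANNED(0x1ff0, 0x30)      = (0x3000 - 0x1000) >> 12 = 2
 *
 * i.e. two PFNs are needed to describe it in a GPADL or GPA range.
 */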

typedef struct hv_guid {
	unsigned char data[16];
} __packed hv_guid;

/*
 * At the center of the Channel Management library is
 * the Channel Offer. This struct contains the
 * fundamental information about an offer.
 */

typedef struct hv_vmbus_channel_offer {
	hv_guid		interface_type;
	hv_guid		interface_instance;
	uint64_t	interrupt_latency_in_100ns_units;
	uint32_t	interface_revision;
	uint32_t	server_context_area_size; /* in bytes */
	uint16_t	channel_flags;
	uint16_t	mmio_megabytes;		  /* in megabytes (1024 * 1024 bytes) */
	union
	{
		/*
		 * Non-pipes: The user has HV_MAX_USER_DEFINED_BYTES bytes.
		 */
		struct {
			uint8_t	user_defined[HV_MAX_USER_DEFINED_BYTES];
		} __packed standard;

		/*
		 * Pipes: The following structure is an integrated pipe protocol, which
		 *        is implemented on top of standard user-defined data. Pipe
		 *        clients have HV_MAX_PIPE_USER_DEFINED_BYTES left for their
		 *        own use.
		 */
		struct {
			uint32_t	pipe_mode;
			uint8_t	user_defined[HV_MAX_PIPE_USER_DEFINED_BYTES];
		} __packed pipe;
	} u;

	uint32_t	padding;

} __packed hv_vmbus_channel_offer;

typedef uint32_t hv_gpadl_handle;

typedef struct {
	uint16_t type;
	uint16_t data_offset8;
	uint16_t length8;
	uint16_t flags;
	uint64_t transaction_id;
} __packed hv_vm_packet_descriptor;

typedef uint32_t hv_previous_packet_offset;

typedef struct {
	hv_previous_packet_offset	previous_packet_start_offset;
	hv_vm_packet_descriptor		descriptor;
} __packed hv_vm_packet_header;

typedef struct {
	uint32_t byte_count;
	uint32_t byte_offset;
} __packed hv_vm_transfer_page;

typedef struct {
	hv_vm_packet_descriptor	d;
	uint16_t		transfer_page_set_id;
	hv_bool_uint8_t		sender_owns_set;
	uint8_t			reserved;
	uint32_t		range_count;
	hv_vm_transfer_page	ranges[1];
} __packed hv_vm_transfer_page_packet_header;

typedef struct {
	hv_vm_packet_descriptor	d;
	uint32_t		gpadl;
	uint32_t		reserved;
} __packed hv_vm_gpadl_packet_header;

typedef struct {
	hv_vm_packet_descriptor	d;
	uint32_t		gpadl;
	uint16_t		transfer_page_set_id;
	uint16_t		reserved;
} __packed hv_vm_add_remove_transfer_page_set;

/*
 * This structure defines a range in guest
 * physical space that can be made
 * to look virtually contiguous.
 */

typedef struct {
	uint32_t byte_count;
	uint32_t byte_offset;
	uint64_t pfn_array[0];
} __packed hv_gpa_range;

/*
 * This is the format for an Establish Gpadl packet, which contains a handle
 * by which this GPADL will be known and a set of GPA ranges associated with
 * it.  This can be converted to an MDL by the guest OS.  If there are
 * multiple GPA ranges, then the resulting MDL will be "chained," representing
 * multiple VA ranges.
 */

typedef struct {
	hv_vm_packet_descriptor	d;
	uint32_t		gpadl;
	uint32_t		range_count;
	hv_gpa_range		range[1];
} __packed hv_vm_establish_gpadl;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */

typedef struct {
	hv_vm_packet_descriptor	d;
	uint32_t		gpadl;
				/* for alignment to an 8-byte boundary */
	uint32_t		reserved;
} __packed hv_vm_teardown_gpadl;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */

typedef struct {
	hv_vm_packet_descriptor	d;
	uint32_t		reserved;
	uint32_t		range_count;
	hv_gpa_range		range[1];
} __packed hv_vm_data_gpa_direct;

/*
 * This is the format for an Additional Data packet.
 */
typedef struct {
	hv_vm_packet_descriptor	d;
	uint64_t		total_bytes;
	uint32_t		byte_offset;
	uint32_t		byte_count;
	uint8_t			data[1];
} __packed hv_vm_additional_data;

typedef union {
	hv_vm_packet_descriptor             simple_header;
	hv_vm_transfer_page_packet_header   transfer_page_header;
	hv_vm_gpadl_packet_header           gpadl_header;
	hv_vm_add_remove_transfer_page_set  add_remove_transfer_page_header;
	hv_vm_establish_gpadl               establish_gpadl_header;
	hv_vm_teardown_gpadl                teardown_gpadl_header;
	hv_vm_data_gpa_direct               data_gpa_direct_header;
} __packed hv_vm_packet_largest_possible_header;

typedef enum {
	HV_VMBUS_PACKET_TYPE_INVALID				= 0x0,
	HV_VMBUS_PACKET_TYPES_SYNCH				= 0x1,
	HV_VMBUS_PACKET_TYPE_ADD_TRANSFER_PAGE_SET		= 0x2,
	HV_VMBUS_PACKET_TYPE_REMOVE_TRANSFER_PAGE_SET		= 0x3,
	HV_VMBUS_PACKET_TYPE_ESTABLISH_GPADL			= 0x4,
	HV_VMBUS_PACKET_TYPE_TEAR_DOWN_GPADL			= 0x5,
	HV_VMBUS_PACKET_TYPE_DATA_IN_BAND			= 0x6,
	HV_VMBUS_PACKET_TYPE_DATA_USING_TRANSFER_PAGES		= 0x7,
	HV_VMBUS_PACKET_TYPE_DATA_USING_GPADL			= 0x8,
	HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT		= 0x9,
	HV_VMBUS_PACKET_TYPE_CANCEL_REQUEST			= 0xa,
	HV_VMBUS_PACKET_TYPE_COMPLETION				= 0xb,
	HV_VMBUS_PACKET_TYPE_DATA_USING_ADDITIONAL_PACKETS	= 0xc,
	HV_VMBUS_PACKET_TYPE_ADDITIONAL_DATA			= 0xd
} hv_vmbus_packet_type;

#define HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED    1

/*
 * Version 1 messages
 */
typedef enum {
	HV_CHANNEL_MESSAGE_INVALID			= 0,
	HV_CHANNEL_MESSAGE_OFFER_CHANNEL		= 1,
	HV_CHANNEL_MESSAGE_RESCIND_CHANNEL_OFFER	= 2,
	HV_CHANNEL_MESSAGE_REQUEST_OFFERS		= 3,
	HV_CHANNEL_MESSAGE_ALL_OFFERS_DELIVERED		= 4,
	HV_CHANNEL_MESSAGE_OPEN_CHANNEL			= 5,
	HV_CHANNEL_MESSAGE_OPEN_CHANNEL_RESULT		= 6,
	HV_CHANNEL_MESSAGE_CLOSE_CHANNEL		= 7,
	HV_CHANNEL_MESSAGEL_GPADL_HEADER		= 8,
	HV_CHANNEL_MESSAGE_GPADL_BODY			= 9,
	HV_CHANNEL_MESSAGE_GPADL_CREATED		= 10,
	HV_CHANNEL_MESSAGE_GPADL_TEARDOWN		= 11,
	HV_CHANNEL_MESSAGE_GPADL_TORNDOWN		= 12,
	HV_CHANNEL_MESSAGE_REL_ID_RELEASED		= 13,
	HV_CHANNEL_MESSAGE_INITIATED_CONTACT		= 14,
	HV_CHANNEL_MESSAGE_VERSION_RESPONSE		= 15,
	HV_CHANNEL_MESSAGE_UNLOAD			= 16,

#ifdef	HV_VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
	HV_CHANNEL_MESSAGE_VIEW_RANGE_ADD		= 17,
	HV_CHANNEL_MESSAGE_VIEW_RANGE_REMOVE		= 18,
#endif
	HV_CHANNEL_MESSAGE_COUNT
} hv_vmbus_channel_msg_type;

typedef struct {
	hv_vmbus_channel_msg_type	message_type;
	uint32_t			padding;
} __packed hv_vmbus_channel_msg_header;

/*
 * Query VMBus Version parameters
 */
typedef struct {
	hv_vmbus_channel_msg_header	header;
	uint32_t			version;
} __packed hv_vmbus_channel_query_vmbus_version;

/*
 * VMBus Version Supported parameters
 */
typedef struct {
	hv_vmbus_channel_msg_header	header;
	hv_bool_uint8_t			version_supported;
} __packed hv_vmbus_channel_version_supported;

/*
 * Channel Offer parameters
 */
typedef struct {
	hv_vmbus_channel_msg_header	header;
	hv_vmbus_channel_offer		offer;
	uint32_t			child_rel_id;
	uint8_t				monitor_id;
	hv_bool_uint8_t			monitor_allocated;
} __packed hv_vmbus_channel_offer_channel;

/*
 * Rescind Offer parameters
 */
typedef struct
{
    hv_vmbus_channel_msg_header	header;
    uint32_t			child_rel_id;
} __packed hv_vmbus_channel_rescind_offer;


/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 *
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 *
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 *
 * All Offers Delivered -- no parameters, SynIC message contains the
 * partition ID
 *
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */


/*
 * Open Channel parameters
 */
typedef struct
{
    hv_vmbus_channel_msg_header header;

    /*
     * Identifies the specific VMBus channel that is being opened.
     */
    uint32_t		child_rel_id;

    /*
     * ID making a particular open request at a channel offer unique.
     */
    uint32_t		open_id;

    /*
     * GPADL for the channel's ring buffer.
     */
    hv_gpadl_handle	ring_buffer_gpadl_handle;

    /*
     * GPADL for the channel's server context save area.
     */
    hv_gpadl_handle	server_context_area_gpadl_handle;

    /*
     * The upstream ring buffer begins at offset zero in the memory described
     * by ring_buffer_gpadl_handle. The downstream ring buffer follows it at
     * this offset (in pages).
     */
    uint32_t		downstream_ring_buffer_page_offset;

    /*
     * User-specific data to be passed along to the server endpoint.
     */
    uint8_t		user_data[HV_MAX_USER_DEFINED_BYTES];

} __packed hv_vmbus_channel_open_channel;
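
/*
 * Illustrative sketch (not part of the original header): when a channel is
 * opened, a single GPADL typically describes both ring buffers back to back,
 * so the downstream (receive) ring starts where the upstream (send) ring
 * ends.  Assuming a send ring of send_size bytes, a multiple of PAGE_SIZE:
 *
 *	open_msg.ring_buffer_gpadl_handle = gpadl;	// covers both rings
 *	open_msg.downstream_ring_buffer_page_offset =
 *	    send_size >> PAGE_SHIFT;			// offset in pages
 *
 * "gpadl", "send_size" and "open_msg" are made-up locals for the example.
 */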

typedef uint32_t hv_nt_status;

/*
 * Open Channel Result parameters
 */
typedef struct
{
	hv_vmbus_channel_msg_header	header;
	uint32_t			child_rel_id;
	uint32_t			open_id;
	hv_nt_status			status;
} __packed hv_vmbus_channel_open_result;

/*
 * Close channel parameters
 */
typedef struct
{
	hv_vmbus_channel_msg_header	header;
	uint32_t			child_rel_id;
} __packed hv_vmbus_channel_close_channel;

/*
 * Channel Message GPADL
 */
#define HV_GPADL_TYPE_RING_BUFFER	1
#define HV_GPADL_TYPE_SERVER_SAVE_AREA	2
#define HV_GPADL_TYPE_TRANSACTION	8

/*
 * The number of PFNs in a GPADL message is defined by the number of pages
 * that would be spanned by byte_count and byte_offset.  If the implied number
 * of PFNs won't fit in this packet, there will be a follow-up packet that
 * contains more PFNs.
 */

typedef struct {
	hv_vmbus_channel_msg_header	header;
	uint32_t			child_rel_id;
	uint32_t			gpadl;
	uint16_t			range_buf_len;
	uint16_t			range_count;
	hv_gpa_range			range[0];
} __packed hv_vmbus_channel_gpadl_header;
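
/*
 * Illustrative example (not part of the original header): for a single
 * physically contiguous buffer of "size" bytes starting on a page boundary,
 * the header would carry one hv_gpa_range whose pfn_array holds
 * HV_NUM_PAGES_SPANNED(0, size) entries, and range_buf_len would be the byte
 * length of that range description, roughly
 *
 *	sizeof(hv_gpa_range) + pfn_count * sizeof(uint64_t)
 *
 * PFNs that do not fit in this message are carried in one or more
 * hv_vmbus_channel_gpadl_body follow-up messages.
 */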

/*
 * This is the follow-up packet that contains more PFNs
 */
typedef struct {
	hv_vmbus_channel_msg_header	header;
	uint32_t			message_number;
	uint32_t			gpadl;
	uint64_t			pfn[0];
} __packed hv_vmbus_channel_gpadl_body;

typedef struct {
	hv_vmbus_channel_msg_header	header;
	uint32_t			child_rel_id;
	uint32_t			gpadl;
	uint32_t			creation_status;
} __packed hv_vmbus_channel_gpadl_created;

typedef struct {
	hv_vmbus_channel_msg_header	header;
	uint32_t			child_rel_id;
	uint32_t			gpadl;
} __packed hv_vmbus_channel_gpadl_teardown;

typedef struct {
	hv_vmbus_channel_msg_header	header;
	uint32_t			gpadl;
} __packed hv_vmbus_channel_gpadl_torndown;

typedef struct {
	hv_vmbus_channel_msg_header	header;
	uint32_t			child_rel_id;
} __packed hv_vmbus_channel_relid_released;

typedef struct {
	hv_vmbus_channel_msg_header	header;
	uint32_t			vmbus_version_requested;
	uint32_t			padding2;
	uint64_t			interrupt_page;
	uint64_t			monitor_page_1;
	uint64_t			monitor_page_2;
} __packed hv_vmbus_channel_initiate_contact;

typedef struct {
	hv_vmbus_channel_msg_header header;
	hv_bool_uint8_t		version_supported;
} __packed hv_vmbus_channel_version_response;

typedef hv_vmbus_channel_msg_header hv_vmbus_channel_unload;

#define HW_MACADDR_LEN	6

/*
 * Fixme:  Added to quiet "typeof" errors involving hv_vmbus.h when
 * the including C file was compiled with "-std=c99".
 */
#ifndef typeof
#define typeof __typeof
#endif

#ifndef NULL
#define NULL  (void *)0
#endif

typedef void *hv_vmbus_handle;

#ifndef CONTAINING_RECORD
#define CONTAINING_RECORD(address, type, field) ((type *)(	\
		(uint8_t *)(address) -				\
		(uint8_t *)(&((type *)0)->field)))
#endif /* CONTAINING_RECORD */


#define container_of(ptr, type, member) ({				\
		__typeof__( ((type *)0)->member ) *__mptr = (ptr);	\
		(type *)( (char *)__mptr - offsetof(type,member) );})
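
/*
 * Illustrative usage (not part of the original header): given a structure
 * that embeds another, container_of()/CONTAINING_RECORD() recover a pointer
 * to the outer structure from a pointer to the embedded member, e.g.
 *
 *	struct my_softc {
 *		int			unit;
 *		hv_vmbus_channel	chan;	// embedded member
 *	};
 *
 *	hv_vmbus_channel *cp = ...;
 *	struct my_softc *sc = container_of(cp, struct my_softc, chan);
 *
 * "my_softc" is a made-up example type, not something defined here.
 */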

enum {
	HV_VMBUS_IVAR_TYPE,
	HV_VMBUS_IVAR_INSTANCE,
	HV_VMBUS_IVAR_NODE,
	HV_VMBUS_IVAR_DEVCTX
};

#define HV_VMBUS_ACCESSOR(var, ivar, type) \
		__BUS_ACCESSOR(vmbus, var, HV_VMBUS, ivar, type)

HV_VMBUS_ACCESSOR(type, TYPE,  const char *)
HV_VMBUS_ACCESSOR(devctx, DEVCTX,  struct hv_device *)


/*
 * Common defines for Hyper-V ICs
 */
#define HV_ICMSGTYPE_NEGOTIATE		0
#define HV_ICMSGTYPE_HEARTBEAT		1
#define HV_ICMSGTYPE_KVPEXCHANGE	2
#define HV_ICMSGTYPE_SHUTDOWN		3
#define HV_ICMSGTYPE_TIMESYNC		4
#define HV_ICMSGTYPE_VSS		5

#define HV_ICMSGHDRFLAG_TRANSACTION	1
#define HV_ICMSGHDRFLAG_REQUEST		2
#define HV_ICMSGHDRFLAG_RESPONSE	4

typedef struct hv_vmbus_pipe_hdr {
	uint32_t flags;
	uint32_t msgsize;
} __packed hv_vmbus_pipe_hdr;

typedef struct hv_vmbus_ic_version {
	uint16_t major;
	uint16_t minor;
} __packed hv_vmbus_ic_version;

typedef struct hv_vmbus_icmsg_hdr {
	hv_vmbus_ic_version	icverframe;
	uint16_t		icmsgtype;
	hv_vmbus_ic_version	icvermsg;
	uint16_t		icmsgsize;
	uint32_t		status;
	uint8_t			ictransaction_id;
	uint8_t			icflags;
	uint8_t			reserved[2];
} __packed hv_vmbus_icmsg_hdr;

typedef struct hv_vmbus_icmsg_negotiate {
	uint16_t		icframe_vercnt;
	uint16_t		icmsg_vercnt;
	uint32_t		reserved;
	hv_vmbus_ic_version	icversion_data[1]; /* any size array */
} __packed hv_vmbus_icmsg_negotiate;

typedef struct hv_vmbus_shutdown_msg_data {
	uint32_t		reason_code;
	uint32_t		timeout_seconds;
	uint32_t		flags;
	uint8_t			display_message[2048];
} __packed hv_vmbus_shutdown_msg_data;

typedef struct hv_vmbus_heartbeat_msg_data {
	uint64_t		seq_num;
	uint32_t		reserved[8];
} __packed hv_vmbus_heartbeat_msg_data;

typedef struct {
	/*
	 * offset in bytes from the start of ring data below
	 */
	volatile uint32_t       write_index;
	/*
	 * offset in bytes from the start of ring data below
	 */
	volatile uint32_t       read_index;
	/*
	 * NOTE: The interrupt_mask field is used only for channels, but
	 * vmbus connection also uses this data structure
	 */
	volatile uint32_t       interrupt_mask;
	/* pad it to PAGE_SIZE so that data starts on a page */
	uint8_t                 reserved[4084];

	/*
	 * WARNING: Ring data starts here + ring_data_start_offset
	 *  !!! DO NOT place any fields below this !!!
	 */
	uint8_t			buffer[0];	/* doubles as interrupt mask */
} __packed hv_vmbus_ring_buffer;
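
/*
 * Illustrative sketch (not part of the original header): write_index and
 * read_index are byte offsets into buffer[] that wrap at the ring data size,
 * so a consumer can estimate how much data is pending with something like
 *
 *	uint32_t avail_to_read(const hv_vmbus_ring_buffer *rb, uint32_t size)
 *	{
 *		uint32_t w = rb->write_index, r = rb->read_index;
 *		return (w >= r) ? (w - r) : (size - r + w);
 *	}
 *
 * where "size" is the ring_data_size tracked in hv_vmbus_ring_buffer_info;
 * the real ring-buffer routines live in the VMBus support code, not here.
 */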

typedef struct {
	int		length;
	int		offset;
	uint64_t	pfn;
} __packed hv_vmbus_page_buffer;

typedef struct {
	int		length;
	int		offset;
	uint64_t	pfn_array[HV_MAX_MULTIPAGE_BUFFER_COUNT];
} __packed hv_vmbus_multipage_buffer;

typedef struct {
	hv_vmbus_ring_buffer*	ring_buffer;
	uint32_t		ring_size;	/* Include the shared header */
	struct mtx		ring_lock;
	uint32_t		ring_data_size;	/* ring_size minus the shared header */
	uint32_t		ring_data_start_offset;
} hv_vmbus_ring_buffer_info;

typedef void (*hv_vmbus_pfn_channel_callback)(void *context);

typedef enum {
	HV_CHANNEL_OFFER_STATE,
	HV_CHANNEL_OPENING_STATE,
	HV_CHANNEL_OPEN_STATE,
	HV_CHANNEL_CLOSING_NONDESTRUCTIVE_STATE,
} hv_vmbus_channel_state;

typedef struct hv_vmbus_channel {
	TAILQ_ENTRY(hv_vmbus_channel)	list_entry;
	struct hv_device*		device;
	hv_vmbus_channel_state		state;
	hv_vmbus_channel_offer_channel	offer_msg;
	/*
	 * These are based on the offer_msg.monitor_id.
	 * Save it here for easy access.
	 */
	uint8_t				monitor_group;
	uint8_t				monitor_bit;

	uint32_t			ring_buffer_gpadl_handle;
	/*
	 * Allocated memory for ring buffer
	 */
	void*				ring_buffer_pages;
	uint32_t			ring_buffer_page_count;
	/*
	 * send to parent
	 */
	hv_vmbus_ring_buffer_info	outbound;
	/*
	 * receive from parent
	 */
	hv_vmbus_ring_buffer_info	inbound;

	struct mtx			inbound_lock;
	hv_vmbus_handle			control_work_queue;

	hv_vmbus_pfn_channel_callback	on_channel_callback;
	void*				channel_callback_context;

} hv_vmbus_channel;

typedef struct hv_device {
	hv_guid		    class_id;
	hv_guid		    device_id;
	device_t	    device;
	hv_vmbus_channel*   channel;
} hv_device;


int		hv_vmbus_channel_recv_packet(
				hv_vmbus_channel*	channel,
				void*			buffer,
				uint32_t		buffer_len,
				uint32_t*		buffer_actual_len,
				uint64_t*		request_id);

int		hv_vmbus_channel_recv_packet_raw(
				hv_vmbus_channel*	channel,
				void*			buffer,
				uint32_t		buffer_len,
				uint32_t*		buffer_actual_len,
				uint64_t*		request_id);

int		hv_vmbus_channel_open(
				hv_vmbus_channel*	channel,
				uint32_t		send_ring_buffer_size,
				uint32_t		recv_ring_buffer_size,
				void*			user_data,
				uint32_t		user_data_len,
				hv_vmbus_pfn_channel_callback
							pfn_on_channel_callback,
				void*			context);

void		hv_vmbus_channel_close(hv_vmbus_channel *channel);

int		hv_vmbus_channel_send_packet(
				hv_vmbus_channel*	channel,
				void*			buffer,
				uint32_t		buffer_len,
				uint64_t		request_id,
				hv_vmbus_packet_type	type,
				uint32_t		flags);

int		hv_vmbus_channel_send_packet_pagebuffer(
				hv_vmbus_channel*	channel,
				hv_vmbus_page_buffer	page_buffers[],
				uint32_t		page_count,
				void*			buffer,
				uint32_t		buffer_len,
				uint64_t		request_id);

int		hv_vmbus_channel_send_packet_multipagebuffer(
				hv_vmbus_channel*	    channel,
				hv_vmbus_multipage_buffer*  multi_page_buffer,
				void*			    buffer,
				uint32_t		    buffer_len,
				uint64_t		    request_id);

int		hv_vmbus_channel_establish_gpadl(
				hv_vmbus_channel*	channel,
				/* must be phys and virt contiguous */
				void*			contig_buffer,
				/* page-size multiple */
				uint32_t		size,
				uint32_t*		gpadl_handle);

int		hv_vmbus_channel_teardown_gpdal(
				hv_vmbus_channel*	channel,
				uint32_t		gpadl_handle);
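
/*
 * Illustrative usage sketch (not part of the original header): a VMBus
 * device driver typically opens its channel with a receive callback, pushes
 * requests as in-band packets, and closes the channel on detach.  The ring
 * sizes, request layout and the "sc"/"my_channel_callback" names below are
 * made-up placeholders.
 *
 *	static void
 *	my_channel_callback(void *context)
 *	{
 *		// drain pending packets with hv_vmbus_channel_recv_packet()
 *	}
 *
 *	hv_vmbus_channel_open(device->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *	    NULL, 0, my_channel_callback, sc);
 *	hv_vmbus_channel_send_packet(device->channel, &req, sizeof(req),
 *	    (uint64_t)(uintptr_t)&req, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
 *	    HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 *	...
 *	hv_vmbus_channel_close(device->channel);
 */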

/*
 * Work abstraction defines
 */
typedef struct hv_work_queue {
	struct taskqueue*	queue;
	struct proc*		proc;
	struct sema*		work_sema;
} hv_work_queue;

typedef struct hv_work_item {
	struct task	work;
	void		(*callback)(void *);
	void*		context;
	hv_work_queue*	wq;
} hv_work_item;

struct hv_work_queue*	hv_work_queue_create(char* name);

void			hv_work_queue_close(struct hv_work_queue* wq);

int			hv_queue_work_item(
				hv_work_queue*	wq,
				void		(*callback)(void *),
				void*		context);
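
/*
 * Illustrative usage sketch (not part of the original header): deferred work
 * is run by handing a callback and context to a named work queue.  The queue
 * name and "my_deferred_fn"/"sc" are made-up placeholders.
 *
 *	struct hv_work_queue *wq = hv_work_queue_create("my wq");
 *	if (wq != NULL)
 *		hv_queue_work_item(wq, my_deferred_fn, sc);
 *	...
 *	hv_work_queue_close(wq);
 */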
/**
 * @brief Get physical address from virtual
 */
static inline unsigned long
hv_get_phys_addr(void *virt)
{
	unsigned long ret;
	ret = (vtophys(virt) | ((vm_offset_t) virt & PAGE_MASK));
	return (ret);
}
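
/*
 * Illustrative example (not part of the original header): the result can be
 * split into a PFN and an in-page offset, for instance when filling in an
 * hv_vmbus_page_buffer,
 *
 *	unsigned long pa = hv_get_phys_addr(data);
 *	page_buf.pfn    = pa >> PAGE_SHIFT;
 *	page_buf.offset = pa & PAGE_MASK;
 *	page_buf.length = len;
 *
 * where "data", "len" and "page_buf" are made-up locals.
 */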

#endif  /* __HYPERV_H__ */