/*-
 * Copyright (c) 2009-2012 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.2/sys/dev/hyperv/vmbus/hv_channel.c 283280 2015-05-22 09:03:55Z whu $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/bus.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "hv_vmbus_priv.h"

static int 	vmbus_channel_create_gpadl_header(
			/* must be phys and virt contiguous */
			void*				contig_buffer,
			/* page-size multiple */
			uint32_t 			size,
			hv_vmbus_channel_msg_info**	msg_info,
			uint32_t*			message_count);

static void 	vmbus_channel_set_event(hv_vmbus_channel* channel);

/**
 *  @brief Trigger an event notification on the specified channel
 */
static void
vmbus_channel_set_event(hv_vmbus_channel *channel)
{
	hv_vmbus_monitor_page *monitor_page;

	if (channel->offer_msg.monitor_allocated) {
		/* Each uint32_t represents 32 channels */
		synch_set_bit((channel->offer_msg.child_rel_id & 31),
			((uint32_t *)hv_vmbus_g_connection.send_interrupt_page
				+ ((channel->offer_msg.child_rel_id >> 5))));

		monitor_page = (hv_vmbus_monitor_page *)
			hv_vmbus_g_connection.monitor_pages;

		monitor_page++; /* Get the child to parent monitor page */

		synch_set_bit(channel->monitor_bit,
			(uint32_t *)&monitor_page->
				trigger_group[channel->monitor_group].u.pending);
	} else {
		hv_vmbus_set_event(channel);
	}
}
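
/*
 * Worked example of the indexing above (illustrative only, derived from
 * the code): a monitored channel with child_rel_id 37 sets bit
 * (37 & 31) == 5 of uint32_t word (37 >> 5) == 1 in the send interrupt
 * page, then latches its monitor_bit in the child-to-parent monitor
 * page (the second page of monitor_pages); unmonitored channels signal
 * the host directly through hv_vmbus_set_event().
 */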

/**
 * @brief Open the specified channel
 */
int
hv_vmbus_channel_open(
	hv_vmbus_channel*		new_channel,
	uint32_t			send_ring_buffer_size,
	uint32_t			recv_ring_buffer_size,
	void*				user_data,
	uint32_t			user_data_len,
	hv_vmbus_pfn_channel_callback	pfn_on_channel_callback,
	void* 				context)
{
	int ret = 0;
	void *in, *out;
	hv_vmbus_channel_open_channel*	open_msg;
	hv_vmbus_channel_msg_info* 	open_info;

	mtx_lock(&new_channel->sc_lock);
	if (new_channel->state == HV_CHANNEL_OPEN_STATE) {
	    new_channel->state = HV_CHANNEL_OPENING_STATE;
	} else {
	    mtx_unlock(&new_channel->sc_lock);
	    if (bootverbose)
		printf("VMBUS: Trying to open channel <%p> which is in "
		    "state %d.\n", new_channel, new_channel->state);
	    return (EINVAL);
	}
	mtx_unlock(&new_channel->sc_lock);

	new_channel->on_channel_callback = pfn_on_channel_callback;
	new_channel->channel_callback_context = context;

	/* Allocate the ring buffer */
	out = contigmalloc((send_ring_buffer_size + recv_ring_buffer_size),
	    M_DEVBUF, M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
	KASSERT(out != NULL,
	    ("Error VMBUS: contigmalloc failed to allocate Ring Buffer!"));
	if (out == NULL)
		return (ENOMEM);

	in = ((uint8_t *) out + send_ring_buffer_size);

	new_channel->ring_buffer_pages = out;
	new_channel->ring_buffer_page_count = (send_ring_buffer_size +
	    recv_ring_buffer_size) >> PAGE_SHIFT;
	new_channel->ring_buffer_size = send_ring_buffer_size +
	    recv_ring_buffer_size;

	hv_vmbus_ring_buffer_init(
		&new_channel->outbound,
		out,
		send_ring_buffer_size);

	hv_vmbus_ring_buffer_init(
		&new_channel->inbound,
		in,
		recv_ring_buffer_size);

	/*
	 * Establish the gpadl for the ring buffer
	 */
	new_channel->ring_buffer_gpadl_handle = 0;

	ret = hv_vmbus_channel_establish_gpadl(new_channel,
		new_channel->outbound.ring_buffer,
		send_ring_buffer_size + recv_ring_buffer_size,
		&new_channel->ring_buffer_gpadl_handle);

	/*
	 * Create and init the channel open message
	 */
	open_info = (hv_vmbus_channel_msg_info*) malloc(
		sizeof(hv_vmbus_channel_msg_info) +
			sizeof(hv_vmbus_channel_open_channel),
		M_DEVBUF,
		M_NOWAIT);
	KASSERT(open_info != NULL,
	    ("Error VMBUS: malloc failed to allocate Open Channel message!"));

	if (open_info == NULL)
		return (ENOMEM);

	sema_init(&open_info->wait_sema, 0, "Open Info Sema");

	open_msg = (hv_vmbus_channel_open_channel*) open_info->msg;
	open_msg->header.message_type = HV_CHANNEL_MESSAGE_OPEN_CHANNEL;
	open_msg->open_id = new_channel->offer_msg.child_rel_id;
	open_msg->child_rel_id = new_channel->offer_msg.child_rel_id;
	open_msg->ring_buffer_gpadl_handle =
		new_channel->ring_buffer_gpadl_handle;
	open_msg->downstream_ring_buffer_page_offset = send_ring_buffer_size
		>> PAGE_SHIFT;
	open_msg->target_vcpu = new_channel->target_vcpu;

	if (user_data_len)
		memcpy(open_msg->user_data, user_data, user_data_len);

	mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
	TAILQ_INSERT_TAIL(
		&hv_vmbus_g_connection.channel_msg_anchor,
		open_info,
		msg_list_entry);
	mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);

	ret = hv_vmbus_post_message(
		open_msg, sizeof(hv_vmbus_channel_open_channel));

	if (ret != 0)
	    goto cleanup;

	ret = sema_timedwait(&open_info->wait_sema, 500); /* KYS 5 seconds */

	if (ret) {
	    if (bootverbose)
		printf("VMBUS: channel <%p> open timeout.\n", new_channel);
	    goto cleanup;
	}

	if (open_info->response.open_result.status == 0) {
	    new_channel->state = HV_CHANNEL_OPENED_STATE;
	    if (bootverbose)
		printf("VMBUS: channel <%p> open success.\n", new_channel);
	} else {
	    if (bootverbose)
		printf("Error VMBUS: channel <%p> open failed - %d!\n",
			new_channel, open_info->response.open_result.status);
	}

cleanup:
	mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
	TAILQ_REMOVE(
		&hv_vmbus_g_connection.channel_msg_anchor,
		open_info,
		msg_list_entry);
	mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);
	sema_destroy(&open_info->wait_sema);
	free(open_info, M_DEVBUF);

	return (ret);
}
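
/*
 * Example (illustrative sketch only; "sc", "init_pkt" and
 * "my_channel_callback" are hypothetical driver names, not defined in
 * this file): a VMBus device driver typically opens its channel with
 * page-multiple ring sizes and a per-device callback/context pair:
 *
 *	error = hv_vmbus_channel_open(sc->channel,
 *	    4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *	    &init_pkt, sizeof(init_pkt),
 *	    my_channel_callback, sc);
 *	if (error != 0)
 *		return (error);
 */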

/**
 * @brief Build the GPADL header and body messages that describe
 *        the specified buffer
 */
static int
vmbus_channel_create_gpadl_header(
	void*				contig_buffer,
	uint32_t			size,	/* page-size multiple */
	hv_vmbus_channel_msg_info**	msg_info,
	uint32_t*			message_count)
{
	int				i;
	int				page_count;
	unsigned long long 		pfn;
	uint32_t			msg_size;
	hv_vmbus_channel_gpadl_header*	gpa_header;
	hv_vmbus_channel_gpadl_body*	gpadl_body;
	hv_vmbus_channel_msg_info*	msg_header;
	hv_vmbus_channel_msg_info*	msg_body;

	int pfnSum, pfnCount, pfnLeft, pfnCurr, pfnSize;

	page_count = size >> PAGE_SHIFT;
	pfn = hv_get_phys_addr(contig_buffer) >> PAGE_SHIFT;

	/* do we need a gpadl body msg? */
	pfnSize = HV_MAX_SIZE_CHANNEL_MESSAGE
	    - sizeof(hv_vmbus_channel_gpadl_header)
	    - sizeof(hv_gpa_range);
	pfnCount = pfnSize / sizeof(uint64_t);

	if (page_count > pfnCount) { /* we need a gpadl body */
	    /* fill in the header */
	    msg_size = sizeof(hv_vmbus_channel_msg_info)
		+ sizeof(hv_vmbus_channel_gpadl_header)
		+ sizeof(hv_gpa_range)
		+ pfnCount * sizeof(uint64_t);
	    msg_header = malloc(msg_size, M_DEVBUF, M_NOWAIT | M_ZERO);
	    KASSERT(
		msg_header != NULL,
		("Error VMBUS: malloc failed to allocate Gpadl Message!"));
	    if (msg_header == NULL)
		return (ENOMEM);

	    TAILQ_INIT(&msg_header->sub_msg_list_anchor);
	    msg_header->message_size = msg_size;

	    gpa_header = (hv_vmbus_channel_gpadl_header*) msg_header->msg;
	    gpa_header->range_count = 1;
	    gpa_header->range_buf_len = sizeof(hv_gpa_range)
		+ page_count * sizeof(uint64_t);
	    gpa_header->range[0].byte_offset = 0;
	    gpa_header->range[0].byte_count = size;
	    for (i = 0; i < pfnCount; i++) {
		gpa_header->range[0].pfn_array[i] = pfn + i;
	    }
	    *msg_info = msg_header;
	    *message_count = 1;

	    pfnSum = pfnCount;
	    pfnLeft = page_count - pfnCount;

	    /*
	     *  figure out how many pfns we can fit
	     */
	    pfnSize = HV_MAX_SIZE_CHANNEL_MESSAGE
		- sizeof(hv_vmbus_channel_gpadl_body);
	    pfnCount = pfnSize / sizeof(uint64_t);

	    /*
	     * fill in the body
	     */
	    while (pfnLeft) {
		if (pfnLeft > pfnCount) {
		    pfnCurr = pfnCount;
		} else {
		    pfnCurr = pfnLeft;
		}

		msg_size = sizeof(hv_vmbus_channel_msg_info) +
		    sizeof(hv_vmbus_channel_gpadl_body) +
		    pfnCurr * sizeof(uint64_t);
		msg_body = malloc(msg_size, M_DEVBUF, M_NOWAIT | M_ZERO);
		KASSERT(
		    msg_body != NULL,
		    ("Error VMBUS: malloc failed to allocate Gpadl msg_body!"));
		if (msg_body == NULL)
		    return (ENOMEM);

		msg_body->message_size = msg_size;
		(*message_count)++;
		gpadl_body =
		    (hv_vmbus_channel_gpadl_body*) msg_body->msg;
		/*
		 * gpadl_body->gpadl = kbuffer;
		 */
		for (i = 0; i < pfnCurr; i++) {
		    gpadl_body->pfn[i] = pfn + pfnSum + i;
		}

		TAILQ_INSERT_TAIL(
		    &msg_header->sub_msg_list_anchor,
		    msg_body,
		    msg_list_entry);
		pfnSum += pfnCurr;
		pfnLeft -= pfnCurr;
	    }
	} else { /* everything fits in a header */

	    msg_size = sizeof(hv_vmbus_channel_msg_info) +
		sizeof(hv_vmbus_channel_gpadl_header) +
		sizeof(hv_gpa_range) +
		page_count * sizeof(uint64_t);
	    msg_header = malloc(msg_size, M_DEVBUF, M_NOWAIT | M_ZERO);
	    KASSERT(
		msg_header != NULL,
		("Error VMBUS: malloc failed to allocate Gpadl Message!"));
	    if (msg_header == NULL)
		return (ENOMEM);

	    msg_header->message_size = msg_size;

	    gpa_header = (hv_vmbus_channel_gpadl_header*) msg_header->msg;
	    gpa_header->range_count = 1;
	    gpa_header->range_buf_len = sizeof(hv_gpa_range) +
		page_count * sizeof(uint64_t);
	    gpa_header->range[0].byte_offset = 0;
	    gpa_header->range[0].byte_count = size;
	    for (i = 0; i < page_count; i++) {
		gpa_header->range[0].pfn_array[i] = pfn + i;
	    }

	    *msg_info = msg_header;
	    *message_count = 1;
	}

	return (0);
}
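
/*
 * Sizing sketch (illustrative arithmetic only; the real limits come from
 * HV_MAX_SIZE_CHANNEL_MESSAGE and the structure layouts in
 * hv_vmbus_priv.h): if HV_MAX_SIZE_CHANNEL_MESSAGE were 240 bytes, the
 * header message would carry about
 *
 *	(240 - sizeof(gpadl_header) - sizeof(hv_gpa_range)) / 8
 *
 * PFNs and each body message about
 *
 *	(240 - sizeof(gpadl_body)) / 8
 *
 * more, so a large ring buffer is described by one header message plus
 * as many body messages as are needed to cover all page_count PFNs.
 */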

/**
 * @brief Establish a GPADL for the specified buffer
 */
int
hv_vmbus_channel_establish_gpadl(
	hv_vmbus_channel*	channel,
	void*			contig_buffer,
	uint32_t		size, /* page-size multiple */
	uint32_t*		gpadl_handle)
{
	int ret = 0;
	hv_vmbus_channel_gpadl_header*	gpadl_msg;
	hv_vmbus_channel_gpadl_body*	gpadl_body;
	hv_vmbus_channel_msg_info*	msg_info;
	hv_vmbus_channel_msg_info*	sub_msg_info;
	uint32_t			msg_count;
	hv_vmbus_channel_msg_info*	curr;
	uint32_t			next_gpadl_handle;

	next_gpadl_handle = hv_vmbus_g_connection.next_gpadl_handle;
	atomic_add_int((int*) &hv_vmbus_g_connection.next_gpadl_handle, 1);

	ret = vmbus_channel_create_gpadl_header(
		contig_buffer, size, &msg_info, &msg_count);

	if (ret != 0) { /* allocation failed; return immediately */
	    /* reverse atomic_add_int above */
	    atomic_subtract_int((int*)
		    &hv_vmbus_g_connection.next_gpadl_handle, 1);
	    return (ret);
	}

	sema_init(&msg_info->wait_sema, 0, "Open Info Sema");
	gpadl_msg = (hv_vmbus_channel_gpadl_header*) msg_info->msg;
	gpadl_msg->header.message_type = HV_CHANNEL_MESSAGEL_GPADL_HEADER;
	gpadl_msg->child_rel_id = channel->offer_msg.child_rel_id;
	gpadl_msg->gpadl = next_gpadl_handle;

	mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
	TAILQ_INSERT_TAIL(
		&hv_vmbus_g_connection.channel_msg_anchor,
		msg_info,
		msg_list_entry);

	mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);

	ret = hv_vmbus_post_message(
		gpadl_msg,
		msg_info->message_size -
		    (uint32_t) sizeof(hv_vmbus_channel_msg_info));

	if (ret != 0)
	    goto cleanup;

	if (msg_count > 1) {
	    TAILQ_FOREACH(curr,
		    &msg_info->sub_msg_list_anchor, msg_list_entry) {
		sub_msg_info = curr;
		gpadl_body =
		    (hv_vmbus_channel_gpadl_body*) sub_msg_info->msg;

		gpadl_body->header.message_type =
		    HV_CHANNEL_MESSAGE_GPADL_BODY;
		gpadl_body->gpadl = next_gpadl_handle;

		ret = hv_vmbus_post_message(
			gpadl_body,
			sub_msg_info->message_size
			    - (uint32_t) sizeof(hv_vmbus_channel_msg_info));
		/* if the post message failed, give up and clean up */
		if (ret != 0)
		    goto cleanup;
	    }
	}

	ret = sema_timedwait(&msg_info->wait_sema, 500); /* KYS 5 seconds */
	if (ret != 0)
	    goto cleanup;

	*gpadl_handle = gpadl_msg->gpadl;

cleanup:

	mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
	TAILQ_REMOVE(&hv_vmbus_g_connection.channel_msg_anchor,
		msg_info, msg_list_entry);
	mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);

	sema_destroy(&msg_info->wait_sema);
	free(msg_info, M_DEVBUF);

	return (ret);
}
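
/*
 * Example (illustrative sketch only; "sc" and "buf" are hypothetical):
 * a driver that wants the host to address a private, physically
 * contiguous buffer registers it and keeps the returned handle for its
 * own protocol messages:
 *
 *	error = hv_vmbus_channel_establish_gpadl(sc->channel,
 *	    buf, 16 * PAGE_SIZE, &sc->gpadl_handle);
 *
 * The call blocks (up to the sema_timedwait timeout above) until the
 * host acknowledges the GPADL header/body sequence and wait_sema is
 * posted with the GPADL-created response.
 */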

/**
 * @brief Teardown the specified GPADL handle
 */
int
hv_vmbus_channel_teardown_gpdal(
	hv_vmbus_channel*	channel,
	uint32_t		gpadl_handle)
{
	int					ret = 0;
	hv_vmbus_channel_gpadl_teardown*	msg;
	hv_vmbus_channel_msg_info*		info;

	info = (hv_vmbus_channel_msg_info *)
		malloc(	sizeof(hv_vmbus_channel_msg_info) +
			sizeof(hv_vmbus_channel_gpadl_teardown),
				M_DEVBUF, M_NOWAIT);
	KASSERT(info != NULL,
	    ("Error VMBUS: malloc failed to allocate Gpadl Teardown Msg!"));
	if (info == NULL)
	    return (ENOMEM);

	sema_init(&info->wait_sema, 0, "Open Info Sema");

	msg = (hv_vmbus_channel_gpadl_teardown*) info->msg;

	msg->header.message_type = HV_CHANNEL_MESSAGE_GPADL_TEARDOWN;
	msg->child_rel_id = channel->offer_msg.child_rel_id;
	msg->gpadl = gpadl_handle;

	mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
	TAILQ_INSERT_TAIL(&hv_vmbus_g_connection.channel_msg_anchor,
			info, msg_list_entry);
	mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);

	ret = hv_vmbus_post_message(msg,
			sizeof(hv_vmbus_channel_gpadl_teardown));
	if (ret != 0)
	    goto cleanup;

	ret = sema_timedwait(&info->wait_sema, 500); /* KYS 5 seconds */

cleanup:
	/*
	 * Either the GPADL_TORNDOWN response arrived, or we gave up;
	 * in both cases unlink and free the request.
	 */
	mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
	TAILQ_REMOVE(&hv_vmbus_g_connection.channel_msg_anchor,
			info, msg_list_entry);
	mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);
	sema_destroy(&info->wait_sema);
	free(info, M_DEVBUF);

	return (ret);
}
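
/*
 * Example (illustrative only; "sc" is a hypothetical softc, and the
 * function name keeps the driver's historical "gpdal" spelling):
 * tearing down the handle obtained above before freeing the buffer:
 *
 *	if (sc->gpadl_handle != 0) {
 *		hv_vmbus_channel_teardown_gpdal(sc->channel,
 *		    sc->gpadl_handle);
 *		sc->gpadl_handle = 0;
 *	}
 */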

static void
hv_vmbus_channel_close_internal(hv_vmbus_channel *channel)
{
	int ret = 0;
	hv_vmbus_channel_close_channel* msg;
	hv_vmbus_channel_msg_info* info;

	channel->state = HV_CHANNEL_OPEN_STATE;
	channel->sc_creation_callback = NULL;

	/*
	 * Grab the lock to prevent a race between packet reception and
	 * driver unload.
	 */
	mtx_lock(&channel->inbound_lock);
	channel->on_channel_callback = NULL;
	mtx_unlock(&channel->inbound_lock);

	/*
	 * Send a closing message
	 */
	info = (hv_vmbus_channel_msg_info *)
		malloc(	sizeof(hv_vmbus_channel_msg_info) +
			sizeof(hv_vmbus_channel_close_channel),
				M_DEVBUF, M_NOWAIT);
	KASSERT(info != NULL, ("VMBUS: malloc failed hv_vmbus_channel_close!"));
	if (info == NULL)
	    return;

	msg = (hv_vmbus_channel_close_channel*) info->msg;
	msg->header.message_type = HV_CHANNEL_MESSAGE_CLOSE_CHANNEL;
	msg->child_rel_id = channel->offer_msg.child_rel_id;

	ret = hv_vmbus_post_message(
		msg, sizeof(hv_vmbus_channel_close_channel));

	/* Tear down the gpadl for the channel's ring buffer */
	if (channel->ring_buffer_gpadl_handle) {
		hv_vmbus_channel_teardown_gpdal(channel,
			channel->ring_buffer_gpadl_handle);
	}

	/* TODO: Send a msg to release the childRelId */

	/* cleanup the ring buffers for this channel */
	hv_ring_buffer_cleanup(&channel->outbound);
	hv_ring_buffer_cleanup(&channel->inbound);

	contigfree(channel->ring_buffer_pages, channel->ring_buffer_size,
	    M_DEVBUF);

	free(info, M_DEVBUF);
}

/**
 * @brief Close the specified channel
 */
void
hv_vmbus_channel_close(hv_vmbus_channel *channel)
{
	hv_vmbus_channel*	sub_channel;

	if (channel->primary_channel != NULL) {
		/*
		 * We only close multi-channels when the primary is
		 * closed.
		 */
		return;
	}

	/*
	 * Close all multi-channels first.
	 */
	TAILQ_FOREACH(sub_channel, &channel->sc_list_anchor,
	    sc_list_entry) {
		if (sub_channel->state != HV_CHANNEL_OPENED_STATE)
			continue;
		hv_vmbus_channel_close_internal(sub_channel);
	}
	/*
	 * Then close the primary channel.
	 */
	hv_vmbus_channel_close_internal(channel);
}

/**
 * @brief Send the specified buffer on the given channel
 */
int
hv_vmbus_channel_send_packet(
	hv_vmbus_channel*	channel,
	void*			buffer,
	uint32_t		buffer_len,
	uint64_t		request_id,
	hv_vmbus_packet_type	type,
	uint32_t		flags)
{
	int			ret = 0;
	hv_vm_packet_descriptor	desc;
	uint32_t		packet_len;
	uint64_t		aligned_data;
	uint32_t		packet_len_aligned;
	boolean_t		need_sig;
	hv_vmbus_sg_buffer_list	buffer_list[3];

	packet_len = sizeof(hv_vm_packet_descriptor) + buffer_len;
	packet_len_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t));
	aligned_data = 0;

	/* Setup the descriptor */
	desc.type = type;   /* HV_VMBUS_PACKET_TYPE_DATA_IN_BAND;             */
	desc.flags = flags; /* HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED */
			    /* in 8-byte granularity */
	desc.data_offset8 = sizeof(hv_vm_packet_descriptor) >> 3;
	desc.length8 = (uint16_t) (packet_len_aligned >> 3);
	desc.transaction_id = request_id;

	buffer_list[0].data = &desc;
	buffer_list[0].length = sizeof(hv_vm_packet_descriptor);

	buffer_list[1].data = buffer;
	buffer_list[1].length = buffer_len;

	buffer_list[2].data = &aligned_data;
	buffer_list[2].length = packet_len_aligned - packet_len;

	ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 3,
	    &need_sig);

	/* TODO: We should determine if this is optional */
	if (ret == 0 && need_sig) {
		vmbus_channel_set_event(channel);
	}

	return (ret);
}
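
/*
 * Example (illustrative sketch only; "sc" and "req" are hypothetical):
 * sending an in-band request and asking the host for a completion,
 * using the request's address as the transaction id:
 *
 *	error = hv_vmbus_channel_send_packet(sc->channel,
 *	    &req, sizeof(req), (uint64_t)(uintptr_t)&req,
 *	    HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
 *	    HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 *
 * The completion is later delivered through the channel callback as a
 * completion-type packet carrying the same transaction id.
 */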

/**
 * @brief Send a range of single-page buffer packets using
 * a GPADL Direct packet type
 */
int
hv_vmbus_channel_send_packet_pagebuffer(
	hv_vmbus_channel*	channel,
	hv_vmbus_page_buffer	page_buffers[],
	uint32_t		page_count,
	void*			buffer,
	uint32_t		buffer_len,
	uint64_t		request_id)
{
	int					ret = 0;
	int					i = 0;
	boolean_t				need_sig;
	uint32_t				packet_len;
	uint32_t				packetLen_aligned;
	hv_vmbus_sg_buffer_list			buffer_list[3];
	hv_vmbus_channel_packet_page_buffer	desc;
	uint32_t				descSize;
	uint64_t				alignedData = 0;

	if (page_count > HV_MAX_PAGE_BUFFER_COUNT)
		return (EINVAL);

	/*
	 * Adjust the size down since hv_vmbus_channel_packet_page_buffer
	 * is the largest size we support
	 */
	descSize = sizeof(hv_vmbus_channel_packet_page_buffer) -
			((HV_MAX_PAGE_BUFFER_COUNT - page_count) *
			sizeof(hv_vmbus_page_buffer));
	packet_len = descSize + buffer_len;
	packetLen_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t));

	/* Setup the descriptor */
	desc.type = HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT;
	desc.flags = HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.data_offset8 = descSize >> 3; /* in 8-byte granularity */
	desc.length8 = (uint16_t) (packetLen_aligned >> 3);
	desc.transaction_id = request_id;
	desc.range_count = page_count;

	for (i = 0; i < page_count; i++) {
		desc.range[i].length = page_buffers[i].length;
		desc.range[i].offset = page_buffers[i].offset;
		desc.range[i].pfn = page_buffers[i].pfn;
	}

	buffer_list[0].data = &desc;
	buffer_list[0].length = descSize;

	buffer_list[1].data = buffer;
	buffer_list[1].length = buffer_len;

	buffer_list[2].data = &alignedData;
	buffer_list[2].length = packetLen_aligned - packet_len;

	ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 3,
	    &need_sig);

	/* TODO: We should determine if this is optional */
	if (ret == 0 && need_sig) {
		vmbus_channel_set_event(channel);
	}

	return (ret);
}
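
/*
 * Example (illustrative sketch only; "sc", "pb", "paddr0"/"paddr1" and
 * "req" are hypothetical): describing two physical pages of payload by
 * PFN so the host can access them directly, with a small in-band
 * header:
 *
 *	hv_vmbus_page_buffer pb[2];
 *
 *	pb[0].pfn = atop(paddr0); pb[0].offset = off0; pb[0].length = len0;
 *	pb[1].pfn = atop(paddr1); pb[1].offset = 0;    pb[1].length = len1;
 *
 *	error = hv_vmbus_channel_send_packet_pagebuffer(sc->channel,
 *	    pb, 2, &req, sizeof(req), (uint64_t)(uintptr_t)&req);
 */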

/**
 * @brief Send a multi-page buffer packet using a GPADL Direct packet type
 */
int
hv_vmbus_channel_send_packet_multipagebuffer(
	hv_vmbus_channel*		channel,
	hv_vmbus_multipage_buffer*	multi_page_buffer,
	void*				buffer,
	uint32_t			buffer_len,
	uint64_t			request_id)
{
	int			ret = 0;
	uint32_t		desc_size;
	boolean_t		need_sig;
	uint32_t		packet_len;
	uint32_t		packet_len_aligned;
	uint32_t		pfn_count;
	uint64_t		aligned_data = 0;
	hv_vmbus_sg_buffer_list	buffer_list[3];
	hv_vmbus_channel_packet_multipage_buffer desc;

	pfn_count =
	    HV_NUM_PAGES_SPANNED(
		    multi_page_buffer->offset,
		    multi_page_buffer->length);

	if ((pfn_count == 0) || (pfn_count > HV_MAX_MULTIPAGE_BUFFER_COUNT))
	    return (EINVAL);
	/*
	 * Adjust the size down since hv_vmbus_channel_packet_multipage_buffer
	 * is the largest size we support
	 */
	desc_size =
	    sizeof(hv_vmbus_channel_packet_multipage_buffer) -
		    ((HV_MAX_MULTIPAGE_BUFFER_COUNT - pfn_count) *
			sizeof(uint64_t));
	packet_len = desc_size + buffer_len;
	packet_len_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t));

	/*
	 * Setup the descriptor
	 */
	desc.type = HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT;
	desc.flags = HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.data_offset8 = desc_size >> 3; /* in 8-byte granularity */
	desc.length8 = (uint16_t) (packet_len_aligned >> 3);
	desc.transaction_id = request_id;
	desc.range_count = 1;

	desc.range.length = multi_page_buffer->length;
	desc.range.offset = multi_page_buffer->offset;

	memcpy(desc.range.pfn_array, multi_page_buffer->pfn_array,
		pfn_count * sizeof(uint64_t));

	buffer_list[0].data = &desc;
	buffer_list[0].length = desc_size;

	buffer_list[1].data = buffer;
	buffer_list[1].length = buffer_len;

	buffer_list[2].data = &aligned_data;
	buffer_list[2].length = packet_len_aligned - packet_len;

	ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 3,
	    &need_sig);

	/* TODO: We should determine if this is optional */
	if (ret == 0 && need_sig) {
	    vmbus_channel_set_event(channel);
	}

	return (ret);
}
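
/*
 * Example (illustrative sketch only; "sc", "mpb", "paddr", "io_len" and
 * "req" are hypothetical, and the buffer is assumed to be physically
 * contiguous starting at paddr): unlike the page-buffer variant above,
 * this one describes a single buffer spanning several pages with one
 * offset/length pair plus a PFN array:
 *
 *	hv_vmbus_multipage_buffer mpb;
 *
 *	mpb.offset = paddr & PAGE_MASK;
 *	mpb.length = io_len;
 *	for (i = 0; i < HV_NUM_PAGES_SPANNED(mpb.offset, mpb.length); i++)
 *		mpb.pfn_array[i] = atop(paddr) + i;
 *
 *	error = hv_vmbus_channel_send_packet_multipagebuffer(sc->channel,
 *	    &mpb, &req, sizeof(req), (uint64_t)(uintptr_t)&req);
 */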

/**
 * @brief Retrieve the user packet on the specified channel
 */
int
hv_vmbus_channel_recv_packet(
	hv_vmbus_channel*	channel,
	void*			Buffer,
	uint32_t		buffer_len,
	uint32_t*		buffer_actual_len,
	uint64_t*		request_id)
{
	int			ret;
	uint32_t		user_len;
	uint32_t		packet_len;
	hv_vm_packet_descriptor	desc;

	*buffer_actual_len = 0;
	*request_id = 0;

	ret = hv_ring_buffer_peek(&channel->inbound, &desc,
		sizeof(hv_vm_packet_descriptor));
	if (ret != 0)
		return (0);

	packet_len = desc.length8 << 3;
	user_len = packet_len - (desc.data_offset8 << 3);

	*buffer_actual_len = user_len;

	if (user_len > buffer_len)
		return (EINVAL);

	*request_id = desc.transaction_id;

	/* Copy over the packet to the user buffer */
	ret = hv_ring_buffer_read(&channel->inbound, Buffer, user_len,
		(desc.data_offset8 << 3));

	return (0);
}
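
/*
 * Example (illustrative sketch only; "my_channel_callback" and
 * "struct my_softc" are hypothetical): a channel callback typically
 * drains the ring until an empty ring is reported back as a zero
 * *buffer_actual_len:
 *
 *	static void
 *	my_channel_callback(void *context)
 *	{
 *		struct my_softc *sc = context;
 *		uint8_t buf[256];
 *		uint32_t len;
 *		uint64_t xid;
 *		int error;
 *
 *		for (;;) {
 *			error = hv_vmbus_channel_recv_packet(sc->channel,
 *			    buf, sizeof(buf), &len, &xid);
 *			if (error != 0 || len == 0)
 *				break;
 *			// process len bytes of buf, keyed by xid
 *		}
 *	}
 *
 * An EINVAL return means buf was smaller than the packet payload
 * (*buffer_actual_len reports the required size); a zero len with a
 * zero return simply means the ring is empty.
 */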

/**
 * @brief Retrieve the raw packet on the specified channel
 */
int
hv_vmbus_channel_recv_packet_raw(
	hv_vmbus_channel*	channel,
	void*			buffer,
	uint32_t		buffer_len,
	uint32_t*		buffer_actual_len,
	uint64_t*		request_id)
{
	int		ret;
	uint32_t	packetLen;
	uint32_t	userLen;
	hv_vm_packet_descriptor	desc;

	*buffer_actual_len = 0;
	*request_id = 0;

	ret = hv_ring_buffer_peek(
		&channel->inbound, &desc,
		sizeof(hv_vm_packet_descriptor));

	if (ret != 0)
	    return (0);

	packetLen = desc.length8 << 3;
	userLen = packetLen - (desc.data_offset8 << 3);

	*buffer_actual_len = packetLen;

	if (packetLen > buffer_len)
	    return (ENOBUFS);

	*request_id = desc.transaction_id;

	/* Copy over the entire packet to the user buffer */
	ret = hv_ring_buffer_read(&channel->inbound, buffer, packetLen, 0);

	return (0);
}