vmbus_chan.c revision 302610
/*-
 * Copyright (c) 2009-2012,2016 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/hyperv/vmbus/hv_channel.c 302610 2016-07-12 03:25:36Z sephe $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/hyperv/vmbus/hv_vmbus_priv.h>
#include <dev/hyperv/vmbus/vmbus_reg.h>
#include <dev/hyperv/vmbus/vmbus_var.h>

static void	vmbus_channel_set_event(hv_vmbus_channel* channel);
static void	VmbusProcessChannelEvent(void* channel, int pending);

/**
 *  @brief Trigger an event notification on the specified channel
 */
static void
vmbus_channel_set_event(hv_vmbus_channel *channel)
{
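	/*
	 * Two signaling paths: if the host allocated a monitor for this
	 * channel, latch the channel's bit in the per-connection TX event
	 * flags and in the monitor page trigger group; the monitor-based
	 * path lets the hypervisor batch interrupts to the host.  Otherwise
	 * signal the host directly via a hypercall.
	 */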
	if (channel->offer_msg.monitor_allocated) {
		struct vmbus_softc *sc = channel->vmbus_sc;
		hv_vmbus_monitor_page *monitor_page;
		uint32_t chanid = channel->offer_msg.child_rel_id;

		atomic_set_long(
		    &sc->vmbus_tx_evtflags[chanid >> VMBUS_EVTFLAG_SHIFT],
		    1UL << (chanid & VMBUS_EVTFLAG_MASK));

		monitor_page = sc->vmbus_mnf2;
		synch_set_bit(channel->monitor_bit,
		    (uint32_t *)&monitor_page->
		    trigger_group[channel->monitor_group].u.pending);
	} else {
		hv_vmbus_set_event(channel);
	}
}

static int
vmbus_channel_sysctl_monalloc(SYSCTL_HANDLER_ARGS)
{
	struct hv_vmbus_channel *chan = arg1;
	int alloc = 0;

	if (chan->offer_msg.monitor_allocated)
		alloc = 1;
	return sysctl_handle_int(oidp, &alloc, 0, req);
}

static void
vmbus_channel_sysctl_create(hv_vmbus_channel* channel)
{
	device_t dev;
	struct sysctl_oid *devch_sysctl;
	struct sysctl_oid *devch_id_sysctl, *devch_sub_sysctl;
	struct sysctl_oid *devch_id_in_sysctl, *devch_id_out_sysctl;
	struct sysctl_ctx_list *ctx;
	uint32_t ch_id;
	uint16_t sub_ch_id;
	char name[16];

	hv_vmbus_channel* primary_ch = channel->primary_channel;

	if (primary_ch == NULL) {
		dev = channel->device->device;
		ch_id = channel->offer_msg.child_rel_id;
	} else {
		dev = primary_ch->device->device;
		ch_id = primary_ch->offer_msg.child_rel_id;
		sub_ch_id = channel->offer_msg.offer.sub_channel_index;
	}
	ctx = device_get_sysctl_ctx(dev);
	/* This creates the dev.DEVNAME.DEVUNIT.channel tree */
	devch_sysctl = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "channel", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	/* This creates the dev.DEVNAME.DEVUNIT.channel.CHANID tree */
	snprintf(name, sizeof(name), "%d", ch_id);
	devch_id_sysctl = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_CHILDREN(devch_sysctl),
	    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");

	if (primary_ch != NULL) {
		devch_sub_sysctl = SYSCTL_ADD_NODE(ctx,
		    SYSCTL_CHILDREN(devch_id_sysctl),
		    OID_AUTO, "sub", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
		snprintf(name, sizeof(name), "%d", sub_ch_id);
		devch_id_sysctl = SYSCTL_ADD_NODE(ctx,
		    SYSCTL_CHILDREN(devch_sub_sysctl),
		    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");

		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(devch_id_sysctl),
		    OID_AUTO, "chanid", CTLFLAG_RD,
		    &channel->offer_msg.child_rel_id, 0, "channel id");
	}
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(devch_id_sysctl), OID_AUTO,
	    "cpu", CTLFLAG_RD, &channel->target_cpu, 0, "owner CPU id");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(devch_id_sysctl), OID_AUTO,
	    "monitor_allocated", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    channel, 0, vmbus_channel_sysctl_monalloc, "I",
	    "is monitor allocated to this channel");

	devch_id_in_sysctl = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_CHILDREN(devch_id_sysctl),
	    OID_AUTO, "in", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	devch_id_out_sysctl = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_CHILDREN(devch_id_sysctl),
	    OID_AUTO, "out", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	hv_ring_buffer_stat(ctx,
	    SYSCTL_CHILDREN(devch_id_in_sysctl),
	    &(channel->inbound),
	    "inbound ring buffer stats");
	hv_ring_buffer_stat(ctx,
	    SYSCTL_CHILDREN(devch_id_out_sysctl),
	    &(channel->outbound),
	    "outbound ring buffer stats");
}

/**
 * @brief Open the specified channel
 */
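/*
 * Minimal caller sketch (illustrative only; "sc_channel", "ring_size"
 * and "my_channel_cb" are hypothetical driver-side names, not part of
 * this file):
 *
 *	error = hv_vmbus_channel_open(sc_channel,
 *	    ring_size, ring_size,	TX/RX ring sizes, page multiples
 *	    NULL, 0,			no user data in the CHOPEN message
 *	    my_channel_cb, sc);
 *	if (error != 0)
 *		return (error);
 */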
int
hv_vmbus_channel_open(
	hv_vmbus_channel*		new_channel,
	uint32_t			send_ring_buffer_size,
	uint32_t			recv_ring_buffer_size,
	void*				user_data,
	uint32_t			user_data_len,
	hv_vmbus_pfn_channel_callback	pfn_on_channel_callback,
	void*				context)
{
	struct vmbus_softc *sc = new_channel->vmbus_sc;
	const struct vmbus_chanmsg_chopen_resp *resp;
	const struct vmbus_message *msg;
	struct vmbus_chanmsg_chopen *req;
	struct vmbus_msghc *mh;
	uint32_t status;
	int ret = 0;
	void *in, *out;

	if (user_data_len > VMBUS_CHANMSG_CHOPEN_UDATA_SIZE) {
		device_printf(sc->vmbus_dev,
		    "invalid udata len %u for chan%u\n",
		    user_data_len, new_channel->offer_msg.child_rel_id);
		return EINVAL;
	}

	mtx_lock(&new_channel->sc_lock);
	if (new_channel->state == HV_CHANNEL_OPEN_STATE) {
		new_channel->state = HV_CHANNEL_OPENING_STATE;
	} else {
		mtx_unlock(&new_channel->sc_lock);
		if (bootverbose) {
			printf("VMBUS: Trying to open channel <%p> which is "
			    "in state %d.\n", new_channel, new_channel->state);
		}
		return (EINVAL);
	}
	mtx_unlock(&new_channel->sc_lock);

	new_channel->on_channel_callback = pfn_on_channel_callback;
	new_channel->channel_callback_context = context;

	vmbus_on_channel_open(new_channel);

	new_channel->rxq = VMBUS_PCPU_GET(new_channel->vmbus_sc, event_tq,
	    new_channel->target_cpu);
	TASK_INIT(&new_channel->channel_task, 0, VmbusProcessChannelEvent,
	    new_channel);

	/* Allocate the ring buffer */
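	/*
	 * Both rings live in one contiguous, page-aligned allocation:
	 * the TX (outbound) ring comes first, immediately followed by
	 * the RX (inbound) ring.  The host learns where the RX ring
	 * starts through chm_rxbr_pgofs in the CHOPEN message below.
	 */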
	out = contigmalloc((send_ring_buffer_size + recv_ring_buffer_size),
	    M_DEVBUF, M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
	KASSERT(out != NULL,
	    ("Error VMBUS: contigmalloc failed to allocate Ring Buffer!"));
	if (out == NULL)
		return (ENOMEM);

	in = ((uint8_t *) out + send_ring_buffer_size);

	new_channel->ring_buffer_pages = out;
	new_channel->ring_buffer_page_count = (send_ring_buffer_size +
	    recv_ring_buffer_size) >> PAGE_SHIFT;
	new_channel->ring_buffer_size = send_ring_buffer_size +
	    recv_ring_buffer_size;

	hv_vmbus_ring_buffer_init(
		&new_channel->outbound,
		out,
		send_ring_buffer_size);

	hv_vmbus_ring_buffer_init(
		&new_channel->inbound,
		in,
		recv_ring_buffer_size);

	/* Create sysctl tree for this channel */
	vmbus_channel_sysctl_create(new_channel);

	/*
	 * Establish the GPADL for the ring buffer.
	 */
	new_channel->ring_buffer_gpadl_handle = 0;

	ret = hv_vmbus_channel_establish_gpadl(new_channel,
	    new_channel->outbound.ring_buffer,
	    send_ring_buffer_size + recv_ring_buffer_size,
	    &new_channel->ring_buffer_gpadl_handle);

	/*
	 * Open channel w/ the bufring GPADL on the target CPU.
	 */
	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for chopen(chan%u)\n",
		    new_channel->offer_msg.child_rel_id);
		return ENXIO;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHOPEN;
	req->chm_chanid = new_channel->offer_msg.child_rel_id;
	req->chm_openid = new_channel->offer_msg.child_rel_id;
	req->chm_gpadl = new_channel->ring_buffer_gpadl_handle;
	req->chm_vcpuid = new_channel->target_vcpu;
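	/*
	 * The RX ring's page offset within the GPADL: the TX ring occupies
	 * the first (send_ring_buffer_size >> PAGE_SHIFT) pages, so the
	 * host maps the RX ring starting right after them.
	 */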
	req->chm_rxbr_pgofs = send_ring_buffer_size >> PAGE_SHIFT;
	if (user_data_len)
		memcpy(req->chm_udata, user_data, user_data_len);

	ret = vmbus_msghc_exec(sc, mh);
	if (ret != 0) {
		device_printf(sc->vmbus_dev,
		    "chopen(chan%u) msg hypercall exec failed: %d\n",
		    new_channel->offer_msg.child_rel_id, ret);
		vmbus_msghc_put(sc, mh);
		return ret;
	}

	msg = vmbus_msghc_wait_result(sc, mh);
	resp = (const struct vmbus_chanmsg_chopen_resp *)msg->msg_data;
	status = resp->chm_status;

	vmbus_msghc_put(sc, mh);

	if (status == 0) {
		new_channel->state = HV_CHANNEL_OPENED_STATE;
		if (bootverbose) {
			device_printf(sc->vmbus_dev, "chan%u opened\n",
			    new_channel->offer_msg.child_rel_id);
		}
	} else {
		device_printf(sc->vmbus_dev, "failed to open chan%u\n",
		    new_channel->offer_msg.child_rel_id);
		ret = ENXIO;
	}
	return (ret);
}

/**
 * @brief Establish a GPADL for the specified buffer
 */
int
hv_vmbus_channel_establish_gpadl(struct hv_vmbus_channel *channel,
    void *contig_buffer, uint32_t size, uint32_t *gpadl0)
{
	struct vmbus_softc *sc = channel->vmbus_sc;
	struct vmbus_msghc *mh;
	struct vmbus_chanmsg_gpadl_conn *req;
	const struct vmbus_message *msg;
	size_t reqsz;
	uint32_t gpadl, status;
	int page_count, range_len, i, cnt, error;
	uint64_t page_id, paddr;

	/*
	 * Preliminary checks.
	 */

	KASSERT((size & PAGE_MASK) == 0,
	    ("invalid GPA size %u, not a multiple of page size", size));
	page_count = size >> PAGE_SHIFT;

	paddr = hv_get_phys_addr(contig_buffer);
	KASSERT((paddr & PAGE_MASK) == 0,
	    ("GPA is not page aligned %jx", (uintmax_t)paddr));
	page_id = paddr >> PAGE_SHIFT;

	range_len = __offsetof(struct vmbus_gpa_range, gpa_page[page_count]);
	/*
	 * We don't support multiple GPA ranges.
	 */
	if (range_len > UINT16_MAX) {
		device_printf(sc->vmbus_dev, "GPA too large, %d pages\n",
		    page_count);
		return EOPNOTSUPP;
	}

	/*
	 * Allocate GPADL id.
	 */
	gpadl = atomic_fetchadd_int(
	    &hv_vmbus_g_connection.next_gpadl_handle, 1);
	*gpadl0 = gpadl;

	/*
	 * Connect this GPADL to the target channel.
	 *
	 * NOTE:
	 * Since each message can only hold a small set of page
	 * addresses, several messages may be required to
	 * complete the connection.
	 */
	if (page_count > VMBUS_CHANMSG_GPADL_CONN_PGMAX)
		cnt = VMBUS_CHANMSG_GPADL_CONN_PGMAX;
	else
		cnt = page_count;
	page_count -= cnt;

	reqsz = __offsetof(struct vmbus_chanmsg_gpadl_conn,
	    chm_range.gpa_page[cnt]);
	mh = vmbus_msghc_get(sc, reqsz);
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for gpadl->chan%u\n",
		    channel->offer_msg.child_rel_id);
		return EIO;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_CONN;
	req->chm_chanid = channel->offer_msg.child_rel_id;
	req->chm_gpadl = gpadl;
	req->chm_range_len = range_len;
	req->chm_range_cnt = 1;
	req->chm_range.gpa_len = size;
	req->chm_range.gpa_ofs = 0;
	for (i = 0; i < cnt; ++i)
		req->chm_range.gpa_page[i] = page_id++;

	error = vmbus_msghc_exec(sc, mh);
	if (error) {
		device_printf(sc->vmbus_dev,
		    "gpadl->chan%u msg hypercall exec failed: %d\n",
		    channel->offer_msg.child_rel_id, error);
		vmbus_msghc_put(sc, mh);
		return error;
	}

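	/*
	 * Any pages that did not fit into the initial GPADL_CONN message
	 * are carried by follow-up GPADL_SUBCONN messages.  Only the last
	 * message in the sequence solicits a reply, so the sub-connection
	 * messages are posted without waiting; the single GPADL_CONNRESP
	 * below acknowledges the whole sequence.
	 */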
	while (page_count > 0) {
		struct vmbus_chanmsg_gpadl_subconn *subreq;

		if (page_count > VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX)
			cnt = VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX;
		else
			cnt = page_count;
		page_count -= cnt;

		reqsz = __offsetof(struct vmbus_chanmsg_gpadl_subconn,
		    chm_gpa_page[cnt]);
		vmbus_msghc_reset(mh, reqsz);

		subreq = vmbus_msghc_dataptr(mh);
		subreq->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_SUBCONN;
		subreq->chm_gpadl = gpadl;
		for (i = 0; i < cnt; ++i)
			subreq->chm_gpa_page[i] = page_id++;

		vmbus_msghc_exec_noresult(mh);
	}
	KASSERT(page_count == 0, ("invalid page count %d", page_count));

	msg = vmbus_msghc_wait_result(sc, mh);
	status = ((const struct vmbus_chanmsg_gpadl_connresp *)
	    msg->msg_data)->chm_status;

	vmbus_msghc_put(sc, mh);

	if (status != 0) {
		device_printf(sc->vmbus_dev, "gpadl->chan%u failed: "
		    "status %u\n", channel->offer_msg.child_rel_id, status);
		return EIO;
	}
	return 0;
}

/**
 * @brief Teardown the specified GPADL handle
 */
int
hv_vmbus_channel_teardown_gpdal(
	hv_vmbus_channel*	channel,
	uint32_t		gpadl_handle)
{
	int					ret = 0;
	hv_vmbus_channel_gpadl_teardown*	msg;
	hv_vmbus_channel_msg_info*		info;

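	/*
	 * Teardown is request/response: the request is queued on the
	 * global channel_msg_anchor list so the GPADL_TORNDOWN reply
	 * handler (in the channel management code) can find it and post
	 * wait_sema; we wait up to 5 seconds for that reply before
	 * reclaiming the entry.
	 */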
	info = (hv_vmbus_channel_msg_info *)
	    malloc(sizeof(hv_vmbus_channel_msg_info) +
	        sizeof(hv_vmbus_channel_gpadl_teardown),
	        M_DEVBUF, M_NOWAIT);
	KASSERT(info != NULL,
	    ("Error VMBUS: malloc failed to allocate Gpadl Teardown Msg!"));
	if (info == NULL)
		return (ENOMEM);

	sema_init(&info->wait_sema, 0, "Open Info Sema");

	msg = (hv_vmbus_channel_gpadl_teardown*) info->msg;

	msg->header.message_type = HV_CHANNEL_MESSAGE_GPADL_TEARDOWN;
	msg->child_rel_id = channel->offer_msg.child_rel_id;
	msg->gpadl = gpadl_handle;

	mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);
	TAILQ_INSERT_TAIL(&hv_vmbus_g_connection.channel_msg_anchor,
	    info, msg_list_entry);
	mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);

	ret = hv_vmbus_post_message(msg,
	    sizeof(hv_vmbus_channel_gpadl_teardown));
	if (ret != 0)
		goto cleanup;

	ret = sema_timedwait(&info->wait_sema, 5 * hz); /* KYS 5 seconds */

cleanup:
	/*
	 * Done waiting (response received, timed out, or the post failed):
	 * unlink the request and release its resources.
	 */
	mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);
	TAILQ_REMOVE(&hv_vmbus_g_connection.channel_msg_anchor,
	    info, msg_list_entry);
	mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);
	sema_destroy(&info->wait_sema);
	free(info, M_DEVBUF);

	return (ret);
}

static void
hv_vmbus_channel_close_internal(hv_vmbus_channel *channel)
{
	struct vmbus_softc *sc = channel->vmbus_sc;
	struct vmbus_msghc *mh;
	struct vmbus_chanmsg_chclose *req;
	struct taskqueue *rxq = channel->rxq;
	int error;

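	/*
	 * Close sequence: put the channel back in OPEN (i.e. closed but
	 * still offered) state, detach and drain the RX taskqueue so no
	 * more callbacks run, send CHCLOSE to the host, tear down the
	 * ring buffer GPADL, and finally free the ring buffer pages.
	 */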
	channel->state = HV_CHANNEL_OPEN_STATE;

	/*
	 * Set rxq to NULL to avoid more requests being scheduled.
	 */
	channel->rxq = NULL;
	taskqueue_drain(rxq, &channel->channel_task);
	channel->on_channel_callback = NULL;

	/*
	 * Send a closing message.
	 */
	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for chclose(chan%u)\n",
		    channel->offer_msg.child_rel_id);
		return;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHCLOSE;
	req->chm_chanid = channel->offer_msg.child_rel_id;

	error = vmbus_msghc_exec_noresult(mh);
	vmbus_msghc_put(sc, mh);

	if (error) {
		device_printf(sc->vmbus_dev,
		    "chclose(chan%u) msg hypercall exec failed: %d\n",
		    channel->offer_msg.child_rel_id, error);
		return;
	} else if (bootverbose) {
		device_printf(sc->vmbus_dev, "close chan%u\n",
		    channel->offer_msg.child_rel_id);
	}

	/* Tear down the gpadl for the channel's ring buffer */
	if (channel->ring_buffer_gpadl_handle) {
		hv_vmbus_channel_teardown_gpdal(channel,
		    channel->ring_buffer_gpadl_handle);
	}

	/* TODO: Send a msg to release the childRelId */

	/* cleanup the ring buffers for this channel */
	hv_ring_buffer_cleanup(&channel->outbound);
	hv_ring_buffer_cleanup(&channel->inbound);

	contigfree(channel->ring_buffer_pages, channel->ring_buffer_size,
	    M_DEVBUF);
}

/**
 * @brief Close the specified channel
 */
void
hv_vmbus_channel_close(hv_vmbus_channel *channel)
{
	hv_vmbus_channel*	sub_channel;

	if (channel->primary_channel != NULL) {
		/*
		 * Sub-channels are only closed when the primary
		 * channel is closed.
		 */
		return;
	}

	/*
	 * Close all sub-channels first.
	 */
	TAILQ_FOREACH(sub_channel, &channel->sc_list_anchor,
	    sc_list_entry) {
		if (sub_channel->state != HV_CHANNEL_OPENED_STATE)
			continue;
		hv_vmbus_channel_close_internal(sub_channel);
	}
	/*
	 * Then close the primary channel.
	 */
	hv_vmbus_channel_close_internal(channel);
}

/**
 * @brief Send the specified buffer on the given channel
 */
int
hv_vmbus_channel_send_packet(
	hv_vmbus_channel*	channel,
	void*			buffer,
	uint32_t		buffer_len,
	uint64_t		request_id,
	hv_vmbus_packet_type	type,
	uint32_t		flags)
{
	int			ret = 0;
	hv_vm_packet_descriptor	desc;
	uint32_t		packet_len;
	uint64_t		aligned_data;
	uint32_t		packet_len_aligned;
	boolean_t		need_sig;
	hv_vmbus_sg_buffer_list	buffer_list[3];

	packet_len = sizeof(hv_vm_packet_descriptor) + buffer_len;
	packet_len_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t));
	aligned_data = 0;

	/*
	 * Setup the descriptor; type is e.g.
	 * HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, flags is e.g.
	 * HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED.
	 */
	desc.type = type;
	desc.flags = flags;
	/* Offset and length are in 8-byte granularity */
	desc.data_offset8 = sizeof(hv_vm_packet_descriptor) >> 3;
	desc.length8 = (uint16_t) (packet_len_aligned >> 3);
	desc.transaction_id = request_id;

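	/*
	 * Write the packet as a three-element scatter list: the fixed
	 * descriptor, the caller's payload, and zero padding that rounds
	 * the packet up to an 8-byte boundary.
	 */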
	buffer_list[0].data = &desc;
	buffer_list[0].length = sizeof(hv_vm_packet_descriptor);

	buffer_list[1].data = buffer;
	buffer_list[1].length = buffer_len;

	buffer_list[2].data = &aligned_data;
	buffer_list[2].length = packet_len_aligned - packet_len;

	ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 3,
	    &need_sig);

	/* TODO: We should determine if this is optional */
	if (ret == 0 && need_sig) {
		vmbus_channel_set_event(channel);
	}

	return (ret);
}

/**
 * @brief Send a range of single-page buffer packets using
 * a GPADL Direct packet type
 */
int
hv_vmbus_channel_send_packet_pagebuffer(
	hv_vmbus_channel*	channel,
	hv_vmbus_page_buffer	page_buffers[],
	uint32_t		page_count,
	void*			buffer,
	uint32_t		buffer_len,
	uint64_t		request_id)
{
	int					ret = 0;
	boolean_t				need_sig;
	uint32_t				packet_len;
	uint32_t				page_buflen;
	uint32_t				packet_len_aligned;
	hv_vmbus_sg_buffer_list			buffer_list[4];
	hv_vmbus_channel_packet_page_buffer	desc;
	uint32_t				desc_size;
	uint64_t				aligned_data = 0;

	if (page_count > HV_MAX_PAGE_BUFFER_COUNT)
		return (EINVAL);

	/*
	 * Use only the fixed-size header of
	 * hv_vmbus_channel_packet_page_buffer; the struct is declared for
	 * the largest size we support, and the page ranges that follow
	 * are copied separately, so the descriptor is truncated at its
	 * 'range' member.
	 */
	desc_size = __offsetof(hv_vmbus_channel_packet_page_buffer, range);
	page_buflen = sizeof(hv_vmbus_page_buffer) * page_count;
	packet_len = desc_size + page_buflen + buffer_len;
	packet_len_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t));

	/* Setup the descriptor */
	desc.type = HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT;
	desc.flags = HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	/* Offset and length are in 8-byte granularity */
	desc.data_offset8 = (desc_size + page_buflen) >> 3;
	desc.length8 = (uint16_t) (packet_len_aligned >> 3);
	desc.transaction_id = request_id;
	desc.range_count = page_count;

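	/*
	 * Four-element scatter list: descriptor header, the page-buffer
	 * (GPA range) array, the caller's payload, and 8-byte alignment
	 * padding.
	 */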
	buffer_list[0].data = &desc;
	buffer_list[0].length = desc_size;

	buffer_list[1].data = page_buffers;
	buffer_list[1].length = page_buflen;

	buffer_list[2].data = buffer;
	buffer_list[2].length = buffer_len;

	buffer_list[3].data = &aligned_data;
	buffer_list[3].length = packet_len_aligned - packet_len;

	ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 4,
	    &need_sig);

	/* TODO: We should determine if this is optional */
	if (ret == 0 && need_sig) {
		vmbus_channel_set_event(channel);
	}

	return (ret);
}

/**
 * @brief Send a multi-page buffer packet using a GPADL Direct packet type
 */
int
hv_vmbus_channel_send_packet_multipagebuffer(
	hv_vmbus_channel*		channel,
	hv_vmbus_multipage_buffer*	multi_page_buffer,
	void*				buffer,
	uint32_t			buffer_len,
	uint64_t			request_id)
{
	int			ret = 0;
	uint32_t		desc_size;
	boolean_t		need_sig;
	uint32_t		packet_len;
	uint32_t		packet_len_aligned;
	uint32_t		pfn_count;
	uint64_t		aligned_data = 0;
	hv_vmbus_sg_buffer_list	buffer_list[3];
	hv_vmbus_channel_packet_multipage_buffer desc;

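	/*
	 * pfn_count is the number of pages the buffer spans, taking the
	 * starting offset within the first page into account.
	 */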
	pfn_count =
	    HV_NUM_PAGES_SPANNED(
		multi_page_buffer->offset,
		multi_page_buffer->length);

	if ((pfn_count == 0) || (pfn_count > HV_MAX_MULTIPAGE_BUFFER_COUNT))
		return (EINVAL);
	/*
	 * Shrink the descriptor to the actual PFN count;
	 * hv_vmbus_channel_packet_multipage_buffer is declared with room
	 * for the largest count we support.
	 */
	desc_size =
	    sizeof(hv_vmbus_channel_packet_multipage_buffer) -
	    ((HV_MAX_MULTIPAGE_BUFFER_COUNT - pfn_count) *
	    sizeof(uint64_t));
	packet_len = desc_size + buffer_len;
	packet_len_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t));

	/*
	 * Setup the descriptor
	 */
	desc.type = HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT;
	desc.flags = HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.data_offset8 = desc_size >> 3; /* in 8-byte granularity */
	desc.length8 = (uint16_t) (packet_len_aligned >> 3);
	desc.transaction_id = request_id;
	desc.range_count = 1;

	desc.range.length = multi_page_buffer->length;
	desc.range.offset = multi_page_buffer->offset;

	memcpy(desc.range.pfn_array, multi_page_buffer->pfn_array,
	    pfn_count * sizeof(uint64_t));

	buffer_list[0].data = &desc;
	buffer_list[0].length = desc_size;

	buffer_list[1].data = buffer;
	buffer_list[1].length = buffer_len;

	buffer_list[2].data = &aligned_data;
	buffer_list[2].length = packet_len_aligned - packet_len;

	ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 3,
	    &need_sig);

	/* TODO: We should determine if this is optional */
	if (ret == 0 && need_sig) {
		vmbus_channel_set_event(channel);
	}

	return (ret);
}

/**
 * @brief Retrieve the user packet on the specified channel
 */
int
hv_vmbus_channel_recv_packet(
	hv_vmbus_channel*	channel,
	void*			buffer,
	uint32_t		buffer_len,
	uint32_t*		buffer_actual_len,
	uint64_t*		request_id)
{
	int			ret;
	uint32_t		user_len;
	uint32_t		packet_len;
	hv_vm_packet_descriptor	desc;

	*buffer_actual_len = 0;
	*request_id = 0;

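	/*
	 * Peek at the packet descriptor first, without advancing the read
	 * index; the actual read below then skips data_offset8 * 8 header
	 * bytes and copies only the user-visible payload.
	 */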
	ret = hv_ring_buffer_peek(&channel->inbound, &desc,
	    sizeof(hv_vm_packet_descriptor));
	if (ret != 0)
		return (0);

	packet_len = desc.length8 << 3;
	user_len = packet_len - (desc.data_offset8 << 3);

	*buffer_actual_len = user_len;

	if (user_len > buffer_len)
		return (EINVAL);

	*request_id = desc.transaction_id;

	/* Copy over the packet to the user buffer */
	ret = hv_ring_buffer_read(&channel->inbound, buffer, user_len,
	    (desc.data_offset8 << 3));

	return (0);
}

/**
 * @brief Retrieve the raw packet on the specified channel
 */
int
hv_vmbus_channel_recv_packet_raw(
	hv_vmbus_channel*	channel,
	void*			buffer,
	uint32_t		buffer_len,
	uint32_t*		buffer_actual_len,
	uint64_t*		request_id)
{
	int			ret;
	uint32_t		packet_len;
	hv_vm_packet_descriptor	desc;

	*buffer_actual_len = 0;
	*request_id = 0;

	ret = hv_ring_buffer_peek(
	    &channel->inbound, &desc,
	    sizeof(hv_vm_packet_descriptor));
	if (ret != 0)
		return (0);

	packet_len = desc.length8 << 3;
	*buffer_actual_len = packet_len;

	if (packet_len > buffer_len)
		return (ENOBUFS);

	*request_id = desc.transaction_id;

	/* Copy over the entire packet, including the descriptor */
	ret = hv_ring_buffer_read(&channel->inbound, buffer, packet_len, 0);

	return (0);
}

/**
 * Process a channel event notification
 */
static void
VmbusProcessChannelEvent(void* context, int pending)
{
	void* arg;
	uint32_t bytes_to_read;
	hv_vmbus_channel* channel = (hv_vmbus_channel*)context;
	boolean_t is_batched_reading;

	if (channel->on_channel_callback != NULL) {
		arg = channel->channel_callback_context;
		is_batched_reading = channel->batched_reading;
		/*
		 * Optimize host-to-guest signaling by ensuring:
		 * 1. While reading the channel, interrupts from the host
		 *    are disabled.
		 * 2. All posted messages from the host are processed
		 *    before returning from this callback.
		 * 3. Once we return, signaling from the host is re-enabled.
		 *    At that point we check whether additional packets have
		 *    become available to read, and if so repeat the process.
		 */
		do {
			if (is_batched_reading)
				hv_ring_buffer_read_begin(&channel->inbound);

			channel->on_channel_callback(arg);

			if (is_batched_reading)
				bytes_to_read =
				    hv_ring_buffer_read_end(&channel->inbound);
			else
				bytes_to_read = 0;
		} while (is_batched_reading && (bytes_to_read != 0));
	}
}