vmbus_chan.c revision 302692
/*-
 * Copyright (c) 2009-2012,2016 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/hyperv/vmbus/hv_channel.c 302692 2016-07-13 03:14:29Z sephe $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/hyperv/vmbus/hv_vmbus_priv.h>
#include <dev/hyperv/vmbus/hyperv_var.h>
#include <dev/hyperv/vmbus/vmbus_reg.h>
#include <dev/hyperv/vmbus/vmbus_var.h>

static void	vmbus_channel_set_event(hv_vmbus_channel *channel);
static void	vmbus_process_channel_event(void *context, int pending);
static void	vmbus_chan_update_evtflagcnt(struct vmbus_softc *,
		    const struct hv_vmbus_channel *);

/**
 * @brief Trigger an event notification on the specified channel
 */
static void
vmbus_channel_set_event(hv_vmbus_channel *channel)
{
	struct vmbus_softc *sc = channel->vmbus_sc;
	uint32_t chanid = channel->offer_msg.child_rel_id;

	atomic_set_long(&sc->vmbus_tx_evtflags[chanid >> VMBUS_EVTFLAG_SHIFT],
	    1UL << (chanid & VMBUS_EVTFLAG_MASK));

	if (channel->offer_msg.monitor_allocated) {
		hv_vmbus_monitor_page *monitor_page;

		monitor_page = sc->vmbus_mnf2;
		synch_set_bit(channel->monitor_bit,
		    (uint32_t *)&monitor_page->
			trigger_group[channel->monitor_group].u.pending);
	} else {
		hypercall_signal_event(channel->ch_sigevt_dma.hv_paddr);
	}
}
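
/*
 * Informal example of the event-flag math above, assuming an LP64
 * kernel where VMBUS_EVTFLAG_SHIFT is 6 (64 flags per u_long) and
 * VMBUS_EVTFLAG_MASK is 63:
 *
 *	chanid = 70
 *	word   = chanid >> 6	->	vmbus_tx_evtflags[1]
 *	bit    = chanid & 63	->	1UL << 6
 *
 * i.e. signaling channel 70 sets bit 6 of the second 64-bit
 * event-flag word.
 */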

static int
vmbus_channel_sysctl_monalloc(SYSCTL_HANDLER_ARGS)
{
	struct hv_vmbus_channel *chan = arg1;
	int alloc = 0;

	if (chan->offer_msg.monitor_allocated)
		alloc = 1;
	return sysctl_handle_int(oidp, &alloc, 0, req);
}

static void
vmbus_channel_sysctl_create(hv_vmbus_channel *channel)
{
	device_t dev;
	struct sysctl_oid *devch_sysctl;
	struct sysctl_oid *devch_id_sysctl, *devch_sub_sysctl;
	struct sysctl_oid *devch_id_in_sysctl, *devch_id_out_sysctl;
	struct sysctl_ctx_list *ctx;
	uint32_t ch_id;
	uint16_t sub_ch_id;
	char name[16];

	hv_vmbus_channel *primary_ch = channel->primary_channel;

	if (primary_ch == NULL) {
		dev = channel->device->device;
		ch_id = channel->offer_msg.child_rel_id;
	} else {
		dev = primary_ch->device->device;
		ch_id = primary_ch->offer_msg.child_rel_id;
		sub_ch_id = channel->offer_msg.offer.sub_channel_index;
	}
	ctx = &channel->ch_sysctl_ctx;
	sysctl_ctx_init(ctx);
	/* This creates dev.DEVNAME.DEVUNIT.channel tree */
	devch_sysctl = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "channel", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	/* This creates dev.DEVNAME.DEVUNIT.channel.CHANID tree */
	snprintf(name, sizeof(name), "%d", ch_id);
	devch_id_sysctl = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_CHILDREN(devch_sysctl),
	    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");

	if (primary_ch != NULL) {
		devch_sub_sysctl = SYSCTL_ADD_NODE(ctx,
		    SYSCTL_CHILDREN(devch_id_sysctl),
		    OID_AUTO, "sub", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
		snprintf(name, sizeof(name), "%d", sub_ch_id);
		devch_id_sysctl = SYSCTL_ADD_NODE(ctx,
		    SYSCTL_CHILDREN(devch_sub_sysctl),
		    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");

		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(devch_id_sysctl),
		    OID_AUTO, "chanid", CTLFLAG_RD,
		    &channel->offer_msg.child_rel_id, 0, "channel id");
	}
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(devch_id_sysctl), OID_AUTO,
	    "cpu", CTLFLAG_RD, &channel->target_cpu, 0, "owner CPU id");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(devch_id_sysctl), OID_AUTO,
	    "monitor_allocated", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    channel, 0, vmbus_channel_sysctl_monalloc, "I",
	    "is monitor allocated to this channel");

	devch_id_in_sysctl = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_CHILDREN(devch_id_sysctl),
	    OID_AUTO, "in", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	devch_id_out_sysctl = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_CHILDREN(devch_id_sysctl),
	    OID_AUTO, "out", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	hv_ring_buffer_stat(ctx,
	    SYSCTL_CHILDREN(devch_id_in_sysctl),
	    &(channel->inbound),
	    "inbound ring buffer stats");
	hv_ring_buffer_stat(ctx,
	    SYSCTL_CHILDREN(devch_id_out_sysctl),
	    &(channel->outbound),
	    "outbound ring buffer stats");
}
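
/*
 * Resulting sysctl layout (sketch; "hn0"/channel 2 are just example
 * names, and the "chanid" leaf exists only under sub-channels):
 *
 *	dev.hn.0.channel.2.cpu
 *	dev.hn.0.channel.2.monitor_allocated
 *	dev.hn.0.channel.2.in.*			inbound bufring stats
 *	dev.hn.0.channel.2.out.*		outbound bufring stats
 *	dev.hn.0.channel.2.sub.1.chanid
 *	dev.hn.0.channel.2.sub.1.cpu		... and so on, as above
 */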

/**
 * @brief Open the specified channel
 */
int
hv_vmbus_channel_open(
	hv_vmbus_channel*		new_channel,
	uint32_t			send_ring_buffer_size,
	uint32_t			recv_ring_buffer_size,
	void*				user_data,
	uint32_t			user_data_len,
	hv_vmbus_pfn_channel_callback	pfn_on_channel_callback,
	void*				context)
{
	struct vmbus_softc *sc = new_channel->vmbus_sc;
	const struct vmbus_chanmsg_chopen_resp *resp;
	const struct vmbus_message *msg;
	struct vmbus_chanmsg_chopen *req;
	struct vmbus_msghc *mh;
	uint32_t status;
	int ret = 0;
	void *in, *out;

	if (user_data_len > VMBUS_CHANMSG_CHOPEN_UDATA_SIZE) {
		device_printf(sc->vmbus_dev,
		    "invalid udata len %u for chan%u\n",
		    user_data_len, new_channel->offer_msg.child_rel_id);
		return EINVAL;
	}

	mtx_lock(&new_channel->sc_lock);
	if (new_channel->state == HV_CHANNEL_OPEN_STATE) {
		new_channel->state = HV_CHANNEL_OPENING_STATE;
	} else {
		mtx_unlock(&new_channel->sc_lock);
		if (bootverbose)
			printf("VMBUS: Trying to open channel <%p> which is "
			    "in state %d.\n", new_channel, new_channel->state);
		return (EINVAL);
	}
	mtx_unlock(&new_channel->sc_lock);

	new_channel->on_channel_callback = pfn_on_channel_callback;
	new_channel->channel_callback_context = context;

	vmbus_chan_update_evtflagcnt(sc, new_channel);

	new_channel->rxq = VMBUS_PCPU_GET(new_channel->vmbus_sc, event_tq,
	    new_channel->target_cpu);
	TASK_INIT(&new_channel->channel_task, 0, vmbus_process_channel_event,
	    new_channel);

	/* Allocate the ring buffer */
	out = contigmalloc((send_ring_buffer_size + recv_ring_buffer_size),
	    M_DEVBUF, M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
	KASSERT(out != NULL,
	    ("Error VMBUS: contigmalloc failed to allocate Ring Buffer!"));
	if (out == NULL)
		return (ENOMEM);

	in = ((uint8_t *)out + send_ring_buffer_size);

	new_channel->ring_buffer_pages = out;
	new_channel->ring_buffer_page_count = (send_ring_buffer_size +
	    recv_ring_buffer_size) >> PAGE_SHIFT;
	new_channel->ring_buffer_size = send_ring_buffer_size +
	    recv_ring_buffer_size;

	hv_vmbus_ring_buffer_init(
		&new_channel->outbound,
		out,
		send_ring_buffer_size);

	hv_vmbus_ring_buffer_init(
		&new_channel->inbound,
		in,
		recv_ring_buffer_size);

	/* Create sysctl tree for this channel */
	vmbus_channel_sysctl_create(new_channel);

	/*
	 * Establish the gpadl for the ring buffer
	 */
	new_channel->ring_buffer_gpadl_handle = 0;

	ret = hv_vmbus_channel_establish_gpadl(new_channel,
		new_channel->outbound.ring_buffer,
		send_ring_buffer_size + recv_ring_buffer_size,
		&new_channel->ring_buffer_gpadl_handle);
	if (ret != 0) {
		device_printf(sc->vmbus_dev,
		    "failed to connect bufring GPADL to chan%u\n",
		    new_channel->offer_msg.child_rel_id);
		return ret;
	}

	/*
	 * Open channel w/ the bufring GPADL on the target CPU.
	 */
	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for chopen(chan%u)\n",
		    new_channel->offer_msg.child_rel_id);
		return ENXIO;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHOPEN;
	req->chm_chanid = new_channel->offer_msg.child_rel_id;
	req->chm_openid = new_channel->offer_msg.child_rel_id;
	req->chm_gpadl = new_channel->ring_buffer_gpadl_handle;
	req->chm_vcpuid = new_channel->target_vcpu;
	req->chm_rxbr_pgofs = send_ring_buffer_size >> PAGE_SHIFT;
	if (user_data_len)
		memcpy(req->chm_udata, user_data, user_data_len);

	ret = vmbus_msghc_exec(sc, mh);
	if (ret != 0) {
		device_printf(sc->vmbus_dev,
		    "chopen(chan%u) msg hypercall exec failed: %d\n",
		    new_channel->offer_msg.child_rel_id, ret);
		vmbus_msghc_put(sc, mh);
		return ret;
	}

	msg = vmbus_msghc_wait_result(sc, mh);
	resp = (const struct vmbus_chanmsg_chopen_resp *)msg->msg_data;
	status = resp->chm_status;

	vmbus_msghc_put(sc, mh);

	if (status == 0) {
		new_channel->state = HV_CHANNEL_OPENED_STATE;
		if (bootverbose) {
			device_printf(sc->vmbus_dev, "chan%u opened\n",
			    new_channel->offer_msg.child_rel_id);
		}
	} else {
		device_printf(sc->vmbus_dev, "failed to open chan%u\n",
		    new_channel->offer_msg.child_rel_id);
		ret = ENXIO;
	}
	return (ret);
}
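
/*
 * Usage sketch; hypothetical driver code, with the callback, softc and
 * the 4-page bufring sizes made up for illustration.  Ring sizes must
 * be page aligned, since the combined buffer is mapped into a GPADL
 * page by page:
 *
 *	error = hv_vmbus_channel_open(chan, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *	    NULL, 0, my_drv_channel_cb, my_softc);
 *	if (error != 0)
 *		device_printf(dev, "failed to open channel: %d\n", error);
 */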

/**
 * @brief Establish a GPADL for the specified buffer
 */
int
hv_vmbus_channel_establish_gpadl(struct hv_vmbus_channel *channel,
    void *contig_buffer, uint32_t size, uint32_t *gpadl0)
{
	struct vmbus_softc *sc = channel->vmbus_sc;
	struct vmbus_msghc *mh;
	struct vmbus_chanmsg_gpadl_conn *req;
	const struct vmbus_message *msg;
	size_t reqsz;
	uint32_t gpadl, status;
	int page_count, range_len, i, cnt, error;
	uint64_t page_id, paddr;

	/*
	 * Preliminary checks.
	 */

	KASSERT((size & PAGE_MASK) == 0,
	    ("invalid GPA size %u, not a multiple of page size", size));
	page_count = size >> PAGE_SHIFT;

	paddr = hv_get_phys_addr(contig_buffer);
	KASSERT((paddr & PAGE_MASK) == 0,
	    ("GPA is not page aligned %jx", (uintmax_t)paddr));
	page_id = paddr >> PAGE_SHIFT;

	range_len = __offsetof(struct vmbus_gpa_range, gpa_page[page_count]);
	/*
	 * We don't support multiple GPA ranges.
	 */
	if (range_len > UINT16_MAX) {
		device_printf(sc->vmbus_dev, "GPA too large, %d pages\n",
		    page_count);
		return EOPNOTSUPP;
	}

	/*
	 * Allocate GPADL id.
	 */
	gpadl = vmbus_gpadl_alloc(sc);
	*gpadl0 = gpadl;

	/*
	 * Connect this GPADL to the target channel.
	 *
	 * NOTE:
	 * Since each message can only hold a small set of page
	 * addresses, several messages may be required to
	 * complete the connection.
	 */
	if (page_count > VMBUS_CHANMSG_GPADL_CONN_PGMAX)
		cnt = VMBUS_CHANMSG_GPADL_CONN_PGMAX;
	else
		cnt = page_count;
	page_count -= cnt;

	reqsz = __offsetof(struct vmbus_chanmsg_gpadl_conn,
	    chm_range.gpa_page[cnt]);
	mh = vmbus_msghc_get(sc, reqsz);
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for gpadl->chan%u\n",
		    channel->offer_msg.child_rel_id);
		return EIO;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_CONN;
	req->chm_chanid = channel->offer_msg.child_rel_id;
	req->chm_gpadl = gpadl;
	req->chm_range_len = range_len;
	req->chm_range_cnt = 1;
	req->chm_range.gpa_len = size;
	req->chm_range.gpa_ofs = 0;
	for (i = 0; i < cnt; ++i)
		req->chm_range.gpa_page[i] = page_id++;

	error = vmbus_msghc_exec(sc, mh);
	if (error) {
		device_printf(sc->vmbus_dev,
		    "gpadl->chan%u msg hypercall exec failed: %d\n",
		    channel->offer_msg.child_rel_id, error);
		vmbus_msghc_put(sc, mh);
		return error;
	}

	while (page_count > 0) {
		struct vmbus_chanmsg_gpadl_subconn *subreq;

		if (page_count > VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX)
			cnt = VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX;
		else
			cnt = page_count;
		page_count -= cnt;

		reqsz = __offsetof(struct vmbus_chanmsg_gpadl_subconn,
		    chm_gpa_page[cnt]);
		vmbus_msghc_reset(mh, reqsz);

		subreq = vmbus_msghc_dataptr(mh);
		subreq->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_SUBCONN;
		subreq->chm_gpadl = gpadl;
		for (i = 0; i < cnt; ++i)
			subreq->chm_gpa_page[i] = page_id++;

		vmbus_msghc_exec_noresult(mh);
	}
	KASSERT(page_count == 0, ("invalid page count %d", page_count));

	msg = vmbus_msghc_wait_result(sc, mh);
	status = ((const struct vmbus_chanmsg_gpadl_connresp *)
	    msg->msg_data)->chm_status;

	vmbus_msghc_put(sc, mh);

	if (status != 0) {
		device_printf(sc->vmbus_dev, "gpadl->chan%u failed: "
		    "status %u\n", channel->offer_msg.child_rel_id, status);
		return EIO;
	}
	if (bootverbose) {
		device_printf(sc->vmbus_dev, "gpadl->chan%u succeeded\n",
		    channel->offer_msg.child_rel_id);
	}
	return 0;
}
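
/*
 * Worked example of the message splitting above.  The PGMAX values here
 * are illustrative; the real limits follow from how many 64-bit page
 * numbers fit in the fixed-size hypercall message payload.  Suppose
 * GPADL_CONN carries 26 pages and each GPADL_SUBCONN 28 pages, and a
 * 1MB ring buffer (256 4KB pages) is being connected:
 *
 *	GPADL_CONN		 26 pages	(230 left)
 *	GPADL_SUBCONN x 8	224 pages	(  6 left)
 *	GPADL_SUBCONN x 1	  6 pages	(  0 left)
 *
 * The initial message is sent with vmbus_msghc_exec(), the rest with
 * vmbus_msghc_exec_noresult(); a single GPADL_CONNRESP, awaited at the
 * end, carries the status for the whole connection.
 */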

/*
 * Disconnect the GPADL from the target channel
 */
int
hv_vmbus_channel_teardown_gpdal(struct hv_vmbus_channel *chan, uint32_t gpadl)
{
	struct vmbus_softc *sc = chan->vmbus_sc;
	struct vmbus_msghc *mh;
	struct vmbus_chanmsg_gpadl_disconn *req;
	int error;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for gpa x->chan%u\n",
		    chan->offer_msg.child_rel_id);
		return EBUSY;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_DISCONN;
	req->chm_chanid = chan->offer_msg.child_rel_id;
	req->chm_gpadl = gpadl;

	error = vmbus_msghc_exec(sc, mh);
	if (error) {
		device_printf(sc->vmbus_dev,
		    "gpa x->chan%u msg hypercall exec failed: %d\n",
		    chan->offer_msg.child_rel_id, error);
		vmbus_msghc_put(sc, mh);
		return error;
	}

	vmbus_msghc_wait_result(sc, mh);
	/* Discard result; no useful information */
	vmbus_msghc_put(sc, mh);

	return 0;
}

static void
hv_vmbus_channel_close_internal(hv_vmbus_channel *channel)
{
	struct vmbus_softc *sc = channel->vmbus_sc;
	struct vmbus_msghc *mh;
	struct vmbus_chanmsg_chclose *req;
	struct taskqueue *rxq = channel->rxq;
	int error;

	channel->state = HV_CHANNEL_OPEN_STATE;
	sysctl_ctx_free(&channel->ch_sysctl_ctx);

	/*
	 * Set rxq to NULL to prevent more requests from being scheduled.
	 */
	channel->rxq = NULL;
	taskqueue_drain(rxq, &channel->channel_task);
	channel->on_channel_callback = NULL;

	/*
	 * Send a closing message
	 */

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for chclose(chan%u)\n",
		    channel->offer_msg.child_rel_id);
		return;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHCLOSE;
	req->chm_chanid = channel->offer_msg.child_rel_id;

	error = vmbus_msghc_exec_noresult(mh);
	vmbus_msghc_put(sc, mh);

	if (error) {
		device_printf(sc->vmbus_dev,
		    "chclose(chan%u) msg hypercall exec failed: %d\n",
		    channel->offer_msg.child_rel_id, error);
		return;
	} else if (bootverbose) {
		device_printf(sc->vmbus_dev, "close chan%u\n",
		    channel->offer_msg.child_rel_id);
	}

	/* Tear down the gpadl for the channel's ring buffer */
	if (channel->ring_buffer_gpadl_handle) {
		hv_vmbus_channel_teardown_gpdal(channel,
			channel->ring_buffer_gpadl_handle);
	}

	/* TODO: Send a msg to release the childRelId */

	/* cleanup the ring buffers for this channel */
	hv_ring_buffer_cleanup(&channel->outbound);
	hv_ring_buffer_cleanup(&channel->inbound);

	contigfree(channel->ring_buffer_pages, channel->ring_buffer_size,
	    M_DEVBUF);
}

/**
 * @brief Close the specified channel
 */
void
hv_vmbus_channel_close(hv_vmbus_channel *channel)
{
	hv_vmbus_channel	*sub_channel;

	if (channel->primary_channel != NULL) {
		/*
		 * We only close multi-channels when the primary is
		 * closed.
		 */
		return;
	}

	/*
	 * Close all multi-channels first.
	 */
	TAILQ_FOREACH(sub_channel, &channel->sc_list_anchor,
	    sc_list_entry) {
		if (sub_channel->state != HV_CHANNEL_OPENED_STATE)
			continue;
		hv_vmbus_channel_close_internal(sub_channel);
	}
	/*
	 * Then close the primary channel.
	 */
	hv_vmbus_channel_close_internal(channel);
}

/**
 * @brief Send the specified buffer on the given channel
 */
int
hv_vmbus_channel_send_packet(
	hv_vmbus_channel*	channel,
	void*			buffer,
	uint32_t		buffer_len,
	uint64_t		request_id,
	hv_vmbus_packet_type	type,
	uint32_t		flags)
{
	int			ret = 0;
	hv_vm_packet_descriptor	desc;
	uint32_t		packet_len;
	uint64_t		aligned_data;
	uint32_t		packet_len_aligned;
	boolean_t		need_sig;
	hv_vmbus_sg_buffer_list	buffer_list[3];

	packet_len = sizeof(hv_vm_packet_descriptor) + buffer_len;
	packet_len_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t));
	aligned_data = 0;

	/* Setup the descriptor */
	desc.type = type;   /* HV_VMBUS_PACKET_TYPE_DATA_IN_BAND;             */
	desc.flags = flags; /* HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED */
			    /* in 8-bytes granularity */
	desc.data_offset8 = sizeof(hv_vm_packet_descriptor) >> 3;
	desc.length8 = (uint16_t)(packet_len_aligned >> 3);
	desc.transaction_id = request_id;

	buffer_list[0].data = &desc;
	buffer_list[0].length = sizeof(hv_vm_packet_descriptor);

	buffer_list[1].data = buffer;
	buffer_list[1].length = buffer_len;

	buffer_list[2].data = &aligned_data;
	buffer_list[2].length = packet_len_aligned - packet_len;

	ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 3,
	    &need_sig);

	/* TODO: We should determine if this is optional */
	if (ret == 0 && need_sig) {
		vmbus_channel_set_event(channel);
	}

	return (ret);
}
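
/*
 * Worked example of the length math above, assuming the 16-byte
 * hv_vm_packet_descriptor layout implied by the ">> 3" conversions:
 *
 *	buffer_len		= 100
 *	packet_len		= 16 + 100		= 116
 *	packet_len_aligned	= roundup(116, 8)	= 120
 *	desc.data_offset8	= 16 >> 3		= 2
 *	desc.length8		= 120 >> 3		= 15
 *
 * The three scatter/gather elements written to the ring are thus the
 * 16-byte descriptor, the 100-byte payload and 4 bytes of zero pad.
 */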

/**
 * @brief Send a range of single-page buffer packets using
 * a GPADL Direct packet type
 */
int
hv_vmbus_channel_send_packet_pagebuffer(
	hv_vmbus_channel*	channel,
	hv_vmbus_page_buffer	page_buffers[],
	uint32_t		page_count,
	void*			buffer,
	uint32_t		buffer_len,
	uint64_t		request_id)
{

	int					ret = 0;
	boolean_t				need_sig;
	uint32_t				packet_len;
	uint32_t				page_buflen;
	uint32_t				packet_len_aligned;
	hv_vmbus_sg_buffer_list			buffer_list[4];
	hv_vmbus_channel_packet_page_buffer	desc;
	uint32_t				desc_size;
	uint64_t				aligned_data = 0;

	if (page_count > HV_MAX_PAGE_BUFFER_COUNT)
		return (EINVAL);

	/*
	 * Adjust the size down since hv_vmbus_channel_packet_page_buffer
	 * is the largest size we support
	 */
	desc_size = __offsetof(hv_vmbus_channel_packet_page_buffer, range);
	page_buflen = sizeof(hv_vmbus_page_buffer) * page_count;
	packet_len = desc_size + page_buflen + buffer_len;
	packet_len_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t));

	/* Setup the descriptor */
	desc.type = HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT;
	desc.flags = HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	/* in 8-bytes granularity */
	desc.data_offset8 = (desc_size + page_buflen) >> 3;
	desc.length8 = (uint16_t)(packet_len_aligned >> 3);
	desc.transaction_id = request_id;
	desc.range_count = page_count;

	buffer_list[0].data = &desc;
	buffer_list[0].length = desc_size;

	buffer_list[1].data = page_buffers;
	buffer_list[1].length = page_buflen;

	buffer_list[2].data = buffer;
	buffer_list[2].length = buffer_len;

	buffer_list[3].data = &aligned_data;
	buffer_list[3].length = packet_len_aligned - packet_len;

	ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 4,
	    &need_sig);

	/* TODO: We should determine if this is optional */
	if (ret == 0 && need_sig) {
		vmbus_channel_set_event(channel);
	}

	return (ret);
}

/**
 * @brief Send a multi-page buffer packet using a GPADL Direct packet type
 */
int
hv_vmbus_channel_send_packet_multipagebuffer(
	hv_vmbus_channel*		channel,
	hv_vmbus_multipage_buffer*	multi_page_buffer,
	void*				buffer,
	uint32_t			buffer_len,
	uint64_t			request_id)
{

	int			ret = 0;
	uint32_t		desc_size;
	boolean_t		need_sig;
	uint32_t		packet_len;
	uint32_t		packet_len_aligned;
	uint32_t		pfn_count;
	uint64_t		aligned_data = 0;
	hv_vmbus_sg_buffer_list	buffer_list[3];
	hv_vmbus_channel_packet_multipage_buffer desc;

	pfn_count =
	    HV_NUM_PAGES_SPANNED(
		    multi_page_buffer->offset,
		    multi_page_buffer->length);

	if ((pfn_count == 0) || (pfn_count > HV_MAX_MULTIPAGE_BUFFER_COUNT))
		return (EINVAL);
	/*
	 * Adjust the size down since hv_vmbus_channel_packet_multipage_buffer
	 * is the largest size we support
	 */
	desc_size =
	    sizeof(hv_vmbus_channel_packet_multipage_buffer) -
		((HV_MAX_MULTIPAGE_BUFFER_COUNT - pfn_count) *
		    sizeof(uint64_t));
	packet_len = desc_size + buffer_len;
	packet_len_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t));

	/*
	 * Setup the descriptor
	 */
	desc.type = HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT;
	desc.flags = HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.data_offset8 = desc_size >> 3; /* in 8-bytes granularity */
	desc.length8 = (uint16_t)(packet_len_aligned >> 3);
	desc.transaction_id = request_id;
	desc.range_count = 1;

	desc.range.length = multi_page_buffer->length;
	desc.range.offset = multi_page_buffer->offset;

	memcpy(desc.range.pfn_array, multi_page_buffer->pfn_array,
	    pfn_count * sizeof(uint64_t));

	buffer_list[0].data = &desc;
	buffer_list[0].length = desc_size;

	buffer_list[1].data = buffer;
	buffer_list[1].length = buffer_len;

	buffer_list[2].data = &aligned_data;
	buffer_list[2].length = packet_len_aligned - packet_len;

	ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 3,
	    &need_sig);

	/* TODO: We should determine if this is optional */
	if (ret == 0 && need_sig) {
		vmbus_channel_set_event(channel);
	}

	return (ret);
}

/**
 * @brief Retrieve the user packet on the specified channel
 */
int
hv_vmbus_channel_recv_packet(
	hv_vmbus_channel*	channel,
	void*			buffer,
	uint32_t		buffer_len,
	uint32_t*		buffer_actual_len,
	uint64_t*		request_id)
{
	int			ret;
	uint32_t		user_len;
	uint32_t		packet_len;
	hv_vm_packet_descriptor	desc;

	*buffer_actual_len = 0;
	*request_id = 0;

	ret = hv_ring_buffer_peek(&channel->inbound, &desc,
		sizeof(hv_vm_packet_descriptor));
	if (ret != 0)
		return (0);

	packet_len = desc.length8 << 3;
	user_len = packet_len - (desc.data_offset8 << 3);

	*buffer_actual_len = user_len;

	if (user_len > buffer_len)
		return (EINVAL);

	*request_id = desc.transaction_id;

	/* Copy over the packet to the user buffer */
	ret = hv_ring_buffer_read(&channel->inbound, buffer, user_len,
		(desc.data_offset8 << 3));

	return (0);
}
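
/*
 * Usage sketch for a channel callback draining its inbound ring; the
 * buffer size and the my_handle_packet() handler are hypothetical.
 * An empty ring leaves *buffer_actual_len at 0, terminating the loop:
 *
 *	uint8_t buf[512];
 *	uint32_t len;
 *	uint64_t xactid;
 *
 *	for (;;) {
 *		if (hv_vmbus_channel_recv_packet(chan, buf, sizeof(buf),
 *		    &len, &xactid) != 0 || len == 0)
 *			break;
 *		my_handle_packet(buf, len, xactid);
 *	}
 */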

/**
 * @brief Retrieve the raw packet on the specified channel
 */
int
hv_vmbus_channel_recv_packet_raw(
	hv_vmbus_channel*	channel,
	void*			buffer,
	uint32_t		buffer_len,
	uint32_t*		buffer_actual_len,
	uint64_t*		request_id)
{
	int			ret;
	uint32_t		packet_len;
	hv_vm_packet_descriptor	desc;

	*buffer_actual_len = 0;
	*request_id = 0;

	ret = hv_ring_buffer_peek(
		&channel->inbound, &desc,
		sizeof(hv_vm_packet_descriptor));

	if (ret != 0)
		return (0);

	packet_len = desc.length8 << 3;
	*buffer_actual_len = packet_len;

	if (packet_len > buffer_len)
		return (ENOBUFS);

	*request_id = desc.transaction_id;

	/* Copy over the entire packet to the user buffer */
	ret = hv_ring_buffer_read(&channel->inbound, buffer, packet_len, 0);

	return (0);
}

/**
 * Process a channel event notification
 */
static void
vmbus_process_channel_event(void *context, int pending)
{
	void *arg;
	uint32_t bytes_to_read;
	hv_vmbus_channel *channel = (hv_vmbus_channel *)context;
	boolean_t is_batched_reading;

	if (channel->on_channel_callback != NULL) {
		arg = channel->channel_callback_context;
		is_batched_reading = channel->batched_reading;
		/*
		 * Optimize host to guest signaling by ensuring:
		 * 1. While reading the channel, we disable interrupts from
		 *    the host.
		 * 2. Ensure that we process all posted messages from the host
		 *    before returning from this callback.
		 * 3. Once we return, enable signaling from the host. Once this
		 *    state is set we check to see if additional packets are
		 *    available to read. In this case we repeat the process.
		 */
		do {
			if (is_batched_reading)
				hv_ring_buffer_read_begin(&channel->inbound);

			channel->on_channel_callback(arg);

			if (is_batched_reading)
				bytes_to_read =
				    hv_ring_buffer_read_end(&channel->inbound);
			else
				bytes_to_read = 0;
		} while (is_batched_reading && (bytes_to_read != 0));
	}
}

static __inline void
vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *event_flags,
    int flag_cnt)
{
	int f;

	for (f = 0; f < flag_cnt; ++f) {
		uint32_t rel_id_base;
		u_long flags;
		int bit;

		if (event_flags[f] == 0)
			continue;

		flags = atomic_swap_long(&event_flags[f], 0);
		rel_id_base = f << VMBUS_EVTFLAG_SHIFT;

		while ((bit = ffsl(flags)) != 0) {
			struct hv_vmbus_channel *channel;
			uint32_t rel_id;

			--bit;	/* NOTE: ffsl is 1-based */
			flags &= ~(1UL << bit);

			rel_id = rel_id_base + bit;
			channel = sc->vmbus_chmap[rel_id];

			/* if channel is closed or closing */
			if (channel == NULL || channel->rxq == NULL)
				continue;

			if (channel->batched_reading)
				hv_ring_buffer_read_begin(&channel->inbound);
			taskqueue_enqueue(channel->rxq, &channel->channel_task);
		}
	}
}
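
/*
 * Decode example (again assuming VMBUS_EVTFLAG_SHIFT == 6 on LP64):
 * event_flags[1] == 0x5 means bits 0 and 2 of the second word are set,
 * so rel_id_base is 1 << 6 == 64 and channels 64 and 66 get their
 * channel tasks enqueued on the per-channel taskqueues.
 */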

void
vmbus_event_proc(struct vmbus_softc *sc, int cpu)
{
	struct vmbus_evtflags *eventf;

	/*
	 * On hosts running Win8 or later, the event page can be checked
	 * directly to get the id of the channel that has the pending
	 * interrupt.
	 */
	eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE;
	vmbus_event_flags_proc(sc, eventf->evt_flags,
	    VMBUS_PCPU_GET(sc, event_flags_cnt, cpu));
}

void
vmbus_event_proc_compat(struct vmbus_softc *sc, int cpu)
{
	struct vmbus_evtflags *eventf;

	eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE;
	if (atomic_testandclear_long(&eventf->evt_flags[0], 0)) {
		vmbus_event_flags_proc(sc, sc->vmbus_rx_evtflags,
		    VMBUS_CHAN_MAX_COMPAT >> VMBUS_EVTFLAG_SHIFT);
	}
}

static void
vmbus_chan_update_evtflagcnt(struct vmbus_softc *sc,
    const struct hv_vmbus_channel *chan)
{
	volatile int *flag_cnt_ptr;
	int flag_cnt;

	flag_cnt = (chan->offer_msg.child_rel_id / VMBUS_EVTFLAG_LEN) + 1;
	flag_cnt_ptr = VMBUS_PCPU_PTR(sc, event_flags_cnt, chan->target_cpu);

	for (;;) {
		int old_flag_cnt;

		old_flag_cnt = *flag_cnt_ptr;
		if (old_flag_cnt >= flag_cnt)
			break;
		if (atomic_cmpset_int(flag_cnt_ptr, old_flag_cnt, flag_cnt)) {
			if (bootverbose) {
				device_printf(sc->vmbus_dev,
				    "channel%u update cpu%d flag_cnt to %d\n",
				    chan->offer_msg.child_rel_id,
				    chan->target_cpu, flag_cnt);
			}
			break;
		}
	}
}
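
/*
 * Example of the flag_cnt computation, assuming VMBUS_EVTFLAG_LEN == 64
 * (event flags per u_long on LP64): opening channel 100 requires the
 * target CPU to scan at least (100 / 64) + 1 == 2 event-flag words, so
 * the CAS loop above raises that CPU's event_flags_cnt to 2 unless it
 * is already larger.
 */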