vmbus_chan.c revision 307464
1/*-
2 * Copyright (c) 2009-2012,2016 Microsoft Corp.
3 * Copyright (c) 2012 NetApp Inc.
4 * Copyright (c) 2012 Citrix Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice unmodified, this list of conditions, and the following
12 *    disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: stable/11/sys/dev/hyperv/vmbus/vmbus_chan.c 307464 2016-10-17 03:35:20Z sephe $");
31
32#include <sys/param.h>
33#include <sys/kernel.h>
34#include <sys/malloc.h>
35#include <sys/systm.h>
36#include <sys/mbuf.h>
37#include <sys/lock.h>
38#include <sys/mutex.h>
39#include <sys/sysctl.h>
40
41#include <machine/atomic.h>
42#include <machine/bus.h>
43
44#include <vm/vm.h>
45#include <vm/vm_param.h>
46#include <vm/pmap.h>
47
48#include <dev/hyperv/include/hyperv_busdma.h>
49#include <dev/hyperv/vmbus/hyperv_var.h>
50#include <dev/hyperv/vmbus/vmbus_reg.h>
51#include <dev/hyperv/vmbus/vmbus_var.h>
52#include <dev/hyperv/vmbus/vmbus_brvar.h>
53#include <dev/hyperv/vmbus/vmbus_chanvar.h>
54
55static void	vmbus_chan_update_evtflagcnt(struct vmbus_softc *,
56		    const struct vmbus_channel *);
57
58static void	vmbus_chan_task(void *, int);
59static void	vmbus_chan_task_nobatch(void *, int);
60static void	vmbus_chan_detach_task(void *, int);
61
62static void	vmbus_chan_msgproc_choffer(struct vmbus_softc *,
63		    const struct vmbus_message *);
64static void	vmbus_chan_msgproc_chrescind(struct vmbus_softc *,
65		    const struct vmbus_message *);
66
67/*
68 * Vmbus channel message processing.
69 */
70static const vmbus_chanmsg_proc_t
71vmbus_chan_msgprocs[VMBUS_CHANMSG_TYPE_MAX] = {
72	VMBUS_CHANMSG_PROC(CHOFFER,	vmbus_chan_msgproc_choffer),
73	VMBUS_CHANMSG_PROC(CHRESCIND,	vmbus_chan_msgproc_chrescind),
74
75	VMBUS_CHANMSG_PROC_WAKEUP(CHOPEN_RESP),
76	VMBUS_CHANMSG_PROC_WAKEUP(GPADL_CONNRESP),
77	VMBUS_CHANMSG_PROC_WAKEUP(GPADL_DISCONNRESP)
78};
79
80/*
81 * Notify the host that there is data pending on our TX bufring.
82 */
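/*
 * NOTE:
 * Which path is taken is fixed at offer time: channels offered with
 * VMBUS_CHOFFER_FLAG1_HASMNF notify the host through their monitor
 * trigger bit, while all others issue hypercall_signal_event().
 */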
83static __inline void
84vmbus_chan_signal_tx(const struct vmbus_channel *chan)
85{
86	atomic_set_long(chan->ch_evtflag, chan->ch_evtflag_mask);
87	if (chan->ch_txflags & VMBUS_CHAN_TXF_HASMNF)
88		atomic_set_int(chan->ch_montrig, chan->ch_montrig_mask);
89	else
90		hypercall_signal_event(chan->ch_monprm_dma.hv_paddr);
91}
92
93static int
94vmbus_chan_sysctl_mnf(SYSCTL_HANDLER_ARGS)
95{
96	struct vmbus_channel *chan = arg1;
97	int mnf = 0;
98
99	if (chan->ch_txflags & VMBUS_CHAN_TXF_HASMNF)
100		mnf = 1;
101	return sysctl_handle_int(oidp, &mnf, 0, req);
102}
103
104static void
105vmbus_chan_sysctl_create(struct vmbus_channel *chan)
106{
107	struct sysctl_oid *ch_tree, *chid_tree, *br_tree;
108	struct sysctl_ctx_list *ctx;
109	uint32_t ch_id;
110	char name[16];
111
112	/*
113	 * Add sysctl nodes related to this channel to this
114	 * channel's sysctl ctx, so that they can be destroyed
115	 * independently upon close of this channel, which can
116	 * happen even if the device is not detached.
117	 */
118	ctx = &chan->ch_sysctl_ctx;
119	sysctl_ctx_init(ctx);
120
121	/*
122	 * Create dev.NAME.UNIT.channel tree.
123	 */
124	ch_tree = SYSCTL_ADD_NODE(ctx,
125	    SYSCTL_CHILDREN(device_get_sysctl_tree(chan->ch_dev)),
126	    OID_AUTO, "channel", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
127	if (ch_tree == NULL)
128		return;
129
130	/*
131	 * Create dev.NAME.UNIT.channel.CHANID tree.
132	 */
133	if (VMBUS_CHAN_ISPRIMARY(chan))
134		ch_id = chan->ch_id;
135	else
136		ch_id = chan->ch_prichan->ch_id;
137	snprintf(name, sizeof(name), "%d", ch_id);
138	chid_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(ch_tree),
139	    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
140	if (chid_tree == NULL)
141		return;
142
143	if (!VMBUS_CHAN_ISPRIMARY(chan)) {
144		/*
145		 * Create dev.NAME.UNIT.channel.CHANID.sub tree.
146		 */
147		ch_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(chid_tree),
148		    OID_AUTO, "sub", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
149		if (ch_tree == NULL)
150			return;
151
152		/*
153		 * Create dev.NAME.UNIT.channel.CHANID.sub.SUBIDX tree.
154		 *
155		 * NOTE:
156		 * chid_tree is changed to this new sysctl tree.
157		 */
158		snprintf(name, sizeof(name), "%d", chan->ch_subidx);
159		chid_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(ch_tree),
160		    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
161		if (chid_tree == NULL)
162			return;
163
164		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
165		    "chanid", CTLFLAG_RD, &chan->ch_id, 0, "channel id");
166	}
167
168	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
169	    "cpu", CTLFLAG_RD, &chan->ch_cpuid, 0, "owner CPU id");
170	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
171	    "mnf", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
172	    chan, 0, vmbus_chan_sysctl_mnf, "I",
173	    "has monitor notification facilities");
174
175	br_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
176	    "br", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
177	if (br_tree != NULL) {
178		/*
179		 * Create sysctl tree for RX bufring.
180		 */
181		vmbus_br_sysctl_create(ctx, br_tree, &chan->ch_rxbr.rxbr, "rx");
182		/*
183		 * Create sysctl tree for TX bufring.
184		 */
185		vmbus_br_sysctl_create(ctx, br_tree, &chan->ch_txbr.txbr, "tx");
186	}
187}
188
189int
190vmbus_chan_open(struct vmbus_channel *chan, int txbr_size, int rxbr_size,
191    const void *udata, int udlen, vmbus_chan_callback_t cb, void *cbarg)
192{
193	struct vmbus_softc *sc = chan->ch_vmbus;
194	const struct vmbus_chanmsg_chopen_resp *resp;
195	const struct vmbus_message *msg;
196	struct vmbus_chanmsg_chopen *req;
197	struct vmbus_msghc *mh;
198	uint32_t status;
199	int error;
200	uint8_t *br;
201
202	if (udlen > VMBUS_CHANMSG_CHOPEN_UDATA_SIZE) {
203		device_printf(sc->vmbus_dev,
204		    "invalid udata len %d for chan%u\n", udlen, chan->ch_id);
205		return EINVAL;
206	}
207	KASSERT((txbr_size & PAGE_MASK) == 0,
208	    ("send bufring size is not a multiple of PAGE_SIZE"));
209	KASSERT((rxbr_size & PAGE_MASK) == 0,
210	    ("recv bufring size is not a multiple of PAGE_SIZE"));
211
212	if (atomic_testandset_int(&chan->ch_stflags,
213	    VMBUS_CHAN_ST_OPENED_SHIFT))
214		panic("double-open chan%u", chan->ch_id);
215
216	chan->ch_cb = cb;
217	chan->ch_cbarg = cbarg;
218
219	vmbus_chan_update_evtflagcnt(sc, chan);
220
221	chan->ch_tq = VMBUS_PCPU_GET(chan->ch_vmbus, event_tq, chan->ch_cpuid);
222	if (chan->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD)
223		TASK_INIT(&chan->ch_task, 0, vmbus_chan_task, chan);
224	else
225		TASK_INIT(&chan->ch_task, 0, vmbus_chan_task_nobatch, chan);
226
227	/*
228	 * Allocate the TX+RX bufrings.
229	 * XXX should use ch_dev dtag
230	 */
231	br = hyperv_dmamem_alloc(bus_get_dma_tag(sc->vmbus_dev),
232	    PAGE_SIZE, 0, txbr_size + rxbr_size, &chan->ch_bufring_dma,
233	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
234	if (br == NULL) {
235		device_printf(sc->vmbus_dev, "bufring allocation failed\n");
236		error = ENOMEM;
237		goto failed;
238	}
239	chan->ch_bufring = br;
240
241	/* TX bufring comes first */
242	vmbus_txbr_setup(&chan->ch_txbr, br, txbr_size);
243	/* RX bufring immediately follows TX bufring */
244	vmbus_rxbr_setup(&chan->ch_rxbr, br + txbr_size, rxbr_size);
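	/*
	 * Resulting layout of the single bufring allocation, as set up
	 * above:
	 *
	 *	br[0 .. txbr_size)			TX bufring
	 *	br[txbr_size .. txbr_size + rxbr_size)	RX bufring
	 */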
245
246	/* Create sysctl tree for this channel */
247	vmbus_chan_sysctl_create(chan);
248
249	/*
250	 * Connect the bufrings, both RX and TX, to this channel.
251	 */
252	error = vmbus_chan_gpadl_connect(chan, chan->ch_bufring_dma.hv_paddr,
253	    txbr_size + rxbr_size, &chan->ch_bufring_gpadl);
254	if (error) {
255		device_printf(sc->vmbus_dev,
256		    "failed to connect bufring GPADL to chan%u\n", chan->ch_id);
257		goto failed;
258	}
259
260	/*
261	 * Open channel w/ the bufring GPADL on the target CPU.
262	 */
263	mh = vmbus_msghc_get(sc, sizeof(*req));
264	if (mh == NULL) {
265		device_printf(sc->vmbus_dev,
266		    "can not get msg hypercall for chopen(chan%u)\n",
267		    chan->ch_id);
268		error = ENXIO;
269		goto failed;
270	}
271
272	req = vmbus_msghc_dataptr(mh);
273	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHOPEN;
274	req->chm_chanid = chan->ch_id;
275	req->chm_openid = chan->ch_id;
276	req->chm_gpadl = chan->ch_bufring_gpadl;
277	req->chm_vcpuid = chan->ch_vcpuid;
278	req->chm_txbr_pgcnt = txbr_size >> PAGE_SHIFT;
279	if (udlen > 0)
280		memcpy(req->chm_udata, udata, udlen);
281
282	error = vmbus_msghc_exec(sc, mh);
283	if (error) {
284		device_printf(sc->vmbus_dev,
285		    "chopen(chan%u) msg hypercall exec failed: %d\n",
286		    chan->ch_id, error);
287		vmbus_msghc_put(sc, mh);
288		goto failed;
289	}
290
291	msg = vmbus_msghc_wait_result(sc, mh);
292	resp = (const struct vmbus_chanmsg_chopen_resp *)msg->msg_data;
293	status = resp->chm_status;
294
295	vmbus_msghc_put(sc, mh);
296
297	if (status == 0) {
298		if (bootverbose) {
299			device_printf(sc->vmbus_dev, "chan%u opened\n",
300			    chan->ch_id);
301		}
302		return 0;
303	}
304
305	device_printf(sc->vmbus_dev, "failed to open chan%u\n", chan->ch_id);
306	error = ENXIO;
307
308failed:
309	if (chan->ch_bufring_gpadl) {
310		vmbus_chan_gpadl_disconnect(chan, chan->ch_bufring_gpadl);
311		chan->ch_bufring_gpadl = 0;
312	}
313	if (chan->ch_bufring != NULL) {
314		hyperv_dmamem_free(&chan->ch_bufring_dma, chan->ch_bufring);
315		chan->ch_bufring = NULL;
316	}
317	atomic_clear_int(&chan->ch_stflags, VMBUS_CHAN_ST_OPENED);
318	return error;
319}
320
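/*
 * NOTE:
 * The snippet below is only an illustrative sketch of how a channel
 * driver might open its primary channel from its attach routine; the
 * callback, bufring sizes and softc are hypothetical and not part of
 * this file.
 *
 *	static void
 *	xxx_chan_callback(struct vmbus_channel *chan, void *xsc)
 *	{
 *		struct xxx_softc *sc = xsc;
 *
 *		(drain the RX bufring with vmbus_chan_recv() here)
 *	}
 *
 *	error = vmbus_chan_open(chan, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *	    NULL, 0, xxx_chan_callback, sc);
 *	if (error) {
 *		device_printf(dev, "failed to open chan%u: %d\n",
 *		    vmbus_chan_id(chan), error);
 *	}
 */
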
321int
322vmbus_chan_gpadl_connect(struct vmbus_channel *chan, bus_addr_t paddr,
323    int size, uint32_t *gpadl0)
324{
325	struct vmbus_softc *sc = chan->ch_vmbus;
326	struct vmbus_msghc *mh;
327	struct vmbus_chanmsg_gpadl_conn *req;
328	const struct vmbus_message *msg;
329	size_t reqsz;
330	uint32_t gpadl, status;
331	int page_count, range_len, i, cnt, error;
332	uint64_t page_id;
333
334	/*
335	 * Preliminary checks.
336	 */
337
338	KASSERT((size & PAGE_MASK) == 0,
339	    ("invalid GPA size %d, not a multiple of page size", size));
340	page_count = size >> PAGE_SHIFT;
341
342	KASSERT((paddr & PAGE_MASK) == 0,
343	    ("GPA is not page aligned %jx", (uintmax_t)paddr));
344	page_id = paddr >> PAGE_SHIFT;
345
346	range_len = __offsetof(struct vmbus_gpa_range, gpa_page[page_count]);
347	/*
348	 * We don't support multiple GPA ranges.
349	 */
350	if (range_len > UINT16_MAX) {
351		device_printf(sc->vmbus_dev, "GPA too large, %d pages\n",
352		    page_count);
353		return EOPNOTSUPP;
354	}
355
356	/*
357	 * Allocate GPADL id.
358	 */
359	gpadl = vmbus_gpadl_alloc(sc);
360	*gpadl0 = gpadl;
361
362	/*
363	 * Connect this GPADL to the target channel.
364	 *
365	 * NOTE:
366	 * Since each message can only hold a small set of page
367	 * addresses, several messages may be required to
368	 * complete the connection.
369	 */
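	/*
	 * For example (sizes are illustrative only): a GPADL spanning N
	 * pages carries its first min(N, VMBUS_CHANMSG_GPADL_CONN_PGMAX)
	 * page frames in the GPADL_CONN message built below, and each
	 * remaining batch of up to VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX page
	 * frames in a follow-up GPADL_SUBCONN message.
	 */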
370	if (page_count > VMBUS_CHANMSG_GPADL_CONN_PGMAX)
371		cnt = VMBUS_CHANMSG_GPADL_CONN_PGMAX;
372	else
373		cnt = page_count;
374	page_count -= cnt;
375
376	reqsz = __offsetof(struct vmbus_chanmsg_gpadl_conn,
377	    chm_range.gpa_page[cnt]);
378	mh = vmbus_msghc_get(sc, reqsz);
379	if (mh == NULL) {
380		device_printf(sc->vmbus_dev,
381		    "can not get msg hypercall for gpadl->chan%u\n",
382		    chan->ch_id);
383		return EIO;
384	}
385
386	req = vmbus_msghc_dataptr(mh);
387	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_CONN;
388	req->chm_chanid = chan->ch_id;
389	req->chm_gpadl = gpadl;
390	req->chm_range_len = range_len;
391	req->chm_range_cnt = 1;
392	req->chm_range.gpa_len = size;
393	req->chm_range.gpa_ofs = 0;
394	for (i = 0; i < cnt; ++i)
395		req->chm_range.gpa_page[i] = page_id++;
396
397	error = vmbus_msghc_exec(sc, mh);
398	if (error) {
399		device_printf(sc->vmbus_dev,
400		    "gpadl->chan%u msg hypercall exec failed: %d\n",
401		    chan->ch_id, error);
402		vmbus_msghc_put(sc, mh);
403		return error;
404	}
405
406	while (page_count > 0) {
407		struct vmbus_chanmsg_gpadl_subconn *subreq;
408
409		if (page_count > VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX)
410			cnt = VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX;
411		else
412			cnt = page_count;
413		page_count -= cnt;
414
415		reqsz = __offsetof(struct vmbus_chanmsg_gpadl_subconn,
416		    chm_gpa_page[cnt]);
417		vmbus_msghc_reset(mh, reqsz);
418
419		subreq = vmbus_msghc_dataptr(mh);
420		subreq->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_SUBCONN;
421		subreq->chm_gpadl = gpadl;
422		for (i = 0; i < cnt; ++i)
423			subreq->chm_gpa_page[i] = page_id++;
424
425		vmbus_msghc_exec_noresult(mh);
426	}
427	KASSERT(page_count == 0, ("invalid page count %d", page_count));
428
429	msg = vmbus_msghc_wait_result(sc, mh);
430	status = ((const struct vmbus_chanmsg_gpadl_connresp *)
431	    msg->msg_data)->chm_status;
432
433	vmbus_msghc_put(sc, mh);
434
435	if (status != 0) {
436		device_printf(sc->vmbus_dev, "gpadl->chan%u failed: "
437		    "status %u\n", chan->ch_id, status);
438		return EIO;
439	} else {
440		if (bootverbose) {
441			device_printf(sc->vmbus_dev, "gpadl->chan%u "
442			    "succeeded\n", chan->ch_id);
443		}
444	}
445	return 0;
446}
447
448/*
449 * Disconnect the GPADL from the target channel.
450 */
451int
452vmbus_chan_gpadl_disconnect(struct vmbus_channel *chan, uint32_t gpadl)
453{
454	struct vmbus_softc *sc = chan->ch_vmbus;
455	struct vmbus_msghc *mh;
456	struct vmbus_chanmsg_gpadl_disconn *req;
457	int error;
458
459	mh = vmbus_msghc_get(sc, sizeof(*req));
460	if (mh == NULL) {
461		device_printf(sc->vmbus_dev,
462		    "can not get msg hypercall for gpa x->chan%u\n",
463		    chan->ch_id);
464		return EBUSY;
465	}
466
467	req = vmbus_msghc_dataptr(mh);
468	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_DISCONN;
469	req->chm_chanid = chan->ch_id;
470	req->chm_gpadl = gpadl;
471
472	error = vmbus_msghc_exec(sc, mh);
473	if (error) {
474		device_printf(sc->vmbus_dev,
475		    "gpa x->chan%u msg hypercall exec failed: %d\n",
476		    chan->ch_id, error);
477		vmbus_msghc_put(sc, mh);
478		return error;
479	}
480
481	vmbus_msghc_wait_result(sc, mh);
482	/* Discard result; no useful information */
483	vmbus_msghc_put(sc, mh);
484
485	return 0;
486}
487
488static void
489vmbus_chan_close_internal(struct vmbus_channel *chan)
490{
491	struct vmbus_softc *sc = chan->ch_vmbus;
492	struct vmbus_msghc *mh;
493	struct vmbus_chanmsg_chclose *req;
494	struct taskqueue *tq = chan->ch_tq;
495	int error;
496
497	/* TODO: stringent check */
498	atomic_clear_int(&chan->ch_stflags, VMBUS_CHAN_ST_OPENED);
499
500	/*
501	 * Free this channel's sysctl tree attached to its device's
502	 * sysctl tree.
503	 */
504	sysctl_ctx_free(&chan->ch_sysctl_ctx);
505
506	/*
507	 * Set ch_tq to NULL to prevent more requests from being scheduled.
508	 * XXX pretty broken; need rework.
509	 */
510	chan->ch_tq = NULL;
511	taskqueue_drain(tq, &chan->ch_task);
512	chan->ch_cb = NULL;
513
514	/*
515	 * Close this channel.
516	 */
517	mh = vmbus_msghc_get(sc, sizeof(*req));
518	if (mh == NULL) {
519		device_printf(sc->vmbus_dev,
520		    "can not get msg hypercall for chclose(chan%u)\n",
521		    chan->ch_id);
522		return;
523	}
524
525	req = vmbus_msghc_dataptr(mh);
526	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHCLOSE;
527	req->chm_chanid = chan->ch_id;
528
529	error = vmbus_msghc_exec_noresult(mh);
530	vmbus_msghc_put(sc, mh);
531
532	if (error) {
533		device_printf(sc->vmbus_dev,
534		    "chclose(chan%u) msg hypercall exec failed: %d\n",
535		    chan->ch_id, error);
536		return;
537	} else if (bootverbose) {
538		device_printf(sc->vmbus_dev, "close chan%u\n", chan->ch_id);
539	}
540
541	/*
542	 * Disconnect the TX+RX bufrings from this channel.
543	 */
544	if (chan->ch_bufring_gpadl) {
545		vmbus_chan_gpadl_disconnect(chan, chan->ch_bufring_gpadl);
546		chan->ch_bufring_gpadl = 0;
547	}
548
549	/*
550	 * Destroy the TX+RX bufrings.
551	 */
552	if (chan->ch_bufring != NULL) {
553		hyperv_dmamem_free(&chan->ch_bufring_dma, chan->ch_bufring);
554		chan->ch_bufring = NULL;
555	}
556}
557
558/*
559 * The caller should make sure that all sub-channels have
560 * been added to 'chan' and that none of the to-be-closed
561 * channels are being opened.
562 */
563void
564vmbus_chan_close(struct vmbus_channel *chan)
565{
566	int subchan_cnt;
567
568	if (!VMBUS_CHAN_ISPRIMARY(chan)) {
569		/*
570		 * Sub-channel is closed when its primary channel
571		 * is closed; done.
572		 */
573		return;
574	}
575
576	/*
577	 * Close all sub-channels, if any.
578	 */
579	subchan_cnt = chan->ch_subchan_cnt;
580	if (subchan_cnt > 0) {
581		struct vmbus_channel **subchan;
582		int i;
583
584		subchan = vmbus_subchan_get(chan, subchan_cnt);
585		for (i = 0; i < subchan_cnt; ++i)
586			vmbus_chan_close_internal(subchan[i]);
587		vmbus_subchan_rel(subchan, subchan_cnt);
588	}
589
590	/* Then close the primary channel. */
591	vmbus_chan_close_internal(chan);
592}
593
594int
595vmbus_chan_send(struct vmbus_channel *chan, uint16_t type, uint16_t flags,
596    void *data, int dlen, uint64_t xactid)
597{
598	struct vmbus_chanpkt pkt;
599	int pktlen, pad_pktlen, hlen, error;
600	uint64_t pad = 0;
601	struct iovec iov[3];
602	boolean_t send_evt;
603
604	hlen = sizeof(pkt);
605	pktlen = hlen + dlen;
606	pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
607
608	pkt.cp_hdr.cph_type = type;
609	pkt.cp_hdr.cph_flags = flags;
610	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_hlen, hlen);
611	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_tlen, pad_pktlen);
612	pkt.cp_hdr.cph_xactid = xactid;
613
614	iov[0].iov_base = &pkt;
615	iov[0].iov_len = hlen;
616	iov[1].iov_base = data;
617	iov[1].iov_len = dlen;
618	iov[2].iov_base = &pad;
619	iov[2].iov_len = pad_pktlen - pktlen;
620
621	error = vmbus_txbr_write(&chan->ch_txbr, iov, 3, &send_evt);
622	if (!error && send_evt)
623		vmbus_chan_signal_tx(chan);
624	return error;
625}
626
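/*
 * NOTE:
 * Illustrative sketch only; the request structure and the transaction
 * id below are hypothetical.  A caller that expects a completion
 * usually passes a transaction id it can later match against the
 * cph_xactid of the received packet, e.g.
 *
 *	error = vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
 *	    VMBUS_CHANPKT_FLAG_RC, &req, sizeof(req),
 *	    (uint64_t)(uintptr_t)&req);
 *
 * Errors from vmbus_txbr_write(), e.g. when the TX bufring is full,
 * are returned to the caller unchanged.
 */
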
627int
628vmbus_chan_send_sglist(struct vmbus_channel *chan,
629    struct vmbus_gpa sg[], int sglen, void *data, int dlen, uint64_t xactid)
630{
631	struct vmbus_chanpkt_sglist pkt;
632	int pktlen, pad_pktlen, hlen, error;
633	struct iovec iov[4];
634	boolean_t send_evt;
635	uint64_t pad = 0;
636
637	KASSERT(sglen < VMBUS_CHAN_SGLIST_MAX,
638	    ("invalid sglist len %d", sglen));
639
640	hlen = __offsetof(struct vmbus_chanpkt_sglist, cp_gpa[sglen]);
641	pktlen = hlen + dlen;
642	pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
643
644	pkt.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
645	pkt.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
646	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_hlen, hlen);
647	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_tlen, pad_pktlen);
648	pkt.cp_hdr.cph_xactid = xactid;
649	pkt.cp_rsvd = 0;
650	pkt.cp_gpa_cnt = sglen;
651
652	iov[0].iov_base = &pkt;
653	iov[0].iov_len = sizeof(pkt);
654	iov[1].iov_base = sg;
655	iov[1].iov_len = sizeof(struct vmbus_gpa) * sglen;
656	iov[2].iov_base = data;
657	iov[2].iov_len = dlen;
658	iov[3].iov_base = &pad;
659	iov[3].iov_len = pad_pktlen - pktlen;
660
661	error = vmbus_txbr_write(&chan->ch_txbr, iov, 4, &send_evt);
662	if (!error && send_evt)
663		vmbus_chan_signal_tx(chan);
664	return error;
665}
666
667int
668vmbus_chan_send_prplist(struct vmbus_channel *chan,
669    struct vmbus_gpa_range *prp, int prp_cnt, void *data, int dlen,
670    uint64_t xactid)
671{
672	struct vmbus_chanpkt_prplist pkt;
673	int pktlen, pad_pktlen, hlen, error;
674	struct iovec iov[4];
675	boolean_t send_evt;
676	uint64_t pad = 0;
677
678	KASSERT(prp_cnt < VMBUS_CHAN_PRPLIST_MAX,
679	    ("invalid prplist entry count %d", prp_cnt));
680
681	hlen = __offsetof(struct vmbus_chanpkt_prplist,
682	    cp_range[0].gpa_page[prp_cnt]);
683	pktlen = hlen + dlen;
684	pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
685
686	pkt.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
687	pkt.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
688	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_hlen, hlen);
689	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_tlen, pad_pktlen);
690	pkt.cp_hdr.cph_xactid = xactid;
691	pkt.cp_rsvd = 0;
692	pkt.cp_range_cnt = 1;
693
694	iov[0].iov_base = &pkt;
695	iov[0].iov_len = sizeof(pkt);
696	iov[1].iov_base = prp;
697	iov[1].iov_len = __offsetof(struct vmbus_gpa_range, gpa_page[prp_cnt]);
698	iov[2].iov_base = data;
699	iov[2].iov_len = dlen;
700	iov[3].iov_base = &pad;
701	iov[3].iov_len = pad_pktlen - pktlen;
702
703	error = vmbus_txbr_write(&chan->ch_txbr, iov, 4, &send_evt);
704	if (!error && send_evt)
705		vmbus_chan_signal_tx(chan);
706	return error;
707}
708
709int
710vmbus_chan_recv(struct vmbus_channel *chan, void *data, int *dlen0,
711    uint64_t *xactid)
712{
713	struct vmbus_chanpkt_hdr pkt;
714	int error, dlen, hlen;
715
716	error = vmbus_rxbr_peek(&chan->ch_rxbr, &pkt, sizeof(pkt));
717	if (error)
718		return error;
719
720	hlen = VMBUS_CHANPKT_GETLEN(pkt.cph_hlen);
721	dlen = VMBUS_CHANPKT_GETLEN(pkt.cph_tlen) - hlen;
722
723	if (*dlen0 < dlen) {
724		/* Return the size of this packet's data. */
725		*dlen0 = dlen;
726		return ENOBUFS;
727	}
728
729	*xactid = pkt.cph_xactid;
730	*dlen0 = dlen;
731
732	/* Skip packet header */
733	error = vmbus_rxbr_read(&chan->ch_rxbr, data, dlen, hlen);
734	KASSERT(!error, ("vmbus_rxbr_read failed"));
735
736	return 0;
737}
738
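/*
 * NOTE:
 * Illustrative sketch only; the buffer and its size are hypothetical.
 * On ENOBUFS the required data length is reported back through *dlen0,
 * so a caller with a variable-sized buffer can retry with a larger one:
 *
 *	dlen = sizeof(buf);
 *	error = vmbus_chan_recv(chan, buf, &dlen, &xactid);
 *	if (error == ENOBUFS)
 *		(grow the buffer to at least dlen bytes and retry)
 */
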
739int
740vmbus_chan_recv_pkt(struct vmbus_channel *chan,
741    struct vmbus_chanpkt_hdr *pkt0, int *pktlen0)
742{
743	struct vmbus_chanpkt_hdr pkt;
744	int error, pktlen;
745
746	error = vmbus_rxbr_peek(&chan->ch_rxbr, &pkt, sizeof(pkt));
747	if (error)
748		return error;
749
750	pktlen = VMBUS_CHANPKT_GETLEN(pkt.cph_tlen);
751	if (*pktlen0 < pktlen) {
752		/* Return the size of this packet. */
753		*pktlen0 = pktlen;
754		return ENOBUFS;
755	}
756	*pktlen0 = pktlen;
757
758	/* Include packet header */
759	error = vmbus_rxbr_read(&chan->ch_rxbr, pkt0, pktlen, 0);
760	KASSERT(!error, ("vmbus_rxbr_read failed"));
761
762	return 0;
763}
764
765static void
766vmbus_chan_task(void *xchan, int pending __unused)
767{
768	struct vmbus_channel *chan = xchan;
769	vmbus_chan_callback_t cb = chan->ch_cb;
770	void *cbarg = chan->ch_cbarg;
771
772	/*
773	 * Optimize host to guest signaling by ensuring:
774	 * 1. While reading the channel, we disable interrupts from
775	 *    host.
776	 * 2. Ensure that we process all posted messages from the host
777	 *    before returning from this callback.
778	 * 3. Once we return, enable signaling from the host. Once this
779	 *    state is set we check to see if additional packets are
780	 *    available to read. In this case we repeat the process.
781	 *
782	 * NOTE: Interrupt has been disabled in the ISR.
783	 */
784	for (;;) {
785		uint32_t left;
786
787		cb(chan, cbarg);
788
789		left = vmbus_rxbr_intr_unmask(&chan->ch_rxbr);
790		if (left == 0) {
791			/* No more data in RX bufring; done */
792			break;
793		}
794		vmbus_rxbr_intr_mask(&chan->ch_rxbr);
795	}
796}
797
798static void
799vmbus_chan_task_nobatch(void *xchan, int pending __unused)
800{
801	struct vmbus_channel *chan = xchan;
802
803	chan->ch_cb(chan, chan->ch_cbarg);
804}
805
806static __inline void
807vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *event_flags,
808    int flag_cnt)
809{
810	int f;
811
812	for (f = 0; f < flag_cnt; ++f) {
813		uint32_t chid_base;
814		u_long flags;
815		int chid_ofs;
816
817		if (event_flags[f] == 0)
818			continue;
819
820		flags = atomic_swap_long(&event_flags[f], 0);
821		chid_base = f << VMBUS_EVTFLAG_SHIFT;
822
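		/*
		 * Each set bit maps to channel id (chid_base + bit index);
		 * e.g. on LP64, where one flag word covers 64 channels,
		 * bit 3 of word 2 is channel 131.
		 */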
823		while ((chid_ofs = ffsl(flags)) != 0) {
824			struct vmbus_channel *chan;
825
826			--chid_ofs; /* NOTE: ffsl is 1-based */
827			flags &= ~(1UL << chid_ofs);
828
829			chan = sc->vmbus_chmap[chid_base + chid_ofs];
830
831			/* if channel is closed or closing */
832			if (chan == NULL || chan->ch_tq == NULL)
833				continue;
834
835			if (chan->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD)
836				vmbus_rxbr_intr_mask(&chan->ch_rxbr);
837			taskqueue_enqueue(chan->ch_tq, &chan->ch_task);
838		}
839	}
840}
841
842void
843vmbus_event_proc(struct vmbus_softc *sc, int cpu)
844{
845	struct vmbus_evtflags *eventf;
846
847	/*
848	 * On a host running Windows 8 or later, the event page can be checked
849	 * directly to get the id of the channel that has the pending interrupt.
850	 */
851	eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE;
852	vmbus_event_flags_proc(sc, eventf->evt_flags,
853	    VMBUS_PCPU_GET(sc, event_flags_cnt, cpu));
854}
855
856void
857vmbus_event_proc_compat(struct vmbus_softc *sc, int cpu)
858{
859	struct vmbus_evtflags *eventf;
860
861	eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE;
862	if (atomic_testandclear_long(&eventf->evt_flags[0], 0)) {
863		vmbus_event_flags_proc(sc, sc->vmbus_rx_evtflags,
864		    VMBUS_CHAN_MAX_COMPAT >> VMBUS_EVTFLAG_SHIFT);
865	}
866}
867
868static void
869vmbus_chan_update_evtflagcnt(struct vmbus_softc *sc,
870    const struct vmbus_channel *chan)
871{
872	volatile int *flag_cnt_ptr;
873	int flag_cnt;
874
875	flag_cnt = (chan->ch_id / VMBUS_EVTFLAG_LEN) + 1;
876	flag_cnt_ptr = VMBUS_PCPU_PTR(sc, event_flags_cnt, chan->ch_cpuid);
877
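	/*
	 * Lift the target cpu's event_flags_cnt so that event processing
	 * on that cpu scans at least the flag word covering this channel;
	 * the count only ever grows.  E.g. (illustrative), with
	 * VMBUS_EVTFLAG_LEN being 64, channel id 100 requires a count of
	 * at least 2.
	 */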
878	for (;;) {
879		int old_flag_cnt;
880
881		old_flag_cnt = *flag_cnt_ptr;
882		if (old_flag_cnt >= flag_cnt)
883			break;
884		if (atomic_cmpset_int(flag_cnt_ptr, old_flag_cnt, flag_cnt)) {
885			if (bootverbose) {
886				device_printf(sc->vmbus_dev,
887				    "channel%u update cpu%d flag_cnt to %d\n",
888				    chan->ch_id, chan->ch_cpuid, flag_cnt);
889			}
890			break;
891		}
892	}
893}
894
895static struct vmbus_channel *
896vmbus_chan_alloc(struct vmbus_softc *sc)
897{
898	struct vmbus_channel *chan;
899
900	chan = malloc(sizeof(*chan), M_DEVBUF, M_WAITOK | M_ZERO);
901
902	chan->ch_monprm = hyperv_dmamem_alloc(bus_get_dma_tag(sc->vmbus_dev),
903	    HYPERCALL_PARAM_ALIGN, 0, sizeof(struct hyperv_mon_param),
904	    &chan->ch_monprm_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
905	if (chan->ch_monprm == NULL) {
906		device_printf(sc->vmbus_dev, "monprm alloc failed\n");
907		free(chan, M_DEVBUF);
908		return NULL;
909	}
910
911	chan->ch_vmbus = sc;
912	mtx_init(&chan->ch_subchan_lock, "vmbus subchan", NULL, MTX_DEF);
913	TAILQ_INIT(&chan->ch_subchans);
914	TASK_INIT(&chan->ch_detach_task, 0, vmbus_chan_detach_task, chan);
915	vmbus_rxbr_init(&chan->ch_rxbr);
916	vmbus_txbr_init(&chan->ch_txbr);
917
918	return chan;
919}
920
921static void
922vmbus_chan_free(struct vmbus_channel *chan)
923{
924	/* TODO: assert sub-channel list is empty */
925	/* TODO: assert no longer on the primary channel's sub-channel list */
926	/* TODO: assert no longer on the vmbus channel list */
927	hyperv_dmamem_free(&chan->ch_monprm_dma, chan->ch_monprm);
928	mtx_destroy(&chan->ch_subchan_lock);
929	vmbus_rxbr_deinit(&chan->ch_rxbr);
930	vmbus_txbr_deinit(&chan->ch_txbr);
931	free(chan, M_DEVBUF);
932}
933
934static int
935vmbus_chan_add(struct vmbus_channel *newchan)
936{
937	struct vmbus_softc *sc = newchan->ch_vmbus;
938	struct vmbus_channel *prichan;
939
940	if (newchan->ch_id == 0) {
941		/*
942		 * XXX
943		 * Chan0 is never processed and should not have been offered;
944		 * skip it.
945		 */
946		device_printf(sc->vmbus_dev, "got chan0 offer, discard\n");
947		return EINVAL;
948	} else if (newchan->ch_id >= VMBUS_CHAN_MAX) {
949		device_printf(sc->vmbus_dev, "invalid chan%u offer\n",
950		    newchan->ch_id);
951		return EINVAL;
952	}
953	sc->vmbus_chmap[newchan->ch_id] = newchan;
954
955	if (bootverbose) {
956		device_printf(sc->vmbus_dev, "chan%u subidx%u offer\n",
957		    newchan->ch_id, newchan->ch_subidx);
958	}
959
960	mtx_lock(&sc->vmbus_prichan_lock);
961	TAILQ_FOREACH(prichan, &sc->vmbus_prichans, ch_prilink) {
962		/*
963		 * Sub-channel will have the same type GUID and instance
964		 * GUID as its primary channel.
965		 */
966		if (memcmp(&prichan->ch_guid_type, &newchan->ch_guid_type,
967		    sizeof(struct hyperv_guid)) == 0 &&
968		    memcmp(&prichan->ch_guid_inst, &newchan->ch_guid_inst,
969		    sizeof(struct hyperv_guid)) == 0)
970			break;
971	}
972	if (VMBUS_CHAN_ISPRIMARY(newchan)) {
973		if (prichan == NULL) {
974			/* Install the new primary channel */
975			TAILQ_INSERT_TAIL(&sc->vmbus_prichans, newchan,
976			    ch_prilink);
977			mtx_unlock(&sc->vmbus_prichan_lock);
978			return 0;
979		} else {
980			mtx_unlock(&sc->vmbus_prichan_lock);
981			device_printf(sc->vmbus_dev, "duplicated primary "
982			    "chan%u\n", newchan->ch_id);
983			return EINVAL;
984		}
985	} else { /* Sub-channel */
986		if (prichan == NULL) {
987			mtx_unlock(&sc->vmbus_prichan_lock);
988			device_printf(sc->vmbus_dev, "no primary chan for "
989			    "chan%u\n", newchan->ch_id);
990			return EINVAL;
991		}
992		/*
993		 * Found the primary channel for this sub-channel;
994		 * move on.
995		 *
996		 * XXX refcnt prichan
997		 */
998	}
999	mtx_unlock(&sc->vmbus_prichan_lock);
1000
1001	/*
1002	 * This is a sub-channel; link it with the primary channel.
1003	 */
1004	KASSERT(!VMBUS_CHAN_ISPRIMARY(newchan),
1005	    ("new channel is not sub-channel"));
1006	KASSERT(prichan != NULL, ("no primary channel"));
1007
1008	newchan->ch_prichan = prichan;
1009	newchan->ch_dev = prichan->ch_dev;
1010
1011	mtx_lock(&prichan->ch_subchan_lock);
1012	TAILQ_INSERT_TAIL(&prichan->ch_subchans, newchan, ch_sublink);
1013	/*
1014	 * Bump up sub-channel count and notify anyone that is
1015	 * interested in this sub-channel, after this sub-channel
1016	 * is set up.
1017	 */
1018	prichan->ch_subchan_cnt++;
1019	mtx_unlock(&prichan->ch_subchan_lock);
1020	wakeup(prichan);
1021
1022	return 0;
1023}
1024
1025void
1026vmbus_chan_cpu_set(struct vmbus_channel *chan, int cpu)
1027{
1028	KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpu %d", cpu));
1029
1030	if (chan->ch_vmbus->vmbus_version == VMBUS_VERSION_WS2008 ||
1031	    chan->ch_vmbus->vmbus_version == VMBUS_VERSION_WIN7) {
1032		/* Only cpu0 is supported */
1033		cpu = 0;
1034	}
1035
1036	chan->ch_cpuid = cpu;
1037	chan->ch_vcpuid = VMBUS_PCPU_GET(chan->ch_vmbus, vcpuid, cpu);
1038
1039	if (bootverbose) {
1040		printf("vmbus_chan%u: assigned to cpu%u [vcpu%u]\n",
1041		    chan->ch_id, chan->ch_cpuid, chan->ch_vcpuid);
1042	}
1043}
1044
1045void
1046vmbus_chan_cpu_rr(struct vmbus_channel *chan)
1047{
1048	static uint32_t vmbus_chan_nextcpu;
1049	int cpu;
1050
1051	cpu = atomic_fetchadd_int(&vmbus_chan_nextcpu, 1) % mp_ncpus;
1052	vmbus_chan_cpu_set(chan, cpu);
1053}
1054
1055static void
1056vmbus_chan_cpu_default(struct vmbus_channel *chan)
1057{
1058	/*
1059	 * By default, pin the channel to cpu0.  Devices having
1060	 * special channel-cpu mapping requirements should call
1061	 * vmbus_chan_cpu_{set,rr}().
1062	 */
1063	vmbus_chan_cpu_set(chan, 0);
1064}
1065
1066static void
1067vmbus_chan_msgproc_choffer(struct vmbus_softc *sc,
1068    const struct vmbus_message *msg)
1069{
1070	const struct vmbus_chanmsg_choffer *offer;
1071	struct vmbus_channel *chan;
1072	int error;
1073
1074	offer = (const struct vmbus_chanmsg_choffer *)msg->msg_data;
1075
1076	chan = vmbus_chan_alloc(sc);
1077	if (chan == NULL) {
1078		device_printf(sc->vmbus_dev, "allocate chan%u failed\n",
1079		    offer->chm_chanid);
1080		return;
1081	}
1082
1083	chan->ch_id = offer->chm_chanid;
1084	chan->ch_subidx = offer->chm_subidx;
1085	chan->ch_guid_type = offer->chm_chtype;
1086	chan->ch_guid_inst = offer->chm_chinst;
1087
1088	/* Batch reading is on by default */
1089	chan->ch_flags |= VMBUS_CHAN_FLAG_BATCHREAD;
1090
1091	chan->ch_monprm->mp_connid = VMBUS_CONNID_EVENT;
1092	if (sc->vmbus_version != VMBUS_VERSION_WS2008)
1093		chan->ch_monprm->mp_connid = offer->chm_connid;
1094
1095	if (offer->chm_flags1 & VMBUS_CHOFFER_FLAG1_HASMNF) {
1096		int trig_idx;
1097
1098		/*
1099		 * Set up the monitor notification facility (MNF).
1100		 */
1101		chan->ch_txflags |= VMBUS_CHAN_TXF_HASMNF;
1102
1103		trig_idx = offer->chm_montrig / VMBUS_MONTRIG_LEN;
1104		if (trig_idx >= VMBUS_MONTRIGS_MAX)
1105			panic("invalid monitor trigger %u", offer->chm_montrig);
1106		chan->ch_montrig =
1107		    &sc->vmbus_mnf2->mnf_trigs[trig_idx].mt_pending;
1108
1109		chan->ch_montrig_mask =
1110		    1 << (offer->chm_montrig % VMBUS_MONTRIG_LEN);
1111	}
1112
1113	/*
1114	 * Set up the event flag.
1115	 */
1116	chan->ch_evtflag =
1117	    &sc->vmbus_tx_evtflags[chan->ch_id >> VMBUS_EVTFLAG_SHIFT];
1118	chan->ch_evtflag_mask = 1UL << (chan->ch_id & VMBUS_EVTFLAG_MASK);
1119
1120	/* Select default cpu for this channel. */
1121	vmbus_chan_cpu_default(chan);
1122
1123	error = vmbus_chan_add(chan);
1124	if (error) {
1125		device_printf(sc->vmbus_dev, "add chan%u failed: %d\n",
1126		    chan->ch_id, error);
1127		vmbus_chan_free(chan);
1128		return;
1129	}
1130
1131	if (VMBUS_CHAN_ISPRIMARY(chan)) {
1132		/*
1133		 * Add device for this primary channel.
1134		 *
1135		 * NOTE:
1136		 * Errors are ignored here; there is not much we can do if
1137		 * one really happens.
1138		 */
1139		vmbus_add_child(chan);
1140	}
1141}
1142
1143/*
1144 * XXX pretty broken; need rework.
1145 */
1146static void
1147vmbus_chan_msgproc_chrescind(struct vmbus_softc *sc,
1148    const struct vmbus_message *msg)
1149{
1150	const struct vmbus_chanmsg_chrescind *note;
1151	struct vmbus_channel *chan;
1152
1153	note = (const struct vmbus_chanmsg_chrescind *)msg->msg_data;
1154	if (note->chm_chanid >= VMBUS_CHAN_MAX) {
1155		device_printf(sc->vmbus_dev, "invalid rescinded chan%u\n",
1156		    note->chm_chanid);
1157		return;
1158	}
1159
1160	if (bootverbose) {
1161		device_printf(sc->vmbus_dev, "chan%u rescinded\n",
1162		    note->chm_chanid);
1163	}
1164
1165	chan = sc->vmbus_chmap[note->chm_chanid];
1166	if (chan == NULL)
1167		return;
1168	sc->vmbus_chmap[note->chm_chanid] = NULL;
1169
1170	taskqueue_enqueue(taskqueue_thread, &chan->ch_detach_task);
1171}
1172
1173static void
1174vmbus_chan_detach_task(void *xchan, int pending __unused)
1175{
1176	struct vmbus_channel *chan = xchan;
1177
1178	if (VMBUS_CHAN_ISPRIMARY(chan)) {
1179		/* Only primary channel owns the device */
1180		vmbus_delete_child(chan);
1181		/* NOTE: DO NOT free primary channel for now */
1182	} else {
1183		struct vmbus_softc *sc = chan->ch_vmbus;
1184		struct vmbus_channel *pri_chan = chan->ch_prichan;
1185		struct vmbus_chanmsg_chfree *req;
1186		struct vmbus_msghc *mh;
1187		int error;
1188
1189		mh = vmbus_msghc_get(sc, sizeof(*req));
1190		if (mh == NULL) {
1191			device_printf(sc->vmbus_dev,
1192			    "can not get msg hypercall for chfree(chan%u)\n",
1193			    chan->ch_id);
1194			goto remove;
1195		}
1196
1197		req = vmbus_msghc_dataptr(mh);
1198		req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHFREE;
1199		req->chm_chanid = chan->ch_id;
1200
1201		error = vmbus_msghc_exec_noresult(mh);
1202		vmbus_msghc_put(sc, mh);
1203
1204		if (error) {
1205			device_printf(sc->vmbus_dev,
1206			    "chfree(chan%u) failed: %d\n",
1207			    chan->ch_id, error);
1208			/* NOTE: Move on! */
1209		} else {
1210			if (bootverbose) {
1211				device_printf(sc->vmbus_dev, "chan%u freed\n",
1212				    chan->ch_id);
1213			}
1214		}
1215remove:
1216		mtx_lock(&pri_chan->ch_subchan_lock);
1217		TAILQ_REMOVE(&pri_chan->ch_subchans, chan, ch_sublink);
1218		KASSERT(pri_chan->ch_subchan_cnt > 0,
1219		    ("invalid subchan_cnt %d", pri_chan->ch_subchan_cnt));
1220		pri_chan->ch_subchan_cnt--;
1221		mtx_unlock(&pri_chan->ch_subchan_lock);
1222		wakeup(pri_chan);
1223
1224		vmbus_chan_free(chan);
1225	}
1226}
1227
1228/*
1229 * Detach all devices and destroy the corresponding primary channels.
1230 */
1231void
1232vmbus_chan_destroy_all(struct vmbus_softc *sc)
1233{
1234	struct vmbus_channel *chan;
1235
1236	mtx_lock(&sc->vmbus_prichan_lock);
1237	while ((chan = TAILQ_FIRST(&sc->vmbus_prichans)) != NULL) {
1238		KASSERT(VMBUS_CHAN_ISPRIMARY(chan), ("not primary channel"));
1239		TAILQ_REMOVE(&sc->vmbus_prichans, chan, ch_prilink);
1240		mtx_unlock(&sc->vmbus_prichan_lock);
1241
1242		vmbus_delete_child(chan);
1243		vmbus_chan_free(chan);
1244
1245		mtx_lock(&sc->vmbus_prichan_lock);
1246	}
1247	bzero(sc->vmbus_chmap,
1248	    sizeof(struct vmbus_channel *) * VMBUS_CHAN_MAX);
1249	mtx_unlock(&sc->vmbus_prichan_lock);
1250}
1251
1252/*
1253 * The channel whose vcpu binding is closest to the current vcpu will
1254 * be selected.
1255 * If there are no sub-channels, the primary channel is always selected.
1256 */
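/*
 * E.g. (illustrative): with the primary channel bound to vcpu 0,
 * sub-channels bound to vcpu 2 and vcpu 5, and the caller running on the
 * cpu mapped to vcpu 4, the vcpu 5 sub-channel is selected (distance 1
 * vs. distances 2 and 4).
 */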
1257struct vmbus_channel *
1258vmbus_chan_cpu2chan(struct vmbus_channel *prichan, int cpu)
1259{
1260	struct vmbus_channel *sel, *chan;
1261	uint32_t vcpu, sel_dist;
1262
1263	KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpuid %d", cpu));
1264	if (TAILQ_EMPTY(&prichan->ch_subchans))
1265		return prichan;
1266
1267	vcpu = VMBUS_PCPU_GET(prichan->ch_vmbus, vcpuid, cpu);
1268
1269#define CHAN_VCPU_DIST(ch, vcpu)		\
1270	(((ch)->ch_vcpuid > (vcpu)) ?		\
1271	 ((ch)->ch_vcpuid - (vcpu)) : ((vcpu) - (ch)->ch_vcpuid))
1272
1273#define CHAN_SELECT(ch)				\
1274do {						\
1275	sel = ch;				\
1276	sel_dist = CHAN_VCPU_DIST(ch, vcpu);	\
1277} while (0)
1278
1279	CHAN_SELECT(prichan);
1280
1281	mtx_lock(&prichan->ch_subchan_lock);
1282	TAILQ_FOREACH(chan, &prichan->ch_subchans, ch_sublink) {
1283		uint32_t dist;
1284
1285		KASSERT(chan->ch_stflags & VMBUS_CHAN_ST_OPENED,
1286		    ("chan%u is not opened", chan->ch_id));
1287
1288		if (chan->ch_vcpuid == vcpu) {
1289			/* Exact match; done */
1290			CHAN_SELECT(chan);
1291			break;
1292		}
1293
1294		dist = CHAN_VCPU_DIST(chan, vcpu);
1295		if (sel_dist <= dist) {
1296			/* Far or same distance; skip */
1297			continue;
1298		}
1299
1300		/* Select the closer channel. */
1301		CHAN_SELECT(chan);
1302	}
1303	mtx_unlock(&prichan->ch_subchan_lock);
1304
1305#undef CHAN_SELECT
1306#undef CHAN_VCPU_DIST
1307
1308	return sel;
1309}
1310
1311struct vmbus_channel **
1312vmbus_subchan_get(struct vmbus_channel *pri_chan, int subchan_cnt)
1313{
1314	struct vmbus_channel **ret, *chan;
1315	int i;
1316
1317	ret = malloc(subchan_cnt * sizeof(struct vmbus_channel *), M_TEMP,
1318	    M_WAITOK);
1319
1320	mtx_lock(&pri_chan->ch_subchan_lock);
1321
1322	while (pri_chan->ch_subchan_cnt < subchan_cnt)
1323		mtx_sleep(pri_chan, &pri_chan->ch_subchan_lock, 0, "subch", 0);
1324
1325	i = 0;
1326	TAILQ_FOREACH(chan, &pri_chan->ch_subchans, ch_sublink) {
1327		/* TODO: refcnt chan */
1328		ret[i] = chan;
1329
1330		++i;
1331		if (i == subchan_cnt)
1332			break;
1333	}
1334	KASSERT(i == subchan_cnt, ("invalid subchan count %d, should be %d",
1335	    pri_chan->ch_subchan_cnt, subchan_cnt));
1336
1337	mtx_unlock(&pri_chan->ch_subchan_lock);
1338
1339	return ret;
1340}
1341
1342void
1343vmbus_subchan_rel(struct vmbus_channel **subchan, int subchan_cnt __unused)
1344{
1345
1346	free(subchan, M_TEMP);
1347}
1348
1349void
1350vmbus_subchan_drain(struct vmbus_channel *pri_chan)
1351{
1352	mtx_lock(&pri_chan->ch_subchan_lock);
1353	while (pri_chan->ch_subchan_cnt > 0)
1354		mtx_sleep(pri_chan, &pri_chan->ch_subchan_lock, 0, "dsubch", 0);
1355	mtx_unlock(&pri_chan->ch_subchan_lock);
1356}
1357
1358void
1359vmbus_chan_msgproc(struct vmbus_softc *sc, const struct vmbus_message *msg)
1360{
1361	vmbus_chanmsg_proc_t msg_proc;
1362	uint32_t msg_type;
1363
1364	msg_type = ((const struct vmbus_chanmsg_hdr *)msg->msg_data)->chm_type;
1365	KASSERT(msg_type < VMBUS_CHANMSG_TYPE_MAX,
1366	    ("invalid message type %u", msg_type));
1367
1368	msg_proc = vmbus_chan_msgprocs[msg_type];
1369	if (msg_proc != NULL)
1370		msg_proc(sc, msg);
1371}
1372
1373void
1374vmbus_chan_set_readbatch(struct vmbus_channel *chan, bool on)
1375{
1376	if (!on)
1377		chan->ch_flags &= ~VMBUS_CHAN_FLAG_BATCHREAD;
1378	else
1379		chan->ch_flags |= VMBUS_CHAN_FLAG_BATCHREAD;
1380}
1381
1382uint32_t
1383vmbus_chan_id(const struct vmbus_channel *chan)
1384{
1385	return chan->ch_id;
1386}
1387
1388uint32_t
1389vmbus_chan_subidx(const struct vmbus_channel *chan)
1390{
1391	return chan->ch_subidx;
1392}
1393
1394bool
1395vmbus_chan_is_primary(const struct vmbus_channel *chan)
1396{
1397	if (VMBUS_CHAN_ISPRIMARY(chan))
1398		return true;
1399	else
1400		return false;
1401}
1402
1403const struct hyperv_guid *
1404vmbus_chan_guid_inst(const struct vmbus_channel *chan)
1405{
1406	return &chan->ch_guid_inst;
1407}
1408