vmbus_chan.c revision 308634
1/*-
2 * Copyright (c) 2009-2012,2016 Microsoft Corp.
3 * Copyright (c) 2012 NetApp Inc.
4 * Copyright (c) 2012 Citrix Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice unmodified, this list of conditions, and the following
12 *    disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: stable/11/sys/dev/hyperv/vmbus/vmbus_chan.c 308634 2016-11-14 06:31:01Z sephe $");
31
32#include <sys/param.h>
33#include <sys/bus.h>
34#include <sys/kernel.h>
35#include <sys/lock.h>
36#include <sys/malloc.h>
37#include <sys/mutex.h>
38#include <sys/smp.h>
39#include <sys/sysctl.h>
40#include <sys/systm.h>
41
42#include <machine/atomic.h>
43#include <machine/stdarg.h>
44
45#include <dev/hyperv/include/hyperv_busdma.h>
46#include <dev/hyperv/vmbus/hyperv_var.h>
47#include <dev/hyperv/vmbus/vmbus_reg.h>
48#include <dev/hyperv/vmbus/vmbus_var.h>
49#include <dev/hyperv/vmbus/vmbus_brvar.h>
50#include <dev/hyperv/vmbus/vmbus_chanvar.h>
51
52static void			vmbus_chan_update_evtflagcnt(
53				    struct vmbus_softc *,
54				    const struct vmbus_channel *);
55static void			vmbus_chan_close_internal(
56				    struct vmbus_channel *);
57static int			vmbus_chan_sysctl_mnf(SYSCTL_HANDLER_ARGS);
58static void			vmbus_chan_sysctl_create(
59				    struct vmbus_channel *);
60static struct vmbus_channel	*vmbus_chan_alloc(struct vmbus_softc *);
61static void			vmbus_chan_free(struct vmbus_channel *);
62static int			vmbus_chan_add(struct vmbus_channel *);
63static void			vmbus_chan_cpu_default(struct vmbus_channel *);
64static int			vmbus_chan_release(struct vmbus_channel *);
65static void			vmbus_chan_set_chmap(struct vmbus_channel *);
66static void			vmbus_chan_clear_chmap(struct vmbus_channel *);
67
68static void			vmbus_chan_ins_prilist(struct vmbus_softc *,
69				    struct vmbus_channel *);
70static void			vmbus_chan_rem_prilist(struct vmbus_softc *,
71				    struct vmbus_channel *);
72static void			vmbus_chan_ins_list(struct vmbus_softc *,
73				    struct vmbus_channel *);
74static void			vmbus_chan_rem_list(struct vmbus_softc *,
75				    struct vmbus_channel *);
76static void			vmbus_chan_ins_sublist(struct vmbus_channel *,
77				    struct vmbus_channel *);
78static void			vmbus_chan_rem_sublist(struct vmbus_channel *,
79				    struct vmbus_channel *);
80
81static void			vmbus_chan_task(void *, int);
82static void			vmbus_chan_task_nobatch(void *, int);
83static void			vmbus_chan_clrchmap_task(void *, int);
84static void			vmbus_prichan_attach_task(void *, int);
85static void			vmbus_subchan_attach_task(void *, int);
86static void			vmbus_prichan_detach_task(void *, int);
87static void			vmbus_subchan_detach_task(void *, int);
88
89static void			vmbus_chan_msgproc_choffer(struct vmbus_softc *,
90				    const struct vmbus_message *);
91static void			vmbus_chan_msgproc_chrescind(
92				    struct vmbus_softc *,
93				    const struct vmbus_message *);
94
95static int			vmbus_chan_printf(const struct vmbus_channel *,
96				    const char *, ...) __printflike(2, 3);
97
98/*
99 * Vmbus channel message processing.
100 */
101static const vmbus_chanmsg_proc_t
102vmbus_chan_msgprocs[VMBUS_CHANMSG_TYPE_MAX] = {
103	VMBUS_CHANMSG_PROC(CHOFFER,	vmbus_chan_msgproc_choffer),
104	VMBUS_CHANMSG_PROC(CHRESCIND,	vmbus_chan_msgproc_chrescind),
105
106	VMBUS_CHANMSG_PROC_WAKEUP(CHOPEN_RESP),
107	VMBUS_CHANMSG_PROC_WAKEUP(GPADL_CONNRESP),
108	VMBUS_CHANMSG_PROC_WAKEUP(GPADL_DISCONNRESP)
109};
110
111/*
112 * Notify the host that data is pending on our TX bufring.
113 */
114static __inline void
115vmbus_chan_signal_tx(const struct vmbus_channel *chan)
116{
117	atomic_set_long(chan->ch_evtflag, chan->ch_evtflag_mask);
118	if (chan->ch_txflags & VMBUS_CHAN_TXF_HASMNF)
119		atomic_set_int(chan->ch_montrig, chan->ch_montrig_mask);
120	else
121		hypercall_signal_event(chan->ch_monprm_dma.hv_paddr);
122}
123
124static void
125vmbus_chan_ins_prilist(struct vmbus_softc *sc, struct vmbus_channel *chan)
126{
127
128	mtx_assert(&sc->vmbus_prichan_lock, MA_OWNED);
129	if (atomic_testandset_int(&chan->ch_stflags,
130	    VMBUS_CHAN_ST_ONPRIL_SHIFT))
131		panic("channel is already on the prilist");
132	TAILQ_INSERT_TAIL(&sc->vmbus_prichans, chan, ch_prilink);
133}
134
135static void
136vmbus_chan_rem_prilist(struct vmbus_softc *sc, struct vmbus_channel *chan)
137{
138
139	mtx_assert(&sc->vmbus_prichan_lock, MA_OWNED);
140	if (atomic_testandclear_int(&chan->ch_stflags,
141	    VMBUS_CHAN_ST_ONPRIL_SHIFT) == 0)
142		panic("channel is not on the prilist");
143	TAILQ_REMOVE(&sc->vmbus_prichans, chan, ch_prilink);
144}
145
146static void
147vmbus_chan_ins_sublist(struct vmbus_channel *prichan,
148    struct vmbus_channel *chan)
149{
150
151	mtx_assert(&prichan->ch_subchan_lock, MA_OWNED);
152
153	if (atomic_testandset_int(&chan->ch_stflags,
154	    VMBUS_CHAN_ST_ONSUBL_SHIFT))
155		panic("channel is already on the sublist");
156	TAILQ_INSERT_TAIL(&prichan->ch_subchans, chan, ch_sublink);
157
158	/* Bump sub-channel count. */
159	prichan->ch_subchan_cnt++;
160}
161
162static void
163vmbus_chan_rem_sublist(struct vmbus_channel *prichan,
164    struct vmbus_channel *chan)
165{
166
167	mtx_assert(&prichan->ch_subchan_lock, MA_OWNED);
168
169	KASSERT(prichan->ch_subchan_cnt > 0,
170	    ("invalid subchan_cnt %d", prichan->ch_subchan_cnt));
171	prichan->ch_subchan_cnt--;
172
173	if (atomic_testandclear_int(&chan->ch_stflags,
174	    VMBUS_CHAN_ST_ONSUBL_SHIFT) == 0)
175		panic("channel is not on the sublist");
176	TAILQ_REMOVE(&prichan->ch_subchans, chan, ch_sublink);
177}
178
179static void
180vmbus_chan_ins_list(struct vmbus_softc *sc, struct vmbus_channel *chan)
181{
182
183	mtx_assert(&sc->vmbus_chan_lock, MA_OWNED);
184	if (atomic_testandset_int(&chan->ch_stflags,
185	    VMBUS_CHAN_ST_ONLIST_SHIFT))
186		panic("channel is already on the list");
187	TAILQ_INSERT_TAIL(&sc->vmbus_chans, chan, ch_link);
188}
189
190static void
191vmbus_chan_rem_list(struct vmbus_softc *sc, struct vmbus_channel *chan)
192{
193
194	mtx_assert(&sc->vmbus_chan_lock, MA_OWNED);
195	if (atomic_testandclear_int(&chan->ch_stflags,
196	    VMBUS_CHAN_ST_ONLIST_SHIFT) == 0)
197		panic("channel is not on the list");
198	TAILQ_REMOVE(&sc->vmbus_chans, chan, ch_link);
199}
200
201static int
202vmbus_chan_sysctl_mnf(SYSCTL_HANDLER_ARGS)
203{
204	struct vmbus_channel *chan = arg1;
205	int mnf = 0;
206
207	if (chan->ch_txflags & VMBUS_CHAN_TXF_HASMNF)
208		mnf = 1;
209	return sysctl_handle_int(oidp, &mnf, 0, req);
210}
211
212static void
213vmbus_chan_sysctl_create(struct vmbus_channel *chan)
214{
215	struct sysctl_oid *ch_tree, *chid_tree, *br_tree;
216	struct sysctl_ctx_list *ctx;
217	uint32_t ch_id;
218	char name[16];
219
220	/*
221	 * Add sysctl nodes related to this channel to this
222	 * channel's sysctl ctx, so that they can be destroyed
223	 * independently upon close of this channel, which can
224	 * happen even if the device is not detached.
225	 */
226	ctx = &chan->ch_sysctl_ctx;
227	sysctl_ctx_init(ctx);
228
229	/*
230	 * Create dev.NAME.UNIT.channel tree.
231	 */
232	ch_tree = SYSCTL_ADD_NODE(ctx,
233	    SYSCTL_CHILDREN(device_get_sysctl_tree(chan->ch_dev)),
234	    OID_AUTO, "channel", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
235	if (ch_tree == NULL)
236		return;
237
238	/*
239	 * Create dev.NAME.UNIT.channel.CHANID tree.
240	 */
241	if (VMBUS_CHAN_ISPRIMARY(chan))
242		ch_id = chan->ch_id;
243	else
244		ch_id = chan->ch_prichan->ch_id;
245	snprintf(name, sizeof(name), "%d", ch_id);
246	chid_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(ch_tree),
247	    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
248	if (chid_tree == NULL)
249		return;
250
251	if (!VMBUS_CHAN_ISPRIMARY(chan)) {
252		/*
253		 * Create dev.NAME.UNIT.channel.CHANID.sub tree.
254		 */
255		ch_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(chid_tree),
256		    OID_AUTO, "sub", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
257		if (ch_tree == NULL)
258			return;
259
260		/*
261		 * Create dev.NAME.UNIT.channel.CHANID.sub.SUBIDX tree.
262		 *
263		 * NOTE:
264		 * chid_tree is changed to this new sysctl tree.
265		 */
266		snprintf(name, sizeof(name), "%d", chan->ch_subidx);
267		chid_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(ch_tree),
268		    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
269		if (chid_tree == NULL)
270			return;
271
272		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
273		    "chanid", CTLFLAG_RD, &chan->ch_id, 0, "channel id");
274	}
275
276	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
277	    "cpu", CTLFLAG_RD, &chan->ch_cpuid, 0, "owner CPU id");
278	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
279	    "mnf", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
280	    chan, 0, vmbus_chan_sysctl_mnf, "I",
281	    "has monitor notification facilities");
282
283	br_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
284	    "br", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
285	if (br_tree != NULL) {
286		/*
287		 * Create sysctl tree for RX bufring.
288		 */
289		vmbus_br_sysctl_create(ctx, br_tree, &chan->ch_rxbr.rxbr, "rx");
290		/*
291		 * Create sysctl tree for TX bufring.
292		 */
293		vmbus_br_sysctl_create(ctx, br_tree, &chan->ch_txbr.txbr, "tx");
294	}
295}
296
297int
298vmbus_chan_open(struct vmbus_channel *chan, int txbr_size, int rxbr_size,
299    const void *udata, int udlen, vmbus_chan_callback_t cb, void *cbarg)
300{
301	struct vmbus_chan_br cbr;
302	int error;
303
304	/*
305	 * Allocate the TX+RX bufrings.
306	 */
307	KASSERT(chan->ch_bufring == NULL, ("bufrings are allocated"));
308	chan->ch_bufring = hyperv_dmamem_alloc(bus_get_dma_tag(chan->ch_dev),
309	    PAGE_SIZE, 0, txbr_size + rxbr_size, &chan->ch_bufring_dma,
310	    BUS_DMA_WAITOK);
311	if (chan->ch_bufring == NULL) {
312		vmbus_chan_printf(chan, "bufring allocation failed\n");
313		return (ENOMEM);
314	}
315
316	cbr.cbr = chan->ch_bufring;
317	cbr.cbr_paddr = chan->ch_bufring_dma.hv_paddr;
318	cbr.cbr_txsz = txbr_size;
319	cbr.cbr_rxsz = rxbr_size;
320
321	error = vmbus_chan_open_br(chan, &cbr, udata, udlen, cb, cbarg);
322	if (error) {
323		hyperv_dmamem_free(&chan->ch_bufring_dma, chan->ch_bufring);
324		chan->ch_bufring = NULL;
325	}
326	return (error);
327}
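/*
 * Hypothetical usage sketch (illustrative only, not part of this driver's
 * API): how a Hyper-V device driver typically opens a channel with
 * vmbus_chan_open().  The callback, bufring sizes and argument below are
 * made up for the example.
 */
#if 0
static void
example_open_callback(struct vmbus_channel *chan, void *arg)
{
	/* Drain the RX bufring with vmbus_chan_recv()/vmbus_chan_recv_pkt(). */
}

static int
example_chan_open(struct vmbus_channel *chan, void *arg)
{
	/*
	 * Both bufring sizes must be multiples of PAGE_SIZE (asserted by
	 * vmbus_chan_open_br()); no channel-open user data is passed here.
	 */
	return (vmbus_chan_open(chan, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
	    NULL, 0, example_open_callback, arg));
}
#endif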
328
329int
330vmbus_chan_open_br(struct vmbus_channel *chan, const struct vmbus_chan_br *cbr,
331    const void *udata, int udlen, vmbus_chan_callback_t cb, void *cbarg)
332{
333	struct vmbus_softc *sc = chan->ch_vmbus;
334	const struct vmbus_chanmsg_chopen_resp *resp;
335	const struct vmbus_message *msg;
336	struct vmbus_chanmsg_chopen *req;
337	struct vmbus_msghc *mh;
338	uint32_t status;
339	int error, txbr_size, rxbr_size;
340	task_fn_t *task_fn;
341	uint8_t *br;
342
343	if (udlen > VMBUS_CHANMSG_CHOPEN_UDATA_SIZE) {
344		vmbus_chan_printf(chan,
345		    "invalid udata len %d for chan%u\n", udlen, chan->ch_id);
346		return EINVAL;
347	}
348
349	br = cbr->cbr;
350	txbr_size = cbr->cbr_txsz;
351	rxbr_size = cbr->cbr_rxsz;
352	KASSERT((txbr_size & PAGE_MASK) == 0,
353	    ("send bufring size is not a multiple of PAGE_SIZE"));
354	KASSERT((rxbr_size & PAGE_MASK) == 0,
355	    ("recv bufring size is not a multiple of PAGE_SIZE"));
356	KASSERT((cbr->cbr_paddr & PAGE_MASK) == 0,
357	    ("bufring is not page aligned"));
358
359	/*
360	 * Zero out the TX/RX bufrings, in case they were used before.
361	 */
362	memset(br, 0, txbr_size + rxbr_size);
363
364	if (atomic_testandset_int(&chan->ch_stflags,
365	    VMBUS_CHAN_ST_OPENED_SHIFT))
366		panic("double-open chan%u", chan->ch_id);
367
368	chan->ch_cb = cb;
369	chan->ch_cbarg = cbarg;
370
371	vmbus_chan_update_evtflagcnt(sc, chan);
372
373	chan->ch_tq = VMBUS_PCPU_GET(chan->ch_vmbus, event_tq, chan->ch_cpuid);
374	if (chan->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD)
375		task_fn = vmbus_chan_task;
376	else
377		task_fn = vmbus_chan_task_nobatch;
378	TASK_INIT(&chan->ch_task, 0, task_fn, chan);
379
380	/* TX bufring comes first */
381	vmbus_txbr_setup(&chan->ch_txbr, br, txbr_size);
382	/* RX bufring immediately follows TX bufring */
383	vmbus_rxbr_setup(&chan->ch_rxbr, br + txbr_size, rxbr_size);
384
385	/* Create sysctl tree for this channel */
386	vmbus_chan_sysctl_create(chan);
387
388	/*
389	 * Connect the bufrings, both RX and TX, to this channel.
390	 */
391	error = vmbus_chan_gpadl_connect(chan, cbr->cbr_paddr,
392	    txbr_size + rxbr_size, &chan->ch_bufring_gpadl);
393	if (error) {
394		vmbus_chan_printf(chan,
395		    "failed to connect bufring GPADL to chan%u\n", chan->ch_id);
396		goto failed;
397	}
398
399	/*
400	 * Install this channel, before it is opened, but after everything
401	 * else has been set up.
402	 */
403	vmbus_chan_set_chmap(chan);
404
405	/*
406	 * Open the channel with the bufring GPADL on the target CPU.
407	 */
408	mh = vmbus_msghc_get(sc, sizeof(*req));
409	if (mh == NULL) {
410		vmbus_chan_printf(chan,
411		    "can not get msg hypercall for chopen(chan%u)\n",
412		    chan->ch_id);
413		error = ENXIO;
414		goto failed;
415	}
416
417	req = vmbus_msghc_dataptr(mh);
418	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHOPEN;
419	req->chm_chanid = chan->ch_id;
420	req->chm_openid = chan->ch_id;
421	req->chm_gpadl = chan->ch_bufring_gpadl;
422	req->chm_vcpuid = chan->ch_vcpuid;
423	req->chm_txbr_pgcnt = txbr_size >> PAGE_SHIFT;
424	if (udlen > 0)
425		memcpy(req->chm_udata, udata, udlen);
426
427	error = vmbus_msghc_exec(sc, mh);
428	if (error) {
429		vmbus_chan_printf(chan,
430		    "chopen(chan%u) msg hypercall exec failed: %d\n",
431		    chan->ch_id, error);
432		vmbus_msghc_put(sc, mh);
433		goto failed;
434	}
435
436	msg = vmbus_msghc_wait_result(sc, mh);
437	resp = (const struct vmbus_chanmsg_chopen_resp *)msg->msg_data;
438	status = resp->chm_status;
439
440	vmbus_msghc_put(sc, mh);
441
442	if (status == 0) {
443		if (bootverbose) {
444			vmbus_chan_printf(chan, "chan%u opened\n", chan->ch_id);
445		}
446		return 0;
447	}
448
449	vmbus_chan_printf(chan, "failed to open chan%u\n", chan->ch_id);
450	error = ENXIO;
451
452failed:
453	vmbus_chan_clear_chmap(chan);
454	if (chan->ch_bufring_gpadl) {
455		vmbus_chan_gpadl_disconnect(chan, chan->ch_bufring_gpadl);
456		chan->ch_bufring_gpadl = 0;
457	}
458	atomic_clear_int(&chan->ch_stflags, VMBUS_CHAN_ST_OPENED);
459	return error;
460}
461
462int
463vmbus_chan_gpadl_connect(struct vmbus_channel *chan, bus_addr_t paddr,
464    int size, uint32_t *gpadl0)
465{
466	struct vmbus_softc *sc = chan->ch_vmbus;
467	struct vmbus_msghc *mh;
468	struct vmbus_chanmsg_gpadl_conn *req;
469	const struct vmbus_message *msg;
470	size_t reqsz;
471	uint32_t gpadl, status;
472	int page_count, range_len, i, cnt, error;
473	uint64_t page_id;
474
475	/*
476	 * Preliminary checks.
477	 */
478
479	KASSERT((size & PAGE_MASK) == 0,
480	    ("invalid GPA size %d, not a multiple of PAGE_SIZE", size));
481	page_count = size >> PAGE_SHIFT;
482
483	KASSERT((paddr & PAGE_MASK) == 0,
484	    ("GPA is not page aligned %jx", (uintmax_t)paddr));
485	page_id = paddr >> PAGE_SHIFT;
486
487	range_len = __offsetof(struct vmbus_gpa_range, gpa_page[page_count]);
488	/*
489	 * We don't support multiple GPA ranges.
490	 */
491	if (range_len > UINT16_MAX) {
492		vmbus_chan_printf(chan, "GPA too large, %d pages\n",
493		    page_count);
494		return EOPNOTSUPP;
495	}
496
497	/*
498	 * Allocate GPADL id.
499	 */
500	gpadl = vmbus_gpadl_alloc(sc);
501	*gpadl0 = gpadl;
502
503	/*
504	 * Connect this GPADL to the target channel.
505	 *
506	 * NOTE:
507	 * Since each message can only hold a small set of page
508	 * addresses, several messages may be required to
509	 * complete the connection.
510	 */
511	if (page_count > VMBUS_CHANMSG_GPADL_CONN_PGMAX)
512		cnt = VMBUS_CHANMSG_GPADL_CONN_PGMAX;
513	else
514		cnt = page_count;
515	page_count -= cnt;
516
517	reqsz = __offsetof(struct vmbus_chanmsg_gpadl_conn,
518	    chm_range.gpa_page[cnt]);
519	mh = vmbus_msghc_get(sc, reqsz);
520	if (mh == NULL) {
521		vmbus_chan_printf(chan,
522		    "can not get msg hypercall for gpadl_conn(chan%u)\n",
523		    chan->ch_id);
524		return EIO;
525	}
526
527	req = vmbus_msghc_dataptr(mh);
528	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_CONN;
529	req->chm_chanid = chan->ch_id;
530	req->chm_gpadl = gpadl;
531	req->chm_range_len = range_len;
532	req->chm_range_cnt = 1;
533	req->chm_range.gpa_len = size;
534	req->chm_range.gpa_ofs = 0;
535	for (i = 0; i < cnt; ++i)
536		req->chm_range.gpa_page[i] = page_id++;
537
538	error = vmbus_msghc_exec(sc, mh);
539	if (error) {
540		vmbus_chan_printf(chan,
541		    "gpadl_conn(chan%u) msg hypercall exec failed: %d\n",
542		    chan->ch_id, error);
543		vmbus_msghc_put(sc, mh);
544		return error;
545	}
546
547	while (page_count > 0) {
548		struct vmbus_chanmsg_gpadl_subconn *subreq;
549
550		if (page_count > VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX)
551			cnt = VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX;
552		else
553			cnt = page_count;
554		page_count -= cnt;
555
556		reqsz = __offsetof(struct vmbus_chanmsg_gpadl_subconn,
557		    chm_gpa_page[cnt]);
558		vmbus_msghc_reset(mh, reqsz);
559
560		subreq = vmbus_msghc_dataptr(mh);
561		subreq->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_SUBCONN;
562		subreq->chm_gpadl = gpadl;
563		for (i = 0; i < cnt; ++i)
564			subreq->chm_gpa_page[i] = page_id++;
565
566		vmbus_msghc_exec_noresult(mh);
567	}
568	KASSERT(page_count == 0, ("invalid page count %d", page_count));
569
570	msg = vmbus_msghc_wait_result(sc, mh);
571	status = ((const struct vmbus_chanmsg_gpadl_connresp *)
572	    msg->msg_data)->chm_status;
573
574	vmbus_msghc_put(sc, mh);
575
576	if (status != 0) {
577		vmbus_chan_printf(chan, "gpadl_conn(chan%u) failed: %u\n",
578		    chan->ch_id, status);
579		return EIO;
580	} else {
581		if (bootverbose) {
582			vmbus_chan_printf(chan,
583			    "gpadl_conn(chan%u) succeeded\n", chan->ch_id);
584		}
585	}
586	return 0;
587}
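/*
 * Illustrative helper (hypothetical, shown only to spell out the message
 * splitting performed by vmbus_chan_gpadl_connect() above): the initial
 * GPADL_CONN message carries up to VMBUS_CHANMSG_GPADL_CONN_PGMAX page
 * numbers, and each following GPADL_SUBCONN message carries up to
 * VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX.
 */
#if 0
static __inline int
example_gpadl_conn_msgcnt(int page_count)
{
	int msgcnt = 1;		/* the initial GPADL_CONN message */

	if (page_count > VMBUS_CHANMSG_GPADL_CONN_PGMAX) {
		page_count -= VMBUS_CHANMSG_GPADL_CONN_PGMAX;
		/* Round the remainder up to whole GPADL_SUBCONN messages. */
		msgcnt += howmany(page_count,
		    VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX);
	}
	return (msgcnt);
}
#endif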
588
589/*
590 * Disconnect the GPADL from the target channel.
591 */
592int
593vmbus_chan_gpadl_disconnect(struct vmbus_channel *chan, uint32_t gpadl)
594{
595	struct vmbus_softc *sc = chan->ch_vmbus;
596	struct vmbus_msghc *mh;
597	struct vmbus_chanmsg_gpadl_disconn *req;
598	int error;
599
600	mh = vmbus_msghc_get(sc, sizeof(*req));
601	if (mh == NULL) {
602		vmbus_chan_printf(chan,
603		    "can not get msg hypercall for gpadl_disconn(chan%u)\n",
604		    chan->ch_id);
605		return EBUSY;
606	}
607
608	req = vmbus_msghc_dataptr(mh);
609	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_DISCONN;
610	req->chm_chanid = chan->ch_id;
611	req->chm_gpadl = gpadl;
612
613	error = vmbus_msghc_exec(sc, mh);
614	if (error) {
615		vmbus_chan_printf(chan,
616		    "gpadl_disconn(chan%u) msg hypercall exec failed: %d\n",
617		    chan->ch_id, error);
618		vmbus_msghc_put(sc, mh);
619		return error;
620	}
621
622	vmbus_msghc_wait_result(sc, mh);
623	/* Discard result; no useful information */
624	vmbus_msghc_put(sc, mh);
625
626	return 0;
627}
628
629static void
630vmbus_chan_clrchmap_task(void *xchan, int pending __unused)
631{
632	struct vmbus_channel *chan = xchan;
633
634	critical_enter();
635	chan->ch_vmbus->vmbus_chmap[chan->ch_id] = NULL;
636	critical_exit();
637}
638
639static void
640vmbus_chan_clear_chmap(struct vmbus_channel *chan)
641{
642	struct task chmap_task;
643
644	TASK_INIT(&chmap_task, 0, vmbus_chan_clrchmap_task, chan);
645	taskqueue_enqueue(chan->ch_tq, &chmap_task);
646	taskqueue_drain(chan->ch_tq, &chmap_task);
647}
648
649static void
650vmbus_chan_set_chmap(struct vmbus_channel *chan)
651{
652	__compiler_membar();
653	chan->ch_vmbus->vmbus_chmap[chan->ch_id] = chan;
654}
655
656static void
657vmbus_chan_close_internal(struct vmbus_channel *chan)
658{
659	struct vmbus_softc *sc = chan->ch_vmbus;
660	struct vmbus_msghc *mh;
661	struct vmbus_chanmsg_chclose *req;
662	int error;
663
664	/* TODO: stringent check */
665	atomic_clear_int(&chan->ch_stflags, VMBUS_CHAN_ST_OPENED);
666
667	/*
668	 * Free this channel's sysctl tree attached to its device's
669	 * sysctl tree.
670	 */
671	sysctl_ctx_free(&chan->ch_sysctl_ctx);
672
673	/*
674	 * NOTE:
675	 * Order is critical.  This channel _must_ be uninstalled first,
676	 * else the channel task may be enqueued by the IDT after it has
677	 * been drained.
678	 */
679	vmbus_chan_clear_chmap(chan);
680	taskqueue_drain(chan->ch_tq, &chan->ch_task);
681	chan->ch_tq = NULL;
682
683	/*
684	 * Close this channel.
685	 */
686	mh = vmbus_msghc_get(sc, sizeof(*req));
687	if (mh == NULL) {
688		vmbus_chan_printf(chan,
689		    "can not get msg hypercall for chclose(chan%u)\n",
690		    chan->ch_id);
691		return;
692	}
693
694	req = vmbus_msghc_dataptr(mh);
695	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHCLOSE;
696	req->chm_chanid = chan->ch_id;
697
698	error = vmbus_msghc_exec_noresult(mh);
699	vmbus_msghc_put(sc, mh);
700
701	if (error) {
702		vmbus_chan_printf(chan,
703		    "chclose(chan%u) msg hypercall exec failed: %d\n",
704		    chan->ch_id, error);
705		return;
706	} else if (bootverbose) {
707		vmbus_chan_printf(chan, "close chan%u\n", chan->ch_id);
708	}
709
710	/*
711	 * Disconnect the TX+RX bufrings from this channel.
712	 */
713	if (chan->ch_bufring_gpadl) {
714		vmbus_chan_gpadl_disconnect(chan, chan->ch_bufring_gpadl);
715		chan->ch_bufring_gpadl = 0;
716	}
717
718	/*
719	 * Destroy the TX+RX bufrings.
720	 */
721	if (chan->ch_bufring != NULL) {
722		hyperv_dmamem_free(&chan->ch_bufring_dma, chan->ch_bufring);
723		chan->ch_bufring = NULL;
724	}
725}
726
727/*
728 * The caller should make sure that all sub-channels have
729 * been added to 'chan', and that none of the to-be-closed
730 * channels are being opened.
731 */
732void
733vmbus_chan_close(struct vmbus_channel *chan)
734{
735	int subchan_cnt;
736
737	if (!VMBUS_CHAN_ISPRIMARY(chan)) {
738		/*
739		 * Sub-channel is closed when its primary channel
740		 * is closed; done.
741		 */
742		return;
743	}
744
745	/*
746	 * Close all sub-channels, if any.
747	 */
748	subchan_cnt = chan->ch_subchan_cnt;
749	if (subchan_cnt > 0) {
750		struct vmbus_channel **subchan;
751		int i;
752
753		subchan = vmbus_subchan_get(chan, subchan_cnt);
754		for (i = 0; i < subchan_cnt; ++i)
755			vmbus_chan_close_internal(subchan[i]);
756		vmbus_subchan_rel(subchan, subchan_cnt);
757	}
758
759	/* Then close the primary channel. */
760	vmbus_chan_close_internal(chan);
761}
762
763void
764vmbus_chan_intr_drain(struct vmbus_channel *chan)
765{
766
767	taskqueue_drain(chan->ch_tq, &chan->ch_task);
768}
769
770int
771vmbus_chan_send(struct vmbus_channel *chan, uint16_t type, uint16_t flags,
772    void *data, int dlen, uint64_t xactid)
773{
774	struct vmbus_chanpkt pkt;
775	int pktlen, pad_pktlen, hlen, error;
776	uint64_t pad = 0;
777	struct iovec iov[3];
778	boolean_t send_evt;
779
780	hlen = sizeof(pkt);
781	pktlen = hlen + dlen;
782	pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
783	KASSERT(pad_pktlen <= vmbus_txbr_maxpktsz(&chan->ch_txbr),
784	    ("invalid packet size %d", pad_pktlen));
785
786	pkt.cp_hdr.cph_type = type;
787	pkt.cp_hdr.cph_flags = flags;
788	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_hlen, hlen);
789	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_tlen, pad_pktlen);
790	pkt.cp_hdr.cph_xactid = xactid;
791
792	iov[0].iov_base = &pkt;
793	iov[0].iov_len = hlen;
794	iov[1].iov_base = data;
795	iov[1].iov_len = dlen;
796	iov[2].iov_base = &pad;
797	iov[2].iov_len = pad_pktlen - pktlen;
798
799	error = vmbus_txbr_write(&chan->ch_txbr, iov, 3, &send_evt);
800	if (!error && send_evt)
801		vmbus_chan_signal_tx(chan);
802	return error;
803}
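/*
 * Hypothetical sketch: sending an inband request that asks the host to post
 * a completion packet.  VMBUS_CHANPKT_TYPE_INBAND and VMBUS_CHANPKT_FLAG_RC
 * (the inband packet type and the response-required flag from the vmbus
 * headers) are assumed here; the request buffer and transaction id are
 * illustrative.
 */
#if 0
static int
example_send_request(struct vmbus_channel *chan, void *req, int reqlen,
    uint64_t xactid)
{
	return (vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
	    VMBUS_CHANPKT_FLAG_RC, req, reqlen, xactid));
}
#endif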
804
805int
806vmbus_chan_send_sglist(struct vmbus_channel *chan,
807    struct vmbus_gpa sg[], int sglen, void *data, int dlen, uint64_t xactid)
808{
809	struct vmbus_chanpkt_sglist pkt;
810	int pktlen, pad_pktlen, hlen, error;
811	struct iovec iov[4];
812	boolean_t send_evt;
813	uint64_t pad = 0;
814
815	hlen = __offsetof(struct vmbus_chanpkt_sglist, cp_gpa[sglen]);
816	pktlen = hlen + dlen;
817	pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
818	KASSERT(pad_pktlen <= vmbus_txbr_maxpktsz(&chan->ch_txbr),
819	    ("invalid packet size %d", pad_pktlen));
820
821	pkt.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
822	pkt.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
823	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_hlen, hlen);
824	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_tlen, pad_pktlen);
825	pkt.cp_hdr.cph_xactid = xactid;
826	pkt.cp_rsvd = 0;
827	pkt.cp_gpa_cnt = sglen;
828
829	iov[0].iov_base = &pkt;
830	iov[0].iov_len = sizeof(pkt);
831	iov[1].iov_base = sg;
832	iov[1].iov_len = sizeof(struct vmbus_gpa) * sglen;
833	iov[2].iov_base = data;
834	iov[2].iov_len = dlen;
835	iov[3].iov_base = &pad;
836	iov[3].iov_len = pad_pktlen - pktlen;
837
838	error = vmbus_txbr_write(&chan->ch_txbr, iov, 4, &send_evt);
839	if (!error && send_evt)
840		vmbus_chan_signal_tx(chan);
841	return error;
842}
843
844int
845vmbus_chan_send_prplist(struct vmbus_channel *chan,
846    struct vmbus_gpa_range *prp, int prp_cnt, void *data, int dlen,
847    uint64_t xactid)
848{
849	struct vmbus_chanpkt_prplist pkt;
850	int pktlen, pad_pktlen, hlen, error;
851	struct iovec iov[4];
852	boolean_t send_evt;
853	uint64_t pad = 0;
854
855	hlen = __offsetof(struct vmbus_chanpkt_prplist,
856	    cp_range[0].gpa_page[prp_cnt]);
857	pktlen = hlen + dlen;
858	pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
859	KASSERT(pad_pktlen <= vmbus_txbr_maxpktsz(&chan->ch_txbr),
860	    ("invalid packet size %d", pad_pktlen));
861
862	pkt.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
863	pkt.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
864	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_hlen, hlen);
865	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_tlen, pad_pktlen);
866	pkt.cp_hdr.cph_xactid = xactid;
867	pkt.cp_rsvd = 0;
868	pkt.cp_range_cnt = 1;
869
870	iov[0].iov_base = &pkt;
871	iov[0].iov_len = sizeof(pkt);
872	iov[1].iov_base = prp;
873	iov[1].iov_len = __offsetof(struct vmbus_gpa_range, gpa_page[prp_cnt]);
874	iov[2].iov_base = data;
875	iov[2].iov_len = dlen;
876	iov[3].iov_base = &pad;
877	iov[3].iov_len = pad_pktlen - pktlen;
878
879	error = vmbus_txbr_write(&chan->ch_txbr, iov, 4, &send_evt);
880	if (!error && send_evt)
881		vmbus_chan_signal_tx(chan);
882	return error;
883}
884
885int
886vmbus_chan_recv(struct vmbus_channel *chan, void *data, int *dlen0,
887    uint64_t *xactid)
888{
889	struct vmbus_chanpkt_hdr pkt;
890	int error, dlen, hlen;
891
892	error = vmbus_rxbr_peek(&chan->ch_rxbr, &pkt, sizeof(pkt));
893	if (error)
894		return (error);
895
896	if (__predict_false(pkt.cph_hlen < VMBUS_CHANPKT_HLEN_MIN)) {
897		vmbus_chan_printf(chan, "invalid hlen %u\n", pkt.cph_hlen);
898		/* XXX this channel is actually dead. */
899		return (EIO);
900	}
901	if (__predict_false(pkt.cph_hlen > pkt.cph_tlen)) {
902		vmbus_chan_printf(chan, "invalid hlen %u and tlen %u\n",
903		    pkt.cph_hlen, pkt.cph_tlen);
904		/* XXX this channel is actually dead. */
905		return (EIO);
906	}
907
908	hlen = VMBUS_CHANPKT_GETLEN(pkt.cph_hlen);
909	dlen = VMBUS_CHANPKT_GETLEN(pkt.cph_tlen) - hlen;
910
911	if (*dlen0 < dlen) {
912		/* Return the size of this packet's data. */
913		*dlen0 = dlen;
914		return (ENOBUFS);
915	}
916
917	*xactid = pkt.cph_xactid;
918	*dlen0 = dlen;
919
920	/* Skip packet header */
921	error = vmbus_rxbr_read(&chan->ch_rxbr, data, dlen, hlen);
922	KASSERT(!error, ("vmbus_rxbr_read failed"));
923
924	return (0);
925}
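/*
 * Hypothetical callback sketch showing how vmbus_chan_recv() above is
 * typically consumed: keep reading until the RX bufring runs dry.  The
 * fixed-size buffer, and the assumption that an empty ring surfaces as a
 * non-zero error from vmbus_rxbr_peek() (typically EAGAIN), are
 * illustrative.
 */
#if 0
static void
example_rx_callback(struct vmbus_channel *chan, void *arg)
{
	uint8_t buf[256];
	uint64_t xactid;
	int dlen, error;

	for (;;) {
		dlen = sizeof(buf);
		error = vmbus_chan_recv(chan, buf, &dlen, &xactid);
		if (error == ENOBUFS) {
			/* 'dlen' now holds the required payload size. */
			break;
		} else if (error) {
			/* Ring empty or channel trouble; stop for now. */
			break;
		}
		/* Handle 'dlen' bytes of payload for transaction 'xactid'. */
	}
}
#endif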
926
927int
928vmbus_chan_recv_pkt(struct vmbus_channel *chan,
929    struct vmbus_chanpkt_hdr *pkt, int *pktlen0)
930{
931	int error, pktlen, pkt_hlen;
932
933	pkt_hlen = sizeof(*pkt);
934	error = vmbus_rxbr_peek(&chan->ch_rxbr, pkt, pkt_hlen);
935	if (error)
936		return (error);
937
938	if (__predict_false(pkt->cph_hlen < VMBUS_CHANPKT_HLEN_MIN)) {
939		vmbus_chan_printf(chan, "invalid hlen %u\n", pkt->cph_hlen);
940		/* XXX this channel is actually dead. */
941		return (EIO);
942	}
943	if (__predict_false(pkt->cph_hlen > pkt->cph_tlen)) {
944		vmbus_chan_printf(chan, "invalid hlen %u and tlen %u\n",
945		    pkt->cph_hlen, pkt->cph_tlen);
946		/* XXX this channel is actually dead. */
947		return (EIO);
948	}
949
950	pktlen = VMBUS_CHANPKT_GETLEN(pkt->cph_tlen);
951	if (*pktlen0 < pktlen) {
952		/* Return the size of this packet. */
953		*pktlen0 = pktlen;
954		return (ENOBUFS);
955	}
956	*pktlen0 = pktlen;
957
958	/*
959	 * Skip the fixed-size packet header, which has been filled
960	 * by the above vmbus_rxbr_peek().
961	 */
962	error = vmbus_rxbr_read(&chan->ch_rxbr, pkt + 1,
963	    pktlen - pkt_hlen, pkt_hlen);
964	KASSERT(!error, ("vmbus_rxbr_read failed"));
965
966	return (0);
967}
968
969static void
970vmbus_chan_task(void *xchan, int pending __unused)
971{
972	struct vmbus_channel *chan = xchan;
973	vmbus_chan_callback_t cb = chan->ch_cb;
974	void *cbarg = chan->ch_cbarg;
975
976	/*
977	 * Optimize host-to-guest signaling by ensuring that:
978	 * 1. While the channel is being read, interrupts from the host
979	 *    stay disabled.
980	 * 2. All posted messages from the host are processed before this
981	 *    callback returns.
982	 * 3. Once we return, signaling from the host is re-enabled; we
983	 *    then check whether additional packets have become available
984	 *    to read and, if so, repeat the process.
985	 *
986	 * NOTE: The interrupt has already been disabled in the ISR.
987	 */
988	for (;;) {
989		uint32_t left;
990
991		cb(chan, cbarg);
992
993		left = vmbus_rxbr_intr_unmask(&chan->ch_rxbr);
994		if (left == 0) {
995			/* No more data in RX bufring; done */
996			break;
997		}
998		vmbus_rxbr_intr_mask(&chan->ch_rxbr);
999	}
1000}
1001
1002static void
1003vmbus_chan_task_nobatch(void *xchan, int pending __unused)
1004{
1005	struct vmbus_channel *chan = xchan;
1006
1007	chan->ch_cb(chan, chan->ch_cbarg);
1008}
1009
1010static __inline void
1011vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *event_flags,
1012    int flag_cnt)
1013{
1014	int f;
1015
1016	for (f = 0; f < flag_cnt; ++f) {
1017		uint32_t chid_base;
1018		u_long flags;
1019		int chid_ofs;
1020
1021		if (event_flags[f] == 0)
1022			continue;
1023
1024		flags = atomic_swap_long(&event_flags[f], 0);
1025		chid_base = f << VMBUS_EVTFLAG_SHIFT;
1026
1027		while ((chid_ofs = ffsl(flags)) != 0) {
1028			struct vmbus_channel *chan;
1029
1030			--chid_ofs; /* NOTE: ffsl is 1-based */
1031			flags &= ~(1UL << chid_ofs);
1032
1033			chan = sc->vmbus_chmap[chid_base + chid_ofs];
1034			if (__predict_false(chan == NULL)) {
1035				/* Channel is closed. */
1036				continue;
1037			}
1038			__compiler_membar();
1039
1040			if (chan->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD)
1041				vmbus_rxbr_intr_mask(&chan->ch_rxbr);
1042			taskqueue_enqueue(chan->ch_tq, &chan->ch_task);
1043		}
1044	}
1045}
1046
1047void
1048vmbus_event_proc(struct vmbus_softc *sc, int cpu)
1049{
1050	struct vmbus_evtflags *eventf;
1051
1052	/*
1053	 * On hosts running Windows 8 or later, the event page can be checked
1054	 * directly to find the id of the channel with the pending interrupt.
1055	 */
1056	eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE;
1057	vmbus_event_flags_proc(sc, eventf->evt_flags,
1058	    VMBUS_PCPU_GET(sc, event_flags_cnt, cpu));
1059}
1060
1061void
1062vmbus_event_proc_compat(struct vmbus_softc *sc, int cpu)
1063{
1064	struct vmbus_evtflags *eventf;
1065
1066	eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE;
1067	if (atomic_testandclear_long(&eventf->evt_flags[0], 0)) {
1068		vmbus_event_flags_proc(sc, sc->vmbus_rx_evtflags,
1069		    VMBUS_CHAN_MAX_COMPAT >> VMBUS_EVTFLAG_SHIFT);
1070	}
1071}
1072
1073static void
1074vmbus_chan_update_evtflagcnt(struct vmbus_softc *sc,
1075    const struct vmbus_channel *chan)
1076{
1077	volatile int *flag_cnt_ptr;
1078	int flag_cnt;
1079
1080	flag_cnt = (chan->ch_id / VMBUS_EVTFLAG_LEN) + 1;
1081	flag_cnt_ptr = VMBUS_PCPU_PTR(sc, event_flags_cnt, chan->ch_cpuid);
1082
1083	for (;;) {
1084		int old_flag_cnt;
1085
1086		old_flag_cnt = *flag_cnt_ptr;
1087		if (old_flag_cnt >= flag_cnt)
1088			break;
1089		if (atomic_cmpset_int(flag_cnt_ptr, old_flag_cnt, flag_cnt)) {
1090			if (bootverbose) {
1091				vmbus_chan_printf(chan,
1092				    "chan%u update cpu%d flag_cnt to %d\n",
1093				    chan->ch_id, chan->ch_cpuid, flag_cnt);
1094			}
1095			break;
1096		}
1097	}
1098}
1099
1100static struct vmbus_channel *
1101vmbus_chan_alloc(struct vmbus_softc *sc)
1102{
1103	struct vmbus_channel *chan;
1104
1105	chan = malloc(sizeof(*chan), M_DEVBUF, M_WAITOK | M_ZERO);
1106
1107	chan->ch_monprm = hyperv_dmamem_alloc(bus_get_dma_tag(sc->vmbus_dev),
1108	    HYPERCALL_PARAM_ALIGN, 0, sizeof(struct hyperv_mon_param),
1109	    &chan->ch_monprm_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
1110	if (chan->ch_monprm == NULL) {
1111		device_printf(sc->vmbus_dev, "monprm alloc failed\n");
1112		free(chan, M_DEVBUF);
1113		return NULL;
1114	}
1115
1116	chan->ch_vmbus = sc;
1117	mtx_init(&chan->ch_subchan_lock, "vmbus subchan", NULL, MTX_DEF);
1118	TAILQ_INIT(&chan->ch_subchans);
1119	vmbus_rxbr_init(&chan->ch_rxbr);
1120	vmbus_txbr_init(&chan->ch_txbr);
1121
1122	return chan;
1123}
1124
1125static void
1126vmbus_chan_free(struct vmbus_channel *chan)
1127{
1128
1129	KASSERT(TAILQ_EMPTY(&chan->ch_subchans) && chan->ch_subchan_cnt == 0,
1130	    ("still owns sub-channels"));
1131	KASSERT((chan->ch_stflags &
1132	    (VMBUS_CHAN_ST_OPENED |
1133	     VMBUS_CHAN_ST_ONPRIL |
1134	     VMBUS_CHAN_ST_ONSUBL |
1135	     VMBUS_CHAN_ST_ONLIST)) == 0, ("free busy channel"));
1136	hyperv_dmamem_free(&chan->ch_monprm_dma, chan->ch_monprm);
1137	mtx_destroy(&chan->ch_subchan_lock);
1138	vmbus_rxbr_deinit(&chan->ch_rxbr);
1139	vmbus_txbr_deinit(&chan->ch_txbr);
1140	free(chan, M_DEVBUF);
1141}
1142
1143static int
1144vmbus_chan_add(struct vmbus_channel *newchan)
1145{
1146	struct vmbus_softc *sc = newchan->ch_vmbus;
1147	struct vmbus_channel *prichan;
1148
1149	if (newchan->ch_id == 0) {
1150		/*
1151		 * XXX
1152		 * Chan0 is never processed and should never be offered;
1153		 * skip it.
1154		 */
1155		device_printf(sc->vmbus_dev, "got chan0 offer, discard\n");
1156		return EINVAL;
1157	} else if (newchan->ch_id >= VMBUS_CHAN_MAX) {
1158		device_printf(sc->vmbus_dev, "invalid chan%u offer\n",
1159		    newchan->ch_id);
1160		return EINVAL;
1161	}
1162
1163	mtx_lock(&sc->vmbus_prichan_lock);
1164	TAILQ_FOREACH(prichan, &sc->vmbus_prichans, ch_prilink) {
1165		/*
1166		 * A sub-channel has the same type GUID and instance
1167		 * GUID as its primary channel.
1168		 */
1169		if (memcmp(&prichan->ch_guid_type, &newchan->ch_guid_type,
1170		    sizeof(struct hyperv_guid)) == 0 &&
1171		    memcmp(&prichan->ch_guid_inst, &newchan->ch_guid_inst,
1172		    sizeof(struct hyperv_guid)) == 0)
1173			break;
1174	}
1175	if (VMBUS_CHAN_ISPRIMARY(newchan)) {
1176		if (prichan == NULL) {
1177			/* Install the new primary channel */
1178			vmbus_chan_ins_prilist(sc, newchan);
1179			mtx_unlock(&sc->vmbus_prichan_lock);
1180			goto done;
1181		} else {
1182			mtx_unlock(&sc->vmbus_prichan_lock);
1183			device_printf(sc->vmbus_dev,
1184			    "duplicated primary chan%u\n", newchan->ch_id);
1185			return EINVAL;
1186		}
1187	} else { /* Sub-channel */
1188		if (prichan == NULL) {
1189			mtx_unlock(&sc->vmbus_prichan_lock);
1190			device_printf(sc->vmbus_dev,
1191			    "no primary chan for chan%u\n", newchan->ch_id);
1192			return EINVAL;
1193		}
1194		/*
1195		 * Found the primary channel for this sub-channel;
1196		 * move on.
1197		 *
1198		 * XXX refcnt prichan
1199		 */
1200	}
1201	mtx_unlock(&sc->vmbus_prichan_lock);
1202
1203	/*
1204	 * This is a sub-channel; link it with the primary channel.
1205	 */
1206	KASSERT(!VMBUS_CHAN_ISPRIMARY(newchan),
1207	    ("new channel is not sub-channel"));
1208	KASSERT(prichan != NULL, ("no primary channel"));
1209
1210	newchan->ch_prichan = prichan;
1211	newchan->ch_dev = prichan->ch_dev;
1212
1213	mtx_lock(&prichan->ch_subchan_lock);
1214	vmbus_chan_ins_sublist(prichan, newchan);
1215	mtx_unlock(&prichan->ch_subchan_lock);
1216	/*
1217	 * Notify anyone that is interested in this sub-channel,
1218	 * after this sub-channel is setup.
1219	 * after this sub-channel has been set up.
1220	wakeup(prichan);
1221done:
1222	/*
1223	 * Hook this channel up for later rescind.
1224	 */
1225	mtx_lock(&sc->vmbus_chan_lock);
1226	vmbus_chan_ins_list(sc, newchan);
1227	mtx_unlock(&sc->vmbus_chan_lock);
1228
1229	if (bootverbose) {
1230		vmbus_chan_printf(newchan, "chan%u subidx%u offer\n",
1231		    newchan->ch_id, newchan->ch_subidx);
1232	}
1233
1234	/* Select default cpu for this channel. */
1235	vmbus_chan_cpu_default(newchan);
1236
1237	return 0;
1238}
1239
1240void
1241vmbus_chan_cpu_set(struct vmbus_channel *chan, int cpu)
1242{
1243	KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpu %d", cpu));
1244
1245	if (chan->ch_vmbus->vmbus_version == VMBUS_VERSION_WS2008 ||
1246	    chan->ch_vmbus->vmbus_version == VMBUS_VERSION_WIN7) {
1247		/* Only cpu0 is supported */
1248		cpu = 0;
1249	}
1250
1251	chan->ch_cpuid = cpu;
1252	chan->ch_vcpuid = VMBUS_PCPU_GET(chan->ch_vmbus, vcpuid, cpu);
1253
1254	if (bootverbose) {
1255		vmbus_chan_printf(chan,
1256		    "chan%u assigned to cpu%u [vcpu%u]\n",
1257		    chan->ch_id, chan->ch_cpuid, chan->ch_vcpuid);
1258	}
1259}
1260
1261void
1262vmbus_chan_cpu_rr(struct vmbus_channel *chan)
1263{
1264	static uint32_t vmbus_chan_nextcpu;
1265	int cpu;
1266
1267	cpu = atomic_fetchadd_int(&vmbus_chan_nextcpu, 1) % mp_ncpus;
1268	vmbus_chan_cpu_set(chan, cpu);
1269}
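/*
 * Hypothetical sketch: a multi-queue driver spreading its sub-channels
 * across CPUs with vmbus_chan_cpu_rr() before opening them.  The
 * sub-channel count is illustrative.
 */
#if 0
static void
example_distribute_subchans(struct vmbus_channel *pri_chan, int nsub)
{
	struct vmbus_channel **subchans;
	int i;

	subchans = vmbus_subchan_get(pri_chan, nsub);
	for (i = 0; i < nsub; ++i)
		vmbus_chan_cpu_rr(subchans[i]);
	vmbus_subchan_rel(subchans, nsub);
}
#endif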
1270
1271static void
1272vmbus_chan_cpu_default(struct vmbus_channel *chan)
1273{
1274	/*
1275	 * By default, pin the channel to cpu0.  Devices having
1276	 * special channel-cpu mapping requirement should call
1277	 * vmbus_chan_cpu_{set,rr}().
1278	 */
1279	vmbus_chan_cpu_set(chan, 0);
1280}
1281
1282static void
1283vmbus_chan_msgproc_choffer(struct vmbus_softc *sc,
1284    const struct vmbus_message *msg)
1285{
1286	const struct vmbus_chanmsg_choffer *offer;
1287	struct vmbus_channel *chan;
1288	task_fn_t *detach_fn, *attach_fn;
1289	int error;
1290
1291	offer = (const struct vmbus_chanmsg_choffer *)msg->msg_data;
1292
1293	chan = vmbus_chan_alloc(sc);
1294	if (chan == NULL) {
1295		device_printf(sc->vmbus_dev, "allocate chan%u failed\n",
1296		    offer->chm_chanid);
1297		return;
1298	}
1299
1300	chan->ch_id = offer->chm_chanid;
1301	chan->ch_subidx = offer->chm_subidx;
1302	chan->ch_guid_type = offer->chm_chtype;
1303	chan->ch_guid_inst = offer->chm_chinst;
1304
1305	/* Batch reading is on by default */
1306	chan->ch_flags |= VMBUS_CHAN_FLAG_BATCHREAD;
1307
1308	chan->ch_monprm->mp_connid = VMBUS_CONNID_EVENT;
1309	if (sc->vmbus_version != VMBUS_VERSION_WS2008)
1310		chan->ch_monprm->mp_connid = offer->chm_connid;
1311
1312	if (offer->chm_flags1 & VMBUS_CHOFFER_FLAG1_HASMNF) {
1313		int trig_idx;
1314
1315		/*
1316		 * Set up the MNF (monitor notification facility) state.
1317		 */
1318		chan->ch_txflags |= VMBUS_CHAN_TXF_HASMNF;
1319
1320		trig_idx = offer->chm_montrig / VMBUS_MONTRIG_LEN;
1321		if (trig_idx >= VMBUS_MONTRIGS_MAX)
1322			panic("invalid monitor trigger %u", offer->chm_montrig);
1323		chan->ch_montrig =
1324		    &sc->vmbus_mnf2->mnf_trigs[trig_idx].mt_pending;
1325
1326		chan->ch_montrig_mask =
1327		    1 << (offer->chm_montrig % VMBUS_MONTRIG_LEN);
1328	}
1329
1330	/*
1331	 * Set up the event flag.
1332	 */
1333	chan->ch_evtflag =
1334	    &sc->vmbus_tx_evtflags[chan->ch_id >> VMBUS_EVTFLAG_SHIFT];
1335	chan->ch_evtflag_mask = 1UL << (chan->ch_id & VMBUS_EVTFLAG_MASK);
1336
1337	/*
1338	 * Set up the attach and detach tasks.
1339	 */
1340	if (VMBUS_CHAN_ISPRIMARY(chan)) {
1341		chan->ch_mgmt_tq = sc->vmbus_devtq;
1342		attach_fn = vmbus_prichan_attach_task;
1343		detach_fn = vmbus_prichan_detach_task;
1344	} else {
1345		chan->ch_mgmt_tq = sc->vmbus_subchtq;
1346		attach_fn = vmbus_subchan_attach_task;
1347		detach_fn = vmbus_subchan_detach_task;
1348	}
1349	TASK_INIT(&chan->ch_attach_task, 0, attach_fn, chan);
1350	TASK_INIT(&chan->ch_detach_task, 0, detach_fn, chan);
1351
1352	error = vmbus_chan_add(chan);
1353	if (error) {
1354		device_printf(sc->vmbus_dev, "add chan%u failed: %d\n",
1355		    chan->ch_id, error);
1356		vmbus_chan_free(chan);
1357		return;
1358	}
1359	taskqueue_enqueue(chan->ch_mgmt_tq, &chan->ch_attach_task);
1360}
1361
1362static void
1363vmbus_chan_msgproc_chrescind(struct vmbus_softc *sc,
1364    const struct vmbus_message *msg)
1365{
1366	const struct vmbus_chanmsg_chrescind *note;
1367	struct vmbus_channel *chan;
1368
1369	note = (const struct vmbus_chanmsg_chrescind *)msg->msg_data;
1370	if (note->chm_chanid > VMBUS_CHAN_MAX) {
1371		device_printf(sc->vmbus_dev, "invalid rescinded chan%u\n",
1372		    note->chm_chanid);
1373		return;
1374	}
1375
1376	/*
1377	 * Find and remove the target channel from the channel list.
1378	 */
1379	mtx_lock(&sc->vmbus_chan_lock);
1380	TAILQ_FOREACH(chan, &sc->vmbus_chans, ch_link) {
1381		if (chan->ch_id == note->chm_chanid)
1382			break;
1383	}
1384	if (chan == NULL) {
1385		mtx_unlock(&sc->vmbus_chan_lock);
1386		device_printf(sc->vmbus_dev, "chan%u is not offered\n",
1387		    note->chm_chanid);
1388		return;
1389	}
1390	vmbus_chan_rem_list(sc, chan);
1391	mtx_unlock(&sc->vmbus_chan_lock);
1392
1393	if (VMBUS_CHAN_ISPRIMARY(chan)) {
1394		/*
1395		 * The target channel is a primary channel; remove the
1396		 * target channel from the primary channel list now,
1397		 * instead of later, so that it will not be found by
1398		 * other sub-channel offers, which are processed in
1399		 * this thread.
1400		 */
1401		mtx_lock(&sc->vmbus_prichan_lock);
1402		vmbus_chan_rem_prilist(sc, chan);
1403		mtx_unlock(&sc->vmbus_prichan_lock);
1404	}
1405
1406	if (bootverbose)
1407		vmbus_chan_printf(chan, "chan%u rescinded\n", note->chm_chanid);
1408
1409	/* Detach the target channel. */
1410	taskqueue_enqueue(chan->ch_mgmt_tq, &chan->ch_detach_task);
1411}
1412
1413static int
1414vmbus_chan_release(struct vmbus_channel *chan)
1415{
1416	struct vmbus_softc *sc = chan->ch_vmbus;
1417	struct vmbus_chanmsg_chfree *req;
1418	struct vmbus_msghc *mh;
1419	int error;
1420
1421	mh = vmbus_msghc_get(sc, sizeof(*req));
1422	if (mh == NULL) {
1423		vmbus_chan_printf(chan,
1424		    "can not get msg hypercall for chfree(chan%u)\n",
1425		    chan->ch_id);
1426		return (ENXIO);
1427	}
1428
1429	req = vmbus_msghc_dataptr(mh);
1430	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHFREE;
1431	req->chm_chanid = chan->ch_id;
1432
1433	error = vmbus_msghc_exec_noresult(mh);
1434	vmbus_msghc_put(sc, mh);
1435
1436	if (error) {
1437		vmbus_chan_printf(chan,
1438		    "chfree(chan%u) msg hypercall exec failed: %d\n",
1439		    chan->ch_id, error);
1440	} else {
1441		if (bootverbose)
1442			vmbus_chan_printf(chan, "chan%u freed\n", chan->ch_id);
1443	}
1444	return (error);
1445}
1446
1447static void
1448vmbus_prichan_detach_task(void *xchan, int pending __unused)
1449{
1450	struct vmbus_channel *chan = xchan;
1451
1452	KASSERT(VMBUS_CHAN_ISPRIMARY(chan),
1453	    ("chan%u is not primary channel", chan->ch_id));
1454
1455	/* Delete and detach the device associated with this channel. */
1456	vmbus_delete_child(chan);
1457
1458	/* Release this channel (back to vmbus). */
1459	vmbus_chan_release(chan);
1460
1461	/* Free this channel's resources. */
1462	vmbus_chan_free(chan);
1463}
1464
1465static void
1466vmbus_subchan_detach_task(void *xchan, int pending __unused)
1467{
1468	struct vmbus_channel *chan = xchan;
1469	struct vmbus_channel *pri_chan = chan->ch_prichan;
1470
1471	KASSERT(!VMBUS_CHAN_ISPRIMARY(chan),
1472	    ("chan%u is primary channel", chan->ch_id));
1473
1474	/* Release this channel (back to vmbus). */
1475	vmbus_chan_release(chan);
1476
1477	/* Unlink from its primary channel's sub-channel list. */
1478	mtx_lock(&pri_chan->ch_subchan_lock);
1479	vmbus_chan_rem_sublist(pri_chan, chan);
1480	mtx_unlock(&pri_chan->ch_subchan_lock);
1481	/* Notify anyone that is waiting for this sub-channel to vanish. */
1482	wakeup(pri_chan);
1483
1484	/* Free this channel's resources. */
1485	vmbus_chan_free(chan);
1486}
1487
1488static void
1489vmbus_prichan_attach_task(void *xchan, int pending __unused)
1490{
1491
1492	/*
1493	 * Add device for this primary channel.
1494	 */
1495	vmbus_add_child(xchan);
1496}
1497
1498static void
1499vmbus_subchan_attach_task(void *xchan __unused, int pending __unused)
1500{
1501
1502	/* Nothing */
1503}
1504
1505void
1506vmbus_chan_destroy_all(struct vmbus_softc *sc)
1507{
1508
1509	/*
1510	 * Detach all devices and destroy the corresponding primary
1511	 * channels.
1512	 */
1513	for (;;) {
1514		struct vmbus_channel *chan;
1515
1516		mtx_lock(&sc->vmbus_chan_lock);
1517		TAILQ_FOREACH(chan, &sc->vmbus_chans, ch_link) {
1518			if (VMBUS_CHAN_ISPRIMARY(chan))
1519				break;
1520		}
1521		if (chan == NULL) {
1522			/* No more primary channels; done. */
1523			mtx_unlock(&sc->vmbus_chan_lock);
1524			break;
1525		}
1526		vmbus_chan_rem_list(sc, chan);
1527		mtx_unlock(&sc->vmbus_chan_lock);
1528
1529		mtx_lock(&sc->vmbus_prichan_lock);
1530		vmbus_chan_rem_prilist(sc, chan);
1531		mtx_unlock(&sc->vmbus_prichan_lock);
1532
1533		taskqueue_enqueue(chan->ch_mgmt_tq, &chan->ch_detach_task);
1534	}
1535}
1536
1537struct vmbus_channel **
1538vmbus_subchan_get(struct vmbus_channel *pri_chan, int subchan_cnt)
1539{
1540	struct vmbus_channel **ret, *chan;
1541	int i;
1542
1543	KASSERT(subchan_cnt > 0, ("invalid sub-channel count %d", subchan_cnt));
1544
1545	ret = malloc(subchan_cnt * sizeof(struct vmbus_channel *), M_TEMP,
1546	    M_WAITOK);
1547
1548	mtx_lock(&pri_chan->ch_subchan_lock);
1549
1550	while (pri_chan->ch_subchan_cnt < subchan_cnt)
1551		mtx_sleep(pri_chan, &pri_chan->ch_subchan_lock, 0, "subch", 0);
1552
1553	i = 0;
1554	TAILQ_FOREACH(chan, &pri_chan->ch_subchans, ch_sublink) {
1555		/* TODO: refcnt chan */
1556		ret[i] = chan;
1557
1558		++i;
1559		if (i == subchan_cnt)
1560			break;
1561	}
1562	KASSERT(i == subchan_cnt, ("invalid subchan count %d, should be %d",
1563	    pri_chan->ch_subchan_cnt, subchan_cnt));
1564
1565	mtx_unlock(&pri_chan->ch_subchan_lock);
1566
1567	return ret;
1568}
1569
1570void
1571vmbus_subchan_rel(struct vmbus_channel **subchan, int subchan_cnt __unused)
1572{
1573
1574	free(subchan, M_TEMP);
1575}
1576
1577void
1578vmbus_subchan_drain(struct vmbus_channel *pri_chan)
1579{
1580	mtx_lock(&pri_chan->ch_subchan_lock);
1581	while (pri_chan->ch_subchan_cnt > 0)
1582		mtx_sleep(pri_chan, &pri_chan->ch_subchan_lock, 0, "dsubch", 0);
1583	mtx_unlock(&pri_chan->ch_subchan_lock);
1584}
1585
1586void
1587vmbus_chan_msgproc(struct vmbus_softc *sc, const struct vmbus_message *msg)
1588{
1589	vmbus_chanmsg_proc_t msg_proc;
1590	uint32_t msg_type;
1591
1592	msg_type = ((const struct vmbus_chanmsg_hdr *)msg->msg_data)->chm_type;
1593	KASSERT(msg_type < VMBUS_CHANMSG_TYPE_MAX,
1594	    ("invalid message type %u", msg_type));
1595
1596	msg_proc = vmbus_chan_msgprocs[msg_type];
1597	if (msg_proc != NULL)
1598		msg_proc(sc, msg);
1599}
1600
1601void
1602vmbus_chan_set_readbatch(struct vmbus_channel *chan, bool on)
1603{
1604	if (!on)
1605		chan->ch_flags &= ~VMBUS_CHAN_FLAG_BATCHREAD;
1606	else
1607		chan->ch_flags |= VMBUS_CHAN_FLAG_BATCHREAD;
1608}
1609
1610uint32_t
1611vmbus_chan_id(const struct vmbus_channel *chan)
1612{
1613	return chan->ch_id;
1614}
1615
1616uint32_t
1617vmbus_chan_subidx(const struct vmbus_channel *chan)
1618{
1619	return chan->ch_subidx;
1620}
1621
1622bool
1623vmbus_chan_is_primary(const struct vmbus_channel *chan)
1624{
1625	if (VMBUS_CHAN_ISPRIMARY(chan))
1626		return true;
1627	else
1628		return false;
1629}
1630
1631const struct hyperv_guid *
1632vmbus_chan_guid_inst(const struct vmbus_channel *chan)
1633{
1634	return &chan->ch_guid_inst;
1635}
1636
1637int
1638vmbus_chan_prplist_nelem(int br_size, int prpcnt_max, int dlen_max)
1639{
1640	int elem_size;
1641
1642	elem_size = __offsetof(struct vmbus_chanpkt_prplist,
1643	    cp_range[0].gpa_page[prpcnt_max]);
1644	elem_size += dlen_max;
1645	elem_size = VMBUS_CHANPKT_TOTLEN(elem_size);
1646
1647	return (vmbus_br_nelem(br_size, elem_size));
1648}
1649
1650bool
1651vmbus_chan_tx_empty(const struct vmbus_channel *chan)
1652{
1653
1654	return (vmbus_txbr_empty(&chan->ch_txbr));
1655}
1656
1657bool
1658vmbus_chan_rx_empty(const struct vmbus_channel *chan)
1659{
1660
1661	return (vmbus_rxbr_empty(&chan->ch_rxbr));
1662}
1663
1664static int
1665vmbus_chan_printf(const struct vmbus_channel *chan, const char *fmt, ...)
1666{
1667	va_list ap;
1668	device_t dev;
1669	int retval;
1670
1671	if (chan->ch_dev == NULL || !device_is_alive(chan->ch_dev))
1672		dev = chan->ch_vmbus->vmbus_dev;
1673	else
1674		dev = chan->ch_dev;
1675
1676	retval = device_print_prettyname(dev);
1677	va_start(ap, fmt);
1678	retval += vprintf(fmt, ap);
1679	va_end(ap);
1680
1681	return (retval);
1682}
1683
1684void
1685vmbus_chan_run_task(struct vmbus_channel *chan, struct task *task)
1686{
1687
1688	taskqueue_enqueue(chan->ch_tq, task);
1689	taskqueue_drain(chan->ch_tq, task);
1690}
1691
1692struct taskqueue *
1693vmbus_chan_mgmt_tq(const struct vmbus_channel *chan)
1694{
1695
1696	return (chan->ch_mgmt_tq);
1697}
1698