vmbus_chan.c revision 311359
1/*-
2 * Copyright (c) 2009-2012,2016 Microsoft Corp.
3 * Copyright (c) 2012 NetApp Inc.
4 * Copyright (c) 2012 Citrix Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice unmodified, this list of conditions, and the following
12 *    disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: stable/11/sys/dev/hyperv/vmbus/vmbus_chan.c 311359 2017-01-05 04:22:03Z sephe $");
31
32#include <sys/param.h>
33#include <sys/bus.h>
34#include <sys/kernel.h>
35#include <sys/lock.h>
36#include <sys/malloc.h>
37#include <sys/mutex.h>
38#include <sys/smp.h>
39#include <sys/sysctl.h>
40#include <sys/systm.h>
41
42#include <machine/atomic.h>
43#include <machine/stdarg.h>
44
45#include <dev/hyperv/include/hyperv_busdma.h>
46#include <dev/hyperv/include/vmbus_xact.h>
47#include <dev/hyperv/vmbus/hyperv_var.h>
48#include <dev/hyperv/vmbus/vmbus_reg.h>
49#include <dev/hyperv/vmbus/vmbus_var.h>
50#include <dev/hyperv/vmbus/vmbus_brvar.h>
51#include <dev/hyperv/vmbus/vmbus_chanvar.h>
52
53static void			vmbus_chan_update_evtflagcnt(
54				    struct vmbus_softc *,
55				    const struct vmbus_channel *);
56static void			vmbus_chan_close_internal(
57				    struct vmbus_channel *);
58static int			vmbus_chan_sysctl_mnf(SYSCTL_HANDLER_ARGS);
59static void			vmbus_chan_sysctl_create(
60				    struct vmbus_channel *);
61static struct vmbus_channel	*vmbus_chan_alloc(struct vmbus_softc *);
62static void			vmbus_chan_free(struct vmbus_channel *);
63static int			vmbus_chan_add(struct vmbus_channel *);
64static void			vmbus_chan_cpu_default(struct vmbus_channel *);
65static int			vmbus_chan_release(struct vmbus_channel *);
66static void			vmbus_chan_set_chmap(struct vmbus_channel *);
67static void			vmbus_chan_clear_chmap(struct vmbus_channel *);
68static void			vmbus_chan_detach(struct vmbus_channel *);
69
70static void			vmbus_chan_ins_prilist(struct vmbus_softc *,
71				    struct vmbus_channel *);
72static void			vmbus_chan_rem_prilist(struct vmbus_softc *,
73				    struct vmbus_channel *);
74static void			vmbus_chan_ins_list(struct vmbus_softc *,
75				    struct vmbus_channel *);
76static void			vmbus_chan_rem_list(struct vmbus_softc *,
77				    struct vmbus_channel *);
78static void			vmbus_chan_ins_sublist(struct vmbus_channel *,
79				    struct vmbus_channel *);
80static void			vmbus_chan_rem_sublist(struct vmbus_channel *,
81				    struct vmbus_channel *);
82
83static void			vmbus_chan_task(void *, int);
84static void			vmbus_chan_task_nobatch(void *, int);
85static void			vmbus_chan_clrchmap_task(void *, int);
86static void			vmbus_prichan_attach_task(void *, int);
87static void			vmbus_subchan_attach_task(void *, int);
88static void			vmbus_prichan_detach_task(void *, int);
89static void			vmbus_subchan_detach_task(void *, int);
90
91static void			vmbus_chan_msgproc_choffer(struct vmbus_softc *,
92				    const struct vmbus_message *);
93static void			vmbus_chan_msgproc_chrescind(
94				    struct vmbus_softc *,
95				    const struct vmbus_message *);
96
97static int			vmbus_chan_printf(const struct vmbus_channel *,
98				    const char *, ...) __printflike(2, 3);
99
100/*
101 * Vmbus channel message processing.
102 */
103static const vmbus_chanmsg_proc_t
104vmbus_chan_msgprocs[VMBUS_CHANMSG_TYPE_MAX] = {
105	VMBUS_CHANMSG_PROC(CHOFFER,	vmbus_chan_msgproc_choffer),
106	VMBUS_CHANMSG_PROC(CHRESCIND,	vmbus_chan_msgproc_chrescind),
107
108	VMBUS_CHANMSG_PROC_WAKEUP(CHOPEN_RESP),
109	VMBUS_CHANMSG_PROC_WAKEUP(GPADL_CONNRESP),
110	VMBUS_CHANMSG_PROC_WAKEUP(GPADL_DISCONNRESP)
111};
112
113/*
114 * Notify the host that there is data pending on our TX bufring.
115 */
116static __inline void
117vmbus_chan_signal_tx(const struct vmbus_channel *chan)
118{
119	atomic_set_long(chan->ch_evtflag, chan->ch_evtflag_mask);
120	if (chan->ch_txflags & VMBUS_CHAN_TXF_HASMNF)
121		atomic_set_int(chan->ch_montrig, chan->ch_montrig_mask);
122	else
123		hypercall_signal_event(chan->ch_monprm_dma.hv_paddr);
124}
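/*
 * Editor's note (descriptive summary, not in the original source):
 * the channel's bit in the TX event-flag page is always set first;
 * the host is then poked either through its monitor trigger page
 * (if the offer advertised MNF, see VMBUS_CHAN_TXF_HASMNF) or
 * through a direct hypercall_signal_event() hypercall.
 */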
125
126static void
127vmbus_chan_ins_prilist(struct vmbus_softc *sc, struct vmbus_channel *chan)
128{
129
130	mtx_assert(&sc->vmbus_prichan_lock, MA_OWNED);
131	if (atomic_testandset_int(&chan->ch_stflags,
132	    VMBUS_CHAN_ST_ONPRIL_SHIFT))
133		panic("channel is already on the prilist");
134	TAILQ_INSERT_TAIL(&sc->vmbus_prichans, chan, ch_prilink);
135}
136
137static void
138vmbus_chan_rem_prilist(struct vmbus_softc *sc, struct vmbus_channel *chan)
139{
140
141	mtx_assert(&sc->vmbus_prichan_lock, MA_OWNED);
142	if (atomic_testandclear_int(&chan->ch_stflags,
143	    VMBUS_CHAN_ST_ONPRIL_SHIFT) == 0)
144		panic("channel is not on the prilist");
145	TAILQ_REMOVE(&sc->vmbus_prichans, chan, ch_prilink);
146}
147
148static void
149vmbus_chan_ins_sublist(struct vmbus_channel *prichan,
150    struct vmbus_channel *chan)
151{
152
153	mtx_assert(&prichan->ch_subchan_lock, MA_OWNED);
154
155	if (atomic_testandset_int(&chan->ch_stflags,
156	    VMBUS_CHAN_ST_ONSUBL_SHIFT))
157		panic("channel is already on the sublist");
158	TAILQ_INSERT_TAIL(&prichan->ch_subchans, chan, ch_sublink);
159
160	/* Bump sub-channel count. */
161	prichan->ch_subchan_cnt++;
162}
163
164static void
165vmbus_chan_rem_sublist(struct vmbus_channel *prichan,
166    struct vmbus_channel *chan)
167{
168
169	mtx_assert(&prichan->ch_subchan_lock, MA_OWNED);
170
171	KASSERT(prichan->ch_subchan_cnt > 0,
172	    ("invalid subchan_cnt %d", prichan->ch_subchan_cnt));
173	prichan->ch_subchan_cnt--;
174
175	if (atomic_testandclear_int(&chan->ch_stflags,
176	    VMBUS_CHAN_ST_ONSUBL_SHIFT) == 0)
177		panic("channel is not on the sublist");
178	TAILQ_REMOVE(&prichan->ch_subchans, chan, ch_sublink);
179}
180
181static void
182vmbus_chan_ins_list(struct vmbus_softc *sc, struct vmbus_channel *chan)
183{
184
185	mtx_assert(&sc->vmbus_chan_lock, MA_OWNED);
186	if (atomic_testandset_int(&chan->ch_stflags,
187	    VMBUS_CHAN_ST_ONLIST_SHIFT))
188		panic("channel is already on the list");
189	TAILQ_INSERT_TAIL(&sc->vmbus_chans, chan, ch_link);
190}
191
192static void
193vmbus_chan_rem_list(struct vmbus_softc *sc, struct vmbus_channel *chan)
194{
195
196	mtx_assert(&sc->vmbus_chan_lock, MA_OWNED);
197	if (atomic_testandclear_int(&chan->ch_stflags,
198	    VMBUS_CHAN_ST_ONLIST_SHIFT) == 0)
199		panic("channel is not on the list");
200	TAILQ_REMOVE(&sc->vmbus_chans, chan, ch_link);
201}
202
203static int
204vmbus_chan_sysctl_mnf(SYSCTL_HANDLER_ARGS)
205{
206	struct vmbus_channel *chan = arg1;
207	int mnf = 0;
208
209	if (chan->ch_txflags & VMBUS_CHAN_TXF_HASMNF)
210		mnf = 1;
211	return sysctl_handle_int(oidp, &mnf, 0, req);
212}
213
214static void
215vmbus_chan_sysctl_create(struct vmbus_channel *chan)
216{
217	struct sysctl_oid *ch_tree, *chid_tree, *br_tree;
218	struct sysctl_ctx_list *ctx;
219	uint32_t ch_id;
220	char name[16];
221
222	/*
223	 * Add the sysctl nodes related to this channel to the
224	 * channel's own sysctl context, so that they can be
225	 * destroyed independently when this channel is closed,
226	 * which can happen even if the device is not detached.
227	 */
228	ctx = &chan->ch_sysctl_ctx;
229	sysctl_ctx_init(ctx);
230
231	/*
232	 * Create dev.NAME.UNIT.channel tree.
233	 */
234	ch_tree = SYSCTL_ADD_NODE(ctx,
235	    SYSCTL_CHILDREN(device_get_sysctl_tree(chan->ch_dev)),
236	    OID_AUTO, "channel", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
237	if (ch_tree == NULL)
238		return;
239
240	/*
241	 * Create dev.NAME.UNIT.channel.CHANID tree.
242	 */
243	if (VMBUS_CHAN_ISPRIMARY(chan))
244		ch_id = chan->ch_id;
245	else
246		ch_id = chan->ch_prichan->ch_id;
247	snprintf(name, sizeof(name), "%d", ch_id);
248	chid_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(ch_tree),
249	    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
250	if (chid_tree == NULL)
251		return;
252
253	if (!VMBUS_CHAN_ISPRIMARY(chan)) {
254		/*
255		 * Create dev.NAME.UNIT.channel.CHANID.sub tree.
256		 */
257		ch_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(chid_tree),
258		    OID_AUTO, "sub", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
259		if (ch_tree == NULL)
260			return;
261
262		/*
263		 * Create dev.NAME.UNIT.channel.CHANID.sub.SUBIDX tree.
264		 *
265		 * NOTE:
266		 * chid_tree is changed to this new sysctl tree.
267		 */
268		snprintf(name, sizeof(name), "%d", chan->ch_subidx);
269		chid_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(ch_tree),
270		    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
271		if (chid_tree == NULL)
272			return;
273
274		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
275		    "chanid", CTLFLAG_RD, &chan->ch_id, 0, "channel id");
276	}
277
278	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
279	    "cpu", CTLFLAG_RD, &chan->ch_cpuid, 0, "owner CPU id");
280	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
281	    "mnf", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
282	    chan, 0, vmbus_chan_sysctl_mnf, "I",
283	    "has monitor notification facilities");
284
285	br_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
286	    "br", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
287	if (br_tree != NULL) {
288		/*
289		 * Create sysctl tree for RX bufring.
290		 */
291		vmbus_br_sysctl_create(ctx, br_tree, &chan->ch_rxbr.rxbr, "rx");
292		/*
293		 * Create sysctl tree for TX bufring.
294		 */
295		vmbus_br_sysctl_create(ctx, br_tree, &chan->ch_txbr.txbr, "tx");
296	}
297}
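/*
 * Editor's note: an illustrative layout of the nodes created above,
 * assuming a hypothetical "hn0" device with primary chan2 and one
 * sub-channel (subidx 1); not part of the original source.
 *
 *	dev.hn.0.channel.2.cpu
 *	dev.hn.0.channel.2.mnf
 *	dev.hn.0.channel.2.br.{rx,tx}.*
 *	dev.hn.0.channel.2.sub.1.chanid
 *	dev.hn.0.channel.2.sub.1.cpu
 *	dev.hn.0.channel.2.sub.1.br.{rx,tx}.*
 */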
298
299int
300vmbus_chan_open(struct vmbus_channel *chan, int txbr_size, int rxbr_size,
301    const void *udata, int udlen, vmbus_chan_callback_t cb, void *cbarg)
302{
303	struct vmbus_chan_br cbr;
304	int error;
305
306	/*
307	 * Allocate the TX+RX bufrings.
308	 */
309	KASSERT(chan->ch_bufring == NULL, ("bufrings are allocated"));
310	chan->ch_bufring = hyperv_dmamem_alloc(bus_get_dma_tag(chan->ch_dev),
311	    PAGE_SIZE, 0, txbr_size + rxbr_size, &chan->ch_bufring_dma,
312	    BUS_DMA_WAITOK);
313	if (chan->ch_bufring == NULL) {
314		vmbus_chan_printf(chan, "bufring allocation failed\n");
315		return (ENOMEM);
316	}
317
318	cbr.cbr = chan->ch_bufring;
319	cbr.cbr_paddr = chan->ch_bufring_dma.hv_paddr;
320	cbr.cbr_txsz = txbr_size;
321	cbr.cbr_rxsz = rxbr_size;
322
323	error = vmbus_chan_open_br(chan, &cbr, udata, udlen, cb, cbarg);
324	if (error) {
325		hyperv_dmamem_free(&chan->ch_bufring_dma, chan->ch_bufring);
326		chan->ch_bufring = NULL;
327	}
328	return (error);
329}
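/*
 * Editor's note: a minimal open sketch under the constraints checked
 * by vmbus_chan_open_br() below (page-multiple bufring sizes).  The
 * driver callback xxx_chan_cb() and its softc argument 'sc' are
 * hypothetical; not part of the original source.
 *
 *	error = vmbus_chan_open(chan, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *	    NULL, 0, xxx_chan_cb, sc);
 *	if (error) {
 *		device_printf(dev, "failed to open chan%u: %d\n",
 *		    vmbus_chan_id(chan), error);
 *	}
 */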
330
331int
332vmbus_chan_open_br(struct vmbus_channel *chan, const struct vmbus_chan_br *cbr,
333    const void *udata, int udlen, vmbus_chan_callback_t cb, void *cbarg)
334{
335	struct vmbus_softc *sc = chan->ch_vmbus;
336	const struct vmbus_chanmsg_chopen_resp *resp;
337	const struct vmbus_message *msg;
338	struct vmbus_chanmsg_chopen *req;
339	struct vmbus_msghc *mh;
340	uint32_t status;
341	int error, txbr_size, rxbr_size;
342	task_fn_t *task_fn;
343	uint8_t *br;
344
345	if (udlen > VMBUS_CHANMSG_CHOPEN_UDATA_SIZE) {
346		vmbus_chan_printf(chan,
347		    "invalid udata len %d for chan%u\n", udlen, chan->ch_id);
348		return EINVAL;
349	}
350
351	br = cbr->cbr;
352	txbr_size = cbr->cbr_txsz;
353	rxbr_size = cbr->cbr_rxsz;
354	KASSERT((txbr_size & PAGE_MASK) == 0,
355	    ("send bufring size is not a multiple of PAGE_SIZE"));
356	KASSERT((rxbr_size & PAGE_MASK) == 0,
357	    ("recv bufring size is not a multiple of PAGE_SIZE"));
358	KASSERT((cbr->cbr_paddr & PAGE_MASK) == 0,
359	    ("bufring is not page aligned"));
360
361	/*
362	 * Zero out the TX/RX bufrings, in case they were used before.
363	 */
364	memset(br, 0, txbr_size + rxbr_size);
365
366	if (atomic_testandset_int(&chan->ch_stflags,
367	    VMBUS_CHAN_ST_OPENED_SHIFT))
368		panic("double-open chan%u", chan->ch_id);
369
370	chan->ch_cb = cb;
371	chan->ch_cbarg = cbarg;
372
373	vmbus_chan_update_evtflagcnt(sc, chan);
374
375	chan->ch_tq = VMBUS_PCPU_GET(chan->ch_vmbus, event_tq, chan->ch_cpuid);
376	if (chan->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD)
377		task_fn = vmbus_chan_task;
378	else
379		task_fn = vmbus_chan_task_nobatch;
380	TASK_INIT(&chan->ch_task, 0, task_fn, chan);
381
382	/* TX bufring comes first */
383	vmbus_txbr_setup(&chan->ch_txbr, br, txbr_size);
384	/* RX bufring immediately follows TX bufring */
385	vmbus_rxbr_setup(&chan->ch_rxbr, br + txbr_size, rxbr_size);
386
387	/* Create sysctl tree for this channel */
388	vmbus_chan_sysctl_create(chan);
389
390	/*
391	 * Connect the bufrings, both RX and TX, to this channel.
392	 */
393	error = vmbus_chan_gpadl_connect(chan, cbr->cbr_paddr,
394	    txbr_size + rxbr_size, &chan->ch_bufring_gpadl);
395	if (error) {
396		vmbus_chan_printf(chan,
397		    "failed to connect bufring GPADL to chan%u\n", chan->ch_id);
398		goto failed;
399	}
400
401	/*
402	 * Install this channel before it is opened, but after everything
403	 * else has been set up.
404	 */
405	vmbus_chan_set_chmap(chan);
406
407	/*
408	 * Open the channel with the bufring GPADL on the target CPU.
409	 */
410	mh = vmbus_msghc_get(sc, sizeof(*req));
411	if (mh == NULL) {
412		vmbus_chan_printf(chan,
413		    "can not get msg hypercall for chopen(chan%u)\n",
414		    chan->ch_id);
415		error = ENXIO;
416		goto failed;
417	}
418
419	req = vmbus_msghc_dataptr(mh);
420	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHOPEN;
421	req->chm_chanid = chan->ch_id;
422	req->chm_openid = chan->ch_id;
423	req->chm_gpadl = chan->ch_bufring_gpadl;
424	req->chm_vcpuid = chan->ch_vcpuid;
425	req->chm_txbr_pgcnt = txbr_size >> PAGE_SHIFT;
426	if (udlen > 0)
427		memcpy(req->chm_udata, udata, udlen);
428
429	error = vmbus_msghc_exec(sc, mh);
430	if (error) {
431		vmbus_chan_printf(chan,
432		    "chopen(chan%u) msg hypercall exec failed: %d\n",
433		    chan->ch_id, error);
434		vmbus_msghc_put(sc, mh);
435		goto failed;
436	}
437
438	msg = vmbus_msghc_wait_result(sc, mh);
439	resp = (const struct vmbus_chanmsg_chopen_resp *)msg->msg_data;
440	status = resp->chm_status;
441
442	vmbus_msghc_put(sc, mh);
443
444	if (status == 0) {
445		if (bootverbose) {
446			vmbus_chan_printf(chan, "chan%u opened\n", chan->ch_id);
447		}
448		return 0;
449	}
450
451	vmbus_chan_printf(chan, "failed to open chan%u\n", chan->ch_id);
452	error = ENXIO;
453
454failed:
455	vmbus_chan_clear_chmap(chan);
456	if (chan->ch_bufring_gpadl) {
457		vmbus_chan_gpadl_disconnect(chan, chan->ch_bufring_gpadl);
458		chan->ch_bufring_gpadl = 0;
459	}
460	atomic_clear_int(&chan->ch_stflags, VMBUS_CHAN_ST_OPENED);
461	return error;
462}
463
464int
465vmbus_chan_gpadl_connect(struct vmbus_channel *chan, bus_addr_t paddr,
466    int size, uint32_t *gpadl0)
467{
468	struct vmbus_softc *sc = chan->ch_vmbus;
469	struct vmbus_msghc *mh;
470	struct vmbus_chanmsg_gpadl_conn *req;
471	const struct vmbus_message *msg;
472	size_t reqsz;
473	uint32_t gpadl, status;
474	int page_count, range_len, i, cnt, error;
475	uint64_t page_id;
476
477	/*
478	 * Preliminary checks.
479	 */
480
481	KASSERT((size & PAGE_MASK) == 0,
482	    ("invalid GPA size %d, not a multiple of page size", size));
483	page_count = size >> PAGE_SHIFT;
484
485	KASSERT((paddr & PAGE_MASK) == 0,
486	    ("GPA is not page aligned %jx", (uintmax_t)paddr));
487	page_id = paddr >> PAGE_SHIFT;
488
489	range_len = __offsetof(struct vmbus_gpa_range, gpa_page[page_count]);
490	/*
491	 * We don't support multiple GPA ranges.
492	 */
493	if (range_len > UINT16_MAX) {
494		vmbus_chan_printf(chan, "GPA too large, %d pages\n",
495		    page_count);
496		return EOPNOTSUPP;
497	}
498
499	/*
500	 * Allocate GPADL id.
501	 */
502	gpadl = vmbus_gpadl_alloc(sc);
503	*gpadl0 = gpadl;
504
505	/*
506	 * Connect this GPADL to the target channel.
507	 *
508	 * NOTE:
509	 * Since each message can only hold a small set of page
510	 * addresses, several messages may be required to
511	 * complete the connection.
512	 */
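	/*
	 * Editor's note (worked example; the PGMAX values below are
	 * assumptions, check vmbus_reg.h): with
	 * VMBUS_CHANMSG_GPADL_CONN_PGMAX == 26 and
	 * VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX == 28, a 64-page GPADL is
	 * split into one GPADL_CONN message carrying pages 0-25 and
	 * two GPADL_SUBCONN messages carrying pages 26-53 and 54-63.
	 */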
513	if (page_count > VMBUS_CHANMSG_GPADL_CONN_PGMAX)
514		cnt = VMBUS_CHANMSG_GPADL_CONN_PGMAX;
515	else
516		cnt = page_count;
517	page_count -= cnt;
518
519	reqsz = __offsetof(struct vmbus_chanmsg_gpadl_conn,
520	    chm_range.gpa_page[cnt]);
521	mh = vmbus_msghc_get(sc, reqsz);
522	if (mh == NULL) {
523		vmbus_chan_printf(chan,
524		    "can not get msg hypercall for gpadl_conn(chan%u)\n",
525		    chan->ch_id);
526		return EIO;
527	}
528
529	req = vmbus_msghc_dataptr(mh);
530	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_CONN;
531	req->chm_chanid = chan->ch_id;
532	req->chm_gpadl = gpadl;
533	req->chm_range_len = range_len;
534	req->chm_range_cnt = 1;
535	req->chm_range.gpa_len = size;
536	req->chm_range.gpa_ofs = 0;
537	for (i = 0; i < cnt; ++i)
538		req->chm_range.gpa_page[i] = page_id++;
539
540	error = vmbus_msghc_exec(sc, mh);
541	if (error) {
542		vmbus_chan_printf(chan,
543		    "gpadl_conn(chan%u) msg hypercall exec failed: %d\n",
544		    chan->ch_id, error);
545		vmbus_msghc_put(sc, mh);
546		return error;
547	}
548
549	while (page_count > 0) {
550		struct vmbus_chanmsg_gpadl_subconn *subreq;
551
552		if (page_count > VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX)
553			cnt = VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX;
554		else
555			cnt = page_count;
556		page_count -= cnt;
557
558		reqsz = __offsetof(struct vmbus_chanmsg_gpadl_subconn,
559		    chm_gpa_page[cnt]);
560		vmbus_msghc_reset(mh, reqsz);
561
562		subreq = vmbus_msghc_dataptr(mh);
563		subreq->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_SUBCONN;
564		subreq->chm_gpadl = gpadl;
565		for (i = 0; i < cnt; ++i)
566			subreq->chm_gpa_page[i] = page_id++;
567
568		vmbus_msghc_exec_noresult(mh);
569	}
570	KASSERT(page_count == 0, ("invalid page count %d", page_count));
571
572	msg = vmbus_msghc_wait_result(sc, mh);
573	status = ((const struct vmbus_chanmsg_gpadl_connresp *)
574	    msg->msg_data)->chm_status;
575
576	vmbus_msghc_put(sc, mh);
577
578	if (status != 0) {
579		vmbus_chan_printf(chan, "gpadl_conn(chan%u) failed: %u\n",
580		    chan->ch_id, status);
581		return EIO;
582	} else {
583		if (bootverbose) {
584			vmbus_chan_printf(chan,
585			    "gpadl_conn(chan%u) succeeded\n", chan->ch_id);
586		}
587	}
588	return 0;
589}
590
591/*
592 * Disconnect the GPADL from the target channel.
593 */
594int
595vmbus_chan_gpadl_disconnect(struct vmbus_channel *chan, uint32_t gpadl)
596{
597	struct vmbus_softc *sc = chan->ch_vmbus;
598	struct vmbus_msghc *mh;
599	struct vmbus_chanmsg_gpadl_disconn *req;
600	int error;
601
602	mh = vmbus_msghc_get(sc, sizeof(*req));
603	if (mh == NULL) {
604		vmbus_chan_printf(chan,
605		    "can not get msg hypercall for gpadl_disconn(chan%u)\n",
606		    chan->ch_id);
607		return EBUSY;
608	}
609
610	req = vmbus_msghc_dataptr(mh);
611	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_DISCONN;
612	req->chm_chanid = chan->ch_id;
613	req->chm_gpadl = gpadl;
614
615	error = vmbus_msghc_exec(sc, mh);
616	if (error) {
617		vmbus_chan_printf(chan,
618		    "gpadl_disconn(chan%u) msg hypercall exec failed: %d\n",
619		    chan->ch_id, error);
620		vmbus_msghc_put(sc, mh);
621		return error;
622	}
623
624	vmbus_msghc_wait_result(sc, mh);
625	/* Discard result; no useful information */
626	vmbus_msghc_put(sc, mh);
627
628	return 0;
629}
630
631static void
632vmbus_chan_detach(struct vmbus_channel *chan)
633{
634	int refs;
635
636	KASSERT(chan->ch_refs > 0, ("chan%u: invalid refcnt %d",
637	    chan->ch_id, chan->ch_refs));
638	refs = atomic_fetchadd_int(&chan->ch_refs, -1);
639#ifdef INVARIANTS
640	if (VMBUS_CHAN_ISPRIMARY(chan)) {
641		KASSERT(refs == 1, ("chan%u: invalid refcnt %d for prichan",
642		    chan->ch_id, refs + 1));
643	}
644#endif
645	if (refs == 1) {
646		/*
647		 * Detach the target channel.
648		 */
649		if (bootverbose) {
650			vmbus_chan_printf(chan, "chan%u detached\n",
651			    chan->ch_id);
652		}
653		taskqueue_enqueue(chan->ch_mgmt_tq, &chan->ch_detach_task);
654	}
655}
656
657static void
658vmbus_chan_clrchmap_task(void *xchan, int pending __unused)
659{
660	struct vmbus_channel *chan = xchan;
661
662	critical_enter();
663	chan->ch_vmbus->vmbus_chmap[chan->ch_id] = NULL;
664	critical_exit();
665}
666
667static void
668vmbus_chan_clear_chmap(struct vmbus_channel *chan)
669{
670	struct task chmap_task;
671
672	TASK_INIT(&chmap_task, 0, vmbus_chan_clrchmap_task, chan);
673	taskqueue_enqueue(chan->ch_tq, &chmap_task);
674	taskqueue_drain(chan->ch_tq, &chmap_task);
675}
676
677static void
678vmbus_chan_set_chmap(struct vmbus_channel *chan)
679{
680	__compiler_membar();
681	chan->ch_vmbus->vmbus_chmap[chan->ch_id] = chan;
682}
683
684static void
685vmbus_chan_close_internal(struct vmbus_channel *chan)
686{
687	struct vmbus_softc *sc = chan->ch_vmbus;
688	struct vmbus_msghc *mh;
689	struct vmbus_chanmsg_chclose *req;
690	int error;
691
692	/* TODO: stringent check */
693	atomic_clear_int(&chan->ch_stflags, VMBUS_CHAN_ST_OPENED);
694
695	/*
696	 * Free this channel's sysctl tree attached to its device's
697	 * sysctl tree.
698	 */
699	sysctl_ctx_free(&chan->ch_sysctl_ctx);
700
701	/*
702	 * NOTE:
703	 * Order is critical.  This channel _must_ be uninstalled first,
704	 * else the channel task may be enqueued by the IDT after it has
705	 * been drained.
706	 */
707	vmbus_chan_clear_chmap(chan);
708	taskqueue_drain(chan->ch_tq, &chan->ch_task);
709	chan->ch_tq = NULL;
710
711	/*
712	 * Close this channel.
713	 */
714	mh = vmbus_msghc_get(sc, sizeof(*req));
715	if (mh == NULL) {
716		vmbus_chan_printf(chan,
717		    "can not get msg hypercall for chclose(chan%u)\n",
718		    chan->ch_id);
719		return;
720	}
721
722	req = vmbus_msghc_dataptr(mh);
723	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHCLOSE;
724	req->chm_chanid = chan->ch_id;
725
726	error = vmbus_msghc_exec_noresult(mh);
727	vmbus_msghc_put(sc, mh);
728
729	if (error) {
730		vmbus_chan_printf(chan,
731		    "chclose(chan%u) msg hypercall exec failed: %d\n",
732		    chan->ch_id, error);
733		return;
734	} else if (bootverbose) {
735		vmbus_chan_printf(chan, "close chan%u\n", chan->ch_id);
736	}
737
738	/*
739	 * Disconnect the TX+RX bufrings from this channel.
740	 */
741	if (chan->ch_bufring_gpadl) {
742		vmbus_chan_gpadl_disconnect(chan, chan->ch_bufring_gpadl);
743		chan->ch_bufring_gpadl = 0;
744	}
745
746	/*
747	 * Destroy the TX+RX bufrings.
748	 */
749	if (chan->ch_bufring != NULL) {
750		hyperv_dmamem_free(&chan->ch_bufring_dma, chan->ch_bufring);
751		chan->ch_bufring = NULL;
752	}
753}
754
755/*
756 * The caller should make sure that all sub-channels have
757 * been added to 'chan', and that none of the to-be-closed
758 * channels are being opened.
759 */
760void
761vmbus_chan_close(struct vmbus_channel *chan)
762{
763	int subchan_cnt;
764
765	if (!VMBUS_CHAN_ISPRIMARY(chan)) {
766		/*
767		 * Sub-channel is closed when its primary channel
768		 * is closed; done.
769		 */
770		return;
771	}
772
773	/*
774	 * Close all sub-channels, if any.
775	 */
776	subchan_cnt = chan->ch_subchan_cnt;
777	if (subchan_cnt > 0) {
778		struct vmbus_channel **subchan;
779		int i;
780
781		subchan = vmbus_subchan_get(chan, subchan_cnt);
782		for (i = 0; i < subchan_cnt; ++i) {
783			vmbus_chan_close_internal(subchan[i]);
784			/*
785			 * This sub-channel is referenced when it is
786			 * linked to the primary channel; drop that
787			 * reference now.
788			 */
789			vmbus_chan_detach(subchan[i]);
790		}
791		vmbus_subchan_rel(subchan, subchan_cnt);
792	}
793
794	/* Then close the primary channel. */
795	vmbus_chan_close_internal(chan);
796}
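/*
 * Editor's note (descriptive summary, not in the original source):
 * the teardown order implemented above is sub-channels first, then
 * the primary channel; for each channel vmbus_chan_close_internal()
 * unmaps the channel, drains its task, sends CHCLOSE, disconnects
 * the bufring GPADL and finally frees the bufring memory.
 */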
797
798void
799vmbus_chan_intr_drain(struct vmbus_channel *chan)
800{
801
802	taskqueue_drain(chan->ch_tq, &chan->ch_task);
803}
804
805int
806vmbus_chan_send(struct vmbus_channel *chan, uint16_t type, uint16_t flags,
807    void *data, int dlen, uint64_t xactid)
808{
809	struct vmbus_chanpkt pkt;
810	int pktlen, pad_pktlen, hlen, error;
811	uint64_t pad = 0;
812	struct iovec iov[3];
813	boolean_t send_evt;
814
815	hlen = sizeof(pkt);
816	pktlen = hlen + dlen;
817	pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
818	KASSERT(pad_pktlen <= vmbus_txbr_maxpktsz(&chan->ch_txbr),
819	    ("invalid packet size %d", pad_pktlen));
820
821	pkt.cp_hdr.cph_type = type;
822	pkt.cp_hdr.cph_flags = flags;
823	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_hlen, hlen);
824	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_tlen, pad_pktlen);
825	pkt.cp_hdr.cph_xactid = xactid;
826
827	iov[0].iov_base = &pkt;
828	iov[0].iov_len = hlen;
829	iov[1].iov_base = data;
830	iov[1].iov_len = dlen;
831	iov[2].iov_base = &pad;
832	iov[2].iov_len = pad_pktlen - pktlen;
833
834	error = vmbus_txbr_write(&chan->ch_txbr, iov, 3, &send_evt);
835	if (!error && send_evt)
836		vmbus_chan_signal_tx(chan);
837	return error;
838}
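/*
 * Editor's note: a minimal inband-send sketch; the request structure
 * and the transaction id scheme are hypothetical, and the packet type
 * and flag macros are assumed to come from vmbus.h; not part of the
 * original source.
 *
 *	struct xxx_req req;
 *
 *	error = vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
 *	    VMBUS_CHANPKT_FLAG_RC, &req, sizeof(req),
 *	    (uint64_t)(uintptr_t)&req);
 */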
839
840int
841vmbus_chan_send_sglist(struct vmbus_channel *chan,
842    struct vmbus_gpa sg[], int sglen, void *data, int dlen, uint64_t xactid)
843{
844	struct vmbus_chanpkt_sglist pkt;
845	int pktlen, pad_pktlen, hlen, error;
846	struct iovec iov[4];
847	boolean_t send_evt;
848	uint64_t pad = 0;
849
850	hlen = __offsetof(struct vmbus_chanpkt_sglist, cp_gpa[sglen]);
851	pktlen = hlen + dlen;
852	pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
853	KASSERT(pad_pktlen <= vmbus_txbr_maxpktsz(&chan->ch_txbr),
854	    ("invalid packet size %d", pad_pktlen));
855
856	pkt.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
857	pkt.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
858	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_hlen, hlen);
859	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_tlen, pad_pktlen);
860	pkt.cp_hdr.cph_xactid = xactid;
861	pkt.cp_rsvd = 0;
862	pkt.cp_gpa_cnt = sglen;
863
864	iov[0].iov_base = &pkt;
865	iov[0].iov_len = sizeof(pkt);
866	iov[1].iov_base = sg;
867	iov[1].iov_len = sizeof(struct vmbus_gpa) * sglen;
868	iov[2].iov_base = data;
869	iov[2].iov_len = dlen;
870	iov[3].iov_base = &pad;
871	iov[3].iov_len = pad_pktlen - pktlen;
872
873	error = vmbus_txbr_write(&chan->ch_txbr, iov, 4, &send_evt);
874	if (!error && send_evt)
875		vmbus_chan_signal_tx(chan);
876	return error;
877}
878
879int
880vmbus_chan_send_prplist(struct vmbus_channel *chan,
881    struct vmbus_gpa_range *prp, int prp_cnt, void *data, int dlen,
882    uint64_t xactid)
883{
884	struct vmbus_chanpkt_prplist pkt;
885	int pktlen, pad_pktlen, hlen, error;
886	struct iovec iov[4];
887	boolean_t send_evt;
888	uint64_t pad = 0;
889
890	hlen = __offsetof(struct vmbus_chanpkt_prplist,
891	    cp_range[0].gpa_page[prp_cnt]);
892	pktlen = hlen + dlen;
893	pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
894	KASSERT(pad_pktlen <= vmbus_txbr_maxpktsz(&chan->ch_txbr),
895	    ("invalid packet size %d", pad_pktlen));
896
897	pkt.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
898	pkt.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
899	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_hlen, hlen);
900	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_tlen, pad_pktlen);
901	pkt.cp_hdr.cph_xactid = xactid;
902	pkt.cp_rsvd = 0;
903	pkt.cp_range_cnt = 1;
904
905	iov[0].iov_base = &pkt;
906	iov[0].iov_len = sizeof(pkt);
907	iov[1].iov_base = prp;
908	iov[1].iov_len = __offsetof(struct vmbus_gpa_range, gpa_page[prp_cnt]);
909	iov[2].iov_base = data;
910	iov[2].iov_len = dlen;
911	iov[3].iov_base = &pad;
912	iov[3].iov_len = pad_pktlen - pktlen;
913
914	error = vmbus_txbr_write(&chan->ch_txbr, iov, 4, &send_evt);
915	if (!error && send_evt)
916		vmbus_chan_signal_tx(chan);
917	return error;
918}
919
920int
921vmbus_chan_recv(struct vmbus_channel *chan, void *data, int *dlen0,
922    uint64_t *xactid)
923{
924	struct vmbus_chanpkt_hdr pkt;
925	int error, dlen, hlen;
926
927	error = vmbus_rxbr_peek(&chan->ch_rxbr, &pkt, sizeof(pkt));
928	if (error)
929		return (error);
930
931	if (__predict_false(pkt.cph_hlen < VMBUS_CHANPKT_HLEN_MIN)) {
932		vmbus_chan_printf(chan, "invalid hlen %u\n", pkt.cph_hlen);
933		/* XXX This channel is actually dead. */
934		return (EIO);
935	}
936	if (__predict_false(pkt.cph_hlen > pkt.cph_tlen)) {
937		vmbus_chan_printf(chan, "invalid hlen %u and tlen %u\n",
938		    pkt.cph_hlen, pkt.cph_tlen);
939		/* XXX This channel is actually dead. */
940		return (EIO);
941	}
942
943	hlen = VMBUS_CHANPKT_GETLEN(pkt.cph_hlen);
944	dlen = VMBUS_CHANPKT_GETLEN(pkt.cph_tlen) - hlen;
945
946	if (*dlen0 < dlen) {
947		/* Return the size of this packet's data. */
948		*dlen0 = dlen;
949		return (ENOBUFS);
950	}
951
952	*xactid = pkt.cph_xactid;
953	*dlen0 = dlen;
954
955	/* Skip packet header */
956	error = vmbus_rxbr_read(&chan->ch_rxbr, data, dlen, hlen);
957	KASSERT(!error, ("vmbus_rxbr_read failed"));
958
959	return (0);
960}
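/*
 * Editor's note: a typical receive loop inside a channel callback, as
 * registered through vmbus_chan_open(); the buffer, the handlers and
 * the EAGAIN-when-empty return of the bufring layer are assumptions,
 * not part of the original source.
 *
 *	uint64_t xactid;
 *	int dlen, error;
 *
 *	for (;;) {
 *		dlen = sizeof(sc->rxbuf);
 *		error = vmbus_chan_recv(chan, sc->rxbuf, &dlen, &xactid);
 *		if (error == EAGAIN)		// RX bufring drained.
 *			break;
 *		else if (error == ENOBUFS)	// dlen has the needed size.
 *			xxx_handle_toobig(sc, dlen);
 *		else
 *			xxx_handle_pkt(sc, sc->rxbuf, dlen, xactid);
 *	}
 */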
961
962int
963vmbus_chan_recv_pkt(struct vmbus_channel *chan,
964    struct vmbus_chanpkt_hdr *pkt, int *pktlen0)
965{
966	int error, pktlen, pkt_hlen;
967
968	pkt_hlen = sizeof(*pkt);
969	error = vmbus_rxbr_peek(&chan->ch_rxbr, pkt, pkt_hlen);
970	if (error)
971		return (error);
972
973	if (__predict_false(pkt->cph_hlen < VMBUS_CHANPKT_HLEN_MIN)) {
974		vmbus_chan_printf(chan, "invalid hlen %u\n", pkt->cph_hlen);
975		/* XXX This channel is actually dead. */
976		return (EIO);
977	}
978	if (__predict_false(pkt->cph_hlen > pkt->cph_tlen)) {
979		vmbus_chan_printf(chan, "invalid hlen %u and tlen %u\n",
980		    pkt->cph_hlen, pkt->cph_tlen);
981		/* XXX This channel is actually dead. */
982		return (EIO);
983	}
984
985	pktlen = VMBUS_CHANPKT_GETLEN(pkt->cph_tlen);
986	if (*pktlen0 < pktlen) {
987		/* Return the size of this packet. */
988		*pktlen0 = pktlen;
989		return (ENOBUFS);
990	}
991	*pktlen0 = pktlen;
992
993	/*
994	 * Skip the fixed-size packet header, which has been filled
995	 * by the above vmbus_rxbr_peek().
996	 */
997	error = vmbus_rxbr_read(&chan->ch_rxbr, pkt + 1,
998	    pktlen - pkt_hlen, pkt_hlen);
999	KASSERT(!error, ("vmbus_rxbr_read failed"));
1000
1001	return (0);
1002}
1003
1004static void
1005vmbus_chan_task(void *xchan, int pending __unused)
1006{
1007	struct vmbus_channel *chan = xchan;
1008	vmbus_chan_callback_t cb = chan->ch_cb;
1009	void *cbarg = chan->ch_cbarg;
1010
1011	/*
1012	 * Optimize host-to-guest signaling by ensuring:
1013	 * 1. While the channel is being read, interrupts from the host
1014	 *    stay disabled.
1015	 * 2. All posted messages from the host are processed before
1016	 *    returning from this callback.
1017	 * 3. Once interrupts from the host are re-enabled, check whether
1018	 *    additional packets have become available; if so, repeat
1019	 *    the process.
1020	 *
1021	 * NOTE: The interrupt has already been disabled in the ISR.
1022	 */
1023	for (;;) {
1024		uint32_t left;
1025
1026		cb(chan, cbarg);
1027
1028		left = vmbus_rxbr_intr_unmask(&chan->ch_rxbr);
1029		if (left == 0) {
1030			/* No more data in RX bufring; done */
1031			break;
1032		}
1033		vmbus_rxbr_intr_mask(&chan->ch_rxbr);
1034	}
1035}
1036
1037static void
1038vmbus_chan_task_nobatch(void *xchan, int pending __unused)
1039{
1040	struct vmbus_channel *chan = xchan;
1041
1042	chan->ch_cb(chan, chan->ch_cbarg);
1043}
1044
1045static __inline void
1046vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *event_flags,
1047    int flag_cnt)
1048{
1049	int f;
1050
1051	for (f = 0; f < flag_cnt; ++f) {
1052		uint32_t chid_base;
1053		u_long flags;
1054		int chid_ofs;
1055
1056		if (event_flags[f] == 0)
1057			continue;
1058
1059		flags = atomic_swap_long(&event_flags[f], 0);
1060		chid_base = f << VMBUS_EVTFLAG_SHIFT;
1061
1062		while ((chid_ofs = ffsl(flags)) != 0) {
1063			struct vmbus_channel *chan;
1064
1065			--chid_ofs; /* NOTE: ffsl is 1-based */
1066			flags &= ~(1UL << chid_ofs);
1067
1068			chan = sc->vmbus_chmap[chid_base + chid_ofs];
1069			if (__predict_false(chan == NULL)) {
1070				/* Channel is closed. */
1071				continue;
1072			}
1073			__compiler_membar();
1074
1075			if (chan->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD)
1076				vmbus_rxbr_intr_mask(&chan->ch_rxbr);
1077			taskqueue_enqueue(chan->ch_tq, &chan->ch_task);
1078		}
1079	}
1080}
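/*
 * Editor's note (worked example; assumes an LP64 platform where
 * VMBUS_EVTFLAG_SHIFT is 6, i.e. 64 channel bits per long): bit 3 set
 * in event_flags[1] yields chid_base 64 and chid_ofs 3, so chan67's
 * task is enqueued on this CPU's event taskqueue, with its RX bufring
 * interrupt masked first if batched reads are enabled.
 */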
1081
1082void
1083vmbus_event_proc(struct vmbus_softc *sc, int cpu)
1084{
1085	struct vmbus_evtflags *eventf;
1086
1087	/*
1088	 * On hosts running Windows 8 or above, the event page can be checked
1089	 * directly to get the id of the channel that has the pending interrupt.
1090	 */
1091	eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE;
1092	vmbus_event_flags_proc(sc, eventf->evt_flags,
1093	    VMBUS_PCPU_GET(sc, event_flags_cnt, cpu));
1094}
1095
1096void
1097vmbus_event_proc_compat(struct vmbus_softc *sc, int cpu)
1098{
1099	struct vmbus_evtflags *eventf;
1100
1101	eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE;
1102	if (atomic_testandclear_long(&eventf->evt_flags[0], 0)) {
1103		vmbus_event_flags_proc(sc, sc->vmbus_rx_evtflags,
1104		    VMBUS_CHAN_MAX_COMPAT >> VMBUS_EVTFLAG_SHIFT);
1105	}
1106}
1107
1108static void
1109vmbus_chan_update_evtflagcnt(struct vmbus_softc *sc,
1110    const struct vmbus_channel *chan)
1111{
1112	volatile int *flag_cnt_ptr;
1113	int flag_cnt;
1114
1115	flag_cnt = (chan->ch_id / VMBUS_EVTFLAG_LEN) + 1;
1116	flag_cnt_ptr = VMBUS_PCPU_PTR(sc, event_flags_cnt, chan->ch_cpuid);
1117
1118	for (;;) {
1119		int old_flag_cnt;
1120
1121		old_flag_cnt = *flag_cnt_ptr;
1122		if (old_flag_cnt >= flag_cnt)
1123			break;
1124		if (atomic_cmpset_int(flag_cnt_ptr, old_flag_cnt, flag_cnt)) {
1125			if (bootverbose) {
1126				vmbus_chan_printf(chan,
1127				    "chan%u update cpu%d flag_cnt to %d\n",
1128				    chan->ch_id, chan->ch_cpuid, flag_cnt);
1129			}
1130			break;
1131		}
1132	}
1133}
1134
1135static struct vmbus_channel *
1136vmbus_chan_alloc(struct vmbus_softc *sc)
1137{
1138	struct vmbus_channel *chan;
1139
1140	chan = malloc(sizeof(*chan), M_DEVBUF, M_WAITOK | M_ZERO);
1141
1142	chan->ch_monprm = hyperv_dmamem_alloc(bus_get_dma_tag(sc->vmbus_dev),
1143	    HYPERCALL_PARAM_ALIGN, 0, sizeof(struct hyperv_mon_param),
1144	    &chan->ch_monprm_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
1145	if (chan->ch_monprm == NULL) {
1146		device_printf(sc->vmbus_dev, "monprm alloc failed\n");
1147		free(chan, M_DEVBUF);
1148		return NULL;
1149	}
1150
1151	chan->ch_refs = 1;
1152	chan->ch_vmbus = sc;
1153	mtx_init(&chan->ch_subchan_lock, "vmbus subchan", NULL, MTX_DEF);
1154	sx_init(&chan->ch_orphan_lock, "vmbus chorphan");
1155	TAILQ_INIT(&chan->ch_subchans);
1156	vmbus_rxbr_init(&chan->ch_rxbr);
1157	vmbus_txbr_init(&chan->ch_txbr);
1158
1159	return chan;
1160}
1161
1162static void
1163vmbus_chan_free(struct vmbus_channel *chan)
1164{
1165
1166	KASSERT(TAILQ_EMPTY(&chan->ch_subchans) && chan->ch_subchan_cnt == 0,
1167	    ("still owns sub-channels"));
1168	KASSERT((chan->ch_stflags &
1169	    (VMBUS_CHAN_ST_OPENED |
1170	     VMBUS_CHAN_ST_ONPRIL |
1171	     VMBUS_CHAN_ST_ONSUBL |
1172	     VMBUS_CHAN_ST_ONLIST)) == 0, ("free busy channel"));
1173	KASSERT(chan->ch_orphan_xact == NULL,
1174	    ("still has orphan xact installed"));
1175	KASSERT(chan->ch_refs == 0, ("chan%u: invalid refcnt %d",
1176	    chan->ch_id, chan->ch_refs));
1177
1178	hyperv_dmamem_free(&chan->ch_monprm_dma, chan->ch_monprm);
1179	mtx_destroy(&chan->ch_subchan_lock);
1180	sx_destroy(&chan->ch_orphan_lock);
1181	vmbus_rxbr_deinit(&chan->ch_rxbr);
1182	vmbus_txbr_deinit(&chan->ch_txbr);
1183	free(chan, M_DEVBUF);
1184}
1185
1186static int
1187vmbus_chan_add(struct vmbus_channel *newchan)
1188{
1189	struct vmbus_softc *sc = newchan->ch_vmbus;
1190	struct vmbus_channel *prichan;
1191
1192	if (newchan->ch_id == 0) {
1193		/*
1194		 * XXX
1195		 * Chan0 is neither processed nor supposed to be offered;
1196		 * skip it.
1197		 */
1198		device_printf(sc->vmbus_dev, "got chan0 offer, discard\n");
1199		return EINVAL;
1200	} else if (newchan->ch_id >= VMBUS_CHAN_MAX) {
1201		device_printf(sc->vmbus_dev, "invalid chan%u offer\n",
1202		    newchan->ch_id);
1203		return EINVAL;
1204	}
1205
1206	mtx_lock(&sc->vmbus_prichan_lock);
1207	TAILQ_FOREACH(prichan, &sc->vmbus_prichans, ch_prilink) {
1208		/*
1209		 * Sub-channel will have the same type GUID and instance
1210		 * GUID as its primary channel.
1211		 */
1212		if (memcmp(&prichan->ch_guid_type, &newchan->ch_guid_type,
1213		    sizeof(struct hyperv_guid)) == 0 &&
1214		    memcmp(&prichan->ch_guid_inst, &newchan->ch_guid_inst,
1215		    sizeof(struct hyperv_guid)) == 0)
1216			break;
1217	}
1218	if (VMBUS_CHAN_ISPRIMARY(newchan)) {
1219		if (prichan == NULL) {
1220			/* Install the new primary channel */
1221			vmbus_chan_ins_prilist(sc, newchan);
1222			mtx_unlock(&sc->vmbus_prichan_lock);
1223			goto done;
1224		} else {
1225			mtx_unlock(&sc->vmbus_prichan_lock);
1226			device_printf(sc->vmbus_dev,
1227			    "duplicated primary chan%u\n", newchan->ch_id);
1228			return EINVAL;
1229		}
1230	} else { /* Sub-channel */
1231		if (prichan == NULL) {
1232			mtx_unlock(&sc->vmbus_prichan_lock);
1233			device_printf(sc->vmbus_dev,
1234			    "no primary chan for chan%u\n", newchan->ch_id);
1235			return EINVAL;
1236		}
1237		/*
1238		 * Found the primary channel for this sub-channel and
1239		 * move on.
1240		 *
1241		 * XXX refcnt prichan
1242		 */
1243	}
1244	mtx_unlock(&sc->vmbus_prichan_lock);
1245
1246	/*
1247	 * This is a sub-channel; link it with the primary channel.
1248	 */
1249	KASSERT(!VMBUS_CHAN_ISPRIMARY(newchan),
1250	    ("new channel is not sub-channel"));
1251	KASSERT(prichan != NULL, ("no primary channel"));
1252
1253	/*
1254	 * Reference count this sub-channel; it will be dereferenced
1255	 * when this sub-channel is closed.
1256	 */
1257	KASSERT(newchan->ch_refs == 1, ("chan%u: invalid refcnt %d",
1258	    newchan->ch_id, newchan->ch_refs));
1259	atomic_add_int(&newchan->ch_refs, 1);
1260
1261	newchan->ch_prichan = prichan;
1262	newchan->ch_dev = prichan->ch_dev;
1263
1264	mtx_lock(&prichan->ch_subchan_lock);
1265	vmbus_chan_ins_sublist(prichan, newchan);
1266	mtx_unlock(&prichan->ch_subchan_lock);
1267	/*
1268	 * Notify anyone that is interested in this sub-channel,
1269	 * after this sub-channel is set up.
1270	 */
1271	wakeup(prichan);
1272done:
1273	/*
1274	 * Hook this channel up for later revocation.
1275	 */
1276	mtx_lock(&sc->vmbus_chan_lock);
1277	vmbus_chan_ins_list(sc, newchan);
1278	mtx_unlock(&sc->vmbus_chan_lock);
1279
1280	if (bootverbose) {
1281		vmbus_chan_printf(newchan, "chan%u subidx%u offer\n",
1282		    newchan->ch_id, newchan->ch_subidx);
1283	}
1284
1285	/* Select default cpu for this channel. */
1286	vmbus_chan_cpu_default(newchan);
1287
1288	return 0;
1289}
1290
1291void
1292vmbus_chan_cpu_set(struct vmbus_channel *chan, int cpu)
1293{
1294	KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpu %d", cpu));
1295
1296	if (chan->ch_vmbus->vmbus_version == VMBUS_VERSION_WS2008 ||
1297	    chan->ch_vmbus->vmbus_version == VMBUS_VERSION_WIN7) {
1298		/* Only cpu0 is supported */
1299		cpu = 0;
1300	}
1301
1302	chan->ch_cpuid = cpu;
1303	chan->ch_vcpuid = VMBUS_PCPU_GET(chan->ch_vmbus, vcpuid, cpu);
1304
1305	if (bootverbose) {
1306		vmbus_chan_printf(chan,
1307		    "chan%u assigned to cpu%u [vcpu%u]\n",
1308		    chan->ch_id, chan->ch_cpuid, chan->ch_vcpuid);
1309	}
1310}
1311
1312void
1313vmbus_chan_cpu_rr(struct vmbus_channel *chan)
1314{
1315	static uint32_t vmbus_chan_nextcpu;
1316	int cpu;
1317
1318	cpu = atomic_fetchadd_int(&vmbus_chan_nextcpu, 1) % mp_ncpus;
1319	vmbus_chan_cpu_set(chan, cpu);
1320}
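/*
 * Editor's note: a multi-queue driver would typically distribute its
 * sub-channels across CPUs before opening them, e.g. (sketch; "nsubch"
 * and "subch" are hypothetical):
 *
 *	for (i = 0; i < nsubch; ++i)
 *		vmbus_chan_cpu_rr(subch[i]);
 */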
1321
1322static void
1323vmbus_chan_cpu_default(struct vmbus_channel *chan)
1324{
1325	/*
1326	 * By default, pin the channel to cpu0.  Devices with special
1327	 * channel-cpu mapping requirements should call
1328	 * vmbus_chan_cpu_{set,rr}().
1329	 */
1330	vmbus_chan_cpu_set(chan, 0);
1331}
1332
1333static void
1334vmbus_chan_msgproc_choffer(struct vmbus_softc *sc,
1335    const struct vmbus_message *msg)
1336{
1337	const struct vmbus_chanmsg_choffer *offer;
1338	struct vmbus_channel *chan;
1339	task_fn_t *detach_fn, *attach_fn;
1340	int error;
1341
1342	offer = (const struct vmbus_chanmsg_choffer *)msg->msg_data;
1343
1344	chan = vmbus_chan_alloc(sc);
1345	if (chan == NULL) {
1346		device_printf(sc->vmbus_dev, "allocate chan%u failed\n",
1347		    offer->chm_chanid);
1348		return;
1349	}
1350
1351	chan->ch_id = offer->chm_chanid;
1352	chan->ch_subidx = offer->chm_subidx;
1353	chan->ch_guid_type = offer->chm_chtype;
1354	chan->ch_guid_inst = offer->chm_chinst;
1355
1356	/* Batch reading is on by default */
1357	chan->ch_flags |= VMBUS_CHAN_FLAG_BATCHREAD;
1358
1359	chan->ch_monprm->mp_connid = VMBUS_CONNID_EVENT;
1360	if (sc->vmbus_version != VMBUS_VERSION_WS2008)
1361		chan->ch_monprm->mp_connid = offer->chm_connid;
1362
1363	if (offer->chm_flags1 & VMBUS_CHOFFER_FLAG1_HASMNF) {
1364		int trig_idx;
1365
1366		/*
1367		 * Set up the MNF (monitor notification facility) fields.
1368		 */
1369		chan->ch_txflags |= VMBUS_CHAN_TXF_HASMNF;
1370
1371		trig_idx = offer->chm_montrig / VMBUS_MONTRIG_LEN;
1372		if (trig_idx >= VMBUS_MONTRIGS_MAX)
1373			panic("invalid monitor trigger %u", offer->chm_montrig);
1374		chan->ch_montrig =
1375		    &sc->vmbus_mnf2->mnf_trigs[trig_idx].mt_pending;
1376
1377		chan->ch_montrig_mask =
1378		    1 << (offer->chm_montrig % VMBUS_MONTRIG_LEN);
1379	}
1380
1381	/*
1382	 * Set up the event flag.
1383	 */
1384	chan->ch_evtflag =
1385	    &sc->vmbus_tx_evtflags[chan->ch_id >> VMBUS_EVTFLAG_SHIFT];
1386	chan->ch_evtflag_mask = 1UL << (chan->ch_id & VMBUS_EVTFLAG_MASK);
1387
1388	/*
1389	 * Set up the attach and detach tasks.
1390	 */
1391	if (VMBUS_CHAN_ISPRIMARY(chan)) {
1392		chan->ch_mgmt_tq = sc->vmbus_devtq;
1393		attach_fn = vmbus_prichan_attach_task;
1394		detach_fn = vmbus_prichan_detach_task;
1395	} else {
1396		chan->ch_mgmt_tq = sc->vmbus_subchtq;
1397		attach_fn = vmbus_subchan_attach_task;
1398		detach_fn = vmbus_subchan_detach_task;
1399	}
1400	TASK_INIT(&chan->ch_attach_task, 0, attach_fn, chan);
1401	TASK_INIT(&chan->ch_detach_task, 0, detach_fn, chan);
1402
1403	error = vmbus_chan_add(chan);
1404	if (error) {
1405		device_printf(sc->vmbus_dev, "add chan%u failed: %d\n",
1406		    chan->ch_id, error);
1407		atomic_subtract_int(&chan->ch_refs, 1);
1408		vmbus_chan_free(chan);
1409		return;
1410	}
1411	taskqueue_enqueue(chan->ch_mgmt_tq, &chan->ch_attach_task);
1412}
1413
1414static void
1415vmbus_chan_msgproc_chrescind(struct vmbus_softc *sc,
1416    const struct vmbus_message *msg)
1417{
1418	const struct vmbus_chanmsg_chrescind *note;
1419	struct vmbus_channel *chan;
1420
1421	note = (const struct vmbus_chanmsg_chrescind *)msg->msg_data;
1422	if (note->chm_chanid > VMBUS_CHAN_MAX) {
1423		device_printf(sc->vmbus_dev, "invalid revoked chan%u\n",
1424		    note->chm_chanid);
1425		return;
1426	}
1427
1428	/*
1429	 * Find and remove the target channel from the channel list.
1430	 */
1431	mtx_lock(&sc->vmbus_chan_lock);
1432	TAILQ_FOREACH(chan, &sc->vmbus_chans, ch_link) {
1433		if (chan->ch_id == note->chm_chanid)
1434			break;
1435	}
1436	if (chan == NULL) {
1437		mtx_unlock(&sc->vmbus_chan_lock);
1438		device_printf(sc->vmbus_dev, "chan%u is not offered\n",
1439		    note->chm_chanid);
1440		return;
1441	}
1442	vmbus_chan_rem_list(sc, chan);
1443	mtx_unlock(&sc->vmbus_chan_lock);
1444
1445	if (VMBUS_CHAN_ISPRIMARY(chan)) {
1446		/*
1447		 * The target channel is a primary channel; remove the
1448		 * target channel from the primary channel list now,
1449		 * instead of later, so that it will not be found by
1450		 * other sub-channel offers, which are processed in
1451		 * this thread.
1452		 */
1453		mtx_lock(&sc->vmbus_prichan_lock);
1454		vmbus_chan_rem_prilist(sc, chan);
1455		mtx_unlock(&sc->vmbus_prichan_lock);
1456	}
1457
1458	/*
1459	 * NOTE:
1460	 * The following processing order is critical:
1461	 * Set the REVOKED state flag before orphaning the installed xact.
1462	 */
1463
1464	if (atomic_testandset_int(&chan->ch_stflags,
1465	    VMBUS_CHAN_ST_REVOKED_SHIFT))
1466		panic("channel has already been revoked");
1467
1468	sx_xlock(&chan->ch_orphan_lock);
1469	if (chan->ch_orphan_xact != NULL)
1470		vmbus_xact_ctx_orphan(chan->ch_orphan_xact);
1471	sx_xunlock(&chan->ch_orphan_lock);
1472
1473	if (bootverbose)
1474		vmbus_chan_printf(chan, "chan%u revoked\n", note->chm_chanid);
1475	vmbus_chan_detach(chan);
1476}
1477
1478static int
1479vmbus_chan_release(struct vmbus_channel *chan)
1480{
1481	struct vmbus_softc *sc = chan->ch_vmbus;
1482	struct vmbus_chanmsg_chfree *req;
1483	struct vmbus_msghc *mh;
1484	int error;
1485
1486	mh = vmbus_msghc_get(sc, sizeof(*req));
1487	if (mh == NULL) {
1488		vmbus_chan_printf(chan,
1489		    "can not get msg hypercall for chfree(chan%u)\n",
1490		    chan->ch_id);
1491		return (ENXIO);
1492	}
1493
1494	req = vmbus_msghc_dataptr(mh);
1495	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHFREE;
1496	req->chm_chanid = chan->ch_id;
1497
1498	error = vmbus_msghc_exec_noresult(mh);
1499	vmbus_msghc_put(sc, mh);
1500
1501	if (error) {
1502		vmbus_chan_printf(chan,
1503		    "chfree(chan%u) msg hypercall exec failed: %d\n",
1504		    chan->ch_id, error);
1505	} else {
1506		if (bootverbose)
1507			vmbus_chan_printf(chan, "chan%u freed\n", chan->ch_id);
1508	}
1509	return (error);
1510}
1511
1512static void
1513vmbus_prichan_detach_task(void *xchan, int pending __unused)
1514{
1515	struct vmbus_channel *chan = xchan;
1516
1517	KASSERT(VMBUS_CHAN_ISPRIMARY(chan),
1518	    ("chan%u is not primary channel", chan->ch_id));
1519
1520	/* Delete and detach the device associated with this channel. */
1521	vmbus_delete_child(chan);
1522
1523	/* Release this channel (back to vmbus). */
1524	vmbus_chan_release(chan);
1525
1526	/* Free this channel's resource. */
1527	vmbus_chan_free(chan);
1528}
1529
1530static void
1531vmbus_subchan_detach_task(void *xchan, int pending __unused)
1532{
1533	struct vmbus_channel *chan = xchan;
1534	struct vmbus_channel *pri_chan = chan->ch_prichan;
1535
1536	KASSERT(!VMBUS_CHAN_ISPRIMARY(chan),
1537	    ("chan%u is primary channel", chan->ch_id));
1538
1539	/* Release this channel (back to vmbus). */
1540	vmbus_chan_release(chan);
1541
1542	/* Unlink from its primary channel's sub-channel list. */
1543	mtx_lock(&pri_chan->ch_subchan_lock);
1544	vmbus_chan_rem_sublist(pri_chan, chan);
1545	mtx_unlock(&pri_chan->ch_subchan_lock);
1546	/* Notify anyone that is waiting for this sub-channel to vanish. */
1547	wakeup(pri_chan);
1548
1549	/* Free this channel's resource. */
1550	vmbus_chan_free(chan);
1551}
1552
1553static void
1554vmbus_prichan_attach_task(void *xchan, int pending __unused)
1555{
1556
1557	/*
1558	 * Add device for this primary channel.
1559	 */
1560	vmbus_add_child(xchan);
1561}
1562
1563static void
1564vmbus_subchan_attach_task(void *xchan __unused, int pending __unused)
1565{
1566
1567	/* Nothing */
1568}
1569
1570void
1571vmbus_chan_destroy_all(struct vmbus_softc *sc)
1572{
1573
1574	/*
1575	 * Detach all devices and destroy the corresponding primary
1576	 * channels.
1577	 */
1578	for (;;) {
1579		struct vmbus_channel *chan;
1580
1581		mtx_lock(&sc->vmbus_chan_lock);
1582		TAILQ_FOREACH(chan, &sc->vmbus_chans, ch_link) {
1583			if (VMBUS_CHAN_ISPRIMARY(chan))
1584				break;
1585		}
1586		if (chan == NULL) {
1587			/* No more primary channels; done. */
1588			mtx_unlock(&sc->vmbus_chan_lock);
1589			break;
1590		}
1591		vmbus_chan_rem_list(sc, chan);
1592		mtx_unlock(&sc->vmbus_chan_lock);
1593
1594		mtx_lock(&sc->vmbus_prichan_lock);
1595		vmbus_chan_rem_prilist(sc, chan);
1596		mtx_unlock(&sc->vmbus_prichan_lock);
1597
1598		taskqueue_enqueue(chan->ch_mgmt_tq, &chan->ch_detach_task);
1599	}
1600}
1601
1602struct vmbus_channel **
1603vmbus_subchan_get(struct vmbus_channel *pri_chan, int subchan_cnt)
1604{
1605	struct vmbus_channel **ret, *chan;
1606	int i;
1607
1608	KASSERT(subchan_cnt > 0, ("invalid sub-channel count %d", subchan_cnt));
1609
1610	ret = malloc(subchan_cnt * sizeof(struct vmbus_channel *), M_TEMP,
1611	    M_WAITOK);
1612
1613	mtx_lock(&pri_chan->ch_subchan_lock);
1614
1615	while (pri_chan->ch_subchan_cnt < subchan_cnt)
1616		mtx_sleep(pri_chan, &pri_chan->ch_subchan_lock, 0, "subch", 0);
1617
1618	i = 0;
1619	TAILQ_FOREACH(chan, &pri_chan->ch_subchans, ch_sublink) {
1620		/* TODO: refcnt chan */
1621		ret[i] = chan;
1622
1623		++i;
1624		if (i == subchan_cnt)
1625			break;
1626	}
1627	KASSERT(i == subchan_cnt, ("invalid subchan count %d, should be %d",
1628	    i, subchan_cnt));
1629
1630	mtx_unlock(&pri_chan->ch_subchan_lock);
1631
1632	return ret;
1633}
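/*
 * Editor's note: typical usage pattern (sketch; xxx_chan_cb(), 'sc'
 * and the bufring sizes are hypothetical).  The driver first asks its
 * device to create sub-channels through a device-specific request,
 * then:
 *
 *	subch = vmbus_subchan_get(pri_chan, nsubch);
 *	for (i = 0; i < nsubch; ++i) {
 *		error = vmbus_chan_open(subch[i], 4 * PAGE_SIZE,
 *		    4 * PAGE_SIZE, NULL, 0, xxx_chan_cb, sc);
 *	}
 *	vmbus_subchan_rel(subch, nsubch);
 *
 * Note that vmbus_subchan_get() blocks until all of the requested
 * sub-channels have been offered by the host.
 */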
1634
1635void
1636vmbus_subchan_rel(struct vmbus_channel **subchan, int subchan_cnt __unused)
1637{
1638
1639	free(subchan, M_TEMP);
1640}
1641
1642void
1643vmbus_subchan_drain(struct vmbus_channel *pri_chan)
1644{
1645	mtx_lock(&pri_chan->ch_subchan_lock);
1646	while (pri_chan->ch_subchan_cnt > 0)
1647		mtx_sleep(pri_chan, &pri_chan->ch_subchan_lock, 0, "dsubch", 0);
1648	mtx_unlock(&pri_chan->ch_subchan_lock);
1649}
1650
1651void
1652vmbus_chan_msgproc(struct vmbus_softc *sc, const struct vmbus_message *msg)
1653{
1654	vmbus_chanmsg_proc_t msg_proc;
1655	uint32_t msg_type;
1656
1657	msg_type = ((const struct vmbus_chanmsg_hdr *)msg->msg_data)->chm_type;
1658	KASSERT(msg_type < VMBUS_CHANMSG_TYPE_MAX,
1659	    ("invalid message type %u", msg_type));
1660
1661	msg_proc = vmbus_chan_msgprocs[msg_type];
1662	if (msg_proc != NULL)
1663		msg_proc(sc, msg);
1664}
1665
1666void
1667vmbus_chan_set_readbatch(struct vmbus_channel *chan, bool on)
1668{
1669	if (!on)
1670		chan->ch_flags &= ~VMBUS_CHAN_FLAG_BATCHREAD;
1671	else
1672		chan->ch_flags |= VMBUS_CHAN_FLAG_BATCHREAD;
1673}
1674
1675uint32_t
1676vmbus_chan_id(const struct vmbus_channel *chan)
1677{
1678	return chan->ch_id;
1679}
1680
1681uint32_t
1682vmbus_chan_subidx(const struct vmbus_channel *chan)
1683{
1684	return chan->ch_subidx;
1685}
1686
1687bool
1688vmbus_chan_is_primary(const struct vmbus_channel *chan)
1689{
1690	if (VMBUS_CHAN_ISPRIMARY(chan))
1691		return true;
1692	else
1693		return false;
1694}
1695
1696const struct hyperv_guid *
1697vmbus_chan_guid_inst(const struct vmbus_channel *chan)
1698{
1699	return &chan->ch_guid_inst;
1700}
1701
1702int
1703vmbus_chan_prplist_nelem(int br_size, int prpcnt_max, int dlen_max)
1704{
1705	int elem_size;
1706
1707	elem_size = __offsetof(struct vmbus_chanpkt_prplist,
1708	    cp_range[0].gpa_page[prpcnt_max]);
1709	elem_size += dlen_max;
1710	elem_size = VMBUS_CHANPKT_TOTLEN(elem_size);
1711
1712	return (vmbus_br_nelem(br_size, elem_size));
1713}
1714
1715bool
1716vmbus_chan_tx_empty(const struct vmbus_channel *chan)
1717{
1718
1719	return (vmbus_txbr_empty(&chan->ch_txbr));
1720}
1721
1722bool
1723vmbus_chan_rx_empty(const struct vmbus_channel *chan)
1724{
1725
1726	return (vmbus_rxbr_empty(&chan->ch_rxbr));
1727}
1728
1729static int
1730vmbus_chan_printf(const struct vmbus_channel *chan, const char *fmt, ...)
1731{
1732	va_list ap;
1733	device_t dev;
1734	int retval;
1735
1736	if (chan->ch_dev == NULL || !device_is_alive(chan->ch_dev))
1737		dev = chan->ch_vmbus->vmbus_dev;
1738	else
1739		dev = chan->ch_dev;
1740
1741	retval = device_print_prettyname(dev);
1742	va_start(ap, fmt);
1743	retval += vprintf(fmt, ap);
1744	va_end(ap);
1745
1746	return (retval);
1747}
1748
1749void
1750vmbus_chan_run_task(struct vmbus_channel *chan, struct task *task)
1751{
1752
1753	taskqueue_enqueue(chan->ch_tq, task);
1754	taskqueue_drain(chan->ch_tq, task);
1755}
1756
1757struct taskqueue *
1758vmbus_chan_mgmt_tq(const struct vmbus_channel *chan)
1759{
1760
1761	return (chan->ch_mgmt_tq);
1762}
1763
1764bool
1765vmbus_chan_is_revoked(const struct vmbus_channel *chan)
1766{
1767
1768	if (chan->ch_stflags & VMBUS_CHAN_ST_REVOKED)
1769		return (true);
1770	return (false);
1771}
1772
1773void
1774vmbus_chan_set_orphan(struct vmbus_channel *chan, struct vmbus_xact_ctx *xact)
1775{
1776
1777	sx_xlock(&chan->ch_orphan_lock);
1778	chan->ch_orphan_xact = xact;
1779	sx_xunlock(&chan->ch_orphan_lock);
1780}
1781
1782void
1783vmbus_chan_unset_orphan(struct vmbus_channel *chan)
1784{
1785
1786	sx_xlock(&chan->ch_orphan_lock);
1787	chan->ch_orphan_xact = NULL;
1788	sx_xunlock(&chan->ch_orphan_lock);
1789}
1790