cnex.c revision 7656:2621e50fdf4a
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26
27/*
28 * Logical domain channel devices are devices implemented entirely
29 * in software; cnex is the nexus for channel-devices. They use
30 * the HV channel interfaces via the LDC transport module to send
31 * and receive data and to register callbacks.
32 */
33
34#include <sys/types.h>
35#include <sys/cmn_err.h>
36#include <sys/conf.h>
37#include <sys/ddi.h>
38#include <sys/ddi_impldefs.h>
39#include <sys/devops.h>
40#include <sys/instance.h>
41#include <sys/modctl.h>
42#include <sys/open.h>
43#include <sys/stat.h>
44#include <sys/sunddi.h>
45#include <sys/sunndi.h>
46#include <sys/systm.h>
47#include <sys/mkdev.h>
48#include <sys/machsystm.h>
49#include <sys/intreg.h>
50#include <sys/intr.h>
51#include <sys/ddi_intr_impl.h>
52#include <sys/ivintr.h>
53#include <sys/hypervisor_api.h>
54#include <sys/ldc.h>
55#include <sys/cnex.h>
56#include <sys/mach_descrip.h>
57#include <sys/hsvc.h>
58#include <sys/sdt.h>
59
60/*
61 * Internal functions/information
62 */
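/*
 * Map each LDC device class to the PIL at which its interrupt
 * handler is dispatched and to the weight used when distributing
 * its interrupts across CPUs (see "Channel Interrupt Distribution"
 * below). The table is searched linearly by devclass.
 */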
63static struct cnex_intr_map cnex_class_to_intr[] = {
64	{LDC_DEV_GENERIC,	PIL_3,	 0},
65	{LDC_DEV_BLK,		PIL_4,	10},
66	{LDC_DEV_BLK_SVC,	PIL_3,	10},
67	{LDC_DEV_NT,		PIL_6,	35},
68	{LDC_DEV_NT_SVC,	PIL_4,	35},
69	{LDC_DEV_SERIAL,	PIL_6,	 0}
70};
71#define	CNEX_MAX_DEVS (sizeof (cnex_class_to_intr) / \
72				sizeof (cnex_class_to_intr[0]))
73
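/*
 * Weight assigned to transmit interrupts. It is zero because Tx
 * interrupts are not used (see "Channel Interrupt Distribution"
 * below).
 */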
74#define	CNEX_TX_INTR_WEIGHT	0
75
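/*
 * Extract the sun4v configuration handle from the 'reg' property
 * physical address: take the upper 32 bits of the address and mask
 * off its top nibble (bits 60-63 of the original value).
 */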
76#define	SUN4V_REG_SPEC2CFG_HDL(x)	(((x) >> 32) & ~(0xfull << 28))
77
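/*
 * Interval and retry count used by cnex_intr_dis_wait() when
 * waiting for a pending (delivered) interrupt to become idle.
 */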
78static clock_t cnex_wait_usecs = 1000; /* wait time in usecs */
79static int cnex_wait_retries = 3;
80static void *cnex_state;
81
82static uint_t cnex_intr_wrapper(caddr_t arg);
83static dev_info_t *cnex_find_chan_dip(dev_info_t *dip, uint64_t chan_id,
84    md_t *mdp, mde_cookie_t mde);
85
86/*
87 * Channel Interrupt Distribution
88 *
89 * In order to balance interrupts among available CPUs, we use
90 * the intr_dist_cpuid_{add,remove}_device_weight() interface to
91 * assign weights to channel interrupts. These weights, which are
92 * defined in the cnex_intr_map structure, influence which CPU
93 * is returned by intr_dist_cpuid() when called via the cnex
94 * interrupt redistribution callback cnex_intr_redist().
95 * Interrupts for VIO devclass channels are given more weight than
96 * other interrupts because they are expected to occur more
97 * frequently and have a larger impact on overall performance.
98 * Transmit interrupts are given a zero weight because they are
99 * not used.
100 *
101 * The interrupt weights influence the target CPU selection when
102 * interrupts are redistributed and when they are added. However,
103 * removal of interrupts can unbalance the distribution even if
104 * they are removed in the reverse of the order in which they
105 * were added. This can occur when interrupts are removed after
106 * a redistribution has taken place.
107 *
108 * Channel interrupt weights affect interrupt-CPU distribution
109 * relative to other weighted interrupts on the system. For VIO
110 * devclass channels, values are chosen to match those used by
111 * the PCI express nexus driver for net and storage devices.
112 */
113static void cnex_intr_redist(void *arg, int32_t weight_max, int32_t weight);
114static int cnex_intr_new_cpu(cnex_soft_state_t *ssp, cnex_intr_t *iinfo);
115static int cnex_intr_dis_wait(cnex_soft_state_t *ssp, cnex_intr_t *iinfo);
116static int32_t cnex_class_weight(ldc_dev_t devclass);
117
118/*
119 * Debug info
120 */
121#ifdef DEBUG
122
123/*
124 * Print debug messages
125 *
126 * set cnexdbg to 0xf for enabling all msgs
127 * 0x8 - Errors
128 * 0x4 - Warnings
129 * 0x2 - All debug messages
130 * 0x1 - Minimal debug messages
131 */
132
133int cnexdbg = 0x8;
134
135static void
136cnexdebug(const char *fmt, ...)
137{
138	char buf[512];
139	va_list ap;
140
141	va_start(ap, fmt);
142	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
143	va_end(ap);
144
145	cmn_err(CE_CONT, "%s\n", buf);
146}
147
148#define	D1		\
149if (cnexdbg & 0x01)	\
150	cnexdebug
151
152#define	D2		\
153if (cnexdbg & 0x02)	\
154	cnexdebug
155
156#define	DWARN		\
157if (cnexdbg & 0x04)	\
158	cnexdebug
159
160#define	DERR		\
161if (cnexdbg & 0x08)	\
162	cnexdebug
163
164#else
165
166#define	D1
167#define	D2
168#define	DWARN
169#define	DERR
170
171#endif
172
173/*
174 * Config information
175 */
176static int cnex_attach(dev_info_t *, ddi_attach_cmd_t);
177static int cnex_detach(dev_info_t *, ddi_detach_cmd_t);
178static int cnex_open(dev_t *, int, int, cred_t *);
179static int cnex_close(dev_t, int, int, cred_t *);
180static int cnex_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
181static int cnex_ctl(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, void *,
182    void *);
183
184static struct bus_ops cnex_bus_ops = {
185	BUSO_REV,
186	nullbusmap,		/* bus_map */
187	NULL,			/* bus_get_intrspec */
188	NULL,			/* bus_add_intrspec */
189	NULL,			/* bus_remove_intrspec */
190	i_ddi_map_fault,	/* bus_map_fault */
191	ddi_no_dma_map,		/* bus_dma_map */
192	ddi_no_dma_allochdl,	/* bus_dma_allochdl */
193	NULL,			/* bus_dma_freehdl */
194	NULL,			/* bus_dma_bindhdl */
195	NULL,			/* bus_dma_unbindhdl */
196	NULL,			/* bus_dma_flush */
197	NULL,			/* bus_dma_win */
198	NULL,			/* bus_dma_ctl */
199	cnex_ctl,		/* bus_ctl */
200	ddi_bus_prop_op,	/* bus_prop_op */
201	0,			/* bus_get_eventcookie */
202	0,			/* bus_add_eventcall */
203	0,			/* bus_remove_eventcall	*/
204	0,			/* bus_post_event */
205	NULL,			/* bus_intr_ctl */
206	NULL,			/* bus_config */
207	NULL,			/* bus_unconfig */
208	NULL,			/* bus_fm_init */
209	NULL,			/* bus_fm_fini */
210	NULL,			/* bus_fm_access_enter */
211	NULL,			/* bus_fm_access_exit */
212	NULL,			/* bus_power */
213	NULL			/* bus_intr_op */
214};
215
216static struct cb_ops cnex_cb_ops = {
217	cnex_open,			/* open */
218	cnex_close,			/* close */
219	nodev,				/* strategy */
220	nodev,				/* print */
221	nodev,				/* dump */
222	nodev,				/* read */
223	nodev,				/* write */
224	cnex_ioctl,			/* ioctl */
225	nodev,				/* devmap */
226	nodev,				/* mmap */
227	nodev,				/* segmap */
228	nochpoll,			/* poll */
229	ddi_prop_op,			/* cb_prop_op */
230	0,				/* streamtab  */
231	D_MP | D_NEW | D_HOTPLUG	/* Driver compatibility flag */
232};
233
234static struct dev_ops cnex_ops = {
235	DEVO_REV,		/* devo_rev, */
236	0,			/* refcnt  */
237	ddi_getinfo_1to1,	/* info */
238	nulldev,		/* identify */
239	nulldev,		/* probe */
240	cnex_attach,		/* attach */
241	cnex_detach,		/* detach */
242	nodev,			/* reset */
243	&cnex_cb_ops,		/* driver operations */
244	&cnex_bus_ops,		/* bus operations */
245	nulldev,		/* power */
246	ddi_quiesce_not_needed,		/* quiesce */
247};
248
249/*
250 * Module linkage information for the kernel.
251 */
252static struct modldrv modldrv = {
253	&mod_driverops,
254	"sun4v channel-devices nexus",
255	&cnex_ops,
256};
257
258static struct modlinkage modlinkage = {
259	MODREV_1, (void *)&modldrv, NULL
260};
261
262int
263_init(void)
264{
265	int err;
266	uint64_t majornum;
267	uint64_t minornum;
268
269	/*
270	 * Check HV intr group api versioning.
271	 * Note that cnex assumes interrupt cookie support is
272	 * present in version 1.0 of the intr group API.
273	 */
274	if ((err = hsvc_version(HSVC_GROUP_INTR, &majornum, &minornum)) != 0) {
275		cmn_err(CE_WARN, "cnex: failed to get intr api "
276		    "group versioning errno=%d", err);
277		return (err);
278	} else if ((majornum != 1) && (majornum != 2)) {
279		cmn_err(CE_WARN, "cnex: unsupported intr api group: "
280		    "maj:0x%lx, min:0x%lx", majornum, minornum);
281		return (ENOTSUP);
282	}
283
284	if ((err = ddi_soft_state_init(&cnex_state,
285	    sizeof (cnex_soft_state_t), 0)) != 0) {
286		return (err);
287	}
288	if ((err = mod_install(&modlinkage)) != 0) {
289		ddi_soft_state_fini(&cnex_state);
290		return (err);
291	}
292	return (0);
293}
294
295int
296_fini(void)
297{
298	int err;
299
300	if ((err = mod_remove(&modlinkage)) != 0)
301		return (err);
302	ddi_soft_state_fini(&cnex_state);
303	return (0);
304}
305
306int
307_info(struct modinfo *modinfop)
308{
309	return (mod_info(&modlinkage, modinfop));
310}
311
312/*
313 * Callback function invoked by the interrupt redistribution
314 * framework. This will retarget interrupts to CPUs that are
315 * currently available in the system.
316 *
317 * Note: any interrupts with weight greater than or equal to
318 * weight_max must be redistributed when this callback is
319 * invoked with (weight == weight_max) which will be once per
320 * redistribution.
321 */
322/*ARGSUSED*/
323static void
324cnex_intr_redist(void *arg, int32_t weight_max, int32_t weight)
325{
326	cnex_ldc_t		*cldcp;
327	cnex_soft_state_t	*cnex_ssp = arg;
328
329	ASSERT(cnex_ssp != NULL);
330	mutex_enter(&cnex_ssp->clist_lock);
331
332	cldcp = cnex_ssp->clist;
333	while (cldcp != NULL) {
334
335		mutex_enter(&cldcp->lock);
336
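		/*
		 * Retarget an interrupt on the pass whose weight matches
		 * its own; interrupts heavier than weight_max are all
		 * handled on the (weight == weight_max) pass, as required
		 * by the redistribution contract described above.
		 */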
337		if (cldcp->tx.hdlr && (cldcp->tx.weight == weight ||
338		    (weight_max == weight && cldcp->tx.weight > weight))) {
339			(void) cnex_intr_new_cpu(cnex_ssp, &cldcp->tx);
340		}
341
342		if (cldcp->rx.hdlr && (cldcp->rx.weight == weight ||
343		    (weight_max == weight && cldcp->rx.weight > weight))) {
344			(void) cnex_intr_new_cpu(cnex_ssp, &cldcp->rx);
345		}
346
347		mutex_exit(&cldcp->lock);
348
349		/* next channel */
350		cldcp = cldcp->next;
351	}
352
353	mutex_exit(&cnex_ssp->clist_lock);
354}
355
356/*
357 * Internal function to replace the CPU used by an interrupt
358 * during interrupt redistribution.
359 */
360static int
361cnex_intr_new_cpu(cnex_soft_state_t *ssp, cnex_intr_t *iinfo)
362{
363	int	intr_state;
364	int 	rv;
365
366	/* Determine if the interrupt is enabled */
367	rv = hvldc_intr_getvalid(ssp->cfghdl, iinfo->ino, &intr_state);
368	if (rv) {
369		DWARN("cnex_intr_new_cpu: ino=0x%llx, can't get valid\n",
370		    iinfo->ino);
371		return (rv);
372	}
373
374	/* If it is enabled, disable it */
375	if (intr_state == HV_INTR_VALID) {
376		rv = cnex_intr_dis_wait(ssp, iinfo);
377		if (rv) {
378			return (rv);
379		}
380	}
381
382	/* Target the interrupt at a new CPU. */
383	iinfo->cpuid = intr_dist_cpuid();
384	(void) hvldc_intr_settarget(ssp->cfghdl, iinfo->ino, iinfo->cpuid);
385	intr_dist_cpuid_add_device_weight(iinfo->cpuid, iinfo->dip,
386	    iinfo->weight);
387
388	/* Re-enable the interrupt if it was enabled */
389	if (intr_state == HV_INTR_VALID) {
390		(void) hvldc_intr_setvalid(ssp->cfghdl, iinfo->ino,
391		    HV_INTR_VALID);
392	}
393
394	return (0);
395}
396
397/*
398 * Internal function to disable an interrupt and wait
399 * for any pending interrupts to finish.
400 */
401static int
402cnex_intr_dis_wait(cnex_soft_state_t *ssp, cnex_intr_t *iinfo)
403{
404	int rv, intr_state, retries;
405
406	/* disable interrupts */
407	rv = hvldc_intr_setvalid(ssp->cfghdl, iinfo->ino, HV_INTR_NOTVALID);
408	if (rv) {
409		DWARN("cnex_intr_dis_wait: ino=0x%llx, can't set valid\n",
410		    iinfo->ino);
411		return (ENXIO);
412	}
413
414	/*
415	 * Make a best effort to wait for pending interrupts
416	 * to finish. There is not much we can do if we time out.
417	 */
418	retries = 0;
419
420	do {
421		rv = hvldc_intr_getstate(ssp->cfghdl, iinfo->ino, &intr_state);
422		if (rv) {
423			DWARN("cnex_intr_dis_wait: ino=0x%llx, can't get "
424			    "state\n", iinfo->ino);
425			return (ENXIO);
426		}
427
428		if (intr_state != HV_INTR_DELIVERED_STATE)
429			break;
430
431		drv_usecwait(cnex_wait_usecs);
432
433	} while (!panicstr && ++retries <= cnex_wait_retries);
434
435	return (0);
436}
437
438/*
439 * Returns the interrupt weight to use for the specified devclass.
440 */
441static int32_t
442cnex_class_weight(ldc_dev_t devclass)
443{
444	int idx;
445
446	for (idx = 0; idx < CNEX_MAX_DEVS; idx++) {
447		if (devclass == cnex_class_to_intr[idx].devclass) {
448			return (cnex_class_to_intr[idx].weight);
449		}
450	}
451
452	/*
453	 * If this code is reached, the specified devclass is
454	 * invalid. New devclasses should be added to
455	 * cnex_class_to_intr.
456	 */
457	ASSERT(0);
458
459	return (0);
460}
461
462/*
463 * Exported interface to register an LDC endpoint with
464 * the channel nexus
465 */
466static int
467cnex_reg_chan(dev_info_t *dip, uint64_t id, ldc_dev_t devclass)
468{
469	int		idx;
470	cnex_ldc_t	*cldcp;
471	int		listsz, num_nodes, num_channels;
472	md_t		*mdp = NULL;
473	mde_cookie_t	rootnode, *listp = NULL;
474	uint64_t	tmp_id;
475	uint64_t	rxino = (uint64_t)-1;
476	uint64_t	txino = (uint64_t)-1;
477	cnex_soft_state_t *cnex_ssp;
478	int		status, instance;
479	dev_info_t	*chan_dip = NULL;
480
481	/* Get device instance and structure */
482	instance = ddi_get_instance(dip);
483	cnex_ssp = ddi_get_soft_state(cnex_state, instance);
484
485	/* Check to see if channel is already registered */
486	mutex_enter(&cnex_ssp->clist_lock);
487	cldcp = cnex_ssp->clist;
488	while (cldcp) {
489		if (cldcp->id == id) {
490			DWARN("cnex_reg_chan: channel 0x%llx exists\n", id);
491			mutex_exit(&cnex_ssp->clist_lock);
492			return (EINVAL);
493		}
494		cldcp = cldcp->next;
495	}
496
497	/* Get the Tx/Rx inos from the MD */
498	if ((mdp = md_get_handle()) == NULL) {
499		DWARN("cnex_reg_chan: cannot init MD\n");
500		mutex_exit(&cnex_ssp->clist_lock);
501		return (ENXIO);
502	}
503	num_nodes = md_node_count(mdp);
504	ASSERT(num_nodes > 0);
505
506	listsz = num_nodes * sizeof (mde_cookie_t);
507	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);
508
509	rootnode = md_root_node(mdp);
510
511	/* search for all channel_endpoint nodes */
512	num_channels = md_scan_dag(mdp, rootnode,
513	    md_find_name(mdp, "channel-endpoint"),
514	    md_find_name(mdp, "fwd"), listp);
515	if (num_channels <= 0) {
516		DWARN("cnex_reg_chan: no channel-endpoint nodes in MD\n");
517		kmem_free(listp, listsz);
518		(void) md_fini_handle(mdp);
519		mutex_exit(&cnex_ssp->clist_lock);
520		return (EINVAL);
521	}
522
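	/*
	 * Walk all channel-endpoint nodes looking for the requested
	 * channel; record its Tx/Rx inos and locate the dip of the
	 * device that owns the endpoint.
	 */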
523	for (idx = 0; idx < num_channels; idx++) {
524
525		/* Get the channel ID */
526		status = md_get_prop_val(mdp, listp[idx], "id", &tmp_id);
527		if (status) {
528			DWARN("cnex_reg_chan: cannot read LDC ID\n");
529			kmem_free(listp, listsz);
530			(void) md_fini_handle(mdp);
531			mutex_exit(&cnex_ssp->clist_lock);
532			return (ENXIO);
533		}
534		if (tmp_id != id)
535			continue;
536
537		/* Get the Tx and Rx ino */
538		status = md_get_prop_val(mdp, listp[idx], "tx-ino", &txino);
539		if (status) {
540			DWARN("cnex_reg_chan: cannot read Tx ino\n");
541			kmem_free(listp, listsz);
542			(void) md_fini_handle(mdp);
543			mutex_exit(&cnex_ssp->clist_lock);
544			return (ENXIO);
545		}
546		status = md_get_prop_val(mdp, listp[idx], "rx-ino", &rxino);
547		if (status) {
548			DWARN("cnex_reg_chan: cannot read Rx ino\n");
549			kmem_free(listp, listsz);
550			(void) md_fini_handle(mdp);
551			mutex_exit(&cnex_ssp->clist_lock);
552			return (ENXIO);
553		}
554		chan_dip = cnex_find_chan_dip(dip, id, mdp, listp[idx]);
555		ASSERT(chan_dip != NULL);
556	}
557	kmem_free(listp, listsz);
558	(void) md_fini_handle(mdp);
559
560	/*
561	 * check to see if we looped through the list of channel IDs without
562	 * matching one (i.e. an 'ino' has not been initialised).
563	 */
564	if ((rxino == -1) || (txino == -1)) {
565		DERR("cnex_reg_chan: no ID matching '%llx' in MD\n", id);
566		mutex_exit(&cnex_ssp->clist_lock);
567		return (ENOENT);
568	}
569
570	/* Allocate a new channel structure */
571	cldcp = kmem_zalloc(sizeof (*cldcp), KM_SLEEP);
572
573	/* Initialize the channel */
574	mutex_init(&cldcp->lock, NULL, MUTEX_DRIVER, NULL);
575
576	cldcp->id = id;
577	cldcp->tx.ino = txino;
578	cldcp->rx.ino = rxino;
579	cldcp->devclass = devclass;
580	cldcp->tx.weight = CNEX_TX_INTR_WEIGHT;
581	cldcp->rx.weight = cnex_class_weight(devclass);
582	cldcp->dip = chan_dip;
583
584	/* add channel to nexus channel list */
585	cldcp->next = cnex_ssp->clist;
586	cnex_ssp->clist = cldcp;
587
588	mutex_exit(&cnex_ssp->clist_lock);
589
590	return (0);
591}
592
593/*
594 * Add Tx/Rx interrupt handler for the channel
595 */
596static int
597cnex_add_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype,
598    uint_t (*hdlr)(), caddr_t arg1, caddr_t arg2)
599{
600	int		rv, idx, pil;
601	cnex_ldc_t	*cldcp;
602	cnex_intr_t	*iinfo;
603	cnex_soft_state_t *cnex_ssp;
604	int		instance;
605
606	/* Get device instance and structure */
607	instance = ddi_get_instance(dip);
608	cnex_ssp = ddi_get_soft_state(cnex_state, instance);
609
610	/* get channel info */
611	mutex_enter(&cnex_ssp->clist_lock);
612	cldcp = cnex_ssp->clist;
613	while (cldcp) {
614		if (cldcp->id == id)
615			break;
616		cldcp = cldcp->next;
617	}
618	if (cldcp == NULL) {
619		DWARN("cnex_add_intr: channel 0x%llx does not exist\n", id);
620		mutex_exit(&cnex_ssp->clist_lock);
621		return (EINVAL);
622	}
623	mutex_exit(&cnex_ssp->clist_lock);
624
625	/* get channel lock */
626	mutex_enter(&cldcp->lock);
627
628	/* get interrupt type */
629	if (itype == CNEX_TX_INTR) {
630		iinfo = &(cldcp->tx);
631	} else if (itype == CNEX_RX_INTR) {
632		iinfo = &(cldcp->rx);
633	} else {
634		DWARN("cnex_add_intr: invalid interrupt type\n");
635		mutex_exit(&cldcp->lock);
636		return (EINVAL);
637	}
638
639	/* check if a handler is already added */
640	if (iinfo->hdlr != 0) {
641		DWARN("cnex_add_intr: interrupt handler exists\n");
642		mutex_exit(&cldcp->lock);
643		return (EINVAL);
644	}
645
646	/* save interrupt handler info */
647	iinfo->hdlr = hdlr;
648	iinfo->arg1 = arg1;
649	iinfo->arg2 = arg2;
650
651	/* save data for DTrace probes used by intrstat(1m) */
652	iinfo->dip = cldcp->dip;
653	iinfo->id = cldcp->id;
654
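	/*
	 * The system interrupt cookie for this channel is derived
	 * directly from its ino, offset from the start of the
	 * virtual interrupt range (MINVINTR_COOKIE).
	 */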
655	iinfo->icookie = MINVINTR_COOKIE + iinfo->ino;
656
657	/*
658	 * Verify that the ino does not generate a cookie which
659	 * is outside the (MINVINTR_COOKIE, MAXIVNUM) range of the
660	 * system interrupt table.
661	 */
662	if (iinfo->icookie >= MAXIVNUM || iinfo->icookie < MINVINTR_COOKIE) {
663		DWARN("cnex_add_intr: invalid cookie %x ino %x\n",
664		    iinfo->icookie, iinfo->ino);
665		mutex_exit(&cldcp->lock);
666		return (EINVAL);
667	}
668
669	D1("cnex_add_intr: add hdlr, cfghdl=0x%llx, ino=0x%llx, "
670	    "cookie=0x%llx\n", cnex_ssp->cfghdl, iinfo->ino, iinfo->icookie);
671
672	/* Pick a PIL on the basis of the channel's devclass */
673	for (idx = 0, pil = PIL_3; idx < CNEX_MAX_DEVS; idx++) {
674		if (cldcp->devclass == cnex_class_to_intr[idx].devclass) {
675			pil = cnex_class_to_intr[idx].pil;
676			break;
677		}
678	}
679
680	/* add interrupt to solaris ivec table */
681	if (add_ivintr(iinfo->icookie, pil, (intrfunc)cnex_intr_wrapper,
682	    (caddr_t)iinfo, NULL, NULL) != 0) {
683		DWARN("cnex_add_intr: add_ivintr fail cookie %x ino %x\n",
684		    iinfo->icookie, iinfo->ino);
685		mutex_exit(&cldcp->lock);
686		return (EINVAL);
687	}
688
689	/* set the cookie in the HV */
690	rv = hvldc_intr_setcookie(cnex_ssp->cfghdl, iinfo->ino, iinfo->icookie);
691
692	/* pick next CPU in the domain for this channel */
693	iinfo->cpuid = intr_dist_cpuid();
694
695	/* set the target CPU and then enable interrupts */
696	rv = hvldc_intr_settarget(cnex_ssp->cfghdl, iinfo->ino, iinfo->cpuid);
697	if (rv) {
698		DWARN("cnex_add_intr: ino=0x%llx, cannot set target cpu\n",
699		    iinfo->ino);
700		goto hv_error;
701	}
702	rv = hvldc_intr_setstate(cnex_ssp->cfghdl, iinfo->ino,
703	    HV_INTR_IDLE_STATE);
704	if (rv) {
705		DWARN("cnex_add_intr: ino=0x%llx, cannot set state\n",
706		    iinfo->ino);
707		goto hv_error;
708	}
709	rv = hvldc_intr_setvalid(cnex_ssp->cfghdl, iinfo->ino, HV_INTR_VALID);
710	if (rv) {
711		DWARN("cnex_add_intr: ino=0x%llx, cannot set valid\n",
712		    iinfo->ino);
713		goto hv_error;
714	}
715
716	intr_dist_cpuid_add_device_weight(iinfo->cpuid, iinfo->dip,
717	    iinfo->weight);
718
719	mutex_exit(&cldcp->lock);
720	return (0);
721
722hv_error:
723	(void) rem_ivintr(iinfo->icookie, pil);
724	mutex_exit(&cldcp->lock);
725	return (ENXIO);
726}
727
728
729/*
730 * Exported interface to unregister an LDC endpoint with
731 * the channel nexus
732 */
733static int
734cnex_unreg_chan(dev_info_t *dip, uint64_t id)
735{
736	cnex_ldc_t	*cldcp, *prev_cldcp;
737	cnex_soft_state_t *cnex_ssp;
738	int		instance;
739
740	/* Get device instance and structure */
741	instance = ddi_get_instance(dip);
742	cnex_ssp = ddi_get_soft_state(cnex_state, instance);
743
744	/* find and remove channel from list */
745	mutex_enter(&cnex_ssp->clist_lock);
746	prev_cldcp = NULL;
747	cldcp = cnex_ssp->clist;
748	while (cldcp) {
749		if (cldcp->id == id)
750			break;
751		prev_cldcp = cldcp;
752		cldcp = cldcp->next;
753	}
754
755	if (cldcp == NULL) {
756		DWARN("cnex_unreg_chan: invalid channel 0x%llx\n", id);
757		mutex_exit(&cnex_ssp->clist_lock);
758		return (EINVAL);
759	}
760
761	if (cldcp->tx.hdlr || cldcp->rx.hdlr) {
762		DWARN("cnex_unreg_chan: handlers still exist: chan %lx\n", id);
763		mutex_exit(&cnex_ssp->clist_lock);
764		return (ENXIO);
765	}
766
767	if (prev_cldcp)
768		prev_cldcp->next = cldcp->next;
769	else
770		cnex_ssp->clist = cldcp->next;
771
772	mutex_exit(&cnex_ssp->clist_lock);
773
774	/* destroy mutex */
775	mutex_destroy(&cldcp->lock);
776
777	/* free channel */
778	kmem_free(cldcp, sizeof (*cldcp));
779
780	return (0);
781}
782
783/*
784 * Remove Tx/Rx interrupt handler for the channel
785 */
786static int
787cnex_rem_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype)
788{
789	int			rv, idx, pil;
790	cnex_ldc_t		*cldcp;
791	cnex_intr_t		*iinfo;
792	cnex_soft_state_t	*cnex_ssp;
793	int			instance, istate;
794
795	/* Get device instance and structure */
796	instance = ddi_get_instance(dip);
797	cnex_ssp = ddi_get_soft_state(cnex_state, instance);
798
799	/* get channel info */
800	mutex_enter(&cnex_ssp->clist_lock);
801	cldcp = cnex_ssp->clist;
802	while (cldcp) {
803		if (cldcp->id == id)
804			break;
805		cldcp = cldcp->next;
806	}
807	if (cldcp == NULL) {
808		DWARN("cnex_rem_intr: channel 0x%llx does not exist\n", id);
809		mutex_exit(&cnex_ssp->clist_lock);
810		return (EINVAL);
811	}
812	mutex_exit(&cnex_ssp->clist_lock);
813
814	/* get rid of the channel intr handler */
815	mutex_enter(&cldcp->lock);
816
817	/* get interrupt type */
818	if (itype == CNEX_TX_INTR) {
819		iinfo = &(cldcp->tx);
820	} else if (itype == CNEX_RX_INTR) {
821		iinfo = &(cldcp->rx);
822	} else {
823		DWARN("cnex_rem_intr: invalid interrupt type\n");
824		mutex_exit(&cldcp->lock);
825		return (EINVAL);
826	}
827
828	D1("cnex_rem_intr: interrupt ino=0x%x\n", iinfo->ino);
829
830	/* make sure a handler has been added */
831	if (iinfo->hdlr == 0) {
832		DWARN("cnex_rem_intr: interrupt handler does not exist\n");
833		mutex_exit(&cldcp->lock);
834		return (EINVAL);
835	}
836
837	D1("cnex_rem_intr: set intr to invalid ino=0x%x\n", iinfo->ino);
838	rv = hvldc_intr_setvalid(cnex_ssp->cfghdl,
839	    iinfo->ino, HV_INTR_NOTVALID);
840	if (rv) {
841		DWARN("cnex_rem_intr: cannot set valid ino=%x\n", iinfo->ino);
842		mutex_exit(&cldcp->lock);
843		return (ENXIO);
844	}
845
846	/*
847	 * Check if there are pending interrupts. If interrupts are
848	 * pending return EAGAIN.
849	 */
850	rv = hvldc_intr_getstate(cnex_ssp->cfghdl, iinfo->ino, &istate);
851	if (rv) {
852		DWARN("cnex_rem_intr: ino=0x%llx, cannot get state\n",
853		    iinfo->ino);
854		mutex_exit(&cldcp->lock);
855		return (ENXIO);
856	}
857
858	/* if an interrupt is still pending, warn and return EAGAIN */
859	if (istate != HV_INTR_IDLE_STATE) {
860		DWARN("cnex_rem_intr: cannot remove intr busy ino=%x\n",
861		    iinfo->ino);
862		mutex_exit(&cldcp->lock);
863		return (EAGAIN);
864	}
865
866	/* Pick a PIL on the basis of the channel's devclass */
867	for (idx = 0, pil = PIL_3; idx < CNEX_MAX_DEVS; idx++) {
868		if (cldcp->devclass == cnex_class_to_intr[idx].devclass) {
869			pil = cnex_class_to_intr[idx].pil;
870			break;
871		}
872	}
873
874	intr_dist_cpuid_rem_device_weight(iinfo->cpuid, iinfo->dip);
875
876	/* remove interrupt */
877	(void) rem_ivintr(iinfo->icookie, pil);
878
879	/* clear interrupt info */
880	bzero(iinfo, sizeof (*iinfo));
881
882	mutex_exit(&cldcp->lock);
883
884	return (0);
885}
886
887
888/*
889 * Clear pending Tx/Rx interrupt
890 */
891static int
892cnex_clr_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype)
893{
894	int			rv;
895	cnex_ldc_t		*cldcp;
896	cnex_intr_t		*iinfo;
897	cnex_soft_state_t	*cnex_ssp;
898	int			instance;
899
900	/* Get device instance and structure */
901	instance = ddi_get_instance(dip);
902	cnex_ssp = ddi_get_soft_state(cnex_state, instance);
903
904	/* get channel info */
905	mutex_enter(&cnex_ssp->clist_lock);
906	cldcp = cnex_ssp->clist;
907	while (cldcp) {
908		if (cldcp->id == id)
909			break;
910		cldcp = cldcp->next;
911	}
912	if (cldcp == NULL) {
913		DWARN("cnex_clr_intr: channel 0x%llx does not exist\n", id);
914		mutex_exit(&cnex_ssp->clist_lock);
915		return (EINVAL);
916	}
917	mutex_exit(&cnex_ssp->clist_lock);
918
919	mutex_enter(&cldcp->lock);
920
921	/* get interrupt type */
922	if (itype == CNEX_TX_INTR) {
923		iinfo = &(cldcp->tx);
924	} else if (itype == CNEX_RX_INTR) {
925		iinfo = &(cldcp->rx);
926	} else {
927		DWARN("cnex_clr_intr: invalid interrupt type\n");
928		mutex_exit(&cldcp->lock);
929		return (EINVAL);
930	}
931
932	D1("%s: interrupt ino=0x%x\n", __func__, iinfo->ino);
933
934	/* make sure a handler has been added */
935	if (iinfo->hdlr == 0) {
936		DWARN("cnex_clr_intr: interrupt handler does not exist\n");
937		mutex_exit(&cldcp->lock);
938		return (EINVAL);
939	}
940
941	rv = hvldc_intr_setstate(cnex_ssp->cfghdl, iinfo->ino,
942	    HV_INTR_IDLE_STATE);
943	if (rv) {
944		DWARN("cnex_clr_intr: cannot clear interrupt state\n");
945		mutex_exit(&cldcp->lock);
946		return (ENXIO);
947	}
948
949	mutex_exit(&cldcp->lock);
950
951	return (0);
952}
953
954/*
955 * Channel nexus interrupt handler wrapper
956 */
957static uint_t
958cnex_intr_wrapper(caddr_t arg)
959{
960	int 			res;
961	uint_t 			(*handler)();
962	caddr_t 		handler_arg1;
963	caddr_t 		handler_arg2;
964	cnex_intr_t 		*iinfo = (cnex_intr_t *)arg;
965
966	ASSERT(iinfo != NULL);
967
968	handler = iinfo->hdlr;
969	handler_arg1 = iinfo->arg1;
970	handler_arg2 = iinfo->arg2;
971
972	/*
973	 * The 'interrupt__start' and 'interrupt__complete' probes
974	 * are provided to support the intrstat(1M) command. These
975	 * probes help monitor interrupts on a per-device basis only.
976	 * In order to provide the ability to monitor activity on a
977	 * per-channel basis, two additional probes
978	 * ('channelintr__start' and 'channelintr__complete') are
979	 * provided here.
980	 */
981	DTRACE_PROBE4(channelintr__start, uint64_t, iinfo->id,
982	    cnex_intr_t *, iinfo, void *, handler, caddr_t, handler_arg1);
983
984	DTRACE_PROBE4(interrupt__start, dev_info_t, iinfo->dip,
985	    void *, handler, caddr_t, handler_arg1, caddr_t, handler_arg2);
986
987	D1("cnex_intr_wrapper:ino=0x%llx invoke client handler\n", iinfo->ino);
988	res = (*handler)(handler_arg1, handler_arg2);
989
990	DTRACE_PROBE4(interrupt__complete, dev_info_t, iinfo->dip,
991	    void *, handler, caddr_t, handler_arg1, int, res);
992
993	DTRACE_PROBE4(channelintr__complete, uint64_t, iinfo->id,
994	    cnex_intr_t *, iinfo, void *, handler, caddr_t, handler_arg1);
995
996	return (res);
997}
998
999/*ARGSUSED*/
1000static int
1001cnex_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1002{
1003	int 		rv, instance, reglen;
1004	cnex_regspec_t	*reg_p;
1005	ldc_cnex_t	cinfo;
1006	cnex_soft_state_t *cnex_ssp;
1007
1008	switch (cmd) {
1009	case DDI_ATTACH:
1010		break;
1011	case DDI_RESUME:
1012		return (DDI_SUCCESS);
1013	default:
1014		return (DDI_FAILURE);
1015	}
1016
1017	/*
1018	 * Get the instance specific soft state structure.
1019	 * Save the devi for this instance in the soft_state data.
1020	 */
1021	instance = ddi_get_instance(devi);
1022	if (ddi_soft_state_zalloc(cnex_state, instance) != DDI_SUCCESS)
1023		return (DDI_FAILURE);
1024	cnex_ssp = ddi_get_soft_state(cnex_state, instance);
1025
1026	cnex_ssp->devi = devi;
1027	cnex_ssp->clist = NULL;
1028
1029	if (ddi_getlongprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
1030	    "reg", (caddr_t)&reg_p, &reglen) != DDI_SUCCESS) {
		ddi_soft_state_free(cnex_state, instance);
1031		return (DDI_FAILURE);
1032	}
1033
1034	/* get the sun4v config handle for this device */
1035	cnex_ssp->cfghdl = SUN4V_REG_SPEC2CFG_HDL(reg_p->physaddr);
1036	kmem_free(reg_p, reglen);
1037
1038	D1("cnex_attach: cfghdl=0x%llx\n", cnex_ssp->cfghdl);
1039
1040	/* init channel list mutex */
1041	mutex_init(&cnex_ssp->clist_lock, NULL, MUTEX_DRIVER, NULL);
1042
1043	/* Register with LDC module */
1044	cinfo.dip = devi;
1045	cinfo.reg_chan = cnex_reg_chan;
1046	cinfo.unreg_chan = cnex_unreg_chan;
1047	cinfo.add_intr = cnex_add_intr;
1048	cinfo.rem_intr = cnex_rem_intr;
1049	cinfo.clr_intr = cnex_clr_intr;
1050
1051	/*
1052	 * ldc_register() will fail if another nexus instance has
1053	 * already registered with the LDC framework.
1054	 */
1055	rv = ldc_register(&cinfo);
1056	if (rv) {
1057		DWARN("cnex_attach: unable to register with LDC\n");
1058		mutex_destroy(&cnex_ssp->clist_lock);
1059		ddi_soft_state_free(cnex_state, instance);
1060		return (DDI_FAILURE);
1061	}
1062
1063	if (ddi_create_minor_node(devi, "devctl", S_IFCHR, instance,
1064	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1065		ddi_remove_minor_node(devi, NULL);
1066		mutex_destroy(&cnex_ssp->clist_lock);
1067		ddi_soft_state_free(cnex_state, instance);
1068		return (DDI_FAILURE);
1069	}
1070
1071	/* Add interrupt redistribution callback. */
1072	intr_dist_add_weighted(cnex_intr_redist, cnex_ssp);
1073
1074	ddi_report_dev(devi);
1075	return (DDI_SUCCESS);
1076}
1077
1078/*ARGSUSED*/
1079static int
1080cnex_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1081{
1082	int 		instance;
1083	ldc_cnex_t	cinfo;
1084	cnex_soft_state_t *cnex_ssp;
1085
1086	switch (cmd) {
1087	case DDI_DETACH:
1088		break;
1089	case DDI_SUSPEND:
1090		return (DDI_SUCCESS);
1091	default:
1092		return (DDI_FAILURE);
1093	}
1094
1095	instance = ddi_get_instance(devi);
1096	cnex_ssp = ddi_get_soft_state(cnex_state, instance);
1097
1098	/* check if there are any channels still registered */
1099	if (cnex_ssp->clist) {
1100		cmn_err(CE_WARN, "?cnex_detach: instance %d still has "
1101		    "channels registered\n", ddi_get_instance(devi));
1102		return (DDI_FAILURE);
1103	}
1104
1105	/* Unregister with LDC module */
1106	cinfo.dip = devi;
1107	(void) ldc_unregister(&cinfo);
1108
1109	/* Remove interrupt redistribution callback. */
1110	intr_dist_rem_weighted(cnex_intr_redist, cnex_ssp);
1111
1112	/* destroy mutex */
1113	mutex_destroy(&cnex_ssp->clist_lock);
1114
1115	/* free soft state structure */
1116	ddi_soft_state_free(cnex_state, instance);
1117
1118	return (DDI_SUCCESS);
1119}
1120
1121/*ARGSUSED*/
1122static int
1123cnex_open(dev_t *devp, int flags, int otyp, cred_t *credp)
1124{
1125	int instance;
1126
1127	if (otyp != OTYP_CHR)
1128		return (EINVAL);
1129
1130	instance = getminor(*devp);
1131	if (ddi_get_soft_state(cnex_state, instance) == NULL)
1132		return (ENXIO);
1133
1134	return (0);
1135}
1136
1137/*ARGSUSED*/
1138static int
1139cnex_close(dev_t dev, int flags, int otyp, cred_t *credp)
1140{
1141	int instance;
1142
1143	if (otyp != OTYP_CHR)
1144		return (EINVAL);
1145
1146	instance = getminor(dev);
1147	if (ddi_get_soft_state(cnex_state, instance) == NULL)
1148		return (ENXIO);
1149
1150	return (0);
1151}
1152
1153/*ARGSUSED*/
1154static int
1155cnex_ioctl(dev_t dev,
1156    int cmd, intptr_t arg, int mode, cred_t *cred_p, int *rval_p)
1157{
1158	int instance;
1159	cnex_soft_state_t *cnex_ssp;
1160
1161	instance = getminor(dev);
1162	if ((cnex_ssp = ddi_get_soft_state(cnex_state, instance)) == NULL)
1163		return (ENXIO);
1164	ASSERT(cnex_ssp->devi);
1165	return (ndi_devctl_ioctl(cnex_ssp->devi, cmd, arg, mode, 0));
1166}
1167
1168static int
1169cnex_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
1170    void *arg, void *result)
1171{
1172	char		name[MAXNAMELEN];
1173	uint32_t	reglen;
1174	int		*cnex_regspec;
1175
1176	switch (ctlop) {
1177	case DDI_CTLOPS_REPORTDEV:
1178		if (rdip == NULL)
1179			return (DDI_FAILURE);
1180		cmn_err(CE_CONT, "?channel-device: %s%d\n",
1181		    ddi_driver_name(rdip), ddi_get_instance(rdip));
1182		return (DDI_SUCCESS);
1183
1184	case DDI_CTLOPS_INITCHILD:
1185	{
1186		dev_info_t *child = (dev_info_t *)arg;
1187
1188		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child,
1189		    DDI_PROP_DONTPASS, "reg",
1190		    &cnex_regspec, &reglen) != DDI_SUCCESS) {
1191			return (DDI_FAILURE);
1192		}
1193
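		/*
		 * Construct the child's unit address from its 'reg'
		 * property value (expected to match the device's
		 * cfg-handle in the MD), formatted in hex.
		 */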
1194		(void) snprintf(name, sizeof (name), "%x", *cnex_regspec);
1195		ddi_set_name_addr(child, name);
1196		ddi_set_parent_data(child, NULL);
1197		ddi_prop_free(cnex_regspec);
1198		return (DDI_SUCCESS);
1199	}
1200
1201	case DDI_CTLOPS_UNINITCHILD:
1202	{
1203		dev_info_t *child = (dev_info_t *)arg;
1204
1205		NDI_CONFIG_DEBUG((CE_NOTE,
1206		    "DDI_CTLOPS_UNINITCHILD(%s, instance=%d)",
1207		    ddi_driver_name(child), DEVI(child)->devi_instance));
1208
1209		ddi_set_name_addr(child, NULL);
1210
1211		return (DDI_SUCCESS);
1212	}
1213
1214	case DDI_CTLOPS_DMAPMAPC:
1215	case DDI_CTLOPS_REPORTINT:
1216	case DDI_CTLOPS_REGSIZE:
1217	case DDI_CTLOPS_NREGS:
1218	case DDI_CTLOPS_SIDDEV:
1219	case DDI_CTLOPS_SLAVEONLY:
1220	case DDI_CTLOPS_AFFINITY:
1221	case DDI_CTLOPS_POKE:
1222	case DDI_CTLOPS_PEEK:
1223		/*
1224		 * These ops correspond to functions that "shouldn't" be called
1225		 * by a channel-device driver.  So we whine when we're called.
1226		 */
1227		cmn_err(CE_WARN, "%s%d: invalid op (%d) from %s%d\n",
1228		    ddi_driver_name(dip), ddi_get_instance(dip), ctlop,
1229		    ddi_driver_name(rdip), ddi_get_instance(rdip));
1230		return (DDI_FAILURE);
1231
1232	case DDI_CTLOPS_ATTACH:
1233	case DDI_CTLOPS_BTOP:
1234	case DDI_CTLOPS_BTOPR:
1235	case DDI_CTLOPS_DETACH:
1236	case DDI_CTLOPS_DVMAPAGESIZE:
1237	case DDI_CTLOPS_IOMIN:
1238	case DDI_CTLOPS_POWER:
1239	case DDI_CTLOPS_PTOB:
1240	default:
1241		/*
1242		 * Everything else (e.g. PTOB/BTOP/BTOPR requests) we pass up
1243		 */
1244		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
1245	}
1246}
1247
1248/*
1249 * cnex_find_chan_dip -- Find the dip of the device that corresponds
1250 * 	to the specified channel. Below are the details on how the dip
1251 *	is derived.
1252 *
1253 *	- In the MD, the cfg-handle is expected to be unique for
1254 *	  virtual-device nodes that have the same 'name' property value.
1255 *	  This value is expected to be the same as that of the "reg"
1256 *	  property of the corresponding OBP device node.
1257 *
1258 *	- The value of the 'name' property of a virtual-device node
1259 *	  in the MD is expected to match that of the corresponding
1260 *	  OBP device node.
1261 *
1262 *	- Find the virtual-device node corresponding to a channel-endpoint
1263 *	  by walking backwards. Then obtain the values for the 'name' and
1264 *	  'cfg-handle' properties.
1265 *
1266 *	- Walk all the children of the cnex, find a matching dip which
1267 *	  has the same 'name' and 'reg' property values.
1268 *
1269 *	- Channels that have no corresponding device driver are
1270 *	  treated as if they correspond to the cnex driver itself;
1271 *	  that is, the cnex dip is returned for them. In effect,
1272 *	  cnex acts as an umbrella device driver. Note that this is
1273 *	  for 'intrstat' statistics purposes only. As a result,
1274 *	  'intrstat' shows cnex as the device servicing the
1275 *	  interrupts corresponding to these channels.
1276 *
1277 *	  For now, only one such case is known: the channels that
1278 *	  are used by the "domain-services".
1279 */
1280static dev_info_t *
1281cnex_find_chan_dip(dev_info_t *dip, uint64_t chan_id,
1282    md_t *mdp, mde_cookie_t mde)
1283{
1284	int listsz;
1285	int num_nodes;
1286	int num_devs;
1287	uint64_t cfghdl;
1288	char *md_name;
1289	mde_cookie_t *listp;
1290	dev_info_t *cdip = NULL;
1291
1292	num_nodes = md_node_count(mdp);
1293	ASSERT(num_nodes > 0);
1294	listsz = num_nodes * sizeof (mde_cookie_t);
1295	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);
1296
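	/*
	 * Walk the "back" arc from the channel-endpoint to find the
	 * virtual-device node that owns this endpoint.
	 */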
1297	num_devs = md_scan_dag(mdp, mde, md_find_name(mdp, "virtual-device"),
1298	    md_find_name(mdp, "back"), listp);
1299	ASSERT(num_devs <= 1);
1300	if (num_devs <= 0) {
1301		DWARN("cnex_find_chan_dip:channel(0x%llx): "
1302		    "No virtual-device found\n", chan_id);
1303		goto fdip_exit;
1304	}
1305	if (md_get_prop_str(mdp, listp[0], "name", &md_name) != 0) {
1306		DWARN("cnex_find_chan_dip:channel(0x%llx): "
1307		    "name property not found\n", chan_id);
1308		goto fdip_exit;
1309	}
1310
1311	D1("cnex_find_chan_dip: channel(0x%llx): virtual-device "
1312	    "name property value = %s\n", chan_id, md_name);
1313
1314	if (md_get_prop_val(mdp, listp[0], "cfg-handle", &cfghdl) != 0) {
1315		DWARN("cnex_find_chan_dip:channel(0x%llx): virtual-device's "
1316		    "cfg-handle property not found\n", chan_id);
1317		goto fdip_exit;
1318	}
1319
1320	D1("cnex_find_chan_dip:channel(0x%llx): virtual-device cfg-handle "
1321	    " property value = 0x%x\n", chan_id, cfghdl);
1322
1323	for (cdip = ddi_get_child(dip); cdip != NULL;
1324	    cdip = ddi_get_next_sibling(cdip)) {
1325
1326		int *cnex_regspec;
1327		uint32_t reglen;
1328		char	*dev_name;
1329
1330		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip,
1331		    DDI_PROP_DONTPASS, "name",
1332		    &dev_name) != DDI_PROP_SUCCESS) {
1333			DWARN("cnex_find_chan_dip: name property not"
1334			    " found for dip(0x%p)\n", cdip);
1335			continue;
1336		}
1337		if (strcmp(md_name, dev_name) != 0) {
1338			ddi_prop_free(dev_name);
1339			continue;
1340		}
1341		ddi_prop_free(dev_name);
1342		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
1343		    DDI_PROP_DONTPASS, "reg",
1344		    &cnex_regspec, &reglen) != DDI_SUCCESS) {
1345			DWARN("cnex_find_chan_dip: reg property not"
1346			    " found for dip(0x%p)\n", cdip);
1347			continue;
1348		}
1349		if (*cnex_regspec == cfghdl) {
1350			D1("cnex_find_chan_dip:channel(0x%llx): found "
1351			    "dip(0x%p) drvname=%s\n", chan_id, cdip,
1352			    ddi_driver_name(cdip));
1353			ddi_prop_free(cnex_regspec);
1354			break;
1355		}
1356		ddi_prop_free(cnex_regspec);
1357	}
1358
1359fdip_exit:
1360	if (cdip == NULL) {
1361		/*
1362		 * If a virtual-device node exists but no matching dip was
1363		 * found, then for now print a DEBUG error message only.
1364		 */
1365		if (num_devs > 0) {
1366			DERR("cnex_find_chan_dip:channel(0x%llx): "
1367			    "No device found\n", chan_id);
1368		}
1369
1370		/* If no dip was found, return cnex device's dip. */
1371		cdip = dip;
1372	}
1373
1374	kmem_free(listp, listsz);
1375	D1("cnex_find_chan_dip:channel(0x%llx): returning dip=0x%p\n",
1376	    chan_id, cdip);
1377	return (cdip);
1378}
1379
1380/* -------------------------------------------------------------------------- */
1381