sunddi.c revision 7656:2621e50fdf4a
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/note.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/cred.h>
#include <sys/poll.h>
#include <sys/mman.h>
#include <sys/kmem.h>
#include <sys/model.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/open.h>
#include <sys/user.h>
#include <sys/t_lock.h>
#include <sys/vm.h>
#include <sys/stat.h>
#include <vm/hat.h>
#include <vm/seg.h>
#include <vm/seg_vn.h>
#include <vm/seg_dev.h>
#include <vm/as.h>
#include <sys/cmn_err.h>
#include <sys/cpuvar.h>
#include <sys/debug.h>
#include <sys/autoconf.h>
#include <sys/sunddi.h>
#include <sys/esunddi.h>
#include <sys/sunndi.h>
#include <sys/kstat.h>
#include <sys/conf.h>
#include <sys/ddi_impldefs.h>	/* include implementation structure defs */
#include <sys/ndi_impldefs.h>	/* include prototypes */
#include <sys/ddi_timer.h>
#include <sys/hwconf.h>
#include <sys/pathname.h>
#include <sys/modctl.h>
#include <sys/epm.h>
#include <sys/devctl.h>
#include <sys/callb.h>
#include <sys/cladm.h>
#include <sys/sysevent.h>
#include <sys/dacf_impl.h>
#include <sys/ddidevmap.h>
#include <sys/bootconf.h>
#include <sys/disp.h>
#include <sys/atomic.h>
#include <sys/promif.h>
#include <sys/instance.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/taskq.h>
#include <sys/devpolicy.h>
#include <sys/ctype.h>
#include <net/if.h>
#include <sys/rctl.h>

extern	pri_t	minclsyspri;

extern	rctl_hndl_t rc_project_locked_mem;
extern	rctl_hndl_t rc_zone_locked_mem;

#ifdef DEBUG
static int sunddi_debug = 0;
#endif /* DEBUG */

/* ddi_umem_unlock miscellaneous */

static	void	i_ddi_umem_unlock_thread_start(void);

static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
static	kthread_t	*ddi_umem_unlock_thread;
/*
 * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
 */
static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;


/*
 * DDI(Sun) Function and flag definitions:
 */

#if defined(__x86)
/*
 * Used to indicate which entries were chosen from a range.
 */
char	*chosen_reg = "chosen-reg";
#endif

/*
 * Function used to ring system console bell
 */
void (*ddi_console_bell_func)(clock_t duration);

/*
 * Creating register mappings and handling interrupts:
 */

/*
 * Generic ddi_map: Call parent to fulfill request...
 */

int
ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
    off_t len, caddr_t *addrp)
{
	dev_info_t *pdip;

	ASSERT(dp);
	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
	    dp, mp, offset, len, addrp));
}

/*
 * ddi_apply_range: (Called by nexi only.)
 * Apply ranges in parent node dp, to child regspec rp...
 */

int
ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
{
	return (i_ddi_apply_range(dp, rdip, rp));
}

int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	struct {
		int	bus;
		int	addr;
		int	size;
	} reg, *reglist;
	uint_t	length;
	int	rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}

void
ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;

	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
	mr.map_obj.rnumber = rnumber;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */

	(void) ddi_map(dip, &mr, offset, len, kaddrp);
	*kaddrp = (caddr_t)0;
#if defined(__x86)
	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
#endif
}

int
ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t offset, off_t len, caddr_t *vaddrp)
{
	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
}

/*
 * nullbusmap:	The DDI default bus_map entry point for nexi
 *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
 *		with no HAT/MMU layer to be programmed at this level.
 *
 *		If the call is to map by rnumber, return an error,
 *		otherwise pass anything else up the tree to my parent.
 */
int
nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t offset, off_t len, caddr_t *vaddrp)
{
	_NOTE(ARGUNUSED(rdip))
	if (mp->map_type == DDI_MT_RNUMBER)
		return (DDI_ME_UNSUPPORTED);

	return (ddi_map(dip, mp, offset, len, vaddrp));
}
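/*
 * Illustrative sketch (not part of this file's interfaces): a nexus driver
 * whose children need no reg/range translations -- a hypothetical "xxnex" --
 * can simply name nullbusmap as its bus_map entry so that rnumber requests
 * fail and everything else is punted up the tree:
 *
 *	static struct bus_ops xxnex_bus_ops = {
 *		BUSO_REV,
 *		nullbusmap,		(bus_map entry)
 *		...			(remaining bus_ops entries)
 *	};
 */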

/*
 * ddi_rnumber_to_regspec: Not for use by leaf drivers.
 *			   Only for use by nexi using the reg/range paradigm.
 */
struct regspec *
ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
{
	return (i_ddi_rnumber_to_regspec(dip, rnumber));
}


/*
 * Note that we allow the dip to be nil because we may be called
 * prior even to the instantiation of the devinfo tree itself - all
 * regular leaf and nexus drivers should always use a non-nil dip!
 *
 * We treat peek in a somewhat cavalier fashion .. assuming that we'll
 * simply get a synchronous fault as soon as we touch a missing address.
 *
 * Poke is rather more carefully handled because we might poke to a write
 * buffer, "succeed", then only find some time later that we got an
 * asynchronous fault that indicated that the address we were writing to
 * was not really backed by hardware.
 */

static int
i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
    void *addr, void *value_p)
{
	union {
		uint64_t	u64;
		uint32_t	u32;
		uint16_t	u16;
		uint8_t		u8;
	} peekpoke_value;

	peekpoke_ctlops_t peekpoke_args;
	uint64_t dummy_result;
	int rval;

	/* Note: size is assumed to be correct;  it is not checked. */
	peekpoke_args.size = size;
	peekpoke_args.dev_addr = (uintptr_t)addr;
	peekpoke_args.handle = NULL;
	peekpoke_args.repcount = 1;
	peekpoke_args.flags = 0;

	if (cmd == DDI_CTLOPS_POKE) {
		switch (size) {
		case sizeof (uint8_t):
			peekpoke_value.u8 = *(uint8_t *)value_p;
			break;
		case sizeof (uint16_t):
			peekpoke_value.u16 = *(uint16_t *)value_p;
			break;
		case sizeof (uint32_t):
			peekpoke_value.u32 = *(uint32_t *)value_p;
			break;
		case sizeof (uint64_t):
			peekpoke_value.u64 = *(uint64_t *)value_p;
			break;
		}
	}

	peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;

	if (devi != NULL)
		rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
		    &dummy_result);
	else
		rval = peekpoke_mem(cmd, &peekpoke_args);

	/*
	 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
	 */
	if ((cmd == DDI_CTLOPS_PEEK) && (value_p != NULL)) {
		switch (size) {
		case sizeof (uint8_t):
			*(uint8_t *)value_p = peekpoke_value.u8;
			break;
		case sizeof (uint16_t):
			*(uint16_t *)value_p = peekpoke_value.u16;
			break;
		case sizeof (uint32_t):
			*(uint32_t *)value_p = peekpoke_value.u32;
			break;
		case sizeof (uint64_t):
			*(uint64_t *)value_p = peekpoke_value.u64;
			break;
		}
	}

	return (rval);
}

/*
 * Keep ddi_peek() and ddi_poke() in case third parties are calling them.
 * They shouldn't be, but the 9F man pages more or less expose them.
 */
37133965Sjdpddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
37233965Sjdp{
37333965Sjdp	switch (size) {
37433965Sjdp	case sizeof (uint8_t):
37533965Sjdp	case sizeof (uint16_t):
37633965Sjdp	case sizeof (uint32_t):
37733965Sjdp	case sizeof (uint64_t):
37833965Sjdp		break;
37933965Sjdp	default:
38033965Sjdp		return (DDI_FAILURE);
38133965Sjdp	}
38233965Sjdp
38333965Sjdp	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
38433965Sjdp}
38533965Sjdp
38633965Sjdpint
38733965Sjdpddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
38833965Sjdp{
38933965Sjdp	switch (size) {
39033965Sjdp	case sizeof (uint8_t):
39133965Sjdp	case sizeof (uint16_t):
39233965Sjdp	case sizeof (uint32_t):
39333965Sjdp	case sizeof (uint64_t):
39433965Sjdp		break;
39533965Sjdp	default:
39633965Sjdp		return (DDI_FAILURE);
39733965Sjdp	}
39833965Sjdp
39933965Sjdp	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
40033965Sjdp}
40133965Sjdp
40233965Sjdpint
40333965Sjdpddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
40433965Sjdp{
40533965Sjdp	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
40633965Sjdp	    val_p));
40733965Sjdp}
40833965Sjdp
40933965Sjdpint
41033965Sjdpddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
41133965Sjdp{
41233965Sjdp	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
41333965Sjdp	    val_p));
41433965Sjdp}
41533965Sjdp
41633965Sjdpint
41733965Sjdpddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
41833965Sjdp{
41933965Sjdp	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
42033965Sjdp	    val_p));
42133965Sjdp}
42233965Sjdp
42333965Sjdpint
42433965Sjdpddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
42533965Sjdp{
42633965Sjdp	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
42733965Sjdp	    val_p));
42833965Sjdp}
42933965Sjdp
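/*
 * Illustrative sketch (not part of this file's interfaces): a leaf driver
 * probing a register it has already mapped might do the following; "csr" is
 * a hypothetical mapped register address:
 *
 *	int8_t val;
 *
 *	if (ddi_peek8(dip, (int8_t *)csr, &val) != DDI_SUCCESS) {
 *		cmn_err(CE_WARN, "xx: device not responding");
 *		return (DDI_FAILURE);
 *	}
 */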

/*
 * We need to separate the old interfaces from the new ones and leave them
 * in here for a while. Previous versions of the OS defined the new interfaces
 * to the old interfaces. This way we can fix things up so that we can
 * eventually remove these interfaces.
 * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
 * or earlier will actually have a reference to ddi_peekc in the binary.
 */
#ifdef _ILP32
int
ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
#endif /* _ILP32 */

int
ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

/*
 * We need to separate the old interfaces from the new ones and leave them
 * in here for a while. Previous versions of the OS defined the new interfaces
 * to the old interfaces. This way we can fix things up so that we can
 * eventually remove these interfaces.
 * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
 * or earlier will actually have a reference to ddi_pokec in the binary.
 */
#ifdef _ILP32
int
ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
#endif /* _ILP32 */

/*
 * ddi_peekpokeio() is used primarily by the mem drivers for moving
 * data to and from uio structures via peek and poke.  Note that we
 * use "internal" routines ddi_peek and ddi_poke to make this go
 * slightly faster, avoiding the call overhead ..
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;
	int8_t w8;
	size_t sz;
	int o;

	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
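/*
 * Illustrative sketch (not part of this file's interfaces): a memory-style
 * character driver's read(9E)/write(9E) path could hand the whole transfer
 * to ddi_peekpokeio(); "devaddr" is a hypothetical device address and "rw"
 * the uio direction supplied by the caller:
 *
 *	return (ddi_peekpokeio(devi, uio, rw, (caddr_t)devaddr,
 *	    (size_t)uio->uio_resid, sizeof (long)));
 */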

/*
 * These routines are used by drivers that do layered ioctls.
 * On sparc, they're implemented in assembler to avoid spilling
 * register windows in the common (copyin) case ..
 */
#if !defined(__sparc)
int
ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
{
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyin(buf, kernbuf, size));
}

int
ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
{
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyout(buf, kernbuf, size));
}
#endif	/* !__sparc */

/*
 * Conversions in nexus pagesize units.  We don't duplicate the
 * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
 * routines anyway.
 */
unsigned long
ddi_btop(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
	return (pages);
}

unsigned long
ddi_btopr(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
	return (pages);
}

unsigned long
ddi_ptob(dev_info_t *dip, unsigned long pages)
{
	unsigned long bytes;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
	return (bytes);
}

unsigned int
ddi_enter_critical(void)
{
	return ((uint_t)spl7());
}

void
ddi_exit_critical(unsigned int spl)
{
	splx((int)spl);
}
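/*
 * Illustrative sketch (not part of this file's interfaces): the critical-
 * section pair is intended to bracket a very short, non-blocking sequence,
 * e.g. two register writes the hardware expects to see back to back:
 *
 *	unsigned int s;
 *
 *	s = ddi_enter_critical();
 *	...two dependent register writes, no blocking calls...
 *	ddi_exit_critical(s);
 */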

/*
 * Nexus ctlops punter
 */

#if !defined(__sparc)
/*
 * Request bus_ctl parent to handle a bus_ctl request
 *
 * (The sparc version is in sparc_ddi.s)
 */
int
ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
{
	int (*fp)();

	if (!d || !r)
		return (DDI_FAILURE);

	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
		return (DDI_FAILURE);

	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
	return ((*fp)(d, r, op, a, v));
}

#endif

/*
 * DMA/DVMA setup
 */

#if defined(__sparc)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,	/* addr_t dlim_addr_lo */
	(uint_t)-1,	/* addr_t dlim_addr_hi */
	(uint_t)-1,	/* uint_t dlim_cntr_max */
	(uint_t)1,	/* uint_t dlim_burstsizes */
	(uint_t)1,	/* uint_t dlim_minxfer */
	0		/* uint_t dlim_dmaspeed */
};
#elif defined(__x86)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,		/* addr_t dlim_addr_lo */
	(uint_t)0xffffff,	/* addr_t dlim_addr_hi */
	(uint_t)0,		/* uint_t dlim_cntr_max */
	(uint_t)0x00000001,	/* uint_t dlim_burstsizes */
	(uint_t)DMA_UNIT_8,	/* uint_t dlim_minxfer */
	(uint_t)0,		/* uint_t dlim_dmaspeed */
	(uint_t)0x86<<24+0,	/* uint_t dlim_version */
	(uint_t)0xffff,		/* uint_t dlim_adreg_max */
	(uint_t)0xffff,		/* uint_t dlim_ctreg_max */
	(uint_t)512,		/* uint_t dlim_granular */
	(int)1,			/* int dlim_sgllen */
	(uint_t)0xffffffff	/* uint_t dlim_reqsizes */
};

#endif

int
ddi_dma_setup(dev_info_t *dip, struct ddi_dma_req *dmareqp,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	struct bus_ops *bop;
#if defined(__sparc)
	auto ddi_dma_lim_t dma_lim;

	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *dmareqp->dmar_limits;
	}
	dmareqp->dmar_limits = &dma_lim;
#endif
#if defined(__x86)
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0)
		return (DDI_FAILURE);
#endif

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;
	return ((*funcp)(dip, dip, dmareqp, handlep));
}

int
ddi_dma_addr_setup(dev_info_t *dip, struct as *as, caddr_t addr, size_t len,
    uint_t flags, int (*waitfp)(), caddr_t arg,
    ddi_dma_lim_t *limits, ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	if (len == 0) {
		return (DDI_DMA_NOMAPPING);
	}
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}

int
ddi_dma_buf_setup(dev_info_t *dip, struct buf *bp, uint_t flags,
    int (*waitfp)(), caddr_t arg, ddi_dma_lim_t *limits,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}

#if !defined(__sparc)
/*
 * Request bus_dma_ctl parent to fiddle with a dma request.
 *
 * (The sparc version is in sparc_subr.s)
 */
int
ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	int (*fp)();

	dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
}
#endif

/*
 * For all DMA control functions, call the DMA control
 * routine and return status.
 *
 * Just plain assume that the parent is to be called.
 * If a nexus driver or a thread outside the framework
 * of a nexus driver or a leaf driver calls these functions,
 * it is up to them to deal with the fact that the parent's
 * bus_dma_ctl function will be the first one called.
 */

#define	HD	((ddi_dma_impl_t *)h)->dmai_rdip

int
ddi_dma_kvaddrp(ddi_dma_handle_t h, off_t off, size_t len, caddr_t *kp)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_KVADDR, &off, &len, kp, 0));
}

int
ddi_dma_htoc(ddi_dma_handle_t h, off_t o, ddi_dma_cookie_t *c)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_HTOC, &o, 0, (caddr_t *)c, 0));
}

int
ddi_dma_coff(ddi_dma_handle_t h, ddi_dma_cookie_t *c, off_t *o)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_COFF,
	    (off_t *)c, 0, (caddr_t *)o, 0));
}

int
ddi_dma_movwin(ddi_dma_handle_t h, off_t *o, size_t *l, ddi_dma_cookie_t *c)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_MOVWIN, o,
	    l, (caddr_t *)c, 0));
}

int
ddi_dma_curwin(ddi_dma_handle_t h, off_t *o, size_t *l)
{
	if ((((ddi_dma_impl_t *)h)->dmai_rflags & DDI_DMA_PARTIAL) == 0)
		return (DDI_FAILURE);
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_REPWIN, o, l, 0, 0));
}

int
ddi_dma_nextwin(ddi_dma_handle_t h, ddi_dma_win_t win,
    ddi_dma_win_t *nwin)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN, (off_t *)&win, 0,
	    (caddr_t *)nwin, 0));
}

int
ddi_dma_nextseg(ddi_dma_win_t win, ddi_dma_seg_t seg, ddi_dma_seg_t *nseg)
{
	ddi_dma_handle_t h = (ddi_dma_handle_t)win;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG, (off_t *)&win,
	    (size_t *)&seg, (caddr_t *)nseg, 0));
}

#if (defined(__i386) && !defined(__amd64)) || defined(__sparc)
/*
 * This routine is Obsolete and should be removed from ALL architectures
 * in a future release of Solaris.
 *
 * It is deliberately NOT ported to amd64; please fix the code that
 * depends on this routine to use ddi_dma_nextcookie(9F).
 *
 * NOTE: even though we fixed the pointer-through-a-32-bit-param issue (the
 * fix is a side effect of some other cleanup), we're still not going to
 * support this interface on x64.
 */
int
ddi_dma_segtocookie(ddi_dma_seg_t seg, off_t *o, off_t *l,
    ddi_dma_cookie_t *cookiep)
{
	ddi_dma_handle_t h = (ddi_dma_handle_t)seg;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SEGTOC, o, (size_t *)l,
	    (caddr_t *)cookiep, 0));
}
#endif	/* (__i386 && !__amd64) || __sparc */

#if !defined(__sparc)

/*
 * The SPARC versions of these routines are done in assembler to
 * save register windows, so they're in sparc_subr.s.
 */

int
ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
	struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, struct ddi_dma_req *,
	    ddi_dma_handle_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_map;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_map;
	return ((*funcp)(hdip, rdip, dmareqp, handlep));
}

int
ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
	return ((*funcp)(hdip, rdip, attr, waitfp, arg, handlep));
}

int
ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
	return ((*funcp)(hdip, rdip, handlep));
}

int
ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
	return ((*funcp)(hdip, rdip, handle, dmareq, cp, ccountp));
}

int
ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
	return ((*funcp)(hdip, rdip, handle));
}


int
ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    off_t, size_t, uint_t);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(hdip, rdip, handle, off, len, cache_flags));
}

int
ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_win;
	return ((*funcp)(hdip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp));
}

int
ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
	    size_t, uint_t);

	/*
	 * the DMA nexus driver will set DMP_NOSYNC if the
	 * platform does not require any sync operation. For
	 * example if the memory is uncached or consistent
	 * and without any I/O write buffers involved.
	 */
	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
		return (DDI_SUCCESS);

	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(hdip, dip, h, o, l, whom));
}
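/*
 * Illustrative sketch (not part of this file's interfaces): after a device
 * has deposited data into a bound DMA buffer, a driver normally syncs for
 * the kernel before looking at it ("handle" and "len" are the driver's own):
 *
 *	if (ddi_dma_sync(handle, 0, len, DDI_DMA_SYNC_FORKERNEL) !=
 *	    DDI_SUCCESS)
 *		return (DDI_FAILURE);
 */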

int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	funcp = DEVI(dip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(hdip, dip, h));
}

#endif	/* !__sparc */

int
ddi_dma_free(ddi_dma_handle_t h)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_FREE, 0, 0, 0, 0));
}

int
ddi_iopb_alloc(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t len, caddr_t *iopbp)
{
	ddi_dma_lim_t defalt;
	size_t size = len;

	if (!limp) {
		defalt = standard_limits;
		limp = &defalt;
	}
	return (i_ddi_mem_alloc_lim(dip, limp, size, 0, 0, 0,
	    iopbp, NULL, NULL));
}

void
ddi_iopb_free(caddr_t iopb)
{
	i_ddi_mem_free(iopb, NULL);
}

int
ddi_mem_alloc(dev_info_t *dip, ddi_dma_lim_t *limits, uint_t length,
	uint_t flags, caddr_t *kaddrp, uint_t *real_length)
{
	ddi_dma_lim_t defalt;
	size_t size = length;

	if (!limits) {
		defalt = standard_limits;
		limits = &defalt;
	}
	return (i_ddi_mem_alloc_lim(dip, limits, size, flags & 0x1,
	    1, 0, kaddrp, real_length, NULL));
}

void
ddi_mem_free(caddr_t kaddr)
{
	i_ddi_mem_free(kaddr, NULL);
}

/*
 * DMA attributes, alignment, burst sizes, and transfer minimums
 */
int
ddi_dma_get_attr(ddi_dma_handle_t handle, ddi_dma_attr_t *attrp)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (attrp == NULL)
		return (DDI_FAILURE);
	*attrp = dimp->dmai_attr;
	return (DDI_SUCCESS);
}

int
ddi_dma_burstsizes(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (!dimp)
		return (0);
	else
		return (dimp->dmai_burstsizes);
}

int
ddi_dma_devalign(ddi_dma_handle_t handle, uint_t *alignment, uint_t *mineffect)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (!dimp || !alignment || !mineffect)
		return (DDI_FAILURE);
	if (!(dimp->dmai_rflags & DDI_DMA_SBUS_64BIT)) {
		*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
	} else {
		if (dimp->dmai_burstsizes & 0xff0000) {
			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes >> 16);
		} else {
			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
		}
	}
	*mineffect = dimp->dmai_minxfer;
	return (DDI_SUCCESS);
}

int
ddi_iomin(dev_info_t *a, int i, int stream)
{
	int r;

	/*
	 * Make sure that the initial value is sane
	 */
	if (i & (i - 1))
		return (0);
	if (i == 0)
		i = (stream) ? 4 : 1;

	r = ddi_ctlops(a, a,
	    DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
	if (r != DDI_SUCCESS || (i & (i - 1)))
		return (0);
	return (i);
}

/*
 * Given two DMA attribute structures, apply the attributes
 * of one to the other, following the rules of attributes
 * and the wishes of the caller.
 *
 * The rules of DMA attribute structures are that you cannot
 * make things *less* restrictive as you apply one set
 * of attributes to another.
 *
 */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align,  mod->dma_attr_align);
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}
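/*
 * Worked example (illustrative only): merging an attr whose dma_attr_addr_hi
 * is 0xffffffff and dma_attr_align is 4 with a modifier whose addr_hi is
 * 0xffffff and align is 8 keeps the more restrictive value in each field:
 * addr_hi becomes 0xffffff (the MIN), align becomes 8 (the MAX), and the
 * burstsize masks are simply intersected.
 */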

/*
 * mmap/segmap interface:
 */

/*
 * ddi_segmap:		set up the default segment driver. Calls the driver's
 *			XXmmap routine to validate the range to be mapped.
 *			Returns ENXIO if the range is not valid.  Creates
 *			a seg_dev segment that contains all of the
 *			necessary information and will reference the
 *			default segment driver routines. It returns zero
 *			on success or non-zero on failure.
 */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}

/*
 * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
 *			drivers. Allows each successive parent to resolve
 *			address translations and add its mappings to the
 *			mapping list supplied in the page structure. It
 *			returns zero on success or non-zero on failure.
 */

int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}

/*
 * ddi_device_mapping_check:	Called from ddi_segmap_setup.
 *	Invokes platform specific DDI to determine whether attributes specified
 *	in attr(9s) are valid for the region of memory that will be made
 *	available for direct access to user process via the mmap(2) system call.
 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}


/*
 * Property functions:	 See also, ddipropdefs.h.
 *
 * These functions are the framework for the property functions,
 * i.e. they support software defined properties.  All implementation
 * specific property handling (i.e.: self-identifying devices and
 * PROM defined properties are handled in the implementation specific
 * functions (defined in ddi_implfuncs.h).
 */

/*
 * nopropop:	Shouldn't be called, right?
 */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}

#ifdef	DDI_PROP_DEBUG
int ddi_prop_debug_flag = 0;

int
ddi_prop_debug(int enable)
{
	int prev = ddi_prop_debug_flag;

	if ((enable != 0) || (prev != 0))
		printf("ddi_prop_debug: debugging %s\n",
		    enable ? "enabled" : "disabled");
	ddi_prop_debug_flag = enable;
	return (prev);
}

#endif	/* DDI_PROP_DEBUG */

/*
 * Search a property list for a match, if found return pointer
 * to matching prop struct, else return NULL.
 */

ddi_prop_t *
i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
{
	ddi_prop_t	*propp;

	/*
	 * find the property in child's devinfo:
	 * Search order defined by this search function is first matching
	 * property with input dev == DDI_DEV_T_ANY matching any dev or
	 * dev == propp->prop_dev, name == propp->name, and the correct
	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
	 * value made it this far then it implies a DDI_DEV_T_ANY search.
	 */
	if (dev == DDI_DEV_T_NONE)
		dev = DDI_DEV_T_ANY;

	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
			continue;

		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		return (propp);
	}

	return ((ddi_prop_t *)0);
}

/*
 * Search for property within devnames structures
 */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t		major;
	struct devnames	*dnp;
	ddi_prop_t	*propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		if ((!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}

static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";

/*
 * ddi_prop_search_global:
 *	Search the global property list within devnames
 *	for the named property.  Return the encoded value.
 */
static int
i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
    void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	caddr_t		buffer;

	propp =  i_ddi_search_global_prop(dev, name, flags);

	/* Property NOT found, bail */
	if (propp == (ddi_prop_t *)0)
		return (DDI_PROP_NOT_FOUND);

	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
		return (DDI_PROP_UNDEFINED);

	if ((buffer = kmem_alloc(propp->prop_len,
	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Return the encoded data
	 */
	*(caddr_t *)valuep = buffer;
	*lengthp = propp->prop_len;
	bcopy(propp->prop_val, buffer, propp->prop_len);

	return (DDI_PROP_SUCCESS);
}

/*
 * ddi_prop_search_common:	Lookup and return the encoded value
 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer;
	caddr_t		prealloc = NULL;
	int		plength = 0;
	dev_info_t	*pdip;
	int		(*bop)();

	/*CONSTANTCONDITION*/
	while (1)  {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 *	1. driver defined properties
		 *	2. system defined properties
		 *	3. driver global properties
		 *	4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0)	{

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP))  {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL)  {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL)  {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node())  {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		dip = pdip;
	}
	/*NOTREACHED*/
}


/*
 * ddi_prop_op: The basic property operator for drivers.
 *
 * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
 *
 *	prop_op			valuep
 *	------			------
 *
 *	PROP_LEN		<unused>
 *
 *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
 *
 *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
 *				address of allocated buffer, if successful)
 */
int
ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	i;

	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);

1799	/*
1800	 * If this was originally an LDI prop lookup then we bail here.
1801	 * The reason is that the LDI property lookup interfaces first call
1802	 * a drivers prop_op() entry point to allow it to override
1803	 * a driver's prop_op() entry point to allow it to override
1804	 * properties.  But if we've made it here, then the driver hasn't
1805	 * overridden any properties.  We don't want to continue with the
1806	 * property search here because we don't have any type information.
1807	 * call the typed property interfaces to look up the property.
1808	 */
1809	if (mod_flags & DDI_PROP_DYNAMIC)
1810		return (DDI_PROP_NOT_FOUND);
1811
1812	/*
1813	 * check for pre-typed property consumer asking for typed property:
1814	 * see e_ddi_getprop_int64.
1815	 */
1816	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1817		mod_flags |= DDI_PROP_TYPE_INT64;
1818	mod_flags |= DDI_PROP_TYPE_ANY;
1819
1820	i = ddi_prop_search_common(dev, dip, prop_op,
1821	    mod_flags, name, valuep, (uint_t *)lengthp);
1822	if (i == DDI_PROP_FOUND_1275)
1823		return (DDI_PROP_SUCCESS);
1824	return (i);
1825}
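
/*
 * Illustrative sketch (not part of this file): a driver that does not
 * maintain any dynamic properties of its own typically sets the prop_op(9E)
 * entry in its cb_ops to ddi_prop_op, or wraps it as below.  The xx_ driver
 * prefix is hypothetical.
 *
 *	static int
 *	xx_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
 *	    int mod_flags, char *name, caddr_t valuep, int *lengthp)
 *	{
 *		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
 *		    name, valuep, lengthp));
 *	}
 */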
1826
1827/*
1828 * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1829 * maintain size in number of blksize blocks.  Provides a dynamic property
1830 * implementation for size oriented properties based on nblocks64 and blksize
1831 * values passed in by the driver.  Falls back to ddi_prop_op if nblocks64
1832 * is too large.  This interface should not be used with an nblocks64 value
1833 * that represents the driver's notion of "unknown"; if nblocks is unknown,
1834 * use ddi_prop_op.
1835 */
1836int
1837ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1838    int mod_flags, char *name, caddr_t valuep, int *lengthp,
1839    uint64_t nblocks64, uint_t blksize)
1840{
1841	uint64_t size64;
1842	int	blkshift;
1843
1844	/* convert block size to shift value */
1845	ASSERT(BIT_ONLYONESET(blksize));
1846	blkshift = highbit(blksize) - 1;
1847
1848	/*
1849	 * There is no point in supporting nblocks64 values that don't have
1850	 * an accurate uint64_t byte count representation.
1851	 */
1852	if (nblocks64 >= (UINT64_MAX >> blkshift))
1853		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1854		    name, valuep, lengthp));
1855
1856	size64 = nblocks64 << blkshift;
1857	return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
1858	    name, valuep, lengthp, size64, blksize));
1859}
1860
1861/*
1862 * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1863 */
1864int
1865ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1866    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
1867{
1868	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
1869	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
1870}
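
/*
 * Illustrative sketch (not part of this file): a block driver that knows its
 * capacity can answer the size(9P) property requests dynamically by routing
 * its prop_op(9E) entry through ddi_prop_op_nblocks.  The xx_ prefix and the
 * xx_get_nblocks() helper are hypothetical.
 *
 *	static int
 *	xx_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
 *	    int mod_flags, char *name, caddr_t valuep, int *lengthp)
 *	{
 *		uint64_t nblocks64 = xx_get_nblocks(dip);
 *
 *		return (ddi_prop_op_nblocks(dev, dip, prop_op, mod_flags,
 *		    name, valuep, lengthp, nblocks64));
 *	}
 */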
1871
1872/*
1873 * ddi_prop_op_size_blksize: The basic property operator for block drivers that
1874 * maintain size in bytes.  Provides a dynamic property implementation for
1875 * size oriented properties based on the size64 value and blksize passed in
1876 * by the driver.  Falls back to ddi_prop_op if size64 is too large.  This
1877 * interface should not be used with a size64 value that represents the
1878 * driver's notion of "unknown"; if size is unknown, use ddi_prop_op.
1879 *
1880 * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1881 * integers. While the most likely interface to request them ([bc]devi_size)
1882 * is declared int (signed), there is no enforcement of this, which means we
1883 * can't enforce limitations here without risking regression.
1884 */
1885int
1886ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1887    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
1888    uint_t blksize)
1889{
1890	uint64_t nblocks64;
1891	int	callers_length;
1892	caddr_t	buffer;
1893	int	blkshift;
1894
1895	/*
1896	 * This is a kludge to support capture of size(9P) pure dynamic
1897	 * properties in snapshots for non-cmlb code (without exposing
1898	 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
1899	 * should be removed.
1900	 */
1901	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
1902		static i_ddi_prop_dyn_t prop_dyn_size[] = {
1903		    {"Size",		DDI_PROP_TYPE_INT64,	S_IFCHR},
1904		    {"Nblocks",		DDI_PROP_TYPE_INT64,	S_IFBLK},
1905		    {NULL}
1906		};
1907		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
1908	}
1909
1910	/* convert block size to shift value */
1911	ASSERT(BIT_ONLYONESET(blksize));
1912	blkshift = highbit(blksize) - 1;
1913
1914	/* compute DEV_BSIZE nblocks value */
1915	nblocks64 = size64 >> blkshift;
1916
1917	/* get callers length, establish length of our dynamic properties */
1918	callers_length = *lengthp;
1919
1920	if (strcmp(name, "Nblocks") == 0)
1921		*lengthp = sizeof (uint64_t);
1922	else if (strcmp(name, "Size") == 0)
1923		*lengthp = sizeof (uint64_t);
1924	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
1925		*lengthp = sizeof (uint32_t);
1926	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
1927		*lengthp = sizeof (uint32_t);
1928	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
1929		*lengthp = sizeof (uint32_t);
1930	else {
1931		/* fallback to ddi_prop_op */
1932		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1933		    name, valuep, lengthp));
1934	}
1935
1936	/* service request for the length of the property */
1937	if (prop_op == PROP_LEN)
1938		return (DDI_PROP_SUCCESS);
1939
1940	switch (prop_op) {
1941	case PROP_LEN_AND_VAL_ALLOC:
1942		if ((buffer = kmem_alloc(*lengthp,
1943		    (mod_flags & DDI_PROP_CANSLEEP) ?
1944		    KM_SLEEP : KM_NOSLEEP)) == NULL)
1945			return (DDI_PROP_NO_MEMORY);
1946
1947		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
1948		break;
1949
1950	case PROP_LEN_AND_VAL_BUF:
1951		/* the length of the property and the request must match */
1952		if (callers_length != *lengthp)
1953			return (DDI_PROP_INVAL_ARG);
1954
1955		buffer = valuep;		/* get callers buf ptr */
1956		break;
1957
1958	default:
1959		return (DDI_PROP_INVAL_ARG);
1960	}
1961
1962	/* transfer the value into the buffer */
1963	if (strcmp(name, "Nblocks") == 0)
1964		*((uint64_t *)buffer) = nblocks64;
1965	else if (strcmp(name, "Size") == 0)
1966		*((uint64_t *)buffer) = size64;
1967	else if (strcmp(name, "nblocks") == 0)
1968		*((uint32_t *)buffer) = (uint32_t)nblocks64;
1969	else if (strcmp(name, "size") == 0)
1970		*((uint32_t *)buffer) = (uint32_t)size64;
1971	else if (strcmp(name, "blksize") == 0)
1972		*((uint32_t *)buffer) = (uint32_t)blksize;
1973	return (DDI_PROP_SUCCESS);
1974}
1975
1976/*
1977 * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1978 */
1979int
1980ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1981    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
1982{
1983	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
1984	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
1985}
1986
1987/*
1988 * Variable length props...
1989 */
1990
1991/*
1992 * ddi_getlongprop:	Get variable length property len+val into a buffer
1993 *		allocated by property provider via kmem_alloc. Requester
1994 *		is responsible for freeing returned property via kmem_free.
1995 *
1996 *	Arguments:
1997 *
1998 *	dev_t:	Input:	dev_t of property.
1999 *	dip:	Input:	dev_info_t pointer of child.
2000 *	flags:	Input:	Possible flag modifiers are:
2001 *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
2002 *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
2003 *	name:	Input:	name of property.
2004 *	valuep:	Output:	Addr of callers buffer pointer.
2005 *	lengthp:Output:	*lengthp will contain prop length on exit.
2006 *
2007 *	Possible Returns:
2008 *
2009 *		DDI_PROP_SUCCESS:	Prop found and returned.
2010 *		DDI_PROP_NOT_FOUND:	Prop not found
2011 *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
2012 *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
2013 */
2014
2015int
2016ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
2017    char *name, caddr_t valuep, int *lengthp)
2018{
2019	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
2020	    flags, name, valuep, lengthp));
2021}
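
/*
 * Illustrative sketch (not part of this file): typical ddi_getlongprop use.
 * The property name is hypothetical.  Note that the caller must free the
 * buffer allocated on its behalf, using the returned length.
 *
 *	caddr_t	val;
 *	int	len;
 *
 *	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
 *	    "example-prop", (caddr_t)&val, &len) == DDI_PROP_SUCCESS) {
 *		... use len bytes at val ...
 *		kmem_free(val, len);
 *	}
 */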
2022
2023/*
2024 *
2025 * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
2026 *				buffer. (no memory allocation by provider).
2027 *
2028 *	dev_t:	Input:	dev_t of property.
2029 *	dip:	Input:	dev_info_t pointer of child.
2030 *	flags:	Input:	DDI_PROP_DONTPASS or NULL
2031 *	name:	Input:	name of property
2032 *	valuep:	Input:	ptr to callers buffer.
2033 *	lengthp:I/O:	ptr to length of callers buffer on entry,
2034 *			actual length of property on exit.
2035 *
2036 *	Possible returns:
2037 *
2038 *		DDI_PROP_SUCCESS	Prop found and returned
2039 *		DDI_PROP_NOT_FOUND	Prop not found
2040 *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
2041 *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
2042 *					no value returned, but actual prop
2043 *					length returned in *lengthp
2044 *
2045 */
2046
2047int
2048ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
2049    char *name, caddr_t valuep, int *lengthp)
2050{
2051	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2052	    flags, name, valuep, lengthp));
2053}
2054
2055/*
2056 * Integer/boolean sized props.
2057 *
2058 * The call returns the value only: the found boolean or int sized prop
2059 * value, or defvalue if the prop is not found, has the wrong length, or
2060 * is explicitly undefined.  The only flag honored is DDI_PROP_DONTPASS.
2061 *
2062 * By convention, this interface returns boolean (0) sized properties
2063 * as value (int)1.
2064 *
2065 * This never returns an error; if the property is not found or specifically
2066 * undefined, the input `defvalue' is returned.
2067 */
2068
2069int
2070ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
2071{
2072	int	propvalue = defvalue;
2073	int	proplength = sizeof (int);
2074	int	error;
2075
2076	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2077	    flags, name, (caddr_t)&propvalue, &proplength);
2078
2079	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
2080		propvalue = 1;
2081
2082	return (propvalue);
2083}
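
/*
 * Illustrative sketch (not part of this file): because ddi_getprop never
 * returns an error, callers simply supply the default inline.  The property
 * name and default value are hypothetical.
 *
 *	int burst = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
 *	    "example-burst-size", 64);
 */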
2084
2085/*
2086 * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
2087 * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
2088 */
2089
2090int
2091ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
2092{
2093	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
2094}
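
/*
 * Illustrative sketch (not part of this file): pairing ddi_getproplen with
 * ddi_getlongprop_buf lets the caller size and own the buffer itself.  The
 * property name is hypothetical; buflen remembers the allocation size since
 * *lengthp is overwritten with the actual property length on success.
 *
 *	int	len, buflen;
 *	caddr_t	buf;
 *
 *	if (ddi_getproplen(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
 *	    "example-prop", &len) == DDI_PROP_SUCCESS) {
 *		buflen = len;
 *		buf = kmem_alloc(buflen, KM_SLEEP);
 *		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
 *		    "example-prop", buf, &len) == DDI_PROP_SUCCESS) {
 *			... use len bytes at buf ...
 *		}
 *		kmem_free(buf, buflen);
 *	}
 */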
2095
2096/*
2097 * Allocate a struct prop_driver_data, along with 'size' bytes
2098 * for decoded property data.  This structure is freed by
2099 * calling ddi_prop_free(9F).
2100 */
2101static void *
2102ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
2103{
2104	struct prop_driver_data *pdd;
2105
2106	/*
2107	 * Allocate a structure with enough memory to store the decoded data.
2108	 */
2109	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
2110	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
2111	pdd->pdd_prop_free = prop_free;
2112
2113	/*
2114	 * Return a pointer to the location to put the decoded data.
2115	 */
2116	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
2117}
2118
2119/*
2120 * Allocate the memory needed to store the encoded data in the property
2121 * handle.
2122 */
2123static int
2124ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
2125{
2126	/*
2127	 * If size is zero, then set data to NULL and size to 0.  This
2128	 * is a boolean property.
2129	 */
2130	if (size == 0) {
2131		ph->ph_size = 0;
2132		ph->ph_data = NULL;
2133		ph->ph_cur_pos = NULL;
2134		ph->ph_save_pos = NULL;
2135	} else {
2136		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
2137			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
2138			if (ph->ph_data == NULL)
2139				return (DDI_PROP_NO_MEMORY);
2140		} else
2141			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
2142		ph->ph_size = size;
2143		ph->ph_cur_pos = ph->ph_data;
2144		ph->ph_save_pos = ph->ph_data;
2145	}
2146	return (DDI_PROP_SUCCESS);
2147}
2148
2149/*
2150 * Free the space allocated by the lookup routines.  Each lookup routine
2151 * returns a pointer to the decoded data to the driver.  The driver then
2152 * passes this pointer back to us.  This data actually lives in a struct
2153 * prop_driver_data.  We use negative indexing to find the beginning of
2154 * the structure and then free the entire structure using the size and
2155 * the free routine stored in the structure.
2156 */
2157void
2158ddi_prop_free(void *datap)
2159{
2160	struct prop_driver_data *pdd;
2161
2162	/*
2163	 * Get the structure
2164	 */
2165	pdd = (struct prop_driver_data *)
2166	    ((caddr_t)datap - sizeof (struct prop_driver_data));
2167	/*
2168	 * Call the free routine to free it
2169	 */
2170	(*pdd->pdd_prop_free)(pdd);
2171}
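
/*
 * Layout of the allocation behind the pointer handed to the driver
 * (a rough sketch):
 *
 *	+--------------------------+  <- pdd (start of the kmem allocation)
 *	| struct prop_driver_data  |     pdd_size, pdd_prop_free
 *	+--------------------------+  <- pointer returned to the driver
 *	| decoded property data    |     ('size' bytes)
 *	+--------------------------+
 *
 * ddi_prop_free() backs up by sizeof (struct prop_driver_data) to recover
 * pdd and then invokes the stored free routine on the whole allocation.
 */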
2172
2173/*
2174 * Free the data associated with an array of ints,
2175 * allocated with ddi_prop_decode_alloc().
2176 */
2177static void
2178ddi_prop_free_ints(struct prop_driver_data *pdd)
2179{
2180	kmem_free(pdd, pdd->pdd_size);
2181}
2182
2183/*
2184 * Free a single string property or a single string contained within
2185 * the argv style return value of an array of strings.
2186 */
2187static void
2188ddi_prop_free_string(struct prop_driver_data *pdd)
2189{
2190	kmem_free(pdd, pdd->pdd_size);
2191
2192}
2193
2194/*
2195 * Free an array of strings.
2196 */
2197static void
2198ddi_prop_free_strings(struct prop_driver_data *pdd)
2199{
2200	kmem_free(pdd, pdd->pdd_size);
2201}
2202
2203/*
2204 * Free the data associated with an array of bytes.
2205 */
2206static void
2207ddi_prop_free_bytes(struct prop_driver_data *pdd)
2208{
2209	kmem_free(pdd, pdd->pdd_size);
2210}
2211
2212/*
2213 * Reset the current location pointer in the property handle to the
2214 * beginning of the data.
2215 */
2216void
2217ddi_prop_reset_pos(prop_handle_t *ph)
2218{
2219	ph->ph_cur_pos = ph->ph_data;
2220	ph->ph_save_pos = ph->ph_data;
2221}
2222
2223/*
2224 * Save the position that the current location pointer in the property
2225 * handle points to.
2226 */
2227void
2228ddi_prop_save_pos(prop_handle_t *ph)
2229{
2230	ph->ph_save_pos = ph->ph_cur_pos;
2231}
2232
2233/*
2234 * Restore the current location pointer in the property handle to the saved position.
2235 */
2236void
2237ddi_prop_restore_pos(prop_handle_t *ph)
2238{
2239	ph->ph_cur_pos = ph->ph_save_pos;
2240}
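
/*
 * Illustrative sketch (not part of this file): a decode routine that must
 * look ahead can bracket a trial decode with the save/restore pair, rewinding
 * if the element does not decode cleanly.  The surrounding context (ph, tmp)
 * is assumed.
 *
 *	ddi_prop_save_pos(ph);
 *	if (DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp) < DDI_PROP_RESULT_OK)
 *		ddi_prop_restore_pos(ph);
 */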
2241
2242/*
2243 * Property encode/decode functions
2244 */
2245
2246/*
2247 * Decode a single integer property
2248 */
2249static int
2250ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
2251{
2252	int	i;
2253	int	tmp;
2254
2255	/*
2256	 * If there is nothing to decode return an error
2257	 */
2258	if (ph->ph_size == 0)
2259		return (DDI_PROP_END_OF_DATA);
2260
2261	/*
2262	 * Decode the property as a single integer and return it
2263	 * in data if we were able to decode it.
2264	 */
2265	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
2266	if (i < DDI_PROP_RESULT_OK) {
2267		switch (i) {
2268		case DDI_PROP_RESULT_EOF:
2269			return (DDI_PROP_END_OF_DATA);
2270
2271		case DDI_PROP_RESULT_ERROR:
2272			return (DDI_PROP_CANNOT_DECODE);
2273		}
2274	}
2275
2276	*(int *)data = tmp;
2277	*nelements = 1;
2278	return (DDI_PROP_SUCCESS);
2279}
2280
2281/*
2282 * Decode a single 64 bit integer property
2283 */
2284static int
2285ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
2286{
2287	int	i;
2288	int64_t	tmp;
2289
2290	/*
2291	 * If there is nothing to decode return an error
2292	 */
2293	if (ph->ph_size == 0)
2294		return (DDI_PROP_END_OF_DATA);
2295
2296	/*
2297	 * Decode the property as a single integer and return it
2298	 * in data if we were able to decode it.
2299	 */
2300	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
2301	if (i < DDI_PROP_RESULT_OK) {
2302		switch (i) {
2303		case DDI_PROP_RESULT_EOF:
2304			return (DDI_PROP_END_OF_DATA);
2305
2306		case DDI_PROP_RESULT_ERROR:
2307			return (DDI_PROP_CANNOT_DECODE);
2308		}
2309	}
2310
2311	*(int64_t *)data = tmp;
2312	*nelements = 1;
2313	return (DDI_PROP_SUCCESS);
2314}
2315
2316/*
2317 * Decode an array of integers property
2318 */
2319static int
2320ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
2321{
2322	int	i;
2323	int	cnt = 0;
2324	int	*tmp;
2325	int	*intp;
2326	int	n;
2327
2328	/*
2329	 * Figure out how many array elements there are by going through the
2330	 * data without decoding it first and counting.
2331	 */
2332	for (;;) {
2333		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
2334		if (i < 0)
2335			break;
2336		cnt++;
2337	}
2338
2339	/*
2340	 * If there are no elements return an error
2341	 */
2342	if (cnt == 0)
2343		return (DDI_PROP_END_OF_DATA);
2344
2345	/*
2346	 * If we cannot skip through the data, we cannot decode it
2347	 */
2348	if (i == DDI_PROP_RESULT_ERROR)
2349		return (DDI_PROP_CANNOT_DECODE);
2350
2351	/*
2352	 * Reset the data pointer to the beginning of the encoded data
2353	 */
2354	ddi_prop_reset_pos(ph);
2355
2356	/*
2357	 * Allocate memory to store the decoded value in.
2358	 */
2359	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
2360	    ddi_prop_free_ints);
2361
2362	/*
2363	 * Decode each element and place it in the space we just allocated
2364	 */
2365	tmp = intp;
2366	for (n = 0; n < cnt; n++, tmp++) {
2367		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
2368		if (i < DDI_PROP_RESULT_OK) {
2369			/*
2370			 * Free the space we just allocated
2371			 * and return an error.
2372			 */
2373			ddi_prop_free(intp);
2374			switch (i) {
2375			case DDI_PROP_RESULT_EOF:
2376				return (DDI_PROP_END_OF_DATA);
2377
2378			case DDI_PROP_RESULT_ERROR:
2379				return (DDI_PROP_CANNOT_DECODE);
2380			}
2381		}
2382	}
2383
2384	*nelements = cnt;
2385	*(int **)data = intp;
2386
2387	return (DDI_PROP_SUCCESS);
2388}
2389
2390/*
2391 * Decode a 64 bit integer array property
2392 */
2393static int
2394ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
2395{
2396	int	i;
2397	int	n;
2398	int	cnt = 0;
2399	int64_t	*tmp;
2400	int64_t	*intp;
2401
2402	/*
2403	 * Count the number of array elements by going
2404	 * through the data without decoding it.
2405	 */
2406	for (;;) {
2407		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
2408		if (i < 0)
2409			break;
2410		cnt++;
2411	}
2412
2413	/*
2414	 * If there are no elements return an error
2415	 */
2416	if (cnt == 0)
2417		return (DDI_PROP_END_OF_DATA);
2418
2419	/*
2420	 * If we cannot skip through the data, we cannot decode it
2421	 */
2422	if (i == DDI_PROP_RESULT_ERROR)
2423		return (DDI_PROP_CANNOT_DECODE);
2424
2425	/*
2426	 * Reset the data pointer to the beginning of the encoded data
2427	 */
2428	ddi_prop_reset_pos(ph);
2429
2430	/*
2431	 * Allocate memory to store the decoded value.
2432	 */
2433	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
2434	    ddi_prop_free_ints);
2435
2436	/*
2437	 * Decode each element and place it in the space allocated
2438	 */
2439	tmp = intp;
2440	for (n = 0; n < cnt; n++, tmp++) {
2441		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
2442		if (i < DDI_PROP_RESULT_OK) {
2443			/*
2444			 * Free the space we just allocated
2445			 * and return an error.
2446			 */
2447			ddi_prop_free(intp);
2448			switch (i) {
2449			case DDI_PROP_RESULT_EOF:
2450				return (DDI_PROP_END_OF_DATA);
2451
2452			case DDI_PROP_RESULT_ERROR:
2453				return (DDI_PROP_CANNOT_DECODE);
2454			}
2455		}
2456	}
2457
2458	*nelements = cnt;
2459	*(int64_t **)data = intp;
2460
2461	return (DDI_PROP_SUCCESS);
2462}
2463
2464/*
2465 * Encode an array of integers property (Can be one element)
2466 */
2467int
2468ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2469{
2470	int	i;
2471	int	*tmp;
2472	int	cnt;
2473	int	size;
2474
2475	/*
2476	 * If there is no data, we cannot do anything
2477	 */
2478	if (nelements == 0)
2479		return (DDI_PROP_CANNOT_ENCODE);
2480
2481	/*
2482	 * Get the size of an encoded int.
2483	 */
2484	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2485
2486	if (size < DDI_PROP_RESULT_OK) {
2487		switch (size) {
2488		case DDI_PROP_RESULT_EOF:
2489			return (DDI_PROP_END_OF_DATA);
2490
2491		case DDI_PROP_RESULT_ERROR:
2492			return (DDI_PROP_CANNOT_ENCODE);
2493		}
2494	}
2495
2496	/*
2497	 * Allocate space in the handle to store the encoded int.
2498	 */
2499	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2500	    DDI_PROP_SUCCESS)
2501		return (DDI_PROP_NO_MEMORY);
2502
2503	/*
2504	 * Encode the array of ints.
2505	 */
2506	tmp = (int *)data;
2507	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2508		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2509		if (i < DDI_PROP_RESULT_OK) {
2510			switch (i) {
2511			case DDI_PROP_RESULT_EOF:
2512				return (DDI_PROP_END_OF_DATA);
2513
2514			case DDI_PROP_RESULT_ERROR:
2515				return (DDI_PROP_CANNOT_ENCODE);
2516			}
2517		}
2518	}
2519
2520	return (DDI_PROP_SUCCESS);
2521}
2522
2523
2524/*
2525 * Encode a 64 bit integer array property
2526 */
2527int
2528ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2529{
2530	int i;
2531	int cnt;
2532	int size;
2533	int64_t *tmp;
2534
2535	/*
2536	 * If there is no data, we cannot do anything
2537	 */
2538	if (nelements == 0)
2539		return (DDI_PROP_CANNOT_ENCODE);
2540
2541	/*
2542	 * Get the size of an encoded 64 bit int.
2543	 */
2544	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2545
2546	if (size < DDI_PROP_RESULT_OK) {
2547		switch (size) {
2548		case DDI_PROP_RESULT_EOF:
2549			return (DDI_PROP_END_OF_DATA);
2550
2551		case DDI_PROP_RESULT_ERROR:
2552			return (DDI_PROP_CANNOT_ENCODE);
2553		}
2554	}
2555
2556	/*
2557	 * Allocate space in the handle to store the encoded int.
2558	 */
2559	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2560	    DDI_PROP_SUCCESS)
2561		return (DDI_PROP_NO_MEMORY);
2562
2563	/*
2564	 * Encode the array of ints.
2565	 */
2566	tmp = (int64_t *)data;
2567	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2568		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2569		if (i < DDI_PROP_RESULT_OK) {
2570			switch (i) {
2571			case DDI_PROP_RESULT_EOF:
2572				return (DDI_PROP_END_OF_DATA);
2573
2574			case DDI_PROP_RESULT_ERROR:
2575				return (DDI_PROP_CANNOT_ENCODE);
2576			}
2577		}
2578	}
2579
2580	return (DDI_PROP_SUCCESS);
2581}
2582
2583/*
2584 * Decode a single string property
2585 */
2586static int
2587ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2588{
2589	char		*tmp;
2590	char		*str;
2591	int		i;
2592	int		size;
2593
2594	/*
2595	 * If there is nothing to decode return an error
2596	 */
2597	if (ph->ph_size == 0)
2598		return (DDI_PROP_END_OF_DATA);
2599
2600	/*
2601	 * Get the decoded size of the encoded string.
2602	 */
2603	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2604	if (size < DDI_PROP_RESULT_OK) {
2605		switch (size) {
2606		case DDI_PROP_RESULT_EOF:
2607			return (DDI_PROP_END_OF_DATA);
2608
2609		case DDI_PROP_RESULT_ERROR:
2610			return (DDI_PROP_CANNOT_DECODE);
2611		}
2612	}
2613
2614	/*
2615	 * Allocate memory to store the decoded value in.
2616	 */
2617	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2618
2619	ddi_prop_reset_pos(ph);
2620
2621	/*
2622	 * Decode the str and place it in the space we just allocated
2623	 */
2624	tmp = str;
2625	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2626	if (i < DDI_PROP_RESULT_OK) {
2627		/*
2628		 * Free the space we just allocated
2629		 * and return an error.
2630		 */
2631		ddi_prop_free(str);
2632		switch (i) {
2633		case DDI_PROP_RESULT_EOF:
2634			return (DDI_PROP_END_OF_DATA);
2635
2636		case DDI_PROP_RESULT_ERROR:
2637			return (DDI_PROP_CANNOT_DECODE);
2638		}
2639	}
2640
2641	*(char **)data = str;
2642	*nelements = 1;
2643
2644	return (DDI_PROP_SUCCESS);
2645}
2646
2647/*
2648 * Decode an array of strings.
2649 */
2650int
2651ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
2652{
2653	int		cnt = 0;
2654	char		**strs;
2655	char		**tmp;
2656	char		*ptr;
2657	int		i;
2658	int		n;
2659	int		size;
2660	size_t		nbytes;
2661
2662	/*
2663	 * Figure out how many array elements there are by going through the
2664	 * data without decoding it first and counting.
2665	 */
2666	for (;;) {
2667		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
2668		if (i < 0)
2669			break;
2670		cnt++;
2671	}
2672
2673	/*
2674	 * If there are no elements return an error
2675	 */
2676	if (cnt == 0)
2677		return (DDI_PROP_END_OF_DATA);
2678
2679	/*
2680	 * If we cannot skip through the data, we cannot decode it
2681	 */
2682	if (i == DDI_PROP_RESULT_ERROR)
2683		return (DDI_PROP_CANNOT_DECODE);
2684
2685	/*
2686	 * Reset the data pointer to the beginning of the encoded data
2687	 */
2688	ddi_prop_reset_pos(ph);
2689
2690	/*
2691	 * Figure out how much memory we need for the sum total
2692	 */
2693	nbytes = (cnt + 1) * sizeof (char *);
2694
2695	for (n = 0; n < cnt; n++) {
2696		/*
2697		 * Get the decoded size of the current encoded string.
2698		 */
2699		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2700		if (size < DDI_PROP_RESULT_OK) {
2701			switch (size) {
2702			case DDI_PROP_RESULT_EOF:
2703				return (DDI_PROP_END_OF_DATA);
2704
2705			case DDI_PROP_RESULT_ERROR:
2706				return (DDI_PROP_CANNOT_DECODE);
2707			}
2708		}
2709
2710		nbytes += size;
2711	}
2712
2713	/*
2714	 * Allocate memory in which to store the decoded strings.
2715	 */
2716	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);
2717
2718	/*
2719	 * Set up pointers for each string by figuring out yet
2720	 * again how long each string is.
2721	 */
2722	ddi_prop_reset_pos(ph);
2723	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
2724	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2725		/*
2726		 * Get the decoded size of the current encoded string.
2727		 */
2728		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2729		if (size < DDI_PROP_RESULT_OK) {
2730			ddi_prop_free(strs);
2731			switch (size) {
2732			case DDI_PROP_RESULT_EOF:
2733				return (DDI_PROP_END_OF_DATA);
2734
2735			case DDI_PROP_RESULT_ERROR:
2736				return (DDI_PROP_CANNOT_DECODE);
2737			}
2738		}
2739
2740		*tmp = ptr;
2741		ptr += size;
2742	}
2743
2744	/*
2745	 * String array is terminated by a NULL
2746	 */
2747	*tmp = NULL;
2748
2749	/*
2750	 * Finally, we can decode each string
2751	 */
2752	ddi_prop_reset_pos(ph);
2753	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2754		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
2755		if (i < DDI_PROP_RESULT_OK) {
2756			/*
2757			 * Free the space we just allocated
2758			 * and return an error
2759			 */
2760			ddi_prop_free(strs);
2761			switch (i) {
2762			case DDI_PROP_RESULT_EOF:
2763				return (DDI_PROP_END_OF_DATA);
2764
2765			case DDI_PROP_RESULT_ERROR:
2766				return (DDI_PROP_CANNOT_DECODE);
2767			}
2768		}
2769	}
2770
2771	*(char ***)data = strs;
2772	*nelements = cnt;
2773
2774	return (DDI_PROP_SUCCESS);
2775}
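
/*
 * Shape of the buffer returned by ddi_prop_fm_decode_strings, sketched for
 * a two-element property ("foo", "bar"):
 *
 *	strs[0] -> "foo\0"
 *	strs[1] -> "bar\0"
 *	strs[2] == NULL		(argv-style terminator)
 *
 * The pointer array and the string bodies share one allocation from
 * ddi_prop_decode_alloc(), so a single ddi_prop_free(strs) releases
 * everything.
 */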
2776
2777/*
2778 * Encode a string.
2779 */
2780int
2781ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2782{
2783	char		**tmp;
2784	int		size;
2785	int		i;
2786
2787	/*
2788	 * If there is no data, we cannot do anything
2789	 */
2790	if (nelements == 0)
2791		return (DDI_PROP_CANNOT_ENCODE);
2792
2793	/*
2794	 * Get the size of the encoded string.
2795	 */
2796	tmp = (char **)data;
2797	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2798	if (size < DDI_PROP_RESULT_OK) {
2799		switch (size) {
2800		case DDI_PROP_RESULT_EOF:
2801			return (DDI_PROP_END_OF_DATA);
2802
2803		case DDI_PROP_RESULT_ERROR:
2804			return (DDI_PROP_CANNOT_ENCODE);
2805		}
2806	}
2807
2808	/*
2809	 * Allocate space in the handle to store the encoded string.
2810	 */
2811	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2812		return (DDI_PROP_NO_MEMORY);
2813
2814	ddi_prop_reset_pos(ph);
2815
2816	/*
2817	 * Encode the string.
2818	 */
2819	tmp = (char **)data;
2820	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2821	if (i < DDI_PROP_RESULT_OK) {
2822		switch (i) {
2823		case DDI_PROP_RESULT_EOF:
2824			return (DDI_PROP_END_OF_DATA);
2825
2826		case DDI_PROP_RESULT_ERROR:
2827			return (DDI_PROP_CANNOT_ENCODE);
2828		}
2829	}
2830
2831	return (DDI_PROP_SUCCESS);
2832}
2833
2834
2835/*
2836 * Encode an array of strings.
2837 */
2838int
2839ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2840{
2841	int		cnt = 0;
2842	char		**tmp;
2843	int		size;
2844	uint_t		total_size;
2845	int		i;
2846
2847	/*
2848	 * If there is no data, we cannot do anything
2849	 */
2850	if (nelements == 0)
2851		return (DDI_PROP_CANNOT_ENCODE);
2852
2853	/*
2854	 * Get the total size required to encode all the strings.
2855	 */
2856	total_size = 0;
2857	tmp = (char **)data;
2858	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2859		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2860		if (size < DDI_PROP_RESULT_OK) {
2861			switch (size) {
2862			case DDI_PROP_RESULT_EOF:
2863				return (DDI_PROP_END_OF_DATA);
2864
2865			case DDI_PROP_RESULT_ERROR:
2866				return (DDI_PROP_CANNOT_ENCODE);
2867			}
2868		}
2869		total_size += (uint_t)size;
2870	}
2871
2872	/*
2873	 * Allocate space in the handle to store the encoded strings.
2874	 */
2875	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2876		return (DDI_PROP_NO_MEMORY);
2877
2878	ddi_prop_reset_pos(ph);
2879
2880	/*
2881	 * Encode the array of strings.
2882	 */
2883	tmp = (char **)data;
2884	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2885		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2886		if (i < DDI_PROP_RESULT_OK) {
2887			switch (i) {
2888			case DDI_PROP_RESULT_EOF:
2889				return (DDI_PROP_END_OF_DATA);
2890
2891			case DDI_PROP_RESULT_ERROR:
2892				return (DDI_PROP_CANNOT_ENCODE);
2893			}
2894		}
2895	}
2896
2897	return (DDI_PROP_SUCCESS);
2898}
2899
2900
2901/*
2902 * Decode an array of bytes.
2903 */
2904static int
2905ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2906{
2907	uchar_t		*tmp;
2908	int		nbytes;
2909	int		i;
2910
2911	/*
2912	 * If there are no elements return an error
2913	 */
2914	if (ph->ph_size == 0)
2915		return (DDI_PROP_END_OF_DATA);
2916
2917	/*
2918	 * Get the size of the encoded array of bytes.
2919	 */
2920	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2921	    data, ph->ph_size);
2922	if (nbytes < DDI_PROP_RESULT_OK) {
2923		switch (nbytes) {
2924		case DDI_PROP_RESULT_EOF:
2925			return (DDI_PROP_END_OF_DATA);
2926
2927		case DDI_PROP_RESULT_ERROR:
2928			return (DDI_PROP_CANNOT_DECODE);
2929		}
2930	}
2931
2932	/*
2933	 * Allocate memory to store the decoded value in.
2934	 */
2935	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2936
2937	/*
2938	 * Decode each element and place it in the space we just allocated
2939	 */
2940	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2941	if (i < DDI_PROP_RESULT_OK) {
2942		/*
2943		 * Free the space we just allocated
2944		 * and return an error
2945		 */
2946		ddi_prop_free(tmp);
2947		switch (i) {
2948		case DDI_PROP_RESULT_EOF:
2949			return (DDI_PROP_END_OF_DATA);
2950
2951		case DDI_PROP_RESULT_ERROR:
2952			return (DDI_PROP_CANNOT_DECODE);
2953		}
2954	}
2955
2956	*(uchar_t **)data = tmp;
2957	*nelements = nbytes;
2958
2959	return (DDI_PROP_SUCCESS);
2960}
2961
2962/*
2963 * Encode an array of bytes.
2964 */
2965int
2966ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2967{
2968	int		size;
2969	int		i;
2970
2971	/*
2972	 * If there are no elements, then this is a boolean property,
2973	 * so just create a property handle with no data and return.
2974	 */
2975	if (nelements == 0) {
2976		(void) ddi_prop_encode_alloc(ph, 0);
2977		return (DDI_PROP_SUCCESS);
2978	}
2979
2980	/*
2981	 * Get the size of the encoded array of bytes.
2982	 */
2983	size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2984	    nelements);
2985	if (size < DDI_PROP_RESULT_OK) {
2986		switch (size) {
2987		case DDI_PROP_RESULT_EOF:
2988			return (DDI_PROP_END_OF_DATA);
2989
2990		case DDI_PROP_RESULT_ERROR:
2991			return (DDI_PROP_CANNOT_ENCODE);
2992		}
2993	}
2994
2995	/*
2996	 * Allocate space in the handle to store the encoded bytes.
2997	 */
2998	if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2999		return (DDI_PROP_NO_MEMORY);
3000
3001	/*
3002	 * Encode the array of bytes.
3003	 */
3004	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
3005	    nelements);
3006	if (i < DDI_PROP_RESULT_OK) {
3007		switch (i) {
3008		case DDI_PROP_RESULT_EOF:
3009			return (DDI_PROP_END_OF_DATA);
3010
3011		case DDI_PROP_RESULT_ERROR:
3012			return (DDI_PROP_CANNOT_ENCODE);
3013		}
3014	}
3015
3016	return (DDI_PROP_SUCCESS);
3017}
3018
3019/*
3020 * OBP 1275 integer, string and byte operators.
3021 *
3022 * DDI_PROP_CMD_DECODE:
3023 *
3024 *	DDI_PROP_RESULT_ERROR:		cannot decode the data
3025 *	DDI_PROP_RESULT_EOF:		end of data
3026 *	DDI_PROP_RESULT_OK:		data was decoded
3027 *
3028 * DDI_PROP_CMD_ENCODE:
3029 *
3030 *	DDI_PROP_RESULT_ERROR:		cannot encode the data
3031 *	DDI_PROP_RESULT_EOF:		end of data
3032 *	DDI_PROP_RESULT_OK:		data was encoded
3033 *
3034 * DDI_PROP_CMD_SKIP:
3035 *
3036 *	DDI_PROP_RESULT_ERROR:		cannot skip the data
3037 *	DDI_PROP_RESULT_EOF:		end of data
3038 *	DDI_PROP_RESULT_OK:		data was skipped
3039 *
3040 * DDI_PROP_CMD_GET_ESIZE:
3041 *
3042 *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
3043 *	DDI_PROP_RESULT_EOF:		end of data
3044 *	> 0:				the encoded size
3045 *
3046 * DDI_PROP_CMD_GET_DSIZE:
3047 *
3048 *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
3049 *	DDI_PROP_RESULT_EOF:		end of data
3050 *	> 0:				the decoded size
3051 */
3052
3053/*
3054 * OBP 1275 integer operator
3055 *
3056 * OBP properties are a byte stream of data, so integers may not be
3057 * properly aligned.  Therefore we need to copy them one byte at a time.
3058 */
3059int
3060ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
3061{
3062	int	i;
3063
3064	switch (cmd) {
3065	case DDI_PROP_CMD_DECODE:
3066		/*
3067		 * Check that there is encoded data
3068		 */
3069		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
3070			return (DDI_PROP_RESULT_ERROR);
3071		if (ph->ph_flags & PH_FROM_PROM) {
3072			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
3073			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
3074			    ph->ph_size - i))
3075				return (DDI_PROP_RESULT_ERROR);
3076		} else {
3077			if (ph->ph_size < sizeof (int) ||
3078			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
3079			    ph->ph_size - sizeof (int))))
3080				return (DDI_PROP_RESULT_ERROR);
3081		}
3082
3083		/*
3084		 * Copy the integer, using the implementation-specific
3085		 * copy function if the property is coming from the PROM.
3086		 */
3087		if (ph->ph_flags & PH_FROM_PROM) {
3088			*data = impl_ddi_prop_int_from_prom(
3089			    (uchar_t *)ph->ph_cur_pos,
3090			    (ph->ph_size < PROP_1275_INT_SIZE) ?
3091			    ph->ph_size : PROP_1275_INT_SIZE);
3092		} else {
3093			bcopy(ph->ph_cur_pos, data, sizeof (int));
3094		}
3095
3096		/*
3097		 * Move the current location to the start of the next
3098		 * bit of undecoded data.
3099		 */
3100		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3101		    PROP_1275_INT_SIZE;
3102		return (DDI_PROP_RESULT_OK);
3103
3104	case DDI_PROP_CMD_ENCODE:
3105		/*
3106		 * Check that there is room to encode the data
3107		 */
3108		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3109		    ph->ph_size < PROP_1275_INT_SIZE ||
3110		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
3111		    ph->ph_size - sizeof (int))))
3112			return (DDI_PROP_RESULT_ERROR);
3113
3114		/*
3115		 * Encode the integer into the byte stream one byte at a
3116		 * time.
3117		 */
3118		bcopy(data, ph->ph_cur_pos, sizeof (int));
3119
3120		/*
3121		 * Move the current location to the start of the next bit of
3122		 * space where we can store encoded data.
3123		 */
3124		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
3125		return (DDI_PROP_RESULT_OK);
3126
3127	case DDI_PROP_CMD_SKIP:
3128		/*
3129		 * Check that there is encoded data
3130		 */
3131		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3132		    ph->ph_size < PROP_1275_INT_SIZE)
3133			return (DDI_PROP_RESULT_ERROR);
3134
3135
3136		if ((caddr_t)ph->ph_cur_pos ==
3137		    (caddr_t)ph->ph_data + ph->ph_size) {
3138			return (DDI_PROP_RESULT_EOF);
3139		} else if ((caddr_t)ph->ph_cur_pos >
3140		    (caddr_t)ph->ph_data + ph->ph_size) {
3141			return (DDI_PROP_RESULT_EOF);
3142		}
3143
3144		/*
3145		 * Move the current location to the start of the next bit of
3146		 * undecoded data.
3147		 */
3148		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
3149		return (DDI_PROP_RESULT_OK);
3150
3151	case DDI_PROP_CMD_GET_ESIZE:
3152		/*
3153		 * Return the size of an encoded integer on OBP
3154		 */
3155		return (PROP_1275_INT_SIZE);
3156
3157	case DDI_PROP_CMD_GET_DSIZE:
3158		/*
3159		 * Return the size of a decoded integer on the system.
3160		 */
3161		return (sizeof (int));
3162
3163	default:
3164#ifdef DEBUG
3165		panic("ddi_prop_1275_int: %x impossible", cmd);
3166		/*NOTREACHED*/
3167#else
3168		return (DDI_PROP_RESULT_ERROR);
3169#endif	/* DEBUG */
3170	}
3171}
3172
3173/*
3174 * 64 bit integer operator.
3175 *
3176 * This is an extension, defined by Sun, to the 1275 integer
3177 * operator.  This routine handles the encoding/decoding of
3178 * 64 bit integer properties.
3179 */
3180int
3181ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
3182{
3183
3184	switch (cmd) {
3185	case DDI_PROP_CMD_DECODE:
3186		/*
3187		 * Check that there is encoded data
3188		 */
3189		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
3190			return (DDI_PROP_RESULT_ERROR);
3191		if (ph->ph_flags & PH_FROM_PROM) {
3192			return (DDI_PROP_RESULT_ERROR);
3193		} else {
3194			if (ph->ph_size < sizeof (int64_t) ||
3195			    ((int64_t *)ph->ph_cur_pos >
3196			    ((int64_t *)ph->ph_data +
3197			    ph->ph_size - sizeof (int64_t))))
3198				return (DDI_PROP_RESULT_ERROR);
3199		}
3200		/*
3201		 * Copy the integer, using the implementation-specific
3202		 * copy function if the property is coming from the PROM.
3203		 */
3204		if (ph->ph_flags & PH_FROM_PROM) {
3205			return (DDI_PROP_RESULT_ERROR);
3206		} else {
3207			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
3208		}
3209
3210		/*
3211		 * Move the current location to the start of the next
3212		 * bit of undecoded data.
3213		 */
3214		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3215		    sizeof (int64_t);
3216		return (DDI_PROP_RESULT_OK);
3217
3218	case DDI_PROP_CMD_ENCODE:
3219		/*
3220		 * Check that there is room to encode the data
3221		 */
3222		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3223		    ph->ph_size < sizeof (int64_t) ||
3224		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
3225		    ph->ph_size - sizeof (int64_t))))
3226			return (DDI_PROP_RESULT_ERROR);
3227
3228		/*
3229		 * Encode the integer into the byte stream one byte at a
3230		 * time.
3231		 */
3232		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));
3233
3234		/*
3235		 * Move the current location to the start of the next bit of
3236		 * space where we can store encoded data.
3237		 */
3238		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3239		    sizeof (int64_t);
3240		return (DDI_PROP_RESULT_OK);
3241
3242	case DDI_PROP_CMD_SKIP:
3243		/*
3244		 * Check that there is encoded data
3245		 */
3246		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3247		    ph->ph_size < sizeof (int64_t))
3248			return (DDI_PROP_RESULT_ERROR);
3249
3250		if ((caddr_t)ph->ph_cur_pos ==
3251		    (caddr_t)ph->ph_data + ph->ph_size) {
3252			return (DDI_PROP_RESULT_EOF);
3253		} else if ((caddr_t)ph->ph_cur_pos >
3254		    (caddr_t)ph->ph_data + ph->ph_size) {
3255			return (DDI_PROP_RESULT_EOF);
3256		}
3257
3258		/*
3259		 * Move the current location to the start of
3260		 * the next bit of undecoded data.
3261		 */
3262		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3263		    sizeof (int64_t);
3264		return (DDI_PROP_RESULT_OK);
3265
3266	case DDI_PROP_CMD_GET_ESIZE:
3267		/*
3268		 * Return the size of an encoded integer on OBP
3269		 */
3270		return (sizeof (int64_t));
3271
3272	case DDI_PROP_CMD_GET_DSIZE:
3273		/*
3274		 * Return the size of a decoded integer on the system.
3275		 */
3276		return (sizeof (int64_t));
3277
3278	default:
3279#ifdef DEBUG
3280		panic("ddi_prop_int64_op: %x impossible", cmd);
3281		/*NOTREACHED*/
3282#else
3283		return (DDI_PROP_RESULT_ERROR);
3284#endif  /* DEBUG */
3285	}
3286}
3287
3288/*
3289 * OBP 1275 string operator.
3290 *
3291 * OBP strings are NULL terminated.
3292 */
3293int
3294ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
3295{
3296	int	n;
3297	char	*p;
3298	char	*end;
3299
3300	switch (cmd) {
3301	case DDI_PROP_CMD_DECODE:
3302		/*
3303		 * Check that there is encoded data
3304		 */
3305		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3306			return (DDI_PROP_RESULT_ERROR);
3307		}
3308
3309		/*
3310		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
3311		 * how to NULL terminate result.
3312		 */
3313		p = (char *)ph->ph_cur_pos;
3314		end = (char *)ph->ph_data + ph->ph_size;
3315		if (p >= end)
3316			return (DDI_PROP_RESULT_EOF);
3317
3318		while (p < end) {
3319			*data++ = *p;
3320			if (*p++ == 0) {	/* NULL from OBP */
3321				ph->ph_cur_pos = p;
3322				return (DDI_PROP_RESULT_OK);
3323			}
3324		}
3325
3326		/*
3327		 * If OBP did not NULL terminate string, which happens
3328		 * (at least) for 'true'/'false' boolean values, account for
3329		 * the space and store null termination on decode.
3330		 */
3331		ph->ph_cur_pos = p;
3332		*data = 0;
3333		return (DDI_PROP_RESULT_OK);
3334
3335	case DDI_PROP_CMD_ENCODE:
3336		/*
3337		 * Check that there is room to encode the data
3338		 */
3339		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3340			return (DDI_PROP_RESULT_ERROR);
3341		}
3342
3343		n = strlen(data) + 1;
3344		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3345		    ph->ph_size - n)) {
3346			return (DDI_PROP_RESULT_ERROR);
3347		}
3348
3349		/*
3350		 * Copy the NULL terminated string
3351		 */
3352		bcopy(data, ph->ph_cur_pos, n);
3353
3354		/*
3355		 * Move the current location to the start of the next bit of
3356		 * space where we can store encoded data.
3357		 */
3358		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
3359		return (DDI_PROP_RESULT_OK);
3360
3361	case DDI_PROP_CMD_SKIP:
3362		/*
3363		 * Check that there is encoded data
3364		 */
3365		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3366			return (DDI_PROP_RESULT_ERROR);
3367		}
3368
3369		/*
3370		 * Skip over the NULL terminated string.  We know
3371		 * the size of the property; we need to ensure that
3372		 * the string is properly formatted, since we may be
3373		 * looking up random OBP data.
3374		 */
3375		p = (char *)ph->ph_cur_pos;
3376		end = (char *)ph->ph_data + ph->ph_size;
3377		if (p >= end)
3378			return (DDI_PROP_RESULT_EOF);
3379
3380		while (p < end) {
3381			if (*p++ == 0) {	/* NULL from OBP */
3382				ph->ph_cur_pos = p;
3383				return (DDI_PROP_RESULT_OK);
3384			}
3385		}
3386
3387		/*
3388		 * Accommodate the fact that OBP does not always NULL
3389		 * terminate strings.
3390		 */
3391		ph->ph_cur_pos = p;
3392		return (DDI_PROP_RESULT_OK);
3393
3394	case DDI_PROP_CMD_GET_ESIZE:
3395		/*
3396		 * Return the size of the encoded string on OBP.
3397		 */
3398		return (strlen(data) + 1);
3399
3400	case DDI_PROP_CMD_GET_DSIZE:
3401		/*
3402		 * Return the string length plus one for the NULL.
3403		 * We know the size of the property, we need to
3404		 * ensure that the string is properly formatted,
3405		 * since we may be looking up random OBP data.
3406		 */
3407		p = (char *)ph->ph_cur_pos;
3408		end = (char *)ph->ph_data + ph->ph_size;
3409		if (p >= end)
3410			return (DDI_PROP_RESULT_EOF);
3411
3412		for (n = 0; p < end; n++) {
3413			if (*p++ == 0) {	/* NULL from OBP */
3414				ph->ph_cur_pos = p;
3415				return (n + 1);
3416			}
3417		}
3418
3419		/*
3420		 * If OBP did not NULL terminate string, which happens for
3421		 * 'true'/'false' boolean values, account for the space
3422		 * to store null termination here.
3423		 */
3424		ph->ph_cur_pos = p;
3425		return (n + 1);
3426
3427	default:
3428#ifdef DEBUG
3429		panic("ddi_prop_1275_string: %x impossible", cmd);
3430		/*NOTREACHED*/
3431#else
3432		return (DDI_PROP_RESULT_ERROR);
3433#endif	/* DEBUG */
3434	}
3435}
3436
3437/*
3438 * OBP 1275 byte operator
3439 *
3440 * Caller must specify the number of bytes to get.  OBP encodes bytes
3441 * as a byte so there is a 1-to-1 translation.
3442 */
3443int
3444ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3445	uint_t nelements)
3446{
3447	switch (cmd) {
3448	case DDI_PROP_CMD_DECODE:
3449		/*
3450		 * Check that there is encoded data
3451		 */
3452		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3453		    ph->ph_size < nelements ||
3454		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3455		    ph->ph_size - nelements)))
3456			return (DDI_PROP_RESULT_ERROR);
3457
3458		/*
3459		 * Copy out the bytes
3460		 */
3461		bcopy(ph->ph_cur_pos, data, nelements);
3462
3463		/*
3464		 * Move the current location
3465		 */
3466		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3467		return (DDI_PROP_RESULT_OK);
3468
3469	case DDI_PROP_CMD_ENCODE:
3470		/*
3471		 * Check that there is room to encode the data
3472		 */
3473		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3474		    ph->ph_size < nelements ||
3475		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3476		    ph->ph_size - nelements)))
3477			return (DDI_PROP_RESULT_ERROR);
3478
3479		/*
3480		 * Copy in the bytes
3481		 */
3482		bcopy(data, ph->ph_cur_pos, nelements);
3483
3484		/*
3485		 * Move the current location to the start of the next bit of
3486		 * space where we can store encoded data.
3487		 */
3488		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3489		return (DDI_PROP_RESULT_OK);
3490
3491	case DDI_PROP_CMD_SKIP:
3492		/*
3493		 * Check that there is encoded data
3494		 */
3495		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3496		    ph->ph_size < nelements)
3497			return (DDI_PROP_RESULT_ERROR);
3498
3499		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3500		    ph->ph_size - nelements))
3501			return (DDI_PROP_RESULT_EOF);
3502
3503		/*
3504		 * Move the current location
3505		 */
3506		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3507		return (DDI_PROP_RESULT_OK);
3508
3509	case DDI_PROP_CMD_GET_ESIZE:
3510		/*
3511		 * The size in bytes of the encoded size is the
3512		 * same as the decoded size provided by the caller.
3513		 */
3514		return (nelements);
3515
3516	case DDI_PROP_CMD_GET_DSIZE:
3517		/*
3518		 * Just return the number of bytes specified by the caller.
3519		 */
3520		return (nelements);
3521
3522	default:
3523#ifdef DEBUG
3524		panic("ddi_prop_1275_bytes: %x impossible", cmd);
3525		/*NOTREACHED*/
3526#else
3527		return (DDI_PROP_RESULT_ERROR);
3528#endif	/* DEBUG */
3529	}
3530}
3531
3532/*
3533 * Used for properties that come from the OBP, hardware configuration files,
3534 * or that are created by calls to ddi_prop_update(9F).
3535 */
3536static struct prop_handle_ops prop_1275_ops = {
3537	ddi_prop_1275_int,
3538	ddi_prop_1275_string,
3539	ddi_prop_1275_bytes,
3540	ddi_prop_int64_op
3541};
3542
3543
3544/*
3545 * Interface to create/modify a managed property on child's behalf...
3546 * Flags interpreted are:
3547 *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
3548 *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
3549 *
3550 * Use same dev_t when modifying or undefining a property.
3551 * Search for properties with DDI_DEV_T_ANY to match first named
3552 * property on the list.
3553 *
3554 * Properties are stored LIFO and subsequently will match the first
3555 * `matching' instance.
3556 */
3557
3558/*
3559 * ddi_prop_add:	Add a software defined property
3560 */
3561
3562/*
3563 * define to get a new ddi_prop_t.
3564 * km_flags are KM_SLEEP or KM_NOSLEEP.
3565 */
3566
3567#define	DDI_NEW_PROP_T(km_flags)	\
3568	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3569
3570static int
3571ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
3572    char *name, caddr_t value, int length)
3573{
3574	ddi_prop_t	*new_propp, *propp;
3575	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
3576	int		km_flags = KM_NOSLEEP;
3577	int		name_buf_len;
3578
3579	/*
3580	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
3581	 */
3582
3583	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
3584		return (DDI_PROP_INVAL_ARG);
3585
3586	if (flags & DDI_PROP_CANSLEEP)
3587		km_flags = KM_SLEEP;
3588
3589	if (flags & DDI_PROP_SYSTEM_DEF)
3590		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
3591	else if (flags & DDI_PROP_HW_DEF)
3592		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
3593
3594	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
3595		cmn_err(CE_CONT, prop_no_mem_msg, name);
3596		return (DDI_PROP_NO_MEMORY);
3597	}
3598
3599	/*
3600	 * If dev is major number 0, then we need to do a ddi_name_to_major
3601	 * to get the real major number for the device.  This needs to be
3602	 * done because some drivers need to call ddi_prop_create in their
3603	 * attach routines but they don't have a dev.  By creating the dev
3604	 * ourselves if the major number is 0, drivers will not have to know
3605	 * their major number.  They can just create a dev with major number
3606	 * 0 and pass it in.  For device 0, we will be doing a little extra
3607	 * work by recreating the same dev that we already have, but it's the
3608	 * price you pay :-).
3609	 *
3610	 * This fixes bug #1098060.
3611	 */
3612	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
3613		new_propp->prop_dev =
3614		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
3615		    getminor(dev));
3616	} else
3617		new_propp->prop_dev = dev;
3618
3619	/*
3620	 * Allocate space for property name and copy it in...
3621	 */
3622
3623	name_buf_len = strlen(name) + 1;
3624	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
3625	if (new_propp->prop_name == 0)	{
3626		kmem_free(new_propp, sizeof (ddi_prop_t));
3627		cmn_err(CE_CONT, prop_no_mem_msg, name);
3628		return (DDI_PROP_NO_MEMORY);
3629	}
3630	bcopy(name, new_propp->prop_name, name_buf_len);
3631
3632	/*
3633	 * Set the property type
3634	 */
3635	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;
3636
3637	/*
3638	 * Set length and value ONLY if not an explicit property undefine:
3639	 * NOTE: value and length are zero for explicit undefines.
3640	 */
3641
3642	if (flags & DDI_PROP_UNDEF_IT) {
3643		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
3644	} else {
3645		if ((new_propp->prop_len = length) != 0) {
3646			new_propp->prop_val = kmem_alloc(length, km_flags);
3647			if (new_propp->prop_val == 0)  {
3648				kmem_free(new_propp->prop_name, name_buf_len);
3649				kmem_free(new_propp, sizeof (ddi_prop_t));
3650				cmn_err(CE_CONT, prop_no_mem_msg, name);
3651				return (DDI_PROP_NO_MEMORY);
3652			}
3653			bcopy(value, new_propp->prop_val, length);
3654		}
3655	}
3656
3657	/*
3658	 * Link property into beginning of list. (Properties are LIFO order.)
3659	 */
3660
3661	mutex_enter(&(DEVI(dip)->devi_lock));
3662	propp = *list_head;
3663	new_propp->prop_next = propp;
3664	*list_head = new_propp;
3665	mutex_exit(&(DEVI(dip)->devi_lock));
3666	return (DDI_PROP_SUCCESS);
3667}
3668
3669
3670/*
3671 * ddi_prop_change:	Modify a software managed property value
3672 *
3673 *			Set new length and value if found.
3674 *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3675 *			input name is the NULL string.
3676 *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
3677 *
3678 *			Note: an undef can be modified to be a define,
3679 *			(you can't go the other way.)
3680 */
3681
3682static int
3683ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
3684    char *name, caddr_t value, int length)
3685{
3686	ddi_prop_t	*propp;
3687	ddi_prop_t	**ppropp;
3688	caddr_t		p = NULL;
3689
3690	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
3691		return (DDI_PROP_INVAL_ARG);
3692
3693	/*
3694	 * Preallocate buffer, even if we don't need it...
3695	 */
3696	if (length != 0)  {
3697		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
3698		    KM_SLEEP : KM_NOSLEEP);
3699		if (p == NULL)	{
3700			cmn_err(CE_CONT, prop_no_mem_msg, name);
3701			return (DDI_PROP_NO_MEMORY);
3702		}
3703	}
3704
3705	/*
3706	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
3707	 * number, a real dev_t value should be created based upon the dip's
3708	 * binding driver.  See ddi_prop_add...
3709	 */
3710	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
3711		dev = makedevice(
3712		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
3713		    getminor(dev));
3714
3715	/*
3716	 * Check to see if the property exists.  If so we modify it.
3717	 * Else we create it by calling ddi_prop_add().
3718	 */
3719	mutex_enter(&(DEVI(dip)->devi_lock));
3720	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
3721	if (flags & DDI_PROP_SYSTEM_DEF)
3722		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
3723	else if (flags & DDI_PROP_HW_DEF)
3724		ppropp = &DEVI(dip)->devi_hw_prop_ptr;
3725
3726	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
3727		/*
3728		 * The property exists, so replace its value: copy the
3729		 * new value into the preallocated buffer, free the old
3730		 * value, and install the new buffer and length.
3731		 */
3732		if (length != 0)
3733			bcopy(value, p, length);
3734
3735		if (propp->prop_len != 0)
3736			kmem_free(propp->prop_val, propp->prop_len);
3737
3738		propp->prop_len = length;
3739		propp->prop_val = p;
3740		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
3741		mutex_exit(&(DEVI(dip)->devi_lock));
3742		return (DDI_PROP_SUCCESS);
3743	}
3744
3745	mutex_exit(&(DEVI(dip)->devi_lock));
3746	if (length != 0)
3747		kmem_free(p, length);
3748
3749	return (ddi_prop_add(dev, dip, flags, name, value, length));
3750}
3751
3752/*
3753 * Common update routine used to update and encode a property.	Creates
3754 * a property handle, calls the property encode routine, figures out if
3755 * the property already exists and updates if it does.	Otherwise it
3756 * creates if it does not exist.
3757 */
3758int
3759ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
3760    char *name, void *data, uint_t nelements,
3761    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3762{
3763	prop_handle_t	ph;
3764	int		rval;
3765	uint_t		ourflags;
3766
3767	/*
3768	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3769	 * return error.
3770	 */
3771	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3772		return (DDI_PROP_INVAL_ARG);
3773
3774	/*
3775	 * Create the handle
3776	 */
3777	ph.ph_data = NULL;
3778	ph.ph_cur_pos = NULL;
3779	ph.ph_save_pos = NULL;
3780	ph.ph_size = 0;
3781	ph.ph_ops = &prop_1275_ops;
3782
3783	/*
3784	 * ourflags:
3785	 * For compatibility with the old interfaces.  The old interfaces
3786	 * didn't sleep by default and slept when the flag was set.  These
3787	 * interfaces do the opposite.  So the old interfaces now set the
3788	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
3789	 *
3790	 * ph.ph_flags:
3791	 * Blocked data or unblocked data allocation
3792	 * for ph.ph_data in ddi_prop_encode_alloc()
3793	 */
3794	if (flags & DDI_PROP_DONTSLEEP) {
3795		ourflags = flags;
3796		ph.ph_flags = DDI_PROP_DONTSLEEP;
3797	} else {
3798		ourflags = flags | DDI_PROP_CANSLEEP;
3799		ph.ph_flags = DDI_PROP_CANSLEEP;
3800	}
3801
3802	/*
3803	 * Encode the data and store it in the property handle by
3804	 * calling the prop_encode routine.
3805	 */
3806	if ((rval = (*prop_create)(&ph, data, nelements)) !=
3807	    DDI_PROP_SUCCESS) {
3808		if (rval == DDI_PROP_NO_MEMORY)
3809			cmn_err(CE_CONT, prop_no_mem_msg, name);
3810		if (ph.ph_size != 0)
3811			kmem_free(ph.ph_data, ph.ph_size);
3812		return (rval);
3813	}
3814
3815	/*
3816	 * The old interfaces use a stacking approach to creating
3817	 * properties.	If we are being called from the old interfaces,
3818	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
3819	 * create without checking.
3820	 */
3821	if (flags & DDI_PROP_STACK_CREATE) {
3822		rval = ddi_prop_add(match_dev, dip,
3823		    ourflags, name, ph.ph_data, ph.ph_size);
3824	} else {
3825		rval = ddi_prop_change(match_dev, dip,
3826		    ourflags, name, ph.ph_data, ph.ph_size);
3827	}
3828
3829	/*
3830	 * Free the encoded data allocated in the prop_encode routine.
3831	 */
3832	if (ph.ph_size != 0)
3833		kmem_free(ph.ph_data, ph.ph_size);
3834
3835	return (rval);
3836}
3837
3838
3839/*
3840 * ddi_prop_create:	Define a managed property:
3841 *			See above for details.
3842 */
3843
3844int
3845ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3846    char *name, caddr_t value, int length)
3847{
3848	if (!(flag & DDI_PROP_CANSLEEP)) {
3849		flag |= DDI_PROP_DONTSLEEP;
3850#ifdef DDI_PROP_DEBUG
3851		if (length != 0)
3852			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3853			    "use ddi_prop_update (prop = %s, node = %s%d)",
3854			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3855#endif /* DDI_PROP_DEBUG */
3856	}
3857	flag &= ~DDI_PROP_SYSTEM_DEF;
3858	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3859	return (ddi_prop_update_common(dev, dip, flag, name,
3860	    value, length, ddi_prop_fm_encode_bytes));
3861}
3862
3863int
3864e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3865    char *name, caddr_t value, int length)
3866{
3867	if (!(flag & DDI_PROP_CANSLEEP))
3868		flag |= DDI_PROP_DONTSLEEP;
3869	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3870	return (ddi_prop_update_common(dev, dip, flag,
3871	    name, value, length, ddi_prop_fm_encode_bytes));
3872}
3873
3874int
3875ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3876    char *name, caddr_t value, int length)
3877{
3878	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3879
3880	/*
3881	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3882	 * return error.
3883	 */
3884	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3885		return (DDI_PROP_INVAL_ARG);
3886
3887	if (!(flag & DDI_PROP_CANSLEEP))
3888		flag |= DDI_PROP_DONTSLEEP;
3889	flag &= ~DDI_PROP_SYSTEM_DEF;
3890	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3891		return (DDI_PROP_NOT_FOUND);
3892
3893	return (ddi_prop_update_common(dev, dip,
3894	    (flag | DDI_PROP_TYPE_BYTE), name,
3895	    value, length, ddi_prop_fm_encode_bytes));
3896}
3897
3898int
3899e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3900    char *name, caddr_t value, int length)
3901{
3902	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3903
3904	/*
3905	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3906	 * return error.
3907	 */
3908	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3909		return (DDI_PROP_INVAL_ARG);
3910
3911	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3912		return (DDI_PROP_NOT_FOUND);
3913
3914	if (!(flag & DDI_PROP_CANSLEEP))
3915		flag |= DDI_PROP_DONTSLEEP;
3916	return (ddi_prop_update_common(dev, dip,
3917	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3918	    name, value, length, ddi_prop_fm_encode_bytes));
3919}
3920
3921
3922/*
3923 * Common lookup routine used to lookup and decode a property.
3924 * Creates a property handle, searches for the raw encoded data,
3925 * fills in the handle, and calls the property decode functions
3926 * passed in.
3927 *
3928 * This routine is not static because ddi_bus_prop_op() which lives in
3929 * ddi_impl.c calls it.  No driver should be calling this routine.
3930 */
3931int
3932ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
3933    uint_t flags, char *name, void *data, uint_t *nelements,
3934    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3935{
3936	int		rval;
3937	uint_t		ourflags;
3938	prop_handle_t	ph;
3939
3940	if ((match_dev == DDI_DEV_T_NONE) ||
3941	    (name == NULL) || (strlen(name) == 0))
3942		return (DDI_PROP_INVAL_ARG);
3943
3944	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
3945	    flags | DDI_PROP_CANSLEEP;
3946
3947	/*
3948	 * Get the encoded data
3949	 */
3950	bzero(&ph, sizeof (prop_handle_t));
3951
3952	if (flags & DDI_UNBND_DLPI2) {
3953		/*
3954		 * For unbound dlpi style-2 devices, index into
3955		 * the devnames' array and search the global
3956		 * property list.
3957		 */
3958		ourflags &= ~DDI_UNBND_DLPI2;
3959		rval = i_ddi_prop_search_global(match_dev,
3960		    ourflags, name, &ph.ph_data, &ph.ph_size);
3961	} else {
3962		rval = ddi_prop_search_common(match_dev, dip,
3963		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
3964		    &ph.ph_data, &ph.ph_size);
3965
3966	}
3967
3968	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
3969		ASSERT(ph.ph_data == NULL);
3970		ASSERT(ph.ph_size == 0);
3971		return (rval);
3972	}
3973
3974	/*
3975	 * If the encoded data came from OBP or software,
3976	 * use the 1275 OBP decode/encode routines.
3977	 */
3978	ph.ph_cur_pos = ph.ph_data;
3979	ph.ph_save_pos = ph.ph_data;
3980	ph.ph_ops = &prop_1275_ops;
3981	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;
3982
3983	rval = (*prop_decoder)(&ph, data, nelements);
3984
3985	/*
3986	 * Free the encoded data
3987	 */
3988	if (ph.ph_size != 0)
3989		kmem_free(ph.ph_data, ph.ph_size);
3990
3991	return (rval);
3992}
3993
3994/*
3995 * Lookup and return an array of composite properties.  The driver must
3996 * provide the decode routine.
3997 */
3998int
3999ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
4000    uint_t flags, char *name, void *data, uint_t *nelements,
4001    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
4002{
4003	return (ddi_prop_lookup_common(match_dev, dip,
4004	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
4005	    data, nelements, prop_decoder));
4006}
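
/*
 * Example (an illustrative sketch; xx_decode_config, struct xx_config and
 * the "xx-config" property are hypothetical): a driver with a private
 * encoding supplies its own decode routine and calls:
 *
 *	struct xx_config *cfg;
 *	uint_t n;
 *
 *	if (ddi_prop_lookup(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
 *	    "xx-config", &cfg, &n, xx_decode_config) == DDI_PROP_SUCCESS) {
 *		... use cfg ...
 *	}
 */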
4007
4008/*
4009 * Return 1 if a property exists (no type checking done).
4010 * Return 0 if it does not exist.
4011 */
4012int
4013ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
4014{
4015	int	i;
4016	uint_t	x = 0;
4017
4018	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
4019	    flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
4020	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
4021}
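
/*
 * Example (an illustrative sketch; the "xx-fast-mode" property and xxp are
 * hypothetical): test for a boolean-style property during attach.
 *
 *	if (ddi_prop_exists(DDI_DEV_T_ANY, dip,
 *	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "xx-fast-mode"))
 *		xxp->xx_fast = B_TRUE;
 */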
4022
4023
4024/*
4025 * Update an array of composite properties.  The driver must
4026 * provide the encode routine.
4027 */
4028int
4029ddi_prop_update(dev_t match_dev, dev_info_t *dip,
4030    char *name, void *data, uint_t nelements,
4031    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
4032{
4033	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
4034	    name, data, nelements, prop_create));
4035}
4036
4037/*
4038 * Get a single integer or boolean property and return it.
4039 * If the property does not exist, or cannot be decoded,
4040 * then return the defvalue passed in.
4041 *
4042 * This routine always succeeds.
4043 */
4044int
4045ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
4046    char *name, int defvalue)
4047{
4048	int	data;
4049	uint_t	nelements;
4050	int	rval;
4051
4052	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4053	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4054#ifdef DEBUG
4055		if (dip != NULL) {
4056			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
4057			    " 0x%x (prop = %s, node = %s%d)", flags,
4058			    name, ddi_driver_name(dip), ddi_get_instance(dip));
4059		}
4060#endif /* DEBUG */
4061		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4062		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4063	}
4064
4065	if ((rval = ddi_prop_lookup_common(match_dev, dip,
4066	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
4067	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
4068		if (rval == DDI_PROP_END_OF_DATA)
4069			data = 1;
4070		else
4071			data = defvalue;
4072	}
4073	return (data);
4074}
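
/*
 * Example (an illustrative sketch; "burst-size" and XX_DEFAULT_BURST are
 * hypothetical): look up a tunable with a built-in fallback; no error
 * handling is needed because the default is returned on any failure.
 *
 *	int burst = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
 *	    DDI_PROP_DONTPASS, "burst-size", XX_DEFAULT_BURST);
 */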
4075
4076/*
4077 * Get a single 64 bit integer or boolean property and return it.
4078 * If the property does not exist, or cannot be decoded,
4079 * then return the defvalue passed in.
4080 *
4081 * This routine always succeeds.
4082 */
4083int64_t
4084ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
4085    char *name, int64_t defvalue)
4086{
4087	int64_t	data;
4088	uint_t	nelements;
4089	int	rval;
4090
4091	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4092	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4093#ifdef DEBUG
4094		if (dip != NULL) {
4095			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
4096			    " 0x%x (prop = %s, node = %s%d)", flags,
4097			    name, ddi_driver_name(dip), ddi_get_instance(dip));
4098		}
4099#endif /* DEBUG */
4100		return (DDI_PROP_INVAL_ARG);
4101	}
4102
4103	if ((rval = ddi_prop_lookup_common(match_dev, dip,
4104	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4105	    name, &data, &nelements, ddi_prop_fm_decode_int64))
4106	    != DDI_PROP_SUCCESS) {
4107		if (rval == DDI_PROP_END_OF_DATA)
4108			data = 1;
4109		else
4110			data = defvalue;
4111	}
4112	return (data);
4113}
4114
4115/*
4116 * Get an array of integer properties
4117 */
4118int
4119ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4120    char *name, int **data, uint_t *nelements)
4121{
4122	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4123	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4124#ifdef DEBUG
4125		if (dip != NULL) {
4126			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
4127			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4128			    flags, name, ddi_driver_name(dip),
4129			    ddi_get_instance(dip));
4130		}
4131#endif /* DEBUG */
4132		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4133		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4134	}
4135
4136	return (ddi_prop_lookup_common(match_dev, dip,
4137	    (flags | DDI_PROP_TYPE_INT), name, data,
4138	    nelements, ddi_prop_fm_decode_ints));
4139}
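
/*
 * Example (an illustrative sketch; the "reg-list" property is hypothetical):
 * the array returned through 'data' is allocated by the framework and must
 * be released with ddi_prop_free(9F).
 *
 *	int *regs;
 *	uint_t nregs;
 *
 *	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
 *	    "reg-list", &regs, &nregs) == DDI_PROP_SUCCESS) {
 *		... use regs[0] .. regs[nregs - 1] ...
 *		ddi_prop_free(regs);
 *	}
 */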
4140
4141/*
4142 * Get an array of 64 bit integer properties
4143 */
4144int
4145ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4146    char *name, int64_t **data, uint_t *nelements)
4147{
4148	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4149	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4150#ifdef DEBUG
4151		if (dip != NULL) {
4152			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
4153			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4154			    flags, name, ddi_driver_name(dip),
4155			    ddi_get_instance(dip));
4156		}
4157#endif /* DEBUG */
4158		return (DDI_PROP_INVAL_ARG);
4159	}
4160
4161	return (ddi_prop_lookup_common(match_dev, dip,
4162	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4163	    name, data, nelements, ddi_prop_fm_decode_int64_array));
4164}
4165
4166/*
4167 * Update a single integer property.  If the property exists on the driver's
4168 * property list it is updated; otherwise it is created.
4169 */
4170int
4171ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4172    char *name, int data)
4173{
4174	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4175	    name, &data, 1, ddi_prop_fm_encode_ints));
4176}
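
/*
 * Example (an illustrative sketch; "xx-timeout" is hypothetical): create or
 * update a node-wide driver property and check for failure.
 *
 *	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, "xx-timeout", 30) !=
 *	    DDI_PROP_SUCCESS)
 *		cmn_err(CE_WARN, "!xx: cannot create xx-timeout property");
 */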
4177
4178/*
4179 * Update a single 64 bit integer property.
4180 * Update the driver property list if it exists, else create it.
4181 */
4182int
4183ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4184    char *name, int64_t data)
4185{
4186	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4187	    name, &data, 1, ddi_prop_fm_encode_int64));
4188}
4189
4190int
4191e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4192    char *name, int data)
4193{
4194	return (ddi_prop_update_common(match_dev, dip,
4195	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4196	    name, &data, 1, ddi_prop_fm_encode_ints));
4197}
4198
4199int
4200e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4201    char *name, int64_t data)
4202{
4203	return (ddi_prop_update_common(match_dev, dip,
4204	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4205	    name, &data, 1, ddi_prop_fm_encode_int64));
4206}
4207
4208/*
4209 * Update an array of integer properties.  If the property exists on the
4210 * driver's property list it is updated; otherwise it is created.
4211 */
4212int
4213ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4214    char *name, int *data, uint_t nelements)
4215{
4216	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4217	    name, data, nelements, ddi_prop_fm_encode_ints));
4218}
4219
4220/*
4221 * Update an array of 64 bit integer properties.
4222 * Update the driver property list if it exists, else create it.
4223 */
4224int
4225ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4226    char *name, int64_t *data, uint_t nelements)
4227{
4228	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4229	    name, data, nelements, ddi_prop_fm_encode_int64));
4230}
4231
4232int
4233e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4234    char *name, int64_t *data, uint_t nelements)
4235{
4236	return (ddi_prop_update_common(match_dev, dip,
4237	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4238	    name, data, nelements, ddi_prop_fm_encode_int64));
4239}
4240
4241int
4242e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4243    char *name, int *data, uint_t nelements)
4244{
4245	return (ddi_prop_update_common(match_dev, dip,
4246	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4247	    name, data, nelements, ddi_prop_fm_encode_ints));
4248}
4249
4250/*
4251 * Get a single string property.
4252 */
4253int
4254ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
4255    char *name, char **data)
4256{
4257	uint_t x;
4258
4259	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4260	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4261#ifdef DEBUG
4262		if (dip != NULL) {
4263			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
4264			    "(prop = %s, node = %s%d); invalid bits ignored",
4265			    "ddi_prop_lookup_string", flags, name,
4266			    ddi_driver_name(dip), ddi_get_instance(dip));
4267		}
4268#endif /* DEBUG */
4269		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4270		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4271	}
4272
4273	return (ddi_prop_lookup_common(match_dev, dip,
4274	    (flags | DDI_PROP_TYPE_STRING), name, data,
4275	    &x, ddi_prop_fm_decode_string));
4276}
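
/*
 * Example (an illustrative sketch; "xx-mode" and xxp are hypothetical):
 * fetch a string property and release it with ddi_prop_free(9F) when done.
 *
 *	char *mode;
 *
 *	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
 *	    "xx-mode", &mode) == DDI_PROP_SUCCESS) {
 *		if (strcmp(mode, "polled") == 0)
 *			xxp->xx_polled = B_TRUE;
 *		ddi_prop_free(mode);
 *	}
 */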
4277
4278/*
4279 * Get an array of strings property.
4280 */
4281int
4282ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4283    char *name, char ***data, uint_t *nelements)
4284{
4285	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4286	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4287#ifdef DEBUG
4288		if (dip != NULL) {
4289			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
4290			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4291			    flags, name, ddi_driver_name(dip),
4292			    ddi_get_instance(dip));
4293		}
4294#endif /* DEBUG */
4295		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4296		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4297	}
4298
4299	return (ddi_prop_lookup_common(match_dev, dip,
4300	    (flags | DDI_PROP_TYPE_STRING), name, data,
4301	    nelements, ddi_prop_fm_decode_strings));
4302}
4303
4304/*
4305 * Update a single string property.
4306 */
4307int
4308ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4309    char *name, char *data)
4310{
4311	return (ddi_prop_update_common(match_dev, dip,
4312	    DDI_PROP_TYPE_STRING, name, &data, 1,
4313	    ddi_prop_fm_encode_string));
4314}
4315
4316int
4317e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4318    char *name, char *data)
4319{
4320	return (ddi_prop_update_common(match_dev, dip,
4321	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4322	    name, &data, 1, ddi_prop_fm_encode_string));
4323}
4324
4325
4326/*
4327 * Update an array of strings property.
4328 */
4329int
4330ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4331    char *name, char **data, uint_t nelements)
4332{
4333	return (ddi_prop_update_common(match_dev, dip,
4334	    DDI_PROP_TYPE_STRING, name, data, nelements,
4335	    ddi_prop_fm_encode_strings));
4336}
4337
4338int
4339e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4340    char *name, char **data, uint_t nelements)
4341{
4342	return (ddi_prop_update_common(match_dev, dip,
4343	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4344	    name, data, nelements,
4345	    ddi_prop_fm_encode_strings));
4346}
4347
4348
4349/*
4350 * Get an array of bytes property.
4351 */
4352int
4353ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4354    char *name, uchar_t **data, uint_t *nelements)
4355{
4356	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4357	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4358#ifdef DEBUG
4359		if (dip != NULL) {
4360			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4361			    " invalid flag 0x%x (prop = %s, node = %s%d)",
4362			    flags, name, ddi_driver_name(dip),
4363			    ddi_get_instance(dip));
4364		}
4365#endif /* DEBUG */
4366		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4367		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4368	}
4369
4370	return (ddi_prop_lookup_common(match_dev, dip,
4371	    (flags | DDI_PROP_TYPE_BYTE), name, data,
4372	    nelements, ddi_prop_fm_decode_bytes));
4373}
4374
4375/*
4376 * Update an array of bytes property.
4377 */
4378int
4379ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4380    char *name, uchar_t *data, uint_t nelements)
4381{
4382	if (nelements == 0)
4383		return (DDI_PROP_INVAL_ARG);
4384
4385	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
4386	    name, data, nelements, ddi_prop_fm_encode_bytes));
4387}
4388
4389
4390int
4391e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4392    char *name, uchar_t *data, uint_t nelements)
4393{
4394	if (nelements == 0)
4395		return (DDI_PROP_INVAL_ARG);
4396
4397	return (ddi_prop_update_common(match_dev, dip,
4398	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4399	    name, data, nelements, ddi_prop_fm_encode_bytes));
4400}
4401
4402
4403/*
4404 * ddi_prop_remove_common:	Undefine a managed property:
4405 *			Input dev_t must match dev_t when defined.
4406 *			May return DDI_PROP_NOT_FOUND.
4407 *			DDI_PROP_INVAL_ARG is also possible if dev is
4408 *			DDI_DEV_T_ANY or the incoming name is the NULL string.
4409 */
4410int
4411ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4412{
4413	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4414	ddi_prop_t	*propp;
4415	ddi_prop_t	*lastpropp = NULL;
4416
4417	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
4418	    (strlen(name) == 0)) {
4419		return (DDI_PROP_INVAL_ARG);
4420	}
4421
4422	if (flag & DDI_PROP_SYSTEM_DEF)
4423		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4424	else if (flag & DDI_PROP_HW_DEF)
4425		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4426
4427	mutex_enter(&(DEVI(dip)->devi_lock));
4428
4429	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
4430		if (DDI_STRSAME(propp->prop_name, name) &&
4431		    (dev == propp->prop_dev)) {
4432			/*
4433			 * Unlink this propp allowing for it to
4434			 * be first in the list:
4435			 */
4436
4437			if (lastpropp == NULL)
4438				*list_head = propp->prop_next;
4439			else
4440				lastpropp->prop_next = propp->prop_next;
4441
4442			mutex_exit(&(DEVI(dip)->devi_lock));
4443
4444			/*
4445			 * Free memory and return...
4446			 */
4447			kmem_free(propp->prop_name,
4448			    strlen(propp->prop_name) + 1);
4449			if (propp->prop_len != 0)
4450				kmem_free(propp->prop_val, propp->prop_len);
4451			kmem_free(propp, sizeof (ddi_prop_t));
4452			return (DDI_PROP_SUCCESS);
4453		}
4454		lastpropp = propp;
4455	}
4456	mutex_exit(&(DEVI(dip)->devi_lock));
4457	return (DDI_PROP_NOT_FOUND);
4458}
4459
4460int
4461ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4462{
4463	return (ddi_prop_remove_common(dev, dip, name, 0));
4464}
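
/*
 * Example (an illustrative sketch; "xx-timeout" is hypothetical): remove a
 * property at detach time.  The dev_t must match the one used when the
 * property was created (DDI_DEV_T_NONE here for a node-wide property).
 *
 *	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "xx-timeout");
 */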
4465
4466int
4467e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4468{
4469	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
4470}
4471
4472/*
4473 * e_ddi_prop_list_delete: remove a list of properties
4474 *	Note that the caller needs to provide the required protection
4475 *	(e.g. devi_lock if these properties are still attached to a devi)
4476 */
4477void
4478e_ddi_prop_list_delete(ddi_prop_t *props)
4479{
4480	i_ddi_prop_list_delete(props);
4481}
4482
4483/*
4484 * ddi_prop_remove_all_common:
4485 *	Used before unloading a driver to remove
4486 *	all properties. (undefines all dev_t's props.)
4487 *	Also removes `explicitly undefined' props.
4488 *	No errors possible.
4489 */
4490void
4491ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4492{
4493	ddi_prop_t	**list_head;
4494
4495	mutex_enter(&(DEVI(dip)->devi_lock));
4496	if (flag & DDI_PROP_SYSTEM_DEF) {
4497		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4498	} else if (flag & DDI_PROP_HW_DEF) {
4499		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4500	} else {
4501		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4502	}
4503	i_ddi_prop_list_delete(*list_head);
4504	*list_head = NULL;
4505	mutex_exit(&(DEVI(dip)->devi_lock));
4506}
4507
4508
4509/*
4510 * ddi_prop_remove_all:		Remove all driver prop definitions.
4511 */
4512
4513void
4514ddi_prop_remove_all(dev_info_t *dip)
4515{
4516	i_ddi_prop_dyn_driver_set(dip, NULL);
4517	ddi_prop_remove_all_common(dip, 0);
4518}
4519
4520/*
4521 * e_ddi_prop_remove_all:	Remove all system prop definitions.
4522 */
4523
4524void
4525e_ddi_prop_remove_all(dev_info_t *dip)
4526{
4527	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4528}
4529
4530
4531/*
4532 * ddi_prop_undefine:	Explicitly undefine a property.  Property
4533 *			searches which match this property return
4534 *			the error code DDI_PROP_UNDEFINED.
4535 *
4536 *			Use ddi_prop_remove to negate the effect of
4537 *			ddi_prop_undefine.
4538 *
4539 *			See above for error returns.
4540 */
4541
4542int
4543ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4544{
4545	if (!(flag & DDI_PROP_CANSLEEP))
4546		flag |= DDI_PROP_DONTSLEEP;
4547	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4548	return (ddi_prop_update_common(dev, dip, flag,
4549	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4550}
4551
4552int
4553e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4554{
4555	if (!(flag & DDI_PROP_CANSLEEP))
4556		flag |= DDI_PROP_DONTSLEEP;
4557	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4558	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4559	return (ddi_prop_update_common(dev, dip, flag,
4560	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4561}
4562
4563/*
4564 * Support for gathering dynamic properties in devinfo snapshot.
4565 */
4566void
4567i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4568{
4569	DEVI(dip)->devi_prop_dyn_driver = dp;
4570}
4571
4572i_ddi_prop_dyn_t *
4573i_ddi_prop_dyn_driver_get(dev_info_t *dip)
4574{
4575	return (DEVI(dip)->devi_prop_dyn_driver);
4576}
4577
4578void
4579i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4580{
4581	DEVI(dip)->devi_prop_dyn_parent = dp;
4582}
4583
4584i_ddi_prop_dyn_t *
4585i_ddi_prop_dyn_parent_get(dev_info_t *dip)
4586{
4587	return (DEVI(dip)->devi_prop_dyn_parent);
4588}
4589
4590void
4591i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4592{
4593	/* for now we invalidate the entire cached snapshot */
4594	if (dip && dp)
4595		i_ddi_di_cache_invalidate(KM_SLEEP);
4596}
4597
4598/* ARGSUSED */
4599void
4600ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
4601{
4602	/* for now we invalidate the entire cached snapshot */
4603	i_ddi_di_cache_invalidate(KM_SLEEP);
4604}
4605
4606
4607/*
4608 * Code to search hardware layer (PROM), if it exists, on behalf of child.
4609 *
4610 * if input dip != child_dip, then call is on behalf of child
4611 * to search PROM, do it via ddi_prop_search_common() and ascend only
4612 * if allowed.
4613 *
4614 * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4615 * to search for PROM defined props only.
4616 *
4617 * Note that the PROM search is done only if the requested dev
4618 * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4619 * have no associated dev, thus are automatically associated with
4620 * DDI_DEV_T_NONE.
4621 *
4622 * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4623 *
4624 * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4625 * that the property resides in the prom.
4626 */
4627int
4628impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4629    ddi_prop_op_t prop_op, int mod_flags,
4630    char *name, caddr_t valuep, int *lengthp)
4631{
4632	int	len;
4633	caddr_t buffer;
4634
4635	/*
4636	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
4637	 * look in caller's PROM if it's a self identifying device...
4638	 *
4639	 * Note that this is very similar to ddi_prop_op, but we
4640	 * search the PROM instead of the s/w defined properties,
4641	 * and we are called on by the parent driver to do this for
4642	 * the child.
4643	 */
4644
4645	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
4646	    ndi_dev_is_prom_node(ch_dip) &&
4647	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
4648		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
4649		if (len == -1) {
4650			return (DDI_PROP_NOT_FOUND);
4651		}
4652
4653		/*
4654		 * If exists only request, we're done
4655		 */
4656		if (prop_op == PROP_EXISTS) {
4657			return (DDI_PROP_FOUND_1275);
4658		}
4659
4660		/*
4661		 * If length only request or prop length == 0, get out
4662		 */
4663		if ((prop_op == PROP_LEN) || (len == 0)) {
4664			*lengthp = len;
4665			return (DDI_PROP_FOUND_1275);
4666		}
4667
4668		/*
4669		 * Allocate buffer if required... (either way `buffer'
4670		 * receives the destination address).
4671		 */
4672
4673		switch (prop_op) {
4674
4675		case PROP_LEN_AND_VAL_ALLOC:
4676
4677			buffer = kmem_alloc((size_t)len,
4678			    mod_flags & DDI_PROP_CANSLEEP ?
4679			    KM_SLEEP : KM_NOSLEEP);
4680			if (buffer == NULL) {
4681				return (DDI_PROP_NO_MEMORY);
4682			}
4683			*(caddr_t *)valuep = buffer;
4684			break;
4685
4686		case PROP_LEN_AND_VAL_BUF:
4687
4688			if (len > (*lengthp)) {
4689				*lengthp = len;
4690				return (DDI_PROP_BUF_TOO_SMALL);
4691			}
4692
4693			buffer = valuep;
4694			break;
4695
4696		default:
4697			break;
4698		}
4699
4700		/*
4701		 * Call the PROM function to do the copy.
4702		 */
4703		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
4704		    name, buffer);
4705
4706		*lengthp = len; /* return the actual length to the caller */
4707		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
4708		return (DDI_PROP_FOUND_1275);
4709	}
4710
4711	return (DDI_PROP_NOT_FOUND);
4712}
4713
4714/*
4715 * The ddi_bus_prop_op default bus nexus prop op function.
4716 *
4717 * Code to search hardware layer (PROM), if it exists,
4718 * on behalf of child, then, if appropriate, ascend and check
4719 * my own software defined properties...
4720 */
4721int
4722ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4723    ddi_prop_op_t prop_op, int mod_flags,
4724    char *name, caddr_t valuep, int *lengthp)
4725{
4726	int	error;
4727
4728	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
4729	    name, valuep, lengthp);
4730
4731	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
4732	    error == DDI_PROP_BUF_TOO_SMALL)
4733		return (error);
4734
4735	if (error == DDI_PROP_NO_MEMORY) {
4736		cmn_err(CE_CONT, prop_no_mem_msg, name);
4737		return (DDI_PROP_NO_MEMORY);
4738	}
4739
4740	/*
4741	 * Check the 'options' node as a last resort
4742	 */
4743	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
4744		return (DDI_PROP_NOT_FOUND);
4745
4746	if (ch_dip == ddi_root_node())	{
4747		/*
4748		 * As a last resort, when we've reached
4749		 * the top and still haven't found the
4750		 * property, see if the desired property
4751		 * is attached to the options node.
4752		 *
4753		 * The options dip is attached right after boot.
4754		 */
4755		ASSERT(options_dip != NULL);
4756		/*
4757		 * Force the "don't pass" flag to *just* see
4758		 * what the options node has to offer.
4759		 */
4760		return (ddi_prop_search_common(dev, options_dip, prop_op,
4761		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
4762		    (uint_t *)lengthp));
4763	}
4764
4765	/*
4766	 * Otherwise, continue search with parent's s/w defined properties...
4767	 * NOTE: Using `dip' in following call increments the level.
4768	 */
4769
4770	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
4771	    name, valuep, (uint_t *)lengthp));
4772}
4773
4774/*
4775 * External property functions used by other parts of the kernel...
4776 */
4777
4778/*
4779 * e_ddi_getlongprop: See comments for ddi_get_longprop.
4780 */
4781
4782int
4783e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4784    caddr_t valuep, int *lengthp)
4785{
4786	_NOTE(ARGUNUSED(type))
4787	dev_info_t *devi;
4788	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4789	int error;
4790
4791	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4792		return (DDI_PROP_NOT_FOUND);
4793
4794	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4795	ddi_release_devi(devi);
4796	return (error);
4797}
4798
4799/*
4800 * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
4801 */
4802
4803int
4804e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4805    caddr_t valuep, int *lengthp)
4806{
4807	_NOTE(ARGUNUSED(type))
4808	dev_info_t *devi;
4809	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4810	int error;
4811
4812	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4813		return (DDI_PROP_NOT_FOUND);
4814
4815	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4816	ddi_release_devi(devi);
4817	return (error);
4818}
4819
4820/*
4821 * e_ddi_getprop:	See comments for ddi_getprop.
4822 */
4823int
4824e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4825{
4826	_NOTE(ARGUNUSED(type))
4827	dev_info_t *devi;
4828	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4829	int	propvalue = defvalue;
4830	int	proplength = sizeof (int);
4831	int	error;
4832
4833	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4834		return (defvalue);
4835
4836	error = cdev_prop_op(dev, devi, prop_op,
4837	    flags, name, (caddr_t)&propvalue, &proplength);
4838	ddi_release_devi(devi);
4839
4840	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4841		propvalue = 1;
4842
4843	return (propvalue);
4844}
4845
4846/*
4847 * e_ddi_getprop_int64:
4848 *
4849 * This is a typed interface, but it predates typed properties. With the
4850 * introduction of typed properties the framework tries to ensure
4851 * consistent use of typed interfaces. This is why TYPE_INT64 is not
4852 * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
4853 * typed interface invokes legacy (non-typed) interfaces:
4854 * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F).  In this case the
4855 * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4856 * this type of lookup as a single operation we invoke the legacy
4857 * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4858 * framework ddi_prop_op(9F) implementation is expected to check for
4859 * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4860 * (currently TYPE_INT64).
4861 */
4862int64_t
4863e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4864    int flags, int64_t defvalue)
4865{
4866	_NOTE(ARGUNUSED(type))
4867	dev_info_t	*devi;
4868	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
4869	int64_t		propvalue = defvalue;
4870	int		proplength = sizeof (propvalue);
4871	int		error;
4872
4873	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4874		return (defvalue);
4875
4876	error = cdev_prop_op(dev, devi, prop_op, flags |
4877	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4878	ddi_release_devi(devi);
4879
4880	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4881		propvalue = 1;
4882
4883	return (propvalue);
4884}
4885
4886/*
4887 * e_ddi_getproplen:	See comments for ddi_getproplen.
4888 */
4889int
4890e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4891{
4892	_NOTE(ARGUNUSED(type))
4893	dev_info_t *devi;
4894	ddi_prop_op_t prop_op = PROP_LEN;
4895	int error;
4896
4897	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4898		return (DDI_PROP_NOT_FOUND);
4899
4900	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4901	ddi_release_devi(devi);
4902	return (error);
4903}
4904
4905/*
4906 * Routines to get at elements of the dev_info structure
4907 */
4908
4909/*
4910 * ddi_binding_name: Return the driver binding name of the devinfo node
4911 *		This is the name the OS used to bind the node to a driver.
4912 */
4913char *
4914ddi_binding_name(dev_info_t *dip)
4915{
4916	return (DEVI(dip)->devi_binding_name);
4917}
4918
4919/*
4920 * ddi_driver_major: Return the major number of the driver that
4921 *		the supplied devinfo is bound to (-1 if none)
4922 */
4923major_t
4924ddi_driver_major(dev_info_t *devi)
4925{
4926	return (DEVI(devi)->devi_major);
4927}
4928
4929/*
4930 * ddi_driver_name: Return the normalized driver name. This is the
4931 *		actual driver name.
4932 */
4933const char *
4934ddi_driver_name(dev_info_t *devi)
4935{
4936	major_t major;
4937
4938	if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
4939		return (ddi_major_to_name(major));
4940
4941	return (ddi_node_name(devi));
4942}
4943
4944/*
4945 * i_ddi_set_binding_name:	Set binding name.
4946 *
4947 *	Set the binding name to the given name.
4948 *	This routine is for use by the ddi implementation, not by drivers.
4949 */
4950void
4951i_ddi_set_binding_name(dev_info_t *dip, char *name)
4952{
4953	DEVI(dip)->devi_binding_name = name;
4954
4955}
4956
4957/*
4958 * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4959 * the implementation has used to bind the node to a driver.
4960 */
4961char *
4962ddi_get_name(dev_info_t *dip)
4963{
4964	return (DEVI(dip)->devi_binding_name);
4965}
4966
4967/*
4968 * ddi_node_name: Return the name property of the devinfo node
4969 *		This may differ from ddi_binding_name if the node name
4970 *		does not define a binding to a driver (i.e. generic names).
4971 */
4972char *
4973ddi_node_name(dev_info_t *dip)
4974{
4975	return (DEVI(dip)->devi_node_name);
4976}
4977
4978
4979/*
4980 * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
4981 */
4982int
4983ddi_get_nodeid(dev_info_t *dip)
4984{
4985	return (DEVI(dip)->devi_nodeid);
4986}
4987
4988int
4989ddi_get_instance(dev_info_t *dip)
4990{
4991	return (DEVI(dip)->devi_instance);
4992}
4993
4994struct dev_ops *
4995ddi_get_driver(dev_info_t *dip)
4996{
4997	return (DEVI(dip)->devi_ops);
4998}
4999
5000void
5001ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
5002{
5003	DEVI(dip)->devi_ops = devo;
5004}
5005
5006/*
5007 * ddi_set_driver_private/ddi_get_driver_private:
5008 * Get/set device driver private data in devinfo.
5009 */
5010void
5011ddi_set_driver_private(dev_info_t *dip, void *data)
5012{
5013	DEVI(dip)->devi_driver_data = data;
5014}
5015
5016void *
5017ddi_get_driver_private(dev_info_t *dip)
5018{
5019	return (DEVI(dip)->devi_driver_data);
5020}
5021
5022/*
5023 * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
5024 */
5025
5026dev_info_t *
5027ddi_get_parent(dev_info_t *dip)
5028{
5029	return ((dev_info_t *)DEVI(dip)->devi_parent);
5030}
5031
5032dev_info_t *
5033ddi_get_child(dev_info_t *dip)
5034{
5035	return ((dev_info_t *)DEVI(dip)->devi_child);
5036}
5037
5038dev_info_t *
5039ddi_get_next_sibling(dev_info_t *dip)
5040{
5041	return ((dev_info_t *)DEVI(dip)->devi_sibling);
5042}
5043
5044dev_info_t *
5045ddi_get_next(dev_info_t *dip)
5046{
5047	return ((dev_info_t *)DEVI(dip)->devi_next);
5048}
5049
5050void
5051ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
5052{
5053	DEVI(dip)->devi_next = DEVI(nextdip);
5054}
5055
5056/*
5057 * ddi_root_node:		Return root node of devinfo tree
5058 */
5059
5060dev_info_t *
5061ddi_root_node(void)
5062{
5063	extern dev_info_t *top_devinfo;
5064
5065	return (top_devinfo);
5066}
5067
5068/*
5069 * Miscellaneous functions:
5070 */
5071
5072/*
5073 * Implementation specific hooks
5074 */
5075
5076void
5077ddi_report_dev(dev_info_t *d)
5078{
5079	char *b;
5080
5081	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);
5082
5083	/*
5084	 * If this devinfo node has cb_ops, it's implicitly accessible from
5085	 * userland, so we print its full name together with the instance
5086	 * number 'abbreviation' that the driver may use internally.
5087	 */
5088	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
5089	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
5090		cmn_err(CE_CONT, "?%s%d is %s\n",
5091		    ddi_driver_name(d), ddi_get_instance(d),
5092		    ddi_pathname(d, b));
5093		kmem_free(b, MAXPATHLEN);
5094	}
5095}
5096
5097/*
5098 * ddi_ctlops() is described to the assembler as not needing a new register
5099 * window when it is called; this reduces the cost of climbing the device
5100 * tree without relying on the tail call optimization.
5101 */
5102int
5103ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
5104{
5105	int ret;
5106
5107	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
5108	    (void *)&rnumber, (void *)result);
5109
5110	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
5111}
5112
5113int
5114ddi_dev_nregs(dev_info_t *dev, int *result)
5115{
5116	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
5117}
5118
5119int
5120ddi_dev_is_sid(dev_info_t *d)
5121{
5122	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
5123}
5124
5125int
5126ddi_slaveonly(dev_info_t *d)
5127{
5128	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
5129}
5130
5131int
5132ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
5133{
5134	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
5135}
5136
5137int
5138ddi_streams_driver(dev_info_t *dip)
5139{
5140	if (i_ddi_devi_attached(dip) &&
5141	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
5142	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
5143		return (DDI_SUCCESS);
5144	return (DDI_FAILURE);
5145}
5146
5147/*
5148 * callback free list
5149 */
5150
5151static int ncallbacks;
5152static int nc_low = 170;
5153static int nc_med = 512;
5154static int nc_high = 2048;
5155static struct ddi_callback *callbackq;
5156static struct ddi_callback *callbackqfree;
5157
5158/*
5159 * set/run callback lists
5160 */
5161struct	cbstats	{
5162	kstat_named_t	cb_asked;
5163	kstat_named_t	cb_new;
5164	kstat_named_t	cb_run;
5165	kstat_named_t	cb_delete;
5166	kstat_named_t	cb_maxreq;
5167	kstat_named_t	cb_maxlist;
5168	kstat_named_t	cb_alloc;
5169	kstat_named_t	cb_runouts;
5170	kstat_named_t	cb_L2;
5171	kstat_named_t	cb_grow;
5172} cbstats = {
5173	{"asked",	KSTAT_DATA_UINT32},
5174	{"new",		KSTAT_DATA_UINT32},
5175	{"run",		KSTAT_DATA_UINT32},
5176	{"delete",	KSTAT_DATA_UINT32},
5177	{"maxreq",	KSTAT_DATA_UINT32},
5178	{"maxlist",	KSTAT_DATA_UINT32},
5179	{"alloc",	KSTAT_DATA_UINT32},
5180	{"runouts",	KSTAT_DATA_UINT32},
5181	{"L2",		KSTAT_DATA_UINT32},
5182	{"grow",	KSTAT_DATA_UINT32},
5183};
5184
5185#define	nc_asked	cb_asked.value.ui32
5186#define	nc_new		cb_new.value.ui32
5187#define	nc_run		cb_run.value.ui32
5188#define	nc_delete	cb_delete.value.ui32
5189#define	nc_maxreq	cb_maxreq.value.ui32
5190#define	nc_maxlist	cb_maxlist.value.ui32
5191#define	nc_alloc	cb_alloc.value.ui32
5192#define	nc_runouts	cb_runouts.value.ui32
5193#define	nc_L2		cb_L2.value.ui32
5194#define	nc_grow		cb_grow.value.ui32
5195
5196static kmutex_t ddi_callback_mutex;
5197
5198/*
5199 * callbacks are handled using an L1/L2 cache. The L1 cache
5200 * comes out of kmem_alloc and can expand/shrink dynamically. If
5201 * we can't get callbacks from the L1 cache [because pageout is doing
5202 * I/O at the time freemem is 0], we allocate callbacks out of the
5203 * L2 cache. The L2 cache is static and depends on the memory size.
5204 * [We might also count the number of devices at probe time and
5205 * allocate one structure per device and adjust for deferred attach]
5206 */
5207void
5208impl_ddi_callback_init(void)
5209{
5210	int	i;
5211	uint_t	physmegs;
5212	kstat_t	*ksp;
5213
5214	physmegs = physmem >> (20 - PAGESHIFT);
5215	if (physmegs < 48) {
5216		ncallbacks = nc_low;
5217	} else if (physmegs < 128) {
5218		ncallbacks = nc_med;
5219	} else {
5220		ncallbacks = nc_high;
5221	}
5222
5223	/*
5224	 * init free list
5225	 */
5226	callbackq = kmem_zalloc(
5227	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
5228	for (i = 0; i < ncallbacks-1; i++)
5229		callbackq[i].c_nfree = &callbackq[i+1];
5230	callbackqfree = callbackq;
5231
5232	/* init kstats */
5233	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
5234	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
5235		ksp->ks_data = (void *) &cbstats;
5236		kstat_install(ksp);
5237	}
5238
5239}
5240
5241static void
5242callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
5243	int count)
5244{
5245	struct ddi_callback *list, *marker, *new;
5246	size_t size = sizeof (struct ddi_callback);
5247
5248	list = marker = (struct ddi_callback *)*listid;
5249	while (list != NULL) {
5250		if (list->c_call == funcp && list->c_arg == arg) {
5251			list->c_count += count;
5252			return;
5253		}
5254		marker = list;
5255		list = list->c_nlist;
5256	}
5257	new = kmem_alloc(size, KM_NOSLEEP);
5258	if (new == NULL) {
5259		new = callbackqfree;
5260		if (new == NULL) {
5261			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
5262			    &size, KM_NOSLEEP | KM_PANIC);
5263			cbstats.nc_grow++;
5264		} else {
5265			callbackqfree = new->c_nfree;
5266			cbstats.nc_L2++;
5267		}
5268	}
5269	if (marker != NULL) {
5270		marker->c_nlist = new;
5271	} else {
5272		*listid = (uintptr_t)new;
5273	}
5274	new->c_size = size;
5275	new->c_nlist = NULL;
5276	new->c_call = funcp;
5277	new->c_arg = arg;
5278	new->c_count = count;
5279	cbstats.nc_new++;
5280	cbstats.nc_alloc++;
5281	if (cbstats.nc_alloc > cbstats.nc_maxlist)
5282		cbstats.nc_maxlist = cbstats.nc_alloc;
5283}
5284
5285void
5286ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
5287{
5288	mutex_enter(&ddi_callback_mutex);
5289	cbstats.nc_asked++;
5290	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
5291		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
5292	(void) callback_insert(funcp, arg, listid, 1);
5293	mutex_exit(&ddi_callback_mutex);
5294}
5295
5296static void
5297real_callback_run(void *Queue)
5298{
5299	int (*funcp)(caddr_t);
5300	caddr_t arg;
5301	int count, rval;
5302	uintptr_t *listid;
5303	struct ddi_callback *list, *marker;
5304	int check_pending = 1;
5305	int pending = 0;
5306
5307	do {
5308		mutex_enter(&ddi_callback_mutex);
5309		listid = Queue;
5310		list = (struct ddi_callback *)*listid;
5311		if (list == NULL) {
5312			mutex_exit(&ddi_callback_mutex);
5313			return;
5314		}
5315		if (check_pending) {
5316			marker = list;
5317			while (marker != NULL) {
5318				pending += marker->c_count;
5319				marker = marker->c_nlist;
5320			}
5321			check_pending = 0;
5322		}
5323		ASSERT(pending > 0);
5324		ASSERT(list->c_count > 0);
5325		funcp = list->c_call;
5326		arg = list->c_arg;
5327		count = list->c_count;
5328		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
5329		if (list >= &callbackq[0] &&
5330		    list <= &callbackq[ncallbacks-1]) {
5331			list->c_nfree = callbackqfree;
5332			callbackqfree = list;
5333		} else
5334			kmem_free(list, list->c_size);
5335
5336		cbstats.nc_delete++;
5337		cbstats.nc_alloc--;
5338		mutex_exit(&ddi_callback_mutex);
5339
5340		do {
5341			if ((rval = (*funcp)(arg)) == 0) {
5342				pending -= count;
5343				mutex_enter(&ddi_callback_mutex);
5344				(void) callback_insert(funcp, arg, listid,
5345				    count);
5346				cbstats.nc_runouts++;
5347			} else {
5348				pending--;
5349				mutex_enter(&ddi_callback_mutex);
5350				cbstats.nc_run++;
5351			}
5352			mutex_exit(&ddi_callback_mutex);
5353		} while (rval != 0 && (--count > 0));
5354	} while (pending > 0);
5355}
5356
5357void
5358ddi_run_callback(uintptr_t *listid)
5359{
5360	softcall(real_callback_run, listid);
5361}
5362
5363/*
5364 * ddi_periodic_t
5365 * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
5366 *     int level)
5367 *
5368 * INTERFACE LEVEL
5369 *      Solaris DDI specific (Solaris DDI)
5370 *
5371 * PARAMETERS
5372 *      func: the callback function
5373 *
5374 *            The callback function to be invoked periodically.  The function
5375 *            is invoked in kernel context if the level argument passed is zero;
5376 *            otherwise it is invoked in interrupt context at the specified
5377 *            level.
5378 *
5379 *       arg: the argument passed to the callback function
5380 *
5381 *  interval: interval time
5382 *
5383 *    level : callback interrupt level
5384 *
5385 *            If the value is zero, the callback function is invoked
5386 *            in kernel context. If the value is greater than zero but
5387 *            less than or equal to ten, the callback function is invoked in
5388 *            interrupt context at the specified interrupt level, which may
5389 *            be used for real time applications.
5390 *
5391 *            This value must be in the range 0-10, and may be given either as
5392 *            a number or as a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5393 *
5394 * DESCRIPTION
5395 *      ddi_periodic_add(9F) schedules the specified function to be
5396 *      ddi_periodic_add(9F) schedules the specified function to be
5397 *      invoked periodically at the given interval.
5398 *
5399 *      As with timeout(9F), the exact time interval over which the function
5400 *      takes effect cannot be guaranteed, but the value given is a close
5401 *      approximation.
5402 *
5403 *      Drivers waiting on behalf of processes with real-time constraints must
5404 *      pass a non-zero value for the level argument to ddi_periodic_add(9F).
5405 * RETURN VALUES
5406 *      ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5407 *      which must be used for ddi_periodic_delete(9F) to specify the request.
5408 *      which must be passed to ddi_periodic_delete(9F) to identify the request.
5409 * CONTEXT
5410 *      ddi_periodic_add(9F) can be called in user or kernel context, but
5411 *      it cannot be called in interrupt context, which is different from
5412 *      timeout(9F).
5413 */
5414ddi_periodic_t
5415ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
5416{
5417	/*
5418	 * Sanity check of the argument level.
5419	 */
5420	if (level < DDI_IPL_0 || level > DDI_IPL_10)
5421		cmn_err(CE_PANIC,
5422		    "ddi_periodic_add: invalid interrupt level (%d).", level);
5423
5424	/*
5425	 * Sanity check of the context. ddi_periodic_add() cannot be
5426	 * called in either interrupt context or high interrupt context.
5427	 */
5428	if (servicing_interrupt())
5429		cmn_err(CE_PANIC,
5430		    "ddi_periodic_add: called in (high) interrupt context.");
5431
5432	return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
5433}
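
/*
 * Example (an illustrative sketch; xx_watchdog, xxp and xx_periodic are
 * hypothetical): arrange for a watchdog routine to run in kernel context
 * once per second, and cancel it again at detach time.  The interval is
 * expressed in nanoseconds.
 *
 *	xxp->xx_periodic = ddi_periodic_add(xx_watchdog, xxp,
 *	    1000000000LL, DDI_IPL_0);
 *	...
 *	ddi_periodic_delete(xxp->xx_periodic);
 */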
5434
5435/*
5436 * void
5437 * ddi_periodic_delete(ddi_periodic_t req)
5438 *
5439 * INTERFACE LEVEL
5440 *     Solaris DDI specific (Solaris DDI)
5441 *
5442 * PARAMETERS
5443 *     req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5444 *     previously.
5445 *
5446 * DESCRIPTION
5447 *     ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5448 *     previously requested.
5449 *
5450 *     ddi_periodic_delete(9F) will not return until the pending request
5451 *     is canceled or executed.
5452 *
5453 *     As with untimeout(9F), calling ddi_periodic_delete(9F) for a
5454 *     timeout which is either running on another CPU, or has already
5455 *     completed, causes no problems. However, unlike untimeout(9F), there
5456 *     are no restrictions on the locks which might be held across the call
5457 *     to ddi_periodic_delete(9F).
5458 *
5459 *     Drivers should be structured with the understanding that the arrival of
5460 *     both an interrupt and a timeout for that interrupt can occasionally
5461 *     occur, in either order.
5462 *
5463 * CONTEXT
5464 *     ddi_periodic_delete(9F) can be called in user or kernel context, but
5465 *     it cannot be called in interrupt context, which is different from
5466 *     untimeout(9F).
5467 */
5468void
5469ddi_periodic_delete(ddi_periodic_t req)
5470{
5471	/*
5472	 * Sanity check of the context. ddi_periodic_delete() cannot be
5473	 * called in either interrupt context or high interrupt context.
5474	 */
5475	if (servicing_interrupt())
5476		cmn_err(CE_PANIC,
5477		    "ddi_periodic_delete: called in (high) interrupt context.");
5478
5479	i_untimeout((timeout_t)req);
5480}
5481
5482dev_info_t *
5483nodevinfo(dev_t dev, int otyp)
5484{
5485	_NOTE(ARGUNUSED(dev, otyp))
5486	return ((dev_info_t *)0);
5487}
5488
5489/*
5490 * A driver should support its own getinfo(9E) entry point. This function
5491 * is provided as a convenience for ON drivers that don't expect their
5492 * getinfo(9E) entry point to be called. A driver that uses this must not
5493 * call ddi_create_minor_node.
5494 */
5495int
5496ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5497{
5498	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
5499	return (DDI_FAILURE);
5500}
5501
5502/*
5503 * A driver should support its own getinfo(9E) entry point. This function
5504 * is provided as a convenience for ON drivers where the minor number
5505 * is the instance. Drivers that do not have a 1:1 mapping must implement
5506 * their own getinfo(9E) function.
5507 */
5508int
5509ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5510    void *arg, void **result)
5511{
5512	_NOTE(ARGUNUSED(dip))
5513	int	instance;
5514
5515	if (infocmd != DDI_INFO_DEVT2INSTANCE)
5516		return (DDI_FAILURE);
5517
5518	instance = getminor((dev_t)(uintptr_t)arg);
5519	*result = (void *)(uintptr_t)instance;
5520	return (DDI_SUCCESS);
5521}
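
/*
 * Example (an illustrative sketch; xx_dev_ops is a hypothetical driver
 * structure): a driver whose minor numbers equal its instance numbers can
 * plug this routine directly into the devo_getinfo field of its
 * dev_ops(9S) structure rather than writing its own getinfo(9E) routine.
 *
 *	xx_dev_ops.devo_getinfo = ddi_getinfo_1to1;
 *
 * Most drivers name the routine in the static dev_ops initializer instead.
 */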
5522
5523int
5524ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
5525{
5526	_NOTE(ARGUNUSED(devi, cmd))
5527	return (DDI_FAILURE);
5528}
5529
5530int
5531ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
5532    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
5533{
5534	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
5535	return (DDI_DMA_NOMAPPING);
5536}
5537
5538int
5539ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
5540    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
5541{
5542	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
5543	return (DDI_DMA_BADATTR);
5544}
5545
5546int
5547ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
5548    ddi_dma_handle_t handle)
5549{
5550	_NOTE(ARGUNUSED(dip, rdip, handle))
5551	return (DDI_FAILURE);
5552}
5553
5554int
5555ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
5556    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
5557    ddi_dma_cookie_t *cp, uint_t *ccountp)
5558{
5559	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
5560	return (DDI_DMA_NOMAPPING);
5561}
5562
5563int
5564ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
5565    ddi_dma_handle_t handle)
5566{
5567	_NOTE(ARGUNUSED(dip, rdip, handle))
5568	return (DDI_FAILURE);
5569}
5570
5571int
5572ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
5573    ddi_dma_handle_t handle, off_t off, size_t len,
5574    uint_t cache_flags)
5575{
5576	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
5577	return (DDI_FAILURE);
5578}
5579
5580int
5581ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
5582    ddi_dma_handle_t handle, uint_t win, off_t *offp,
5583    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
5584{
5585	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
5586	return (DDI_FAILURE);
5587}
5588
5589int
5590ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
5591    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
5592    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
5593{
5594	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
5595	return (DDI_FAILURE);
5596}
5597
5598void
5599ddivoid(void)
5600{}
5601
5602int
5603nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
5604    struct pollhead **pollhdrp)
5605{
5606	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
5607	return (ENXIO);
5608}
5609
5610cred_t *
5611ddi_get_cred(void)
5612{
5613	return (CRED());
5614}
5615
5616clock_t
5617ddi_get_lbolt(void)
5618{
5619	return (lbolt);
5620}
5621
5622time_t
5623ddi_get_time(void)
5624{
5625	time_t	now;
5626
5627	if ((now = gethrestime_sec()) == 0) {
5628		timestruc_t ts;
5629		mutex_enter(&tod_lock);
5630		ts = tod_get();
5631		mutex_exit(&tod_lock);
5632		return (ts.tv_sec);
5633	} else {
5634		return (now);
5635	}
5636}
5637
5638pid_t
5639ddi_get_pid(void)
5640{
5641	return (ttoproc(curthread)->p_pid);
5642}
5643
5644kt_did_t
5645ddi_get_kt_did(void)
5646{
5647	return (curthread->t_did);
5648}
5649
5650/*
5651 * This function returns B_TRUE if the caller can reasonably expect that a call
5652 * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5653 * by a user-level signal.  If it returns B_FALSE, then the caller should use
5654 * other means to make certain that the wait will not hang "forever."
5655 *
5656 * It does not check the signal mask, nor for reception of any particular
5657 * signal.
5658 *
5659 * Currently, a thread can receive a signal if it's not a kernel thread and it
5660 * is not in the middle of exit(2) tear-down.  Threads that are in that
5661 * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5662 * cv_timedwait, and qwait_sig to qwait.
5663 */
5664boolean_t
5665ddi_can_receive_sig(void)
5666{
5667	proc_t *pp;
5668
5669	if (curthread->t_proc_flag & TP_LWPEXIT)
5670		return (B_FALSE);
5671	if ((pp = ttoproc(curthread)) == NULL)
5672		return (B_FALSE);
5673	return (pp->p_as != &kas);
5674}
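
/*
 * Example (an illustrative sketch; the xxp fields and xx_timeout_usec are
 * hypothetical): pick an interruptible wait only when a signal can actually
 * arrive, and bound the wait otherwise so it cannot hang forever.
 *
 *	if (ddi_can_receive_sig()) {
 *		if (cv_wait_sig(&xxp->xx_cv, &xxp->xx_mutex) == 0)
 *			return (EINTR);
 *	} else {
 *		(void) cv_timedwait(&xxp->xx_cv, &xxp->xx_mutex,
 *		    ddi_get_lbolt() + drv_usectohz(xx_timeout_usec));
 *	}
 */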
5675
5676/*
5677 * Swap bytes in 16-bit [half-]words
5678 */
5679void
5680swab(void *src, void *dst, size_t nbytes)
5681{
5682	uchar_t *pf = (uchar_t *)src;
5683	uchar_t *pt = (uchar_t *)dst;
5684	uchar_t tmp;
5685	int nshorts;
5686
5687	nshorts = nbytes >> 1;
5688
5689	while (--nshorts >= 0) {
5690		tmp = *pf++;
5691		*pt++ = *pf++;
5692		*pt++ = tmp;
5693	}
5694}
5695
5696static void
5697ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5698{
5699	int			circ;
5700	struct ddi_minor_data	*dp;
5701
5702	ndi_devi_enter(ddip, &circ);
5703	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5704		DEVI(ddip)->devi_minor = dmdp;
5705	} else {
5706		while (dp->next != (struct ddi_minor_data *)NULL)
5707			dp = dp->next;
5708		dp->next = dmdp;
5709	}
5710	ndi_devi_exit(ddip, circ);
5711}
5712
5713/*
5714 * Part of the obsolete SunCluster DDI Hooks.
5715 * Keep for binary compatibility
5716 */
5717minor_t
5718ddi_getiminor(dev_t dev)
5719{
5720	return (getminor(dev));
5721}
5722
5723static int
5724i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5725{
5726	int se_flag;
5727	int kmem_flag;
5728	int se_err;
5729	char *pathname, *class_name;
5730	sysevent_t *ev = NULL;
5731	sysevent_id_t eid;
5732	sysevent_value_t se_val;
5733	sysevent_attr_list_t *ev_attr_list = NULL;
5734
5735	/* determine interrupt context */
5736	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5737	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5738
5739	i_ddi_di_cache_invalidate(kmem_flag);
5740
5741#ifdef DEBUG
5742	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5743		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5744		    "interrupt level by driver %s",
5745		    ddi_driver_name(dip));
5746	}
5747#endif /* DEBUG */
5748
5749	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5750	if (ev == NULL) {
5751		goto fail;
5752	}
5753
5754	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5755	if (pathname == NULL) {
5756		sysevent_free(ev);
5757		goto fail;
5758	}
5759
5760	(void) ddi_pathname(dip, pathname);
5761	ASSERT(strlen(pathname));
5762	se_val.value_type = SE_DATA_TYPE_STRING;
5763	se_val.value.sv_string = pathname;
5764	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5765	    &se_val, se_flag) != 0) {
5766		kmem_free(pathname, MAXPATHLEN);
5767		sysevent_free(ev);
5768		goto fail;
5769	}
5770	kmem_free(pathname, MAXPATHLEN);
5771
5772	/* add the device class attribute */
5773	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5774		se_val.value_type = SE_DATA_TYPE_STRING;
5775		se_val.value.sv_string = class_name;
5776		if (sysevent_add_attr(&ev_attr_list,
5777		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5778			sysevent_free_attr(ev_attr_list);
5779			goto fail;
5780		}
5781	}
5782
5783	/*
5784	 * allow for NULL minor names
5785	 */
5786	if (minor_name != NULL) {
5787		se_val.value.sv_string = minor_name;
5788		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5789		    &se_val, se_flag) != 0) {
5790			sysevent_free_attr(ev_attr_list);
5791			sysevent_free(ev);
5792			goto fail;
5793		}
5794	}
5795
5796	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5797		sysevent_free_attr(ev_attr_list);
5798		sysevent_free(ev);
5799		goto fail;
5800	}
5801
5802	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5803		if (se_err == SE_NO_TRANSPORT) {
5804			cmn_err(CE_WARN, "/devices or /dev may not be current "
5805			    "for driver %s (%s). Run devfsadm -i %s",
5806			    ddi_driver_name(dip), "syseventd not responding",
5807			    ddi_driver_name(dip));
5808		} else {
5809			sysevent_free(ev);
5810			goto fail;
5811		}
5812	}
5813
5814	sysevent_free(ev);
5815	return (DDI_SUCCESS);
5816fail:
5817	cmn_err(CE_WARN, "/devices or /dev may not be current "
5818	    "for driver %s. Run devfsadm -i %s",
5819	    ddi_driver_name(dip), ddi_driver_name(dip));
5820	return (DDI_SUCCESS);
5821}
5822
5823/*
5824 * failing to remove a minor node is not of interest;
5825 * therefore we do not generate an error message.
5826 */
5827static int
5828i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
5829{
5830	char *pathname, *class_name;
5831	sysevent_t *ev;
5832	sysevent_id_t eid;
5833	sysevent_value_t se_val;
5834	sysevent_attr_list_t *ev_attr_list = NULL;
5835
5836	/*
5837	 * only log ddi_remove_minor_node() calls outside the scope
5838	 * of attach/detach reconfigurations and when the dip is
5839	 * still initialized.
5840	 */
5841	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
5842	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
5843		return (DDI_SUCCESS);
5844	}
5845
5846	i_ddi_di_cache_invalidate(KM_SLEEP);
5847
5848	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
5849	if (ev == NULL) {
5850		return (DDI_SUCCESS);
5851	}
5852
5853	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
5854	if (pathname == NULL) {
5855		sysevent_free(ev);
5856		return (DDI_SUCCESS);
5857	}
5858
5859	(void) ddi_pathname(dip, pathname);
5860	ASSERT(strlen(pathname));
5861	se_val.value_type = SE_DATA_TYPE_STRING;
5862	se_val.value.sv_string = pathname;
5863	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5864	    &se_val, SE_SLEEP) != 0) {
5865		kmem_free(pathname, MAXPATHLEN);
5866		sysevent_free(ev);
5867		return (DDI_SUCCESS);
5868	}
5869
5870	kmem_free(pathname, MAXPATHLEN);
5871
5872	/*
5873	 * allow for NULL minor names
5874	 */
5875	if (minor_name != NULL) {
5876		se_val.value.sv_string = minor_name;
5877		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5878		    &se_val, SE_SLEEP) != 0) {
5879			sysevent_free_attr(ev_attr_list);
5880			goto fail;
5881		}
5882	}
5883
5884	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5885		/* add the device class, driver name and instance attributes */
5886
5887		se_val.value_type = SE_DATA_TYPE_STRING;
5888		se_val.value.sv_string = class_name;
5889		if (sysevent_add_attr(&ev_attr_list,
5890		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5891			sysevent_free_attr(ev_attr_list);
5892			goto fail;
5893		}
5894
5895		se_val.value_type = SE_DATA_TYPE_STRING;
5896		se_val.value.sv_string = (char *)ddi_driver_name(dip);
5897		if (sysevent_add_attr(&ev_attr_list,
5898		    DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
5899			sysevent_free_attr(ev_attr_list);
5900			goto fail;
5901		}
5902
5903		se_val.value_type = SE_DATA_TYPE_INT32;
5904		se_val.value.sv_int32 = ddi_get_instance(dip);
5905		if (sysevent_add_attr(&ev_attr_list,
5906		    DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
5907			sysevent_free_attr(ev_attr_list);
5908			goto fail;
5909		}
5910
5911	}
5912
5913	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5914		sysevent_free_attr(ev_attr_list);
5915	} else {
5916		(void) log_sysevent(ev, SE_SLEEP, &eid);
5917	}
5918fail:
5919	sysevent_free(ev);
5920	return (DDI_SUCCESS);
5921}
5922
5923/*
5924 * Derive the device class of the node.
5925 * Device class names aren't defined yet. Until this is done we use
5926 * devfs event subclass names as device class names.
5927 */
5928static int
5929derive_devi_class(dev_info_t *dip, char *node_type, int flag)
5930{
5931	int rv = DDI_SUCCESS;
5932
5933	if (i_ddi_devi_class(dip) == NULL) {
5934		if (strncmp(node_type, DDI_NT_BLOCK,
5935		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5936		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5937		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5938		    strcmp(node_type, DDI_NT_FD) != 0) {
5939
5940			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5941
5942		} else if (strncmp(node_type, DDI_NT_NET,
5943		    sizeof (DDI_NT_NET) - 1) == 0 &&
5944		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5945		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5946
5947			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5948
5949		} else if (strncmp(node_type, DDI_NT_PRINTER,
5950		    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
5951		    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
5952		    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {
5953
5954			rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);
5955
5956		} else if (strncmp(node_type, DDI_PSEUDO,
5957		    sizeof (DDI_PSEUDO) -1) == 0 &&
5958		    (strncmp(ESC_LOFI, ddi_node_name(dip),
5959		    sizeof (ESC_LOFI) -1) == 0)) {
5960			rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
5961		}
5962	}
5963
5964	return (rv);
5965}
5966
5967/*
5968 * Check compliance with PSARC 2003/375:
5969 *
5970 * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5971 * exceed IFNAMSIZ (16) characters in length.
5972 */
5973static boolean_t
5974verify_name(char *name)
5975{
5976	size_t	len = strlen(name);
5977	char	*cp;
5978
5979	if (len == 0 || len > IFNAMSIZ)
5980		return (B_FALSE);
5981
5982	for (cp = name; *cp != '\0'; cp++) {
5983		if (!isalnum(*cp) && *cp != '_')
5984			return (B_FALSE);
5985	}
5986
5987	return (B_TRUE);
5988}
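
/*
 * For illustration: names such as "e1000g0" or "net_0" pass the check
 * above, "hme-0" fails because '-' is neither alphanumeric nor '_',
 * and any name longer than IFNAMSIZ characters is rejected outright.
 */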
5989
5990/*
5991 * ddi_create_minor_common:	Create a  ddi_minor_data structure and
5992 *				attach it to the given devinfo node.
5993 */
5994
5995int
5996ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
5997    minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
5998    const char *read_priv, const char *write_priv, mode_t priv_mode)
5999{
6000	struct ddi_minor_data *dmdp;
6001	major_t major;
6002
6003	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
6004		return (DDI_FAILURE);
6005
6006	if (name == NULL)
6007		return (DDI_FAILURE);
6008
6009	/*
6010	 * Log a message if the minor number the driver is creating
6011	 * is not expressible on the on-disk filesystem (currently
6012	 * this is limited to 18 bits by UFS). The device can
6013	 * be opened via devfs, but not by device special files created
6014	 * via mknod().
6015	 */
6016	if (minor_num > L_MAXMIN32) {
6017		cmn_err(CE_WARN,
6018		    "%s%d:%s minor 0x%x too big for 32-bit applications",
6019		    ddi_driver_name(dip), ddi_get_instance(dip),
6020		    name, minor_num);
6021		return (DDI_FAILURE);
6022	}
6023
6024	/* dip must be bound and attached */
6025	major = ddi_driver_major(dip);
6026	ASSERT(major != DDI_MAJOR_T_NONE);
6027
6028	/*
6029	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
6030	 */
6031	if (node_type == NULL) {
6032		node_type = DDI_PSEUDO;
6033		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
6034		    " minor node %s; default to DDI_PSEUDO",
6035		    ddi_driver_name(dip), ddi_get_instance(dip), name));
6036	}
6037
6038	/*
6039	 * If the driver is a network driver, ensure that the name falls within
6040	 * the interface naming constraints specified by PSARC/2003/375.
6041	 */
6042	if (strcmp(node_type, DDI_NT_NET) == 0) {
6043		if (!verify_name(name))
6044			return (DDI_FAILURE);
6045
6046		if (mtype == DDM_MINOR) {
6047			struct devnames *dnp = &devnamesp[major];
6048
6049			/* Mark driver as a network driver */
6050			LOCK_DEV_OPS(&dnp->dn_lock);
6051			dnp->dn_flags |= DN_NETWORK_DRIVER;
6052			UNLOCK_DEV_OPS(&dnp->dn_lock);
6053		}
6054	}
6055
6056	if (mtype == DDM_MINOR) {
6057		if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
6058		    DDI_SUCCESS)
6059			return (DDI_FAILURE);
6060	}
6061
6062	/*
6063	 * Take care of minor number information for the node.
6064	 */
6065
6066	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
6067	    KM_NOSLEEP)) == NULL) {
6068		return (DDI_FAILURE);
6069	}
6070	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
6071		kmem_free(dmdp, sizeof (struct ddi_minor_data));
6072		return (DDI_FAILURE);
6073	}
6074	dmdp->dip = dip;
6075	dmdp->ddm_dev = makedevice(major, minor_num);
6076	dmdp->ddm_spec_type = spec_type;
6077	dmdp->ddm_node_type = node_type;
6078	dmdp->type = mtype;
6079	if (flag & CLONE_DEV) {
6080		dmdp->type = DDM_ALIAS;
6081		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
6082	}
6083	if (flag & PRIVONLY_DEV) {
6084		dmdp->ddm_flags |= DM_NO_FSPERM;
6085	}
6086	if (read_priv || write_priv) {
6087		dmdp->ddm_node_priv =
6088		    devpolicy_priv_by_name(read_priv, write_priv);
6089	}
6090	dmdp->ddm_priv_mode = priv_mode;
6091
6092	ddi_append_minor_node(dip, dmdp);
6093
6094	/*
6095	 * only log ddi_create_minor_node() calls which occur
6096	 * outside the scope of attach(9e)/detach(9e) reconfigurations
6097	 */
6098	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
6099	    mtype != DDM_INTERNAL_PATH) {
6100		(void) i_log_devfs_minor_create(dip, name);
6101	}
6102
6103	/*
6104	 * Check if any dacf rules match the creation of this minor node
6105	 */
6106	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
6107	return (DDI_SUCCESS);
6108}
6109
6110int
6111ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
6112    minor_t minor_num, char *node_type, int flag)
6113{
6114	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
6115	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
6116}
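
/*
 * Typical usage sketch (hypothetical "xx" pseudo driver; error
 * handling abbreviated): attach(9E) creates the minor node once the
 * instance is ready and detach(9E) removes it:
 *
 *	if (ddi_create_minor_node(dip, "xx", S_IFCHR,
 *	    ddi_get_instance(dip), DDI_PSEUDO, 0) != DDI_SUCCESS) {
 *		ddi_remove_minor_node(dip, NULL);
 *		return (DDI_FAILURE);
 *	}
 *
 * and in detach(9E):
 *
 *	ddi_remove_minor_node(dip, NULL);
 */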
6117
6118int
6119ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
6120    minor_t minor_num, char *node_type, int flag,
6121    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
6122{
6123	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
6124	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
6125}
6126
6127int
6128ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
6129    minor_t minor_num, char *node_type, int flag)
6130{
6131	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
6132	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
6133}
6134
6135/*
6136 * Internal (non-ddi) routine for drivers to export names known
6137 * to the kernel (especially ddi_pathname_to_dev_t and friends)
6138 * but not exported externally to /dev
6139 */
6140int
6141ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
6142    minor_t minor_num)
6143{
6144	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
6145	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
6146}
6147
6148void
6149ddi_remove_minor_node(dev_info_t *dip, char *name)
6150{
6151	int			circ;
6152	struct ddi_minor_data	*dmdp, *dmdp1;
6153	struct ddi_minor_data	**dmdp_prev;
6154
6155	ndi_devi_enter(dip, &circ);
6156	dmdp_prev = &DEVI(dip)->devi_minor;
6157	dmdp = DEVI(dip)->devi_minor;
6158	while (dmdp != NULL) {
6159		dmdp1 = dmdp->next;
6160		if ((name == NULL || (dmdp->ddm_name != NULL &&
6161		    strcmp(name, dmdp->ddm_name) == 0))) {
6162			if (dmdp->ddm_name != NULL) {
6163				if (dmdp->type != DDM_INTERNAL_PATH)
6164					(void) i_log_devfs_minor_remove(dip,
6165					    dmdp->ddm_name);
6166				kmem_free(dmdp->ddm_name,
6167				    strlen(dmdp->ddm_name) + 1);
6168			}
6169			/*
6170			 * Release device privilege, if any.
6171			 * Release dacf client data associated with this minor
6172			 * node by storing NULL.
6173			 */
6174			if (dmdp->ddm_node_priv)
6175				dpfree(dmdp->ddm_node_priv);
6176			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
6177			kmem_free(dmdp, sizeof (struct ddi_minor_data));
6178			*dmdp_prev = dmdp1;
6179			/*
6180			 * OK, we found it, so get out now -- if we drive on,
6181			 * we will strcmp against garbage.  See 1139209.
6182			 */
6183			if (name != NULL)
6184				break;
6185		} else {
6186			dmdp_prev = &dmdp->next;
6187		}
6188		dmdp = dmdp1;
6189	}
6190	ndi_devi_exit(dip, circ);
6191}
6192
6193
6194int
6195ddi_in_panic()
6196{
6197	return (panicstr != NULL);
6198}
6199
6200
6201/*
6202 * Find first bit set in a mask (returned counting from 1 up)
6203 */
6204
6205int
6206ddi_ffs(long mask)
6207{
6208	return (ffs(mask));
6209}
6210
6211/*
6212 * Find last bit set. Take mask and clear
6213 * all but the most significant bit, and
6214 * then let ffs do the rest of the work.
6215 *
6216 * Algorithm courtesy of Steve Chessin.
6217 */
6218
6219int
6220ddi_fls(long mask)
6221{
6222	while (mask) {
6223		long nx;
6224
6225		if ((nx = (mask & (mask - 1))) == 0)
6226			break;
6227		mask = nx;
6228	}
6229	return (ffs(mask));
6230}
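
/*
 * For example, ddi_fls(0x28) reduces the mask 101000b to 100000b by
 * repeatedly clearing the lowest set bit and then returns
 * ffs(100000b), i.e. 6.  Both ddi_ffs(0) and ddi_fls(0) return 0.
 */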
6231
6232/*
6233 * The next five routines comprise generic storage management utilities
6234 * for driver soft state structures (in "the old days," this was done
6235 * with a statically sized array - big systems and dynamic loading
6236 * and unloading make heap allocation more attractive)
6237 */
6238
6239/*
6240 * Allocate a set of pointers to 'n_items' objects of size 'size'
6241 * bytes.  Each pointer is initialized to nil.
6242 *
6243 * The 'size' and 'n_items' values are stashed in the opaque
6244 * handle returned to the caller.
6245 *
6246 * This implementation interprets 'set of pointers' to mean 'array
6247 * of pointers' but note that nothing in the interface definition
6248 * precludes an implementation that uses, for example, a linked list.
6249 * However, there should be a small efficiency gain from using an array
6250 * at lookup time.
6251 *
6252 * NOTE	As an optimization, we make our growable array allocations in
6253 *	powers of two (bytes), since that's how much kmem_alloc (currently)
6254 *	gives us anyway.  It should save us some free/realloc's ..
6255 *
6256 *	As a further optimization, we make the growable array start out
6257 *	with MIN_N_ITEMS in it.
6258 */
6259
6260#define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */
6261
6262int
6263ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
6264{
6265	struct i_ddi_soft_state *ss;
6266
6267	if (state_p == NULL || *state_p != NULL || size == 0)
6268		return (EINVAL);
6269
6270	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
6271	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
6272	ss->size = size;
6273
6274	if (n_items < MIN_N_ITEMS)
6275		ss->n_items = MIN_N_ITEMS;
6276	else {
6277		int bitlog;
6278
6279		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
6280			bitlog--;
6281		ss->n_items = 1 << bitlog;
6282	}
6283
6284	ASSERT(ss->n_items >= n_items);
6285
6286	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);
6287
6288	*state_p = ss;
6289
6290	return (0);
6291}
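
/*
 * Usage sketch for the soft state routines (hypothetical "xx" driver
 * with a per-instance struct xx_softc; error handling omitted):
 *
 *	static void *xx_statep;
 *
 *	_init(9E):	ddi_soft_state_init(&xx_statep,
 *			    sizeof (struct xx_softc), 4);
 *	attach(9E):	instance = ddi_get_instance(dip);
 *			ddi_soft_state_zalloc(xx_statep, instance);
 *			xsp = ddi_get_soft_state(xx_statep, instance);
 *	detach(9E):	ddi_soft_state_free(xx_statep, instance);
 *	_fini(9E):	ddi_soft_state_fini(&xx_statep);
 */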
6292
6293
6294/*
6295 * Allocate a state structure of size 'size' to be associated
6296 * with item 'item'.
6297 *
6298 * In this implementation, the array is extended to
6299 * allow the requested offset, if needed.
6300 */
6301int
6302ddi_soft_state_zalloc(void *state, int item)
6303{
6304	struct i_ddi_soft_state *ss;
6305	void **array;
6306	void *new_element;
6307
6308	if ((ss = state) == NULL || item < 0)
6309		return (DDI_FAILURE);
6310
6311	mutex_enter(&ss->lock);
6312	if (ss->size == 0) {
6313		mutex_exit(&ss->lock);
6314		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
6315		    mod_containing_pc(caller()));
6316		return (DDI_FAILURE);
6317	}
6318
6319	array = ss->array;	/* NULL if ss->n_items == 0 */
6320	ASSERT(ss->n_items != 0 && array != NULL);
6321
6322	/*
6323	 * refuse to tread on an existing element
6324	 */
6325	if (item < ss->n_items && array[item] != NULL) {
6326		mutex_exit(&ss->lock);
6327		return (DDI_FAILURE);
6328	}
6329
6330	/*
6331	 * Allocate a new element to plug in
6332	 */
6333	new_element = kmem_zalloc(ss->size, KM_SLEEP);
6334
6335	/*
6336	 * Check if the array is big enough, if not, grow it.
6337	 */
6338	if (item >= ss->n_items) {
6339		void	**new_array;
6340		size_t	new_n_items;
6341		struct i_ddi_soft_state *dirty;
6342
6343		/*
6344		 * Allocate a new array of the right length, copy
6345		 * all the old pointers to the new array, then
6346		 * if it exists at all, put the old array on the
6347		 * dirty list.
6348		 *
6349		 * Note that we can't kmem_free() the old array.
6350		 *
6351		 * Why -- well the 'get' operation is 'mutex-free', so we
6352		 * can't easily catch a suspended thread that is just about
6353		 * to dereference the array we just grew out of.  So we
6354		 * cons up a header and put it on a list of 'dirty'
6355		 * pointer arrays.  (Dirty in the sense that there may
6356		 * be suspended threads somewhere that are in the middle
6357		 * of referencing them).  Fortunately, we -can- garbage
6358		 * collect it all at ddi_soft_state_fini time.
6359		 */
6360		new_n_items = ss->n_items;
6361		while (new_n_items < (1 + item))
6362			new_n_items <<= 1;	/* double array size .. */
6363
6364		ASSERT(new_n_items >= (1 + item));	/* sanity check! */
6365
6366		new_array = kmem_zalloc(new_n_items * sizeof (void *),
6367		    KM_SLEEP);
6368		/*
6369		 * Copy the pointers into the new array
6370		 */
6371		bcopy(array, new_array, ss->n_items * sizeof (void *));
6372
6373		/*
6374		 * Save the old array on the dirty list
6375		 */
6376		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
6377		dirty->array = ss->array;
6378		dirty->n_items = ss->n_items;
6379		dirty->next = ss->next;
6380		ss->next = dirty;
6381
6382		ss->array = (array = new_array);
6383		ss->n_items = new_n_items;
6384	}
6385
6386	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);
6387
6388	array[item] = new_element;
6389
6390	mutex_exit(&ss->lock);
6391	return (DDI_SUCCESS);
6392}
6393
6394
6395/*
6396 * Fetch a pointer to the allocated soft state structure.
6397 *
6398 * This is designed to be cheap.
6399 *
6400 * There's an argument that there should be more checking for
6401 * nil pointers and out of bounds on the array, but we do a lot
6402 * of that in the alloc/free routines.
6403 *
6404 * An array has the convenience that we don't need to lock read-access
6405 * to it, cf. a linked list.  However, our "expanding array" strategy
6406 * means that we should hold a readers lock on the i_ddi_soft_state
6407 * structure.
6408 *
6409 * However, from a performance viewpoint, we need to do it without
6410 * any locks at all -- this also makes it a leaf routine.  The algorithm
6411 * is 'lock-free' because we only discard the pointer arrays at
6412 * ddi_soft_state_fini() time.
6413 */
6414void *
6415ddi_get_soft_state(void *state, int item)
6416{
6417	struct i_ddi_soft_state *ss = state;
6418
6419	ASSERT(ss != NULL && item >= 0);
6420
6421	if (item < ss->n_items && ss->array != NULL)
6422		return (ss->array[item]);
6423	return (NULL);
6424}
6425
6426/*
6427 * Free the state structure corresponding to 'item.'   Freeing an
6428 * element that has either gone or was never allocated is not
6429 * considered an error.  Note that we free the state structure, but
6430 * we don't shrink our pointer array, or discard 'dirty' arrays,
6431 * since even a few pointers don't really waste too much memory.
6432 *
6433 * Passing an item number that is out of bounds, or a null pointer, will
6434 * provoke an error message.
6435 */
6436void
6437ddi_soft_state_free(void *state, int item)
6438{
6439	struct i_ddi_soft_state *ss;
6440	void **array;
6441	void *element;
6442	static char msg[] = "ddi_soft_state_free:";
6443
6444	if ((ss = state) == NULL) {
6445		cmn_err(CE_WARN, "%s null handle: %s",
6446		    msg, mod_containing_pc(caller()));
6447		return;
6448	}
6449
6450	element = NULL;
6451
6452	mutex_enter(&ss->lock);
6453
6454	if ((array = ss->array) == NULL || ss->size == 0) {
6455		cmn_err(CE_WARN, "%s bad handle: %s",
6456		    msg, mod_containing_pc(caller()));
6457	} else if (item < 0 || item >= ss->n_items) {
6458		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
6459		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
6460	} else if (array[item] != NULL) {
6461		element = array[item];
6462		array[item] = NULL;
6463	}
6464
6465	mutex_exit(&ss->lock);
6466
6467	if (element)
6468		kmem_free(element, ss->size);
6469}
6470
6471
6472/*
6473 * Free the entire set of pointers, and any
6474 * soft state structures contained therein.
6475 *
6476 * Note that we don't grab the ss->lock mutex, even though
6477 * we're inspecting the various fields of the data structure.
6478 *
6479 * There is an implicit assumption that this routine will
6480 * never run concurrently with any of the above on this
6481 * particular state structure, i.e. by the time the driver
6482 * calls this routine, there should be no other threads
6483 * running in the driver.
6484 */
6485void
6486ddi_soft_state_fini(void **state_p)
6487{
6488	struct i_ddi_soft_state *ss, *dirty;
6489	int item;
6490	static char msg[] = "ddi_soft_state_fini:";
6491
6492	if (state_p == NULL || (ss = *state_p) == NULL) {
6493		cmn_err(CE_WARN, "%s null handle: %s",
6494		    msg, mod_containing_pc(caller()));
6495		return;
6496	}
6497
6498	if (ss->size == 0) {
6499		cmn_err(CE_WARN, "%s bad handle: %s",
6500		    msg, mod_containing_pc(caller()));
6501		return;
6502	}
6503
6504	if (ss->n_items > 0) {
6505		for (item = 0; item < ss->n_items; item++)
6506			ddi_soft_state_free(ss, item);
6507		kmem_free(ss->array, ss->n_items * sizeof (void *));
6508	}
6509
6510	/*
6511	 * Now delete any dirty arrays from previous 'grow' operations
6512	 */
6513	for (dirty = ss->next; dirty; dirty = ss->next) {
6514		ss->next = dirty->next;
6515		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
6516		kmem_free(dirty, sizeof (*dirty));
6517	}
6518
6519	mutex_destroy(&ss->lock);
6520	kmem_free(ss, sizeof (*ss));
6521
6522	*state_p = NULL;
6523}
6524
6525/*
6526 * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6527 * Storage is double buffered to prevent updates during devi_addr use -
6528 * double buffering is adequate for reliable ddi_deviname() consumption.
6529 * The double buffer is not freed until dev_info structure destruction
6530 * (by i_ddi_free_node).
6531 */
6532void
6533ddi_set_name_addr(dev_info_t *dip, char *name)
6534{
6535	char	*buf = DEVI(dip)->devi_addr_buf;
6536	char	*newaddr;
6537
6538	if (buf == NULL) {
6539		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
6540		DEVI(dip)->devi_addr_buf = buf;
6541	}
6542
6543	if (name) {
6544		ASSERT(strlen(name) < MAXNAMELEN);
6545		newaddr = (DEVI(dip)->devi_addr == buf) ?
6546		    (buf + MAXNAMELEN) : buf;
6547		(void) strlcpy(newaddr, name, MAXNAMELEN);
6548	} else
6549		newaddr = NULL;
6550
6551	DEVI(dip)->devi_addr = newaddr;
6552}
6553
6554char *
6555ddi_get_name_addr(dev_info_t *dip)
6556{
6557	return (DEVI(dip)->devi_addr);
6558}
6559
6560void
6561ddi_set_parent_data(dev_info_t *dip, void *pd)
6562{
6563	DEVI(dip)->devi_parent_data = pd;
6564}
6565
6566void *
6567ddi_get_parent_data(dev_info_t *dip)
6568{
6569	return (DEVI(dip)->devi_parent_data);
6570}
6571
6572/*
6573 * ddi_name_to_major: Returns the major number of a module given its name.
6574 */
6575major_t
6576ddi_name_to_major(char *name)
6577{
6578	return (mod_name_to_major(name));
6579}
6580
6581/*
6582 * ddi_major_to_name: Returns the module name bound to a major number.
6583 */
6584char *
6585ddi_major_to_name(major_t major)
6586{
6587	return (mod_major_to_name(major));
6588}
6589
6590/*
6591 * Return the name of the devinfo node pointed at by 'dip' in the buffer
6592 * pointed at by 'name.'  A devinfo node is named as a result of calling
6593 * ddi_initchild().
6594 *
6595 * Note: the driver must be held before calling this function!
6596 */
6597char *
6598ddi_deviname(dev_info_t *dip, char *name)
6599{
6600	char *addrname;
6601	char none = '\0';
6602
6603	if (dip == ddi_root_node()) {
6604		*name = '\0';
6605		return (name);
6606	}
6607
6608	if (i_ddi_node_state(dip) < DS_BOUND) {
6609		addrname = &none;
6610	} else {
6611		/*
6612		 * Use ddi_get_name_addr() without checking state so we get
6613		 * a unit-address if we are called after ddi_set_name_addr()
6614		 * by nexus DDI_CTL_INITCHILD code, but before completing
6615		 * node promotion to DS_INITIALIZED.  We currently have
6616		 * two situations where we are called in this state:
6617		 *   o  For framework processing of a path-oriented alias.
6618		 *   o  If a SCSA nexus driver calls ddi_devid_register()
6619		 *	from its tran_tgt_init(9E) implementation.
6620		 */
6621		addrname = ddi_get_name_addr(dip);
6622		if (addrname == NULL)
6623			addrname = &none;
6624	}
6625
6626	if (*addrname == '\0') {
6627		(void) sprintf(name, "/%s", ddi_node_name(dip));
6628	} else {
6629		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
6630	}
6631
6632	return (name);
6633}
6634
6635/*
6636 * Spits out the name of device node, typically name@addr, for a given node,
6637 * using the driver name, not the nodename.
6638 *
6639 * Used by match_parent. Not to be used elsewhere.
6640 */
6641char *
6642i_ddi_parname(dev_info_t *dip, char *name)
6643{
6644	char *addrname;
6645
6646	if (dip == ddi_root_node()) {
6647		*name = '\0';
6648		return (name);
6649	}
6650
6651	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6652
6653	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6654		(void) sprintf(name, "%s", ddi_binding_name(dip));
6655	else
6656		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6657	return (name);
6658}
6659
6660static char *
6661pathname_work(dev_info_t *dip, char *path)
6662{
6663	char *bp;
6664
6665	if (dip == ddi_root_node()) {
6666		*path = '\0';
6667		return (path);
6668	}
6669	(void) pathname_work(ddi_get_parent(dip), path);
6670	bp = path + strlen(path);
6671	(void) ddi_deviname(dip, bp);
6672	return (path);
6673}
6674
6675char *
6676ddi_pathname(dev_info_t *dip, char *path)
6677{
6678	return (pathname_work(dip, path));
6679}
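
/*
 * Callers of ddi_pathname() must supply a buffer of at least
 * MAXPATHLEN bytes.  A common pattern (sketch only) is:
 *
 *	char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
 *
 *	(void) ddi_pathname(dip, path);
 *	cmn_err(CE_CONT, "?path is %s\n", path);
 *	kmem_free(path, MAXPATHLEN);
 */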
6680
6681char *
6682ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path)
6683{
6684	if (dmdp->dip == NULL)
6685		*path = '\0';
6686	else {
6687		(void) ddi_pathname(dmdp->dip, path);
6688		if (dmdp->ddm_name) {
6689			(void) strcat(path, ":");
6690			(void) strcat(path, dmdp->ddm_name);
6691		}
6692	}
6693	return (path);
6694}
6695
6696static char *
6697pathname_work_obp(dev_info_t *dip, char *path)
6698{
6699	char *bp;
6700	char *obp_path;
6701
6702	/*
6703	 * look up the "obp-path" property, return the path if it exists
6704	 */
6705	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
6706	    "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
6707		(void) strcpy(path, obp_path);
6708		ddi_prop_free(obp_path);
6709		return (path);
6710	}
6711
6712	/*
6713	 * stop at root, no obp path
6714	 */
6715	if (dip == ddi_root_node()) {
6716		return (NULL);
6717	}
6718
6719	obp_path = pathname_work_obp(ddi_get_parent(dip), path);
6720	if (obp_path == NULL)
6721		return (NULL);
6722
6723	/*
6724	 * append our component to parent's obp path
6725	 */
6726	bp = path + strlen(path);
6727	if (*(bp - 1) != '/')
6728		(void) strcat(bp++, "/");
6729	(void) ddi_deviname(dip, bp);
6730	return (path);
6731}
6732
6733/*
6734 * return the 'obp-path' based path for the given node, or NULL if the node
6735 * does not have a different obp path. NOTE: Unlike ddi_pathname, this
6736 * function can't be called from interrupt context (since we need to
6737 * lookup a string property).
6738 */
6739char *
6740ddi_pathname_obp(dev_info_t *dip, char *path)
6741{
6742	ASSERT(!servicing_interrupt());
6743	if (dip == NULL || path == NULL)
6744		return (NULL);
6745
6746	/* split work into a separate function to aid debugging */
6747	return (pathname_work_obp(dip, path));
6748}
6749
6750int
6751ddi_pathname_obp_set(dev_info_t *dip, char *component)
6752{
6753	dev_info_t *pdip;
6754	char obp_path[MAXPATHLEN];
6755
6756	bzero(obp_path, sizeof (obp_path));
6757
6758	if (dip == NULL)
6759		return (DDI_FAILURE);
6760	pdip = ddi_get_parent(dip);
6761
6762	if (ddi_pathname_obp(pdip, obp_path) == NULL) {
6763		(void) ddi_pathname(pdip, obp_path);
6764	}
6765
6766	if (component) {
6767		(void) strncat(obp_path, "/", sizeof (obp_path));
6768		(void) strncat(obp_path, component, sizeof (obp_path));
6769	}
6770	return (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
6771	    obp_path));
6772}
6773
6774/*
6775 * Given a dev_t, return the pathname of the corresponding device in the
6776 * buffer pointed at by "path."  The buffer is assumed to be large enough
6777 * to hold the pathname of the device (MAXPATHLEN).
6778 *
6779 * The pathname of a device is the pathname of the devinfo node to which
6780 * the device "belongs," concatenated with the character ':' and the name
6781 * of the minor node corresponding to the dev_t.  If spec_type is 0 then
6782 * just the pathname of the devinfo node is returned without driving attach
6783 * of that node.  For a non-zero spec_type, an attach is performed and a
6784 * search of the minor list occurs.
6785 *
6786 * It is possible that the path associated with the dev_t is not
6787 * currently available in the devinfo tree.  In order to have a
6788 * dev_t, a device must have been discovered before, which means
6789 * that the path is always in the instance tree.  The one exception
6790 * to this is if the dev_t is associated with a pseudo driver, in
6791 * which case the device must exist on the pseudo branch of the
6792 * devinfo tree as a result of parsing .conf files.
6793 */
6794int
6795ddi_dev_pathname(dev_t devt, int spec_type, char *path)
6796{
6797	int		circ;
6798	major_t		major = getmajor(devt);
6799	int		instance;
6800	dev_info_t	*dip;
6801	char		*minorname;
6802	char		*drvname;
6803
6804	if (major >= devcnt)
6805		goto fail;
6806	if (major == clone_major) {
6807		/* clone has no minor nodes, manufacture the path here */
6808		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
6809			goto fail;
6810
6811		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
6812		return (DDI_SUCCESS);
6813	}
6814
6815	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
6816	if ((instance = dev_to_instance(devt)) == -1)
6817		goto fail;
6818
6819	/* reconstruct the path given the major/instance */
6820	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
6821		goto fail;
6822
6823	/* if spec_type given we must drive attach and search minor nodes */
6824	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
6825		/* attach the path so we can search minors */
6826		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
6827			goto fail;
6828
6829		/* Add minorname to path. */
6830		ndi_devi_enter(dip, &circ);
6831		minorname = i_ddi_devtspectype_to_minorname(dip,
6832		    devt, spec_type);
6833		if (minorname) {
6834			(void) strcat(path, ":");
6835			(void) strcat(path, minorname);
6836		}
6837		ndi_devi_exit(dip, circ);
6838		ddi_release_devi(dip);
6839		if (minorname == NULL)
6840			goto fail;
6841	}
6842	ASSERT(strlen(path) < MAXPATHLEN);
6843	return (DDI_SUCCESS);
6844
6845fail:	*path = 0;
6846	return (DDI_FAILURE);
6847}
6848
6849/*
6850 * Given a major number and an instance, return the path.
6851 * This interface does NOT drive attach.
6852 */
6853int
6854e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
6855{
6856	struct devnames *dnp;
6857	dev_info_t	*dip;
6858
6859	if ((major >= devcnt) || (instance == -1)) {
6860		*path = 0;
6861		return (DDI_FAILURE);
6862	}
6863
6864	/* look for the major/instance in the instance tree */
6865	if (e_ddi_instance_majorinstance_to_path(major, instance,
6866	    path) == DDI_SUCCESS) {
6867		ASSERT(strlen(path) < MAXPATHLEN);
6868		return (DDI_SUCCESS);
6869	}
6870
6871	/*
6872	 * Not in instance tree, find the instance on the per driver list and
6873	 * construct path to instance via ddi_pathname(). This is how paths
6874	 * down the 'pseudo' branch are constructed.
6875	 */
6876	dnp = &(devnamesp[major]);
6877	LOCK_DEV_OPS(&(dnp->dn_lock));
6878	for (dip = dnp->dn_head; dip;
6879	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
6880		/* Skip if instance does not match. */
6881		if (DEVI(dip)->devi_instance != instance)
6882			continue;
6883
6884		/*
6885		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
6886		 * node demotion, so it is not an effective way of ensuring
6887		 * that the ddi_pathname result has a unit-address.  Instead,
6888		 * we reverify the node state after calling ddi_pathname().
6889		 */
6890		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
6891			(void) ddi_pathname(dip, path);
6892			if (i_ddi_node_state(dip) < DS_INITIALIZED)
6893				continue;
6894			UNLOCK_DEV_OPS(&(dnp->dn_lock));
6895			ASSERT(strlen(path) < MAXPATHLEN);
6896			return (DDI_SUCCESS);
6897		}
6898	}
6899	UNLOCK_DEV_OPS(&(dnp->dn_lock));
6900
6901	/* can't reconstruct the path */
6902	*path = 0;
6903	return (DDI_FAILURE);
6904}
6905
6906#define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6907
6908/*
6909 * Given the dip for a network interface, return the ppa for that interface.
6910 *
6911 * In all cases except GLD v0 drivers, the ppa == instance.
6912 * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6913 * So for these drivers, when the attach routine calls gld_register(),
6914 * the GLD framework creates an integer property, GLD_DRIVER_PPA
6915 * ("SUNW,gld_v0_ppa"), that can be queried here.
6916 *
6917 * The only time this function is used is when a system is booting over NFS.
6918 * In this case the system has to resolve the pathname of the boot device
6919 * to its ppa.
6920 */
6921int
6922i_ddi_devi_get_ppa(dev_info_t *dip)
6923{
6924	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
6925	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
6926	    GLD_DRIVER_PPA, ddi_get_instance(dip)));
6927}
6928
6929/*
6930 * i_ddi_devi_set_ppa() should only be called from gld_register()
6931 * and only for GLD v0 drivers
6932 */
6933void
6934i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
6935{
6936	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
6937}
6938
6939
6940/*
6941 * Private DDI Console bell functions.
6942 */
6943void
6944ddi_ring_console_bell(clock_t duration)
6945{
6946	if (ddi_console_bell_func != NULL)
6947		(*ddi_console_bell_func)(duration);
6948}
6949
6950void
6951ddi_set_console_bell(void (*bellfunc)(clock_t duration))
6952{
6953	ddi_console_bell_func = bellfunc;
6954}
6955
6956int
6957ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6958	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6959{
6960	int (*funcp)() = ddi_dma_allochdl;
6961	ddi_dma_attr_t dma_attr;
6962	struct bus_ops *bop;
6963
6964	if (attr == (ddi_dma_attr_t *)0)
6965		return (DDI_DMA_BADATTR);
6966
6967	dma_attr = *attr;
6968
6969	bop = DEVI(dip)->devi_ops->devo_bus_ops;
6970	if (bop && bop->bus_dma_allochdl)
6971		funcp = bop->bus_dma_allochdl;
6972
6973	return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
6974}
6975
6976void
6977ddi_dma_free_handle(ddi_dma_handle_t *handlep)
6978{
6979	ddi_dma_handle_t h = *handlep;
6980	(void) ddi_dma_freehdl(HD, HD, h);
6981}
6982
6983static uintptr_t dma_mem_list_id = 0;
6984
6985
6986int
6987ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
6988	ddi_device_acc_attr_t *accattrp, uint_t flags,
6989	int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
6990	size_t *real_length, ddi_acc_handle_t *handlep)
6991{
6992	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6993	dev_info_t *dip = hp->dmai_rdip;
6994	ddi_acc_hdl_t *ap;
6995	ddi_dma_attr_t *attrp = &hp->dmai_attr;
6996	uint_t sleepflag, xfermodes;
6997	int (*fp)(caddr_t);
6998	int rval;
6999
7000	if (waitfp == DDI_DMA_SLEEP)
7001		fp = (int (*)())KM_SLEEP;
7002	else if (waitfp == DDI_DMA_DONTWAIT)
7003		fp = (int (*)())KM_NOSLEEP;
7004	else
7005		fp = waitfp;
7006	*handlep = impl_acc_hdl_alloc(fp, arg);
7007	if (*handlep == NULL)
7008		return (DDI_FAILURE);
7009
7010	/* check if the cache attributes are supported */
7011	if (i_ddi_check_cache_attr(flags) == B_FALSE)
7012		return (DDI_FAILURE);
7013
7014	/*
7015	 * Transfer the meaningful bits to xfermodes.
7016	 * Double-check if the 3rd party driver correctly sets the bits.
7017	 * If not, set DDI_DMA_STREAMING to keep compatibility.
7018	 */
7019	xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
7020	if (xfermodes == 0) {
7021		xfermodes = DDI_DMA_STREAMING;
7022	}
7023
7024	/*
7025	 * initialize the common elements of data access handle
7026	 */
7027	ap = impl_acc_hdl_get(*handlep);
7028	ap->ah_vers = VERS_ACCHDL;
7029	ap->ah_dip = dip;
7030	ap->ah_offset = 0;
7031	ap->ah_len = 0;
7032	ap->ah_xfermodes = flags;
7033	ap->ah_acc = *accattrp;
7034
7035	sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
7036	if (xfermodes == DDI_DMA_CONSISTENT) {
7037		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7038		    flags, accattrp, kaddrp, NULL, ap);
7039		*real_length = length;
7040	} else {
7041		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7042		    flags, accattrp, kaddrp, real_length, ap);
7043	}
7044	if (rval == DDI_SUCCESS) {
7045		ap->ah_len = (off_t)(*real_length);
7046		ap->ah_addr = *kaddrp;
7047	} else {
7048		impl_acc_hdl_free(*handlep);
7049		*handlep = (ddi_acc_handle_t)NULL;
7050		if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
7051			ddi_set_callback(waitfp, arg, &dma_mem_list_id);
7052		}
7053		rval = DDI_FAILURE;
7054	}
7055	return (rval);
7056}
7057
7058void
7059ddi_dma_mem_free(ddi_acc_handle_t *handlep)
7060{
7061	ddi_acc_hdl_t *ap;
7062
7063	ap = impl_acc_hdl_get(*handlep);
7064	ASSERT(ap);
7065
7066	i_ddi_mem_free((caddr_t)ap->ah_addr, ap);
7067
7068	/*
7069	 * free the handle
7070	 */
7071	impl_acc_hdl_free(*handlep);
7072	*handlep = (ddi_acc_handle_t)NULL;
7073
7074	if (dma_mem_list_id != 0) {
7075		ddi_run_callback(&dma_mem_list_id);
7076	}
7077}
7078
7079int
7080ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
7081	uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
7082	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7083{
7084	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7085	dev_info_t *hdip, *dip;
7086	struct ddi_dma_req dmareq;
7087	int (*funcp)();
7088
7089	dmareq.dmar_flags = flags;
7090	dmareq.dmar_fp = waitfp;
7091	dmareq.dmar_arg = arg;
7092	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;
7093
7094	if (bp->b_flags & B_PAGEIO) {
7095		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
7096		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
7097		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
7098		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
7099	} else {
7100		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
7101		if (bp->b_flags & B_SHADOW) {
7102			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
7103			    bp->b_shadow;
7104			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
7105		} else {
7106			dmareq.dmar_object.dmao_type =
7107			    (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
7108			    DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
7109			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
7110		}
7111
7112		/*
7113		 * If the buffer has no proc pointer, or the proc
7114		 * struct has the kernel address space, or the buffer has
7115		 * been marked B_REMAPPED (meaning that it is now
7116		 * mapped into the kernel's address space), then
7117		 * the address space is kas (kernel address space).
7118		 */
7119		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
7120		    (bp->b_flags & B_REMAPPED)) {
7121			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
7122		} else {
7123			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
7124			    bp->b_proc->p_as;
7125		}
7126	}
7127
7128	dip = hp->dmai_rdip;
7129	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
7130	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
7131	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
7132}
7133
7134int
7135ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
7136	caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
7137	caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7138{
7139	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7140	dev_info_t *hdip, *dip;
7141	struct ddi_dma_req dmareq;
7142	int (*funcp)();
7143
7144	if (len == (uint_t)0) {
7145		return (DDI_DMA_NOMAPPING);
7146	}
7147	dmareq.dmar_flags = flags;
7148	dmareq.dmar_fp = waitfp;
7149	dmareq.dmar_arg = arg;
7150	dmareq.dmar_object.dmao_size = len;
7151	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
7152	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
7153	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
7154	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
7155
7156	dip = hp->dmai_rdip;
7157	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
7158	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
7159	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
7160}
7161
7162void
7163ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
7164{
7165	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7166	ddi_dma_cookie_t *cp;
7167
7168	cp = hp->dmai_cookie;
7169	ASSERT(cp);
7170
7171	cookiep->dmac_notused = cp->dmac_notused;
7172	cookiep->dmac_type = cp->dmac_type;
7173	cookiep->dmac_address = cp->dmac_address;
7174	cookiep->dmac_size = cp->dmac_size;
7175	hp->dmai_cookie++;
7176}
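
/*
 * Typical binding sketch (the xx_dma_attr, kaddr and len names are
 * hypothetical; error handling and DDI_DMA_PARTIAL support omitted):
 * allocate a handle, bind a kernel virtual range, then walk the
 * cookie list:
 *
 *	ddi_dma_handle_t dh;
 *	ddi_dma_cookie_t cookie;
 *	uint_t ccount;
 *
 *	(void) ddi_dma_alloc_handle(dip, &xx_dma_attr, DDI_DMA_SLEEP,
 *	    NULL, &dh);
 *	(void) ddi_dma_addr_bind_handle(dh, NULL, kaddr, len,
 *	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount);
 *	for (;;) {
 *		... program cookie.dmac_address and cookie.dmac_size ...
 *		if (--ccount == 0)
 *			break;
 *		ddi_dma_nextcookie(dh, &cookie);
 *	}
 *	(void) ddi_dma_unbind_handle(dh);
 *	ddi_dma_free_handle(&dh);
 */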
7177
7178int
7179ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7180{
7181	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7182	if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7183		return (DDI_FAILURE);
7184	} else {
7185		*nwinp = hp->dmai_nwin;
7186		return (DDI_SUCCESS);
7187	}
7188}
7189
7190int
7191ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
7192	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7193{
7194	int (*funcp)() = ddi_dma_win;
7195	struct bus_ops *bop;
7196
7197	bop = DEVI(HD)->devi_ops->devo_bus_ops;
7198	if (bop && bop->bus_dma_win)
7199		funcp = bop->bus_dma_win;
7200
7201	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
7202}
7203
7204int
7205ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
7206{
7207	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
7208	    &burstsizes, 0, 0));
7209}
7210
7211int
7212i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
7213{
7214	return (hp->dmai_fault);
7215}
7216
7217int
7218ddi_check_dma_handle(ddi_dma_handle_t handle)
7219{
7220	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7221	int (*check)(ddi_dma_impl_t *);
7222
7223	if ((check = hp->dmai_fault_check) == NULL)
7224		check = i_ddi_dma_fault_check;
7225
7226	return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
7227}
7228
7229void
7230i_ddi_dma_set_fault(ddi_dma_handle_t handle)
7231{
7232	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7233	void (*notify)(ddi_dma_impl_t *);
7234
7235	if (!hp->dmai_fault) {
7236		hp->dmai_fault = 1;
7237		if ((notify = hp->dmai_fault_notify) != NULL)
7238			(*notify)(hp);
7239	}
7240}
7241
7242void
7243i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
7244{
7245	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7246	void (*notify)(ddi_dma_impl_t *);
7247
7248	if (hp->dmai_fault) {
7249		hp->dmai_fault = 0;
7250		if ((notify = hp->dmai_fault_notify) != NULL)
7251			(*notify)(hp);
7252	}
7253}
7254
7255/*
7256 * register mapping routines.
7257 */
7258int
7259ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
7260	offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
7261	ddi_acc_handle_t *handle)
7262{
7263	ddi_map_req_t mr;
7264	ddi_acc_hdl_t *hp;
7265	int result;
7266
7267	/*
7268	 * Allocate and initialize the common elements of data access handle.
7269	 */
7270	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
7271	hp = impl_acc_hdl_get(*handle);
7272	hp->ah_vers = VERS_ACCHDL;
7273	hp->ah_dip = dip;
7274	hp->ah_rnumber = rnumber;
7275	hp->ah_offset = offset;
7276	hp->ah_len = len;
7277	hp->ah_acc = *accattrp;
7278
7279	/*
7280	 * Set up the mapping request and call to parent.
7281	 */
7282	mr.map_op = DDI_MO_MAP_LOCKED;
7283	mr.map_type = DDI_MT_RNUMBER;
7284	mr.map_obj.rnumber = rnumber;
7285	mr.map_prot = PROT_READ | PROT_WRITE;
7286	mr.map_flags = DDI_MF_KERNEL_MAPPING;
7287	mr.map_handlep = hp;
7288	mr.map_vers = DDI_MAP_VERSION;
7289	result = ddi_map(dip, &mr, offset, len, addrp);
7290
7291	/*
7292	 * check for end result
7293	 */
7294	if (result != DDI_SUCCESS) {
7295		impl_acc_hdl_free(*handle);
7296		*handle = (ddi_acc_handle_t)NULL;
7297	} else {
7298		hp->ah_addr = *addrp;
7299	}
7300
7301	return (result);
7302}
7303
7304void
7305ddi_regs_map_free(ddi_acc_handle_t *handlep)
7306{
7307	ddi_map_req_t mr;
7308	ddi_acc_hdl_t *hp;
7309
7310	hp = impl_acc_hdl_get(*handlep);
7311	ASSERT(hp);
7312
7313	mr.map_op = DDI_MO_UNMAP;
7314	mr.map_type = DDI_MT_RNUMBER;
7315	mr.map_obj.rnumber = hp->ah_rnumber;
7316	mr.map_prot = PROT_READ | PROT_WRITE;
7317	mr.map_flags = DDI_MF_KERNEL_MAPPING;
7318	mr.map_handlep = hp;
7319	mr.map_vers = DDI_MAP_VERSION;
7320
7321	/*
7322	 * Call my parent to unmap my regs.
7323	 */
7324	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
7325	    hp->ah_len, &hp->ah_addr);
7326	/*
7327	 * free the handle
7328	 */
7329	impl_acc_hdl_free(*handlep);
7330	*handlep = (ddi_acc_handle_t)NULL;
7331}
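
/*
 * Mapping sketch (register set number 1 and the XX_STATUS offset are
 * hypothetical; error handling omitted).  An offset and len of 0 map
 * the entire register set:
 *
 *	ddi_device_acc_attr_t attr;
 *	ddi_acc_handle_t rh;
 *	caddr_t regs;
 *	uint32_t status;
 *
 *	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
 *	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
 *	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
 *	(void) ddi_regs_map_setup(dip, 1, &regs, 0, 0, &attr, &rh);
 *	status = ddi_get32(rh, (uint32_t *)(regs + XX_STATUS));
 *	ddi_regs_map_free(&rh);
 */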
7332
7333int
7334ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
7335	ssize_t dev_advcnt, uint_t dev_datasz)
7336{
7337	uint8_t *b;
7338	uint16_t *w;
7339	uint32_t *l;
7340	uint64_t *ll;
7341
7342	/* byte count must be a multiple of the data transfer size */
7343	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7344		return (DDI_FAILURE);
7345
7346	switch (dev_datasz) {
7347	case DDI_DATA_SZ01_ACC:
7348		for (b = (uint8_t *)dev_addr;
7349		    bytecount != 0; bytecount -= 1, b += dev_advcnt)
7350			ddi_put8(handle, b, 0);
7351		break;
7352	case DDI_DATA_SZ02_ACC:
7353		for (w = (uint16_t *)dev_addr;
7354		    bytecount != 0; bytecount -= 2, w += dev_advcnt)
7355			ddi_put16(handle, w, 0);
7356		break;
7357	case DDI_DATA_SZ04_ACC:
7358		for (l = (uint32_t *)dev_addr;
7359		    bytecount != 0; bytecount -= 4, l += dev_advcnt)
7360			ddi_put32(handle, l, 0);
7361		break;
7362	case DDI_DATA_SZ08_ACC:
7363		for (ll = (uint64_t *)dev_addr;
7364		    bytecount != 0; bytecount -= 8, ll += dev_advcnt)
7365			ddi_put64(handle, ll, 0x0ll);
7366		break;
7367	default:
7368		return (DDI_FAILURE);
7369	}
7370	return (DDI_SUCCESS);
7371}
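
/*
 * For example (sketch), zeroing a linearly mapped region of 64 32-bit
 * words advances by one datum per access:
 *
 *	(void) ddi_device_zero(handle, dev_addr, 64 * 4, 1,
 *	    DDI_DATA_SZ04_ACC);
 *
 * whereas a dev_advcnt of 0 rewrites the same location on every
 * access, as a data FIFO register typically requires.
 */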
7372
7373int
7374ddi_device_copy(
7375	ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
7376	ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
7377	size_t bytecount, uint_t dev_datasz)
7378{
7379	uint8_t *b_src, *b_dst;
7380	uint16_t *w_src, *w_dst;
7381	uint32_t *l_src, *l_dst;
7382	uint64_t *ll_src, *ll_dst;
7383
7384	/* byte count must be a multiple of the data transfer size */
7385	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7386		return (DDI_FAILURE);
7387
7388	switch (dev_datasz) {
7389	case DDI_DATA_SZ01_ACC:
7390		b_src = (uint8_t *)src_addr;
7391		b_dst = (uint8_t *)dest_addr;
7392
7393		for (; bytecount != 0; bytecount -= 1) {
7394			ddi_put8(dest_handle, b_dst,
7395			    ddi_get8(src_handle, b_src));
7396			b_dst += dest_advcnt;
7397			b_src += src_advcnt;
7398		}
7399		break;
7400	case DDI_DATA_SZ02_ACC:
7401		w_src = (uint16_t *)src_addr;
7402		w_dst = (uint16_t *)dest_addr;
7403
7404		for (; bytecount != 0; bytecount -= 2) {
7405			ddi_put16(dest_handle, w_dst,
7406			    ddi_get16(src_handle, w_src));
7407			w_dst += dest_advcnt;
7408			w_src += src_advcnt;
7409		}
7410		break;
7411	case DDI_DATA_SZ04_ACC:
7412		l_src = (uint32_t *)src_addr;
7413		l_dst = (uint32_t *)dest_addr;
7414
7415		for (; bytecount != 0; bytecount -= 4) {
7416			ddi_put32(dest_handle, l_dst,
7417			    ddi_get32(src_handle, l_src));
7418			l_dst += dest_advcnt;
7419			l_src += src_advcnt;
7420		}
7421		break;
7422	case DDI_DATA_SZ08_ACC:
7423		ll_src = (uint64_t *)src_addr;
7424		ll_dst = (uint64_t *)dest_addr;
7425
7426		for (; bytecount != 0; bytecount -= 8) {
7427			ddi_put64(dest_handle, ll_dst,
7428			    ddi_get64(src_handle, ll_src));
7429			ll_dst += dest_advcnt;
7430			ll_src += src_advcnt;
7431		}
7432		break;
7433	default:
7434		return (DDI_FAILURE);
7435	}
7436	return (DDI_SUCCESS);
7437}
7438
7439#define	swap16(value)  \
7440	((((value) & 0xff) << 8) | ((value) >> 8))
7441
7442#define	swap32(value)	\
7443	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
7444	(uint32_t)swap16((uint16_t)((value) >> 16)))
7445
7446#define	swap64(value)	\
7447	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
7448	    << 32) | \
7449	(uint64_t)swap32((uint32_t)((value) >> 32)))
7450
7451uint16_t
7452ddi_swap16(uint16_t value)
7453{
7454	return (swap16(value));
7455}
7456
7457uint32_t
7458ddi_swap32(uint32_t value)
7459{
7460	return (swap32(value));
7461}
7462
7463uint64_t
7464ddi_swap64(uint64_t value)
7465{
7466	return (swap64(value));
7467}
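
/*
 * For example, ddi_swap16(0x1234) returns 0x3412, ddi_swap32(0x12345678)
 * returns 0x78563412, and ddi_swap64(0x0123456789abcdefULL) returns
 * 0xefcdab8967452301ULL.
 */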
7468
7469/*
7470 * Convert a binding name to a driver name.
7471 * A binding name is the name used to determine the driver for a
7472 * device - it may be either an alias for the driver or the name
7473 * of the driver itself.
7474 */
7475char *
7476i_binding_to_drv_name(char *bname)
7477{
7478	major_t major_no;
7479
7480	ASSERT(bname != NULL);
7481
7482	if ((major_no = ddi_name_to_major(bname)) == -1)
7483		return (NULL);
7484	return (ddi_major_to_name(major_no));
7485}
7486
7487/*
7488 * Search for minor name that has specified dev_t and spec_type.
7489 * If spec_type is zero then any dev_t match works.  Since we
7490 * are returning a pointer to the minor name string, we require the
7491 * caller to do the locking.
7492 */
7493char *
7494i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
7495{
7496	struct ddi_minor_data	*dmdp;
7497
7498	/*
7499	 * The did layered driver currently intentionally returns a
7500	 * devinfo ptr for an underlying sd instance based on a did
7501	 * dev_t. In this case it is not an error.
7502	 *
7503	 * The did layered driver is associated with Sun Cluster.
7504	 */
7505	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
7506	    (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));
7507
7508	ASSERT(DEVI_BUSY_OWNED(dip));
7509	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7510		if (((dmdp->type == DDM_MINOR) ||
7511		    (dmdp->type == DDM_INTERNAL_PATH) ||
7512		    (dmdp->type == DDM_DEFAULT)) &&
7513		    (dmdp->ddm_dev == dev) &&
7514		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
7515		    (dmdp->ddm_spec_type == spec_type)))
7516			return (dmdp->ddm_name);
7517	}
7518
7519	return (NULL);
7520}
7521
7522/*
7523 * Find the devt and spectype of the specified minor_name.
7524 * Return DDI_FAILURE if minor_name is not found. Since we are
7525 * returning everything via arguments we can do the locking.
7526 */
7527int
7528i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
7529	dev_t *devtp, int *spectypep)
7530{
7531	int			circ;
7532	struct ddi_minor_data	*dmdp;
7533
7534	/* deal with clone minor nodes */
7535	if (dip == clone_dip) {
7536		major_t	major;
7537		/*
7538		 * Make sure minor_name is a STREAMS driver.
7539		 * We load the driver but don't attach to any instances.
7540		 */
7541
7542		major = ddi_name_to_major(minor_name);
7543		if (major == DDI_MAJOR_T_NONE)
7544			return (DDI_FAILURE);
7545
7546		if (ddi_hold_driver(major) == NULL)
7547			return (DDI_FAILURE);
7548
7549		if (STREAMSTAB(major) == NULL) {
7550			ddi_rele_driver(major);
7551			return (DDI_FAILURE);
7552		}
7553		ddi_rele_driver(major);
7554
7555		if (devtp)
7556			*devtp = makedevice(clone_major, (minor_t)major);
7557
7558		if (spectypep)
7559			*spectypep = S_IFCHR;
7560
7561		return (DDI_SUCCESS);
7562	}
7563
7564	ndi_devi_enter(dip, &circ);
7565	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7566		if (((dmdp->type != DDM_MINOR) &&
7567		    (dmdp->type != DDM_INTERNAL_PATH) &&
7568		    (dmdp->type != DDM_DEFAULT)) ||
7569		    strcmp(minor_name, dmdp->ddm_name))
7570			continue;
7571
7572		if (devtp)
7573			*devtp = dmdp->ddm_dev;
7574
7575		if (spectypep)
7576			*spectypep = dmdp->ddm_spec_type;
7577
7578		ndi_devi_exit(dip, circ);
7579		return (DDI_SUCCESS);
7580	}
7581	ndi_devi_exit(dip, circ);
7582
7583	return (DDI_FAILURE);
7584}
7585
7586extern char	hw_serial[];
7587static kmutex_t devid_gen_mutex;
7588static short	devid_gen_number;
7589
7590#ifdef DEBUG
7591
7592static int	devid_register_corrupt = 0;
7593static int	devid_register_corrupt_major = 0;
7594static int	devid_register_corrupt_hint = 0;
7595static int	devid_register_corrupt_hint_major = 0;
7596
7597static int devid_lyr_debug = 0;
7598
7599#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
7600	if (devid_lyr_debug)					\
7601		ddi_debug_devid_devts(msg, ndevs, devs)
7602
7603#else
7604
7605#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)
7606
7607#endif /* DEBUG */
7608
7609
7610#ifdef	DEBUG
7611
7612static void
7613ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7614{
7615	int i;
7616
7617	cmn_err(CE_CONT, "%s:\n", msg);
7618	for (i = 0; i < ndevs; i++) {
7619		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7620	}
7621}
7622
7623static void
7624ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7625{
7626	int i;
7627
7628	cmn_err(CE_CONT, "%s:\n", msg);
7629	for (i = 0; i < npaths; i++) {
7630		cmn_err(CE_CONT, "    %s\n", paths[i]);
7631	}
7632}
7633
7634static void
7635ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7636{
7637	int i;
7638
7639	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7640	for (i = 0; i < ndevs; i++) {
7641		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7642	}
7643}
7644
7645#endif	/* DEBUG */

/*
 * Register device id into DDI framework.
 * Must be called when device is attached.
 */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t	*i_devid = (impl_devid_t *)devid;
	size_t		driver_len;
	const char	*driver_name;
	char		*devid_str;
	major_t		major;

	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	/* keep pointer to devid string for interrupt context fma code */
	if (DEVI(dip)->devi_devid_str)
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
	DEVI(dip)->devi_devid_str = devid_str;
	return (DDI_SUCCESS);
}

int
ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	int rval;

	rval = i_ddi_devid_register(dip, devid);
	if (rval == DDI_SUCCESS) {
		/*
		 * Register devid in devid-to-path cache
		 */
		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
			mutex_enter(&DEVI(dip)->devi_lock);
			DEVI(dip)->devi_flags |= DEVI_REGISTERED_DEVID;
			mutex_exit(&DEVI(dip)->devi_lock);
		} else {
			cmn_err(CE_WARN, "%s%d: failed to cache devid",
			    ddi_driver_name(dip), ddi_get_instance(dip));
		}
	} else {
		cmn_err(CE_WARN, "%s%d: failed to register devid",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
	return (rval);
}
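
/*
 * Illustrative usage sketch (not part of this file): a leaf driver would
 * typically build and register its device id from attach(9E) roughly as
 * follows; "serial_buf" and "serial_len" are hypothetical driver values.
 *
 *	ddi_devid_t	devid;
 *
 *	if (ddi_devid_init(dip, DEVID_SCSI_SERIAL, serial_len,
 *	    serial_buf, &devid) == DDI_SUCCESS) {
 *		(void) ddi_devid_register(dip, devid);
 *		ddi_devid_free(devid);
 *	}
 *
 * Since i_ddi_devid_register() stores the encoded devid as a string
 * property, the binary devid may be freed once registration is done; a
 * driver that keeps it instead would free it at detach time after calling
 * ddi_devid_unregister().
 */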

/*
 * Remove (unregister) device id from DDI framework.
 * Must be called when device is detached.
 */
static void
i_ddi_devid_unregister(dev_info_t *dip)
{
	if (DEVI(dip)->devi_devid_str) {
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
		DEVI(dip)->devi_devid_str = NULL;
	}

	/* remove the devid property */
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
}

void
ddi_devid_unregister(dev_info_t *dip)
{
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}

/*
 * Allocate and initialize a device id.
 */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,
	ushort_t	nbytes,
	void		*id,
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char		*cp;
		int		hostid;
		char		*hostid_cp = &hw_serial[0];
		struct timeval32 timestamp32;
		int		i;
		int		*ip;
		short		gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = stoi(&hostid_cp);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int	val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}

int
ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
{
	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
}

int
i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
{
	char		*devidstr;

	ASSERT(dev != DDI_DEV_T_NONE);

	/* look up the property, devt specific first */
	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
		if ((dev == DDI_DEV_T_ANY) ||
		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
		    DDI_PROP_SUCCESS)) {
			return (DDI_FAILURE);
		}
	}

	/* convert to binary form */
	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
		ddi_prop_free(devidstr);
		return (DDI_FAILURE);
	}
	ddi_prop_free(devidstr);
	return (DDI_SUCCESS);
}

/*
 * Return a copy of the device id for dev_t
 */
int
ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
{
	dev_info_t	*dip;
	int		rval;

	/* get the dip */
	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_FAILURE);

	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);

	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
	return (rval);
}

/*
 * Return a copy of the minor name for dev_t and spec_type
 */
int
ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
{
	char		*buf;
	int		circ;
	dev_info_t	*dip;
	char		*nm;
	int		rval;

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
		*minor_name = NULL;
		return (DDI_FAILURE);
	}

	/* Find the minor name and copy into max size buf */
	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	ndi_devi_enter(dip, &circ);
	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
	if (nm)
		(void) strcpy(buf, nm);
	ndi_devi_exit(dip, circ);
	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */

	if (nm) {
		/* duplicate into min size buf for return result */
		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
		rval = DDI_SUCCESS;
	} else {
		*minor_name = NULL;
		rval = DDI_FAILURE;
	}

	/* free max size buf and return */
	kmem_free(buf, MAXNAMELEN);
	return (rval);
}

int
ddi_lyr_devid_to_devlist(
	ddi_devid_t	devid,
	char		*minor_name,
	int		*retndevs,
	dev_t		**retdevs)
{
	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}

void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}
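
/*
 * Illustrative usage sketch (not part of this file): a layered consumer
 * holding a dev_t could resolve it to a devid and back to the set of
 * matching dev_ts roughly as follows; "dev" and "minor_name" are
 * hypothetical caller values.
 *
 *	ddi_devid_t	devid;
 *	dev_t		*devs;
 *	int		ndevs;
 *
 *	if (ddi_lyr_get_devid(dev, &devid) == DDI_SUCCESS) {
 *		if (ddi_lyr_devid_to_devlist(devid, minor_name,
 *		    &ndevs, &devs) == DDI_SUCCESS) {
 *			... use devs[0 .. ndevs - 1] ...
 *			ddi_lyr_free_devlist(devs, ndevs);
 *		}
 *		ddi_devid_free(devid);
 *	}
 */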

/*
 * Note: This will need to be fixed if we ever allow processes to
 * have more than one data model per exec.
 */
model_t
ddi_mmap_get_model(void)
{
	return (get_udatamodel());
}

model_t
ddi_model_convert_from(model_t model)
{
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}
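
/*
 * Illustrative usage sketch (not part of this file): the usual consumer of
 * ddi_model_convert_from() is an ioctl(9E) routine that must copy in a
 * structure whose layout differs between 32-bit and 64-bit callers.  The
 * "foo"/"foo32" structures, "arg" and "mode" below are hypothetical.
 *
 *	switch (ddi_model_convert_from(mode & FMODELS)) {
 *	case DDI_MODEL_ILP32: {
 *		struct foo32	foo32;
 *
 *		if (ddi_copyin((void *)arg, &foo32, sizeof (foo32), mode))
 *			return (EFAULT);
 *		foo.size = foo32.size;
 *		break;
 *	}
 *	case DDI_MODEL_NONE:
 *		if (ddi_copyin((void *)arg, &foo, sizeof (foo), mode))
 *			return (EFAULT);
 *		break;
 *	}
 */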

/*
 * ddi interfaces managing storage and retrieval of eventcookies.
 */

/*
 * Invoke bus nexus driver's implementation of the
 * (*bus_remove_eventcall)() interface to remove a registered
 * callback handler for "event".
 */
int
ddi_remove_event_handler(ddi_callback_id_t id)
{
	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
	dev_info_t *ddip;

	ASSERT(cb);
	if (!cb) {
		return (DDI_FAILURE);
	}

	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
	return (ndi_busop_remove_eventcall(ddip, id));
}

/*
 * Invoke bus nexus driver's implementation of the
 * (*bus_add_eventcall)() interface to register a callback handler
 * for "event".
 */
int
ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
    void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg, ddi_callback_id_t *id)
{
	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
}


/*
 * Return a handle for event "name" by calling up the device tree
 * hierarchy via  (*bus_get_eventcookie)() interface until claimed
 * by a bus nexus or top of dev_info tree is reached.
 */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}
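
/*
 * Illustrative usage sketch (not part of this file): a driver interested in
 * fault events would typically look up the cookie once and keep the
 * returned callback id for removal at detach time; "my_fault_handler" and
 * "my_cb_id" are hypothetical driver names.
 *
 *	ddi_eventcookie_t	ec;
 *
 *	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT,
 *	    &ec) == DDI_SUCCESS)
 *		(void) ddi_add_event_handler(dip, ec, my_fault_handler,
 *		    NULL, &my_cb_id);
 *	...
 *	(void) ddi_remove_event_handler(my_cb_id);
 */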

/*
 * This procedure is provided as the general callback function when
 * umem_lockmemory calls as_add_callback for long term memory locking.
 * When as_unmap, as_setprot, or as_free encounter segments which have
 * locked memory, this callback will be invoked.
 */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/* remove the cookie if reference goes to zero */
	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}

/*
 * The following two Consolidation Private routines provide generic
 * interfaces to increase/decrease the amount of device-locked memory.
 *
 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
 * must be called every time i_ddi_incr_locked_memory() is called.
 */
int
/* ARGSUSED */
i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
{
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);
	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
		mutex_exit(&procp->p_lock);
		return (ENOMEM);
	}
	mutex_exit(&procp->p_lock);
	return (0);
}

/*
 * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
 * must be called every time i_ddi_decr_locked_memory() is called.
 */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
{
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);
	rctl_decr_locked_mem(procp, NULL, dec, 1);
	mutex_exit(&procp->p_lock);
}

/*
 * This routine checks if the max-locked-memory resource ctl is
 * exceeded; if not, it increments the counter and grabs a hold on
 * the project.  Returns 0 if successful, otherwise an error code.
 */
static int
umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
{
	proc_t		*procp;
	int		ret;

	ASSERT(cookie);
	procp = cookie->procp;
	ASSERT(procp);

	if ((ret = i_ddi_incr_locked_memory(procp,
	    cookie->size)) != 0) {
		return (ret);
	}
	return (0);
}

/*
 * Decrements the max-locked-memory resource ctl and releases
 * the hold on the project that was acquired during umem_incr_devlockmem
 */
static void
umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
{
	proc_t		*proc;

	proc = (proc_t *)cookie->procp;
	if (!proc)
		return;

	i_ddi_decr_locked_memory(proc, cookie->size);
}

/*
 * A consolidation private function which is essentially equivalent to
 * ddi_umem_lock but with the addition of arguments ops_vector and procp.
 * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
 * the ops_vector is valid.
 *
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model;
 * in other words, attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the project's running
 * counter for the device locked memory.
 *
 * addr, size should be PAGESIZE aligned
 *
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
 * be maintained for an indefinitely long period (essentially permanent),
 * rather than for what would be required for a typical I/O completion.
 * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
 * if the memory pertains to a regular file which is mapped MAP_SHARED.
 * This is to prevent a deadlock if a file truncation is attempted
 * after the locking is done.
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *		*.max-locked-memory resource control value.
 *      EFAULT - memory pertains to a regular file mapped shared and
 *		DDI_UMEMLOCK_LONGTERM flag is set
 *	EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
		struct umem_callback_ops *ops_vector,
		proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as = procp->p_as;
	struct seg		*seg;
	vnode_t			*vp;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern  struct seg_ops segspt_shmops;
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as, &as->a_lock);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as, &as->a_lock);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}

/*
 * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
 * the cookie.  Called from i_ddi_umem_unlock_thread.
 */

static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_UMEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callback is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that it was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and
	 * umem_lock_undo is, or will be, executing, and will thus decrement
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_lock_undo may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}

/*
 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
 *
 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
 * until it is empty.  Then, wait for more to be added.  This thread is awoken
 * via calls to ddi_umem_unlock.
 */

static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/* process the ddi_umem_unlock list */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take it off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {   /* list is empty, wait for next ddi_umem_unlock */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}

/*
 * Start the thread that will process the ddi_umem_unlock list if it is
 * not already started (i_ddi_umem_unlock_thread).
 */
static void
i_ddi_umem_unlock_thread_start(void)
{
	mutex_enter(&ddi_umem_unlock_mutex);
	if (ddi_umem_unlock_thread == NULL) {
		ddi_umem_unlock_thread = thread_create(NULL, 0,
		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	mutex_exit(&ddi_umem_unlock_mutex);
}

/*
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model;
 * in other words, attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the project's running
 * counter for the device locked memory. This applies to umem_lockmemory too.
 *
 * addr, size should be PAGESIZE aligned
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *		*.max-locked-memory resource control value.
 *	EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int	error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}

/*
 * Add the cookie to the ddi_umem_unlock list.  Pages will be
 * unlocked by i_ddi_umem_unlock_thread.
 */

void
ddi_umem_unlock(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;

	ASSERT(p->type == UMEM_LOCKED);
	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
	ASSERT(ddi_umem_unlock_thread != NULL);

	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
	/*
	 * Queue the unlock request and notify i_ddi_umem_unlock thread
	 * if it's called in the interrupt context. Otherwise, unlock pages
	 * immediately.
	 */
	if (servicing_interrupt()) {
		/* queue the unlock request and notify the thread */
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head == NULL) {
			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
			cv_broadcast(&ddi_umem_unlock_cv);
		} else {
			ddi_umem_unlock_tail->unl_forw = p;
			ddi_umem_unlock_tail = p;
		}
		mutex_exit(&ddi_umem_unlock_mutex);
	} else {
		/* unlock the pages right away */
		(void) i_ddi_umem_unlock(p);
	}
}

/*
 * Create a buf structure from a ddi_umem_cookie
 * cookie - is a ddi_umem_cookie from ddi_umem_lock and ddi_umem_alloc
 *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
 * off, len - identifies the portion of the memory represented by the cookie
 *		that the buf points to.
 *	NOTE: off, len need to follow the alignment/size restrictions of the
 *		device (dev) that this buf will be passed to. Some devices
 *		will accept unrestricted alignment/size, whereas others (such as
 *		st) require some block-size alignment/size. It is the caller's
 *		responsibility to ensure that the alignment/size restrictions
 *		are met (we cannot assert as we do not know the restrictions)
 *
 * direction - is one of B_READ or B_WRITE and needs to be compatible with
 *		the flags used in ddi_umem_lock
 *
 * The following three arguments are used to initialize fields in the
 * buf structure and are uninterpreted by this routine.
 *
 * dev
 * blkno
 * iodone
 *
 * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
 *
 * Returns a buf structure pointer on success (to be freed by freerbuf)
 *	NULL on any parameter error or memory alloc failure
 *
 */
struct buf *
ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
	int direction, dev_t dev, daddr_t blkno,
	int (*iodone)(struct buf *), int sleepflag)
{
	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
	struct buf *bp;

	/*
	 * check for valid cookie offset, len
	 */
	if ((off + len) > p->size) {
		return (NULL);
	}

	if (len > p->size) {
		return (NULL);
	}

	/* direction has to be one of B_READ or B_WRITE */
	if ((direction != B_READ) && (direction != B_WRITE)) {
		return (NULL);
	}

	/* These are the only two valid sleepflags */
	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
		return (NULL);
	}

	/*
	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
	 */
	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
		return (NULL);
	}

	/* If type is KMEM_NON_PAGEABLE procp is NULL */
	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
	    (p->procp == NULL) : (p->procp != NULL));

	bp = kmem_alloc(sizeof (struct buf), sleepflag);
	if (bp == NULL) {
		return (NULL);
	}
	bioinit(bp);

	bp->b_flags = B_BUSY | B_PHYS | direction;
	bp->b_edev = dev;
	bp->b_lblkno = blkno;
	bp->b_iodone = iodone;
	bp->b_bcount = len;
	bp->b_proc = (proc_t *)p->procp;
	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
	if (p->pparray != NULL) {
		bp->b_flags |= B_SHADOW;
		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
		bp->b_shadow = p->pparray + btop(off);
	}
	return (bp);
}
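
/*
 * Illustrative usage sketch (not part of this file): the typical flow for
 * the interfaces above is to lock a page-aligned user buffer, wrap it in a
 * buf for the device, and unlock once the transfer completes.  "uaddr",
 * "ulen", "dev" and "blkno" are hypothetical caller values.
 *
 *	ddi_umem_cookie_t	cookie;
 *	struct buf		*bp;
 *
 *	if (ddi_umem_lock(uaddr, ulen, DDI_UMEMLOCK_READ |
 *	    DDI_UMEMLOCK_WRITE, &cookie) != 0)
 *		return (EFAULT);
 *	bp = ddi_umem_iosetup(cookie, 0, ulen, B_READ, dev, blkno,
 *	    NULL, DDI_UMEM_SLEEP);
 *	if (bp == NULL) {
 *		ddi_umem_unlock(cookie);
 *		return (ENOMEM);
 *	}
 *	... start the transfer, biowait(bp) ...
 *	freerbuf(bp);
 *	ddi_umem_unlock(cookie);
 */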

/*
 * Fault-handling and related routines
 */

ddi_devstate_t
ddi_get_devstate(dev_info_t *dip)
{
	if (DEVI_IS_DEVICE_OFFLINE(dip))
		return (DDI_DEVSTATE_OFFLINE);
	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
		return (DDI_DEVSTATE_DOWN);
	else if (DEVI_IS_BUS_QUIESCED(dip))
		return (DDI_DEVSTATE_QUIESCED);
	else if (DEVI_IS_DEVICE_DEGRADED(dip))
		return (DDI_DEVSTATE_DEGRADED);
	else
		return (DDI_DEVSTATE_UP);
}

void
ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
	ddi_fault_location_t location, const char *message)
{
	struct ddi_fault_event_data fd;
	ddi_eventcookie_t ec;

	/*
	 * Assemble all the information into a fault-event-data structure
	 */
	fd.f_dip = dip;
	fd.f_impact = impact;
	fd.f_location = location;
	fd.f_message = message;
	fd.f_oldstate = ddi_get_devstate(dip);

	/*
	 * Get eventcookie from defining parent.
	 */
	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
	    DDI_SUCCESS)
		return;

	(void) ndi_post_event(dip, dip, ec, &fd);
}
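
/*
 * Illustrative usage sketch (not part of this file): a driver that detects,
 * for example, a failed access to its hardware could report it as follows;
 * the message text is hypothetical.
 *
 *	ddi_dev_report_fault(dip, DDI_SERVICE_LOST, DDI_DEVICE_FAULT,
 *	    "register access failed");
 */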

char *
i_ddi_devi_class(dev_info_t *dip)
{
	return (DEVI(dip)->devi_device_class);
}

int
i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
{
	struct dev_info *devi = DEVI(dip);

	mutex_enter(&devi->devi_lock);

	if (devi->devi_device_class)
		kmem_free(devi->devi_device_class,
		    strlen(devi->devi_device_class) + 1);

	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
	    != NULL) {
		mutex_exit(&devi->devi_lock);
		return (DDI_SUCCESS);
	}

	mutex_exit(&devi->devi_lock);

	return (DDI_FAILURE);
}


/*
 * Task Queues DDI interfaces.
 */

/* ARGSUSED */
ddi_taskq_t *
ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
    pri_t pri, uint_t cflags)
{
	char full_name[TASKQ_NAMELEN];
	const char *tq_name;
	int nodeid = 0;

	if (dip == NULL)
		tq_name = name;
	else {
		nodeid = ddi_get_instance(dip);

		if (name == NULL)
			name = "tq";

		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
		    ddi_driver_name(dip), name);

		tq_name = full_name;
	}

	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
	    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
	    nthreads, INT_MAX, TASKQ_PREPOPULATE));
}

void
ddi_taskq_destroy(ddi_taskq_t *tq)
{
	taskq_destroy((taskq_t *)tq);
}

int
ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
    void *arg, uint_t dflags)
{
	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);

	return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
}

void
ddi_taskq_wait(ddi_taskq_t *tq)
{
	taskq_wait((taskq_t *)tq);
}

void
ddi_taskq_suspend(ddi_taskq_t *tq)
{
	taskq_suspend((taskq_t *)tq);
}

boolean_t
ddi_taskq_suspended(ddi_taskq_t *tq)
{
	return (taskq_suspended((taskq_t *)tq));
}

void
ddi_taskq_resume(ddi_taskq_t *tq)
{
	taskq_resume((taskq_t *)tq);
}
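
/*
 * Illustrative usage sketch (not part of this file): typical driver use of
 * the task queue wrappers above; "my_task" and "arg" are hypothetical.
 *
 *	ddi_taskq_t	*tq;
 *
 *	tq = ddi_taskq_create(dip, "io_taskq", 1, TASKQ_DEFAULTPRI, 0);
 *	if (ddi_taskq_dispatch(tq, my_task, arg, DDI_SLEEP) != DDI_SUCCESS)
 *		cmn_err(CE_WARN, "dispatch failed");
 *	ddi_taskq_wait(tq);
 *	ddi_taskq_destroy(tq);
 */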

int
ddi_parse(
	const char	*ifname,
	char		*alnum,
	uint_t		*nump)
{
	const char	*p;
	int		l;
	ulong_t		num;
	boolean_t	nonum = B_TRUE;
	char		c;

	l = strlen(ifname);
	for (p = ifname + l; p != ifname; l--) {
		c = *--p;
		if (!isdigit(c)) {
			(void) strlcpy(alnum, ifname, l + 1);
			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
				return (DDI_FAILURE);
			break;
		}
		nonum = B_FALSE;
	}
	if (l == 0 || nonum)
		return (DDI_FAILURE);

	*nump = num;
	return (DDI_SUCCESS);
}
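
/*
 * Illustrative usage sketch (not part of this file): ddi_parse() splits a
 * name such as "bge0" into its alphabetic prefix and trailing instance
 * number; sizing the prefix buffer is the caller's responsibility.
 *
 *	char	drv[LIFNAMSIZ];
 *	uint_t	inst;
 *
 *	if (ddi_parse("bge0", drv, &inst) == DDI_SUCCESS) {
 *		... drv now holds "bge", inst is 0 ...
 *	}
 */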

/*
 * Default initialization function for drivers that don't need to quiesce.
 */
/* ARGSUSED */
int
ddi_quiesce_not_needed(dev_info_t *dip)
{
	return (DDI_SUCCESS);
}

/*
 * Initialization function for drivers that should implement quiesce()
 * but haven't yet.
 */
/* ARGSUSED */
int
ddi_quiesce_not_supported(dev_info_t *dip)
{
	return (DDI_FAILURE);
}

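/*
 * Illustrative usage sketch (not part of this file): drivers plug one of
 * the two stub routines above (or a real quiesce(9E) implementation) into
 * the devo_quiesce slot of their dev_ops; the surrounding fields are
 * elided here.
 *
 *	static struct dev_ops foo_dev_ops = {
 *		...
 *		ddi_quiesce_not_needed,		... devo_quiesce ...
 *	};
 */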