1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27
28/*
29 * dnet -- DEC 21x4x
30 *
31 * Currently supports:
32 *	21040, 21041, 21140, 21142, 21143
33 *	SROM versions 1, 3, 3.03, 4
34 *	TP, AUI, BNC, 100BASETX, 100BASET4
35 *
36 * XXX NEEDSWORK
37 *	All media SHOULD work, FX is untested
38 *
39 * Depends on the Generic LAN Driver utility functions in /kernel/misc/mac
40 */
41
42#define	BUG_4010796	/* See 4007871, 4010796 */
43
44#include <sys/types.h>
45#include <sys/errno.h>
46#include <sys/param.h>
47#include <sys/stropts.h>
48#include <sys/stream.h>
49#include <sys/kmem.h>
50#include <sys/conf.h>
51#include <sys/devops.h>
52#include <sys/ksynch.h>
53#include <sys/stat.h>
54#include <sys/modctl.h>
55#include <sys/debug.h>
56#include <sys/dlpi.h>
57#include <sys/ethernet.h>
58#include <sys/vlan.h>
59#include <sys/mac.h>
60#include <sys/mac_ether.h>
61#include <sys/mac_provider.h>
62#include <sys/pci.h>
63#include <sys/ddi.h>
64#include <sys/sunddi.h>
65#include <sys/strsun.h>
66
67#include "dnet_mii.h"
68#include "dnet.h"
69
70/*
71 *	Declarations and Module Linkage
72 */
73
74#define	IDENT	"DNET 21x4x"
75
76/*
77 * #define	DNET_NOISY
78 * #define	SROMDEBUG
79 * #define	SROMDUMPSTRUCTURES
80 */
81
82#ifdef DNETDEBUG
83#ifdef DNET_NOISY
84int	dnetdebug = -1;
85#else
86int	dnetdebug = 0;
87#endif
88#endif
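/*
 * dnetdebug is a bit mask of the DNET* debug flags tested throughout this
 * file (e.g. DNETDDI, DNETTRACE, DNETSENSE, DNETRECV, DNETINT); -1 turns
 * them all on, 0 disables debug output.
 */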
89
90/* used for messages allocated using desballoc() */
91struct free_ptr {
92	struct free_rtn	free_rtn;
93	caddr_t buf;
94};
95
96struct rbuf_list {
97	struct rbuf_list	*rbuf_next;	/* next in the list */
98	caddr_t			rbuf_vaddr;	/* virtual addr of the buf */
99	uint32_t		rbuf_paddr;	/* physical addr of the buf */
100	uint32_t		rbuf_endpaddr;	/* physical addr at the end */
101	ddi_dma_handle_t	rbuf_dmahdl;	/* dma handle */
102	ddi_acc_handle_t	rbuf_acchdl;	/* handle for DDI functions */
103};
104
105/* Required system entry points */
106static int dnet_probe(dev_info_t *);
107static int dnet_attach(dev_info_t *, ddi_attach_cmd_t);
108static int dnet_detach(dev_info_t *, ddi_detach_cmd_t);
109static int dnet_quiesce(dev_info_t *);
110
111/* Required driver entry points for GLDv3 */
112static int dnet_m_start(void *);
113static void dnet_m_stop(void *);
114static int dnet_m_getstat(void *, uint_t, uint64_t *);
115static int dnet_m_setpromisc(void *, boolean_t);
116static int dnet_m_multicst(void *, boolean_t, const uint8_t *);
117static int dnet_m_unicst(void *, const uint8_t *);
118static mblk_t *dnet_m_tx(void *, mblk_t *);
119
120static uint_t dnet_intr(caddr_t);
121
122/* Internal functions used by the above entry points */
123static void write_gpr(struct dnetinstance *dnetp, uint32_t val);
124static void dnet_reset_board(struct dnetinstance *);
125static void dnet_init_board(struct dnetinstance *);
126static void dnet_chip_init(struct dnetinstance *);
127static uint32_t hashindex(const uint8_t *);
128static int dnet_start(struct dnetinstance *);
129static int dnet_set_addr(struct dnetinstance *);
130
131static boolean_t dnet_send(struct dnetinstance *, mblk_t *);
132
133static void dnet_getp(struct dnetinstance *);
134static void update_rx_stats(struct dnetinstance *, int);
135static void update_tx_stats(struct dnetinstance *, int);
136
137/* Media Selection Setup Routines */
138static void set_gpr(struct dnetinstance *);
139static void set_opr(struct dnetinstance *);
140static void set_sia(struct dnetinstance *);
141
142/* Buffer Management Routines */
143static int dnet_alloc_bufs(struct dnetinstance *);
144static void dnet_free_bufs(struct dnetinstance *);
145static void dnet_init_txrx_bufs(struct dnetinstance *);
146static int alloc_descriptor(struct dnetinstance *);
147static void dnet_reclaim_Tx_desc(struct dnetinstance *);
148static int dnet_rbuf_init(dev_info_t *, int);
149static int dnet_rbuf_destroy();
150static struct rbuf_list *dnet_rbuf_alloc(dev_info_t *, int);
151static void dnet_rbuf_free(caddr_t);
152static void dnet_freemsg_buf(struct free_ptr *);
153
154static void setup_block(struct dnetinstance *);
155
156/* SROM read functions */
157static int dnet_read_srom(dev_info_t *, int, ddi_acc_handle_t, caddr_t,
158    uchar_t *, int);
159static void dnet_read21040addr(dev_info_t *, ddi_acc_handle_t, caddr_t,
160    uchar_t *, int *);
161static void dnet_read21140srom(ddi_acc_handle_t, caddr_t, uchar_t *, int);
162static int get_alternative_srom_image(dev_info_t *, uchar_t *, int);
163static void dnet_print_srom(SROM_FORMAT *sr);
164static void dnet_dump_leaf(LEAF_FORMAT *leaf);
165static void dnet_dump_block(media_block_t *block);
166#ifdef BUG_4010796
167static void set_alternative_srom_image(dev_info_t *, uchar_t *, int);
168static int dnet_hack(dev_info_t *);
169#endif
170
171static int dnet_hack_interrupts(struct dnetinstance *, int);
172static int dnet_detach_hacked_interrupt(dev_info_t *devinfo);
173static void enable_interrupts(struct dnetinstance *);
174
175/* SROM parsing functions */
176static void dnet_parse_srom(struct dnetinstance *dnetp, SROM_FORMAT *sr,
177    uchar_t *vi);
178static void parse_controller_leaf(struct dnetinstance *dnetp, LEAF_FORMAT *leaf,
179    uchar_t *vi);
180static uchar_t *parse_media_block(struct dnetinstance *dnetp,
181    media_block_t *block, uchar_t *vi);
182static int check_srom_valid(uchar_t *);
183static void dnet_dumpbin(char *msg, uchar_t *, int size, int len);
184static void setup_legacy_blocks();
185/* Active Media Determination Routines */
186static void find_active_media(struct dnetinstance *);
187static int send_test_packet(struct dnetinstance *);
188static int dnet_link_sense(struct dnetinstance *);
189
190/* PHY MII Routines */
191static ushort_t dnet_mii_read(dev_info_t *dip, int phy_addr, int reg_num);
192static void dnet_mii_write(dev_info_t *dip, int phy_addr, int reg_num,
193			int reg_dat);
194static void write_mii(struct dnetinstance *, uint32_t, int);
195static void mii_tristate(struct dnetinstance *);
196static void do_phy(struct dnetinstance *);
197static void dnet_mii_link_cb(dev_info_t *, int, enum mii_phy_state);
198static void set_leaf(SROM_FORMAT *sr, LEAF_FORMAT *leaf);
199
200#ifdef DNETDEBUG
201uint32_t dnet_usecelapsed(struct dnetinstance *dnetp);
202void dnet_timestamp(struct dnetinstance *, char *);
203void dnet_usectimeout(struct dnetinstance *, uint32_t, int, timercb_t);
204#endif
205static char *media_str[] = {
206	"10BaseT",
207	"10Base2",
208	"10Base5",
209	"100BaseTX",
210	"10BaseT FD",
211	"100BaseTX FD",
212	"100BaseT4",
213	"100BaseFX",
214	"100BaseFX FD",
215	"MII"
216};
217
218/* default SROM info for cards with no SROMs */
219static LEAF_FORMAT leaf_default_100;
220static LEAF_FORMAT leaf_asante;
221static LEAF_FORMAT leaf_phylegacy;
222static LEAF_FORMAT leaf_cogent_100;
223static LEAF_FORMAT leaf_21041;
224static LEAF_FORMAT leaf_21040;
225
226/* rx buffer size (rounded up to 4) */
227int rx_buf_size = (ETHERMAX + ETHERFCSL + VLAN_TAGSZ + 3) & ~3;
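/*
 * For example, with the usual values (ETHERMAX 1514, ETHERFCSL 4,
 * VLAN_TAGSZ 4) this works out to (1522 + 3) & ~3 = 1524 bytes.
 */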
228
229int max_rx_desc_21040 = MAX_RX_DESC_21040;
230int max_rx_desc_21140 = MAX_RX_DESC_21140;
231int max_tx_desc = MAX_TX_DESC;
232int dnet_xmit_threshold = MAX_TX_DESC >> 2;	/* XXX need tuning? */
233
234static kmutex_t dnet_rbuf_lock;		/* mutex to protect rbuf_list data */
235
236/* used for buffers allocated by ddi_dma_mem_alloc() */
237static ddi_dma_attr_t dma_attr = {
238	DMA_ATTR_V0,		/* dma_attr version */
239	0,			/* dma_attr_addr_lo */
240	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
241	0x7FFFFFFF,		/* dma_attr_count_max */
242	4,			/* dma_attr_align */
243	0x3F,			/* dma_attr_burstsizes */
244	1,			/* dma_attr_minxfer */
245	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
246	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
247	1,			/* dma_attr_sgllen */
248	1,			/* dma_attr_granular */
249	0,			/* dma_attr_flags */
250};
251
252/* used for buffers allocated for rbuf, allow 2 cookies */
253static ddi_dma_attr_t dma_attr_rb = {
254	DMA_ATTR_V0,		/* dma_attr version */
255	0,			/* dma_attr_addr_lo */
256	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
257	0x7FFFFFFF,		/* dma_attr_count_max */
258	4,			/* dma_attr_align */
259	0x3F,			/* dma_attr_burstsizes */
260	1,			/* dma_attr_minxfer */
261	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
262	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
263	2,			/* dma_attr_sgllen */
264	1,			/* dma_attr_granular */
265	0,			/* dma_attr_flags */
266};
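/*
 * Two cookies are allowed for receive buffers because a buffer may straddle
 * a page boundary; dnet_getp() then programs such a buffer into both
 * buffer1 and buffer2 of the receive descriptor.
 */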
267/* used for buffers which are NOT from ddi_dma_mem_alloc() - xmit side */
268static ddi_dma_attr_t dma_attr_tx = {
269	DMA_ATTR_V0,		/* dma_attr version */
270	0,			/* dma_attr_addr_lo */
271	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
272	0x7FFFFFFF,		/* dma_attr_count_max */
273	1,			/* dma_attr_align */
274	0x3F,			/* dma_attr_burstsizes */
275	1,			/* dma_attr_minxfer */
276	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
277	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
278	0x7FFF,			/* dma_attr_sgllen */
279	1,			/* dma_attr_granular */
280	0,			/* dma_attr_flags */
281};
282
283static ddi_device_acc_attr_t accattr = {
284	DDI_DEVICE_ATTR_V0,
285	DDI_NEVERSWAP_ACC,
286	DDI_STRICTORDER_ACC,
287};
288
289uchar_t dnet_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
290
291/* Standard Module linkage initialization for a Streams driver */
292extern struct mod_ops mod_driverops;
293
294DDI_DEFINE_STREAM_OPS(dnet_devops, nulldev, dnet_probe, dnet_attach,
295    dnet_detach, nodev, NULL, D_MP, NULL, dnet_quiesce);
296
297static struct modldrv dnet_modldrv = {
298	&mod_driverops,		/* Type of module.  This one is a driver */
299	IDENT,			/* short description */
300	&dnet_devops		/* driver specific ops */
301};
302
303static struct modlinkage dnet_modlinkage = {
304	MODREV_1,		/* ml_rev */
305	{ &dnet_modldrv, NULL }	/* ml_linkage */
306};
307
308static mac_callbacks_t dnet_m_callbacks = {
309	0,			/* mc_callbacks */
310	dnet_m_getstat,		/* mc_getstat */
311	dnet_m_start,		/* mc_start */
312	dnet_m_stop,		/* mc_stop */
313	dnet_m_setpromisc,	/* mc_setpromisc */
314	dnet_m_multicst,	/* mc_multicst */
315	dnet_m_unicst,		/* mc_unicst */
316	dnet_m_tx,		/* mc_tx */
317	NULL,
318	NULL,			/* mc_ioctl */
319	NULL,			/* mc_getcapab */
320	NULL,			/* mc_open */
321	NULL			/* mc_close */
322};
323
324/*
325 * Passed to the hacked interrupt for multiport Cogent and ZNYX cards with
326 * dodgy interrupt routing
327 */
328#define	MAX_INST 8 /* Maximum instances on a multiport adapter. */
329struct hackintr_inf
330{
331	struct dnetinstance *dnetps[MAX_INST]; /* dnetps for each port */
332	dev_info_t *devinfo;		    /* Devinfo of the primary device */
333	kmutex_t lock;
334		/* Ensures the interrupt doesn't get called while detaching */
335};
336static char hackintr_propname[] = "InterruptData";
337static char macoffset_propname[] = "MAC_offset";
338static char speed_propname[] = "speed";
339static char ofloprob_propname[] = "dmaworkaround";
340static char duplex_propname[] = "full-duplex"; /* Must agree with MII */
341static char printsrom_propname[] = "print-srom";
342
343static uint_t dnet_hack_intr(struct hackintr_inf *);
344
345int
346_init(void)
347{
348	int i;
349
350	/* Configure fake sroms for legacy cards */
351	mutex_init(&dnet_rbuf_lock, NULL, MUTEX_DRIVER, NULL);
352	setup_legacy_blocks();
353
354	mac_init_ops(&dnet_devops, "dnet");
355
356	if ((i = mod_install(&dnet_modlinkage)) != 0) {
357		mac_fini_ops(&dnet_devops);
358		mutex_destroy(&dnet_rbuf_lock);
359	}
360	return (i);
361}
362
363int
364_fini(void)
365{
366	int i;
367
368	if ((i = mod_remove(&dnet_modlinkage)) == 0) {
369		mac_fini_ops(&dnet_devops);
370
371		/* loop until all the receive buffers are freed */
372		while (dnet_rbuf_destroy() != 0) {
373			delay(drv_usectohz(100000));
374#ifdef DNETDEBUG
375			if (dnetdebug & DNETDDI)
376				cmn_err(CE_WARN, "dnet _fini delay");
377#endif
378		}
379		mutex_destroy(&dnet_rbuf_lock);
380	}
381	return (i);
382}
383
384int
385_info(struct modinfo *modinfop)
386{
387	return (mod_info(&dnet_modlinkage, modinfop));
388}
389
390/*
391 * probe(9E) -- Determine if a device is present
392 */
393static int
394dnet_probe(dev_info_t *devinfo)
395{
396	ddi_acc_handle_t handle;
397	uint16_t	vendorid;
398	uint16_t	deviceid;
399
400	if (pci_config_setup(devinfo, &handle) != DDI_SUCCESS)
401		return (DDI_PROBE_FAILURE);
402
403	vendorid = pci_config_get16(handle, PCI_CONF_VENID);
404
405	if (vendorid != DEC_VENDOR_ID) {
406		pci_config_teardown(&handle);
407		return (DDI_PROBE_FAILURE);
408	}
409
410	deviceid = pci_config_get16(handle, PCI_CONF_DEVID);
411	switch (deviceid) {
412	case DEVICE_ID_21040:
413	case DEVICE_ID_21041:
414	case DEVICE_ID_21140:
415	case DEVICE_ID_21143: /* And 142 */
416		break;
417	default:
418		pci_config_teardown(&handle);
419		return (DDI_PROBE_FAILURE);
420	}
421
422	pci_config_teardown(&handle);
423#ifndef BUG_4010796
424	return (DDI_PROBE_SUCCESS);
425#else
426	return (dnet_hack(devinfo));
427#endif
428}
429
430#ifdef BUG_4010796
431/*
432 * If we have a device, but we cannot presently access its SROM data,
433 * then we return DDI_PROBE_PARTIAL and hope that sometime later we
434 * will be able to get at the SROM data.  This can only happen if we
435 * are a secondary port with no SROM, and the bootstrap failed to set
436 * our DNET_SROM property, and our primary sibling has not yet probed.
437 */
438static int
439dnet_hack(dev_info_t *devinfo)
440{
441	uchar_t 	vendor_info[SROM_SIZE];
442	uint32_t	csr;
443	uint16_t	deviceid;
444	ddi_acc_handle_t handle;
445	uint32_t	retval;
446	int		secondary;
447	ddi_acc_handle_t io_handle;
448	caddr_t		io_reg;
449
450#define	DNET_PCI_RNUMBER	1
451
452	if (pci_config_setup(devinfo, &handle) != DDI_SUCCESS)
453		return (DDI_PROBE_FAILURE);
454
455	deviceid = pci_config_get16(handle, PCI_CONF_DEVID);
456
457	/*
458	 * Turn on Master Enable and IO Enable bits.
459	 */
460	csr = pci_config_get32(handle, PCI_CONF_COMM);
461	pci_config_put32(handle, PCI_CONF_COMM, (csr |PCI_COMM_ME|PCI_COMM_IO));
462
463	pci_config_teardown(&handle);
464
465	/* Now map I/O register */
466	if (ddi_regs_map_setup(devinfo, DNET_PCI_RNUMBER,
467	    &io_reg, 0, 0, &accattr, &io_handle) != DDI_SUCCESS) {
468		return (DDI_PROBE_FAILURE);
469	}
470
471	/*
472	 * Reset the chip
473	 */
474	ddi_put32(io_handle, REG32(io_reg, BUS_MODE_REG), SW_RESET);
475	drv_usecwait(3);
476	ddi_put32(io_handle, REG32(io_reg, BUS_MODE_REG), 0);
477	drv_usecwait(8);
478
479	secondary = dnet_read_srom(devinfo, deviceid, io_handle,
480	    io_reg, vendor_info, sizeof (vendor_info));
481
482	switch (secondary) {
483	case -1:
484		/* We can't access our SROM data! */
485		retval = DDI_PROBE_PARTIAL;
486		break;
487	case 0:
488		retval = DDI_PROBE_SUCCESS;
489		break;
490	default:
491		retval = DDI_PROBE_SUCCESS;
492	}
493
494	ddi_regs_map_free(&io_handle);
495	return (retval);
496}
497#endif /* BUG_4010796 */
498
499/*
500 * attach(9E) -- Attach a device to the system
501 *
502 * Called once for each board successfully probed.
503 */
504static int
505dnet_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
506{
507	uint16_t revid;
508	struct dnetinstance 	*dnetp;		/* Our private device info */
509	mac_register_t		*macp;
510	uchar_t 		vendor_info[SROM_SIZE];
511	uint32_t		csr;
512	uint16_t		deviceid;
513	ddi_acc_handle_t 	handle;
514	int			secondary;
515
516#define	DNET_PCI_RNUMBER	1
517
518	switch (cmd) {
519	case DDI_ATTACH:
520		break;
521
522	case DDI_RESUME:
523		/* Get the driver private (dnetinstance) structure */
524		dnetp = ddi_get_driver_private(devinfo);
525
526		mutex_enter(&dnetp->intrlock);
527		mutex_enter(&dnetp->txlock);
528		dnet_reset_board(dnetp);
529		dnet_init_board(dnetp);
530		dnetp->suspended = B_FALSE;
531
532		if (dnetp->running) {
533			dnetp->need_tx_update = B_FALSE;
534			mutex_exit(&dnetp->txlock);
535			(void) dnet_start(dnetp);
536			mutex_exit(&dnetp->intrlock);
537			mac_tx_update(dnetp->mac_handle);
538		} else {
539			mutex_exit(&dnetp->txlock);
540			mutex_exit(&dnetp->intrlock);
541		}
542		return (DDI_SUCCESS);
543	default:
544		return (DDI_FAILURE);
545	}
546
547	if (pci_config_setup(devinfo, &handle) != DDI_SUCCESS)
548		return (DDI_FAILURE);
549
550	deviceid = pci_config_get16(handle, PCI_CONF_DEVID);
551	switch (deviceid) {
552	case DEVICE_ID_21040:
553	case DEVICE_ID_21041:
554	case DEVICE_ID_21140:
555	case DEVICE_ID_21143: /* And 142 */
556		break;
557	default:
558		pci_config_teardown(&handle);
559		return (DDI_FAILURE);
560	}
561
562	/*
563	 * Turn on Master Enable and IO Enable bits.
564	 */
565	csr = pci_config_get32(handle, PCI_CONF_COMM);
566	pci_config_put32(handle, PCI_CONF_COMM, (csr |PCI_COMM_ME|PCI_COMM_IO));
567
568	/* Make sure the device is not asleep */
569	csr = pci_config_get32(handle, PCI_DNET_CONF_CFDD);
570	pci_config_put32(handle, PCI_DNET_CONF_CFDD,
571	    csr &  ~(CFDD_SLEEP|CFDD_SNOOZE));
572
573	revid = pci_config_get8(handle, PCI_CONF_REVID);
574	pci_config_teardown(&handle);
575
576	dnetp = kmem_zalloc(sizeof (struct dnetinstance), KM_SLEEP);
577	ddi_set_driver_private(devinfo, dnetp);
578
579	/* Now map I/O register */
580	if (ddi_regs_map_setup(devinfo, DNET_PCI_RNUMBER, &dnetp->io_reg,
581	    0, 0, &accattr, &dnetp->io_handle) != DDI_SUCCESS) {
582		kmem_free(dnetp, sizeof (struct dnetinstance));
583		return (DDI_FAILURE);
584	}
585
586	dnetp->devinfo = devinfo;
587	dnetp->board_type = deviceid;
588
589	/*
590	 * Get the iblock cookie with which to initialize the mutexes.
591	 */
592	if (ddi_get_iblock_cookie(devinfo, 0, &dnetp->icookie)
593	    != DDI_SUCCESS)
594		goto fail;
595
596	/*
597	 * Initialize mutexes for this device.
598	 * Do this before registering the interrupt handler, to avoid the
599	 * case where the interrupt handler could run before the mutexes
600	 * have been initialized.
601	 * Lock ordering rules: always lock intrlock first before
602	 * txlock if both are required.
603	 */
604	mutex_init(&dnetp->txlock, NULL, MUTEX_DRIVER, dnetp->icookie);
605	mutex_init(&dnetp->intrlock, NULL, MUTEX_DRIVER, dnetp->icookie);
606
607	/*
608	 * Get the BNC/TP indicator from the conf file for 21040
609	 */
610	dnetp->bnc_indicator =
611	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
612	    "bncaui", -1);
613
614	/*
615	 * For the 21140, check the data rate set in the conf file.  The
616	 * default is 100Mb/s.  Disallow connections at settings that would
617	 * conflict with what's in the conf file.
618	 */
619	dnetp->speed =
620	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
621	    speed_propname, 0);
622	dnetp->full_duplex =
623	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
624	    duplex_propname, -1);
625
626	if (dnetp->speed == 100) {
627		dnetp->disallowed_media |= (1UL<<MEDIA_TP) | (1UL<<MEDIA_TP_FD);
628	} else if (dnetp->speed == 10) {
629		dnetp->disallowed_media |=
630		    (1UL<<MEDIA_SYM_SCR) | (1UL<<MEDIA_SYM_SCR_FD);
631	}
632
633	if (dnetp->full_duplex == 1) {
634		dnetp->disallowed_media |=
635		    (1UL<<MEDIA_TP) | (1UL<<MEDIA_SYM_SCR);
636	} else if (dnetp->full_duplex == 0) {
637		dnetp->disallowed_media |=
638		    (1UL<<MEDIA_TP_FD) | (1UL<<MEDIA_SYM_SCR_FD);
639	}
640
641	if (dnetp->bnc_indicator == 0) /* Disable BNC and AUI media */
642		dnetp->disallowed_media |= (1UL<<MEDIA_BNC) | (1UL<<MEDIA_AUI);
643	else if (dnetp->bnc_indicator == 1) /* Force BNC only */
644		dnetp->disallowed_media =  (uint32_t)~(1U<<MEDIA_BNC);
645	else if (dnetp->bnc_indicator == 2) /* Force AUI only */
646		dnetp->disallowed_media = (uint32_t)~(1U<<MEDIA_AUI);
647
648	dnet_reset_board(dnetp);
649
650	secondary = dnet_read_srom(devinfo, dnetp->board_type, dnetp->io_handle,
651	    dnetp->io_reg, vendor_info, sizeof (vendor_info));
652
653	if (secondary == -1) /* ASSERT (vendor_info not big enough) */
654		goto fail1;
655
656	dnet_parse_srom(dnetp, &dnetp->sr, vendor_info);
657
658	if (ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
659	    printsrom_propname, 0))
660		dnet_print_srom(&dnetp->sr);
661
662	dnetp->sr.netaddr[ETHERADDRL-1] += secondary;	/* unique ether addr */
663
664	BCOPY((caddr_t)dnetp->sr.netaddr,
665	    (caddr_t)dnetp->vendor_addr, ETHERADDRL);
666
667	BCOPY((caddr_t)dnetp->sr.netaddr,
668	    (caddr_t)dnetp->curr_macaddr, ETHERADDRL);
669
670	/*
671	 * determine whether to implement workaround from DEC
672	 * for DMA overrun errata.
673	 */
674	dnetp->overrun_workaround =
675	    ((dnetp->board_type == DEVICE_ID_21140 && revid >= 0x20) ||
676	    (dnetp->board_type == DEVICE_ID_21143 && revid <= 0x30)) ? 1 : 0;
677
678	dnetp->overrun_workaround =
679	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
680	    ofloprob_propname, dnetp->overrun_workaround);
681
682	/*
683	 * Add the interrupt handler if dnet_hack_interrupts() returns 0.
684	 * Otherwise dnet_hack_interrupts() itself adds the handler.
685	 */
686	if (!dnet_hack_interrupts(dnetp, secondary)) {
687		(void) ddi_add_intr(devinfo, 0, NULL,
688		    NULL, dnet_intr, (caddr_t)dnetp);
689	}
690
691	dnetp->max_tx_desc = max_tx_desc;
692	dnetp->max_rx_desc = max_rx_desc_21040;
693	if (dnetp->board_type != DEVICE_ID_21040 &&
694	    dnetp->board_type != DEVICE_ID_21041 &&
695	    dnetp->speed != 10)
696		dnetp->max_rx_desc = max_rx_desc_21140;
697
698	/* Allocate the TX and RX descriptors/buffers. */
699	if (dnet_alloc_bufs(dnetp) == FAILURE) {
700		cmn_err(CE_WARN, "DNET: Not enough DMA memory for buffers.");
701		goto fail2;
702	}
703
704	/*
705	 *	Register ourselves with the GLDv3 interface
706	 */
707	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
708		goto fail2;
709
710	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
711	macp->m_driver = dnetp;
712	macp->m_dip = devinfo;
713	macp->m_src_addr = dnetp->curr_macaddr;
714	macp->m_callbacks = &dnet_m_callbacks;
715	macp->m_min_sdu = 0;
716	macp->m_max_sdu = ETHERMTU;
717	macp->m_margin = VLAN_TAGSZ;
718
719	if (mac_register(macp, &dnetp->mac_handle) == 0) {
720		mac_free(macp);
721
722		mutex_enter(&dnetp->intrlock);
723
724		dnetp->phyaddr = -1;
725		if (dnetp->board_type == DEVICE_ID_21140 ||
726		    dnetp->board_type == DEVICE_ID_21143)
727			do_phy(dnetp);	/* Initialize the PHY, if any */
728		find_active_media(dnetp);
729
730		/* if the chosen media is non-MII, stop the port monitor */
731		if (dnetp->selected_media_block->media_code != MEDIA_MII &&
732		    dnetp->mii != NULL) {
733			mii_destroy(dnetp->mii);
734			dnetp->mii = NULL;
735			dnetp->phyaddr = -1;
736		}
737
738#ifdef DNETDEBUG
739		if (dnetdebug & DNETSENSE)
740			cmn_err(CE_NOTE, "dnet: link configured : %s",
741			    media_str[dnetp->selected_media_block->media_code]);
742#endif
743		bzero(dnetp->setup_buf_vaddr, SETUPBUF_SIZE);
744
745		dnet_reset_board(dnetp);
746		dnet_init_board(dnetp);
747
748		mutex_exit(&dnetp->intrlock);
749
750		(void) dnet_m_unicst(dnetp, dnetp->curr_macaddr);
751		(void) dnet_m_multicst(dnetp, B_TRUE, dnet_broadcastaddr);
752
753		return (DDI_SUCCESS);
754	}
755
756	mac_free(macp);
757fail2:
758	/* XXX function return value ignored */
759	/*
760	 * dnet_detach_hacked_interrupt() will remove
761	 * interrupt for the non-hacked case also.
762	 */
763	(void) dnet_detach_hacked_interrupt(devinfo);
764	dnet_free_bufs(dnetp);
765fail1:
766	mutex_destroy(&dnetp->txlock);
767	mutex_destroy(&dnetp->intrlock);
768fail:
769	ddi_regs_map_free(&dnetp->io_handle);
770	kmem_free(dnetp, sizeof (struct dnetinstance));
771	return (DDI_FAILURE);
772}
773
774/*
775 * detach(9E) -- Detach a device from the system
776 */
777static int
778dnet_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
779{
780	int32_t rc;
781	struct dnetinstance *dnetp;		/* Our private device info */
782	int32_t		proplen;
783
784	/* Get the driver private (dnetinstance) structure */
785	dnetp = ddi_get_driver_private(devinfo);
786
787	switch (cmd) {
788	case DDI_DETACH:
789		break;
790
791	case DDI_SUSPEND:
792		/*
793		 * NB: dnetp->suspended can only be modified (marked true)
794		 * if both intrlock and txlock are held.  This keeps both
795		 * tx and rx code paths excluded.
796		 */
797		mutex_enter(&dnetp->intrlock);
798		mutex_enter(&dnetp->txlock);
799		dnetp->suspended = B_TRUE;
800		dnet_reset_board(dnetp);
801		mutex_exit(&dnetp->txlock);
802		mutex_exit(&dnetp->intrlock);
803		return (DDI_SUCCESS);
804
805	default:
806		return (DDI_FAILURE);
807	}
808
809	/*
810	 *	Unregister ourselves from the GLDv3 interface
811	 */
812	if (mac_unregister(dnetp->mac_handle) != 0)
813		return (DDI_FAILURE);
814
815	/* stop the board if it is running */
816	dnet_reset_board(dnetp);
817
818	if ((rc = dnet_detach_hacked_interrupt(devinfo)) != DDI_SUCCESS)
819		return (rc);
820
821	if (dnetp->mii != NULL)
822		mii_destroy(dnetp->mii);
823
824	/* Free leaf information */
825	set_leaf(&dnetp->sr, NULL);
826
827	ddi_regs_map_free(&dnetp->io_handle);
828	dnet_free_bufs(dnetp);
829	mutex_destroy(&dnetp->txlock);
830	mutex_destroy(&dnetp->intrlock);
831	kmem_free(dnetp, sizeof (struct dnetinstance));
832
833#ifdef BUG_4010796
834	if (ddi_getproplen(DDI_DEV_T_ANY, devinfo, 0,
835	    "DNET_HACK", &proplen) != DDI_PROP_SUCCESS)
836		return (DDI_SUCCESS);
837
838	/*
839	 * We must remove the properties we added, because if we leave
840	 * them in the devinfo nodes and the driver is unloaded, when
841	 * the driver is reloaded the info will still be there, causing
842	 * nodes which had returned PROBE_PARTIAL the first time to
843	 * instead return PROBE_SUCCESS, in turn causing the nodes to be
844	 * attached in a different order, causing their PPA numbers to
845	 * be different the second time around, which is undesirable.
846	 */
847	(void) ddi_prop_remove(DDI_DEV_T_NONE, devinfo, "DNET_HACK");
848	(void) ddi_prop_remove(DDI_DEV_T_NONE, ddi_get_parent(devinfo),
849	    "DNET_SROM");
850	(void) ddi_prop_remove(DDI_DEV_T_NONE, ddi_get_parent(devinfo),
851	    "DNET_DEVNUM");
852#endif
853
854	return (DDI_SUCCESS);
855}
856
857int
858dnet_quiesce(dev_info_t *dip)
859{
860	struct dnetinstance *dnetp = ddi_get_driver_private(dip);
861
862	/*
863	 * Reset chip (disables interrupts).
864	 */
865	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, INT_MASK_REG), 0);
866	ddi_put32(dnetp->io_handle,
867	    REG32(dnetp->io_reg, BUS_MODE_REG), SW_RESET);
868
869	return (DDI_SUCCESS);
870}
871
872static void
873dnet_reset_board(struct dnetinstance *dnetp)
874{
875	uint32_t	val;
876
877	/*
878	 * The chip must be in the STOP state before it is reinitialized.
879	 */
880	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
881	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
882	    val & ~(START_TRANSMIT | START_RECEIVE));
883
884	/*
885	 * Reset the chip
886	 */
887	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, INT_MASK_REG), 0);
888	ddi_put32(dnetp->io_handle,
889	    REG32(dnetp->io_reg, BUS_MODE_REG), SW_RESET);
890	drv_usecwait(5);
891}
892
893/*
894 * dnet_init_board() -- initialize the specified network board short of
895 * actually starting the board.  Call after dnet_reset_board().
896 * Called with intrlock held.
897 */
898static void
899dnet_init_board(struct dnetinstance *dnetp)
900{
901	set_opr(dnetp);
902	set_gpr(dnetp);
903	set_sia(dnetp);
904	dnet_chip_init(dnetp);
905}
906
907/* dnet_chip_init() - called with intrlock held */
908static void
909dnet_chip_init(struct dnetinstance *dnetp)
910{
911	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, BUS_MODE_REG),
912	    CACHE_ALIGN | BURST_SIZE);		/* CSR0 */
913
914	/*
915	 * Initialize the TX and RX descriptors/buffers
916	 */
917	dnet_init_txrx_bufs(dnetp);
918
919	/*
920	 * Set the base address of the Rx descriptor list in CSR3
921	 */
922	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, RX_BASE_ADDR_REG),
923	    dnetp->rx_desc_paddr);
924
925	/*
926	 * Set the base address of the Tx descriptor list in CSR4
927	 */
928	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, TX_BASE_ADDR_REG),
929	    dnetp->tx_desc_paddr);
930
931	dnetp->tx_current_desc = dnetp->rx_current_desc = 0;
932	dnetp->transmitted_desc = 0;
933	dnetp->free_desc = dnetp->max_tx_desc;
934	enable_interrupts(dnetp);
935}
936
937/*
938 *	dnet_start() -- start the board receiving and allow transmits.
939 *  Called with intrlock held.
940 */
941static int
942dnet_start(struct dnetinstance *dnetp)
943{
944	uint32_t val;
945
946	ASSERT(MUTEX_HELD(&dnetp->intrlock));
947	/*
948	 * start the board and enable receiving
949	 */
950	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
951	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
952	    val | START_TRANSMIT);
953	(void) dnet_set_addr(dnetp);
954	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
955	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
956	    val | START_RECEIVE);
957	enable_interrupts(dnetp);
958	return (0);
959}
960
961static int
962dnet_m_start(void *arg)
963{
964	struct dnetinstance *dnetp = arg;
965
966	mutex_enter(&dnetp->intrlock);
967	dnetp->running = B_TRUE;
968	/*
969	 * start the board and enable receiving
970	 */
971	if (!dnetp->suspended)
972		(void) dnet_start(dnetp);
973	mutex_exit(&dnetp->intrlock);
974	return (0);
975}
976
977static void
978dnet_m_stop(void *arg)
979{
980	struct dnetinstance *dnetp = arg;
981	uint32_t val;
982
983	/*
984	 * stop the board and disable transmit/receive
985	 */
986	mutex_enter(&dnetp->intrlock);
987	if (!dnetp->suspended) {
988		val = ddi_get32(dnetp->io_handle,
989		    REG32(dnetp->io_reg, OPN_MODE_REG));
990		ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
991		    val & ~(START_TRANSMIT | START_RECEIVE));
992	}
993	mac_link_update(dnetp->mac_handle, LINK_STATE_UNKNOWN);
994	dnetp->running = B_FALSE;
995	mutex_exit(&dnetp->intrlock);
996}
997
998/*
999 *	dnet_set_addr() -- set the physical network address on the board
1000 *  Called with intrlock held.
1001 */
1002static int
1003dnet_set_addr(struct dnetinstance *dnetp)
1004{
1005	struct tx_desc_type *desc;
1006	int 		current_desc;
1007	uint32_t	val;
1008
1009	ASSERT(MUTEX_HELD(&dnetp->intrlock));
1010
1011	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
1012	if (!(val & START_TRANSMIT))
1013		return (0);
1014
1015	current_desc = dnetp->tx_current_desc;
1016	desc = &dnetp->tx_desc[current_desc];
1017
1018	mutex_enter(&dnetp->txlock);
1019	dnetp->need_saddr = 0;
1020	mutex_exit(&dnetp->txlock);
1021
1022	if ((alloc_descriptor(dnetp)) == FAILURE) {
1023		mutex_enter(&dnetp->txlock);
1024		dnetp->need_saddr = 1;
1025		mutex_exit(&dnetp->txlock);
1026#ifdef DNETDEBUG
1027		if (dnetdebug & DNETTRACE)
1028			cmn_err(CE_WARN, "DNET saddr:alloc descriptor failure");
1029#endif
1030		return (0);
1031	}
1032
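	/*
	 * Build the setup-frame descriptor.  With filter_type0 and
	 * filter_type1 both set, the chip treats the setup buffer as the
	 * 512-bit hash table (hash-only, "imperfect" filtering); the table
	 * itself is maintained in dnet_m_unicst()/dnet_m_multicst().
	 */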
1033	desc->buffer1			= dnetp->setup_buf_paddr;
1034	desc->buffer2			= 0;
1035	desc->desc1.buffer_size1 	= SETUPBUF_SIZE;
1036	desc->desc1.buffer_size2 	= 0;
1037	desc->desc1.setup_packet	= 1;
1038	desc->desc1.first_desc		= 0;
1039	desc->desc1.last_desc 		= 0;
1040	desc->desc1.filter_type0 	= 1;
1041	desc->desc1.filter_type1 	= 1;
1042	desc->desc1.int_on_comp		= 1;
1043
1044	desc->desc0.own = 1;
1045	ddi_put8(dnetp->io_handle, REG8(dnetp->io_reg, TX_POLL_REG),
1046	    TX_POLL_DEMAND);
1047	return (0);
1048}
1049
1050static int
1051dnet_m_unicst(void *arg, const uint8_t *macaddr)
1052{
1053	struct dnetinstance *dnetp = arg;
1054	uint32_t	index;
1055	uint32_t	*hashp;
1056
1057	mutex_enter(&dnetp->intrlock);
1058
1059	bcopy(macaddr, dnetp->curr_macaddr, ETHERADDRL);
1060
1061	/*
1062	 * As we are using Imperfect filtering, the broadcast address has to
1063	 * be set explicitly in the 512 bit hash table.  Hence the index into
1064	 * the hash table is calculated and the bit set to enable reception
1065	 * of broadcast packets.
1066	 *
1067	 * We also use HASH_ONLY mode, without using the perfect filter for
1068	 * our station address, because there appears to be a bug in the
1069	 * 21140 where it fails to receive the specified perfect filter
1070	 * address.
1071	 *
1072	 * Since dlsdmult comes through here, it doesn't matter that the count
1073	 * is wrong for the two bits that correspond to the cases below. The
1074	 * worst that could happen is that we'd leave on a bit for an old
1075	 * macaddr, in the case where the macaddr gets changed, which is rare.
1076	 * Since filtering is imperfect, it is OK if that happens.
1077	 */
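	/*
	 * The 512-bit table is stored 16 bits per 32-bit word of the setup
	 * buffer, hence the index / 16 and index % 16 arithmetic below;
	 * e.g. index 255 sets bit 15 of hashp[15].
	 */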
1078	hashp = (uint32_t *)dnetp->setup_buf_vaddr;
1079	index = hashindex((uint8_t *)dnet_broadcastaddr);
1080	hashp[ index / 16 ] |= 1 << (index % 16);
1081
1082	index = hashindex((uint8_t *)dnetp->curr_macaddr);
1083	hashp[ index / 16 ] |= 1 << (index % 16);
1084
1085	if (!dnetp->suspended)
1086		(void) dnet_set_addr(dnetp);
1087	mutex_exit(&dnetp->intrlock);
1088	return (0);
1089}
1090
1091static int
1092dnet_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
1093{
1094	struct dnetinstance *dnetp = arg;
1095	uint32_t	index;
1096	uint32_t	*hashp;
1097	uint32_t	retval;
1098
1099	mutex_enter(&dnetp->intrlock);
1100	index = hashindex(macaddr);
1101	hashp = (uint32_t *)dnetp->setup_buf_vaddr;
1102	if (add) {
1103		if (dnetp->multicast_cnt[index]++) {
1104			mutex_exit(&dnetp->intrlock);
1105			return (0);
1106		}
1107		hashp[ index / 16 ] |= 1 << (index % 16);
1108	} else {
1109		if (--dnetp->multicast_cnt[index]) {
1110			mutex_exit(&dnetp->intrlock);
1111			return (0);
1112		}
1113		hashp[ index / 16 ] &= ~ (1 << (index % 16));
1114	}
1115	if (!dnetp->suspended)
1116		retval = dnet_set_addr(dnetp);
1117	else
1118		retval = 0;
1119	mutex_exit(&dnetp->intrlock);
1120	return (retval);
1121}
1122
1123/*
1124 * A hashing function used for setting the
1125 * node address or a multicast address
1126 */
1127static uint32_t
1128hashindex(const uint8_t *address)
1129{
1130	uint32_t	crc = (uint32_t)HASH_CRC;
1131	uint32_t const 	POLY = HASH_POLY;
1132	uint32_t	msb;
1133	int32_t 	byteslength;
1134	uint8_t 	currentbyte;
1135	uint32_t 	index;
1136	int32_t 	bit;
1137	int32_t		shift;
1138
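	/*
	 * Compute a CRC over the 6-byte address and fold nine of its bits
	 * into an index in the range 0-511 (the 512-bit hash table).
	 */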
1139	for (byteslength = 0; byteslength < ETHERADDRL; byteslength++) {
1140		currentbyte = address[byteslength];
1141		for (bit = 0; bit < 8; bit++) {
1142			msb = crc >> 31;
1143			crc <<= 1;
1144			if (msb ^ (currentbyte & 1)) {
1145				crc ^= POLY;
1146				crc |= 0x00000001;
1147			}
1148			currentbyte >>= 1;
1149		}
1150	}
1151
1152	for (index = 0, bit = 23, shift = 8; shift >= 0; bit++, shift--) {
1153		index |= (((crc >> bit) & 1) << shift);
1154	}
1155	return (index);
1156}
1157
1158static int
1159dnet_m_setpromisc(void *arg, boolean_t on)
1160{
1161	struct dnetinstance *dnetp = arg;
1162	uint32_t val;
1163
1164	mutex_enter(&dnetp->intrlock);
1165	if (dnetp->promisc == on) {
1166		mutex_exit(&dnetp->intrlock);
1167		return (0);
1168	}
1169	dnetp->promisc = on;
1170
1171	if (!dnetp->suspended) {
1172		val = ddi_get32(dnetp->io_handle,
1173		    REG32(dnetp->io_reg, OPN_MODE_REG));
1174		if (on)
1175			ddi_put32(dnetp->io_handle,
1176			    REG32(dnetp->io_reg, OPN_MODE_REG),
1177			    val | PROM_MODE);
1178		else
1179			ddi_put32(dnetp->io_handle,
1180			    REG32(dnetp->io_reg, OPN_MODE_REG),
1181			    val & (~PROM_MODE));
1182	}
1183	mutex_exit(&dnetp->intrlock);
1184	return (0);
1185}
1186
1187static int
1188dnet_m_getstat(void *arg, uint_t stat, uint64_t *val)
1189{
1190	struct dnetinstance *dnetp = arg;
1191
1192	switch (stat) {
1193	case MAC_STAT_IFSPEED:
1194		if (!dnetp->running) {
1195			*val = 0;
1196		} else {
1197			*val = (dnetp->mii_up ?
1198			    dnetp->mii_speed : dnetp->speed) * 1000000;
1199		}
1200		break;
1201
1202	case MAC_STAT_NORCVBUF:
1203		*val = dnetp->stat_norcvbuf;
1204		break;
1205
1206	case MAC_STAT_IERRORS:
1207		*val = dnetp->stat_errrcv;
1208		break;
1209
1210	case MAC_STAT_OERRORS:
1211		*val = dnetp->stat_errxmt;
1212		break;
1213
1214	case MAC_STAT_COLLISIONS:
1215		*val = dnetp->stat_collisions;
1216		break;
1217
1218	case ETHER_STAT_DEFER_XMTS:
1219		*val = dnetp->stat_defer;
1220		break;
1221
1222	case ETHER_STAT_CARRIER_ERRORS:
1223		*val = dnetp->stat_nocarrier;
1224		break;
1225
1226	case ETHER_STAT_TOOSHORT_ERRORS:
1227		*val = dnetp->stat_short;
1228		break;
1229
1230	case ETHER_STAT_LINK_DUPLEX:
1231		if (!dnetp->running) {
1232			*val = LINK_DUPLEX_UNKNOWN;
1233
1234		} else if (dnetp->mii_up) {
1235			*val = dnetp->mii_duplex ?
1236			    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
1237		} else {
1238			*val = dnetp->full_duplex ?
1239			    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
1240		}
1241		break;
1242
1243	case ETHER_STAT_TX_LATE_COLLISIONS:
1244		*val = dnetp->stat_xmtlatecoll;
1245		break;
1246
1247	case ETHER_STAT_EX_COLLISIONS:
1248		*val = dnetp->stat_excoll;
1249		break;
1250
1251	case MAC_STAT_OVERFLOWS:
1252		*val = dnetp->stat_overflow;
1253		break;
1254
1255	case MAC_STAT_UNDERFLOWS:
1256		*val = dnetp->stat_underflow;
1257		break;
1258
1259	default:
1260		return (ENOTSUP);
1261	}
1262
1263	return (0);
1264}
1265
1266#define	NextTXIndex(index) (((index)+1) % dnetp->max_tx_desc)
1267#define	PrevTXIndex(index) (((index)-1) < 0 ? dnetp->max_tx_desc - 1: (index)-1)
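/*
 * Ring arithmetic example: with a 4-entry ring, NextTXIndex(3) is 0 and
 * PrevTXIndex(0) is 3.
 */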
1268
1269static mblk_t *
1270dnet_m_tx(void *arg, mblk_t *mp)
1271{
1272	struct dnetinstance *dnetp = arg;
1273
1274	mutex_enter(&dnetp->txlock);
1275
1276	/* if suspended, drop the packet on the floor, we missed it */
1277	if (dnetp->suspended) {
1278		mutex_exit(&dnetp->txlock);
1279		freemsg(mp);
1280		return (NULL);
1281	}
1282
1283	if (dnetp->need_saddr) {
1284		/* XXX function return value ignored */
1285		mutex_exit(&dnetp->txlock);
1286		mutex_enter(&dnetp->intrlock);
1287		(void) dnet_set_addr(dnetp);
1288		mutex_exit(&dnetp->intrlock);
1289		mutex_enter(&dnetp->txlock);
1290	}
1291
1292	while (mp != NULL) {
1293		if (!dnet_send(dnetp, mp)) {
1294			mutex_exit(&dnetp->txlock);
1295			return (mp);
1296		}
1297		mp = mp->b_next;
1298	}
1299
1300	mutex_exit(&dnetp->txlock);
1301
1302	/*
1303	 * Enable xmit interrupt in case we are running out of xmit descriptors
1304	 * or there are more packets on the queue waiting to be transmitted.
1305	 */
1306	mutex_enter(&dnetp->intrlock);
1307
1308	enable_interrupts(dnetp);
1309
1310	/*
1311	 * Kick the transmitter
1312	 */
1313	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, TX_POLL_REG),
1314	    TX_POLL_DEMAND);
1315
1316	mutex_exit(&dnetp->intrlock);
1317
1318	return (NULL);
1319}
1320
1321static boolean_t
1322dnet_send(struct dnetinstance *dnetp, mblk_t *mp)
1323{
1324	struct tx_desc_type	*ring = dnetp->tx_desc;
1325	int		mblen, totlen;
1326	int		index, end_index, start_index;
1327	int		avail;
1328	int		error;
1329	int		bufn;
1330	int		retval;
1331	mblk_t		*bp;
1332
1333	ASSERT(MUTEX_HELD(&dnetp->txlock));
1334
1335	/* reclaim any xmit descriptors completed */
1336	dnet_reclaim_Tx_desc(dnetp);
1337
1338	/*
1339	 * Use the data buffers from the message and construct the
1340	 * scatter/gather list by calling ddi_dma_addr_bind_handle().
1341	 */
1342	error = 0;
1343	totlen = 0;
1344	bp = mp;
1345	bufn = 0;
1346	index = start_index = dnetp->tx_current_desc;
1347	avail = dnetp->free_desc;
1348	while (bp != NULL) {
1349		uint_t ncookies;
1350		ddi_dma_cookie_t dma_cookie;
1351
1352		mblen = MBLKL(bp);
1353
1354		if (!mblen) {	/* skip zero-length message blocks */
1355			bp = bp->b_cont;
1356			continue;
1357		}
1358
1359		retval = ddi_dma_addr_bind_handle(dnetp->dma_handle_tx, NULL,
1360		    (caddr_t)bp->b_rptr, mblen,
1361		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, 0,
1362		    &dma_cookie, &ncookies);
1363
1364		switch (retval) {
1365		case DDI_DMA_MAPPED:
1366			break;		/* everything's fine */
1367
1368		case DDI_DMA_NORESOURCES:
1369			error = 1;	/* allow retry by gld */
1370			break;
1371
1372		case DDI_DMA_NOMAPPING:
1373		case DDI_DMA_INUSE:
1374		case DDI_DMA_TOOBIG:
1375		default:
1376			error = 2;	/* error, no retry */
1377			break;
1378		}
1379
1380		/*
1381		 * We can use two cookies per descriptor (i.e. buffer1 and
1382		 * buffer2), so we need at least (ncookies + 1) / 2 descriptors.
1383		 */
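		/*
		 * For example, a message that binds to 5 cookies needs
		 * (5 + 1) / 2 = 3 descriptors.
		 */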
1384		if (((ncookies + 1) >> 1) > dnetp->free_desc) {
1385			(void) ddi_dma_unbind_handle(dnetp->dma_handle_tx);
1386			error = 1;
1387			break;
1388		}
1389
1390		/* setup the descriptors for this data buffer */
1391		while (ncookies) {
1392			end_index = index;
1393			if (bufn % 2) {
1394				ring[index].buffer2 =
1395				    (uint32_t)dma_cookie.dmac_address;
1396				ring[index].desc1.buffer_size2 =
1397				    dma_cookie.dmac_size;
1398				index = NextTXIndex(index); /* goto next desc */
1399			} else {
1400				/* initialize the descriptor */
1401				ASSERT(ring[index].desc0.own == 0);
1402				*(uint32_t *)&ring[index].desc0 = 0;
1403				*(uint32_t *)&ring[index].desc1 &=
1404				    DNET_END_OF_RING;
1405				ring[index].buffer1 =
1406				    (uint32_t)dma_cookie.dmac_address;
1407				ring[index].desc1.buffer_size1 =
1408				    dma_cookie.dmac_size;
1409				ring[index].buffer2 = (uint32_t)(0);
1410				dnetp->free_desc--;
1411				ASSERT(dnetp->free_desc >= 0);
1412			}
1413			totlen += dma_cookie.dmac_size;
1414			bufn++;
1415			if (--ncookies)
1416				ddi_dma_nextcookie(dnetp->dma_handle_tx,
1417				    &dma_cookie);
1418		}
1419		(void) ddi_dma_unbind_handle(dnetp->dma_handle_tx);
1420		bp = bp->b_cont;
1421	}
1422
1423	if (error == 1) {
1424		dnetp->stat_defer++;
1425		dnetp->free_desc = avail;
1426		dnetp->need_tx_update = B_TRUE;
1427		return (B_FALSE);
1428	} else if (error) {
1429		dnetp->free_desc = avail;
1430		freemsg(mp);
1431		return (B_TRUE);	/* Drop packet, don't retry */
1432	}
1433
1434	if (totlen > ETHERMAX + VLAN_TAGSZ) {
1435		cmn_err(CE_WARN, "DNET: oversized packet of %d bytes", totlen);
1436		dnetp->free_desc = avail;
1437		freemsg(mp);
1438		return (B_TRUE);	/* Don't repeat this attempt */
1439	}
1440
1441	/*
1442	 * Remember the message buffer pointer to do freemsg() at xmit
1443	 * interrupt time.
1444	 */
1445	dnetp->tx_msgbufp[end_index] = mp;
1446
1447	/*
1448	 * Now set the first/last buffer and own bits
1449	 * Since the 21040 looks for these bits set in the
1450	 * first buffer, work backwards in multiple buffers.
1451	 */
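	/*
	 * For example, a packet spanning descriptors N, N+1 and N+2 has the
	 * own bit set on N+2 and N+1 first, and on N (which also carries
	 * first_desc) last, so the chip never sees a partially built chain.
	 */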
1452	ring[end_index].desc1.last_desc = 1;
1453	ring[end_index].desc1.int_on_comp = 1;
1454	for (index = end_index; index != start_index;
1455	    index = PrevTXIndex(index))
1456		ring[index].desc0.own = 1;
1457	ring[start_index].desc1.first_desc = 1;
1458	ring[start_index].desc0.own = 1;
1459
1460	dnetp->tx_current_desc = NextTXIndex(end_index);
1461
1462	/*
1463	 * Safety check: make sure end-of-ring is set in last desc.
1464	 */
1465	ASSERT(ring[dnetp->max_tx_desc-1].desc1.end_of_ring != 0);
1466
1467	return (B_TRUE);
1468}
1469
1470/*
1471 *	dnet_intr() -- interrupt from board to inform us that a receive or
1472 *	transmit has completed.
1473 */
1474static uint_t
1475dnet_intr(caddr_t arg)
1476{
1477	struct dnetinstance *dnetp = (struct dnetinstance *)arg;
1478	uint32_t int_status;
1479
1480	mutex_enter(&dnetp->intrlock);
1481
1482	if (dnetp->suspended) {
1483		mutex_exit(&dnetp->intrlock);
1484		return (DDI_INTR_UNCLAIMED);
1485	}
1486
1487	int_status = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg,
1488	    STATUS_REG));
1489
1490	/*
1491	 * If interrupt was not from this board
1492	 */
1493	if (!(int_status & (NORMAL_INTR_SUMM | ABNORMAL_INTR_SUMM))) {
1494		mutex_exit(&dnetp->intrlock);
1495		return (DDI_INTR_UNCLAIMED);
1496	}
1497
1498	dnetp->stat_intr++;
1499
1500	if (int_status & GPTIMER_INTR) {
1501		ddi_put32(dnetp->io_handle,
1502		    REG32(dnetp->io_reg, STATUS_REG), GPTIMER_INTR);
1503		if (dnetp->timer.cb)
1504			dnetp->timer.cb(dnetp);
1505		else
1506			cmn_err(CE_WARN, "dnet: unhandled timer interrupt");
1507	}
1508
1509	if (int_status & TX_INTR) {
1510		ddi_put32(dnetp->io_handle,
1511		    REG32(dnetp->io_reg, STATUS_REG), TX_INTR);
1512		mutex_enter(&dnetp->txlock);
1513		if (dnetp->need_tx_update) {
1514			mutex_exit(&dnetp->txlock);
1515			mutex_exit(&dnetp->intrlock);
1516			mac_tx_update(dnetp->mac_handle);
1517			mutex_enter(&dnetp->intrlock);
1518			mutex_enter(&dnetp->txlock);
1519			dnetp->need_tx_update = B_FALSE;
1520		}
1521		/* reclaim any xmit descriptors that are completed */
1522		dnet_reclaim_Tx_desc(dnetp);
1523		mutex_exit(&dnetp->txlock);
1524	}
1525
1526	/*
1527	 * Check if receive interrupt bit is set
1528	 */
1529	if (int_status & (RX_INTR | RX_UNAVAIL_INTR)) {
1530		ddi_put32(dnetp->io_handle,
1531		    REG32(dnetp->io_reg, STATUS_REG),
1532		    int_status & (RX_INTR | RX_UNAVAIL_INTR));
1533		dnet_getp(dnetp);
1534	}
1535
1536	if (int_status & ABNORMAL_INTR_SUMM) {
1537		/*
1538		 * Check for system error
1539		 */
1540		if (int_status & SYS_ERR) {
1541			if ((int_status & SYS_ERR_BITS) == MASTER_ABORT)
1542				cmn_err(CE_WARN, "DNET: Bus Master Abort");
1543			if ((int_status & SYS_ERR_BITS) == TARGET_ABORT)
1544				cmn_err(CE_WARN, "DNET: Bus Target Abort");
1545			if ((int_status & SYS_ERR_BITS) == PARITY_ERROR)
1546				cmn_err(CE_WARN, "DNET: Parity error");
1547		}
1548
1549		/*
1550		 * If the jabber has timed out then reset the chip
1551		 */
1552		if (int_status & TX_JABBER_TIMEOUT)
1553			cmn_err(CE_WARN, "DNET: Jabber timeout.");
1554
1555		/*
1556		 * If an underflow has occurred, reset the chip
1557		 */
1558		if (int_status & TX_UNDERFLOW)
1559			cmn_err(CE_WARN, "DNET: Tx Underflow.");
1560
1561#ifdef DNETDEBUG
1562		if (dnetdebug & DNETINT)
1563			cmn_err(CE_NOTE, "Trying to reset...");
1564#endif
1565		dnet_reset_board(dnetp);
1566		dnet_init_board(dnetp);
1567		/* XXX function return value ignored */
1568		(void) dnet_start(dnetp);
1569	}
1570
1571	/*
1572	 * Enable the interrupts. Enable xmit interrupt in case we are
1573	 * running out of free descriptors or if there are packets
1574	 * in the queue waiting to be transmitted.
1575	 */
1576	enable_interrupts(dnetp);
1577	mutex_exit(&dnetp->intrlock);
1578	return (DDI_INTR_CLAIMED);	/* Indicate it was our interrupt */
1579}
1580
1581static void
1582dnet_getp(struct dnetinstance *dnetp)
1583{
1584	int packet_length, index;
1585	mblk_t	*mp;
1586	caddr_t 	virtual_address;
1587	struct	rx_desc_type *desc = dnetp->rx_desc;
1588	int marker = dnetp->rx_current_desc;
1589	int misses;
1590
1591	if (!dnetp->overrun_workaround) {
1592		/*
1593		 * If the workaround is not in place, we must still update
1594		 * the missed frame statistic from the on-chip counter.
1595		 */
1596		misses = ddi_get32(dnetp->io_handle,
1597		    REG32(dnetp->io_reg, MISSED_FRAME_REG));
1598		dnetp->stat_missed += (misses & MISSED_FRAME_MASK);
1599	}
1600
1601	/* While host owns the current descriptor */
1602	while (!(desc[dnetp->rx_current_desc].desc0.own)) {
1603		struct free_ptr *frp;
1604		caddr_t newbuf;
1605		struct rbuf_list *rp;
1606
1607		index = dnetp->rx_current_desc;
1608		ASSERT(desc[index].desc0.first_desc != 0);
1609
1610		/*
1611		 * DMA overrun errata from DEC: avoid possible bus hangs
1612		 * and data corruption
1613		 */
1614		if (dnetp->overrun_workaround &&
1615		    marker == dnetp->rx_current_desc) {
1616			int opn;
1617			do {
1618				marker = (marker+1) % dnetp->max_rx_desc;
1619			} while (!(dnetp->rx_desc[marker].desc0.own) &&
1620			    marker != index);
1621
1622			misses = ddi_get32(dnetp->io_handle,
1623			    REG32(dnetp->io_reg, MISSED_FRAME_REG));
1624			dnetp->stat_missed +=
1625			    (misses & MISSED_FRAME_MASK);
1626			if (misses & OVERFLOW_COUNTER_MASK) {
1627				/*
1628				 * Overflow(s) have occurred: stop the receiver
1629				 * and wait until it is in the stopped state.
1630				 */
1631				opn = ddi_get32(dnetp->io_handle,
1632				    REG32(dnetp->io_reg, OPN_MODE_REG));
1633				ddi_put32(dnetp->io_handle,
1634				    REG32(dnetp->io_reg, OPN_MODE_REG),
1635				    opn & ~(START_RECEIVE));
1636
1637				do {
1638					drv_usecwait(10);
1639				} while ((ddi_get32(dnetp->io_handle,
1640				    REG32(dnetp->io_reg, STATUS_REG)) &
1641				    RECEIVE_PROCESS_STATE) != 0);
1642#ifdef DNETDEBUG
1643				if (dnetdebug & DNETRECV)
1644					cmn_err(CE_CONT, "^*");
1645#endif
1646				/* Discard probably corrupt frames */
1647				while (!(dnetp->rx_desc[index].desc0.own)) {
1648					dnetp->rx_desc[index].desc0.own = 1;
1649					index = (index+1) % dnetp->max_rx_desc;
1650					dnetp->stat_missed++;
1651				}
1652
1653				/* restart the receiver */
1654				opn = ddi_get32(dnetp->io_handle,
1655				    REG32(dnetp->io_reg, OPN_MODE_REG));
1656				ddi_put32(dnetp->io_handle,
1657				    REG32(dnetp->io_reg, OPN_MODE_REG),
1658				    opn | START_RECEIVE);
1659				marker = dnetp->rx_current_desc = index;
1660				continue;
1661			}
1662			/*
1663			 * At this point, we know that all packets before
1664			 * "marker" were received before a dma overrun occurred
1665			 */
1666		}
1667
1668		/*
1669		 * If we get an oversized packet it could span multiple
1670		 * descriptors.  If this happens an error bit should be set.
1671		 */
1672		while (desc[index].desc0.last_desc == 0) {
1673			index = (index + 1) % dnetp->max_rx_desc;
1674			if (desc[index].desc0.own)
1675				return;	/* not done receiving large packet */
1676		}
1677		while (dnetp->rx_current_desc != index) {
1678			desc[dnetp->rx_current_desc].desc0.own = 1;
1679			dnetp->rx_current_desc =
1680			    (dnetp->rx_current_desc + 1) % dnetp->max_rx_desc;
1681#ifdef DNETDEBUG
1682			if (dnetdebug & DNETRECV)
1683				cmn_err(CE_WARN, "dnet: received large packet");
1684#endif
1685		}
1686
1687		packet_length = desc[index].desc0.frame_len;
1688
1689		/*
1690		 * Remove CRC from received data. This is an artefact of the
1691		 * 21x4x chip and should not be passed higher up the network
1692		 * stack.
1693		 */
1694		packet_length -= ETHERFCSL;
1695
1696		/* get the virtual address of the packet received */
1697		virtual_address =
1698		    dnetp->rx_buf_vaddr[index];
1699
1700		/*
1701		 * If no packet errors then do:
1702		 * 	1. Allocate a new receive buffer so that we can
1703		 *	   use the current buffer as streams buffer to
1704		 *	   avoid bcopy.
1705		 *	2. If we got a new receive buffer then allocate
1706		 *	   an mblk using desballoc().
1707		 *	3. Otherwise use the mblk from allocb() and do
1708		 *	   the bcopy.
1709		 */
1710		frp = NULL;
1711		rp = NULL;
1712		newbuf = NULL;
1713		mp = NULL;
1714		if (!desc[index].desc0.err_summary ||
1715		    (desc[index].desc0.frame2long &&
1716		    packet_length < rx_buf_size)) {
1717			ASSERT(packet_length < rx_buf_size);
1718			/*
1719			 * Allocate another receive buffer for this descriptor.
1720			 * If we fail to allocate then we do the normal bcopy.
1721			 */
1722			rp = dnet_rbuf_alloc(dnetp->devinfo, 0);
1723			if (rp != NULL) {
1724				newbuf = rp->rbuf_vaddr;
1725				frp = kmem_zalloc(sizeof (*frp), KM_NOSLEEP);
1726				if (frp != NULL) {
1727					frp->free_rtn.free_func =
1728					    dnet_freemsg_buf;
1729					frp->free_rtn.free_arg = (char *)frp;
1730					frp->buf = virtual_address;
1731					mp = desballoc(
1732					    (uchar_t *)virtual_address,
1733					    packet_length, 0, &frp->free_rtn);
1734					if (mp == NULL) {
1735						kmem_free(frp, sizeof (*frp));
1736						dnet_rbuf_free((caddr_t)newbuf);
1737						frp = NULL;
1738						newbuf = NULL;
1739					}
1740				}
1741			}
1742			if (mp == NULL) {
1743				if (newbuf != NULL)
1744					dnet_rbuf_free((caddr_t)newbuf);
1745				mp = allocb(packet_length, 0);
1746			}
1747		}
1748
1749		if ((desc[index].desc0.err_summary &&
1750		    packet_length >= rx_buf_size) || mp == NULL) {
1751
1752			/* Update gld statistics */
1753			if (desc[index].desc0.err_summary)
1754				update_rx_stats(dnetp, index);
1755			else
1756				dnetp->stat_norcvbuf++;
1757
1758			/*
1759			 * Reset ownership of the descriptor.
1760			 */
1761			desc[index].desc0.own = 1;
1762			dnetp->rx_current_desc =
1763			    (dnetp->rx_current_desc+1) % dnetp->max_rx_desc;
1764
1765			/* Demand receive polling by the chip */
1766			ddi_put32(dnetp->io_handle,
1767			    REG32(dnetp->io_reg, RX_POLL_REG), RX_POLL_DEMAND);
1768
1769			continue;
1770		}
1771
1772		if (newbuf != NULL) {
1773			uint32_t end_paddr;
1774			/* attach the new buffer to the rx descriptor */
1775			dnetp->rx_buf_vaddr[index] = newbuf;
1776			dnetp->rx_buf_paddr[index] = rp->rbuf_paddr;
1777			desc[index].buffer1 = rp->rbuf_paddr;
1778			desc[index].desc1.buffer_size1 = rx_buf_size;
1779			desc[index].desc1.buffer_size2 = 0;
1780			end_paddr = rp->rbuf_endpaddr;
1781			if ((desc[index].buffer1 & ~dnetp->pgmask) !=
1782			    (end_paddr & ~dnetp->pgmask)) {
1783				/* discontiguous */
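				/*
				 * e.g. with 4KB pages, if the last byte of a
				 * 1524-byte buffer falls 99 bytes into the
				 * next page, buffer_size2 becomes 100 and
				 * buffer_size1 becomes 1424.
				 */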
1784				desc[index].buffer2 = end_paddr&~dnetp->pgmask;
1785				desc[index].desc1.buffer_size2 =
1786				    (end_paddr & dnetp->pgmask) + 1;
1787				desc[index].desc1.buffer_size1 =
1788				    rx_buf_size-desc[index].desc1.buffer_size2;
1789			}
1790		} else {
1791			/* couldn't allocate another buffer; copy the data */
1792			BCOPY((caddr_t)virtual_address, (caddr_t)mp->b_wptr,
1793			    packet_length);
1794		}
1795
1796		mp->b_wptr += packet_length;
1797
1798		desc[dnetp->rx_current_desc].desc0.own = 1;
1799
1800		/*
1801		 * Increment the receive descriptor index so that the next
1802		 * packet is processed on the next pass through the loop.
1803		 */
1804		dnetp->rx_current_desc =
1805		    (dnetp->rx_current_desc+1) % dnetp->max_rx_desc;
1806
1807		/* Demand polling by chip */
1808		ddi_put32(dnetp->io_handle,
1809		    REG32(dnetp->io_reg, RX_POLL_REG), RX_POLL_DEMAND);
1810
1811		/* send the packet upstream */
1812		mutex_exit(&dnetp->intrlock);
1813		mac_rx(dnetp->mac_handle, NULL, mp);
1814		mutex_enter(&dnetp->intrlock);
1815	}
1816}
1817/*
1818 * Function to update receive statistics
1819 */
1820static void
1821update_rx_stats(struct dnetinstance *dnetp, int index)
1822{
1823	struct rx_desc_type *descp = &(dnetp->rx_desc[index]);
1824
1825	/*
1826	 * Update gld statistics
1827	 */
1828	dnetp->stat_errrcv++;
1829
1830	if (descp->desc0.overflow)	{
1831		/* FIFO Overrun */
1832		dnetp->stat_overflow++;
1833	}
1834
1835	if (descp->desc0.collision) {
1836		/*EMPTY*/
1837		/* Late collision on receive */
1838		/* no appropriate counter */
1839	}
1840
1841	if (descp->desc0.crc) {
1842		/* CRC Error */
1843		dnetp->stat_crc++;
1844	}
1845
1846	if (descp->desc0.runt_frame) {
1847		/* Runt Error */
1848		dnetp->stat_short++;
1849	}
1850
1851	if (descp->desc0.desc_err) {
1852		/*EMPTY*/
1853		/* Not enough receive descriptors */
1854		/* This condition is accounted in dnet_intr() */
1855	}
1856
1857	if (descp->desc0.frame2long) {
1858		dnetp->stat_frame++;
1859	}
1860}
1861
1862/*
1863 * Function to update transmit statistics
1864 */
1865static void
1866update_tx_stats(struct dnetinstance *dnetp, int index)
1867{
1868	struct tx_desc_type *descp = &(dnetp->tx_desc[index]);
1869	int	fd;
1870	media_block_t	*block = dnetp->selected_media_block;
1871
1872
1873	/* Update gld statistics */
1874	dnetp->stat_errxmt++;
1875
1876	/* If we're in full-duplex don't count collisions or carrier loss. */
1877	if (dnetp->mii_up) {
1878		fd = dnetp->mii_duplex;
1879	} else {
1880		/* Rely on media code */
1881		fd = block->media_code == MEDIA_TP_FD ||
1882		    block->media_code == MEDIA_SYM_SCR_FD;
1883	}
1884
1885	if (descp->desc0.collision_count && !fd) {
1886		dnetp->stat_collisions += descp->desc0.collision_count;
1887	}
1888
1889	if (descp->desc0.late_collision && !fd) {
1890		dnetp->stat_xmtlatecoll++;
1891	}
1892
1893	if (descp->desc0.excess_collision && !fd) {
1894		dnetp->stat_excoll++;
1895	}
1896
1897	if (descp->desc0.underflow) {
1898		dnetp->stat_underflow++;
1899	}
1900
1901#if 0
1902	if (descp->desc0.tx_jabber_to) {
1903		/* no appropriate counter */
1904	}
1905#endif
1906
1907	if (descp->desc0.carrier_loss && !fd) {
1908		dnetp->stat_nocarrier++;
1909	}
1910
1911	if (descp->desc0.no_carrier && !fd) {
1912		dnetp->stat_nocarrier++;
1913	}
1914}
1915
1916/*
1917 *	========== Media Selection Setup Routines ==========
1918 */
1919
1920
1921static void
1922write_gpr(struct dnetinstance *dnetp, uint32_t val)
1923{
1924#ifdef DEBUG
1925	if (dnetdebug & DNETREGCFG)
1926		cmn_err(CE_NOTE, "GPR: %x", val);
1927#endif
1928	switch (dnetp->board_type) {
1929	case DEVICE_ID_21143:
1930		/* Set the correct bit for a control write */
1931		if (val & GPR_CONTROL_WRITE)
1932			val |= CWE_21143, val &= ~GPR_CONTROL_WRITE;
1933		/* Write to upper half of CSR15 */
1934		dnetp->gprsia = (dnetp->gprsia & 0xffff) | (val << 16);
1935		ddi_put32(dnetp->io_handle,
1936		    REG32(dnetp->io_reg, SIA_GENERAL_REG), dnetp->gprsia);
1937		break;
1938	default:
1939		/* Set the correct bit for a control write */
1940		if (val & GPR_CONTROL_WRITE)
1941			val |= CWE_21140, val &= ~GPR_CONTROL_WRITE;
1942		ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, GP_REG), val);
1943		break;
1944	}
1945}
1946
1947static uint32_t
1948read_gpr(struct dnetinstance *dnetp)
1949{
1950	switch (dnetp->board_type) {
1951	case DEVICE_ID_21143:
1952		/* Read upper half of CSR15 */
1953		return (ddi_get32(dnetp->io_handle,
1954		    REG32(dnetp->io_reg, SIA_GENERAL_REG)) >> 16);
1955	default:
1956		return (ddi_get32(dnetp->io_handle,
1957		    REG32(dnetp->io_reg, GP_REG)));
1958	}
1959}
1960
1961static void
1962set_gpr(struct dnetinstance *dnetp)
1963{
1964	uint32_t *sequence;
1965	int len;
1966	LEAF_FORMAT *leaf = &dnetp->sr.leaf[dnetp->leaf];
1967	media_block_t *block = dnetp->selected_media_block;
1968	int i;
1969
1970	if (ddi_getlongprop(DDI_DEV_T_ANY, dnetp->devinfo,
1971	    DDI_PROP_DONTPASS, "gpr-sequence", (caddr_t)&sequence,
1972	    &len) == DDI_PROP_SUCCESS) {
1973		for (i = 0; i < len / sizeof (uint32_t); i++)
1974			write_gpr(dnetp, sequence[i]);
1975		kmem_free(sequence, len);
1976	} else {
1977		/*
1978		 * Write the reset sequence if this is the first time this
1979		 * block has been selected.
1980		 */
1981		if (block->rstseqlen) {
1982			for (i = 0; i < block->rstseqlen; i++)
1983				write_gpr(dnetp, block->rstseq[i]);
1984			/*
1985			 * XXX Legacy blocks do not have reset sequences, so the
1986			 * static blocks will never be modified by this
1987			 */
1988			block->rstseqlen = 0;
1989		}
1990		if (leaf->gpr)
1991			write_gpr(dnetp, leaf->gpr | GPR_CONTROL_WRITE);
1992
1993		/* write GPR sequence each time */
1994		for (i = 0; i < block->gprseqlen; i++)
1995			write_gpr(dnetp, block->gprseq[i]);
1996	}
1997
1998	/* This has possibly caused a PHY to reset.  Let MII know */
1999	if (dnetp->phyaddr != -1)
2000		/* XXX function return value ignored */
2001		(void) mii_sync(dnetp->mii, dnetp->phyaddr);
2002	drv_usecwait(5);
2003}
2004
2005/* set_opr() - must be called with intrlock held */
2006
2007static void
2008set_opr(struct dnetinstance *dnetp)
2009{
2010	uint32_t fd, mb1, sf;
2011
2012	int 		opnmode_len;
2013	uint32_t val;
2014	media_block_t *block = dnetp->selected_media_block;
2015
2016	ASSERT(block);
2017
2018	/* Check for custom "opnmode_reg" property */
2019	opnmode_len = sizeof (val);
2020	if (ddi_prop_op(DDI_DEV_T_ANY, dnetp->devinfo,
2021	    PROP_LEN_AND_VAL_BUF, DDI_PROP_DONTPASS, "opnmode_reg",
2022	    (caddr_t)&val, &opnmode_len) != DDI_PROP_SUCCESS)
2023		opnmode_len = 0;
2024
2025	/* Some bits exist only on 21140 and greater */
2026	if (dnetp->board_type != DEVICE_ID_21040 &&
2027	    dnetp->board_type != DEVICE_ID_21041) {
2028		mb1 = OPN_REG_MB1;
2029		sf = STORE_AND_FORWARD;
2030	} else {
2031		sf = 0;
2032		mb1 = OPN_REG_MB1; /* Needed for 21040? */
2033	}
2034
2035	if (opnmode_len) {
2036		ddi_put32(dnetp->io_handle,
2037		    REG32(dnetp->io_reg, OPN_MODE_REG), val);
2038		dnet_reset_board(dnetp);
2039		ddi_put32(dnetp->io_handle,
2040		    REG32(dnetp->io_reg, OPN_MODE_REG), val);
2041		return;
2042	}
2043
2044	/*
2045	 * Set each bit in CSR6 that we want
2046	 */
2047
2048	/* Always want these bits set */
2049	val = HASH_FILTERING | HASH_ONLY | TX_THRESHOLD_160 | mb1 | sf;
2050
2051	/* Promiscuous mode */
2052	val |= dnetp->promisc ? PROM_MODE : 0;
2053
2054	/* Scrambler for SYM style media */
2055	val |= ((block->command & CMD_SCR) && !dnetp->disable_scrambler) ?
2056	    SCRAMBLER_MODE : 0;
2057
2058	/* Full duplex */
2059	if (dnetp->mii_up) {
2060		fd = dnetp->mii_duplex;
2061	} else {
2062		/* Rely on media code */
2063		fd = block->media_code == MEDIA_TP_FD ||
2064		    block->media_code == MEDIA_SYM_SCR_FD;
2065	}
2066
2067	/* Port select (and therefore, heartbeat disable) */
2068	val |= block->command & CMD_PS ? (PORT_SELECT | HEARTBEAT_DISABLE) : 0;
2069
2070	/* PCS function */
2071	val |= (block->command) & CMD_PCS ? PCS_FUNCTION : 0;
2072	val |= fd ? FULL_DUPLEX : 0;
2073
2074#ifdef DNETDEBUG
2075	if (dnetdebug & DNETREGCFG)
2076		cmn_err(CE_NOTE, "OPN: %x", val);
2077#endif
2078	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG), val);
2079	dnet_reset_board(dnetp);
2080	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG), val);
2081}
2082
2083static void
2084set_sia(struct dnetinstance *dnetp)
2085{
2086	media_block_t *block = dnetp->selected_media_block;
2087
2088	ASSERT(MUTEX_HELD(&dnetp->intrlock));
2089	if (block->type == 2) {
2090		int sia_delay;
2091#ifdef DNETDEBUG
2092		if (dnetdebug & DNETREGCFG)
2093			cmn_err(CE_NOTE,
2094			    "SIA: CSR13: %x, CSR14: %x, CSR15: %x",
2095			    block->un.sia.csr13,
2096			    block->un.sia.csr14,
2097			    block->un.sia.csr15);
2098#endif
2099		sia_delay = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
2100		    DDI_PROP_DONTPASS, "sia-delay", 10000);
2101
2102		ddi_put32(dnetp->io_handle,
2103		    REG32(dnetp->io_reg, SIA_CONNECT_REG), 0);
2104
2105		ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, SIA_TXRX_REG),
2106		    block->un.sia.csr14);
2107
2108		/*
2109		 * For '143, we need to write through a copy of the register
2110		 * to keep the GP half intact
2111		 */
2112		dnetp->gprsia = (dnetp->gprsia&0xffff0000)|block->un.sia.csr15;
2113		ddi_put32(dnetp->io_handle,
2114		    REG32(dnetp->io_reg, SIA_GENERAL_REG),
2115		    dnetp->gprsia);
2116
2117		ddi_put32(dnetp->io_handle,
2118		    REG32(dnetp->io_reg, SIA_CONNECT_REG),
2119		    block->un.sia.csr13);
2120
2121		drv_usecwait(sia_delay);
2122
2123	} else if (dnetp->board_type != DEVICE_ID_21140) {
2124		ddi_put32(dnetp->io_handle,
2125		    REG32(dnetp->io_reg, SIA_CONNECT_REG), 0);
2126		ddi_put32(dnetp->io_handle,
2127		    REG32(dnetp->io_reg, SIA_TXRX_REG), 0);
2128	}
2129}
2130
2131/*
2132 * This function (re)allocates the receive and transmit buffers and
2133 * descriptors.  It can be called more than once per instance, though
2134 * currently it is only called from attach.  It should only be called
2135 * while the device is reset.
2136 */
2137static int
2138dnet_alloc_bufs(struct dnetinstance *dnetp)
2139{
2140	int i;
2141	size_t len;
2142	int page_size;
2143	int realloc = 0;
2144	int nrecv_desc_old = 0;
2145	ddi_dma_cookie_t cookie;
2146	uint_t ncookies;
2147
2148	/*
2149	 * check if we are trying to reallocate with different xmit/recv
2150	 * descriptor ring sizes.
2151	 */
2152	if ((dnetp->tx_desc != NULL) &&
2153	    (dnetp->nxmit_desc != dnetp->max_tx_desc))
2154		realloc = 1;
2155
2156	if ((dnetp->rx_desc != NULL) &&
2157	    (dnetp->nrecv_desc != dnetp->max_rx_desc))
2158		realloc = 1;
2159
2160	/* free up the old buffers if we are reallocating them */
2161	if (realloc) {
2162		nrecv_desc_old = dnetp->nrecv_desc;
2163		dnet_free_bufs(dnetp); /* free the old buffers */
2164	}
2165
2166	if (dnetp->dma_handle == NULL)
2167		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr,
2168		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle) != DDI_SUCCESS)
2169			return (FAILURE);
2170
2171	if (dnetp->dma_handle_tx == NULL)
2172		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr_tx,
2173		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle_tx) != DDI_SUCCESS)
2174			return (FAILURE);
2175
2176	if (dnetp->dma_handle_txdesc == NULL)
2177		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr,
2178		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle_txdesc) != DDI_SUCCESS)
2179			return (FAILURE);
2180
2181	if (dnetp->dma_handle_setbuf == NULL)
2182		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr,
2183		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle_setbuf) != DDI_SUCCESS)
2184			return (FAILURE);
2185
2186	page_size = ddi_ptob(dnetp->devinfo, 1);
2187
2188	dnetp->pgmask = page_size - 1;
2189
2190	/* allocate setup buffer if necessary */
2191	if (dnetp->setup_buf_vaddr == NULL) {
2192		if (ddi_dma_mem_alloc(dnetp->dma_handle_setbuf,
2193		    SETUPBUF_SIZE, &accattr, DDI_DMA_STREAMING,
2194		    DDI_DMA_DONTWAIT, 0, (caddr_t *)&dnetp->setup_buf_vaddr,
2195		    &len, &dnetp->setup_buf_acchdl) != DDI_SUCCESS)
2196			return (FAILURE);
2197
2198		if (ddi_dma_addr_bind_handle(dnetp->dma_handle_setbuf,
2199		    NULL, dnetp->setup_buf_vaddr, SETUPBUF_SIZE,
2200		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
2201		    NULL, &cookie, &ncookies) != DDI_DMA_MAPPED)
2202			return (FAILURE);
2203
2204		dnetp->setup_buf_paddr = cookie.dmac_address;
2205		bzero(dnetp->setup_buf_vaddr, len);
2206	}
2207
2208	/* allocate xmit descriptor array of size dnetp->max_tx_desc */
2209	if (dnetp->tx_desc == NULL) {
2210		if (ddi_dma_mem_alloc(dnetp->dma_handle_txdesc,
2211		    sizeof (struct tx_desc_type) * dnetp->max_tx_desc,
2212		    &accattr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
2213		    (caddr_t *)&dnetp->tx_desc, &len,
2214		    &dnetp->tx_desc_acchdl) != DDI_SUCCESS)
2215			return (FAILURE);
2216
2217		if (ddi_dma_addr_bind_handle(dnetp->dma_handle_txdesc,
2218		    NULL, (caddr_t)dnetp->tx_desc,
2219		    sizeof (struct tx_desc_type) * dnetp->max_tx_desc,
2220		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
2221		    NULL, &cookie, &ncookies) != DDI_DMA_MAPPED)
2222			return (FAILURE);
2223		dnetp->tx_desc_paddr = cookie.dmac_address;
2224		bzero(dnetp->tx_desc, len);
2225		dnetp->nxmit_desc = dnetp->max_tx_desc;
2226
2227		dnetp->tx_msgbufp =
2228		    kmem_zalloc(dnetp->max_tx_desc * sizeof (mblk_t **),
2229		    KM_SLEEP);
2230	}
2231
2232	/* allocate receive descriptor array of size dnetp->max_rx_desc */
2233	if (dnetp->rx_desc == NULL) {
2234		int ndesc;
2235
2236		if (ddi_dma_mem_alloc(dnetp->dma_handle,
2237		    sizeof (struct rx_desc_type) * dnetp->max_rx_desc,
2238		    &accattr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
2239		    (caddr_t *)&dnetp->rx_desc, &len,
2240		    &dnetp->rx_desc_acchdl) != DDI_SUCCESS)
2241			return (FAILURE);
2242
2243		if (ddi_dma_addr_bind_handle(dnetp->dma_handle,
2244		    NULL, (caddr_t)dnetp->rx_desc,
2245		    sizeof (struct rx_desc_type) * dnetp->max_rx_desc,
2246		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
2247		    NULL, &cookie, &ncookies) != DDI_DMA_MAPPED)
2248			return (FAILURE);
2249
2250		dnetp->rx_desc_paddr = cookie.dmac_address;
2251		bzero(dnetp->rx_desc, len);
2252		dnetp->nrecv_desc = dnetp->max_rx_desc;
2253
2254		dnetp->rx_buf_vaddr =
2255		    kmem_zalloc(dnetp->max_rx_desc * sizeof (caddr_t),
2256		    KM_SLEEP);
2257		dnetp->rx_buf_paddr =
2258		    kmem_zalloc(dnetp->max_rx_desc * sizeof (uint32_t),
2259		    KM_SLEEP);
2260		/*
2261		 * Allocate or add to the pool of receive buffers.  The pool
2262		 * is shared among all instances of dnet.
2263		 *
2264		 * XXX NEEDSWORK
2265		 *
2266		 * We arbitrarily allocate twice as many receive buffers as
2267		 * receive descriptors because we use the buffers for streams
2268		 * messages to pass the packets up the stream.  We should
2269		 * instead have initialized constants reflecting
2270		 * MAX_RX_BUF_2104x and MAX_RX_BUF_2114x, and we should also
2271		 * probably have a total maximum for the free pool, so that we
2272		 * don't get out of hand when someone puts in an 8-port board.
2273		 * The maximum for the entire pool should be the total number
2274		 * of descriptors for all attached instances together, plus the
2275		 * total maximum for the free pool.  This maximum would only be
2276		 * reached after some number of instances allocate buffers:
2277		 * each instance would add (max_rx_buf-max_rx_desc) to the free
2278		 * pool.
2279		 */
2280		ndesc = dnetp->max_rx_desc - nrecv_desc_old;
2281		if ((ndesc > 0) &&
2282		    (dnet_rbuf_init(dnetp->devinfo, ndesc * 2) != 0))
2283			return (FAILURE);
2284
2285		for (i = 0; i < dnetp->max_rx_desc; i++) {
2286			struct rbuf_list *rp;
2287
2288			rp = dnet_rbuf_alloc(dnetp->devinfo, 1);
2289			if (rp == NULL)
2290				return (FAILURE);
2291			dnetp->rx_buf_vaddr[i] = rp->rbuf_vaddr;
2292			dnetp->rx_buf_paddr[i] = rp->rbuf_paddr;
2293		}
2294	}
2295
2296	return (SUCCESS);
2297}
2298/*
2299 * free descriptors/buffers allocated for this device instance.  This routine
2300 * should only be called while the device is reset.
2301 */
2302static void
2303dnet_free_bufs(struct dnetinstance *dnetp)
2304{
2305	int i;
2306	/* free up any xmit descriptors/buffers */
2307	if (dnetp->tx_desc != NULL) {
2308		ddi_dma_mem_free(&dnetp->tx_desc_acchdl);
2309		dnetp->tx_desc = NULL;
2310		/* we use streams buffers for DMA in xmit process */
2311		if (dnetp->tx_msgbufp != NULL) {
2312			/* free up any streams message buffers unclaimed */
2313			for (i = 0; i < dnetp->nxmit_desc; i++) {
2314				if (dnetp->tx_msgbufp[i] != NULL) {
2315					freemsg(dnetp->tx_msgbufp[i]);
2316				}
2317			}
2318			kmem_free(dnetp->tx_msgbufp,
2319			    dnetp->nxmit_desc * sizeof (mblk_t **));
2320			dnetp->tx_msgbufp = NULL;
2321		}
2322		dnetp->nxmit_desc = 0;
2323	}
2324
2325	/* free up any receive descriptors/buffers */
2326	if (dnetp->rx_desc != NULL) {
2327		ddi_dma_mem_free(&dnetp->rx_desc_acchdl);
2328		dnetp->rx_desc = NULL;
2329		if (dnetp->rx_buf_vaddr != NULL) {
2330			/* free up the attached rbufs if any */
2331			for (i = 0; i < dnetp->nrecv_desc; i++) {
2332				if (dnetp->rx_buf_vaddr[i])
2333					dnet_rbuf_free(
2334					    (caddr_t)dnetp->rx_buf_vaddr[i]);
2335			}
2336			kmem_free(dnetp->rx_buf_vaddr,
2337			    dnetp->nrecv_desc * sizeof (caddr_t));
2338			kmem_free(dnetp->rx_buf_paddr,
2339			    dnetp->nrecv_desc * sizeof (uint32_t));
2340			dnetp->rx_buf_vaddr = NULL;
2341			dnetp->rx_buf_paddr = NULL;
2342		}
2343		dnetp->nrecv_desc = 0;
2344	}
2345
2346	if (dnetp->setup_buf_vaddr != NULL) {
2347		ddi_dma_mem_free(&dnetp->setup_buf_acchdl);
2348		dnetp->setup_buf_vaddr = NULL;
2349	}
2350
2351	if (dnetp->dma_handle != NULL) {
2352		(void) ddi_dma_unbind_handle(dnetp->dma_handle);
2353		ddi_dma_free_handle(&dnetp->dma_handle);
2354		dnetp->dma_handle = NULL;
2355	}
2356
2357	if (dnetp->dma_handle_tx != NULL) {
2358		(void) ddi_dma_unbind_handle(dnetp->dma_handle_tx);
2359		ddi_dma_free_handle(&dnetp->dma_handle_tx);
2360		dnetp->dma_handle_tx = NULL;
2361	}
2362
2363	if (dnetp->dma_handle_txdesc != NULL) {
2364		(void) ddi_dma_unbind_handle(dnetp->dma_handle_txdesc);
2365		ddi_dma_free_handle(&dnetp->dma_handle_txdesc);
2366		dnetp->dma_handle_txdesc = NULL;
2367	}
2368
2369	if (dnetp->dma_handle_setbuf != NULL) {
2370		(void) ddi_dma_unbind_handle(dnetp->dma_handle_setbuf);
2371		ddi_dma_free_handle(&dnetp->dma_handle_setbuf);
2372		dnetp->dma_handle_setbuf = NULL;
2373	}
2374
2375}
2376
2377/*
2378 * Initialize transmit and receive descriptors.
2379 */
2380static void
2381dnet_init_txrx_bufs(struct dnetinstance *dnetp)
2382{
2383	int		i;
2384
2385	/*
2386	 * Initialize all the Tx descriptors
2387	 */
2388	for (i = 0; i < dnetp->nxmit_desc; i++) {
2389		/*
2390		 * We may be resetting the device due to errors,
2391		 * so free up any streams message buffer unclaimed.
2392		 */
2393		if (dnetp->tx_msgbufp[i] != NULL) {
2394			freemsg(dnetp->tx_msgbufp[i]);
2395			dnetp->tx_msgbufp[i] = NULL;
2396		}
2397		*(uint32_t *)&dnetp->tx_desc[i].desc0 = 0;
2398		*(uint32_t *)&dnetp->tx_desc[i].desc1 = 0;
2399		dnetp->tx_desc[i].buffer1 = 0;
2400		dnetp->tx_desc[i].buffer2 = 0;
2401	}
2402	dnetp->tx_desc[i - 1].desc1.end_of_ring = 1;
2403
2404	/*
2405	 * Initialize the Rx descriptors
2406	 */
2407	for (i = 0; i < dnetp->nrecv_desc; i++) {
2408		uint32_t end_paddr;
2409		*(uint32_t *)&dnetp->rx_desc[i].desc0 = 0;
2410		*(uint32_t *)&dnetp->rx_desc[i].desc1 = 0;
2411		dnetp->rx_desc[i].desc0.own = 1;
2412		dnetp->rx_desc[i].desc1.buffer_size1 = rx_buf_size;
2413		dnetp->rx_desc[i].buffer1 = dnetp->rx_buf_paddr[i];
2414		dnetp->rx_desc[i].buffer2 = 0;
2415		end_paddr = dnetp->rx_buf_paddr[i]+rx_buf_size-1;
2416
2417		if ((dnetp->rx_desc[i].buffer1 & ~dnetp->pgmask) !=
2418		    (end_paddr & ~dnetp->pgmask)) {
2419			/* discontiguous */
2420			dnetp->rx_desc[i].buffer2 = end_paddr&~dnetp->pgmask;
2421			dnetp->rx_desc[i].desc1.buffer_size2 =
2422			    (end_paddr & dnetp->pgmask) + 1;
2423			dnetp->rx_desc[i].desc1.buffer_size1 =
2424			    rx_buf_size-dnetp->rx_desc[i].desc1.buffer_size2;
2425		}
2426	}
2427	dnetp->rx_desc[i - 1].desc1.end_of_ring = 1;
2428}
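
/*
 * Worked example of the page-boundary split performed above (a sketch with
 * assumed numbers, not taken from real hardware): with a 4 KB page
 * (pgmask == 0xFFF), a 0x600-byte receive buffer at physical address
 * 0xFC00 ends at 0x101FF and so crosses a page boundary.  The descriptor
 * is then programmed with buffer2 = 0x101FF & ~0xFFF = 0x10000,
 * buffer_size2 = (0x101FF & 0xFFF) + 1 = 0x200, and buffer_size1 =
 * 0x600 - 0x200 = 0x400, so buffer1 covers 0xFC00-0xFFFF and buffer2
 * covers 0x10000-0x101FF.
 */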
2429
2430static int
2431alloc_descriptor(struct dnetinstance *dnetp)
2432{
2433	int index;
2434	struct tx_desc_type    *ring = dnetp->tx_desc;
2435
2436	ASSERT(MUTEX_HELD(&dnetp->intrlock));
2437alloctop:
2438	mutex_enter(&dnetp->txlock);
2439	index = dnetp->tx_current_desc;
2440
2441	dnet_reclaim_Tx_desc(dnetp);
2442
2443	/* we do have free descriptors, right? */
2444	if (dnetp->free_desc <= 0) {
2445#ifdef DNETDEBUG
2446		if (dnetdebug & DNETRECV)
2447			cmn_err(CE_NOTE, "dnet: Ring buffer is full");
2448#endif
2449		mutex_exit(&dnetp->txlock);
2450		return (FAILURE);
2451	}
2452
2453	/* sanity, make sure the next descriptor is free for use (should be) */
2454	if (ring[index].desc0.own) {
2455#ifdef DNETDEBUG
2456		if (dnetdebug & DNETRECV)
2457			cmn_err(CE_WARN,
2458			    "dnet: next descriptor is not free for use");
2459#endif
2460		mutex_exit(&dnetp->txlock);
2461		return (FAILURE);
2462	}
2463	if (dnetp->need_saddr) {
2464		mutex_exit(&dnetp->txlock);
2465		/* XXX function return value ignored */
2466		if (!dnetp->suspended)
2467			(void) dnet_set_addr(dnetp);
2468		goto alloctop;
2469	}
2470
2471	*(uint32_t *)&ring[index].desc0 = 0;  /* init descs */
2472	*(uint32_t *)&ring[index].desc1 &= DNET_END_OF_RING;
2473
2474	/* hardware will own this descriptor when poll activated */
2475	dnetp->free_desc--;
2476
2477	/* point to next free descriptor to be used */
2478	dnetp->tx_current_desc = NextTXIndex(index);
2479
2480#ifdef DNET_NOISY
2481	cmn_err(CE_WARN, "sfree 0x%x, transmitted 0x%x, tx_current 0x%x",
2482	    dnetp->free_desc, dnetp->transmitted_desc, dnetp->tx_current_desc);
2483#endif
2484	mutex_exit(&dnetp->txlock);
2485	return (SUCCESS);
2486}
2487
2488/*
2489 * dnet_reclaim_Tx_desc() - called with txlock held.
2490 */
2491static void
2492dnet_reclaim_Tx_desc(struct dnetinstance *dnetp)
2493{
2494	struct tx_desc_type	*desc = dnetp->tx_desc;
2495	int index;
2496
2497	ASSERT(MUTEX_HELD(&dnetp->txlock));
2498
2499	index = dnetp->transmitted_desc;
2500	while (((dnetp->free_desc == 0) || (index != dnetp->tx_current_desc)) &&
2501	    !(desc[index].desc0.own)) {
2502		/*
2503		 * Check for Tx Error that gets set
2504		 * in the last desc.
2505		 */
2506		if (desc[index].desc1.setup_packet == 0 &&
2507		    desc[index].desc1.last_desc &&
2508		    desc[index].desc0.err_summary)
2509			update_tx_stats(dnetp, index);
2510
2511		/*
2512		 * If we have used the streams message buffer for this
2513		 * descriptor then free up the message now.
2514		 */
2515		if (dnetp->tx_msgbufp[index] != NULL) {
2516			freemsg(dnetp->tx_msgbufp[index]);
2517			dnetp->tx_msgbufp[index] = NULL;
2518		}
2519		dnetp->free_desc++;
2520		index = (index+1) % dnetp->max_tx_desc;
2521	}
2522
2523	dnetp->transmitted_desc = index;
2524}
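
#if 0
/*
 * Illustrative sketch (not compiled into the driver): the ring-index
 * bookkeeping used by alloc_descriptor() and dnet_reclaim_Tx_desc() above,
 * reduced to its core.  The function name and the owned_by_chip[] array
 * are hypothetical stand-ins for the descriptor "own" bits; the real
 * routine also harvests error statistics and frees streams messages.
 */
static void
reclaim_ring_sketch(int *consumer, int producer, int *nfree, int ring_size,
    const int *owned_by_chip)
{
	int i = *consumer;

	/* Walk forward until we catch up with the producer or the chip */
	while ((*nfree == 0 || i != producer) && !owned_by_chip[i]) {
		(*nfree)++;
		i = (i + 1) % ring_size;
	}
	*consumer = i;
}
#endif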
2525
2526/*
2527 * Receive buffer allocation/freeing routines.
2528 *
2529 * There is a common pool of receive buffers shared by all dnet instances.
2530 *
2531 * XXX NEEDSWORK
2532 *
2533 * We arbitrarily allocate twice as many receive buffers as
2534 * receive descriptors because we use the buffers for streams
2535 * messages to pass the packets up the stream.  We should
2536 * instead have initialized constants reflecting
2537 * MAX_RX_BUF_2104x and MAX_RX_BUF_2114x, and we should also
2538 * probably have a total maximum for the free pool, so that we
2539 * don't get out of hand when someone puts in an 8-port board.
2540 * The maximum for the entire pool should be the total number
2541 * of descriptors for all attached instances together, plus the
2542 * total maximum for the free pool.  This maximum would only be
2543 * reached after some number of instances allocate buffers:
2544 * each instance would add (max_rx_buf-max_rx_desc) to the free
2545 * pool.
2546 */
2547
2548static struct rbuf_list *rbuf_usedlist_head;
2549static struct rbuf_list *rbuf_freelist_head;
2550static struct rbuf_list *rbuf_usedlist_end;	/* last buffer allocated */
2551
2552static int rbuf_freebufs;	/* no. of free buffers in the pool */
2553static int rbuf_pool_size;	/* total no. of buffers in the pool */
2554
2555/* initialize/add 'nbufs' buffers to the rbuf pool */
2556/* ARGSUSED */
2557static int
2558dnet_rbuf_init(dev_info_t *dip, int nbufs)
2559{
2560	int i;
2561	struct rbuf_list *rp;
2562	ddi_dma_cookie_t cookie;
2563	uint_t ncookies;
2564	size_t len;
2565
2566	mutex_enter(&dnet_rbuf_lock);
2567
2568	/* allocate buffers and add them to the pool */
2569	for (i = 0; i < nbufs; i++) {
2570		/* allocate rbuf_list element */
2571		rp = kmem_zalloc(sizeof (struct rbuf_list), KM_SLEEP);
2572		if (ddi_dma_alloc_handle(dip, &dma_attr_rb, DDI_DMA_SLEEP,
2573		    0, &rp->rbuf_dmahdl) != DDI_SUCCESS)
2574			goto fail_kfree;
2575
2576		/* allocate dma memory for the buffer */
2577		if (ddi_dma_mem_alloc(rp->rbuf_dmahdl, rx_buf_size, &accattr,
2578		    DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
2579		    &rp->rbuf_vaddr, &len,
2580		    &rp->rbuf_acchdl) != DDI_SUCCESS)
2581			goto fail_freehdl;
2582
2583		if (ddi_dma_addr_bind_handle(rp->rbuf_dmahdl, NULL,
2584		    rp->rbuf_vaddr, len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
2585		    DDI_DMA_SLEEP, NULL, &cookie,
2586		    &ncookies) != DDI_DMA_MAPPED)
2587			goto fail_free;
2588
2589		if (ncookies > 2)
2590			goto fail_unbind;
2591		if (ncookies == 1) {
2592			rp->rbuf_endpaddr =
2593			    cookie.dmac_address + rx_buf_size - 1;
2594		} else {
2595			ddi_dma_nextcookie(rp->rbuf_dmahdl, &cookie);
2596			rp->rbuf_endpaddr =
2597			    cookie.dmac_address + cookie.dmac_size - 1;
2598		}
2599		rp->rbuf_paddr = cookie.dmac_address;
2600
2601		rp->rbuf_next = rbuf_freelist_head;
2602		rbuf_freelist_head = rp;
2603		rbuf_pool_size++;
2604		rbuf_freebufs++;
2605	}
2606
2607	mutex_exit(&dnet_rbuf_lock);
2608	return (0);
2609fail_unbind:
2610	(void) ddi_dma_unbind_handle(rp->rbuf_dmahdl);
2611fail_free:
2612	ddi_dma_mem_free(&rp->rbuf_acchdl);
2613fail_freehdl:
2614	ddi_dma_free_handle(&rp->rbuf_dmahdl);
2615fail_kfree:
2616	kmem_free(rp, sizeof (struct rbuf_list));
2617
2618	mutex_exit(&dnet_rbuf_lock);
2619	return (-1);
2620}
2621
2622/*
2623 * Try to free up all the rbufs in the pool. Returns 0 if it frees up all
2624 * buffers. The buffers in the used list are considered busy so these
2625 * buffers are not freed.
2626 */
2627static int
2628dnet_rbuf_destroy()
2629{
2630	struct rbuf_list *rp, *next;
2631
2632	mutex_enter(&dnet_rbuf_lock);
2633
2634	for (rp = rbuf_freelist_head; rp; rp = next) {
2635		next = rp->rbuf_next;
2636		(void) ddi_dma_unbind_handle(rp->rbuf_dmahdl);
2637		ddi_dma_mem_free(&rp->rbuf_acchdl);
		ddi_dma_free_handle(&rp->rbuf_dmahdl);
2638		kmem_free(rp, sizeof (struct rbuf_list));
2639		rbuf_pool_size--;
2640		rbuf_freebufs--;
2641	}
2642	rbuf_freelist_head = NULL;
2643
2644	if (rbuf_pool_size) { /* pool is still not empty */
2645		mutex_exit(&dnet_rbuf_lock);
2646		return (-1);
2647	}
2648	mutex_exit(&dnet_rbuf_lock);
2649	return (0);
2650}
2651static struct rbuf_list *
2652dnet_rbuf_alloc(dev_info_t *dip, int cansleep)
2653{
2654	struct rbuf_list *rp;
2655	size_t len;
2656	ddi_dma_cookie_t cookie;
2657	uint_t ncookies;
2658
2659	mutex_enter(&dnet_rbuf_lock);
2660
2661	if (rbuf_freelist_head == NULL) {
2662
2663		if (!cansleep) {
2664			mutex_exit(&dnet_rbuf_lock);
2665			return (NULL);
2666		}
2667
2668		/* allocate rbuf_list element */
2669		rp = kmem_zalloc(sizeof (struct rbuf_list), KM_SLEEP);
2670		if (ddi_dma_alloc_handle(dip, &dma_attr_rb, DDI_DMA_SLEEP,
2671		    0, &rp->rbuf_dmahdl) != DDI_SUCCESS)
2672			goto fail_kfree;
2673
2674		/* allocate dma memory for the buffer */
2675		if (ddi_dma_mem_alloc(rp->rbuf_dmahdl, rx_buf_size, &accattr,
2676		    DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
2677		    &rp->rbuf_vaddr, &len,
2678		    &rp->rbuf_acchdl) != DDI_SUCCESS)
2679			goto fail_freehdl;
2680
2681		if (ddi_dma_addr_bind_handle(rp->rbuf_dmahdl, NULL,
2682		    rp->rbuf_vaddr, len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
2683		    DDI_DMA_SLEEP, NULL, &cookie,
2684		    &ncookies) != DDI_DMA_MAPPED)
2685			goto fail_free;
2686
2687		if (ncookies > 2)
2688			goto fail_unbind;
2689		if (ncookies == 1) {
2690			rp->rbuf_endpaddr =
2691			    cookie.dmac_address + rx_buf_size - 1;
2692		} else {
2693			ddi_dma_nextcookie(rp->rbuf_dmahdl, &cookie);
2694			rp->rbuf_endpaddr =
2695			    cookie.dmac_address + cookie.dmac_size - 1;
2696		}
2697		rp->rbuf_paddr = cookie.dmac_address;
2698
2699		rbuf_freelist_head = rp;
2700		rbuf_pool_size++;
2701		rbuf_freebufs++;
2702	}
2703
2704	/* take the buffer from the head of the free list */
2705	rp = rbuf_freelist_head;
2706	rbuf_freelist_head = rbuf_freelist_head->rbuf_next;
2707
2708	/* update the used list; put the entry at the end */
2709	if (rbuf_usedlist_head == NULL)
2710		rbuf_usedlist_head = rp;
2711	else
2712		rbuf_usedlist_end->rbuf_next = rp;
2713	rp->rbuf_next = NULL;
2714	rbuf_usedlist_end = rp;
2715	rbuf_freebufs--;
2716
2717	mutex_exit(&dnet_rbuf_lock);
2718
2719	return (rp);
2720fail_unbind:
2721	(void) ddi_dma_unbind_handle(rp->rbuf_dmahdl);
2722fail_free:
2723	ddi_dma_mem_free(&rp->rbuf_acchdl);
2724fail_freehdl:
2725	ddi_dma_free_handle(&rp->rbuf_dmahdl);
2726fail_kfree:
2727	kmem_free(rp, sizeof (struct rbuf_list));
2728	mutex_exit(&dnet_rbuf_lock);
2729	return (NULL);
2730}
2731
2732static void
2733dnet_rbuf_free(caddr_t vaddr)
2734{
2735	struct rbuf_list *rp, *prev;
2736
2737	ASSERT(vaddr != NULL);
2738	ASSERT(rbuf_usedlist_head != NULL);
2739
2740	mutex_enter(&dnet_rbuf_lock);
2741
2742	/* find the entry in the used list */
2743	for (prev = rp = rbuf_usedlist_head; rp; rp = rp->rbuf_next) {
2744		if (rp->rbuf_vaddr == vaddr)
2745			break;
2746		prev = rp;
2747	}
2748
2749	if (rp == NULL) {
2750		cmn_err(CE_WARN, "DNET: rbuf_free: bad addr 0x%p",
2751		    (void *)vaddr);
2752		mutex_exit(&dnet_rbuf_lock);
2753		return;
2754	}
2755
2756	/* update the used list and put the buffer back in the free list */
2757	if (rbuf_usedlist_head != rp) {
2758		prev->rbuf_next = rp->rbuf_next;
2759		if (rbuf_usedlist_end == rp)
2760			rbuf_usedlist_end = prev;
2761	} else {
2762		rbuf_usedlist_head = rp->rbuf_next;
2763		if (rbuf_usedlist_end == rp)
2764			rbuf_usedlist_end = NULL;
2765	}
2766	rp->rbuf_next = rbuf_freelist_head;
2767	rbuf_freelist_head = rp;
2768	rbuf_freebufs++;
2769
2770	mutex_exit(&dnet_rbuf_lock);
2771}
2772
2773/*
2774 * Free the receive buffer used in a streams message block allocated
2775 * through desballoc().
2776 */
2777static void
2778dnet_freemsg_buf(struct free_ptr *frp)
2779{
2780	dnet_rbuf_free((caddr_t)frp->buf); /* buffer goes back to the pool */
2781	kmem_free(frp, sizeof (*frp)); /* free up the free_rtn structure */
2782}
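
#if 0
/*
 * Illustrative sketch (not compiled into the driver): how a buffer from
 * the rbuf pool is typically wrapped in a streams message so that
 * dnet_freemsg_buf() above returns it to the pool once the stack frees
 * the mblk.  This mirrors the receive path earlier in this file; the
 * wrapper name and the simplified error handling are assumptions.
 */
static mblk_t *
wrap_rbuf_sketch(caddr_t vaddr, size_t length)
{
	struct free_ptr *frp;
	mblk_t *mp;

	frp = kmem_zalloc(sizeof (*frp), KM_NOSLEEP);
	if (frp == NULL)
		return (NULL);

	frp->free_rtn.free_func = (void (*)())dnet_freemsg_buf;
	frp->free_rtn.free_arg = (caddr_t)frp;
	frp->buf = vaddr;

	mp = desballoc((uchar_t *)vaddr, length, 0, &frp->free_rtn);
	if (mp == NULL)
		kmem_free(frp, sizeof (*frp));
	return (mp);
}
#endif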
2783
2784/*
2785 *	========== SROM Read Routines ==========
2786 */
2787
2788/*
2789 * The following code gets the SROM information, either by reading it
2790 * from the device or, failing that, by reading a property.
2791 */
2792static int
2793dnet_read_srom(dev_info_t *devinfo, int board_type, ddi_acc_handle_t io_handle,
2794    caddr_t io_reg, uchar_t *vi, int maxlen)
2795{
2796	int all_ones, zerocheck, i;
2797
2798	/*
2799	 * Load SROM into vendor_info
2800	 */
2801	if (board_type == DEVICE_ID_21040)
2802		dnet_read21040addr(devinfo, io_handle, io_reg, vi, &maxlen);
2803	else
2804		/* 21041/21140 serial rom */
2805		dnet_read21140srom(io_handle, io_reg, vi, maxlen);
2806	/*
2807	 * If the dumpsrom property is present in the conf file, print
2808	 * the contents of the SROM to the console
2809	 */
2810	if (ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
2811	    "dumpsrom", 0))
2812		dnet_dumpbin("SROM", vi, 1, maxlen);
2813
2814	for (zerocheck = i = 0, all_ones = 0xff; i < maxlen; i++) {
2815		zerocheck |= vi[i];
2816		all_ones &= vi[i];
2817	}
2818	if (zerocheck == 0 || all_ones == 0xff) {
2819		return (get_alternative_srom_image(devinfo, vi, maxlen));
2820	} else {
2821#ifdef BUG_4010796
2822		set_alternative_srom_image(devinfo, vi, maxlen);
2823#endif
2824		return (0);	/* Primary */
2825	}
2826}
2827
2828/*
2829 * The function reads the ethernet address of the 21040 adapter
2830 */
2831static void
2832dnet_read21040addr(dev_info_t *dip, ddi_acc_handle_t io_handle, caddr_t io_reg,
2833    uchar_t *addr, int *len)
2834{
2835	uint32_t	val;
2836	int		i;
2837
2838	/* No point reading more than the ethernet address */
2839	*len = ddi_getprop(DDI_DEV_T_ANY, dip,
2840	    DDI_PROP_DONTPASS, macoffset_propname, 0) + ETHERADDRL;
2841
2842	/* Reset ROM pointer */
2843	ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 0);
2844	for (i = 0; i < *len; i++) {
2845		do {
2846			val = ddi_get32(io_handle,
2847			    REG32(io_reg, ETHER_ROM_REG));
2848		} while (val & 0x80000000);
2849		addr[i] = val & 0xFF;
2850	}
2851}
2852
2853#define	drv_nsecwait(x)	drv_usecwait(((x)+999)/1000) /* XXX */
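
/*
 * For example, drv_nsecwait(250) expands to drv_usecwait((250 + 999) / 1000)
 * == drv_usecwait(1): the nanosecond delays requested by the SROM timing
 * sequence below are rounded up to at least a one-microsecond busy-wait.
 */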
2854
2855/*
2856 * This function reads the SROM of the 21041/21140 adapter
2857 */
2858static void
2859dnet_read21140srom(ddi_acc_handle_t io_handle, caddr_t io_reg, uchar_t *addr,
2860    int maxlen)
2861{
2862	uint32_t 	i, j;
2863	uint32_t	dout;
2864	uint16_t	word;
2865	uint8_t		rom_addr;
2866	uint8_t		bit;
2867
2868
2869	rom_addr = 0;
2870	for (i = 0; i <	maxlen; i += 2) {
2871		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2872		    READ_OP | SEL_ROM);
2873		drv_nsecwait(30);
2874		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2875		    READ_OP | SEL_ROM | SEL_CHIP);
2876		drv_nsecwait(50);
2877		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2878		    READ_OP | SEL_ROM | SEL_CHIP | SEL_CLK);
2879		drv_nsecwait(250);
2880		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2881		    READ_OP | SEL_ROM | SEL_CHIP);
2882		drv_nsecwait(100);
2883
2884		/* command */
2885		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2886		    READ_OP | SEL_ROM | SEL_CHIP | DATA_IN);
2887		drv_nsecwait(150);
2888		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2889		    READ_OP | SEL_ROM | SEL_CHIP | DATA_IN | SEL_CLK);
2890		drv_nsecwait(250);
2891		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2892		    READ_OP | SEL_ROM | SEL_CHIP | DATA_IN);
2893		drv_nsecwait(250);
2894		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2895		    READ_OP | SEL_ROM | SEL_CHIP | DATA_IN | SEL_CLK);
2896		drv_nsecwait(250);
2897		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2898		    READ_OP | SEL_ROM | SEL_CHIP | DATA_IN);
2899		drv_nsecwait(100);
2900		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2901		    READ_OP | SEL_ROM | SEL_CHIP);
2902		drv_nsecwait(150);
2903		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2904		    READ_OP | SEL_ROM | SEL_CHIP | SEL_CLK);
2905		drv_nsecwait(250);
2906		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2907		    READ_OP | SEL_ROM | SEL_CHIP);
2908		drv_nsecwait(100);
2909
2910		/* Address */
2911		for (j = HIGH_ADDRESS_BIT; j >= 1; j >>= 1) {
2912			bit = (rom_addr & j) ? DATA_IN : 0;
2913			ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2914			    READ_OP | SEL_ROM | SEL_CHIP | bit);
2915			drv_nsecwait(150);
2916			ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2917			    READ_OP | SEL_ROM | SEL_CHIP | bit | SEL_CLK);
2918			drv_nsecwait(250);
2919			ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2920			    READ_OP | SEL_ROM | SEL_CHIP | bit);
2921			drv_nsecwait(100);
2922		}
2923		drv_nsecwait(150);
2924
2925		/* Data */
2926		word = 0;
2927		for (j = 0x8000; j >= 1; j >>= 1) {
2928			ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2929			    READ_OP | SEL_ROM | SEL_CHIP | SEL_CLK);
2930			drv_nsecwait(100);
2931			dout = ddi_get32(io_handle,
2932			    REG32(io_reg, ETHER_ROM_REG));
2933			drv_nsecwait(150);
2934			if (dout & DATA_OUT)
2935				word |= j;
2936			ddi_put32(io_handle,
2937			    REG32(io_reg, ETHER_ROM_REG),
2938			    READ_OP | SEL_ROM | SEL_CHIP);
2939			drv_nsecwait(250);
2940		}
2941		addr[i] = (word & 0x0000FF);
2942		addr[i + 1] = (word >> 8);
2943		rom_addr++;
2944		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2945		    READ_OP | SEL_ROM);
2946		drv_nsecwait(100);
2947	}
2948}
2949
2950
2951/*
2952 * XXX NEEDSWORK
2953 *
2954 * Some lame multiport cards have only one SROM, which can be accessed
2955 * only from the "first" 21x4x chip, whichever that one is.  If we can't
2956 * get at our SROM, we look for its contents in a property instead, which
2957 * we rely on the bootstrap to have properly set.
2958 * #ifdef BUG_4010796
2959 * We also have a hack to try to set it ourselves, when the "first" port
2960 * attaches, if it has not already been properly set.  However, this method
2961 * is not reliable, since it makes the unwarranted assumption that the
2962 * "first" port will attach first.
2963 * #endif
2964 */
2965
2966static int
2967get_alternative_srom_image(dev_info_t *devinfo, uchar_t *vi, int len)
2968{
2969	int	l = len;
2970
2971	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
2972	    "DNET_SROM", (caddr_t)vi, &len) != DDI_PROP_SUCCESS &&
2973	    (len = l) && ddi_getlongprop_buf(DDI_DEV_T_ANY,
2974	    ddi_get_parent(devinfo), DDI_PROP_DONTPASS, "DNET_SROM",
2975	    (caddr_t)vi, &len) != DDI_PROP_SUCCESS)
2976		return (-1);	/* Can't find it! */
2977
2978	/*
2979	 * The return value from this routine specifies which port number
2980	 * we are.  The primary port is denoted port 0.  On a QUAD card we
2981	 * should return 1, 2, and 3 from this routine.  The return value
2982	 * is used to modify the ethernet address from the SROM data.
2983	 */
2984
2985#ifdef BUG_4010796
2986	{
2987	/*
2988	 * For the present, we remember the device number of our primary
2989	 * sibling and hope we and our other siblings are consecutively
2990	 * numbered up from there.  In the future perhaps the bootstrap
2991	 * will pass us the necessary information telling us which physical
2992	 * port we really are.
2993	 */
2994	pci_regspec_t	*assignp;
2995	int		assign_len;
2996	int 		devnum;
2997	int		primary_devnum;
2998
2999	primary_devnum = ddi_getprop(DDI_DEV_T_ANY, devinfo, 0,
3000	    "DNET_DEVNUM", -1);
3001	if (primary_devnum == -1)
3002		return (1);	/* XXX NEEDSWORK -- We have no better idea */
3003
3004	if ((ddi_getlongprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
3005	    "assigned-addresses", (caddr_t)&assignp,
3006	    &assign_len)) != DDI_PROP_SUCCESS)
3007		return (1);	/* XXX NEEDSWORK -- We have no better idea */
3008
3009	devnum = PCI_REG_DEV_G(assignp->pci_phys_hi);
3010	kmem_free(assignp, assign_len);
3011	return (devnum - primary_devnum);
3012	}
3013#else
3014	return (1);	/* XXX NEEDSWORK -- We have no better idea */
3015#endif
3016}
3017
3018
3019#ifdef BUG_4010796
3020static void
3021set_alternative_srom_image(dev_info_t *devinfo, uchar_t *vi, int len)
3022{
3023	int 		proplen;
3024	pci_regspec_t	*assignp;
3025	int		assign_len;
3026	int 		devnum;
3027
3028	if (ddi_getproplen(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
3029	    "DNET_SROM", &proplen) == DDI_PROP_SUCCESS ||
3030	    ddi_getproplen(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
3031	    DDI_PROP_DONTPASS, "DNET_SROM", &proplen) == DDI_PROP_SUCCESS)
3032		return;		/* Already done! */
3033
3034	/* function return value ignored */
3035	(void) ddi_prop_update_byte_array(DDI_DEV_T_NONE,
3036	    ddi_get_parent(devinfo), "DNET_SROM", (uchar_t *)vi, len);
3037	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devinfo,
3038	    "DNET_HACK", "hack");
3039
3040	if ((ddi_getlongprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
3041	    "assigned-addresses", (caddr_t)&assignp,
3042	    &assign_len)) == DDI_PROP_SUCCESS) {
3043		devnum = PCI_REG_DEV_G(assignp->pci_phys_hi);
3044		kmem_free(assignp, assign_len);
3045		/* function return value ignored */
3046		(void) ddi_prop_update_int(DDI_DEV_T_NONE,
3047		    ddi_get_parent(devinfo), "DNET_DEVNUM", devnum);
3048	}
3049}
3050#endif
3051
3052/*
3053 *	========== SROM Parsing Routines ==========
3054 */
3055
3056static int
3057check_srom_valid(uchar_t *vi)
3058{
3059	int		word, bit;
3060	uint8_t		crc;
3061	uint16_t	*wvi;		/* word16 pointer to vendor info */
3062	uint16_t	bitval;
3063
3064	/* verify that the number of controllers on the card is within range */
3065	if (vi[SROM_ADAPTER_CNT] < 1 || vi[SROM_ADAPTER_CNT] > MAX_ADAPTERS)
3066		return (0);
3067
3068	/*
3069	 * SROM versions 1 and 3 never had the ID block CRC value checked,
3070	 * and this can't be changed without retesting every supported card.
3071	 *
3072	 * However, version 4 SROMs can have this test applied without fear
3073	 * of breaking something that used to work.  The CRC algorithm is
3074	 * taken from the Intel document
3075	 *	"21x4 Serial ROM Format"
3076	 *	version 4.09
3077	 *	3-Mar-1999
3078	 */
3079
3080	switch (vi[SROM_VERSION]) {
3081	case 1:
3082		/* FALLTHROUGH */
3083	case 3:
3084		return (vi[SROM_MBZ] == 0 &&	/* must be zero */
3085		    vi[SROM_MBZ2] == 0 &&	/* must be zero */
3086		    vi[SROM_MBZ3] == 0);	/* must be zero */
3087
3088	case 4:
3089		wvi = (uint16_t *)vi;
3090		crc = 0xff;
3091		for (word = 0; word < 9; word++)
3092			for (bit = 15; bit >= 0; bit--) {
3093				if (word == 8 && bit == 7)
3094					return (crc == vi[16]);
3095				bitval =
3096				    ((wvi[word] >> bit) & 1) ^ ((crc >> 7) & 1);
3097				crc <<= 1;
3098				if (bitval == 1) {
3099					crc ^= 7;
3100				}
3101			}
3102
3103	default:
3104		return (0);
3105	}
3106}
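
#if 0
/*
 * Illustrative sketch (not compiled into the driver): the same CRC-8 step
 * (polynomial x^8 + x^2 + x + 1, i.e. 0x07, initial value 0xff) that the
 * version-4 case above applies bit by bit, shown here in a generic
 * byte-oriented form.  Note the driver consumes each 16-bit word of the
 * ID block MSB-first and stops just short of the stored CRC byte, so the
 * effective byte order differs; the name and interface are assumptions.
 */
static uint8_t
srom_crc8_sketch(const uint8_t *buf, size_t len)
{
	uint8_t crc = 0xff;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x80) ? (crc << 1) ^ 0x07 : (crc << 1);
	}
	return (crc);
}
#endif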
3107
3108/*
3109 *	========== Active Media Determination Routines ==========
3110 */
3111
3112/* This routine is also called for V3 Compact and extended type 0 SROMs */
3113static int
3114is_fdmedia(int media)
3115{
3116	if (media == MEDIA_TP_FD || media == MEDIA_SYM_SCR_FD)
3117		return (1);
3118	else
3119		return (0);
3120}
3121
3122/*
3123 * "Linkset" is used to merge media that use the same link test check. So,
3124 * if the TP link is added to the linkset, so is the TP Full duplex link.
3125 * Used to avoid checking the same link status twice.
3126 */
3127static void
3128linkset_add(uint32_t *set, int media)
3129{
3130	if (media == MEDIA_TP_FD || media == MEDIA_TP)
3131		*set |= (1UL<<MEDIA_TP_FD) | (1UL<<MEDIA_TP);
3132	else if (media == MEDIA_SYM_SCR_FD || media == MEDIA_SYM_SCR)
3133		*set |= (1UL<<MEDIA_SYM_SCR_FD) | (1UL<<MEDIA_SYM_SCR);
3134	else *set |= 1UL<<media;
3135}
3136static int
3137linkset_isset(uint32_t linkset, int media)
3138{
3139	return (((1UL << media) & linkset) ? 1 : 0);
3140}
3141
3142/*
3143 * The following code detects which Media is connected for 21041/21140
3144 * Expect to change this code to support new 21140 variants.
3145 * find_active_media() - called with intrlock held.
3146 */
3147static void
3148find_active_media(struct dnetinstance *dnetp)
3149{
3150	int i;
3151	media_block_t *block;
3152	media_block_t *best_allowed = NULL;
3153	media_block_t *hd_found = NULL;
3154	media_block_t *fd_found = NULL;
3155	LEAF_FORMAT *leaf = &dnetp->sr.leaf[dnetp->leaf];
3156	uint32_t checked = 0, links_up = 0;
3157
3158	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3159
3160	dnetp->selected_media_block = leaf->default_block;
3161
3162	if (dnetp->phyaddr != -1) {
3163		dnetp->selected_media_block = leaf->mii_block;
3164		setup_block(dnetp);
3165
3166		if (ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3167		    DDI_PROP_DONTPASS, "portmon", 1)) {
3168			/* XXX return value ignored */
3169			(void) mii_start_portmon(dnetp->mii, dnet_mii_link_cb,
3170			    &dnetp->intrlock);
3171			/*
3172			 * If the port monitor detects the link is already
3173			 * up, there is no point going through the rest of the
3174			 * link sense
3175			 */
3176			if (dnetp->mii_up) {
3177				return;
3178			}
3179		}
3180	}
3181
3182	/*
3183	 * Media is searched for in order of precedence.  The DEC SROM spec
3184	 * tells us that the first media entry in the SROM is the lowest
3185	 * precedence and should be checked last. This is why we go to the last
3186	 * Media block and work back to the beginning.
3187	 *
3188	 * However, some older SROMs (Cogent EM110's etc.) have this the wrong
3189	 * way around. As a result, following the SROM spec would result in a
3190	 * 10 link being chosen over a 100 link if both media are available.
3191	 * So we continue trying the media until we have at least tried the
3192	 * DEFAULT media.
3193	 */
3194
3195	/* Search for an active medium, and select it */
3196	for (block = leaf->block + leaf->block_count - 1;
3197	    block >= leaf->block; block--) {
3198		int media = block->media_code;
3199
3200		/* User settings disallow selection of this block */
3201		if (dnetp->disallowed_media & (1UL<<media))
3202			continue;
3203
3204		/* We may not be able to pick the default */
3205		if (best_allowed == NULL || block == leaf->default_block)
3206			best_allowed = block;
3207#ifdef DEBUG
3208		if (dnetdebug & DNETSENSE)
3209			cmn_err(CE_NOTE, "Testing %s medium (block type %d)",
3210			    media_str[media], block->type);
3211#endif
3212
3213		dnetp->selected_media_block = block;
3214		switch (block->type) {
3215
3216		case 2: /* SIA Media block: Best we can do is send a packet */
3217			setup_block(dnetp);
3218			if (send_test_packet(dnetp)) {
3219				if (!is_fdmedia(media))
3220					return;
3221				if (!fd_found)
3222					fd_found = block;
3223			}
3224			break;
3225
3226		/* SYM/SCR or TP block: Use the link-sense bits */
3227		case 0:
3228			if (!linkset_isset(checked, media)) {
3229				linkset_add(&checked, media);
3230				if (((media == MEDIA_BNC ||
3231				    media == MEDIA_AUI) &&
3232				    send_test_packet(dnetp)) ||
3233				    dnet_link_sense(dnetp))
3234					linkset_add(&links_up, media);
3235			}
3236
3237			if (linkset_isset(links_up, media)) {
3238				/*
3239				 * Half Duplex is *always* the favoured media.
3240				 * Full Duplex can be set and forced via the
3241				 * conf file.
3242				 */
3243				if (!is_fdmedia(media) &&
3244				    dnetp->selected_media_block ==
3245				    leaf->default_block) {
3246					/*
3247					 * Cogent cards list their media in
3248					 * the opposite order to the spec, so
3249					 * this code forces the media test to
3250					 * keep going until the default medium
3251					 * has been tried.
3252					 *
3253					 * In the Cogent case (10, 10FD,
3254					 * 100FD, 100), 100 is the default;
3255					 * 10 could have been detected and
3256					 * chosen, but we force the test on
3257					 * through to 100.
3258					 */
3259					setup_block(dnetp);
3260					return;
3261				} else if (!is_fdmedia(media)) {
3262					/*
3263					 * This allows all the others to work
3264					 * properly by remembering the media
3265					 * that works and not defaulting to
3266					 * a FD link.
3267					 */
3268					if (hd_found == NULL)
3269						hd_found = block;
3270				} else if (fd_found == NULL) {
3271					/*
3272					 * No other media have been found
3273					 * so far.  This FD medium works, so
3274					 * remember it and use it if nothing
3275					 * else is detected.
3276					 */
3277					fd_found = block;
3278				}
3279			}
3280			break;
3281
3282		/*
3283		 * MII block: May take up to a second or so to settle if
3284		 * setup causes a PHY reset
3285		 */
3286		case 1: case 3:
3287			setup_block(dnetp);
3288			for (i = 0; ; i++) {
3289				if (mii_linkup(dnetp->mii, dnetp->phyaddr)) {
3290					/* XXX function return value ignored */
3291					(void) mii_getspeed(dnetp->mii,
3292					    dnetp->phyaddr,
3293					    &dnetp->mii_speed,
3294					    &dnetp->mii_duplex);
3295					dnetp->mii_up = 1;
3296					leaf->mii_block = block;
3297					return;
3298				}
3299				if (i == 10)
3300					break;
3301				delay(drv_usectohz(150000));
3302			}
3303			dnetp->mii_up = 0;
3304			break;
3305		}
3306	} /* for loop */
3307	if (hd_found) {
3308		dnetp->selected_media_block = hd_found;
3309	} else if (fd_found) {
3310		dnetp->selected_media_block = fd_found;
3311	} else {
3312		if (best_allowed == NULL)
3313			best_allowed = leaf->default_block;
3314		dnetp->selected_media_block = best_allowed;
3315		cmn_err(CE_WARN, "!dnet: Default media selected\n");
3316	}
3317	setup_block(dnetp);
3318}
3319
3320/*
3321 * Do anything necessary to select the selected_media_block.
3322 * setup_block() - called with intrlock held.
3323 */
3324static void
3325setup_block(struct dnetinstance *dnetp)
3326{
3327	dnet_reset_board(dnetp);
3328	dnet_init_board(dnetp);
3329	/* XXX function return value ignored */
3330	(void) dnet_start(dnetp);
3331}
3332
3333/* dnet_link_sense() - called with intrlock held */
3334static int
3335dnet_link_sense(struct dnetinstance *dnetp)
3336{
3337	/*
3338	 * This routine makes use of the command word from the srom config.
3339	 * Details of the auto-sensing information contained in this can
3340	 * be found in the "Digital Semiconductor 21X4 Serial ROM Format v3.03"
3341	 * spec. Section 4.3.2.1, and 4.5.2.1.3
3342	 */
3343	media_block_t *block = dnetp->selected_media_block;
3344	uint32_t link, status, mask, polarity;
3345	int settletime, stabletime, waittime, upsamples;
3346	int delay_100, delay_10;
3347
3348
3349	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3350	/* Don't autosense if the medium does not support it */
3351	if (block->command & (1 << 15)) {
3352		/* This should be the default block */
3353		if (block->command & (1UL<<14))
3354			dnetp->sr.leaf[dnetp->leaf].default_block = block;
3355		return (0);
3356	}
3357
3358	delay_100 = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3359	    DDI_PROP_DONTPASS, "autosense-delay-100", 2000);
3360
3361	delay_10 = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3362	    DDI_PROP_DONTPASS, "autosense-delay-10", 400);
3363
3364	/*
3365	 * Scrambler may need to be disabled for link sensing
3366	 * to work
3367	 */
3368	dnetp->disable_scrambler = 1;
3369	setup_block(dnetp);
3370	dnetp->disable_scrambler = 0;
3371
3372	if (block->media_code == MEDIA_TP || block->media_code == MEDIA_TP_FD)
3373		settletime = delay_10;
3374	else
3375		settletime = delay_100;
3376	stabletime = settletime / 4;
3377
3378	mask = 1 << ((block->command & CMD_MEDIABIT_MASK) >> 1);
3379	polarity = block->command & CMD_POL ? 0xffffffff : 0;
3380
3381	for (waittime = 0, upsamples = 0;
3382	    waittime <= settletime + stabletime && upsamples < 8;
3383	    waittime += stabletime/8) {
3384		delay(drv_usectohz(stabletime*1000 / 8));
3385		status = read_gpr(dnetp);
3386		link = (status^polarity) & mask;
3387		if (link)
3388			upsamples++;
3389		else
3390			upsamples = 0;
3391	}
3392#ifdef DNETDEBUG
3393	if (dnetdebug & DNETSENSE)
3394		cmn_err(CE_NOTE, "%s upsamples:%d stat:%x polarity:%x "
3395		    "mask:%x link:%x",
3396		    upsamples == 8 ? "UP":"DOWN",
3397		    upsamples, status, polarity, mask, link);
3398#endif
3399	if (upsamples == 8)
3400		return (1);
3401	return (0);
3402}
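
#if 0
/*
 * Illustrative sketch (not compiled into the driver): the debounce pattern
 * used by dnet_link_sense() above, with a hypothetical sample() predicate
 * standing in for the GPR read and mask/polarity test.  The link is
 * reported up only after eight consecutive "up" samples taken at
 * stabletime/8 millisecond intervals within the settle + stable window.
 */
static int
link_debounce_sketch(int (*sample)(void), int settletime, int stabletime)
{
	int waittime, upsamples;

	for (waittime = 0, upsamples = 0;
	    waittime <= settletime + stabletime && upsamples < 8;
	    waittime += stabletime / 8) {
		delay(drv_usectohz(stabletime * 1000 / 8));
		upsamples = sample() ? upsamples + 1 : 0;
	}
	return (upsamples == 8);
}
#endif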
3403
3404static int
3405send_test_packet(struct dnetinstance *dnetp)
3406{
3407	int packet_delay;
3408	struct tx_desc_type *desc;
3409	int bufindex;
3410	int media_code = dnetp->selected_media_block->media_code;
3411	uint32_t del;
3412
3413	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3414	/*
3415	 * For a successful test packet, the card must have settled into
3416	 * its current setting.  Almost all cards we've tested manage to
3417	 * do this with all media within 50ms.  However, the SMC 8432
3418	 * requires 300ms to settle into BNC mode.  We now only do this
3419	 * from attach, and we do sleeping delay() instead of drv_usecwait()
3420	 * so we hope this .2 second delay won't cause too much suffering.
3421	 * ALSO: with an autonegotiating hub, an additional 1-second delay is
3422	 * required.  This is done if the media type is TP.
3423	 */
3424	if (media_code == MEDIA_TP || media_code == MEDIA_TP_FD) {
3425		packet_delay = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3426		    DDI_PROP_DONTPASS, "test_packet_delay_tp", 1300000);
3427	} else {
3428		packet_delay = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3429		    DDI_PROP_DONTPASS, "test_packet_delay", 300000);
3430	}
3431	delay(drv_usectohz(packet_delay));
3432
3433	desc = dnetp->tx_desc;
3434
3435	bufindex = dnetp->tx_current_desc;
3436	if (alloc_descriptor(dnetp) == FAILURE) {
3437		cmn_err(CE_WARN, "DNET: send_test_packet: alloc_descriptor"
3438		    " failed");
3439		return (0);
3440	}
3441
3442	/*
3443	 * use setup buffer as the buffer for the test packet
3444	 * instead of allocating one.
3445	 */
3446
3447	ASSERT(dnetp->setup_buf_vaddr != NULL);
3448	/* Put something decent in dest address so we don't annoy other cards */
3449	BCOPY((caddr_t)dnetp->curr_macaddr,
3450	    (caddr_t)dnetp->setup_buf_vaddr, ETHERADDRL);
3451	BCOPY((caddr_t)dnetp->curr_macaddr,
3452	    (caddr_t)dnetp->setup_buf_vaddr+ETHERADDRL, ETHERADDRL);
3453
3454	desc[bufindex].buffer1 = dnetp->setup_buf_paddr;
3455	desc[bufindex].desc1.buffer_size1 = SETUPBUF_SIZE;
3456	desc[bufindex].buffer2 = (uint32_t)(0);
3457	desc[bufindex].desc1.first_desc = 1;
3458	desc[bufindex].desc1.last_desc = 1;
3459	desc[bufindex].desc1.int_on_comp = 1;
3460	desc[bufindex].desc0.own = 1;
3461
3462	ddi_put8(dnetp->io_handle, REG8(dnetp->io_reg, TX_POLL_REG),
3463	    TX_POLL_DEMAND);
3464
3465	/*
3466	 * Give enough time for the chip to transmit the packet
3467	 */
3468#if 1
3469	del = 1000;
3470	while (desc[bufindex].desc0.own && --del)
3471		drv_usecwait(10);	/* quickly wait up to 10ms */
3472	if (desc[bufindex].desc0.own)
3473		delay(drv_usectohz(200000));	/* nicely wait a longer time */
3474#else
3475	del = 0x10000;
3476	while (desc[bufindex].desc0.own && --del)
3477		drv_usecwait(10);
3478#endif
3479
3480#ifdef DNETDEBUG
3481	if (dnetdebug & DNETSENSE)
3482		cmn_err(CE_NOTE, "desc0 bits = %u, %u, %u, %u, %u, %u",
3483		    desc[bufindex].desc0.own,
3484		    desc[bufindex].desc0.err_summary,
3485		    desc[bufindex].desc0.carrier_loss,
3486		    desc[bufindex].desc0.no_carrier,
3487		    desc[bufindex].desc0.late_collision,
3488		    desc[bufindex].desc0.link_fail);
3489#endif
3490	if (desc[bufindex].desc0.own) /* it shouldn't take this long, error */
3491		return (0);
3492
3493	return (!desc[bufindex].desc0.err_summary);
3494}
3495
3496/* enable_interrupts - called with intrlock held */
3497static void
3498enable_interrupts(struct dnetinstance *dnetp)
3499{
3500	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3501	/* Don't enable interrupts if they have been forced off */
3502	if (dnetp->interrupts_disabled)
3503		return;
3504	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, INT_MASK_REG),
3505	    ABNORMAL_INTR_MASK | NORMAL_INTR_MASK | SYSTEM_ERROR_MASK |
3506	    (dnetp->timer.cb ? GPTIMER_INTR : 0) |
3507	    RX_INTERRUPT_MASK |
3508	    TX_INTERRUPT_MASK | TX_JABBER_MASK | TX_UNDERFLOW_MASK);
3509}
3510
3511/*
3512 * Some older multiport cards are non-PCI compliant in their interrupt routing.
3513 * Second and subsequent devices are incorrectly configured by the BIOS
3514 * (either in their ILINE configuration or the MP Configuration Table for PC+MP
3515 * systems).
3516 * The hack stops registering the interrupt routine for the FIRST
3517 * device on the adapter, and registers its own. It builds up a table
3518 * of dnetp structures for each device, and the new interrupt routine
3519 * calls dnet_intr for each of them.
3520 * Known cards that suffer from this problem are:
3521 *	All Cogent multiport cards;
3522 * 	Znyx 314;
3523 *	Znyx 315.
3524 *
3525 * XXX NEEDSWORK -- see comments above get_alternative_srom_image(). This
3526 * hack relies on the fact that the offending cards will have only one SROM.
3527 * It uses this fact to identify devices that are on the same multiport
3528 * adapter, as opposed to multiple devices from the same vendor (as
3529 * indicated by "secondary")
3530 */
3531static int
3532dnet_hack_interrupts(struct dnetinstance *dnetp, int secondary)
3533{
3534	int i;
3535	struct hackintr_inf *hackintr_inf;
3536	dev_info_t *devinfo = dnetp->devinfo;
3537	uint32_t oui = 0;	/* Organizationally Unique ID */
3538
3539	if (ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
3540	    "no_INTA_workaround", 0) != 0)
3541		return (0);
3542
3543	for (i = 0; i < 3; i++)
3544		oui = (oui << 8) | dnetp->vendor_addr[i];
3545
3546	/* Check whether or not we need to implement the hack */
3547
3548	switch (oui) {
3549	case ZNYX_ETHER:
3550		/* Znyx multiport 21040 cards <<==>> ZX314 or ZX315 */
3551		if (dnetp->board_type != DEVICE_ID_21040)
3552			return (0);
3553		break;
3554
3555	case COGENT_ETHER:
3556		/* All known Cogent multiport cards */
3557		break;
3558
3559	case ADAPTEC_ETHER:
3560		/* Adaptec multiport cards */
3561		break;
3562
3563	default:
3564		/* Other cards work correctly */
3565		return (0);
3566	}
3567
3568	/* card is (probably) non-PCI compliant in its interrupt routing */
3569
3570
3571	if (!secondary) {
3572
3573		/*
3574		 * If we have already registered a hacked interrupt, and
3575		 * this is also a 'primary' adapter, then this is NOT part of
3576		 * a multiport card, but a second card on the same PCI bus.
3577		 * BUGID: 4057747
3578		 */
3579		if (ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
3580		    DDI_PROP_DONTPASS, hackintr_propname, 0) != 0)
3581			return (0);
3582				/* ... Primary not part of a multiport device */
3583
3584#ifdef DNETDEBUG
3585		if (dnetdebug & DNETTRACE)
3586			cmn_err(CE_NOTE, "dnet: Implementing hardware "
3587			    "interrupt flaw workaround");
3588#endif
3589		dnetp->hackintr_inf = hackintr_inf =
3590		    kmem_zalloc(sizeof (struct hackintr_inf), KM_SLEEP);
3591		if (hackintr_inf == NULL)
3592			goto fail;
3593
3594		hackintr_inf->dnetps[0] = dnetp;
3595		hackintr_inf->devinfo = devinfo;
3596
3597		/*
3598		 * Add a property to allow successive attaches to find the
3599		 * table
3600		 */
3601
3602		if (ddi_prop_update_byte_array(DDI_DEV_T_NONE,
3603		    ddi_get_parent(devinfo), hackintr_propname,
3604		    (uchar_t *)&dnetp->hackintr_inf,
3605		    sizeof (void *)) != DDI_PROP_SUCCESS)
3606			goto fail;
3607
3608
3609		/* Register our hacked interrupt routine */
3610		if (ddi_add_intr(devinfo, 0, &dnetp->icookie, NULL,
3611		    (uint_t (*)(char *))dnet_hack_intr,
3612		    (caddr_t)hackintr_inf) != DDI_SUCCESS) {
3613			/* XXX function return value ignored */
3614			(void) ddi_prop_remove(DDI_DEV_T_NONE,
3615			    ddi_get_parent(devinfo),
3616			    hackintr_propname);
3617			goto fail;
3618		}
3619
3620		/*
3621		 * Mutex required to ensure interrupt routine has completed
3622		 * when detaching devices
3623		 */
3624		mutex_init(&hackintr_inf->lock, NULL, MUTEX_DRIVER,
3625		    dnetp->icookie);
3626
3627		/* Stop GLD registering an interrupt */
3628		return (-1);
3629	} else {
3630
3631		/* Add the dnetp for this secondary device to the table */
3632
3633		hackintr_inf = (struct hackintr_inf *)(uintptr_t)
3634		    ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
3635		    DDI_PROP_DONTPASS, hackintr_propname, 0);
3636
3637		if (hackintr_inf == NULL)
3638			goto fail;
3639
3640		/* Find an empty slot */
3641		for (i = 0; i < MAX_INST; i++)
3642			if (hackintr_inf->dnetps[i] == NULL)
3643				break;
3644
3645		/* More than 8 ports on adapter ?! */
3646		if (i == MAX_INST)
3647			goto fail;
3648
3649		hackintr_inf->dnetps[i] = dnetp;
3650
3651		/*
3652		 * Allow GLD to register a handler for this
3653		 * device. If the card is actually broken, as we suspect, this
3654		 * handler will never get called. However, by registering the
3655		 * interrupt handler, we can cope gracefully with new multiport
3656		 * Cogent cards that decide to fix the hardware problem.
3657		 */
3658		return (0);
3659	}
3660
3661fail:
3662	cmn_err(CE_WARN, "dnet: Could not work around hardware interrupt"
3663	    " routing problem");
3664	return (0);
3665}
3666
3667/*
3668 * Call dnet_intr for all adapters on a multiport card
3669 */
3670static uint_t
3671dnet_hack_intr(struct hackintr_inf *hackintr_inf)
3672{
3673	int i;
3674	int claimed = DDI_INTR_UNCLAIMED;
3675
3676	/* Stop detaches while processing interrupts */
3677	mutex_enter(&hackintr_inf->lock);
3678
3679	for (i = 0; i < MAX_INST; i++) {
3680		if (hackintr_inf->dnetps[i] &&
3681		    dnet_intr((caddr_t)hackintr_inf->dnetps[i]) ==
3682		    DDI_INTR_CLAIMED) {
3683			claimed = DDI_INTR_CLAIMED;
3684		}
3685	}
3686	mutex_exit(&hackintr_inf->lock);
3687	return (claimed);
3688}
3689
3690/*
3691 * This removes the detaching device from the table processed by the hacked
3692 * interrupt routine. Because the interrupts from all devices come in to the
3693 * same interrupt handler, ALL devices must stop interrupting once the
3694 * primary device detaches. This isn't a problem at present, because all
3695 * instances of a device are detached when the driver is unloaded.
3696 */
3697static int
3698dnet_detach_hacked_interrupt(dev_info_t *devinfo)
3699{
3700	int i;
3701	struct hackintr_inf *hackintr_inf;
3702	struct dnetinstance *altdnetp, *dnetp =
3703	    ddi_get_driver_private(devinfo);
3704
3705	hackintr_inf = (struct hackintr_inf *)(uintptr_t)
3706	    ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
3707	    DDI_PROP_DONTPASS, hackintr_propname, 0);
3708
3709	/*
3710	 * No hackintr_inf implies hack was not required or the primary has
3711	 * detached, and our interrupts are already disabled
3712	 */
3713	if (!hackintr_inf) {
3714		/* remove the interrupt for the non-hacked case */
3715		ddi_remove_intr(devinfo, 0, dnetp->icookie);
3716		return (DDI_SUCCESS);
3717	}
3718
3719	/* Remove this device from the handled table */
3720	mutex_enter(&hackintr_inf->lock);
3721	for (i = 0; i < MAX_INST; i++) {
3722		if (hackintr_inf->dnetps[i] == dnetp) {
3723			hackintr_inf->dnetps[i] = NULL;
3724			break;
3725		}
3726	}
3727
3728	mutex_exit(&hackintr_inf->lock);
3729
3730	/* Not the primary card, we are done */
3731	if (devinfo != hackintr_inf->devinfo)
3732		return (DDI_SUCCESS);
3733
3734	/*
3735	 * This is the primary card. All remaining adapters on this device
3736	 * must have their interrupts disabled before we remove the handler
3737	 */
3738	for (i = 0; i < MAX_INST; i++) {
3739		if ((altdnetp = hackintr_inf->dnetps[i]) != NULL) {
3740			altdnetp->interrupts_disabled = 1;
3741			ddi_put32(altdnetp->io_handle,
3742			    REG32(altdnetp->io_reg, INT_MASK_REG), 0);
3743		}
3744	}
3745
3746	/* It should now be safe to remove the interrupt handler */
3747
3748	ddi_remove_intr(devinfo, 0, dnetp->icookie);
3749	mutex_destroy(&hackintr_inf->lock);
3750	/* XXX function return value ignored */
3751	(void) ddi_prop_remove(DDI_DEV_T_NONE, ddi_get_parent(devinfo),
3752	    hackintr_propname);
3753	kmem_free(hackintr_inf, sizeof (struct hackintr_inf));
3754	return (DDI_SUCCESS);
3755}
3756
3757/* do_phy() - called with intrlock held */
3758static void
3759do_phy(struct dnetinstance *dnetp)
3760{
3761	dev_info_t *dip;
3762	LEAF_FORMAT *leaf = dnetp->sr.leaf + dnetp->leaf;
3763	media_block_t *block;
3764	int phy;
3765
3766	dip = dnetp->devinfo;
3767
3768	/*
3769	 * Find and configure the PHY media block. If NO PHY blocks are
3770	 * found in the SROM, but a PHY device is present, we assume the card
3771	 * is a legacy device, and that there is ONLY a PHY interface on the
3772	 * card (i.e. no BNC or AUI, and 10BaseT is implemented by the PHY).
3773	 */
3774
3775	for (block = leaf->block + leaf->block_count - 1;
3776	    block >= leaf->block; block--) {
3777		if (block->type == 3 || block->type == 1) {
3778			leaf->mii_block = block;
3779			break;
3780		}
3781	}
3782
3783	/*
3784	 * If there is no MII block, select the default block and hope this
3785	 * configuration will allow the PHY to be read/written if present.
3786	 */
3787	dnetp->selected_media_block = leaf->mii_block ?
3788	    leaf->mii_block : leaf->default_block;
3789
3790	setup_block(dnetp);
3791	/* XXX function return value ignored */
3792	(void) mii_create(dip, dnet_mii_write, dnet_mii_read, &dnetp->mii);
3793
3794	/*
3795	 * We try PHY 0 LAST because it is less likely to be connected
3796	 */
3797	for (phy = 1; phy < 33; phy++)
3798		if (mii_probe_phy(dnetp->mii, phy % 32) == MII_SUCCESS &&
3799		    mii_init_phy(dnetp->mii, phy % 32) == MII_SUCCESS) {
3800#ifdef DNETDEBUG
3801			if (dnetdebug & DNETSENSE)
3802				cmn_err(CE_NOTE, "dnet: "
3803				    "PHY at address %d", phy % 32);
3804#endif
3805			dnetp->phyaddr = phy % 32;
3806			if (!leaf->mii_block) {
3807				/* Legacy card, change the leaf node */
3808				set_leaf(&dnetp->sr, &leaf_phylegacy);
3809			}
3810			return;
3811		}
3812#ifdef DNETDEBUG
3813	if (dnetdebug & DNETSENSE)
3814		cmn_err(CE_NOTE, "dnet: No PHY found");
3815#endif
3816}
3817
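/*
 * Read a 16-bit PHY register over the bit-banged MII management interface:
 * send the preamble and the read-frame command, let the PHY drive the
 * turnaround bit, then clock in the 16 data bits. Returns the register
 * value, or (ushort_t)-1 if the PHY failed to turn the bus around.
 */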
3818static ushort_t
3819dnet_mii_read(dev_info_t *dip, int phy_addr, int reg_num)
3820{
3821	struct dnetinstance *dnetp;
3822
3823	uint32_t command_word;
3824	uint32_t tmp;
3825	uint32_t data = 0;
3826	int i;
3827	int bits_in_ushort = ((sizeof (ushort_t))*8);
3828	int turned_around = 0;
3829
3830	dnetp = ddi_get_driver_private(dip);
3831
3832	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3833	/* Write Preamble */
3834	write_mii(dnetp, MII_PRE, 2*bits_in_ushort);
3835
3836	/* Prepare command word */
3837	command_word = (uint32_t)phy_addr << MII_PHY_ADDR_ALIGN;
3838	command_word |= (uint32_t)reg_num << MII_REG_ADDR_ALIGN;
3839	command_word |= MII_READ_FRAME;
3840
3841	write_mii(dnetp, command_word, bits_in_ushort-2);
3842
3843	mii_tristate(dnetp);
3844
3845	/* Check that the PHY generated a zero bit on the 2nd clock */
3846	tmp = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, ETHER_ROM_REG));
3847
3848	turned_around = (tmp & MII_DATA_IN) ? 0 : 1;
3849
3850	/* read data WORD */
3851	for (i = 0; i < bits_in_ushort; i++) {
3852		ddi_put32(dnetp->io_handle,
3853		    REG32(dnetp->io_reg, ETHER_ROM_REG), MII_READ);
3854		drv_usecwait(MII_DELAY);
3855		ddi_put32(dnetp->io_handle,
3856		    REG32(dnetp->io_reg, ETHER_ROM_REG), MII_READ | MII_CLOCK);
3857		drv_usecwait(MII_DELAY);
3858		tmp = ddi_get32(dnetp->io_handle,
3859		    REG32(dnetp->io_reg, ETHER_ROM_REG));
3860		drv_usecwait(MII_DELAY);
3861		data = (data << 1) | (tmp >> MII_DATA_IN_POSITION) & 0x0001;
3862	}
3863
3864	mii_tristate(dnetp);
3865	return (turned_around ? data : -1);
3866}
3867
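/*
 * Write a 16-bit value to a PHY register over the bit-banged MII
 * management interface: send the preamble, then the write-frame command
 * word (which carries the data), and finally release the MDIO line.
 */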
3868static void
3869dnet_mii_write(dev_info_t *dip, int phy_addr, int reg_num, int reg_dat)
3870{
3871	struct dnetinstance *dnetp;
3872	uint32_t command_word;
3873	int bits_in_ushort = ((sizeof (ushort_t))*8);
3874
3875	dnetp = ddi_get_driver_private(dip);
3876
3877	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3878	write_mii(dnetp, MII_PRE, 2*bits_in_ushort);
3879
3880	/* Prepare command word */
3881	command_word = ((uint32_t)phy_addr << MII_PHY_ADDR_ALIGN);
3882	command_word |= ((uint32_t)reg_num << MII_REG_ADDR_ALIGN);
3883	command_word |= (MII_WRITE_FRAME | (uint32_t)reg_dat);
3884
3885	write_mii(dnetp, command_word, 2*bits_in_ushort);
3886	mii_tristate(dnetp);
3887}
3888
3889/*
3890 * Write data_size bits from mii_data to the MII control lines, MSB first.
3891 */
3892static void
3893write_mii(struct dnetinstance *dnetp, uint32_t mii_data, int data_size)
3894{
3895	int i;
3896	uint32_t dbit;
3897
3898	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3899	for (i = data_size; i > 0; i--) {
3900		dbit = ((mii_data >>
3901		    (31 - MII_WRITE_DATA_POSITION)) & MII_WRITE_DATA);
3902		ddi_put32(dnetp->io_handle,
3903		    REG32(dnetp->io_reg, ETHER_ROM_REG),
3904		    MII_WRITE | dbit);
3905		drv_usecwait(MII_DELAY);
3906		ddi_put32(dnetp->io_handle,
3907		    REG32(dnetp->io_reg, ETHER_ROM_REG),
3908		    MII_WRITE | MII_CLOCK | dbit);
3909		drv_usecwait(MII_DELAY);
3910		mii_data <<= 1;
3911	}
3912}
3913
3914/*
3915 * Put the MDIO port in tri-state for the turn around bits
3916 * in MII read and at end of MII management sequence.
3917 */
3918static void
3919mii_tristate(struct dnetinstance *dnetp)
3920{
3921	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3922	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, ETHER_ROM_REG),
3923	    MII_WRITE_TS);
3924	drv_usecwait(MII_DELAY);
3925	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, ETHER_ROM_REG),
3926	    MII_WRITE_TS | MII_CLOCK);
3927	drv_usecwait(MII_DELAY);
3928}
3929
3930
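/*
 * Install a new leaf table in the SROM structure, freeing any previously
 * parsed (non-static) leaf array.
 */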
3931static void
3932set_leaf(SROM_FORMAT *sr, LEAF_FORMAT *leaf)
3933{
3934	if (sr->leaf && !sr->leaf->is_static)
3935		kmem_free(sr->leaf, sr->adapters * sizeof (LEAF_FORMAT));
3936	sr->leaf = leaf;
3937}
3938
3939/*
3940 * Callback from MII module. Makes sure that the CSR registers are
3941 * configured properly if the PHY changes mode.
3942 */
3943/* ARGSUSED */
3944/* dnet_mii_link_cb - called with intrlock held */
3945static void
3946dnet_mii_link_cb(dev_info_t *dip, int phy, enum mii_phy_state state)
3947{
3948	struct dnetinstance *dnetp = ddi_get_driver_private(dip);
3949	LEAF_FORMAT *leaf;
3950
3951	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3952
3953	leaf = dnetp->sr.leaf + dnetp->leaf;
3954	if (state == phy_state_linkup) {
3955		dnetp->mii_up = 1;
3956
3957		(void) mii_getspeed(dnetp->mii, dnetp->phyaddr,
3958		    &dnetp->mii_speed, &dnetp->mii_duplex);
3959
3960		dnetp->selected_media_block = leaf->mii_block;
3961		setup_block(dnetp);
3962	} else {
3963		/* NEEDSWORK: Probably can call find_active_media here */
3964		dnetp->mii_up = 0;
3965
3966		if (leaf->default_block->media_code == MEDIA_MII)
3967			dnetp->selected_media_block = leaf->default_block;
3968		setup_block(dnetp);
3969	}
3970
3971	if (dnetp->running) {
3972		mac_link_update(dnetp->mac_handle,
3973		    (dnetp->mii_up ? LINK_STATE_UP : LINK_STATE_DOWN));
3974	}
3975}
3976
3977/*
3978 * SROM parsing routines.
3979 * Refer to the Digital 3.03 SROM spec while reading this! (references refer
3980 * to this document)
3981 * Where possible ALL vendor specific changes should be localised here. The
3982 * SROM data should be capable of describing any programmatic irregularities
3983 * of DNET cards (via SIA or GP registers, in particular), so vendor specific
3984 * code elsewhere should not be required
3985 */
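/*
 * Layout of the ID block as parsed below (byte offsets into the vendor
 * info read from the SROM):
 *	vi[18]		SROM format version
 *	vi[19]		adapter (leaf) count
 *	vi[20..25]	ethernet address
 *	vi[26..]	per adapter: device number, then a 16-bit leaf offset
 *			(little-endian) into the vendor info
 */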
3986static void
3987dnet_parse_srom(struct dnetinstance *dnetp, SROM_FORMAT *sr, uchar_t *vi)
3988{
3989	uint32_t ether_mfg = 0;
3990	int i;
3991	uchar_t *p;
3992
3993	if (!ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3994	    DDI_PROP_DONTPASS, "no_sromconfig", 0))
3995		dnetp->sr.init_from_srom = check_srom_valid(vi);
3996
3997	if (dnetp->sr.init_from_srom && dnetp->board_type != DEVICE_ID_21040) {
3998		/* Section 2/3: General SROM Format/ ID Block */
3999		p = vi+18;
4000		sr->version = *p++;
4001		sr->adapters = *p++;
4002
4003		sr->leaf =
4004		    kmem_zalloc(sr->adapters * sizeof (LEAF_FORMAT), KM_SLEEP);
4005		for (i = 0; i < 6; i++)
4006			sr->netaddr[i] = *p++;
4007
4008		for (i = 0; i < sr->adapters; i++) {
4009			uchar_t devno = *p++;
4010			uint16_t offset = *p++;
4011			offset |= *p++ << 8;
4012			sr->leaf[i].device_number = devno;
4013			parse_controller_leaf(dnetp, sr->leaf+i, vi+offset);
4014		}
4015		/*
4016		 * 'Orrible hack for Cogent cards. The 6911A board seems to
4017		 * have an incorrect SROM. (From the OEMDEMO program
4018		 * supplied by Cogent, it seems that the ROM matches a setup
4019		 * for a board with a QSI or ICS PHY.)
4020		 */
4021		for (i = 0; i < 3; i++)
4022			ether_mfg = (ether_mfg << 8) | sr->netaddr[i];
4023
4024		if (ether_mfg == ADAPTEC_ETHER) {
4025			static uint16_t cogent_gprseq[] = {0x821, 0};
4026			switch (vi[COGENT_SROM_ID]) {
4027			case COGENT_ANA6911A_C:
4028			case COGENT_ANA6911AC_C:
4029#ifdef DNETDEBUG
4030				if (dnetdebug & DNETTRACE)
4031					cmn_err(CE_WARN,
4032					    "Suspected bad GPR sequence."
4033					    " Making a guess (821,0)");
4034#endif
4035
4036				/* XXX function return value ignored */
4037				(void) ddi_prop_update_byte_array(
4038				    DDI_DEV_T_NONE, dnetp->devinfo,
4039				    "gpr-sequence", (uchar_t *)cogent_gprseq,
4040				    sizeof (cogent_gprseq));
4041				break;
4042			}
4043		}
4044	} else {
4045		/*
4046		 * Ad hoc SROM: check for cards that need special handling.
4047		 * Assume vendor info contains ether address in first six bytes
4048		 */
4049
4050		uchar_t *mac = vi + ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
4051		    DDI_PROP_DONTPASS, macoffset_propname, 0);
4052
4053		for (i = 0; i < 6; i++)
4054			sr->netaddr[i] = mac[i];
4055
4056		if (dnetp->board_type == DEVICE_ID_21140) {
4057			for (i = 0; i < 3; i++)
4058				ether_mfg = (ether_mfg << 8) | mac[i];
4059
4060			switch (ether_mfg) {
4061			case ASANTE_ETHER:
4062				dnetp->vendor_21140 = ASANTE_TYPE;
4063				dnetp->vendor_revision = 0;
4064				set_leaf(sr, &leaf_asante);
4065				sr->adapters = 1;
4066				break;
4067
4068			case COGENT_ETHER:
4069			case ADAPTEC_ETHER:
4070				dnetp->vendor_21140 = COGENT_EM_TYPE;
4071				dnetp->vendor_revision =
4072				    vi[VENDOR_REVISION_OFFSET];
4073				set_leaf(sr, &leaf_cogent_100);
4074				sr->adapters = 1;
4075				break;
4076
4077			default:
4078				dnetp->vendor_21140 = DEFAULT_TYPE;
4079				dnetp->vendor_revision = 0;
4080				set_leaf(sr, &leaf_default_100);
4081				sr->adapters = 1;
4082				break;
4083			}
4084		} else if (dnetp->board_type == DEVICE_ID_21041) {
4085			set_leaf(sr, &leaf_21041);
4086		} else if (dnetp->board_type == DEVICE_ID_21040) {
4087			set_leaf(sr, &leaf_21040);
4088		}
4089	}
4090}
4091
4092/* Section 4.2, 4.3, 4.4, 4.5 */
4093static void
4094parse_controller_leaf(struct dnetinstance *dnetp, LEAF_FORMAT *leaf,
4095	uchar_t *vi)
4096{
4097	int i;
4098
4099	leaf->selected_contype = *vi++;
4100	leaf->selected_contype |= *vi++ << 8;
4101
4102	if (dnetp->board_type == DEVICE_ID_21140) /* Sect. 4.3 */
4103		leaf->gpr = *vi++;
4104
4105	leaf->block_count = *vi++;
4106
4107	if (leaf->block_count > MAX_MEDIA) {
4108		cmn_err(CE_WARN, "dnet: Too many media in SROM!");
4109		leaf->block_count = 1;
4110	}
4111	for (i = 0; i < leaf->block_count; i++) {
4112		vi = parse_media_block(dnetp, leaf->block + i, vi);
4113		if (leaf->block[i].command & CMD_DEFAULT_MEDIUM)
4114			leaf->default_block = leaf->block+i;
4115	}
4116	/* No explicit default block: use last in the ROM */
4117	if (leaf->default_block == NULL)
4118		leaf->default_block = leaf->block + leaf->block_count - 1;
4119
4120}
4121
4122static uchar_t *
4123parse_media_block(struct dnetinstance *dnetp, media_block_t *block, uchar_t *vi)
4124{
4125	int i;
4126
4127	/*
4128	 * There are three kinds of media block we need to worry about:
4129	 * the 21041 blocks,
4130	 * 21140 blocks from a version 1 SROM, and
4131	 * 2114[023] blocks from a version 3 SROM.
4132	 */
4133
4134	if (dnetp->board_type == DEVICE_ID_21041) {
4135		/* Section 4.2 */
4136		block->media_code = *vi & 0x3f;
4137		block->type = 2;
4138		if (*vi++ & 0x40) {
4139			block->un.sia.csr13 = *vi++;
4140			block->un.sia.csr13 |= *vi++ << 8;
4141			block->un.sia.csr14 = *vi++;
4142			block->un.sia.csr14 |= *vi++ << 8;
4143			block->un.sia.csr15 = *vi++;
4144			block->un.sia.csr15 |= *vi++ << 8;
4145		} else {
4146			/* No media data (csrs 13,14,15). Insert defaults */
4147			switch (block->media_code) {
4148			case MEDIA_TP:
4149				block->un.sia.csr13 = 0xef01;
4150				block->un.sia.csr14 = 0x7f3f;
4151				block->un.sia.csr15 = 0x0008;
4152				break;
4153			case MEDIA_TP_FD:
4154				block->un.sia.csr13 = 0xef01;
4155				block->un.sia.csr14 = 0x7f3d;
4156				block->un.sia.csr15 = 0x0008;
4157				break;
4158			case MEDIA_BNC:
4159				block->un.sia.csr13 = 0xef09;
4160				block->un.sia.csr14 = 0x0705;
4161				block->un.sia.csr15 = 0x0006;
4162				break;
4163			case MEDIA_AUI:
4164				block->un.sia.csr13 = 0xef09;
4165				block->un.sia.csr14 = 0x0705;
4166				block->un.sia.csr15 = 0x000e;
4167				break;
4168			}
4169		}
4170	} else  if (*vi & 0x80) {  /* Extended format: Section 4.3.2.2 */
4171		int blocklen = *vi++ & 0x7f;
4172		block->type = *vi++;
4173		switch (block->type) {
4174		case 0: /* "non-MII": Section 4.3.2.2.1 */
4175			block->media_code = (*vi++) & 0x3f;
4176			block->gprseqlen = 1;
4177			block->gprseq[0] = *vi++;
4178			block->command = *vi++;
4179			block->command |= *vi++ << 8;
4180			break;
4181
4182		case 1: /* MII/PHY: Section 4.3.2.2.2 */
4183			block->command = CMD_PS;
4184			block->media_code = MEDIA_MII;
4185				/* This is what's needed in CSR6 */
4186
4187			block->un.mii.phy_num = *vi++;
4188			block->gprseqlen = *vi++;
4189
4190			for (i = 0; i < block->gprseqlen; i++)
4191				block->gprseq[i] = *vi++;
4192			block->rstseqlen = *vi++;
4193			for (i = 0; i < block->rstseqlen; i++)
4194				block->rstseq[i] = *vi++;
4195
4196			block->un.mii.mediacaps = *vi++;
4197			block->un.mii.mediacaps |= *vi++ << 8;
4198			block->un.mii.nwayadvert = *vi++;
4199			block->un.mii.nwayadvert |= *vi++ << 8;
4200			block->un.mii.fdxmask = *vi++;
4201			block->un.mii.fdxmask |= *vi++ << 8;
4202			block->un.mii.ttmmask = *vi++;
4203			block->un.mii.ttmmask |= *vi++ << 8;
4204			break;
4205
4206		case 2: /* SIA Media: Section 4.4.2.1.1 */
4207			block->media_code = *vi & 0x3f;
4208			if (*vi++ & 0x40) {
4209				block->un.sia.csr13 = *vi++;
4210				block->un.sia.csr13 |= *vi++ << 8;
4211				block->un.sia.csr14 = *vi++;
4212				block->un.sia.csr14 |= *vi++ << 8;
4213				block->un.sia.csr15 = *vi++;
4214				block->un.sia.csr15 |= *vi++ << 8;
4215			} else {
4216				/*
4217				 * SIA values not provided by SROM; provide
4218				 * defaults. See appendix D of 2114[23] manuals.
4219				 */
4220				switch (block->media_code) {
4221				case MEDIA_BNC:
4222					block->un.sia.csr13 = 0x0009;
4223					block->un.sia.csr14 = 0x0705;
4224					block->un.sia.csr15 = 0x0000;
4225					break;
4226				case MEDIA_AUI:
4227					block->un.sia.csr13 = 0x0009;
4228					block->un.sia.csr14 = 0x0705;
4229					block->un.sia.csr15 = 0x0008;
4230					break;
4231				case MEDIA_TP:
4232					block->un.sia.csr13 = 0x0001;
4233					block->un.sia.csr14 = 0x7f3f;
4234					block->un.sia.csr15 = 0x0000;
4235					break;
4236				case MEDIA_TP_FD:
4237					block->un.sia.csr13 = 0x0001;
4238					block->un.sia.csr14 = 0x7f3d;
4239					block->un.sia.csr15 = 0x0000;
4240					break;
4241				default:
4242					block->un.sia.csr13 = 0x0000;
4243					block->un.sia.csr14 = 0x0000;
4244					block->un.sia.csr15 = 0x0000;
4245				}
4246			}
4247
4248			/* Treat GP control/data as a GPR sequence */
4249			block->gprseqlen = 2;
4250			block->gprseq[0] = *vi++;
4251			block->gprseq[0] |= *vi++ << 8;
4252			block->gprseq[0] |= GPR_CONTROL_WRITE;
4253			block->gprseq[1] = *vi++;
4254			block->gprseq[1] |= *vi++ << 8;
4255			break;
4256
4257		case 3: /* MII/PHY : Section 4.4.2.1.2 */
4258			block->command = CMD_PS;
4259			block->media_code = MEDIA_MII;
4260			block->un.mii.phy_num = *vi++;
4261
4262			block->gprseqlen = *vi++;
4263			for (i = 0; i < block->gprseqlen; i++) {
4264				block->gprseq[i] = *vi++;
4265				block->gprseq[i] |= *vi++ << 8;
4266			}
4267
4268			block->rstseqlen = *vi++;
4269			for (i = 0; i < block->rstseqlen; i++) {
4270				block->rstseq[i] = *vi++;
4271				block->rstseq[i] |= *vi++ << 8;
4272			}
4273			block->un.mii.mediacaps = *vi++;
4274			block->un.mii.mediacaps |= *vi++ << 8;
4275			block->un.mii.nwayadvert = *vi++;
4276			block->un.mii.nwayadvert |= *vi++ << 8;
4277			block->un.mii.fdxmask = *vi++;
4278			block->un.mii.fdxmask |= *vi++ << 8;
4279			block->un.mii.ttmmask = *vi++;
4280			block->un.mii.ttmmask |= *vi++ << 8;
4281			block->un.mii.miiintr |= *vi++;
4282			break;
4283
4284		case 4: /* SYM Media: 4.5.2.1.3 */
4285			block->media_code = *vi++ & 0x3f;
4286			/* Treat GP control and data as a GPR sequence */
4287			block->gprseqlen = 2;
4288			block->gprseq[0] = *vi++;
4289			block->gprseq[0] |= *vi++ << 8;
4290			block->gprseq[0] |= GPR_CONTROL_WRITE;
4291			block->gprseq[1]  = *vi++;
4292			block->gprseq[1] |= *vi++ << 8;
4293			block->command = *vi++;
4294			block->command |= *vi++ << 8;
4295			break;
4296
4297		case 5: /* GPR reset sequence:  Section 4.5.2.1.4 */
4298			block->rstseqlen = *vi++;
4299			for (i = 0; i < block->rstseqlen; i++)
4300				block->rstseq[i] = *vi++;
4301			break;
4302
4303		default: /* Unknown media block. Skip it. */
4304			cmn_err(CE_WARN, "dnet: Unsupported SROM block.");
4305			vi += blocklen;
4306			break;
4307		}
4308	} else { /* Compact format (or V1 SROM): Section 4.3.2.1 */
4309		block->type = 0;
4310		block->media_code = *vi++ & 0x3f;
4311		block->gprseqlen = 1;
4312		block->gprseq[0] = *vi++;
4313		block->command = *vi++;
4314		block->command |= (*vi++) << 8;
4315	}
4316	return (vi);
4317}
4318
4319
4320/*
4321 * An alternative to doing this would be to store the legacy ROMs in binary
4322 * format in the conf file, and in read_srom, pick out the data. That would
4323 * then allow the parser to continue on as normal. Building the fake SROMs
4324 * statically here is a little easier to read.
4325 */
4326static void
4327setup_legacy_blocks()
4328{
4329	LEAF_FORMAT *leaf;
4330	media_block_t *block;
4331
4332	/* Default FAKE SROM */
4333	leaf = &leaf_default_100;
4334	leaf->is_static = 1;
4335	leaf->default_block = &leaf->block[3];
4336	leaf->block_count = 4; /* 100 cards are highly unlikely to have BNC */
4337	block = leaf->block;
4338	block->media_code = MEDIA_TP_FD;
4339	block->type = 0;
4340	block->command = 0x8e;  /* PCS, PS off, media sense: bit7, pol=1 */
4341	block++;
4342	block->media_code = MEDIA_TP;
4343	block->type = 0;
4344	block->command = 0x8e;  /* PCS, PS off, media sense: bit7, pol=1 */
4345	block++;
4346	block->media_code = MEDIA_SYM_SCR_FD;
4347	block->type = 0;
4348	block->command = 0x6d;  /* PCS, PS, SCR on, media sense: bit6, pol=0 */
4349	block++;
4350	block->media_code = MEDIA_SYM_SCR;
4351	block->type = 0;
4352	block->command = 0x406d; /* PCS, PS, SCR on, media sense: bit6, pol=0 */
4353
4354	/* COGENT FAKE SROM */
4355	leaf = &leaf_cogent_100;
4356	leaf->is_static = 1;
4357	leaf->default_block = &leaf->block[4];
4358	leaf->block_count = 5; /* 100TX, 100TX-FD, 10T, 10T-FD, BNC */
4359	block = leaf->block; /* BNC */
4360	block->media_code = MEDIA_BNC;
4361	block->type = 0;
4362	block->command =  0x8000; /* No media sense, PCS, SCR, PS all off */
4363	block->gprseqlen = 2;
4364	block->rstseqlen = 0;
4365	block->gprseq[0] = 0x13f;
4366	block->gprseq[1] = 1;
4367
4368	block++;
4369	block->media_code = MEDIA_TP_FD;
4370	block->type = 0;
4371	block->command = 0x8e;  /* PCS, PS off, media sense: bit7, pol=1 */
4372	block->gprseqlen = 2;
4373	block->rstseqlen = 0;
4374	block->gprseq[0] = 0x13f;
4375	block->gprseq[1] = 0x26;
4376
4377	block++; /* 10BaseT */
4378	block->media_code = MEDIA_TP;
4379	block->type = 0;
4380	block->command = 0x8e;  /* PCS, PS off, media sense: bit7, pol=1 */
4381	block->gprseqlen = 2;
4382	block->rstseqlen = 0;
4383	block->gprseq[0] = 0x13f;
4384	block->gprseq[1] = 0x3e;
4385
4386	block++; /* 100BaseTX-FD */
4387	block->media_code = MEDIA_SYM_SCR_FD;
4388	block->type = 0;
4389	block->command = 0x6d;  /* PCS, PS, SCR on, media sense: bit6, pol=0 */
4390	block->gprseqlen = 2;
4391	block->rstseqlen = 0;
4392	block->gprseq[0] = 0x13f;
4393	block->gprseq[1] = 1;
4394
4395	block++; /* 100BaseTX */
4396	block->media_code = MEDIA_SYM_SCR;
4397	block->type = 0;
4398	block->command = 0x406d; /* PCS, PS, SCR on, media sense: bit6, pol=0 */
4399	block->gprseqlen = 2;
4400	block->rstseqlen = 0;
4401	block->gprseq[0] = 0x13f;
4402	block->gprseq[1] = 1;
4403
4404	/* Generic legacy card with a PHY. */
4405	leaf = &leaf_phylegacy;
4406	leaf->block_count = 1;
4407	leaf->mii_block = leaf->block;
4408	leaf->default_block = &leaf->block[0];
4409	leaf->is_static = 1;
4410	block = leaf->block;
4411	block->media_code = MEDIA_MII;
4412	block->type = 1; /* MII Block type 1 */
4413	block->command = 1; /* Port select */
4414	block->gprseqlen = 0;
4415	block->rstseqlen = 0;
4416
4417	/* ASANTE FAKE SROM */
4418	leaf = &leaf_asante;
4419	leaf->is_static = 1;
4420	leaf->default_block = &leaf->block[0];
4421	leaf->block_count = 1;
4422	block = leaf->block;
4423	block->media_code = MEDIA_MII;
4424	block->type = 1; /* MII Block type 1 */
4425	block->command = 1; /* Port select */
4426	block->gprseqlen = 3;
4427	block->rstseqlen = 0;
4428	block->gprseq[0] = 0x180;
4429	block->gprseq[1] = 0x80;
4430	block->gprseq[2] = 0x0;
4431
4432	/* LEGACY 21041 card FAKE SROM */
4433	leaf = &leaf_21041;
4434	leaf->is_static = 1;
4435	leaf->block_count = 4;  /* SIA Blocks for TP, TPfd, BNC, AUI */
4436	leaf->default_block = &leaf->block[3];
4437
4438	block = leaf->block;
4439	block->media_code = MEDIA_AUI;
4440	block->type = 2;
4441	block->un.sia.csr13 = 0xef09;
4442	block->un.sia.csr14 = 0x0705;
4443	block->un.sia.csr15 = 0x000e;
4444
4445	block++;
4446	block->media_code = MEDIA_TP_FD;
4447	block->type = 2;
4448	block->un.sia.csr13 = 0xef01;
4449	block->un.sia.csr14 = 0x7f3d;
4450	block->un.sia.csr15 = 0x0008;
4451
4452	block++;
4453	block->media_code = MEDIA_BNC;
4454	block->type = 2;
4455	block->un.sia.csr13 = 0xef09;
4456	block->un.sia.csr14 = 0x0705;
4457	block->un.sia.csr15 = 0x0006;
4458
4459	block++;
4460	block->media_code = MEDIA_TP;
4461	block->type = 2;
4462	block->un.sia.csr13 = 0xef01;
4463	block->un.sia.csr14 = 0x7f3f;
4464	block->un.sia.csr15 = 0x0008;
4465
4466	/* LEGACY 21040 card FAKE SROM */
4467	leaf = &leaf_21040;
4468	leaf->is_static = 1;
4469	leaf->block_count = 4;  /* SIA Blocks for TP, TPfd, BNC, AUI */
4470	block = leaf->block;
4471	block->media_code = MEDIA_AUI;
4472	block->type = 2;
4473	block->un.sia.csr13 = 0x8f09;
4474	block->un.sia.csr14 = 0x0705;
4475	block->un.sia.csr15 = 0x000e;
4476	block++;
4477	block->media_code = MEDIA_TP_FD;
4478	block->type = 2;
4479	block->un.sia.csr13 = 0x0f01;
4480	block->un.sia.csr14 = 0x7f3d;
4481	block->un.sia.csr15 = 0x0008;
4482	block++;
4483	block->media_code = MEDIA_BNC;
4484	block->type = 2;
4485	block->un.sia.csr13 = 0xef09;
4486	block->un.sia.csr14 = 0x0705;
4487	block->un.sia.csr15 = 0x0006;
4488	block++;
4489	block->media_code = MEDIA_TP;
4490	block->type = 2;
4491	block->un.sia.csr13 = 0x8f01;
4492	block->un.sia.csr14 = 0x7f3f;
4493	block->un.sia.csr15 = 0x0008;
4494}
4495
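/* Debug routines to dump a parsed SROM structure to the console */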
4496static void
4497dnet_print_srom(SROM_FORMAT *sr)
4498{
4499	int i;
4500	uchar_t *a = sr->netaddr;
4501	cmn_err(CE_NOTE, "SROM Dump: %d. ver %d, Num adapters %d, "
4502	    "Addr:%x:%x:%x:%x:%x:%x",
4503	    sr->init_from_srom, sr->version, sr->adapters,
4504	    a[0], a[1], a[2], a[3], a[4], a[5]);
4505
4506	for (i = 0; i < sr->adapters; i++)
4507		dnet_dump_leaf(sr->leaf+i);
4508}
4509
4510static void
4511dnet_dump_leaf(LEAF_FORMAT *leaf)
4512{
4513	int i;
4514	cmn_err(CE_NOTE, "Leaf: Device %d, block_count %d, gpr: %x",
4515	    leaf->device_number, leaf->block_count, leaf->gpr);
4516	for (i = 0; i < leaf->block_count; i++)
4517		dnet_dump_block(leaf->block+i);
4518}
4519
4520static void
4521dnet_dump_block(media_block_t *block)
4522{
4523	cmn_err(CE_NOTE, "Block(%p): type %x, media %s, command: %x ",
4524	    (void *)block,
4525	    block->type, media_str[block->media_code], block->command);
4526	dnet_dumpbin("\tGPR Seq", (uchar_t *)block->gprseq, 2,
4527	    block->gprseqlen * 2);
4528	dnet_dumpbin("\tGPR Reset", (uchar_t *)block->rstseq, 2,
4529	    block->rstseqlen * 2);
4530	switch (block->type) {
4531	case 1: case 3:
4532		cmn_err(CE_NOTE, "\tMII Info: phy %d, nway %x, fdx %x, "
4533		    "ttm %x, mediacap %x",
4534		    block->un.mii.phy_num, block->un.mii.nwayadvert,
4535		    block->un.mii.fdxmask, block->un.mii.ttmmask,
4536		    block->un.mii.mediacaps);
4537		break;
4538	case 2:
4539		cmn_err(CE_NOTE, "\tSIA Regs: CSR13:%x, CSR14:%x, CSR15:%x",
4540		    block->un.sia.csr13, block->un.sia.csr14,
4541		    block->un.sia.csr15);
4542		break;
4543	}
4544}
4545
4546
4547/* Utility to print out binary info dumps. Handy for SROMs, etc */
4548
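/* Convert a nibble (0-15) to its ASCII hex digit, or -1 if out of range */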
4549static int
4550hexcode(unsigned val)
4551{
4552	if (val <= 9)
4553		return (val +'0');
4554	if (val <= 15)
4555		return (val + 'a' - 10);
4556	return (-1);
4557}
4558
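/*
 * Dump 'len' bytes at 'data' as hex and ASCII, grouped into elements of
 * 'size' bytes (printed most-significant byte first), eight bytes per
 * output line, each line prefixed with 'msg'.
 */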
4559static void
4560dnet_dumpbin(char *msg, unsigned char *data, int size, int len)
4561{
4562	char hex[128], *p = hex;
4563	char ascii[128], *q = ascii;
4564	int i, j;
4565
4566	if (!len)
4567		return;
4568
4569	for (i = 0; i < len; i += size) {
4570		for (j = size - 1; j >= 0; j--) { /* PORTABILITY: byte order */
4571			*p++ = hexcode(data[i+j] >> 4);
4572			*p++ = hexcode(data[i+j] & 0xf);
4573			*q++ = (data[i+j] < 32 || data[i+j] > 127) ?
4574			    '.' : data[i+j];
4575		}
4576		*p++ = ' ';
4577		if (q-ascii >= 8) {
4578			*p = *q = 0;
4579			cmn_err(CE_NOTE, "%s: %s\t%s", msg, hex, ascii);
4580			p = hex;
4581			q = ascii;
4582		}
4583	}
4584	if (p != hex) {
4585		while ((p - hex) < 8*3)
4586			*p++ = ' ';
4587		*p = *q = 0;
4588		cmn_err(CE_NOTE, "%s: %s\t%s", msg, hex, ascii);
4589	}
4590}
4591
4592#ifdef DNETDEBUG
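/*
 * Debug-only timing helpers based on the chip's general-purpose timer.
 * The usecs*100/8192 scaling means one timer tick is treated as 81.92us.
 */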
4593void
4594dnet_usectimeout(struct dnetinstance *dnetp, uint32_t usecs, int contin,
4595    timercb_t cback)
4596{
4597	mutex_enter(&dnetp->intrlock);
4598	dnetp->timer.start_ticks = (usecs * 100) / 8192;
4599	dnetp->timer.cb = cback;
4600	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, GP_TIMER_REG),
4601	    dnetp->timer.start_ticks | (contin ? GPTIMER_CONT : 0));
4602	if (dnetp->timer.cb)
4603		enable_interrupts(dnetp);
4604	mutex_exit(&dnetp->intrlock);
4605}
4606
4607uint32_t
4608dnet_usecelapsed(struct dnetinstance *dnetp)
4609{
4610	uint32_t ticks = dnetp->timer.start_ticks -
4611	    (ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, GP_TIMER_REG)) &
4612	    0xffff);
4613	return ((ticks * 8192) / 100);
4614}
4615
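/*
 * Append the elapsed general-purpose timer time (in microseconds) to the
 * caller's message, print it, and restart a one-second timeout.
 */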
4616/* ARGSUSED */
4617void
4618dnet_timestamp(struct dnetinstance *dnetp,  char *buf)
4619{
4620	uint32_t elapsed = dnet_usecelapsed(dnetp);
4621	char loc[32], *p = loc;
4622	int firstdigit = 1;
4623	uint32_t divisor;
4624
4625	while (*p++ = *buf++)
4626		;
4627	p--;
4628
4629	for (divisor = 1000000000; divisor /= 10; ) {
4630		int digit = (elapsed / divisor);
4631		elapsed -= digit * divisor;
4632		if (!firstdigit || digit) {
4633			*p++ = digit + '0';
4634			firstdigit = 0;
4635		}
4636
4637	}
4638
4639	/* Actual zero, output it */
4640	if (firstdigit)
4641		*p++ = '0';
4642
4643	*p++ = '-';
4644	*p++ = '>';
4645	*p++ = 0;
4646
4647	printf(loc);
4648	dnet_usectimeout(dnetp, 1000000, 0, 0);
4649}
4650
4651#endif
4652