1/*	$NetBSD: if_wm.c,v 1.227.2.8 2013/02/18 18:05:29 riz Exp $	*/
2
3/*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed for the NetBSD Project by
20 *	Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 *    or promote products derived from this software without specific prior
23 *    written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38/*******************************************************************************
39
40  Copyright (c) 2001-2005, Intel Corporation
41  All rights reserved.
42
43  Redistribution and use in source and binary forms, with or without
44  modification, are permitted provided that the following conditions are met:
45
46   1. Redistributions of source code must retain the above copyright notice,
47      this list of conditions and the following disclaimer.
48
49   2. Redistributions in binary form must reproduce the above copyright
50      notice, this list of conditions and the following disclaimer in the
51      documentation and/or other materials provided with the distribution.
52
53   3. Neither the name of the Intel Corporation nor the names of its
54      contributors may be used to endorse or promote products derived from
55      this software without specific prior written permission.
56
57  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67  POSSIBILITY OF SUCH DAMAGE.
68
69*******************************************************************************/
70/*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 *
73 * TODO (in order of importance):
74 *
75 *	- Rework how parameters are loaded from the EEPROM.
76 */
77
78#include <sys/cdefs.h>
79__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.227.2.8 2013/02/18 18:05:29 riz Exp $");
80
81#include <sys/param.h>
82#include <sys/systm.h>
83#include <sys/callout.h>
84#include <sys/mbuf.h>
85#include <sys/malloc.h>
86#include <sys/kernel.h>
87#include <sys/socket.h>
88#include <sys/ioctl.h>
89#include <sys/errno.h>
90#include <sys/device.h>
91#include <sys/queue.h>
92#include <sys/syslog.h>
93
94#include <sys/rnd.h>
95
96#include <net/if.h>
97#include <net/if_dl.h>
98#include <net/if_media.h>
99#include <net/if_ether.h>
100
101#include <net/bpf.h>
102
103#include <netinet/in.h>			/* XXX for struct ip */
104#include <netinet/in_systm.h>		/* XXX for struct ip */
105#include <netinet/ip.h>			/* XXX for struct ip */
106#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
107#include <netinet/tcp.h>		/* XXX for struct tcphdr */
108
109#include <sys/bus.h>
110#include <sys/intr.h>
111#include <machine/endian.h>
112
113#include <dev/mii/mii.h>
114#include <dev/mii/miivar.h>
115#include <dev/mii/miidevs.h>
116#include <dev/mii/mii_bitbang.h>
117#include <dev/mii/ikphyreg.h>
118#include <dev/mii/igphyreg.h>
119#include <dev/mii/igphyvar.h>
120#include <dev/mii/inbmphyreg.h>
121
122#include <dev/pci/pcireg.h>
123#include <dev/pci/pcivar.h>
124#include <dev/pci/pcidevs.h>
125
126#include <dev/pci/if_wmreg.h>
127#include <dev/pci/if_wmvar.h>
128
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

/*
 * Wrapped in do/while(0) so the macro is a single statement; the
 * previous bare "if (wm_debug & (x)) printf y" form would steal the
 * "else" of an enclosing unbraced if/else (dangling-else bug).
 */
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	do { } while (0)	/* nothing */
#endif /* WM_DEBUG */
143
144/*
145 * Transmit descriptor list size.  Due to errata, we can only have
146 * 256 hardware descriptors in the ring on < 82544, but we use 4096
147 * on >= 82544.  We tell the upper layers that they can queue a lot
148 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
149 * of them at a time.
150 *
151 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
152 * chains containing many small mbufs have been observed in zero-copy
153 * situations with jumbo frames.
154 */
155#define	WM_NTXSEGS		256
156#define	WM_IFQUEUELEN		256
157#define	WM_TXQUEUELEN_MAX	64
158#define	WM_TXQUEUELEN_MAX_82547	16
159#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
160#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
161#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
162#define	WM_NTXDESC_82542	256
163#define	WM_NTXDESC_82544	4096
164#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
165#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
166#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
167#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
168#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
169
170#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
171
172/*
173 * Receive descriptor list size.  We have one Rx buffer for normal
174 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
175 * packet.  We allocate 256 receive descriptors, each with a 2k
176 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
177 */
178#define	WM_NRXDESC		256
179#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
180#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
181#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
182
183/*
184 * Control structures are DMA'd to the i82542 chip.  We allocate them in
185 * a single clump that maps to a single DMA segment to make several things
186 * easier.
187 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 *
	 * The legacy (wiseman) and nq descriptor formats overlay the
	 * same storage; only one format is in use at a time.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};
203
/* Smaller layout for pre-82544 chips (256-entry Tx ring errata). */
struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

/*
 * Offsets are computed against the 82544 layout.  The Rx descriptors
 * are at offset 0 in both layouts, so WM_CDRXOFF is valid for either.
 */
#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
212
213/*
214 * Software state for transmit jobs.
215 */
216struct wm_txsoft {
217	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
218	bus_dmamap_t txs_dmamap;	/* our DMA map */
219	int txs_firstdesc;		/* first descriptor in packet */
220	int txs_lastdesc;		/* last descriptor in packet */
221	int txs_ndesc;			/* # of descriptors used */
222};
223
224/*
225 * Software state for receive buffers.  Each descriptor gets a
226 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
227 * more than one buffer, we chain them together.
228 */
229struct wm_rxsoft {
230	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
231	bus_dmamap_t rxs_dmamap;	/* our DMA map */
232};
233
234#define WM_LINKUP_TIMEOUT	50
235
236static uint16_t swfwphysem[] = {
237	SWFW_PHY0_SM,
238	SWFW_PHY1_SM,
239	SWFW_PHY2_SM,
240	SWFW_PHY3_SM
241};
242
243/*
244 * Software state per device.
245 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;		/* Rx buffer "scoot" offset; see
					   WM_INIT_RXDESC */

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	/* ICH8 flash (NVM) geometry. */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;		/* bytes accumulated in the current Rx
					   chain (reset by WM_RXCHAIN_RESET) */
	/* Rx mbuf chain being assembled; see WM_RXCHAIN_*. */
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};
384
/* Reset the Rx mbuf-chain accumulator: empty chain, zero length. */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/* Append mbuf (m) to the Rx chain currently being assembled. */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
/* Event counters compiled out. */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif
405
/* 32-bit memory-mapped CSR accessors. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
/* Force posted writes out by reading back STATUS. */
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

/* ICH8 flash register accessors (the NVM lives behind these). */
#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

/* Bus address of Tx/Rx descriptor (x) within the control-data area. */
#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

/*
 * Low/high 32-bit halves of a descriptor address; the high half is 0
 * when bus_addr_t is 32 bits wide.
 */
#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
435
/*
 * DMA-sync (n) Tx descriptors starting at index (x).  A range that
 * wraps the ring is synced with two bus_dmamap_sync calls: tail of
 * the ring first, then the remainder from index 0.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/* DMA-sync a single Rx descriptor. */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
462
/*
 * (Re)initialize Rx descriptor (x) to point at its mbuf's buffer and
 * hand it back to the chip by advancing the RDT register.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	/* Give the descriptor to the chip: bump the Rx tail pointer. */ \
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
496
/* ifnet entry points and driver life-cycle. */
static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

/* Reset, Rx buffer management, EEPROM/NVM and MAC-address access. */
static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

/* Interrupt service and its Tx/Rx/link helpers. */
static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

/* TBI media handling. */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

/* GMII/PHY register accessors, one flavor per MAC generation. */
static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

/* KMRN register access. */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

/* Autoconfiguration, NVM detection, and HW/SW/FW semaphores. */
static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

/* ICH8 flash-resident NVM readers. */
static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		     uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
/* Manageability, PHY workarounds, and power management. */
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);
615
/* Autoconfiguration glue: registers match/attach/detach for "wm". */
CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
618
619/*
620 * Devices supported by this driver.
621 */
622static const struct wm_product {
623	pci_vendor_id_t		wmp_vendor;
624	pci_product_id_t	wmp_product;
625	const char		*wmp_name;
626	wm_chip_type		wmp_type;
627	int			wmp_flags;
628#define	WMP_F_1000X		0x01
629#define	WMP_F_1000T		0x02
630#define	WMP_F_SERDES		0x04
631} wm_products[] = {
632	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
633	  "Intel i82542 1000BASE-X Ethernet",
634	  WM_T_82542_2_1,	WMP_F_1000X },
635
636	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
637	  "Intel i82543GC 1000BASE-X Ethernet",
638	  WM_T_82543,		WMP_F_1000X },
639
640	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
641	  "Intel i82543GC 1000BASE-T Ethernet",
642	  WM_T_82543,		WMP_F_1000T },
643
644	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
645	  "Intel i82544EI 1000BASE-T Ethernet",
646	  WM_T_82544,		WMP_F_1000T },
647
648	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
649	  "Intel i82544EI 1000BASE-X Ethernet",
650	  WM_T_82544,		WMP_F_1000X },
651
652	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
653	  "Intel i82544GC 1000BASE-T Ethernet",
654	  WM_T_82544,		WMP_F_1000T },
655
656	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
657	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
658	  WM_T_82544,		WMP_F_1000T },
659
660	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
661	  "Intel i82540EM 1000BASE-T Ethernet",
662	  WM_T_82540,		WMP_F_1000T },
663
664	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
665	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
666	  WM_T_82540,		WMP_F_1000T },
667
668	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
669	  "Intel i82540EP 1000BASE-T Ethernet",
670	  WM_T_82540,		WMP_F_1000T },
671
672	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
673	  "Intel i82540EP 1000BASE-T Ethernet",
674	  WM_T_82540,		WMP_F_1000T },
675
676	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
677	  "Intel i82540EP 1000BASE-T Ethernet",
678	  WM_T_82540,		WMP_F_1000T },
679
680	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
681	  "Intel i82545EM 1000BASE-T Ethernet",
682	  WM_T_82545,		WMP_F_1000T },
683
684	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
685	  "Intel i82545GM 1000BASE-T Ethernet",
686	  WM_T_82545_3,		WMP_F_1000T },
687
688	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
689	  "Intel i82545GM 1000BASE-X Ethernet",
690	  WM_T_82545_3,		WMP_F_1000X },
691#if 0
692	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
693	  "Intel i82545GM Gigabit Ethernet (SERDES)",
694	  WM_T_82545_3,		WMP_F_SERDES },
695#endif
696	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
697	  "Intel i82546EB 1000BASE-T Ethernet",
698	  WM_T_82546,		WMP_F_1000T },
699
700	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
701	  "Intel i82546EB 1000BASE-T Ethernet",
702	  WM_T_82546,		WMP_F_1000T },
703
704	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
705	  "Intel i82545EM 1000BASE-X Ethernet",
706	  WM_T_82545,		WMP_F_1000X },
707
708	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
709	  "Intel i82546EB 1000BASE-X Ethernet",
710	  WM_T_82546,		WMP_F_1000X },
711
712	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
713	  "Intel i82546GB 1000BASE-T Ethernet",
714	  WM_T_82546_3,		WMP_F_1000T },
715
716	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
717	  "Intel i82546GB 1000BASE-X Ethernet",
718	  WM_T_82546_3,		WMP_F_1000X },
719#if 0
720	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
721	  "Intel i82546GB Gigabit Ethernet (SERDES)",
722	  WM_T_82546_3,		WMP_F_SERDES },
723#endif
724	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
725	  "i82546GB quad-port Gigabit Ethernet",
726	  WM_T_82546_3,		WMP_F_1000T },
727
728	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
729	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
730	  WM_T_82546_3,		WMP_F_1000T },
731
732	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
733	  "Intel PRO/1000MT (82546GB)",
734	  WM_T_82546_3,		WMP_F_1000T },
735
736	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
737	  "Intel i82541EI 1000BASE-T Ethernet",
738	  WM_T_82541,		WMP_F_1000T },
739
740	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
741	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
742	  WM_T_82541,		WMP_F_1000T },
743
744	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
745	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
746	  WM_T_82541,		WMP_F_1000T },
747
748	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
749	  "Intel i82541ER 1000BASE-T Ethernet",
750	  WM_T_82541_2,		WMP_F_1000T },
751
752	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
753	  "Intel i82541GI 1000BASE-T Ethernet",
754	  WM_T_82541_2,		WMP_F_1000T },
755
756	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
757	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
758	  WM_T_82541_2,		WMP_F_1000T },
759
760	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
761	  "Intel i82541PI 1000BASE-T Ethernet",
762	  WM_T_82541_2,		WMP_F_1000T },
763
764	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
765	  "Intel i82547EI 1000BASE-T Ethernet",
766	  WM_T_82547,		WMP_F_1000T },
767
768	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
769	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
770	  WM_T_82547,		WMP_F_1000T },
771
772	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
773	  "Intel i82547GI 1000BASE-T Ethernet",
774	  WM_T_82547_2,		WMP_F_1000T },
775
776	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
777	  "Intel PRO/1000 PT (82571EB)",
778	  WM_T_82571,		WMP_F_1000T },
779
780	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
781	  "Intel PRO/1000 PF (82571EB)",
782	  WM_T_82571,		WMP_F_1000X },
783#if 0
784	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
785	  "Intel PRO/1000 PB (82571EB)",
786	  WM_T_82571,		WMP_F_SERDES },
787#endif
788	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
789	  "Intel PRO/1000 QT (82571EB)",
790	  WM_T_82571,		WMP_F_1000T },
791
792	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
793	  "Intel i82572EI 1000baseT Ethernet",
794	  WM_T_82572,		WMP_F_1000T },
795
796	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
797	  "Intel PRO/1000 PT Quad Port Server Adapter",
798	  WM_T_82571,		WMP_F_1000T, },
799
800	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
801	  "Intel i82572EI 1000baseX Ethernet",
802	  WM_T_82572,		WMP_F_1000X },
803#if 0
804	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
805	  "Intel i82572EI Gigabit Ethernet (SERDES)",
806	  WM_T_82572,		WMP_F_SERDES },
807#endif
808
809	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
810	  "Intel i82572EI 1000baseT Ethernet",
811	  WM_T_82572,		WMP_F_1000T },
812
813	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
814	  "Intel i82573E",
815	  WM_T_82573,		WMP_F_1000T },
816
817	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
818	  "Intel i82573E IAMT",
819	  WM_T_82573,		WMP_F_1000T },
820
821	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
822	  "Intel i82573L Gigabit Ethernet",
823	  WM_T_82573,		WMP_F_1000T },
824
825	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
826	  "Intel i82574L",
827	  WM_T_82574,		WMP_F_1000T },
828
829	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
830	  "Intel i82583V",
831	  WM_T_82583,		WMP_F_1000T },
832
833	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
834	  "i80003 dual 1000baseT Ethernet",
835	  WM_T_80003,		WMP_F_1000T },
836
837	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
838	  "i80003 dual 1000baseX Ethernet",
839	  WM_T_80003,		WMP_F_1000T },
840#if 0
841	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
842	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
843	  WM_T_80003,		WMP_F_SERDES },
844#endif
845
846	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
847	  "Intel i80003 1000baseT Ethernet",
848	  WM_T_80003,		WMP_F_1000T },
849#if 0
850	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
851	  "Intel i80003 Gigabit Ethernet (SERDES)",
852	  WM_T_80003,		WMP_F_SERDES },
853#endif
854	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
855	  "Intel i82801H (M_AMT) LAN Controller",
856	  WM_T_ICH8,		WMP_F_1000T },
857	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
858	  "Intel i82801H (AMT) LAN Controller",
859	  WM_T_ICH8,		WMP_F_1000T },
860	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
861	  "Intel i82801H LAN Controller",
862	  WM_T_ICH8,		WMP_F_1000T },
863	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
864	  "Intel i82801H (IFE) LAN Controller",
865	  WM_T_ICH8,		WMP_F_1000T },
866	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
867	  "Intel i82801H (M) LAN Controller",
868	  WM_T_ICH8,		WMP_F_1000T },
869	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
870	  "Intel i82801H IFE (GT) LAN Controller",
871	  WM_T_ICH8,		WMP_F_1000T },
872	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
873	  "Intel i82801H IFE (G) LAN Controller",
874	  WM_T_ICH8,		WMP_F_1000T },
875	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
876	  "82801I (AMT) LAN Controller",
877	  WM_T_ICH9,		WMP_F_1000T },
878	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
879	  "82801I LAN Controller",
880	  WM_T_ICH9,		WMP_F_1000T },
881	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
882	  "82801I (G) LAN Controller",
883	  WM_T_ICH9,		WMP_F_1000T },
884	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
885	  "82801I (GT) LAN Controller",
886	  WM_T_ICH9,		WMP_F_1000T },
887	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
888	  "82801I (C) LAN Controller",
889	  WM_T_ICH9,		WMP_F_1000T },
890	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
891	  "82801I mobile LAN Controller",
892	  WM_T_ICH9,		WMP_F_1000T },
893	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
894	  "82801I mobile (V) LAN Controller",
895	  WM_T_ICH9,		WMP_F_1000T },
896	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
897	  "82801I mobile (AMT) LAN Controller",
898	  WM_T_ICH9,		WMP_F_1000T },
899	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
900	  "82567LM-4 LAN Controller",
901	  WM_T_ICH9,		WMP_F_1000T },
902	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
903	  "82567V-3 LAN Controller",
904	  WM_T_ICH9,		WMP_F_1000T },
905	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
906	  "82567LM-2 LAN Controller",
907	  WM_T_ICH10,		WMP_F_1000T },
908	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
909	  "82567LF-2 LAN Controller",
910	  WM_T_ICH10,		WMP_F_1000T },
911	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
912	  "82567LM-3 LAN Controller",
913	  WM_T_ICH10,		WMP_F_1000T },
914	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
915	  "82567LF-3 LAN Controller",
916	  WM_T_ICH10,		WMP_F_1000T },
917	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
918	  "82567V-2 LAN Controller",
919	  WM_T_ICH10,		WMP_F_1000T },
920	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
921	  "82567V-3? LAN Controller",
922	  WM_T_ICH10,		WMP_F_1000T },
923	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
924	  "HANKSVILLE LAN Controller",
925	  WM_T_ICH10,		WMP_F_1000T },
926	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
927	  "PCH LAN (82577LM) Controller",
928	  WM_T_PCH,		WMP_F_1000T },
929	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
930	  "PCH LAN (82577LC) Controller",
931	  WM_T_PCH,		WMP_F_1000T },
932	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
933	  "PCH LAN (82578DM) Controller",
934	  WM_T_PCH,		WMP_F_1000T },
935	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
936	  "PCH LAN (82578DC) Controller",
937	  WM_T_PCH,		WMP_F_1000T },
938	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
939	  "PCH2 LAN (82579LM) Controller",
940	  WM_T_PCH2,		WMP_F_1000T },
941	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
942	  "PCH2 LAN (82579V) Controller",
943	  WM_T_PCH2,		WMP_F_1000T },
944	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
945	  "82575EB dual-1000baseT Ethernet",
946	  WM_T_82575,		WMP_F_1000T },
947#if 0
948	/*
949	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
950	 * disabled for now ...
951	 */
952	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
953	  "82575EB dual-1000baseX Ethernet (SERDES)",
954	  WM_T_82575,		WMP_F_SERDES },
955#endif
956	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
957	  "82575GB quad-1000baseT Ethernet",
958	  WM_T_82575,		WMP_F_1000T },
959	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
960	  "82575GB quad-1000baseT Ethernet (PM)",
961	  WM_T_82575,		WMP_F_1000T },
962	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
963	  "82576 1000BaseT Ethernet",
964	  WM_T_82576,		WMP_F_1000T },
965	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
966	  "82576 1000BaseX Ethernet",
967	  WM_T_82576,		WMP_F_1000X },
968#if 0
969	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
970	  "82576 gigabit Ethernet (SERDES)",
971	  WM_T_82576,		WMP_F_SERDES },
972#endif
973	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
974	  "82576 quad-1000BaseT Ethernet",
975	  WM_T_82576,		WMP_F_1000T },
976	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
977	  "82576 gigabit Ethernet",
978	  WM_T_82576,		WMP_F_1000T },
979#if 0
980	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
981	  "82576 gigabit Ethernet (SERDES)",
982	  WM_T_82576,		WMP_F_SERDES },
983	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
984	  "82576 quad-gigabit Ethernet (SERDES)",
985	  WM_T_82576,		WMP_F_SERDES },
986#endif
987	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
988	  "82580 1000BaseT Ethernet",
989	  WM_T_82580,		WMP_F_1000T },
990	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
991	  "82580 1000BaseX Ethernet",
992	  WM_T_82580,		WMP_F_1000X },
993#if 0
994	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
995	  "82580 1000BaseT Ethernet (SERDES)",
996	  WM_T_82580,		WMP_F_SERDES },
997#endif
998	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
999	  "82580 gigabit Ethernet (SGMII)",
1000	  WM_T_82580,		WMP_F_1000T },
1001	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1002	  "82580 dual-1000BaseT Ethernet",
1003	  WM_T_82580,		WMP_F_1000T },
1004	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
1005	  "82580 1000BaseT Ethernet",
1006	  WM_T_82580ER,		WMP_F_1000T },
1007	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
1008	  "82580 dual-1000BaseT Ethernet",
1009	  WM_T_82580ER,		WMP_F_1000T },
1010	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1011	  "82580 quad-1000BaseX Ethernet",
1012	  WM_T_82580,		WMP_F_1000X },
1013	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1014	  "I350 Gigabit Network Connection",
1015	  WM_T_I350,		WMP_F_1000T },
1016	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1017	  "I350 Gigabit Fiber Network Connection",
1018	  WM_T_I350,		WMP_F_1000X },
1019#if 0
1020	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1021	  "I350 Gigabit Backplane Connection",
1022	  WM_T_I350,		WMP_F_SERDES },
1023	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1024	  "I350 Gigabit Connection",
1025	  WM_T_I350,		WMP_F_1000T },
1026#endif
1027	{ 0,			0,
1028	  NULL,
1029	  0,			0 },
1030};
1031
#ifdef WM_EVENT_COUNTERS
/*
 * Per-Tx-segment event counter names.  Each slot is sized to hold
 * "txseg" plus up to three digits and the terminating NUL; the
 * strings are presumably generated at attach time -- confirm against
 * the evcnt attach code.
 */
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */
1035
#if 0 /* Not currently used */
/*
 * wm_io_read:
 *
 *	Read a chip register indirectly through the I/O BAR: the
 *	register offset is written to the address window at I/O
 *	offset 0, then the register value is read back from the
 *	data window at I/O offset 4.
 */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
1045
/*
 * wm_io_write:
 *
 *	Write a chip register indirectly through the I/O BAR: the
 *	register offset goes to the address window at I/O offset 0,
 *	then the value goes to the data window at I/O offset 4.
 *	The two writes must be issued in exactly this order.
 */
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
1053
1054static inline void
1055wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1056    uint32_t data)
1057{
1058	uint32_t regval;
1059	int i;
1060
1061	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1062
1063	CSR_WRITE(sc, reg, regval);
1064
1065	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1066		delay(5);
1067		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1068			break;
1069	}
1070	if (i == SCTL_CTL_POLL_TIMEOUT) {
1071		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
1072		    device_xname(sc->sc_dev), reg);
1073	}
1074}
1075
1076static inline void
1077wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1078{
1079	wa->wa_low = htole32(v & 0xffffffffU);
1080	if (sizeof(bus_addr_t) == 8)
1081		wa->wa_high = htole32((uint64_t) v >> 32);
1082	else
1083		wa->wa_high = 0;
1084}
1085
1086static void
1087wm_set_spiaddrbits(struct wm_softc *sc)
1088{
1089	uint32_t reg;
1090
1091	sc->sc_flags |= WM_F_EEPROM_SPI;
1092	reg = CSR_READ(sc, WMREG_EECD);
1093	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1094}
1095
1096static const struct wm_product *
1097wm_lookup(const struct pci_attach_args *pa)
1098{
1099	const struct wm_product *wmp;
1100
1101	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1102		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1103		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1104			return wmp;
1105	}
1106	return NULL;
1107}
1108
1109static int
1110wm_match(device_t parent, cfdata_t cf, void *aux)
1111{
1112	struct pci_attach_args *pa = aux;
1113
1114	if (wm_lookup(pa) != NULL)
1115		return 1;
1116
1117	return 0;
1118}
1119
1120static void
1121wm_attach(device_t parent, device_t self, void *aux)
1122{
1123	struct wm_softc *sc = device_private(self);
1124	struct pci_attach_args *pa = aux;
1125	prop_dictionary_t dict;
1126	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1127	pci_chipset_tag_t pc = pa->pa_pc;
1128	pci_intr_handle_t ih;
1129	const char *intrstr = NULL;
1130	const char *eetype, *xname;
1131	bus_space_tag_t memt;
1132	bus_space_handle_t memh;
1133	bus_size_t memsize;
1134	int memh_valid;
1135	int i, error;
1136	const struct wm_product *wmp;
1137	prop_data_t ea;
1138	prop_number_t pn;
1139	uint8_t enaddr[ETHER_ADDR_LEN];
1140	uint16_t cfg1, cfg2, swdpin, io3;
1141	pcireg_t preg, memtype;
1142	uint16_t eeprom_data, apme_mask;
1143	uint32_t reg;
1144
1145	sc->sc_dev = self;
1146	callout_init(&sc->sc_tick_ch, 0);
1147
1148	sc->sc_wmp = wmp = wm_lookup(pa);
1149	if (wmp == NULL) {
1150		printf("\n");
1151		panic("wm_attach: impossible");
1152	}
1153
1154	sc->sc_pc = pa->pa_pc;
1155	sc->sc_pcitag = pa->pa_tag;
1156
1157	if (pci_dma64_available(pa))
1158		sc->sc_dmat = pa->pa_dmat64;
1159	else
1160		sc->sc_dmat = pa->pa_dmat;
1161
1162	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1163	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1164
1165	sc->sc_type = wmp->wmp_type;
1166	if (sc->sc_type < WM_T_82543) {
1167		if (sc->sc_rev < 2) {
1168			aprint_error_dev(sc->sc_dev,
1169			    "i82542 must be at least rev. 2\n");
1170			return;
1171		}
1172		if (sc->sc_rev < 3)
1173			sc->sc_type = WM_T_82542_2_0;
1174	}
1175
1176	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1177	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
1178	    || (sc->sc_type == WM_T_I350))
1179		sc->sc_flags |= WM_F_NEWQUEUE;
1180
1181	/* Set device properties (mactype) */
1182	dict = device_properties(sc->sc_dev);
1183	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1184
1185	/*
1186	 * Map the device.  All devices support memory-mapped acccess,
1187	 * and it is really required for normal operation.
1188	 */
1189	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1190	switch (memtype) {
1191	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1192	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1193		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1194		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1195		break;
1196	default:
1197		memh_valid = 0;
1198		break;
1199	}
1200
1201	if (memh_valid) {
1202		sc->sc_st = memt;
1203		sc->sc_sh = memh;
1204		sc->sc_ss = memsize;
1205	} else {
1206		aprint_error_dev(sc->sc_dev,
1207		    "unable to map device registers\n");
1208		return;
1209	}
1210
1211	wm_get_wakeup(sc);
1212
1213	/*
1214	 * In addition, i82544 and later support I/O mapped indirect
1215	 * register access.  It is not desirable (nor supported in
1216	 * this driver) to use it for normal operation, though it is
1217	 * required to work around bugs in some chip versions.
1218	 */
1219	if (sc->sc_type >= WM_T_82544) {
1220		/* First we have to find the I/O BAR. */
1221		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1222			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1223			if (memtype == PCI_MAPREG_TYPE_IO)
1224				break;
1225			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1226			    PCI_MAPREG_MEM_TYPE_64BIT)
1227				i += 4;	/* skip high bits, too */
1228		}
1229		if (i < PCI_MAPREG_END) {
1230			/*
1231			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
1232			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
1233			 * It's no problem because newer chips has no this
1234			 * bug.
1235			 *
1236			 * The i8254x doesn't apparently respond when the
1237			 * I/O BAR is 0, which looks somewhat like it's not
1238			 * been configured.
1239			 */
1240			preg = pci_conf_read(pc, pa->pa_tag, i);
1241			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1242				aprint_error_dev(sc->sc_dev,
1243				    "WARNING: I/O BAR at zero.\n");
1244			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1245					0, &sc->sc_iot, &sc->sc_ioh,
1246					NULL, &sc->sc_ios) == 0) {
1247				sc->sc_flags |= WM_F_IOH_VALID;
1248			} else {
1249				aprint_error_dev(sc->sc_dev,
1250				    "WARNING: unable to map I/O space\n");
1251			}
1252		}
1253
1254	}
1255
1256	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1257	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1258	preg |= PCI_COMMAND_MASTER_ENABLE;
1259	if (sc->sc_type < WM_T_82542_2_1)
1260		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1261	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1262
1263	/* power up chip */
1264	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1265	    NULL)) && error != EOPNOTSUPP) {
1266		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1267		return;
1268	}
1269
1270	/*
1271	 * Map and establish our interrupt.
1272	 */
1273	if (pci_intr_map(pa, &ih)) {
1274		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1275		return;
1276	}
1277	intrstr = pci_intr_string(pc, ih);
1278	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1279	if (sc->sc_ih == NULL) {
1280		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1281		if (intrstr != NULL)
1282			aprint_error(" at %s", intrstr);
1283		aprint_error("\n");
1284		return;
1285	}
1286	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1287
1288	/*
1289	 * Check the function ID (unit number of the chip).
1290	 */
1291	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1292	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1293	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1294	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
1295	    || (sc->sc_type == WM_T_I350))
1296		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1297		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1298	else
1299		sc->sc_funcid = 0;
1300
1301	/*
1302	 * Determine a few things about the bus we're connected to.
1303	 */
1304	if (sc->sc_type < WM_T_82543) {
1305		/* We don't really know the bus characteristics here. */
1306		sc->sc_bus_speed = 33;
1307	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1308		/*
1309		 * CSA (Communication Streaming Architecture) is about as fast
1310		 * a 32-bit 66MHz PCI Bus.
1311		 */
1312		sc->sc_flags |= WM_F_CSA;
1313		sc->sc_bus_speed = 66;
1314		aprint_verbose_dev(sc->sc_dev,
1315		    "Communication Streaming Architecture\n");
1316		if (sc->sc_type == WM_T_82547) {
1317			callout_init(&sc->sc_txfifo_ch, 0);
1318			callout_setfunc(&sc->sc_txfifo_ch,
1319					wm_82547_txfifo_stall, sc);
1320			aprint_verbose_dev(sc->sc_dev,
1321			    "using 82547 Tx FIFO stall work-around\n");
1322		}
1323	} else if (sc->sc_type >= WM_T_82571) {
1324		sc->sc_flags |= WM_F_PCIE;
1325		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1326		    && (sc->sc_type != WM_T_ICH10)
1327		    && (sc->sc_type != WM_T_PCH)
1328		    && (sc->sc_type != WM_T_PCH2)) {
1329			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1330			/* ICH* and PCH* have no PCIe capability registers */
1331			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1332				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1333				NULL) == 0)
1334				aprint_error_dev(sc->sc_dev,
1335				    "unable to find PCIe capability\n");
1336		}
1337		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1338	} else {
1339		reg = CSR_READ(sc, WMREG_STATUS);
1340		if (reg & STATUS_BUS64)
1341			sc->sc_flags |= WM_F_BUS64;
1342		if ((reg & STATUS_PCIX_MODE) != 0) {
1343			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1344
1345			sc->sc_flags |= WM_F_PCIX;
1346			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1347				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1348				aprint_error_dev(sc->sc_dev,
1349				    "unable to find PCIX capability\n");
1350			else if (sc->sc_type != WM_T_82545_3 &&
1351				 sc->sc_type != WM_T_82546_3) {
1352				/*
1353				 * Work around a problem caused by the BIOS
1354				 * setting the max memory read byte count
1355				 * incorrectly.
1356				 */
1357				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1358				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
1359				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1360				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);
1361
1362				bytecnt =
1363				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
1364				    PCI_PCIX_CMD_BYTECNT_SHIFT;
1365				maxb =
1366				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
1367				    PCI_PCIX_STATUS_MAXB_SHIFT;
1368				if (bytecnt > maxb) {
1369					aprint_verbose_dev(sc->sc_dev,
1370					    "resetting PCI-X MMRBC: %d -> %d\n",
1371					    512 << bytecnt, 512 << maxb);
1372					pcix_cmd = (pcix_cmd &
1373					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
1374					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
1375					pci_conf_write(pa->pa_pc, pa->pa_tag,
1376					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
1377					    pcix_cmd);
1378				}
1379			}
1380		}
1381		/*
1382		 * The quad port adapter is special; it has a PCIX-PCIX
1383		 * bridge on the board, and can run the secondary bus at
1384		 * a higher speed.
1385		 */
1386		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1387			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1388								      : 66;
1389		} else if (sc->sc_flags & WM_F_PCIX) {
1390			switch (reg & STATUS_PCIXSPD_MASK) {
1391			case STATUS_PCIXSPD_50_66:
1392				sc->sc_bus_speed = 66;
1393				break;
1394			case STATUS_PCIXSPD_66_100:
1395				sc->sc_bus_speed = 100;
1396				break;
1397			case STATUS_PCIXSPD_100_133:
1398				sc->sc_bus_speed = 133;
1399				break;
1400			default:
1401				aprint_error_dev(sc->sc_dev,
1402				    "unknown PCIXSPD %d; assuming 66MHz\n",
1403				    reg & STATUS_PCIXSPD_MASK);
1404				sc->sc_bus_speed = 66;
1405				break;
1406			}
1407		} else
1408			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1409		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1410		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1411		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1412	}
1413
1414	/*
1415	 * Allocate the control data structures, and create and load the
1416	 * DMA map for it.
1417	 *
1418	 * NOTE: All Tx descriptors must be in the same 4G segment of
1419	 * memory.  So must Rx descriptors.  We simplify by allocating
1420	 * both sets within the same 4G segment.
1421	 */
1422	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1423	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
1424	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1425	    sizeof(struct wm_control_data_82542) :
1426	    sizeof(struct wm_control_data_82544);
1427	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1428		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1429		    &sc->sc_cd_rseg, 0)) != 0) {
1430		aprint_error_dev(sc->sc_dev,
1431		    "unable to allocate control data, error = %d\n",
1432		    error);
1433		goto fail_0;
1434	}
1435
1436	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1437		    sc->sc_cd_rseg, sc->sc_cd_size,
1438		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1439		aprint_error_dev(sc->sc_dev,
1440		    "unable to map control data, error = %d\n", error);
1441		goto fail_1;
1442	}
1443
1444	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1445		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1446		aprint_error_dev(sc->sc_dev,
1447		    "unable to create control data DMA map, error = %d\n",
1448		    error);
1449		goto fail_2;
1450	}
1451
1452	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1453		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1454		aprint_error_dev(sc->sc_dev,
1455		    "unable to load control data DMA map, error = %d\n",
1456		    error);
1457		goto fail_3;
1458	}
1459
1460	/*
1461	 * Create the transmit buffer DMA maps.
1462	 */
1463	WM_TXQUEUELEN(sc) =
1464	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1465	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1466	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1467		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1468			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1469			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1470			aprint_error_dev(sc->sc_dev,
1471			    "unable to create Tx DMA map %d, error = %d\n",
1472			    i, error);
1473			goto fail_4;
1474		}
1475	}
1476
1477	/*
1478	 * Create the receive buffer DMA maps.
1479	 */
1480	for (i = 0; i < WM_NRXDESC; i++) {
1481		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1482			    MCLBYTES, 0, 0,
1483			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1484			aprint_error_dev(sc->sc_dev,
1485			    "unable to create Rx DMA map %d error = %d\n",
1486			    i, error);
1487			goto fail_5;
1488		}
1489		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1490	}
1491
1492	/* clear interesting stat counters */
1493	CSR_READ(sc, WMREG_COLC);
1494	CSR_READ(sc, WMREG_RXERRC);
1495
1496	/* get PHY control from SMBus to PCIe */
1497	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
1498		wm_smbustopci(sc);
1499
1500	/*
1501	 * Reset the chip to a known state.
1502	 */
1503	wm_reset(sc);
1504
1505	switch (sc->sc_type) {
1506	case WM_T_82571:
1507	case WM_T_82572:
1508	case WM_T_82573:
1509	case WM_T_82574:
1510	case WM_T_82583:
1511	case WM_T_80003:
1512	case WM_T_ICH8:
1513	case WM_T_ICH9:
1514	case WM_T_ICH10:
1515	case WM_T_PCH:
1516	case WM_T_PCH2:
1517		if (wm_check_mng_mode(sc) != 0)
1518			wm_get_hw_control(sc);
1519		break;
1520	default:
1521		break;
1522	}
1523
1524	/*
1525	 * Get some information about the EEPROM.
1526	 */
1527	switch (sc->sc_type) {
1528	case WM_T_82542_2_0:
1529	case WM_T_82542_2_1:
1530	case WM_T_82543:
1531	case WM_T_82544:
1532		/* Microwire */
1533		sc->sc_ee_addrbits = 6;
1534		break;
1535	case WM_T_82540:
1536	case WM_T_82545:
1537	case WM_T_82545_3:
1538	case WM_T_82546:
1539	case WM_T_82546_3:
1540		/* Microwire */
1541		reg = CSR_READ(sc, WMREG_EECD);
1542		if (reg & EECD_EE_SIZE)
1543			sc->sc_ee_addrbits = 8;
1544		else
1545			sc->sc_ee_addrbits = 6;
1546		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1547		break;
1548	case WM_T_82541:
1549	case WM_T_82541_2:
1550	case WM_T_82547:
1551	case WM_T_82547_2:
1552		reg = CSR_READ(sc, WMREG_EECD);
1553		if (reg & EECD_EE_TYPE) {
1554			/* SPI */
1555			wm_set_spiaddrbits(sc);
1556		} else
1557			/* Microwire */
1558			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1559		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1560		break;
1561	case WM_T_82571:
1562	case WM_T_82572:
1563		/* SPI */
1564		wm_set_spiaddrbits(sc);
1565		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1566		break;
1567	case WM_T_82573:
1568	case WM_T_82574:
1569	case WM_T_82583:
1570		if (wm_is_onboard_nvm_eeprom(sc) == 0)
1571			sc->sc_flags |= WM_F_EEPROM_FLASH;
1572		else {
1573			/* SPI */
1574			wm_set_spiaddrbits(sc);
1575		}
1576		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1577		break;
1578	case WM_T_82575:
1579	case WM_T_82576:
1580	case WM_T_82580:
1581	case WM_T_82580ER:
1582	case WM_T_I350:
1583	case WM_T_80003:
1584		/* SPI */
1585		wm_set_spiaddrbits(sc);
1586		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1587		break;
1588	case WM_T_ICH8:
1589	case WM_T_ICH9:
1590	case WM_T_ICH10:
1591	case WM_T_PCH:
1592	case WM_T_PCH2:
1593		/* FLASH */
1594		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1595		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1596		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1597		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1598			aprint_error_dev(sc->sc_dev,
1599			    "can't map FLASH registers\n");
1600			return;
1601		}
1602		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1603		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1604						ICH_FLASH_SECTOR_SIZE;
1605		sc->sc_ich8_flash_bank_size =
1606		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1607		sc->sc_ich8_flash_bank_size -=
1608		    (reg & ICH_GFPREG_BASE_MASK);
1609		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1610		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1611		break;
1612	default:
1613		break;
1614	}
1615
1616	/*
1617	 * Defer printing the EEPROM type until after verifying the checksum
1618	 * This allows the EEPROM type to be printed correctly in the case
1619	 * that no EEPROM is attached.
1620	 */
1621	/*
1622	 * Validate the EEPROM checksum. If the checksum fails, flag
1623	 * this for later, so we can fail future reads from the EEPROM.
1624	 */
1625	if (wm_validate_eeprom_checksum(sc)) {
1626		/*
1627		 * Read twice again because some PCI-e parts fail the
1628		 * first check due to the link being in sleep state.
1629		 */
1630		if (wm_validate_eeprom_checksum(sc))
1631			sc->sc_flags |= WM_F_EEPROM_INVALID;
1632	}
1633
1634	/* Set device properties (macflags) */
1635	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1636
1637	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1638		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1639	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1640		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1641	} else {
1642		if (sc->sc_flags & WM_F_EEPROM_SPI)
1643			eetype = "SPI";
1644		else
1645			eetype = "MicroWire";
1646		aprint_verbose_dev(sc->sc_dev,
1647		    "%u word (%d address bits) %s EEPROM\n",
1648		    1U << sc->sc_ee_addrbits,
1649		    sc->sc_ee_addrbits, eetype);
1650	}
1651
1652	/*
1653	 * Read the Ethernet address from the EEPROM, if not first found
1654	 * in device properties.
1655	 */
1656	ea = prop_dictionary_get(dict, "mac-address");
1657	if (ea != NULL) {
1658		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1659		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1660		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1661	} else {
1662		if (wm_read_mac_addr(sc, enaddr) != 0) {
1663			aprint_error_dev(sc->sc_dev,
1664			    "unable to read Ethernet address\n");
1665			return;
1666		}
1667	}
1668
1669	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1670	    ether_sprintf(enaddr));
1671
1672	/*
1673	 * Read the config info from the EEPROM, and set up various
1674	 * bits in the control registers based on their contents.
1675	 */
1676	pn = prop_dictionary_get(dict, "i82543-cfg1");
1677	if (pn != NULL) {
1678		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1679		cfg1 = (uint16_t) prop_number_integer_value(pn);
1680	} else {
1681		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1682			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1683			return;
1684		}
1685	}
1686
1687	pn = prop_dictionary_get(dict, "i82543-cfg2");
1688	if (pn != NULL) {
1689		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1690		cfg2 = (uint16_t) prop_number_integer_value(pn);
1691	} else {
1692		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1693			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1694			return;
1695		}
1696	}
1697
1698	/* check for WM_F_WOL */
1699	switch (sc->sc_type) {
1700	case WM_T_82542_2_0:
1701	case WM_T_82542_2_1:
1702	case WM_T_82543:
1703		/* dummy? */
1704		eeprom_data = 0;
1705		apme_mask = EEPROM_CFG3_APME;
1706		break;
1707	case WM_T_82544:
1708		apme_mask = EEPROM_CFG2_82544_APM_EN;
1709		eeprom_data = cfg2;
1710		break;
1711	case WM_T_82546:
1712	case WM_T_82546_3:
1713	case WM_T_82571:
1714	case WM_T_82572:
1715	case WM_T_82573:
1716	case WM_T_82574:
1717	case WM_T_82583:
1718	case WM_T_80003:
1719	default:
1720		apme_mask = EEPROM_CFG3_APME;
1721		wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1722		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1723		break;
1724	case WM_T_82575:
1725	case WM_T_82576:
1726	case WM_T_82580:
1727	case WM_T_82580ER:
1728	case WM_T_I350:
1729	case WM_T_ICH8:
1730	case WM_T_ICH9:
1731	case WM_T_ICH10:
1732	case WM_T_PCH:
1733	case WM_T_PCH2:
1734		/* XXX The funcid should be checked on some devices */
1735		apme_mask = WUC_APME;
1736		eeprom_data = CSR_READ(sc, WMREG_WUC);
1737		break;
1738	}
1739
1740	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1741	if ((eeprom_data & apme_mask) != 0)
1742		sc->sc_flags |= WM_F_WOL;
1743#ifdef WM_DEBUG
1744	if ((sc->sc_flags & WM_F_WOL) != 0)
1745		printf("WOL\n");
1746#endif
1747
1748	/*
1749	 * XXX need special handling for some multiple port cards
1750	 * to disable a paticular port.
1751	 */
1752
1753	if (sc->sc_type >= WM_T_82544) {
1754		pn = prop_dictionary_get(dict, "i82543-swdpin");
1755		if (pn != NULL) {
1756			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1757			swdpin = (uint16_t) prop_number_integer_value(pn);
1758		} else {
1759			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1760				aprint_error_dev(sc->sc_dev,
1761				    "unable to read SWDPIN\n");
1762				return;
1763			}
1764		}
1765	}
1766
1767	if (cfg1 & EEPROM_CFG1_ILOS)
1768		sc->sc_ctrl |= CTRL_ILOS;
1769	if (sc->sc_type >= WM_T_82544) {
1770		sc->sc_ctrl |=
1771		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1772		    CTRL_SWDPIO_SHIFT;
1773		sc->sc_ctrl |=
1774		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1775		    CTRL_SWDPINS_SHIFT;
1776	} else {
1777		sc->sc_ctrl |=
1778		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1779		    CTRL_SWDPIO_SHIFT;
1780	}
1781
1782#if 0
1783	if (sc->sc_type >= WM_T_82544) {
1784		if (cfg1 & EEPROM_CFG1_IPS0)
1785			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1786		if (cfg1 & EEPROM_CFG1_IPS1)
1787			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1788		sc->sc_ctrl_ext |=
1789		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1790		    CTRL_EXT_SWDPIO_SHIFT;
1791		sc->sc_ctrl_ext |=
1792		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1793		    CTRL_EXT_SWDPINS_SHIFT;
1794	} else {
1795		sc->sc_ctrl_ext |=
1796		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1797		    CTRL_EXT_SWDPIO_SHIFT;
1798	}
1799#endif
1800
1801	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1802#if 0
1803	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1804#endif
1805
1806	/*
1807	 * Set up some register offsets that are different between
1808	 * the i82542 and the i82543 and later chips.
1809	 */
1810	if (sc->sc_type < WM_T_82543) {
1811		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1812		sc->sc_tdt_reg = WMREG_OLD_TDT;
1813	} else {
1814		sc->sc_rdt_reg = WMREG_RDT;
1815		sc->sc_tdt_reg = WMREG_TDT;
1816	}
1817
1818	if (sc->sc_type == WM_T_PCH) {
1819		uint16_t val;
1820
1821		/* Save the NVM K1 bit setting */
1822		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1823
1824		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1825			sc->sc_nvm_k1_enabled = 1;
1826		else
1827			sc->sc_nvm_k1_enabled = 0;
1828	}
1829
1830	/*
1831	 * Determine if we're TBI,GMII or SGMII mode, and initialize the
1832	 * media structures accordingly.
1833	 */
1834	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1835	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1836	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_82573
1837	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1838		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1839		wm_gmii_mediainit(sc, wmp->wmp_product);
1840	} else if (sc->sc_type < WM_T_82543 ||
1841	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1842		if (wmp->wmp_flags & WMP_F_1000T)
1843			aprint_error_dev(sc->sc_dev,
1844			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1845		wm_tbi_mediainit(sc);
1846	} else {
1847		switch (sc->sc_type) {
1848		case WM_T_82575:
1849		case WM_T_82576:
1850		case WM_T_82580:
1851		case WM_T_82580ER:
1852		case WM_T_I350:
1853			reg = CSR_READ(sc, WMREG_CTRL_EXT);
1854			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1855			case CTRL_EXT_LINK_MODE_SGMII:
1856				aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1857				sc->sc_flags |= WM_F_SGMII;
1858				CSR_WRITE(sc, WMREG_CTRL_EXT,
1859				    reg | CTRL_EXT_I2C_ENA);
1860				wm_gmii_mediainit(sc, wmp->wmp_product);
1861				break;
1862			case CTRL_EXT_LINK_MODE_1000KX:
1863			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1864				aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1865				CSR_WRITE(sc, WMREG_CTRL_EXT,
1866				    reg | CTRL_EXT_I2C_ENA);
1867				panic("not supported yet\n");
1868				break;
1869			case CTRL_EXT_LINK_MODE_GMII:
1870			default:
1871				CSR_WRITE(sc, WMREG_CTRL_EXT,
1872				    reg & ~CTRL_EXT_I2C_ENA);
1873				wm_gmii_mediainit(sc, wmp->wmp_product);
1874				break;
1875			}
1876			break;
1877		default:
1878			if (wmp->wmp_flags & WMP_F_1000X)
1879				aprint_error_dev(sc->sc_dev,
1880				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1881			wm_gmii_mediainit(sc, wmp->wmp_product);
1882		}
1883	}
1884
1885	ifp = &sc->sc_ethercom.ec_if;
1886	xname = device_xname(sc->sc_dev);
1887	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1888	ifp->if_softc = sc;
1889	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1890	ifp->if_ioctl = wm_ioctl;
1891	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
1892		ifp->if_start = wm_nq_start;
1893	else
1894		ifp->if_start = wm_start;
1895	ifp->if_watchdog = wm_watchdog;
1896	ifp->if_init = wm_init;
1897	ifp->if_stop = wm_stop;
1898	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1899	IFQ_SET_READY(&ifp->if_snd);
1900
1901	/* Check for jumbo frame */
1902	switch (sc->sc_type) {
1903	case WM_T_82573:
1904		/* XXX limited to 9234 if ASPM is disabled */
1905		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1906		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1907			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1908		break;
1909	case WM_T_82571:
1910	case WM_T_82572:
1911	case WM_T_82574:
1912	case WM_T_82575:
1913	case WM_T_82576:
1914	case WM_T_82580:
1915	case WM_T_82580ER:
1916	case WM_T_I350:
1917	case WM_T_80003:
1918	case WM_T_ICH9:
1919	case WM_T_ICH10:
1920	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
1921		/* XXX limited to 9234 */
1922		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1923		break;
1924	case WM_T_PCH:
1925		/* XXX limited to 4096 */
1926		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1927		break;
1928	case WM_T_82542_2_0:
1929	case WM_T_82542_2_1:
1930	case WM_T_82583:
1931	case WM_T_ICH8:
1932		/* No support for jumbo frame */
1933		break;
1934	default:
1935		/* ETHER_MAX_LEN_JUMBO */
1936		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1937		break;
1938	}
1939
1940	/*
1941	 * If we're a i82543 or greater, we can support VLANs.
1942	 */
1943	if (sc->sc_type >= WM_T_82543)
1944		sc->sc_ethercom.ec_capabilities |=
1945		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1946
1947	/*
 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
1949	 * on i82543 and later.
1950	 */
1951	if (sc->sc_type >= WM_T_82543) {
1952		ifp->if_capabilities |=
1953		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1954		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1955		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1956		    IFCAP_CSUM_TCPv6_Tx |
1957		    IFCAP_CSUM_UDPv6_Tx;
1958	}
1959
1960	/*
1961	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1962	 *
1963	 *	82541GI (8086:1076) ... no
1964	 *	82572EI (8086:10b9) ... yes
1965	 */
1966	if (sc->sc_type >= WM_T_82571) {
1967		ifp->if_capabilities |=
1968		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1969	}
1970
1971	/*
1972	 * If we're a i82544 or greater (except i82547), we can do
1973	 * TCP segmentation offload.
1974	 */
1975	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1976		ifp->if_capabilities |= IFCAP_TSOv4;
1977	}
1978
1979	if (sc->sc_type >= WM_T_82571) {
1980		ifp->if_capabilities |= IFCAP_TSOv6;
1981	}
1982
1983	/*
1984	 * Attach the interface.
1985	 */
1986	if_attach(ifp);
1987	ether_ifattach(ifp, enaddr);
1988	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
1989	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1990
1991#ifdef WM_EVENT_COUNTERS
1992	/* Attach event counters. */
1993	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1994	    NULL, xname, "txsstall");
1995	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1996	    NULL, xname, "txdstall");
1997	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1998	    NULL, xname, "txfifo_stall");
1999	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2000	    NULL, xname, "txdw");
2001	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2002	    NULL, xname, "txqe");
2003	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2004	    NULL, xname, "rxintr");
2005	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2006	    NULL, xname, "linkintr");
2007
2008	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2009	    NULL, xname, "rxipsum");
2010	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2011	    NULL, xname, "rxtusum");
2012	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2013	    NULL, xname, "txipsum");
2014	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2015	    NULL, xname, "txtusum");
2016	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2017	    NULL, xname, "txtusum6");
2018
2019	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2020	    NULL, xname, "txtso");
2021	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2022	    NULL, xname, "txtso6");
2023	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2024	    NULL, xname, "txtsopain");
2025
2026	for (i = 0; i < WM_NTXSEGS; i++) {
2027		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2028		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2029		    NULL, xname, wm_txseg_evcnt_names[i]);
2030	}
2031
2032	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2033	    NULL, xname, "txdrop");
2034
2035	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2036	    NULL, xname, "tu");
2037
2038	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2039	    NULL, xname, "tx_xoff");
2040	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2041	    NULL, xname, "tx_xon");
2042	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2043	    NULL, xname, "rx_xoff");
2044	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2045	    NULL, xname, "rx_xon");
2046	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2047	    NULL, xname, "rx_macctl");
2048#endif /* WM_EVENT_COUNTERS */
2049
2050	if (pmf_device_register(self, wm_suspend, wm_resume))
2051		pmf_class_network_register(self, ifp);
2052	else
2053		aprint_error_dev(self, "couldn't establish power handler\n");
2054
2055	return;
2056
2057	/*
2058	 * Free any resources we've allocated during the failed attach
2059	 * attempt.  Do this in reverse order and fall through.
2060	 */
2061 fail_5:
2062	for (i = 0; i < WM_NRXDESC; i++) {
2063		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2064			bus_dmamap_destroy(sc->sc_dmat,
2065			    sc->sc_rxsoft[i].rxs_dmamap);
2066	}
2067 fail_4:
2068	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2069		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2070			bus_dmamap_destroy(sc->sc_dmat,
2071			    sc->sc_txsoft[i].txs_dmamap);
2072	}
2073	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2074 fail_3:
2075	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2076 fail_2:
2077	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2078	    sc->sc_cd_size);
2079 fail_1:
2080	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2081 fail_0:
2082	return;
2083}
2084
2085static int
2086wm_detach(device_t self, int flags __unused)
2087{
2088	struct wm_softc *sc = device_private(self);
2089	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2090	int i, s;
2091
2092	s = splnet();
2093	/* Stop the interface. Callouts are stopped in it. */
2094	wm_stop(ifp, 1);
2095	splx(s);
2096
2097	pmf_device_deregister(self);
2098
2099	/* Tell the firmware about the release */
2100	wm_release_manageability(sc);
2101	wm_release_hw_control(sc);
2102
2103	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2104
2105	/* Delete all remaining media. */
2106	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2107
2108	ether_ifdetach(ifp);
2109	if_detach(ifp);
2110
2111
2112	/* Unload RX dmamaps and free mbufs */
2113	wm_rxdrain(sc);
2114
2115	/* Free dmamap. It's the same as the end of the wm_attach() function */
2116	for (i = 0; i < WM_NRXDESC; i++) {
2117		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2118			bus_dmamap_destroy(sc->sc_dmat,
2119			    sc->sc_rxsoft[i].rxs_dmamap);
2120	}
2121	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2122		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2123			bus_dmamap_destroy(sc->sc_dmat,
2124			    sc->sc_txsoft[i].txs_dmamap);
2125	}
2126	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2127	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2128	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2129	    sc->sc_cd_size);
2130	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2131
2132	/* Disestablish the interrupt handler */
2133	if (sc->sc_ih != NULL) {
2134		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2135		sc->sc_ih = NULL;
2136	}
2137
2138	/* Unmap the registers */
2139	if (sc->sc_ss) {
2140		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2141		sc->sc_ss = 0;
2142	}
2143
2144	if (sc->sc_ios) {
2145		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2146		sc->sc_ios = 0;
2147	}
2148
2149	return 0;
2150}
2151
2152/*
2153 * wm_tx_offload:
2154 *
2155 *	Set up TCP/IP checksumming parameters for the
2156 *	specified packet.
2157 */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs, cmd, cmdlen, seg;
	uint32_t ipcse;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/* Locate the start of the IP header (account for VLAN encap). */
	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return 0;
	}

	/* IP header length is carried in the mbuf checksum metadata. */
	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}
	/* Offset of the last byte of the IP header (inclusive). */
	ipcse = offset + iphl - 1;

	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
	seg = 0;
	fields = 0;

	/*
	 * TSO setup: zero the IP length field and seed the TCP checksum
	 * with the pseudo-header sum so the chip can fill in per-segment
	 * values as it splits the payload.
	 */
	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
		int hlen = offset + iphl;
		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;

		if (__predict_false(m0->m_len <
				    (hlen + sizeof(struct tcphdr)))) {
			/*
			 * TCP/IP headers are not in the first mbuf; we need
			 * to do this the slow and painful way.  Let's just
			 * hope this doesn't happen very often.
			 */
			struct tcphdr th;

			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);

			m_copydata(m0, hlen, sizeof(th), &th);
			if (v4) {
				struct ip ip;

				m_copydata(m0, offset, sizeof(ip), &ip);
				ip.ip_len = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip, ip_len),
				    sizeof(ip.ip_len), &ip.ip_len);
				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr ip6;

				m_copydata(m0, offset, sizeof(ip6), &ip6);
				ip6.ip6_plen = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip6_hdr, ip6_plen),
				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			hlen += th.th_off << 2;
		} else {
			/*
			 * TCP/IP headers are in the first mbuf; we can do
			 * this the easy way.
			 */
			struct tcphdr *th;

			if (v4) {
				struct ip *ip =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip->ip_len = 0;
				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr *ip6 =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip6->ip6_plen = 0;
				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			/* hlen now covers ethernet + IP + TCP headers. */
			hlen += th->th_off << 2;
		}

		if (v4) {
			WM_EVCNT_INCR(&sc->sc_ev_txtso);
			cmdlen |= WTX_TCPIP_CMD_IP;
		} else {
			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
			/* No IP checksum end for v6. */
			ipcse = 0;
		}
		cmd |= WTX_TCPIP_CMD_TSE;
		cmdlen |= WTX_TCPIP_CMD_TSE |
		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
	}

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	ipcs = WTX_TCPIP_IPCSS(offset) |
	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
	    WTX_TCPIP_IPCSE(ipcse);
	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
	}

	/* Advance past the IP header for the TCP/UDP offsets below. */
	offset += iphl;

	if (m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	/* Fill in the context descriptor. */
	t = (struct livengood_tcpip_ctxdesc *)
	    &sc->sc_txdescs[sc->sc_txnext];
	t->tcpip_ipcs = htole32(ipcs);
	t->tcpip_tucs = htole32(tucs);
	t->tcpip_cmdlen = htole32(cmdlen);
	t->tcpip_seg = htole32(seg);
	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

	/* The context descriptor consumed one Tx ring slot. */
	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
	txs->txs_ndesc++;

	*cmdp = cmd;
	*fieldsp = fields;

	/* Always succeeds; the return value is for interface symmetry. */
	return 0;
}
2346
2347static void
2348wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2349{
2350	struct mbuf *m;
2351	int i;
2352
2353	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2354	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2355		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2356		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2357		    m->m_data, m->m_len, m->m_flags);
2358	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2359	    i, i == 1 ? "" : "s");
2360}
2361
2362/*
2363 * wm_82547_txfifo_stall:
2364 *
2365 *	Callout used to wait for the 82547 Tx FIFO to drain,
2366 *	reset the FIFO pointers, and restart packet transmission.
2367 */
static void
wm_82547_txfifo_stall(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	/* Only act if a stall is pending (set by wm_82547_txfifo_bugchk). */
	if (sc->sc_txfifo_stall) {
		/*
		 * The FIFO has drained when the descriptor ring is caught
		 * up (TDT == TDH) and the internal FIFO head/tail and
		 * saved head/tail pointers agree.
		 */
		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
			/*
			 * Packets have drained.  Stop transmitter, reset
			 * FIFO pointers, restart transmitter, and kick
			 * the packet queue.
			 */
			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TCTL, tctl);
			CSR_WRITE_FLUSH(sc);

			/* Clear the stall and resume transmission. */
			sc->sc_txfifo_head = 0;
			sc->sc_txfifo_stall = 0;
			wm_start(&sc->sc_ethercom.ec_if);
		} else {
			/*
			 * Still waiting for packets to drain; try again in
			 * another tick.
			 */
			callout_schedule(&sc->sc_txfifo_ch, 1);
		}
	}

	splx(s);
}
2408
2409static void
2410wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2411{
2412	uint32_t reg;
2413
2414	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2415
2416	if (on != 0)
2417		reg |= EXTCNFCTR_GATE_PHY_CFG;
2418	else
2419		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2420
2421	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2422}
2423
2424/*
2425 * wm_82547_txfifo_bugchk:
2426 *
2427 *	Check for bug condition in the 82547 Tx FIFO.  We need to
2428 *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
2430 *
2431 *	We do this by checking the amount of space before the end
2432 *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
2433 *	the Tx FIFO, wait for all remaining packets to drain, reset
2434 *	the internal FIFO pointers to the beginning, and restart
2435 *	transmission on the interface.
2436 */
2437#define	WM_FIFO_HDR		0x10
2438#define	WM_82547_PAD_LEN	0x3e0
2439static int
2440wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2441{
2442	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2443	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2444
2445	/* Just return if already stalled. */
2446	if (sc->sc_txfifo_stall)
2447		return 1;
2448
2449	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2450		/* Stall only occurs in half-duplex mode. */
2451		goto send_packet;
2452	}
2453
2454	if (len >= WM_82547_PAD_LEN + space) {
2455		sc->sc_txfifo_stall = 1;
2456		callout_schedule(&sc->sc_txfifo_ch, 1);
2457		return 1;
2458	}
2459
2460 send_packet:
2461	sc->sc_txfifo_head += len;
2462	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2463		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2464
2465	return 0;
2466}
2467
2468/*
2469 * wm_start:		[ifnet interface function]
2470 *
2471 *	Start packet transmission on the interface.
2472 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct m_tag *mtag;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
	bus_addr_t curaddr;
	bus_size_t seglen, curlen;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	/* Nothing to do unless running and not already flow-blocked. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			/* Reclaim completed jobs before giving up. */
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					device_xname(sc->sc_dev)));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/* Does this packet request TCP segmentation offload? */
		use_tso = (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;

		/*
		 * So says the Linux driver:
		 * The controller does a simple calculation to make sure
		 * there is enough room in the FIFO before initiating the
		 * DMA for each buffer.  The calc is:
		 *	4 = ceil(buffer len / MSS)
		 * To make sure we don't overrun the FIFO, adjust the max
		 * buffer len if the MSS drops.
		 */
		dmamap->dm_maxsegsz =
		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
		    ? m0->m_pkthdr.segsz << 2
		    : WTX_MAX_LEN;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;
		if (use_tso) {
			/* For sentinel descriptor; see below. */
			segs_needed++;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * pack on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    segs_needed, sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/*
		 * Check for 82547 Tx FIFO bug.  We need to do this
		 * once we know we can transmit the packet, since we
		 * do some internal FIFO space accounting here.
		 */
		if (sc->sc_type == WM_T_82547 &&
		    wm_82547_txfifo_bugchk(sc, m0)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: 82547 Tx FIFO bug detected\n",
			    device_xname(sc->sc_dev)));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet. */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			if (wm_tx_offload(sc, txs, &cksumcmd,
					  &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		/* Delay interrupt (IDE) and insert FCS (IFCS) on every desc. */
		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs; seg++) {
			for (seglen = dmamap->dm_segs[seg].ds_len,
			     curaddr = dmamap->dm_segs[seg].ds_addr;
			     seglen != 0;
			     curaddr += curlen, seglen -= curlen,
			     nexttx = WM_NEXTTX(sc, nexttx)) {
				curlen = seglen;

				/*
				 * So says the Linux driver:
				 * Work around for premature descriptor
				 * write-backs in TSO mode.  Append a
				 * 4-byte sentinel descriptor.
				 */
				if (use_tso &&
				    seg == dmamap->dm_nsegs - 1 &&
				    curlen > 8)
					curlen -= 4;

				wm_set_dma_addr(
				    &sc->sc_txdescs[nexttx].wtx_addr,
				    curaddr);
				sc->sc_txdescs[nexttx].wtx_cmdlen =
				    htole32(cksumcmd | curlen);
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
				    0;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
				    cksumfields;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
				/* Track last descriptor for EOP/RS below. */
				lasttx = nexttx;

				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: desc %d: low %#" PRIx64 ", "
				     "len %#04zx\n",
				    device_xname(sc->sc_dev), nexttx,
				    (uint64_t)curaddr, curlen));
			}
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_RS);

		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		}

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
		    device_xname(sc->sc_dev),
		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
2774
/*
 * wm_nq_tx_offload:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet, for NEWQUEUE devices
 *
 *	Builds the advanced context descriptor for the packet in
 *	txs->txs_mbuf, returning the data-descriptor command bits in
 *	*cmdlenp and the option-field bits in *fieldsp.  *do_csum is
 *	set to false (and 0 returned) when the frame is not IP/IPv6,
 *	in which case no context descriptor is consumed.
 */
static int
wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
    uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct m_tag *mtag;
	uint32_t vl_len, mssidx, cmdc;
	struct ether_header *eh;
	int offset, iphl;	/* offset = L2 header length, iphl = IP header length */

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */
	*cmdlenp = 0;
	*fieldsp = 0;

	/*
	 * Locate the start of the IP header from the Ethernet type;
	 * VLAN-encapsulated frames carry an extra 4-byte tag header.
	 */
	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*do_csum = false;
		return 0;
	}
	*do_csum = true;
	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;

	/* MAC (L2) header length goes into the VLAN/length word. */
	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);

	/*
	 * IP header length comes from the csum_data encoding; pick the
	 * IPv4 or IPv6 decoder based on which offload flags are set.
	 */
	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}
	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);

	/* If the frame carries a VLAN tag, insert it via the context. */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
		     << NQTXC_VLLEN_VLAN_SHIFT);
		*cmdlenp |= NQTX_CMD_VLE;
	}

	mssidx = 0;

	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
		/*
		 * TSO: the hardware needs ip_len/ip6_plen zeroed and the
		 * TCP checksum pre-seeded with the pseudo-header sum
		 * before segmentation.
		 */
		int hlen = offset + iphl;
		int tcp_hlen;
		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;

		if (__predict_false(m0->m_len <
				    (hlen + sizeof(struct tcphdr)))) {
			/*
			 * TCP/IP headers are not in the first mbuf; we need
			 * to do this the slow and painful way.  Let's just
			 * hope this doesn't happen very often.
			 */
			struct tcphdr th;

			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);

			m_copydata(m0, hlen, sizeof(th), &th);
			if (v4) {
				struct ip ip;

				m_copydata(m0, offset, sizeof(ip), &ip);
				ip.ip_len = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip, ip_len),
				    sizeof(ip.ip_len), &ip.ip_len);
				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr ip6;

				m_copydata(m0, offset, sizeof(ip6), &ip6);
				ip6.ip6_plen = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip6_hdr, ip6_plen),
				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			/* Write the seeded checksum back into the chain. */
			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			tcp_hlen = th.th_off << 2;
		} else {
			/*
			 * TCP/IP headers are in the first mbuf; we can do
			 * this the easy way.
			 */
			struct tcphdr *th;

			if (v4) {
				struct ip *ip =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip->ip_len = 0;
				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr *ip6 =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip6->ip6_plen = 0;
				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			tcp_hlen = th->th_off << 2;
		}
		hlen += tcp_hlen;
		*cmdlenp |= NQTX_CMD_TSE;

		if (v4) {
			WM_EVCNT_INCR(&sc->sc_ev_txtso);
			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
		} else {
			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
			*fieldsp |= NQTXD_FIELDS_TUXSM;
		}
		/* PAYLEN for TSO excludes all headers (L2+L3+L4). */
		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
	} else {
		/* Non-TSO: PAYLEN is simply the whole packet length. */
		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
	}

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		*fieldsp |= NQTXD_FIELDS_IXSM;
		cmdc |= NQTXC_CMD_IP4;
	}

	if (m0->m_pkthdr.csum_flags &
	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
			cmdc |= NQTXC_CMD_TCP;
		} else {
			cmdc |= NQTXC_CMD_UDP;
		}
		cmdc |= NQTXC_CMD_IP4;
		*fieldsp |= NQTXD_FIELDS_TUXSM;
	}
	if (m0->m_pkthdr.csum_flags &
	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
			cmdc |= NQTXC_CMD_TCP;
		} else {
			cmdc |= NQTXC_CMD_UDP;
		}
		cmdc |= NQTXC_CMD_IP6;
		*fieldsp |= NQTXD_FIELDS_TUXSM;
	}

	/* Fill in the context descriptor. */
	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
	    htole32(vl_len);
	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
	    htole32(cmdc);
	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
	    htole32(mssidx);
	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
	    sc->sc_txnext, 0, vl_len));
	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
	/* The context descriptor consumed one ring slot. */
	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
	txs->txs_ndesc++;
	return 0;
}
2974
/*
 * wm_nq_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface for NEWQUEUE devices
 *
 *	Dequeues packets from ifp->if_snd, loads them into DMA maps,
 *	builds legacy or advanced transmit descriptors (plus a context
 *	descriptor when checksum/TSO offload is requested), and kicks
 *	the chip by advancing the TDT register.
 */
static void
wm_nq_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct m_tag *mtag;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, seg, segs_needed;
	bool do_csum, sent;

	/* Nothing to do unless running and not already flow-blocked. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	sent = false;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			/* Reap completed jobs before giving up. */
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					device_xname(sc->sc_dev)));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * pack on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    segs_needed, sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet. */
		uint32_t cmdlen, fields, dcmdlen;
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			/* May emit a context descriptor and bump txs_ndesc. */
			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
			    &do_csum) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			do_csum = false;
			cmdlen = 0;
			fields = 0;
		}

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the first transmit descriptor.
		 */
		nexttx = sc->sc_txnext;
		if (!do_csum) {
			/* setup a legacy descriptor */
			wm_set_dma_addr(
			    &sc->sc_txdescs[nexttx].wtx_addr,
			    dmamap->dm_segs[0].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen =
			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
			    NULL) {
				sc->sc_txdescs[nexttx].wtx_cmdlen |=
				    htole32(WTX_CMD_VLE);
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
			} else {
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
			}
			dcmdlen = 0;
		} else {
			/* setup an advanced data descriptor */
			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
			    htole64(dmamap->dm_segs[0].ds_addr);
			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
			    htole32(fields);
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
			    device_xname(sc->sc_dev), nexttx,
			    (uint64_t)dmamap->dm_segs[0].ds_addr));
			DPRINTF(WM_DEBUG_TX,
			    ("\t 0x%08x%08x\n", fields,
			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
		}

		lasttx = nexttx;
		nexttx = WM_NEXTTX(sc, nexttx);
		/*
		 * fill in the next descriptors. legacy or adcanced format
		 * is the same here
		 */
		for (seg = 1; seg < dmamap->dm_nsegs;
		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
			    htole64(dmamap->dm_segs[seg].ds_addr);
			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: %#" PRIx64 ", "
			     "len %#04zx\n",
			    device_xname(sc->sc_dev), nexttx,
			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
			    dmamap->dm_segs[seg].ds_len));
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
		    (NQTX_CMD_EOP | NQTX_CMD_RS));
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_RS);

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
		    device_xname(sc->sc_dev),
		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
		sent = true;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sent) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
3251
/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 *
 *	Invoked when ifp->if_timer expires with transmissions still
 *	outstanding; reaps completed work, and if descriptors remain
 *	in flight, logs a timeout, dumps the ring (WM_DEBUG), and
 *	resets the interface.
 */
static void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	/* If descriptors are still outstanding, the chip has stalled. */
	if (sc->sc_txfree != WM_NTXDESC(sc)) {
#ifdef WM_DEBUG
		int i, j;
		struct wm_txsoft *txs;
#endif
		log(LOG_ERR,
		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;
#ifdef WM_DEBUG
		/* Dump every in-flight job and its descriptors. */
		for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
		    i = WM_NEXTTXS(sc, i)) {
		    txs = &sc->sc_txsoft[i];
		    printf("txs %d tx %d -> %d\n",
			i, txs->txs_firstdesc, txs->txs_lastdesc);
		    for (j = txs->txs_firstdesc; ;
			j = WM_NEXTTX(sc, j)) {
			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
			printf("\t %#08x%08x\n",
			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
			if (j == txs->txs_lastdesc)
				break;
			}
		}
#endif
		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	ifp->if_start(ifp);
}
3303
3304static int
3305wm_ifflags_cb(struct ethercom *ec)
3306{
3307	struct ifnet *ifp = &ec->ec_if;
3308	struct wm_softc *sc = ifp->if_softc;
3309	int change = ifp->if_flags ^ sc->sc_if_flags;
3310
3311	if (change != 0)
3312		sc->sc_if_flags = ifp->if_flags;
3313
3314	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3315		return ENETRESET;
3316
3317	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3318		wm_set_filter(sc);
3319
3320	wm_set_vlan(sc);
3321
3322	return 0;
3323}
3324
/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 *
 *	Runs at splnet.  Media ioctls are routed to the MII layer with
 *	flow-control fixups; link-level address changes reprogram the
 *	receive filter; everything else falls through to ether_ioctl().
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct sockaddr_dl *sdl;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCINITIFADDR:
		if (ifa->ifa_addr->sa_family == AF_LINK) {
			/* Copy the new MAC into the interface's dl addr. */
			sdl = satosdl(ifp->if_dl->ifa_addr);
			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
			/* unicast address is first multicast entry */
			wm_set_filter(sc);
			error = 0;
			break;
		}
		/*FALLTHROUGH*/
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		/* ether_ioctl() asked for a reset; decide how to apply it. */
		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
		}
		break;
	}

	/* Try to get more packets going. */
	ifp->if_start(ifp);

	splx(s);
	return error;
}
3395
/*
 * wm_intr:
 *
 *	Interrupt service routine.
 *
 *	Loops reading ICR until no interrupt causes of interest remain,
 *	servicing Rx, Tx, and link events each pass.  Returns non-zero
 *	when the interrupt was ours.
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int handled = 0;

	while (1 /* CONSTCOND */) {
		/* Reading ICR also acknowledges the pending causes. */
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;
		/* Feed the interrupt cause bits to the entropy pool. */
		rnd_add_uint32(&sc->rnd_source, icr);

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(sc->sc_dev),
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Rx and Tx are always swept, regardless of cause bits. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(sc->sc_dev)));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
#if defined(WM_DEBUG)
			log(LOG_WARNING, "%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
		}
	}

	if (handled) {
		/* Try to get more packets going. */
		ifp->if_start(ifp);
	}

	return handled;
}
3458
/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 *
 *	Walks the soft transmit-job ring from sc_txsdirty, freeing the
 *	mbufs and DMA maps of jobs whose last descriptor has the DD
 *	(descriptor done) status bit set, and updates the free counts.
 */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	/* We may be able to queue again; clear the flow-block flag. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));

		/* Pull the descriptors for this job out of DMA memory. */
		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status =
		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
		if ((status & WTX_ST_DD) == 0) {
			/* Not done yet; give the descriptor back and stop. */
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		/* Account collisions; otherwise count a successful packet. */
		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				log(LOG_WARNING, "%s: late collision\n",
				    device_xname(sc->sc_dev));
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				log(LOG_WARNING, "%s: excessive collisions\n",
				    device_xname(sc->sc_dev));
			}
		} else
			ifp->if_opackets++;

		/* Release the descriptors, DMA map, and mbuf of this job. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
		ifp->if_timer = 0;
}
3545
/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 *
 *	Walks the receive descriptor ring from sc_rxptr, chaining the
 *	buffers of multi-descriptor packets until EOP, trimming the FCS
 *	(except on I350), attaching VLAN/checksum metadata, and passing
 *	completed packets up the stack.
 */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;
	uint16_t vlantag;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    device_xname(sc->sc_dev), i));

		/* Pull this descriptor out of DMA memory before reading it. */
		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);
		vlantag = sc->sc_rxdescs[i].wrx_special;

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		/* In discard mode, drop buffers until the packet's EOP. */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    device_xname(sc->sc_dev), i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    device_xname(sc->sc_dev)));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring, unless of
		 * course the length is zero. Treat the latter as a
		 * failed mapping.
		 */
		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", device_xname(sc->sc_dev),
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		m->m_len = len;
		sc->sc_rxlen += len;
		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    device_xname(sc->sc_dev), m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			WM_RXCHAIN_LINK(sc, m);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    device_xname(sc->sc_dev), sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except I350
		 * (not all chips can be configured to strip it),
		 * so we need to trim it.
		 * May need to adjust length of previous mbuf in the
		 * chain if the current mbuf is too short.
		 * For an eratta, the RCTL_SECRC bit in RCTL register
		 * is always set in I350, so we don't trim it.
		 */
		if (sc->sc_type != WM_T_I350) {
			if (m->m_len < ETHER_CRC_LEN) {
				/* CRC spans the previous mbuf too. */
				sc->sc_rxtail->m_len
				    -= (ETHER_CRC_LEN - m->m_len);
				m->m_len = 0;
			} else
				m->m_len -= ETHER_CRC_LEN;
			len = sc->sc_rxlen - ETHER_CRC_LEN;
		} else
			len = sc->sc_rxlen;

		WM_RXCHAIN_LINK(sc, m);

		/* Terminate the chain and take it off the softc. */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    device_xname(sc->sc_dev), len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			if (errors & WRX_ER_SE)
				log(LOG_WARNING, "%s: symbol error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_SEQ)
				log(LOG_WARNING, "%s: receive sequence error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_CE)
				log(LOG_WARNING, "%s: CRC error\n",
				    device_xname(sc->sc_dev));
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if ((status & WRX_ST_VP) != 0) {
			VLAN_INPUT_TAG(ifp, m,
			    le16toh(vlantag),
			    continue);
		}

		/*
		 * Set up checksum info for this packet.
		 */
		if ((status & WRX_ST_IXSM) == 0) {
			if (status & WRX_ST_IPCS) {
				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (errors & WRX_ER_IPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
			}
			if (status & WRX_ST_TCPCS) {
				/*
				 * Note: we don't know if this was TCP or UDP,
				 * so we just set both bits, and expect the
				 * upper layers to deal.
				 */
				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
				m->m_pkthdr.csum_flags |=
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
				if (errors & WRX_ER_TCPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}

		ifp->if_ipackets++;

		/* Pass this up to any BPF listeners. */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
}
3755
/*
 * wm_linkintr_gmii:
 *
 *	Helper; handle link interrupts for GMII.
 *
 *	On link-status-change (LSC) ticks the MII layer and applies
 *	chip-specific fixups: forced MAC speed/duplex on 82543,
 *	ICH8/PCH workarounds, and an 82578 link-stall workaround.
 */
static void
wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
{

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));

	if (icr & ICR_LSC) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: LSC -> mii_tick\n",
			device_xname(sc->sc_dev)));
		mii_tick(&sc->sc_mii);
		if (sc->sc_type == WM_T_82543) {
			int miistatus, active;

			/*
			 * With 82543, we need to force speed and
			 * duplex on the MAC equal to what the PHY
			 * speed and duplex configuration is.
			 */
			miistatus = sc->sc_mii.mii_media_status;

			if (miistatus & IFM_ACTIVE) {
				active = sc->sc_mii.mii_media_active;
				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
				switch (IFM_SUBTYPE(active)) {
				case IFM_10_T:
					sc->sc_ctrl |= CTRL_SPEED_10;
					break;
				case IFM_100_TX:
					sc->sc_ctrl |= CTRL_SPEED_100;
					break;
				case IFM_1000_T:
					sc->sc_ctrl |= CTRL_SPEED_1000;
					break;
				default:
					/*
					 * fiber?
					 * Shoud not enter here.
					 */
					printf("unknown media (%x)\n",
					    active);
					break;
				}
				if (active & IFM_FDX)
					sc->sc_ctrl |= CTRL_FD;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			}
		} else if ((sc->sc_type == WM_T_ICH8)
		    && (sc->sc_phytype == WMPHY_IGP_3)) {
			/* ICH8 + IGP3 PHY: kumeran lock-loss workaround. */
			wm_kmrn_lock_loss_workaround_ich8lan(sc);
		} else if (sc->sc_type == WM_T_PCH) {
			/* PCH: K1 state workaround, keyed on link state. */
			wm_k1_gig_workaround_hv(sc,
			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
		}

		/* 82578 PHY at gigabit: link stall fix on link-up. */
		if ((sc->sc_phytype == WMPHY_82578)
		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
			== IFM_1000_T)) {

			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
				delay(200*1000); /* XXX too big */

				/* Link stall fix for link up */
				wm_gmii_hv_writereg(sc->sc_dev, 1,
				    HV_MUX_DATA_CTRL,
				    HV_MUX_DATA_CTRL_GEN_TO_MAC
				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
				wm_gmii_hv_writereg(sc->sc_dev, 1,
				    HV_MUX_DATA_CTRL,
				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
			}
		}
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK Receive sequence error\n",
			device_xname(sc->sc_dev)));
	}
}
3840
/*
 * wm_linkintr_tbi:
 *
 *	Helper; handle link interrupts for TBI mode.
 *
 *	On link-status-change, reads STATUS to determine link/duplex,
 *	reprograms collision distance and flow control accordingly,
 *	and updates the link LED.
 */
static void
wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));

	status = CSR_READ(sc, WMREG_STATUS);
	if (icr & ICR_LSC) {
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */

			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			/* Collision distance depends on duplex mode. */
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (sc->sc_ctrl & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			/* Pre-82543 chips have FCRTL at a different offset. */
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXCFG) {
		/* Received a /C/ ordered set: autonegotiation traffic. */
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_nrxcfg++;
		wm_check_for_link(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    device_xname(sc->sc_dev)));
	}
}
3898
3899/*
3900 * wm_linkintr:
3901 *
3902 *	Helper; handle link interrupts.
3903 */
3904static void
3905wm_linkintr(struct wm_softc *sc, uint32_t icr)
3906{
3907
3908	if (sc->sc_flags & WM_F_HAS_MII)
3909		wm_linkintr_gmii(sc, icr);
3910	else
3911		wm_linkintr_tbi(sc, icr);
3912}
3913
3914/*
3915 * wm_tick:
3916 *
3917 *	One second timer, used to check link status, sweep up
3918 *	completed transmit jobs, etc.
3919 */
3920static void
3921wm_tick(void *arg)
3922{
3923	struct wm_softc *sc = arg;
3924	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3925	int s;
3926
3927	s = splnet();
3928
3929	if (sc->sc_type >= WM_T_82542_2_1) {
3930		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3931		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3932		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3933		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3934		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3935	}
3936
3937	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3938	ifp->if_ierrors += 0ULL + /* ensure quad_t */
3939	    + CSR_READ(sc, WMREG_CRCERRS)
3940	    + CSR_READ(sc, WMREG_ALGNERRC)
3941	    + CSR_READ(sc, WMREG_SYMERRC)
3942	    + CSR_READ(sc, WMREG_RXERRC)
3943	    + CSR_READ(sc, WMREG_SEC)
3944	    + CSR_READ(sc, WMREG_CEXTERR)
3945	    + CSR_READ(sc, WMREG_RLEC);
3946	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3947
3948	if (sc->sc_flags & WM_F_HAS_MII)
3949		mii_tick(&sc->sc_mii);
3950	else
3951		wm_tbi_check_link(sc);
3952
3953	splx(s);
3954
3955	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3956}
3957
3958/*
3959 * wm_reset:
3960 *
3961 *	Reset the i82542 chip.
3962 */
3963static void
3964wm_reset(struct wm_softc *sc)
3965{
3966	int phy_reset = 0;
3967	uint32_t reg, mask;
3968	int i;
3969
3970	/*
3971	 * Allocate on-chip memory according to the MTU size.
3972	 * The Packet Buffer Allocation register must be written
3973	 * before the chip is reset.
3974	 */
3975	switch (sc->sc_type) {
3976	case WM_T_82547:
3977	case WM_T_82547_2:
3978		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3979		    PBA_22K : PBA_30K;
3980		sc->sc_txfifo_head = 0;
3981		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3982		sc->sc_txfifo_size =
3983		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3984		sc->sc_txfifo_stall = 0;
3985		break;
3986	case WM_T_82571:
3987	case WM_T_82572:
3988	case WM_T_82575:	/* XXX need special handing for jumbo frames */
3989	case WM_T_I350:
3990	case WM_T_80003:
3991		sc->sc_pba = PBA_32K;
3992		break;
3993	case WM_T_82580:
3994	case WM_T_82580ER:
3995		sc->sc_pba = PBA_35K;
3996		break;
3997	case WM_T_82576:
3998		sc->sc_pba = PBA_64K;
3999		break;
4000	case WM_T_82573:
4001		sc->sc_pba = PBA_12K;
4002		break;
4003	case WM_T_82574:
4004	case WM_T_82583:
4005		sc->sc_pba = PBA_20K;
4006		break;
4007	case WM_T_ICH8:
4008		sc->sc_pba = PBA_8K;
4009		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4010		break;
4011	case WM_T_ICH9:
4012	case WM_T_ICH10:
4013		sc->sc_pba = PBA_10K;
4014		break;
4015	case WM_T_PCH:
4016	case WM_T_PCH2:
4017		sc->sc_pba = PBA_26K;
4018		break;
4019	default:
4020		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4021		    PBA_40K : PBA_48K;
4022		break;
4023	}
4024	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4025
4026	/* Prevent the PCI-E bus from sticking */
4027	if (sc->sc_flags & WM_F_PCIE) {
4028		int timeout = 800;
4029
4030		sc->sc_ctrl |= CTRL_GIO_M_DIS;
4031		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4032
4033		while (timeout--) {
4034			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
4035				break;
4036			delay(100);
4037		}
4038	}
4039
4040	/* Set the completion timeout for interface */
4041	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4042	    || (sc->sc_type == WM_T_I350))
4043		wm_set_pcie_completion_timeout(sc);
4044
4045	/* Clear interrupt */
4046	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4047
4048	/* Stop the transmit and receive processes. */
4049	CSR_WRITE(sc, WMREG_RCTL, 0);
4050	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4051	sc->sc_rctl &= ~RCTL_EN;
4052
4053	/* XXX set_tbi_sbp_82543() */
4054
4055	delay(10*1000);
4056
4057	/* Must acquire the MDIO ownership before MAC reset */
4058	switch (sc->sc_type) {
4059	case WM_T_82573:
4060	case WM_T_82574:
4061	case WM_T_82583:
4062		i = 0;
4063		reg = CSR_READ(sc, WMREG_EXTCNFCTR)
4064		    | EXTCNFCTR_MDIO_SW_OWNERSHIP;
4065		do {
4066			CSR_WRITE(sc, WMREG_EXTCNFCTR,
4067			    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
4068			reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4069			if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
4070				break;
4071			reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
4072			delay(2*1000);
4073			i++;
4074		} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
4075		break;
4076	default:
4077		break;
4078	}
4079
4080	/*
4081	 * 82541 Errata 29? & 82547 Errata 28?
4082	 * See also the description about PHY_RST bit in CTRL register
4083	 * in 8254x_GBe_SDM.pdf.
4084	 */
4085	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4086		CSR_WRITE(sc, WMREG_CTRL,
4087		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4088		delay(5000);
4089	}
4090
4091	switch (sc->sc_type) {
4092	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4093	case WM_T_82541:
4094	case WM_T_82541_2:
4095	case WM_T_82547:
4096	case WM_T_82547_2:
4097		/*
4098		 * On some chipsets, a reset through a memory-mapped write
4099		 * cycle can cause the chip to reset before completing the
4100		 * write cycle.  This causes major headache that can be
4101		 * avoided by issuing the reset via indirect register writes
4102		 * through I/O space.
4103		 *
4104		 * So, if we successfully mapped the I/O BAR at attach time,
4105		 * use that.  Otherwise, try our luck with a memory-mapped
4106		 * reset.
4107		 */
4108		if (sc->sc_flags & WM_F_IOH_VALID)
4109			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4110		else
4111			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4112		break;
4113	case WM_T_82545_3:
4114	case WM_T_82546_3:
4115		/* Use the shadow control register on these chips. */
4116		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4117		break;
4118	case WM_T_80003:
4119		mask = swfwphysem[sc->sc_funcid];
4120		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4121		wm_get_swfw_semaphore(sc, mask);
4122		CSR_WRITE(sc, WMREG_CTRL, reg);
4123		wm_put_swfw_semaphore(sc, mask);
4124		break;
4125	case WM_T_ICH8:
4126	case WM_T_ICH9:
4127	case WM_T_ICH10:
4128	case WM_T_PCH:
4129	case WM_T_PCH2:
4130		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4131		if (wm_check_reset_block(sc) == 0) {
4132			/*
4133			 * Gate automatic PHY configuration by hardware on
4134			 * non-managed 82579
4135			 */
4136			if ((sc->sc_type == WM_T_PCH2)
4137			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4138				!= 0))
4139				wm_gate_hw_phy_config_ich8lan(sc, 1);
4140
4141
4142			reg |= CTRL_PHY_RESET;
4143			phy_reset = 1;
4144		}
4145		wm_get_swfwhw_semaphore(sc);
4146		CSR_WRITE(sc, WMREG_CTRL, reg);
4147		delay(20*1000);
4148		wm_put_swfwhw_semaphore(sc);
4149		break;
4150	case WM_T_82542_2_0:
4151	case WM_T_82542_2_1:
4152	case WM_T_82543:
4153	case WM_T_82540:
4154	case WM_T_82545:
4155	case WM_T_82546:
4156	case WM_T_82571:
4157	case WM_T_82572:
4158	case WM_T_82573:
4159	case WM_T_82574:
4160	case WM_T_82575:
4161	case WM_T_82576:
4162	case WM_T_82580:
4163	case WM_T_82580ER:
4164	case WM_T_82583:
4165	case WM_T_I350:
4166	default:
4167		/* Everything else can safely use the documented method. */
4168		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4169		break;
4170	}
4171
4172	if (phy_reset != 0)
4173		wm_get_cfg_done(sc);
4174
4175	/* reload EEPROM */
4176	switch (sc->sc_type) {
4177	case WM_T_82542_2_0:
4178	case WM_T_82542_2_1:
4179	case WM_T_82543:
4180	case WM_T_82544:
4181		delay(10);
4182		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4183		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4184		delay(2000);
4185		break;
4186	case WM_T_82540:
4187	case WM_T_82545:
4188	case WM_T_82545_3:
4189	case WM_T_82546:
4190	case WM_T_82546_3:
4191		delay(5*1000);
4192		/* XXX Disable HW ARPs on ASF enabled adapters */
4193		break;
4194	case WM_T_82541:
4195	case WM_T_82541_2:
4196	case WM_T_82547:
4197	case WM_T_82547_2:
4198		delay(20000);
4199		/* XXX Disable HW ARPs on ASF enabled adapters */
4200		break;
4201	case WM_T_82571:
4202	case WM_T_82572:
4203	case WM_T_82573:
4204	case WM_T_82574:
4205	case WM_T_82583:
4206		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4207			delay(10);
4208			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4209			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4210		}
4211		/* check EECD_EE_AUTORD */
4212		wm_get_auto_rd_done(sc);
4213		/*
4214		 * Phy configuration from NVM just starts after EECD_AUTO_RD
4215		 * is set.
4216		 */
4217		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4218		    || (sc->sc_type == WM_T_82583))
4219			delay(25*1000);
4220		break;
4221	case WM_T_82575:
4222	case WM_T_82576:
4223	case WM_T_82580:
4224	case WM_T_82580ER:
4225	case WM_T_I350:
4226	case WM_T_80003:
4227	case WM_T_ICH8:
4228	case WM_T_ICH9:
4229		/* check EECD_EE_AUTORD */
4230		wm_get_auto_rd_done(sc);
4231		break;
4232	case WM_T_ICH10:
4233	case WM_T_PCH:
4234	case WM_T_PCH2:
4235		wm_lan_init_done(sc);
4236		break;
4237	default:
4238		panic("%s: unknown type\n", __func__);
4239	}
4240
4241	/* Check whether EEPROM is present or not */
4242	switch (sc->sc_type) {
4243	case WM_T_82575:
4244	case WM_T_82576:
4245#if 0 /* XXX */
4246	case WM_T_82580:
4247	case WM_T_82580ER:
4248#endif
4249	case WM_T_I350:
4250	case WM_T_ICH8:
4251	case WM_T_ICH9:
4252		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4253			/* Not found */
4254			sc->sc_flags |= WM_F_EEPROM_INVALID;
4255			if ((sc->sc_type == WM_T_82575)
4256			    || (sc->sc_type == WM_T_82576)
4257			    || (sc->sc_type == WM_T_82580)
4258			    || (sc->sc_type == WM_T_82580ER)
4259			    || (sc->sc_type == WM_T_I350))
4260				wm_reset_init_script_82575(sc);
4261		}
4262		break;
4263	default:
4264		break;
4265	}
4266
4267	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4268	    || (sc->sc_type == WM_T_I350)) {
4269		/* clear global device reset status bit */
4270		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4271	}
4272
4273	/* Clear any pending interrupt events. */
4274	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4275	reg = CSR_READ(sc, WMREG_ICR);
4276
4277	/* reload sc_ctrl */
4278	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4279
4280	if (sc->sc_type == WM_T_I350)
4281		wm_set_eee_i350(sc);
4282
4283	/* dummy read from WUC */
4284	if (sc->sc_type == WM_T_PCH)
4285		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4286	/*
4287	 * For PCH, this write will make sure that any noise will be detected
4288	 * as a CRC error and be dropped rather than show up as a bad packet
4289	 * to the DMA engine
4290	 */
4291	if (sc->sc_type == WM_T_PCH)
4292		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4293
4294	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4295		CSR_WRITE(sc, WMREG_WUC, 0);
4296
4297	/* XXX need special handling for 82580 */
4298}
4299
4300static void
4301wm_set_vlan(struct wm_softc *sc)
4302{
4303	/* Deal with VLAN enables. */
4304	if (VLAN_ATTACHED(&sc->sc_ethercom))
4305		sc->sc_ctrl |= CTRL_VME;
4306	else
4307		sc->sc_ctrl &= ~CTRL_VME;
4308
4309	/* Write the control registers. */
4310	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4311}
4312
4313/*
4314 * wm_init:		[ifnet interface function]
4315 *
4316 *	Initialize the interface.  Must be called at splnet().
4317 */
4318static int
4319wm_init(struct ifnet *ifp)
4320{
4321	struct wm_softc *sc = ifp->if_softc;
4322	struct wm_rxsoft *rxs;
4323	int i, j, trynum, error = 0;
4324	uint32_t reg;
4325
4326	/*
4327	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
4328	 * There is a small but measurable benefit to avoiding the adjusment
4329	 * of the descriptor so that the headers are aligned, for normal mtu,
4330	 * on such platforms.  One possibility is that the DMA itself is
4331	 * slightly more efficient if the front of the entire packet (instead
4332	 * of the front of the headers) is aligned.
4333	 *
4334	 * Note we must always set align_tweak to 0 if we are using
4335	 * jumbo frames.
4336	 */
4337#ifdef __NO_STRICT_ALIGNMENT
4338	sc->sc_align_tweak = 0;
4339#else
4340	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4341		sc->sc_align_tweak = 0;
4342	else
4343		sc->sc_align_tweak = 2;
4344#endif /* __NO_STRICT_ALIGNMENT */
4345
4346	/* Cancel any pending I/O. */
4347	wm_stop(ifp, 0);
4348
4349	/* update statistics before reset */
4350	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4351	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4352
4353	/* Reset the chip to a known state. */
4354	wm_reset(sc);
4355
4356	switch (sc->sc_type) {
4357	case WM_T_82571:
4358	case WM_T_82572:
4359	case WM_T_82573:
4360	case WM_T_82574:
4361	case WM_T_82583:
4362	case WM_T_80003:
4363	case WM_T_ICH8:
4364	case WM_T_ICH9:
4365	case WM_T_ICH10:
4366	case WM_T_PCH:
4367	case WM_T_PCH2:
4368		if (wm_check_mng_mode(sc) != 0)
4369			wm_get_hw_control(sc);
4370		break;
4371	default:
4372		break;
4373	}
4374
4375	/* Reset the PHY. */
4376	if (sc->sc_flags & WM_F_HAS_MII)
4377		wm_gmii_reset(sc);
4378
4379	reg = CSR_READ(sc, WMREG_CTRL_EXT);
4380	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
4381	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
4382		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4383
4384	/* Initialize the transmit descriptor ring. */
4385	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4386	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4387	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4388	sc->sc_txfree = WM_NTXDESC(sc);
4389	sc->sc_txnext = 0;
4390
4391	if (sc->sc_type < WM_T_82543) {
4392		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4393		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4394		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4395		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4396		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4397		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4398	} else {
4399		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4400		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4401		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4402		CSR_WRITE(sc, WMREG_TDH, 0);
4403		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
4404		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
4405
4406		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4407			/*
4408			 * Don't write TDT before TCTL.EN is set.
4409			 * See the document.
4410			 */
4411			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4412			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4413			    | TXDCTL_WTHRESH(0));
4414		else {
4415			CSR_WRITE(sc, WMREG_TDT, 0);
4416			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4417			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4418			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4419			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4420		}
4421	}
4422	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4423	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4424
4425	/* Initialize the transmit job descriptors. */
4426	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4427		sc->sc_txsoft[i].txs_mbuf = NULL;
4428	sc->sc_txsfree = WM_TXQUEUELEN(sc);
4429	sc->sc_txsnext = 0;
4430	sc->sc_txsdirty = 0;
4431
4432	/*
4433	 * Initialize the receive descriptor and receive job
4434	 * descriptor rings.
4435	 */
4436	if (sc->sc_type < WM_T_82543) {
4437		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4438		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4439		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4440		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4441		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4442		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4443
4444		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4445		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4446		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4447		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4448		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4449		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4450	} else {
4451		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4452		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4453		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4454		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4455			CSR_WRITE(sc, WMREG_EITR(0), 450);
4456			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4457				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
4458			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4459			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4460			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4461			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4462			    | RXDCTL_WTHRESH(1));
4463		} else {
4464			CSR_WRITE(sc, WMREG_RDH, 0);
4465			CSR_WRITE(sc, WMREG_RDT, 0);
4466			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
4467			CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
4468		}
4469	}
4470	for (i = 0; i < WM_NRXDESC; i++) {
4471		rxs = &sc->sc_rxsoft[i];
4472		if (rxs->rxs_mbuf == NULL) {
4473			if ((error = wm_add_rxbuf(sc, i)) != 0) {
4474				log(LOG_ERR, "%s: unable to allocate or map rx "
4475				    "buffer %d, error = %d\n",
4476				    device_xname(sc->sc_dev), i, error);
4477				/*
4478				 * XXX Should attempt to run with fewer receive
4479				 * XXX buffers instead of just failing.
4480				 */
4481				wm_rxdrain(sc);
4482				goto out;
4483			}
4484		} else {
4485			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4486				WM_INIT_RXDESC(sc, i);
4487			/*
4488			 * For 82575 and newer device, the RX descriptors
4489			 * must be initialized after the setting of RCTL.EN in
4490			 * wm_set_filter()
4491			 */
4492		}
4493	}
4494	sc->sc_rxptr = 0;
4495	sc->sc_rxdiscard = 0;
4496	WM_RXCHAIN_RESET(sc);
4497
4498	/*
4499	 * Clear out the VLAN table -- we don't use it (yet).
4500	 */
4501	CSR_WRITE(sc, WMREG_VET, 0);
4502	if (sc->sc_type == WM_T_I350)
4503		trynum = 10; /* Due to hw errata */
4504	else
4505		trynum = 1;
4506	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4507		for (j = 0; j < trynum; j++)
4508			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4509
4510	/*
4511	 * Set up flow-control parameters.
4512	 *
4513	 * XXX Values could probably stand some tuning.
4514	 */
4515	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4516	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4517	    && (sc->sc_type != WM_T_PCH2)) {
4518		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4519		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4520		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4521	}
4522
4523	sc->sc_fcrtl = FCRTL_DFLT;
4524	if (sc->sc_type < WM_T_82543) {
4525		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4526		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4527	} else {
4528		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4529		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4530	}
4531
4532	if (sc->sc_type == WM_T_80003)
4533		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4534	else
4535		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4536
4537	/* Writes the control register. */
4538	wm_set_vlan(sc);
4539
4540	if (sc->sc_flags & WM_F_HAS_MII) {
4541		int val;
4542
4543		switch (sc->sc_type) {
4544		case WM_T_80003:
4545		case WM_T_ICH8:
4546		case WM_T_ICH9:
4547		case WM_T_ICH10:
4548		case WM_T_PCH:
4549		case WM_T_PCH2:
4550			/*
4551			 * Set the mac to wait the maximum time between each
4552			 * iteration and increase the max iterations when
4553			 * polling the phy; this fixes erroneous timeouts at
4554			 * 10Mbps.
4555			 */
4556			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4557			    0xFFFF);
4558			val = wm_kmrn_readreg(sc,
4559			    KUMCTRLSTA_OFFSET_INB_PARAM);
4560			val |= 0x3F;
4561			wm_kmrn_writereg(sc,
4562			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4563			break;
4564		default:
4565			break;
4566		}
4567
4568		if (sc->sc_type == WM_T_80003) {
4569			val = CSR_READ(sc, WMREG_CTRL_EXT);
4570			val &= ~CTRL_EXT_LINK_MODE_MASK;
4571			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4572
4573			/* Bypass RX and TX FIFO's */
4574			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4575			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4576			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4577			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4578			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4579			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4580		}
4581	}
4582#if 0
4583	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4584#endif
4585
4586	/*
4587	 * Set up checksum offload parameters.
4588	 */
4589	reg = CSR_READ(sc, WMREG_RXCSUM);
4590	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4591	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4592		reg |= RXCSUM_IPOFL;
4593	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4594		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4595	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4596		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4597	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4598
4599	/* Reset TBI's RXCFG count */
4600	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4601
4602	/*
4603	 * Set up the interrupt registers.
4604	 */
4605	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4606	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4607	    ICR_RXO | ICR_RXT0;
4608	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4609		sc->sc_icr |= ICR_RXCFG;
4610	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4611
4612	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4613	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4614		 || (sc->sc_type == WM_T_PCH2)) {
4615		reg = CSR_READ(sc, WMREG_KABGTXD);
4616		reg |= KABGTXD_BGSQLBIAS;
4617		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4618	}
4619
4620	/* Set up the inter-packet gap. */
4621	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4622
4623	if (sc->sc_type >= WM_T_82543) {
4624		/*
4625		 * Set up the interrupt throttling register (units of 256ns)
4626		 * Note that a footnote in Intel's documentation says this
4627		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4628		 * or 10Mbit mode.  Empirically, it appears to be the case
4629		 * that that is also true for the 1024ns units of the other
4630		 * interrupt-related timer registers -- so, really, we ought
4631		 * to divide this value by 4 when the link speed is low.
4632		 *
4633		 * XXX implement this division at link speed change!
4634		 */
4635
4636		 /*
4637		  * For N interrupts/sec, set this value to:
4638		  * 1000000000 / (N * 256).  Note that we set the
4639		  * absolute and packet timer values to this value
4640		  * divided by 4 to get "simple timer" behavior.
4641		  */
4642
4643		sc->sc_itr = 1500;		/* 2604 ints/sec */
4644		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4645	}
4646
4647	/* Set the VLAN ethernetype. */
4648	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4649
4650	/*
4651	 * Set up the transmit control register; we start out with
4652	 * a collision distance suitable for FDX, but update it whe
4653	 * we resolve the media type.
4654	 */
4655	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4656	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4657	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4658	if (sc->sc_type >= WM_T_82571)
4659		sc->sc_tctl |= TCTL_MULR;
4660	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4661
4662	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4663		/*
4664		 * Write TDT after TCTL.EN is set.
4665		 * See the document.
4666		 */
4667		CSR_WRITE(sc, WMREG_TDT, 0);
4668	}
4669
4670	if (sc->sc_type == WM_T_80003) {
4671		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4672		reg &= ~TCTL_EXT_GCEX_MASK;
4673		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4674		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4675	}
4676
4677	/* Set the media. */
4678	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4679		goto out;
4680
4681	/* Configure for OS presence */
4682	wm_init_manageability(sc);
4683
4684	/*
4685	 * Set up the receive control register; we actually program
4686	 * the register when we set the receive filter.  Use multicast
4687	 * address offset type 0.
4688	 *
4689	 * Only the i82544 has the ability to strip the incoming
4690	 * CRC, so we don't enable that feature.
4691	 */
4692	sc->sc_mchash_type = 0;
4693	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4694	    | RCTL_MO(sc->sc_mchash_type);
4695
4696	/*
4697	 * The I350 has a bug where it always strips the CRC whether
4698	 * asked to or not. So ask for stripped CRC here and cope in rxeof
4699	 */
4700	if (sc->sc_type == WM_T_I350)
4701		sc->sc_rctl |= RCTL_SECRC;
4702
4703	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4704	    && (ifp->if_mtu > ETHERMTU)) {
4705		sc->sc_rctl |= RCTL_LPE;
4706		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4707			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4708	}
4709
4710	if (MCLBYTES == 2048) {
4711		sc->sc_rctl |= RCTL_2k;
4712	} else {
4713		if (sc->sc_type >= WM_T_82543) {
4714			switch (MCLBYTES) {
4715			case 4096:
4716				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4717				break;
4718			case 8192:
4719				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4720				break;
4721			case 16384:
4722				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4723				break;
4724			default:
4725				panic("wm_init: MCLBYTES %d unsupported",
4726				    MCLBYTES);
4727				break;
4728			}
4729		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4730	}
4731
4732	/* Set the receive filter. */
4733	wm_set_filter(sc);
4734
4735	/* On 575 and later set RDT only if RX enabled */
4736	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4737		for (i = 0; i < WM_NRXDESC; i++)
4738			WM_INIT_RXDESC(sc, i);
4739
4740	/* Start the one second link check clock. */
4741	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4742
4743	/* ...all done! */
4744	ifp->if_flags |= IFF_RUNNING;
4745	ifp->if_flags &= ~IFF_OACTIVE;
4746
4747 out:
4748	sc->sc_if_flags = ifp->if_flags;
4749	if (error)
4750		log(LOG_ERR, "%s: interface not running\n",
4751		    device_xname(sc->sc_dev));
4752	return error;
4753}
4754
4755/*
4756 * wm_rxdrain:
4757 *
4758 *	Drain the receive queue.
4759 */
4760static void
4761wm_rxdrain(struct wm_softc *sc)
4762{
4763	struct wm_rxsoft *rxs;
4764	int i;
4765
4766	for (i = 0; i < WM_NRXDESC; i++) {
4767		rxs = &sc->sc_rxsoft[i];
4768		if (rxs->rxs_mbuf != NULL) {
4769			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4770			m_freem(rxs->rxs_mbuf);
4771			rxs->rxs_mbuf = NULL;
4772		}
4773	}
4774}
4775
4776/*
4777 * wm_stop:		[ifnet interface function]
4778 *
4779 *	Stop transmission on the interface.
4780 */
4781static void
4782wm_stop(struct ifnet *ifp, int disable)
4783{
4784	struct wm_softc *sc = ifp->if_softc;
4785	struct wm_txsoft *txs;
4786	int i;
4787
4788	/* Stop the one second clock. */
4789	callout_stop(&sc->sc_tick_ch);
4790
4791	/* Stop the 82547 Tx FIFO stall check timer. */
4792	if (sc->sc_type == WM_T_82547)
4793		callout_stop(&sc->sc_txfifo_ch);
4794
4795	if (sc->sc_flags & WM_F_HAS_MII) {
4796		/* Down the MII. */
4797		mii_down(&sc->sc_mii);
4798	} else {
4799#if 0
4800		/* Should we clear PHY's status properly? */
4801		wm_reset(sc);
4802#endif
4803	}
4804
4805	/* Stop the transmit and receive processes. */
4806	CSR_WRITE(sc, WMREG_TCTL, 0);
4807	CSR_WRITE(sc, WMREG_RCTL, 0);
4808	sc->sc_rctl &= ~RCTL_EN;
4809
4810	/*
4811	 * Clear the interrupt mask to ensure the device cannot assert its
4812	 * interrupt line.
4813	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4814	 * any currently pending or shared interrupt.
4815	 */
4816	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4817	sc->sc_icr = 0;
4818
4819	/* Release any queued transmit buffers. */
4820	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4821		txs = &sc->sc_txsoft[i];
4822		if (txs->txs_mbuf != NULL) {
4823			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4824			m_freem(txs->txs_mbuf);
4825			txs->txs_mbuf = NULL;
4826		}
4827	}
4828
4829	/* Mark the interface as down and cancel the watchdog timer. */
4830	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4831	ifp->if_timer = 0;
4832
4833	if (disable)
4834		wm_rxdrain(sc);
4835
4836#if 0 /* notyet */
4837	if (sc->sc_type >= WM_T_82544)
4838		CSR_WRITE(sc, WMREG_WUC, 0);
4839#endif
4840}
4841
4842void
4843wm_get_auto_rd_done(struct wm_softc *sc)
4844{
4845	int i;
4846
4847	/* wait for eeprom to reload */
4848	switch (sc->sc_type) {
4849	case WM_T_82571:
4850	case WM_T_82572:
4851	case WM_T_82573:
4852	case WM_T_82574:
4853	case WM_T_82583:
4854	case WM_T_82575:
4855	case WM_T_82576:
4856	case WM_T_82580:
4857	case WM_T_82580ER:
4858	case WM_T_I350:
4859	case WM_T_80003:
4860	case WM_T_ICH8:
4861	case WM_T_ICH9:
4862		for (i = 0; i < 10; i++) {
4863			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4864				break;
4865			delay(1000);
4866		}
4867		if (i == 10) {
4868			log(LOG_ERR, "%s: auto read from eeprom failed to "
4869			    "complete\n", device_xname(sc->sc_dev));
4870		}
4871		break;
4872	default:
4873		break;
4874	}
4875}
4876
4877void
4878wm_lan_init_done(struct wm_softc *sc)
4879{
4880	uint32_t reg = 0;
4881	int i;
4882
4883	/* wait for eeprom to reload */
4884	switch (sc->sc_type) {
4885	case WM_T_ICH10:
4886	case WM_T_PCH:
4887	case WM_T_PCH2:
4888		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4889			reg = CSR_READ(sc, WMREG_STATUS);
4890			if ((reg & STATUS_LAN_INIT_DONE) != 0)
4891				break;
4892			delay(100);
4893		}
4894		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4895			log(LOG_ERR, "%s: %s: lan_init_done failed to "
4896			    "complete\n", device_xname(sc->sc_dev), __func__);
4897		}
4898		break;
4899	default:
4900		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4901		    __func__);
4902		break;
4903	}
4904
4905	reg &= ~STATUS_LAN_INIT_DONE;
4906	CSR_WRITE(sc, WMREG_STATUS, reg);
4907}
4908
/*
 * wm_get_cfg_done:
 *
 *	Wait for the chip's post-reset configuration (loaded from the
 *	EEPROM/NVM) to complete.  The wait mechanism differs by chip
 *	family: fixed delay, EEMNGCTL CFGDONE polling, or (PCH and
 *	later) clearing the PHY reset asserted bit plus a delay.
 */
void
wm_get_cfg_done(struct wm_softc *sc)
{
	int mask;
	uint32_t reg;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* generic */
		delay(10*1000);
		break;
	case WM_T_80003:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
		/* Poll EEMNGCTL for this port's CFGDONE bit. */
		if (sc->sc_type == WM_T_82571) {
			/* Only 82571 shares port 0 */
			mask = EEMNGCTL_CFGDONE_0;
		} else
			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
				break;
			delay(1000);
		}
		if (i >= WM_PHY_CFG_TIMEOUT) {
			/* Timed out; only noted under debug. */
			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
				device_xname(sc->sc_dev), __func__));
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		if (sc->sc_type >= WM_T_PCH) {
			/* Clear the "PHY reset asserted" bit if set. */
			reg = CSR_READ(sc, WMREG_STATUS);
			if ((reg & STATUS_PHYRA) != 0)
				CSR_WRITE(sc, WMREG_STATUS,
				    reg & ~STATUS_PHYRA);
		}
		delay(10*1000);
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}
}
4981
4982/*
4983 * wm_acquire_eeprom:
4984 *
4985 *	Perform the EEPROM handshake required on some chips.
4986 */
4987static int
4988wm_acquire_eeprom(struct wm_softc *sc)
4989{
4990	uint32_t reg;
4991	int x;
4992	int ret = 0;
4993
4994	/* always success */
4995	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4996		return 0;
4997
4998	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4999		ret = wm_get_swfwhw_semaphore(sc);
5000	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
5001		/* this will also do wm_get_swsm_semaphore() if needed */
5002		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
5003	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5004		ret = wm_get_swsm_semaphore(sc);
5005	}
5006
5007	if (ret) {
5008		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5009			__func__);
5010		return 1;
5011	}
5012
5013	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5014		reg = CSR_READ(sc, WMREG_EECD);
5015
5016		/* Request EEPROM access. */
5017		reg |= EECD_EE_REQ;
5018		CSR_WRITE(sc, WMREG_EECD, reg);
5019
5020		/* ..and wait for it to be granted. */
5021		for (x = 0; x < 1000; x++) {
5022			reg = CSR_READ(sc, WMREG_EECD);
5023			if (reg & EECD_EE_GNT)
5024				break;
5025			delay(5);
5026		}
5027		if ((reg & EECD_EE_GNT) == 0) {
5028			aprint_error_dev(sc->sc_dev,
5029			    "could not acquire EEPROM GNT\n");
5030			reg &= ~EECD_EE_REQ;
5031			CSR_WRITE(sc, WMREG_EECD, reg);
5032			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5033				wm_put_swfwhw_semaphore(sc);
5034			if (sc->sc_flags & WM_F_SWFW_SYNC)
5035				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5036			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5037				wm_put_swsm_semaphore(sc);
5038			return 1;
5039		}
5040	}
5041
5042	return 0;
5043}
5044
5045/*
5046 * wm_release_eeprom:
5047 *
5048 *	Release the EEPROM mutex.
5049 */
5050static void
5051wm_release_eeprom(struct wm_softc *sc)
5052{
5053	uint32_t reg;
5054
5055	/* always success */
5056	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5057		return;
5058
5059	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5060		reg = CSR_READ(sc, WMREG_EECD);
5061		reg &= ~EECD_EE_REQ;
5062		CSR_WRITE(sc, WMREG_EECD, reg);
5063	}
5064
5065	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5066		wm_put_swfwhw_semaphore(sc);
5067	if (sc->sc_flags & WM_F_SWFW_SYNC)
5068		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5069	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5070		wm_put_swsm_semaphore(sc);
5071}
5072
5073/*
5074 * wm_eeprom_sendbits:
5075 *
5076 *	Send a series of bits to the EEPROM.
5077 */
5078static void
5079wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5080{
5081	uint32_t reg;
5082	int x;
5083
5084	reg = CSR_READ(sc, WMREG_EECD);
5085
5086	for (x = nbits; x > 0; x--) {
5087		if (bits & (1U << (x - 1)))
5088			reg |= EECD_DI;
5089		else
5090			reg &= ~EECD_DI;
5091		CSR_WRITE(sc, WMREG_EECD, reg);
5092		delay(2);
5093		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5094		delay(2);
5095		CSR_WRITE(sc, WMREG_EECD, reg);
5096		delay(2);
5097	}
5098}
5099
5100/*
5101 * wm_eeprom_recvbits:
5102 *
5103 *	Receive a series of bits from the EEPROM.
5104 */
5105static void
5106wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5107{
5108	uint32_t reg, val;
5109	int x;
5110
5111	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5112
5113	val = 0;
5114	for (x = nbits; x > 0; x--) {
5115		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5116		delay(2);
5117		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5118			val |= (1U << (x - 1));
5119		CSR_WRITE(sc, WMREG_EECD, reg);
5120		delay(2);
5121	}
5122	*valp = val;
5123}
5124
5125/*
5126 * wm_read_eeprom_uwire:
5127 *
5128 *	Read a word from the EEPROM using the MicroWire protocol.
5129 */
5130static int
5131wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5132{
5133	uint32_t reg, val;
5134	int i;
5135
5136	for (i = 0; i < wordcnt; i++) {
5137		/* Clear SK and DI. */
5138		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5139		CSR_WRITE(sc, WMREG_EECD, reg);
5140
5141		/*
5142		 * XXX: workaround for a bug in qemu-0.12.x and prior
5143		 * and Xen.
5144		 *
5145		 * We use this workaround only for 82540 because qemu's
5146		 * e1000 act as 82540.
5147		 */
5148		if (sc->sc_type == WM_T_82540) {
5149			reg |= EECD_SK;
5150			CSR_WRITE(sc, WMREG_EECD, reg);
5151			reg &= ~EECD_SK;
5152			CSR_WRITE(sc, WMREG_EECD, reg);
5153			delay(2);
5154		}
5155		/* XXX: end of workaround */
5156
5157		/* Set CHIP SELECT. */
5158		reg |= EECD_CS;
5159		CSR_WRITE(sc, WMREG_EECD, reg);
5160		delay(2);
5161
5162		/* Shift in the READ command. */
5163		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5164
5165		/* Shift in address. */
5166		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5167
5168		/* Shift out the data. */
5169		wm_eeprom_recvbits(sc, &val, 16);
5170		data[i] = val & 0xffff;
5171
5172		/* Clear CHIP SELECT. */
5173		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5174		CSR_WRITE(sc, WMREG_EECD, reg);
5175		delay(2);
5176	}
5177
5178	return 0;
5179}
5180
5181/*
5182 * wm_spi_eeprom_ready:
5183 *
5184 *	Wait for a SPI EEPROM to be ready for commands.
5185 */
5186static int
5187wm_spi_eeprom_ready(struct wm_softc *sc)
5188{
5189	uint32_t val;
5190	int usec;
5191
5192	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5193		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5194		wm_eeprom_recvbits(sc, &val, 8);
5195		if ((val & SPI_SR_RDY) == 0)
5196			break;
5197	}
5198	if (usec >= SPI_MAX_RETRIES) {
5199		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5200		return 1;
5201	}
5202	return 0;
5203}
5204
5205/*
5206 * wm_read_eeprom_spi:
5207 *
5208 *	Read a work from the EEPROM using the SPI protocol.
5209 */
5210static int
5211wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5212{
5213	uint32_t reg, val;
5214	int i;
5215	uint8_t opc;
5216
5217	/* Clear SK and CS. */
5218	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5219	CSR_WRITE(sc, WMREG_EECD, reg);
5220	delay(2);
5221
5222	if (wm_spi_eeprom_ready(sc))
5223		return 1;
5224
5225	/* Toggle CS to flush commands. */
5226	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5227	delay(2);
5228	CSR_WRITE(sc, WMREG_EECD, reg);
5229	delay(2);
5230
5231	opc = SPI_OPC_READ;
5232	if (sc->sc_ee_addrbits == 8 && word >= 128)
5233		opc |= SPI_OPC_A8;
5234
5235	wm_eeprom_sendbits(sc, opc, 8);
5236	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5237
5238	for (i = 0; i < wordcnt; i++) {
5239		wm_eeprom_recvbits(sc, &val, 16);
5240		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5241	}
5242
5243	/* Raise CS and clear SK. */
5244	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5245	CSR_WRITE(sc, WMREG_EECD, reg);
5246	delay(2);
5247
5248	return 0;
5249}
5250
/* All EEPROM_SIZE words of a valid image sum (mod 2^16) to this value. */
#define EEPROM_CHECKSUM		0xBABA
/* Number of 16-bit words covered by the checksum. */
#define EEPROM_SIZE		0x0040

/*
 * wm_validate_eeprom_checksum
 *
 * The checksum is defined as the sum of the first 64 (16 bit) words.
 * Returns 0 if the image checksums correctly, 1 on a read error or a
 * checksum mismatch.
 */
static int
wm_validate_eeprom_checksum(struct wm_softc *sc)
{
	uint16_t checksum;
	uint16_t eeprom_data;
	int i;

	checksum = 0;

#ifdef WM_DEBUG
	/* Dump EEPROM image for debug */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2)) {
		wm_read_eeprom(sc, 0x19, 1, &eeprom_data);
		if ((eeprom_data & 0x40) == 0) {
			DPRINTF(WM_DEBUG_NVM,("%s: NVM need to be updated\n",
				device_xname(sc->sc_dev)));
		}
	}

	if ((wm_debug & WM_DEBUG_NVM) != 0) {
		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
		for (i = 0; i < EEPROM_SIZE; i++) {
			if (wm_read_eeprom(sc, i, 1, &eeprom_data))
				printf("XX ");
			else
				printf("%04x ", eeprom_data);
			if (i % 8 == 7)
				printf("\n");
		}
	}

#endif /* WM_DEBUG */

	/* Sum the first EEPROM_SIZE words with 16-bit wrap-around. */
	for (i = 0; i < EEPROM_SIZE; i++) {
		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
			return 1;
		checksum += eeprom_data;
	}

	if (checksum != (uint16_t) EEPROM_CHECKSUM)
		return 1;

	return 0;
}
5305
5306/*
5307 * wm_read_eeprom:
5308 *
5309 *	Read data from the serial EEPROM.
5310 */
5311static int
5312wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5313{
5314	int rv;
5315
5316	if (sc->sc_flags & WM_F_EEPROM_INVALID)
5317		return 1;
5318
5319	if (wm_acquire_eeprom(sc))
5320		return 1;
5321
5322	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5323	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5324		 || (sc->sc_type == WM_T_PCH2))
5325		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5326	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5327		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5328	else if (sc->sc_flags & WM_F_EEPROM_SPI)
5329		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5330	else
5331		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5332
5333	wm_release_eeprom(sc);
5334	return rv;
5335}
5336
/*
 * wm_read_eeprom_eerd:
 *
 *	Read `wordcnt' words starting at `offset' from the EEPROM via
 *	the EERD register interface.  Returns 0 on success, or the
 *	error from wm_poll_eerd_eewr_done() on timeout.
 */
static int
wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
    uint16_t *data)
{
	int i, eerd = 0;
	int error = 0;

	for (i = 0; i < wordcnt; i++) {
		/* Start a one-word read ... */
		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;

		CSR_WRITE(sc, WMREG_EERD, eerd);
		/* ... wait for it to complete ... */
		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
		if (error != 0)
			break;

		/* ... then pick the data out of the same register. */
		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
	}

	return error;
}
5357
5358static int
5359wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5360{
5361	uint32_t attempts = 100000;
5362	uint32_t i, reg = 0;
5363	int32_t done = -1;
5364
5365	for (i = 0; i < attempts; i++) {
5366		reg = CSR_READ(sc, rw);
5367
5368		if (reg & EERD_DONE) {
5369			done = 0;
5370			break;
5371		}
5372		delay(5);
5373	}
5374
5375	return done;
5376}
5377
5378static int
5379wm_check_alt_mac_addr(struct wm_softc *sc)
5380{
5381	uint16_t myea[ETHER_ADDR_LEN / 2];
5382	uint16_t offset = EEPROM_OFF_MACADDR;
5383
5384	/* Try to read alternative MAC address pointer */
5385	if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5386		return -1;
5387
5388	/* Check pointer */
5389	if (offset == 0xffff)
5390		return -1;
5391
5392	/*
5393	 * Check whether alternative MAC address is valid or not.
5394	 * Some cards have non 0xffff pointer but those don't use
5395	 * alternative MAC address in reality.
5396	 *
5397	 * Check whether the broadcast bit is set or not.
5398	 */
5399	if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5400		if (((myea[0] & 0xff) & 0x01) == 0)
5401			return 0; /* found! */
5402
5403	/* not found */
5404	return -1;
5405}
5406
/*
 * wm_read_mac_addr:
 *
 *	Read this function's Ethernet address from the EEPROM into
 *	`enaddr'.  Returns 0 on success, -1 on read failure.
 */
static int
wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
{
	uint16_t myea[ETHER_ADDR_LEN / 2];
	uint16_t offset = EEPROM_OFF_MACADDR;
	int do_invert = 0;

	switch (sc->sc_type) {
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
		/* Each LAN function has its own address block. */
		switch (sc->sc_funcid) {
		case 0:
			/* default value (== EEPROM_OFF_MACADDR) */
			break;
		case 1:
			offset = EEPROM_OFF_LAN1;
			break;
		case 2:
			offset = EEPROM_OFF_LAN2;
			break;
		case 3:
			offset = EEPROM_OFF_LAN3;
			break;
		default:
			goto bad;
			/* NOTREACHED */
			break;
		}
		break;
	case WM_T_82571:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_80003:
		if (wm_check_alt_mac_addr(sc) != 0) {
			/* reset the offset to LAN0 */
			offset = EEPROM_OFF_MACADDR;
			if ((sc->sc_funcid & 0x01) == 1)
				do_invert = 1;
			goto do_read;
		}
		/*
		 * An alternative MAC address block exists; index into
		 * it by function number.
		 * NOTE(review): `offset' still holds EEPROM_OFF_MACADDR
		 * here rather than the pointer value read inside
		 * wm_check_alt_mac_addr() -- verify against upstream.
		 */
		switch (sc->sc_funcid) {
		case 0:
			/*
			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
			 * itself.
			 */
			break;
		case 1:
			offset += EEPROM_OFF_MACADDR_LAN1;
			break;
		case 2:
			offset += EEPROM_OFF_MACADDR_LAN2;
			break;
		case 3:
			offset += EEPROM_OFF_MACADDR_LAN3;
			break;
		default:
			goto bad;
			/* NOTREACHED */
			break;
		}
		break;
	default:
		/* Single stored address; odd functions toggle the LSB. */
		if ((sc->sc_funcid & 0x01) == 1)
			do_invert = 1;
		break;
	}

 do_read:
	if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
		myea) != 0) {
		goto bad;
	}

	/* Unpack the three EEPROM words, low byte first. */
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of some dual port cards.
	 */
	if (do_invert != 0)
		enaddr[5] ^= 1;

	return 0;

 bad:
	aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");

	return -1;
}
5503
5504/*
5505 * wm_add_rxbuf:
5506 *
5507 *	Add a receive buffer to the indiciated descriptor.
5508 */
5509static int
5510wm_add_rxbuf(struct wm_softc *sc, int idx)
5511{
5512	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5513	struct mbuf *m;
5514	int error;
5515
5516	MGETHDR(m, M_DONTWAIT, MT_DATA);
5517	if (m == NULL)
5518		return ENOBUFS;
5519
5520	MCLGET(m, M_DONTWAIT);
5521	if ((m->m_flags & M_EXT) == 0) {
5522		m_freem(m);
5523		return ENOBUFS;
5524	}
5525
5526	if (rxs->rxs_mbuf != NULL)
5527		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5528
5529	rxs->rxs_mbuf = m;
5530
5531	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5532	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5533	    BUS_DMA_READ|BUS_DMA_NOWAIT);
5534	if (error) {
5535		/* XXX XXX XXX */
5536		aprint_error_dev(sc->sc_dev,
5537		    "unable to load rx DMA map %d, error = %d\n",
5538		    idx, error);
5539		panic("wm_add_rxbuf");
5540	}
5541
5542	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5543	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5544
5545	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5546		if ((sc->sc_rctl & RCTL_EN) != 0)
5547			WM_INIT_RXDESC(sc, idx);
5548	} else
5549		WM_INIT_RXDESC(sc, idx);
5550
5551	return 0;
5552}
5553
5554/*
5555 * wm_set_ral:
5556 *
5557 *	Set an entery in the receive address list.
5558 */
5559static void
5560wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5561{
5562	uint32_t ral_lo, ral_hi;
5563
5564	if (enaddr != NULL) {
5565		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5566		    (enaddr[3] << 24);
5567		ral_hi = enaddr[4] | (enaddr[5] << 8);
5568		ral_hi |= RAL_AV;
5569	} else {
5570		ral_lo = 0;
5571		ral_hi = 0;
5572	}
5573
5574	if (sc->sc_type >= WM_T_82544) {
5575		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5576		    ral_lo);
5577		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5578		    ral_hi);
5579	} else {
5580		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5581		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5582	}
5583}
5584
5585/*
5586 * wm_mchash:
5587 *
5588 *	Compute the hash of the multicast address for the 4096-bit
5589 *	multicast filter.
5590 */
5591static uint32_t
5592wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5593{
5594	static const int lo_shift[4] = { 4, 3, 2, 0 };
5595	static const int hi_shift[4] = { 4, 5, 6, 8 };
5596	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5597	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5598	uint32_t hash;
5599
5600	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5601	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5602	    || (sc->sc_type == WM_T_PCH2)) {
5603		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5604		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5605		return (hash & 0x3ff);
5606	}
5607	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5608	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5609
5610	return (hash & 0xfff);
5611}
5612
5613/*
5614 * wm_set_filter:
5615 *
5616 *	Set up the receive filter.
5617 */
5618static void
5619wm_set_filter(struct wm_softc *sc)
5620{
5621	struct ethercom *ec = &sc->sc_ethercom;
5622	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5623	struct ether_multi *enm;
5624	struct ether_multistep step;
5625	bus_addr_t mta_reg;
5626	uint32_t hash, reg, bit;
5627	int i, size;
5628
5629	if (sc->sc_type >= WM_T_82544)
5630		mta_reg = WMREG_CORDOVA_MTA;
5631	else
5632		mta_reg = WMREG_MTA;
5633
5634	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5635
5636	if (ifp->if_flags & IFF_BROADCAST)
5637		sc->sc_rctl |= RCTL_BAM;
5638	if (ifp->if_flags & IFF_PROMISC) {
5639		sc->sc_rctl |= RCTL_UPE;
5640		goto allmulti;
5641	}
5642
5643	/*
5644	 * Set the station address in the first RAL slot, and
5645	 * clear the remaining slots.
5646	 */
5647	if (sc->sc_type == WM_T_ICH8)
5648		size = WM_RAL_TABSIZE_ICH8 -1;
5649	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
5650	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
5651		size = WM_RAL_TABSIZE_ICH8;
5652	else if (sc->sc_type == WM_T_82575)
5653		size = WM_RAL_TABSIZE_82575;
5654	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
5655		size = WM_RAL_TABSIZE_82576;
5656	else if (sc->sc_type == WM_T_I350)
5657		size = WM_RAL_TABSIZE_I350;
5658	else
5659		size = WM_RAL_TABSIZE;
5660	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5661	for (i = 1; i < size; i++)
5662		wm_set_ral(sc, NULL, i);
5663
5664	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5665	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5666	    || (sc->sc_type == WM_T_PCH2))
5667		size = WM_ICH8_MC_TABSIZE;
5668	else
5669		size = WM_MC_TABSIZE;
5670	/* Clear out the multicast table. */
5671	for (i = 0; i < size; i++)
5672		CSR_WRITE(sc, mta_reg + (i << 2), 0);
5673
5674	ETHER_FIRST_MULTI(step, ec, enm);
5675	while (enm != NULL) {
5676		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5677			/*
5678			 * We must listen to a range of multicast addresses.
5679			 * For now, just accept all multicasts, rather than
5680			 * trying to set only those filter bits needed to match
5681			 * the range.  (At this time, the only use of address
5682			 * ranges is for IP multicast routing, for which the
5683			 * range is big enough to require all bits set.)
5684			 */
5685			goto allmulti;
5686		}
5687
5688		hash = wm_mchash(sc, enm->enm_addrlo);
5689
5690		reg = (hash >> 5);
5691		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5692		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5693		    || (sc->sc_type == WM_T_PCH2))
5694			reg &= 0x1f;
5695		else
5696			reg &= 0x7f;
5697		bit = hash & 0x1f;
5698
5699		hash = CSR_READ(sc, mta_reg + (reg << 2));
5700		hash |= 1U << bit;
5701
5702		/* XXX Hardware bug?? */
5703		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5704			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5705			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5706			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5707		} else
5708			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5709
5710		ETHER_NEXT_MULTI(step, enm);
5711	}
5712
5713	ifp->if_flags &= ~IFF_ALLMULTI;
5714	goto setit;
5715
5716 allmulti:
5717	ifp->if_flags |= IFF_ALLMULTI;
5718	sc->sc_rctl |= RCTL_MPE;
5719
5720 setit:
5721	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5722}
5723
5724/*
5725 * wm_tbi_mediainit:
5726 *
5727 *	Initialize media for use on 1000BASE-X devices.
5728 */
5729static void
5730wm_tbi_mediainit(struct wm_softc *sc)
5731{
5732	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5733	const char *sep = "";
5734
5735	if (sc->sc_type < WM_T_82543)
5736		sc->sc_tipg = TIPG_WM_DFLT;
5737	else
5738		sc->sc_tipg = TIPG_LG_DFLT;
5739
5740	sc->sc_tbi_anegticks = 5;
5741
5742	/* Initialize our media structures */
5743	sc->sc_mii.mii_ifp = ifp;
5744
5745	sc->sc_ethercom.ec_mii = &sc->sc_mii;
5746	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5747	    wm_tbi_mediastatus);
5748
5749	/*
5750	 * SWD Pins:
5751	 *
5752	 *	0 = Link LED (output)
5753	 *	1 = Loss Of Signal (input)
5754	 */
5755	sc->sc_ctrl |= CTRL_SWDPIO(0);
5756	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5757
5758	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5759
5760#define	ADD(ss, mm, dd)							\
5761do {									\
5762	aprint_normal("%s%s", sep, ss);					\
5763	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
5764	sep = ", ";							\
5765} while (/*CONSTCOND*/0)
5766
5767	aprint_normal_dev(sc->sc_dev, "");
5768	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5769	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5770	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5771	aprint_normal("\n");
5772
5773#undef ADD
5774
5775	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5776}
5777
5778/*
5779 * wm_tbi_mediastatus:	[ifmedia interface function]
5780 *
5781 *	Get the current interface media status on a 1000BASE-X device.
5782 */
5783static void
5784wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5785{
5786	struct wm_softc *sc = ifp->if_softc;
5787	uint32_t ctrl, status;
5788
5789	ifmr->ifm_status = IFM_AVALID;
5790	ifmr->ifm_active = IFM_ETHER;
5791
5792	status = CSR_READ(sc, WMREG_STATUS);
5793	if ((status & STATUS_LU) == 0) {
5794		ifmr->ifm_active |= IFM_NONE;
5795		return;
5796	}
5797
5798	ifmr->ifm_status |= IFM_ACTIVE;
5799	ifmr->ifm_active |= IFM_1000_SX;
5800	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5801		ifmr->ifm_active |= IFM_FDX;
5802	ctrl = CSR_READ(sc, WMREG_CTRL);
5803	if (ctrl & CTRL_RFCE)
5804		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5805	if (ctrl & CTRL_TFCE)
5806		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5807}
5808
5809/*
5810 * wm_tbi_mediachange:	[ifmedia interface function]
5811 *
5812 *	Set hardware to newly-selected media on a 1000BASE-X device.
5813 */
5814static int
5815wm_tbi_mediachange(struct ifnet *ifp)
5816{
5817	struct wm_softc *sc = ifp->if_softc;
5818	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5819	uint32_t status;
5820	int i;
5821
5822	sc->sc_txcw = 0;
5823	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5824	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5825		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5826	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5827		sc->sc_txcw |= TXCW_ANE;
5828	} else {
5829		/*
5830		 * If autonegotiation is turned off, force link up and turn on
5831		 * full duplex
5832		 */
5833		sc->sc_txcw &= ~TXCW_ANE;
5834		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5835		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5836		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5837		delay(1000);
5838	}
5839
5840	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5841		    device_xname(sc->sc_dev),sc->sc_txcw));
5842	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5843	delay(10000);
5844
5845	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5846	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5847
5848	/*
5849	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
5850	 * optics detect a signal, 0 if they don't.
5851	 */
5852	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5853		/* Have signal; wait for the link to come up. */
5854
5855		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5856			/*
5857			 * Reset the link, and let autonegotiation do its thing
5858			 */
5859			sc->sc_ctrl |= CTRL_LRST;
5860			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5861			delay(1000);
5862			sc->sc_ctrl &= ~CTRL_LRST;
5863			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5864			delay(1000);
5865		}
5866
5867		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5868			delay(10000);
5869			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5870				break;
5871		}
5872
5873		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5874			    device_xname(sc->sc_dev),i));
5875
5876		status = CSR_READ(sc, WMREG_STATUS);
5877		DPRINTF(WM_DEBUG_LINK,
5878		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5879			device_xname(sc->sc_dev),status, STATUS_LU));
5880		if (status & STATUS_LU) {
5881			/* Link is up. */
5882			DPRINTF(WM_DEBUG_LINK,
5883			    ("%s: LINK: set media -> link up %s\n",
5884			    device_xname(sc->sc_dev),
5885			    (status & STATUS_FD) ? "FDX" : "HDX"));
5886
5887			/*
5888			 * NOTE: CTRL will update TFCE and RFCE automatically,
5889			 * so we should update sc->sc_ctrl
5890			 */
5891			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5892			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5893			sc->sc_fcrtl &= ~FCRTL_XONE;
5894			if (status & STATUS_FD)
5895				sc->sc_tctl |=
5896				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5897			else
5898				sc->sc_tctl |=
5899				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5900			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5901				sc->sc_fcrtl |= FCRTL_XONE;
5902			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5903			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5904				      WMREG_OLD_FCRTL : WMREG_FCRTL,
5905				      sc->sc_fcrtl);
5906			sc->sc_tbi_linkup = 1;
5907		} else {
5908			if (i == WM_LINKUP_TIMEOUT)
5909				wm_check_for_link(sc);
5910			/* Link is down. */
5911			DPRINTF(WM_DEBUG_LINK,
5912			    ("%s: LINK: set media -> link down\n",
5913			    device_xname(sc->sc_dev)));
5914			sc->sc_tbi_linkup = 0;
5915		}
5916	} else {
5917		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5918		    device_xname(sc->sc_dev)));
5919		sc->sc_tbi_linkup = 0;
5920	}
5921
5922	wm_tbi_set_linkled(sc);
5923
5924	return 0;
5925}
5926
5927/*
5928 * wm_tbi_set_linkled:
5929 *
5930 *	Update the link LED on 1000BASE-X devices.
5931 */
5932static void
5933wm_tbi_set_linkled(struct wm_softc *sc)
5934{
5935
5936	if (sc->sc_tbi_linkup)
5937		sc->sc_ctrl |= CTRL_SWDPIN(0);
5938	else
5939		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5940
5941	/* 82540 or newer devices are active low */
5942	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5943
5944	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5945}
5946
5947/*
5948 * wm_tbi_check_link:
5949 *
5950 *	Check the link on 1000BASE-X devices.
5951 */
5952static void
5953wm_tbi_check_link(struct wm_softc *sc)
5954{
5955	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5956	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5957	uint32_t rxcw, ctrl, status;
5958
5959	status = CSR_READ(sc, WMREG_STATUS);
5960
5961	rxcw = CSR_READ(sc, WMREG_RXCW);
5962	ctrl = CSR_READ(sc, WMREG_CTRL);
5963
5964	/* set link status */
5965	if ((status & STATUS_LU) == 0) {
5966		DPRINTF(WM_DEBUG_LINK,
5967		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5968		sc->sc_tbi_linkup = 0;
5969	} else if (sc->sc_tbi_linkup == 0) {
5970		DPRINTF(WM_DEBUG_LINK,
5971		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5972		    (status & STATUS_FD) ? "FDX" : "HDX"));
5973		sc->sc_tbi_linkup = 1;
5974	}
5975
5976	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5977	    && ((status & STATUS_LU) == 0)) {
5978		sc->sc_tbi_linkup = 0;
5979		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5980			/* RXCFG storm! */
5981			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5982				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5983			wm_init(ifp);
5984			ifp->if_start(ifp);
5985		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5986			/* If the timer expired, retry autonegotiation */
5987			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5988				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5989				sc->sc_tbi_ticks = 0;
5990				/*
5991				 * Reset the link, and let autonegotiation do
5992				 * its thing
5993				 */
5994				sc->sc_ctrl |= CTRL_LRST;
5995				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5996				delay(1000);
5997				sc->sc_ctrl &= ~CTRL_LRST;
5998				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5999				delay(1000);
6000				CSR_WRITE(sc, WMREG_TXCW,
6001				    sc->sc_txcw & ~TXCW_ANE);
6002				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6003			}
6004		}
6005	}
6006
6007	wm_tbi_set_linkled(sc);
6008}
6009
6010/*
6011 * wm_gmii_reset:
6012 *
6013 *	Reset the PHY.
6014 */
6015static void
6016wm_gmii_reset(struct wm_softc *sc)
6017{
6018	uint32_t reg;
6019	int rv;
6020
6021	/* get phy semaphore */
6022	switch (sc->sc_type) {
6023	case WM_T_82571:
6024	case WM_T_82572:
6025	case WM_T_82573:
6026	case WM_T_82574:
6027	case WM_T_82583:
6028		 /* XXX should get sw semaphore, too */
6029		rv = wm_get_swsm_semaphore(sc);
6030		break;
6031	case WM_T_82575:
6032	case WM_T_82576:
6033	case WM_T_82580:
6034	case WM_T_82580ER:
6035	case WM_T_I350:
6036	case WM_T_80003:
6037		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6038		break;
6039	case WM_T_ICH8:
6040	case WM_T_ICH9:
6041	case WM_T_ICH10:
6042	case WM_T_PCH:
6043	case WM_T_PCH2:
6044		rv = wm_get_swfwhw_semaphore(sc);
6045		break;
6046	default:
6047		/* nothing to do*/
6048		rv = 0;
6049		break;
6050	}
6051	if (rv != 0) {
6052		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6053		    __func__);
6054		return;
6055	}
6056
6057	switch (sc->sc_type) {
6058	case WM_T_82542_2_0:
6059	case WM_T_82542_2_1:
6060		/* null */
6061		break;
6062	case WM_T_82543:
6063		/*
6064		 * With 82543, we need to force speed and duplex on the MAC
6065		 * equal to what the PHY speed and duplex configuration is.
6066		 * In addition, we need to perform a hardware reset on the PHY
6067		 * to take it out of reset.
6068		 */
6069		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6070		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6071
6072		/* The PHY reset pin is active-low. */
6073		reg = CSR_READ(sc, WMREG_CTRL_EXT);
6074		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6075		    CTRL_EXT_SWDPIN(4));
6076		reg |= CTRL_EXT_SWDPIO(4);
6077
6078		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6079		delay(10*1000);
6080
6081		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6082		delay(150);
6083#if 0
6084		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6085#endif
6086		delay(20*1000);	/* XXX extra delay to get PHY ID? */
6087		break;
6088	case WM_T_82544:	/* reset 10000us */
6089	case WM_T_82540:
6090	case WM_T_82545:
6091	case WM_T_82545_3:
6092	case WM_T_82546:
6093	case WM_T_82546_3:
6094	case WM_T_82541:
6095	case WM_T_82541_2:
6096	case WM_T_82547:
6097	case WM_T_82547_2:
6098	case WM_T_82571:	/* reset 100us */
6099	case WM_T_82572:
6100	case WM_T_82573:
6101	case WM_T_82574:
6102	case WM_T_82575:
6103	case WM_T_82576:
6104	case WM_T_82580:
6105	case WM_T_82580ER:
6106	case WM_T_I350:
6107	case WM_T_82583:
6108	case WM_T_80003:
6109		/* generic reset */
6110		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6111		delay(20000);
6112		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6113		delay(20000);
6114
6115		if ((sc->sc_type == WM_T_82541)
6116		    || (sc->sc_type == WM_T_82541_2)
6117		    || (sc->sc_type == WM_T_82547)
6118		    || (sc->sc_type == WM_T_82547_2)) {
6119			/* workaround for igp are done in igp_reset() */
6120			/* XXX add code to set LED after phy reset */
6121		}
6122		break;
6123	case WM_T_ICH8:
6124	case WM_T_ICH9:
6125	case WM_T_ICH10:
6126	case WM_T_PCH:
6127	case WM_T_PCH2:
6128		/* generic reset */
6129		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6130		delay(100);
6131		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6132		delay(150);
6133		break;
6134	default:
6135		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6136		    __func__);
6137		break;
6138	}
6139
6140	/* release PHY semaphore */
6141	switch (sc->sc_type) {
6142	case WM_T_82571:
6143	case WM_T_82572:
6144	case WM_T_82573:
6145	case WM_T_82574:
6146	case WM_T_82583:
6147		 /* XXX should put sw semaphore, too */
6148		wm_put_swsm_semaphore(sc);
6149		break;
6150	case WM_T_82575:
6151	case WM_T_82576:
6152	case WM_T_82580:
6153	case WM_T_82580ER:
6154	case WM_T_I350:
6155	case WM_T_80003:
6156		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6157		break;
6158	case WM_T_ICH8:
6159	case WM_T_ICH9:
6160	case WM_T_ICH10:
6161	case WM_T_PCH:
6162	case WM_T_PCH2:
6163		wm_put_swfwhw_semaphore(sc);
6164		break;
6165	default:
6166		/* nothing to do*/
6167		rv = 0;
6168		break;
6169	}
6170
6171	/* get_cfg_done */
6172	wm_get_cfg_done(sc);
6173
6174	/* extra setup */
6175	switch (sc->sc_type) {
6176	case WM_T_82542_2_0:
6177	case WM_T_82542_2_1:
6178	case WM_T_82543:
6179	case WM_T_82544:
6180	case WM_T_82540:
6181	case WM_T_82545:
6182	case WM_T_82545_3:
6183	case WM_T_82546:
6184	case WM_T_82546_3:
6185	case WM_T_82541_2:
6186	case WM_T_82547_2:
6187	case WM_T_82571:
6188	case WM_T_82572:
6189	case WM_T_82573:
6190	case WM_T_82574:
6191	case WM_T_82575:
6192	case WM_T_82576:
6193	case WM_T_82580:
6194	case WM_T_82580ER:
6195	case WM_T_I350:
6196	case WM_T_82583:
6197	case WM_T_80003:
6198		/* null */
6199		break;
6200	case WM_T_82541:
6201	case WM_T_82547:
6202		/* XXX Configure actively LED after PHY reset */
6203		break;
6204	case WM_T_ICH8:
6205	case WM_T_ICH9:
6206	case WM_T_ICH10:
6207	case WM_T_PCH:
6208	case WM_T_PCH2:
6209		/* Allow time for h/w to get to a quiescent state afer reset */
6210		delay(10*1000);
6211
6212		if (sc->sc_type == WM_T_PCH)
6213			wm_hv_phy_workaround_ich8lan(sc);
6214
6215		if (sc->sc_type == WM_T_PCH2)
6216			wm_lv_phy_workaround_ich8lan(sc);
6217
6218		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6219			/*
6220			 * dummy read to clear the phy wakeup bit after lcd
6221			 * reset
6222			 */
6223			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6224		}
6225
6226		/*
6227		 * XXX Configure the LCD with th extended configuration region
6228		 * in NVM
6229		 */
6230
6231		/* Configure the LCD with the OEM bits in NVM */
6232		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6233			/*
6234			 * Disable LPLU.
6235			 * XXX It seems that 82567 has LPLU, too.
6236			 */
6237			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6238			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6239			reg |= HV_OEM_BITS_ANEGNOW;
6240			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6241		}
6242		break;
6243	default:
6244		panic("%s: unknown type\n", __func__);
6245		break;
6246	}
6247}
6248
6249/*
6250 * wm_gmii_mediainit:
6251 *
6252 *	Initialize media for use on 1000BASE-T devices.
6253 */
6254static void
6255wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6256{
6257	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6258
6259	/* We have MII. */
6260	sc->sc_flags |= WM_F_HAS_MII;
6261
6262	if (sc->sc_type == WM_T_80003)
6263		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
6264	else
6265		sc->sc_tipg = TIPG_1000T_DFLT;
6266
6267	/*
6268	 * Let the chip set speed/duplex on its own based on
6269	 * signals from the PHY.
6270	 * XXXbouyer - I'm not sure this is right for the 80003,
6271	 * the em driver only sets CTRL_SLU here - but it seems to work.
6272	 */
6273	sc->sc_ctrl |= CTRL_SLU;
6274	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6275
6276	/* Initialize our media structures and probe the GMII. */
6277	sc->sc_mii.mii_ifp = ifp;
6278
6279	switch (prodid) {
6280	case PCI_PRODUCT_INTEL_PCH_M_LM:
6281	case PCI_PRODUCT_INTEL_PCH_M_LC:
6282		/* 82577 */
6283		sc->sc_phytype = WMPHY_82577;
6284		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
6285		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
6286		break;
6287	case PCI_PRODUCT_INTEL_PCH_D_DM:
6288	case PCI_PRODUCT_INTEL_PCH_D_DC:
6289		/* 82578 */
6290		sc->sc_phytype = WMPHY_82578;
6291		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
6292		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
6293		break;
6294	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6295	case PCI_PRODUCT_INTEL_PCH2_LV_V:
6296		/* 82578 */
6297		sc->sc_phytype = WMPHY_82579;
6298		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
6299		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
6300		break;
6301	case PCI_PRODUCT_INTEL_82801I_BM:
6302	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6303	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6304	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6305	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6306	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6307		/* 82567 */
6308		sc->sc_phytype = WMPHY_BM;
6309		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
6310		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
6311		break;
6312	default:
6313		if ((sc->sc_flags & WM_F_SGMII) != 0) {
6314			sc->sc_mii.mii_readreg = wm_sgmii_readreg;
6315			sc->sc_mii.mii_writereg = wm_sgmii_writereg;
6316		} else if (sc->sc_type >= WM_T_80003) {
6317			sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
6318			sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
6319		} else if (sc->sc_type >= WM_T_82580) {
6320			sc->sc_phytype = WMPHY_82580;
6321			sc->sc_mii.mii_readreg = wm_gmii_82580_readreg;
6322			sc->sc_mii.mii_writereg = wm_gmii_82580_writereg;
6323		} else if (sc->sc_type >= WM_T_82544) {
6324			sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
6325			sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
6326		} else {
6327			sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
6328			sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
6329		}
6330		break;
6331	}
6332	sc->sc_mii.mii_statchg = wm_gmii_statchg;
6333
6334	wm_gmii_reset(sc);
6335
6336	sc->sc_ethercom.ec_mii = &sc->sc_mii;
6337	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
6338	    wm_gmii_mediastatus);
6339
6340	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6341	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6342	    || (sc->sc_type == WM_T_I350)) {
6343		if ((sc->sc_flags & WM_F_SGMII) == 0) {
6344			/* Attach only one port */
6345			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6346			    MII_OFFSET_ANY, MIIF_DOPAUSE);
6347		} else {
6348			int i;
6349			uint32_t ctrl_ext;
6350
6351			/* Power on sgmii phy if it is disabled */
6352			ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6353			CSR_WRITE(sc, WMREG_CTRL_EXT,
6354			    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6355			CSR_WRITE_FLUSH(sc);
6356			delay(300*1000); /* XXX too long */
6357
6358			/* from 1 to 8 */
6359			for (i = 1; i < 8; i++)
6360				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6361				    i, MII_OFFSET_ANY, MIIF_DOPAUSE);
6362
6363			/* restore previous sfp cage power state */
6364			CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6365		}
6366	} else {
6367		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6368		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6369	}
6370
6371	if ((sc->sc_type == WM_T_PCH2) &&
6372	    (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL)) {
6373		wm_set_mdio_slow_mode_hv(sc);
6374		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6375		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6376	}
6377
6378	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
6379		/* if failed, retry with *_bm_* */
6380		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
6381		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
6382
6383		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6384		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6385	}
6386	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
6387		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6388		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
6389		sc->sc_phytype = WMPHY_NONE;
6390	} else {
6391		/* Check PHY type */
6392		uint32_t model;
6393		struct mii_softc *child;
6394
6395		child = LIST_FIRST(&sc->sc_mii.mii_phys);
6396		if (device_is_a(child->mii_dev, "igphy")) {
6397			struct igphy_softc *isc = (struct igphy_softc *)child;
6398
6399			model = isc->sc_mii.mii_mpd_model;
6400			if (model == MII_MODEL_yyINTEL_I82566)
6401				sc->sc_phytype = WMPHY_IGP_3;
6402		}
6403
6404		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
6405	}
6406}
6407
6408/*
6409 * wm_gmii_mediastatus:	[ifmedia interface function]
6410 *
6411 *	Get the current interface media status on a 1000BASE-T device.
6412 */
6413static void
6414wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6415{
6416	struct wm_softc *sc = ifp->if_softc;
6417
6418	ether_mediastatus(ifp, ifmr);
6419	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6420	    | sc->sc_flowflags;
6421}
6422
6423/*
6424 * wm_gmii_mediachange:	[ifmedia interface function]
6425 *
6426 *	Set hardware to newly-selected media on a 1000BASE-T device.
6427 */
6428static int
6429wm_gmii_mediachange(struct ifnet *ifp)
6430{
6431	struct wm_softc *sc = ifp->if_softc;
6432	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6433	int rc;
6434
6435	if ((ifp->if_flags & IFF_UP) == 0)
6436		return 0;
6437
6438	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6439	sc->sc_ctrl |= CTRL_SLU;
6440	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6441	    || (sc->sc_type > WM_T_82543)) {
6442		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6443	} else {
6444		sc->sc_ctrl &= ~CTRL_ASDE;
6445		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6446		if (ife->ifm_media & IFM_FDX)
6447			sc->sc_ctrl |= CTRL_FD;
6448		switch (IFM_SUBTYPE(ife->ifm_media)) {
6449		case IFM_10_T:
6450			sc->sc_ctrl |= CTRL_SPEED_10;
6451			break;
6452		case IFM_100_TX:
6453			sc->sc_ctrl |= CTRL_SPEED_100;
6454			break;
6455		case IFM_1000_T:
6456			sc->sc_ctrl |= CTRL_SPEED_1000;
6457			break;
6458		default:
6459			panic("wm_gmii_mediachange: bad media 0x%x",
6460			    ife->ifm_media);
6461		}
6462	}
6463	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6464	if (sc->sc_type <= WM_T_82543)
6465		wm_gmii_reset(sc);
6466
6467	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6468		return 0;
6469	return rc;
6470}
6471
/* SWDPIN/SWDPIO assignments for bit-banged MII on the i82543. */
#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

/*
 * Bit-bang 'nbits' bits of 'data' (MSB first) out the i82543's MDIO
 * software-defined pins, clocking MDI_CLK for each bit.
 */
static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Drive the data pin as an output; pin 3 is always the clock. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		/* Present the data bit, then pulse the clock high/low. */
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}
6498
/*
 * Bit-bang a 16-bit value in from the i82543's MDIO software-defined
 * pins (MSB first), with the data pin configured as an input.
 */
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* Data pin becomes an input; pin 3 remains the clock output. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* One turnaround clock before sampling data. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	/* Sample 16 data bits, one per clock pulse. */
	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* One trailing clock to finish the frame. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return data;
}
6532
6533#undef MDI_IO
6534#undef MDI_DIR
6535#undef MDI_CLK
6536
6537/*
6538 * wm_gmii_i82543_readreg:	[mii interface function]
6539 *
6540 *	Read a PHY register on the GMII (i82543 version).
6541 */
6542static int
6543wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6544{
6545	struct wm_softc *sc = device_private(self);
6546	int rv;
6547
6548	i82543_mii_sendbits(sc, 0xffffffffU, 32);
6549	i82543_mii_sendbits(sc, reg | (phy << 5) |
6550	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6551	rv = i82543_mii_recvbits(sc) & 0xffff;
6552
6553	DPRINTF(WM_DEBUG_GMII,
6554	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6555	    device_xname(sc->sc_dev), phy, reg, rv));
6556
6557	return rv;
6558}
6559
6560/*
6561 * wm_gmii_i82543_writereg:	[mii interface function]
6562 *
6563 *	Write a PHY register on the GMII (i82543 version).
6564 */
6565static void
6566wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6567{
6568	struct wm_softc *sc = device_private(self);
6569
6570	i82543_mii_sendbits(sc, 0xffffffffU, 32);
6571	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6572	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6573	    (MII_COMMAND_START << 30), 32);
6574}
6575
6576/*
6577 * wm_gmii_i82544_readreg:	[mii interface function]
6578 *
6579 *	Read a PHY register on the GMII.
6580 */
6581static int
6582wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6583{
6584	struct wm_softc *sc = device_private(self);
6585	uint32_t mdic = 0;
6586	int i, rv;
6587
6588	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6589	    MDIC_REGADD(reg));
6590
6591	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6592		mdic = CSR_READ(sc, WMREG_MDIC);
6593		if (mdic & MDIC_READY)
6594			break;
6595		delay(50);
6596	}
6597
6598	if ((mdic & MDIC_READY) == 0) {
6599		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6600		    device_xname(sc->sc_dev), phy, reg);
6601		rv = 0;
6602	} else if (mdic & MDIC_E) {
6603#if 0 /* This is normal if no PHY is present. */
6604		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6605		    device_xname(sc->sc_dev), phy, reg);
6606#endif
6607		rv = 0;
6608	} else {
6609		rv = MDIC_DATA(mdic);
6610		if (rv == 0xffff)
6611			rv = 0;
6612	}
6613
6614	return rv;
6615}
6616
6617/*
6618 * wm_gmii_i82544_writereg:	[mii interface function]
6619 *
6620 *	Write a PHY register on the GMII.
6621 */
6622static void
6623wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6624{
6625	struct wm_softc *sc = device_private(self);
6626	uint32_t mdic = 0;
6627	int i;
6628
6629	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6630	    MDIC_REGADD(reg) | MDIC_DATA(val));
6631
6632	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6633		mdic = CSR_READ(sc, WMREG_MDIC);
6634		if (mdic & MDIC_READY)
6635			break;
6636		delay(50);
6637	}
6638
6639	if ((mdic & MDIC_READY) == 0)
6640		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6641		    device_xname(sc->sc_dev), phy, reg);
6642	else if (mdic & MDIC_E)
6643		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6644		    device_xname(sc->sc_dev), phy, reg);
6645}
6646
6647/*
6648 * wm_gmii_i80003_readreg:	[mii interface function]
6649 *
6650 *	Read a PHY register on the kumeran
6651 * This could be handled by the PHY layer if we didn't have to lock the
6652 * ressource ...
6653 */
6654static int
6655wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6656{
6657	struct wm_softc *sc = device_private(self);
6658	int sem;
6659	int rv;
6660
6661	if (phy != 1) /* only one PHY on kumeran bus */
6662		return 0;
6663
6664	sem = swfwphysem[sc->sc_funcid];
6665	if (wm_get_swfw_semaphore(sc, sem)) {
6666		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6667		    __func__);
6668		return 0;
6669	}
6670
6671	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6672		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6673		    reg >> GG82563_PAGE_SHIFT);
6674	} else {
6675		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6676		    reg >> GG82563_PAGE_SHIFT);
6677	}
6678	/* Wait more 200us for a bug of the ready bit in the MDIC register */
6679	delay(200);
6680	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6681	delay(200);
6682
6683	wm_put_swfw_semaphore(sc, sem);
6684	return rv;
6685}
6686
6687/*
6688 * wm_gmii_i80003_writereg:	[mii interface function]
6689 *
6690 *	Write a PHY register on the kumeran.
6691 * This could be handled by the PHY layer if we didn't have to lock the
6692 * ressource ...
6693 */
6694static void
6695wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6696{
6697	struct wm_softc *sc = device_private(self);
6698	int sem;
6699
6700	if (phy != 1) /* only one PHY on kumeran bus */
6701		return;
6702
6703	sem = swfwphysem[sc->sc_funcid];
6704	if (wm_get_swfw_semaphore(sc, sem)) {
6705		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6706		    __func__);
6707		return;
6708	}
6709
6710	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6711		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6712		    reg >> GG82563_PAGE_SHIFT);
6713	} else {
6714		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6715		    reg >> GG82563_PAGE_SHIFT);
6716	}
6717	/* Wait more 200us for a bug of the ready bit in the MDIC register */
6718	delay(200);
6719	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6720	delay(200);
6721
6722	wm_put_swfw_semaphore(sc, sem);
6723}
6724
6725/*
6726 * wm_gmii_bm_readreg:	[mii interface function]
6727 *
6728 *	Read a PHY register on the kumeran
6729 * This could be handled by the PHY layer if we didn't have to lock the
6730 * ressource ...
6731 */
6732static int
6733wm_gmii_bm_readreg(device_t self, int phy, int reg)
6734{
6735	struct wm_softc *sc = device_private(self);
6736	int sem;
6737	int rv;
6738
6739	sem = swfwphysem[sc->sc_funcid];
6740	if (wm_get_swfw_semaphore(sc, sem)) {
6741		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6742		    __func__);
6743		return 0;
6744	}
6745
6746	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6747		if (phy == 1)
6748			wm_gmii_i82544_writereg(self, phy, 0x1f,
6749			    reg);
6750		else
6751			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6752			    reg >> GG82563_PAGE_SHIFT);
6753
6754	}
6755
6756	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6757	wm_put_swfw_semaphore(sc, sem);
6758	return rv;
6759}
6760
6761/*
6762 * wm_gmii_bm_writereg:	[mii interface function]
6763 *
6764 *	Write a PHY register on the kumeran.
6765 * This could be handled by the PHY layer if we didn't have to lock the
6766 * ressource ...
6767 */
6768static void
6769wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6770{
6771	struct wm_softc *sc = device_private(self);
6772	int sem;
6773
6774	sem = swfwphysem[sc->sc_funcid];
6775	if (wm_get_swfw_semaphore(sc, sem)) {
6776		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6777		    __func__);
6778		return;
6779	}
6780
6781	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6782		if (phy == 1)
6783			wm_gmii_i82544_writereg(self, phy, 0x1f,
6784			    reg);
6785		else
6786			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6787			    reg >> GG82563_PAGE_SHIFT);
6788
6789	}
6790
6791	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6792	wm_put_swfw_semaphore(sc, sem);
6793}
6794
/*
 * Access a register on the BM PHY wakeup page (page 800), which needs
 * a special sequence: enable host wakeup access on page 769, move to
 * page 800, write the register address, transfer the data, and restore
 * the original page-769 enable bits.  'rd' non-zero reads into *val,
 * otherwise *val is written.  Callers are expected to hold the PHY
 * semaphore.
 */
static void
wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
{
	struct wm_softc *sc = device_private(self);
	uint16_t regnum = BM_PHY_REG_NUM(offset);
	uint16_t wuce;

	/* XXX Gig must be disabled for MDIO accesses to page 800 */
	if (sc->sc_type == WM_T_PCH) {
		/* XXX e1000 driver do nothing... why? */
	}

	/* Set page 769 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);

	/* Enable access to the wakeup page, host wakeup disabled. */
	wuce &= ~BM_WUC_HOST_WU_BIT;
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
	    wuce | BM_WUC_ENABLE_BIT);

	/* Select page 800 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);

	/* Write page 800 */
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);

	if (rd)
		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
	else
		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);

	/* Set page 769 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	/* Restore the original wakeup-enable bits. */
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
}
6835
6836/*
6837 * wm_gmii_hv_readreg:	[mii interface function]
6838 *
6839 *	Read a PHY register on the kumeran
6840 * This could be handled by the PHY layer if we didn't have to lock the
6841 * ressource ...
6842 */
6843static int
6844wm_gmii_hv_readreg(device_t self, int phy, int reg)
6845{
6846	struct wm_softc *sc = device_private(self);
6847	uint16_t page = BM_PHY_REG_PAGE(reg);
6848	uint16_t regnum = BM_PHY_REG_NUM(reg);
6849	uint16_t val;
6850	int rv;
6851
6852	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6853		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6854		    __func__);
6855		return 0;
6856	}
6857
6858	/* XXX Workaround failure in MDIO access while cable is disconnected */
6859	if (sc->sc_phytype == WMPHY_82577) {
6860		/* XXX must write */
6861	}
6862
6863	/* Page 800 works differently than the rest so it has its own func */
6864	if (page == BM_WUC_PAGE) {
6865		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
6866		return val;
6867	}
6868
6869	/*
6870	 * Lower than page 768 works differently than the rest so it has its
6871	 * own func
6872	 */
6873	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6874		printf("gmii_hv_readreg!!!\n");
6875		return 0;
6876	}
6877
6878	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6879		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6880		    page << BME1000_PAGE_SHIFT);
6881	}
6882
6883	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6884	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6885	return rv;
6886}
6887
6888/*
6889 * wm_gmii_hv_writereg:	[mii interface function]
6890 *
6891 *	Write a PHY register on the kumeran.
6892 * This could be handled by the PHY layer if we didn't have to lock the
6893 * ressource ...
6894 */
6895static void
6896wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6897{
6898	struct wm_softc *sc = device_private(self);
6899	uint16_t page = BM_PHY_REG_PAGE(reg);
6900	uint16_t regnum = BM_PHY_REG_NUM(reg);
6901
6902	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6903		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6904		    __func__);
6905		return;
6906	}
6907
6908	/* XXX Workaround failure in MDIO access while cable is disconnected */
6909
6910	/* Page 800 works differently than the rest so it has its own func */
6911	if (page == BM_WUC_PAGE) {
6912		uint16_t tmp;
6913
6914		tmp = val;
6915		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
6916		return;
6917	}
6918
6919	/*
6920	 * Lower than page 768 works differently than the rest so it has its
6921	 * own func
6922	 */
6923	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6924		printf("gmii_hv_writereg!!!\n");
6925		return;
6926	}
6927
6928	/*
6929	 * XXX Workaround MDIO accesses being disabled after entering IEEE
6930	 * Power Down (whenever bit 11 of the PHY control register is set)
6931	 */
6932
6933	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6934		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6935		    page << BME1000_PAGE_SHIFT);
6936	}
6937
6938	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6939	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6940}
6941
6942/*
6943 * wm_gmii_hv_readreg:	[mii interface function]
6944 *
6945 *	Read a PHY register on the kumeran
6946 * This could be handled by the PHY layer if we didn't have to lock the
6947 * ressource ...
6948 */
6949static int
6950wm_sgmii_readreg(device_t self, int phy, int reg)
6951{
6952	struct wm_softc *sc = device_private(self);
6953	uint32_t i2ccmd;
6954	int i, rv;
6955
6956	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6957		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6958		    __func__);
6959		return 0;
6960	}
6961
6962	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6963	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
6964	    | I2CCMD_OPCODE_READ;
6965	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6966
6967	/* Poll the ready bit */
6968	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6969		delay(50);
6970		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6971		if (i2ccmd & I2CCMD_READY)
6972			break;
6973	}
6974	if ((i2ccmd & I2CCMD_READY) == 0)
6975		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6976	if ((i2ccmd & I2CCMD_ERROR) != 0)
6977		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6978
6979	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6980
6981	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6982	return rv;
6983}
6984
6985/*
6986 * wm_gmii_hv_writereg:	[mii interface function]
6987 *
6988 *	Write a PHY register on the kumeran.
6989 * This could be handled by the PHY layer if we didn't have to lock the
6990 * ressource ...
6991 */
6992static void
6993wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6994{
6995	struct wm_softc *sc = device_private(self);
6996	uint32_t i2ccmd;
6997	int i;
6998
6999	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7000		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7001		    __func__);
7002		return;
7003	}
7004
7005	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7006	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
7007	    | I2CCMD_OPCODE_WRITE;
7008	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7009
7010	/* Poll the ready bit */
7011	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7012		delay(50);
7013		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7014		if (i2ccmd & I2CCMD_READY)
7015			break;
7016	}
7017	if ((i2ccmd & I2CCMD_READY) == 0)
7018		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7019	if ((i2ccmd & I2CCMD_ERROR) != 0)
7020		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7021
7022	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7023}
7024
7025/*
7026 * wm_gmii_82580_readreg:	[mii interface function]
7027 *
7028 *	Read a PHY register on the 82580 and I350.
7029 * This could be handled by the PHY layer if we didn't have to lock the
7030 * ressource ...
7031 */
7032static int
7033wm_gmii_82580_readreg(device_t self, int phy, int reg)
7034{
7035	struct wm_softc *sc = device_private(self);
7036	int sem;
7037	int rv;
7038
7039	sem = swfwphysem[sc->sc_funcid];
7040	if (wm_get_swfw_semaphore(sc, sem)) {
7041		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7042		    __func__);
7043		return 0;
7044	}
7045
7046	rv = wm_gmii_i82544_readreg(self, phy, reg);
7047
7048	wm_put_swfw_semaphore(sc, sem);
7049	return rv;
7050}
7051
7052/*
7053 * wm_gmii_82580_writereg:	[mii interface function]
7054 *
7055 *	Write a PHY register on the 82580 and I350.
7056 * This could be handled by the PHY layer if we didn't have to lock the
7057 * ressource ...
7058 */
7059static void
7060wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7061{
7062	struct wm_softc *sc = device_private(self);
7063	int sem;
7064
7065	sem = swfwphysem[sc->sc_funcid];
7066	if (wm_get_swfw_semaphore(sc, sem)) {
7067		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7068		    __func__);
7069		return;
7070	}
7071
7072	wm_gmii_i82544_writereg(self, phy, reg, val);
7073
7074	wm_put_swfw_semaphore(sc, sem);
7075}
7076
7077/*
7078 * wm_gmii_statchg:	[mii interface function]
7079 *
7080 *	Callback from MII layer when media changes.
7081 */
7082static void
7083wm_gmii_statchg(device_t self)
7084{
7085	struct wm_softc *sc = device_private(self);
7086	struct mii_data *mii = &sc->sc_mii;
7087
7088	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7089	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7090	sc->sc_fcrtl &= ~FCRTL_XONE;
7091
7092	/*
7093	 * Get flow control negotiation result.
7094	 */
7095	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7096	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7097		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7098		mii->mii_media_active &= ~IFM_ETH_FMASK;
7099	}
7100
7101	if (sc->sc_flowflags & IFM_FLOW) {
7102		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7103			sc->sc_ctrl |= CTRL_TFCE;
7104			sc->sc_fcrtl |= FCRTL_XONE;
7105		}
7106		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7107			sc->sc_ctrl |= CTRL_RFCE;
7108	}
7109
7110	if (sc->sc_mii.mii_media_active & IFM_FDX) {
7111		DPRINTF(WM_DEBUG_LINK,
7112		    ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
7113		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7114	} else {
7115		DPRINTF(WM_DEBUG_LINK,
7116		    ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
7117		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7118	}
7119
7120	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7121	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7122	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7123						 : WMREG_FCRTL, sc->sc_fcrtl);
7124	if (sc->sc_type == WM_T_80003) {
7125		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7126		case IFM_1000_T:
7127			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7128			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7129			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
7130			break;
7131		default:
7132			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7133			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7134			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
7135			break;
7136		}
7137		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7138	}
7139}
7140
7141/*
7142 * wm_kmrn_readreg:
7143 *
7144 *	Read a kumeran register
7145 */
7146static int
7147wm_kmrn_readreg(struct wm_softc *sc, int reg)
7148{
7149	int rv;
7150
7151	if (sc->sc_flags == WM_F_SWFW_SYNC) {
7152		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7153			aprint_error_dev(sc->sc_dev,
7154			    "%s: failed to get semaphore\n", __func__);
7155			return 0;
7156		}
7157	} else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
7158		if (wm_get_swfwhw_semaphore(sc)) {
7159			aprint_error_dev(sc->sc_dev,
7160			    "%s: failed to get semaphore\n", __func__);
7161			return 0;
7162		}
7163	}
7164
7165	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7166	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7167	    KUMCTRLSTA_REN);
7168	delay(2);
7169
7170	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7171
7172	if (sc->sc_flags == WM_F_SWFW_SYNC)
7173		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7174	else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
7175		wm_put_swfwhw_semaphore(sc);
7176
7177	return rv;
7178}
7179
7180/*
7181 * wm_kmrn_writereg:
7182 *
7183 *	Write a kumeran register
7184 */
7185static void
7186wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7187{
7188
7189	if (sc->sc_flags == WM_F_SWFW_SYNC) {
7190		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7191			aprint_error_dev(sc->sc_dev,
7192			    "%s: failed to get semaphore\n", __func__);
7193			return;
7194		}
7195	} else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
7196		if (wm_get_swfwhw_semaphore(sc)) {
7197			aprint_error_dev(sc->sc_dev,
7198			    "%s: failed to get semaphore\n", __func__);
7199			return;
7200		}
7201	}
7202
7203	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7204	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7205	    (val & KUMCTRLSTA_MASK));
7206
7207	if (sc->sc_flags == WM_F_SWFW_SYNC)
7208		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7209	else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
7210		wm_put_swfwhw_semaphore(sc);
7211}
7212
7213static int
7214wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7215{
7216	uint32_t eecd = 0;
7217
7218	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7219	    || sc->sc_type == WM_T_82583) {
7220		eecd = CSR_READ(sc, WMREG_EECD);
7221
7222		/* Isolate bits 15 & 16 */
7223		eecd = ((eecd >> 15) & 0x03);
7224
7225		/* If both bits are set, device is Flash type */
7226		if (eecd == 0x03)
7227			return 0;
7228	}
7229	return 1;
7230}
7231
/*
 * Acquire the software/firmware semaphore (SWSM.SWESMBI).
 *
 * Repeatedly try to set SWESMBI; ownership is confirmed when the bit
 * reads back as set.  Returns 0 on success, 1 on timeout (after
 * releasing the bit, in case the final write partially took effect).
 */
static int
wm_get_swsm_semaphore(struct wm_softc *sc)
{
	int32_t timeout;
	uint32_t swsm;

	/* Get the FW semaphore. */
	timeout = 1000 + 1; /* XXX */
	while (timeout) {
		swsm = CSR_READ(sc, WMREG_SWSM);
		swsm |= SWSM_SWESMBI;
		CSR_WRITE(sc, WMREG_SWSM, swsm);
		/* if we managed to set the bit we got the semaphore. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		if (swsm & SWSM_SWESMBI)
			break;

		delay(50);
		timeout--;
	}

	/* timeout reaches 0 only if the loop exhausted every attempt. */
	if (timeout == 0) {
		aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
		/* Release semaphores */
		wm_put_swsm_semaphore(sc);
		return 1;
	}
	return 0;
}
7261
7262static void
7263wm_put_swsm_semaphore(struct wm_softc *sc)
7264{
7265	uint32_t swsm;
7266
7267	swsm = CSR_READ(sc, WMREG_SWSM);
7268	swsm &= ~(SWSM_SWESMBI);
7269	CSR_WRITE(sc, WMREG_SWSM, swsm);
7270}
7271
7272static int
7273wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7274{
7275	uint32_t swfw_sync;
7276	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7277	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
7278	int timeout = 200;
7279
7280	for (timeout = 0; timeout < 200; timeout++) {
7281		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7282			if (wm_get_swsm_semaphore(sc)) {
7283				aprint_error_dev(sc->sc_dev,
7284				    "%s: failed to get semaphore\n",
7285				    __func__);
7286				return 1;
7287			}
7288		}
7289		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7290		if ((swfw_sync & (swmask | fwmask)) == 0) {
7291			swfw_sync |= swmask;
7292			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7293			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7294				wm_put_swsm_semaphore(sc);
7295			return 0;
7296		}
7297		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7298			wm_put_swsm_semaphore(sc);
7299		delay(5000);
7300	}
7301	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7302	    device_xname(sc->sc_dev), mask, swfw_sync);
7303	return 1;
7304}
7305
/*
 * Release the SW_FW_SYNC bits in "mask", again bracketed by the SWSM
 * semaphore where present.
 */
static void
wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;

	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
		/* Spin until SWSM is ours; release must not fail. */
		while (wm_get_swsm_semaphore(sc) != 0)
			continue;
	}
	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
		wm_put_swsm_semaphore(sc);
}
7321
7322static int
7323wm_get_swfwhw_semaphore(struct wm_softc *sc)
7324{
7325	uint32_t ext_ctrl;
7326	int timeout = 200;
7327
7328	for (timeout = 0; timeout < 200; timeout++) {
7329		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7330		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7331		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7332
7333		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7334		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7335			return 0;
7336		delay(5000);
7337	}
7338	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7339	    device_xname(sc->sc_dev), ext_ctrl);
7340	return 1;
7341}
7342
7343static void
7344wm_put_swfwhw_semaphore(struct wm_softc *sc)
7345{
7346	uint32_t ext_ctrl;
7347	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7348	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7349	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7350}
7351
/*
 * Determine which of the two NVM flash banks is valid and store its
 * number (0 or 1) in *bank.  Returns 0 on success, -1 if neither bank
 * carries a valid signature.
 */
static int
wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
{
	/* Byte offset of the high byte of the signature word. */
	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);

	if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
		/* Value of bit 22 corresponds to the flash bank we're on. */
		*bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
	} else {
		uint8_t bank_high_byte;
		/* NOTE(review): the return value of wm_read_ich8_byte()
		 * is ignored here and below — a failed read would be
		 * interpreted as an invalid signature; confirm intended. */
		wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
		if ((bank_high_byte & 0xc0) == 0x80)
			*bank = 0;
		else {
			/* Bank 0 invalid; try the same word in bank 1. */
			wm_read_ich8_byte(sc, act_offset + bank1_offset,
			    &bank_high_byte);
			if ((bank_high_byte & 0xc0) == 0x80)
				*bank = 1;
			else {
				aprint_error_dev(sc->sc_dev,
				    "EEPROM not present\n");
				return -1;
			}
		}
	}

	return 0;
}
7381
7382/******************************************************************************
7383 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
7384 * register.
7385 *
7386 * sc - Struct containing variables accessed by shared code
7387 * offset - offset of word in the EEPROM to read
7388 * data - word read from the EEPROM
7389 * words - number of words to read
7390 *****************************************************************************/
static int
wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
{
	int32_t  error = 0;
	uint32_t flash_bank = 0;
	uint32_t act_offset = 0;
	uint32_t bank_offset = 0;
	uint16_t word = 0;
	uint16_t i = 0;

	/* We need to know which is the valid flash bank.  In the event
	 * that we didn't allocate eeprom_shadow_ram, we may not be
	 * managing flash_bank.  So it cannot be trusted and needs
	 * to be updated with each read.
	 */
	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
	if (error) {
		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
		    __func__);
		return error;
	}

	/* Adjust offset appropriately if we're on bank 1 - adjust for word size */
	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);

	/* Serialize flash access against firmware for the whole loop. */
	error = wm_get_swfwhw_semaphore(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return error;
	}

	for (i = 0; i < words; i++) {
		/* The NVM part needs a byte offset, hence * 2 */
		act_offset = bank_offset + ((offset + i) * 2);
		error = wm_read_ich8_word(sc, act_offset, &word);
		if (error) {
			aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
			    __func__);
			break;
		}
		data[i] = word;
	}

	/* Release even on a partial read; error reports the failure. */
	wm_put_swfwhw_semaphore(sc);
	return error;
}
7438
7439/******************************************************************************
7440 * This function does initial flash setup so that a new read/write/erase cycle
7441 * can be started.
7442 *
7443 * sc - The pointer to the hw structure
7444 ****************************************************************************/
static int32_t
wm_ich8_cycle_init(struct wm_softc *sc)
{
	uint16_t hsfsts;
	int32_t error = 1;	/* pessimistic: 1 = failure, 0 = success */
	int32_t i     = 0;

	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);

	/* May be check the Flash Des Valid bit in Hw status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
		/* Flash descriptor invalid: nothing we can do. */
		return error;
	}

	/* Clear FCERR in Hw status by writing 1 */
	/* Clear DAEL in Hw status by writing a 1 */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;

	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);

	/*
	 * Either we should have a hardware SPI cycle in progress bit to check
	 * against, in order to start a new cycle or FDONE bit should be
	 * changed in the hardware so that it is 1 after harware reset, which
	 * can then be used as an indication whether a cycle is in progress or
	 * has been completed .. we should also have some software semaphore
	 * mechanism to guard FDONE or the cycle in progress bit so that two
	 * threads access to those bits can be sequentiallized or a way so that
	 * 2 threads dont start the cycle at the same time
	 */

	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/*
		 * There is no cycle running at present, so we can start a
		 * cycle
		 */

		/* Begin by setting Flash Cycle Done. */
		hsfsts |= HSFSTS_DONE;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		error = 0;
	} else {
		/*
		 * otherwise poll for sometime so the current cycle has a
		 * chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
				error = 0;
				break;
			}
			delay(1);
		}
		if (error == 0) {
			/*
			 * Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts |= HSFSTS_DONE;
			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		}
	}
	return error;
}
7510
7511/******************************************************************************
7512 * This function starts a flash cycle and waits for its completion
7513 *
7514 * sc - The pointer to the hw structure
7515 ****************************************************************************/
7516static int32_t
7517wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7518{
7519	uint16_t hsflctl;
7520	uint16_t hsfsts;
7521	int32_t error = 1;
7522	uint32_t i = 0;
7523
7524	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7525	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7526	hsflctl |= HSFCTL_GO;
7527	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7528
7529	/* wait till FDONE bit is set to 1 */
7530	do {
7531		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7532		if (hsfsts & HSFSTS_DONE)
7533			break;
7534		delay(1);
7535		i++;
7536	} while (i < timeout);
7537	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
7538		error = 0;
7539
7540	return error;
7541}
7542
7543/******************************************************************************
7544 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7545 *
7546 * sc - The pointer to the hw structure
7547 * index - The index of the byte or word to read.
7548 * size - Size of data to read, 1=byte 2=word
7549 * data - Pointer to the word to store the value read.
7550 *****************************************************************************/
static int32_t
wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
    uint32_t size, uint16_t* data)
{
	uint16_t hsfsts;
	uint16_t hsflctl;
	uint32_t flash_linear_address;
	uint32_t flash_data = 0;
	int32_t error = 1;	/* 1 = failure until a cycle succeeds */
	int32_t count = 0;

	/* Reject bad sizes, NULL output, and out-of-range addresses. */
	if (size < 1  || size > 2 || data == 0x0 ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;

	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
	    sc->sc_ich8_flash_base;

	do {
		delay(1);
		/* Steps */
		error = wm_ich8_cycle_init(sc);
		if (error)
			break;

		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
		    & HSFCTL_BCOUNT_MASK;
		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

		/*
		 * Write the last 24 bits of index into Flash Linear address
		 * field in Flash Address
		 */
		/* TODO: TBD maybe check the index against the size of flash */

		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);

		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);

		/*
		 * Check if FCERR is set to 1, if set to 1, clear it and try
		 * the whole sequence a few more times, else read in (shift in)
		 * the Flash Data0, the order is least significant byte first
		 * msb to lsb
		 */
		if (error == 0) {
			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (uint8_t)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (uint16_t)(flash_data & 0x0000FFFF);
			break;
		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if (hsfsts & HSFSTS_ERR) {
				/* Repeat for some time before giving up. */
				continue;
			} else if ((hsfsts & HSFSTS_DONE) == 0)
				break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return error;
}
7624
7625/******************************************************************************
7626 * Reads a single byte from the NVM using the ICH8 flash access registers.
7627 *
7628 * sc - pointer to wm_hw structure
7629 * index - The index of the byte to read.
7630 * data - Pointer to a byte to store the value read.
7631 *****************************************************************************/
7632static int32_t
7633wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7634{
7635	int32_t status;
7636	uint16_t word = 0;
7637
7638	status = wm_read_ich8_data(sc, index, 1, &word);
7639	if (status == 0)
7640		*data = (uint8_t)word;
7641	else
7642		*data = 0;
7643
7644	return status;
7645}
7646
7647/******************************************************************************
7648 * Reads a word from the NVM using the ICH8 flash access registers.
7649 *
7650 * sc - pointer to wm_hw structure
7651 * index - The starting byte index of the word to read.
7652 * data - Pointer to a word to store the value read.
7653 *****************************************************************************/
7654static int32_t
7655wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
7656{
7657	int32_t status;
7658
7659	status = wm_read_ich8_data(sc, index, 2, data);
7660	return status;
7661}
7662
/*
 * Dispatch to the chip-specific manageability-mode check.
 * Returns non-zero when management firmware is active.
 */
static int
wm_check_mng_mode(struct wm_softc *sc)
{
	int rv;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		rv = wm_check_mng_mode_ich8lan(sc);
		break;
	case WM_T_82574:
	case WM_T_82583:
		rv = wm_check_mng_mode_82574(sc);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_80003:
		rv = wm_check_mng_mode_generic(sc);
		break;
	default:
		/* nothing to do */
		rv = 0;
		break;
	}

	return rv;
}
7694
7695static int
7696wm_check_mng_mode_ich8lan(struct wm_softc *sc)
7697{
7698	uint32_t fwsm;
7699
7700	fwsm = CSR_READ(sc, WMREG_FWSM);
7701
7702	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
7703		return 1;
7704
7705	return 0;
7706}
7707
7708static int
7709wm_check_mng_mode_82574(struct wm_softc *sc)
7710{
7711	uint16_t data;
7712
7713	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
7714
7715	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
7716		return 1;
7717
7718	return 0;
7719}
7720
7721static int
7722wm_check_mng_mode_generic(struct wm_softc *sc)
7723{
7724	uint32_t fwsm;
7725
7726	fwsm = CSR_READ(sc, WMREG_FWSM);
7727
7728	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
7729		return 1;
7730
7731	return 0;
7732}
7733
/*
 * Decide whether management packets should be passed through to the
 * host.  Returns 1 if pass-through should be enabled, 0 otherwise.
 */
static int
wm_enable_mng_pass_thru(struct wm_softc *sc)
{
	uint32_t manc, fwsm, factps;

	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
		return 0;

	manc = CSR_READ(sc, WMREG_MANC);

	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
		device_xname(sc->sc_dev), manc));
	/* Both TCO receive and MAC address filtering must be on. */
	if (((manc & MANC_RECV_TCO_EN) == 0)
	    || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
		return 0;

	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
		fwsm = CSR_READ(sc, WMREG_FWSM);
		factps = CSR_READ(sc, WMREG_FACTPS);
		/* Enable only if clock gating is off and iAMT is active. */
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((fwsm & FWSM_MODE_MASK)
			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
			return 1;
	} else if (((manc & MANC_SMBUS_EN) != 0)
	    && ((manc & MANC_ASF_EN) == 0))
		return 1;

	return 0;
}
7763
7764static int
7765wm_check_reset_block(struct wm_softc *sc)
7766{
7767	uint32_t reg;
7768
7769	switch (sc->sc_type) {
7770	case WM_T_ICH8:
7771	case WM_T_ICH9:
7772	case WM_T_ICH10:
7773	case WM_T_PCH:
7774	case WM_T_PCH2:
7775		reg = CSR_READ(sc, WMREG_FWSM);
7776		if ((reg & FWSM_RSPCIPHY) != 0)
7777			return 0;
7778		else
7779			return -1;
7780		break;
7781	case WM_T_82571:
7782	case WM_T_82572:
7783	case WM_T_82573:
7784	case WM_T_82574:
7785	case WM_T_82583:
7786	case WM_T_80003:
7787		reg = CSR_READ(sc, WMREG_MANC);
7788		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7789			return -1;
7790		else
7791			return 0;
7792		break;
7793	default:
7794		/* no problem */
7795		break;
7796	}
7797
7798	return 0;
7799}
7800
7801static void
7802wm_get_hw_control(struct wm_softc *sc)
7803{
7804	uint32_t reg;
7805
7806	switch (sc->sc_type) {
7807	case WM_T_82573:
7808		reg = CSR_READ(sc, WMREG_SWSM);
7809		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7810		break;
7811	case WM_T_82571:
7812	case WM_T_82572:
7813	case WM_T_82574:
7814	case WM_T_82583:
7815	case WM_T_80003:
7816	case WM_T_ICH8:
7817	case WM_T_ICH9:
7818	case WM_T_ICH10:
7819	case WM_T_PCH:
7820	case WM_T_PCH2:
7821		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7822		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7823		break;
7824	default:
7825		break;
7826	}
7827}
7828
7829static void
7830wm_release_hw_control(struct wm_softc *sc)
7831{
7832	uint32_t reg;
7833
7834	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7835		return;
7836
7837	if (sc->sc_type == WM_T_82573) {
7838		reg = CSR_READ(sc, WMREG_SWSM);
7839		reg &= ~SWSM_DRV_LOAD;
7840		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
7841	} else {
7842		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7843		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7844	}
7845}
7846
/* XXX Currently TBI only */
static int
wm_check_for_link(struct wm_softc *sc)
{
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw;
	uint32_t ctrl;
	uint32_t status;
	uint32_t sig;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	/* Signal-detect pin polarity differs on the oldest chips. */
	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
		device_xname(sc->sc_dev), __func__,
		((ctrl & CTRL_SWDPIN(1)) == sig),
		((status & STATUS_LU) != 0),
		((rxcw & RXCW_C) != 0)
		    ));

	/*
	 * SWDPIN   LU RXCW
	 *      0    0    0
	 *      0    0    1	(should not happen)
	 *      0    1    0	(should not happen)
	 *      0    1    1	(should not happen)
	 *      1    0    0	Disable autonego and force linkup
	 *      1    0    1	got /C/ but not linkup yet
	 *      1    1    0	(linkup)
	 *      1    1    1	If IFM_AUTO, back to autonego
	 *
	 */
	if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((status & STATUS_LU) == 0)
	    && ((rxcw & RXCW_C) == 0)) {
		/* Signal present but no link and no /C/: force the link. */
		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
			__func__));
		sc->sc_tbi_linkup = 0;
		/* Disable auto-negotiation in the TXCW register */
		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));

		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: CTRL was updated TFCE and RFCE automatically,
		 * so we should update sc->sc_ctrl
		 */
		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
	    && ((rxcw & RXCW_C) != 0)
	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
		/* Link and /C/ while in AUTO: re-enable autonegotiation. */
		sc->sc_tbi_linkup = 1;
		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
			__func__));
		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((rxcw & RXCW_C) != 0)) {
		/* Receiving /C/ ordered sets but no link yet. */
		DPRINTF(WM_DEBUG_LINK, ("/C/"));
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
			status));
	}

	/* Always reports success; state is tracked in sc_tbi_linkup. */
	return 0;
}
7917
7918/* Work-around for 82566 Kumeran PCS lock loss */
7919static void
7920wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
7921{
7922	int miistatus, active, i;
7923	int reg;
7924
7925	miistatus = sc->sc_mii.mii_media_status;
7926
7927	/* If the link is not up, do nothing */
7928	if ((miistatus & IFM_ACTIVE) != 0)
7929		return;
7930
7931	active = sc->sc_mii.mii_media_active;
7932
7933	/* Nothing to do if the link is other than 1Gbps */
7934	if (IFM_SUBTYPE(active) != IFM_1000_T)
7935		return;
7936
7937	for (i = 0; i < 10; i++) {
7938		/* read twice */
7939		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7940		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7941		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) != 0)
7942			goto out;	/* GOOD! */
7943
7944		/* Reset the PHY */
7945		wm_gmii_reset(sc);
7946		delay(5*1000);
7947	}
7948
7949	/* Disable GigE link negotiation */
7950	reg = CSR_READ(sc, WMREG_PHY_CTRL);
7951	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7952	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7953
7954	/*
7955	 * Call gig speed drop workaround on Gig disable before accessing
7956	 * any PHY registers.
7957	 */
7958	wm_gig_downshift_workaround_ich8lan(sc);
7959
7960out:
7961	return;
7962}
7963
7964/* WOL from S5 stops working */
7965static void
7966wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7967{
7968	uint16_t kmrn_reg;
7969
7970	/* Only for igp3 */
7971	if (sc->sc_phytype == WMPHY_IGP_3) {
7972		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
7973		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7974		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7975		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7976		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7977	}
7978}
7979
7980#ifdef WM_WOL
7981/* Power down workaround on D3 */
/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Try the sequence at most twice, resetting the PHY in between. */
	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		/* Also give up after the second attempt (i != 0). */
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}
8017#endif /* WM_WOL */
8018
8019/*
8020 * Workaround for pch's PHYs
8021 * XXX should be moved to new PHY driver?
8022 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	/* 82577 requires slow MDIO before any other PHY access. */
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}
8059
/* PCH2 (82579) PHY workaround: only slow MDIO mode is needed. */
static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}
8066
8067static void
8068wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
8069{
8070	int k1_enable = sc->sc_nvm_k1_enabled;
8071
8072	/* XXX acquire semaphore */
8073
8074	if (link) {
8075		k1_enable = 0;
8076
8077		/* Link stall fix for link up */
8078		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
8079	} else {
8080		/* Link stall fix for link down */
8081		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
8082	}
8083
8084	wm_configure_k1_ich8lan(sc, k1_enable);
8085
8086	/* XXX release semaphore */
8087}
8088
8089static void
8090wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
8091{
8092	uint32_t reg;
8093
8094	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
8095	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8096	    reg | HV_KMRN_MDIO_SLOW);
8097}
8098
/*
 * Enable or disable the K1 power-saving state, then bounce the MAC
 * through a forced-speed configuration so the new setting takes.
 */
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	/* Save CTRL/CTRL_EXT so they can be restored afterwards. */
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	/* Temporarily force 10Mbps with speed-bypass enabled. */
	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	delay(20);

	/* Restore the original configuration. */
	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	delay(20);
}
8130
/*
 * Switch the PHY interface from SMBus to PCI (PCH/PCH2) by toggling
 * the LANPHYPC override, but only when firmware is not managing the
 * PHY and resets are not blocked.
 */
static void
wm_smbustopci(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && ((wm_check_reset_block(sc) == 0))) {
		/* Drive LANPHYPC low under software control... */
		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(10);
		/* ...then release the override and let the PHY settle. */
		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(50*1000);

		/*
		 * Gate automatic PHY configuration by hardware on non-managed
		 * 82579
		 */
		if (sc->sc_type == WM_T_PCH2)
			wm_gate_hw_phy_config_ich8lan(sc, 1);
	}
}
8155
/*
 * Set a non-zero PCIe completion timeout (in GCR for capability v1
 * devices, in the PCIe DCSR2 config register for v2) and disable
 * completion-timeout resend.
 */
static void
wm_set_pcie_completion_timeout(struct wm_softc *sc)
{
	uint32_t gcr;
	pcireg_t ctrl2;

	gcr = CSR_READ(sc, WMREG_GCR);

	/* Only take action if timeout value is defaulted to 0 */
	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
		goto out;

	if ((gcr & GCR_CAP_VER2) == 0) {
		/* Capability version 1: timeout lives in GCR itself. */
		gcr |= GCR_CMPL_TMOUT_10MS;
		goto out;
	}

	/* Capability version 2: set a 16ms timeout via config space. */
	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
	ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);

out:
	/* Disable completion timeout resend */
	gcr &= ~GCR_CMPL_TMOUT_RESEND;

	CSR_WRITE(sc, WMREG_GCR, gcr);
}
8185
8186/* special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * remark: this is untested code - we have no board without EEPROM
	 *  same setup as mentioned int the freeBSD driver for the i82575
	 *
	 * The (offset, value) pairs below are undocumented magic taken
	 * from that driver; do not change them without a reference.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}
8216
/*
 * Configure management packet handling: let the host see ARP, and on
 * newer chips redirect management traffic (ports 623/624) to the host.
 */
static void
wm_init_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		/* disable hardware interception of ARP */
		manc &= ~MANC_ARP_EN;

		/* enable receiving management packets to the host */
		if (sc->sc_type >= WM_T_82571) {
			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);

		}

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}
8239
8240static void
8241wm_release_manageability(struct wm_softc *sc)
8242{
8243
8244	if (sc->sc_flags & WM_F_HAS_MANAGE) {
8245		uint32_t manc = CSR_READ(sc, WMREG_MANC);
8246
8247		if (sc->sc_type >= WM_T_82571)
8248			manc &= ~MANC_EN_MNG2HOST;
8249
8250		CSR_WRITE(sc, WMREG_MANC, manc);
8251	}
8252}
8253
/*
 * Probe the chip's manageability/wakeup capabilities and record them
 * in sc_flags (HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES,
 * HAS_MANAGE).
 */
static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
#if 0 /* XXX */
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
#endif
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flags is set after the resetting of the eeprom
	 * stuff
	 */
}
8314
8315#ifdef WM_WOL
8316/* WOL in the newer chipset interfaces (pchlan) */
/*
 * Placeholder for PCH PHY-based wakeup configuration: the entire
 * implementation is stubbed out (#if 0), so calling this is currently
 * a no-op and WOL on WM_T_PCH is not actually armed.
 */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}
8338
/*
 * Arm the chip for wake-on-LAN (magic packet) and request PME through
 * PCI power management.  Applies several chip-specific workarounds on
 * the way.
 */
static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg, pmreg;
	pcireg_t pmode;

	/* Without a power-management capability there is nothing to arm. */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			/* NOTE(review): child is dereferenced without a
			 * NULL check; presumably a PHY always attached
			 * when phytype is 82577 — confirm. */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if (child->mii_mpd_rev <= 2)
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
	    || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

	/* Wake on magic packet. */
	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0	/* for the multicast packet */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type == WM_T_PCH) {
		/* NOTE(review): wm_enable_phy_wakeup() is a stub, so reg
		 * is never written to WUFC on PCH — confirm intended. */
		wm_enable_phy_wakeup(sc);
	} else {
		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
		|| (sc->sc_type == WM_T_PCH2))
		    && (sc->sc_phytype == WMPHY_IGP_3))
			wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}
8421#endif /* WM_WOL */
8422
8423static bool
8424wm_suspend(device_t self, const pmf_qual_t *qual)
8425{
8426	struct wm_softc *sc = device_private(self);
8427
8428	wm_release_manageability(sc);
8429	wm_release_hw_control(sc);
8430#ifdef WM_WOL
8431	wm_enable_wakeup(sc);
8432#endif
8433
8434	return true;
8435}
8436
8437static bool
8438wm_resume(device_t self, const pmf_qual_t *qual)
8439{
8440	struct wm_softc *sc = device_private(self);
8441
8442	wm_init_manageability(sc);
8443
8444	return true;
8445}
8446
8447static void
8448wm_set_eee_i350(struct wm_softc * sc)
8449{
8450	uint32_t ipcnfg, eeer;
8451
8452	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8453	eeer = CSR_READ(sc, WMREG_EEER);
8454
8455	if ((sc->sc_flags & WM_F_EEE) != 0) {
8456		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8457		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
8458		    | EEER_LPI_FC);
8459	} else {
8460		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8461		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
8462		    | EEER_LPI_FC);
8463	}
8464
8465	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
8466	CSR_WRITE(sc, WMREG_EEER, eeer);
8467	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
8468	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
8469}
8470