1/*	$NetBSD: if_cas.c,v 1.33 2019/05/23 10:57:28 msaitoh Exp $	*/
2/*	$OpenBSD: if_cas.c,v 1.29 2009/11/29 16:19:38 kettenis Exp $	*/
3
4/*
5 *
6 * Copyright (C) 2007 Mark Kettenis.
7 * Copyright (C) 2001 Eduardo Horvath.
8 * All rights reserved.
9 *
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 */
33
34/*
35 * Driver for Sun Cassini ethernet controllers.
36 *
37 * There are basically two variants of this chip: Cassini and
38 * Cassini+.  We can distinguish between the two by revision: 0x10 and
39 * up are Cassini+.  The most important difference is that Cassini+
40 * has a second RX descriptor ring.  Cassini+ will not work without
41 * configuring that second ring.  However, since we don't use it, we
42 * don't actually fill its descriptors, and only hand the first four
43 * off to the chip.
44 */
45
46#include <sys/cdefs.h>
47__KERNEL_RCSID(0, "$NetBSD: if_cas.c,v 1.33 2019/05/23 10:57:28 msaitoh Exp $");
48
49#ifndef _MODULE
50#include "opt_inet.h"
51#endif
52
53#include <sys/param.h>
54#include <sys/systm.h>
55#include <sys/callout.h>
56#include <sys/mbuf.h>
57#include <sys/syslog.h>
58#include <sys/malloc.h>
59#include <sys/kernel.h>
60#include <sys/socket.h>
61#include <sys/ioctl.h>
62#include <sys/errno.h>
63#include <sys/device.h>
64#include <sys/module.h>
65
66#include <machine/endian.h>
67
68#include <net/if.h>
69#include <net/if_dl.h>
70#include <net/if_media.h>
71#include <net/if_ether.h>
72
73#ifdef INET
74#include <netinet/in.h>
75#include <netinet/in_systm.h>
76#include <netinet/in_var.h>
77#include <netinet/ip.h>
78#include <netinet/tcp.h>
79#include <netinet/udp.h>
80#endif
81
82#include <net/bpf.h>
83
84#include <sys/bus.h>
85#include <sys/intr.h>
86#include <sys/rndsource.h>
87
88#include <dev/mii/mii.h>
89#include <dev/mii/miivar.h>
90#include <dev/mii/mii_bitbang.h>
91
92#include <dev/pci/pcivar.h>
93#include <dev/pci/pcireg.h>
94#include <dev/pci/pcidevs.h>
95#include <prop/proplib.h>
96
97#include <dev/pci/if_casreg.h>
98#include <dev/pci/if_casvar.h>
99
100#define TRIES	10000
101
102static bool	cas_estintr(struct cas_softc *sc, int);
103bool		cas_shutdown(device_t, int);
104static bool	cas_suspend(device_t, const pmf_qual_t *);
105static bool	cas_resume(device_t, const pmf_qual_t *);
106static int	cas_detach(device_t, int);
107static void	cas_partial_detach(struct cas_softc *, enum cas_attach_stage);
108
109int		cas_match(device_t, cfdata_t, void *);
110void		cas_attach(device_t, device_t, void *);
111
112
113CFATTACH_DECL3_NEW(cas, sizeof(struct cas_softc),
114    cas_match, cas_attach, cas_detach, NULL, NULL, NULL,
115    DVF_DETACH_SHUTDOWN);
116
117int	cas_pci_enaddr(struct cas_softc *, struct pci_attach_args *, uint8_t *);
118
119void		cas_config(struct cas_softc *, const uint8_t *);
120void		cas_start(struct ifnet *);
121void		cas_stop(struct ifnet *, int);
122int		cas_ioctl(struct ifnet *, u_long, void *);
123void		cas_tick(void *);
124void		cas_watchdog(struct ifnet *);
125int		cas_init(struct ifnet *);
126void		cas_init_regs(struct cas_softc *);
127int		cas_ringsize(int);
128int		cas_cringsize(int);
129int		cas_meminit(struct cas_softc *);
130void		cas_mifinit(struct cas_softc *);
131int		cas_bitwait(struct cas_softc *, bus_space_handle_t, int,
132		    uint32_t, uint32_t);
133void		cas_reset(struct cas_softc *);
134int		cas_reset_rx(struct cas_softc *);
135int		cas_reset_tx(struct cas_softc *);
136int		cas_disable_rx(struct cas_softc *);
137int		cas_disable_tx(struct cas_softc *);
138void		cas_rxdrain(struct cas_softc *);
139int		cas_add_rxbuf(struct cas_softc *, int);
140void		cas_iff(struct cas_softc *);
141int		cas_encap(struct cas_softc *, struct mbuf *, uint32_t *);
142
143/* MII methods & callbacks */
144int		cas_mii_readreg(device_t, int, int, uint16_t*);
145int		cas_mii_writereg(device_t, int, int, uint16_t);
146void		cas_mii_statchg(struct ifnet *);
147int		cas_pcs_readreg(device_t, int, int, uint16_t *);
148int		cas_pcs_writereg(device_t, int, int, uint16_t);
149
150int		cas_mediachange(struct ifnet *);
151void		cas_mediastatus(struct ifnet *, struct ifmediareq *);
152
153int		cas_eint(struct cas_softc *, u_int);
154int		cas_rint(struct cas_softc *);
155int		cas_tint(struct cas_softc *, uint32_t);
156int		cas_pint(struct cas_softc *);
157int		cas_intr(void *);
158
159#ifdef CAS_DEBUG
160#define	DPRINTF(sc, x)	if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
161				printf x
162#else
163#define	DPRINTF(sc, x)	/* nothing */
164#endif
165
166int
167cas_match(device_t parent, cfdata_t cf, void *aux)
168{
169	struct pci_attach_args *pa = aux;
170
171	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SUN &&
172	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SUN_CASSINI))
173		return 1;
174
175	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NS &&
176	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NS_SATURN))
177		return 1;
178
179	return 0;
180}
181
182#define	PROMHDR_PTR_DATA	0x18
183#define	PROMDATA_PTR_VPD	0x08
184#define	PROMDATA_DATA2		0x0a
185
186static const uint8_t cas_promhdr[] = { 0x55, 0xaa };
187static const uint8_t cas_promdat[] = {
188	'P', 'C', 'I', 'R',
189	PCI_VENDOR_SUN & 0xff, PCI_VENDOR_SUN >> 8,
190	PCI_PRODUCT_SUN_CASSINI & 0xff, PCI_PRODUCT_SUN_CASSINI >> 8
191};
192static const uint8_t cas_promdat_ns[] = {
193	'P', 'C', 'I', 'R',
194	PCI_VENDOR_NS & 0xff, PCI_VENDOR_NS >> 8,
195	PCI_PRODUCT_NS_SATURN & 0xff, PCI_PRODUCT_NS_SATURN >> 8
196};
197
198static const uint8_t cas_promdat2[] = {
199	0x18, 0x00,			/* structure length */
200	0x00,				/* structure revision */
201	0x00,				/* interface revision */
202	PCI_SUBCLASS_NETWORK_ETHERNET,	/* subclass code */
203	PCI_CLASS_NETWORK		/* class code */
204};
205
206#define CAS_LMA_MAXNUM	4
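
/*
 * cas_pci_enaddr:
 *
 *	Extract the Ethernet address from the "local-mac-address" VPD
 *	property in the card's expansion ROM.  Multi-port cards carry one
 *	address per port; the PCI device number selects ours.  Returns 0
 *	on success, -1 if no address could be found.
 */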
207int
208cas_pci_enaddr(struct cas_softc *sc, struct pci_attach_args *pa,
209    uint8_t *enaddr)
210{
211	struct pci_vpd_largeres *res;
212	struct pci_vpd *vpd;
213	bus_space_handle_t romh;
214	bus_space_tag_t romt;
215	bus_size_t romsize = 0;
216	uint8_t enaddrs[CAS_LMA_MAXNUM][ETHER_ADDR_LEN];
217	uint8_t buf[32], *desc;
218	pcireg_t address;
219	int dataoff, vpdoff, len, lma = 0;
220	int i, rv = -1;
221
222	if (pci_mapreg_map(pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_MEM, 0,
223	    &romt, &romh, NULL, &romsize))
224		return (-1);
225
226	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
227	address |= PCI_MAPREG_ROM_ENABLE;
228	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START, address);
229
230	bus_space_read_region_1(romt, romh, 0, buf, sizeof(buf));
231	if (bcmp(buf, cas_promhdr, sizeof(cas_promhdr)))
232		goto fail;
233
234	dataoff = buf[PROMHDR_PTR_DATA] | (buf[PROMHDR_PTR_DATA + 1] << 8);
235	if (dataoff < 0x1c)
236		goto fail;
237
238	bus_space_read_region_1(romt, romh, dataoff, buf, sizeof(buf));
239	if ((bcmp(buf, cas_promdat, sizeof(cas_promdat)) &&
240	     bcmp(buf, cas_promdat_ns, sizeof(cas_promdat_ns))) ||
241	    bcmp(buf + PROMDATA_DATA2, cas_promdat2, sizeof(cas_promdat2)))
242		goto fail;
243
244	vpdoff = buf[PROMDATA_PTR_VPD] | (buf[PROMDATA_PTR_VPD + 1] << 8);
245	if (vpdoff < 0x1c)
246		goto fail;
247
248next:
249	bus_space_read_region_1(romt, romh, vpdoff, buf, sizeof(buf));
250	if (!PCI_VPDRES_ISLARGE(buf[0]))
251		goto fail;
252
253	res = (struct pci_vpd_largeres *)buf;
254	vpdoff += sizeof(*res);
255
256	len = ((res->vpdres_len_msb << 8) + res->vpdres_len_lsb);
257	switch (PCI_VPDRES_LARGE_NAME(res->vpdres_byte0)) {
258	case PCI_VPDRES_TYPE_IDENTIFIER_STRING:
259		/* Skip identifier string. */
260		vpdoff += len;
261		goto next;
262
263	case PCI_VPDRES_TYPE_VPD:
264		while (len > 0) {
265			bus_space_read_region_1(romt, romh, vpdoff,
266			     buf, sizeof(buf));
267
268			vpd = (struct pci_vpd *)buf;
269			vpdoff += sizeof(*vpd) + vpd->vpd_len;
270			len -= sizeof(*vpd) + vpd->vpd_len;
271
272			/*
273			 * We're looking for an "Enhanced" VPD...
274			 */
275			if (vpd->vpd_key0 != 'Z')
276				continue;
277
278			desc = buf + sizeof(*vpd);
279
280			/*
281			 * ...which is an instance property...
282			 */
283			if (desc[0] != 'I')
284				continue;
285			desc += 3;
286
287			/*
288			 * ...that's a byte array with the proper
289			 * length for a MAC address...
290			 */
291			if (desc[0] != 'B' || desc[1] != ETHER_ADDR_LEN)
292				continue;
293			desc += 2;
294
295			/*
296			 * ...named "local-mac-address".
297			 */
298			if (strcmp(desc, "local-mac-address") != 0)
299				continue;
300			desc += strlen("local-mac-address") + 1;
301
302			memcpy(enaddrs[lma], desc, ETHER_ADDR_LEN);
303			lma++;
304			rv = 0;
305			if (lma == CAS_LMA_MAXNUM)
306				break;
307		}
308		break;
309
310	default:
311		goto fail;
312	}
313
314	i = 0;
315	/*
316	 * A multi-port card has a bridge chip, so the device numbers are
317	 * fixed, e.g.:
318	 * p0: 005:00:0
319	 * p1: 005:01:0
320	 * p2: 006:02:0
321	 * p3: 006:03:0
322	 */
323	if ((lma > 1) && (pa->pa_device < CAS_LMA_MAXNUM)
324	    && (pa->pa_device < lma))
325		i = pa->pa_device;
326	memcpy(enaddr, enaddrs[i], ETHER_ADDR_LEN);
327 fail:
328	if (romsize != 0)
329		bus_space_unmap(romt, romh, romsize);
330
331	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM);
332	address &= ~PCI_MAPREG_ROM_ENABLE;
333	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM, address);
334
335	return (rv);
336}
337
338void
339cas_attach(device_t parent, device_t self, void *aux)
340{
341	struct pci_attach_args *pa = aux;
342	struct cas_softc *sc = device_private(self);
343	prop_data_t data;
344	uint8_t enaddr[ETHER_ADDR_LEN];
345
346	sc->sc_dev = self;
347	pci_aprint_devinfo(pa, NULL);
348	sc->sc_rev = PCI_REVISION(pa->pa_class);
349	sc->sc_dmatag = pa->pa_dmat;
350
351#define PCI_CAS_BASEADDR	0x10
352	if (pci_mapreg_map(pa, PCI_CAS_BASEADDR, PCI_MAPREG_TYPE_MEM, 0,
353	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_size) != 0) {
354		aprint_error_dev(sc->sc_dev,
355		    "unable to map device registers\n");
356		return;
357	}
358
359	if ((data = prop_dictionary_get(device_properties(sc->sc_dev),
360	    "mac-address")) != NULL)
361		memcpy(enaddr, prop_data_data_nocopy(data), ETHER_ADDR_LEN);
362	else if (cas_pci_enaddr(sc, pa, enaddr) != 0) {
363		aprint_error_dev(sc->sc_dev, "no Ethernet address found\n");
364		memset(enaddr, 0, sizeof(enaddr));
365	}
366
367	sc->sc_burst = 16;	/* XXX */
368
369	sc->sc_att_stage = CAS_ATT_BACKEND_0;
370
371	if (pci_intr_map(pa, &sc->sc_handle) != 0) {
372		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
373		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size);
374		return;
375	}
376	sc->sc_pc = pa->pa_pc;
377	if (!cas_estintr(sc, CAS_INTR_PCI)) {
378		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size);
379		aprint_error_dev(sc->sc_dev, "unable to establish interrupt\n");
380		return;
381	}
382
383	sc->sc_att_stage = CAS_ATT_BACKEND_1;
384
385	/*
386	 * Call the main configuration routine.
387	 */
388	cas_config(sc, enaddr);
389
390	if (pmf_device_register1(sc->sc_dev,
391	    cas_suspend, cas_resume, cas_shutdown))
392		pmf_class_network_register(sc->sc_dev, &sc->sc_ethercom.ec_if);
393	else
394		aprint_error_dev(sc->sc_dev,
395		    "could not establish power handlers\n");
396
397	sc->sc_att_stage = CAS_ATT_FINISHED;
399}
400
401/*
402 * cas_config:
403 *
404 *	Attach a Cassini interface to the system.
405 */
406void
407cas_config(struct cas_softc *sc, const uint8_t *enaddr)
408{
409	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
410	struct mii_data *mii = &sc->sc_mii;
411	struct mii_softc *child;
412	int i, error;
413
414	/* Make sure the chip is stopped. */
415	ifp->if_softc = sc;
416	cas_reset(sc);
417
418	/*
419	 * Allocate the control data structures, and create and load the
420	 * DMA map for it.
421	 */
422	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
423	    sizeof(struct cas_control_data), CAS_PAGE_SIZE, 0, &sc->sc_cdseg,
424	    1, &sc->sc_cdnseg, 0)) != 0) {
425		aprint_error_dev(sc->sc_dev,
426		    "unable to allocate control data, error = %d\n",
427		    error);
428		cas_partial_detach(sc, CAS_ATT_0);
429	}
430
431	/* XXX should map this in with correct endianness */
432	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg,
433	    sc->sc_cdnseg, sizeof(struct cas_control_data),
434	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
435		aprint_error_dev(sc->sc_dev,
436		    "unable to map control data, error = %d\n", error);
437		cas_partial_detach(sc, CAS_ATT_1);
438	}
439
440	if ((error = bus_dmamap_create(sc->sc_dmatag,
441	    sizeof(struct cas_control_data), 1,
442	    sizeof(struct cas_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
443		aprint_error_dev(sc->sc_dev,
444		    "unable to create control data DMA map, error = %d\n",
445		    error);
446		cas_partial_detach(sc, CAS_ATT_2);
447	}
448
449	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
450	    sc->sc_control_data, sizeof(struct cas_control_data), NULL,
451	    0)) != 0) {
452		aprint_error_dev(sc->sc_dev,
453		    "unable to load control data DMA map, error = %d\n",
454		    error);
455		cas_partial_detach(sc, CAS_ATT_3);
456	}
457
458	memset(sc->sc_control_data, 0, sizeof(struct cas_control_data));
459
460	/*
461	 * Create the receive buffer DMA maps.
462	 */
463	for (i = 0; i < CAS_NRXDESC; i++) {
464		bus_dma_segment_t seg;
465		char *kva;
466		int rseg;
467
468		if ((error = bus_dmamem_alloc(sc->sc_dmatag, CAS_PAGE_SIZE,
469		    CAS_PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
470			aprint_error_dev(sc->sc_dev,
471			    "unable to alloc rx DMA mem %d, error = %d\n",
472			    i, error);
473			cas_partial_detach(sc, CAS_ATT_5);
474		}
475		sc->sc_rxsoft[i].rxs_dmaseg = seg;
476
477		if ((error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
478		    CAS_PAGE_SIZE, (void **)&kva, BUS_DMA_NOWAIT)) != 0) {
479			aprint_error_dev(sc->sc_dev,
480			    "unable to map rx DMA mem %d, error = %d\n",
481			    i, error);
482			cas_partial_detach(sc, CAS_ATT_5);
483		}
484		sc->sc_rxsoft[i].rxs_kva = kva;
485
486		if ((error = bus_dmamap_create(sc->sc_dmatag, CAS_PAGE_SIZE, 1,
487		    CAS_PAGE_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
488			aprint_error_dev(sc->sc_dev,
489			    "unable to create rx DMA map %d, error = %d\n",
490			    i, error);
491			cas_partial_detach(sc, CAS_ATT_5);
492		}
493
494		if ((error = bus_dmamap_load(sc->sc_dmatag,
495		   sc->sc_rxsoft[i].rxs_dmamap, kva, CAS_PAGE_SIZE, NULL,
496		   BUS_DMA_NOWAIT)) != 0) {
497			aprint_error_dev(sc->sc_dev,
498			    "unable to load rx DMA map %d, error = %d\n",
499			    i, error);
500			cas_partial_detach(sc, CAS_ATT_5);
501		}
502	}
503
504	/*
505	 * Create the transmit buffer DMA maps.
506	 */
507	for (i = 0; i < CAS_NTXDESC; i++) {
508		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
509		    CAS_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
510		    &sc->sc_txd[i].sd_map)) != 0) {
511			aprint_error_dev(sc->sc_dev,
512			    "unable to create tx DMA map %d, error = %d\n",
513			    i, error);
514			cas_partial_detach(sc, CAS_ATT_6);
515		}
516		sc->sc_txd[i].sd_mbuf = NULL;
517	}
518
519	/*
520	 * From this point forward, the attachment cannot fail.  A failure
521	 * before this point releases all resources that may have been
522	 * allocated.
523	 */
524
525	/* Announce ourselves. */
526	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
527	    ether_sprintf(enaddr));
528	aprint_naive(": Ethernet controller\n");
529
530	/* Get RX FIFO size */
531	sc->sc_rxfifosize = 16 * 1024;
532
533	/* Initialize ifnet structure. */
534	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
535	ifp->if_softc = sc;
536	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
537	ifp->if_start = cas_start;
538	ifp->if_ioctl = cas_ioctl;
539	ifp->if_watchdog = cas_watchdog;
540	ifp->if_stop = cas_stop;
541	ifp->if_init = cas_init;
542	IFQ_SET_MAXLEN(&ifp->if_snd, CAS_NTXDESC - 1);
543	IFQ_SET_READY(&ifp->if_snd);
544
545	/* Initialize ifmedia structures and MII info */
546	mii->mii_ifp = ifp;
547	mii->mii_readreg = cas_mii_readreg;
548	mii->mii_writereg = cas_mii_writereg;
549	mii->mii_statchg = cas_mii_statchg;
550
551	ifmedia_init(&mii->mii_media, 0, cas_mediachange, cas_mediastatus);
552	sc->sc_ethercom.ec_mii = mii;
553
554	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_MII_DATAPATH_MODE, 0);
555
556	cas_mifinit(sc);
557
558	if (sc->sc_mif_config & CAS_MIF_CONFIG_MDI1) {
559		sc->sc_mif_config |= CAS_MIF_CONFIG_PHY_SEL;
560		bus_space_write_4(sc->sc_memt, sc->sc_memh,
561		    CAS_MIF_CONFIG, sc->sc_mif_config);
562	}
563
564	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
565	    MII_OFFSET_ANY, 0);
566
567	child = LIST_FIRST(&mii->mii_phys);
568	if (child == NULL &&
569	    sc->sc_mif_config & (CAS_MIF_CONFIG_MDI0 | CAS_MIF_CONFIG_MDI1)) {
570		/*
571		 * Try the external PCS SERDES if we didn't find any
572		 * MII devices.
573		 */
574		bus_space_write_4(sc->sc_memt, sc->sc_memh,
575		    CAS_MII_DATAPATH_MODE, CAS_MII_DATAPATH_SERDES);
576
577		bus_space_write_4(sc->sc_memt, sc->sc_memh,
578		     CAS_MII_CONFIG, CAS_MII_CONFIG_ENABLE);
579
580		mii->mii_readreg = cas_pcs_readreg;
581		mii->mii_writereg = cas_pcs_writereg;
582
583		mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
584		    MII_OFFSET_ANY, MIIF_NOISOLATE);
585	}
586
587	child = LIST_FIRST(&mii->mii_phys);
588	if (child == NULL) {
589		/* No PHY attached */
590		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
591		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
592	} else {
593		/*
594		 * Walk along the list of attached MII devices and
595		 * establish an `MII instance' to `phy number'
596		 * mapping. We'll use this mapping in media change
597		 * requests to determine which phy to use to program
598		 * the MIF configuration register.
599		 */
600		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
601			/*
602			 * Note: we support just two PHYs: the built-in
603			 * internal device and an external on the MII
604			 * connector.
605			 */
606			if (child->mii_phy > 1 || child->mii_inst > 1) {
607				aprint_error_dev(sc->sc_dev,
608				    "cannot accommodate MII device %s"
609				    " at phy %d, instance %d\n",
610				    device_xname(child->mii_dev),
611				    child->mii_phy, child->mii_inst);
612				continue;
613			}
614
615			sc->sc_phys[child->mii_inst] = child->mii_phy;
616		}
617
618		/*
619		 * XXX - we can really do the following ONLY if the
620		 * phy indeed has the auto negotiation capability!!
621		 */
622		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
623	}
624
625	/* claim 802.1q capability */
626	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
627
628	/* Attach the interface. */
629	if_attach(ifp);
630	if_deferred_start_init(ifp, NULL);
631	ether_ifattach(ifp, enaddr);
632
633	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
634			  RND_TYPE_NET, RND_FLAG_DEFAULT);
635
636	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
637	    NULL, device_xname(sc->sc_dev), "interrupts");
638
639	callout_init(&sc->sc_tick_ch, 0);
640
641	return;
642}
643
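/*
 * cas_detach:
 *
 *	Detach the interface, releasing resources in the reverse order of
 *	attachment.  Also used to unwind a partially completed attach,
 *	starting from the stage recorded in sc_att_stage.
 */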
644int
645cas_detach(device_t self, int flags)
646{
647	int i;
648	struct cas_softc *sc = device_private(self);
649	bus_space_tag_t t = sc->sc_memt;
650	bus_space_handle_t h = sc->sc_memh;
651	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
652
653	/*
654	 * Free any resources we've allocated during the failed attach
655	 * attempt.  Do this in reverse order and fall through.
656	 */
657	switch (sc->sc_att_stage) {
658	case CAS_ATT_FINISHED:
659		bus_space_write_4(t, h, CAS_INTMASK, ~(uint32_t)0);
660		pmf_device_deregister(self);
661		cas_stop(&sc->sc_ethercom.ec_if, 1);
662		evcnt_detach(&sc->sc_ev_intr);
663
664		rnd_detach_source(&sc->rnd_source);
665
666		ether_ifdetach(ifp);
667		if_detach(ifp);
668		ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
669
670		callout_destroy(&sc->sc_tick_ch);
671
672		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
673
674		/*FALLTHROUGH*/
675	case CAS_ATT_MII:
676	case CAS_ATT_7:
677	case CAS_ATT_6:
678		for (i = 0; i < CAS_NTXDESC; i++) {
679			if (sc->sc_txd[i].sd_map != NULL)
680				bus_dmamap_destroy(sc->sc_dmatag,
681				    sc->sc_txd[i].sd_map);
682		}
683		/*FALLTHROUGH*/
684	case CAS_ATT_5:
685		for (i = 0; i < CAS_NRXDESC; i++) {
686			if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
687				bus_dmamap_unload(sc->sc_dmatag,
688				    sc->sc_rxsoft[i].rxs_dmamap);
689			if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
690				bus_dmamap_destroy(sc->sc_dmatag,
691				    sc->sc_rxsoft[i].rxs_dmamap);
692			if (sc->sc_rxsoft[i].rxs_kva != NULL)
693				bus_dmamem_unmap(sc->sc_dmatag,
694				    sc->sc_rxsoft[i].rxs_kva, CAS_PAGE_SIZE);
695			/* XXX   need to check that bus_dmamem_alloc succeeded
696			if (sc->sc_rxsoft[i].rxs_dmaseg != NULL)
697			*/
698				bus_dmamem_free(sc->sc_dmatag,
699				    &(sc->sc_rxsoft[i].rxs_dmaseg), 1);
700		}
701		bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
702		/*FALLTHROUGH*/
703	case CAS_ATT_4:
704	case CAS_ATT_3:
705		bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
706		/*FALLTHROUGH*/
707	case CAS_ATT_2:
708		bus_dmamem_unmap(sc->sc_dmatag, sc->sc_control_data,
709		    sizeof(struct cas_control_data));
710		/*FALLTHROUGH*/
711	case CAS_ATT_1:
712		bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
713		/*FALLTHROUGH*/
714	case CAS_ATT_0:
715		sc->sc_att_stage = CAS_ATT_0;
716		/*FALLTHROUGH*/
717	case CAS_ATT_BACKEND_2:
718	case CAS_ATT_BACKEND_1:
719		if (sc->sc_ih != NULL) {
720			pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
721			sc->sc_ih = NULL;
722		}
723		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size);
724		/*FALLTHROUGH*/
725	case CAS_ATT_BACKEND_0:
726		break;
727	}
728	return 0;
729}
730
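/*
 * cas_partial_detach:
 *
 *	Record how far attachment progressed and invoke the detach entry
 *	point to release whatever has already been allocated.
 */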
731static void
732cas_partial_detach(struct cas_softc *sc, enum cas_attach_stage stage)
733{
734	cfattach_t ca = device_cfattach(sc->sc_dev);
735
736	sc->sc_att_stage = stage;
737	(*ca->ca_detach)(sc->sc_dev, 0);
738}
739
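/*
 * cas_tick:
 *
 *	One second timer: fold the MAC's collision and receive error
 *	counters into the interface statistics, clear the hardware
 *	counters and tick the MII.
 */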
740void
741cas_tick(void *arg)
742{
743	struct cas_softc *sc = arg;
744	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
745	bus_space_tag_t t = sc->sc_memt;
746	bus_space_handle_t mac = sc->sc_memh;
747	int s;
748	uint32_t v;
749
750	/* unload collision counters */
751	v = bus_space_read_4(t, mac, CAS_MAC_EXCESS_COLL_CNT) +
752	    bus_space_read_4(t, mac, CAS_MAC_LATE_COLL_CNT);
753	ifp->if_collisions += v +
754	    bus_space_read_4(t, mac, CAS_MAC_NORM_COLL_CNT) +
755	    bus_space_read_4(t, mac, CAS_MAC_FIRST_COLL_CNT);
756	ifp->if_oerrors += v;
757
758	/* read error counters */
759	ifp->if_ierrors +=
760	    bus_space_read_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT) +
761	    bus_space_read_4(t, mac, CAS_MAC_RX_ALIGN_ERR) +
762	    bus_space_read_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT) +
763	    bus_space_read_4(t, mac, CAS_MAC_RX_CODE_VIOL);
764
765	/* clear the hardware counters */
766	bus_space_write_4(t, mac, CAS_MAC_NORM_COLL_CNT, 0);
767	bus_space_write_4(t, mac, CAS_MAC_FIRST_COLL_CNT, 0);
768	bus_space_write_4(t, mac, CAS_MAC_EXCESS_COLL_CNT, 0);
769	bus_space_write_4(t, mac, CAS_MAC_LATE_COLL_CNT, 0);
770	bus_space_write_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT, 0);
771	bus_space_write_4(t, mac, CAS_MAC_RX_ALIGN_ERR, 0);
772	bus_space_write_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT, 0);
773	bus_space_write_4(t, mac, CAS_MAC_RX_CODE_VIOL, 0);
774
775	s = splnet();
776	mii_tick(&sc->sc_mii);
777	splx(s);
778
779	callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);
780}
781
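/*
 * cas_bitwait:
 *
 *	Poll register r until the bits in `clr' read back as zero and the
 *	bits in `set' read back as one.  Returns non-zero on success and
 *	zero if the bits have not settled after TRIES polls.
 */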
782int
783cas_bitwait(struct cas_softc *sc, bus_space_handle_t h, int r,
784    uint32_t clr, uint32_t set)
785{
786	int i;
787	uint32_t reg;
788
789	for (i = TRIES; i--; DELAY(100)) {
790		reg = bus_space_read_4(sc->sc_memt, h, r);
791		if ((reg & clr) == 0 && (reg & set) == set)
792			return (1);
793	}
794
795	return (0);
796}
797
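/*
 * cas_reset:
 *
 *	Reset the chip: stop RX and TX DMA, mask all interrupts and issue
 *	a global software reset of the RX, TX and PCS blocks.
 */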
798void
799cas_reset(struct cas_softc *sc)
800{
801	bus_space_tag_t t = sc->sc_memt;
802	bus_space_handle_t h = sc->sc_memh;
803	int s;
804
805	s = splnet();
806	DPRINTF(sc, ("%s: cas_reset\n", device_xname(sc->sc_dev)));
807	cas_reset_rx(sc);
808	cas_reset_tx(sc);
809
810	/* Disable interrupts */
811	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_INTMASK, ~(uint32_t)0);
812
813	/* Do a full reset */
814	bus_space_write_4(t, h, CAS_RESET,
815	    CAS_RESET_RX | CAS_RESET_TX | CAS_RESET_BLOCK_PCS);
816	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0))
817		aprint_error_dev(sc->sc_dev, "cannot reset device\n");
818	splx(s);
819}
820
821
822/*
823 * cas_rxdrain:
824 *
825 *	Drain the receive queue.
826 */
827void
828cas_rxdrain(struct cas_softc *sc)
829{
830	/* Nothing to do yet. */
831}
832
833/*
834 * Stop the interface and reset the transmitter and receiver.
835 */
836void
837cas_stop(struct ifnet *ifp, int disable)
838{
839	struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
840	struct cas_sxd *sd;
841	uint32_t i;
842
843	DPRINTF(sc, ("%s: cas_stop\n", device_xname(sc->sc_dev)));
844
845	callout_stop(&sc->sc_tick_ch);
846
847	/*
848	 * Mark the interface down and cancel the watchdog timer.
849	 */
850	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
851	ifp->if_timer = 0;
852
853	mii_down(&sc->sc_mii);
854
855	cas_reset_rx(sc);
856	cas_reset_tx(sc);
857
858	/*
859	 * Release any queued transmit buffers.
860	 */
861	for (i = 0; i < CAS_NTXDESC; i++) {
862		sd = &sc->sc_txd[i];
863		if (sd->sd_mbuf != NULL) {
864			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
865			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
866			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
867			m_freem(sd->sd_mbuf);
868			sd->sd_mbuf = NULL;
869		}
870	}
871	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;
872
873	if (disable)
874		cas_rxdrain(sc);
875}
876
877
878/*
879 * Reset the receiver
880 */
881int
882cas_reset_rx(struct cas_softc *sc)
883{
884	bus_space_tag_t t = sc->sc_memt;
885	bus_space_handle_t h = sc->sc_memh;
886
887	/*
888	 * Resetting while DMA is in progress can cause a bus hang, so we
889	 * disable DMA first.
890	 */
891	cas_disable_rx(sc);
892	bus_space_write_4(t, h, CAS_RX_CONFIG, 0);
893	/* Wait till it finishes */
894	if (!cas_bitwait(sc, h, CAS_RX_CONFIG, 1, 0))
895		aprint_error_dev(sc->sc_dev, "cannot disable rx dma\n");
896	/* Wait 5ms extra. */
897	delay(5000);
898
899	/* Finally, reset the ERX */
900	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_RX);
901	/* Wait till it finishes */
902	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX, 0)) {
903		aprint_error_dev(sc->sc_dev, "cannot reset receiver\n");
904		return (1);
905	}
906	return (0);
907}
908
909
910/*
911 * Reset the transmitter
912 */
913int
914cas_reset_tx(struct cas_softc *sc)
915{
916	bus_space_tag_t t = sc->sc_memt;
917	bus_space_handle_t h = sc->sc_memh;
918
919	/*
920	 * Resetting while DMA is in progress can cause a bus hang, so we
921	 * disable DMA first.
922	 */
923	cas_disable_tx(sc);
924	bus_space_write_4(t, h, CAS_TX_CONFIG, 0);
925	/* Wait till it finishes */
926	if (!cas_bitwait(sc, h, CAS_TX_CONFIG, 1, 0))
927		aprint_error_dev(sc->sc_dev, "cannot disable tx dma\n");
928	/* Wait 5ms extra. */
929	delay(5000);
930
931	/* Finally, reset the ETX */
932	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_TX);
933	/* Wait till it finishes */
934	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_TX, 0)) {
935		aprint_error_dev(sc->sc_dev, "cannot reset transmitter\n");
936		return (1);
937	}
938	return (0);
939}
940
941/*
942 * Disable receiver.
943 */
944int
945cas_disable_rx(struct cas_softc *sc)
946{
947	bus_space_tag_t t = sc->sc_memt;
948	bus_space_handle_t h = sc->sc_memh;
949	uint32_t cfg;
950
951	/* Flip the enable bit */
952	cfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
953	cfg &= ~CAS_MAC_RX_ENABLE;
954	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, cfg);
955
956	/* Wait for it to finish */
957	return (cas_bitwait(sc, h, CAS_MAC_RX_CONFIG, CAS_MAC_RX_ENABLE, 0));
958}
959
960/*
961 * Disable transmitter.
962 */
963int
964cas_disable_tx(struct cas_softc *sc)
965{
966	bus_space_tag_t t = sc->sc_memt;
967	bus_space_handle_t h = sc->sc_memh;
968	uint32_t cfg;
969
970	/* Flip the enable bit */
971	cfg = bus_space_read_4(t, h, CAS_MAC_TX_CONFIG);
972	cfg &= ~CAS_MAC_TX_ENABLE;
973	bus_space_write_4(t, h, CAS_MAC_TX_CONFIG, cfg);
974
975	/* Wait for it to finish */
976	return (cas_bitwait(sc, h, CAS_MAC_TX_CONFIG, CAS_MAC_TX_ENABLE, 0));
977}
978
979/*
980 * Set up the transmit, receive and receive completion descriptor rings.
981 */
982int
983cas_meminit(struct cas_softc *sc)
984{
985	int i;
986
987	/*
988	 * Initialize the transmit descriptor ring.
989	 */
990	for (i = 0; i < CAS_NTXDESC; i++) {
991		sc->sc_txdescs[i].cd_flags = 0;
992		sc->sc_txdescs[i].cd_addr = 0;
993	}
994	CAS_CDTXSYNC(sc, 0, CAS_NTXDESC,
995	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
996
997	/*
998	 * Initialize the receive descriptor and receive job
999	 * descriptor rings.
1000	 */
1001	for (i = 0; i < CAS_NRXDESC; i++)
1002		CAS_INIT_RXDESC(sc, i, i);
1003	sc->sc_rxdptr = 0;
1004	sc->sc_rxptr = 0;
1005
1006	/*
1007	 * Initialize the receive completion ring.
1008	 */
1009	for (i = 0; i < CAS_NRXCOMP; i++) {
1010		sc->sc_rxcomps[i].cc_word[0] = 0;
1011		sc->sc_rxcomps[i].cc_word[1] = 0;
1012		sc->sc_rxcomps[i].cc_word[2] = 0;
1013		sc->sc_rxcomps[i].cc_word[3] = CAS_DMA_WRITE(CAS_RC3_OWN);
1014		CAS_CDRXCSYNC(sc, i,
1015		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1016	}
1017
1018	return (0);
1019}
1020
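/*
 * cas_ringsize:
 *
 *	Translate a descriptor ring size in entries into the encoding used
 *	by the TX/RX configuration registers; falls back to the 32-entry
 *	encoding on an invalid size.
 */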
1021int
1022cas_ringsize(int sz)
1023{
1024	switch (sz) {
1025	case 32:
1026		return CAS_RING_SZ_32;
1027	case 64:
1028		return CAS_RING_SZ_64;
1029	case 128:
1030		return CAS_RING_SZ_128;
1031	case 256:
1032		return CAS_RING_SZ_256;
1033	case 512:
1034		return CAS_RING_SZ_512;
1035	case 1024:
1036		return CAS_RING_SZ_1024;
1037	case 2048:
1038		return CAS_RING_SZ_2048;
1039	case 4096:
1040		return CAS_RING_SZ_4096;
1041	case 8192:
1042		return CAS_RING_SZ_8192;
1043	default:
1044		aprint_error("cas: invalid Receive Descriptor ring size %d\n",
1045		    sz);
1046		return CAS_RING_SZ_32;
1047	}
1048}
1049
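/*
 * cas_cringsize:
 *
 *	Translate a receive completion ring size (128 << n entries) into
 *	its register encoding n.
 */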
1050int
1051cas_cringsize(int sz)
1052{
1053	int i;
1054
1055	for (i = 0; i < 9; i++)
1056		if (sz == (128 << i))
1057			return i;
1058
1059	aprint_error("cas: invalid completion ring size %d\n", sz);
1060	return 128;
1061}
1062
1063/*
1064 * Initialization of interface: reset the chip and set up the
1065 * transmit/receive descriptor rings, MAC and DMA engines.
1066 */
1067int
1068cas_init(struct ifnet *ifp)
1069{
1070	struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
1071	bus_space_tag_t t = sc->sc_memt;
1072	bus_space_handle_t h = sc->sc_memh;
1073	int s;
1074	u_int max_frame_size;
1075	uint32_t v;
1076
1077	s = splnet();
1078
1079	DPRINTF(sc, ("%s: cas_init: calling stop\n", device_xname(sc->sc_dev)));
1080	/*
1081	 * Initialization sequence. The numbered steps below correspond
1082	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
1083	 * Channel Engine manual (part of the PCIO manual).
1084	 * See also the STP2002-STQ document from Sun Microsystems.
1085	 */
1086
1087	/* step 1 & 2. Reset the Ethernet Channel */
1088	cas_stop(ifp, 0);
1089	cas_reset(sc);
1090	DPRINTF(sc, ("%s: cas_init: restarting\n", device_xname(sc->sc_dev)));
1091
1092	/* Re-initialize the MIF */
1093	cas_mifinit(sc);
1094
1095	/* step 3. Setup data structures in host memory */
1096	cas_meminit(sc);
1097
1098	/* step 4. TX MAC registers & counters */
1099	cas_init_regs(sc);
1100	max_frame_size = ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN;
1101	v = (max_frame_size) | (0x2000 << 16) /* Burst size */;
1102	bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);
1103
1104	/* step 5. RX MAC registers & counters */
1105	cas_iff(sc);
1106
1107	/* step 6 & 7. Program Descriptor Ring Base Addresses */
1108	KASSERT((CAS_CDTXADDR(sc, 0) & 0x1fff) == 0);
1109	bus_space_write_4(t, h, CAS_TX_RING_PTR_HI,
1110	    (((uint64_t)CAS_CDTXADDR(sc, 0)) >> 32));
1111	bus_space_write_4(t, h, CAS_TX_RING_PTR_LO, CAS_CDTXADDR(sc, 0));
1112
1113	KASSERT((CAS_CDRXADDR(sc, 0) & 0x1fff) == 0);
1114	bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI,
1115	    (((uint64_t)CAS_CDRXADDR(sc, 0)) >> 32));
1116	bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO, CAS_CDRXADDR(sc, 0));
1117
1118	KASSERT((CAS_CDRXCADDR(sc, 0) & 0x1fff) == 0);
1119	bus_space_write_4(t, h, CAS_RX_CRING_PTR_HI,
1120	    (((uint64_t)CAS_CDRXCADDR(sc, 0)) >> 32));
1121	bus_space_write_4(t, h, CAS_RX_CRING_PTR_LO, CAS_CDRXCADDR(sc, 0));
1122
1123	if (CAS_PLUS(sc)) {
1124		KASSERT((CAS_CDRXADDR2(sc, 0) & 0x1fff) == 0);
1125		bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI2,
1126		    (((uint64_t)CAS_CDRXADDR2(sc, 0)) >> 32));
1127		bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO2,
1128		    CAS_CDRXADDR2(sc, 0));
1129	}
1130
1131	/* step 8. Global Configuration & Interrupt Mask */
1132	cas_estintr(sc, CAS_INTR_REG);
1133
1134	/* step 9. ETX Configuration: use mostly default values */
1135
1136	/* Enable DMA */
1137	v = cas_ringsize(CAS_NTXDESC /*XXX*/) << 10;
1138	bus_space_write_4(t, h, CAS_TX_CONFIG,
1139	    v | CAS_TX_CONFIG_TXDMA_EN | (1 << 24) | (1 << 29));
1140	bus_space_write_4(t, h, CAS_TX_KICK, 0);
1141
1142	/* step 10. ERX Configuration */
1143
1144	/* Encode Receive Descriptor ring size */
1145	v = cas_ringsize(CAS_NRXDESC) << CAS_RX_CONFIG_RXDRNG_SZ_SHIFT;
1146	if (CAS_PLUS(sc))
1147		v |= cas_ringsize(32) << CAS_RX_CONFIG_RXDRNG2_SZ_SHIFT;
1148
1149	/* Encode Receive Completion ring size */
1150	v |= cas_cringsize(CAS_NRXCOMP) << CAS_RX_CONFIG_RXCRNG_SZ_SHIFT;
1151
1152	/* Enable DMA */
1153	bus_space_write_4(t, h, CAS_RX_CONFIG,
1154	    v|(2<<CAS_RX_CONFIG_FBOFF_SHFT) | CAS_RX_CONFIG_RXDMA_EN);
1155
1156	/*
1157	 * The following value is for an OFF Threshold of about 3/4 full
1158	 * and an ON Threshold of 1/4 full.
1159	 */
1160	bus_space_write_4(t, h, CAS_RX_PAUSE_THRESH,
1161	    (3 * sc->sc_rxfifosize / 256) |
1162	    ((sc->sc_rxfifosize / 256) << 12));
1163	bus_space_write_4(t, h, CAS_RX_BLANKING, (6 << 12) | 6);
1164
1165	/* step 11. Configure Media */
1166	mii_ifmedia_change(&sc->sc_mii);
1167
1168	/* step 12. RX_MAC Configuration Register */
1169	v = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
1170	v |= CAS_MAC_RX_ENABLE | CAS_MAC_RX_STRIP_CRC;
1171	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, v);
1172
1173	/* step 14. Issue Transmit Pending command */
1174
1175	/* step 15.  Give the receiver a swift kick */
1176	bus_space_write_4(t, h, CAS_RX_KICK, CAS_NRXDESC-4);
1177	if (CAS_PLUS(sc))
1178		bus_space_write_4(t, h, CAS_RX_KICK2, 4);
1179
1180	/* Start the one second timer. */
1181	callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);
1182
1183	ifp->if_flags |= IFF_RUNNING;
1184	ifp->if_flags &= ~IFF_OACTIVE;
1185	ifp->if_timer = 0;
1186	splx(s);
1187
1188	return (0);
1189}
1190
1191void
1192cas_init_regs(struct cas_softc *sc)
1193{
1194	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1195	bus_space_tag_t t = sc->sc_memt;
1196	bus_space_handle_t h = sc->sc_memh;
1197	const u_char *laddr = CLLADDR(ifp->if_sadl);
1198	uint32_t v, r;
1199
1200	/* These regs are not cleared on reset */
1201	sc->sc_inited = 0;
1202	if (!sc->sc_inited) {
1203		/* Load recommended values  */
1204		bus_space_write_4(t, h, CAS_MAC_IPG0, 0x00);
1205		bus_space_write_4(t, h, CAS_MAC_IPG1, 0x08);
1206		bus_space_write_4(t, h, CAS_MAC_IPG2, 0x04);
1207
1208		bus_space_write_4(t, h, CAS_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
1209		/* Max frame and max burst size */
1210		v = ETHER_MAX_LEN | (0x2000 << 16) /* Burst size */;
1211		bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);
1212
1213		bus_space_write_4(t, h, CAS_MAC_PREAMBLE_LEN, 0x07);
1214		bus_space_write_4(t, h, CAS_MAC_JAM_SIZE, 0x04);
1215		bus_space_write_4(t, h, CAS_MAC_ATTEMPT_LIMIT, 0x10);
1216		bus_space_write_4(t, h, CAS_MAC_CONTROL_TYPE, 0x8088);
1217		bus_space_write_4(t, h, CAS_MAC_RANDOM_SEED,
1218		    ((laddr[5]<<8)|laddr[4])&0x3ff);
1219
1220		/* Secondary MAC addresses set to 0:0:0:0:0:0 */
1221		for (r = CAS_MAC_ADDR3; r < CAS_MAC_ADDR42; r += 4)
1222			bus_space_write_4(t, h, r, 0);
1223
1224		/* MAC control addr set to 0:1:c2:0:1:80 */
1225		bus_space_write_4(t, h, CAS_MAC_ADDR42, 0x0001);
1226		bus_space_write_4(t, h, CAS_MAC_ADDR43, 0xc200);
1227		bus_space_write_4(t, h, CAS_MAC_ADDR44, 0x0180);
1228
1229		/* MAC filter addr set to 0:0:0:0:0:0 */
1230		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER0, 0);
1231		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER1, 0);
1232		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER2, 0);
1233
1234		bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK1_2, 0);
1235		bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK0, 0);
1236
1237		/* Hash table initialized to 0 */
1238		for (r = CAS_MAC_HASH0; r <= CAS_MAC_HASH15; r += 4)
1239			bus_space_write_4(t, h, r, 0);
1240
1241		sc->sc_inited = 1;
1242	}
1243
1244	/* Counters need to be zeroed */
1245	bus_space_write_4(t, h, CAS_MAC_NORM_COLL_CNT, 0);
1246	bus_space_write_4(t, h, CAS_MAC_FIRST_COLL_CNT, 0);
1247	bus_space_write_4(t, h, CAS_MAC_EXCESS_COLL_CNT, 0);
1248	bus_space_write_4(t, h, CAS_MAC_LATE_COLL_CNT, 0);
1249	bus_space_write_4(t, h, CAS_MAC_DEFER_TMR_CNT, 0);
1250	bus_space_write_4(t, h, CAS_MAC_PEAK_ATTEMPTS, 0);
1251	bus_space_write_4(t, h, CAS_MAC_RX_FRAME_COUNT, 0);
1252	bus_space_write_4(t, h, CAS_MAC_RX_LEN_ERR_CNT, 0);
1253	bus_space_write_4(t, h, CAS_MAC_RX_ALIGN_ERR, 0);
1254	bus_space_write_4(t, h, CAS_MAC_RX_CRC_ERR_CNT, 0);
1255	bus_space_write_4(t, h, CAS_MAC_RX_CODE_VIOL, 0);
1256
1257	/* Un-pause stuff */
1258	bus_space_write_4(t, h, CAS_MAC_SEND_PAUSE_CMD, 0);
1259
1260	/*
1261	 * Set the station address.
1262	 */
1263	bus_space_write_4(t, h, CAS_MAC_ADDR0, (laddr[4]<<8) | laddr[5]);
1264	bus_space_write_4(t, h, CAS_MAC_ADDR1, (laddr[2]<<8) | laddr[3]);
1265	bus_space_write_4(t, h, CAS_MAC_ADDR2, (laddr[0]<<8) | laddr[1]);
1266}
1267
1268/*
1269 * Receive interrupt.
1270 */
1271int
1272cas_rint(struct cas_softc *sc)
1273{
1274	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1275	bus_space_tag_t t = sc->sc_memt;
1276	bus_space_handle_t h = sc->sc_memh;
1277	struct cas_rxsoft *rxs;
1278	struct mbuf *m;
1279	uint64_t word[4];
1280	int len, off, idx;
1281	int i, skip;
1282	void *cp;
1283
1284	for (i = sc->sc_rxptr;; i = CAS_NEXTRX(i + skip)) {
1285		CAS_CDRXCSYNC(sc, i,
1286		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1287
1288		word[0] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[0]);
1289		word[1] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[1]);
1290		word[2] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[2]);
1291		word[3] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[3]);
1292
1293		/* Stop if the hardware still owns the descriptor. */
1294		if ((word[0] & CAS_RC0_TYPE) == 0 || word[3] & CAS_RC3_OWN)
1295			break;
1296
1297		len = CAS_RC1_HDR_LEN(word[1]);
1298		if (len > 0) {
1299			off = CAS_RC1_HDR_OFF(word[1]);
1300			idx = CAS_RC1_HDR_IDX(word[1]);
1301			rxs = &sc->sc_rxsoft[idx];
1302
1303			DPRINTF(sc, ("hdr at idx %d, off %d, len %d\n",
1304			    idx, off, len));
1305
1306			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1307			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1308
1309			cp = rxs->rxs_kva + off * 256 + ETHER_ALIGN;
1310			m = m_devget(cp, len, 0, ifp);
1311
1312			if (word[0] & CAS_RC0_RELEASE_HDR)
1313				cas_add_rxbuf(sc, idx);
1314
1315			if (m != NULL) {
1316
1317				/*
1318				 * Pass this up to any BPF listeners, but only
1319				 * pass it up the stack if it's for us.
1320				 */
1321				m->m_pkthdr.csum_flags = 0;
1322				if_percpuq_enqueue(ifp->if_percpuq, m);
1323			} else
1324				ifp->if_ierrors++;
1325		}
1326
1327		len = CAS_RC0_DATA_LEN(word[0]);
1328		if (len > 0) {
1329			off = CAS_RC0_DATA_OFF(word[0]);
1330			idx = CAS_RC0_DATA_IDX(word[0]);
1331			rxs = &sc->sc_rxsoft[idx];
1332
1333			DPRINTF(sc, ("data at idx %d, off %d, len %d\n",
1334			    idx, off, len));
1335
1336			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1337			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1338
1339			/* XXX We should not be copying the packet here. */
1340			cp = rxs->rxs_kva + off + ETHER_ALIGN;
1341			m = m_devget(cp, len, 0, ifp);
1342
1343			if (word[0] & CAS_RC0_RELEASE_DATA)
1344				cas_add_rxbuf(sc, idx);
1345
1346			if (m != NULL) {
1347				/*
1348				 * Pass this up to any BPF listeners, but only
1349				 * pass it up the stack if it's for us.
1350				 */
1351				m->m_pkthdr.csum_flags = 0;
1352				if_percpuq_enqueue(ifp->if_percpuq, m);
1353			} else
1354				ifp->if_ierrors++;
1355		}
1356
1357		if (word[0] & CAS_RC0_SPLIT)
1358			aprint_error_dev(sc->sc_dev, "split packet\n");
1359
1360		skip = CAS_RC0_SKIP(word[0]);
1361	}
1362
1363	while (sc->sc_rxptr != i) {
1364		sc->sc_rxcomps[sc->sc_rxptr].cc_word[0] = 0;
1365		sc->sc_rxcomps[sc->sc_rxptr].cc_word[1] = 0;
1366		sc->sc_rxcomps[sc->sc_rxptr].cc_word[2] = 0;
1367		sc->sc_rxcomps[sc->sc_rxptr].cc_word[3] =
1368		    CAS_DMA_WRITE(CAS_RC3_OWN);
1369		CAS_CDRXCSYNC(sc, sc->sc_rxptr,
1370		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1371
1372		sc->sc_rxptr = CAS_NEXTRX(sc->sc_rxptr);
1373	}
1374
1375	bus_space_write_4(t, h, CAS_RX_COMP_TAIL, sc->sc_rxptr);
1376
1377	DPRINTF(sc, ("cas_rint: done sc->rxptr %d, complete %d\n",
1378		sc->sc_rxptr, bus_space_read_4(t, h, CAS_RX_COMPLETION)));
1379
1380	return (1);
1381}
1382
1383/*
1384 * cas_add_rxbuf:
1385 *
1386 *	Add a receive buffer to the indicated descriptor.
1387 */
1388int
1389cas_add_rxbuf(struct cas_softc *sc, int idx)
1390{
1391	bus_space_tag_t t = sc->sc_memt;
1392	bus_space_handle_t h = sc->sc_memh;
1393
1394	CAS_INIT_RXDESC(sc, sc->sc_rxdptr, idx);
1395
1396	if ((sc->sc_rxdptr % 4) == 0)
1397		bus_space_write_4(t, h, CAS_RX_KICK, sc->sc_rxdptr);
1398
1399	if (++sc->sc_rxdptr == CAS_NRXDESC)
1400		sc->sc_rxdptr = 0;
1401
1402	return (0);
1403}
1404
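/*
 * cas_eint:
 *
 *	Error interrupt.  MIF (link change) events are only noted under
 *	debug; any other error has its status bits decoded and printed.
 */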
1405int
1406cas_eint(struct cas_softc *sc, u_int status)
1407{
1408	char bits[128];
1409	if ((status & CAS_INTR_MIF) != 0) {
1410		DPRINTF(sc, ("%s: link status changed\n",
1411		    device_xname(sc->sc_dev)));
1412		return (1);
1413	}
1414
1415	snprintb(bits, sizeof(bits), CAS_INTR_BITS, status);
1416	printf("%s: status=%s\n", device_xname(sc->sc_dev), bits);
1417	return (1);
1418}
1419
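/*
 * cas_pint:
 *
 *	PCS interrupt: read the MII interrupt status; a link status change
 *	is only reported when CAS_DEBUG is defined.
 */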
1420int
1421cas_pint(struct cas_softc *sc)
1422{
1423	bus_space_tag_t t = sc->sc_memt;
1424	bus_space_handle_t seb = sc->sc_memh;
1425	uint32_t status;
1426
1427	status = bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
1428	status |= bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
1429#ifdef CAS_DEBUG
1430	if (status)
1431		printf("%s: link status changed\n", device_xname(sc->sc_dev));
1432#endif
1433	return (1);
1434}
1435
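/*
 * cas_intr:
 *
 *	Interrupt service routine: dispatch to the PCS, error, transmit
 *	and receive handlers according to the status register, and
 *	reinitialize the chip on fatal MAC TX/RX faults.
 */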
1436int
1437cas_intr(void *v)
1438{
1439	struct cas_softc *sc = (struct cas_softc *)v;
1440	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1441	bus_space_tag_t t = sc->sc_memt;
1442	bus_space_handle_t seb = sc->sc_memh;
1443	uint32_t status;
1444	int r = 0;
1445#ifdef CAS_DEBUG
1446	char bits[128];
1447#endif
1448
1449	sc->sc_ev_intr.ev_count++;
1450
1451	status = bus_space_read_4(t, seb, CAS_STATUS);
1452#ifdef CAS_DEBUG
1453	snprintb(bits, sizeof(bits), CAS_INTR_BITS, status);
1454#endif
1455	DPRINTF(sc, ("%s: cas_intr: cplt %x status %s\n",
1456		device_xname(sc->sc_dev), (status>>19), bits));
1457
1458	if ((status & CAS_INTR_PCS) != 0)
1459		r |= cas_pint(sc);
1460
1461	if ((status & (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR |
1462	    CAS_INTR_RX_COMP_FULL | CAS_INTR_BERR)) != 0)
1463		r |= cas_eint(sc, status);
1464
1465	if ((status & (CAS_INTR_TX_EMPTY | CAS_INTR_TX_INTME)) != 0)
1466		r |= cas_tint(sc, status);
1467
1468	if ((status & (CAS_INTR_RX_DONE | CAS_INTR_RX_NOBUF)) != 0)
1469		r |= cas_rint(sc);
1470
1471	/* We should eventually do more than just print out error stats. */
1472	if (status & CAS_INTR_TX_MAC) {
1473		int txstat = bus_space_read_4(t, seb, CAS_MAC_TX_STATUS);
1474#ifdef CAS_DEBUG
1475		if (txstat & ~CAS_MAC_TX_XMIT_DONE)
1476			printf("%s: MAC tx fault, status %x\n",
1477			    device_xname(sc->sc_dev), txstat);
1478#endif
1479		if (txstat & (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_PKT_TOO_LONG))
1480			cas_init(ifp);
1481	}
1482	if (status & CAS_INTR_RX_MAC) {
1483		int rxstat = bus_space_read_4(t, seb, CAS_MAC_RX_STATUS);
1484#ifdef CAS_DEBUG
1485		if (rxstat & ~CAS_MAC_RX_DONE)
1486			printf("%s: MAC rx fault, status %x\n",
1487			    device_xname(sc->sc_dev), rxstat);
1488#endif
1489		/*
1490		 * On some chip revisions CAS_MAC_RX_OVERFLOW happens often
1491		 * due to a silicon bug, so handle it silently.
1492		 */
1493		if (rxstat & CAS_MAC_RX_OVERFLOW) {
1494			ifp->if_ierrors++;
1495			cas_init(ifp);
1496		}
1497#ifdef CAS_DEBUG
1498		else if (rxstat & ~(CAS_MAC_RX_DONE | CAS_MAC_RX_FRAME_CNT))
1499			printf("%s: MAC rx fault, status %x\n",
1500			    device_xname(sc->sc_dev), rxstat);
1501#endif
1502	}
1503	rnd_add_uint32(&sc->rnd_source, status);
1504	return (r);
1505}
1506
1507
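/*
 * cas_watchdog:
 *
 *	Transmit watchdog timeout: log the event, count an output error
 *	and reinitialize the chip.
 */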
1508void
1509cas_watchdog(struct ifnet *ifp)
1510{
1511	struct cas_softc *sc = ifp->if_softc;
1512
1513	DPRINTF(sc, ("cas_watchdog: CAS_RX_CONFIG %x CAS_MAC_RX_STATUS %x "
1514		"CAS_MAC_RX_CONFIG %x\n",
1515		bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_RX_CONFIG),
1516		bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_STATUS),
1517		bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_CONFIG)));
1518
1519	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
1520	++ifp->if_oerrors;
1521
1522	/* Try to get more packets going. */
1523	cas_init(ifp);
1524}
1525
1526/*
1527 * Initialize the MII Management Interface
1528 */
1529void
1530cas_mifinit(struct cas_softc *sc)
1531{
1532	bus_space_tag_t t = sc->sc_memt;
1533	bus_space_handle_t mif = sc->sc_memh;
1534
1535	/* Configure the MIF in frame mode */
1536	sc->sc_mif_config = bus_space_read_4(t, mif, CAS_MIF_CONFIG);
1537	sc->sc_mif_config &= ~CAS_MIF_CONFIG_BB_ENA;
1538	bus_space_write_4(t, mif, CAS_MIF_CONFIG, sc->sc_mif_config);
1539}
1540
1541/*
1542 * MII interface
1543 *
1544 * The Cassini MII interface supports at least three different operating modes:
1545 *
1546 * Bitbang mode is implemented using data, clock and output enable registers.
1547 *
1548 * Frame mode is implemented by loading a complete frame into the frame
1549 * register and polling the valid bit for completion.
1550 *
1551 * Polling mode uses the frame register but completion is indicated by
1552 * an interrupt.
1553 *
1554 */
1555int
1556cas_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
1557{
1558	struct cas_softc *sc = device_private(self);
1559	bus_space_tag_t t = sc->sc_memt;
1560	bus_space_handle_t mif = sc->sc_memh;
1561	int n;
1562	uint32_t v;
1563
1564#ifdef CAS_DEBUG
1565	if (sc->sc_debug)
1566		printf("cas_mii_readreg: phy %d reg %d\n", phy, reg);
1567#endif
1568
1569	/* Construct the frame command */
1570	v = (reg << CAS_MIF_REG_SHIFT)	| (phy << CAS_MIF_PHY_SHIFT) |
1571		CAS_MIF_FRAME_READ;
1572
1573	bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
1574	for (n = 0; n < 100; n++) {
1575		DELAY(1);
1576		v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
1577		if (v & CAS_MIF_FRAME_TA0) {
1578			*val = v & CAS_MIF_FRAME_DATA;
1579			return 0;
1580		}
1581	}
1582
1583	printf("%s: mii_read timeout\n", device_xname(sc->sc_dev));
1584	return ETIMEDOUT;
1585}
1586
1587int
1588cas_mii_writereg(device_t self, int phy, int reg, uint16_t val)
1589{
1590	struct cas_softc *sc = device_private(self);
1591	bus_space_tag_t t = sc->sc_memt;
1592	bus_space_handle_t mif = sc->sc_memh;
1593	int n;
1594	uint32_t v;
1595
1596#ifdef CAS_DEBUG
1597	if (sc->sc_debug)
1598		printf("cas_mii_writereg: phy %d reg %d val %x\n",
1599			phy, reg, val);
1600#endif
1601
1602	/* Construct the frame command */
1603	v = CAS_MIF_FRAME_WRITE			|
1604	    (phy << CAS_MIF_PHY_SHIFT)		|
1605	    (reg << CAS_MIF_REG_SHIFT)		|
1606	    (val & CAS_MIF_FRAME_DATA);
1607
1608	bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
1609	for (n = 0; n < 100; n++) {
1610		DELAY(1);
1611		v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
1612		if (v & CAS_MIF_FRAME_TA0)
1613			return 0;
1614	}
1615
1616	printf("%s: mii_write timeout\n", device_xname(sc->sc_dev));
1617	return ETIMEDOUT;
1618}
1619
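/*
 * cas_mii_statchg:
 *
 *	MII status change callback: reprogram the MAC transmit and XIF
 *	configuration to match the newly negotiated speed and duplex.
 */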
1620void
1621cas_mii_statchg(struct ifnet *ifp)
1622{
1623	struct cas_softc *sc = ifp->if_softc;
1624#ifdef CAS_DEBUG
1625	int instance = IFM_INST(sc->sc_media.ifm_cur->ifm_media);
1626#endif
1627	bus_space_tag_t t = sc->sc_memt;
1628	bus_space_handle_t mac = sc->sc_memh;
1629	uint32_t v;
1630
1631#ifdef CAS_DEBUG
1632	if (sc->sc_debug)
1633		printf("cas_mii_statchg: status change: phy = %d\n",
1634		    sc->sc_phys[instance]);
1635#endif
1636
1637	/* Set tx full duplex options */
1638	bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, 0);
1639	delay(10000); /* reg must be cleared and a delay observed before changing it. */
1640	v = CAS_MAC_TX_ENA_IPG0 | CAS_MAC_TX_NGU | CAS_MAC_TX_NGU_LIMIT |
1641		CAS_MAC_TX_ENABLE;
1642	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
1643		v |= CAS_MAC_TX_IGN_CARRIER | CAS_MAC_TX_IGN_COLLIS;
1644	}
1645	bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, v);
1646
1647	/* XIF Configuration */
1648	v = CAS_MAC_XIF_TX_MII_ENA;
1649	v |= CAS_MAC_XIF_LINK_LED;
1650
1651	/* MII needs echo disable if half duplex. */
1652	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
1653		/* turn on full duplex LED */
1654		v |= CAS_MAC_XIF_FDPLX_LED;
1655	else
1656		/* half duplex -- disable echo */
1657		v |= CAS_MAC_XIF_ECHO_DISABL;
1658
1659	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
1660	case IFM_1000_T:  /* Gigabit using GMII interface */
1661	case IFM_1000_SX:
1662		v |= CAS_MAC_XIF_GMII_MODE;
1663		break;
1664	default:
1665		v &= ~CAS_MAC_XIF_GMII_MODE;
1666	}
1667	bus_space_write_4(t, mac, CAS_MAC_XIF_CONFIG, v);
1668}
1669
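/*
 * cas_pcs_readreg / cas_pcs_writereg:
 *
 *	MII-style access to the on-chip PCS used for SERDES media;
 *	standard MII register numbers are mapped onto the corresponding
 *	PCS registers.
 */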
1670int
1671cas_pcs_readreg(device_t self, int phy, int reg, uint16_t *val)
1672{
1673	struct cas_softc *sc = device_private(self);
1674	bus_space_tag_t t = sc->sc_memt;
1675	bus_space_handle_t pcs = sc->sc_memh;
1676
1677#ifdef CAS_DEBUG
1678	if (sc->sc_debug)
1679		printf("cas_pcs_readreg: phy %d reg %d\n", phy, reg);
1680#endif
1681
1682	if (phy != CAS_PHYAD_EXTERNAL)
1683		return -1;
1684
1685	switch (reg) {
1686	case MII_BMCR:
1687		reg = CAS_MII_CONTROL;
1688		break;
1689	case MII_BMSR:
1690		reg = CAS_MII_STATUS;
1691		break;
1692	case MII_ANAR:
1693		reg = CAS_MII_ANAR;
1694		break;
1695	case MII_ANLPAR:
1696		reg = CAS_MII_ANLPAR;
1697		break;
1698	case MII_EXTSR:
1699		*val = EXTSR_1000XFDX | EXTSR_1000XHDX;
1700		return 0;
1701	default:
1702		return (0);
1703	}
1704
1705	*val = bus_space_read_4(t, pcs, reg) & 0xffff;
1706	return 0;
1707}
1708
1709int
1710cas_pcs_writereg(device_t self, int phy, int reg, uint16_t val)
1711{
1712	struct cas_softc *sc = device_private(self);
1713	bus_space_tag_t t = sc->sc_memt;
1714	bus_space_handle_t pcs = sc->sc_memh;
1715	int reset = 0;
1716
1717#ifdef CAS_DEBUG
1718	if (sc->sc_debug)
1719		printf("cas_pcs_writereg: phy %d reg %d val %x\n",
1720			phy, reg, val);
1721#endif
1722
1723	if (phy != CAS_PHYAD_EXTERNAL)
1724		return -1;
1725
1726	if (reg == MII_ANAR)
1727		bus_space_write_4(t, pcs, CAS_MII_CONFIG, 0);
1728
1729	switch (reg) {
1730	case MII_BMCR:
1731		reset = (val & CAS_MII_CONTROL_RESET);
1732		reg = CAS_MII_CONTROL;
1733		break;
1734	case MII_BMSR:
1735		reg = CAS_MII_STATUS;
1736		break;
1737	case MII_ANAR:
1738		reg = CAS_MII_ANAR;
1739		break;
1740	case MII_ANLPAR:
1741		reg = CAS_MII_ANLPAR;
1742		break;
1743	default:
1744		return 0;
1745	}
1746
1747	bus_space_write_4(t, pcs, reg, val);
1748
1749	if (reset)
1750		cas_bitwait(sc, pcs, CAS_MII_CONTROL, CAS_MII_CONTROL_RESET, 0);
1751
1752	if (reg == CAS_MII_ANAR || reset)
1753		bus_space_write_4(t, pcs, CAS_MII_CONFIG,
1754		    CAS_MII_CONFIG_ENABLE);
1755
1756	return 0;
1757}
1758
1759int
1760cas_mediachange(struct ifnet *ifp)
1761{
1762	struct cas_softc *sc = ifp->if_softc;
1763	struct mii_data *mii = &sc->sc_mii;
1764
1765	if (mii->mii_instance) {
1766		struct mii_softc *miisc;
1767		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1768			mii_phy_reset(miisc);
1769	}
1770
1771	return (mii_mediachg(&sc->sc_mii));
1772}
1773
1774void
1775cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1776{
1777	struct cas_softc *sc = ifp->if_softc;
1778
1779	mii_pollstat(&sc->sc_mii);
1780	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1781	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1782}
1783
1784/*
1785 * Process an ioctl request.
1786 */
1787int
1788cas_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1789{
1790	struct cas_softc *sc = ifp->if_softc;
1791	int s, error = 0;
1792
1793	s = splnet();
1794
1795	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
1796		error = 0;
1797		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
1798			;
1799		else if (ifp->if_flags & IFF_RUNNING) {
1800			/*
1801			 * Multicast list has changed; set the hardware filter
1802			 * accordingly.
1803			 */
1804			cas_iff(sc);
1805		}
1806	}
1807
1808	splx(s);
1809	return (error);
1810}
1811
1812static bool
1813cas_suspend(device_t self, const pmf_qual_t *qual)
1814{
1815	struct cas_softc *sc = device_private(self);
1816	bus_space_tag_t t = sc->sc_memt;
1817	bus_space_handle_t h = sc->sc_memh;
1818
1819	bus_space_write_4(t, h, CAS_INTMASK, ~(uint32_t)0);
1820	if (sc->sc_ih != NULL) {
1821		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
1822		sc->sc_ih = NULL;
1823	}
1824
1825	return true;
1826}
1827
1828static bool
1829cas_resume(device_t self, const pmf_qual_t *qual)
1830{
1831	struct cas_softc *sc = device_private(self);
1832
1833	return cas_estintr(sc, CAS_INTR_PCI | CAS_INTR_REG);
1834}
1835
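/*
 * cas_estintr:
 *
 *	Establish the PCI interrupt handler and/or unmask the chip's
 *	interrupt sources, according to the CAS_INTR_PCI / CAS_INTR_REG
 *	flags in `what'.
 */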
1836static bool
1837cas_estintr(struct cas_softc *sc, int what)
1838{
1839	bus_space_tag_t t = sc->sc_memt;
1840	bus_space_handle_t h = sc->sc_memh;
1841	const char *intrstr = NULL;
1842	char intrbuf[PCI_INTRSTR_LEN];
1843
1844	/* PCI interrupts */
1845	if (what & CAS_INTR_PCI) {
1846		intrstr = pci_intr_string(sc->sc_pc, sc->sc_handle, intrbuf,
1847		    sizeof(intrbuf));
1848		sc->sc_ih = pci_intr_establish_xname(sc->sc_pc, sc->sc_handle,
1849		    IPL_NET, cas_intr, sc, device_xname(sc->sc_dev));
1850		if (sc->sc_ih == NULL) {
1851			aprint_error_dev(sc->sc_dev,
1852			    "unable to establish interrupt");
1853			if (intrstr != NULL)
1854				aprint_error(" at %s", intrstr);
1855			aprint_error("\n");
1856			return false;
1857		}
1858
1859		aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1860	}
1861
1862	/* Interrupt register */
1863	if (what & CAS_INTR_REG) {
1864		bus_space_write_4(t, h, CAS_INTMASK,
1865		    ~(CAS_INTR_TX_INTME | CAS_INTR_TX_EMPTY |
1866		    CAS_INTR_TX_TAG_ERR |
1867		    CAS_INTR_RX_DONE | CAS_INTR_RX_NOBUF |
1868		    CAS_INTR_RX_TAG_ERR |
1869		    CAS_INTR_RX_COMP_FULL | CAS_INTR_PCS |
1870		    CAS_INTR_MAC_CONTROL | CAS_INTR_MIF |
1871		    CAS_INTR_BERR));
1872		bus_space_write_4(t, h, CAS_MAC_RX_MASK,
1873		    CAS_MAC_RX_DONE | CAS_MAC_RX_FRAME_CNT);
1874		bus_space_write_4(t, h, CAS_MAC_TX_MASK, CAS_MAC_TX_XMIT_DONE);
1875		bus_space_write_4(t, h, CAS_MAC_CONTROL_MASK, 0); /* XXXX */
1876	}
1877	return true;
1878}
1879
1880bool
1881cas_shutdown(device_t self, int howto)
1882{
1883	struct cas_softc *sc = device_private(self);
1884	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1885
1886	cas_stop(ifp, 1);
1887
1888	return true;
1889}
1890
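/*
 * cas_iff:
 *
 *	Program the RX MAC filter from the interface flags and multicast
 *	list: promiscuous, promiscuous-group, or a 256-bit hash filter.
 */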
1891void
1892cas_iff(struct cas_softc *sc)
1893{
1894	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1895	struct ethercom *ec = &sc->sc_ethercom;
1896	struct ether_multi *enm;
1897	struct ether_multistep step;
1898	bus_space_tag_t t = sc->sc_memt;
1899	bus_space_handle_t h = sc->sc_memh;
1900	uint32_t crc, hash[16], rxcfg;
1901	int i;
1902
1903	rxcfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
1904	rxcfg &= ~(CAS_MAC_RX_HASH_FILTER | CAS_MAC_RX_PROMISCUOUS |
1905	    CAS_MAC_RX_PROMISC_GRP);
1906	ifp->if_flags &= ~IFF_ALLMULTI;
1907
1908	if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
1909		ifp->if_flags |= IFF_ALLMULTI;
1910		if (ifp->if_flags & IFF_PROMISC)
1911			rxcfg |= CAS_MAC_RX_PROMISCUOUS;
1912		else
1913			rxcfg |= CAS_MAC_RX_PROMISC_GRP;
1914	} else {
1915		/*
1916		 * Set up multicast address filter by passing all multicast
1917		 * addresses through a crc generator, and then using the
1918		 * high order 8 bits as an index into the 256 bit logical
1919		 * address filter.  The high order 4 bits select the word,
1920		 * while the other 4 bits select the bit within the word
1921		 * (where bit 0 is the MSB).
1922		 */
1923
1924		rxcfg |= CAS_MAC_RX_HASH_FILTER;
1925
1926		/* Clear hash table */
1927		for (i = 0; i < 16; i++)
1928			hash[i] = 0;
1929
1930		ETHER_FIRST_MULTI(step, ec, enm);
1931		while (enm != NULL) {
1932			crc = ether_crc32_le(enm->enm_addrlo,
1933			    ETHER_ADDR_LEN);
1934
1935			/* Just want the 8 most significant bits. */
1936			crc >>= 24;
1937
1938			/* Set the corresponding bit in the filter. */
1939			hash[crc >> 4] |= 1 << (15 - (crc & 15));
1940
1941			ETHER_NEXT_MULTI(step, enm);
1942		}
1943
1944		/* Now load the hash table into the chip (if we are using it) */
1945		for (i = 0; i < 16; i++) {
1946			bus_space_write_4(t, h,
1947			    CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0),
1948			    hash[i]);
1949		}
1950	}
1951
1952	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, rxcfg);
1953}
1954
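/*
 * cas_encap:
 *
 *	Map an mbuf chain into transmit descriptors starting at *bixp and
 *	kick the transmitter; on success *bixp is advanced past the last
 *	descriptor used.  Returns ENOBUFS if the chain cannot be mapped or
 *	would overfill the ring.
 */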
1955int
1956cas_encap(struct cas_softc *sc, struct mbuf *mhead, uint32_t *bixp)
1957{
1958	uint64_t flags;
1959	uint32_t cur, frag, i;
1960	bus_dmamap_t map;
1961
1962	cur = frag = *bixp;
1963	map = sc->sc_txd[cur].sd_map;
1964
1965	if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, mhead,
1966	    BUS_DMA_NOWAIT) != 0) {
1967		return (ENOBUFS);
1968	}
1969
1970	if ((sc->sc_tx_cnt + map->dm_nsegs) > (CAS_NTXDESC - 2)) {
1971		bus_dmamap_unload(sc->sc_dmatag, map);
1972		return (ENOBUFS);
1973	}
1974
1975	bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
1976	    BUS_DMASYNC_PREWRITE);
1977
1978	for (i = 0; i < map->dm_nsegs; i++) {
1979		sc->sc_txdescs[frag].cd_addr =
1980		    CAS_DMA_WRITE(map->dm_segs[i].ds_addr);
1981		flags = (map->dm_segs[i].ds_len & CAS_TD_BUFSIZE) |
1982		    (i == 0 ? CAS_TD_START_OF_PACKET : 0) |
1983		    ((i == (map->dm_nsegs - 1)) ? CAS_TD_END_OF_PACKET : 0);
1984		sc->sc_txdescs[frag].cd_flags = CAS_DMA_WRITE(flags);
1985		bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
1986		    CAS_CDTXOFF(frag), sizeof(struct cas_desc),
1987		    BUS_DMASYNC_PREWRITE);
1988		cur = frag;
1989		if (++frag == CAS_NTXDESC)
1990			frag = 0;
1991	}
1992
1993	sc->sc_tx_cnt += map->dm_nsegs;
1994	sc->sc_txd[*bixp].sd_map = sc->sc_txd[cur].sd_map;
1995	sc->sc_txd[cur].sd_map = map;
1996	sc->sc_txd[cur].sd_mbuf = mhead;
1997
1998	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_TX_KICK, frag);
1999
2000	*bixp = frag;
2001
2002	/* sync descriptors */
2003
2004	return (0);
2005}
2006
2007/*
2008 * Transmit interrupt.
2009 */
2010int
2011cas_tint(struct cas_softc *sc, uint32_t status)
2012{
2013	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2014	struct cas_sxd *sd;
2015	uint32_t cons, comp;
2016
2017	comp = bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_TX_COMPLETION);
2018	cons = sc->sc_tx_cons;
2019	while (cons != comp) {
2020		sd = &sc->sc_txd[cons];
2021		if (sd->sd_mbuf != NULL) {
2022			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
2023			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2024			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
2025			m_freem(sd->sd_mbuf);
2026			sd->sd_mbuf = NULL;
2027			ifp->if_opackets++;
2028		}
2029		sc->sc_tx_cnt--;
2030		if (++cons == CAS_NTXDESC)
2031			cons = 0;
2032	}
2033	sc->sc_tx_cons = cons;
2034
2035	if (sc->sc_tx_cnt < CAS_NTXDESC - 2)
2036		ifp->if_flags &= ~IFF_OACTIVE;
2037	if (sc->sc_tx_cnt == 0)
2038		ifp->if_timer = 0;
2039
2040	if_schedule_deferred_start(ifp);
2041
2042	return (1);
2043}
2044
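/*
 * cas_start:
 *
 *	ifnet start routine: pull packets off the send queue and hand them
 *	to cas_encap() until the ring fills, marking the interface
 *	IFF_OACTIVE when it does.
 */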
2045void
2046cas_start(struct ifnet *ifp)
2047{
2048	struct cas_softc *sc = ifp->if_softc;
2049	struct mbuf *m;
2050	uint32_t bix;
2051
2052	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2053		return;
2054
2055	bix = sc->sc_tx_prod;
2056	while (sc->sc_txd[bix].sd_mbuf == NULL) {
2057		IFQ_POLL(&ifp->if_snd, m);
2058		if (m == NULL)
2059			break;
2060
2061		/*
2062		 * If BPF is listening on this interface, let it see the
2063		 * packet before we commit it to the wire.
2064		 */
2065		bpf_mtap(ifp, m, BPF_D_OUT);
2066
2067		/*
2068		 * Encapsulate this packet and start it going...
2069		 * or fail...
2070		 */
2071		if (cas_encap(sc, m, &bix)) {
2072			ifp->if_flags |= IFF_OACTIVE;
2073			break;
2074		}
2075
2076		IFQ_DEQUEUE(&ifp->if_snd, m);
2077		ifp->if_timer = 5;
2078	}
2079
2080	sc->sc_tx_prod = bix;
2081}
2082
2083MODULE(MODULE_CLASS_DRIVER, if_cas, "pci");
2084
2085#ifdef _MODULE
2086#include "ioconf.c"
2087#endif
2088
2089static int
2090if_cas_modcmd(modcmd_t cmd, void *opaque)
2091{
2092	int error = 0;
2093
2094	switch (cmd) {
2095	case MODULE_CMD_INIT:
2096#ifdef _MODULE
2097		error = config_init_component(cfdriver_ioconf_cas,
2098		    cfattach_ioconf_cas, cfdata_ioconf_cas);
2099#endif
2100		return error;
2101	case MODULE_CMD_FINI:
2102#ifdef _MODULE
2103		error = config_fini_component(cfdriver_ioconf_cas,
2104		    cfattach_ioconf_cas, cfdata_ioconf_cas);
2105#endif
2106		return error;
2107	default:
2108		return ENOTTY;
2109	}
2110}
2111