1/*	$NetBSD: if_cas.c,v 1.48 2024/06/29 12:11:11 riastradh Exp $	*/
2/*	$OpenBSD: if_cas.c,v 1.29 2009/11/29 16:19:38 kettenis Exp $	*/
3
4/*
5 *
6 * Copyright (C) 2007 Mark Kettenis.
7 * Copyright (C) 2001 Eduardo Horvath.
8 * All rights reserved.
9 *
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 */
33
34/*
35 * Driver for Sun Cassini ethernet controllers.
36 *
37 * There are basically two variants of this chip: Cassini and
38 * Cassini+.  We can distinguish between the two by revision: 0x10 and
39 * up are Cassini+.  The most important difference is that Cassini+
40 * has a second RX descriptor ring.  Cassini+ will not work without
41 * configuring that second ring.  However, since we don't use it we
42 * don't actually fill the descriptors, and only hand off the first
43 * four to the chip.
44 */
45
46#include <sys/cdefs.h>
47__KERNEL_RCSID(0, "$NetBSD: if_cas.c,v 1.48 2024/06/29 12:11:11 riastradh Exp $");
48
49#ifndef _MODULE
50#include "opt_inet.h"
51#endif
52
53#include <sys/param.h>
54#include <sys/systm.h>
55#include <sys/callout.h>
56#include <sys/mbuf.h>
57#include <sys/syslog.h>
58#include <sys/kernel.h>
59#include <sys/socket.h>
60#include <sys/ioctl.h>
61#include <sys/errno.h>
62#include <sys/device.h>
63#include <sys/module.h>
64
65#include <machine/endian.h>
66
67#include <net/if.h>
68#include <net/if_dl.h>
69#include <net/if_media.h>
70#include <net/if_ether.h>
71
72#ifdef INET
73#include <netinet/in.h>
74#include <netinet/in_systm.h>
75#include <netinet/in_var.h>
76#include <netinet/ip.h>
77#include <netinet/tcp.h>
78#include <netinet/udp.h>
79#endif
80
81#include <net/bpf.h>
82
83#include <sys/bus.h>
84#include <sys/intr.h>
85#include <sys/rndsource.h>
86
87#include <dev/mii/mii.h>
88#include <dev/mii/miivar.h>
89#include <dev/mii/mii_bitbang.h>
90
91#include <dev/pci/pcivar.h>
92#include <dev/pci/pcireg.h>
93#include <dev/pci/pcidevs.h>
94#include <prop/proplib.h>
95
96#include <dev/pci/if_casreg.h>
97#include <dev/pci/if_casvar.h>
98
99#define TRIES	10000
100
101static bool	cas_estintr(struct cas_softc *sc, int);
102bool		cas_shutdown(device_t, int);
103static bool	cas_suspend(device_t, const pmf_qual_t *);
104static bool	cas_resume(device_t, const pmf_qual_t *);
105static int	cas_detach(device_t, int);
106static void	cas_partial_detach(struct cas_softc *, enum cas_attach_stage);
107
108int		cas_match(device_t, cfdata_t, void *);
109void		cas_attach(device_t, device_t, void *);
110
111
112CFATTACH_DECL3_NEW(cas, sizeof(struct cas_softc),
113    cas_match, cas_attach, cas_detach, NULL, NULL, NULL,
114    DVF_DETACH_SHUTDOWN);
115
116int	cas_pci_readvpd(struct cas_softc *, struct pci_attach_args *, uint8_t *);
117
118void		cas_config(struct cas_softc *, const uint8_t *);
119void		cas_start(struct ifnet *);
120void		cas_stop(struct ifnet *, int);
121int		cas_ioctl(struct ifnet *, u_long, void *);
122void		cas_tick(void *);
123void		cas_watchdog(struct ifnet *);
124int		cas_init(struct ifnet *);
125void		cas_init_regs(struct cas_softc *);
126int		cas_ringsize(int);
127int		cas_cringsize(int);
128int		cas_meminit(struct cas_softc *);
129void		cas_mifinit(struct cas_softc *);
130int		cas_bitwait(struct cas_softc *, bus_space_handle_t, int,
131		    uint32_t, uint32_t);
132void		cas_reset(struct cas_softc *);
133int		cas_reset_rx(struct cas_softc *);
134int		cas_reset_tx(struct cas_softc *);
135int		cas_disable_rx(struct cas_softc *);
136int		cas_disable_tx(struct cas_softc *);
137void		cas_rxdrain(struct cas_softc *);
138int		cas_add_rxbuf(struct cas_softc *, int);
139void		cas_iff(struct cas_softc *);
140int		cas_encap(struct cas_softc *, struct mbuf *, uint32_t *);
141
142/* MII methods & callbacks */
143int		cas_mii_readreg(device_t, int, int, uint16_t*);
144int		cas_mii_writereg(device_t, int, int, uint16_t);
145void		cas_mii_statchg(struct ifnet *);
146int		cas_pcs_readreg(device_t, int, int, uint16_t *);
147int		cas_pcs_writereg(device_t, int, int, uint16_t);
148
149int		cas_mediachange(struct ifnet *);
150void		cas_mediastatus(struct ifnet *, struct ifmediareq *);
151
152int		cas_eint(struct cas_softc *, u_int);
153int		cas_rint(struct cas_softc *);
154int		cas_tint(struct cas_softc *, uint32_t);
155int		cas_pint(struct cas_softc *);
156int		cas_intr(void *);
157
158#ifdef CAS_DEBUG
159#define	DPRINTF(sc, x)	if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
160				printf x
161#else
162#define	DPRINTF(sc, x)	/* nothing */
163#endif
164
165static const struct device_compatible_entry compat_data[] = {
166	{ .id = PCI_ID_CODE(PCI_VENDOR_SUN,
167		PCI_PRODUCT_SUN_CASSINI),
168	  .value = CAS_CAS },
169
170	{ .id = PCI_ID_CODE(PCI_VENDOR_NS,
171		PCI_PRODUCT_NS_SATURN),
172	  .value = CAS_SATURN },
173
174	PCI_COMPAT_EOL
175};
176
177#define	CAS_LOCAL_MAC_ADDRESS	"local-mac-address"
178#define	CAS_PHY_INTERFACE	"phy-interface"
179#define	CAS_PHY_TYPE		"phy-type"
180#define	CAS_PHY_TYPE_PCS	"pcs"
181
182int
183cas_match(device_t parent, cfdata_t cf, void *aux)
184{
185	struct pci_attach_args *pa = aux;
186
187	return pci_compatible_match(pa, compat_data);
188}
189
190#define	PROMHDR_PTR_DATA	0x18
191#define	PROMDATA_PTR_VPD	0x08
192#define	PROMDATA_DATA2		0x0a
193
194static const uint8_t cas_promhdr[] = { 0x55, 0xaa };
195static const uint8_t cas_promdat[] = {
196	'P', 'C', 'I', 'R',
197	PCI_VENDOR_SUN & 0xff, PCI_VENDOR_SUN >> 8,
198	PCI_PRODUCT_SUN_CASSINI & 0xff, PCI_PRODUCT_SUN_CASSINI >> 8
199};
200static const uint8_t cas_promdat_ns[] = {
201	'P', 'C', 'I', 'R',
202	PCI_VENDOR_NS & 0xff, PCI_VENDOR_NS >> 8,
203	PCI_PRODUCT_NS_SATURN & 0xff, PCI_PRODUCT_NS_SATURN >> 8
204};
205
206static const uint8_t cas_promdat2[] = {
207	0x18, 0x00,			/* structure length */
208	0x00,				/* structure revision */
209	0x00,				/* interface revision */
210	PCI_SUBCLASS_NETWORK_ETHERNET,	/* subclass code */
211	PCI_CLASS_NETWORK		/* class code */
212};
213
214#define CAS_LMA_MAXNUM	4
215int
216cas_pci_readvpd(struct cas_softc *sc, struct pci_attach_args *pa,
217    uint8_t *enaddr)
218{
219	struct pci_vpd_largeres *res;
220	struct pci_vpd *vpd;
221	bus_space_handle_t romh;
222	bus_space_tag_t romt;
223	bus_size_t romsize = 0;
224	uint8_t enaddrs[CAS_LMA_MAXNUM][ETHER_ADDR_LEN];
225	bool pcs[4] = {false, false, false, false};
226	uint8_t buf[32], *desc;
227	pcireg_t address;
228	int dataoff, vpdoff, len, lma = 0, phy = 0;
229	int i, rv = -1;
230
231	if (pci_mapreg_map(pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_MEM, 0,
232	    &romt, &romh, NULL, &romsize))
233		return (-1);
234
235	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
236	address |= PCI_MAPREG_ROM_ENABLE;
237	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START, address);
238
239	bus_space_read_region_1(romt, romh, 0, buf, sizeof(buf));
240	if (bcmp(buf, cas_promhdr, sizeof(cas_promhdr)))
241		goto fail;
242
243	dataoff = buf[PROMHDR_PTR_DATA] | (buf[PROMHDR_PTR_DATA + 1] << 8);
244	if (dataoff < 0x1c)
245		goto fail;
246
247	bus_space_read_region_1(romt, romh, dataoff, buf, sizeof(buf));
248	if ((bcmp(buf, cas_promdat, sizeof(cas_promdat)) &&
249	     bcmp(buf, cas_promdat_ns, sizeof(cas_promdat_ns))) ||
250	    bcmp(buf + PROMDATA_DATA2, cas_promdat2, sizeof(cas_promdat2)))
251		goto fail;
252
253	vpdoff = buf[PROMDATA_PTR_VPD] | (buf[PROMDATA_PTR_VPD + 1] << 8);
254	if (vpdoff < 0x1c)
255		goto fail;
256
257next:
258	bus_space_read_region_1(romt, romh, vpdoff, buf, sizeof(buf));
259	if (!PCI_VPDRES_ISLARGE(buf[0]))
260		goto fail;
261
262	res = (struct pci_vpd_largeres *)buf;
263	vpdoff += sizeof(*res);
264
265	len = ((res->vpdres_len_msb << 8) + res->vpdres_len_lsb);
266	switch (PCI_VPDRES_LARGE_NAME(res->vpdres_byte0)) {
267	case PCI_VPDRES_TYPE_IDENTIFIER_STRING:
268		/* Skip identifier string. */
269		vpdoff += len;
270		goto next;
271
272	case PCI_VPDRES_TYPE_VPD:
273#ifdef CAS_DEBUG
274	printf("\n");
275	for (i = 0; i < len; i++) {
276		uint8_t byte;
277		if (i % 16 == 0)
278			printf("%04x :", i);
279		byte = bus_space_read_1(romt, romh, vpdoff + i);
280		printf(" %02x", byte);
281		if (i % 16 == 15)
282			printf("\n");
283	}
284	printf("\n");
285#endif
286
287		while (len > 0) {
288			bus_space_read_region_1(romt, romh, vpdoff,
289			     buf, sizeof(buf));
290
291			vpd = (struct pci_vpd *)buf;
292			vpdoff += sizeof(*vpd) + vpd->vpd_len;
293			len -= sizeof(*vpd) + vpd->vpd_len;
294
295			/*
296			 * We're looking for an "Enhanced" VPD...
297			 */
298			if (vpd->vpd_key0 != 'Z')
299				continue;
300
301			desc = buf + sizeof(*vpd);
302
303			/*
304			 * ...which is an instance property...
305			 */
306			if (desc[0] != 'I')
307				continue;
308			desc += 3;
309
310			if (desc[0] == 'B' || desc[1] == ETHER_ADDR_LEN) {
311				/*
312				 * ...that's a byte array with the proper
313				 * length for a MAC address...
314				 */
315				desc += 2;
316
317				/*
318				 * ...named "local-mac-address".
319				 */
320				if (strcmp(desc, CAS_LOCAL_MAC_ADDRESS) != 0)
321					continue;
322				desc += sizeof(CAS_LOCAL_MAC_ADDRESS);
323
324				if (lma == CAS_LMA_MAXNUM)
325					continue;
326
327				memcpy(enaddrs[lma], desc, ETHER_ADDR_LEN);
328				lma++;
329				rv = 0;
330				continue;
331			} else if (desc[0] == 'S') {
332				size_t k;
333
334				/* String */
335				desc += 2;
336#ifdef CAS_DEBUG
337				/* ...named "pcs". */
338				printf("STR: \"%s\"\n", desc);
339				if (strcmp(desc, CAS_PHY_TYPE_PCS) != 0)
340					continue;
341				desc += sizeof(CAS_PHY_TYPE_PCS);
342				printf("STR: \"%s\"\n", desc);
343#endif
344				/* ...named "phy-interface" or "phy-type". */
345				if (strcmp(desc, CAS_PHY_INTERFACE) == 0)
346					k = sizeof(CAS_PHY_INTERFACE);
347				else if (strcmp(desc, CAS_PHY_TYPE) == 0)
348					k = sizeof(CAS_PHY_TYPE);
349				else
350					continue;
351
352				desc += k;
353#ifdef CAS_DEBUG
354				printf("STR: \"%s\"\n", desc);
355#endif
356				if (strcmp(desc, CAS_PHY_TYPE_PCS) == 0)
357					pcs[phy] = true;
358				phy++;
359				continue;
360			}
361		}
362		break;
363
364	default:
365		goto fail;
366	}
367
368	/*
369	 * Multi port card has bridge chip. The device number is fixed:
370	 * e.g.
371	 * p0: 005:00:0
372	 * p1: 005:01:0
373	 * p2: 006:02:0
374	 * p3: 006:03:0
375	 */
376	if (enaddr != 0) {
377		i = 0;
378		if ((lma > 1) && (pa->pa_device < CAS_LMA_MAXNUM)
379		    && (pa->pa_device < lma))
380			i = pa->pa_device;
381		memcpy(enaddr, enaddrs[i], ETHER_ADDR_LEN);
382	}
383	if (pcs[pa->pa_device])
384		sc->sc_flags |= CAS_SERDES;
385 fail:
386	if (romsize != 0)
387		bus_space_unmap(romt, romh, romsize);
388
389	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM);
390	address &= ~PCI_MAPREG_ROM_ENABLE;
391	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM, address);
392
393	return (rv);
394}
395
396void
397cas_attach(device_t parent, device_t self, void *aux)
398{
399	struct pci_attach_args *pa = aux;
400	const struct device_compatible_entry *dce;
401	struct cas_softc *sc = device_private(self);
402	prop_data_t data;
403	uint8_t enaddr[ETHER_ADDR_LEN];
404
405	sc->sc_dev = self;
406	pci_aprint_devinfo(pa, NULL);
407	sc->sc_rev = PCI_REVISION(pa->pa_class);
408
409	if (pci_dma64_available(pa))
410		sc->sc_dmatag = pa->pa_dmat64;
411	else
412		sc->sc_dmatag = pa->pa_dmat;
413
414	dce = pci_compatible_lookup(pa, compat_data);
415	KASSERT(dce != NULL);
416	sc->sc_variant = (u_int)dce->value;
417
418	aprint_debug_dev(sc->sc_dev, "variant = %d\n", sc->sc_variant);
419
420#define PCI_CAS_BASEADDR	0x10
421	if (pci_mapreg_map(pa, PCI_CAS_BASEADDR, PCI_MAPREG_TYPE_MEM, 0,
422	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_size) != 0) {
423		aprint_error_dev(sc->sc_dev,
424		    "unable to map device registers\n");
425		return;
426	}
427
428	if ((data = prop_dictionary_get(device_properties(sc->sc_dev),
429	    "mac-address")) != NULL)
430		memcpy(enaddr, prop_data_value(data), ETHER_ADDR_LEN);
431	if (cas_pci_readvpd(sc, pa, (data == NULL) ? enaddr : 0) != 0) {
432		aprint_error_dev(sc->sc_dev, "no Ethernet address found\n");
433		memset(enaddr, 0, sizeof(enaddr));
434	}
435
436	sc->sc_burst = 16;	/* XXX */
437
438	sc->sc_att_stage = CAS_ATT_BACKEND_0;
439
440	if (pci_intr_map(pa, &sc->sc_handle) != 0) {
441		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
442		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size);
443		return;
444	}
445	sc->sc_pc = pa->pa_pc;
446	if (!cas_estintr(sc, CAS_INTR_PCI)) {
447		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size);
448		aprint_error_dev(sc->sc_dev, "unable to establish interrupt\n");
449		return;
450	}
451
452	sc->sc_att_stage = CAS_ATT_BACKEND_1;
453
454	/*
455	 * call the main configure
456	 */
457	cas_config(sc, enaddr);
458
459	if (pmf_device_register1(sc->sc_dev,
460	    cas_suspend, cas_resume, cas_shutdown))
461		pmf_class_network_register(sc->sc_dev, &sc->sc_ethercom.ec_if);
462	else
463		aprint_error_dev(sc->sc_dev,
464		    "could not establish power handlers\n");
465
466	sc->sc_att_stage = CAS_ATT_FINISHED;
467		/*FALLTHROUGH*/
468}
469
470/*
471 * cas_config:
472 *
473 *	Attach a Cassini interface to the system.
474 */
475void
476cas_config(struct cas_softc *sc, const uint8_t *enaddr)
477{
478	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
479	struct mii_data *mii = &sc->sc_mii;
480	struct mii_softc *child;
481	uint32_t reg;
482	int i, error;
483
484	/* Make sure the chip is stopped. */
485	ifp->if_softc = sc;
486	cas_reset(sc);
487
488	/*
489	 * Allocate the control data structures, and create and load the
490	 * DMA map for it.
491	 */
492	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
493	    sizeof(struct cas_control_data), CAS_PAGE_SIZE, 0, &sc->sc_cdseg,
494	    1, &sc->sc_cdnseg, 0)) != 0) {
495		aprint_error_dev(sc->sc_dev,
496		    "unable to allocate control data, error = %d\n",
497		    error);
498		cas_partial_detach(sc, CAS_ATT_0);
499	}
500
501	/* XXX should map this in with correct endianness */
502	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg,
503	    sc->sc_cdnseg, sizeof(struct cas_control_data),
504	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
505		aprint_error_dev(sc->sc_dev,
506		    "unable to map control data, error = %d\n", error);
507		cas_partial_detach(sc, CAS_ATT_1);
508	}
509
510	if ((error = bus_dmamap_create(sc->sc_dmatag,
511	    sizeof(struct cas_control_data), 1,
512	    sizeof(struct cas_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
513		aprint_error_dev(sc->sc_dev,
514		    "unable to create control data DMA map, error = %d\n",
515		    error);
516		cas_partial_detach(sc, CAS_ATT_2);
517	}
518
519	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
520	    sc->sc_control_data, sizeof(struct cas_control_data), NULL,
521	    0)) != 0) {
522		aprint_error_dev(sc->sc_dev,
523		    "unable to load control data DMA map, error = %d\n",
524		    error);
525		cas_partial_detach(sc, CAS_ATT_3);
526	}
527
528	memset(sc->sc_control_data, 0, sizeof(struct cas_control_data));
529
530	/*
531	 * Create the receive buffer DMA maps.
532	 */
533	for (i = 0; i < CAS_NRXDESC; i++) {
534		bus_dma_segment_t seg;
535		char *kva;
536		int rseg;
537
538		if ((error = bus_dmamem_alloc(sc->sc_dmatag, CAS_PAGE_SIZE,
539		    CAS_PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
540			aprint_error_dev(sc->sc_dev,
541			    "unable to alloc rx DMA mem %d, error = %d\n",
542			    i, error);
543			cas_partial_detach(sc, CAS_ATT_5);
544		}
545		sc->sc_rxsoft[i].rxs_dmaseg = seg;
546
547		if ((error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
548		    CAS_PAGE_SIZE, (void **)&kva, BUS_DMA_NOWAIT)) != 0) {
549			aprint_error_dev(sc->sc_dev,
550			    "unable to alloc rx DMA mem %d, error = %d\n",
551			    i, error);
552			cas_partial_detach(sc, CAS_ATT_5);
553		}
554		sc->sc_rxsoft[i].rxs_kva = kva;
555
556		if ((error = bus_dmamap_create(sc->sc_dmatag, CAS_PAGE_SIZE, 1,
557		    CAS_PAGE_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
558			aprint_error_dev(sc->sc_dev,
559			    "unable to create rx DMA map %d, error = %d\n",
560			    i, error);
561			cas_partial_detach(sc, CAS_ATT_5);
562		}
563
564		if ((error = bus_dmamap_load(sc->sc_dmatag,
565		   sc->sc_rxsoft[i].rxs_dmamap, kva, CAS_PAGE_SIZE, NULL,
566		   BUS_DMA_NOWAIT)) != 0) {
567			aprint_error_dev(sc->sc_dev,
568			    "unable to load rx DMA map %d, error = %d\n",
569			    i, error);
570			cas_partial_detach(sc, CAS_ATT_5);
571		}
572	}
573
574	/*
575	 * Create the transmit buffer DMA maps.
576	 */
577	for (i = 0; i < CAS_NTXDESC; i++) {
578		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
579		    CAS_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
580		    &sc->sc_txd[i].sd_map)) != 0) {
581			aprint_error_dev(sc->sc_dev,
582			    "unable to create tx DMA map %d, error = %d\n",
583			    i, error);
584			cas_partial_detach(sc, CAS_ATT_6);
585		}
586		sc->sc_txd[i].sd_mbuf = NULL;
587	}
588
589	/*
590	 * From this point forward, the attachment cannot fail.  A failure
591	 * before this point releases all resources that may have been
592	 * allocated.
593	 */
594
595	/* Announce ourselves. */
596	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
597	    ether_sprintf(enaddr));
598	aprint_naive(": Ethernet controller\n");
599
600	/* Get RX FIFO size */
601	sc->sc_rxfifosize = 16 * 1024;
602
603	/* Initialize ifnet structure. */
604	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
605	ifp->if_softc = sc;
606	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
607	ifp->if_start = cas_start;
608	ifp->if_ioctl = cas_ioctl;
609	ifp->if_watchdog = cas_watchdog;
610	ifp->if_stop = cas_stop;
611	ifp->if_init = cas_init;
612	IFQ_SET_MAXLEN(&ifp->if_snd, CAS_NTXDESC - 1);
613	IFQ_SET_READY(&ifp->if_snd);
614
615	/* Initialize ifmedia structures and MII info */
616	mii->mii_ifp = ifp;
617	mii->mii_readreg = cas_mii_readreg;
618	mii->mii_writereg = cas_mii_writereg;
619	mii->mii_statchg = cas_mii_statchg;
620
621	ifmedia_init(&mii->mii_media, 0, cas_mediachange, cas_mediastatus);
622	sc->sc_ethercom.ec_mii = mii;
623
624	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_MII_DATAPATH_MODE, 0);
625
626	cas_mifinit(sc);
627
628	if (sc->sc_mif_config & (CAS_MIF_CONFIG_MDI1 | CAS_MIF_CONFIG_MDI0)) {
629		if (sc->sc_mif_config & CAS_MIF_CONFIG_MDI1) {
630			sc->sc_mif_config |= CAS_MIF_CONFIG_PHY_SEL;
631			bus_space_write_4(sc->sc_memt, sc->sc_memh,
632			    CAS_MIF_CONFIG, sc->sc_mif_config);
633		}
634		/* Enable/unfreeze the GMII pins of Saturn. */
635		if (sc->sc_variant == CAS_SATURN) {
636			reg = bus_space_read_4(sc->sc_memt, sc->sc_memh,
637			    CAS_SATURN_PCFG) & ~CAS_SATURN_PCFG_FSI;
638			if ((sc->sc_mif_config & CAS_MIF_CONFIG_MDI0) != 0)
639				reg |= CAS_SATURN_PCFG_FSI;
640			bus_space_write_4(sc->sc_memt, sc->sc_memh,
641			    CAS_SATURN_PCFG, reg);
642			/* Read to flush */
643			bus_space_read_4(sc->sc_memt, sc->sc_memh,
644			    CAS_SATURN_PCFG);
645			DELAY(10000);
646		}
647	}
648
649	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
650	    MII_OFFSET_ANY, 0);
651
652	child = LIST_FIRST(&mii->mii_phys);
653	if (child == NULL &&
654	    sc->sc_mif_config & (CAS_MIF_CONFIG_MDI0 | CAS_MIF_CONFIG_MDI1)) {
655		/*
656		 * Try the external PCS SERDES if we didn't find any
657		 * MII devices.
658		 */
659		bus_space_write_4(sc->sc_memt, sc->sc_memh,
660		    CAS_MII_DATAPATH_MODE, CAS_MII_DATAPATH_SERDES);
661
662		bus_space_write_4(sc->sc_memt, sc->sc_memh,
663		     CAS_MII_CONFIG, CAS_MII_CONFIG_ENABLE);
664
665		mii->mii_readreg = cas_pcs_readreg;
666		mii->mii_writereg = cas_pcs_writereg;
667
668		mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
669		    MII_OFFSET_ANY, MIIF_NOISOLATE);
670	}
671
672	child = LIST_FIRST(&mii->mii_phys);
673	if (child == NULL) {
674		/* No PHY attached */
675		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
676		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
677	} else {
678		/*
679		 * Walk along the list of attached MII devices and
680		 * establish an `MII instance' to `phy number'
681		 * mapping. We'll use this mapping in media change
682		 * requests to determine which phy to use to program
683		 * the MIF configuration register.
684		 */
685		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
686			/*
687			 * Note: we support just two PHYs: the built-in
688			 * internal device and an external on the MII
689			 * connector.
690			 */
691			if (child->mii_phy > 1 || child->mii_inst > 1) {
692				aprint_error_dev(sc->sc_dev,
693				    "cannot accommodate MII device %s"
694				    " at phy %d, instance %d\n",
695				    device_xname(child->mii_dev),
696				    child->mii_phy, child->mii_inst);
697				continue;
698			}
699
700			sc->sc_phys[child->mii_inst] = child->mii_phy;
701		}
702
703		/*
704		 * XXX - we can really do the following ONLY if the
705		 * phy indeed has the auto negotiation capability!!
706		 */
707		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
708	}
709
710	/* claim 802.1q capability */
711	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
712
713	/* Attach the interface. */
714	if_attach(ifp);
715	if_deferred_start_init(ifp, NULL);
716	ether_ifattach(ifp, enaddr);
717
718	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
719			  RND_TYPE_NET, RND_FLAG_DEFAULT);
720
721	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
722	    NULL, device_xname(sc->sc_dev), "interrupts");
723
724	callout_init(&sc->sc_tick_ch, 0);
725	callout_setfunc(&sc->sc_tick_ch, cas_tick, sc);
726
727	return;
728}
729
730int
731cas_detach(device_t self, int flags)
732{
733	int i;
734	struct cas_softc *sc = device_private(self);
735	bus_space_tag_t t = sc->sc_memt;
736	bus_space_handle_t h = sc->sc_memh;
737	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
738
739	/*
740	 * Free any resources we've allocated during the failed attach
741	 * attempt.  Do this in reverse order and fall through.
742	 */
743	switch (sc->sc_att_stage) {
744	case CAS_ATT_FINISHED:
745		bus_space_write_4(t, h, CAS_INTMASK, ~(uint32_t)0);
746		pmf_device_deregister(self);
747		cas_stop(&sc->sc_ethercom.ec_if, 1);
748		evcnt_detach(&sc->sc_ev_intr);
749
750		rnd_detach_source(&sc->rnd_source);
751
752		ether_ifdetach(ifp);
753		if_detach(ifp);
754
755		callout_destroy(&sc->sc_tick_ch);
756
757		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
758
759		ifmedia_fini(&sc->sc_mii.mii_media);
760
761		/*FALLTHROUGH*/
762	case CAS_ATT_MII:
763	case CAS_ATT_7:
764	case CAS_ATT_6:
765		for (i = 0; i < CAS_NTXDESC; i++) {
766			if (sc->sc_txd[i].sd_map != NULL)
767				bus_dmamap_destroy(sc->sc_dmatag,
768				    sc->sc_txd[i].sd_map);
769		}
770		/*FALLTHROUGH*/
771	case CAS_ATT_5:
772		for (i = 0; i < CAS_NRXDESC; i++) {
773			if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
774				bus_dmamap_unload(sc->sc_dmatag,
775				    sc->sc_rxsoft[i].rxs_dmamap);
776			if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
777				bus_dmamap_destroy(sc->sc_dmatag,
778				    sc->sc_rxsoft[i].rxs_dmamap);
779			if (sc->sc_rxsoft[i].rxs_kva != NULL)
780				bus_dmamem_unmap(sc->sc_dmatag,
781				    sc->sc_rxsoft[i].rxs_kva, CAS_PAGE_SIZE);
782			/* XXX   need to check that bus_dmamem_alloc suceeded
783			if (sc->sc_rxsoft[i].rxs_dmaseg != NULL)
784			*/
785			bus_dmamem_free(sc->sc_dmatag,
786			    &(sc->sc_rxsoft[i].rxs_dmaseg), 1);
787		}
788		bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
789		/*FALLTHROUGH*/
790	case CAS_ATT_4:
791	case CAS_ATT_3:
792		bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
793		/*FALLTHROUGH*/
794	case CAS_ATT_2:
795		bus_dmamem_unmap(sc->sc_dmatag, sc->sc_control_data,
796		    sizeof(struct cas_control_data));
797		/*FALLTHROUGH*/
798	case CAS_ATT_1:
799		bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
800		/*FALLTHROUGH*/
801	case CAS_ATT_0:
802		sc->sc_att_stage = CAS_ATT_0;
803		/*FALLTHROUGH*/
804	case CAS_ATT_BACKEND_2:
805	case CAS_ATT_BACKEND_1:
806		if (sc->sc_ih != NULL) {
807			pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
808			sc->sc_ih = NULL;
809		}
810		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size);
811		/*FALLTHROUGH*/
812	case CAS_ATT_BACKEND_0:
813		break;
814	}
815	return 0;
816}
817
818static void
819cas_partial_detach(struct cas_softc *sc, enum cas_attach_stage stage)
820{
821	cfattach_t ca = device_cfattach(sc->sc_dev);
822
823	sc->sc_att_stage = stage;
824	(*ca->ca_detach)(sc->sc_dev, 0);
825}
826
827void
828cas_tick(void *arg)
829{
830	struct cas_softc *sc = arg;
831	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
832	bus_space_tag_t t = sc->sc_memt;
833	bus_space_handle_t mac = sc->sc_memh;
834	int s;
835	uint32_t v;
836
837	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
838
839	/* unload collisions counters */
840	v = bus_space_read_4(t, mac, CAS_MAC_EXCESS_COLL_CNT) +
841	    bus_space_read_4(t, mac, CAS_MAC_LATE_COLL_CNT);
842	if_statadd_ref(ifp, nsr, if_collisions, v +
843	    bus_space_read_4(t, mac, CAS_MAC_NORM_COLL_CNT) +
844	    bus_space_read_4(t, mac, CAS_MAC_FIRST_COLL_CNT));
845	if_statadd_ref(ifp, nsr, if_oerrors, v);
846
847	/* read error counters */
848	if_statadd_ref(ifp, nsr, if_ierrors,
849	    bus_space_read_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT) +
850	    bus_space_read_4(t, mac, CAS_MAC_RX_ALIGN_ERR) +
851	    bus_space_read_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT) +
852	    bus_space_read_4(t, mac, CAS_MAC_RX_CODE_VIOL));
853
854	IF_STAT_PUTREF(ifp);
855
856	/* clear the hardware counters */
857	bus_space_write_4(t, mac, CAS_MAC_NORM_COLL_CNT, 0);
858	bus_space_write_4(t, mac, CAS_MAC_FIRST_COLL_CNT, 0);
859	bus_space_write_4(t, mac, CAS_MAC_EXCESS_COLL_CNT, 0);
860	bus_space_write_4(t, mac, CAS_MAC_LATE_COLL_CNT, 0);
861	bus_space_write_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT, 0);
862	bus_space_write_4(t, mac, CAS_MAC_RX_ALIGN_ERR, 0);
863	bus_space_write_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT, 0);
864	bus_space_write_4(t, mac, CAS_MAC_RX_CODE_VIOL, 0);
865
866	s = splnet();
867	mii_tick(&sc->sc_mii);
868	splx(s);
869
870	callout_schedule(&sc->sc_tick_ch, hz);
871}
872
873int
874cas_bitwait(struct cas_softc *sc, bus_space_handle_t h, int r,
875    uint32_t clr, uint32_t set)
876{
877	int i;
878	uint32_t reg;
879
880	for (i = TRIES; i--; DELAY(100)) {
881		reg = bus_space_read_4(sc->sc_memt, h, r);
882		if ((reg & clr) == 0 && (reg & set) == set)
883			return (1);
884	}
885
886	return (0);
887}
888
889void
890cas_reset(struct cas_softc *sc)
891{
892	bus_space_tag_t t = sc->sc_memt;
893	bus_space_handle_t h = sc->sc_memh;
894	int s;
895
896	s = splnet();
897	DPRINTF(sc, ("%s: cas_reset\n", device_xname(sc->sc_dev)));
898	cas_reset_rx(sc);
899	cas_reset_tx(sc);
900
901	/* Disable interrupts */
902	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_INTMASK, ~(uint32_t)0);
903
904	/* Do a full reset */
905	bus_space_write_4(t, h, CAS_RESET,
906	    CAS_RESET_RX | CAS_RESET_TX | CAS_RESET_BLOCK_PCS);
907	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0))
908		aprint_error_dev(sc->sc_dev, "cannot reset device\n");
909	splx(s);
910}
911
912
913/*
914 * cas_rxdrain:
915 *
916 *	Drain the receive queue.
917 */
918void
919cas_rxdrain(struct cas_softc *sc)
920{
921	/* Nothing to do yet. */
922}
923
924/*
925 * Reset the whole thing.
926 */
927void
928cas_stop(struct ifnet *ifp, int disable)
929{
930	struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
931	struct cas_sxd *sd;
932	uint32_t i;
933
934	DPRINTF(sc, ("%s: cas_stop\n", device_xname(sc->sc_dev)));
935
936	callout_stop(&sc->sc_tick_ch);
937
938	/*
939	 * Mark the interface down and cancel the watchdog timer.
940	 */
941	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
942	ifp->if_timer = 0;
943
944	mii_down(&sc->sc_mii);
945
946	cas_reset_rx(sc);
947	cas_reset_tx(sc);
948
949	/*
950	 * Release any queued transmit buffers.
951	 */
952	for (i = 0; i < CAS_NTXDESC; i++) {
953		sd = &sc->sc_txd[i];
954		if (sd->sd_mbuf != NULL) {
955			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
956			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
957			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
958			m_freem(sd->sd_mbuf);
959			sd->sd_mbuf = NULL;
960		}
961	}
962	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;
963
964	if (disable)
965		cas_rxdrain(sc);
966}
967
968
969/*
970 * Reset the receiver
971 */
972int
973cas_reset_rx(struct cas_softc *sc)
974{
975	bus_space_tag_t t = sc->sc_memt;
976	bus_space_handle_t h = sc->sc_memh;
977
978	/*
979	 * Resetting while DMA is in progress can cause a bus hang, so we
980	 * disable DMA first.
981	 */
982	cas_disable_rx(sc);
983	bus_space_write_4(t, h, CAS_RX_CONFIG, 0);
984	/* Wait till it finishes */
985	if (!cas_bitwait(sc, h, CAS_RX_CONFIG, 1, 0))
986		aprint_error_dev(sc->sc_dev, "cannot disable rx dma\n");
987	/* Wait 5ms extra. */
988	delay(5000);
989
990	/* Finally, reset the ERX */
991	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_RX);
992	/* Wait till it finishes */
993	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX, 0)) {
994		aprint_error_dev(sc->sc_dev, "cannot reset receiver\n");
995		return (1);
996	}
997	return (0);
998}
999
1000
1001/*
1002 * Reset the transmitter
1003 */
1004int
1005cas_reset_tx(struct cas_softc *sc)
1006{
1007	bus_space_tag_t t = sc->sc_memt;
1008	bus_space_handle_t h = sc->sc_memh;
1009
1010	/*
1011	 * Resetting while DMA is in progress can cause a bus hang, so we
1012	 * disable DMA first.
1013	 */
1014	cas_disable_tx(sc);
1015	bus_space_write_4(t, h, CAS_TX_CONFIG, 0);
1016	/* Wait till it finishes */
1017	if (!cas_bitwait(sc, h, CAS_TX_CONFIG, 1, 0))
1018		aprint_error_dev(sc->sc_dev, "cannot disable tx dma\n");
1019	/* Wait 5ms extra. */
1020	delay(5000);
1021
1022	/* Finally, reset the ETX */
1023	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_TX);
1024	/* Wait till it finishes */
1025	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_TX, 0)) {
1026		aprint_error_dev(sc->sc_dev, "cannot reset transmitter\n");
1027		return (1);
1028	}
1029	return (0);
1030}
1031
1032/*
1033 * Disable receiver.
1034 */
1035int
1036cas_disable_rx(struct cas_softc *sc)
1037{
1038	bus_space_tag_t t = sc->sc_memt;
1039	bus_space_handle_t h = sc->sc_memh;
1040	uint32_t cfg;
1041
1042	/* Flip the enable bit */
1043	cfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
1044	cfg &= ~CAS_MAC_RX_ENABLE;
1045	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, cfg);
1046
1047	/* Wait for it to finish */
1048	return (cas_bitwait(sc, h, CAS_MAC_RX_CONFIG, CAS_MAC_RX_ENABLE, 0));
1049}
1050
1051/*
1052 * Disable transmitter.
1053 */
1054int
1055cas_disable_tx(struct cas_softc *sc)
1056{
1057	bus_space_tag_t t = sc->sc_memt;
1058	bus_space_handle_t h = sc->sc_memh;
1059	uint32_t cfg;
1060
1061	/* Flip the enable bit */
1062	cfg = bus_space_read_4(t, h, CAS_MAC_TX_CONFIG);
1063	cfg &= ~CAS_MAC_TX_ENABLE;
1064	bus_space_write_4(t, h, CAS_MAC_TX_CONFIG, cfg);
1065
1066	/* Wait for it to finish */
1067	return (cas_bitwait(sc, h, CAS_MAC_TX_CONFIG, CAS_MAC_TX_ENABLE, 0));
1068}
1069
1070/*
1071 * Initialize interface.
1072 */
1073int
1074cas_meminit(struct cas_softc *sc)
1075{
1076	int i;
1077
1078	/*
1079	 * Initialize the transmit descriptor ring.
1080	 */
1081	for (i = 0; i < CAS_NTXDESC; i++) {
1082		sc->sc_txdescs[i].cd_flags = 0;
1083		sc->sc_txdescs[i].cd_addr = 0;
1084	}
1085	CAS_CDTXSYNC(sc, 0, CAS_NTXDESC,
1086	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1087
1088	/*
1089	 * Initialize the receive descriptor and receive job
1090	 * descriptor rings.
1091	 */
1092	for (i = 0; i < CAS_NRXDESC; i++)
1093		CAS_INIT_RXDESC(sc, i, i);
1094	sc->sc_rxdptr = 0;
1095	sc->sc_rxptr = 0;
1096
1097	/*
1098	 * Initialize the receive completion ring.
1099	 */
1100	for (i = 0; i < CAS_NRXCOMP; i++) {
1101		sc->sc_rxcomps[i].cc_word[0] = 0;
1102		sc->sc_rxcomps[i].cc_word[1] = 0;
1103		sc->sc_rxcomps[i].cc_word[2] = 0;
1104		sc->sc_rxcomps[i].cc_word[3] = CAS_DMA_WRITE(CAS_RC3_OWN);
1105		CAS_CDRXCSYNC(sc, i,
1106		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1107	}
1108
1109	return (0);
1110}
1111
1112int
1113cas_ringsize(int sz)
1114{
1115	switch (sz) {
1116	case 32:
1117		return CAS_RING_SZ_32;
1118	case 64:
1119		return CAS_RING_SZ_64;
1120	case 128:
1121		return CAS_RING_SZ_128;
1122	case 256:
1123		return CAS_RING_SZ_256;
1124	case 512:
1125		return CAS_RING_SZ_512;
1126	case 1024:
1127		return CAS_RING_SZ_1024;
1128	case 2048:
1129		return CAS_RING_SZ_2048;
1130	case 4096:
1131		return CAS_RING_SZ_4096;
1132	case 8192:
1133		return CAS_RING_SZ_8192;
1134	default:
1135		aprint_error("cas: invalid Receive Descriptor ring size %d\n",
1136		    sz);
1137		return CAS_RING_SZ_32;
1138	}
1139}
1140
1141int
1142cas_cringsize(int sz)
1143{
1144	int i;
1145
1146	for (i = 0; i < 9; i++)
1147		if (sz == (128 << i))
1148			return i;
1149
1150	aprint_error("cas: invalid completion ring size %d\n", sz);
1151	return 128;
1152}
1153
1154/*
1155 * Initialization of interface; set up initialization block
1156 * and transmit/receive descriptor rings.
1157 */
1158int
1159cas_init(struct ifnet *ifp)
1160{
1161	struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
1162	bus_space_tag_t t = sc->sc_memt;
1163	bus_space_handle_t h = sc->sc_memh;
1164	int s;
1165	u_int max_frame_size;
1166	uint32_t v;
1167
1168	s = splnet();
1169
1170	DPRINTF(sc, ("%s: cas_init: calling stop\n", device_xname(sc->sc_dev)));
1171	/*
1172	 * Initialization sequence. The numbered steps below correspond
1173	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
1174	 * Channel Engine manual (part of the PCIO manual).
1175	 * See also the STP2002-STQ document from Sun Microsystems.
1176	 */
1177
1178	/* step 1 & 2. Reset the Ethernet Channel */
1179	cas_stop(ifp, 0);
1180	cas_reset(sc);
1181	DPRINTF(sc, ("%s: cas_init: restarting\n", device_xname(sc->sc_dev)));
1182
1183	/* Re-initialize the MIF */
1184	cas_mifinit(sc);
1185
1186	/* step 3. Setup data structures in host memory */
1187	cas_meminit(sc);
1188
1189	/* step 4. TX MAC registers & counters */
1190	cas_init_regs(sc);
1191	max_frame_size = ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN;
1192	v = (max_frame_size) | (0x2000 << 16) /* Burst size */;
1193	bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);
1194
1195	/* step 5. RX MAC registers & counters */
1196	cas_iff(sc);
1197
1198	/* step 6 & 7. Program Descriptor Ring Base Addresses */
1199	KASSERT((CAS_CDTXADDR(sc, 0) & 0x1fff) == 0);
1200	bus_space_write_4(t, h, CAS_TX_RING_PTR_HI,
1201	    BUS_ADDR_HI32(CAS_CDTXADDR(sc, 0)));
1202	bus_space_write_4(t, h, CAS_TX_RING_PTR_LO,
1203	    BUS_ADDR_LO32(CAS_CDTXADDR(sc, 0)));
1204
1205	KASSERT((CAS_CDRXADDR(sc, 0) & 0x1fff) == 0);
1206	bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI,
1207	    BUS_ADDR_HI32(CAS_CDRXADDR(sc, 0)));
1208	bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO,
1209	    BUS_ADDR_LO32(CAS_CDRXADDR(sc, 0)));
1210
1211	KASSERT((CAS_CDRXCADDR(sc, 0) & 0x1fff) == 0);
1212	bus_space_write_4(t, h, CAS_RX_CRING_PTR_HI,
1213	    BUS_ADDR_HI32(CAS_CDRXCADDR(sc, 0)));
1214	bus_space_write_4(t, h, CAS_RX_CRING_PTR_LO,
1215	    BUS_ADDR_LO32(CAS_CDRXCADDR(sc, 0)));
1216
1217	if (CAS_PLUS(sc)) {
1218		KASSERT((CAS_CDRXADDR2(sc, 0) & 0x1fff) == 0);
1219		bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI2,
1220		    BUS_ADDR_HI32(CAS_CDRXADDR2(sc, 0)));
1221		bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO2,
1222		    BUS_ADDR_LO32(CAS_CDRXADDR2(sc, 0)));
1223	}
1224
1225	/* step 8. Global Configuration & Interrupt Mask */
1226	cas_estintr(sc, CAS_INTR_REG);
1227
1228	/* step 9. ETX Configuration: use mostly default values */
1229
1230	/* Enable DMA */
1231	v = cas_ringsize(CAS_NTXDESC /*XXX*/) << 10;
1232	bus_space_write_4(t, h, CAS_TX_CONFIG,
1233	    v | CAS_TX_CONFIG_TXDMA_EN | (1 << 24) | (1 << 29));
1234	bus_space_write_4(t, h, CAS_TX_KICK, 0);
1235
1236	/* step 10. ERX Configuration */
1237
1238	/* Encode Receive Descriptor ring size */
1239	v = cas_ringsize(CAS_NRXDESC) << CAS_RX_CONFIG_RXDRNG_SZ_SHIFT;
1240	if (CAS_PLUS(sc))
1241		v |= cas_ringsize(32) << CAS_RX_CONFIG_RXDRNG2_SZ_SHIFT;
1242
1243	/* Encode Receive Completion ring size */
1244	v |= cas_cringsize(CAS_NRXCOMP) << CAS_RX_CONFIG_RXCRNG_SZ_SHIFT;
1245
1246	/* Enable DMA */
1247	bus_space_write_4(t, h, CAS_RX_CONFIG,
1248	    v|(2<<CAS_RX_CONFIG_FBOFF_SHFT) | CAS_RX_CONFIG_RXDMA_EN);
1249
1250	/*
1251	 * The following value is for an OFF Threshold of about 3/4 full
1252	 * and an ON Threshold of 1/4 full.
1253	 */
1254	bus_space_write_4(t, h, CAS_RX_PAUSE_THRESH,
1255	    (3 * sc->sc_rxfifosize / 256) |
1256	    ((sc->sc_rxfifosize / 256) << 12));
1257	bus_space_write_4(t, h, CAS_RX_BLANKING, (6 << 12) | 6);
1258
1259	/* step 11. Configure Media */
1260	mii_ifmedia_change(&sc->sc_mii);
1261
1262	/* step 12. RX_MAC Configuration Register */
1263	v = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
1264	v |= CAS_MAC_RX_ENABLE | CAS_MAC_RX_STRIP_CRC;
1265	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, v);
1266
1267	/* step 14. Issue Transmit Pending command */
1268
1269	/* step 15.  Give the receiver a swift kick */
1270	bus_space_write_4(t, h, CAS_RX_KICK, CAS_NRXDESC-4);
1271	if (CAS_PLUS(sc))
1272		bus_space_write_4(t, h, CAS_RX_KICK2, 4);
1273
1274	/* Start the one second timer. */
1275	callout_schedule(&sc->sc_tick_ch, hz);
1276
1277	ifp->if_flags |= IFF_RUNNING;
1278	ifp->if_flags &= ~IFF_OACTIVE;
1279	ifp->if_timer = 0;
1280	splx(s);
1281
1282	return (0);
1283}
1284
1285void
1286cas_init_regs(struct cas_softc *sc)
1287{
1288	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1289	bus_space_tag_t t = sc->sc_memt;
1290	bus_space_handle_t h = sc->sc_memh;
1291	const u_char *laddr = CLLADDR(ifp->if_sadl);
1292	uint32_t v, r;
1293
1294	/* These regs are not cleared on reset */
1295	sc->sc_inited = 0;
1296	if (!sc->sc_inited) {
1297		/* Load recommended values  */
1298		bus_space_write_4(t, h, CAS_MAC_IPG0, 0x00);
1299		bus_space_write_4(t, h, CAS_MAC_IPG1, 0x08);
1300		bus_space_write_4(t, h, CAS_MAC_IPG2, 0x04);
1301
1302		bus_space_write_4(t, h, CAS_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
1303		/* Max frame and max burst size */
1304		v = ETHER_MAX_LEN | (0x2000 << 16) /* Burst size */;
1305		bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);
1306
1307		bus_space_write_4(t, h, CAS_MAC_PREAMBLE_LEN, 0x07);
1308		bus_space_write_4(t, h, CAS_MAC_JAM_SIZE, 0x04);
1309		bus_space_write_4(t, h, CAS_MAC_ATTEMPT_LIMIT, 0x10);
1310		bus_space_write_4(t, h, CAS_MAC_CONTROL_TYPE, 0x8088);
1311		bus_space_write_4(t, h, CAS_MAC_RANDOM_SEED,
1312		    ((laddr[5]<<8)|laddr[4])&0x3ff);
1313
1314		/* Secondary MAC addresses set to 0:0:0:0:0:0 */
1315		for (r = CAS_MAC_ADDR3; r < CAS_MAC_ADDR42; r += 4)
1316			bus_space_write_4(t, h, r, 0);
1317
1318		/* MAC control addr set to 0:1:c2:0:1:80 */
1319		bus_space_write_4(t, h, CAS_MAC_ADDR42, 0x0001);
1320		bus_space_write_4(t, h, CAS_MAC_ADDR43, 0xc200);
1321		bus_space_write_4(t, h, CAS_MAC_ADDR44, 0x0180);
1322
1323		/* MAC filter addr set to 0:0:0:0:0:0 */
1324		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER0, 0);
1325		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER1, 0);
1326		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER2, 0);
1327
1328		bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK1_2, 0);
1329		bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK0, 0);
1330
1331		/* Hash table initialized to 0 */
1332		for (r = CAS_MAC_HASH0; r <= CAS_MAC_HASH15; r += 4)
1333			bus_space_write_4(t, h, r, 0);
1334
1335		sc->sc_inited = 1;
1336	}
1337
1338	/* Counters need to be zeroed */
1339	bus_space_write_4(t, h, CAS_MAC_NORM_COLL_CNT, 0);
1340	bus_space_write_4(t, h, CAS_MAC_FIRST_COLL_CNT, 0);
1341	bus_space_write_4(t, h, CAS_MAC_EXCESS_COLL_CNT, 0);
1342	bus_space_write_4(t, h, CAS_MAC_LATE_COLL_CNT, 0);
1343	bus_space_write_4(t, h, CAS_MAC_DEFER_TMR_CNT, 0);
1344	bus_space_write_4(t, h, CAS_MAC_PEAK_ATTEMPTS, 0);
1345	bus_space_write_4(t, h, CAS_MAC_RX_FRAME_COUNT, 0);
1346	bus_space_write_4(t, h, CAS_MAC_RX_LEN_ERR_CNT, 0);
1347	bus_space_write_4(t, h, CAS_MAC_RX_ALIGN_ERR, 0);
1348	bus_space_write_4(t, h, CAS_MAC_RX_CRC_ERR_CNT, 0);
1349	bus_space_write_4(t, h, CAS_MAC_RX_CODE_VIOL, 0);
1350
1351	/* Un-pause stuff */
1352	bus_space_write_4(t, h, CAS_MAC_SEND_PAUSE_CMD, 0);
1353
1354	/*
1355	 * Set the station address.
1356	 */
1357	bus_space_write_4(t, h, CAS_MAC_ADDR0, (laddr[4]<<8) | laddr[5]);
1358	bus_space_write_4(t, h, CAS_MAC_ADDR1, (laddr[2]<<8) | laddr[3]);
1359	bus_space_write_4(t, h, CAS_MAC_ADDR2, (laddr[0]<<8) | laddr[1]);
1360}
1361
1362/*
1363 * Receive interrupt.
1364 */
1365int
1366cas_rint(struct cas_softc *sc)
1367{
1368	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1369	bus_space_tag_t t = sc->sc_memt;
1370	bus_space_handle_t h = sc->sc_memh;
1371	struct cas_rxsoft *rxs;
1372	struct mbuf *m;
1373	uint64_t word[4];
1374	int len, off, idx;
1375	int i, skip;
1376	void *cp;
1377
1378	for (i = sc->sc_rxptr;; i = CAS_NEXTRX(i + skip)) {
1379		CAS_CDRXCSYNC(sc, i,
1380		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1381
1382		word[0] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[0]);
1383		word[1] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[1]);
1384		word[2] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[2]);
1385		word[3] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[3]);
1386
1387		/* Stop if the hardware still owns the descriptor. */
1388		if ((word[0] & CAS_RC0_TYPE) == 0 || word[3] & CAS_RC3_OWN)
1389			break;
1390
1391		len = CAS_RC1_HDR_LEN(word[1]);
1392		if (len > 0) {
1393			off = CAS_RC1_HDR_OFF(word[1]);
1394			idx = CAS_RC1_HDR_IDX(word[1]);
1395			rxs = &sc->sc_rxsoft[idx];
1396
1397			DPRINTF(sc, ("hdr at idx %d, off %d, len %d\n",
1398			    idx, off, len));
1399
1400			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1401			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1402
1403			cp = rxs->rxs_kva + off * 256 + ETHER_ALIGN;
1404			m = m_devget(cp, len, 0, ifp);
1405
1406			if (word[0] & CAS_RC0_RELEASE_HDR)
1407				cas_add_rxbuf(sc, idx);
1408
1409			if (m != NULL) {
1410
1411				/*
1412				 * Pass this up to any BPF listeners, but only
1413				 * pass it up the stack if its for us.
1414				 */
1415				m->m_pkthdr.csum_flags = 0;
1416				if_percpuq_enqueue(ifp->if_percpuq, m);
1417			} else
1418				if_statinc(ifp, if_ierrors);
1419		}
1420
1421		len = CAS_RC0_DATA_LEN(word[0]);
1422		if (len > 0) {
1423			off = CAS_RC0_DATA_OFF(word[0]);
1424			idx = CAS_RC0_DATA_IDX(word[0]);
1425			rxs = &sc->sc_rxsoft[idx];
1426
1427			DPRINTF(sc, ("data at idx %d, off %d, len %d\n",
1428			    idx, off, len));
1429
1430			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1431			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1432
1433			/* XXX We should not be copying the packet here. */
1434			cp = rxs->rxs_kva + off + ETHER_ALIGN;
1435			m = m_devget(cp, len, 0, ifp);
1436
1437			if (word[0] & CAS_RC0_RELEASE_DATA)
1438				cas_add_rxbuf(sc, idx);
1439
1440			if (m != NULL) {
1441				/*
1442				 * Pass this up to any BPF listeners, but only
1443				 * pass it up the stack if its for us.
1444				 */
1445				m->m_pkthdr.csum_flags = 0;
1446				if_percpuq_enqueue(ifp->if_percpuq, m);
1447			} else
1448				if_statinc(ifp, if_ierrors);
1449		}
1450
1451		if (word[0] & CAS_RC0_SPLIT)
1452			aprint_error_dev(sc->sc_dev, "split packet\n");
1453
1454		skip = CAS_RC0_SKIP(word[0]);
1455	}
1456
1457	while (sc->sc_rxptr != i) {
1458		sc->sc_rxcomps[sc->sc_rxptr].cc_word[0] = 0;
1459		sc->sc_rxcomps[sc->sc_rxptr].cc_word[1] = 0;
1460		sc->sc_rxcomps[sc->sc_rxptr].cc_word[2] = 0;
1461		sc->sc_rxcomps[sc->sc_rxptr].cc_word[3] =
1462		    CAS_DMA_WRITE(CAS_RC3_OWN);
1463		CAS_CDRXCSYNC(sc, sc->sc_rxptr,
1464		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1465
1466		sc->sc_rxptr = CAS_NEXTRX(sc->sc_rxptr);
1467	}
1468
1469	bus_space_write_4(t, h, CAS_RX_COMP_TAIL, sc->sc_rxptr);
1470
1471	DPRINTF(sc, ("cas_rint: done sc->rxptr %d, complete %d\n",
1472		sc->sc_rxptr, bus_space_read_4(t, h, CAS_RX_COMPLETION)));
1473
1474	return (1);
1475}
1476
1477/*
1478 * cas_add_rxbuf:
1479 *
1480 *	Add a receive buffer to the indicated descriptor.
1481 */
1482int
1483cas_add_rxbuf(struct cas_softc *sc, int idx)
1484{
1485	bus_space_tag_t t = sc->sc_memt;
1486	bus_space_handle_t h = sc->sc_memh;
1487
1488	CAS_INIT_RXDESC(sc, sc->sc_rxdptr, idx);
1489
1490	if ((sc->sc_rxdptr % 4) == 0)
1491		bus_space_write_4(t, h, CAS_RX_KICK, sc->sc_rxdptr);
1492
1493	if (++sc->sc_rxdptr == CAS_NRXDESC)
1494		sc->sc_rxdptr = 0;
1495
1496	return (0);
1497}
1498
1499int
1500cas_eint(struct cas_softc *sc, u_int status)
1501{
1502	char bits[128];
1503	if ((status & CAS_INTR_MIF) != 0) {
1504		DPRINTF(sc, ("%s: link status changed\n",
1505		    device_xname(sc->sc_dev)));
1506		return (1);
1507	}
1508
1509	snprintb(bits, sizeof(bits), CAS_INTR_BITS, status);
1510	printf("%s: status=%s\n", device_xname(sc->sc_dev), bits);
1511	return (1);
1512}
1513
1514int
1515cas_pint(struct cas_softc *sc)
1516{
1517	bus_space_tag_t t = sc->sc_memt;
1518	bus_space_handle_t seb = sc->sc_memh;
1519	uint32_t status;
1520
1521	status = bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
1522	status |= bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
1523#ifdef CAS_DEBUG
1524	if (status)
1525		printf("%s: link status changed\n", device_xname(sc->sc_dev));
1526#endif
1527	return (1);
1528}
1529
1530int
1531cas_intr(void *v)
1532{
1533	struct cas_softc *sc = (struct cas_softc *)v;
1534	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1535	bus_space_tag_t t = sc->sc_memt;
1536	bus_space_handle_t seb = sc->sc_memh;
1537	uint32_t status;
1538	int r = 0;
1539#ifdef CAS_DEBUG
1540	char bits[128];
1541#endif
1542
1543	sc->sc_ev_intr.ev_count++;
1544
1545	status = bus_space_read_4(t, seb, CAS_STATUS);
1546#ifdef CAS_DEBUG
1547	snprintb(bits, sizeof(bits), CAS_INTR_BITS, status);
1548#endif
1549	DPRINTF(sc, ("%s: cas_intr: cplt %x status %s\n",
1550		device_xname(sc->sc_dev), (status>>19), bits));
1551
1552	if ((status & CAS_INTR_PCS) != 0)
1553		r |= cas_pint(sc);
1554
1555	if ((status & (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR |
1556	    CAS_INTR_RX_COMP_FULL | CAS_INTR_BERR)) != 0)
1557		r |= cas_eint(sc, status);
1558
1559	if ((status & (CAS_INTR_TX_EMPTY | CAS_INTR_TX_INTME)) != 0)
1560		r |= cas_tint(sc, status);
1561
1562	if ((status & (CAS_INTR_RX_DONE | CAS_INTR_RX_NOBUF)) != 0)
1563		r |= cas_rint(sc);
1564
1565	/* We should eventually do more than just print out error stats. */
1566	if (status & CAS_INTR_TX_MAC) {
1567		int txstat = bus_space_read_4(t, seb, CAS_MAC_TX_STATUS);
1568#ifdef CAS_DEBUG
1569		if (txstat & ~CAS_MAC_TX_XMIT_DONE)
1570			printf("%s: MAC tx fault, status %x\n",
1571			    device_xname(sc->sc_dev), txstat);
1572#endif
1573		if (txstat & (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_PKT_TOO_LONG))
1574			cas_init(ifp);
1575	}
1576	if (status & CAS_INTR_RX_MAC) {
1577		int rxstat = bus_space_read_4(t, seb, CAS_MAC_RX_STATUS);
1578#ifdef CAS_DEBUG
1579		if (rxstat & ~CAS_MAC_RX_DONE)
1580			printf("%s: MAC rx fault, status %x\n",
1581			    device_xname(sc->sc_dev), rxstat);
1582#endif
1583		/*
1584		 * On some chip revisions CAS_MAC_RX_OVERFLOW happen often
1585		 * due to a silicon bug so handle them silently.
1586		 */
1587		if (rxstat & CAS_MAC_RX_OVERFLOW) {
1588			if_statinc(ifp, if_ierrors);
1589			cas_init(ifp);
1590		}
1591#ifdef CAS_DEBUG
1592		else if (rxstat & ~(CAS_MAC_RX_DONE | CAS_MAC_RX_FRAME_CNT))
1593			printf("%s: MAC rx fault, status %x\n",
1594			    device_xname(sc->sc_dev), rxstat);
1595#endif
1596	}
1597	rnd_add_uint32(&sc->rnd_source, status);
1598	return (r);
1599}
1600
1601
1602void
1603cas_watchdog(struct ifnet *ifp)
1604{
1605	struct cas_softc *sc = ifp->if_softc;
1606
1607	DPRINTF(sc, ("cas_watchdog: CAS_RX_CONFIG %x CAS_MAC_RX_STATUS %x "
1608		"CAS_MAC_RX_CONFIG %x\n",
1609		bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_RX_CONFIG),
1610		bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_STATUS),
1611		bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_CONFIG)));
1612
1613	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
1614	if_statinc(ifp, if_oerrors);
1615
1616	/* Try to get more packets going. */
1617	cas_init(ifp);
1618}
1619
1620/*
1621 * Initialize the MII Management Interface
1622 */
1623void
1624cas_mifinit(struct cas_softc *sc)
1625{
1626	bus_space_tag_t t = sc->sc_memt;
1627	bus_space_handle_t mif = sc->sc_memh;
1628
1629	/* Configure the MIF in frame mode */
1630	sc->sc_mif_config = bus_space_read_4(t, mif, CAS_MIF_CONFIG);
1631	sc->sc_mif_config &= ~CAS_MIF_CONFIG_BB_ENA;
1632	bus_space_write_4(t, mif, CAS_MIF_CONFIG, sc->sc_mif_config);
1633}
1634
1635/*
1636 * MII interface
1637 *
1638 * The Cassini MII interface supports at least three different operating modes:
1639 *
1640 * Bitbang mode is implemented using data, clock and output enable registers.
1641 *
1642 * Frame mode is implemented by loading a complete frame into the frame
1643 * register and polling the valid bit for completion.
1644 *
1645 * Polling mode uses the frame register but completion is indicated by
1646 * an interrupt.
1647 *
1648 */
1649int
1650cas_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
1651{
1652	struct cas_softc *sc = device_private(self);
1653	bus_space_tag_t t = sc->sc_memt;
1654	bus_space_handle_t mif = sc->sc_memh;
1655	int n;
1656	uint32_t v;
1657
1658#ifdef CAS_DEBUG
1659	if (sc->sc_debug)
1660		printf("cas_mii_readreg: phy %d reg %d\n", phy, reg);
1661#endif
1662
1663	/* Construct the frame command */
1664	v = (reg << CAS_MIF_REG_SHIFT)	| (phy << CAS_MIF_PHY_SHIFT) |
1665		CAS_MIF_FRAME_READ;
1666
1667	bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
1668	for (n = 0; n < 100; n++) {
1669		DELAY(1);
1670		v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
1671		if (v & CAS_MIF_FRAME_TA0) {
1672			*val = v & CAS_MIF_FRAME_DATA;
1673			return 0;
1674		}
1675	}
1676
1677	printf("%s: mii_read timeout\n", device_xname(sc->sc_dev));
1678	return ETIMEDOUT;
1679}
1680
1681int
1682cas_mii_writereg(device_t self, int phy, int reg, uint16_t val)
1683{
1684	struct cas_softc *sc = device_private(self);
1685	bus_space_tag_t t = sc->sc_memt;
1686	bus_space_handle_t mif = sc->sc_memh;
1687	int n;
1688	uint32_t v;
1689
1690#ifdef CAS_DEBUG
1691	if (sc->sc_debug)
1692		printf("cas_mii_writereg: phy %d reg %d val %x\n",
1693			phy, reg, val);
1694#endif
1695
1696	/* Construct the frame command */
1697	v = CAS_MIF_FRAME_WRITE			|
1698	    (phy << CAS_MIF_PHY_SHIFT)		|
1699	    (reg << CAS_MIF_REG_SHIFT)		|
1700	    (val & CAS_MIF_FRAME_DATA);
1701
1702	bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
1703	for (n = 0; n < 100; n++) {
1704		DELAY(1);
1705		v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
1706		if (v & CAS_MIF_FRAME_TA0)
1707			return 0;
1708	}
1709
1710	printf("%s: mii_write timeout\n", device_xname(sc->sc_dev));
1711	return ETIMEDOUT;
1712}
1713
1714void
1715cas_mii_statchg(struct ifnet *ifp)
1716{
1717	struct cas_softc *sc = ifp->if_softc;
1718#ifdef CAS_DEBUG
1719	int instance = IFM_INST(sc->sc_media.ifm_cur->ifm_media);
1720#endif
1721	bus_space_tag_t t = sc->sc_memt;
1722	bus_space_handle_t mac = sc->sc_memh;
1723	uint32_t v;
1724
1725#ifdef CAS_DEBUG
1726	if (sc->sc_debug)
1727		printf("cas_mii_statchg: status change: phy = %d\n",
1728		    sc->sc_phys[instance]);
1729#endif
1730
1731	/* Set tx full duplex options */
1732	bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, 0);
1733	delay(10000); /* reg must be cleared and delay before changing. */
1734	v = CAS_MAC_TX_ENA_IPG0 | CAS_MAC_TX_NGU | CAS_MAC_TX_NGU_LIMIT |
1735		CAS_MAC_TX_ENABLE;
1736	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
1737		v |= CAS_MAC_TX_IGN_CARRIER | CAS_MAC_TX_IGN_COLLIS;
1738	}
1739	bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, v);
1740
1741	/* XIF Configuration */
1742	v = CAS_MAC_XIF_TX_MII_ENA;
1743	v |= CAS_MAC_XIF_LINK_LED;
1744
1745	/* MII needs echo disable if half duplex. */
1746	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
1747		/* turn on full duplex LED */
1748		v |= CAS_MAC_XIF_FDPLX_LED;
1749	else
1750		/* half duplex -- disable echo */
1751		v |= CAS_MAC_XIF_ECHO_DISABL;
1752
1753	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
1754	case IFM_1000_T:  /* Gigabit using GMII interface */
1755	case IFM_1000_SX:
1756		v |= CAS_MAC_XIF_GMII_MODE;
1757		break;
1758	default:
1759		v &= ~CAS_MAC_XIF_GMII_MODE;
1760	}
1761	bus_space_write_4(t, mac, CAS_MAC_XIF_CONFIG, v);
1762}
1763
1764int
1765cas_pcs_readreg(device_t self, int phy, int reg, uint16_t *val)
1766{
1767	struct cas_softc *sc = device_private(self);
1768	bus_space_tag_t t = sc->sc_memt;
1769	bus_space_handle_t pcs = sc->sc_memh;
1770
1771#ifdef CAS_DEBUG
1772	if (sc->sc_debug)
1773		printf("cas_pcs_readreg: phy %d reg %d\n", phy, reg);
1774#endif
1775
1776	if (phy != CAS_PHYAD_EXTERNAL)
1777		return -1;
1778
1779	switch (reg) {
1780	case MII_BMCR:
1781		reg = CAS_MII_CONTROL;
1782		break;
1783	case MII_BMSR:
1784		reg = CAS_MII_STATUS;
1785		break;
1786	case MII_ANAR:
1787		reg = CAS_MII_ANAR;
1788		break;
1789	case MII_ANLPAR:
1790		reg = CAS_MII_ANLPAR;
1791		break;
1792	case MII_EXTSR:
1793		*val = EXTSR_1000XFDX | EXTSR_1000XHDX;
1794		return 0;
1795	default:
1796		return (0);
1797	}
1798
1799	*val = bus_space_read_4(t, pcs, reg) & 0xffff;
1800	return 0;
1801}
1802
1803int
1804cas_pcs_writereg(device_t self, int phy, int reg, uint16_t val)
1805{
1806	struct cas_softc *sc = device_private(self);
1807	bus_space_tag_t t = sc->sc_memt;
1808	bus_space_handle_t pcs = sc->sc_memh;
1809	int reset = 0;
1810
1811#ifdef CAS_DEBUG
1812	if (sc->sc_debug)
1813		printf("cas_pcs_writereg: phy %d reg %d val %x\n",
1814			phy, reg, val);
1815#endif
1816
1817	if (phy != CAS_PHYAD_EXTERNAL)
1818		return -1;
1819
1820	if (reg == MII_ANAR)
1821		bus_space_write_4(t, pcs, CAS_MII_CONFIG, 0);
1822
1823	switch (reg) {
1824	case MII_BMCR:
1825		reset = (val & CAS_MII_CONTROL_RESET);
1826		reg = CAS_MII_CONTROL;
1827		break;
1828	case MII_BMSR:
1829		reg = CAS_MII_STATUS;
1830		break;
1831	case MII_ANAR:
1832		reg = CAS_MII_ANAR;
1833		break;
1834	case MII_ANLPAR:
1835		reg = CAS_MII_ANLPAR;
1836		break;
1837	default:
1838		return 0;
1839	}
1840
1841	bus_space_write_4(t, pcs, reg, val);
1842
1843	if (reset)
1844		cas_bitwait(sc, pcs, CAS_MII_CONTROL, CAS_MII_CONTROL_RESET, 0);
1845
1846	if (reg == CAS_MII_ANAR || reset)
1847		bus_space_write_4(t, pcs, CAS_MII_CONFIG,
1848		    CAS_MII_CONFIG_ENABLE);
1849
1850	return 0;
1851}
1852
1853int
1854cas_mediachange(struct ifnet *ifp)
1855{
1856	struct cas_softc *sc = ifp->if_softc;
1857	struct mii_data *mii = &sc->sc_mii;
1858
1859	if (mii->mii_instance) {
1860		struct mii_softc *miisc;
1861		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1862			mii_phy_reset(miisc);
1863	}
1864
1865	return (mii_mediachg(&sc->sc_mii));
1866}
1867
1868void
1869cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1870{
1871	struct cas_softc *sc = ifp->if_softc;
1872
1873	mii_pollstat(&sc->sc_mii);
1874	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1875	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1876}
1877
1878/*
1879 * Process an ioctl request.
1880 */
1881int
1882cas_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1883{
1884	struct cas_softc *sc = ifp->if_softc;
1885	int s, error = 0;
1886
1887	s = splnet();
1888
1889	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
1890		error = 0;
1891		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
1892			;
1893		else if (ifp->if_flags & IFF_RUNNING) {
1894			/*
1895			 * Multicast list has changed; set the hardware filter
1896			 * accordingly.
1897			 */
1898			cas_iff(sc);
1899		}
1900	}
1901
1902	splx(s);
1903	return (error);
1904}

static bool
cas_suspend(device_t self, const pmf_qual_t *qual)
{
	struct cas_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;

	bus_space_write_4(t, h, CAS_INTMASK, ~(uint32_t)0);
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}

	return true;
}

static bool
cas_resume(device_t self, const pmf_qual_t *qual)
{
	struct cas_softc *sc = device_private(self);

	return cas_estintr(sc, CAS_INTR_PCI | CAS_INTR_REG);
}

static bool
cas_estintr(struct cas_softc *sc, int what)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	const char *intrstr = NULL;
	char intrbuf[PCI_INTRSTR_LEN];

	/* PCI interrupts */
	if (what & CAS_INTR_PCI) {
		intrstr = pci_intr_string(sc->sc_pc, sc->sc_handle, intrbuf,
		    sizeof(intrbuf));
		sc->sc_ih = pci_intr_establish_xname(sc->sc_pc, sc->sc_handle,
		    IPL_NET, cas_intr, sc, device_xname(sc->sc_dev));
		if (sc->sc_ih == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to establish interrupt");
			if (intrstr != NULL)
				aprint_error(" at %s", intrstr);
			aprint_error("\n");
			return false;
		}

		aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
	}

	/* Interrupt register */
	if (what & CAS_INTR_REG) {
		bus_space_write_4(t, h, CAS_INTMASK,
		    ~(CAS_INTR_TX_INTME | CAS_INTR_TX_EMPTY |
		    CAS_INTR_TX_TAG_ERR |
		    CAS_INTR_RX_DONE | CAS_INTR_RX_NOBUF |
		    CAS_INTR_RX_TAG_ERR |
		    CAS_INTR_RX_COMP_FULL | CAS_INTR_PCS |
		    CAS_INTR_MAC_CONTROL | CAS_INTR_MIF |
		    CAS_INTR_BERR));
		bus_space_write_4(t, h, CAS_MAC_RX_MASK,
		    CAS_MAC_RX_DONE | CAS_MAC_RX_FRAME_CNT);
		bus_space_write_4(t, h, CAS_MAC_TX_MASK, CAS_MAC_TX_XMIT_DONE);
		bus_space_write_4(t, h, CAS_MAC_CONTROL_MASK, 0); /* XXXX */
	}
	return true;
}

bool
cas_shutdown(device_t self, int howto)
{
	struct cas_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	cas_stop(ifp, 1);

	return true;
}

void
cas_iff(struct cas_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	uint32_t crc, hash[16], rxcfg;
	int i;

	rxcfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
	rxcfg &= ~(CAS_MAC_RX_HASH_FILTER | CAS_MAC_RX_PROMISCUOUS |
	    CAS_MAC_RX_PROMISC_GRP);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		goto update;

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator and then using the high
	 * order 8 bits as an index into the 256-bit logical address
	 * filter.  The high order 4 bits select the word, while the
	 * other 4 bits select the bit within the word (where bit 0 is
	 * the MSB).
	 */
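	/*
	 * Illustrative example (added for clarity, not from the
	 * original code): a CRC whose top byte is 0xa3 gives index
	 * 0xa3, which selects word hash[0xa] and bit (15 - 0x3) = 12
	 * from the LSB, i.e. hash[10] |= 1 << 12, which is bit 3
	 * counted from the MSB of that 16-bit word.
	 */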

	/* Clear hash table */
	for (i = 0; i < 16; i++)
		hash[i] = 0;

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/* XXX Use ETHER_F_ALLMULTI in future. */
			ifp->if_flags |= IFF_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto update;
		}

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));

		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	rxcfg |= CAS_MAC_RX_HASH_FILTER;

	/* Now load the hash table into the chip (if we are using it) */
	for (i = 0; i < 16; i++) {
		bus_space_write_4(t, h,
		    CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0),
		    hash[i]);
	}

update:
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if (ifp->if_flags & IFF_PROMISC) {
			rxcfg |= CAS_MAC_RX_PROMISCUOUS;
			/* XXX Use ETHER_F_ALLMULTI in future. */
			ifp->if_flags |= IFF_ALLMULTI;
		} else
			rxcfg |= CAS_MAC_RX_PROMISC_GRP;
	}
	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, rxcfg);
}

int
cas_encap(struct cas_softc *sc, struct mbuf *mhead, uint32_t *bixp)
{
	uint64_t flags;
	uint32_t cur, frag, i;
	bus_dmamap_t map;

	cur = frag = *bixp;
	map = sc->sc_txd[cur].sd_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, mhead,
	    BUS_DMA_NOWAIT) != 0) {
		return (ENOBUFS);
	}

	if ((sc->sc_tx_cnt + map->dm_nsegs) > (CAS_NTXDESC - 2)) {
		bus_dmamap_unload(sc->sc_dmatag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < map->dm_nsegs; i++) {
		sc->sc_txdescs[frag].cd_addr =
		    CAS_DMA_WRITE(map->dm_segs[i].ds_addr);
		flags = (map->dm_segs[i].ds_len & CAS_TD_BUFSIZE) |
		    (i == 0 ? CAS_TD_START_OF_PACKET : 0) |
		    ((i == (map->dm_nsegs - 1)) ? CAS_TD_END_OF_PACKET : 0);
		sc->sc_txdescs[frag].cd_flags = CAS_DMA_WRITE(flags);
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
		    CAS_CDTXOFF(frag), sizeof(struct cas_desc),
		    BUS_DMASYNC_PREWRITE);
		cur = frag;
		if (++frag == CAS_NTXDESC)
			frag = 0;
	}

	sc->sc_tx_cnt += map->dm_nsegs;

	/*
	 * Move the loaded dmamap and the mbuf to the packet's last
	 * descriptor (swapping maps so none is lost); cas_tint()
	 * unloads and frees them there once the packet completes.
	 */
	sc->sc_txd[*bixp].sd_map = sc->sc_txd[cur].sd_map;
	sc->sc_txd[cur].sd_map = map;
	sc->sc_txd[cur].sd_mbuf = mhead;

	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_TX_KICK, frag);

	*bixp = frag;

	/* sync descriptors */

	return (0);
}

/*
 * Transmit interrupt: reclaim the descriptors the chip has marked
 * complete, free their mbufs and let the transmit queue restart.
 */
int
cas_tint(struct cas_softc *sc, uint32_t status)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct cas_sxd *sd;
	uint32_t cons, comp;

	comp = bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_TX_COMPLETION);
	cons = sc->sc_tx_cons;
	while (cons != comp) {
		sd = &sc->sc_txd[cons];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
			if_statinc(ifp, if_opackets);
		}
		sc->sc_tx_cnt--;
		if (++cons == CAS_NTXDESC)
			cons = 0;
	}
	sc->sc_tx_cons = cons;

	if (sc->sc_tx_cnt < CAS_NTXDESC - 2)
		ifp->if_flags &= ~IFF_OACTIVE;
	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;

	if_schedule_deferred_start(ifp);

	return (1);
}

void
cas_start(struct ifnet *ifp)
{
	struct cas_softc *sc = ifp->if_softc;
	struct mbuf *m;
	uint32_t bix;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_tx_prod;
	while (sc->sc_txd[bix].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		bpf_mtap(ifp, m, BPF_D_OUT);

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */
		if (cas_encap(sc, m, &bix)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		ifp->if_timer = 5;
	}

	sc->sc_tx_prod = bix;
}

MODULE(MODULE_CLASS_DRIVER, if_cas, "pci");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
if_cas_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_init_component(cfdriver_ioconf_cas,
		    cfattach_ioconf_cas, cfdata_ioconf_cas);
#endif
		return error;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = config_fini_component(cfdriver_ioconf_cas,
		    cfattach_ioconf_cas, cfdata_ioconf_cas);
#endif
		return error;
	default:
		return ENOTTY;
	}
}