1/*	$OpenBSD: ti.c,v 1.7 2013/10/01 20:06:00 sf Exp $	*/
2
3/*
4 * Copyright (c) 1997, 1998, 1999
5 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *	This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD: src/sys/pci/if_ti.c,v 1.25 2000/01/18 00:26:29 wpaul Exp $
35 */
36
37/*
38 * Alteon Networks Tigon PCI gigabit ethernet driver for OpenBSD.
39 *
40 * Written by Bill Paul <wpaul@ctr.columbia.edu>
41 * Electrical Engineering Department
42 * Columbia University, New York City
43 */
44
45/*
46 * The Alteon Networks Tigon chip contains an embedded R4000 CPU,
47 * gigabit MAC, dual DMA channels and a PCI interface unit. NICs
48 * using the Tigon may have anywhere from 512K to 2MB of SRAM. The
49 * Tigon supports hardware IP, TCP and UDP checksumming, multicast
50 * filtering and jumbo (9014 byte) frames. The hardware is largely
51 * controlled by firmware, which must be loaded into the NIC during
52 * initialization.
53 *
54 * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware
55 * revision, which supports new features such as extended commands,
56 * extended jumbo receive ring descriptors and a mini receive ring.
57 *
58 * Alteon Networks is to be commended for releasing such a vast amount
59 * of development material for the Tigon NIC without requiring an NDA
60 * (although they really should have done it a long time ago). With
61 * any luck, the other vendors will finally wise up and follow Alteon's
62 * stellar example.
63 *
64 * The following people deserve special thanks:
65 * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board
66 *   for testing
67 * - Raymond Lee of Netgear, for providing a pair of Netgear
68 *   GA620 Tigon 2 boards for testing
69 * - Ulf Zimmermann, for bringing the GA620 to my attention and
70 *   convincing me to write this driver.
71 * - Andrew Gallatin for providing FreeBSD/Alpha support.
72 */
73
74#include "bpfilter.h"
75#include "vlan.h"
76
77#include <sys/param.h>
78#include <sys/systm.h>
79#include <sys/sockio.h>
80#include <sys/mbuf.h>
81#include <sys/malloc.h>
82#include <sys/kernel.h>
83#include <sys/socket.h>
84#include <sys/device.h>
85#include <sys/queue.h>
86
87#include <net/if.h>
88#include <net/if_dl.h>
89#include <net/if_types.h>
90
91#ifdef INET
92#include <netinet/in.h>
93#include <netinet/in_systm.h>
94#include <netinet/ip.h>
95#include <netinet/if_ether.h>
96#endif
97
98#include <net/if_media.h>
99
100#if NBPFILTER > 0
101#include <net/bpf.h>
102#endif
103
104#if NVLAN > 0
105#include <net/if_types.h>
106#include <net/if_vlan_var.h>
107#endif
108
109#include <machine/bus.h>
110
111#include <dev/ic/tireg.h>
112#include <dev/ic/tivar.h>
113#include <dev/pci/pcireg.h>
114
115struct cfdriver ti_cd = {
116	NULL, "ti", DV_IFNET
117};
118
119void ti_txeof_tigon1(struct ti_softc *);
120void ti_txeof_tigon2(struct ti_softc *);
121void ti_rxeof(struct ti_softc *);
122
123void ti_stats_update(struct ti_softc *);
124int ti_encap_tigon1(struct ti_softc *, struct mbuf *, u_int32_t *);
125int ti_encap_tigon2(struct ti_softc *, struct mbuf *, u_int32_t *);
126
127int ti_intr(void *);
128void ti_start(struct ifnet *);
129int ti_ioctl(struct ifnet *, u_long, caddr_t);
130void ti_init(void *);
131void ti_init2(struct ti_softc *);
132void ti_stop(struct ti_softc *);
133void ti_watchdog(struct ifnet *);
134int ti_ifmedia_upd(struct ifnet *);
135void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *);
136
137u_int32_t ti_eeprom_putbyte(struct ti_softc *, int);
138u_int8_t ti_eeprom_getbyte(struct ti_softc *, int, u_int8_t *);
139int ti_read_eeprom(struct ti_softc *, caddr_t, int, int);
140
141void ti_add_mcast(struct ti_softc *, struct ether_addr *);
142void ti_del_mcast(struct ti_softc *, struct ether_addr *);
143void ti_iff(struct ti_softc *);
144
145void ti_mem_read(struct ti_softc *, u_int32_t, u_int32_t, void *);
146void ti_mem_write(struct ti_softc *, u_int32_t, u_int32_t, const void*);
147void ti_mem_set(struct ti_softc *, u_int32_t, u_int32_t);
148void ti_loadfw(struct ti_softc *);
149void ti_cmd(struct ti_softc *, struct ti_cmd_desc *);
150void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *,
151    caddr_t, int);
152void ti_handle_events(struct ti_softc *);
153int ti_alloc_jumbo_mem(struct ti_softc *);
154void *ti_jalloc(struct ti_softc *);
155void ti_jfree(caddr_t, u_int, void *);
156int ti_newbuf_std(struct ti_softc *, int, struct mbuf *, bus_dmamap_t);
157int ti_newbuf_mini(struct ti_softc *, int, struct mbuf *, bus_dmamap_t);
158int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *);
159int ti_init_rx_ring_std(struct ti_softc *);
160void ti_free_rx_ring_std(struct ti_softc *);
161int ti_init_rx_ring_jumbo(struct ti_softc *);
162void ti_free_rx_ring_jumbo(struct ti_softc *);
163int ti_init_rx_ring_mini(struct ti_softc *);
164void ti_free_rx_ring_mini(struct ti_softc *);
165void ti_free_tx_ring(struct ti_softc *);
166int ti_init_tx_ring(struct ti_softc *);
167
168int ti_64bitslot_war(struct ti_softc *);
169int ti_chipinit(struct ti_softc *);
170void ti_chipinit_pci(struct ti_softc *);
171void ti_chipinit_sbus(struct ti_softc *);
172int ti_gibinit(struct ti_softc *);
173
174/*
175 * Send an instruction or address to the EEPROM, check for ACK.
176 */
177u_int32_t
178ti_eeprom_putbyte(struct ti_softc *sc, int byte)
179{
180	int		i, ack = 0;
181
182	/*
183	 * Make sure we're in TX mode.
184	 */
185	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
186
187	/*
188	 * Feed in each bit and strobe the clock.
189	 */
190	for (i = 0x80; i; i >>= 1) {
191		if (byte & i)
192			TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
193		else
194			TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
195		DELAY(1);
196		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
197		DELAY(1);
198		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
199	}
200
201	/*
202	 * Turn off TX mode.
203	 */
204	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
205
206	/*
207	 * Check for ack.
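	 * The part signals ACK by pulling DIN low while we pulse the
	 * clock, so a non-zero return value here means the byte was
	 * not acked (callers treat that as failure).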
208	 */
209	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
210	ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN;
211	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
212
213	return (ack);
214}
215
216/*
217 * Read a byte of data stored in the EEPROM at address 'addr.'
218 * We have to send two address bytes since the EEPROM can hold
219 * more than 256 bytes of data.
220 */
221u_int8_t
222ti_eeprom_getbyte(struct ti_softc *sc, int addr, u_int8_t *dest)
223{
224	int		i;
225	u_int8_t		byte = 0;
226
227	EEPROM_START;
228
229	/*
230	 * Send write control code to EEPROM.
231	 */
232	if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
233		printf("%s: failed to send write command, status: %x\n",
234		    sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
235		return (1);
236	}
237
238	/*
239	 * Send first byte of address of byte we want to read.
240	 */
241	if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) {
242		printf("%s: failed to send address, status: %x\n",
243		    sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
244		return (1);
245	}
246	/*
247	 * Send second byte address of byte we want to read.
248	 */
249	if (ti_eeprom_putbyte(sc, addr & 0xFF)) {
250		printf("%s: failed to send address, status: %x\n",
251		    sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
252		return (1);
253	}
254
255	EEPROM_STOP;
256	EEPROM_START;
257	/*
258	 * Send read control code to EEPROM.
259	 */
260	if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
261		printf("%s: failed to send read command, status: %x\n",
262		    sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
263		return (1);
264	}
265
266	/*
267	 * Start reading bits from EEPROM.
268	 */
269	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
270	for (i = 0x80; i; i >>= 1) {
271		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
272		DELAY(1);
273		if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN)
274			byte |= i;
275		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
276		DELAY(1);
277	}
278
279	EEPROM_STOP;
280
281	/*
282	 * No ACK generated for read, so just return byte.
283	 */
284
285	*dest = byte;
286
287	return (0);
288}
289
290/*
291 * Read a sequence of bytes from the EEPROM.
292 */
293int
294ti_read_eeprom(struct ti_softc *sc, caddr_t dest, int off, int cnt)
295{
296	int			err = 0, i;
297	u_int8_t		byte = 0;
298
299	for (i = 0; i < cnt; i++) {
300		err = ti_eeprom_getbyte(sc, off + i, &byte);
301		if (err)
302			break;
303		*(dest + i) = byte;
304	}
305
306	return (err ? 1 : 0);
307}
308
309/*
310 * NIC memory read function.
311 * Can be used to copy data from NIC local memory.
312 */
313void
314ti_mem_read(struct ti_softc *sc, u_int32_t addr, u_int32_t len, void *buf)
315{
316	int			segptr, segsize, cnt;
317	caddr_t			ptr;
318
319	segptr = addr;
320	cnt = len;
321	ptr = buf;
322
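	/*
	 * NIC SRAM is reached through a small shared-memory window:
	 * TI_WINBASE selects which TI_WINLEN-sized chunk of SRAM is
	 * visible at register offset TI_WINDOW. Each pass below copies
	 * at most up to the end of the current window before moving
	 * the window along.
	 */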
323	while(cnt) {
324		if (cnt < TI_WINLEN)
325			segsize = cnt;
326		else
327			segsize = TI_WINLEN - (segptr % TI_WINLEN);
328		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
329		bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
330		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), (u_int32_t *)ptr,
331		    segsize / 4);
332		ptr += segsize;
333		segptr += segsize;
334		cnt -= segsize;
335	}
336}
337
338/*
339 * NIC memory write function.
340 * Can be used to copy data into NIC local memory.
341 */
342void
343ti_mem_write(struct ti_softc *sc, u_int32_t addr, u_int32_t len,
344    const void *buf)
345{
346	int			segptr, segsize, cnt;
347	const char		*ptr;
348
349	segptr = addr;
350	cnt = len;
351	ptr = buf;
352
353	while(cnt) {
354		if (cnt < TI_WINLEN)
355			segsize = cnt;
356		else
357			segsize = TI_WINLEN - (segptr % TI_WINLEN);
358		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
359		bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
360		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), (u_int32_t *)ptr,
361		    segsize / 4);
362		ptr += segsize;
363		segptr += segsize;
364		cnt -= segsize;
365	}
366}
367
368/*
369 * NIC memory set function.
370 * Can be used to clear a section of NIC local memory.
371 */
372void
373ti_mem_set(struct ti_softc *sc, u_int32_t addr, u_int32_t len)
374{
375	int			segptr, segsize, cnt;
376
377	segptr = addr;
378	cnt = len;
379
380	while(cnt) {
381		if (cnt < TI_WINLEN)
382			segsize = cnt;
383		else
384			segsize = TI_WINLEN - (segptr % TI_WINLEN);
385		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
386		bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle,
387		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), 0, segsize / 4);
388		segptr += segsize;
389		cnt -= segsize;
390	}
391}
392
393/*
394 * Load firmware image into the NIC. Check that the firmware revision
395 * is acceptable and see if we want the firmware for the Tigon 1 or
396 * Tigon 2.
397 */
398void
399ti_loadfw(struct ti_softc *sc)
400{
401	struct tigon_firmware *tf;
402	u_char *buf = NULL;
403	u_int32_t *b;
404	size_t buflen, i, cnt;
405	char *name;
406	int error;
407
408	switch(sc->ti_hwrev) {
409	case TI_HWREV_TIGON:
410		name = "tigon1";
411		break;
412	case TI_HWREV_TIGON_II:
413		name = "tigon2";
414		break;
415	default:
416		printf("%s: can't load firmware: unknown hardware rev\n",
417		    sc->sc_dv.dv_xname);
418		return;
419	}
420
421	error = loadfirmware(name, &buf, &buflen);
422	if (error)
423		return;
424	/* convert firmware to host byte order */
425	b = (u_int32_t *)buf;
426	cnt = buflen / sizeof(u_int32_t);
427	for (i = 0; i < cnt; i++)
428		b[i] = letoh32(b[i]);
429
430	tf = (struct tigon_firmware *)buf;
431	if (tf->FwReleaseMajor != TI_FIRMWARE_MAJOR ||
432	    tf->FwReleaseMinor != TI_FIRMWARE_MINOR ||
433	    tf->FwReleaseFix != TI_FIRMWARE_FIX) {
434		printf("%s: firmware revision mismatch; want "
435		    "%d.%d.%d, got %d.%d.%d\n", sc->sc_dv.dv_xname,
436		    TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
437		    TI_FIRMWARE_FIX, tf->FwReleaseMajor,
438		    tf->FwReleaseMinor, tf->FwReleaseFix);
439		free(buf, M_DEVBUF);
440		return;
441	}
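	/*
	 * Copy the text, rodata and data sections into NIC SRAM at the
	 * addresses given in the image header, zero the bss and sbss
	 * regions, and point the on-board CPU at the entry point. The
	 * CPU itself is started later, from ti_gibinit().
	 */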
442	ti_mem_write(sc, tf->FwTextAddr, tf->FwTextLen,
443	    (caddr_t)&tf->data[tf->FwTextOffset]);
444	ti_mem_write(sc, tf->FwRodataAddr, tf->FwRodataLen,
445	    (caddr_t)&tf->data[tf->FwRodataOffset]);
446	ti_mem_write(sc, tf->FwDataAddr, tf->FwDataLen,
447	    (caddr_t)&tf->data[tf->FwDataOffset]);
448	ti_mem_set(sc, tf->FwBssAddr, tf->FwBssLen);
449	ti_mem_set(sc, tf->FwSbssAddr, tf->FwSbssLen);
450	CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tf->FwStartAddr);
451	free(buf, M_DEVBUF);
452}
453
454/*
455 * Send the NIC a command via the command ring.
456 */
457void
458ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd)
459{
460	u_int32_t		index;
461
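	/*
	 * The command ring lives in the NIC's shared register region
	 * (TI_GCR_CMDRING): write the descriptor word directly, then
	 * bump the producer index through the mailbox register so the
	 * firmware notices the new command.
	 */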
462	index = sc->ti_cmd_saved_prodidx;
463	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
464	TI_INC(index, TI_CMD_RING_CNT);
465	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
466	sc->ti_cmd_saved_prodidx = index;
467}
468
469/*
470 * Send the NIC an extended command. The 'len' parameter specifies the
471 * number of command slots to include after the initial command.
472 */
473void
474ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, caddr_t arg,
475    int len)
476{
477	u_int32_t		index;
478	int		i;
479
480	index = sc->ti_cmd_saved_prodidx;
481	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
482	TI_INC(index, TI_CMD_RING_CNT);
483	for (i = 0; i < len; i++) {
484		CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4),
485		    *(u_int32_t *)(&arg[i * 4]));
486		TI_INC(index, TI_CMD_RING_CNT);
487	}
488	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
489	sc->ti_cmd_saved_prodidx = index;
490}
491
492/*
493 * Handle events that have triggered interrupts.
494 */
495void
496ti_handle_events(struct ti_softc *sc)
497{
498	struct ti_event_desc	*e;
499	struct ifnet		*ifp = &sc->arpcom.ac_if;
500
501	if (sc->ti_rdata->ti_event_ring == NULL)
502		return;
503
504	while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) {
505		e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx];
506		switch (TI_EVENT_EVENT(e)) {
507		case TI_EV_LINKSTAT_CHANGED:
508			sc->ti_linkstat = TI_EVENT_CODE(e);
509			switch (sc->ti_linkstat) {
510			case TI_EV_CODE_LINK_UP:
511			case TI_EV_CODE_GIG_LINK_UP:
512			    {
513				struct ifmediareq ifmr;
514
515				bzero(&ifmr, sizeof(ifmr));
516				ti_ifmedia_sts(ifp, &ifmr);
517				if (ifmr.ifm_active & IFM_FDX) {
518					ifp->if_link_state =
519					    LINK_STATE_FULL_DUPLEX;
520				} else {
521					ifp->if_link_state =
522					    LINK_STATE_HALF_DUPLEX;
523				}
524				if_link_state_change(ifp);
525				ifp->if_baudrate =
526				    ifmedia_baudrate(ifmr.ifm_active);
527				break;
528			    }
529			case TI_EV_CODE_LINK_DOWN:
530				ifp->if_link_state = LINK_STATE_DOWN;
531				if_link_state_change(ifp);
532				ifp->if_baudrate = 0;
533				break;
534			default:
535				printf("%s: unknown link state code %d\n",
536				    sc->sc_dv.dv_xname, sc->ti_linkstat);
537			}
538			break;
539		case TI_EV_ERROR:
540			if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_INVAL_CMD)
541				printf("%s: invalid command\n",
542				    sc->sc_dv.dv_xname);
543			else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_UNIMP_CMD)
544				printf("%s: unknown command\n",
545				    sc->sc_dv.dv_xname);
546			else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_BADCFG)
547				printf("%s: bad config data\n",
548				    sc->sc_dv.dv_xname);
549			break;
550		case TI_EV_FIRMWARE_UP:
551			ti_init2(sc);
552			break;
553		case TI_EV_STATS_UPDATED:
554			ti_stats_update(sc);
555			break;
556		case TI_EV_RESET_JUMBO_RING:
557		case TI_EV_MCAST_UPDATED:
558			/* Who cares. */
559			break;
560		default:
561			printf("%s: unknown event: %d\n", sc->sc_dv.dv_xname,
562			       TI_EVENT_EVENT(e));
563			break;
564		}
565		/* Advance the consumer index. */
566		TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT);
567		CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx);
568	}
569}
570
571/*
572 * Memory management for the jumbo receive ring is a pain in the
573 * butt. We need to allocate at least 9018 bytes of space per frame,
574 * _and_ it has to be contiguous (unless you use the extended
575 * jumbo descriptor format). Using malloc() all the time won't
576 * work: malloc() allocates memory in powers of two, which means we
577 * would end up wasting a considerable amount of space by allocating
578 * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have
579 * to do our own memory management.
580 *
581 * The driver needs to allocate a contiguous chunk of memory at boot
582 * time. We then chop this up ourselves into 9K pieces and use them
583 * as external mbuf storage.
584 *
585 * One issue here is how much memory to allocate. The jumbo ring has
586 * 256 slots in it, but at 9K per slot that can consume over 2MB of
587 * RAM. This is a bit much, especially considering we also need
588 * RAM for the standard ring and mini ring (on the Tigon 2). To
589 * save space, we only actually allocate enough memory for 64 slots
590 * by default, which works out to between 500 and 600K. This can
591 * be tuned by changing a #define in if_tireg.h.
592 */
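
/*
 * A rough sketch of the layout that ti_alloc_jumbo_mem() sets up
 * (slot count and sizes come from if_tireg.h and may differ if the
 * defaults are tuned):
 *
 *	ti_jumbo_buf -> +---------------------+  slot 0, TI_JLEN bytes
 *	                +---------------------+  slot 1
 *	                         ...
 *	                +---------------------+  slot TI_JSLOTS - 1
 *
 * ti_jalloc() hands out free slots from ti_jfree_listhead and
 * ti_jfree() returns them once the mbuf external storage is released.
 */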
593
594int
595ti_alloc_jumbo_mem(struct ti_softc *sc)
596{
597	caddr_t ptr, kva;
598	bus_dma_segment_t seg;
599	int i, rseg, state, error;
600	struct ti_jpool_entry *entry;
601
602	state = error = 0;
603
604	/* Grab a big chunk o' storage. */
605	if (bus_dmamem_alloc(sc->sc_dmatag, TI_JMEM, PAGE_SIZE, 0,
606	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
607		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
608		return (ENOBUFS);
609	}
610
611	state = 1;
612	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, TI_JMEM, &kva,
613	    BUS_DMA_NOWAIT)) {
614		printf("%s: can't map dma buffers (%zu bytes)\n",
615		    sc->sc_dv.dv_xname, TI_JMEM);
616		error = ENOBUFS;
617		goto out;
618	}
619
620	state = 2;
621	if (bus_dmamap_create(sc->sc_dmatag, TI_JMEM, 1, TI_JMEM, 0,
622	    BUS_DMA_NOWAIT, &sc->ti_cdata.ti_rx_jumbo_map)) {
623		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
624		error = ENOBUFS;
625		goto out;
626	}
627
628	state = 3;
629	if (bus_dmamap_load(sc->sc_dmatag, sc->ti_cdata.ti_rx_jumbo_map, kva,
630	    TI_JMEM, NULL, BUS_DMA_NOWAIT)) {
631		printf("%s: can't load dma map\n", sc->sc_dv.dv_xname);
632		error = ENOBUFS;
633		goto out;
634	}
635
636	state = 4;
637	sc->ti_cdata.ti_jumbo_buf = (caddr_t)kva;
638
639	SLIST_INIT(&sc->ti_jfree_listhead);
640	SLIST_INIT(&sc->ti_jinuse_listhead);
641
642	/*
643	 * Now divide it up into 9K pieces and save the addresses
644	 * in an array.
645	 */
646	ptr = sc->ti_cdata.ti_jumbo_buf;
647	for (i = 0; i < TI_JSLOTS; i++) {
648		sc->ti_cdata.ti_jslots[i].ti_buf = ptr;
649		sc->ti_cdata.ti_jslots[i].ti_inuse = 0;
650		ptr += TI_JLEN;
651		entry = malloc(sizeof(struct ti_jpool_entry),
652			       M_DEVBUF, M_NOWAIT);
653		if (entry == NULL) {
654			sc->ti_cdata.ti_jumbo_buf = NULL;
655			printf("%s: no memory for jumbo buffer queue\n",
656			    sc->sc_dv.dv_xname);
657			error = ENOBUFS;
658			goto out;
659		}
660		entry->slot = i;
661		SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries);
662	}
663out:
664	if (error != 0) {
665		switch (state) {
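		/*
		 * Unwind in the reverse order of setup; each case is
		 * intended to fall through to the cleanups below it.
		 */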
666		case 4:
667			bus_dmamap_unload(sc->sc_dmatag,
668			    sc->ti_cdata.ti_rx_jumbo_map);
669		case 3:
670			bus_dmamap_destroy(sc->sc_dmatag,
671			    sc->ti_cdata.ti_rx_jumbo_map);
672		case 2:
673			bus_dmamem_unmap(sc->sc_dmatag, kva, TI_JMEM);
674		case 1:
675			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
676			break;
677		default:
678			break;
679		}
680	}
681
682	return (error);
683}
684
685/*
686 * Allocate a jumbo buffer.
687 */
688void *
689ti_jalloc(struct ti_softc *sc)
690{
691	struct ti_jpool_entry   *entry;
692
693	entry = SLIST_FIRST(&sc->ti_jfree_listhead);
694
695	if (entry == NULL)
696		return (NULL);
697
698	SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries);
699	SLIST_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries);
700	sc->ti_cdata.ti_jslots[entry->slot].ti_inuse = 1;
701	return (sc->ti_cdata.ti_jslots[entry->slot].ti_buf);
702}
703
704/*
705 * Release a jumbo buffer.
706 */
707void
708ti_jfree(caddr_t buf, u_int size, void *arg)
709{
710	struct ti_softc		*sc;
711	int			i;
712	struct ti_jpool_entry	*entry;
713
714	/* Extract the softc struct pointer. */
715	sc = (struct ti_softc *)arg;
716
717	if (sc == NULL)
718		panic("ti_jfree: can't find softc pointer!");
719
720	/* calculate the slot this buffer belongs to */
721	i = ((vaddr_t)buf - (vaddr_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN;
722
723	if ((i < 0) || (i >= TI_JSLOTS))
724		panic("ti_jfree: asked to free buffer that we don't manage!");
725	else if (sc->ti_cdata.ti_jslots[i].ti_inuse == 0)
726		panic("ti_jfree: buffer already free!");
727
728	sc->ti_cdata.ti_jslots[i].ti_inuse--;
729	if(sc->ti_cdata.ti_jslots[i].ti_inuse == 0) {
730		entry = SLIST_FIRST(&sc->ti_jinuse_listhead);
731		if (entry == NULL)
732			panic("ti_jfree: buffer not in use!");
733		entry->slot = i;
734		SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries);
735		SLIST_INSERT_HEAD(&sc->ti_jfree_listhead,
736				  entry, jpool_entries);
737	}
738}
739
740/*
741 * Initialize a standard receive ring descriptor.
742 */
743int
744ti_newbuf_std(struct ti_softc *sc, int i, struct mbuf *m,
745    bus_dmamap_t dmamap)
746{
747	struct mbuf		*m_new = NULL;
748	struct ti_rx_desc	*r;
749
750	if (dmamap == NULL) {
751		/* if (m) panic() */
752
753		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES,
754				      0, BUS_DMA_NOWAIT, &dmamap)) {
755			printf("%s: can't create recv map\n",
756			       sc->sc_dv.dv_xname);
757			return (ENOMEM);
758		}
759	} else if (m == NULL)
760		bus_dmamap_unload(sc->sc_dmatag, dmamap);
761
762	sc->ti_cdata.ti_rx_std_map[i] = dmamap;
763
764	if (m == NULL) {
765		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
766		if (m_new == NULL)
767			return (ENOBUFS);
768
769		MCLGET(m_new, M_DONTWAIT);
770		if (!(m_new->m_flags & M_EXT)) {
771			m_freem(m_new);
772			return (ENOBUFS);
773		}
774		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
775
776		m_adj(m_new, ETHER_ALIGN);
777
778		if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m_new,
779					 BUS_DMA_NOWAIT))
780			return (ENOBUFS);
781
782	} else {
783		/*
784		 * We're re-using a previously allocated mbuf;
785		 * be sure to re-init pointers and lengths to
786		 * default values.
787		 */
788		m_new = m;
789		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
790		m_new->m_data = m_new->m_ext.ext_buf;
791		m_adj(m_new, ETHER_ALIGN);
792	}
793
794	sc->ti_cdata.ti_rx_std_chain[i] = m_new;
795	r = &sc->ti_rdata->ti_rx_std_ring[i];
796	TI_HOSTADDR(r->ti_addr) = dmamap->dm_segs[0].ds_addr;
797	r->ti_type = TI_BDTYPE_RECV_BD;
798	r->ti_flags = TI_BDFLAG_IP_CKSUM;
799	r->ti_len = dmamap->dm_segs[0].ds_len;
800	r->ti_idx = i;
801
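	/*
	 * Paranoia: the mapped cluster should stay within a single
	 * MCLBYTES-aligned region; if it doesn't, the dma map has
	 * presumably been overwritten.
	 */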
802	if ((dmamap->dm_segs[0].ds_addr & ~(MCLBYTES - 1)) !=
803	    ((dmamap->dm_segs[0].ds_addr + dmamap->dm_segs[0].ds_len - 1) &
804	     ~(MCLBYTES - 1)))
805	    panic("%s: overwritten!!!", sc->sc_dv.dv_xname);
806
807	return (0);
808}
809
810/*
811 * Initialize a mini receive ring descriptor. This only applies to
812 * the Tigon 2.
813 */
814int
815ti_newbuf_mini(struct ti_softc *sc, int i, struct mbuf *m,
816    bus_dmamap_t dmamap)
817{
818	struct mbuf		*m_new = NULL;
819	struct ti_rx_desc	*r;
820
821	if (dmamap == NULL) {
822		/* if (m) panic() */
823
824		if (bus_dmamap_create(sc->sc_dmatag, MHLEN, 1, MHLEN,
825				      0, BUS_DMA_NOWAIT, &dmamap)) {
826			printf("%s: can't create recv map\n",
827			       sc->sc_dv.dv_xname);
828			return (ENOMEM);
829		}
830	} else if (m == NULL)
831		bus_dmamap_unload(sc->sc_dmatag, dmamap);
832
833	sc->ti_cdata.ti_rx_mini_map[i] = dmamap;
834
835	if (m == NULL) {
836		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
837		if (m_new == NULL)
838			return (ENOBUFS);
839		m_new->m_len = m_new->m_pkthdr.len = MHLEN;
840		m_adj(m_new, ETHER_ALIGN);
841
842		if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m_new,
843					 BUS_DMA_NOWAIT))
844			return (ENOBUFS);
845
846	} else {
847		/*
848		 * We're re-using a previously allocated mbuf;
849		 * be sure to re-init pointers and lengths to
850		 * default values.
851		 */
852		m_new = m;
853		m_new->m_data = m_new->m_pktdat;
854		m_new->m_len = m_new->m_pkthdr.len = MHLEN;
855	}
856
857	r = &sc->ti_rdata->ti_rx_mini_ring[i];
858	sc->ti_cdata.ti_rx_mini_chain[i] = m_new;
859	TI_HOSTADDR(r->ti_addr) = dmamap->dm_segs[0].ds_addr;
860	r->ti_type = TI_BDTYPE_RECV_BD;
861	r->ti_flags = TI_BDFLAG_MINI_RING | TI_BDFLAG_IP_CKSUM;
862	r->ti_len = dmamap->dm_segs[0].ds_len;
863	r->ti_idx = i;
864
865	return (0);
866}
867
868/*
869 * Initialize a jumbo receive ring descriptor. This allocates
870 * a jumbo buffer from the pool managed internally by the driver.
871 */
872int
873ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *m)
874{
875	struct mbuf		*m_new = NULL;
876	struct ti_rx_desc	*r;
877
878	if (m == NULL) {
879		caddr_t			buf = NULL;
880
881		/* Allocate the mbuf. */
882		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
883		if (m_new == NULL)
884			return (ENOBUFS);
885
886		/* Allocate the jumbo buffer */
887		buf = ti_jalloc(sc);
888		if (buf == NULL) {
889			m_freem(m_new);
890			return (ENOBUFS);
891		}
892
893		/* Attach the buffer to the mbuf. */
894		m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN;
895		MEXTADD(m_new, buf, TI_JUMBO_FRAMELEN, 0, ti_jfree, sc);
896	} else {
897		/*
898		 * We're re-using a previously allocated mbuf;
899		 * be sure to re-init pointers and lengths to
900		 * default values.
901		 */
902		m_new = m;
903		m_new->m_data = m_new->m_ext.ext_buf;
904		m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN;
905	}
906
907	m_adj(m_new, ETHER_ALIGN);
908	/* Set up the descriptor. */
909	r = &sc->ti_rdata->ti_rx_jumbo_ring[i];
910	sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new;
911	TI_HOSTADDR(r->ti_addr) = TI_JUMBO_DMA_ADDR(sc, m_new);
912	r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
913	r->ti_flags = TI_BDFLAG_JUMBO_RING | TI_BDFLAG_IP_CKSUM;
914	r->ti_len = m_new->m_len;
915	r->ti_idx = i;
916
917	return (0);
918}
919
920/*
921 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
922 * that's 1MB of memory, which is a lot. For now, we fill only the first
923 * 256 ring entries and hope that our CPU is fast enough to keep up with
924 * the NIC.
925 */
926int
927ti_init_rx_ring_std(struct ti_softc *sc)
928{
929	int		i;
930	struct ti_cmd_desc	cmd;
931
932	for (i = 0; i < TI_SSLOTS; i++) {
933		if (ti_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
934			return (ENOBUFS);
935	}
936
937	TI_UPDATE_STDPROD(sc, i - 1);
938	sc->ti_std = i - 1;
939
940	return (0);
941}
942
943void
944ti_free_rx_ring_std(struct ti_softc *sc)
945{
946	int		i;
947
948	for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
949		if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
950			m_freem(sc->ti_cdata.ti_rx_std_chain[i]);
951			sc->ti_cdata.ti_rx_std_chain[i] = NULL;
952			bus_dmamap_destroy(sc->sc_dmatag,
953					   sc->ti_cdata.ti_rx_std_map[i]);
954			sc->ti_cdata.ti_rx_std_map[i] = 0;
955		}
956		bzero(&sc->ti_rdata->ti_rx_std_ring[i],
957		    sizeof(struct ti_rx_desc));
958	}
959}
960
961int
962ti_init_rx_ring_jumbo(struct ti_softc *sc)
963{
964	int		i;
965	struct ti_cmd_desc	cmd;
966
967	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
968		if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
969			return (ENOBUFS);
970	}
971
972	TI_UPDATE_JUMBOPROD(sc, i - 1);
973	sc->ti_jumbo = i - 1;
974
975	return (0);
976}
977
978void
979ti_free_rx_ring_jumbo(struct ti_softc *sc)
980{
981	int		i;
982
983	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
984		if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
985			m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]);
986			sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL;
987		}
988		bzero(&sc->ti_rdata->ti_rx_jumbo_ring[i],
989		    sizeof(struct ti_rx_desc));
990	}
991}
992
993int
994ti_init_rx_ring_mini(struct ti_softc *sc)
995{
996	int		i;
997
998	for (i = 0; i < TI_MSLOTS; i++) {
999		if (ti_newbuf_mini(sc, i, NULL, 0) == ENOBUFS)
1000			return (ENOBUFS);
1001	}
1002
1003	TI_UPDATE_MINIPROD(sc, i - 1);
1004	sc->ti_mini = i - 1;
1005
1006	return (0);
1007}
1008
1009void
1010ti_free_rx_ring_mini(struct ti_softc *sc)
1011{
1012	int		i;
1013
1014	for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
1015		if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
1016			m_freem(sc->ti_cdata.ti_rx_mini_chain[i]);
1017			sc->ti_cdata.ti_rx_mini_chain[i] = NULL;
1018			bus_dmamap_destroy(sc->sc_dmatag,
1019					   sc->ti_cdata.ti_rx_mini_map[i]);
1020			sc->ti_cdata.ti_rx_mini_map[i] = 0;
1021		}
1022		bzero(&sc->ti_rdata->ti_rx_mini_ring[i],
1023		    sizeof(struct ti_rx_desc));
1024	}
1025}
1026
1027void
1028ti_free_tx_ring(struct ti_softc *sc)
1029{
1030	int		i;
1031	struct ti_txmap_entry *entry;
1032
1033	if (sc->ti_rdata->ti_tx_ring == NULL)
1034		return;
1035
1036	for (i = 0; i < TI_TX_RING_CNT; i++) {
1037		if (sc->ti_cdata.ti_tx_chain[i] != NULL) {
1038			m_freem(sc->ti_cdata.ti_tx_chain[i]);
1039			sc->ti_cdata.ti_tx_chain[i] = NULL;
1040			SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead,
1041					    sc->ti_cdata.ti_tx_map[i], link);
1042			sc->ti_cdata.ti_tx_map[i] = 0;
1043		}
1044		bzero(&sc->ti_rdata->ti_tx_ring[i],
1045		    sizeof(struct ti_tx_desc));
1046	}
1047
1048	while ((entry = SLIST_FIRST(&sc->ti_tx_map_listhead))) {
1049		SLIST_REMOVE_HEAD(&sc->ti_tx_map_listhead, link);
1050		bus_dmamap_destroy(sc->sc_dmatag, entry->dmamap);
1051		free(entry, M_DEVBUF);
1052	}
1053}
1054
1055int
1056ti_init_tx_ring(struct ti_softc *sc)
1057{
1058	int i;
1059	bus_dmamap_t dmamap;
1060	struct ti_txmap_entry *entry;
1061
1062	sc->ti_txcnt = 0;
1063	sc->ti_tx_saved_considx = 0;
1064	sc->ti_tx_saved_prodidx = 0;
1065	CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0);
1066
1067	SLIST_INIT(&sc->ti_tx_map_listhead);
1068	for (i = 0; i < TI_TX_RING_CNT; i++) {
1069		if (bus_dmamap_create(sc->sc_dmatag, TI_JUMBO_FRAMELEN,
1070		    TI_NTXSEG, MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap))
1071			return (ENOBUFS);
1072
1073		entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
1074		if (!entry) {
1075			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
1076			return (ENOBUFS);
1077		}
1078		entry->dmamap = dmamap;
1079		SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, entry, link);
1080	}
1081
1082	return (0);
1083}
1084
1085/*
1086 * The Tigon 2 firmware has a new way to add/delete multicast addresses,
1087 * but we have to support the old way too so that Tigon 1 cards will
1088 * work.
1089 */
1090void
1091ti_add_mcast(struct ti_softc *sc, struct ether_addr *addr)
1092{
1093	struct ti_cmd_desc	cmd;
1094	u_int16_t		*m;
1095	u_int32_t		ext[2] = {0, 0};
1096
1097	m = (u_int16_t *)&addr->ether_addr_octet[0];
1098
1099	switch(sc->ti_hwrev) {
1100	case TI_HWREV_TIGON:
1101		CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
1102		CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
1103		TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0);
1104		break;
1105	case TI_HWREV_TIGON_II:
1106		ext[0] = htons(m[0]);
1107		ext[1] = (htons(m[1]) << 16) | htons(m[2]);
1108		TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2);
1109		break;
1110	default:
1111		printf("%s: unknown hwrev\n", sc->sc_dv.dv_xname);
1112		break;
1113	}
1114}
1115
1116void
1117ti_del_mcast(struct ti_softc *sc, struct ether_addr *addr)
1118{
1119	struct ti_cmd_desc	cmd;
1120	u_int16_t		*m;
1121	u_int32_t		ext[2] = {0, 0};
1122
1123	m = (u_int16_t *)&addr->ether_addr_octet[0];
1124
1125	switch(sc->ti_hwrev) {
1126	case TI_HWREV_TIGON:
1127		CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
1128		CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
1129		TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0);
1130		break;
1131	case TI_HWREV_TIGON_II:
1132		ext[0] = htons(m[0]);
1133		ext[1] = (htons(m[1]) << 16) | htons(m[2]);
1134		TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2);
1135		break;
1136	default:
1137		printf("%s: unknown hwrev\n", sc->sc_dv.dv_xname);
1138		break;
1139	}
1140}
1141
1142/*
1143 * Configure the Tigon's multicast address filter.
1144 *
1145 * The actual multicast table management is a bit of a pain, thanks to
1146 * slight brain damage on the part of both Alteon and us. With our
1147 * multicast code, we are only alerted when the multicast address table
1148 * changes and at that point we only have the current list of addresses:
1149 * we only know the current state, not the previous state, so we don't
1150 * actually know what addresses were removed or added. The firmware has
1151 * state, but we can't get our grubby mitts on it, and there is no 'delete
1152 * all multicast addresses' command. Hence, we have to maintain our own
1153 * state so we know what addresses have been programmed into the NIC at
1154 * any given time.
1155 */
1156void
1157ti_iff(struct ti_softc *sc)
1158{
1159	struct ifnet		*ifp = &sc->arpcom.ac_if;
1160	struct arpcom		*ac = &sc->arpcom;
1161	struct ether_multi	*enm;
1162	struct ether_multistep	step;
1163	struct ti_cmd_desc	cmd;
1164	struct ti_mc_entry	*mc;
1165	u_int32_t		intrs;
1166
1167	TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0);
1168	TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0);
1169	ifp->if_flags &= ~IFF_ALLMULTI;
1170
1171	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1172		ifp->if_flags |= IFF_ALLMULTI;
1173		if (ifp->if_flags & IFF_PROMISC) {
1174			TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
1175			    TI_CMD_CODE_PROMISC_ENB, 0);
1176		} else {
1177			TI_DO_CMD(TI_CMD_SET_ALLMULTI,
1178			    TI_CMD_CODE_ALLMULTI_ENB, 0);
1179		}
1180	} else {
1181		/* Disable interrupts. */
1182		intrs = CSR_READ_4(sc, TI_MB_HOSTINTR);
1183		CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
1184
1185		/* First, zot all the existing filters. */
1186		while (SLIST_FIRST(&sc->ti_mc_listhead) != NULL) {
1187			mc = SLIST_FIRST(&sc->ti_mc_listhead);
1188			ti_del_mcast(sc, &mc->mc_addr);
1189			SLIST_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries);
1190			free(mc, M_DEVBUF);
1191		}
1192
1193		/* Now program new ones. */
1194		ETHER_FIRST_MULTI(step, ac, enm);
1195		while (enm != NULL) {
1196			mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF,
1197			    M_NOWAIT);
1198			if (mc == NULL)
1199				panic("ti_iff");
1200
1201			bcopy(enm->enm_addrlo, &mc->mc_addr,
1202			    ETHER_ADDR_LEN);
1203			SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc,
1204			    mc_entries);
1205			ti_add_mcast(sc, &mc->mc_addr);
1206
1207			ETHER_NEXT_MULTI(step, enm);
1208		}
1209
1210		/* Re-enable interrupts. */
1211		CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs);
1212	}
1213}
1214
1215/*
1216 * Check to see if the BIOS has configured us for a 64 bit slot when
1217 * we aren't actually in one. If we detect this condition, we can work
1218 * around it on the Tigon 2 by setting a bit in the PCI state register,
1219 * but for the Tigon 1 we must give up and abort the interface attach.
1220 */
1221int
1222ti_64bitslot_war(struct ti_softc *sc)
1223{
1224	if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) {
1225		CSR_WRITE_4(sc, 0x600, 0);
1226		CSR_WRITE_4(sc, 0x604, 0);
1227		CSR_WRITE_4(sc, 0x600, 0x5555AAAA);
1228		if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) {
1229			if (sc->ti_hwrev == TI_HWREV_TIGON)
1230				return (EINVAL);
1231			else {
1232				TI_SETBIT(sc, TI_PCI_STATE,
1233				    TI_PCISTATE_32BIT_BUS);
1234				return (0);
1235			}
1236		}
1237	}
1238
1239	return (0);
1240}
1241
1242/*
1243 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1244 * self-test results.
1245 */
1246int
1247ti_chipinit(struct ti_softc *sc)
1248{
1249	u_int32_t		chip_rev;
1250
1251	/* Initialize link to down state. */
1252	sc->ti_linkstat = TI_EV_CODE_LINK_DOWN;
1253
1254	/* Set endianness before we access any non-PCI registers. */
1255	CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
1256	    TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24));
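	/*
	 * The init value is written into both the low byte and the
	 * byte-swapped position, presumably so the setting takes
	 * effect regardless of the byte order the chip is currently
	 * using when we get here.
	 */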
1257
1258	/* Check the ROM failed bit to see if self-tests passed. */
1259	if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) {
1260		printf("%s: board self-diagnostics failed!\n",
1261		    sc->sc_dv.dv_xname);
1262		return (ENODEV);
1263	}
1264
1265	/* Halt the CPU. */
1266	TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT);
1267
1268	/* Figure out the hardware revision. */
1269	chip_rev = CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK;
1270	switch(chip_rev) {
1271	case TI_REV_TIGON_I:
1272		sc->ti_hwrev = TI_HWREV_TIGON;
1273		break;
1274	case TI_REV_TIGON_II:
1275		sc->ti_hwrev = TI_HWREV_TIGON_II;
1276		break;
1277	default:
1278		printf("\n");
1279		printf("%s: unsupported chip revision: %x\n",
1280		    sc->sc_dv.dv_xname, chip_rev);
1281		return (ENODEV);
1282	}
1283
1284	/* Do special setup for Tigon 2. */
1285	if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
1286		TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT);
1287		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_512K);
1288		TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS);
1289	}
1290
1291	if (sc->ti_sbus)
1292		ti_chipinit_sbus(sc);
1293	else
1294		ti_chipinit_pci(sc);
1295
1296	/* Recommended settings from Tigon manual. */
1297	CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W);
1298	CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W);
1299
1300	if (ti_64bitslot_war(sc)) {
1301		printf("%s: bios thinks we're in a 64 bit slot, "
1302		    "but we aren't\n", sc->sc_dv.dv_xname);
1303		return (EINVAL);
1304	}
1305
1306	return (0);
1307}
1308
1309void
1310ti_chipinit_pci(struct ti_softc *sc)
1311{
1312	u_int32_t		cacheline;
1313	u_int32_t		pci_writemax = 0;
1314
1315	/* Set up the PCI state register. */
1316	CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD | TI_PCI_WRITE_CMD);
1317	if (sc->ti_hwrev == TI_HWREV_TIGON_II)
1318		TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT);
1319
1320	/* Clear the read/write max DMA parameters. */
1321	TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA|
1322	    TI_PCISTATE_READ_MAXDMA));
1323
1324	/* Get cache line size. */
1325	cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF;
1326
1327	/*
1328	 * If the system has enabled the PCI memory write
1329	 * and invalidate command in the command register, set
1330	 * the write max parameter accordingly. This is necessary
1331	 * to use MWI with the Tigon 2.
1332	 */
1333	if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCI_COMMAND_INVALIDATE_ENABLE) {
1334		switch(cacheline) {
1335		case 1:
1336		case 4:
1337		case 8:
1338		case 16:
1339		case 32:
1340		case 64:
1341			break;
1342		default:
1343		/* Disable PCI memory write and invalidate. */
1344			CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc,
1345			    TI_PCI_CMDSTAT) & ~PCI_COMMAND_INVALIDATE_ENABLE);
1346			break;
1347		}
1348	}
1349
1350#ifdef __brokenalpha__
1351	/*
1352	 * From the Alteon sample driver:
1353	 * Must ensure that we do not cross an 8K (bytes) boundary
1354	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1355	 * restriction on some ALPHA platforms with early revision
1356	 * 21174 PCI chipsets, such as the AlphaPC 164lx
1357	 */
1358	TI_SETBIT(sc, TI_PCI_STATE, pci_writemax|TI_PCI_READMAX_1024);
1359#else
1360	TI_SETBIT(sc, TI_PCI_STATE, pci_writemax);
1361#endif
1362
1363	/* This sets the min dma param all the way up (0xff). */
1364	TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA);
1365
1366	/* Configure DMA variables. */
1367	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_DMA_SWAP_OPTIONS |
1368	    TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB |
1369	    TI_OPMODE_DONT_FRAG_JUMBO);
1370}
1371
1372void
1373ti_chipinit_sbus(struct ti_softc *sc)
1374{
1375	/* Set up the PCI state register. */
1376	CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD | TI_PCI_WRITE_CMD |
1377	    TI_PCISTATE_NO_SWAP_READ_DMA | TI_PCISTATE_NO_SWAP_WRITE_DMA |
1378	    TI_PCI_WRITEMAX_64 | TI_PCI_READMAX_64 |
1379	    TI_PCISTATE_PROVIDE_LEN);
1380
1381	/* Configure DMA variables. */
1382	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_WORDSWAP_BD |
1383	    TI_OPMODE_1_DMA_ACTIVE | TI_OPMODE_SBUS |
1384	    TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB |
1385	    TI_OPMODE_DONT_FRAG_JUMBO);
1386}
1387
1388/*
1389 * Initialize the general information block and firmware, and
1390 * start the CPU(s) running.
1391 */
1392int
1393ti_gibinit(struct ti_softc *sc)
1394{
1395	struct ti_rcb		*rcb;
1396	int			i;
1397	struct ifnet		*ifp;
1398
1399	ifp = &sc->arpcom.ac_if;
1400
1401	/* Disable interrupts for now. */
1402	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
1403
1404	/*
1405	 * Tell the chip where to find the general information block.
1406	 * While this struct could go into >4GB memory, we allocate it in a
1407	 * single slab with the other descriptors, and those don't seem to
1408	 * support being located in a 64-bit region.
1409	 */
1410	CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0);
1411	CSR_WRITE_4(sc, TI_GCR_GENINFO_LO,
1412		    TI_RING_DMA_ADDR(sc, ti_info) & 0xffffffff);
1413
1414	/* Load the firmware into SRAM. */
1415	ti_loadfw(sc);
1416
1417	/* Set up the contents of the general info and ring control blocks. */
1418
1419	/* Set up the event ring and producer pointer. */
1420	rcb = &sc->ti_rdata->ti_info.ti_ev_rcb;
1421
1422	TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_event_ring);
1423	rcb->ti_flags = 0;
1424	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) =
1425		TI_RING_DMA_ADDR(sc, ti_ev_prodidx_r);
1426	sc->ti_ev_prodidx.ti_idx = 0;
1427	CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0);
1428	sc->ti_ev_saved_considx = 0;
1429
1430	/* Set up the command ring and producer mailbox. */
1431	rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb;
1432
1433	TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING);
1434	rcb->ti_flags = 0;
1435	rcb->ti_max_len = 0;
1436	for (i = 0; i < TI_CMD_RING_CNT; i++) {
1437		CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0);
1438	}
1439	CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0);
1440	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0);
1441	sc->ti_cmd_saved_prodidx = 0;
1442
1443	/*
1444	 * Assign the address of the stats refresh buffer.
1445	 * We re-use the current stats buffer for this to
1446	 * conserve memory.
1447	 */
1448	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) =
1449		TI_RING_DMA_ADDR(sc, ti_info.ti_stats);
1450
1451	/* Set up the standard receive ring. */
1452	rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb;
1453	TI_HOSTADDR(rcb->ti_hostaddr) =
1454		TI_RING_DMA_ADDR(sc, ti_rx_std_ring);
1455	rcb->ti_max_len = ETHER_MAX_LEN;
1456	rcb->ti_flags = 0;
1457	rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
1458#if NVLAN > 0
1459	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1460		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
1461#endif
1462
1463	/* Set up the jumbo receive ring. */
1464	rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb;
1465	TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_rx_jumbo_ring);
1466	rcb->ti_max_len = TI_JUMBO_FRAMELEN;
1467	rcb->ti_flags = 0;
1468	rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
1469#if NVLAN > 0
1470	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1471		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
1472#endif
1473
1474	/*
1475	 * Set up the mini ring. Only activated on the
1476	 * Tigon 2 but the slot in the config block is
1477	 * still there on the Tigon 1.
1478	 */
1479	rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb;
1480	TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_rx_mini_ring);
1481	rcb->ti_max_len = MHLEN - ETHER_ALIGN;
1482	if (sc->ti_hwrev == TI_HWREV_TIGON)
1483		rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED;
1484	else
1485		rcb->ti_flags = 0;
1486	rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
1487#if NVLAN > 0
1488	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1489		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
1490#endif
1491
1492	/*
1493	 * Set up the receive return ring.
1494	 */
1495	rcb = &sc->ti_rdata->ti_info.ti_return_rcb;
1496	TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc,ti_rx_return_ring);
1497	rcb->ti_flags = 0;
1498	rcb->ti_max_len = TI_RETURN_RING_CNT;
1499	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) =
1500	    TI_RING_DMA_ADDR(sc, ti_return_prodidx_r);
1501
1502	/*
1503	 * Set up the tx ring. Note: for the Tigon 2, we have the option
1504	 * of putting the transmit ring in the host's address space and
1505	 * letting the chip DMA it instead of leaving the ring in the NIC's
1506	 * memory and accessing it through the shared memory region. We
1507	 * do this for the Tigon 2, but it doesn't work on the Tigon 1,
1508	 * so we have to revert to the shared memory scheme if we detect
1509	 * a Tigon 1 chip.
1510	 */
1511	CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE);
1512	bzero(sc->ti_rdata->ti_tx_ring,
1513	    TI_TX_RING_CNT * sizeof(struct ti_tx_desc));
1514	rcb = &sc->ti_rdata->ti_info.ti_tx_rcb;
1515	if (sc->ti_hwrev == TI_HWREV_TIGON)
1516		rcb->ti_flags = 0;
1517	else
1518		rcb->ti_flags = TI_RCB_FLAG_HOST_RING;
1519	rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
1520#if NVLAN > 0
1521	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1522		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
1523#endif
1524	rcb->ti_max_len = TI_TX_RING_CNT;
1525	if (sc->ti_hwrev == TI_HWREV_TIGON)
1526		TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE;
1527	else
1528		TI_HOSTADDR(rcb->ti_hostaddr) =
1529			TI_RING_DMA_ADDR(sc, ti_tx_ring);
1530	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) =
1531		TI_RING_DMA_ADDR(sc, ti_tx_considx_r);
1532
1533	TI_RING_DMASYNC(sc, ti_info, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1534
1535	/* Set up tuneables */
1536	CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, (sc->ti_rx_coal_ticks / 10));
1537	CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks);
1538	CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
1539	CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds);
1540	CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds);
1541	CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio);
1542
1543	/* Turn interrupts on. */
1544	CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0);
1545	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
1546
1547	/* Start CPU. */
1548	TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP));
1549
1550	return (0);
1551}
1552
1553int
1554ti_attach(struct ti_softc *sc)
1555{
1556	bus_dma_segment_t seg;
1557	int rseg;
1558	struct ifnet *ifp;
1559	caddr_t kva;
1560
1561	if (ti_chipinit(sc)) {
1562		printf("%s: chip initialization failed\n", sc->sc_dv.dv_xname);
1563		return (1);
1564	}
1565
1566	/* Zero out the NIC's on-board SRAM. */
1567	ti_mem_set(sc, 0x2000, 0x100000 - 0x2000);
1568
1569	/* Init again -- zeroing memory may have clobbered some registers. */
1570	if (ti_chipinit(sc)) {
1571		printf("%s: chip initialization failed\n", sc->sc_dv.dv_xname);
1572		return (1);
1573	}
1574
1575	/*
1576	 * Get station address from the EEPROM. Note: the manual states
1577	 * that the MAC address is at offset 0x8c, however the data is
1578	 * stored as two longwords (since that's how it's loaded into
1579	 * the NIC). This means the MAC address is actually preceded
1580	 * by two zero bytes. We need to skip over those.
1581	 */
1582	if (ti_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1583				TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
1584		printf("%s: failed to read station address\n",
1585		    sc->sc_dv.dv_xname);
1586		return (1);
1587	}
1588
1589	/*
1590	 * A Tigon chip was detected. Inform the world.
1591	 */
1592	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
1593
1594	/* Allocate the general information block and ring buffers. */
1595	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct ti_ring_data),
1596	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1597		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
1598		return (1);
1599	}
1600	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
1601	    sizeof(struct ti_ring_data), &kva, BUS_DMA_NOWAIT)) {
1602		printf("%s: can't map dma buffers (%zu bytes)\n",
1603		       sc->sc_dv.dv_xname, sizeof(struct ti_ring_data));
1604		goto fail_1;
1605	}
1606	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct ti_ring_data), 1,
1607	    sizeof(struct ti_ring_data), 0, BUS_DMA_NOWAIT,
1608	    &sc->ti_ring_map)) {
1609		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
1610		goto fail_2;
1611	}
1612	if (bus_dmamap_load(sc->sc_dmatag, sc->ti_ring_map, kva,
1613	    sizeof(struct ti_ring_data), NULL, BUS_DMA_NOWAIT)) {
1614		goto fail_3;
1615	}
1616	sc->ti_rdata = (struct ti_ring_data *)kva;
1617	bzero(sc->ti_rdata, sizeof(struct ti_ring_data));
1618
1619	/* Try to allocate memory for jumbo buffers. */
1620	if (ti_alloc_jumbo_mem(sc)) {
1621		printf("%s: jumbo buffer allocation failed\n",
1622		    sc->sc_dv.dv_xname);
1623		goto fail_3;
1624	}
1625
1626	/* Set default tuneable values. */
1627	sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC;
1628	sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000;
1629	sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500;
1630	sc->ti_rx_max_coal_bds = 64;
1631	sc->ti_tx_max_coal_bds = 128;
1632	sc->ti_tx_buf_ratio = 21;
1633
1634	/* Set up ifnet structure */
1635	ifp = &sc->arpcom.ac_if;
1636	ifp->if_softc = sc;
1637	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1638	ifp->if_ioctl = ti_ioctl;
1639	ifp->if_start = ti_start;
1640	ifp->if_watchdog = ti_watchdog;
1641	ifp->if_hardmtu = TI_JUMBO_FRAMELEN - ETHER_HDR_LEN;
1642	IFQ_SET_MAXLEN(&ifp->if_snd, TI_TX_RING_CNT - 1);
1643	IFQ_SET_READY(&ifp->if_snd);
1644	bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ);
1645
1646	ifp->if_capabilities = IFCAP_VLAN_MTU;
1647
1648#if NVLAN > 0
1649	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1650#endif
1651
1652	/* Set up ifmedia support. */
1653	ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts);
1654	if (sc->ti_copper) {
1655		/*
1656		 * Copper cards allow manual 10/100 mode selection,
1657		 * but not manual 1000baseT mode selection. Why?
1658		 * Because currently there's no way to specify the
1659		 * master/slave setting through the firmware interface,
1660		 * so Alteon decided to just bag it and handle it
1661		 * via autonegotiation.
1662		 */
1663		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1664		ifmedia_add(&sc->ifmedia,
1665		    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1666		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
1667		ifmedia_add(&sc->ifmedia,
1668		    IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
1669		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_T, 0, NULL);
1670		ifmedia_add(&sc->ifmedia,
1671		    IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
1672	} else {
1673		/* Fiber cards don't support 10/100 modes. */
1674		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1675		ifmedia_add(&sc->ifmedia,
1676		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1677	}
1678	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1679	ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO);
1680
1681	/*
1682	 * Call MI attach routines.
1683	 */
1684	if_attach(ifp);
1685	ether_ifattach(ifp);
1686
1687	return (0);
1688
1689fail_3:
1690	bus_dmamap_destroy(sc->sc_dmatag, sc->ti_ring_map);
1691
1692fail_2:
1693	bus_dmamem_unmap(sc->sc_dmatag, kva,
1694	    sizeof(struct ti_ring_data));
1695
1696fail_1:
1697	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1698
1699	return (1);
1700}
1701
1702/*
1703 * Frame reception handling. This is called if there's a frame
1704 * on the receive return list.
1705 *
1706 * Note: we have to be able to handle three possibilities here:
1707 * 1) the frame is from the mini receive ring (can only happen
1708 *    on Tigon 2 boards)
1709 * 2) the frame is from the jumbo receive ring
1710 * 3) the frame is from the standard receive ring
1711 */
1712
1713void
1714ti_rxeof(struct ti_softc *sc)
1715{
1716	struct ifnet		*ifp;
1717	struct ti_cmd_desc	cmd;
1718
1719	ifp = &sc->arpcom.ac_if;
1720
1721	while(sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) {
1722		struct ti_rx_desc	*cur_rx;
1723		u_int32_t		rxidx;
1724		struct mbuf		*m = NULL;
1725		bus_dmamap_t		dmamap;
1726
1727		cur_rx =
1728		    &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx];
1729		rxidx = cur_rx->ti_idx;
1730		TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT);
1731
1732		if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) {
1733			TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT);
1734			m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx];
1735			sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL;
1736			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
1737				ifp->if_ierrors++;
1738				ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
1739				continue;
1740			}
1741			if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL)
1742			    == ENOBUFS) {
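				/*
				 * No spare jumbo buffer is available:
				 * copy the frame into ordinary mbuf
				 * storage with m_devget() and give the
				 * original jumbo buffer back to the
				 * ring.
				 */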
1743				struct mbuf             *m0;
1744				m0 = m_devget(mtod(m, char *), cur_rx->ti_len,
1745				    ETHER_ALIGN, ifp);
1746				ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
1747				if (m0 == NULL) {
1748					ifp->if_ierrors++;
1749					continue;
1750				}
1751				m = m0;
1752			}
1753		} else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) {
1754			TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT);
1755			m = sc->ti_cdata.ti_rx_mini_chain[rxidx];
1756			sc->ti_cdata.ti_rx_mini_chain[rxidx] = NULL;
1757			dmamap = sc->ti_cdata.ti_rx_mini_map[rxidx];
1758			sc->ti_cdata.ti_rx_mini_map[rxidx] = 0;
1759			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
1760				ifp->if_ierrors++;
1761				ti_newbuf_mini(sc, sc->ti_mini, m, dmamap);
1762				continue;
1763			}
1764			if (ti_newbuf_mini(sc, sc->ti_mini, NULL, dmamap)
1765			    == ENOBUFS) {
1766				ifp->if_ierrors++;
1767				ti_newbuf_mini(sc, sc->ti_mini, m, dmamap);
1768				continue;
1769			}
1770		} else {
1771			TI_INC(sc->ti_std, TI_STD_RX_RING_CNT);
1772			m = sc->ti_cdata.ti_rx_std_chain[rxidx];
1773			sc->ti_cdata.ti_rx_std_chain[rxidx] = NULL;
1774			dmamap = sc->ti_cdata.ti_rx_std_map[rxidx];
1775			sc->ti_cdata.ti_rx_std_map[rxidx] = 0;
1776			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
1777				ifp->if_ierrors++;
1778				ti_newbuf_std(sc, sc->ti_std, m, dmamap);
1779				continue;
1780			}
1781			if (ti_newbuf_std(sc, sc->ti_std, NULL, dmamap)
1782			    == ENOBUFS) {
1783				ifp->if_ierrors++;
1784				ti_newbuf_std(sc, sc->ti_std, m, dmamap);
1785				continue;
1786			}
1787		}
1788
1789		if (m == NULL)
1790			panic("%s: couldn't get mbuf", sc->sc_dv.dv_xname);
1791
1792		m->m_pkthdr.len = m->m_len = cur_rx->ti_len;
1793		ifp->if_ipackets++;
1794		m->m_pkthdr.rcvif = ifp;
1795
1796#if NVLAN > 0
1797		if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) {
1798			m->m_pkthdr.ether_vtag = cur_rx->ti_vlan_tag;
1799			m->m_flags |= M_VLANTAG;
1800		}
1801#endif
1802
1803#if NBPFILTER > 0
1804		/*
1805		 * Handle BPF listeners. Let the BPF user see the packet.
1806		 */
1807		if (ifp->if_bpf)
1808			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
1809#endif
1810
1811		if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0)
1812			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1813
1814		ether_input_mbuf(ifp, m);
1815	}
1816
1817	/* Only necessary on the Tigon 1. */
1818	if (sc->ti_hwrev == TI_HWREV_TIGON)
1819		CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX,
1820		    sc->ti_rx_saved_considx);
1821
1822	TI_UPDATE_STDPROD(sc, sc->ti_std);
1823	TI_UPDATE_MINIPROD(sc, sc->ti_mini);
1824	TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo);
1825}
1826
1827void
1828ti_txeof_tigon1(struct ti_softc *sc)
1829{
1830	struct ifnet		*ifp;
1831	struct ti_txmap_entry	*entry;
1832	int			active = 1;
1833
1834	ifp = &sc->arpcom.ac_if;
1835
1836	/*
1837	 * Go through our tx ring and free mbufs for those
1838	 * frames that have been sent.
1839	 */
1840	while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) {
1841		u_int32_t		idx = 0;
1842		struct ti_tx_desc	txdesc;
1843
1844		idx = sc->ti_tx_saved_considx;
1845		ti_mem_read(sc, TI_TX_RING_BASE + idx * sizeof(txdesc),
1846			    sizeof(txdesc), (caddr_t)&txdesc);
1847
1848		if (txdesc.ti_flags & TI_BDFLAG_END)
1849			ifp->if_opackets++;
1850
1851		if (sc->ti_cdata.ti_tx_chain[idx] != NULL) {
1852			m_freem(sc->ti_cdata.ti_tx_chain[idx]);
1853			sc->ti_cdata.ti_tx_chain[idx] = NULL;
1854
1855			entry = sc->ti_cdata.ti_tx_map[idx];
1856			bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
1857			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1858
1859			bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
1860			SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, entry,
1861			    link);
1862			sc->ti_cdata.ti_tx_map[idx] = NULL;
1863
1864		}
1865		sc->ti_txcnt--;
1866		TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT);
1867		ifp->if_timer = 0;
1868
1869		active = 0;
1870	}
1871
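	/*
	 * If at least one descriptor was reclaimed there is room in the
	 * ring again, so clear OACTIVE and allow ti_start() to queue more.
	 */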
1872	if (!active)
1873		ifp->if_flags &= ~IFF_OACTIVE;
1874}
1875
1876void
1877ti_txeof_tigon2(struct ti_softc *sc)
1878{
1879	struct ti_tx_desc	*cur_tx = NULL;
1880	struct ifnet		*ifp;
1881	struct ti_txmap_entry	*entry;
1882
1883	ifp = &sc->arpcom.ac_if;
1884
1885	/*
1886	 * Go through our tx ring and free mbufs for those
1887	 * frames that have been sent.
1888	 */
1889	while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) {
1890		u_int32_t		idx = 0;
1891
1892		idx = sc->ti_tx_saved_considx;
1893		cur_tx = &sc->ti_rdata->ti_tx_ring[idx];
1894
1895		if (cur_tx->ti_flags & TI_BDFLAG_END)
1896			ifp->if_opackets++;
1897		if (sc->ti_cdata.ti_tx_chain[idx] != NULL) {
1898			m_freem(sc->ti_cdata.ti_tx_chain[idx]);
1899			sc->ti_cdata.ti_tx_chain[idx] = NULL;
1900
1901			entry = sc->ti_cdata.ti_tx_map[idx];
1902			bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
1903			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1904
1905			bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
1906			SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, entry,
1907			    link);
1908			sc->ti_cdata.ti_tx_map[idx] = NULL;
1909
1910		}
1911		sc->ti_txcnt--;
1912		TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT);
1913		ifp->if_timer = 0;
1914	}
1915
1916	if (cur_tx != NULL)
1917		ifp->if_flags &= ~IFF_OACTIVE;
1918}
1919
1920int
1921ti_intr(void *xsc)
1922{
1923	struct ti_softc		*sc;
1924	struct ifnet		*ifp;
1925
1926	sc = xsc;
1927	ifp = &sc->arpcom.ac_if;
1928
1929	/* XXX checking this register is expensive. */
1930	/* Make sure this is really our interrupt. */
1931	if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE))
1932		return (0);
1933
1934	/* Ack interrupt and stop others from occurring. */
1935	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
1936
1937	if (ifp->if_flags & IFF_RUNNING) {
1938		/* Check RX return ring producer/consumer */
1939		ti_rxeof(sc);
1940
1941		/* Check TX ring producer/consumer */
1942		if (sc->ti_hwrev == TI_HWREV_TIGON)
1943			ti_txeof_tigon1(sc);
1944		else
1945			ti_txeof_tigon2(sc);
1946	}
1947
1948	ti_handle_events(sc);
1949
1950	/* Re-enable interrupts. */
1951	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
1952
1953	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
1954		ti_start(ifp);
1955
1956	return (1);
1957}
1958
1959void
1960ti_stats_update(struct ti_softc *sc)
1961{
1962	struct ifnet		*ifp;
1963	struct ti_stats		*stats = &sc->ti_rdata->ti_info.ti_stats;
1964
1965	ifp = &sc->arpcom.ac_if;
1966
1967	TI_RING_DMASYNC(sc, ti_info.ti_stats, BUS_DMASYNC_POSTREAD);
1968
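	/*
	 * The firmware keeps cumulative collision counters; adding the
	 * difference from the current if_collisions value keeps the
	 * interface count in step with the chip's running totals.
	 */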
1969	ifp->if_collisions += stats->dot3StatsSingleCollisionFrames +
1970		stats->dot3StatsMultipleCollisionFrames +
1971		stats->dot3StatsExcessiveCollisions +
1972		stats->dot3StatsLateCollisions -
1973		ifp->if_collisions;
1974
1975	TI_RING_DMASYNC(sc, ti_info.ti_stats, BUS_DMASYNC_PREREAD);
1976}
1977
1978/*
1979 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
1980 * pointers to descriptors.
1981 */
1982int
1983ti_encap_tigon1(struct ti_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
1984{
1985	u_int32_t		frag, cur;
1986	struct ti_txmap_entry	*entry;
1987	bus_dmamap_t		txmap;
1988	struct ti_tx_desc	txdesc;
1989	int			i = 0;
1990
1991	entry = SLIST_FIRST(&sc->ti_tx_map_listhead);
1992	if (entry == NULL)
1993		return (ENOBUFS);
1994	txmap = entry->dmamap;
1995
1996	cur = frag = *txidx;
1997
1998	/*
1999	 * Start packing the mbufs in this chain into
2000	 * the fragment pointers. Stop when we run out
2001	 * of fragments or hit the end of the mbuf chain.
2002	 */
2003	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
2004	    BUS_DMA_NOWAIT))
2005		return (ENOBUFS);
2006
2007	/*
2008	 * Sanity check: avoid coming within 16 descriptors
2009	 * of the end of the ring.
2010	 */
2011	if (txmap->dm_nsegs > (TI_TX_RING_CNT - sc->ti_txcnt - 16))
2012		goto fail_unload;
2013
2014	for (i = 0; i < txmap->dm_nsegs; i++) {
2015		if (sc->ti_cdata.ti_tx_chain[frag] != NULL)
2016			break;
2017
2018		memset(&txdesc, 0, sizeof(txdesc));
2019
2020		TI_HOSTADDR(txdesc.ti_addr) = txmap->dm_segs[i].ds_addr;
2021		txdesc.ti_len = txmap->dm_segs[i].ds_len & 0xffff;
2022		txdesc.ti_flags = 0;
2023		txdesc.ti_vlan_tag = 0;
2024
2025#if NVLAN > 0
2026		if (m_head->m_flags & M_VLANTAG) {
2027			txdesc.ti_flags |= TI_BDFLAG_VLAN_TAG;
2028			txdesc.ti_vlan_tag = m_head->m_pkthdr.ether_vtag;
2029		}
2030#endif
2031
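		/*
		 * The Tigon 1 keeps its tx ring in NIC memory, so the
		 * descriptor is copied out with ti_mem_write() instead of
		 * being written into a host-resident ring.
		 */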
2032		ti_mem_write(sc, TI_TX_RING_BASE + frag * sizeof(txdesc),
2033			     sizeof(txdesc), (caddr_t)&txdesc);
2034
2035		cur = frag;
2036		TI_INC(frag, TI_TX_RING_CNT);
2037	}
2038
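	/*
	 * If the producer index has caught up with the consumer index,
	 * the ring is full; back out and let the caller retry later.
	 */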
2039	if (frag == sc->ti_tx_saved_considx)
2040		goto fail_unload;
2041
2042	txdesc.ti_flags |= TI_BDFLAG_END;
2043	ti_mem_write(sc, TI_TX_RING_BASE + cur * sizeof(txdesc),
2044		     sizeof(txdesc), (caddr_t)&txdesc);
2045
2046	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
2047	    BUS_DMASYNC_PREWRITE);
2048
2049	sc->ti_cdata.ti_tx_chain[cur] = m_head;
2050	SLIST_REMOVE_HEAD(&sc->ti_tx_map_listhead, link);
2051	sc->ti_cdata.ti_tx_map[cur] = entry;
2052	sc->ti_txcnt += txmap->dm_nsegs;
2053
2054	*txidx = frag;
2055
2056	return (0);
2057
2058fail_unload:
2059	bus_dmamap_unload(sc->sc_dmatag, txmap);
2060
2061	return (ENOBUFS);
2062}
2063
2064/*
2065 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2066 * pointers to descriptors.
2067 */
2068int
2069ti_encap_tigon2(struct ti_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
2070{
2071	struct ti_tx_desc	*f = NULL;
2072	u_int32_t		frag, cur;
2073	struct ti_txmap_entry	*entry;
2074	bus_dmamap_t		txmap;
2075	int			i = 0;
2076
2077	entry = SLIST_FIRST(&sc->ti_tx_map_listhead);
2078	if (entry == NULL)
2079		return (ENOBUFS);
2080	txmap = entry->dmamap;
2081
2082	cur = frag = *txidx;
2083
2084	/*
2085	 * Start packing the mbufs in this chain into
2086	 * the fragment pointers. Stop when we run out
2087	 * of fragments or hit the end of the mbuf chain.
2088	 */
2089	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
2090	    BUS_DMA_NOWAIT))
2091		return (ENOBUFS);
2092
2093	/*
2094	 * Sanity check: avoid coming within 16 descriptors
2095	 * of the end of the ring.
2096	 */
2097	if (txmap->dm_nsegs > (TI_TX_RING_CNT - sc->ti_txcnt - 16))
2098		goto fail_unload;
2099
2100	for (i = 0; i < txmap->dm_nsegs; i++) {
2101		f = &sc->ti_rdata->ti_tx_ring[frag];
2102
2103		if (sc->ti_cdata.ti_tx_chain[frag] != NULL)
2104			break;
2105
2106		TI_HOSTADDR(f->ti_addr) = txmap->dm_segs[i].ds_addr;
2107		f->ti_len = txmap->dm_segs[i].ds_len & 0xffff;
2108		f->ti_flags = 0;
2109		f->ti_vlan_tag = 0;
2110
2111#if NVLAN > 0
2112		if (m_head->m_flags & M_VLANTAG) {
2113			f->ti_flags |= TI_BDFLAG_VLAN_TAG;
2114			f->ti_vlan_tag = m_head->m_pkthdr.ether_vtag;
2115		}
2116#endif
2117
2118		cur = frag;
2119		TI_INC(frag, TI_TX_RING_CNT);
2120	}
2121
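	/* Ring full: the producer would collide with the consumer index. */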
2122	if (frag == sc->ti_tx_saved_considx)
2123		goto fail_unload;
2124
2125	sc->ti_rdata->ti_tx_ring[cur].ti_flags |= TI_BDFLAG_END;
2126
2127	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
2128	    BUS_DMASYNC_PREWRITE);
2129
2130	TI_RING_DMASYNC(sc, ti_tx_ring[cur], BUS_DMASYNC_POSTREAD);
2131
2132	sc->ti_cdata.ti_tx_chain[cur] = m_head;
2133	SLIST_REMOVE_HEAD(&sc->ti_tx_map_listhead, link);
2134	sc->ti_cdata.ti_tx_map[cur] = entry;
2135	sc->ti_txcnt += txmap->dm_nsegs;
2136
2137	*txidx = frag;
2138
2139	return (0);
2140
2141fail_unload:
2142	bus_dmamap_unload(sc->sc_dmatag, txmap);
2143
2144	return (ENOBUFS);
2145}
2146
2147/*
2148 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2149 * to the mbuf data regions directly in the transmit descriptors.
2150 */
2151void
2152ti_start(struct ifnet *ifp)
2153{
2154	struct ti_softc		*sc;
2155	struct mbuf		*m_head = NULL;
2156	u_int32_t		prodidx;
2157	int			pkts = 0, error;
2158
2159	sc = ifp->if_softc;
2160
2161	prodidx = sc->ti_tx_saved_prodidx;
2162
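	/*
	 * Keep queuing packets while the next tx ring slot is free;
	 * a non-NULL chain entry means the ring is full.
	 */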
2163	while (sc->ti_cdata.ti_tx_chain[prodidx] == NULL) {
2164		IFQ_POLL(&ifp->if_snd, m_head);
2165		if (m_head == NULL)
2166			break;
2167
2168		/*
2169		 * Pack the data into the transmit ring. If we
2170		 * don't have room, set the OACTIVE flag and wait
2171		 * for the NIC to drain the ring.
2172		 */
2173		if (sc->ti_hwrev == TI_HWREV_TIGON)
2174			error = ti_encap_tigon1(sc, m_head, &prodidx);
2175		else
2176			error = ti_encap_tigon2(sc, m_head, &prodidx);
2177
2178		if (error) {
2179			ifp->if_flags |= IFF_OACTIVE;
2180			break;
2181		}
2182
2183		/* Now we are committed to transmitting the packet. */
2184		IFQ_DEQUEUE(&ifp->if_snd, m_head);
2185		pkts++;
2186
2187		/*
2188		 * If there's a BPF listener, bounce a copy of this frame
2189		 * to him.
2190		 */
2191#if NBPFILTER > 0
2192		if (ifp->if_bpf)
2193			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
2194#endif
2195	}
2196	if (pkts == 0)
2197		return;
2198
2199	/* Transmit */
2200	sc->ti_tx_saved_prodidx = prodidx;
2201	CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, prodidx);
2202
2203	/*
2204	 * Set a timeout in case the chip goes out to lunch.
2205	 */
2206	ifp->if_timer = 5;
2207}
2208
2209void
2210ti_init(void *xsc)
2211{
2212	struct ti_softc		*sc = xsc;
2213	int			s;
2214
2215	s = splnet();
2216
2217	/* Cancel pending I/O and flush buffers. */
2218	ti_stop(sc);
2219
2220	/* Init the gen info block, ring control blocks and firmware. */
2221	if (ti_gibinit(sc)) {
2222		printf("%s: initialization failure\n", sc->sc_dv.dv_xname);
2223		splx(s);
2224		return;
2225	}
2226
2227	splx(s);
2228}
2229
2230void
2231ti_init2(struct ti_softc *sc)
2232{
2233	struct ti_cmd_desc	cmd;
2234	struct ifnet		*ifp;
2235	u_int16_t		*m;
2236	struct ifmedia		*ifm;
2237	int			tmp;
2238
2239	ifp = &sc->arpcom.ac_if;
2240
2241	/* Specify MTU and interface index. */
2242	CSR_WRITE_4(sc, TI_GCR_IFINDEX, sc->sc_dv.dv_unit);
2243	CSR_WRITE_4(sc, TI_GCR_IFMTU,
2244		TI_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
2245	TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0);
2246
2247	/* Load our MAC address. */
2248	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
2249	CSR_WRITE_4(sc, TI_GCR_PAR0, htons(m[0]));
2250	CSR_WRITE_4(sc, TI_GCR_PAR1, (htons(m[1]) << 16) | htons(m[2]));
2251	TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0);
2252
2253	/* Program promiscuous mode and multicast filters. */
2254	ti_iff(sc);
2255
2256	/*
2257	 * If this is a Tigon 1, we should tell the
2258	 * firmware to use software packet filtering.
2259	 */
2260	if (sc->ti_hwrev == TI_HWREV_TIGON)
2261		TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0);
2262
2263	/* Init RX ring. */
2264	if (ti_init_rx_ring_std(sc) == ENOBUFS)
2265		panic("not enough mbufs for rx ring");
2266
2267	/* Init jumbo RX ring. */
2268	ti_init_rx_ring_jumbo(sc);
2269
2270	/*
2271	 * If this is a Tigon 2, we can also configure the
2272	 * mini ring.
2273	 */
2274	if (sc->ti_hwrev == TI_HWREV_TIGON_II)
2275		ti_init_rx_ring_mini(sc);
2276
2277	CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0);
2278	sc->ti_rx_saved_considx = 0;
2279
2280	/* Init TX ring. */
2281	ti_init_tx_ring(sc);
2282
2283	/* Tell firmware we're alive. */
2284	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0);
2285
2286	/* Enable host interrupts. */
2287	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
2288
2289	ifp->if_flags |= IFF_RUNNING;
2290	ifp->if_flags &= ~IFF_OACTIVE;
2291
2292	/*
2293	 * Make sure to set media properly. We have to do this
2294	 * here since we have to issue commands in order to set
2295	 * the link negotiation and we can't issue commands until
2296	 * the firmware is running.
2297	 */
2298	ifm = &sc->ifmedia;
2299	tmp = ifm->ifm_media;
2300	ifm->ifm_media = ifm->ifm_cur->ifm_media;
2301	ti_ifmedia_upd(ifp);
2302	ifm->ifm_media = tmp;
2303}
2304
2305/*
2306 * Set media options.
2307 */
2308int
2309ti_ifmedia_upd(struct ifnet *ifp)
2310{
2311	struct ti_softc		*sc;
2312	struct ifmedia		*ifm;
2313	struct ti_cmd_desc	cmd;
2314
2315	sc = ifp->if_softc;
2316	ifm = &sc->ifmedia;
2317
2318	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2319		return (EINVAL);
2320
2321	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2322	case IFM_AUTO:
2323		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
2324		    TI_GLNK_FULL_DUPLEX|TI_GLNK_RX_FLOWCTL_Y|
2325		    TI_GLNK_AUTONEGENB|TI_GLNK_ENB);
2326		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB|
2327		    TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX|
2328		    TI_LNK_AUTONEGENB|TI_LNK_ENB);
2329		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
2330		    TI_CMD_CODE_NEGOTIATE_BOTH, 0);
2331		break;
2332	case IFM_1000_SX:
2333	case IFM_1000_T:
2334		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
2335		    TI_GLNK_RX_FLOWCTL_Y|TI_GLNK_ENB);
2336		CSR_WRITE_4(sc, TI_GCR_LINK, 0);
2337		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2338			TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX);
2339		}
2340		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
2341		    TI_CMD_CODE_NEGOTIATE_GIGABIT, 0);
2342		break;
2343	case IFM_100_FX:
2344	case IFM_10_FL:
2345	case IFM_100_TX:
2346	case IFM_10_T:
2347		CSR_WRITE_4(sc, TI_GCR_GLINK, 0);
2348		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF);
2349		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX ||
2350		    IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
2351			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB);
2352		} else {
2353			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB);
2354		}
2355		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2356			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX);
2357		} else {
2358			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX);
2359		}
2360		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
2361		    TI_CMD_CODE_NEGOTIATE_10_100, 0);
2362		break;
2363	}
2364
2365	return (0);
2366}
2367
2368/*
2369 * Report current media status.
2370 */
2371void
2372ti_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2373{
2374	struct ti_softc		*sc;
2375	u_int32_t		media = 0;
2376
2377	sc = ifp->if_softc;
2378
2379	ifmr->ifm_status = IFM_AVALID;
2380	ifmr->ifm_active = IFM_ETHER;
2381
2382	if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) {
2383		ifmr->ifm_active |= IFM_NONE;
2384		return;
2385	}
2386
2387	ifmr->ifm_status |= IFM_ACTIVE;
2388
2389	if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
2390		media = CSR_READ_4(sc, TI_GCR_GLINK_STAT);
2391		if (sc->ti_copper)
2392			ifmr->ifm_active |= IFM_1000_T;
2393		else
2394			ifmr->ifm_active |= IFM_1000_SX;
2395		if (media & TI_GLNK_FULL_DUPLEX)
2396			ifmr->ifm_active |= IFM_FDX;
2397		else
2398			ifmr->ifm_active |= IFM_HDX;
2399	} else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
2400		media = CSR_READ_4(sc, TI_GCR_LINK_STAT);
2401		if (sc->ti_copper) {
2402			if (media & TI_LNK_100MB)
2403				ifmr->ifm_active |= IFM_100_TX;
2404			if (media & TI_LNK_10MB)
2405				ifmr->ifm_active |= IFM_10_T;
2406		} else {
2407			if (media & TI_LNK_100MB)
2408				ifmr->ifm_active |= IFM_100_FX;
2409			if (media & TI_LNK_10MB)
2410				ifmr->ifm_active |= IFM_10_FL;
2411		}
2412		if (media & TI_LNK_FULL_DUPLEX)
2413			ifmr->ifm_active |= IFM_FDX;
2414		if (media & TI_LNK_HALF_DUPLEX)
2415			ifmr->ifm_active |= IFM_HDX;
2416	}
2417}
2418
2419int
2420ti_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2421{
2422	struct ti_softc		*sc = ifp->if_softc;
2423	struct ifaddr		*ifa = (struct ifaddr *)data;
2424	struct ifreq		*ifr = (struct ifreq *)data;
2425	int			s, error = 0;
2426
2427	s = splnet();
2428
2429	switch (command) {
2430	case SIOCSIFADDR:
2431		ifp->if_flags |= IFF_UP;
2432		if ((ifp->if_flags & IFF_RUNNING) == 0)
2433			ti_init(sc);
2434#ifdef INET
2435		if (ifa->ifa_addr->sa_family == AF_INET)
2436			arp_ifinit(&sc->arpcom, ifa);
2437#endif
2438		break;
2439
2440	case SIOCSIFFLAGS:
2441		if (ifp->if_flags & IFF_UP) {
2442			if (ifp->if_flags & IFF_RUNNING)
2443				error = ENETRESET;
2444			else
2445				ti_init(sc);
2446		} else {
2447			if (ifp->if_flags & IFF_RUNNING)
2448				ti_stop(sc);
2449		}
2450		break;
2451
2452	case SIOCSIFMEDIA:
2453	case SIOCGIFMEDIA:
2454		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
2455		break;
2456
2457	default:
2458		error = ether_ioctl(ifp, &sc->arpcom, command, data);
2459	}
2460
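	/*
	 * ENETRESET means only the receive filter needs updating;
	 * reprogram it with ti_iff() instead of doing a full reinit.
	 */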
2461	if (error == ENETRESET) {
2462		if (ifp->if_flags & IFF_RUNNING)
2463			ti_iff(sc);
2464		error = 0;
2465	}
2466
2467	splx(s);
2468	return (error);
2469}
2470
2471void
2472ti_watchdog(struct ifnet *ifp)
2473{
2474	struct ti_softc		*sc;
2475
2476	sc = ifp->if_softc;
2477
2478	printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);
2479	ti_stop(sc);
2480	ti_init(sc);
2481
2482	ifp->if_oerrors++;
2483}
2484
2485/*
2486 * Stop the adapter and free any mbufs allocated to the
2487 * RX and TX lists.
2488 */
2489void
2490ti_stop(struct ti_softc *sc)
2491{
2492	struct ifnet		*ifp;
2493	struct ti_cmd_desc	cmd;
2494
2495	ifp = &sc->arpcom.ac_if;
2496
2497	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2498
2499	/* Disable host interrupts. */
2500	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
2501	/*
2502	 * Tell firmware we're shutting down.
2503	 */
2504	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0);
2505
2506	/* Halt and reinitialize. */
2507	ti_chipinit(sc);
2508	ti_mem_set(sc, 0x2000, 0x100000 - 0x2000);
2509	ti_chipinit(sc);
2510
2511	/* Free the RX lists. */
2512	ti_free_rx_ring_std(sc);
2513
2514	/* Free jumbo RX list. */
2515	ti_free_rx_ring_jumbo(sc);
2516
2517	/* Free mini RX list. */
2518	ti_free_rx_ring_mini(sc);
2519
2520	/* Free TX buffers. */
2521	ti_free_tx_ring(sc);
2522
2523	sc->ti_ev_prodidx.ti_idx = 0;
2524	sc->ti_return_prodidx.ti_idx = 0;
2525	sc->ti_tx_considx.ti_idx = 0;
2526	sc->ti_tx_saved_considx = TI_TXCONS_UNSET;
2527}
2528