if_ti.c revision 227505
1/*-
2 * Copyright (c) 1997, 1998, 1999
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33/*
34 * Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD.
35 * Manuals, sample driver and firmware source kits are available
36 * from http://www.alteon.com/support/openkits.
37 *
38 * Written by Bill Paul <wpaul@ctr.columbia.edu>
39 * Electrical Engineering Department
40 * Columbia University, New York City
41 */
42
43/*
44 * The Alteon Networks Tigon chip contains an embedded R4000 CPU,
45 * gigabit MAC, dual DMA channels and a PCI interface unit. NICs
46 * using the Tigon may have anywhere from 512K to 2MB of SRAM. The
47 * Tigon supports hardware IP, TCP and UDP checksumming, multicast
48 * filtering and jumbo (9014 byte) frames. The hardware is largely
49 * controlled by firmware, which must be loaded into the NIC during
50 * initialization.
51 *
52 * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware
53 * revision, which supports new features such as extended commands,
54 * extended jumbo receive ring descriptors and a mini receive ring.
55 *
56 * Alteon Networks is to be commended for releasing such a vast amount
57 * of development material for the Tigon NIC without requiring an NDA
58 * (although they really should have done it a long time ago). With
59 * any luck, the other vendors will finally wise up and follow Alteon's
60 * stellar example.
61 *
62 * The firmware for the Tigon 1 and 2 NICs is compiled directly into
63 * this driver by #including it as a C header file. This bloats the
64 * driver somewhat, but it's the easiest method considering that the
65 * driver code and firmware code need to be kept in sync. The source
66 * for the firmware is not provided with the FreeBSD distribution since
67 * compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3.
68 *
69 * The following people deserve special thanks:
70 * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board
71 *   for testing
72 * - Raymond Lee of Netgear, for providing a pair of Netgear
73 *   GA620 Tigon 2 boards for testing
74 * - Ulf Zimmermann, for bringing the GA260 to my attention and
75 *   convincing me to write this driver.
76 * - Andrew Gallatin for providing FreeBSD/Alpha support.
77 */
78
79#include <sys/cdefs.h>
80__FBSDID("$FreeBSD: head/sys/dev/ti/if_ti.c 227505 2011-11-14 18:40:04Z yongari $");
81
82#include "opt_ti.h"
83
84#include <sys/param.h>
85#include <sys/systm.h>
86#include <sys/sockio.h>
87#include <sys/mbuf.h>
88#include <sys/malloc.h>
89#include <sys/kernel.h>
90#include <sys/module.h>
91#include <sys/socket.h>
92#include <sys/queue.h>
93#include <sys/conf.h>
94#include <sys/sf_buf.h>
95
96#include <net/if.h>
97#include <net/if_arp.h>
98#include <net/ethernet.h>
99#include <net/if_dl.h>
100#include <net/if_media.h>
101#include <net/if_types.h>
102#include <net/if_vlan_var.h>
103
104#include <net/bpf.h>
105
106#include <netinet/in_systm.h>
107#include <netinet/in.h>
108#include <netinet/ip.h>
109
110#include <machine/bus.h>
111#include <machine/resource.h>
112#include <sys/bus.h>
113#include <sys/rman.h>
114
115#ifdef TI_SF_BUF_JUMBO
116#include <vm/vm.h>
117#include <vm/vm_page.h>
118#endif
119
120#include <dev/pci/pcireg.h>
121#include <dev/pci/pcivar.h>
122
123#include <sys/tiio.h>
124#include <dev/ti/if_tireg.h>
125#include <dev/ti/ti_fw.h>
126#include <dev/ti/ti_fw2.h>
127
/* Checksum offload capabilities advertised/used by this driver. */
#define TI_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS)
/*
 * We can only turn on header splitting if we're using extended receive
 * BDs.
 */
#if defined(TI_JUMBO_HDRSPLIT) && !defined(TI_SF_BUF_JUMBO)
#error "options TI_JUMBO_HDRSPLIT requires TI_SF_BUF_JUMBO"
#endif /* TI_JUMBO_HDRSPLIT && !TI_SF_BUF_JUMBO */

/* Direction selector for ti_bcopy_swap(): host-to-network or network-to-host. */
typedef enum {
	TI_SWAP_HTON,
	TI_SWAP_NTOH
} ti_swap_type;
141
142/*
143 * Various supported device vendors/types and their names.
144 */
145
146static const struct ti_type const ti_devs[] = {
147	{ ALT_VENDORID,	ALT_DEVICEID_ACENIC,
148		"Alteon AceNIC 1000baseSX Gigabit Ethernet" },
149	{ ALT_VENDORID,	ALT_DEVICEID_ACENIC_COPPER,
150		"Alteon AceNIC 1000baseT Gigabit Ethernet" },
151	{ TC_VENDORID,	TC_DEVICEID_3C985,
152		"3Com 3c985-SX Gigabit Ethernet" },
153	{ NG_VENDORID, NG_DEVICEID_GA620,
154		"Netgear GA620 1000baseSX Gigabit Ethernet" },
155	{ NG_VENDORID, NG_DEVICEID_GA620T,
156		"Netgear GA620 1000baseT Gigabit Ethernet" },
157	{ SGI_VENDORID, SGI_DEVICEID_TIGON,
158		"Silicon Graphics Gigabit Ethernet" },
159	{ DEC_VENDORID, DEC_DEVICEID_FARALLON_PN9000SX,
160		"Farallon PN9000SX Gigabit Ethernet" },
161	{ 0, 0, NULL }
162};
163
164
/* Entry points for the ti control character device. */
static	d_open_t	ti_open;
static	d_close_t	ti_close;
static	d_ioctl_t	ti_ioctl2;

static struct cdevsw ti_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ti_open,
	.d_close =	ti_close,
	.d_ioctl =	ti_ioctl2,
	.d_name =	"ti",
};
177
/* newbus device interface. */
static int ti_probe(device_t);
static int ti_attach(device_t);
static int ti_detach(device_t);
/* TX/RX completion processing. */
static void ti_txeof(struct ti_softc *);
static void ti_rxeof(struct ti_softc *);

static void ti_stats_update(struct ti_softc *);
static int ti_encap(struct ti_softc *, struct mbuf **);

/* Interrupt and ifnet entry points. */
static void ti_intr(void *);
static void ti_start(struct ifnet *);
static void ti_start_locked(struct ifnet *);
static int ti_ioctl(struct ifnet *, u_long, caddr_t);
static void ti_init(void *);
static void ti_init_locked(void *);
static void ti_init2(struct ti_softc *);
static void ti_stop(struct ti_softc *);
static void ti_watchdog(void *);
static int ti_shutdown(device_t);
/* Media selection. */
static int ti_ifmedia_upd(struct ifnet *);
static int ti_ifmedia_upd_locked(struct ti_softc *);
static void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* Bit-banged EEPROM access. */
static uint32_t ti_eeprom_putbyte(struct ti_softc *, int);
static uint8_t	ti_eeprom_getbyte(struct ti_softc *, int, uint8_t *);
static int ti_read_eeprom(struct ti_softc *, caddr_t, int, int);

/* Multicast filter maintenance. */
static void ti_add_mcast(struct ti_softc *, struct ether_addr *);
static void ti_del_mcast(struct ti_softc *, struct ether_addr *);
static void ti_setmulti(struct ti_softc *);

/* NIC local-memory access helpers. */
static void ti_mem_read(struct ti_softc *, uint32_t, uint32_t, void *);
static void ti_mem_write(struct ti_softc *, uint32_t, uint32_t, void *);
static void ti_mem_zero(struct ti_softc *, uint32_t, uint32_t);
static int ti_copy_mem(struct ti_softc *, uint32_t, uint32_t, caddr_t, int,
    int);
static int ti_copy_scratch(struct ti_softc *, uint32_t, uint32_t, caddr_t,
    int, int, int);
static int ti_bcopy_swap(const void *, void *, size_t, ti_swap_type);
/* Firmware load and command/event ring handling. */
static void ti_loadfw(struct ti_softc *);
static void ti_cmd(struct ti_softc *, struct ti_cmd_desc *);
static void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, caddr_t, int);
static void ti_handle_events(struct ti_softc *);
/* DMA resource and ring setup/teardown. */
static int ti_alloc_dmamaps(struct ti_softc *);
static void ti_free_dmamaps(struct ti_softc *);
static int ti_alloc_jumbo_mem(struct ti_softc *);
static int ti_newbuf_std(struct ti_softc *, int);
static int ti_newbuf_mini(struct ti_softc *, int);
static int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *);
static int ti_init_rx_ring_std(struct ti_softc *);
static void ti_free_rx_ring_std(struct ti_softc *);
static int ti_init_rx_ring_jumbo(struct ti_softc *);
static void ti_free_rx_ring_jumbo(struct ti_softc *);
static int ti_init_rx_ring_mini(struct ti_softc *);
static void ti_free_rx_ring_mini(struct ti_softc *);
static void ti_free_tx_ring(struct ti_softc *);
static int ti_init_tx_ring(struct ti_softc *);
static void ti_discard_std(struct ti_softc *, int);
#ifndef TI_SF_BUF_JUMBO
static void ti_discard_jumbo(struct ti_softc *, int);
#endif
static void ti_discard_mini(struct ti_softc *, int);

/* Chip bring-up helpers. */
static int ti_64bitslot_war(struct ti_softc *);
static int ti_chipinit(struct ti_softc *);
static int ti_gibinit(struct ti_softc *);

#ifdef TI_JUMBO_HDRSPLIT
static __inline void ti_hdr_split(struct mbuf *top, int hdr_len, int pkt_len,
    int idx);
#endif /* TI_JUMBO_HDRSPLIT */
249
static device_method_t ti_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ti_probe),
	DEVMETHOD(device_attach,	ti_attach),
	DEVMETHOD(device_detach,	ti_detach),
	DEVMETHOD(device_shutdown,	ti_shutdown),
	{ 0, 0 }
};

/* newbus driver description and module registration glue. */
static driver_t ti_driver = {
	"ti",
	ti_methods,
	sizeof(struct ti_softc)
};

static devclass_t ti_devclass;

DRIVER_MODULE(ti, pci, ti_driver, ti_devclass, 0, 0);
MODULE_DEPEND(ti, pci, 1, 1, 1);
MODULE_DEPEND(ti, ether, 1, 1, 1);
270
/*
 * Send an instruction or address to the EEPROM, check for ACK.
 * The byte is clocked out MSB first on the bit-banged interface.
 * Returns non-zero if the EEPROM acknowledged the byte, 0 otherwise.
 */
static uint32_t
ti_eeprom_putbyte(struct ti_softc *sc, int byte)
{
	int i, ack = 0;

	/*
	 * Make sure we're in TX mode.
	 */
	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x80; i; i >>= 1) {
		if (byte & i) {
			TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
		} else {
			TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
		}
		DELAY(1);
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
	}

	/*
	 * Turn off TX mode.
	 */
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);

	/*
	 * Check for ack.
	 */
	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
	ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN;
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);

	return (ack);
}
313
/*
 * Read a byte of data stored in the EEPROM at address 'addr.'
 * We have to send two address bytes since the EEPROM can hold
 * more than 256 bytes of data.
 * On success the byte is stored in *dest and 0 is returned;
 * 1 is returned if any stage of the EEPROM protocol fails to ACK.
 */
static uint8_t
ti_eeprom_getbyte(struct ti_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint8_t byte = 0;

	EEPROM_START;

	/*
	 * Send write control code to EEPROM.
	 */
	if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
		device_printf(sc->ti_dev,
		    "failed to send write command, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}

	/*
	 * Send first byte of address of byte we want to read.
	 */
	if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) {
		device_printf(sc->ti_dev, "failed to send address, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}
	/*
	 * Send second byte address of byte we want to read.
	 */
	if (ti_eeprom_putbyte(sc, addr & 0xFF)) {
		device_printf(sc->ti_dev, "failed to send address, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}

	EEPROM_STOP;
	EEPROM_START;
	/*
	 * Send read control code to EEPROM.
	 */
	if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
		device_printf(sc->ti_dev,
		    "failed to send read command, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}

	/*
	 * Start reading bits from EEPROM, MSB first.
	 */
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
	for (i = 0x80; i; i >>= 1) {
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
		if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN)
			byte |= i;
		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
	}

	EEPROM_STOP;

	/*
	 * No ACK generated for read, so just return byte.
	 */

	*dest = byte;

	return (0);
}
389
390/*
391 * Read a sequence of bytes from the EEPROM.
392 */
393static int
394ti_read_eeprom(struct ti_softc *sc, caddr_t dest, int off, int cnt)
395{
396	int err = 0, i;
397	uint8_t byte = 0;
398
399	for (i = 0; i < cnt; i++) {
400		err = ti_eeprom_getbyte(sc, off + i, &byte);
401		if (err)
402			break;
403		*(dest + i) = byte;
404	}
405
406	return (err ? 1 : 0);
407}
408
409/*
410 * NIC memory read function.
411 * Can be used to copy data from NIC local memory.
412 */
413static void
414ti_mem_read(struct ti_softc *sc, uint32_t addr, uint32_t len, void *buf)
415{
416	int segptr, segsize, cnt;
417	char *ptr;
418
419	segptr = addr;
420	cnt = len;
421	ptr = buf;
422
423	while (cnt) {
424		if (cnt < TI_WINLEN)
425			segsize = cnt;
426		else
427			segsize = TI_WINLEN - (segptr % TI_WINLEN);
428		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
429		bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
430		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), (uint32_t *)ptr,
431		    segsize / 4);
432		ptr += segsize;
433		segptr += segsize;
434		cnt -= segsize;
435	}
436}
437
438
439/*
440 * NIC memory write function.
441 * Can be used to copy data into NIC local memory.
442 */
443static void
444ti_mem_write(struct ti_softc *sc, uint32_t addr, uint32_t len, void *buf)
445{
446	int segptr, segsize, cnt;
447	char *ptr;
448
449	segptr = addr;
450	cnt = len;
451	ptr = buf;
452
453	while (cnt) {
454		if (cnt < TI_WINLEN)
455			segsize = cnt;
456		else
457			segsize = TI_WINLEN - (segptr % TI_WINLEN);
458		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
459		bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
460		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), (uint32_t *)ptr,
461		    segsize / 4);
462		ptr += segsize;
463		segptr += segsize;
464		cnt -= segsize;
465	}
466}
467
/*
 * NIC memory clear function.
 * Can be used to clear a section of NIC local memory.
 * (Works like ti_mem_write() but fills the region with zeros instead
 * of copying from a buffer.)
 */
static void
ti_mem_zero(struct ti_softc *sc, uint32_t addr, uint32_t len)
{
	int segptr, segsize, cnt;

	segptr = addr;
	cnt = len;

	while (cnt) {
		if (cnt < TI_WINLEN)
			segsize = cnt;
		else
			segsize = TI_WINLEN - (segptr % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
		bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle,
		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), 0, segsize / 4);
		segptr += segsize;
		cnt -= segsize;
	}
}
492
/*
 * Copy 'len' bytes between a host buffer ('buf') and NIC local memory
 * at 'tigon_addr' through the shared memory window.  'readdata'
 * selects NIC-to-host (read) vs. host-to-NIC (write); 'useraddr'
 * selects copyin/copyout for a userland buffer vs. bcopy for a kernel
 * buffer.  Unaligned reads are handled; unaligned writes are rejected
 * with EINVAL.  Data is byte-swapped between network and host order
 * via ti_bcopy_swap().  Must be called with the softc lock held; the
 * lock is dropped around copyin/copyout.  Returns 0 on success.
 */
static int
ti_copy_mem(struct ti_softc *sc, uint32_t tigon_addr, uint32_t len,
    caddr_t buf, int useraddr, int readdata)
{
	int segptr, segsize, cnt;
	caddr_t ptr;
	uint32_t origwin;
	int resid, segresid;
	int first_pass;

	TI_LOCK_ASSERT(sc);

	/*
	 * At the moment, we don't handle non-aligned cases, we just bail.
	 * If this proves to be a problem, it will be fixed.
	 */
	if (readdata == 0 && (tigon_addr & 0x3) != 0) {
		device_printf(sc->ti_dev, "%s: tigon address %#x isn't "
		    "word-aligned\n", __func__, tigon_addr);
		device_printf(sc->ti_dev, "%s: unaligned writes aren't "
		    "yet supported\n", __func__);
		return (EINVAL);
	}

	/* Round the start address down to a word boundary. */
	segptr = tigon_addr & ~0x3;
	segresid = tigon_addr - segptr;

	/*
	 * This is the non-aligned amount left over that we'll need to
	 * copy.
	 */
	resid = len & 0x3;

	/* Add in the left over amount at the front of the buffer */
	resid += segresid;

	cnt = len & ~0x3;
	/*
	 * If resid + segresid is >= 4, add multiples of 4 to the count and
	 * decrease the residual by that much.
	 */
	cnt += resid & ~0x3;
	resid -= resid & ~0x3;

	ptr = buf;

	first_pass = 1;

	/*
	 * Save the old window base value.
	 */
	origwin = CSR_READ_4(sc, TI_WINBASE);

	/* Main loop: move whole words, one window segment at a time. */
	while (cnt) {
		bus_size_t ti_offset;

		if (cnt < TI_WINLEN)
			segsize = cnt;
		else
			segsize = TI_WINLEN - (segptr % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));

		ti_offset = TI_WINDOW + (segptr & (TI_WINLEN -1));

		if (readdata) {
			bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
			    ti_offset, (uint32_t *)sc->ti_membuf, segsize >> 2);
			if (useraddr) {
				/*
				 * Yeah, this is a little on the kludgy
				 * side, but at least this code is only
				 * used for debugging.
				 */
				ti_bcopy_swap(sc->ti_membuf, sc->ti_membuf2,
				    segsize, TI_SWAP_NTOH);

				TI_UNLOCK(sc);
				if (first_pass) {
					/*
					 * Skip the leading bytes that pad the
					 * word-aligned start out to tigon_addr.
					 */
					copyout(&sc->ti_membuf2[segresid], ptr,
					    segsize - segresid);
					first_pass = 0;
				} else
					copyout(sc->ti_membuf2, ptr, segsize);
				TI_LOCK(sc);
			} else {
				if (first_pass) {

					ti_bcopy_swap(sc->ti_membuf,
					    sc->ti_membuf2, segsize,
					    TI_SWAP_NTOH);
					TI_UNLOCK(sc);
					bcopy(&sc->ti_membuf2[segresid], ptr,
					    segsize - segresid);
					TI_LOCK(sc);
					first_pass = 0;
				} else
					ti_bcopy_swap(sc->ti_membuf, ptr,
					    segsize, TI_SWAP_NTOH);
			}

		} else {
			if (useraddr) {
				TI_UNLOCK(sc);
				copyin(ptr, sc->ti_membuf2, segsize);
				TI_LOCK(sc);
				ti_bcopy_swap(sc->ti_membuf2, sc->ti_membuf,
				    segsize, TI_SWAP_HTON);
			} else
				ti_bcopy_swap(ptr, sc->ti_membuf, segsize,
				    TI_SWAP_HTON);

			bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
			    ti_offset, (uint32_t *)sc->ti_membuf, segsize >> 2);
		}
		segptr += segsize;
		ptr += segsize;
		cnt -= segsize;
	}

	/*
	 * Handle leftover, non-word-aligned bytes.
	 */
	if (resid != 0) {
		uint32_t tmpval, tmpval2;
		bus_size_t ti_offset;

		/*
		 * Set the segment pointer.
		 */
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));

		ti_offset = TI_WINDOW + (segptr & (TI_WINLEN - 1));

		/*
		 * First, grab whatever is in our source/destination.
		 * We'll obviously need this for reads, but also for
		 * writes, since we'll be doing read/modify/write.
		 */
		bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
		    ti_offset, &tmpval, 1);

		/*
		 * Next, translate this from little-endian to big-endian
		 * (at least on i386 boxes).
		 */
		tmpval2 = ntohl(tmpval);

		if (readdata) {
			/*
			 * If we're reading, just copy the leftover number
			 * of bytes from the host byte order buffer to
			 * the user's buffer.
			 */
			if (useraddr) {
				TI_UNLOCK(sc);
				copyout(&tmpval2, ptr, resid);
				TI_LOCK(sc);
			} else
				bcopy(&tmpval2, ptr, resid);
		} else {
			/*
			 * If we're writing, first copy the bytes to be
			 * written into the network byte order buffer,
			 * leaving the rest of the buffer with whatever was
			 * originally in there.  Then, swap the bytes
			 * around into host order and write them out.
			 *
			 * XXX KDM the read side of this has been verified
			 * to work, but the write side of it has not been
			 * verified.  So user beware.
			 */
			if (useraddr) {
				TI_UNLOCK(sc);
				copyin(ptr, &tmpval2, resid);
				TI_LOCK(sc);
			} else
				bcopy(ptr, &tmpval2, resid);

			tmpval = htonl(tmpval2);

			bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
			    ti_offset, &tmpval, 1);
		}
	}

	/* Restore the window base we found on entry. */
	CSR_WRITE_4(sc, TI_WINBASE, origwin);

	return (0);
}
682
/*
 * Copy 'len' bytes between a host buffer and the Tigon's scratchpad
 * SRAM at 'tigon_addr', one 32-bit word at a time through the per-CPU
 * SRAM address/data register pair selected by 'cpu'.  'readdata' and
 * 'useraddr' have the same meaning as in ti_copy_mem().  Both the
 * address and the length must be word-aligned or EINVAL is returned.
 * Must be called with the softc lock held.  Returns 0 on success.
 */
static int
ti_copy_scratch(struct ti_softc *sc, uint32_t tigon_addr, uint32_t len,
    caddr_t buf, int useraddr, int readdata, int cpu)
{
	uint32_t segptr;
	int cnt;
	uint32_t tmpval, tmpval2;
	caddr_t ptr;

	TI_LOCK_ASSERT(sc);

	/*
	 * At the moment, we don't handle non-aligned cases, we just bail.
	 * If this proves to be a problem, it will be fixed.
	 */
	if (tigon_addr & 0x3) {
		device_printf(sc->ti_dev, "%s: tigon address %#x "
		    "isn't word-aligned\n", __func__, tigon_addr);
		return (EINVAL);
	}

	if (len & 0x3) {
		device_printf(sc->ti_dev, "%s: transfer length %d "
		    "isn't word-aligned\n", __func__, len);
		return (EINVAL);
	}

	segptr = tigon_addr;
	cnt = len;
	ptr = buf;

	while (cnt) {
		/* Latch the SRAM address, then access the data register. */
		CSR_WRITE_4(sc, CPU_REG(TI_SRAM_ADDR, cpu), segptr);

		if (readdata) {
			tmpval2 = CSR_READ_4(sc, CPU_REG(TI_SRAM_DATA, cpu));

			tmpval = ntohl(tmpval2);

			/*
			 * Note:  I've used this debugging interface
			 * extensively with Alteon's 12.3.15 firmware,
			 * compiled with GCC 2.7.2.1 and binutils 2.9.1.
			 *
			 * When you compile the firmware without
			 * optimization, which is necessary sometimes in
			 * order to properly step through it, you sometimes
			 * read out a bogus value of 0xc0017c instead of
			 * whatever was supposed to be in that scratchpad
			 * location.  That value is on the stack somewhere,
			 * but I've never been able to figure out what was
			 * causing the problem.
			 *
			 * The address seems to pop up in random places,
			 * often not in the same place on two subsequent
			 * reads.
			 *
			 * In any case, the underlying data doesn't seem
			 * to be affected, just the value read out.
			 *
			 * KDM, 3/7/2000
			 */

			if (tmpval2 == 0xc0017c)
				device_printf(sc->ti_dev, "found 0xc0017c at "
				    "%#x (tmpval2)\n", segptr);

			if (tmpval == 0xc0017c)
				device_printf(sc->ti_dev, "found 0xc0017c at "
				    "%#x (tmpval)\n", segptr);

			if (useraddr)
				copyout(&tmpval, ptr, 4);
			else
				bcopy(&tmpval, ptr, 4);
		} else {
			if (useraddr)
				copyin(ptr, &tmpval2, 4);
			else
				bcopy(ptr, &tmpval2, 4);

			tmpval = htonl(tmpval2);

			CSR_WRITE_4(sc, CPU_REG(TI_SRAM_DATA, cpu), tmpval);
		}

		cnt -= 4;
		segptr += 4;
		ptr += 4;
	}

	return (0);
}
776
777static int
778ti_bcopy_swap(const void *src, void *dst, size_t len, ti_swap_type swap_type)
779{
780	const uint8_t *tmpsrc;
781	uint8_t *tmpdst;
782	size_t tmplen;
783
784	if (len & 0x3) {
785		printf("ti_bcopy_swap: length %zd isn't 32-bit aligned\n", len);
786		return (-1);
787	}
788
789	tmpsrc = src;
790	tmpdst = dst;
791	tmplen = len;
792
793	while (tmplen) {
794		if (swap_type == TI_SWAP_NTOH)
795			*(uint32_t *)tmpdst = ntohl(*(const uint32_t *)tmpsrc);
796		else
797			*(uint32_t *)tmpdst = htonl(*(const uint32_t *)tmpsrc);
798		tmpsrc += 4;
799		tmpdst += 4;
800		tmplen -= 4;
801	}
802
803	return (0);
804}
805
/*
 * Load firmware image into the NIC. Check that the firmware revision
 * is acceptable and see if we want the firmware for the Tigon 1 or
 * Tigon 2.  The firmware images are compiled into the driver
 * (ti_fw.h / ti_fw2.h); on a version mismatch or unknown hardware
 * revision a diagnostic is printed and nothing is loaded.
 * Must be called with the softc lock held.
 */
static void
ti_loadfw(struct ti_softc *sc)
{

	TI_LOCK_ASSERT(sc);

	switch (sc->ti_hwrev) {
	case TI_HWREV_TIGON:
		if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR ||
		    tigonFwReleaseMinor != TI_FIRMWARE_MINOR ||
		    tigonFwReleaseFix != TI_FIRMWARE_FIX) {
			device_printf(sc->ti_dev, "firmware revision mismatch; "
			    "want %d.%d.%d, got %d.%d.%d\n",
			    TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
			    TI_FIRMWARE_FIX, tigonFwReleaseMajor,
			    tigonFwReleaseMinor, tigonFwReleaseFix);
			return;
		}
		/* Copy text/data/rodata in, zero bss/sbss, set entry point. */
		ti_mem_write(sc, tigonFwTextAddr, tigonFwTextLen, tigonFwText);
		ti_mem_write(sc, tigonFwDataAddr, tigonFwDataLen, tigonFwData);
		ti_mem_write(sc, tigonFwRodataAddr, tigonFwRodataLen,
		    tigonFwRodata);
		ti_mem_zero(sc, tigonFwBssAddr, tigonFwBssLen);
		ti_mem_zero(sc, tigonFwSbssAddr, tigonFwSbssLen);
		CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr);
		break;
	case TI_HWREV_TIGON_II:
		if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR ||
		    tigon2FwReleaseMinor != TI_FIRMWARE_MINOR ||
		    tigon2FwReleaseFix != TI_FIRMWARE_FIX) {
			device_printf(sc->ti_dev, "firmware revision mismatch; "
			    "want %d.%d.%d, got %d.%d.%d\n",
			    TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
			    TI_FIRMWARE_FIX, tigon2FwReleaseMajor,
			    tigon2FwReleaseMinor, tigon2FwReleaseFix);
			return;
		}
		ti_mem_write(sc, tigon2FwTextAddr, tigon2FwTextLen,
		    tigon2FwText);
		ti_mem_write(sc, tigon2FwDataAddr, tigon2FwDataLen,
		    tigon2FwData);
		ti_mem_write(sc, tigon2FwRodataAddr, tigon2FwRodataLen,
		    tigon2FwRodata);
		ti_mem_zero(sc, tigon2FwBssAddr, tigon2FwBssLen);
		ti_mem_zero(sc, tigon2FwSbssAddr, tigon2FwSbssLen);
		CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr);
		break;
	default:
		device_printf(sc->ti_dev,
		    "can't load firmware: unknown hardware rev\n");
		break;
	}
}
864
865/*
866 * Send the NIC a command via the command ring.
867 */
868static void
869ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd)
870{
871	int index;
872
873	index = sc->ti_cmd_saved_prodidx;
874	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd));
875	TI_INC(index, TI_CMD_RING_CNT);
876	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
877	sc->ti_cmd_saved_prodidx = index;
878}
879
880/*
881 * Send the NIC an extended command. The 'len' parameter specifies the
882 * number of command slots to include after the initial command.
883 */
884static void
885ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, caddr_t arg, int len)
886{
887	int index;
888	int i;
889
890	index = sc->ti_cmd_saved_prodidx;
891	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd));
892	TI_INC(index, TI_CMD_RING_CNT);
893	for (i = 0; i < len; i++) {
894		CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4),
895		    *(uint32_t *)(&arg[i * 4]));
896		TI_INC(index, TI_CMD_RING_CNT);
897	}
898	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
899	sc->ti_cmd_saved_prodidx = index;
900}
901
/*
 * Handle events that have triggered interrupts.
 * Drains the event ring, processing each descriptor until the saved
 * consumer index catches up with the producer index reported by the
 * NIC, acknowledging each event by advancing the consumer register.
 */
static void
ti_handle_events(struct ti_softc *sc)
{
	struct ti_event_desc *e;

	/* The event ring may not be set up yet. */
	if (sc->ti_rdata->ti_event_ring == NULL)
		return;

	while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) {
		e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx];
		switch (TI_EVENT_EVENT(e)) {
		case TI_EV_LINKSTAT_CHANGED:
			/* Mirror the firmware's link state into the ifnet. */
			sc->ti_linkstat = TI_EVENT_CODE(e);
			if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
				if_link_state_change(sc->ti_ifp, LINK_STATE_UP);
				sc->ti_ifp->if_baudrate = IF_Mbps(100);
				if (bootverbose)
					device_printf(sc->ti_dev,
					    "10/100 link up\n");
			} else if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
				if_link_state_change(sc->ti_ifp, LINK_STATE_UP);
				sc->ti_ifp->if_baudrate = IF_Gbps(1UL);
				if (bootverbose)
					device_printf(sc->ti_dev,
					    "gigabit link up\n");
			} else if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) {
				if_link_state_change(sc->ti_ifp,
				    LINK_STATE_DOWN);
				sc->ti_ifp->if_baudrate = 0;
				if (bootverbose)
					device_printf(sc->ti_dev,
					    "link down\n");
			}
			break;
		case TI_EV_ERROR:
			/* The firmware rejected something we sent it. */
			if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_INVAL_CMD)
				device_printf(sc->ti_dev, "invalid command\n");
			else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_UNIMP_CMD)
				device_printf(sc->ti_dev, "unknown command\n");
			else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_BADCFG)
				device_printf(sc->ti_dev, "bad config data\n");
			break;
		case TI_EV_FIRMWARE_UP:
			/* Firmware finished booting; run second-stage init. */
			ti_init2(sc);
			break;
		case TI_EV_STATS_UPDATED:
			ti_stats_update(sc);
			break;
		case TI_EV_RESET_JUMBO_RING:
		case TI_EV_MCAST_UPDATED:
			/* Who cares. */
			break;
		default:
			device_printf(sc->ti_dev, "unknown event: %d\n",
			    TI_EVENT_EVENT(e));
			break;
		}
		/* Advance the consumer index. */
		TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT);
		CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx);
	}
}
967
968static int
969ti_alloc_dmamaps(struct ti_softc *sc)
970{
971	int i;
972
973	for (i = 0; i < TI_TX_RING_CNT; i++) {
974		sc->ti_cdata.ti_txdesc[i].tx_m = NULL;
975		sc->ti_cdata.ti_txdesc[i].tx_dmamap = NULL;
976		if (bus_dmamap_create(sc->ti_mbuftx_dmat, 0,
977		    &sc->ti_cdata.ti_txdesc[i].tx_dmamap)) {
978			device_printf(sc->ti_dev,
979			    "cannot create DMA map for TX\n");
980			return (ENOBUFS);
981		}
982	}
983	for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
984		if (bus_dmamap_create(sc->ti_mbufrx_dmat, 0,
985		    &sc->ti_cdata.ti_rx_std_maps[i])) {
986			device_printf(sc->ti_dev,
987			    "cannot create DMA map for RX\n");
988			return (ENOBUFS);
989		}
990	}
991	if (bus_dmamap_create(sc->ti_mbufrx_dmat, 0,
992	    &sc->ti_cdata.ti_rx_std_sparemap)) {
993		device_printf(sc->ti_dev,
994		    "cannot create spare DMA map for RX\n");
995		return (ENOBUFS);
996	}
997
998	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
999		if (bus_dmamap_create(sc->ti_jumbo_dmat, 0,
1000		    &sc->ti_cdata.ti_rx_jumbo_maps[i])) {
1001			device_printf(sc->ti_dev,
1002			    "cannot create DMA map for jumbo RX\n");
1003			return (ENOBUFS);
1004		}
1005	}
1006	if (bus_dmamap_create(sc->ti_jumbo_dmat, 0,
1007	    &sc->ti_cdata.ti_rx_jumbo_sparemap)) {
1008		device_printf(sc->ti_dev,
1009		    "cannot create spare DMA map for jumbo RX\n");
1010		return (ENOBUFS);
1011	}
1012
1013	/* Mini ring is not available on Tigon 1. */
1014	if (sc->ti_hwrev == TI_HWREV_TIGON)
1015		return (0);
1016
1017	for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
1018		if (bus_dmamap_create(sc->ti_mbufrx_dmat, 0,
1019		    &sc->ti_cdata.ti_rx_mini_maps[i])) {
1020			device_printf(sc->ti_dev,
1021			    "cannot create DMA map for mini RX\n");
1022			return (ENOBUFS);
1023		}
1024	}
1025	if (bus_dmamap_create(sc->ti_mbufrx_dmat, 0,
1026	    &sc->ti_cdata.ti_rx_mini_sparemap)) {
1027		device_printf(sc->ti_dev,
1028		    "cannot create DMA map for mini RX\n");
1029		return (ENOBUFS);
1030	}
1031
1032	return (0);
1033}
1034
1035static void
1036ti_free_dmamaps(struct ti_softc *sc)
1037{
1038	int i;
1039
1040	if (sc->ti_mbuftx_dmat) {
1041		for (i = 0; i < TI_TX_RING_CNT; i++) {
1042			if (sc->ti_cdata.ti_txdesc[i].tx_dmamap) {
1043				bus_dmamap_destroy(sc->ti_mbuftx_dmat,
1044				    sc->ti_cdata.ti_txdesc[i].tx_dmamap);
1045				sc->ti_cdata.ti_txdesc[i].tx_dmamap = NULL;
1046			}
1047		}
1048	}
1049
1050	if (sc->ti_mbufrx_dmat) {
1051		for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
1052			if (sc->ti_cdata.ti_rx_std_maps[i]) {
1053				bus_dmamap_destroy(sc->ti_mbufrx_dmat,
1054				    sc->ti_cdata.ti_rx_std_maps[i]);
1055				sc->ti_cdata.ti_rx_std_maps[i] = NULL;
1056			}
1057		}
1058		if (sc->ti_cdata.ti_rx_std_sparemap) {
1059			bus_dmamap_destroy(sc->ti_mbufrx_dmat,
1060			    sc->ti_cdata.ti_rx_std_sparemap);
1061			sc->ti_cdata.ti_rx_std_sparemap = NULL;
1062		}
1063	}
1064
1065	if (sc->ti_jumbo_dmat) {
1066		for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
1067			if (sc->ti_cdata.ti_rx_jumbo_maps[i]) {
1068				bus_dmamap_destroy(sc->ti_jumbo_dmat,
1069				    sc->ti_cdata.ti_rx_jumbo_maps[i]);
1070				sc->ti_cdata.ti_rx_jumbo_maps[i] = NULL;
1071			}
1072		}
1073		if (sc->ti_cdata.ti_rx_jumbo_sparemap) {
1074			bus_dmamap_destroy(sc->ti_jumbo_dmat,
1075			    sc->ti_cdata.ti_rx_jumbo_sparemap);
1076			sc->ti_cdata.ti_rx_jumbo_sparemap = NULL;
1077		}
1078	}
1079
1080	if (sc->ti_mbufrx_dmat) {
1081		for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
1082			if (sc->ti_cdata.ti_rx_mini_maps[i]) {
1083				bus_dmamap_destroy(sc->ti_mbufrx_dmat,
1084				    sc->ti_cdata.ti_rx_mini_maps[i]);
1085				sc->ti_cdata.ti_rx_mini_maps[i] = NULL;
1086			}
1087		}
1088		if (sc->ti_cdata.ti_rx_mini_sparemap) {
1089			bus_dmamap_destroy(sc->ti_mbufrx_dmat,
1090			    sc->ti_cdata.ti_rx_mini_sparemap);
1091			sc->ti_cdata.ti_rx_mini_sparemap = NULL;
1092		}
1093	}
1094}
1095
1096#ifndef TI_SF_BUF_JUMBO
1097
1098static int
1099ti_alloc_jumbo_mem(struct ti_softc *sc)
1100{
1101
1102	if (bus_dma_tag_create(sc->ti_parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
1103	    BUS_SPACE_MAXADDR, NULL, NULL, MJUM9BYTES, 1, MJUM9BYTES, 0, NULL,
1104	    NULL, &sc->ti_jumbo_dmat) != 0) {
1105		device_printf(sc->ti_dev, "Failed to allocate jumbo dmat\n");
1106                return (ENOBUFS);
1107	}
1108	return (0);
1109}
1110
1111#else
1112
1113static int
1114ti_alloc_jumbo_mem(struct ti_softc *sc)
1115{
1116
1117	/*
1118	 * The VM system will take care of providing aligned pages.  Alignment
1119	 * is set to 1 here so that busdma resources won't be wasted.
1120	 */
1121	if (bus_dma_tag_create(sc->ti_parent_dmat,	/* parent */
1122				1, 0,			/* algnmnt, boundary */
1123				BUS_SPACE_MAXADDR,	/* lowaddr */
1124				BUS_SPACE_MAXADDR,	/* highaddr */
1125				NULL, NULL,		/* filter, filterarg */
1126				PAGE_SIZE * 4 /*XXX*/,	/* maxsize */
1127				4,			/* nsegments */
1128				PAGE_SIZE,		/* maxsegsize */
1129				0,			/* flags */
1130				NULL, NULL,		/* lockfunc, lockarg */
1131				&sc->ti_jumbo_dmat) != 0) {
1132		device_printf(sc->ti_dev, "Failed to allocate jumbo dmat\n");
1133		return (ENOBUFS);
1134	}
1135
1136	return (0);
1137}
1138
1139#endif /* TI_SF_BUF_JUMBO */
1140
1141/*
 * Initialize a standard receive ring descriptor.
1143 */
static int
ti_newbuf_std(struct ti_softc *sc, int i)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[1];
	struct mbuf *m;
	struct ti_rx_desc *r;
	int error, nsegs;

	/* Allocate a packet-header mbuf with a 2K cluster for this slot. */
	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Offset the payload so the IP header ends up 32-bit aligned. */
	m_adj(m, ETHER_ALIGN);

	/*
	 * Load the new mbuf into the spare map first; the slot's current
	 * map and mbuf stay intact until the load is known to succeed.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->ti_mbufrx_dmat,
	    sc->ti_cdata.ti_rx_std_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
        }
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	/* If the slot held an old mbuf, finish its DMA and unload it. */
	if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
		bus_dmamap_sync(sc->ti_mbufrx_dmat,
		    sc->ti_cdata.ti_rx_std_maps[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->ti_mbufrx_dmat,
		    sc->ti_cdata.ti_rx_std_maps[i]);
	}

	/* Swap the loaded spare map into the slot; the old map is the new spare. */
	map = sc->ti_cdata.ti_rx_std_maps[i];
	sc->ti_cdata.ti_rx_std_maps[i] = sc->ti_cdata.ti_rx_std_sparemap;
	sc->ti_cdata.ti_rx_std_sparemap = map;
	sc->ti_cdata.ti_rx_std_chain[i] = m;

	/* Fill in the hardware receive descriptor for this slot. */
	r = &sc->ti_rdata->ti_rx_std_ring[i];
	ti_hostaddr64(&r->ti_addr, segs[0].ds_addr);
	r->ti_len = segs[0].ds_len;
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = 0;
	r->ti_vlan_tag = 0;
	r->ti_tcp_udp_cksum = 0;
	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_idx = i;

	bus_dmamap_sync(sc->ti_mbufrx_dmat, sc->ti_cdata.ti_rx_std_maps[i],
	    BUS_DMASYNC_PREREAD);
	return (0);
}
1194
1195/*
 * Initialize a mini receive ring descriptor. This only applies to
1197 * the Tigon 2.
1198 */
static int
ti_newbuf_mini(struct ti_softc *sc, int i)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[1];
	struct mbuf *m;
	struct ti_rx_desc *r;
	int error, nsegs;

	/* A plain header mbuf (no cluster) is enough for the mini ring. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MHLEN;
	/* Offset the payload so the IP header ends up 32-bit aligned. */
	m_adj(m, ETHER_ALIGN);

	/*
	 * Load into the spare map first; the slot's current map and mbuf
	 * stay intact until the load is known to succeed.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->ti_mbufrx_dmat,
	    sc->ti_cdata.ti_rx_mini_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
        }
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	/* If the slot held an old mbuf, finish its DMA and unload it. */
	if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
		bus_dmamap_sync(sc->ti_mbufrx_dmat,
		    sc->ti_cdata.ti_rx_mini_maps[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->ti_mbufrx_dmat,
		    sc->ti_cdata.ti_rx_mini_maps[i]);
	}

	/* Swap the loaded spare map into the slot; old map becomes the spare. */
	map = sc->ti_cdata.ti_rx_mini_maps[i];
	sc->ti_cdata.ti_rx_mini_maps[i] = sc->ti_cdata.ti_rx_mini_sparemap;
	sc->ti_cdata.ti_rx_mini_sparemap = map;
	sc->ti_cdata.ti_rx_mini_chain[i] = m;

	/* Fill in the hardware receive descriptor for this slot. */
	r = &sc->ti_rdata->ti_rx_mini_ring[i];
	ti_hostaddr64(&r->ti_addr, segs[0].ds_addr);
	r->ti_len = segs[0].ds_len;
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = TI_BDFLAG_MINI_RING;
	r->ti_vlan_tag = 0;
	r->ti_tcp_udp_cksum = 0;
	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_idx = i;

	bus_dmamap_sync(sc->ti_mbufrx_dmat, sc->ti_cdata.ti_rx_mini_maps[i],
	    BUS_DMASYNC_PREREAD);
	return (0);
}
1249
1250#ifndef TI_SF_BUF_JUMBO
1251
1252/*
1253 * Initialize a jumbo receive ring descriptor. This allocates
1254 * a jumbo buffer from the pool managed internally by the driver.
1255 */
static int
ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *dummy)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[1];
	struct mbuf *m;
	struct ti_rx_desc *r;
	int error, nsegs;

	/* 'dummy' exists only to match the sf_buf variant's signature. */
	(void)dummy;

	/* Allocate a 9K jumbo cluster mbuf for this slot. */
	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	/* Offset the payload so the IP header ends up 32-bit aligned. */
	m_adj(m, ETHER_ALIGN);

	/*
	 * Load into the spare map first; the slot's current map and mbuf
	 * stay intact until the load is known to succeed.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->ti_jumbo_dmat,
	    sc->ti_cdata.ti_rx_jumbo_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
        }
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	/* If the slot held an old mbuf, finish its DMA and unload it. */
	if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
		bus_dmamap_sync(sc->ti_jumbo_dmat,
		    sc->ti_cdata.ti_rx_jumbo_maps[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->ti_jumbo_dmat,
		    sc->ti_cdata.ti_rx_jumbo_maps[i]);
	}

	/* Swap the loaded spare map into the slot; old map becomes the spare. */
	map = sc->ti_cdata.ti_rx_jumbo_maps[i];
	sc->ti_cdata.ti_rx_jumbo_maps[i] = sc->ti_cdata.ti_rx_jumbo_sparemap;
	sc->ti_cdata.ti_rx_jumbo_sparemap = map;
	sc->ti_cdata.ti_rx_jumbo_chain[i] = m;

	/* Fill in the hardware receive descriptor for this slot. */
	r = &sc->ti_rdata->ti_rx_jumbo_ring[i];
	ti_hostaddr64(&r->ti_addr, segs[0].ds_addr);
	r->ti_len = segs[0].ds_len;
	r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
	r->ti_flags = TI_BDFLAG_JUMBO_RING;
	r->ti_vlan_tag = 0;
	r->ti_tcp_udp_cksum = 0;
	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_idx = i;

	bus_dmamap_sync(sc->ti_jumbo_dmat, sc->ti_cdata.ti_rx_jumbo_maps[i],
	    BUS_DMASYNC_PREREAD);
	return (0);
}
1308
1309#else
1310
/*
 * Number of page-sized payload buffers chained to each sf_buf-backed
 * jumbo RX slot: two on 4K-page systems, one otherwise.
 */
#if (PAGE_SIZE == 4096)
#define NPAYLOAD 2
#else
#define NPAYLOAD 1
#endif

/*
 * Length given to the first (header) mbuf of a jumbo RX chain when
 * header splitting is disabled.  NOTE(review): 52 presumably covers an
 * IP header plus a TCP header with common options -- confirm against
 * the Tigon firmware's header-split behavior.
 */
#define TCP_HDR_LEN (52 + sizeof(struct ether_header))
#define UDP_HDR_LEN (28 + sizeof(struct ether_header))
#define NFS_HDR_LEN (UDP_HDR_LEN)
static int HDR_LEN = TCP_HDR_LEN;
1321
1322/*
1323 * Initialize a jumbo receive ring descriptor. This allocates
1324 * a jumbo buffer from the pool managed internally by the driver.
1325 */
1326static int
1327ti_newbuf_jumbo(struct ti_softc *sc, int idx, struct mbuf *m_old)
1328{
1329	bus_dmamap_t map;
1330	struct mbuf *cur, *m_new = NULL;
1331	struct mbuf *m[3] = {NULL, NULL, NULL};
1332	struct ti_rx_desc_ext *r;
1333	vm_page_t frame;
1334	static int color;
1335	/* 1 extra buf to make nobufs easy*/
1336	struct sf_buf *sf[3] = {NULL, NULL, NULL};
1337	int i;
1338	bus_dma_segment_t segs[4];
1339	int nsegs;
1340
1341	if (m_old != NULL) {
1342		m_new = m_old;
1343		cur = m_old->m_next;
1344		for (i = 0; i <= NPAYLOAD; i++){
1345			m[i] = cur;
1346			cur = cur->m_next;
1347		}
1348	} else {
1349		/* Allocate the mbufs. */
1350		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1351		if (m_new == NULL) {
1352			device_printf(sc->ti_dev, "mbuf allocation failed "
1353			    "-- packet dropped!\n");
1354			goto nobufs;
1355		}
1356		MGET(m[NPAYLOAD], M_DONTWAIT, MT_DATA);
1357		if (m[NPAYLOAD] == NULL) {
1358			device_printf(sc->ti_dev, "cluster mbuf allocation "
1359			    "failed -- packet dropped!\n");
1360			goto nobufs;
1361		}
1362		MCLGET(m[NPAYLOAD], M_DONTWAIT);
1363		if ((m[NPAYLOAD]->m_flags & M_EXT) == 0) {
1364			device_printf(sc->ti_dev, "mbuf allocation failed "
1365			    "-- packet dropped!\n");
1366			goto nobufs;
1367		}
1368		m[NPAYLOAD]->m_len = MCLBYTES;
1369
1370		for (i = 0; i < NPAYLOAD; i++){
1371			MGET(m[i], M_DONTWAIT, MT_DATA);
1372			if (m[i] == NULL) {
1373				device_printf(sc->ti_dev, "mbuf allocation "
1374				    "failed -- packet dropped!\n");
1375				goto nobufs;
1376			}
1377			frame = vm_page_alloc(NULL, color++,
1378			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
1379			    VM_ALLOC_WIRED);
1380			if (frame == NULL) {
1381				device_printf(sc->ti_dev, "buffer allocation "
1382				    "failed -- packet dropped!\n");
1383				printf("      index %d page %d\n", idx, i);
1384				goto nobufs;
1385			}
1386			sf[i] = sf_buf_alloc(frame, SFB_NOWAIT);
1387			if (sf[i] == NULL) {
1388				vm_page_unwire(frame, 0);
1389				vm_page_free(frame);
1390				device_printf(sc->ti_dev, "buffer allocation "
1391				    "failed -- packet dropped!\n");
1392				printf("      index %d page %d\n", idx, i);
1393				goto nobufs;
1394			}
1395		}
1396		for (i = 0; i < NPAYLOAD; i++){
1397		/* Attach the buffer to the mbuf. */
1398			m[i]->m_data = (void *)sf_buf_kva(sf[i]);
1399			m[i]->m_len = PAGE_SIZE;
1400			MEXTADD(m[i], sf_buf_kva(sf[i]), PAGE_SIZE,
1401			    sf_buf_mext, (void*)sf_buf_kva(sf[i]), sf[i],
1402			    0, EXT_DISPOSABLE);
1403			m[i]->m_next = m[i+1];
1404		}
1405		/* link the buffers to the header */
1406		m_new->m_next = m[0];
1407		m_new->m_data += ETHER_ALIGN;
1408		if (sc->ti_hdrsplit)
1409			m_new->m_len = MHLEN - ETHER_ALIGN;
1410		else
1411			m_new->m_len = HDR_LEN;
1412		m_new->m_pkthdr.len = NPAYLOAD * PAGE_SIZE + m_new->m_len;
1413	}
1414
1415	/* Set up the descriptor. */
1416	r = &sc->ti_rdata->ti_rx_jumbo_ring[idx];
1417	sc->ti_cdata.ti_rx_jumbo_chain[idx] = m_new;
1418	map = sc->ti_cdata.ti_rx_jumbo_maps[i];
1419	if (bus_dmamap_load_mbuf_sg(sc->ti_jumbo_dmat, map, m_new, segs,
1420				    &nsegs, 0))
1421		return (ENOBUFS);
1422	if ((nsegs < 1) || (nsegs > 4))
1423		return (ENOBUFS);
1424	ti_hostaddr64(&r->ti_addr0, segs[0].ds_addr);
1425	r->ti_len0 = m_new->m_len;
1426
1427	ti_hostaddr64(&r->ti_addr1, segs[1].ds_addr);
1428	r->ti_len1 = PAGE_SIZE;
1429
1430	ti_hostaddr64(&r->ti_addr2, segs[2].ds_addr);
1431	r->ti_len2 = m[1]->m_ext.ext_size; /* could be PAGE_SIZE or MCLBYTES */
1432
1433	if (PAGE_SIZE == 4096) {
1434		ti_hostaddr64(&r->ti_addr3, segs[3].ds_addr);
1435		r->ti_len3 = MCLBYTES;
1436	} else {
1437		r->ti_len3 = 0;
1438	}
1439	r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
1440
1441	r->ti_flags = TI_BDFLAG_JUMBO_RING|TI_RCB_FLAG_USE_EXT_RX_BD;
1442
1443	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
1444		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM|TI_BDFLAG_IP_CKSUM;
1445
1446	r->ti_idx = idx;
1447
1448	bus_dmamap_sync(sc->ti_jumbo_dmat, map, BUS_DMASYNC_PREREAD);
1449	return (0);
1450
1451nobufs:
1452
1453	/*
1454	 * Warning! :
1455	 * This can only be called before the mbufs are strung together.
1456	 * If the mbufs are strung together, m_freem() will free the chain,
1457	 * so that the later mbufs will be freed multiple times.
1458	 */
1459	if (m_new)
1460		m_freem(m_new);
1461
1462	for (i = 0; i < 3; i++) {
1463		if (m[i])
1464			m_freem(m[i]);
1465		if (sf[i])
1466			sf_buf_mext((void *)sf_buf_kva(sf[i]), sf[i]);
1467	}
1468	return (ENOBUFS);
1469}
1470#endif
1471
1472/*
1473 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
1474 * that's 1MB or memory, which is a lot. For now, we fill only the first
1475 * 256 ring entries and hope that our CPU is fast enough to keep up with
1476 * the NIC.
1477 */
1478static int
1479ti_init_rx_ring_std(struct ti_softc *sc)
1480{
1481	int i;
1482	struct ti_cmd_desc cmd;
1483
1484	for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
1485		if (ti_newbuf_std(sc, i) != 0)
1486			return (ENOBUFS);
1487	};
1488
1489	sc->ti_std = TI_STD_RX_RING_CNT - 1;
1490	TI_UPDATE_STDPROD(sc, TI_STD_RX_RING_CNT - 1);
1491
1492	return (0);
1493}
1494
1495static void
1496ti_free_rx_ring_std(struct ti_softc *sc)
1497{
1498	bus_dmamap_t map;
1499	int i;
1500
1501	for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
1502		if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
1503			map = sc->ti_cdata.ti_rx_std_maps[i];
1504			bus_dmamap_sync(sc->ti_mbufrx_dmat, map,
1505			    BUS_DMASYNC_POSTREAD);
1506			bus_dmamap_unload(sc->ti_mbufrx_dmat, map);
1507			m_freem(sc->ti_cdata.ti_rx_std_chain[i]);
1508			sc->ti_cdata.ti_rx_std_chain[i] = NULL;
1509		}
1510		bzero((char *)&sc->ti_rdata->ti_rx_std_ring[i],
1511		    sizeof(struct ti_rx_desc));
1512	}
1513}
1514
1515static int
1516ti_init_rx_ring_jumbo(struct ti_softc *sc)
1517{
1518	struct ti_cmd_desc cmd;
1519	int i;
1520
1521	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
1522		if (ti_newbuf_jumbo(sc, i, NULL) != 0)
1523			return (ENOBUFS);
1524	};
1525
1526	sc->ti_jumbo = TI_JUMBO_RX_RING_CNT - 1;
1527	TI_UPDATE_JUMBOPROD(sc, TI_JUMBO_RX_RING_CNT - 1);
1528
1529	return (0);
1530}
1531
1532static void
1533ti_free_rx_ring_jumbo(struct ti_softc *sc)
1534{
1535	bus_dmamap_t map;
1536	int i;
1537
1538	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
1539		if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
1540			map = sc->ti_cdata.ti_rx_jumbo_maps[i];
1541			bus_dmamap_sync(sc->ti_jumbo_dmat, map,
1542			    BUS_DMASYNC_POSTREAD);
1543			bus_dmamap_unload(sc->ti_jumbo_dmat, map);
1544			m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]);
1545			sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL;
1546		}
1547		bzero((char *)&sc->ti_rdata->ti_rx_jumbo_ring[i],
1548		    sizeof(struct ti_rx_desc));
1549	}
1550}
1551
1552static int
1553ti_init_rx_ring_mini(struct ti_softc *sc)
1554{
1555	int i;
1556
1557	for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
1558		if (ti_newbuf_mini(sc, i) != 0)
1559			return (ENOBUFS);
1560	};
1561
1562	sc->ti_mini = TI_MINI_RX_RING_CNT - 1;
1563	TI_UPDATE_MINIPROD(sc, TI_MINI_RX_RING_CNT - 1);
1564
1565	return (0);
1566}
1567
1568static void
1569ti_free_rx_ring_mini(struct ti_softc *sc)
1570{
1571	bus_dmamap_t map;
1572	int i;
1573
1574	for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
1575		if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
1576			map = sc->ti_cdata.ti_rx_mini_maps[i];
1577			bus_dmamap_sync(sc->ti_mbufrx_dmat, map,
1578			    BUS_DMASYNC_POSTREAD);
1579			bus_dmamap_unload(sc->ti_mbufrx_dmat, map);
1580			m_freem(sc->ti_cdata.ti_rx_mini_chain[i]);
1581			sc->ti_cdata.ti_rx_mini_chain[i] = NULL;
1582		}
1583		bzero((char *)&sc->ti_rdata->ti_rx_mini_ring[i],
1584		    sizeof(struct ti_rx_desc));
1585	}
1586}
1587
1588static void
1589ti_free_tx_ring(struct ti_softc *sc)
1590{
1591	struct ti_txdesc *txd;
1592	int i;
1593
1594	if (sc->ti_rdata->ti_tx_ring == NULL)
1595		return;
1596
1597	for (i = 0; i < TI_TX_RING_CNT; i++) {
1598		txd = &sc->ti_cdata.ti_txdesc[i];
1599		if (txd->tx_m != NULL) {
1600			bus_dmamap_sync(sc->ti_mbuftx_dmat, txd->tx_dmamap,
1601			    BUS_DMASYNC_POSTWRITE);
1602			bus_dmamap_unload(sc->ti_mbuftx_dmat, txd->tx_dmamap);
1603			m_freem(txd->tx_m);
1604			txd->tx_m = NULL;
1605		}
1606		bzero((char *)&sc->ti_rdata->ti_tx_ring[i],
1607		    sizeof(struct ti_tx_desc));
1608	}
1609}
1610
1611static int
1612ti_init_tx_ring(struct ti_softc *sc)
1613{
1614	struct ti_txdesc *txd;
1615	int i;
1616
1617	STAILQ_INIT(&sc->ti_cdata.ti_txfreeq);
1618	STAILQ_INIT(&sc->ti_cdata.ti_txbusyq);
1619	for (i = 0; i < TI_TX_RING_CNT; i++) {
1620		txd = &sc->ti_cdata.ti_txdesc[i];
1621		STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txfreeq, txd, tx_q);
1622	}
1623	sc->ti_txcnt = 0;
1624	sc->ti_tx_saved_considx = 0;
1625	sc->ti_tx_saved_prodidx = 0;
1626	CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0);
1627	return (0);
1628}
1629
1630/*
1631 * The Tigon 2 firmware has a new way to add/delete multicast addresses,
1632 * but we have to support the old way too so that Tigon 1 cards will
1633 * work.
1634 */
1635static void
1636ti_add_mcast(struct ti_softc *sc, struct ether_addr *addr)
1637{
1638	struct ti_cmd_desc cmd;
1639	uint16_t *m;
1640	uint32_t ext[2] = {0, 0};
1641
1642	m = (uint16_t *)&addr->octet[0];
1643
1644	switch (sc->ti_hwrev) {
1645	case TI_HWREV_TIGON:
1646		CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
1647		CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
1648		TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0);
1649		break;
1650	case TI_HWREV_TIGON_II:
1651		ext[0] = htons(m[0]);
1652		ext[1] = (htons(m[1]) << 16) | htons(m[2]);
1653		TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2);
1654		break;
1655	default:
1656		device_printf(sc->ti_dev, "unknown hwrev\n");
1657		break;
1658	}
1659}
1660
1661static void
1662ti_del_mcast(struct ti_softc *sc, struct ether_addr *addr)
1663{
1664	struct ti_cmd_desc cmd;
1665	uint16_t *m;
1666	uint32_t ext[2] = {0, 0};
1667
1668	m = (uint16_t *)&addr->octet[0];
1669
1670	switch (sc->ti_hwrev) {
1671	case TI_HWREV_TIGON:
1672		CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
1673		CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
1674		TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0);
1675		break;
1676	case TI_HWREV_TIGON_II:
1677		ext[0] = htons(m[0]);
1678		ext[1] = (htons(m[1]) << 16) | htons(m[2]);
1679		TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2);
1680		break;
1681	default:
1682		device_printf(sc->ti_dev, "unknown hwrev\n");
1683		break;
1684	}
1685}
1686
1687/*
1688 * Configure the Tigon's multicast address filter.
1689 *
1690 * The actual multicast table management is a bit of a pain, thanks to
1691 * slight brain damage on the part of both Alteon and us. With our
1692 * multicast code, we are only alerted when the multicast address table
1693 * changes and at that point we only have the current list of addresses:
1694 * we only know the current state, not the previous state, so we don't
1695 * actually know what addresses were removed or added. The firmware has
1696 * state, but we can't get our grubby mits on it, and there is no 'delete
1697 * all multicast addresses' command. Hence, we have to maintain our own
1698 * state so we know what addresses have been programmed into the NIC at
1699 * any given time.
1700 */
static void
ti_setmulti(struct ti_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	struct ti_cmd_desc cmd;
	struct ti_mc_entry *mc;
	uint32_t intrs;

	TI_LOCK_ASSERT(sc);

	ifp = sc->ti_ifp;

	/* ALLMULTI short-circuits the per-address filter entirely. */
	if (ifp->if_flags & IFF_ALLMULTI) {
		TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0);
		return;
	} else {
		TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0);
	}

	/*
	 * Disable interrupts while the filter is reprogrammed.
	 * NOTE(review): writing 1 to TI_MB_HOSTINTR appears to mask the
	 * interrupt mailbox; the previous value is restored below.
	 */
	intrs = CSR_READ_4(sc, TI_MB_HOSTINTR);
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	/* First, zot all the existing filters. */
	while (SLIST_FIRST(&sc->ti_mc_listhead) != NULL) {
		mc = SLIST_FIRST(&sc->ti_mc_listhead);
		ti_del_mcast(sc, &mc->mc_addr);
		SLIST_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries);
		free(mc, M_DEVBUF);
	}

	/*
	 * Now program new ones, keeping a software copy of each entry so
	 * the next reprogram knows what to delete (the firmware has no
	 * "delete all" command).
	 */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF, M_NOWAIT);
		if (mc == NULL) {
			device_printf(sc->ti_dev,
			    "no memory for mcast filter entry\n");
			continue;
		}
		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    (char *)&mc->mc_addr, ETHER_ADDR_LEN);
		SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries);
		ti_add_mcast(sc, &mc->mc_addr);
	}
	if_maddr_runlock(ifp);

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs);
}
1754
1755/*
1756 * Check to see if the BIOS has configured us for a 64 bit slot when
1757 * we aren't actually in one. If we detect this condition, we can work
1758 * around it on the Tigon 2 by setting a bit in the PCI state register,
1759 * but for the Tigon 1 we must give up and abort the interface attach.
1760 */
1761static int
1762ti_64bitslot_war(struct ti_softc *sc)
1763{
1764
1765	if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) {
1766		CSR_WRITE_4(sc, 0x600, 0);
1767		CSR_WRITE_4(sc, 0x604, 0);
1768		CSR_WRITE_4(sc, 0x600, 0x5555AAAA);
1769		if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) {
1770			if (sc->ti_hwrev == TI_HWREV_TIGON)
1771				return (EINVAL);
1772			else {
1773				TI_SETBIT(sc, TI_PCI_STATE,
1774				    TI_PCISTATE_32BIT_BUS);
1775				return (0);
1776			}
1777		}
1778	}
1779
1780	return (0);
1781}
1782
1783/*
1784 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1785 * self-test results.
1786 */
static int
ti_chipinit(struct ti_softc *sc)
{
	uint32_t cacheline;
	uint32_t pci_writemax = 0;
	uint32_t hdrsplit;

	/* Initialize link to down state. */
	sc->ti_linkstat = TI_EV_CODE_LINK_DOWN;

	/* Set endianness before we access any non-PCI registers. */
#if 0 && BYTE_ORDER == BIG_ENDIAN
	CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
	    TI_MHC_BIGENDIAN_INIT | (TI_MHC_BIGENDIAN_INIT << 24));
#else
	CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
	    TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24));
#endif

	/* Check the ROM failed bit to see if self-tests passed. */
	if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) {
		device_printf(sc->ti_dev, "board self-diagnostics failed!\n");
		return (ENODEV);
	}

	/* Halt the CPU before loading firmware / reprogramming registers. */
	TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT);

	/* Figure out the hardware revision. */
	switch (CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK) {
	case TI_REV_TIGON_I:
		sc->ti_hwrev = TI_HWREV_TIGON;
		break;
	case TI_REV_TIGON_II:
		sc->ti_hwrev = TI_HWREV_TIGON_II;
		break;
	default:
		device_printf(sc->ti_dev, "unsupported chip revision\n");
		return (ENODEV);
	}

	/* Do special setup for Tigon 2: halt CPU B, configure SRAM. */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
		TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT);
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_512K);
		TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS);
	}

	/*
	 * We don't have firmware source for the Tigon 1, so Tigon 1 boards
	 * can't do header splitting.
	 */
#ifdef TI_JUMBO_HDRSPLIT
	if (sc->ti_hwrev != TI_HWREV_TIGON)
		sc->ti_hdrsplit = 1;
	else
		device_printf(sc->ti_dev,
		    "can't do header splitting on a Tigon I board\n");
#endif /* TI_JUMBO_HDRSPLIT */

	/* Set up the PCI state register. */
	CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD|TI_PCI_WRITE_CMD);
	if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
		TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT);
	}

	/* Clear the read/write max DMA parameters. */
	TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA|
	    TI_PCISTATE_READ_MAXDMA));

	/*
	 * Get cache line size from the low byte of the BIST register.
	 * NOTE(review): units are presumably 32-bit words, per PCI
	 * convention -- confirm against the Tigon manual.
	 */
	cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF;

	/*
	 * If the system has enabled the PCI memory write
	 * and invalidate command in the command register, set
	 * the write max parameter accordingly. This is necessary
	 * to use MWI with the Tigon 2.
	 */
	if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCIM_CMD_MWIEN) {
		switch (cacheline) {
		case 1:
		case 4:
		case 8:
		case 16:
		case 32:
		case 64:
			break;
		default:
		/* Disable PCI memory write and invalidate. */
			if (bootverbose)
				device_printf(sc->ti_dev, "cache line size %d"
				    " not supported; disabling PCI MWI\n",
				    cacheline);
			CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc,
			    TI_PCI_CMDSTAT) & ~PCIM_CMD_MWIEN);
			break;
		}
	}

	TI_SETBIT(sc, TI_PCI_STATE, pci_writemax);

	/* This sets the min dma param all the way up (0xff). */
	TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA);

	if (sc->ti_hdrsplit)
		hdrsplit = TI_OPMODE_JUMBO_HDRSPLIT;
	else
		hdrsplit = 0;

	/* Configure DMA variables. */
#if BYTE_ORDER == BIG_ENDIAN
	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_BD |
	    TI_OPMODE_BYTESWAP_DATA | TI_OPMODE_WORDSWAP_BD |
	    TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB |
	    TI_OPMODE_DONT_FRAG_JUMBO | hdrsplit);
#else /* BYTE_ORDER */
	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_DATA|
	    TI_OPMODE_WORDSWAP_BD|TI_OPMODE_DONT_FRAG_JUMBO|
	    TI_OPMODE_WARN_ENB|TI_OPMODE_FATAL_ENB | hdrsplit);
#endif /* BYTE_ORDER */

	/*
	 * Only allow 1 DMA channel to be active at a time.
	 * I don't think this is a good idea, but without it
	 * the firmware racks up lots of nicDmaReadRingFull
	 * errors.  This is not compatible with hardware checksums.
	 */
	if ((sc->ti_ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_RXCSUM)) == 0)
		TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE);

	/* Recommended settings from Tigon manual. */
	CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W);
	CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W);

	/* Abort if the BIOS misconfigured a Tigon 1 for a 64-bit slot. */
	if (ti_64bitslot_war(sc)) {
		device_printf(sc->ti_dev, "bios thinks we're in a 64 bit slot, "
		    "but we aren't");
		return (EINVAL);
	}

	return (0);
}
1930
1931/*
1932 * Initialize the general information block and firmware, and
1933 * start the CPU(s) running.
1934 */
static int
ti_gibinit(struct ti_softc *sc)
{
	struct ifnet *ifp;
	struct ti_rcb *rcb;
	uint32_t rdphys;
	int i;

	TI_LOCK_ASSERT(sc);

	ifp = sc->ti_ifp;
	rdphys = sc->ti_rdata_phys;

	/* Disable interrupts for now. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	/*
	 * Tell the chip where to find the general information block.
	 * While this struct could go into >4GB memory, we allocate it in a
	 * single slab with the other descriptors, and those don't seem to
	 * support being located in a 64-bit region.
	 */
	CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0);
	CSR_WRITE_4(sc, TI_GCR_GENINFO_LO, rdphys + TI_RD_OFF(ti_info));

	/* Load the firmware into SRAM. */
	ti_loadfw(sc);

	/* Set up the contents of the general info and ring control blocks. */

	/* Set up the event ring and producer pointer. */
	rcb = &sc->ti_rdata->ti_info.ti_ev_rcb;

	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_event_ring);
	rcb->ti_flags = 0;
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) =
	    rdphys + TI_RD_OFF(ti_ev_prodidx_r);
	sc->ti_ev_prodidx.ti_idx = 0;
	CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0);
	sc->ti_ev_saved_considx = 0;

	/*
	 * Set up the command ring and producer mailbox.  The command
	 * ring lives in NIC memory, not host memory.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb;

	TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING);
	rcb->ti_flags = 0;
	rcb->ti_max_len = 0;
	for (i = 0; i < TI_CMD_RING_CNT; i++) {
		CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0);
	}
	CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0);
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0);
	sc->ti_cmd_saved_prodidx = 0;

	/*
	 * Assign the address of the stats refresh buffer.
	 * We re-use the current stats buffer for this to
	 * conserve memory.
	 */
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) =
	    rdphys + TI_RD_OFF(ti_info.ti_stats);

	/* Set up the standard receive ring. */
	rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_std_ring);
	rcb->ti_max_len = TI_FRAMELEN;
	rcb->ti_flags = 0;
	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		     TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;

	/* Set up the jumbo receive ring. */
	rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_jumbo_ring);

#ifndef TI_SF_BUF_JUMBO
	rcb->ti_max_len = MJUM9BYTES - ETHER_ALIGN;
	rcb->ti_flags = 0;
#else
	/* sf_buf jumbos use extended (multi-segment) RX descriptors. */
	rcb->ti_max_len = PAGE_SIZE;
	rcb->ti_flags = TI_RCB_FLAG_USE_EXT_RX_BD;
#endif
	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		     TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;

	/*
	 * Set up the mini ring. Only activated on the
	 * Tigon 2 but the slot in the config block is
	 * still there on the Tigon 1.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_mini_ring);
	rcb->ti_max_len = MHLEN - ETHER_ALIGN;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED;
	else
		rcb->ti_flags = 0;
	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		     TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;

	/*
	 * Set up the receive return ring.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_return_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_return_ring);
	rcb->ti_flags = 0;
	rcb->ti_max_len = TI_RETURN_RING_CNT;
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) =
	    rdphys + TI_RD_OFF(ti_return_prodidx_r);

	/*
	 * Set up the tx ring. Note: for the Tigon 2, we have the option
	 * of putting the transmit ring in the host's address space and
	 * letting the chip DMA it instead of leaving the ring in the NIC's
	 * memory and accessing it through the shared memory region. We
	 * do this for the Tigon 2, but it doesn't work on the Tigon 1,
	 * so we have to revert to the shared memory scheme if we detect
	 * a Tigon 1 chip.
	 */
	CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE);
	bzero((char *)sc->ti_rdata->ti_tx_ring,
	    TI_TX_RING_CNT * sizeof(struct ti_tx_desc));
	rcb = &sc->ti_rdata->ti_info.ti_tx_rcb;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		rcb->ti_flags = 0;
	else
		rcb->ti_flags = TI_RCB_FLAG_HOST_RING;
	if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
	if (sc->ti_ifp->if_capenable & IFCAP_TXCSUM)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		     TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	rcb->ti_max_len = TI_TX_RING_CNT;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE;
	else
		TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_tx_ring);
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) =
	    rdphys + TI_RD_OFF(ti_tx_considx_r);

	/* Flush the GIB/ring setup to memory before the NIC reads it. */
	bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Set up tuneables */
#if 0
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS,
		    (sc->ti_rx_coal_ticks / 10));
	else
#endif
		CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks);
	CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks);
	CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
	CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds);
	CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds);
	CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio);

	/* Turn interrupts on. */
	CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0);
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	/* Start CPU. */
	TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP));

	return (0);
}
2109
2110static void
2111ti_rdata_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2112{
2113	struct ti_softc *sc;
2114
2115	sc = arg;
2116	if (error || nseg != 1)
2117		return;
2118
2119	/*
2120	 * All of the Tigon data structures need to live at <4GB.  This
2121	 * cast is fine since busdma was told about this constraint.
2122	 */
2123	sc->ti_rdata_phys = segs[0].ds_addr;
2124	return;
2125}
2126
2127/*
2128 * Probe for a Tigon chip. Check the PCI vendor and device IDs
2129 * against our list and return its name if we find a match.
2130 */
2131static int
2132ti_probe(device_t dev)
2133{
2134	const struct ti_type *t;
2135
2136	t = ti_devs;
2137
2138	while (t->ti_name != NULL) {
2139		if ((pci_get_vendor(dev) == t->ti_vid) &&
2140		    (pci_get_device(dev) == t->ti_did)) {
2141			device_set_desc(dev, t->ti_name);
2142			return (BUS_PROBE_DEFAULT);
2143		}
2144		t++;
2145	}
2146
2147	return (ENXIO);
2148}
2149
2150static int
2151ti_attach(device_t dev)
2152{
2153	struct ifnet *ifp;
2154	struct ti_softc *sc;
2155	int error = 0, rid;
2156	u_char eaddr[6];
2157
2158	sc = device_get_softc(dev);
2159	sc->ti_dev = dev;
2160
2161	mtx_init(&sc->ti_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
2162	    MTX_DEF);
2163	callout_init_mtx(&sc->ti_watchdog, &sc->ti_mtx, 0);
2164	ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts);
2165	ifp = sc->ti_ifp = if_alloc(IFT_ETHER);
2166	if (ifp == NULL) {
2167		device_printf(dev, "can not if_alloc()\n");
2168		error = ENOSPC;
2169		goto fail;
2170	}
2171	sc->ti_ifp->if_hwassist = TI_CSUM_FEATURES;
2172	sc->ti_ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM;
2173	sc->ti_ifp->if_capenable = sc->ti_ifp->if_capabilities;
2174
2175	/*
2176	 * Map control/status registers.
2177	 */
2178	pci_enable_busmaster(dev);
2179
2180	rid = PCIR_BAR(0);
2181	sc->ti_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2182	    RF_ACTIVE);
2183
2184	if (sc->ti_res == NULL) {
2185		device_printf(dev, "couldn't map memory\n");
2186		error = ENXIO;
2187		goto fail;
2188	}
2189
2190	sc->ti_btag = rman_get_bustag(sc->ti_res);
2191	sc->ti_bhandle = rman_get_bushandle(sc->ti_res);
2192
2193	/* Allocate interrupt */
2194	rid = 0;
2195
2196	sc->ti_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2197	    RF_SHAREABLE | RF_ACTIVE);
2198
2199	if (sc->ti_irq == NULL) {
2200		device_printf(dev, "couldn't map interrupt\n");
2201		error = ENXIO;
2202		goto fail;
2203	}
2204
2205	if (ti_chipinit(sc)) {
2206		device_printf(dev, "chip initialization failed\n");
2207		error = ENXIO;
2208		goto fail;
2209	}
2210
2211	/* Zero out the NIC's on-board SRAM. */
2212	ti_mem_zero(sc, 0x2000, 0x100000 - 0x2000);
2213
2214	/* Init again -- zeroing memory may have clobbered some registers. */
2215	if (ti_chipinit(sc)) {
2216		device_printf(dev, "chip initialization failed\n");
2217		error = ENXIO;
2218		goto fail;
2219	}
2220
2221	/*
2222	 * Get station address from the EEPROM. Note: the manual states
2223	 * that the MAC address is at offset 0x8c, however the data is
2224	 * stored as two longwords (since that's how it's loaded into
2225	 * the NIC). This means the MAC address is actually preceded
2226	 * by two zero bytes. We need to skip over those.
2227	 */
2228	if (ti_read_eeprom(sc, eaddr,
2229				TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2230		device_printf(dev, "failed to read station address\n");
2231		error = ENXIO;
2232		goto fail;
2233	}
2234
2235	/* Allocate working area for memory dump. */
2236	sc->ti_membuf = malloc(sizeof(uint8_t) * TI_WINLEN, M_DEVBUF, M_NOWAIT);
2237	sc->ti_membuf2 = malloc(sizeof(uint8_t) * TI_WINLEN, M_DEVBUF,
2238	    M_NOWAIT);
2239	if (sc->ti_membuf == NULL || sc->ti_membuf2 == NULL) {
2240		device_printf(dev, "cannot allocate memory buffer\n");
2241		error = ENOMEM;
2242		goto fail;
2243	}
2244
2245	/* Allocate the general information block and ring buffers. */
2246	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
2247				1, 0,			/* algnmnt, boundary */
2248				BUS_SPACE_MAXADDR,	/* lowaddr */
2249				BUS_SPACE_MAXADDR,	/* highaddr */
2250				NULL, NULL,		/* filter, filterarg */
2251				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
2252				0,			/* nsegments */
2253				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
2254				0,			/* flags */
2255				NULL, NULL,		/* lockfunc, lockarg */
2256				&sc->ti_parent_dmat) != 0) {
2257		device_printf(dev, "Failed to allocate parent dmat\n");
2258		error = ENOMEM;
2259		goto fail;
2260	}
2261
2262	if (bus_dma_tag_create(sc->ti_parent_dmat,	/* parent */
2263				PAGE_SIZE, 0,		/* algnmnt, boundary */
2264				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2265				BUS_SPACE_MAXADDR,	/* highaddr */
2266				NULL, NULL,		/* filter, filterarg */
2267				sizeof(struct ti_ring_data),	/* maxsize */
2268				1,			/* nsegments */
2269				sizeof(struct ti_ring_data),	/* maxsegsize */
2270				0,			/* flags */
2271				NULL, NULL,		/* lockfunc, lockarg */
2272				&sc->ti_rdata_dmat) != 0) {
2273		device_printf(dev, "Failed to allocate rdata dmat\n");
2274		error = ENOMEM;
2275		goto fail;
2276	}
2277
2278	if (bus_dmamem_alloc(sc->ti_rdata_dmat, (void**)&sc->ti_rdata,
2279			     BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2280			     &sc->ti_rdata_dmamap) != 0) {
2281		device_printf(dev, "Failed to allocate rdata memory\n");
2282		error = ENOMEM;
2283		goto fail;
2284	}
2285
2286	if (bus_dmamap_load(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
2287			    sc->ti_rdata, sizeof(struct ti_ring_data),
2288			    ti_rdata_cb, sc, BUS_DMA_NOWAIT) != 0) {
2289		device_printf(dev, "Failed to load rdata segments\n");
2290		error = ENOMEM;
2291		goto fail;
2292	}
2293
2294	bzero(sc->ti_rdata, sizeof(struct ti_ring_data));
2295
2296	/* Try to allocate memory for jumbo buffers. */
2297	if (ti_alloc_jumbo_mem(sc)) {
2298		device_printf(dev, "jumbo buffer allocation failed\n");
2299		error = ENXIO;
2300		goto fail;
2301	}
2302
2303	if (bus_dma_tag_create(sc->ti_parent_dmat,	/* parent */
2304				1, 0,			/* algnmnt, boundary */
2305				BUS_SPACE_MAXADDR,	/* lowaddr */
2306				BUS_SPACE_MAXADDR,	/* highaddr */
2307				NULL, NULL,		/* filter, filterarg */
2308				MCLBYTES * TI_MAXTXSEGS,/* maxsize */
2309				TI_MAXTXSEGS,		/* nsegments */
2310				MCLBYTES,		/* maxsegsize */
2311				0,			/* flags */
2312				NULL, NULL,		/* lockfunc, lockarg */
2313				&sc->ti_mbuftx_dmat) != 0) {
2314		device_printf(dev, "Failed to allocate rdata dmat\n");
2315		error = ENOMEM;
2316		goto fail;
2317	}
2318
2319	if (bus_dma_tag_create(sc->ti_parent_dmat,	/* parent */
2320				1, 0,			/* algnmnt, boundary */
2321				BUS_SPACE_MAXADDR,	/* lowaddr */
2322				BUS_SPACE_MAXADDR,	/* highaddr */
2323				NULL, NULL,		/* filter, filterarg */
2324				MCLBYTES,		/* maxsize */
2325				1,			/* nsegments */
2326				MCLBYTES,		/* maxsegsize */
2327				0,			/* flags */
2328				NULL, NULL,		/* lockfunc, lockarg */
2329				&sc->ti_mbufrx_dmat) != 0) {
2330		device_printf(dev, "Failed to allocate rdata dmat\n");
2331		error = ENOMEM;
2332		goto fail;
2333	}
2334
2335	if (ti_alloc_dmamaps(sc)) {
2336		error = ENXIO;
2337		goto fail;
2338	}
2339
2340	/*
2341	 * We really need a better way to tell a 1000baseTX card
2342	 * from a 1000baseSX one, since in theory there could be
2343	 * OEMed 1000baseTX cards from lame vendors who aren't
2344	 * clever enough to change the PCI ID. For the moment
2345	 * though, the AceNIC is the only copper card available.
2346	 */
2347	if (pci_get_vendor(dev) == ALT_VENDORID &&
2348	    pci_get_device(dev) == ALT_DEVICEID_ACENIC_COPPER)
2349		sc->ti_copper = 1;
2350	/* Ok, it's not the only copper card available. */
2351	if (pci_get_vendor(dev) == NG_VENDORID &&
2352	    pci_get_device(dev) == NG_DEVICEID_GA620T)
2353		sc->ti_copper = 1;
2354
2355	/* Set default tuneable values. */
2356	sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC;
2357#if 0
2358	sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000;
2359#endif
2360	sc->ti_rx_coal_ticks = 170;
2361	sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500;
2362	sc->ti_rx_max_coal_bds = 64;
2363#if 0
2364	sc->ti_tx_max_coal_bds = 128;
2365#endif
2366	sc->ti_tx_max_coal_bds = 32;
2367	sc->ti_tx_buf_ratio = 21;
2368
2369	/* Set up ifnet structure */
2370	ifp->if_softc = sc;
2371	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2372	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2373	ifp->if_ioctl = ti_ioctl;
2374	ifp->if_start = ti_start;
2375	ifp->if_init = ti_init;
2376	ifp->if_baudrate = IF_Gbps(1UL);
2377	ifp->if_snd.ifq_drv_maxlen = TI_TX_RING_CNT - 1;
2378	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2379	IFQ_SET_READY(&ifp->if_snd);
2380
2381	/* Set up ifmedia support. */
2382	if (sc->ti_copper) {
2383		/*
2384		 * Copper cards allow manual 10/100 mode selection,
2385		 * but not manual 1000baseTX mode selection. Why?
2386		 * Becuase currently there's no way to specify the
2387		 * master/slave setting through the firmware interface,
2388		 * so Alteon decided to just bag it and handle it
2389		 * via autonegotiation.
2390		 */
2391		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
2392		ifmedia_add(&sc->ifmedia,
2393		    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
2394		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
2395		ifmedia_add(&sc->ifmedia,
2396		    IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
2397		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_T, 0, NULL);
2398		ifmedia_add(&sc->ifmedia,
2399		    IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
2400	} else {
2401		/* Fiber cards don't support 10/100 modes. */
2402		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2403		ifmedia_add(&sc->ifmedia,
2404		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2405	}
2406	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2407	ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO);
2408
2409	/*
2410	 * We're assuming here that card initialization is a sequential
2411	 * thing.  If it isn't, multiple cards probing at the same time
2412	 * could stomp on the list of softcs here.
2413	 */
2414
2415	/* Register the device */
2416	sc->dev = make_dev(&ti_cdevsw, device_get_unit(dev), UID_ROOT,
2417	    GID_OPERATOR, 0600, "ti%d", device_get_unit(dev));
2418	sc->dev->si_drv1 = sc;
2419
2420	/*
2421	 * Call MI attach routine.
2422	 */
2423	ether_ifattach(ifp, eaddr);
2424
2425	/* VLAN capability setup. */
2426	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM |
2427	    IFCAP_VLAN_HWTAGGING;
2428	ifp->if_capenable = ifp->if_capabilities;
2429	/* Tell the upper layer we support VLAN over-sized frames. */
2430	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2431
2432	/* Driver supports link state tracking. */
2433	ifp->if_capabilities |= IFCAP_LINKSTATE;
2434	ifp->if_capenable |= IFCAP_LINKSTATE;
2435
2436	/* Hook interrupt last to avoid having to lock softc */
2437	error = bus_setup_intr(dev, sc->ti_irq, INTR_TYPE_NET|INTR_MPSAFE,
2438	   NULL, ti_intr, sc, &sc->ti_intrhand);
2439
2440	if (error) {
2441		device_printf(dev, "couldn't set up irq\n");
2442		goto fail;
2443	}
2444
2445fail:
2446	if (error)
2447		ti_detach(dev);
2448
2449	return (error);
2450}
2451
2452/*
2453 * Shutdown hardware and free up resources. This can be called any
2454 * time after the mutex has been initialized. It is called in both
2455 * the error case in attach and the normal detach case so it needs
2456 * to be careful about only freeing resources that have actually been
2457 * allocated.
2458 */
2459static int
2460ti_detach(device_t dev)
2461{
2462	struct ti_softc *sc;
2463	struct ifnet *ifp;
2464
2465	sc = device_get_softc(dev);
2466	if (sc->dev)
2467		destroy_dev(sc->dev);
2468	KASSERT(mtx_initialized(&sc->ti_mtx), ("ti mutex not initialized"));
2469	ifp = sc->ti_ifp;
2470	if (device_is_attached(dev)) {
2471		ether_ifdetach(ifp);
2472		TI_LOCK(sc);
2473		ti_stop(sc);
2474		TI_UNLOCK(sc);
2475	}
2476
2477	/* These should only be active if attach succeeded */
2478	callout_drain(&sc->ti_watchdog);
2479	bus_generic_detach(dev);
2480	ti_free_dmamaps(sc);
2481	ifmedia_removeall(&sc->ifmedia);
2482
2483	if (sc->ti_jumbo_dmat)
2484		bus_dma_tag_destroy(sc->ti_jumbo_dmat);
2485	if (sc->ti_mbuftx_dmat)
2486		bus_dma_tag_destroy(sc->ti_mbuftx_dmat);
2487	if (sc->ti_mbufrx_dmat)
2488		bus_dma_tag_destroy(sc->ti_mbufrx_dmat);
2489	if (sc->ti_rdata && sc->ti_rdata_dmamap)
2490		bus_dmamap_unload(sc->ti_rdata_dmat, sc->ti_rdata_dmamap);
2491	if (sc->ti_rdata)
2492		bus_dmamem_free(sc->ti_rdata_dmat, sc->ti_rdata,
2493				sc->ti_rdata_dmamap);
2494	if (sc->ti_rdata_dmat)
2495		bus_dma_tag_destroy(sc->ti_rdata_dmat);
2496	if (sc->ti_parent_dmat)
2497		bus_dma_tag_destroy(sc->ti_parent_dmat);
2498	if (sc->ti_intrhand)
2499		bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand);
2500	if (sc->ti_irq)
2501		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq);
2502	if (sc->ti_res) {
2503		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
2504		    sc->ti_res);
2505	}
2506	if (ifp)
2507		if_free(ifp);
2508	if (sc->ti_membuf)
2509		free(sc->ti_membuf, M_DEVBUF);
2510	if (sc->ti_membuf2)
2511		free(sc->ti_membuf2, M_DEVBUF);
2512
2513	mtx_destroy(&sc->ti_mtx);
2514
2515	return (0);
2516}
2517
#ifdef TI_JUMBO_HDRSPLIT
/*
 * If hdr_len is 0, that means that header splitting wasn't done on
 * this packet for some reason.  The two most likely reasons are that
 * the protocol isn't a supported protocol for splitting, or this
 * packet had a fragment offset that wasn't 0.
 *
 * The header length, if it is non-zero, will always be the length of
 * the headers on the packet, but that length could be longer than the
 * first mbuf.  So we take the minimum of the two as the actual
 * length.
 */
static __inline void
ti_hdr_split(struct mbuf *top, int hdr_len, int pkt_len, int idx)
{
	int i = 0;
	int lengths[4] = {0, 0, 0, 0};
	struct mbuf *m, *mp;

	if (hdr_len != 0)
		top->m_len = min(hdr_len, top->m_len);
	pkt_len -= top->m_len;
	lengths[i++] = top->m_len;

	mp = top;
	for (m = top->m_next; m && pkt_len; m = m->m_next) {
		m->m_len = m->m_ext.ext_size = min(m->m_len, pkt_len);
		pkt_len -= m->m_len;
		/*
		 * Bound the debug-length bookkeeping: a chain with more
		 * than 4 mbufs previously overran lengths[] on the stack.
		 */
		if (i < (int)(sizeof(lengths) / sizeof(lengths[0])))
			lengths[i++] = m->m_len;
		mp = m;
	}

#if 0
	if (hdr_len != 0)
		printf("got split packet: ");
	else
		printf("got non-split packet: ");

	printf("%d,%d,%d,%d = %d\n", lengths[0],
	    lengths[1], lengths[2], lengths[3],
	    lengths[0] + lengths[1] + lengths[2] +
	    lengths[3]);
#endif

	if (pkt_len)
		panic("header splitting didn't");

	/* Trim any wholly-unused trailing mbufs off the chain. */
	if (m) {
		m_freem(m);
		mp->m_next = NULL;

	}
	if (mp->m_next != NULL)
		panic("ti_hdr_split: last mbuf in chain should be null");
}
#endif /* TI_JUMBO_HDRSPLIT */
2574
2575static void
2576ti_discard_std(struct ti_softc *sc, int i)
2577{
2578
2579	struct ti_rx_desc *r;
2580
2581	r = &sc->ti_rdata->ti_rx_std_ring[i];
2582	r->ti_len = MCLBYTES - ETHER_ALIGN;
2583	r->ti_type = TI_BDTYPE_RECV_BD;
2584	r->ti_flags = 0;
2585	r->ti_vlan_tag = 0;
2586	r->ti_tcp_udp_cksum = 0;
2587	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
2588		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
2589	r->ti_idx = i;
2590}
2591
2592static void
2593ti_discard_mini(struct ti_softc *sc, int i)
2594{
2595
2596	struct ti_rx_desc *r;
2597
2598	r = &sc->ti_rdata->ti_rx_mini_ring[i];
2599	r->ti_len = MHLEN - ETHER_ALIGN;
2600	r->ti_type = TI_BDTYPE_RECV_BD;
2601	r->ti_flags = TI_BDFLAG_MINI_RING;
2602	r->ti_vlan_tag = 0;
2603	r->ti_tcp_udp_cksum = 0;
2604	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
2605		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
2606	r->ti_idx = i;
2607}
2608
2609#ifndef TI_SF_BUF_JUMBO
2610static void
2611ti_discard_jumbo(struct ti_softc *sc, int i)
2612{
2613
2614	struct ti_rx_desc *r;
2615
2616	r = &sc->ti_rdata->ti_rx_mini_ring[i];
2617	r->ti_len = MJUM9BYTES - ETHER_ALIGN;
2618	r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
2619	r->ti_flags = TI_BDFLAG_JUMBO_RING;
2620	r->ti_vlan_tag = 0;
2621	r->ti_tcp_udp_cksum = 0;
2622	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
2623		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
2624	r->ti_idx = i;
2625}
2626#endif
2627
2628/*
2629 * Frame reception handling. This is called if there's a frame
2630 * on the receive return list.
2631 *
 * Note: we have to be able to handle three possibilities here:
 * 1) the frame is from the mini receive ring (can only happen
 *    on Tigon 2 boards)
 * 2) the frame is from the jumbo receive ring
2636 * 3) the frame is from the standard receive ring
2637 */
2638
static void
ti_rxeof(struct ti_softc *sc)
{
	struct ifnet *ifp;
#ifdef TI_SF_BUF_JUMBO
	bus_dmamap_t map;
#endif
	struct ti_cmd_desc cmd;
	int jumbocnt, minicnt, stdcnt, ti_len;

	TI_LOCK_ASSERT(sc);

	ifp = sc->ti_ifp;

	/*
	 * Walk the return ring from our saved consumer index up to the
	 * producer index last reported by the NIC, dispatching each
	 * completed frame and refilling/recycling its RX descriptor.
	 */
	jumbocnt = minicnt = stdcnt = 0;
	while (sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) {
		struct ti_rx_desc *cur_rx;
		uint32_t rxidx;
		struct mbuf *m = NULL;
		uint16_t vlan_tag = 0;
		int have_tag = 0;

		cur_rx =
		    &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx];
		rxidx = cur_rx->ti_idx;
		ti_len = cur_rx->ti_len;
		TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT);

		if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->ti_vlan_tag;
		}

		/* Determine which of the three RX rings the frame used. */
		if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) {
			jumbocnt++;
			TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx];
#ifndef TI_SF_BUF_JUMBO
			/* On error or allocation failure, recycle the
			 * existing buffer back onto the ring. */
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_discard_jumbo(sc, rxidx);
				continue;
			}
			if (ti_newbuf_jumbo(sc, rxidx, NULL) != 0) {
				ifp->if_iqdrops++;
				ti_discard_jumbo(sc, rxidx);
				continue;
			}
			m->m_len = ti_len;
#else /* !TI_SF_BUF_JUMBO */
			sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL;
			map = sc->ti_cdata.ti_rx_jumbo_maps[rxidx];
			bus_dmamap_sync(sc->ti_jumbo_dmat, map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->ti_jumbo_dmat, map);
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
				continue;
			}
			if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL) == ENOBUFS) {
				ifp->if_iqdrops++;
				ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
				continue;
			}
#ifdef TI_JUMBO_HDRSPLIT
			if (sc->ti_hdrsplit)
				ti_hdr_split(m, TI_HOSTADDR(cur_rx->ti_addr),
					     ti_len, rxidx);
			else
#endif /* TI_JUMBO_HDRSPLIT */
			m_adj(m, ti_len - m->m_pkthdr.len);
#endif /* TI_SF_BUF_JUMBO */
		} else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) {
			minicnt++;
			TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_mini_chain[rxidx];
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_discard_mini(sc, rxidx);
				continue;
			}
			if (ti_newbuf_mini(sc, rxidx) != 0) {
				ifp->if_iqdrops++;
				ti_discard_mini(sc, rxidx);
				continue;
			}
			m->m_len = ti_len;
		} else {
			stdcnt++;
			TI_INC(sc->ti_std, TI_STD_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_std_chain[rxidx];
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_discard_std(sc, rxidx);
				continue;
			}
			if (ti_newbuf_std(sc, rxidx) != 0) {
				ifp->if_iqdrops++;
				ti_discard_std(sc, rxidx);
				continue;
			}
			m->m_len = ti_len;
		}

		m->m_pkthdr.len = ti_len;
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Translate hardware checksum results into mbuf flags. */
		if (ifp->if_capenable & IFCAP_RXCSUM) {
			if (cur_rx->ti_flags & TI_BDFLAG_IP_CKSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				/* A one's-complement sum of 0xffff is valid. */
				if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
			if (cur_rx->ti_flags & TI_BDFLAG_TCP_UDP_CKSUM) {
				m->m_pkthdr.csum_data =
				    cur_rx->ti_tcp_udp_cksum;
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
			}
		}

		/*
		 * If we received a packet with a vlan tag,
		 * tag it before passing the packet upward.
		 */
		if (have_tag) {
			m->m_pkthdr.ether_vtag = vlan_tag;
			m->m_flags |= M_VLANTAG;
		}
		/* Drop the driver lock across the call into the stack. */
		TI_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		TI_LOCK(sc);
	}

	/* Only necessary on the Tigon 1. */
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX,
		    sc->ti_rx_saved_considx);

	/* Tell the NIC about the descriptors we refilled or recycled. */
	if (stdcnt > 0)
		TI_UPDATE_STDPROD(sc, sc->ti_std);
	if (minicnt > 0)
		TI_UPDATE_MINIPROD(sc, sc->ti_mini);
	if (jumbocnt > 0)
		TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo);
}
2786
/*
 * Reclaim transmit descriptors the NIC has finished with: free the
 * transmitted mbufs, unload their DMA maps and return the software
 * descriptors to the free queue.
 */
static void
ti_txeof(struct ti_softc *sc)
{
	struct ti_txdesc *txd;
	struct ti_tx_desc txdesc;
	struct ti_tx_desc *cur_tx = NULL;
	struct ifnet *ifp;
	int idx;

	ifp = sc->ti_ifp;

	txd = STAILQ_FIRST(&sc->ti_cdata.ti_txbusyq);
	if (txd == NULL)
		return;
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	for (idx = sc->ti_tx_saved_considx; idx != sc->ti_tx_considx.ti_idx;
	    TI_INC(idx, TI_TX_RING_CNT)) {
		if (sc->ti_hwrev == TI_HWREV_TIGON) {
			/*
			 * Tigon 1 keeps the TX ring in NIC memory, so the
			 * descriptor must be copied out to inspect it.
			 */
			ti_mem_read(sc, TI_TX_RING_BASE + idx * sizeof(txdesc),
			    sizeof(txdesc), &txdesc);
			cur_tx = &txdesc;
		} else
			cur_tx = &sc->ti_rdata->ti_tx_ring[idx];
		sc->ti_txcnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* Only the last descriptor of a frame owns the mbuf. */
		if ((cur_tx->ti_flags & TI_BDFLAG_END) == 0)
			continue;
		bus_dmamap_sync(sc->ti_mbuftx_dmat, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->ti_mbuftx_dmat, txd->tx_dmamap);

		ifp->if_opackets++;
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->ti_cdata.ti_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc->ti_cdata.ti_txbusyq);
	}
	sc->ti_tx_saved_considx = idx;

	/* Keep the watchdog armed while frames remain outstanding. */
	sc->ti_timer = sc->ti_txcnt > 0 ? 5 : 0;
}
2832
/*
 * Interrupt handler: acknowledge the interrupt, service the RX return
 * and TX rings, process firmware events, then re-enable interrupts and
 * kick the transmitter if packets are queued.
 */
static void
ti_intr(void *xsc)
{
	struct ti_softc *sc;
	struct ifnet *ifp;

	sc = xsc;
	TI_LOCK(sc);
	ifp = sc->ti_ifp;

	/* Make sure this is really our interrupt. */
	if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE)) {
		TI_UNLOCK(sc);
		return;
	}

	/* Ack interrupt and stop others from occurring. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Check RX return ring producer/consumer */
		ti_rxeof(sc);

		/* Check TX ring producer/consumer */
		ti_txeof(sc);
	}

	/* Handle events posted by the firmware even when not RUNNING. */
	ti_handle_events(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Re-enable interrupts. */
		CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ti_start_locked(ifp);
	}

	TI_UNLOCK(sc);
}
2871
2872static void
2873ti_stats_update(struct ti_softc *sc)
2874{
2875	struct ifnet *ifp;
2876
2877	ifp = sc->ti_ifp;
2878
2879	bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
2880	    BUS_DMASYNC_POSTREAD);
2881
2882	ifp->if_collisions +=
2883	   (sc->ti_rdata->ti_info.ti_stats.dot3StatsSingleCollisionFrames +
2884	   sc->ti_rdata->ti_info.ti_stats.dot3StatsMultipleCollisionFrames +
2885	   sc->ti_rdata->ti_info.ti_stats.dot3StatsExcessiveCollisions +
2886	   sc->ti_rdata->ti_info.ti_stats.dot3StatsLateCollisions) -
2887	   ifp->if_collisions;
2888
2889	bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
2890	    BUS_DMASYNC_PREREAD);
2891}
2892
2893/*
2894 * Encapsulate an mbuf chain in the tx ring  by coupling the mbuf data
2895 * pointers to descriptors.
2896 */
2897static int
2898ti_encap(struct ti_softc *sc, struct mbuf **m_head)
2899{
2900	struct ti_txdesc *txd;
2901	struct ti_tx_desc *f;
2902	struct ti_tx_desc txdesc;
2903	struct mbuf *m;
2904	bus_dma_segment_t txsegs[TI_MAXTXSEGS];
2905	uint16_t csum_flags;
2906	int error, frag, i, nseg;
2907
2908	if ((txd = STAILQ_FIRST(&sc->ti_cdata.ti_txfreeq)) == NULL)
2909		return (ENOBUFS);
2910
2911	error = bus_dmamap_load_mbuf_sg(sc->ti_mbuftx_dmat, txd->tx_dmamap,
2912	    *m_head, txsegs, &nseg, 0);
2913	if (error == EFBIG) {
2914		m = m_defrag(*m_head, M_DONTWAIT);
2915		if (m == NULL) {
2916			m_freem(*m_head);
2917			*m_head = NULL;
2918			return (ENOMEM);
2919		}
2920		*m_head = m;
2921		error = bus_dmamap_load_mbuf_sg(sc->ti_mbuftx_dmat,
2922		    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2923		if (error) {
2924			m_freem(*m_head);
2925			*m_head = NULL;
2926			return (error);
2927		}
2928	} else if (error != 0)
2929		return (error);
2930	if (nseg == 0) {
2931		m_freem(*m_head);
2932		*m_head = NULL;
2933		return (EIO);
2934	}
2935
2936	if (sc->ti_txcnt + nseg >= TI_TX_RING_CNT) {
2937		bus_dmamap_unload(sc->ti_mbuftx_dmat, txd->tx_dmamap);
2938		return (ENOBUFS);
2939	}
2940
2941	m = *m_head;
2942	csum_flags = 0;
2943	if (m->m_pkthdr.csum_flags) {
2944		if (m->m_pkthdr.csum_flags & CSUM_IP)
2945			csum_flags |= TI_BDFLAG_IP_CKSUM;
2946		if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2947			csum_flags |= TI_BDFLAG_TCP_UDP_CKSUM;
2948		if (m->m_flags & M_LASTFRAG)
2949			csum_flags |= TI_BDFLAG_IP_FRAG_END;
2950		else if (m->m_flags & M_FRAG)
2951			csum_flags |= TI_BDFLAG_IP_FRAG;
2952	}
2953
2954	bus_dmamap_sync(sc->ti_mbuftx_dmat, txd->tx_dmamap,
2955	    BUS_DMASYNC_PREWRITE);
2956	bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
2957	    BUS_DMASYNC_PREWRITE);
2958
2959	frag = sc->ti_tx_saved_prodidx;
2960	for (i = 0; i < nseg; i++) {
2961		if (sc->ti_hwrev == TI_HWREV_TIGON) {
2962			bzero(&txdesc, sizeof(txdesc));
2963			f = &txdesc;
2964		} else
2965			f = &sc->ti_rdata->ti_tx_ring[frag];
2966		ti_hostaddr64(&f->ti_addr, txsegs[i].ds_addr);
2967		f->ti_len = txsegs[i].ds_len;
2968		f->ti_flags = csum_flags;
2969		if (m->m_flags & M_VLANTAG) {
2970			f->ti_flags |= TI_BDFLAG_VLAN_TAG;
2971			f->ti_vlan_tag = m->m_pkthdr.ether_vtag;
2972		} else {
2973			f->ti_vlan_tag = 0;
2974		}
2975
2976		if (sc->ti_hwrev == TI_HWREV_TIGON)
2977			ti_mem_write(sc, TI_TX_RING_BASE + frag *
2978			    sizeof(txdesc), sizeof(txdesc), &txdesc);
2979		TI_INC(frag, TI_TX_RING_CNT);
2980	}
2981
2982	sc->ti_tx_saved_prodidx = frag;
2983	/* set TI_BDFLAG_END on the last descriptor */
2984	frag = (frag + TI_TX_RING_CNT - 1) % TI_TX_RING_CNT;
2985	if (sc->ti_hwrev == TI_HWREV_TIGON) {
2986		txdesc.ti_flags |= TI_BDFLAG_END;
2987		ti_mem_write(sc, TI_TX_RING_BASE + frag * sizeof(txdesc),
2988		    sizeof(txdesc), &txdesc);
2989	} else
2990		sc->ti_rdata->ti_tx_ring[frag].ti_flags |= TI_BDFLAG_END;
2991
2992	STAILQ_REMOVE_HEAD(&sc->ti_cdata.ti_txfreeq, tx_q);
2993	STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txbusyq, txd, tx_q);
2994	txd->tx_m = m;
2995	sc->ti_txcnt += nseg;
2996
2997	return (0);
2998}
2999
3000static void
3001ti_start(struct ifnet *ifp)
3002{
3003	struct ti_softc *sc;
3004
3005	sc = ifp->if_softc;
3006	TI_LOCK(sc);
3007	ti_start_locked(ifp);
3008	TI_UNLOCK(sc);
3009}
3010
3011/*
3012 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3013 * to the mbuf data regions directly in the transmit descriptors.
3014 */
3015static void
3016ti_start_locked(struct ifnet *ifp)
3017{
3018	struct ti_softc *sc;
3019	struct mbuf *m_head = NULL;
3020	int enq = 0;
3021
3022	sc = ifp->if_softc;
3023
3024	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
3025	    sc->ti_txcnt < (TI_TX_RING_CNT - 16);) {
3026		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3027		if (m_head == NULL)
3028			break;
3029
3030		/*
3031		 * XXX
3032		 * safety overkill.  If this is a fragmented packet chain
3033		 * with delayed TCP/UDP checksums, then only encapsulate
3034		 * it if we have enough descriptors to handle the entire
3035		 * chain at once.
3036		 * (paranoia -- may not actually be needed)
3037		 */
3038		if (m_head->m_flags & M_FIRSTFRAG &&
3039		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3040			if ((TI_TX_RING_CNT - sc->ti_txcnt) <
3041			    m_head->m_pkthdr.csum_data + 16) {
3042				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3043				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3044				break;
3045			}
3046		}
3047
3048		/*
3049		 * Pack the data into the transmit ring. If we
3050		 * don't have room, set the OACTIVE flag and wait
3051		 * for the NIC to drain the ring.
3052		 */
3053		if (ti_encap(sc, &m_head)) {
3054			if (m_head == NULL)
3055				break;
3056			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3057			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3058			break;
3059		}
3060
3061		enq++;
3062		/*
3063		 * If there's a BPF listener, bounce a copy of this frame
3064		 * to him.
3065		 */
3066		ETHER_BPF_MTAP(ifp, m_head);
3067	}
3068
3069	if (enq > 0) {
3070		/* Transmit */
3071		CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, sc->ti_tx_saved_prodidx);
3072
3073		/*
3074		 * Set a timeout in case the chip goes out to lunch.
3075		 */
3076		sc->ti_timer = 5;
3077	}
3078}
3079
/*
 * ifnet if_init entry point: take the softc lock and run the locked
 * initialization routine.
 */
static void
ti_init(void *xsc)
{
	struct ti_softc *sc = xsc;

	TI_LOCK(sc);
	ti_init_locked(sc);
	TI_UNLOCK(sc);
}
3090
3091static void
3092ti_init_locked(void *xsc)
3093{
3094	struct ti_softc *sc = xsc;
3095
3096	if (sc->ti_ifp->if_drv_flags & IFF_DRV_RUNNING)
3097		return;
3098
3099	/* Cancel pending I/O and flush buffers. */
3100	ti_stop(sc);
3101
3102	/* Init the gen info block, ring control blocks and firmware. */
3103	if (ti_gibinit(sc)) {
3104		device_printf(sc->ti_dev, "initialization failure\n");
3105		return;
3106	}
3107}
3108
/*
 * Second-stage initialization, run after the firmware is up and able to
 * accept commands: programs MTU and MAC address, sets RX filtering,
 * initializes all RX/TX rings, tells the firmware the host stack is up,
 * enables interrupts, and kicks off link negotiation.  Called with the
 * driver lock held (asserted below).
 */
static void ti_init2(struct ti_softc *sc)
{
	struct ti_cmd_desc cmd;	/* scratch descriptor used by TI_DO_CMD */
	struct ifnet *ifp;
	uint8_t *ea;
	struct ifmedia *ifm;
	int tmp;

	TI_LOCK_ASSERT(sc);

	ifp = sc->ti_ifp;

	/* Specify MTU and interface index. */
	CSR_WRITE_4(sc, TI_GCR_IFINDEX, device_get_unit(sc->ti_dev));
	/* Firmware MTU includes the Ethernet header, CRC and VLAN tag. */
	CSR_WRITE_4(sc, TI_GCR_IFMTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
	TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0);

	/* Load our MAC address (split across the two PAR registers). */
	ea = IF_LLADDR(sc->ti_ifp);
	CSR_WRITE_4(sc, TI_GCR_PAR0, (ea[0] << 8) | ea[1]);
	CSR_WRITE_4(sc, TI_GCR_PAR1,
	    (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5]);
	TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0);

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC) {
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0);
	} else {
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0);
	}

	/* Program multicast filter. */
	ti_setmulti(sc);

	/*
	 * If this is a Tigon 1, we should tell the
	 * firmware to use software packet filtering.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON) {
		TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0);
	}

	/* Init RX ring. */
	if (ti_init_rx_ring_std(sc) != 0) {
		/* XXX */
		device_printf(sc->ti_dev, "no memory for std Rx buffers.\n");
		return;
	}

	/* Init jumbo RX ring (only needed when the MTU requires it). */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
		if (ti_init_rx_ring_jumbo(sc) != 0) {
			/* XXX */
			device_printf(sc->ti_dev,
			    "no memory for jumbo Rx buffers.\n");
			return;
		}
	}

	/*
	 * If this is a Tigon 2, we can also configure the
	 * mini ring.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
		if (ti_init_rx_ring_mini(sc) != 0) {
			/* XXX */
			device_printf(sc->ti_dev,
			    "no memory for mini Rx buffers.\n");
			return;
		}
	}

	/* Reset the RX return ring consumer index. */
	CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0);
	sc->ti_rx_saved_considx = 0;

	/* Init TX ring. */
	ti_init_tx_ring(sc);

	/* Tell firmware we're alive. */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0);

	/* Enable host interrupts (0 = unmasked in the mailbox). */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	callout_reset(&sc->ti_watchdog, hz, ti_watchdog, sc);

	/*
	 * Make sure to set media properly. We have to do this
	 * here since we have to issue commands in order to set
	 * the link negotiation and we can't issue commands until
	 * the firmware is running.
	 */
	ifm = &sc->ifmedia;
	tmp = ifm->ifm_media;	/* save the user-selected media word */
	ifm->ifm_media = ifm->ifm_cur->ifm_media;
	ti_ifmedia_upd_locked(sc);
	ifm->ifm_media = tmp;	/* restore it after the update */
}
3210
3211/*
3212 * Set media options.
3213 */
3214static int
3215ti_ifmedia_upd(struct ifnet *ifp)
3216{
3217	struct ti_softc *sc;
3218	int error;
3219
3220	sc = ifp->if_softc;
3221	TI_LOCK(sc);
3222	error = ti_ifmedia_upd(ifp);
3223	TI_UNLOCK(sc);
3224
3225	return (error);
3226}
3227
/*
 * Program link parameters into the gigabit and 10/100 link registers
 * according to the currently selected ifmedia word, then issue a
 * TI_CMD_LINK_NEGOTIATION firmware command to start negotiation.
 * Returns EINVAL for non-Ethernet media; unknown Ethernet subtypes
 * fall through the switch and return 0 without touching the chip.
 * Caller must hold the driver lock (required by TI_DO_CMD paths).
 */
static int
ti_ifmedia_upd_locked(struct ti_softc *sc)
{
	struct ifmedia *ifm;
	struct ti_cmd_desc cmd;	/* scratch descriptor used by TI_DO_CMD */
	uint32_t flowctl;

	ifm = &sc->ifmedia;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	flowctl = 0;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/*
		 * Transmit flow control doesn't work on the Tigon 1.
		 */
		flowctl = TI_GLNK_RX_FLOWCTL_Y;

		/*
		 * Transmit flow control can also cause problems on the
		 * Tigon 2, apparantly with both the copper and fiber
		 * boards.  The symptom is that the interface will just
		 * hang.  This was reproduced with Alteon 180 switches.
		 */
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_GLNK_TX_FLOWCTL_Y;
#endif

		/* Autonegotiate both gigabit and 10/100 link speeds. */
		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
		    TI_GLNK_FULL_DUPLEX| flowctl |
		    TI_GLNK_AUTONEGENB|TI_GLNK_ENB);

		flowctl = TI_LNK_RX_FLOWCTL_Y;
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_LNK_TX_FLOWCTL_Y;
#endif

		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB|
		    TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX| flowctl |
		    TI_LNK_AUTONEGENB|TI_LNK_ENB);
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_BOTH, 0);
		break;
	case IFM_1000_SX:
	case IFM_1000_T:
		/* Fixed gigabit: program only the gigabit link register. */
		flowctl = TI_GLNK_RX_FLOWCTL_Y;
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_GLNK_TX_FLOWCTL_Y;
#endif

		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
		    flowctl |TI_GLNK_ENB);
		CSR_WRITE_4(sc, TI_GCR_LINK, 0);
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX);
		}
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_GIGABIT, 0);
		break;
	case IFM_100_FX:
	case IFM_10_FL:
	case IFM_100_TX:
	case IFM_10_T:
		/* Fixed 10/100: program only the 10/100 link register. */
		flowctl = TI_LNK_RX_FLOWCTL_Y;
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_LNK_TX_FLOWCTL_Y;
#endif

		CSR_WRITE_4(sc, TI_GCR_GLINK, 0);
		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF|flowctl);
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB);
		}
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX);
		}
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_10_100, 0);
		break;
	}

	return (0);
}
3323
3324/*
3325 * Report current media status.
3326 */
3327static void
3328ti_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3329{
3330	struct ti_softc *sc;
3331	uint32_t media = 0;
3332
3333	sc = ifp->if_softc;
3334
3335	TI_LOCK(sc);
3336
3337	ifmr->ifm_status = IFM_AVALID;
3338	ifmr->ifm_active = IFM_ETHER;
3339
3340	if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) {
3341		TI_UNLOCK(sc);
3342		return;
3343	}
3344
3345	ifmr->ifm_status |= IFM_ACTIVE;
3346
3347	if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
3348		media = CSR_READ_4(sc, TI_GCR_GLINK_STAT);
3349		if (sc->ti_copper)
3350			ifmr->ifm_active |= IFM_1000_T;
3351		else
3352			ifmr->ifm_active |= IFM_1000_SX;
3353		if (media & TI_GLNK_FULL_DUPLEX)
3354			ifmr->ifm_active |= IFM_FDX;
3355		else
3356			ifmr->ifm_active |= IFM_HDX;
3357	} else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
3358		media = CSR_READ_4(sc, TI_GCR_LINK_STAT);
3359		if (sc->ti_copper) {
3360			if (media & TI_LNK_100MB)
3361				ifmr->ifm_active |= IFM_100_TX;
3362			if (media & TI_LNK_10MB)
3363				ifmr->ifm_active |= IFM_10_T;
3364		} else {
3365			if (media & TI_LNK_100MB)
3366				ifmr->ifm_active |= IFM_100_FX;
3367			if (media & TI_LNK_10MB)
3368				ifmr->ifm_active |= IFM_10_FL;
3369		}
3370		if (media & TI_LNK_FULL_DUPLEX)
3371			ifmr->ifm_active |= IFM_FDX;
3372		if (media & TI_LNK_HALF_DUPLEX)
3373			ifmr->ifm_active |= IFM_HDX;
3374	}
3375	TI_UNLOCK(sc);
3376}
3377
/*
 * Network interface ioctl handler.  Handles MTU changes, interface
 * flag changes (with a fast path that toggles promiscuous mode via a
 * firmware command instead of a full re-init), multicast list updates,
 * media requests, and capability changes; anything else is passed to
 * ether_ioctl().
 */
static int
ti_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ti_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ti_cmd_desc cmd;	/* scratch descriptor used by TI_DO_CMD */
	int mask, error = 0;

	switch (command) {
	case SIOCSIFMTU:
		TI_LOCK(sc);
		/* Reject MTUs outside [ETHERMIN, TI_JUMBO_MTU]. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > TI_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			/* Reinitialize with the new MTU if running. */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				ti_init_locked(sc);
			}
		}
		TI_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		TI_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->ti_if_flags & IFF_PROMISC)) {
				TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
				    TI_CMD_CODE_PROMISC_ENB, 0);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->ti_if_flags & IFF_PROMISC) {
				TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
				    TI_CMD_CODE_PROMISC_DIS, 0);
			} else
				ti_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				ti_stop(sc);
			}
		}
		/* Remember the flags so the next change can be diffed. */
		sc->ti_if_flags = ifp->if_flags;
		TI_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		TI_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			ti_setmulti(sc);
		TI_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		break;
	case SIOCSIFCAP:
		TI_LOCK(sc);
		/* mask holds the capability bits that are changing. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= TI_CSUM_FEATURES;
                        else
				ifp->if_hwassist &= ~TI_CSUM_FEATURES;
                }
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		/* Checksum/VLAN-tag changes require a full re-init. */
		if ((mask & (IFCAP_TXCSUM | IFCAP_RXCSUM |
		    IFCAP_VLAN_HWTAGGING)) != 0) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				ti_init_locked(sc);
			}
		}
		TI_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
3479
3480static int
3481ti_open(struct cdev *dev, int flags, int fmt, struct thread *td)
3482{
3483	struct ti_softc *sc;
3484
3485	sc = dev->si_drv1;
3486	if (sc == NULL)
3487		return (ENODEV);
3488
3489	TI_LOCK(sc);
3490	sc->ti_flags |= TI_FLAG_DEBUGING;
3491	TI_UNLOCK(sc);
3492
3493	return (0);
3494}
3495
3496static int
3497ti_close(struct cdev *dev, int flag, int fmt, struct thread *td)
3498{
3499	struct ti_softc *sc;
3500
3501	sc = dev->si_drv1;
3502	if (sc == NULL)
3503		return (ENODEV);
3504
3505	TI_LOCK(sc);
3506	sc->ti_flags &= ~TI_FLAG_DEBUGING;
3507	TI_UNLOCK(sc);
3508
3509	return (0);
3510}
3511
3512/*
3513 * This ioctl routine goes along with the Tigon character device.
3514 */
3515static int
3516ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
3517    struct thread *td)
3518{
3519	struct ti_softc *sc;
3520	int error;
3521
3522	sc = dev->si_drv1;
3523	if (sc == NULL)
3524		return (ENODEV);
3525
3526	error = 0;
3527
3528	switch (cmd) {
3529	case TIIOCGETSTATS:
3530	{
3531		struct ti_stats *outstats;
3532
3533		outstats = (struct ti_stats *)addr;
3534
3535		TI_LOCK(sc);
3536		bcopy(&sc->ti_rdata->ti_info.ti_stats, outstats,
3537		    sizeof(struct ti_stats));
3538		TI_UNLOCK(sc);
3539		break;
3540	}
3541	case TIIOCGETPARAMS:
3542	{
3543		struct ti_params *params;
3544
3545		params = (struct ti_params *)addr;
3546
3547		TI_LOCK(sc);
3548		params->ti_stat_ticks = sc->ti_stat_ticks;
3549		params->ti_rx_coal_ticks = sc->ti_rx_coal_ticks;
3550		params->ti_tx_coal_ticks = sc->ti_tx_coal_ticks;
3551		params->ti_rx_max_coal_bds = sc->ti_rx_max_coal_bds;
3552		params->ti_tx_max_coal_bds = sc->ti_tx_max_coal_bds;
3553		params->ti_tx_buf_ratio = sc->ti_tx_buf_ratio;
3554		params->param_mask = TI_PARAM_ALL;
3555		TI_UNLOCK(sc);
3556		break;
3557	}
3558	case TIIOCSETPARAMS:
3559	{
3560		struct ti_params *params;
3561
3562		params = (struct ti_params *)addr;
3563
3564		TI_LOCK(sc);
3565		if (params->param_mask & TI_PARAM_STAT_TICKS) {
3566			sc->ti_stat_ticks = params->ti_stat_ticks;
3567			CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
3568		}
3569
3570		if (params->param_mask & TI_PARAM_RX_COAL_TICKS) {
3571			sc->ti_rx_coal_ticks = params->ti_rx_coal_ticks;
3572			CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS,
3573				    sc->ti_rx_coal_ticks);
3574		}
3575
3576		if (params->param_mask & TI_PARAM_TX_COAL_TICKS) {
3577			sc->ti_tx_coal_ticks = params->ti_tx_coal_ticks;
3578			CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS,
3579				    sc->ti_tx_coal_ticks);
3580		}
3581
3582		if (params->param_mask & TI_PARAM_RX_COAL_BDS) {
3583			sc->ti_rx_max_coal_bds = params->ti_rx_max_coal_bds;
3584			CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD,
3585				    sc->ti_rx_max_coal_bds);
3586		}
3587
3588		if (params->param_mask & TI_PARAM_TX_COAL_BDS) {
3589			sc->ti_tx_max_coal_bds = params->ti_tx_max_coal_bds;
3590			CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD,
3591				    sc->ti_tx_max_coal_bds);
3592		}
3593
3594		if (params->param_mask & TI_PARAM_TX_BUF_RATIO) {
3595			sc->ti_tx_buf_ratio = params->ti_tx_buf_ratio;
3596			CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO,
3597				    sc->ti_tx_buf_ratio);
3598		}
3599		TI_UNLOCK(sc);
3600		break;
3601	}
3602	case TIIOCSETTRACE: {
3603		ti_trace_type trace_type;
3604
3605		trace_type = *(ti_trace_type *)addr;
3606
3607		/*
3608		 * Set tracing to whatever the user asked for.  Setting
3609		 * this register to 0 should have the effect of disabling
3610		 * tracing.
3611		 */
3612		TI_LOCK(sc);
3613		CSR_WRITE_4(sc, TI_GCR_NIC_TRACING, trace_type);
3614		TI_UNLOCK(sc);
3615		break;
3616	}
3617	case TIIOCGETTRACE: {
3618		struct ti_trace_buf *trace_buf;
3619		uint32_t trace_start, cur_trace_ptr, trace_len;
3620
3621		trace_buf = (struct ti_trace_buf *)addr;
3622
3623		TI_LOCK(sc);
3624		trace_start = CSR_READ_4(sc, TI_GCR_NICTRACE_START);
3625		cur_trace_ptr = CSR_READ_4(sc, TI_GCR_NICTRACE_PTR);
3626		trace_len = CSR_READ_4(sc, TI_GCR_NICTRACE_LEN);
3627#if 0
3628		if_printf(sc->ti_ifp, "trace_start = %#x, cur_trace_ptr = %#x, "
3629		       "trace_len = %d\n", trace_start,
3630		       cur_trace_ptr, trace_len);
3631		if_printf(sc->ti_ifp, "trace_buf->buf_len = %d\n",
3632		       trace_buf->buf_len);
3633#endif
3634		error = ti_copy_mem(sc, trace_start, min(trace_len,
3635		    trace_buf->buf_len), (caddr_t)trace_buf->buf, 1, 1);
3636		if (error == 0) {
3637			trace_buf->fill_len = min(trace_len,
3638			    trace_buf->buf_len);
3639			if (cur_trace_ptr < trace_start)
3640				trace_buf->cur_trace_ptr =
3641				    trace_start - cur_trace_ptr;
3642			else
3643				trace_buf->cur_trace_ptr =
3644				    cur_trace_ptr - trace_start;
3645		} else
3646			trace_buf->fill_len = 0;
3647		TI_UNLOCK(sc);
3648		break;
3649	}
3650
3651	/*
3652	 * For debugging, five ioctls are needed:
3653	 * ALT_ATTACH
3654	 * ALT_READ_TG_REG
3655	 * ALT_WRITE_TG_REG
3656	 * ALT_READ_TG_MEM
3657	 * ALT_WRITE_TG_MEM
3658	 */
3659	case ALT_ATTACH:
3660		/*
3661		 * From what I can tell, Alteon's Solaris Tigon driver
3662		 * only has one character device, so you have to attach
3663		 * to the Tigon board you're interested in.  This seems
3664		 * like a not-so-good way to do things, since unless you
3665		 * subsequently specify the unit number of the device
3666		 * you're interested in every ioctl, you'll only be
3667		 * able to debug one board at a time.
3668		 */
3669		break;
3670	case ALT_READ_TG_MEM:
3671	case ALT_WRITE_TG_MEM:
3672	{
3673		struct tg_mem *mem_param;
3674		uint32_t sram_end, scratch_end;
3675
3676		mem_param = (struct tg_mem *)addr;
3677
3678		if (sc->ti_hwrev == TI_HWREV_TIGON) {
3679			sram_end = TI_END_SRAM_I;
3680			scratch_end = TI_END_SCRATCH_I;
3681		} else {
3682			sram_end = TI_END_SRAM_II;
3683			scratch_end = TI_END_SCRATCH_II;
3684		}
3685
3686		/*
3687		 * For now, we'll only handle accessing regular SRAM,
3688		 * nothing else.
3689		 */
3690		TI_LOCK(sc);
3691		if (mem_param->tgAddr >= TI_BEG_SRAM &&
3692		    mem_param->tgAddr + mem_param->len <= sram_end) {
3693			/*
3694			 * In this instance, we always copy to/from user
3695			 * space, so the user space argument is set to 1.
3696			 */
3697			error = ti_copy_mem(sc, mem_param->tgAddr,
3698			    mem_param->len, mem_param->userAddr, 1,
3699			    cmd == ALT_READ_TG_MEM ? 1 : 0);
3700		} else if (mem_param->tgAddr >= TI_BEG_SCRATCH &&
3701		    mem_param->tgAddr <= scratch_end) {
3702			error = ti_copy_scratch(sc, mem_param->tgAddr,
3703			    mem_param->len, mem_param->userAddr, 1,
3704			    cmd == ALT_READ_TG_MEM ?  1 : 0, TI_PROCESSOR_A);
3705		} else if (mem_param->tgAddr >= TI_BEG_SCRATCH_B_DEBUG &&
3706		    mem_param->tgAddr <= TI_BEG_SCRATCH_B_DEBUG) {
3707			if (sc->ti_hwrev == TI_HWREV_TIGON) {
3708				if_printf(sc->ti_ifp,
3709				    "invalid memory range for Tigon I\n");
3710				error = EINVAL;
3711				break;
3712			}
3713			error = ti_copy_scratch(sc, mem_param->tgAddr -
3714			    TI_SCRATCH_DEBUG_OFF, mem_param->len,
3715			    mem_param->userAddr, 1,
3716			    cmd == ALT_READ_TG_MEM ? 1 : 0, TI_PROCESSOR_B);
3717		} else {
3718			if_printf(sc->ti_ifp, "memory address %#x len %d is "
3719			        "out of supported range\n",
3720			        mem_param->tgAddr, mem_param->len);
3721			error = EINVAL;
3722		}
3723		TI_UNLOCK(sc);
3724		break;
3725	}
3726	case ALT_READ_TG_REG:
3727	case ALT_WRITE_TG_REG:
3728	{
3729		struct tg_reg *regs;
3730		uint32_t tmpval;
3731
3732		regs = (struct tg_reg *)addr;
3733
3734		/*
3735		 * Make sure the address in question isn't out of range.
3736		 */
3737		if (regs->addr > TI_REG_MAX) {
3738			error = EINVAL;
3739			break;
3740		}
3741		TI_LOCK(sc);
3742		if (cmd == ALT_READ_TG_REG) {
3743			bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
3744			    regs->addr, &tmpval, 1);
3745			regs->data = ntohl(tmpval);
3746#if 0
3747			if ((regs->addr == TI_CPU_STATE)
3748			 || (regs->addr == TI_CPU_CTL_B)) {
3749				if_printf(sc->ti_ifp, "register %#x = %#x\n",
3750				       regs->addr, tmpval);
3751			}
3752#endif
3753		} else {
3754			tmpval = htonl(regs->data);
3755			bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
3756			    regs->addr, &tmpval, 1);
3757		}
3758		TI_UNLOCK(sc);
3759		break;
3760	}
3761	default:
3762		error = ENOTTY;
3763		break;
3764	}
3765	return (error);
3766}
3767
3768static void
3769ti_watchdog(void *arg)
3770{
3771	struct ti_softc *sc;
3772	struct ifnet *ifp;
3773
3774	sc = arg;
3775	TI_LOCK_ASSERT(sc);
3776	callout_reset(&sc->ti_watchdog, hz, ti_watchdog, sc);
3777	if (sc->ti_timer == 0 || --sc->ti_timer > 0)
3778		return;
3779
3780	/*
3781	 * When we're debugging, the chip is often stopped for long periods
3782	 * of time, and that would normally cause the watchdog timer to fire.
3783	 * Since that impedes debugging, we don't want to do that.
3784	 */
3785	if (sc->ti_flags & TI_FLAG_DEBUGING)
3786		return;
3787
3788	ifp = sc->ti_ifp;
3789	if_printf(ifp, "watchdog timeout -- resetting\n");
3790	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3791	ti_init_locked(sc);
3792
3793	ifp->if_oerrors++;
3794}
3795
3796/*
3797 * Stop the adapter and free any mbufs allocated to the
3798 * RX and TX lists.
3799 */
3800static void
3801ti_stop(struct ti_softc *sc)
3802{
3803	struct ifnet *ifp;
3804	struct ti_cmd_desc cmd;
3805
3806	TI_LOCK_ASSERT(sc);
3807
3808	ifp = sc->ti_ifp;
3809
3810	/* Disable host interrupts. */
3811	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
3812	/*
3813	 * Tell firmware we're shutting down.
3814	 */
3815	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0);
3816
3817	/* Halt and reinitialize. */
3818	if (ti_chipinit(sc) == 0) {
3819		ti_mem_zero(sc, 0x2000, 0x100000 - 0x2000);
3820		/* XXX ignore init errors. */
3821		ti_chipinit(sc);
3822	}
3823
3824	/* Free the RX lists. */
3825	ti_free_rx_ring_std(sc);
3826
3827	/* Free jumbo RX list. */
3828	ti_free_rx_ring_jumbo(sc);
3829
3830	/* Free mini RX list. */
3831	ti_free_rx_ring_mini(sc);
3832
3833	/* Free TX buffers. */
3834	ti_free_tx_ring(sc);
3835
3836	sc->ti_ev_prodidx.ti_idx = 0;
3837	sc->ti_return_prodidx.ti_idx = 0;
3838	sc->ti_tx_considx.ti_idx = 0;
3839	sc->ti_tx_saved_considx = TI_TXCONS_UNSET;
3840
3841	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3842	callout_stop(&sc->ti_watchdog);
3843}
3844
3845/*
3846 * Stop all chip I/O so that the kernel's probe routines don't
3847 * get confused by errant DMAs when rebooting.
3848 */
3849static int
3850ti_shutdown(device_t dev)
3851{
3852	struct ti_softc *sc;
3853
3854	sc = device_get_softc(dev);
3855	TI_LOCK(sc);
3856	ti_chipinit(sc);
3857	TI_UNLOCK(sc);
3858
3859	return (0);
3860}
3861