/*	$OpenBSD: if_txp.c,v 1.48 2001/06/27 06:34:50 kjc Exp $	*/

/*-
 * Copyright (c) 2001
 *	Jason L. Wright <jason@thought.net>, Theo de Raadt, and
 *	Aaron Campbell <aaron@monkey.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Jason L. Wright,
 *	Theo de Raadt and Aaron Campbell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/txp/if_txp.c 243857 2012-12-04 09:32:43Z glebius $");

/*
 * Driver for 3c990 (Typhoon) Ethernet ASIC
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/txp/if_txpreg.h>
#include <dev/txp/3c990img.h>

MODULE_DEPEND(txp, pci, 1, 1, 1);
MODULE_DEPEND(txp, ether, 1, 1, 1);
/*
 * XXX Known Typhoon firmware issues.
 *
 * 1. The firmware appears to have a Tx TCP/UDP checksum offloading
 *    bug: it hangs when told to compute a TCP/UDP checksum. It is
 *    not clear whether the firmware requires special alignment to
 *    do checksum offloading; the datasheet says nothing about that.
 * 2. The datasheet does not document the maximum number of fragment
 *    descriptors supported. Experimentation shows the firmware
 *    supports up to 16 fragment descriptors. For TSO the upper
 *    stack can hand down a 64KB IP datagram plus link header
 *    (ethernet header + VLAN tag), but with 4KB pages the controller
 *    can only handle frames up to 64KB (i.e. 16 * PAGE_SIZE).
 *    Because frames that need hardware TSO can be larger than 64KB,
 *    TSO capability is disabled. TSO does work without problems for
 *    frames that fit in 16 or fewer fragment descriptors.
 * 3. VLAN hardware tag stripping is always enabled in the firmware,
 *    even when it is explicitly told not to strip the tag. It would
 *    be possible to re-insert the tag in the Rx handler when VLAN
 *    hardware tagging is not active, but that was not done since it
 *    would be a layering violation.
 * 4. TXP_CMD_RECV_BUFFER_CONTROL does not work as described in the
 *    datasheet, so the driver has to handle the alignment
 *    restriction itself by copying each received frame to align it
 *    on a 32-bit boundary on strict-alignment architectures. This
 *    adds a lot of CPU overhead and effectively reduces Rx
 *    performance on strict-alignment architectures (e.g. sparc64,
 *    arm, mips and ia64).
 *
 * Unfortunately 3Com no longer seems interested in releasing fixed
 * firmware, so we may have to live with these bugs.
 */

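/*
 * Illustrative arithmetic for issue 2 above (an editorial sketch
 * assuming PAGE_SIZE is 4KB; these are not datasheet figures):
 *
 *	controller limit: 16 fragments * 4096 bytes = 65536 bytes (64KB)
 *	worst-case TSO:   65535B IP datagram + 14B ethernet + 4B VLAN tag
 *	                = 65553 bytes > 65536 bytes
 *
 * so a maximal TSO request can exceed what 16 fragment descriptors
 * cover, which is why TSO is left disabled below.
 */
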
#define	TXP_CSUM_FEATURES	(CSUM_IP)

/*
 * Various supported device vendors/types and their names.
 */
static struct txp_type txp_devs[] = {
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_95,
	    "3Com 3cR990-TX-95 Etherlink with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_97,
	    "3Com 3cR990-TX-97 Etherlink with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_TXM,
	    "3Com 3cR990B-TXM Etherlink with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_95,
	    "3Com 3cR990-SRV-95 Etherlink Server with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_97,
	    "3Com 3cR990-SRV-97 Etherlink Server with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_SRV,
	    "3Com 3cR990B-SRV Etherlink Server with 3XP Processor" },
	{ 0, 0, NULL }
};

static int txp_probe(device_t);
static int txp_attach(device_t);
static int txp_detach(device_t);
static int txp_shutdown(device_t);
static int txp_suspend(device_t);
static int txp_resume(device_t);
static int txp_intr(void *);
static void txp_int_task(void *, int);
static void txp_tick(void *);
static int txp_ioctl(struct ifnet *, u_long, caddr_t);
static void txp_start(struct ifnet *);
static void txp_start_locked(struct ifnet *);
static int txp_encap(struct txp_softc *, struct txp_tx_ring *, struct mbuf **);
static void txp_stop(struct txp_softc *);
static void txp_init(void *);
static void txp_init_locked(struct txp_softc *);
static void txp_watchdog(struct txp_softc *);

static int txp_reset(struct txp_softc *);
static int txp_boot(struct txp_softc *, uint32_t);
static int txp_sleep(struct txp_softc *, int);
static int txp_wait(struct txp_softc *, uint32_t);
static int txp_download_fw(struct txp_softc *);
static int txp_download_fw_wait(struct txp_softc *);
static int txp_download_fw_section(struct txp_softc *,
    struct txp_fw_section_header *, int);
static int txp_alloc_rings(struct txp_softc *);
static void txp_init_rings(struct txp_softc *);
static int txp_dma_alloc(struct txp_softc *, char *, bus_dma_tag_t *,
    bus_size_t, bus_size_t, bus_dmamap_t *, void **, bus_size_t, bus_addr_t *);
static void txp_dma_free(struct txp_softc *, bus_dma_tag_t *, bus_dmamap_t *,
    void **);
static void txp_free_rings(struct txp_softc *);
static int txp_rxring_fill(struct txp_softc *);
static void txp_rxring_empty(struct txp_softc *);
static void txp_set_filter(struct txp_softc *);

static int txp_cmd_desc_numfree(struct txp_softc *);
static int txp_command(struct txp_softc *, uint16_t, uint16_t, uint32_t,
    uint32_t, uint16_t *, uint32_t *, uint32_t *, int);
static int txp_ext_command(struct txp_softc *, uint16_t, uint16_t,
    uint32_t, uint32_t, struct txp_ext_desc *, uint8_t,
    struct txp_rsp_desc **, int);
static int txp_response(struct txp_softc *, uint16_t, uint16_t,
    struct txp_rsp_desc **);
static void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
    struct txp_rsp_desc *);
static int txp_set_capabilities(struct txp_softc *);

static void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int txp_ifmedia_upd(struct ifnet *);
#ifdef TXP_DEBUG
static void txp_show_descriptor(void *);
#endif
static void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *);
static void txp_rxbuf_reclaim(struct txp_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void txp_fixup_rx(struct mbuf *);
#endif
static int txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *, int);
static void txp_stats_save(struct txp_softc *);
static void txp_stats_update(struct txp_softc *, struct txp_rsp_desc *);
static void txp_sysctl_node(struct txp_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS);

static int prefer_iomap = 0;
TUNABLE_INT("hw.txp.prefer_iomap", &prefer_iomap);

static device_method_t txp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		txp_probe),
	DEVMETHOD(device_attach,	txp_attach),
	DEVMETHOD(device_detach,	txp_detach),
	DEVMETHOD(device_shutdown,	txp_shutdown),
	DEVMETHOD(device_suspend,	txp_suspend),
	DEVMETHOD(device_resume,	txp_resume),

	{ NULL, NULL }
};

static driver_t txp_driver = {
	"txp",
	txp_methods,
	sizeof(struct txp_softc)
};

static devclass_t txp_devclass;

DRIVER_MODULE(txp, pci, txp_driver, txp_devclass, 0, 0);

static int
txp_probe(device_t dev)
{
	struct txp_type *t;

	t = txp_devs;

	while (t->txp_name != NULL) {
		if ((pci_get_vendor(dev) == t->txp_vid) &&
		    (pci_get_device(dev) == t->txp_did)) {
			device_set_desc(dev, t->txp_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

static int
txp_attach(device_t dev)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	struct txp_rsp_desc *rsp;
	uint16_t p1;
	uint32_t p2, reg;
	int error = 0, pmc, rid;
	uint8_t eaddr[ETHER_ADDR_LEN], *ver;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_int_task, 0, txp_int_task, sc);
	TAILQ_INIT(&sc->sc_busy_list);
	TAILQ_INIT(&sc->sc_free_list);

	ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);

	pci_enable_busmaster(dev);
	/* Prefer memory space register mapping over IO space. */
	if (prefer_iomap == 0) {
		sc->sc_res_id = PCIR_BAR(1);
		sc->sc_res_type = SYS_RES_MEMORY;
	} else {
		sc->sc_res_id = PCIR_BAR(0);
		sc->sc_res_type = SYS_RES_IOPORT;
	}
	sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
	    &sc->sc_res_id, RF_ACTIVE);
	if (sc->sc_res == NULL && prefer_iomap == 0) {
		sc->sc_res_id = PCIR_BAR(0);
		sc->sc_res_type = SYS_RES_IOPORT;
		sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
		    &sc->sc_res_id, RF_ACTIVE);
	}
	if (sc->sc_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		ifmedia_removeall(&sc->sc_ifmedia);
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}

	/* Enable MWI. */
	reg = pci_read_config(dev, PCIR_COMMAND, 2);
	reg |= PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, reg, 2);
	/* Check cache line size. */
	reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	reg <<= 4;
	if (reg == 0 || (reg % 16) != 0)
		device_printf(sc->sc_dev,
		    "invalid cache line size : %u\n", reg);

	/* Allocate interrupt */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->sc_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	if ((error = txp_alloc_rings(sc)) != 0)
		goto fail;
	txp_init_rings(sc);
	txp_sysctl_node(sc);
	/* Reset controller and make it reload sleep image. */
	if (txp_reset(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* Let controller boot from sleep image. */
	if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
		device_printf(sc->sc_dev, "could not boot sleep image\n");
		error = ENXIO;
		goto fail;
	}

	/* Get station address. */
	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
	    &p1, &p2, NULL, TXP_CMD_WAIT)) {
		error = ENXIO;
		goto fail;
	}

	p1 = le16toh(p1);
	eaddr[0] = ((uint8_t *)&p1)[1];
	eaddr[1] = ((uint8_t *)&p1)[0];
	p2 = le32toh(p2);
	eaddr[2] = ((uint8_t *)&p2)[3];
	eaddr[3] = ((uint8_t *)&p2)[2];
	eaddr[4] = ((uint8_t *)&p2)[1];
	eaddr[5] = ((uint8_t *)&p2)[0];

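	/*
	 * Byte-order sketch (hypothetical address, for illustration
	 * only): for a station address of 00:01:02:03:04:05 the
	 * firmware hands back p1 = 0x0001 and p2 = 0x02030405 in
	 * little-endian form, and on a little-endian host the casts
	 * above pick the bytes out high to low.
	 */
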
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	/*
	 * Show sleep image version information which may help to
	 * diagnose sleep image specific issues.
	 */
	rsp = NULL;
	if (txp_ext_command(sc, TXP_CMD_READ_VERSION, 0, 0, 0, NULL, 0,
	    &rsp, TXP_CMD_WAIT)) {
		device_printf(dev, "can not read sleep image version\n");
		error = ENXIO;
		goto fail;
	}
	if (rsp->rsp_numdesc == 0) {
		p2 = le32toh(rsp->rsp_par2) & 0xFFFF;
		device_printf(dev, "Typhoon 1.0 sleep image (2000/%02u/%02u)\n",
		    p2 >> 8, p2 & 0xFF);
	} else if (rsp->rsp_numdesc == 2) {
		p2 = le32toh(rsp->rsp_par2);
		ver = (uint8_t *)(rsp + 1);
		/*
		 * Although the datasheet says the command returns a
		 * NUL-terminated version string, terminate the string
		 * explicitly. Given the number of firmware bugs, even
		 * this simple claim can't be trusted.
		 */
		ver[25] = '\0';
		device_printf(dev,
		    "Typhoon 1.1+ sleep image %02u.%03u.%03u %s\n",
		    p2 >> 24, (p2 >> 12) & 0xFFF, p2 & 0xFFF, ver);
	} else {
		p2 = le32toh(rsp->rsp_par2);
		device_printf(dev,
		    "Unknown Typhoon sleep image version: %u:0x%08x\n",
		    rsp->rsp_numdesc, p2);
	}
	if (rsp != NULL)
		free(rsp, M_DEVBUF);

	sc->sc_xcvr = TXP_XCVR_AUTO;
	txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = txp_ioctl;
	ifp->if_start = txp_start;
	ifp->if_init = txp_init;
	ifp->if_snd.ifq_drv_maxlen = TX_ENTRIES - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	/*
	 * It is possible to read the firmware's offload capabilities,
	 * but the firmware has not been downloaded yet, so announce
	 * the working capabilities here. We are not interested in the
	 * IPSec capability, and due to the many firmware bugs we
	 * can't advertise the full capability set anyway.
	 */
	ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM;
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	/* Enable all capabilities. */
	ifp->if_capenable = ifp->if_capabilities;

	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);

	/* Create local taskq. */
	sc->sc_tq = taskqueue_create_fast("txp_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	if (sc->sc_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->sc_dev));

	/* Put the controller to sleep. */
	if (txp_sleep(sc, 0) != 0) {
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    txp_intr, NULL, sc, &sc->sc_intrhand);

	if (error != 0) {
		ether_ifdetach(ifp);
		device_printf(dev, "couldn't set up interrupt handler.\n");
		goto fail;
	}

	return (0);

fail:
	if (error != 0)
		txp_detach(dev);
	return (error);
}

static int
txp_detach(device_t dev)
{
	struct txp_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	ifp = sc->sc_ifp;
	if (device_is_attached(dev)) {
		TXP_LOCK(sc);
		sc->sc_flags |= TXP_FLAG_DETACH;
		txp_stop(sc);
		TXP_UNLOCK(sc);
		callout_drain(&sc->sc_tick);
		taskqueue_drain(sc->sc_tq, &sc->sc_int_task);
		ether_ifdetach(ifp);
	}
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);

	ifmedia_removeall(&sc->sc_ifmedia);
	if (sc->sc_intrhand != NULL)
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
	if (sc->sc_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
	if (sc->sc_res != NULL)
		bus_release_resource(dev, sc->sc_res_type, sc->sc_res_id,
		    sc->sc_res);
	if (sc->sc_ifp != NULL) {
		if_free(sc->sc_ifp);
		sc->sc_ifp = NULL;
	}
	txp_free_rings(sc);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

static int
txp_reset(struct txp_softc *sc)
{
	uint32_t r;
	int i;

	/* Disable interrupts. */
	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	/* Ack all pending interrupts. */
	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);

	r = 0;
	WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
	DELAY(1000);
	WRITE_REG(sc, TXP_SRR, 0);

	/* Should wait max 6 seconds. */
	for (i = 0; i < 6000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(1000);
	}

	if (r != STAT_WAITING_FOR_HOST_REQUEST)
		device_printf(sc->sc_dev, "reset hung\n");

	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);

	/*
	 * Give the controller more time to finish loading the sleep
	 * image before trying to boot from it.
	 */
	DELAY(5000);

	return (0);
}

static int
txp_boot(struct txp_softc *sc, uint32_t state)
{

	/* See if it's waiting for boot, and try to boot it. */
	if (txp_wait(sc, state) != 0) {
		device_printf(sc->sc_dev, "not waiting for boot\n");
		return (ENXIO);
	}

	WRITE_REG(sc, TXP_H2A_2, TXP_ADDR_HI(sc->sc_ldata.txp_boot_paddr));
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_1, TXP_ADDR_LO(sc->sc_ldata.txp_boot_paddr));
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	/* See if it booted. */
	if (txp_wait(sc, STAT_RUNNING) != 0) {
		device_printf(sc->sc_dev, "firmware not running\n");
		return (ENXIO);
	}

	/* Clear TX and CMD ring write registers. */
	WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	return (0);
}

static int
txp_download_fw(struct txp_softc *sc)
{
	struct txp_fw_file_header *fileheader;
	struct txp_fw_section_header *secthead;
	int sect;
	uint32_t error, ier, imr;

	TXP_LOCK_ASSERT(sc);

	error = 0;
	ier = READ_REG(sc, TXP_IER);
	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

	imr = READ_REG(sc, TXP_IMR);
	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

	if (txp_wait(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
		device_printf(sc->sc_dev, "not waiting for host request\n");
		error = ETIMEDOUT;
		goto fail;
	}

	/* Ack the status. */
	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	fileheader = (struct txp_fw_file_header *)tc990image;
	if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
		device_printf(sc->sc_dev, "firmware invalid magic\n");
		goto fail;
	}

	/* Tell boot firmware to get ready for image. */
	WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr));
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_2, le32toh(fileheader->hmac[0]));
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_3, le32toh(fileheader->hmac[1]));
	TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_4, le32toh(fileheader->hmac[2]));
	TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_5, le32toh(fileheader->hmac[3]));
	TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_6, le32toh(fileheader->hmac[4]));
	TXP_BARRIER(sc, TXP_H2A_6, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	if (txp_download_fw_wait(sc)) {
		device_printf(sc->sc_dev, "firmware wait failed, initial\n");
		error = ETIMEDOUT;
		goto fail;
	}

	secthead = (struct txp_fw_section_header *)(((uint8_t *)tc990image) +
	    sizeof(struct txp_fw_file_header));

	for (sect = 0; sect < le32toh(fileheader->nsections); sect++) {
		if ((error = txp_download_fw_section(sc, secthead, sect)) != 0)
			goto fail;
		secthead = (struct txp_fw_section_header *)
		    (((uint8_t *)secthead) + le32toh(secthead->nbytes) +
		    sizeof(*secthead));
	}

	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	if (txp_wait(sc, STAT_WAITING_FOR_BOOT) != 0) {
		device_printf(sc->sc_dev, "not waiting for boot\n");
		error = ETIMEDOUT;
		goto fail;
	}

fail:
	WRITE_REG(sc, TXP_IER, ier);
	WRITE_REG(sc, TXP_IMR, imr);

	return (error);
}

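/*
 * Download handshake, in outline (a summary sketch of the code above,
 * not a datasheet transcription): each step loads the operands into
 * the host-to-ARM mailbox registers H2A_1..H2A_6, posts the command
 * through H2A_0 last, and then polls for the TXP_INT_A2H_0 ack:
 *
 *	H2A_1..H2A_6 = operands (address, checksum, HMAC words, ...)
 *	H2A_0        = command (RUNTIME_IMAGE / SEGMENT_AVAILABLE / ...)
 *	poll ISR for TXP_INT_A2H_0, then read A2H_0 for the new state
 */
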
static int
txp_download_fw_wait(struct txp_softc *sc)
{
	uint32_t i;

	TXP_LOCK_ASSERT(sc);

	for (i = 0; i < TXP_TIMEOUT; i++) {
		if ((READ_REG(sc, TXP_ISR) & TXP_INT_A2H_0) != 0)
			break;
		DELAY(50);
	}

	if (i == TXP_TIMEOUT) {
		device_printf(sc->sc_dev, "firmware wait failed comm0\n");
		return (ETIMEDOUT);
	}

	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	if (READ_REG(sc, TXP_A2H_0) != STAT_WAITING_FOR_SEGMENT) {
		device_printf(sc->sc_dev, "firmware not waiting for segment\n");
		return (ETIMEDOUT);
	}
	return (0);
}

static int
txp_download_fw_section(struct txp_softc *sc,
    struct txp_fw_section_header *sect, int sectnum)
{
	bus_dma_tag_t sec_tag;
	bus_dmamap_t sec_map;
	bus_addr_t sec_paddr;
	uint8_t *sec_buf;
	int rseg, err = 0;
	struct mbuf m;
	uint16_t csum;

	TXP_LOCK_ASSERT(sc);

	/* Skip zero length sections. */
	if (le32toh(sect->nbytes) == 0)
		return (0);

	/* Make sure we aren't past the end of the image. */
	rseg = ((uint8_t *)sect) - ((uint8_t *)tc990image);
	if (rseg >= sizeof(tc990image)) {
		device_printf(sc->sc_dev,
		    "firmware invalid section address, section %d\n", sectnum);
		return (EIO);
	}

	/* Make sure this section doesn't go past the end. */
	rseg += le32toh(sect->nbytes);
	if (rseg >= sizeof(tc990image)) {
		device_printf(sc->sc_dev, "firmware truncated section %d\n",
		    sectnum);
		return (EIO);
	}

	sec_tag = NULL;
	sec_map = NULL;
	sec_buf = NULL;
	/* XXX */
	TXP_UNLOCK(sc);
	err = txp_dma_alloc(sc, "firmware sections", &sec_tag, sizeof(uint32_t),
	    0, &sec_map, (void **)&sec_buf, le32toh(sect->nbytes), &sec_paddr);
	TXP_LOCK(sc);
	if (err != 0)
		goto bail;
	bcopy(((uint8_t *)sect) + sizeof(*sect), sec_buf,
	    le32toh(sect->nbytes));

	/*
	 * dummy up mbuf and verify section checksum
	 */
	m.m_type = MT_DATA;
	m.m_next = m.m_nextpkt = NULL;
	m.m_len = le32toh(sect->nbytes);
	m.m_data = sec_buf;
	m.m_flags = 0;
	csum = in_cksum(&m, le32toh(sect->nbytes));
	if (csum != sect->cksum) {
		device_printf(sc->sc_dev,
		    "firmware section %d, bad cksum (expected 0x%x got 0x%x)\n",
		    sectnum, le16toh(sect->cksum), csum);
		err = EIO;
		goto bail;
	}

	bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_PREWRITE);

	WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes));
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_2, le16toh(sect->cksum));
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr));
	TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_4, TXP_ADDR_HI(sec_paddr));
	TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_5, TXP_ADDR_LO(sec_paddr));
	TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	if (txp_download_fw_wait(sc)) {
		device_printf(sc->sc_dev,
		    "firmware wait failed, section %d\n", sectnum);
		err = ETIMEDOUT;
	}

	bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_POSTWRITE);
bail:
	txp_dma_free(sc, &sec_tag, &sec_map, (void **)&sec_buf);
	return (err);
}

static int
txp_intr(void *vsc)
{
	struct txp_softc *sc;
	uint32_t status;

	sc = vsc;
	status = READ_REG(sc, TXP_ISR);
	if ((status & TXP_INT_LATCH) == 0)
		return (FILTER_STRAY);
	WRITE_REG(sc, TXP_ISR, status);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);

	return (FILTER_HANDLED);
}

static void
txp_int_task(void *arg, int pending)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	struct txp_hostvar *hv;
	uint32_t isr;
	int more;

	sc = (struct txp_softc *)arg;

	TXP_LOCK(sc);
	ifp = sc->sc_ifp;
	hv = sc->sc_hostvar;
	isr = READ_REG(sc, TXP_ISR);
	if ((isr & TXP_INT_LATCH) != 0)
		WRITE_REG(sc, TXP_ISR, isr);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
		    sc->sc_cdata.txp_hostvar_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		more = 0;
		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
			more += txp_rx_reclaim(sc, &sc->sc_rxhir,
			    sc->sc_process_limit);
		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
			more += txp_rx_reclaim(sc, &sc->sc_rxlor,
			    sc->sc_process_limit);
		/*
		 * XXX
		 * The controller does not seem smart enough to handle
		 * FIFO overflow conditions under heavy network load.
		 * No matter how often new Rx buffers were passed to
		 * the controller, the situation didn't change. Maybe
		 * flow control would be the only way to mitigate the
		 * issue, but the firmware has no commands to control
		 * the threshold for emitting pause frames.
		 */
		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
			txp_rxbuf_reclaim(sc);
		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txhir);
		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txlor);
		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
		    sc->sc_cdata.txp_hostvar_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			txp_start_locked(sc->sc_ifp);
		if (more != 0 || (READ_REG(sc, TXP_ISR) & TXP_INT_LATCH) != 0) {
			taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
			TXP_UNLOCK(sc);
			return;
		}
	}

	/* Re-enable interrupts. */
	WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
	TXP_UNLOCK(sc);
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
txp_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - (TXP_RXBUF_ALIGN - ETHER_ALIGN) / sizeof *src;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= TXP_RXBUF_ALIGN - ETHER_ALIGN;
}
#endif

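/*
 * Alignment sketch (illustrative numbers; TXP_RXBUF_ALIGN is assumed
 * to be 8 here): buffers are posted to the hardware at a DMA-friendly
 * offset, then the payload is slid back by
 * TXP_RXBUF_ALIGN - ETHER_ALIGN bytes so that the 14-byte ethernet
 * header leaves the IP header 32-bit aligned:
 *
 *	before: m_data = buf + TXP_RXBUF_ALIGN	(e.g. buf + 8)
 *	after:  m_data = buf + ETHER_ALIGN	(buf + 2 + 14 = buf + 16)
 */
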
static int
txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r, int count)
{
	struct ifnet *ifp;
	struct txp_rx_desc *rxd;
	struct mbuf *m;
	struct txp_rx_swdesc *sd;
	uint32_t roff, woff, rx_stat, prog;

	TXP_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	roff = le32toh(*r->r_roff);
	woff = le32toh(*r->r_woff);
	rxd = r->r_desc + roff / sizeof(struct txp_rx_desc);
	for (prog = 0; roff != woff; prog++, count--) {
		if (count <= 0)
			break;
		bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));
		KASSERT(sd != NULL, ("%s: Rx desc ring corrupted", __func__));
		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
		m = sd->sd_mbuf;
		KASSERT(m != NULL, ("%s: Rx buffer ring corrupted", __func__));
		sd->sd_mbuf = NULL;
		TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
		if ((rxd->rx_flags & RX_FLAGS_ERROR) != 0) {
			if (bootverbose)
				device_printf(sc->sc_dev, "Rx error %u\n",
				    le32toh(rxd->rx_stat) & RX_ERROR_MASK);
			m_freem(m);
			goto next;
		}

		m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len);
		m->m_pkthdr.rcvif = ifp;
#ifndef __NO_STRICT_ALIGNMENT
		txp_fixup_rx(m);
#endif
		rx_stat = le32toh(rxd->rx_stat);
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((rx_stat & RX_STAT_IPCKSUMBAD) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			else if ((rx_stat & RX_STAT_IPCKSUMGOOD) != 0)
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED|CSUM_IP_VALID;

			if ((rx_stat & RX_STAT_TCPCKSUMGOOD) != 0 ||
			    (rx_stat & RX_STAT_UDPCKSUMGOOD) != 0) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		/*
		 * XXX
		 * Typhoon has a firmware bug: the VLAN tag is always
		 * stripped even when the firmware is told not to
		 * remove it, so don't check if_capenable here.
		 */
		if (/* (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && */
		    (rx_stat & RX_STAT_VLAN) != 0) {
			m->m_pkthdr.ether_vtag =
			    bswap16((le32toh(rxd->rx_vlan) >> 16));
			m->m_flags |= M_VLANTAG;
		}

		TXP_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		TXP_LOCK(sc);

next:
		roff += sizeof(struct txp_rx_desc);
		if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
			roff = 0;
			rxd = r->r_desc;
		} else
			rxd++;
	}

	if (prog == 0)
		return (0);

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	*r->r_roff = htole32(roff);

	return (count > 0 ? 0 : EAGAIN);
}

static void
txp_rxbuf_reclaim(struct txp_softc *sc)
{
	struct txp_hostvar *hv;
	struct txp_rxbuf_desc *rbd;
	struct txp_rx_swdesc *sd;
	bus_dma_segment_t segs[1];
	int nsegs, prod, prog;
	uint32_t cons;

	TXP_LOCK_ASSERT(sc);

	hv = sc->sc_hostvar;
	cons = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx));
	prod = sc->sc_rxbufprod;
	TXP_DESC_INC(prod, RXBUF_ENTRIES);
	if (prod == cons)
		return;

	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
	    sc->sc_cdata.txp_rxbufs_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prod != cons; prog++) {
		sd = TAILQ_FIRST(&sc->sc_free_list);
		if (sd == NULL)
			break;
		rbd = sc->sc_rxbufs + prod;
		bcopy((u_long *)&rbd->rb_vaddrlo, &sd, sizeof(sd));
		sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (sd->sd_mbuf == NULL)
			break;
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
#ifndef __NO_STRICT_ALIGNMENT
		m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
#endif
		if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
		    sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0) != 0) {
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
			break;
		}
		KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
		    nsegs));
		TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
		TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
		    BUS_DMASYNC_PREREAD);
		rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
		rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
		TXP_DESC_INC(prod, RXBUF_ENTRIES);
	}

	if (prog == 0)
		return;
	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
	    sc->sc_cdata.txp_rxbufs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	prod = (prod + RXBUF_ENTRIES - 1) % RXBUF_ENTRIES;
	sc->sc_rxbufprod = prod;
	hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(prod));

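/*
 * Ring protocol sketch (a summary of the function above): the host is
 * the producer and the firmware the consumer of the receive buffer
 * ring. hv_rx_buf_write_idx is advanced only after the new clusters
 * are loaded and synced, and the pre-incremented prod is backed off
 * by one slot at the end so that prod never catches up with cons:
 *
 *	prod = (prod + RXBUF_ENTRIES - 1) % RXBUF_ENTRIES;
 *	hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(prod));
 */
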
/*
 * Reclaim mbufs and entries from a transmit ring.
 */
static void
txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r)
{
	struct ifnet *ifp;
	uint32_t idx;
	uint32_t cons, cnt;
	struct txp_tx_desc *txd;
	struct txp_swdesc *sd;

	TXP_LOCK_ASSERT(sc);

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	ifp = sc->sc_ifp;
	idx = TXP_OFFSET2IDX(le32toh(*(r->r_off)));
	cons = r->r_cons;
	cnt = r->r_cnt;
	txd = r->r_desc + cons;
	sd = sc->sc_txd + cons;

	for (cnt = r->r_cnt; cons != idx && cnt > 0; cnt--) {
		if ((txd->tx_flags & TX_FLAGS_TYPE_M) == TX_FLAGS_TYPE_DATA) {
			if (sd->sd_mbuf != NULL) {
				bus_dmamap_sync(sc->sc_cdata.txp_tx_tag,
				    sd->sd_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->sc_cdata.txp_tx_tag,
				    sd->sd_map);
				m_freem(sd->sd_mbuf);
				sd->sd_mbuf = NULL;
				txd->tx_addrlo = 0;
				txd->tx_addrhi = 0;
				txd->tx_flags = 0;
			}
		}
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		if (++cons == TX_ENTRIES) {
			txd = r->r_desc;
			cons = 0;
			sd = sc->sc_txd;
		} else {
			txd++;
			sd++;
		}
	}

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	r->r_cons = cons;
	r->r_cnt = cnt;
	if (cnt == 0)
		sc->sc_watchdog_timer = 0;
}

static int
txp_shutdown(device_t dev)
{

	return (txp_suspend(dev));
}

static int
txp_suspend(device_t dev)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	uint8_t *eaddr;
	uint16_t p1;
	uint32_t p2;
	int pmc;
	uint16_t pmstat;

	sc = device_get_softc(dev);

	TXP_LOCK(sc);
	ifp = sc->sc_ifp;
	txp_stop(sc);
	txp_init_rings(sc);
	/* Reset controller and make it reload sleep image. */
	txp_reset(sc);
	/* Let controller boot from sleep image. */
	if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
		device_printf(sc->sc_dev, "couldn't boot sleep image\n");

	/* Set station address. */
	eaddr = IF_LLADDR(sc->sc_ifp);
	p1 = 0;
	((uint8_t *)&p1)[1] = eaddr[0];
	((uint8_t *)&p1)[0] = eaddr[1];
	p1 = le16toh(p1);
	((uint8_t *)&p2)[3] = eaddr[2];
	((uint8_t *)&p2)[2] = eaddr[3];
	((uint8_t *)&p2)[1] = eaddr[4];
	((uint8_t *)&p2)[0] = eaddr[5];
	p2 = le32toh(p2);
	txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0, NULL, NULL,
	    NULL, TXP_CMD_WAIT);
	txp_set_filter(sc);
	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	txp_sleep(sc, sc->sc_ifp->if_capenable);
	if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
		/* Request PME. */
		pmstat = pci_read_config(sc->sc_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
		if ((ifp->if_capenable & IFCAP_WOL) != 0)
			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->sc_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
	TXP_UNLOCK(sc);

	return (0);
}

static int
txp_resume(device_t dev)
{
	struct txp_softc *sc;
	int pmc;
	uint16_t pmstat;

	sc = device_get_softc(dev);

	TXP_LOCK(sc);
	if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->sc_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->sc_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	if ((sc->sc_ifp->if_flags & IFF_UP) != 0)
		txp_init_locked(sc);
	TXP_UNLOCK(sc);

	return (0);
}

struct txp_dmamap_arg {
	bus_addr_t	txp_busaddr;
};

static void
txp_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct txp_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct txp_dmamap_arg *)arg;
	ctx->txp_busaddr = segs[0].ds_addr;
}

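/*
 * Usage sketch: txp_dma_alloc() below hands a struct txp_dmamap_arg to
 * bus_dmamap_load(), which with BUS_DMA_NOWAIT invokes txp_dmamap_cb()
 * before returning, so the bus address of the single segment can
 * simply be copied out through the context:
 *
 *	ctx.txp_busaddr = 0;
 *	error = bus_dmamap_load(*tag, *map, buf, size,
 *	    txp_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 *	*paddr = ctx.txp_busaddr;
 */
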
static int
txp_dma_alloc(struct txp_softc *sc, char *type, bus_dma_tag_t *tag,
    bus_size_t alignment, bus_size_t boundary, bus_dmamap_t *map, void **buf,
    bus_size_t size, bus_addr_t *paddr)
{
	struct txp_dmamap_arg ctx;
	int error;

	/* Create DMA block tag. */
	error = bus_dma_tag_create(
	    sc->sc_cdata.txp_parent_tag,	/* parent */
	    alignment, boundary,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not create DMA tag for %s.\n", type);
		return (error);
	}

	*paddr = 0;
	/* Allocate DMA'able memory and load the DMA map. */
	error = bus_dmamem_alloc(*tag, buf, BUS_DMA_WAITOK | BUS_DMA_ZERO |
	    BUS_DMA_COHERENT, map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate DMA'able memory for %s.\n", type);
		return (error);
	}

	ctx.txp_busaddr = 0;
	error = bus_dmamap_load(*tag, *map, *(uint8_t **)buf,
	    size, txp_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.txp_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "could not load DMA'able memory for %s.\n", type);
		return (error);
	}
	*paddr = ctx.txp_busaddr;

	return (0);
}

static void
txp_dma_free(struct txp_softc *sc, bus_dma_tag_t *tag, bus_dmamap_t *map,
    void **buf)
{

	if (*tag != NULL) {
		if (*map != NULL)
			bus_dmamap_unload(*tag, *map);
		if (*map != NULL && buf != NULL)
			bus_dmamem_free(*tag, *(uint8_t **)buf, *map);
		*(uint8_t **)buf = NULL;
		*map = NULL;
		bus_dma_tag_destroy(*tag);
		*tag = NULL;
	}
}

static int
txp_alloc_rings(struct txp_softc *sc)
{
	struct txp_boot_record *boot;
	struct txp_ldata *ld;
	struct txp_swdesc *txd;
	struct txp_rxbuf_desc *rbd;
	struct txp_rx_swdesc *sd;
	int error, i;

	ld = &sc->sc_ldata;
	boot = ld->txp_boot;

	/* boot record */
	sc->sc_boot = boot;

	/*
	 * Create parent ring/DMA block tag.
	 * The datasheet says that all ring addresses and descriptors
	 * support 64-bit addressing. However, the controller is known
	 * not to support DAC, so limit the DMA address space to
	 * 32 bits.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->sc_dev), /* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.txp_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create parent DMA tag.\n");
		return (error);
	}

	/* Boot record. */
	error = txp_dma_alloc(sc, "boot record",
	    &sc->sc_cdata.txp_boot_tag, sizeof(uint32_t), 0,
	    &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
	    sizeof(struct txp_boot_record),
	    &sc->sc_ldata.txp_boot_paddr);
	if (error != 0)
		return (error);
	boot = sc->sc_ldata.txp_boot;
	sc->sc_boot = boot;

	/* Host variables. */
	error = txp_dma_alloc(sc, "host variables",
	    &sc->sc_cdata.txp_hostvar_tag, sizeof(uint32_t), 0,
	    &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
	    sizeof(struct txp_hostvar),
	    &sc->sc_ldata.txp_hostvar_paddr);
	if (error != 0)
		return (error);
	boot->br_hostvar_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_hostvar_paddr));
	boot->br_hostvar_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_hostvar_paddr));
	sc->sc_hostvar = sc->sc_ldata.txp_hostvar;

	/* Hi priority tx ring. */
	error = txp_dma_alloc(sc, "hi priority tx ring",
	    &sc->sc_cdata.txp_txhiring_tag, sizeof(struct txp_tx_desc), 0,
	    &sc->sc_cdata.txp_txhiring_map, (void **)&sc->sc_ldata.txp_txhiring,
	    sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_ldata.txp_txhiring_paddr);
	if (error != 0)
		return (error);
	boot->br_txhipri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txhiring_paddr));
	boot->br_txhipri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txhiring_paddr));
	boot->br_txhipri_siz =
	    htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
	sc->sc_txhir.r_tag = sc->sc_cdata.txp_txhiring_tag;
	sc->sc_txhir.r_map = sc->sc_cdata.txp_txhiring_map;
	sc->sc_txhir.r_reg = TXP_H2A_1;
	sc->sc_txhir.r_desc = sc->sc_ldata.txp_txhiring;
	sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
	sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;

	/* Low priority tx ring. */
	error = txp_dma_alloc(sc, "low priority tx ring",
	    &sc->sc_cdata.txp_txloring_tag, sizeof(struct txp_tx_desc), 0,
	    &sc->sc_cdata.txp_txloring_map, (void **)&sc->sc_ldata.txp_txloring,
	    sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_ldata.txp_txloring_paddr);
	if (error != 0)
		return (error);
	boot->br_txlopri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txloring_paddr));
	boot->br_txlopri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txloring_paddr));
	boot->br_txlopri_siz =
	    htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
	sc->sc_txlor.r_tag = sc->sc_cdata.txp_txloring_tag;
	sc->sc_txlor.r_map = sc->sc_cdata.txp_txloring_map;
	sc->sc_txlor.r_reg = TXP_H2A_3;
	sc->sc_txlor.r_desc = sc->sc_ldata.txp_txloring;
	sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
	sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;

	/* High priority rx ring. */
	error = txp_dma_alloc(sc, "hi priority rx ring",
	    &sc->sc_cdata.txp_rxhiring_tag,
	    roundup(sizeof(struct txp_rx_desc), 16), 0,
	    &sc->sc_cdata.txp_rxhiring_map, (void **)&sc->sc_ldata.txp_rxhiring,
	    sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_ldata.txp_rxhiring_paddr);
	if (error != 0)
		return (error);
	boot->br_rxhipri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxhiring_paddr));
	boot->br_rxhipri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxhiring_paddr));
	boot->br_rxhipri_siz =
	    htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
	sc->sc_rxhir.r_tag = sc->sc_cdata.txp_rxhiring_tag;
	sc->sc_rxhir.r_map = sc->sc_cdata.txp_rxhiring_map;
	sc->sc_rxhir.r_desc = sc->sc_ldata.txp_rxhiring;
	sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
	sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;

	/* Low priority rx ring. */
	error = txp_dma_alloc(sc, "low priority rx ring",
	    &sc->sc_cdata.txp_rxloring_tag,
	    roundup(sizeof(struct txp_rx_desc), 16), 0,
	    &sc->sc_cdata.txp_rxloring_map, (void **)&sc->sc_ldata.txp_rxloring,
	    sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_ldata.txp_rxloring_paddr);
	if (error != 0)
		return (error);
	boot->br_rxlopri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxloring_paddr));
	boot->br_rxlopri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxloring_paddr));
	boot->br_rxlopri_siz =
	    htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
	sc->sc_rxlor.r_tag = sc->sc_cdata.txp_rxloring_tag;
	sc->sc_rxlor.r_map = sc->sc_cdata.txp_rxloring_map;
	sc->sc_rxlor.r_desc = sc->sc_ldata.txp_rxloring;
	sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
	sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;

	/* Command ring. */
	error = txp_dma_alloc(sc, "command ring",
	    &sc->sc_cdata.txp_cmdring_tag, sizeof(struct txp_cmd_desc), 0,
	    &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
	    sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
	    &sc->sc_ldata.txp_cmdring_paddr);
	if (error != 0)
		return (error);
	boot->br_cmd_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_cmdring_paddr));
	boot->br_cmd_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_cmdring_paddr));
	boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
	sc->sc_cmdring.base = sc->sc_ldata.txp_cmdring;
	sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
	sc->sc_cmdring.lastwrite = 0;

	/* Response ring. */
	error = txp_dma_alloc(sc, "response ring",
	    &sc->sc_cdata.txp_rspring_tag, sizeof(struct txp_rsp_desc), 0,
	    &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
	    sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
	    &sc->sc_ldata.txp_rspring_paddr);
	if (error != 0)
		return (error);
	boot->br_resp_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rspring_paddr));
	boot->br_resp_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rspring_paddr));
	boot->br_resp_siz = htole32(RSP_ENTRIES * sizeof(struct txp_rsp_desc));
	sc->sc_rspring.base = sc->sc_ldata.txp_rspring;
	sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
	sc->sc_rspring.lastwrite = 0;

	/* Receive buffer ring. */
	error = txp_dma_alloc(sc, "receive buffer ring",
	    &sc->sc_cdata.txp_rxbufs_tag, sizeof(struct txp_rxbuf_desc), 0,
	    &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
	    sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
	    &sc->sc_ldata.txp_rxbufs_paddr);
	if (error != 0)
		return (error);
	boot->br_rxbuf_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxbufs_paddr));
	boot->br_rxbuf_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxbufs_paddr));
	boot->br_rxbuf_siz =
	    htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
	sc->sc_rxbufs = sc->sc_ldata.txp_rxbufs;

	/* Zero ring. */
	error = txp_dma_alloc(sc, "zero buffer",
	    &sc->sc_cdata.txp_zero_tag, sizeof(uint32_t), 0,
	    &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
	    sizeof(uint32_t), &sc->sc_ldata.txp_zero_paddr);
	if (error != 0)
		return (error);
	boot->br_zero_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_zero_paddr));
	boot->br_zero_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_zero_paddr));

	bus_dmamap_sync(sc->sc_cdata.txp_boot_tag, sc->sc_cdata.txp_boot_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Create Tx buffers. */
	error = bus_dma_tag_create(
	    sc->sc_cdata.txp_parent_tag,	/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * TXP_MAXTXSEGS,	/* maxsize */
	    TXP_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.txp_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->sc_cdata.txp_parent_tag,	/* parent */
	    TXP_RXBUF_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.txp_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < TX_ENTRIES; i++) {
		txd = &sc->sc_txd[i];
		txd->sd_mbuf = NULL;
		txd->sd_map = NULL;
		error = bus_dmamap_create(sc->sc_cdata.txp_tx_tag, 0,
		    &txd->sd_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}

	/* Create DMA maps for Rx buffers. */
	for (i = 0; i < RXBUF_ENTRIES; i++) {
		sd = malloc(sizeof(struct txp_rx_swdesc), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (sd == NULL) {
			error = ENOMEM;
			goto fail;
		}
		/*
		 * The virtual address part of the descriptor is not
		 * used by the hardware, so use it to save a ring
		 * entry. The bcopy is needed here because otherwise
		 * the address wouldn't be valid on big-endian
		 * architectures.
		 */
		rbd = sc->sc_rxbufs + i;
		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
		sd->sd_mbuf = NULL;
		sd->sd_map = NULL;
		error = bus_dmamap_create(sc->sc_cdata.txp_rx_tag, 0,
		    &sd->sd_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
	}

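	/*
	 * Round-trip sketch: the software descriptor pointer stashed
	 * in rb_vaddrlo/rb_vaddrhi above is recovered the same way on
	 * the Rx path, so the two bcopy() calls mirror each other:
	 *
	 *	store:   bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
	 *	recover: bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));
	 */
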
1569fail:
1570	return (error);
1571}
1572
1573static void
1574txp_init_rings(struct txp_softc *sc)
1575{
1576
1577	bzero(sc->sc_ldata.txp_hostvar, sizeof(struct txp_hostvar));
1578	bzero(sc->sc_ldata.txp_zero, sizeof(uint32_t));
1579	sc->sc_txhir.r_cons = 0;
1580	sc->sc_txhir.r_prod = 0;
1581	sc->sc_txhir.r_cnt = 0;
1582	sc->sc_txlor.r_cons = 0;
1583	sc->sc_txlor.r_prod = 0;
1584	sc->sc_txlor.r_cnt = 0;
1585	sc->sc_cmdring.lastwrite = 0;
1586	sc->sc_rspring.lastwrite = 0;
1587	sc->sc_rxbufprod = 0;
1588	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1589	    sc->sc_cdata.txp_hostvar_map,
1590	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1591}
1592
1593static int
1594txp_wait(struct txp_softc *sc, uint32_t state)
1595{
1596	uint32_t reg;
1597	int i;
1598
1599	for (i = 0; i < TXP_TIMEOUT; i++) {
1600		reg = READ_REG(sc, TXP_A2H_0);
1601		if (reg == state)
1602			break;
1603		DELAY(50);
1604	}
1605
1606	return (i == TXP_TIMEOUT ? ETIMEDOUT : 0);
1607}
1608
1609static void
1610txp_free_rings(struct txp_softc *sc)
1611{
1612	struct txp_swdesc *txd;
1613	struct txp_rx_swdesc *sd;
1614	int i;
1615
1616	/* Tx buffers. */
1617	if (sc->sc_cdata.txp_tx_tag != NULL) {
1618		for (i = 0; i < TX_ENTRIES; i++) {
1619			txd = &sc->sc_txd[i];
1620			if (txd->sd_map != NULL) {
1621				bus_dmamap_destroy(sc->sc_cdata.txp_tx_tag,
1622				    txd->sd_map);
1623				txd->sd_map = NULL;
1624			}
1625		}
1626		bus_dma_tag_destroy(sc->sc_cdata.txp_tx_tag);
1627		sc->sc_cdata.txp_tx_tag = NULL;
1628	}
1629	/* Rx buffers. */
1630	if (sc->sc_cdata.txp_rx_tag != NULL) {
1631		if (sc->sc_rxbufs != NULL) {
1632			KASSERT(TAILQ_FIRST(&sc->sc_busy_list) == NULL,
1633			    ("%s : still have busy Rx buffers", __func__));
1634			while ((sd = TAILQ_FIRST(&sc->sc_free_list)) != NULL) {
1635				TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1636				if (sd->sd_map != NULL) {
1637					bus_dmamap_destroy(
1638					    sc->sc_cdata.txp_rx_tag,
1639					    sd->sd_map);
1640					sd->sd_map = NULL;
1641				}
1642				free(sd, M_DEVBUF);
1643			}
1644		}
1645		bus_dma_tag_destroy(sc->sc_cdata.txp_rx_tag);
1646		sc->sc_cdata.txp_rx_tag = NULL;
1647	}
1648
1649	/* Hi priority Tx ring. */
1650	txp_dma_free(sc, &sc->sc_cdata.txp_txhiring_tag,
1651	    &sc->sc_cdata.txp_txhiring_map,
1652	    (void **)&sc->sc_ldata.txp_txhiring);
1653	/* Low priority Tx ring. */
1654	txp_dma_free(sc, &sc->sc_cdata.txp_txloring_tag,
1655	    &sc->sc_cdata.txp_txloring_map,
1656	    (void **)&sc->sc_ldata.txp_txloring);
1657	/* Hi priority Rx ring. */
1658	txp_dma_free(sc, &sc->sc_cdata.txp_rxhiring_tag,
1659	    &sc->sc_cdata.txp_rxhiring_map,
1660	    (void **)&sc->sc_ldata.txp_rxhiring);
1661	/* Low priority Rx ring. */
1662	txp_dma_free(sc, &sc->sc_cdata.txp_rxloring_tag,
1663	    &sc->sc_cdata.txp_rxloring_map,
1664	    (void **)&sc->sc_ldata.txp_rxloring);
1665	/* Receive buffer ring. */
1666	txp_dma_free(sc, &sc->sc_cdata.txp_rxbufs_tag,
1667	    &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs);
1668	/* Command ring. */
1669	txp_dma_free(sc, &sc->sc_cdata.txp_cmdring_tag,
1670	    &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring);
1671	/* Response ring. */
1672	txp_dma_free(sc, &sc->sc_cdata.txp_rspring_tag,
1673	    &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring);
1674	/* Zero ring. */
1675	txp_dma_free(sc, &sc->sc_cdata.txp_zero_tag,
1676	    &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero);
1677	/* Host variables. */
1678	txp_dma_free(sc, &sc->sc_cdata.txp_hostvar_tag,
1679	    &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar);
1680	/* Boot record. */
1681	txp_dma_free(sc, &sc->sc_cdata.txp_boot_tag,
1682	    &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot);
1683
1684	if (sc->sc_cdata.txp_parent_tag != NULL) {
1685		bus_dma_tag_destroy(sc->sc_cdata.txp_parent_tag);
1686		sc->sc_cdata.txp_parent_tag = NULL;
1687	}
1688
1689}
1690
static int
txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int capenable, error = 0, mask;

	switch (command) {
	case SIOCSIFFLAGS:
		TXP_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->sc_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					txp_set_filter(sc);
			} else {
				if ((sc->sc_flags & TXP_FLAG_DETACH) == 0)
					txp_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				txp_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		TXP_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Multicast list has changed; set the hardware
		 * filter accordingly.
		 */
		TXP_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			txp_set_filter(sc);
		TXP_UNLOCK(sc);
		break;
	case SIOCSIFCAP:
		TXP_LOCK(sc);
		capenable = ifp->if_capenable;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= TXP_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~TXP_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
		if (capenable != ifp->if_capenable)
			txp_set_capabilities(sc);
		TXP_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

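/*
 * Populate the receive buffer ring with mbuf clusters.  Each ring slot
 * stashes the software descriptor pointer in rb_vaddrlo (on 64-bit
 * machines the bcopy() of the pointer spans rb_vaddrhi as well) so the
 * Rx completion path can recover the descriptor without a lookup.
 */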
static int
txp_rxring_fill(struct txp_softc *sc)
{
	struct txp_rxbuf_desc *rbd;
	struct txp_rx_swdesc *sd;
	bus_dma_segment_t segs[1];
	int error, i, nsegs;

	TXP_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
	    sc->sc_cdata.txp_rxbufs_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < RXBUF_ENTRIES; i++) {
		sd = TAILQ_FIRST(&sc->sc_free_list);
		if (sd == NULL)
			return (ENOMEM);
		rbd = sc->sc_rxbufs + i;
		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
		KASSERT(sd->sd_mbuf == NULL,
		    ("%s : Rx buffer ring corrupted", __func__));
		sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (sd->sd_mbuf == NULL)
			return (ENOMEM);
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
#ifndef __NO_STRICT_ALIGNMENT
		m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
#endif
		if ((error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
		    sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0)) != 0) {
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
			return (error);
		}
		KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
		    nsegs));
		TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
		TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
		    BUS_DMASYNC_PREREAD);
		rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
		rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
	}

	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
	    sc->sc_cdata.txp_rxbufs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_rxbufprod = RXBUF_ENTRIES - 1;
	sc->sc_hostvar->hv_rx_buf_write_idx =
	    htole32(TXP_IDX2OFFSET(RXBUF_ENTRIES - 1));

	return (0);
}

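/*
 * Move every Rx buffer from the busy list back to the free list,
 * unloading the DMA maps and freeing the attached mbufs.
 */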
static void
txp_rxring_empty(struct txp_softc *sc)
{
	struct txp_rx_swdesc *sd;
	int cnt;

	TXP_LOCK_ASSERT(sc);

	if (sc->sc_rxbufs == NULL)
		return;
	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Release allocated Rx buffers. */
	cnt = 0;
	while ((sd = TAILQ_FIRST(&sc->sc_busy_list)) != NULL) {
		TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
		KASSERT(sd->sd_mbuf != NULL,
		    ("%s : Rx buffer ring corrupted", __func__));
		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
		m_freem(sd->sd_mbuf);
		sd->sd_mbuf = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
		cnt++;
	}
}

1859txp_init(void *xsc)
1860{
1861	struct txp_softc *sc;
1862
1863	sc = xsc;
1864	TXP_LOCK(sc);
1865	txp_init_locked(sc);
1866	TXP_UNLOCK(sc);
1867}
1868
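/*
 * Bring the interface up: reset the controller, download and boot the
 * runtime firmware image, fill the Rx ring, program the station
 * address, filter and offload state, and finally enable Tx/Rx and
 * interrupts.
 */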
static void
txp_init_locked(struct txp_softc *sc)
{
	struct ifnet *ifp;
	uint8_t *eaddr;
	uint16_t p1;
	uint32_t p2;
	int error;

	TXP_LOCK_ASSERT(sc);
	ifp = sc->sc_ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/* Initialize ring structure. */
	txp_init_rings(sc);
	/* Wakeup controller. */
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_WAKEUP);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
	/*
	 * It seems that earlier NV images can come back online from the
	 * wakeup command alone, but newer ones require a controller
	 * reset.  So just reset the controller again.
	 */
	if (txp_reset(sc) != 0)
		goto init_fail;
	/* Download firmware. */
	error = txp_download_fw(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not download firmware.\n");
		goto init_fail;
	}
	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	if ((error = txp_rxring_fill(sc)) != 0) {
		device_printf(sc->sc_dev, "no memory for Rx buffers.\n");
		goto init_fail;
	}
	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (txp_boot(sc, STAT_WAITING_FOR_BOOT) != 0) {
		device_printf(sc->sc_dev, "could not boot firmware.\n");
		goto init_fail;
	}

	/*
	 * Quite contrary to the Typhoon T2 software functional
	 * specification, it seems that the TXP_CMD_RECV_BUFFER_CONTROL
	 * command is not implemented in the firmware.  This means the
	 * driver has to handle misaligned frames on strict-alignment
	 * architectures.  AFAIK this is the only controller manufactured
	 * by 3Com that has this bug.  3Com should fix this.
	 */
	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
		goto init_fail;
	/* Undocumented command (interrupt coalescing disable?) - from Linux. */
	if (txp_command(sc, TXP_CMD_FILTER_DEFINE, 0, 0, 0, NULL, NULL, NULL,
	    TXP_CMD_NOWAIT) != 0)
		goto init_fail;

	/* Set station address. */
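	/*
	 * Pack the address the way the firmware expects it:
	 * eaddr[1]/eaddr[0] into the low/high bytes of p1 and
	 * eaddr[5]..eaddr[2] into p2.  For example, a hypothetical
	 * address 00:a0:24:aa:bb:cc yields p1 = 0x00a0 and
	 * p2 = 0x24aabbcc on a little-endian host.  The le16toh()/
	 * le32toh() calls pre-compensate for the htole16()/htole32()
	 * conversions applied when the command descriptor is built.
	 */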
	eaddr = IF_LLADDR(sc->sc_ifp);
	p1 = 0;
	((uint8_t *)&p1)[1] = eaddr[0];
	((uint8_t *)&p1)[0] = eaddr[1];
	p1 = le16toh(p1);
	((uint8_t *)&p2)[3] = eaddr[2];
	((uint8_t *)&p2)[2] = eaddr[3];
	((uint8_t *)&p2)[1] = eaddr[4];
	((uint8_t *)&p2)[0] = eaddr[5];
	p2 = le32toh(p2);
	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
		goto init_fail;

	txp_set_filter(sc);
	txp_set_capabilities(sc);

	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT))
		goto init_fail;
	if (txp_command(sc, TXP_CMD_XCVR_SELECT, sc->sc_xcvr, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
		goto init_fail;
	if (txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
	    TXP_CMD_NOWAIT) != 0)
		goto init_fail;
	if (txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
	    TXP_CMD_NOWAIT) != 0)
		goto init_fail;

	/* Ack all pending interrupts and enable interrupts. */
	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
	WRITE_REG(sc, TXP_IER, TXP_INTRS);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->sc_tick, hz, txp_tick, sc);
	return;

init_fail:
	txp_rxring_empty(sc);
	txp_init_rings(sc);
	txp_reset(sc);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
}

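/*
 * Once-a-second housekeeping: reclaim Rx buffers, refresh the MAC
 * statistics (which doubles as the link-state poll; see the unsolicited
 * response handling in txp_response()), and kick the Tx watchdog.
 */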
static void
txp_tick(void *vsc)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	struct txp_rsp_desc *rsp;
	struct txp_ext_desc *ext;
	int link;

	sc = vsc;
	TXP_LOCK_ASSERT(sc);
	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	txp_rxbuf_reclaim(sc);
	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ifp = sc->sc_ifp;
	rsp = NULL;

	link = sc->sc_flags & TXP_FLAG_LINK;
	if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
	    &rsp, TXP_CMD_WAIT))
		goto out;
	if (rsp->rsp_numdesc != 6)
		goto out;
	txp_stats_update(sc, rsp);
	if (link == 0 && (sc->sc_flags & TXP_FLAG_LINK) != 0) {
		ext = (struct txp_ext_desc *)(rsp + 1);
		/* Update baudrate with resolved speed. */
		if ((ext[5].ext_2 & 0x02) != 0)
			ifp->if_baudrate = IF_Mbps(100);
		else
			ifp->if_baudrate = IF_Mbps(10);
	}

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);
	txp_watchdog(sc);
	callout_reset(&sc->sc_tick, hz, txp_tick, sc);
}

static void
txp_start(struct ifnet *ifp)
{
	struct txp_softc *sc;

	sc = ifp->if_softc;
	TXP_LOCK(sc);
	txp_start_locked(ifp);
	TXP_UNLOCK(sc);
}

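/*
 * Drain the interface send queue into the high-priority Tx ring; the
 * low-priority ring is currently unused.
 */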
static void
txp_start_locked(struct ifnet *ifp)
{
	struct txp_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;
	TXP_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	   IFF_DRV_RUNNING || (sc->sc_flags & TXP_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 * At the moment only the high-priority ring is used.
		 */
		if (txp_encap(sc, &sc->sc_txhir, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Send queued frame. */
		WRITE_REG(sc, sc->sc_txhir.r_reg,
		    TXP_IDX2OFFSET(sc->sc_txhir.r_prod));
		/* Count the frame so the watchdog below is armed. */
		enq++;
	}

	if (enq > 0) {
		/* Set a timeout in case the chip goes out to lunch. */
		sc->sc_watchdog_timer = TXP_TX_TIMEOUT;
	}
}

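/*
 * Load an outgoing mbuf chain into Tx descriptors: one TYPE_DATA header
 * descriptor followed by one TYPE_FRAG descriptor per DMA segment.  The
 * header's valid bit is set last so the firmware never sees a partially
 * built chain.
 */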
static int
txp_encap(struct txp_softc *sc, struct txp_tx_ring *r, struct mbuf **m_head)
{
	struct txp_tx_desc *first_txd;
	struct txp_frag_desc *fxd;
	struct txp_swdesc *sd;
	struct mbuf *m;
	bus_dma_segment_t txsegs[TXP_MAXTXSEGS];
	int error, i, nsegs;

	TXP_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	m = *m_head;
	first_txd = r->r_desc + r->r_prod;
	sd = sc->sc_txd + r->r_prod;

	error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag, sd->sd_map,
	    *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, TXP_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag,
		    sd->sd_map, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overrun. */
	if (r->r_cnt + nsegs >= TX_ENTRIES - TXP_TXD_RESERVED) {
		bus_dmamap_unload(sc->sc_cdata.txp_tx_tag, sd->sd_map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->sc_cdata.txp_tx_tag, sd->sd_map,
	    BUS_DMASYNC_PREWRITE);
	sd->sd_mbuf = m;

	first_txd->tx_flags = TX_FLAGS_TYPE_DATA;
	first_txd->tx_numdesc = 0;
	first_txd->tx_addrlo = 0;
	first_txd->tx_addrhi = 0;
	first_txd->tx_totlen = 0;
	first_txd->tx_pflags = 0;
	r->r_cnt++;
	TXP_DESC_INC(r->r_prod, TX_ENTRIES);

	/* Configure Tx IP/TCP/UDP checksum offload. */
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
		first_txd->tx_pflags |= htole32(TX_PFLAGS_IPCKSUM);
#ifdef notyet
	/* XXX firmware bug. */
	if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
		first_txd->tx_pflags |= htole32(TX_PFLAGS_TCPCKSUM);
	if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
		first_txd->tx_pflags |= htole32(TX_PFLAGS_UDPCKSUM);
#endif

	/* Configure VLAN hardware tag insertion. */
	if ((m->m_flags & M_VLANTAG) != 0)
		first_txd->tx_pflags |=
		    htole32(TX_PFLAGS_VLAN | TX_PFLAGS_PRIO |
		    (bswap16(m->m_pkthdr.ether_vtag) << TX_PFLAGS_VLANTAG_S));

	for (i = 0; i < nsegs; i++) {
		fxd = (struct txp_frag_desc *)(r->r_desc + r->r_prod);
		fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG | TX_FLAGS_VALID;
		fxd->frag_rsvd1 = 0;
		fxd->frag_len = htole16(txsegs[i].ds_len);
		fxd->frag_addrhi = htole32(TXP_ADDR_HI(txsegs[i].ds_addr));
		fxd->frag_addrlo = htole32(TXP_ADDR_LO(txsegs[i].ds_addr));
		fxd->frag_rsvd2 = 0;
		first_txd->tx_numdesc++;
		r->r_cnt++;
		TXP_DESC_INC(r->r_prod, TX_ENTRIES);
	}

	/* Lastly set valid flag. */
	first_txd->tx_flags |= TX_FLAGS_VALID;

	/* Sync descriptors. */
	bus_dmamap_sync(r->r_tag, r->r_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Handle simple commands sent to the Typhoon.
 */
static int
txp_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
    uint32_t in3, uint16_t *out1, uint32_t *out2, uint32_t *out3, int wait)
{
	struct txp_rsp_desc *rsp;

	rsp = NULL;
	if (txp_ext_command(sc, id, in1, in2, in3, NULL, 0, &rsp, wait) != 0) {
		device_printf(sc->sc_dev, "command 0x%02x failed\n", id);
		return (-1);
	}

	if (wait == TXP_CMD_NOWAIT)
		return (0);

	KASSERT(rsp != NULL, ("rsp is NULL!\n"));
	if (out1 != NULL)
		*out1 = le16toh(rsp->rsp_par1);
	if (out2 != NULL)
		*out2 = le32toh(rsp->rsp_par2);
	if (out3 != NULL)
		*out3 = le32toh(rsp->rsp_par3);
	free(rsp, M_DEVBUF);
	return (0);
}

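/*
 * Queue a command descriptor (plus optional extension descriptors) on
 * the command ring and, for TXP_CMD_WAIT callers, poll the host
 * variables block until the matching response shows up in the response
 * ring.
 */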
static int
txp_ext_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
    uint32_t in3, struct txp_ext_desc *in_extp, uint8_t in_extn,
    struct txp_rsp_desc **rspp, int wait)
{
	struct txp_hostvar *hv;
	struct txp_cmd_desc *cmd;
	struct txp_ext_desc *ext;
	uint32_t idx, i;
	uint16_t seq;
	int error;

	error = 0;
	hv = sc->sc_hostvar;
	if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
		device_printf(sc->sc_dev,
		    "%s : out of free cmd descriptors for command 0x%02x\n",
		    __func__, id);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
	    sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_POSTWRITE);
	idx = sc->sc_cmdring.lastwrite;
	cmd = (struct txp_cmd_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
	bzero(cmd, sizeof(*cmd));

	cmd->cmd_numdesc = in_extn;
	seq = sc->sc_seq++;
	cmd->cmd_seq = htole16(seq);
	cmd->cmd_id = htole16(id);
	cmd->cmd_par1 = htole16(in1);
	cmd->cmd_par2 = htole32(in2);
	cmd->cmd_par3 = htole32(in3);
	cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
	    (wait == TXP_CMD_WAIT ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;

	idx += sizeof(struct txp_cmd_desc);
	if (idx == sc->sc_cmdring.size)
		idx = 0;

	for (i = 0; i < in_extn; i++) {
		ext = (struct txp_ext_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
		bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
		in_extp++;
		idx += sizeof(struct txp_cmd_desc);
		if (idx == sc->sc_cmdring.size)
			idx = 0;
	}

	sc->sc_cmdring.lastwrite = idx;
	bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
	    sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);

	if (wait == TXP_CMD_NOWAIT)
		return (0);

	for (i = 0; i < TXP_TIMEOUT; i++) {
		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
		    sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);
		if (le32toh(hv->hv_resp_read_idx) !=
		    le32toh(hv->hv_resp_write_idx)) {
			error = txp_response(sc, id, seq, rspp);
			bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
			    sc->sc_cdata.txp_hostvar_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			if (error != 0)
				return (error);
			if (*rspp != NULL)
				break;
		}
		DELAY(50);
	}
	if (i == TXP_TIMEOUT) {
		device_printf(sc->sc_dev, "command 0x%02x timed out\n", id);
		error = ETIMEDOUT;
	}

	return (error);
}

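/*
 * Walk the response ring looking for the response that matches the
 * given command id and sequence number, handling unsolicited responses
 * (media status, hello, statistics cycling) along the way.  A match is
 * copied into malloc()ed memory that the caller must free.
 */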
static int
txp_response(struct txp_softc *sc, uint16_t id, uint16_t seq,
    struct txp_rsp_desc **rspp)
{
	struct txp_hostvar *hv;
	struct txp_rsp_desc *rsp;
	uint32_t ridx;

	bus_dmamap_sync(sc->sc_cdata.txp_rspring_tag,
	    sc->sc_cdata.txp_rspring_map, BUS_DMASYNC_POSTREAD);
	hv = sc->sc_hostvar;
	ridx = le32toh(hv->hv_resp_read_idx);
	while (ridx != le32toh(hv->hv_resp_write_idx)) {
		rsp = (struct txp_rsp_desc *)(((uint8_t *)sc->sc_rspring.base) + ridx);

		if (id == le16toh(rsp->rsp_id) &&
		    le16toh(rsp->rsp_seq) == seq) {
			*rspp = (struct txp_rsp_desc *)malloc(
			    sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
			    M_DEVBUF, M_NOWAIT);
			if (*rspp == NULL) {
				device_printf(sc->sc_dev, "%s : command 0x%02x "
				    "memory allocation failure\n",
				    __func__, id);
				return (ENOMEM);
			}
			txp_rsp_fixup(sc, rsp, *rspp);
			return (0);
		}

		if ((rsp->rsp_flags & RSP_FLAGS_ERROR) != 0) {
			device_printf(sc->sc_dev,
			    "%s : command 0x%02x response error!\n", __func__,
			    le16toh(rsp->rsp_id));
			txp_rsp_fixup(sc, rsp, NULL);
			ridx = le32toh(hv->hv_resp_read_idx);
			continue;
		}

		/*
		 * The following unsolicited responses are handled while
		 * processing TXP_CMD_READ_STATISTICS, which requires a
		 * response.  The driver abuses that command to detect
		 * media status changes.
		 * TXP_CMD_FILTER_DEFINE is not an unsolicited response,
		 * but we do not process the response ring in the interrupt
		 * handler, so we have to ignore the command here; otherwise
		 * an unknown-command message would be printed.
		 */
		switch (le16toh(rsp->rsp_id)) {
		case TXP_CMD_CYCLE_STATISTICS:
		case TXP_CMD_FILTER_DEFINE:
			break;
		case TXP_CMD_MEDIA_STATUS_READ:
			if ((le16toh(rsp->rsp_par1) & 0x0800) == 0) {
				sc->sc_flags |= TXP_FLAG_LINK;
				if_link_state_change(sc->sc_ifp,
				    LINK_STATE_UP);
			} else {
				sc->sc_flags &= ~TXP_FLAG_LINK;
				if_link_state_change(sc->sc_ifp,
				    LINK_STATE_DOWN);
			}
			break;
		case TXP_CMD_HELLO_RESPONSE:
			/*
			 * The driver should respond to the hello message,
			 * but TXP_CMD_READ_STATISTICS is issued every hz,
			 * so there is no need to send an explicit command
			 * here.
			 */
			device_printf(sc->sc_dev, "%s : hello\n", __func__);
			break;
		default:
			device_printf(sc->sc_dev,
			    "%s : unknown command 0x%02x\n", __func__,
			    le16toh(rsp->rsp_id));
		}
		txp_rsp_fixup(sc, rsp, NULL);
		ridx = le32toh(hv->hv_resp_read_idx);
	}

	return (0);
}

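/*
 * Consume rsp_numdesc + 1 descriptors from the response ring, copying
 * them to 'dst' when the caller wants the payload, and advance the
 * response read index past them.
 */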
static void
txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
    struct txp_rsp_desc *dst)
{
	struct txp_rsp_desc *src;
	struct txp_hostvar *hv;
	uint32_t i, ridx;

	src = rsp;
	hv = sc->sc_hostvar;
	ridx = le32toh(hv->hv_resp_read_idx);

	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
		if (dst != NULL)
			bcopy(src, dst++, sizeof(struct txp_rsp_desc));
		ridx += sizeof(struct txp_rsp_desc);
		if (ridx == sc->sc_rspring.size) {
			src = sc->sc_rspring.base;
			ridx = 0;
		} else
			src++;
		sc->sc_rspring.lastwrite = ridx;
	}

	hv->hv_resp_read_idx = htole32(ridx);
}

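/*
 * Compute the number of free command descriptors.  One slot is always
 * left unused so that a full ring can be told apart from an empty one
 * (write index equal to read index means empty).
 */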
static int
txp_cmd_desc_numfree(struct txp_softc *sc)
{
	struct txp_hostvar *hv;
	struct txp_boot_record *br;
	uint32_t widx, ridx, nfree;

	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	hv = sc->sc_hostvar;
	br = sc->sc_boot;
	widx = sc->sc_cmdring.lastwrite;
	ridx = le32toh(hv->hv_cmd_read_idx);

	if (widx == ridx) {
		/* Ring is completely free */
		nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
	} else {
		if (widx > ridx)
			nfree = le32toh(br->br_cmd_siz) -
			    (widx - ridx + sizeof(struct txp_cmd_desc));
		else
			nfree = ridx - widx - sizeof(struct txp_cmd_desc);
	}

	return (nfree / sizeof(struct txp_cmd_desc));
}

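/*
 * Arm the requested wakeup events (only the magic packet event is
 * wired up here) and put the controller into its sleep state.
 */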
static int
txp_sleep(struct txp_softc *sc, int capenable)
{
	uint16_t events;
	int error;

	events = 0;
	if ((capenable & IFCAP_WOL_MAGIC) != 0)
		events |= 0x01;
	error = txp_command(sc, TXP_CMD_ENABLE_WAKEUP_EVENTS, events, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
	if (error == 0) {
		/* Go to sleep. */
		error = txp_command(sc, TXP_CMD_GOTO_SLEEP, 0, 0, 0, NULL,
		    NULL, NULL, TXP_CMD_NOWAIT);
		if (error == 0) {
			error = txp_wait(sc, STAT_SLEEPING);
			if (error != 0)
				device_printf(sc->sc_dev,
				    "unable to enter sleep state\n");
		}
	}

	return (error);
}

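/*
 * Stop the interface: disable interrupts and Tx/Rx, snapshot the MAC
 * statistics (the hardware counters apparently do not survive the halt
 * and reset below), reclaim buffers, and reboot the controller from its
 * sleep image before putting it back to sleep.
 */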
static void
txp_stop(struct txp_softc *sc)
{
	struct ifnet *ifp;

	TXP_LOCK_ASSERT(sc);
	ifp = sc->sc_ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~TXP_FLAG_LINK;

	callout_stop(&sc->sc_tick);

	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
	    TXP_CMD_NOWAIT);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
	    TXP_CMD_NOWAIT);
	/* Save statistics for later use. */
	txp_stats_save(sc);
	/* Halt controller. */
	txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL,
	    TXP_CMD_NOWAIT);

	if (txp_wait(sc, STAT_HALTED) != 0)
		device_printf(sc->sc_dev, "controller halt timed out!\n");
	/* Reclaim Tx/Rx buffers. */
	if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
	    TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
		txp_tx_reclaim(sc, &sc->sc_txhir);
	if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
	    TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
		txp_tx_reclaim(sc, &sc->sc_txlor);
	txp_rxring_empty(sc);

	txp_init_rings(sc);
	/* Reset controller and make it reload sleep image. */
	txp_reset(sc);
	/* Let controller boot from sleep image. */
	if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
		device_printf(sc->sc_dev, "could not boot sleep image\n");
	txp_sleep(sc, 0);
}

static void
txp_watchdog(struct txp_softc *sc)
{
	struct ifnet *ifp;

	TXP_LOCK_ASSERT(sc);

	if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
		return;

	ifp = sc->sc_ifp;
	if_printf(ifp, "watchdog timeout -- resetting\n");
	ifp->if_oerrors++;
	txp_stop(sc);
	txp_init_locked(sc);
}

static int
txp_ifmedia_upd(struct ifnet *ifp)
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	uint16_t new_xcvr;

	TXP_LOCK(sc);
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
		TXP_UNLOCK(sc);
		return (EINVAL);
	}

	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			new_xcvr = TXP_XCVR_10_FDX;
		else
			new_xcvr = TXP_XCVR_10_HDX;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			new_xcvr = TXP_XCVR_100_FDX;
		else
			new_xcvr = TXP_XCVR_100_HDX;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		new_xcvr = TXP_XCVR_AUTO;
	} else {
		TXP_UNLOCK(sc);
		return (EINVAL);
	}

	/* nothing to do */
	if (sc->sc_xcvr == new_xcvr) {
		TXP_UNLOCK(sc);
		return (0);
	}

	txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
	sc->sc_xcvr = new_xcvr;
	TXP_UNLOCK(sc);

	return (0);
}

static void
txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	uint16_t bmsr, bmcr, anar, anlpar;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	TXP_LOCK(sc);
	/* Check whether firmware is running. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto bail;
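	/*
	 * BMSR's link bit is latched, so the register is read twice
	 * here, presumably to flush a stale latched value and pick up
	 * the current link state.
	 */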
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, TXP_CMD_WAIT))
		goto bail;
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, TXP_CMD_WAIT))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
	    &bmcr, NULL, NULL, TXP_CMD_WAIT))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
	    &anlpar, NULL, NULL, TXP_CMD_WAIT))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANAR, 0,
	    &anar, NULL, NULL, TXP_CMD_WAIT))
		goto bail;
	TXP_UNLOCK(sc);

	if (bmsr & BMSR_LINK)
		ifmr->ifm_status |= IFM_ACTIVE;

	if (bmcr & BMCR_ISO) {
		ifmr->ifm_active |= IFM_NONE;
		ifmr->ifm_status = 0;
		return;
	}

	if (bmcr & BMCR_LOOP)
		ifmr->ifm_active |= IFM_LOOP;

	if (bmcr & BMCR_AUTOEN) {
		if ((bmsr & BMSR_ACOMP) == 0) {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		anlpar &= anar;
		if (anlpar & ANLPAR_TX_FD)
			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
		else if (anlpar & ANLPAR_T4)
			ifmr->ifm_active |= IFM_100_T4;
		else if (anlpar & ANLPAR_TX)
			ifmr->ifm_active |= IFM_100_TX;
		else if (anlpar & ANLPAR_10_FD)
			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
		else if (anlpar & ANLPAR_10)
			ifmr->ifm_active |= IFM_10_T;
		else
			ifmr->ifm_active |= IFM_NONE;
	} else
		ifmr->ifm_active = ifm->ifm_cur->ifm_media;
	return;

bail:
	TXP_UNLOCK(sc);
	ifmr->ifm_active |= IFM_NONE;
	ifmr->ifm_status &= ~IFM_AVALID;
}

#ifdef TXP_DEBUG
static void
txp_show_descriptor(void *d)
{
	struct txp_cmd_desc *cmd = d;
	struct txp_rsp_desc *rsp = d;
	struct txp_tx_desc *txd = d;
	struct txp_frag_desc *frgd = d;

	switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
	case CMD_FLAGS_TYPE_CMD:
		/* command descriptor */
		printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
		break;
	case CMD_FLAGS_TYPE_RESP:
		/* response descriptor */
		printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id),
		    le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1),
		    le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3));
		break;
	case CMD_FLAGS_TYPE_DATA:
		/* data header (assuming tx for now) */
		printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
		    txd->tx_flags, txd->tx_numdesc, le16toh(txd->tx_totlen),
		    le32toh(txd->tx_addrlo), le32toh(txd->tx_addrhi),
		    le32toh(txd->tx_pflags));
		break;
	case CMD_FLAGS_TYPE_FRAG:
		/* fragment descriptor */
		printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
		    frgd->frag_flags, frgd->frag_rsvd1, le16toh(frgd->frag_len),
		    le32toh(frgd->frag_addrlo), le32toh(frgd->frag_addrhi),
		    le32toh(frgd->frag_rsvd2));
		break;
	default:
		printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
		break;
	}
}
#endif

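/*
 * Program the receive filter.  Multicast addresses are hashed with the
 * big-endian CRC32 of the address: the low 6 bits of the CRC select one
 * of the 64 hash-table bits, e.g. a hypothetical CRC whose low 6 bits
 * are 0x25 sets bit 5 of mchash[1].
 */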
static void
txp_set_filter(struct txp_softc *sc)
{
	struct ifnet *ifp;
	uint32_t crc, mchash[2];
	uint16_t filter;
	struct ifmultiaddr *ifma;
	int mcnt;

	TXP_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	filter = TXP_RXFILT_DIRECT;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		filter |= TXP_RXFILT_BROADCAST;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			filter |= TXP_RXFILT_ALLMULTI;
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			filter = TXP_RXFILT_PROMISC;
		goto setit;
	}

	mchash[0] = mchash[1] = 0;
	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		crc &= 0x3f;
		mchash[crc >> 5] |= 1 << (crc & 0x1f);
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0) {
		filter |= TXP_RXFILT_HASHMULTI;
		txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE, 2, mchash[0],
		    mchash[1], NULL, NULL, NULL, TXP_CMD_NOWAIT);
	}

setit:
	txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
}

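/*
 * Translate the ifnet capability and hwassist state into the firmware's
 * OFFLOAD_* bits and push the result with TXP_CMD_OFFLOAD_WRITE.
 */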
static int
txp_set_capabilities(struct txp_softc *sc)
{
	struct ifnet *ifp;
	uint32_t rxcap, txcap;

	TXP_LOCK_ASSERT(sc);

	rxcap = txcap = 0;
	ifp = sc->sc_ifp;
	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) {
		if ((ifp->if_hwassist & CSUM_IP) != 0)
			txcap |= OFFLOAD_IPCKSUM;
		if ((ifp->if_hwassist & CSUM_TCP) != 0)
			txcap |= OFFLOAD_TCPCKSUM;
		if ((ifp->if_hwassist & CSUM_UDP) != 0)
			txcap |= OFFLOAD_UDPCKSUM;
		rxcap = txcap;
	}
	if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
		rxcap &= ~(OFFLOAD_IPCKSUM | OFFLOAD_TCPCKSUM |
		    OFFLOAD_UDPCKSUM);
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		rxcap |= OFFLOAD_VLAN;
		txcap |= OFFLOAD_VLAN;
	}

	/* Tell firmware new offload configuration. */
	return (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, txcap, rxcap, NULL,
	    NULL, NULL, TXP_CMD_NOWAIT));
}

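/*
 * Fold the current hardware counters into the accumulated statistics
 * and remember the result, since the counters apparently do not survive
 * a controller halt or reset.
 */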
static void
txp_stats_save(struct txp_softc *sc)
{
	struct txp_rsp_desc *rsp;

	TXP_LOCK_ASSERT(sc);

	rsp = NULL;
	if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
	    &rsp, TXP_CMD_WAIT))
		goto out;
	if (rsp->rsp_numdesc != 6)
		goto out;
	txp_stats_update(sc, rsp);
out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);
	bcopy(&sc->sc_stats, &sc->sc_ostats, sizeof(struct txp_hw_stats));
}

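/*
 * Merge the six extension descriptors of a TXP_CMD_READ_STATISTICS
 * response into the accumulated statistics.  The 32-bit hardware
 * counters are added to the values saved at the last stop, and the
 * 64-bit byte counters are assembled from two 32-bit halves.
 */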
static void
txp_stats_update(struct txp_softc *sc, struct txp_rsp_desc *rsp)
{
	struct ifnet *ifp;
	struct txp_hw_stats *ostats, *stats;
	struct txp_ext_desc *ext;

	TXP_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	ext = (struct txp_ext_desc *)(rsp + 1);
	ostats = &sc->sc_ostats;
	stats = &sc->sc_stats;
	stats->tx_frames = ostats->tx_frames + le32toh(rsp->rsp_par2);
	stats->tx_bytes = ostats->tx_bytes + (uint64_t)le32toh(rsp->rsp_par3) +
	    ((uint64_t)le32toh(ext[0].ext_1) << 32);
	stats->tx_deferred = ostats->tx_deferred + le32toh(ext[0].ext_2);
	stats->tx_late_colls = ostats->tx_late_colls + le32toh(ext[0].ext_3);
	stats->tx_colls = ostats->tx_colls + le32toh(ext[0].ext_4);
	stats->tx_carrier_lost = ostats->tx_carrier_lost +
	    le32toh(ext[1].ext_1);
	stats->tx_multi_colls = ostats->tx_multi_colls +
	    le32toh(ext[1].ext_2);
	stats->tx_excess_colls = ostats->tx_excess_colls +
	    le32toh(ext[1].ext_3);
	stats->tx_fifo_underruns = ostats->tx_fifo_underruns +
	    le32toh(ext[1].ext_4);
	stats->tx_mcast_oflows = ostats->tx_mcast_oflows +
	    le32toh(ext[2].ext_1);
	stats->tx_filtered = ostats->tx_filtered + le32toh(ext[2].ext_2);
	stats->rx_frames = ostats->rx_frames + le32toh(ext[2].ext_3);
	stats->rx_bytes = ostats->rx_bytes + (uint64_t)le32toh(ext[2].ext_4) +
	    ((uint64_t)le32toh(ext[3].ext_1) << 32);
	stats->rx_fifo_oflows = ostats->rx_fifo_oflows + le32toh(ext[3].ext_2);
	stats->rx_badssd = ostats->rx_badssd + le32toh(ext[3].ext_3);
	stats->rx_crcerrs = ostats->rx_crcerrs + le32toh(ext[3].ext_4);
	stats->rx_lenerrs = ostats->rx_lenerrs + le32toh(ext[4].ext_1);
	stats->rx_bcast_frames = ostats->rx_bcast_frames +
	    le32toh(ext[4].ext_2);
	stats->rx_mcast_frames = ostats->rx_mcast_frames +
	    le32toh(ext[4].ext_3);
	stats->rx_oflows = ostats->rx_oflows + le32toh(ext[4].ext_4);
	stats->rx_filtered = ostats->rx_filtered + le32toh(ext[5].ext_1);

	ifp->if_ierrors = stats->rx_fifo_oflows + stats->rx_badssd +
	    stats->rx_crcerrs + stats->rx_lenerrs + stats->rx_oflows;
	ifp->if_oerrors = stats->tx_deferred + stats->tx_carrier_lost +
	    stats->tx_fifo_underruns + stats->tx_mcast_oflows;
	ifp->if_collisions = stats->tx_late_colls + stats->tx_multi_colls +
	    stats->tx_excess_colls;
	ifp->if_opackets = stats->tx_frames;
	ifp->if_ipackets = stats->rx_frames;
}

#define	TXP_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

#if __FreeBSD_version >= 900030
#define	TXP_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	    SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
#elif __FreeBSD_version > 800000
#define	TXP_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	    SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
#else
#define	TXP_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	    SYSCTL_ADD_ULONG(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
#endif

static void
txp_sysctl_node(struct txp_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct txp_hw_stats *stats;
	int error;

	stats = &sc->sc_stats;
	ctx = device_get_sysctl_ctx(sc->sc_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->sc_process_limit, 0,
	    sysctl_hw_txp_proc_limit, "I",
	    "max number of Rx events to process");
	/* Pull in device tunables. */
	sc->sc_process_limit = TXP_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "process_limit",
	    &sc->sc_process_limit);
	if (error == 0) {
		if (sc->sc_process_limit < TXP_PROC_MIN ||
		    sc->sc_process_limit > TXP_PROC_MAX) {
			device_printf(sc->sc_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", TXP_PROC_DEFAULT);
			sc->sc_process_limit = TXP_PROC_DEFAULT;
		}
	}
	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "TXP statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);

	TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
	    &stats->tx_frames, "Frames");
	TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
	    &stats->tx_bytes, "Octets");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "deferred",
	    &stats->tx_deferred, "Deferred frames");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "colls",
	    &stats->tx_colls, "Collisions");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "carrier_lost",
	    &stats->tx_carrier_lost, "Carrier lost");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
	    &stats->tx_excess_colls, "Excessive collisions");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_underruns",
	    &stats->tx_fifo_underruns, "FIFO underruns");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_oflows",
	    &stats->tx_mcast_oflows, "Multicast overflows");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
	    &stats->tx_filtered, "Filtered frames");

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);

	TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
	    &stats->rx_frames, "Frames");
	TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
	    &stats->rx_bytes, "Octets");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "badssd",
	    &stats->rx_badssd, "Bad SSD");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
	    &stats->rx_crcerrs, "CRC errors");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "lenerrs",
	    &stats->rx_lenerrs, "Length errors");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "bcast_frames",
	    &stats->rx_bcast_frames, "Broadcast frames");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_frames",
	    &stats->rx_mcast_frames, "Multicast frames");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "oflows",
	    &stats->rx_oflows, "Overflows");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
	    &stats->rx_filtered, "Filtered frames");
}

#undef TXP_SYSCTL_STAT_ADD32
#undef TXP_SYSCTL_STAT_ADD64

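/*
 * Generic range-checked integer sysctl handler, used by the
 * process_limit tunable below.
 */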
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    TXP_PROC_MIN, TXP_PROC_MAX));
}
