/*	$OpenBSD: if_txp.c,v 1.48 2001/06/27 06:34:50 kjc Exp $	*/

/*-
 * Copyright (c) 2001
 *	Jason L. Wright <jason@thought.net>, Theo de Raadt, and
 *	Aaron Campbell <aaron@monkey.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Jason L. Wright,
 *	Theo de Raadt and Aaron Campbell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/txp/if_txp.c 267580 2014-06-17 14:47:49Z jhb $");

/*
 * Driver for 3c990 (Typhoon) Ethernet ASIC
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/txp/if_txpreg.h>
#include <dev/txp/3c990img.h>

MODULE_DEPEND(txp, pci, 1, 1, 1);
MODULE_DEPEND(txp, ether, 1, 1, 1);

/*
 * XXX Known Typhoon firmware issues.
 *
 * 1. The firmware appears to have a Tx TCP/UDP checksum offloading
 *    bug: it hangs when told to compute a TCP/UDP checksum. It is
 *    not clear whether the firmware requires special alignment to
 *    do checksum offloading; the datasheet says nothing about it.
 * 2. The datasheet says nothing about the maximum number of
 *    fragment descriptors supported. Experimentation shows that up
 *    to 16 fragment descriptors are supported by the firmware. For
 *    TSO the upper stack can send a 64KB IP datagram plus link
 *    header (ethernet header + VLAN tag), but the controller can
 *    only handle frames up to 64KB given that PAGE_SIZE is 4KB
 *    (i.e. 16 * PAGE_SIZE). Because frames that need hardware TSO
 *    can be larger than 64KB, the TSO capability is disabled. TSO
 *    of frames that fit in 16 or fewer fragment descriptors works
 *    without problems, though.
 * 3. VLAN hardware tag stripping is always enabled in the firmware,
 *    even when it is explicitly told not to strip the tag. It would
 *    be possible to add the tag back in the Rx handler when VLAN
 *    hardware tagging is not active, but that was not tried as it
 *    would be a layering violation.
 * 4. TXP_CMD_RECV_BUFFER_CONTROL does not work as described in the
 *    datasheet, so the driver has to handle the alignment
 *    restriction itself by copying each received frame to align it
 *    on a 32bit boundary on strict-alignment architectures. This
 *    adds a lot of CPU burden and effectively reduces Rx
 *    performance on strict-alignment architectures (e.g. sparc64,
 *    arm, mips and ia64).
 *
 * Unfortunately 3Com no longer seems interested in releasing fixed
 * firmware, so we may have to live with these bugs.
 */

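/*
 * Only IP header checksumming is advertised for Tx; TCP/UDP
 * checksum offloading is left out because of firmware issue 1
 * above.
 */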
#define	TXP_CSUM_FEATURES	(CSUM_IP)

/*
 * Various supported device vendors/types and their names.
 */
static struct txp_type txp_devs[] = {
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_95,
	    "3Com 3cR990-TX-95 Etherlink with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_97,
	    "3Com 3cR990-TX-97 Etherlink with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_TXM,
	    "3Com 3cR990B-TXM Etherlink with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_95,
	    "3Com 3cR990-SRV-95 Etherlink Server with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_97,
	    "3Com 3cR990-SRV-97 Etherlink Server with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_SRV,
	    "3Com 3cR990B-SRV Etherlink Server with 3XP Processor" },
	{ 0, 0, NULL }
};

static int txp_probe(device_t);
static int txp_attach(device_t);
static int txp_detach(device_t);
static int txp_shutdown(device_t);
static int txp_suspend(device_t);
static int txp_resume(device_t);
static int txp_intr(void *);
static void txp_int_task(void *, int);
static void txp_tick(void *);
static int txp_ioctl(struct ifnet *, u_long, caddr_t);
static void txp_start(struct ifnet *);
static void txp_start_locked(struct ifnet *);
static int txp_encap(struct txp_softc *, struct txp_tx_ring *, struct mbuf **);
static void txp_stop(struct txp_softc *);
static void txp_init(void *);
static void txp_init_locked(struct txp_softc *);
static void txp_watchdog(struct txp_softc *);

static int txp_reset(struct txp_softc *);
static int txp_boot(struct txp_softc *, uint32_t);
static int txp_sleep(struct txp_softc *, int);
static int txp_wait(struct txp_softc *, uint32_t);
static int txp_download_fw(struct txp_softc *);
static int txp_download_fw_wait(struct txp_softc *);
static int txp_download_fw_section(struct txp_softc *,
    struct txp_fw_section_header *, int);
static int txp_alloc_rings(struct txp_softc *);
static void txp_init_rings(struct txp_softc *);
static int txp_dma_alloc(struct txp_softc *, char *, bus_dma_tag_t *,
    bus_size_t, bus_size_t, bus_dmamap_t *, void **, bus_size_t, bus_addr_t *);
static void txp_dma_free(struct txp_softc *, bus_dma_tag_t *, bus_dmamap_t,
    void **, bus_addr_t *);
static void txp_free_rings(struct txp_softc *);
static int txp_rxring_fill(struct txp_softc *);
static void txp_rxring_empty(struct txp_softc *);
static void txp_set_filter(struct txp_softc *);

static int txp_cmd_desc_numfree(struct txp_softc *);
static int txp_command(struct txp_softc *, uint16_t, uint16_t, uint32_t,
    uint32_t, uint16_t *, uint32_t *, uint32_t *, int);
static int txp_ext_command(struct txp_softc *, uint16_t, uint16_t,
    uint32_t, uint32_t, struct txp_ext_desc *, uint8_t,
    struct txp_rsp_desc **, int);
static int txp_response(struct txp_softc *, uint16_t, uint16_t,
    struct txp_rsp_desc **);
static void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
    struct txp_rsp_desc *);
static int txp_set_capabilities(struct txp_softc *);

static void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int txp_ifmedia_upd(struct ifnet *);
#ifdef TXP_DEBUG
static void txp_show_descriptor(void *);
#endif
static void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *);
static void txp_rxbuf_reclaim(struct txp_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void txp_fixup_rx(struct mbuf *);
#endif
static int txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *, int);
static void txp_stats_save(struct txp_softc *);
static void txp_stats_update(struct txp_softc *, struct txp_rsp_desc *);
static void txp_sysctl_node(struct txp_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS);

static int prefer_iomap = 0;
TUNABLE_INT("hw.txp.prefer_iomap", &prefer_iomap);
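/*
 * hw.txp.prefer_iomap is a loader tunable; for example, setting
 *	hw.txp.prefer_iomap="1"
 * in loader.conf makes the driver map its registers through the
 * I/O-port BAR instead of the (default) memory BAR.
 */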

static device_method_t txp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		txp_probe),
	DEVMETHOD(device_attach,	txp_attach),
	DEVMETHOD(device_detach,	txp_detach),
	DEVMETHOD(device_shutdown,	txp_shutdown),
	DEVMETHOD(device_suspend,	txp_suspend),
	DEVMETHOD(device_resume,	txp_resume),

	{ NULL, NULL }
};

static driver_t txp_driver = {
	"txp",
	txp_methods,
	sizeof(struct txp_softc)
};

static devclass_t txp_devclass;

DRIVER_MODULE(txp, pci, txp_driver, txp_devclass, 0, 0);

static int
txp_probe(device_t dev)
{
	struct txp_type *t;

	t = txp_devs;

	while (t->txp_name != NULL) {
		if ((pci_get_vendor(dev) == t->txp_vid) &&
		    (pci_get_device(dev) == t->txp_did)) {
			device_set_desc(dev, t->txp_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

static int
txp_attach(device_t dev)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	struct txp_rsp_desc *rsp;
	uint16_t p1;
	uint32_t p2, reg;
	int error = 0, pmc, rid;
	uint8_t eaddr[ETHER_ADDR_LEN], *ver;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_int_task, 0, txp_int_task, sc);
	TAILQ_INIT(&sc->sc_busy_list);
	TAILQ_INIT(&sc->sc_free_list);

	ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);

	pci_enable_busmaster(dev);
	/* Prefer memory space register mapping over IO space. */
	if (prefer_iomap == 0) {
		sc->sc_res_id = PCIR_BAR(1);
		sc->sc_res_type = SYS_RES_MEMORY;
	} else {
		sc->sc_res_id = PCIR_BAR(0);
		sc->sc_res_type = SYS_RES_IOPORT;
	}
	sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
	    &sc->sc_res_id, RF_ACTIVE);
	if (sc->sc_res == NULL && prefer_iomap == 0) {
		sc->sc_res_id = PCIR_BAR(0);
		sc->sc_res_type = SYS_RES_IOPORT;
		sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
		    &sc->sc_res_id, RF_ACTIVE);
	}
	if (sc->sc_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		ifmedia_removeall(&sc->sc_ifmedia);
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}

	/* Enable MWI. */
	reg = pci_read_config(dev, PCIR_COMMAND, 2);
	reg |= PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, reg, 2);
	/* Check cache line size. */
	reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	reg <<= 2;
	if (reg == 0 || (reg % 16) != 0)
		device_printf(sc->sc_dev,
		    "invalid cache line size : %u\n", reg);

	/* Allocate interrupt */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->sc_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	if ((error = txp_alloc_rings(sc)) != 0)
		goto fail;
	txp_init_rings(sc);
	txp_sysctl_node(sc);
	/* Reset controller and make it reload sleep image. */
	if (txp_reset(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* Let controller boot from sleep image. */
	if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
		device_printf(sc->sc_dev, "could not boot sleep image\n");
		error = ENXIO;
		goto fail;
	}

	/* Get station address. */
	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
	    &p1, &p2, NULL, TXP_CMD_WAIT)) {
		error = ENXIO;
		goto fail;
	}

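	/*
	 * The station address arrives little-endian in the two
	 * response words: p1 carries the first two octets and p2
	 * the remaining four, each in reversed byte order.
	 */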
	p1 = le16toh(p1);
	eaddr[0] = ((uint8_t *)&p1)[1];
	eaddr[1] = ((uint8_t *)&p1)[0];
	p2 = le32toh(p2);
	eaddr[2] = ((uint8_t *)&p2)[3];
	eaddr[3] = ((uint8_t *)&p2)[2];
	eaddr[4] = ((uint8_t *)&p2)[1];
	eaddr[5] = ((uint8_t *)&p2)[0];

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	/*
	 * Show sleep image version information which may help to
	 * diagnose sleep image specific issues.
	 */
	rsp = NULL;
	if (txp_ext_command(sc, TXP_CMD_READ_VERSION, 0, 0, 0, NULL, 0,
	    &rsp, TXP_CMD_WAIT)) {
		device_printf(dev, "can not read sleep image version\n");
		error = ENXIO;
		goto fail;
	}
	if (rsp->rsp_numdesc == 0) {
		p2 = le32toh(rsp->rsp_par2) & 0xFFFF;
		device_printf(dev, "Typhoon 1.0 sleep image (2000/%02u/%02u)\n",
		    p2 >> 8, p2 & 0xFF);
	} else if (rsp->rsp_numdesc == 2) {
		p2 = le32toh(rsp->rsp_par2);
		ver = (uint8_t *)(rsp + 1);
		/*
		 * Even though the datasheet says the command returns a
		 * NUL terminated version string, terminate the string
		 * explicitly. Given the number of other firmware bugs,
		 * this simple promise can't be trusted.
		 */
		ver[25] = '\0';
		device_printf(dev,
		    "Typhoon 1.1+ sleep image %02u.%03u.%03u %s\n",
		    p2 >> 24, (p2 >> 12) & 0xFFF, p2 & 0xFFF, ver);
	} else {
		p2 = le32toh(rsp->rsp_par2);
		device_printf(dev,
		    "Unknown Typhoon sleep image version: %u:0x%08x\n",
		    rsp->rsp_numdesc, p2);
	}
	if (rsp != NULL)
		free(rsp, M_DEVBUF);

	sc->sc_xcvr = TXP_XCVR_AUTO;
	txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = txp_ioctl;
	ifp->if_start = txp_start;
	ifp->if_init = txp_init;
	ifp->if_snd.ifq_drv_maxlen = TX_ENTRIES - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	/*
	 * It's possible to read the firmware's offload capability,
	 * but we have not downloaded the firmware yet, so announce
	 * the known working capabilities here. We're not interested
	 * in the IPSec capability, and due to the many firmware bugs
	 * we can't advertise the full capability set anyway.
	 */
	ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM;
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	/* Enable all capabilities. */
	ifp->if_capenable = ifp->if_capabilities;

	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);

	/* Create local taskq. */
	sc->sc_tq = taskqueue_create_fast("txp_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	if (sc->sc_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->sc_dev));

	/* Put controller into sleep. */
	if (txp_sleep(sc, 0) != 0) {
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    txp_intr, NULL, sc, &sc->sc_intrhand);

	if (error != 0) {
		ether_ifdetach(ifp);
		device_printf(dev, "couldn't set up interrupt handler.\n");
		goto fail;
	}

	return (0);

fail:
	if (error != 0)
		txp_detach(dev);
	return (error);
}

static int
txp_detach(device_t dev)
{
	struct txp_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	ifp = sc->sc_ifp;
	if (device_is_attached(dev)) {
		TXP_LOCK(sc);
		sc->sc_flags |= TXP_FLAG_DETACH;
		txp_stop(sc);
		TXP_UNLOCK(sc);
		callout_drain(&sc->sc_tick);
		taskqueue_drain(sc->sc_tq, &sc->sc_int_task);
		ether_ifdetach(ifp);
	}
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);

	ifmedia_removeall(&sc->sc_ifmedia);
	if (sc->sc_intrhand != NULL)
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
	if (sc->sc_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
	if (sc->sc_res != NULL)
		bus_release_resource(dev, sc->sc_res_type, sc->sc_res_id,
		    sc->sc_res);
	if (sc->sc_ifp != NULL) {
		if_free(sc->sc_ifp);
		sc->sc_ifp = NULL;
	}
	txp_free_rings(sc);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

static int
txp_reset(struct txp_softc *sc)
{
	uint32_t r;
	int i;

	/* Disable interrupts. */
	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	/* Ack all pending interrupts. */
	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);

	r = 0;
	WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
	DELAY(1000);
	WRITE_REG(sc, TXP_SRR, 0);

	/* Should wait max 6 seconds. */
	for (i = 0; i < 6000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(1000);
	}

	if (r != STAT_WAITING_FOR_HOST_REQUEST)
		device_printf(sc->sc_dev, "reset hung\n");

	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);

	/*
	 * Give more time to complete loading sleep image before
	 * trying to boot from sleep image.
	 */
	DELAY(5000);

	return (0);
}

static int
txp_boot(struct txp_softc *sc, uint32_t state)
{

	/* See if it's waiting for boot, and try to boot it. */
	if (txp_wait(sc, state) != 0) {
		device_printf(sc->sc_dev, "not waiting for boot\n");
		return (ENXIO);
	}

	WRITE_REG(sc, TXP_H2A_2, TXP_ADDR_HI(sc->sc_ldata.txp_boot_paddr));
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_1, TXP_ADDR_LO(sc->sc_ldata.txp_boot_paddr));
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	/* See if it booted. */
	if (txp_wait(sc, STAT_RUNNING) != 0) {
		device_printf(sc->sc_dev, "firmware not running\n");
		return (ENXIO);
	}

	/* Clear TX and CMD ring write registers. */
	WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	return (0);
}

static int
txp_download_fw(struct txp_softc *sc)
{
	struct txp_fw_file_header *fileheader;
	struct txp_fw_section_header *secthead;
	int sect;
	uint32_t error, ier, imr;

	TXP_LOCK_ASSERT(sc);

	error = 0;
	ier = READ_REG(sc, TXP_IER);
	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

	imr = READ_REG(sc, TXP_IMR);
	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

	if (txp_wait(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
		device_printf(sc->sc_dev, "not waiting for host request\n");
		error = ETIMEDOUT;
		goto fail;
	}

	/* Ack the status. */
	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	fileheader = (struct txp_fw_file_header *)tc990image;
	if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
		device_printf(sc->sc_dev, "firmware invalid magic\n");
		goto fail;
	}

	/* Tell boot firmware to get ready for image. */
	WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr));
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_2, le32toh(fileheader->hmac[0]));
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_3, le32toh(fileheader->hmac[1]));
	TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_4, le32toh(fileheader->hmac[2]));
	TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_5, le32toh(fileheader->hmac[3]));
	TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_6, le32toh(fileheader->hmac[4]));
	TXP_BARRIER(sc, TXP_H2A_6, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	if (txp_download_fw_wait(sc)) {
		device_printf(sc->sc_dev, "firmware wait failed, initial\n");
		error = ETIMEDOUT;
		goto fail;
	}

	secthead = (struct txp_fw_section_header *)(((uint8_t *)tc990image) +
	    sizeof(struct txp_fw_file_header));

	for (sect = 0; sect < le32toh(fileheader->nsections); sect++) {
		if ((error = txp_download_fw_section(sc, secthead, sect)) != 0)
			goto fail;
		secthead = (struct txp_fw_section_header *)
		    (((uint8_t *)secthead) + le32toh(secthead->nbytes) +
		    sizeof(*secthead));
	}

	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	if (txp_wait(sc, STAT_WAITING_FOR_BOOT) != 0) {
		device_printf(sc->sc_dev, "not waiting for boot\n");
		error = ETIMEDOUT;
		goto fail;
	}

fail:
	WRITE_REG(sc, TXP_IER, ier);
	WRITE_REG(sc, TXP_IMR, imr);

	return (error);
}

static int
txp_download_fw_wait(struct txp_softc *sc)
{
	uint32_t i;

	TXP_LOCK_ASSERT(sc);

	for (i = 0; i < TXP_TIMEOUT; i++) {
		if ((READ_REG(sc, TXP_ISR) & TXP_INT_A2H_0) != 0)
			break;
		DELAY(50);
	}

	if (i == TXP_TIMEOUT) {
		device_printf(sc->sc_dev, "firmware wait failed comm0\n");
		return (ETIMEDOUT);
	}

	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	if (READ_REG(sc, TXP_A2H_0) != STAT_WAITING_FOR_SEGMENT) {
		device_printf(sc->sc_dev, "firmware not waiting for segment\n");
		return (ETIMEDOUT);
	}
	return (0);
}

static int
txp_download_fw_section(struct txp_softc *sc,
    struct txp_fw_section_header *sect, int sectnum)
{
	bus_dma_tag_t sec_tag;
	bus_dmamap_t sec_map;
	bus_addr_t sec_paddr;
	uint8_t *sec_buf;
	int rseg, err = 0;
	struct mbuf m;
	uint16_t csum;

	TXP_LOCK_ASSERT(sc);

	/* Skip zero length sections. */
	if (le32toh(sect->nbytes) == 0)
		return (0);

	/* Make sure we aren't past the end of the image. */
	rseg = ((uint8_t *)sect) - ((uint8_t *)tc990image);
	if (rseg >= sizeof(tc990image)) {
		device_printf(sc->sc_dev,
		    "firmware invalid section address, section %d\n", sectnum);
		return (EIO);
	}

	/* Make sure this section doesn't go past the end. */
	rseg += le32toh(sect->nbytes);
	if (rseg >= sizeof(tc990image)) {
		device_printf(sc->sc_dev, "firmware truncated section %d\n",
		    sectnum);
		return (EIO);
	}

	sec_tag = NULL;
	sec_map = NULL;
	sec_buf = NULL;
	/* XXX */
	TXP_UNLOCK(sc);
	err = txp_dma_alloc(sc, "firmware sections", &sec_tag, sizeof(uint32_t),
	    0, &sec_map, (void **)&sec_buf, le32toh(sect->nbytes), &sec_paddr);
	TXP_LOCK(sc);
	if (err != 0)
		goto bail;
	bcopy(((uint8_t *)sect) + sizeof(*sect), sec_buf,
	    le32toh(sect->nbytes));

	/*
	 * dummy up mbuf and verify section checksum
	 */
	m.m_type = MT_DATA;
	m.m_next = m.m_nextpkt = NULL;
	m.m_len = le32toh(sect->nbytes);
	m.m_data = sec_buf;
	m.m_flags = 0;
	csum = in_cksum(&m, le32toh(sect->nbytes));
	if (csum != sect->cksum) {
		device_printf(sc->sc_dev,
		    "firmware section %d, bad cksum (expected 0x%x got 0x%x)\n",
		    sectnum, le16toh(sect->cksum), csum);
		err = EIO;
		goto bail;
	}

	bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_PREWRITE);

	WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes));
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_2, le16toh(sect->cksum));
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr));
	TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_4, TXP_ADDR_HI(sec_paddr));
	TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_5, TXP_ADDR_LO(sec_paddr));
	TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	if (txp_download_fw_wait(sc)) {
		device_printf(sc->sc_dev,
		    "firmware wait failed, section %d\n", sectnum);
		err = ETIMEDOUT;
	}

	bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_POSTWRITE);
bail:
	txp_dma_free(sc, &sec_tag, sec_map, (void **)&sec_buf, &sec_paddr);
	return (err);
}

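/*
 * Interrupt filter. Runs in primary interrupt context: acknowledge
 * and mask the interrupt, then defer the real work to txp_int_task()
 * via the taskqueue.
 */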
static int
txp_intr(void *vsc)
{
	struct txp_softc *sc;
	uint32_t status;

	sc = vsc;
	status = READ_REG(sc, TXP_ISR);
	if ((status & TXP_INT_LATCH) == 0)
		return (FILTER_STRAY);
	WRITE_REG(sc, TXP_ISR, status);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);

	return (FILTER_HANDLED);
}

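/*
 * Deferred interrupt handler. Runs in taskqueue context with the
 * driver lock held while it services the Rx/Tx rings; the interrupt
 * is unmasked again only once no more work is pending.
 */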
static void
txp_int_task(void *arg, int pending)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	struct txp_hostvar *hv;
	uint32_t isr;
	int more;

	sc = (struct txp_softc *)arg;

	TXP_LOCK(sc);
	ifp = sc->sc_ifp;
	hv = sc->sc_hostvar;
	isr = READ_REG(sc, TXP_ISR);
	if ((isr & TXP_INT_LATCH) != 0)
		WRITE_REG(sc, TXP_ISR, isr);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
		    sc->sc_cdata.txp_hostvar_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		more = 0;
		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
			more += txp_rx_reclaim(sc, &sc->sc_rxhir,
			    sc->sc_process_limit);
		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
			more += txp_rx_reclaim(sc, &sc->sc_rxlor,
			    sc->sc_process_limit);
		/*
		 * XXX
		 * The controller does not seem smart enough to handle
		 * FIFO overflow conditions under heavy network load.
		 * No matter how quickly new Rx buffers are passed to
		 * the controller, the situation doesn't change. Flow
		 * control might be the only way to mitigate the issue,
		 * but the firmware has no command to control the
		 * threshold for emitting pause frames.
		 */
		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
			txp_rxbuf_reclaim(sc);
		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txhir);
		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txlor);
		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
		    sc->sc_cdata.txp_hostvar_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			txp_start_locked(sc->sc_ifp);
		if (more != 0 || (READ_REG(sc, TXP_ISR) & TXP_INT_LATCH) != 0) {
			taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
			TXP_UNLOCK(sc);
			return;
		}
	}

	/* Re-enable interrupts. */
	WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
	TXP_UNLOCK(sc);
}

#ifndef __NO_STRICT_ALIGNMENT
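/*
 * Received frames start TXP_RXBUF_ALIGN bytes into their cluster
 * (see txp_rxbuf_reclaim()), which satisfies the controller's Rx
 * buffer alignment but leaves the IP header misaligned. Slide the
 * frame back so that it starts at offset ETHER_ALIGN, putting the
 * IP header on a 32bit boundary. The copy runs one 16bit word at a
 * time and may move one extra word beyond the end of the frame.
 */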
static __inline void
txp_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - (TXP_RXBUF_ALIGN - ETHER_ALIGN) / sizeof *src;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= TXP_RXBUF_ALIGN - ETHER_ALIGN;
}
#endif

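/*
 * Process up to "count" received frames from a receive ring and pass
 * them up to the network stack. Returns EAGAIN when the budget was
 * exhausted before the ring drained, so that txp_int_task() knows to
 * reschedule itself.
 */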
static int
txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r, int count)
{
	struct ifnet *ifp;
	struct txp_rx_desc *rxd;
	struct mbuf *m;
	struct txp_rx_swdesc *sd;
	uint32_t roff, woff, rx_stat, prog;

	TXP_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	roff = le32toh(*r->r_roff);
	woff = le32toh(*r->r_woff);
	rxd = r->r_desc + roff / sizeof(struct txp_rx_desc);
	for (prog = 0; roff != woff; prog++, count--) {
		if (count <= 0)
			break;
		bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));
		KASSERT(sd != NULL, ("%s: Rx desc ring corrupted", __func__));
		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
		m = sd->sd_mbuf;
		KASSERT(m != NULL, ("%s: Rx buffer ring corrupted", __func__));
		sd->sd_mbuf = NULL;
		TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
		if ((rxd->rx_flags & RX_FLAGS_ERROR) != 0) {
			if (bootverbose)
				device_printf(sc->sc_dev, "Rx error %u\n",
				    le32toh(rxd->rx_stat) & RX_ERROR_MASK);
			m_freem(m);
			goto next;
		}

		m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len);
		m->m_pkthdr.rcvif = ifp;
#ifndef __NO_STRICT_ALIGNMENT
		txp_fixup_rx(m);
#endif
		rx_stat = le32toh(rxd->rx_stat);
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((rx_stat & RX_STAT_IPCKSUMBAD) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			else if ((rx_stat & RX_STAT_IPCKSUMGOOD) != 0)
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED|CSUM_IP_VALID;

			if ((rx_stat & RX_STAT_TCPCKSUMGOOD) != 0 ||
			    (rx_stat & RX_STAT_UDPCKSUMGOOD) != 0) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/*
		 * XXX
		 * The Typhoon firmware has a bug whereby the VLAN tag
		 * is always stripped, even when it is told not to
		 * remove the tag. Therefore don't check if_capenable
		 * here.
		 */
		if (/* (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && */
		    (rx_stat & RX_STAT_VLAN) != 0) {
			m->m_pkthdr.ether_vtag =
			    bswap16((le32toh(rxd->rx_vlan) >> 16));
			m->m_flags |= M_VLANTAG;
		}

		TXP_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		TXP_LOCK(sc);

next:
		roff += sizeof(struct txp_rx_desc);
		if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
			roff = 0;
			rxd = r->r_desc;
		} else
			rxd++;
	}

	if (prog == 0)
		return (0);

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	*r->r_roff = htole32(roff);

	return (count > 0 ? 0 : EAGAIN);
}

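/*
 * Refill the free Rx buffer ring with fresh mbuf clusters and hand
 * them to the controller.
 */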
static void
txp_rxbuf_reclaim(struct txp_softc *sc)
{
	struct txp_hostvar *hv;
	struct txp_rxbuf_desc *rbd;
	struct txp_rx_swdesc *sd;
	bus_dma_segment_t segs[1];
	int nsegs, prod, prog;
	uint32_t cons;

	TXP_LOCK_ASSERT(sc);

	hv = sc->sc_hostvar;
	cons = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx));
	prod = sc->sc_rxbufprod;
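	/*
	 * sc_rxbufprod remembers the last slot handed to the
	 * controller, so advance to the next free slot first. If
	 * that catches up with the read index the ring is full; one
	 * slot is always left unused to distinguish a full ring
	 * from an empty one.
	 */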
	TXP_DESC_INC(prod, RXBUF_ENTRIES);
	if (prod == cons)
		return;

	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
	    sc->sc_cdata.txp_rxbufs_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prod != cons; prog++) {
		sd = TAILQ_FIRST(&sc->sc_free_list);
		if (sd == NULL)
			break;
		rbd = sc->sc_rxbufs + prod;
		bcopy((u_long *)&rbd->rb_vaddrlo, &sd, sizeof(sd));
		sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (sd->sd_mbuf == NULL)
			break;
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
#ifndef __NO_STRICT_ALIGNMENT
		m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
#endif
		if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
		    sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0) != 0) {
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
			break;
		}
		KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
		    nsegs));
		TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
		TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
		    BUS_DMASYNC_PREREAD);
		rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
		rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
		TXP_DESC_INC(prod, RXBUF_ENTRIES);
	}

	if (prog == 0)
		return;
	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
	    sc->sc_cdata.txp_rxbufs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	prod = (prod + RXBUF_ENTRIES - 1) % RXBUF_ENTRIES;
	sc->sc_rxbufprod = prod;
	hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(prod));
}

/*
 * Reclaim mbufs and entries from a transmit ring.
 */
static void
txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r)
{
	struct ifnet *ifp;
	uint32_t idx;
	uint32_t cons, cnt;
	struct txp_tx_desc *txd;
	struct txp_swdesc *sd;

	TXP_LOCK_ASSERT(sc);

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	ifp = sc->sc_ifp;
	idx = TXP_OFFSET2IDX(le32toh(*(r->r_off)));
	cons = r->r_cons;
	cnt = r->r_cnt;
	txd = r->r_desc + cons;
	sd = sc->sc_txd + cons;

	for (cnt = r->r_cnt; cons != idx && cnt > 0; cnt--) {
		if ((txd->tx_flags & TX_FLAGS_TYPE_M) == TX_FLAGS_TYPE_DATA) {
			if (sd->sd_mbuf != NULL) {
				bus_dmamap_sync(sc->sc_cdata.txp_tx_tag,
				    sd->sd_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->sc_cdata.txp_tx_tag,
				    sd->sd_map);
				m_freem(sd->sd_mbuf);
				sd->sd_mbuf = NULL;
				txd->tx_addrlo = 0;
				txd->tx_addrhi = 0;
				txd->tx_flags = 0;
			}
		}
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		if (++cons == TX_ENTRIES) {
			txd = r->r_desc;
			cons = 0;
			sd = sc->sc_txd;
		} else {
			txd++;
			sd++;
		}
	}

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	r->r_cons = cons;
	r->r_cnt = cnt;
	if (cnt == 0)
		sc->sc_watchdog_timer = 0;
}

static int
txp_shutdown(device_t dev)
{

	return (txp_suspend(dev));
}

static int
txp_suspend(device_t dev)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	uint8_t *eaddr;
	uint16_t p1;
	uint32_t p2;
	int pmc;
	uint16_t pmstat;

	sc = device_get_softc(dev);

	TXP_LOCK(sc);
	ifp = sc->sc_ifp;
	txp_stop(sc);
	txp_init_rings(sc);
	/* Reset controller and make it reload sleep image. */
	txp_reset(sc);
	/* Let controller boot from sleep image. */
	if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
		device_printf(sc->sc_dev, "couldn't boot sleep image\n");

	/* Set station address. */
	eaddr = IF_LLADDR(sc->sc_ifp);
	p1 = 0;
	((uint8_t *)&p1)[1] = eaddr[0];
	((uint8_t *)&p1)[0] = eaddr[1];
	p1 = le16toh(p1);
	((uint8_t *)&p2)[3] = eaddr[2];
	((uint8_t *)&p2)[2] = eaddr[3];
	((uint8_t *)&p2)[1] = eaddr[4];
	((uint8_t *)&p2)[0] = eaddr[5];
	p2 = le32toh(p2);
	txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0, NULL, NULL,
	    NULL, TXP_CMD_WAIT);
	txp_set_filter(sc);
	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	txp_sleep(sc, sc->sc_ifp->if_capenable);
	if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
		/* Request PME. */
		pmstat = pci_read_config(sc->sc_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
		if ((ifp->if_capenable & IFCAP_WOL) != 0)
			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->sc_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
	TXP_UNLOCK(sc);

	return (0);
}

static int
txp_resume(device_t dev)
{
	struct txp_softc *sc;
	int pmc;
	uint16_t pmstat;

	sc = device_get_softc(dev);

	TXP_LOCK(sc);
	if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->sc_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->sc_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	if ((sc->sc_ifp->if_flags & IFF_UP) != 0)
		txp_init_locked(sc);
	TXP_UNLOCK(sc);

	return (0);
}

struct txp_dmamap_arg {
	bus_addr_t	txp_busaddr;
};

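/*
 * Standard busdma callback: record the bus address of a single-
 * segment mapping so that txp_dma_alloc() can return it to the
 * caller.
 */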
static void
txp_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct txp_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct txp_dmamap_arg *)arg;
	ctx->txp_busaddr = segs[0].ds_addr;
}

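/*
 * Allocate a coherent DMA-able memory block: create a tag with the
 * requested alignment/boundary, allocate zeroed memory, and load the
 * map, returning both the kernel virtual address and the bus address.
 * A minimal usage sketch (names and sizes are illustrative only):
 *
 *	error = txp_dma_alloc(sc, "example block", &tag,
 *	    sizeof(uint32_t), 0, &map, (void **)&buf,
 *	    sizeof(struct txp_boot_record), &paddr);
 *
 * See txp_alloc_rings() below for the real call sites.
 */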
static int
txp_dma_alloc(struct txp_softc *sc, char *type, bus_dma_tag_t *tag,
    bus_size_t alignment, bus_size_t boundary, bus_dmamap_t *map, void **buf,
    bus_size_t size, bus_addr_t *paddr)
{
	struct txp_dmamap_arg ctx;
	int error;

	/* Create DMA block tag. */
	error = bus_dma_tag_create(
	    sc->sc_cdata.txp_parent_tag,	/* parent */
	    alignment, boundary,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not create DMA tag for %s.\n", type);
		return (error);
	}

	*paddr = 0;
	/* Allocate DMA'able memory and load the DMA map. */
	error = bus_dmamem_alloc(*tag, buf, BUS_DMA_WAITOK | BUS_DMA_ZERO |
	    BUS_DMA_COHERENT, map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate DMA'able memory for %s.\n", type);
		return (error);
	}

	ctx.txp_busaddr = 0;
	error = bus_dmamap_load(*tag, *map, *(uint8_t **)buf,
	    size, txp_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.txp_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "could not load DMA'able memory for %s.\n", type);
		return (error);
	}
	*paddr = ctx.txp_busaddr;

	return (0);
}

static void
txp_dma_free(struct txp_softc *sc, bus_dma_tag_t *tag, bus_dmamap_t map,
    void **buf, bus_addr_t *paddr)
{

	if (*tag != NULL) {
		if (*paddr != 0)
			bus_dmamap_unload(*tag, map);
		if (buf != NULL)
			bus_dmamem_free(*tag, *(uint8_t **)buf, map);
		*(uint8_t **)buf = NULL;
		*paddr = 0;
		bus_dma_tag_destroy(*tag);
		*tag = NULL;
	}
}

static int
txp_alloc_rings(struct txp_softc *sc)
{
	struct txp_boot_record *boot;
	struct txp_ldata *ld;
	struct txp_swdesc *txd;
	struct txp_rxbuf_desc *rbd;
	struct txp_rx_swdesc *sd;
	int error, i;

	ld = &sc->sc_ldata;
	boot = ld->txp_boot;

	/* boot record */
	sc->sc_boot = boot;

	/*
	 * Create parent ring/DMA block tag.
	 * The datasheet says that all ring addresses and descriptors
	 * support 64bit addressing. However the controller is known
	 * not to support DAC, so limit the DMA address space to
	 * 32bits.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->sc_dev), /* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.txp_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create parent DMA tag.\n");
		return (error);
	}

	/* Boot record. */
	error = txp_dma_alloc(sc, "boot record",
	    &sc->sc_cdata.txp_boot_tag, sizeof(uint32_t), 0,
	    &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
	    sizeof(struct txp_boot_record),
	    &sc->sc_ldata.txp_boot_paddr);
	if (error != 0)
		return (error);
	boot = sc->sc_ldata.txp_boot;
	sc->sc_boot = boot;

	/* Host variables. */
	error = txp_dma_alloc(sc, "host variables",
	    &sc->sc_cdata.txp_hostvar_tag, sizeof(uint32_t), 0,
	    &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
	    sizeof(struct txp_hostvar),
	    &sc->sc_ldata.txp_hostvar_paddr);
	if (error != 0)
		return (error);
	boot->br_hostvar_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_hostvar_paddr));
	boot->br_hostvar_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_hostvar_paddr));
	sc->sc_hostvar = sc->sc_ldata.txp_hostvar;

	/* Hi priority tx ring. */
	error = txp_dma_alloc(sc, "hi priority tx ring",
	    &sc->sc_cdata.txp_txhiring_tag, sizeof(struct txp_tx_desc), 0,
	    &sc->sc_cdata.txp_txhiring_map, (void **)&sc->sc_ldata.txp_txhiring,
	    sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_ldata.txp_txhiring_paddr);
	if (error != 0)
		return (error);
	boot->br_txhipri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txhiring_paddr));
	boot->br_txhipri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txhiring_paddr));
	boot->br_txhipri_siz =
	    htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
	sc->sc_txhir.r_tag = sc->sc_cdata.txp_txhiring_tag;
	sc->sc_txhir.r_map = sc->sc_cdata.txp_txhiring_map;
	sc->sc_txhir.r_reg = TXP_H2A_1;
	sc->sc_txhir.r_desc = sc->sc_ldata.txp_txhiring;
	sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
	sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;

	/* Low priority tx ring. */
	error = txp_dma_alloc(sc, "low priority tx ring",
	    &sc->sc_cdata.txp_txloring_tag, sizeof(struct txp_tx_desc), 0,
	    &sc->sc_cdata.txp_txloring_map, (void **)&sc->sc_ldata.txp_txloring,
	    sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_ldata.txp_txloring_paddr);
	if (error != 0)
		return (error);
	boot->br_txlopri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txloring_paddr));
	boot->br_txlopri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txloring_paddr));
	boot->br_txlopri_siz =
	    htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
	sc->sc_txlor.r_tag = sc->sc_cdata.txp_txloring_tag;
	sc->sc_txlor.r_map = sc->sc_cdata.txp_txloring_map;
	sc->sc_txlor.r_reg = TXP_H2A_3;
	sc->sc_txlor.r_desc = sc->sc_ldata.txp_txloring;
	sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
	sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;

	/* High priority rx ring. */
	error = txp_dma_alloc(sc, "hi priority rx ring",
	    &sc->sc_cdata.txp_rxhiring_tag,
	    roundup(sizeof(struct txp_rx_desc), 16), 0,
	    &sc->sc_cdata.txp_rxhiring_map, (void **)&sc->sc_ldata.txp_rxhiring,
	    sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_ldata.txp_rxhiring_paddr);
	if (error != 0)
		return (error);
	boot->br_rxhipri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxhiring_paddr));
	boot->br_rxhipri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxhiring_paddr));
	boot->br_rxhipri_siz =
	    htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
	sc->sc_rxhir.r_tag = sc->sc_cdata.txp_rxhiring_tag;
	sc->sc_rxhir.r_map = sc->sc_cdata.txp_rxhiring_map;
	sc->sc_rxhir.r_desc = sc->sc_ldata.txp_rxhiring;
	sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
	sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;

	/* Low priority rx ring. */
	error = txp_dma_alloc(sc, "low priority rx ring",
	    &sc->sc_cdata.txp_rxloring_tag,
	    roundup(sizeof(struct txp_rx_desc), 16), 0,
	    &sc->sc_cdata.txp_rxloring_map, (void **)&sc->sc_ldata.txp_rxloring,
	    sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_ldata.txp_rxloring_paddr);
	if (error != 0)
		return (error);
	boot->br_rxlopri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxloring_paddr));
	boot->br_rxlopri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxloring_paddr));
	boot->br_rxlopri_siz =
	    htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
	sc->sc_rxlor.r_tag = sc->sc_cdata.txp_rxloring_tag;
	sc->sc_rxlor.r_map = sc->sc_cdata.txp_rxloring_map;
	sc->sc_rxlor.r_desc = sc->sc_ldata.txp_rxloring;
	sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
	sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;

	/* Command ring. */
	error = txp_dma_alloc(sc, "command ring",
	    &sc->sc_cdata.txp_cmdring_tag, sizeof(struct txp_cmd_desc), 0,
	    &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
	    sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
	    &sc->sc_ldata.txp_cmdring_paddr);
	if (error != 0)
		return (error);
	boot->br_cmd_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_cmdring_paddr));
	boot->br_cmd_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_cmdring_paddr));
	boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
	sc->sc_cmdring.base = sc->sc_ldata.txp_cmdring;
	sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
	sc->sc_cmdring.lastwrite = 0;

	/* Response ring. */
	error = txp_dma_alloc(sc, "response ring",
	    &sc->sc_cdata.txp_rspring_tag, sizeof(struct txp_rsp_desc), 0,
	    &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
	    sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
	    &sc->sc_ldata.txp_rspring_paddr);
	if (error != 0)
		return (error);
	boot->br_resp_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rspring_paddr));
	boot->br_resp_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rspring_paddr));
	boot->br_resp_siz = htole32(RSP_ENTRIES * sizeof(struct txp_rsp_desc));
	sc->sc_rspring.base = sc->sc_ldata.txp_rspring;
	sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
	sc->sc_rspring.lastwrite = 0;

	/* Receive buffer ring. */
	error = txp_dma_alloc(sc, "receive buffer ring",
	    &sc->sc_cdata.txp_rxbufs_tag, sizeof(struct txp_rxbuf_desc), 0,
	    &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
	    sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
	    &sc->sc_ldata.txp_rxbufs_paddr);
	if (error != 0)
		return (error);
	boot->br_rxbuf_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxbufs_paddr));
	boot->br_rxbuf_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxbufs_paddr));
	boot->br_rxbuf_siz =
	    htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
	sc->sc_rxbufs = sc->sc_ldata.txp_rxbufs;

	/* Zero ring. */
	error = txp_dma_alloc(sc, "zero buffer",
	    &sc->sc_cdata.txp_zero_tag, sizeof(uint32_t), 0,
	    &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
	    sizeof(uint32_t), &sc->sc_ldata.txp_zero_paddr);
	if (error != 0)
		return (error);
	boot->br_zero_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_zero_paddr));
	boot->br_zero_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_zero_paddr));

	bus_dmamap_sync(sc->sc_cdata.txp_boot_tag, sc->sc_cdata.txp_boot_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Create Tx buffers. */
	error = bus_dma_tag_create(
	    sc->sc_cdata.txp_parent_tag,	/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * TXP_MAXTXSEGS,	/* maxsize */
	    TXP_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.txp_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->sc_cdata.txp_parent_tag,	/* parent */
	    TXP_RXBUF_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.txp_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < TX_ENTRIES; i++) {
		txd = &sc->sc_txd[i];
		txd->sd_mbuf = NULL;
		txd->sd_map = NULL;
		error = bus_dmamap_create(sc->sc_cdata.txp_tx_tag, 0,
		    &txd->sd_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}

	/* Create DMA maps for Rx buffers. */
	for (i = 0; i < RXBUF_ENTRIES; i++) {
		sd = malloc(sizeof(struct txp_rx_swdesc), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (sd == NULL) {
			error = ENOMEM;
			goto fail;
		}
		/*
		 * The virtual address part of the descriptor is not
		 * used by the hardware, so stash the pointer there to
		 * save a ring entry. The bcopy is needed because the
		 * address would otherwise not be valid on big-endian
		 * architectures.
		 */
		rbd = sc->sc_rxbufs + i;
		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
		sd->sd_mbuf = NULL;
		sd->sd_map = NULL;
		error = bus_dmamap_create(sc->sc_cdata.txp_rx_tag, 0,
		    &sd->sd_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
	}

fail:
	return (error);
}

static void
txp_init_rings(struct txp_softc *sc)
{

	bzero(sc->sc_ldata.txp_hostvar, sizeof(struct txp_hostvar));
	bzero(sc->sc_ldata.txp_zero, sizeof(uint32_t));
	sc->sc_txhir.r_cons = 0;
	sc->sc_txhir.r_prod = 0;
	sc->sc_txhir.r_cnt = 0;
	sc->sc_txlor.r_cons = 0;
	sc->sc_txlor.r_prod = 0;
	sc->sc_txlor.r_cnt = 0;
	sc->sc_cmdring.lastwrite = 0;
	sc->sc_rspring.lastwrite = 0;
	sc->sc_rxbufprod = 0;
	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

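/*
 * Busy-wait until the 3XP reports the given state in the A2H_0
 * mailbox register, giving up after TXP_TIMEOUT polls.
 */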
1594static int
1595txp_wait(struct txp_softc *sc, uint32_t state)
1596{
1597	uint32_t reg;
1598	int i;
1599
1600	for (i = 0; i < TXP_TIMEOUT; i++) {
1601		reg = READ_REG(sc, TXP_A2H_0);
1602		if (reg == state)
1603			break;
1604		DELAY(50);
1605	}
1606
1607	return (i == TXP_TIMEOUT ? ETIMEDOUT : 0);
1608}
1609
1610static void
1611txp_free_rings(struct txp_softc *sc)
1612{
1613	struct txp_swdesc *txd;
1614	struct txp_rx_swdesc *sd;
1615	int i;
1616
1617	/* Tx buffers. */
1618	if (sc->sc_cdata.txp_tx_tag != NULL) {
1619		for (i = 0; i < TX_ENTRIES; i++) {
1620			txd = &sc->sc_txd[i];
1621			if (txd->sd_map != NULL) {
1622				bus_dmamap_destroy(sc->sc_cdata.txp_tx_tag,
1623				    txd->sd_map);
1624				txd->sd_map = NULL;
1625			}
1626		}
1627		bus_dma_tag_destroy(sc->sc_cdata.txp_tx_tag);
1628		sc->sc_cdata.txp_tx_tag = NULL;
1629	}
1630	/* Rx buffers. */
1631	if (sc->sc_cdata.txp_rx_tag != NULL) {
1632		if (sc->sc_rxbufs != NULL) {
1633			KASSERT(TAILQ_FIRST(&sc->sc_busy_list) == NULL,
1634			    ("%s : still have busy Rx buffers", __func__));
1635			while ((sd = TAILQ_FIRST(&sc->sc_free_list)) != NULL) {
1636				TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1637				if (sd->sd_map != NULL) {
1638					bus_dmamap_destroy(
1639					    sc->sc_cdata.txp_rx_tag,
1640					    sd->sd_map);
1641					sd->sd_map = NULL;
1642				}
1643				free(sd, M_DEVBUF);
1644			}
1645		}
1646		bus_dma_tag_destroy(sc->sc_cdata.txp_rx_tag);
1647		sc->sc_cdata.txp_rx_tag = NULL;
1648	}
1649
1650	/* Hi priority Tx ring. */
1651	txp_dma_free(sc, &sc->sc_cdata.txp_txhiring_tag,
1652	    sc->sc_cdata.txp_txhiring_map,
1653	    (void **)&sc->sc_ldata.txp_txhiring,
1654	    &sc->sc_ldata.txp_txhiring_paddr);
1655	/* Low priority Tx ring. */
1656	txp_dma_free(sc, &sc->sc_cdata.txp_txloring_tag,
1657	    sc->sc_cdata.txp_txloring_map,
1658	    (void **)&sc->sc_ldata.txp_txloring,
1659	    &sc->sc_ldata.txp_txloring_paddr);
1660	/* Hi priority Rx ring. */
1661	txp_dma_free(sc, &sc->sc_cdata.txp_rxhiring_tag,
1662	    sc->sc_cdata.txp_rxhiring_map,
1663	    (void **)&sc->sc_ldata.txp_rxhiring,
1664	    &sc->sc_ldata.txp_rxhiring_paddr);
1665	/* Low priority Rx ring. */
1666	txp_dma_free(sc, &sc->sc_cdata.txp_rxloring_tag,
1667	    sc->sc_cdata.txp_rxloring_map,
1668	    (void **)&sc->sc_ldata.txp_rxloring,
1669	    &sc->sc_ldata.txp_rxloring_paddr);
1670	/* Receive buffer ring. */
1671	txp_dma_free(sc, &sc->sc_cdata.txp_rxbufs_tag,
1672	    sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
1673	    &sc->sc_ldata.txp_rxbufs_paddr);
1674	/* Command ring. */
1675	txp_dma_free(sc, &sc->sc_cdata.txp_cmdring_tag,
1676	    sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
1677	    &sc->sc_ldata.txp_cmdring_paddr);
1678	/* Response ring. */
1679	txp_dma_free(sc, &sc->sc_cdata.txp_rspring_tag,
1680	    sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
1681	    &sc->sc_ldata.txp_rspring_paddr);
1682	/* Zero ring. */
1683	txp_dma_free(sc, &sc->sc_cdata.txp_zero_tag,
1684	    sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
1685	    &sc->sc_ldata.txp_zero_paddr);
1686	/* Host variables. */
1687	txp_dma_free(sc, &sc->sc_cdata.txp_hostvar_tag,
1688	    sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
1689	    &sc->sc_ldata.txp_hostvar_paddr);
1690	/* Boot record. */
1691	txp_dma_free(sc, &sc->sc_cdata.txp_boot_tag,
1692	    sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
1693	    &sc->sc_ldata.txp_boot_paddr);
1694
1695	if (sc->sc_cdata.txp_parent_tag != NULL) {
1696		bus_dma_tag_destroy(sc->sc_cdata.txp_parent_tag);
1697		sc->sc_cdata.txp_parent_tag = NULL;
1698	}
1700}
1701
1702static int
1703txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1704{
1705	struct txp_softc *sc = ifp->if_softc;
1706	struct ifreq *ifr = (struct ifreq *)data;
1707	int capenable, error = 0, mask;
1708
1709	switch(command) {
1710	case SIOCSIFFLAGS:
1711		TXP_LOCK(sc);
1712		if ((ifp->if_flags & IFF_UP) != 0) {
1713			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1714				if (((ifp->if_flags ^ sc->sc_if_flags)
1715				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1716					txp_set_filter(sc);
1717			} else {
1718				if ((sc->sc_flags & TXP_FLAG_DETACH) == 0)
1719					txp_init_locked(sc);
1720			}
1721		} else {
1722			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1723				txp_stop(sc);
1724		}
1725		sc->sc_if_flags = ifp->if_flags;
1726		TXP_UNLOCK(sc);
1727		break;
1728	case SIOCADDMULTI:
1729	case SIOCDELMULTI:
1730		/*
1731		 * Multicast list has changed; set the hardware
1732		 * filter accordingly.
1733		 */
1734		TXP_LOCK(sc);
1735		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1736			txp_set_filter(sc);
1737		TXP_UNLOCK(sc);
1738		break;
1739	case SIOCSIFCAP:
1740		TXP_LOCK(sc);
1741		capenable = ifp->if_capenable;
1742		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1743		if ((mask & IFCAP_TXCSUM) != 0 &&
1744		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
1745			ifp->if_capenable ^= IFCAP_TXCSUM;
1746			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1747				ifp->if_hwassist |= TXP_CSUM_FEATURES;
1748			else
1749				ifp->if_hwassist &= ~TXP_CSUM_FEATURES;
1750		}
1751		if ((mask & IFCAP_RXCSUM) != 0 &&
1752		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
1753			ifp->if_capenable ^= IFCAP_RXCSUM;
1754		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1755		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
1756			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1757		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1758		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0)
1759			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1760		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
1761		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
1762			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1763		if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
1764			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
1765		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
1766			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
1767		if (capenable != ifp->if_capenable)
1768			txp_set_capabilities(sc);
1769		TXP_UNLOCK(sc);
1770		VLAN_CAPABILITIES(ifp);
1771		break;
1772	case SIOCGIFMEDIA:
1773	case SIOCSIFMEDIA:
1774		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
1775		break;
1776	default:
1777		error = ether_ioctl(ifp, command, data);
1778		break;
1779	}
1780
1781	return (error);
1782}
1783
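/*
 * Attach an mbuf cluster to every Rx buffer descriptor and hand the
 * ring to the firmware by updating the host variable write index.
 */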
1784static int
1785txp_rxring_fill(struct txp_softc *sc)
1786{
1787	struct txp_rxbuf_desc *rbd;
1788	struct txp_rx_swdesc *sd;
1789	bus_dma_segment_t segs[1];
1790	int error, i, nsegs;
1791
1792	TXP_LOCK_ASSERT(sc);
1793
1794	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1795	    sc->sc_cdata.txp_rxbufs_map,
1796	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1797
1798	for (i = 0; i < RXBUF_ENTRIES; i++) {
1799		sd = TAILQ_FIRST(&sc->sc_free_list);
1800		if (sd == NULL)
1801			return (ENOMEM);
1802		rbd = sc->sc_rxbufs + i;
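		/*
		 * Stash the software descriptor pointer in the buffer
		 * descriptor's virtual address field so the completion
		 * path can recover it.
		 */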
1803		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
1804		KASSERT(sd->sd_mbuf == NULL,
1805		    ("%s : Rx buffer ring corrupted", __func__));
1806		sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1807		if (sd->sd_mbuf == NULL)
1808			return (ENOMEM);
1809		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
1810#ifndef __NO_STRICT_ALIGNMENT
1811		m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
1812#endif
1813		if ((error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
1814		    sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0)) != 0) {
1815			m_freem(sd->sd_mbuf);
1816			sd->sd_mbuf = NULL;
1817			return (error);
1818		}
1819		KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
1820		    nsegs));
1821		TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1822		TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
1823		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1824		    BUS_DMASYNC_PREREAD);
1825		rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
1826		rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
1827	}
1828
1829	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1830	    sc->sc_cdata.txp_rxbufs_map,
1831	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1832	sc->sc_rxbufprod = RXBUF_ENTRIES - 1;
1833	sc->sc_hostvar->hv_rx_buf_write_idx =
1834	    htole32(TXP_IDX2OFFSET(RXBUF_ENTRIES - 1));
1835
1836	return (0);
1837}
1838
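/*
 * Unload and free every mbuf on the busy list, returning the software
 * descriptors to the free list.
 */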
1839static void
1840txp_rxring_empty(struct txp_softc *sc)
1841{
1842	struct txp_rx_swdesc *sd;
1843	int cnt;
1844
1845	TXP_LOCK_ASSERT(sc);
1846
1847	if (sc->sc_rxbufs == NULL)
1848		return;
1849	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1850	    sc->sc_cdata.txp_hostvar_map,
1851	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1852
1853	/* Release allocated Rx buffers. */
1854	cnt = 0;
1855	while ((sd = TAILQ_FIRST(&sc->sc_busy_list)) != NULL) {
1856		TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
1857		KASSERT(sd->sd_mbuf != NULL,
1858		    ("%s : Rx buffer ring corrupted", __func__));
1859		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1860		    BUS_DMASYNC_POSTREAD);
1861		bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
1862		m_freem(sd->sd_mbuf);
1863		sd->sd_mbuf = NULL;
1864		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
1865		cnt++;
1866	}
1867}
1868
1869static void
1870txp_init(void *xsc)
1871{
1872	struct txp_softc *sc;
1873
1874	sc = xsc;
1875	TXP_LOCK(sc);
1876	txp_init_locked(sc);
1877	TXP_UNLOCK(sc);
1878}
1879
1880static void
1881txp_init_locked(struct txp_softc *sc)
1882{
1883	struct ifnet *ifp;
1884	uint8_t *eaddr;
1885	uint16_t p1;
1886	uint32_t p2;
1887	int error;
1888
1889	TXP_LOCK_ASSERT(sc);
1890	ifp = sc->sc_ifp;
1891
1892	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1893		return;
1894
1895	/* Initialize ring structure. */
1896	txp_init_rings(sc);
1897	/* Wakeup controller. */
1898	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_WAKEUP);
1899	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
1900	/*
1901	 * It seems that earlier NV images can come back online from the
1902	 * wakeup command, but newer ones require a controller reset,
1903	 * so just reset the controller again.
1904	 */
1905	if (txp_reset(sc) != 0)
1906		goto init_fail;
1907	/* Download firmware. */
1908	error = txp_download_fw(sc);
1909	if (error != 0) {
1910		device_printf(sc->sc_dev, "could not download firmware.\n");
1911		goto init_fail;
1912	}
1913	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1914	    sc->sc_cdata.txp_hostvar_map,
1915	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1916	if ((error = txp_rxring_fill(sc)) != 0) {
1917		device_printf(sc->sc_dev, "no memory for Rx buffers.\n");
1918		goto init_fail;
1919	}
1920	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1921	    sc->sc_cdata.txp_hostvar_map,
1922	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1923	if (txp_boot(sc, STAT_WAITING_FOR_BOOT) != 0) {
1924		device_printf(sc->sc_dev, "could not boot firmware.\n");
1925		goto init_fail;
1926	}
1927
1928	/*
1929	 * Quite contrary to the Typhoon T2 software functional
1930	 * specification, it seems that the TXP_CMD_RECV_BUFFER_CONTROL
1931	 * command is not implemented in the firmware. This means the driver
1932	 * has to handle misaligned frames on strict-alignment architectures.
1933	 * AFAIK this is the only controller manufactured by 3Com that has
1934	 * this stupid bug. 3Com should fix this.
1935	 */
1936	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
1937	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1938		goto init_fail;
1939	/* Undocumented command (interrupt coalescing disable?) - from Linux. */
1940	if (txp_command(sc, TXP_CMD_FILTER_DEFINE, 0, 0, 0, NULL, NULL, NULL,
1941	    TXP_CMD_NOWAIT) != 0)
1942		goto init_fail;
1943
1944	/* Set station address. */
1945	eaddr = IF_LLADDR(sc->sc_ifp);
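	/*
	 * The firmware takes the station address as two command
	 * parameters: the first two bytes of the MAC address in p1 and
	 * the remaining four in p2.
	 */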
1946	p1 = 0;
1947	((uint8_t *)&p1)[1] = eaddr[0];
1948	((uint8_t *)&p1)[0] = eaddr[1];
1949	p1 = le16toh(p1);
1950	((uint8_t *)&p2)[3] = eaddr[2];
1951	((uint8_t *)&p2)[2] = eaddr[3];
1952	((uint8_t *)&p2)[1] = eaddr[4];
1953	((uint8_t *)&p2)[0] = eaddr[5];
1954	p2 = le32toh(p2);
1955	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0,
1956	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1957		goto init_fail;
1958
1959	txp_set_filter(sc);
1960	txp_set_capabilities(sc);
1961
1962	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
1963	    NULL, NULL, NULL, TXP_CMD_NOWAIT))
1964		goto init_fail;
1965	if (txp_command(sc, TXP_CMD_XCVR_SELECT, sc->sc_xcvr, 0, 0,
1966	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1967		goto init_fail;
1968	if (txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
1969	    TXP_CMD_NOWAIT) != 0)
1970		goto init_fail;
1971	if (txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
1972	    TXP_CMD_NOWAIT) != 0)
1973		goto init_fail;
1974
1975	/* Ack all pending interrupts and enable interrupts. */
1976	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
1977	WRITE_REG(sc, TXP_IER, TXP_INTRS);
1978	WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
1979
1980	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1981	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1982
1983	callout_reset(&sc->sc_tick, hz, txp_tick, sc);
1984	return;
1985
1986init_fail:
1987	txp_rxring_empty(sc);
1988	txp_init_rings(sc);
1989	txp_reset(sc);
1990	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
1991}
1992
1993static void
1994txp_tick(void *vsc)
1995{
1996	struct txp_softc *sc;
1997	struct ifnet *ifp;
1998	struct txp_rsp_desc *rsp;
1999	struct txp_ext_desc *ext;
2000	int link;
2001
2002	sc = vsc;
2003	TXP_LOCK_ASSERT(sc);
2004	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2005	    sc->sc_cdata.txp_hostvar_map,
2006	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2007	txp_rxbuf_reclaim(sc);
2008	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2009	    sc->sc_cdata.txp_hostvar_map,
2010	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2011
2012	ifp = sc->sc_ifp;
2013	rsp = NULL;
2014
2015	link = sc->sc_flags & TXP_FLAG_LINK;
2016	if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
2017	    &rsp, TXP_CMD_WAIT))
2018		goto out;
2019	if (rsp->rsp_numdesc != 6)
2020		goto out;
2021	txp_stats_update(sc, rsp);
2022	if (link == 0 && (sc->sc_flags & TXP_FLAG_LINK) != 0) {
2023		ext = (struct txp_ext_desc *)(rsp + 1);
2024		/* Update baudrate with resolved speed. */
2025		if ((ext[5].ext_2 & 0x02) != 0)
2026			ifp->if_baudrate = IF_Mbps(100);
2027		else
2028			ifp->if_baudrate = IF_Mbps(10);
2029	}
2030
2031out:
2032	if (rsp != NULL)
2033		free(rsp, M_DEVBUF);
2034	txp_watchdog(sc);
2035	callout_reset(&sc->sc_tick, hz, txp_tick, sc);
2036}
2037
2038static void
2039txp_start(struct ifnet *ifp)
2040{
2041	struct txp_softc *sc;
2042
2043	sc = ifp->if_softc;
2044	TXP_LOCK(sc);
2045	txp_start_locked(ifp);
2046	TXP_UNLOCK(sc);
2047}
2048
2049static void
2050txp_start_locked(struct ifnet *ifp)
2051{
2052	struct txp_softc *sc;
2053	struct mbuf *m_head;
2054	int enq;
2055
2056	sc = ifp->if_softc;
2057	TXP_LOCK_ASSERT(sc);
2058
2059	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2060	   IFF_DRV_RUNNING || (sc->sc_flags & TXP_FLAG_LINK) == 0)
2061		return;
2062
2063	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
2064		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2065		if (m_head == NULL)
2066			break;
2067		/*
2068		 * Pack the data into the transmit ring. If we
2069		 * don't have room, set the OACTIVE flag and wait
2070		 * for the NIC to drain the ring.
2071		 * At the moment only the high priority ring is used.
2072		 */
2073		if (txp_encap(sc, &sc->sc_txhir, &m_head)) {
2074			if (m_head == NULL)
2075				break;
2076			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2077			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2078			break;
2079		}
		/* Count the queued frame so the Tx watchdog is armed below. */
		enq++;
2080
2081		/*
2082		 * If there's a BPF listener, bounce a copy of this frame
2083		 * to him.
2084		 */
2085		ETHER_BPF_MTAP(ifp, m_head);
2086
2087		/* Send queued frame. */
2088		WRITE_REG(sc, sc->sc_txhir.r_reg,
2089		    TXP_IDX2OFFSET(sc->sc_txhir.r_prod));
2090	}
2091
2092	if (enq > 0) {
2093		/* Set a timeout in case the chip goes out to lunch. */
2094		sc->sc_watchdog_timer = TXP_TX_TIMEOUT;
2095	}
2096}
2097
2098static int
2099txp_encap(struct txp_softc *sc, struct txp_tx_ring *r, struct mbuf **m_head)
2100{
2101	struct txp_tx_desc *first_txd;
2102	struct txp_frag_desc *fxd;
2103	struct txp_swdesc *sd;
2104	struct mbuf *m;
2105	bus_dma_segment_t txsegs[TXP_MAXTXSEGS];
2106	int error, i, nsegs;
2107
2108	TXP_LOCK_ASSERT(sc);
2109
2110	M_ASSERTPKTHDR((*m_head));
2111
2112	m = *m_head;
2113	first_txd = r->r_desc + r->r_prod;
2114	sd = sc->sc_txd + r->r_prod;
2115
2116	error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag, sd->sd_map,
2117	    *m_head, txsegs, &nsegs, 0);
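	/*
	 * EFBIG means the chain has more than TXP_MAXTXSEGS fragments;
	 * collapse the mbuf chain and retry the DMA load once.
	 */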
2118	if (error == EFBIG) {
2119		m = m_collapse(*m_head, M_NOWAIT, TXP_MAXTXSEGS);
2120		if (m == NULL) {
2121			m_freem(*m_head);
2122			*m_head = NULL;
2123			return (ENOMEM);
2124		}
2125		*m_head = m;
2126		error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag,
2127		    sd->sd_map, *m_head, txsegs, &nsegs, 0);
2128		if (error != 0) {
2129			m_freem(*m_head);
2130			*m_head = NULL;
2131			return (error);
2132		}
2133	} else if (error != 0)
2134		return (error);
2135	if (nsegs == 0) {
2136		m_freem(*m_head);
2137		*m_head = NULL;
2138		return (EIO);
2139	}
2140
2141	/* Check descriptor overrun. */
2142	if (r->r_cnt + nsegs >= TX_ENTRIES - TXP_TXD_RESERVED) {
2143		bus_dmamap_unload(sc->sc_cdata.txp_tx_tag, sd->sd_map);
2144		return (ENOBUFS);
2145	}
2146	bus_dmamap_sync(sc->sc_cdata.txp_tx_tag, sd->sd_map,
2147	    BUS_DMASYNC_PREWRITE);
2148	sd->sd_mbuf = m;
2149
2150	first_txd->tx_flags = TX_FLAGS_TYPE_DATA;
2151	first_txd->tx_numdesc = 0;
2152	first_txd->tx_addrlo = 0;
2153	first_txd->tx_addrhi = 0;
2154	first_txd->tx_totlen = 0;
2155	first_txd->tx_pflags = 0;
2156	r->r_cnt++;
2157	TXP_DESC_INC(r->r_prod, TX_ENTRIES);
2158
2159	/* Configure Tx IP/TCP/UDP checksum offload. */
2160	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2161		first_txd->tx_pflags |= htole32(TX_PFLAGS_IPCKSUM);
2162#ifdef notyet
2163	/* XXX firmware bug. */
2164	if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2165		first_txd->tx_pflags |= htole32(TX_PFLAGS_TCPCKSUM);
2166	if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2167		first_txd->tx_pflags |= htole32(TX_PFLAGS_UDPCKSUM);
2168#endif
2169
2170	/* Configure VLAN hardware tag insertion. */
2171	if ((m->m_flags & M_VLANTAG) != 0)
2172		first_txd->tx_pflags |=
2173		    htole32(TX_PFLAGS_VLAN | TX_PFLAGS_PRIO |
2174		    (bswap16(m->m_pkthdr.ether_vtag) << TX_PFLAGS_VLANTAG_S));
2175
2176	for (i = 0; i < nsegs; i++) {
2177		fxd = (struct txp_frag_desc *)(r->r_desc + r->r_prod);
2178		fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG | TX_FLAGS_VALID;
2179		fxd->frag_rsvd1 = 0;
2180		fxd->frag_len = htole16(txsegs[i].ds_len);
2181		fxd->frag_addrhi = htole32(TXP_ADDR_HI(txsegs[i].ds_addr));
2182		fxd->frag_addrlo = htole32(TXP_ADDR_LO(txsegs[i].ds_addr));
2183		fxd->frag_rsvd2 = 0;
2184		first_txd->tx_numdesc++;
2185		r->r_cnt++;
2186		TXP_DESC_INC(r->r_prod, TX_ENTRIES);
2187	}
2188
2189	/* Lastly set valid flag. */
2190	first_txd->tx_flags |= TX_FLAGS_VALID;
2191
2192	/* Sync descriptors. */
2193	bus_dmamap_sync(r->r_tag, r->r_map,
2194	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2195
2196	return (0);
2197}
2198
2199/*
2200 * Handle simple commands sent to the typhoon
2201 */
2202static int
2203txp_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
2204    uint32_t in3, uint16_t *out1, uint32_t *out2, uint32_t *out3, int wait)
2205{
2206	struct txp_rsp_desc *rsp;
2207
2208	rsp = NULL;
2209	if (txp_ext_command(sc, id, in1, in2, in3, NULL, 0, &rsp, wait) != 0) {
2210		device_printf(sc->sc_dev, "command 0x%02x failed\n", id);
2211		return (-1);
2212	}
2213
2214	if (wait == TXP_CMD_NOWAIT)
2215		return (0);
2216
2217	KASSERT(rsp != NULL, ("rsp is NULL!\n"));
2218	if (out1 != NULL)
2219		*out1 = le16toh(rsp->rsp_par1);
2220	if (out2 != NULL)
2221		*out2 = le32toh(rsp->rsp_par2);
2222	if (out3 != NULL)
2223		*out3 = le32toh(rsp->rsp_par3);
2224	free(rsp, M_DEVBUF);
2225	return (0);
2226}
2227
2228static int
2229txp_ext_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
2230    uint32_t in3, struct txp_ext_desc *in_extp, uint8_t in_extn,
2231    struct txp_rsp_desc **rspp, int wait)
2232{
2233	struct txp_hostvar *hv;
2234	struct txp_cmd_desc *cmd;
2235	struct txp_ext_desc *ext;
2236	uint32_t idx, i;
2237	uint16_t seq;
2238	int error;
2239
2240	error = 0;
2241	hv = sc->sc_hostvar;
2242	if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
2243		device_printf(sc->sc_dev,
2244		    "%s : out of free cmd descriptors for command 0x%02x\n",
2245		    __func__, id);
2246		return (ENOBUFS);
2247	}
2248
2249	bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
2250	    sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_POSTWRITE);
2251	idx = sc->sc_cmdring.lastwrite;
2252	cmd = (struct txp_cmd_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
2253	bzero(cmd, sizeof(*cmd));
2254
2255	cmd->cmd_numdesc = in_extn;
2256	seq = sc->sc_seq++;
2257	cmd->cmd_seq = htole16(seq);
2258	cmd->cmd_id = htole16(id);
2259	cmd->cmd_par1 = htole16(in1);
2260	cmd->cmd_par2 = htole32(in2);
2261	cmd->cmd_par3 = htole32(in3);
2262	cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
2263	    (wait == TXP_CMD_WAIT ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;
2264
2265	idx += sizeof(struct txp_cmd_desc);
2266	if (idx == sc->sc_cmdring.size)
2267		idx = 0;
2268
2269	for (i = 0; i < in_extn; i++) {
2270		ext = (struct txp_ext_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
2271		bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
2272		in_extp++;
2273		idx += sizeof(struct txp_cmd_desc);
2274		if (idx == sc->sc_cmdring.size)
2275			idx = 0;
2276	}
2277
2278	sc->sc_cmdring.lastwrite = idx;
2279	bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
2280	    sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_PREWRITE);
2281	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2282	    sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_PREREAD |
2283	    BUS_DMASYNC_PREWRITE);
2284	WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
2285	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
2286
2287	if (wait == TXP_CMD_NOWAIT)
2288		return (0);
2289
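	/*
	 * Poll the response ring until a response carrying our command id
	 * and sequence number shows up.
	 */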
2290	for (i = 0; i < TXP_TIMEOUT; i++) {
2291		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2292		    sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_POSTREAD |
2293		    BUS_DMASYNC_POSTWRITE);
2294		if (le32toh(hv->hv_resp_read_idx) !=
2295		    le32toh(hv->hv_resp_write_idx)) {
2296			error = txp_response(sc, id, seq, rspp);
2297			bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2298			    sc->sc_cdata.txp_hostvar_map,
2299			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2300			if (error != 0)
2301				return (error);
2302			if (*rspp != NULL)
2303				break;
2304		}
2305		DELAY(50);
2306	}
2307	if (i == TXP_TIMEOUT) {
2308		device_printf(sc->sc_dev, "command 0x%02x timed out\n", id);
2309		error = ETIMEDOUT;
2310	}
2311
2312	return (error);
2313}
2314
2315static int
2316txp_response(struct txp_softc *sc, uint16_t id, uint16_t seq,
2317    struct txp_rsp_desc **rspp)
2318{
2319	struct txp_hostvar *hv;
2320	struct txp_rsp_desc *rsp;
2321	uint32_t ridx;
2322
2323	bus_dmamap_sync(sc->sc_cdata.txp_rspring_tag,
2324	    sc->sc_cdata.txp_rspring_map, BUS_DMASYNC_POSTREAD);
2325	hv = sc->sc_hostvar;
2326	ridx = le32toh(hv->hv_resp_read_idx);
2327	while (ridx != le32toh(hv->hv_resp_write_idx)) {
2328		rsp = (struct txp_rsp_desc *)(((uint8_t *)sc->sc_rspring.base) + ridx);
2329
2330		if (id == le16toh(rsp->rsp_id) &&
2331		    le16toh(rsp->rsp_seq) == seq) {
2332			*rspp = (struct txp_rsp_desc *)malloc(
2333			    sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
2334			    M_DEVBUF, M_NOWAIT);
2335			if (*rspp == NULL) {
2336				device_printf(sc->sc_dev, "%s : command 0x%02x "
2337				    "memory allocation failure\n",
2338				    __func__, id);
2339				return (ENOMEM);
2340			}
2341			txp_rsp_fixup(sc, rsp, *rspp);
2342			return (0);
2343		}
2344
2345		if ((rsp->rsp_flags & RSP_FLAGS_ERROR) != 0) {
2346			device_printf(sc->sc_dev,
2347			    "%s : command 0x%02x response error!\n", __func__,
2348			    le16toh(rsp->rsp_id));
2349			txp_rsp_fixup(sc, rsp, NULL);
2350			ridx = le32toh(hv->hv_resp_read_idx);
2351			continue;
2352		}
2353
2354		/*
2355		 * The following unsolicited responses are handled during
2356		 * processing of TXP_CMD_READ_STATISTICS, which requires a
2357		 * response. The driver abuses that command to detect media
2358		 * status changes.
2359		 * TXP_CMD_FILTER_DEFINE is not an unsolicited response, but
2360		 * we don't process the response ring in the interrupt handler,
2361		 * so we have to ignore this command here; otherwise an
2362		 * unknown command message would be printed.
2363		 */
2364		switch (le16toh(rsp->rsp_id)) {
2365		case TXP_CMD_CYCLE_STATISTICS:
2366		case TXP_CMD_FILTER_DEFINE:
2367			break;
2368		case TXP_CMD_MEDIA_STATUS_READ:
2369			if ((le16toh(rsp->rsp_par1) & 0x0800) == 0) {
2370				sc->sc_flags |= TXP_FLAG_LINK;
2371				if_link_state_change(sc->sc_ifp,
2372				    LINK_STATE_UP);
2373			} else {
2374				sc->sc_flags &= ~TXP_FLAG_LINK;
2375				if_link_state_change(sc->sc_ifp,
2376				    LINK_STATE_DOWN);
2377			}
2378			break;
2379		case TXP_CMD_HELLO_RESPONSE:
2380			/*
2381			 * The driver should respond to the hello message,
2382			 * but TXP_CMD_READ_STATISTICS is issued once per
2383			 * second, so there is no need to send an explicit
2384			 * command here.
2385			 */
2386			device_printf(sc->sc_dev, "%s : hello\n", __func__);
2387			break;
2388		default:
2389			device_printf(sc->sc_dev,
2390			    "%s : unknown command 0x%02x\n", __func__,
2391			    le16toh(rsp->rsp_id));
2392		}
2393		txp_rsp_fixup(sc, rsp, NULL);
2394		ridx = le32toh(hv->hv_resp_read_idx);
2395	}
2396
2397	return (0);
2398}
2399
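/*
 * Copy a response (and its extension descriptors) out of the response
 * ring, if requested, and advance the read index past it.
 */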
2400static void
2401txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
2402    struct txp_rsp_desc *dst)
2403{
2404	struct txp_rsp_desc *src;
2405	struct txp_hostvar *hv;
2406	uint32_t i, ridx;
2407
2408	src = rsp;
2409	hv = sc->sc_hostvar;
2410	ridx = le32toh(hv->hv_resp_read_idx);
2411
2412	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
2413		if (dst != NULL)
2414			bcopy(src, dst++, sizeof(struct txp_rsp_desc));
2415		ridx += sizeof(struct txp_rsp_desc);
2416		if (ridx == sc->sc_rspring.size) {
2417			src = sc->sc_rspring.base;
2418			ridx = 0;
2419		} else
2420			src++;
2421		sc->sc_rspring.lastwrite = ridx;
2422	}
2423
2424	hv->hv_resp_read_idx = htole32(ridx);
2425}
2426
2427static int
2428txp_cmd_desc_numfree(struct txp_softc *sc)
2429{
2430	struct txp_hostvar *hv;
2431	struct txp_boot_record *br;
2432	uint32_t widx, ridx, nfree;
2433
2434	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2435	    sc->sc_cdata.txp_hostvar_map,
2436	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2437	hv = sc->sc_hostvar;
2438	br = sc->sc_boot;
2439	widx = sc->sc_cmdring.lastwrite;
2440	ridx = le32toh(hv->hv_cmd_read_idx);
2441
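	/*
	 * One descriptor slot is always left unused so that a completely
	 * full ring can be distinguished from an empty one.
	 */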
2442	if (widx == ridx) {
2443		/* Ring is completely free */
2444		nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
2445	} else {
2446		if (widx > ridx)
2447			nfree = le32toh(br->br_cmd_siz) -
2448			    (widx - ridx + sizeof(struct txp_cmd_desc));
2449		else
2450			nfree = ridx - widx - sizeof(struct txp_cmd_desc);
2451	}
2452
2453	return (nfree / sizeof(struct txp_cmd_desc));
2454}
2455
2456static int
2457txp_sleep(struct txp_softc *sc, int capenable)
2458{
2459	uint16_t events;
2460	int error;
2461
2462	events = 0;
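	/* Bit 0 selects wake on magic packet, the only event this driver uses. */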
2463	if ((capenable & IFCAP_WOL_MAGIC) != 0)
2464		events |= 0x01;
2465	error = txp_command(sc, TXP_CMD_ENABLE_WAKEUP_EVENTS, events, 0, 0,
2466	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
2467	if (error == 0) {
2468		/* Go to sleep. */
2469		error = txp_command(sc, TXP_CMD_GOTO_SLEEP, 0, 0, 0, NULL,
2470		    NULL, NULL, TXP_CMD_NOWAIT);
2471		if (error == 0) {
2472			error = txp_wait(sc, STAT_SLEEPING);
2473			if (error != 0)
2474				device_printf(sc->sc_dev,
2475				    "unable to enter sleep state\n");
2476		}
2477	}
2478
2479	return (error);
2480}
2481
2482static void
2483txp_stop(struct txp_softc *sc)
2484{
2485	struct ifnet *ifp;
2486
2487	TXP_LOCK_ASSERT(sc);
2488	ifp = sc->sc_ifp;
2489
2490	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2491		return;
2492
2493	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
2494	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
2495
2496	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2497	sc->sc_flags &= ~TXP_FLAG_LINK;
2498
2499	callout_stop(&sc->sc_tick);
2500
2501	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
2502	    TXP_CMD_NOWAIT);
2503	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
2504	    TXP_CMD_NOWAIT);
2505	/* Save statistics for later use. */
2506	txp_stats_save(sc);
2507	/* Halt controller. */
2508	txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL,
2509	    TXP_CMD_NOWAIT);
2510
2511	if (txp_wait(sc, STAT_HALTED) != 0)
2512		device_printf(sc->sc_dev, "controller halt timed out!\n");
2513	/* Reclaim Tx/Rx buffers. */
2514	if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
2515	    TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
2516		txp_tx_reclaim(sc, &sc->sc_txhir);
2517	if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
2518	    TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
2519		txp_tx_reclaim(sc, &sc->sc_txlor);
2520	txp_rxring_empty(sc);
2521
2522	txp_init_rings(sc);
2523	/* Reset controller and make it reload sleep image. */
2524	txp_reset(sc);
2525	/* Let controller boot from sleep image. */
2526	if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
2527		device_printf(sc->sc_dev, "could not boot sleep image\n");
2528	txp_sleep(sc, 0);
2529}
2530
2531static void
2532txp_watchdog(struct txp_softc *sc)
2533{
2534	struct ifnet *ifp;
2535
2536	TXP_LOCK_ASSERT(sc);
2537
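	/*
	 * The timer ticks once per second from txp_tick(); zero means the
	 * watchdog is disarmed.
	 */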
2538	if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
2539		return;
2540
2541	ifp = sc->sc_ifp;
2542	if_printf(ifp, "watchdog timeout -- resetting\n");
2543	ifp->if_oerrors++;
2544	txp_stop(sc);
2545	txp_init_locked(sc);
2546}
2547
2548static int
2549txp_ifmedia_upd(struct ifnet *ifp)
2550{
2551	struct txp_softc *sc = ifp->if_softc;
2552	struct ifmedia *ifm = &sc->sc_ifmedia;
2553	uint16_t new_xcvr;
2554
2555	TXP_LOCK(sc);
2556	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
2557		TXP_UNLOCK(sc);
2558		return (EINVAL);
2559	}
2560
2561	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
2562		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2563			new_xcvr = TXP_XCVR_10_FDX;
2564		else
2565			new_xcvr = TXP_XCVR_10_HDX;
2566	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
2567		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2568			new_xcvr = TXP_XCVR_100_FDX;
2569		else
2570			new_xcvr = TXP_XCVR_100_HDX;
2571	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
2572		new_xcvr = TXP_XCVR_AUTO;
2573	} else {
2574		TXP_UNLOCK(sc);
2575		return (EINVAL);
2576	}
2577
2578	/* Nothing to do. */
2579	if (sc->sc_xcvr == new_xcvr) {
2580		TXP_UNLOCK(sc);
2581		return (0);
2582	}
2583
2584	txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
2585	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
2586	sc->sc_xcvr = new_xcvr;
2587	TXP_UNLOCK(sc);
2588
2589	return (0);
2590}
2591
2592static void
2593txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2594{
2595	struct txp_softc *sc = ifp->if_softc;
2596	struct ifmedia *ifm = &sc->sc_ifmedia;
2597	uint16_t bmsr, bmcr, anar, anlpar;
2598
2599	ifmr->ifm_status = IFM_AVALID;
2600	ifmr->ifm_active = IFM_ETHER;
2601
2602	TXP_LOCK(sc);
2603	/* Check whether firmware is running. */
2604	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2605		goto bail;
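	/*
	 * BMSR's link status bit is latched-low, so read the register
	 * twice to get the current link state.
	 */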
2606	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
2607	    &bmsr, NULL, NULL, TXP_CMD_WAIT))
2608		goto bail;
2609	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
2610	    &bmsr, NULL, NULL, TXP_CMD_WAIT))
2611		goto bail;
2612
2613	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
2614	    &bmcr, NULL, NULL, TXP_CMD_WAIT))
2615		goto bail;
2616
2617	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
2618	    &anlpar, NULL, NULL, TXP_CMD_WAIT))
2619		goto bail;
2620
2621	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANAR, 0,
2622	    &anar, NULL, NULL, TXP_CMD_WAIT))
2623		goto bail;
2624	TXP_UNLOCK(sc);
2625
2626	if (bmsr & BMSR_LINK)
2627		ifmr->ifm_status |= IFM_ACTIVE;
2628
2629	if (bmcr & BMCR_ISO) {
2630		ifmr->ifm_active |= IFM_NONE;
2631		ifmr->ifm_status = 0;
2632		return;
2633	}
2634
2635	if (bmcr & BMCR_LOOP)
2636		ifmr->ifm_active |= IFM_LOOP;
2637
2638	if (bmcr & BMCR_AUTOEN) {
2639		if ((bmsr & BMSR_ACOMP) == 0) {
2640			ifmr->ifm_active |= IFM_NONE;
2641			return;
2642		}
2643
2644		anlpar &= anar;
2645		if (anlpar & ANLPAR_TX_FD)
2646			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
2647		else if (anlpar & ANLPAR_T4)
2648			ifmr->ifm_active |= IFM_100_T4;
2649		else if (anlpar & ANLPAR_TX)
2650			ifmr->ifm_active |= IFM_100_TX;
2651		else if (anlpar & ANLPAR_10_FD)
2652			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
2653		else if (anlpar & ANLPAR_10)
2654			ifmr->ifm_active |= IFM_10_T;
2655		else
2656			ifmr->ifm_active |= IFM_NONE;
2657	} else
2658		ifmr->ifm_active = ifm->ifm_cur->ifm_media;
2659	return;
2660
2661bail:
2662	TXP_UNLOCK(sc);
2663	ifmr->ifm_active |= IFM_NONE;
2664	ifmr->ifm_status &= ~IFM_AVALID;
2665}
2666
2667#ifdef TXP_DEBUG
2668static void
2669txp_show_descriptor(void *d)
2670{
2671	struct txp_cmd_desc *cmd = d;
2672	struct txp_rsp_desc *rsp = d;
2673	struct txp_tx_desc *txd = d;
2674	struct txp_frag_desc *frgd = d;
2675
2676	switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
2677	case CMD_FLAGS_TYPE_CMD:
2678		/* command descriptor */
2679		printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2680		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
2681		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
2682		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
2683		break;
2684	case CMD_FLAGS_TYPE_RESP:
2685		/* response descriptor */
2686		printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2687		    rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id),
2688		    le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1),
2689		    le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3));
2690		break;
2691	case CMD_FLAGS_TYPE_DATA:
2692		/* data header (assuming tx for now) */
2693		printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
2694		    txd->tx_flags, txd->tx_numdesc, le16toh(txd->tx_totlen),
2695		    le32toh(txd->tx_addrlo), le32toh(txd->tx_addrhi),
2696		    le32toh(txd->tx_pflags));
2697		break;
2698	case CMD_FLAGS_TYPE_FRAG:
2699		/* fragment descriptor */
2700		printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
2701		    frgd->frag_flags, frgd->frag_rsvd1, le16toh(frgd->frag_len),
2702		    le32toh(frgd->frag_addrlo), le32toh(frgd->frag_addrhi),
2703		    le32toh(frgd->frag_rsvd2));
2704		break;
2705	default:
2706		printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2707		    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
2708		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
2709		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
2710		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
2711		break;
2712	}
2713}
2714#endif
2715
2716static void
2717txp_set_filter(struct txp_softc *sc)
2718{
2719	struct ifnet *ifp;
2720	uint32_t crc, mchash[2];
2721	uint16_t filter;
2722	struct ifmultiaddr *ifma;
2723	int mcnt;
2724
2725	TXP_LOCK_ASSERT(sc);
2726
2727	ifp = sc->sc_ifp;
2728	filter = TXP_RXFILT_DIRECT;
2729	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2730		filter |= TXP_RXFILT_BROADCAST;
2731	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2732		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2733			filter |= TXP_RXFILT_ALLMULTI;
2734		if ((ifp->if_flags & IFF_PROMISC) != 0)
2735			filter = TXP_RXFILT_PROMISC;
2736		goto setit;
2737	}
2738
2739	mchash[0] = mchash[1] = 0;
2740	mcnt = 0;
2741	if_maddr_rlock(ifp);
2742	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2743		if (ifma->ifma_addr->sa_family != AF_LINK)
2744			continue;
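		/*
		 * The low 6 bits of the big-endian CRC of the address
		 * select one of the 64 bits in the multicast hash filter.
		 */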
2745		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2746		    ifma->ifma_addr), ETHER_ADDR_LEN);
2747		crc &= 0x3f;
2748		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2749		mcnt++;
2750	}
2751	if_maddr_runlock(ifp);
2752
2753	if (mcnt > 0) {
2754		filter |= TXP_RXFILT_HASHMULTI;
2755		txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE, 2, mchash[0],
2756		    mchash[1], NULL, NULL, NULL, TXP_CMD_NOWAIT);
2757	}
2758
2759setit:
2760	txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
2761	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
2762}
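		/*
		 * Checksum offload for VLAN-tagged frames is only usable
		 * when both Tx checksumming and VLAN tag insertion are
		 * enabled, so clear IFCAP_VLAN_HWCSUM otherwise.
		 */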
2763
2764static int
2765txp_set_capabilities(struct txp_softc *sc)
2766{
2767	struct ifnet *ifp;
2768	uint32_t rxcap, txcap;
2769
2770	TXP_LOCK_ASSERT(sc);
2771
2772	rxcap = txcap = 0;
2773	ifp = sc->sc_ifp;
2774	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) {
2775		if ((ifp->if_hwassist & CSUM_IP) != 0)
2776			txcap |= OFFLOAD_IPCKSUM;
2777		if ((ifp->if_hwassist & CSUM_TCP) != 0)
2778			txcap |= OFFLOAD_TCPCKSUM;
2779		if ((ifp->if_hwassist & CSUM_UDP) != 0)
2780			txcap |= OFFLOAD_UDPCKSUM;
2781		rxcap = txcap;
2782	}
2783	if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
2784		rxcap &= ~(OFFLOAD_IPCKSUM | OFFLOAD_TCPCKSUM |
2785		    OFFLOAD_UDPCKSUM);
2786	if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2787		rxcap |= OFFLOAD_VLAN;
2788		txcap |= OFFLOAD_VLAN;
2789	}
2790
2791	/* Tell firmware new offload configuration. */
2792	return (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, txcap, rxcap, NULL,
2793	    NULL, NULL, TXP_CMD_NOWAIT));
2794}
2795
2796static void
2797txp_stats_save(struct txp_softc *sc)
2798{
2799	struct txp_rsp_desc *rsp;
2800
2801	TXP_LOCK_ASSERT(sc);
2802
2803	rsp = NULL;
2804	if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
2805	    &rsp, TXP_CMD_WAIT))
2806		goto out;
2807	if (rsp->rsp_numdesc != 6)
2808		goto out;
2809	txp_stats_update(sc, rsp);
2810out:
2811	if (rsp != NULL)
2812		free(rsp, M_DEVBUF);
2813	bcopy(&sc->sc_stats, &sc->sc_ostats, sizeof(struct txp_hw_stats));
2814}
2815
2816static void
2817txp_stats_update(struct txp_softc *sc, struct txp_rsp_desc *rsp)
2818{
2819	struct ifnet *ifp;
2820	struct txp_hw_stats *ostats, *stats;
2821	struct txp_ext_desc *ext;
2822
2823	TXP_LOCK_ASSERT(sc);
2824
2825	ifp = sc->sc_ifp;
2826	ext = (struct txp_ext_desc *)(rsp + 1);
2827	ostats = &sc->sc_ostats;
2828	stats = &sc->sc_stats;
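	/*
	 * The statistics response carries six extension descriptors of
	 * 32-bit counters; the Tx and Rx byte counts each span two words.
	 */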
2829	stats->tx_frames = ostats->tx_frames + le32toh(rsp->rsp_par2);
2830	stats->tx_bytes = ostats->tx_bytes + (uint64_t)le32toh(rsp->rsp_par3) +
2831	    ((uint64_t)le32toh(ext[0].ext_1) << 32);
2832	stats->tx_deferred = ostats->tx_deferred + le32toh(ext[0].ext_2);
2833	stats->tx_late_colls = ostats->tx_late_colls + le32toh(ext[0].ext_3);
2834	stats->tx_colls = ostats->tx_colls + le32toh(ext[0].ext_4);
2835	stats->tx_carrier_lost = ostats->tx_carrier_lost +
2836	    le32toh(ext[1].ext_1);
2837	stats->tx_multi_colls = ostats->tx_multi_colls +
2838	    le32toh(ext[1].ext_2);
2839	stats->tx_excess_colls = ostats->tx_excess_colls +
2840	    le32toh(ext[1].ext_3);
2841	stats->tx_fifo_underruns = ostats->tx_fifo_underruns +
2842	    le32toh(ext[1].ext_4);
2843	stats->tx_mcast_oflows = ostats->tx_mcast_oflows +
2844	    le32toh(ext[2].ext_1);
2845	stats->tx_filtered = ostats->tx_filtered + le32toh(ext[2].ext_2);
2846	stats->rx_frames = ostats->rx_frames + le32toh(ext[2].ext_3);
2847	stats->rx_bytes = ostats->rx_bytes + (uint64_t)le32toh(ext[2].ext_4) +
2848	    ((uint64_t)le32toh(ext[3].ext_1) << 32);
2849	stats->rx_fifo_oflows = ostats->rx_fifo_oflows + le32toh(ext[3].ext_2);
2850	stats->rx_badssd = ostats->rx_badssd + le32toh(ext[3].ext_3);
2851	stats->rx_crcerrs = ostats->rx_crcerrs + le32toh(ext[3].ext_4);
2852	stats->rx_lenerrs = ostats->rx_lenerrs + le32toh(ext[4].ext_1);
2853	stats->rx_bcast_frames = ostats->rx_bcast_frames +
2854	    le32toh(ext[4].ext_2);
2855	stats->rx_mcast_frames = ostats->rx_mcast_frames +
2856	    le32toh(ext[4].ext_3);
2857	stats->rx_oflows = ostats->rx_oflows + le32toh(ext[4].ext_4);
2858	stats->rx_filtered = ostats->rx_filtered + le32toh(ext[5].ext_1);
2859
2860	ifp->if_ierrors = stats->rx_fifo_oflows + stats->rx_badssd +
2861	    stats->rx_crcerrs + stats->rx_lenerrs + stats->rx_oflows;
2862	ifp->if_oerrors = stats->tx_deferred + stats->tx_carrier_lost +
2863	    stats->tx_fifo_underruns + stats->tx_mcast_oflows;
2864	ifp->if_collisions = stats->tx_late_colls + stats->tx_multi_colls +
2865	    stats->tx_excess_colls;
2866	ifp->if_opackets = stats->tx_frames;
2867	ifp->if_ipackets = stats->rx_frames;
2868}
2869
2870#define	TXP_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
2871	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2872
2873#if __FreeBSD_version >= 900030
2874#define	TXP_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
2875	    SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2876#elif __FreeBSD_version > 800000
2877#define	TXP_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
2878	    SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2879#else
2880#define	TXP_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
2881	    SYSCTL_ADD_ULONG(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2882#endif
2883
2884static void
2885txp_sysctl_node(struct txp_softc *sc)
2886{
2887	struct sysctl_ctx_list *ctx;
2888	struct sysctl_oid_list *child, *parent;
2889	struct sysctl_oid *tree;
2890	struct txp_hw_stats *stats;
2891	int error;
2892
2893	stats = &sc->sc_stats;
2894	ctx = device_get_sysctl_ctx(sc->sc_dev);
2895	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
2896	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
2897	    CTLTYPE_INT | CTLFLAG_RW, &sc->sc_process_limit, 0,
2898	    sysctl_hw_txp_proc_limit, "I",
2899	    "max number of Rx events to process");
2900	/* Pull in device tunables. */
2901	sc->sc_process_limit = TXP_PROC_DEFAULT;
2902	error = resource_int_value(device_get_name(sc->sc_dev),
2903	    device_get_unit(sc->sc_dev), "process_limit",
2904	    &sc->sc_process_limit);
2905	if (error == 0) {
2906		if (sc->sc_process_limit < TXP_PROC_MIN ||
2907		    sc->sc_process_limit > TXP_PROC_MAX) {
2908			device_printf(sc->sc_dev,
2909			    "process_limit value out of range; "
2910			    "using default: %d\n", TXP_PROC_DEFAULT);
2911			sc->sc_process_limit = TXP_PROC_DEFAULT;
2912		}
2913	}
2914	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
2915	    NULL, "TXP statistics");
2916	parent = SYSCTL_CHILDREN(tree);
2917
2918	/* Tx statistics. */
2919	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2920	    NULL, "Tx MAC statistics");
2921	child = SYSCTL_CHILDREN(tree);
2922
2923	TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
2924	    &stats->tx_frames, "Frames");
2925	TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
2926	    &stats->tx_bytes, "Octets");
2927	TXP_SYSCTL_STAT_ADD32(ctx, child, "deferred",
2928	    &stats->tx_deferred, "Deferred frames");
2929	TXP_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
2930	    &stats->tx_late_colls, "Late collisions");
2931	TXP_SYSCTL_STAT_ADD32(ctx, child, "colls",
2932	    &stats->tx_colls, "Collisions");
2933	TXP_SYSCTL_STAT_ADD32(ctx, child, "carrier_lost",
2934	    &stats->tx_carrier_lost, "Carrier lost");
2935	TXP_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
2936	    &stats->tx_multi_colls, "Multiple collisions");
2937	TXP_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
2938	    &stats->tx_excess_colls, "Excessive collisions");
2939	TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_underruns",
2940	    &stats->tx_fifo_underruns, "FIFO underruns");
2941	TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_oflows",
2942	    &stats->tx_mcast_oflows, "Multicast overflows");
2943	TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
2944	    &stats->tx_filtered, "Filtered frames");
2945
2946	/* Rx statistics. */
2947	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2948	    NULL, "Rx MAC statistics");
2949	child = SYSCTL_CHILDREN(tree);
2950
2951	TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
2952	    &stats->rx_frames, "Frames");
2953	TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
2954	    &stats->rx_bytes, "Octets");
2955	TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2956	    &stats->rx_fifo_oflows, "FIFO overflows");
2957	TXP_SYSCTL_STAT_ADD32(ctx, child, "badssd",
2958	    &stats->rx_badssd, "Bad SSD");
2959	TXP_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
2960	    &stats->rx_crcerrs, "CRC errors");
2961	TXP_SYSCTL_STAT_ADD32(ctx, child, "lenerrs",
2962	    &stats->rx_lenerrs, "Length errors");
2963	TXP_SYSCTL_STAT_ADD32(ctx, child, "bcast_frames",
2964	    &stats->rx_bcast_frames, "Broadcast frames");
2965	TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_frames",
2966	    &stats->rx_mcast_frames, "Multicast frames");
2967	TXP_SYSCTL_STAT_ADD32(ctx, child, "oflows",
2968	    &stats->rx_oflows, "Overflows");
2969	TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
2970	    &stats->rx_filtered, "Filtered frames");
2971}
2972
2973#undef TXP_SYSCTL_STAT_ADD32
2974#undef TXP_SYSCTL_STAT_ADD64
2975
2976static int
2977sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2978{
2979	int error, value;
2980
2981	if (arg1 == NULL)
2982		return (EINVAL);
2983	value = *(int *)arg1;
2984	error = sysctl_handle_int(oidp, &value, 0, req);
2985	if (error || req->newptr == NULL)
2986		return (error);
2987	if (value < low || value > high)
2988		return (EINVAL);
2989	*(int *)arg1 = value;
2990
2991	return (0);
2992}
2993
2994static int
2995sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS)
2996{
2997	return (sysctl_int_range(oidp, arg1, arg2, req,
2998	    TXP_PROC_MIN, TXP_PROC_MAX));
2999}
3000