/*	$OpenBSD: if_txp.c,v 1.48 2001/06/27 06:34:50 kjc Exp $	*/

/*-
 * Copyright (c) 2001
 *	Jason L. Wright <jason@thought.net>, Theo de Raadt, and
 *	Aaron Campbell <aaron@monkey.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Jason L. Wright,
 *	Theo de Raadt and Aaron Campbell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/txp/if_txp.c 272067 2014-09-24 11:58:23Z glebius $");

/*
 * Driver for 3c990 (Typhoon) Ethernet ASIC
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/txp/if_txpreg.h>
#include <dev/txp/3c990img.h>

MODULE_DEPEND(txp, pci, 1, 1, 1);
MODULE_DEPEND(txp, ether, 1, 1, 1);

/*
 * XXX Known Typhoon firmware issues.
 *
 * 1. The firmware seems to have a Tx TCP/UDP checksum offloading bug.
 *    The firmware hangs when it is told to compute TCP/UDP checksums.
 *    It is not clear whether the firmware requires special alignment
 *    for checksum offloading; the datasheet says nothing about that.
 * 2. The datasheet does not specify the maximum number of fragment
 *    descriptors supported. Experimentation shows that the firmware
 *    supports up to 16 fragment descriptors. For TSO, the upper stack
 *    can hand down a 64KB IP datagram plus the link header (ethernet
 *    header + VLAN tag), but the controller can handle at most a 64KB
 *    frame given that PAGE_SIZE is 4KB (i.e. 16 * PAGE_SIZE). Because
 *    frames that need hardware TSO can be larger than 64KB, the TSO
 *    capability is disabled. TSO for frames that fit in 16 or fewer
 *    fragment descriptors works without problems, though.
 * 3. VLAN hardware tag stripping is always enabled in the firmware,
 *    even when it is explicitly told not to strip the tag. It would
 *    be possible to add the tag back in the Rx handler when VLAN
 *    hardware tagging is not active, but that was not attempted as it
 *    would be a layering violation.
 * 4. TXP_CMD_RECV_BUFFER_CONTROL does not work as the datasheet
 *    describes, so the driver has to handle the alignment restriction
 *    by copying each received frame to align it on a 32-bit boundary
 *    on strict-alignment architectures. This adds a lot of CPU burden
 *    and effectively reduces Rx performance on strict-alignment
 *    architectures (e.g. sparc64, arm and mips).
 *
 * Unfortunately 3Com no longer seems interested in releasing fixed
 * firmware, so we may have to live with these bugs.
 */

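/*
 * Consequence of firmware issue 1 above: only IP header checksum
 * offload is advertised on the Tx side; TCP/UDP Tx checksum offload
 * stays off.
 */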
#define	TXP_CSUM_FEATURES	(CSUM_IP)

/*
 * Various supported device vendors/types and their names.
 */
static struct txp_type txp_devs[] = {
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_95,
	    "3Com 3cR990-TX-95 Etherlink with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_97,
	    "3Com 3cR990-TX-97 Etherlink with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_TXM,
	    "3Com 3cR990B-TXM Etherlink with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_95,
	    "3Com 3cR990-SRV-95 Etherlink Server with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_97,
	    "3Com 3cR990-SRV-97 Etherlink Server with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_SRV,
	    "3Com 3cR990B-SRV Etherlink Server with 3XP Processor" },
	{ 0, 0, NULL }
};

static int txp_probe(device_t);
static int txp_attach(device_t);
static int txp_detach(device_t);
static int txp_shutdown(device_t);
static int txp_suspend(device_t);
static int txp_resume(device_t);
static int txp_intr(void *);
static void txp_int_task(void *, int);
static void txp_tick(void *);
static int txp_ioctl(struct ifnet *, u_long, caddr_t);
static uint64_t txp_get_counter(struct ifnet *, ift_counter);
static void txp_start(struct ifnet *);
static void txp_start_locked(struct ifnet *);
static int txp_encap(struct txp_softc *, struct txp_tx_ring *, struct mbuf **);
static void txp_stop(struct txp_softc *);
static void txp_init(void *);
static void txp_init_locked(struct txp_softc *);
static void txp_watchdog(struct txp_softc *);

static int txp_reset(struct txp_softc *);
static int txp_boot(struct txp_softc *, uint32_t);
static int txp_sleep(struct txp_softc *, int);
static int txp_wait(struct txp_softc *, uint32_t);
static int txp_download_fw(struct txp_softc *);
static int txp_download_fw_wait(struct txp_softc *);
static int txp_download_fw_section(struct txp_softc *,
    struct txp_fw_section_header *, int);
static int txp_alloc_rings(struct txp_softc *);
static void txp_init_rings(struct txp_softc *);
static int txp_dma_alloc(struct txp_softc *, char *, bus_dma_tag_t *,
    bus_size_t, bus_size_t, bus_dmamap_t *, void **, bus_size_t, bus_addr_t *);
static void txp_dma_free(struct txp_softc *, bus_dma_tag_t *, bus_dmamap_t,
    void **, bus_addr_t *);
static void txp_free_rings(struct txp_softc *);
static int txp_rxring_fill(struct txp_softc *);
static void txp_rxring_empty(struct txp_softc *);
static void txp_set_filter(struct txp_softc *);

static int txp_cmd_desc_numfree(struct txp_softc *);
static int txp_command(struct txp_softc *, uint16_t, uint16_t, uint32_t,
    uint32_t, uint16_t *, uint32_t *, uint32_t *, int);
static int txp_ext_command(struct txp_softc *, uint16_t, uint16_t,
    uint32_t, uint32_t, struct txp_ext_desc *, uint8_t,
    struct txp_rsp_desc **, int);
static int txp_response(struct txp_softc *, uint16_t, uint16_t,
    struct txp_rsp_desc **);
static void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
    struct txp_rsp_desc *);
static int txp_set_capabilities(struct txp_softc *);

static void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int txp_ifmedia_upd(struct ifnet *);
#ifdef TXP_DEBUG
static void txp_show_descriptor(void *);
#endif
static void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *);
static void txp_rxbuf_reclaim(struct txp_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void txp_fixup_rx(struct mbuf *);
#endif
static int txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *, int);
static void txp_stats_save(struct txp_softc *);
static void txp_stats_update(struct txp_softc *, struct txp_rsp_desc *);
static void txp_sysctl_node(struct txp_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS);

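/*
 * Loader tunable: set hw.txp.prefer_iomap=1 (e.g. in /boot/loader.conf)
 * to map registers through I/O port space instead of memory space.
 */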
static int prefer_iomap = 0;
TUNABLE_INT("hw.txp.prefer_iomap", &prefer_iomap);

static device_method_t txp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		txp_probe),
	DEVMETHOD(device_attach,	txp_attach),
	DEVMETHOD(device_detach,	txp_detach),
	DEVMETHOD(device_shutdown,	txp_shutdown),
	DEVMETHOD(device_suspend,	txp_suspend),
	DEVMETHOD(device_resume,	txp_resume),

	{ NULL, NULL }
};

static driver_t txp_driver = {
	"txp",
	txp_methods,
	sizeof(struct txp_softc)
};

static devclass_t txp_devclass;

DRIVER_MODULE(txp, pci, txp_driver, txp_devclass, 0, 0);

static int
txp_probe(device_t dev)
{
	struct txp_type *t;

	t = txp_devs;

	while (t->txp_name != NULL) {
		if ((pci_get_vendor(dev) == t->txp_vid) &&
		    (pci_get_device(dev) == t->txp_did)) {
			device_set_desc(dev, t->txp_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

static int
txp_attach(device_t dev)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	struct txp_rsp_desc *rsp;
	uint16_t p1;
	uint32_t p2, reg;
	int error = 0, pmc, rid;
	uint8_t eaddr[ETHER_ADDR_LEN], *ver;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_int_task, 0, txp_int_task, sc);
	TAILQ_INIT(&sc->sc_busy_list);
	TAILQ_INIT(&sc->sc_free_list);

	ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);

	pci_enable_busmaster(dev);
	/* Prefer memory space register mapping over IO space. */
	if (prefer_iomap == 0) {
		sc->sc_res_id = PCIR_BAR(1);
		sc->sc_res_type = SYS_RES_MEMORY;
	} else {
		sc->sc_res_id = PCIR_BAR(0);
		sc->sc_res_type = SYS_RES_IOPORT;
	}
	sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
	    &sc->sc_res_id, RF_ACTIVE);
	if (sc->sc_res == NULL && prefer_iomap == 0) {
		sc->sc_res_id = PCIR_BAR(0);
		sc->sc_res_type = SYS_RES_IOPORT;
		sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
		    &sc->sc_res_id, RF_ACTIVE);
	}
	if (sc->sc_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		ifmedia_removeall(&sc->sc_ifmedia);
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}

	/* Enable MWI. */
	reg = pci_read_config(dev, PCIR_COMMAND, 2);
	reg |= PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, reg, 2);
	/* Check cache line size. */
	reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	reg <<= 4;
	if (reg == 0 || (reg % 16) != 0)
		device_printf(sc->sc_dev,
		    "invalid cache line size : %u\n", reg);

	/* Allocate interrupt */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->sc_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	if ((error = txp_alloc_rings(sc)) != 0)
		goto fail;
	txp_init_rings(sc);
	txp_sysctl_node(sc);
	/* Reset controller and make it reload sleep image. */
	if (txp_reset(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* Let controller boot from sleep image. */
	if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
		device_printf(sc->sc_dev, "could not boot sleep image\n");
		error = ENXIO;
		goto fail;
	}

	/* Get station address. */
	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
	    &p1, &p2, NULL, TXP_CMD_WAIT)) {
		error = ENXIO;
		goto fail;
	}

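	/*
	 * The station address comes back in two little-endian command
	 * parameters with the bytes in reversed order, so swap the
	 * bytes as we copy them out.
	 */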
	p1 = le16toh(p1);
	eaddr[0] = ((uint8_t *)&p1)[1];
	eaddr[1] = ((uint8_t *)&p1)[0];
	p2 = le32toh(p2);
	eaddr[2] = ((uint8_t *)&p2)[3];
	eaddr[3] = ((uint8_t *)&p2)[2];
	eaddr[4] = ((uint8_t *)&p2)[1];
	eaddr[5] = ((uint8_t *)&p2)[0];

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	/*
	 * Show sleep image version information which may help to
	 * diagnose sleep image specific issues.
	 */
	rsp = NULL;
	if (txp_ext_command(sc, TXP_CMD_READ_VERSION, 0, 0, 0, NULL, 0,
	    &rsp, TXP_CMD_WAIT)) {
		device_printf(dev, "can not read sleep image version\n");
		error = ENXIO;
		goto fail;
	}
	if (rsp->rsp_numdesc == 0) {
		p2 = le32toh(rsp->rsp_par2) & 0xFFFF;
		device_printf(dev, "Typhoon 1.0 sleep image (2000/%02u/%02u)\n",
		    p2 >> 8, p2 & 0xFF);
	} else if (rsp->rsp_numdesc == 2) {
		p2 = le32toh(rsp->rsp_par2);
		ver = (uint8_t *)(rsp + 1);
		/*
		 * Even though the datasheet says the command returns a
		 * NUL-terminated version string, explicitly terminate
		 * the string. Given the firmware's many other bugs,
		 * even this simple guarantee can't be trusted.
		 */
		ver[25] = '\0';
		device_printf(dev,
		    "Typhoon 1.1+ sleep image %02u.%03u.%03u %s\n",
		    p2 >> 24, (p2 >> 12) & 0xFFF, p2 & 0xFFF, ver);
	} else {
		p2 = le32toh(rsp->rsp_par2);
		device_printf(dev,
		    "Unknown Typhoon sleep image version: %u:0x%08x\n",
		    rsp->rsp_numdesc, p2);
	}
	if (rsp != NULL)
		free(rsp, M_DEVBUF);

	sc->sc_xcvr = TXP_XCVR_AUTO;
	txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = txp_ioctl;
	ifp->if_start = txp_start;
	ifp->if_init = txp_init;
	ifp->if_get_counter = txp_get_counter;
	ifp->if_snd.ifq_drv_maxlen = TX_ENTRIES - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	/*
	 * It's possible to read the firmware's offload capability, but
	 * the firmware has not been downloaded yet, so announce the
	 * known-working capabilities here. We're not interested in the
	 * IPSec capability, and due to the many firmware bugs we can't
	 * advertise the full capability set anyway.
	 */
	ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM;
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	/* Enable all capabilities. */
	ifp->if_capenable = ifp->if_capabilities;

	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
	/* Tell the upper layer(s) we support long frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);

	/* Create local taskq. */
	sc->sc_tq = taskqueue_create_fast("txp_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	if (sc->sc_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->sc_dev));

	/* Put controller into sleep. */
	if (txp_sleep(sc, 0) != 0) {
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    txp_intr, NULL, sc, &sc->sc_intrhand);

	if (error != 0) {
		ether_ifdetach(ifp);
		device_printf(dev, "couldn't set up interrupt handler.\n");
		goto fail;
	}

	return (0);

fail:
	if (error != 0)
		txp_detach(dev);
	return (error);
}

static int
txp_detach(device_t dev)
{
	struct txp_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	ifp = sc->sc_ifp;
	if (device_is_attached(dev)) {
		TXP_LOCK(sc);
		sc->sc_flags |= TXP_FLAG_DETACH;
		txp_stop(sc);
		TXP_UNLOCK(sc);
		callout_drain(&sc->sc_tick);
		taskqueue_drain(sc->sc_tq, &sc->sc_int_task);
		ether_ifdetach(ifp);
	}
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);

	ifmedia_removeall(&sc->sc_ifmedia);
	if (sc->sc_intrhand != NULL)
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
	if (sc->sc_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
	if (sc->sc_res != NULL)
		bus_release_resource(dev, sc->sc_res_type, sc->sc_res_id,
		    sc->sc_res);
	if (sc->sc_ifp != NULL) {
		if_free(sc->sc_ifp);
		sc->sc_ifp = NULL;
	}
	txp_free_rings(sc);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

static int
txp_reset(struct txp_softc *sc)
{
	uint32_t r;
	int i;

	/* Disable interrupts. */
	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	/* Ack all pending interrupts. */
	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);

	r = 0;
	WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
	DELAY(1000);
	WRITE_REG(sc, TXP_SRR, 0);

	/* Should wait max 6 seconds. */
	for (i = 0; i < 6000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(1000);
	}

	if (r != STAT_WAITING_FOR_HOST_REQUEST)
		device_printf(sc->sc_dev, "reset hung\n");

	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);

	/*
	 * Give the firmware more time to finish loading the sleep
	 * image before we try to boot from it.
	 */
	DELAY(5000);

	return (0);
}

static int
txp_boot(struct txp_softc *sc, uint32_t state)
{

	/* See if it's waiting for boot, and try to boot it. */
	if (txp_wait(sc, state) != 0) {
		device_printf(sc->sc_dev, "not waiting for boot\n");
		return (ENXIO);
	}

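	/*
	 * Hand the boot record's bus address to the firmware through
	 * the host-to-ARM mailbox registers. The write to TXP_H2A_0
	 * carries the command code and appears to post the request,
	 * so it must come last.
	 */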
	WRITE_REG(sc, TXP_H2A_2, TXP_ADDR_HI(sc->sc_ldata.txp_boot_paddr));
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_1, TXP_ADDR_LO(sc->sc_ldata.txp_boot_paddr));
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	/* See if it booted. */
	if (txp_wait(sc, STAT_RUNNING) != 0) {
		device_printf(sc->sc_dev, "firmware not running\n");
		return (ENXIO);
	}

	/* Clear TX and CMD ring write registers. */
	WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	return (0);
}

static int
txp_download_fw(struct txp_softc *sc)
{
	struct txp_fw_file_header *fileheader;
	struct txp_fw_section_header *secthead;
	int sect;
	uint32_t error, ier, imr;

	TXP_LOCK_ASSERT(sc);

	error = 0;
	ier = READ_REG(sc, TXP_IER);
	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

	imr = READ_REG(sc, TXP_IMR);
	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

	if (txp_wait(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
		device_printf(sc->sc_dev, "not waiting for host request\n");
		error = ETIMEDOUT;
		goto fail;
	}

	/* Ack the status. */
	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	fileheader = (struct txp_fw_file_header *)tc990image;
	if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
		device_printf(sc->sc_dev, "firmware invalid magic\n");
		error = EIO;
		goto fail;
	}

	/* Tell boot firmware to get ready for image. */
	WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr));
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_2, le32toh(fileheader->hmac[0]));
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_3, le32toh(fileheader->hmac[1]));
	TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_4, le32toh(fileheader->hmac[2]));
	TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_5, le32toh(fileheader->hmac[3]));
	TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_6, le32toh(fileheader->hmac[4]));
	TXP_BARRIER(sc, TXP_H2A_6, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	if (txp_download_fw_wait(sc)) {
		device_printf(sc->sc_dev, "firmware wait failed, initial\n");
		error = ETIMEDOUT;
		goto fail;
	}

	secthead = (struct txp_fw_section_header *)(((uint8_t *)tc990image) +
	    sizeof(struct txp_fw_file_header));

	for (sect = 0; sect < le32toh(fileheader->nsections); sect++) {
		if ((error = txp_download_fw_section(sc, secthead, sect)) != 0)
			goto fail;
		secthead = (struct txp_fw_section_header *)
		    (((uint8_t *)secthead) + le32toh(secthead->nbytes) +
		    sizeof(*secthead));
	}

	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	if (txp_wait(sc, STAT_WAITING_FOR_BOOT) != 0) {
		device_printf(sc->sc_dev, "not waiting for boot\n");
		error = ETIMEDOUT;
		goto fail;
	}

fail:
	WRITE_REG(sc, TXP_IER, ier);
	WRITE_REG(sc, TXP_IMR, imr);

	return (error);
}

static int
txp_download_fw_wait(struct txp_softc *sc)
{
	uint32_t i;

	TXP_LOCK_ASSERT(sc);

	for (i = 0; i < TXP_TIMEOUT; i++) {
		if ((READ_REG(sc, TXP_ISR) & TXP_INT_A2H_0) != 0)
			break;
		DELAY(50);
	}

	if (i == TXP_TIMEOUT) {
		device_printf(sc->sc_dev, "firmware wait failed comm0\n");
		return (ETIMEDOUT);
	}

	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	if (READ_REG(sc, TXP_A2H_0) != STAT_WAITING_FOR_SEGMENT) {
		device_printf(sc->sc_dev, "firmware not waiting for segment\n");
		return (ETIMEDOUT);
	}
	return (0);
}

static int
txp_download_fw_section(struct txp_softc *sc,
    struct txp_fw_section_header *sect, int sectnum)
{
	bus_dma_tag_t sec_tag;
	bus_dmamap_t sec_map;
	bus_addr_t sec_paddr;
	uint8_t *sec_buf;
	int rseg, err = 0;
	struct mbuf m;
	uint16_t csum;

	TXP_LOCK_ASSERT(sc);

	/* Skip zero length sections. */
	if (le32toh(sect->nbytes) == 0)
		return (0);

	/* Make sure we aren't past the end of the image. */
	rseg = ((uint8_t *)sect) - ((uint8_t *)tc990image);
	if (rseg >= sizeof(tc990image)) {
		device_printf(sc->sc_dev,
		    "firmware invalid section address, section %d\n", sectnum);
		return (EIO);
	}

	/* Make sure this section doesn't go past the end. */
	rseg += le32toh(sect->nbytes);
	if (rseg >= sizeof(tc990image)) {
		device_printf(sc->sc_dev, "firmware truncated section %d\n",
		    sectnum);
		return (EIO);
	}

	sec_tag = NULL;
	sec_map = NULL;
	sec_buf = NULL;
	/*
	 * XXX Drop the lock around txp_dma_alloc(); the allocation
	 * may sleep waiting for DMA'able memory.
	 */
	TXP_UNLOCK(sc);
	err = txp_dma_alloc(sc, "firmware sections", &sec_tag, sizeof(uint32_t),
	    0, &sec_map, (void **)&sec_buf, le32toh(sect->nbytes), &sec_paddr);
	TXP_LOCK(sc);
	if (err != 0)
		goto bail;
	bcopy(((uint8_t *)sect) + sizeof(*sect), sec_buf,
	    le32toh(sect->nbytes));

	/*
	 * dummy up mbuf and verify section checksum
	 */
	m.m_type = MT_DATA;
	m.m_next = m.m_nextpkt = NULL;
	m.m_len = le32toh(sect->nbytes);
	m.m_data = sec_buf;
	m.m_flags = 0;
	csum = in_cksum(&m, le32toh(sect->nbytes));
	if (csum != sect->cksum) {
		device_printf(sc->sc_dev,
		    "firmware section %d, bad cksum (expected 0x%x got 0x%x)\n",
		    sectnum, le16toh(sect->cksum), csum);
		err = EIO;
		goto bail;
	}

	bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_PREWRITE);

	WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes));
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_2, le16toh(sect->cksum));
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr));
	TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_4, TXP_ADDR_HI(sec_paddr));
	TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_5, TXP_ADDR_LO(sec_paddr));
	TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	if (txp_download_fw_wait(sc)) {
		device_printf(sc->sc_dev,
		    "firmware wait failed, section %d\n", sectnum);
		err = ETIMEDOUT;
	}

	bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_POSTWRITE);
bail:
	txp_dma_free(sc, &sec_tag, sec_map, (void **)&sec_buf, &sec_paddr);
	return (err);
}

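/*
 * Interrupt filter: ack and mask the interrupt, then hand the real
 * work off to the driver taskqueue (txp_int_task).
 */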
static int
txp_intr(void *vsc)
{
	struct txp_softc *sc;
	uint32_t status;

	sc = vsc;
	status = READ_REG(sc, TXP_ISR);
	if ((status & TXP_INT_LATCH) == 0)
		return (FILTER_STRAY);
	WRITE_REG(sc, TXP_ISR, status);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);

	return (FILTER_HANDLED);
}

static void
txp_int_task(void *arg, int pending)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	struct txp_hostvar *hv;
	uint32_t isr;
	int more;

	sc = (struct txp_softc *)arg;

	TXP_LOCK(sc);
	ifp = sc->sc_ifp;
	hv = sc->sc_hostvar;
	isr = READ_REG(sc, TXP_ISR);
	if ((isr & TXP_INT_LATCH) != 0)
		WRITE_REG(sc, TXP_ISR, isr);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
		    sc->sc_cdata.txp_hostvar_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		more = 0;
		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
			more += txp_rx_reclaim(sc, &sc->sc_rxhir,
			    sc->sc_process_limit);
		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
			more += txp_rx_reclaim(sc, &sc->sc_rxlor,
			    sc->sc_process_limit);
		/*
		 * XXX
		 * The controller does not seem smart enough to handle
		 * FIFO overflow conditions under heavy network load;
		 * no matter how often new Rx buffers were passed to
		 * the controller, the situation did not change. Flow
		 * control might be the only way to mitigate the issue,
		 * but the firmware has no command to control the
		 * threshold for emitting pause frames.
		 */
		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
			txp_rxbuf_reclaim(sc);
		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txhir);
		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txlor);
		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
		    sc->sc_cdata.txp_hostvar_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			txp_start_locked(sc->sc_ifp);
		if (more != 0 ||
		    (READ_REG(sc, TXP_ISR) & TXP_INT_LATCH) != 0) {
			taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
			TXP_UNLOCK(sc);
			return;
		}
	}

	/* Re-enable interrupts. */
	WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
	TXP_UNLOCK(sc);
}

#ifndef __NO_STRICT_ALIGNMENT
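/*
 * On strict-alignment architectures the frame is DMA'd to an address
 * aligned for the controller rather than for the IP header; slide the
 * payload back by TXP_RXBUF_ALIGN - ETHER_ALIGN bytes so that the IP
 * header ends up 32-bit aligned.
 */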
static __inline void
txp_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - (TXP_RXBUF_ALIGN - ETHER_ALIGN) / sizeof *src;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= TXP_RXBUF_ALIGN - ETHER_ALIGN;
}
#endif

static int
txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r, int count)
{
	struct ifnet *ifp;
	struct txp_rx_desc *rxd;
	struct mbuf *m;
	struct txp_rx_swdesc *sd;
	uint32_t roff, woff, rx_stat, prog;

	TXP_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

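	/*
	 * r_roff/r_woff live in the host variables area shared with
	 * the firmware: the firmware advances the write offset as it
	 * completes descriptors and we advance the read offset as we
	 * consume them.
	 */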
	roff = le32toh(*r->r_roff);
	woff = le32toh(*r->r_woff);
	rxd = r->r_desc + roff / sizeof(struct txp_rx_desc);
	for (prog = 0; roff != woff; prog++, count--) {
		if (count <= 0)
			break;
		bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));
		KASSERT(sd != NULL, ("%s: Rx desc ring corrupted", __func__));
		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
		m = sd->sd_mbuf;
		KASSERT(m != NULL, ("%s: Rx buffer ring corrupted", __func__));
		sd->sd_mbuf = NULL;
		TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
		if ((rxd->rx_flags & RX_FLAGS_ERROR) != 0) {
			if (bootverbose)
				device_printf(sc->sc_dev, "Rx error %u\n",
				    le32toh(rxd->rx_stat) & RX_ERROR_MASK);
			m_freem(m);
			goto next;
		}

		m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len);
		m->m_pkthdr.rcvif = ifp;
#ifndef __NO_STRICT_ALIGNMENT
		txp_fixup_rx(m);
#endif
		rx_stat = le32toh(rxd->rx_stat);
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((rx_stat & RX_STAT_IPCKSUMBAD) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			else if ((rx_stat & RX_STAT_IPCKSUMGOOD) != 0)
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED|CSUM_IP_VALID;

			if ((rx_stat & RX_STAT_TCPCKSUMGOOD) != 0 ||
			    (rx_stat & RX_STAT_UDPCKSUMGOOD) != 0) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/*
		 * XXX
		 * Typhoon has a firmware bug: the VLAN tag is always
		 * stripped even when the firmware is told not to
		 * remove it. Therefore don't check if_capenable here.
		 */
		if (/* (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && */
		    (rx_stat & RX_STAT_VLAN) != 0) {
			m->m_pkthdr.ether_vtag =
			    bswap16((le32toh(rxd->rx_vlan) >> 16));
			m->m_flags |= M_VLANTAG;
		}

		TXP_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		TXP_LOCK(sc);

973		roff += sizeof(struct txp_rx_desc);
974		if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
975			roff = 0;
976			rxd = r->r_desc;
977		} else
978			rxd++;
979		prog++;
980	}

	if (prog == 0)
		return (0);

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	*r->r_roff = htole32(roff);

	return (count > 0 ? 0 : EAGAIN);
}

static void
txp_rxbuf_reclaim(struct txp_softc *sc)
{
	struct txp_hostvar *hv;
	struct txp_rxbuf_desc *rbd;
	struct txp_rx_swdesc *sd;
	bus_dma_segment_t segs[1];
	int nsegs, prod, prog;
	uint32_t cons;

	TXP_LOCK_ASSERT(sc);

	hv = sc->sc_hostvar;
	cons = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx));
	prod = sc->sc_rxbufprod;
	TXP_DESC_INC(prod, RXBUF_ENTRIES);
	if (prod == cons)
		return;

	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
	    sc->sc_cdata.txp_rxbufs_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prod != cons; prog++) {
		sd = TAILQ_FIRST(&sc->sc_free_list);
		if (sd == NULL)
			break;
		rbd = sc->sc_rxbufs + prod;
		bcopy((u_long *)&rbd->rb_vaddrlo, &sd, sizeof(sd));
		sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (sd->sd_mbuf == NULL)
			break;
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
#ifndef __NO_STRICT_ALIGNMENT
		m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
#endif
		if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
		    sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0) != 0) {
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
			break;
		}
		KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
		    nsegs));
		TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
		TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
		    BUS_DMASYNC_PREREAD);
		rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
		rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
		TXP_DESC_INC(prod, RXBUF_ENTRIES);
	}

	if (prog == 0)
		return;
	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
	    sc->sc_cdata.txp_rxbufs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	prod = (prod + RXBUF_ENTRIES - 1) % RXBUF_ENTRIES;
	sc->sc_rxbufprod = prod;
	hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(prod));
}

/*
 * Reclaim mbufs and entries from a transmit ring.
 */
static void
txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r)
{
	struct ifnet *ifp;
	uint32_t idx;
	uint32_t cons, cnt;
	struct txp_tx_desc *txd;
	struct txp_swdesc *sd;

	TXP_LOCK_ASSERT(sc);

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	ifp = sc->sc_ifp;
	idx = TXP_OFFSET2IDX(le32toh(*(r->r_off)));
	cons = r->r_cons;
	cnt = r->r_cnt;
	txd = r->r_desc + cons;
	sd = sc->sc_txd + cons;

	for (cnt = r->r_cnt; cons != idx && cnt > 0; cnt--) {
		if ((txd->tx_flags & TX_FLAGS_TYPE_M) == TX_FLAGS_TYPE_DATA) {
			if (sd->sd_mbuf != NULL) {
				bus_dmamap_sync(sc->sc_cdata.txp_tx_tag,
				    sd->sd_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->sc_cdata.txp_tx_tag,
				    sd->sd_map);
				m_freem(sd->sd_mbuf);
				sd->sd_mbuf = NULL;
				txd->tx_addrlo = 0;
				txd->tx_addrhi = 0;
				txd->tx_flags = 0;
			}
		}
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		if (++cons == TX_ENTRIES) {
			txd = r->r_desc;
			cons = 0;
			sd = sc->sc_txd;
		} else {
			txd++;
			sd++;
		}
	}

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	r->r_cons = cons;
	r->r_cnt = cnt;
	if (cnt == 0)
		sc->sc_watchdog_timer = 0;
}

static int
txp_shutdown(device_t dev)
{

	return (txp_suspend(dev));
}

static int
txp_suspend(device_t dev)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	uint8_t *eaddr;
	uint16_t p1;
	uint32_t p2;
	int pmc;
	uint16_t pmstat;

	sc = device_get_softc(dev);

	TXP_LOCK(sc);
	ifp = sc->sc_ifp;
	txp_stop(sc);
	txp_init_rings(sc);
	/* Reset controller and make it reload sleep image. */
	txp_reset(sc);
	/* Let controller boot from sleep image. */
	if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
		device_printf(sc->sc_dev, "couldn't boot sleep image\n");

	/* Set station address. */
	eaddr = IF_LLADDR(sc->sc_ifp);
	p1 = 0;
	((uint8_t *)&p1)[1] = eaddr[0];
	((uint8_t *)&p1)[0] = eaddr[1];
	p1 = le16toh(p1);
	((uint8_t *)&p2)[3] = eaddr[2];
	((uint8_t *)&p2)[2] = eaddr[3];
	((uint8_t *)&p2)[1] = eaddr[4];
	((uint8_t *)&p2)[0] = eaddr[5];
	p2 = le32toh(p2);
	txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0, NULL, NULL,
	    NULL, TXP_CMD_WAIT);
	txp_set_filter(sc);
	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	txp_sleep(sc, sc->sc_ifp->if_capenable);
	if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
		/* Request PME. */
		pmstat = pci_read_config(sc->sc_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
		if ((ifp->if_capenable & IFCAP_WOL) != 0)
			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->sc_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
	TXP_UNLOCK(sc);

	return (0);
}

static int
txp_resume(device_t dev)
{
	struct txp_softc *sc;
	int pmc;
	uint16_t pmstat;

	sc = device_get_softc(dev);

	TXP_LOCK(sc);
	if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->sc_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->sc_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	if ((sc->sc_ifp->if_flags & IFF_UP) != 0)
		txp_init_locked(sc);
	TXP_UNLOCK(sc);

	return (0);
}

struct txp_dmamap_arg {
	bus_addr_t	txp_busaddr;
};

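/* bus_dmamap_load() callback: record the single segment's bus address. */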
static void
txp_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct txp_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct txp_dmamap_arg *)arg;
	ctx->txp_busaddr = segs[0].ds_addr;
}

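/*
 * Allocate a contiguous DMA'able block for a ring or record: create a
 * tag derived from the parent tag, allocate zeroed coherent memory,
 * load it and return its bus address in *paddr. Undone by
 * txp_dma_free().
 */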
static int
txp_dma_alloc(struct txp_softc *sc, char *type, bus_dma_tag_t *tag,
    bus_size_t alignment, bus_size_t boundary, bus_dmamap_t *map, void **buf,
    bus_size_t size, bus_addr_t *paddr)
{
	struct txp_dmamap_arg ctx;
	int error;

	/* Create DMA block tag. */
	error = bus_dma_tag_create(
	    sc->sc_cdata.txp_parent_tag,	/* parent */
	    alignment, boundary,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not create DMA tag for %s.\n", type);
		return (error);
	}

	*paddr = 0;
	/* Allocate DMA'able memory and load the DMA map. */
	error = bus_dmamem_alloc(*tag, buf, BUS_DMA_WAITOK | BUS_DMA_ZERO |
	    BUS_DMA_COHERENT, map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate DMA'able memory for %s.\n", type);
		return (error);
	}

	ctx.txp_busaddr = 0;
	error = bus_dmamap_load(*tag, *map, *(uint8_t **)buf,
	    size, txp_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.txp_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "could not load DMA'able memory for %s.\n", type);
		return (error);
	}
	*paddr = ctx.txp_busaddr;

	return (0);
}

static void
txp_dma_free(struct txp_softc *sc, bus_dma_tag_t *tag, bus_dmamap_t map,
    void **buf, bus_addr_t *paddr)
{

	if (*tag != NULL) {
		if (*paddr != 0)
			bus_dmamap_unload(*tag, map);
		if (buf != NULL) {
			bus_dmamem_free(*tag, *(uint8_t **)buf, map);
			*(uint8_t **)buf = NULL;
		}
		*paddr = 0;
		bus_dma_tag_destroy(*tag);
		*tag = NULL;
	}
}

static int
txp_alloc_rings(struct txp_softc *sc)
{
	struct txp_boot_record *boot;
	struct txp_ldata *ld;
	struct txp_swdesc *txd;
	struct txp_rxbuf_desc *rbd;
	struct txp_rx_swdesc *sd;
	int error, i;

	ld = &sc->sc_ldata;
	boot = ld->txp_boot;

	/* boot record */
	sc->sc_boot = boot;

	/*
	 * Create parent ring/DMA block tag.
	 * The datasheet says that all ring addresses and descriptors
	 * support 64-bit addressing. However, the controller is known
	 * to have no DAC support, so limit the DMA address space to
	 * 32 bits.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->sc_dev), /* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.txp_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create parent DMA tag.\n");
		return (error);
	}

	/* Boot record. */
	error = txp_dma_alloc(sc, "boot record",
	    &sc->sc_cdata.txp_boot_tag, sizeof(uint32_t), 0,
	    &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
	    sizeof(struct txp_boot_record),
	    &sc->sc_ldata.txp_boot_paddr);
	if (error != 0)
		return (error);
	boot = sc->sc_ldata.txp_boot;
	sc->sc_boot = boot;

	/* Host variables. */
	error = txp_dma_alloc(sc, "host variables",
	    &sc->sc_cdata.txp_hostvar_tag, sizeof(uint32_t), 0,
	    &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
	    sizeof(struct txp_hostvar),
	    &sc->sc_ldata.txp_hostvar_paddr);
	if (error != 0)
		return (error);
	boot->br_hostvar_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_hostvar_paddr));
	boot->br_hostvar_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_hostvar_paddr));
	sc->sc_hostvar = sc->sc_ldata.txp_hostvar;

	/* Hi priority tx ring. */
	error = txp_dma_alloc(sc, "hi priority tx ring",
	    &sc->sc_cdata.txp_txhiring_tag, sizeof(struct txp_tx_desc), 0,
	    &sc->sc_cdata.txp_txhiring_map, (void **)&sc->sc_ldata.txp_txhiring,
	    sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_ldata.txp_txhiring_paddr);
	if (error != 0)
		return (error);
	boot->br_txhipri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txhiring_paddr));
	boot->br_txhipri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txhiring_paddr));
	boot->br_txhipri_siz =
	    htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
	sc->sc_txhir.r_tag = sc->sc_cdata.txp_txhiring_tag;
	sc->sc_txhir.r_map = sc->sc_cdata.txp_txhiring_map;
	sc->sc_txhir.r_reg = TXP_H2A_1;
	sc->sc_txhir.r_desc = sc->sc_ldata.txp_txhiring;
	sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
	sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;

	/* Low priority tx ring. */
	error = txp_dma_alloc(sc, "low priority tx ring",
	    &sc->sc_cdata.txp_txloring_tag, sizeof(struct txp_tx_desc), 0,
	    &sc->sc_cdata.txp_txloring_map, (void **)&sc->sc_ldata.txp_txloring,
	    sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_ldata.txp_txloring_paddr);
	if (error != 0)
		return (error);
	boot->br_txlopri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txloring_paddr));
	boot->br_txlopri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txloring_paddr));
	boot->br_txlopri_siz =
	    htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
	sc->sc_txlor.r_tag = sc->sc_cdata.txp_txloring_tag;
	sc->sc_txlor.r_map = sc->sc_cdata.txp_txloring_map;
	sc->sc_txlor.r_reg = TXP_H2A_3;
	sc->sc_txlor.r_desc = sc->sc_ldata.txp_txloring;
	sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
	sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;

	/* High priority rx ring. */
	error = txp_dma_alloc(sc, "hi priority rx ring",
	    &sc->sc_cdata.txp_rxhiring_tag,
	    roundup(sizeof(struct txp_rx_desc), 16), 0,
	    &sc->sc_cdata.txp_rxhiring_map, (void **)&sc->sc_ldata.txp_rxhiring,
	    sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_ldata.txp_rxhiring_paddr);
	if (error != 0)
		return (error);
	boot->br_rxhipri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxhiring_paddr));
	boot->br_rxhipri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxhiring_paddr));
	boot->br_rxhipri_siz =
	    htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
	sc->sc_rxhir.r_tag = sc->sc_cdata.txp_rxhiring_tag;
	sc->sc_rxhir.r_map = sc->sc_cdata.txp_rxhiring_map;
	sc->sc_rxhir.r_desc = sc->sc_ldata.txp_rxhiring;
	sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
	sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;

	/* Low priority rx ring. */
	error = txp_dma_alloc(sc, "low priority rx ring",
	    &sc->sc_cdata.txp_rxloring_tag,
	    roundup(sizeof(struct txp_rx_desc), 16), 0,
	    &sc->sc_cdata.txp_rxloring_map, (void **)&sc->sc_ldata.txp_rxloring,
	    sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_ldata.txp_rxloring_paddr);
	if (error != 0)
		return (error);
	boot->br_rxlopri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxloring_paddr));
	boot->br_rxlopri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxloring_paddr));
	boot->br_rxlopri_siz =
	    htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
	sc->sc_rxlor.r_tag = sc->sc_cdata.txp_rxloring_tag;
	sc->sc_rxlor.r_map = sc->sc_cdata.txp_rxloring_map;
	sc->sc_rxlor.r_desc = sc->sc_ldata.txp_rxloring;
	sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
	sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;

	/* Command ring. */
	error = txp_dma_alloc(sc, "command ring",
	    &sc->sc_cdata.txp_cmdring_tag, sizeof(struct txp_cmd_desc), 0,
	    &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
	    sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
	    &sc->sc_ldata.txp_cmdring_paddr);
	if (error != 0)
		return (error);
	boot->br_cmd_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_cmdring_paddr));
	boot->br_cmd_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_cmdring_paddr));
	boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
	sc->sc_cmdring.base = sc->sc_ldata.txp_cmdring;
	sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
	sc->sc_cmdring.lastwrite = 0;

	/* Response ring. */
	error = txp_dma_alloc(sc, "response ring",
	    &sc->sc_cdata.txp_rspring_tag, sizeof(struct txp_rsp_desc), 0,
	    &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
	    sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
	    &sc->sc_ldata.txp_rspring_paddr);
	if (error != 0)
		return (error);
	boot->br_resp_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rspring_paddr));
	boot->br_resp_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rspring_paddr));
	boot->br_resp_siz = htole32(RSP_ENTRIES * sizeof(struct txp_rsp_desc));
	sc->sc_rspring.base = sc->sc_ldata.txp_rspring;
	sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
	sc->sc_rspring.lastwrite = 0;

	/* Receive buffer ring. */
	error = txp_dma_alloc(sc, "receive buffer ring",
	    &sc->sc_cdata.txp_rxbufs_tag, sizeof(struct txp_rxbuf_desc), 0,
	    &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
	    sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
	    &sc->sc_ldata.txp_rxbufs_paddr);
	if (error != 0)
		return (error);
	boot->br_rxbuf_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxbufs_paddr));
	boot->br_rxbuf_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxbufs_paddr));
	boot->br_rxbuf_siz =
	    htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
	sc->sc_rxbufs = sc->sc_ldata.txp_rxbufs;

	/* Zero ring. */
	error = txp_dma_alloc(sc, "zero buffer",
	    &sc->sc_cdata.txp_zero_tag, sizeof(uint32_t), 0,
	    &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
	    sizeof(uint32_t), &sc->sc_ldata.txp_zero_paddr);
	if (error != 0)
		return (error);
	boot->br_zero_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_zero_paddr));
	boot->br_zero_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_zero_paddr));

	bus_dmamap_sync(sc->sc_cdata.txp_boot_tag, sc->sc_cdata.txp_boot_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Create Tx buffers. */
	error = bus_dma_tag_create(
	    sc->sc_cdata.txp_parent_tag,	/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * TXP_MAXTXSEGS,	/* maxsize */
	    TXP_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.txp_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->sc_cdata.txp_parent_tag,	/* parent */
	    TXP_RXBUF_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.txp_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < TX_ENTRIES; i++) {
		txd = &sc->sc_txd[i];
		txd->sd_mbuf = NULL;
		txd->sd_map = NULL;
		error = bus_dmamap_create(sc->sc_cdata.txp_tx_tag, 0,
		    &txd->sd_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}

	/* Create DMA maps for Rx buffers. */
	for (i = 0; i < RXBUF_ENTRIES; i++) {
		sd = malloc(sizeof(struct txp_rx_swdesc), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (sd == NULL) {
			error = ENOMEM;
			goto fail;
		}
		/*
		 * The virtual address part of the descriptor is not
		 * used by the hardware, so use it to store a pointer
		 * to the software descriptor. The bcopy is required
		 * because the stored address would not otherwise be
		 * valid on big-endian architectures.
		 */
		rbd = sc->sc_rxbufs + i;
		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
		sd->sd_mbuf = NULL;
		sd->sd_map = NULL;
		error = bus_dmamap_create(sc->sc_cdata.txp_rx_tag, 0,
		    &sd->sd_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
	}

fail:
	return (error);
}

static void
txp_init_rings(struct txp_softc *sc)
{

	bzero(sc->sc_ldata.txp_hostvar, sizeof(struct txp_hostvar));
	bzero(sc->sc_ldata.txp_zero, sizeof(uint32_t));
	sc->sc_txhir.r_cons = 0;
	sc->sc_txhir.r_prod = 0;
	sc->sc_txhir.r_cnt = 0;
	sc->sc_txlor.r_cons = 0;
	sc->sc_txlor.r_prod = 0;
	sc->sc_txlor.r_cnt = 0;
	sc->sc_cmdring.lastwrite = 0;
	sc->sc_rspring.lastwrite = 0;
	sc->sc_rxbufprod = 0;
	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

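/*
 * Spin (up to TXP_TIMEOUT iterations, 50us apart) until the firmware
 * reports the given state in the ARM-to-host mailbox register.
 */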
static int
txp_wait(struct txp_softc *sc, uint32_t state)
{
	uint32_t reg;
	int i;

	for (i = 0; i < TXP_TIMEOUT; i++) {
		reg = READ_REG(sc, TXP_A2H_0);
		if (reg == state)
			break;
		DELAY(50);
	}

	return (i == TXP_TIMEOUT ? ETIMEDOUT : 0);
}

static void
txp_free_rings(struct txp_softc *sc)
{
	struct txp_swdesc *txd;
	struct txp_rx_swdesc *sd;
	int i;

	/* Tx buffers. */
	if (sc->sc_cdata.txp_tx_tag != NULL) {
		for (i = 0; i < TX_ENTRIES; i++) {
			txd = &sc->sc_txd[i];
			if (txd->sd_map != NULL) {
				bus_dmamap_destroy(sc->sc_cdata.txp_tx_tag,
				    txd->sd_map);
				txd->sd_map = NULL;
			}
		}
		bus_dma_tag_destroy(sc->sc_cdata.txp_tx_tag);
		sc->sc_cdata.txp_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->sc_cdata.txp_rx_tag != NULL) {
		if (sc->sc_rxbufs != NULL) {
			KASSERT(TAILQ_FIRST(&sc->sc_busy_list) == NULL,
			    ("%s : still have busy Rx buffers", __func__));
			while ((sd = TAILQ_FIRST(&sc->sc_free_list)) != NULL) {
				TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
				if (sd->sd_map != NULL) {
					bus_dmamap_destroy(
					    sc->sc_cdata.txp_rx_tag,
					    sd->sd_map);
					sd->sd_map = NULL;
				}
				free(sd, M_DEVBUF);
			}
		}
		bus_dma_tag_destroy(sc->sc_cdata.txp_rx_tag);
		sc->sc_cdata.txp_rx_tag = NULL;
	}

	/* Hi priority Tx ring. */
	txp_dma_free(sc, &sc->sc_cdata.txp_txhiring_tag,
	    sc->sc_cdata.txp_txhiring_map,
	    (void **)&sc->sc_ldata.txp_txhiring,
	    &sc->sc_ldata.txp_txhiring_paddr);
	/* Low priority Tx ring. */
	txp_dma_free(sc, &sc->sc_cdata.txp_txloring_tag,
	    sc->sc_cdata.txp_txloring_map,
	    (void **)&sc->sc_ldata.txp_txloring,
	    &sc->sc_ldata.txp_txloring_paddr);
	/* Hi priority Rx ring. */
	txp_dma_free(sc, &sc->sc_cdata.txp_rxhiring_tag,
	    sc->sc_cdata.txp_rxhiring_map,
	    (void **)&sc->sc_ldata.txp_rxhiring,
	    &sc->sc_ldata.txp_rxhiring_paddr);
	/* Low priority Rx ring. */
	txp_dma_free(sc, &sc->sc_cdata.txp_rxloring_tag,
	    sc->sc_cdata.txp_rxloring_map,
	    (void **)&sc->sc_ldata.txp_rxloring,
	    &sc->sc_ldata.txp_rxloring_paddr);
	/* Receive buffer ring. */
	txp_dma_free(sc, &sc->sc_cdata.txp_rxbufs_tag,
	    sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
	    &sc->sc_ldata.txp_rxbufs_paddr);
	/* Command ring. */
	txp_dma_free(sc, &sc->sc_cdata.txp_cmdring_tag,
	    sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
	    &sc->sc_ldata.txp_cmdring_paddr);
	/* Response ring. */
	txp_dma_free(sc, &sc->sc_cdata.txp_rspring_tag,
	    sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
	    &sc->sc_ldata.txp_rspring_paddr);
	/* Zero ring. */
	txp_dma_free(sc, &sc->sc_cdata.txp_zero_tag,
	    sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
	    &sc->sc_ldata.txp_zero_paddr);
	/* Host variables. */
	txp_dma_free(sc, &sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
	    &sc->sc_ldata.txp_hostvar_paddr);
	/* Boot record. */
	txp_dma_free(sc, &sc->sc_cdata.txp_boot_tag,
	    sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
	    &sc->sc_ldata.txp_boot_paddr);

	if (sc->sc_cdata.txp_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->sc_cdata.txp_parent_tag);
		sc->sc_cdata.txp_parent_tag = NULL;
	}
}
1703
1704static int
1705txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1706{
1707	struct txp_softc *sc = ifp->if_softc;
1708	struct ifreq *ifr = (struct ifreq *)data;
1709	int capenable, error = 0, mask;
1710
1711	switch (command) {
1712	case SIOCSIFFLAGS:
1713		TXP_LOCK(sc);
1714		if ((ifp->if_flags & IFF_UP) != 0) {
1715			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1716				if (((ifp->if_flags ^ sc->sc_if_flags)
1717				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1718					txp_set_filter(sc);
1719			} else {
1720				if ((sc->sc_flags & TXP_FLAG_DETACH) == 0)
1721					txp_init_locked(sc);
1722			}
1723		} else {
1724			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1725				txp_stop(sc);
1726		}
1727		sc->sc_if_flags = ifp->if_flags;
1728		TXP_UNLOCK(sc);
1729		break;
1730	case SIOCADDMULTI:
1731	case SIOCDELMULTI:
1732		/*
1733		 * Multicast list has changed; set the hardware
1734		 * filter accordingly.
1735		 */
1736		TXP_LOCK(sc);
1737		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1738			txp_set_filter(sc);
1739		TXP_UNLOCK(sc);
1740		break;
1741	case SIOCSIFCAP:
1742		TXP_LOCK(sc);
1743		capenable = ifp->if_capenable;
1744		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1745		if ((mask & IFCAP_TXCSUM) != 0 &&
1746		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
1747			ifp->if_capenable ^= IFCAP_TXCSUM;
1748			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1749				ifp->if_hwassist |= TXP_CSUM_FEATURES;
1750			else
1751				ifp->if_hwassist &= ~TXP_CSUM_FEATURES;
1752		}
1753		if ((mask & IFCAP_RXCSUM) != 0 &&
1754		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
1755			ifp->if_capenable ^= IFCAP_RXCSUM;
1756		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1757		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
1758			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1759		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1760		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0)
1761			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1762		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
1763		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
1764			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1765		if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
1766			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
1767		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
1768			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
1769		if (capenable != ifp->if_capenable)
1770			txp_set_capabilities(sc);
1771		TXP_UNLOCK(sc);
1772		VLAN_CAPABILITIES(ifp);
1773		break;
1774	case SIOCGIFMEDIA:
1775	case SIOCSIFMEDIA:
1776		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
1777		break;
1778	default:
1779		error = ether_ioctl(ifp, command, data);
1780		break;
1781	}
1782
1783	return (error);
1784}
1785
1786static int
1787txp_rxring_fill(struct txp_softc *sc)
1788{
1789	struct txp_rxbuf_desc *rbd;
1790	struct txp_rx_swdesc *sd;
1791	bus_dma_segment_t segs[1];
1792	int error, i, nsegs;
1793
1794	TXP_LOCK_ASSERT(sc);
1795
1796	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1797	    sc->sc_cdata.txp_rxbufs_map,
1798	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1799
1800	for (i = 0; i < RXBUF_ENTRIES; i++) {
1801		sd = TAILQ_FIRST(&sc->sc_free_list);
1802		if (sd == NULL)
1803			return (ENOMEM);
1804		rbd = sc->sc_rxbufs + i;
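		/*
		 * Stash the software descriptor pointer in the buffer
		 * descriptor's virtual address fields; the firmware hands
		 * it back in the Rx descriptor, so the receive path can
		 * recover this mbuf later.
		 */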
1805		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
1806		KASSERT(sd->sd_mbuf == NULL,
1807		    ("%s : Rx buffer ring corrupted", __func__));
1808		sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1809		if (sd->sd_mbuf == NULL)
1810			return (ENOMEM);
1811		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
1812#ifndef __NO_STRICT_ALIGNMENT
1813		m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
1814#endif
1815		if ((error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
1816		    sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0)) != 0) {
1817			m_freem(sd->sd_mbuf);
1818			sd->sd_mbuf = NULL;
1819			return (error);
1820		}
1821		KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
1822		    nsegs));
1823		TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1824		TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
1825		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1826		    BUS_DMASYNC_PREREAD);
1827		rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
1828		rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
1829	}
1830
1831	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1832	    sc->sc_cdata.txp_rxbufs_map,
1833	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
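	/* Tell the firmware how far the Rx buffer ring has been filled. */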
1834	sc->sc_rxbufprod = RXBUF_ENTRIES - 1;
1835	sc->sc_hostvar->hv_rx_buf_write_idx =
1836	    htole32(TXP_IDX2OFFSET(RXBUF_ENTRIES - 1));
1837
1838	return (0);
1839}
1840
1841static void
1842txp_rxring_empty(struct txp_softc *sc)
1843{
1844	struct txp_rx_swdesc *sd;
1845	int cnt;
1846
1847	TXP_LOCK_ASSERT(sc);
1848
1849	if (sc->sc_rxbufs == NULL)
1850		return;
1851	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1852	    sc->sc_cdata.txp_hostvar_map,
1853	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1854
1855	/* Release allocated Rx buffers. */
1856	cnt = 0;
1857	while ((sd = TAILQ_FIRST(&sc->sc_busy_list)) != NULL) {
1858		TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
1859		KASSERT(sd->sd_mbuf != NULL,
1860		    ("%s : Rx buffer ring corrupted", __func__));
1861		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1862		    BUS_DMASYNC_POSTREAD);
1863		bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
1864		m_freem(sd->sd_mbuf);
1865		sd->sd_mbuf = NULL;
1866		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
1867		cnt++;
1868	}
1869}
1870
1871static void
1872txp_init(void *xsc)
1873{
1874	struct txp_softc *sc;
1875
1876	sc = xsc;
1877	TXP_LOCK(sc);
1878	txp_init_locked(sc);
1879	TXP_UNLOCK(sc);
1880}
1881
1882static void
1883txp_init_locked(struct txp_softc *sc)
1884{
1885	struct ifnet *ifp;
1886	uint8_t *eaddr;
1887	uint16_t p1;
1888	uint32_t p2;
1889	int error;
1890
1891	TXP_LOCK_ASSERT(sc);
1892	ifp = sc->sc_ifp;
1893
1894	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1895		return;
1896
1897	/* Initialize ring structure. */
1898	txp_init_rings(sc);
1899	/* Wakeup controller. */
1900	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_WAKEUP);
1901	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
1902	/*
1903	 * It seems that earlier NV images can come back online from the
1904	 * wakeup command alone, but newer ones require a controller reset.
1905	 * So just reset the controller again.
1906	 */
1907	if (txp_reset(sc) != 0)
1908		goto init_fail;
1909	/* Download firmware. */
1910	error = txp_download_fw(sc);
1911	if (error != 0) {
1912		device_printf(sc->sc_dev, "could not download firmware.\n");
1913		goto init_fail;
1914	}
1915	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1916	    sc->sc_cdata.txp_hostvar_map,
1917	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1918	if ((error = txp_rxring_fill(sc)) != 0) {
1919		device_printf(sc->sc_dev, "no memory for Rx buffers.\n");
1920		goto init_fail;
1921	}
1922	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1923	    sc->sc_cdata.txp_hostvar_map,
1924	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1925	if (txp_boot(sc, STAT_WAITING_FOR_BOOT) != 0) {
1926		device_printf(sc->sc_dev, "could not boot firmware.\n");
1927		goto init_fail;
1928	}
1929
1930	/*
1931	 * Quite contrary to the Typhoon T2 software functional specification,
1932	 * it seems that the TXP_CMD_RECV_BUFFER_CONTROL command is not
1933	 * implemented in the firmware. This means the driver has to handle
1934	 * misaligned frames on strict-alignment architectures. AFAIK this
1935	 * is the only controller manufactured by 3Com that has this stupid
1936	 * bug. 3Com should fix this.
1937	 */
1938	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
1939	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1940		goto init_fail;
1941	/* Undocumented command (interrupt coalescing disable?) - from Linux. */
1942	if (txp_command(sc, TXP_CMD_FILTER_DEFINE, 0, 0, 0, NULL, NULL, NULL,
1943	    TXP_CMD_NOWAIT) != 0)
1944		goto init_fail;
1945
1946	/* Set station address. */
1947	eaddr = IF_LLADDR(sc->sc_ifp);
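	/*
	 * The station address travels as two command parameters: the
	 * first two octets in the 16-bit parameter and the remaining
	 * four in the 32-bit one, assembled in the byte order the
	 * firmware appears to expect.
	 */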
1948	p1 = 0;
1949	((uint8_t *)&p1)[1] = eaddr[0];
1950	((uint8_t *)&p1)[0] = eaddr[1];
1951	p1 = le16toh(p1);
1952	((uint8_t *)&p2)[3] = eaddr[2];
1953	((uint8_t *)&p2)[2] = eaddr[3];
1954	((uint8_t *)&p2)[1] = eaddr[4];
1955	((uint8_t *)&p2)[0] = eaddr[5];
1956	p2 = le32toh(p2);
1957	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0,
1958	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1959		goto init_fail;
1960
1961	txp_set_filter(sc);
1962	txp_set_capabilities(sc);
1963
1964	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
1965	    NULL, NULL, NULL, TXP_CMD_NOWAIT))
1966		goto init_fail;
1967	if (txp_command(sc, TXP_CMD_XCVR_SELECT, sc->sc_xcvr, 0, 0,
1968	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1969		goto init_fail;
1970	if (txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
1971	    TXP_CMD_NOWAIT) != 0)
1972		goto init_fail;
1973	if (txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
1974	    TXP_CMD_NOWAIT) != 0)
1975		goto init_fail;
1976
1977	/* Ack all pending interrupts and enable interrupts. */
1978	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
1979	WRITE_REG(sc, TXP_IER, TXP_INTRS);
1980	WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
1981
1982	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1983	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1984
1985	callout_reset(&sc->sc_tick, hz, txp_tick, sc);
1986	return;
1987
1988init_fail:
1989	txp_rxring_empty(sc);
1990	txp_init_rings(sc);
1991	txp_reset(sc);
1992	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
1993}
1994
1995static void
1996txp_tick(void *vsc)
1997{
1998	struct txp_softc *sc;
1999	struct ifnet *ifp;
2000	struct txp_rsp_desc *rsp;
2001	struct txp_ext_desc *ext;
2002	int link;
2003
2004	sc = vsc;
2005	TXP_LOCK_ASSERT(sc);
2006	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2007	    sc->sc_cdata.txp_hostvar_map,
2008	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2009	txp_rxbuf_reclaim(sc);
2010	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2011	    sc->sc_cdata.txp_hostvar_map,
2012	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2013
2014	ifp = sc->sc_ifp;
2015	rsp = NULL;
2016
2017	link = sc->sc_flags & TXP_FLAG_LINK;
2018	if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
2019	    &rsp, TXP_CMD_WAIT))
2020		goto out;
2021	if (rsp->rsp_numdesc != 6)
2022		goto out;
2023	txp_stats_update(sc, rsp);
2024	if (link == 0 && (sc->sc_flags & TXP_FLAG_LINK) != 0) {
2025		ext = (struct txp_ext_desc *)(rsp + 1);
2026		/* Update baudrate with resolved speed. */
2027		if ((ext[5].ext_2 & 0x02) != 0)
2028			ifp->if_baudrate = IF_Mbps(100);
2029		else
2030			ifp->if_baudrate = IF_Mbps(10);
2031	}
2032
2033out:
2034	if (rsp != NULL)
2035		free(rsp, M_DEVBUF);
2036	txp_watchdog(sc);
2037	callout_reset(&sc->sc_tick, hz, txp_tick, sc);
2038}
2039
2040static void
2041txp_start(struct ifnet *ifp)
2042{
2043	struct txp_softc *sc;
2044
2045	sc = ifp->if_softc;
2046	TXP_LOCK(sc);
2047	txp_start_locked(ifp);
2048	TXP_UNLOCK(sc);
2049}
2050
2051static void
2052txp_start_locked(struct ifnet *ifp)
2053{
2054	struct txp_softc *sc;
2055	struct mbuf *m_head;
2056	int enq;
2057
2058	sc = ifp->if_softc;
2059	TXP_LOCK_ASSERT(sc);
2060
2061	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2062	   IFF_DRV_RUNNING || (sc->sc_flags & TXP_FLAG_LINK) == 0)
2063		return;
2064
2065	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
2066		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2067		if (m_head == NULL)
2068			break;
2069		/*
2070		 * Pack the data into the transmit ring. If we
2071		 * don't have room, set the OACTIVE flag and wait
2072		 * for the NIC to drain the ring.
2073		 * At the moment only the Hi priority ring is used.
2074		 */
2075		if (txp_encap(sc, &sc->sc_txhir, &m_head)) {
2076			if (m_head == NULL)
2077				break;
2078			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2079			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2080			break;
2081		}
2082
2083		/*
2084		 * If there's a BPF listener, bounce a copy of this frame
2085		 * to him.
2086		 */
2087		ETHER_BPF_MTAP(ifp, m_head);
2088
2089		/* Send queued frame. */
2090		WRITE_REG(sc, sc->sc_txhir.r_reg,
2091		    TXP_IDX2OFFSET(sc->sc_txhir.r_prod));
		enq++;
2092	}
2093
2094	if (enq > 0) {
2095		/* Set a timeout in case the chip goes out to lunch. */
2096		sc->sc_watchdog_timer = TXP_TX_TIMEOUT;
2097	}
2098}
2099
2100static int
2101txp_encap(struct txp_softc *sc, struct txp_tx_ring *r, struct mbuf **m_head)
2102{
2103	struct txp_tx_desc *first_txd;
2104	struct txp_frag_desc *fxd;
2105	struct txp_swdesc *sd;
2106	struct mbuf *m;
2107	bus_dma_segment_t txsegs[TXP_MAXTXSEGS];
2108	int error, i, nsegs;
2109
2110	TXP_LOCK_ASSERT(sc);
2111
2112	M_ASSERTPKTHDR((*m_head));
2113
2114	m = *m_head;
2115	first_txd = r->r_desc + r->r_prod;
2116	sd = sc->sc_txd + r->r_prod;
2117
2118	error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag, sd->sd_map,
2119	    *m_head, txsegs, &nsegs, 0);
2120	if (error == EFBIG) {
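		/*
		 * The chain has more fragments than we can map; try to
		 * collapse it into at most TXP_MAXTXSEGS segments and
		 * reload the map.
		 */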
2121		m = m_collapse(*m_head, M_NOWAIT, TXP_MAXTXSEGS);
2122		if (m == NULL) {
2123			m_freem(*m_head);
2124			*m_head = NULL;
2125			return (ENOMEM);
2126		}
2127		*m_head = m;
2128		error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag,
2129		    sd->sd_map, *m_head, txsegs, &nsegs, 0);
2130		if (error != 0) {
2131			m_freem(*m_head);
2132			*m_head = NULL;
2133			return (error);
2134		}
2135	} else if (error != 0)
2136		return (error);
2137	if (nsegs == 0) {
2138		m_freem(*m_head);
2139		*m_head = NULL;
2140		return (EIO);
2141	}
2142
2143	/* Check descriptor overrun. */
2144	if (r->r_cnt + nsegs >= TX_ENTRIES - TXP_TXD_RESERVED) {
2145		bus_dmamap_unload(sc->sc_cdata.txp_tx_tag, sd->sd_map);
2146		return (ENOBUFS);
2147	}
2148	bus_dmamap_sync(sc->sc_cdata.txp_tx_tag, sd->sd_map,
2149	    BUS_DMASYNC_PREWRITE);
2150	sd->sd_mbuf = m;
2151
2152	first_txd->tx_flags = TX_FLAGS_TYPE_DATA;
2153	first_txd->tx_numdesc = 0;
2154	first_txd->tx_addrlo = 0;
2155	first_txd->tx_addrhi = 0;
2156	first_txd->tx_totlen = 0;
2157	first_txd->tx_pflags = 0;
2158	r->r_cnt++;
2159	TXP_DESC_INC(r->r_prod, TX_ENTRIES);
2160
2161	/* Configure Tx IP/TCP/UDP checksum offload. */
2162	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2163		first_txd->tx_pflags |= htole32(TX_PFLAGS_IPCKSUM);
2164#ifdef notyet
2165	/* XXX firmware bug. */
2166	if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2167		first_txd->tx_pflags |= htole32(TX_PFLAGS_TCPCKSUM);
2168	if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2169		first_txd->tx_pflags |= htole32(TX_PFLAGS_UDPCKSUM);
2170#endif
2171
2172	/* Configure VLAN hardware tag insertion. */
2173	if ((m->m_flags & M_VLANTAG) != 0)
2174		first_txd->tx_pflags |=
2175		    htole32(TX_PFLAGS_VLAN | TX_PFLAGS_PRIO |
2176		    (bswap16(m->m_pkthdr.ether_vtag) << TX_PFLAGS_VLANTAG_S));
2177
2178	for (i = 0; i < nsegs; i++) {
2179		fxd = (struct txp_frag_desc *)(r->r_desc + r->r_prod);
2180		fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG | TX_FLAGS_VALID;
2181		fxd->frag_rsvd1 = 0;
2182		fxd->frag_len = htole16(txsegs[i].ds_len);
2183		fxd->frag_addrhi = htole32(TXP_ADDR_HI(txsegs[i].ds_addr));
2184		fxd->frag_addrlo = htole32(TXP_ADDR_LO(txsegs[i].ds_addr));
2185		fxd->frag_rsvd2 = 0;
2186		first_txd->tx_numdesc++;
2187		r->r_cnt++;
2188		TXP_DESC_INC(r->r_prod, TX_ENTRIES);
2189	}
2190
2191	/* Lastly set valid flag. */
2192	first_txd->tx_flags |= TX_FLAGS_VALID;
2193
2194	/* Sync descriptors. */
2195	bus_dmamap_sync(r->r_tag, r->r_map,
2196	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2197
2198	return (0);
2199}
2200
2201/*
2202 * Handle simple commands sent to the Typhoon.
2203 */
2204static int
2205txp_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
2206    uint32_t in3, uint16_t *out1, uint32_t *out2, uint32_t *out3, int wait)
2207{
2208	struct txp_rsp_desc *rsp;
2209
2210	rsp = NULL;
2211	if (txp_ext_command(sc, id, in1, in2, in3, NULL, 0, &rsp, wait) != 0) {
2212		device_printf(sc->sc_dev, "command 0x%02x failed\n", id);
2213		return (-1);
2214	}
2215
2216	if (wait == TXP_CMD_NOWAIT)
2217		return (0);
2218
2219	KASSERT(rsp != NULL, ("rsp is NULL!\n"));
2220	if (out1 != NULL)
2221		*out1 = le16toh(rsp->rsp_par1);
2222	if (out2 != NULL)
2223		*out2 = le32toh(rsp->rsp_par2);
2224	if (out3 != NULL)
2225		*out3 = le32toh(rsp->rsp_par3);
2226	free(rsp, M_DEVBUF);
2227	return (0);
2228}
2229
2230static int
2231txp_ext_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
2232    uint32_t in3, struct txp_ext_desc *in_extp, uint8_t in_extn,
2233    struct txp_rsp_desc **rspp, int wait)
2234{
2235	struct txp_hostvar *hv;
2236	struct txp_cmd_desc *cmd;
2237	struct txp_ext_desc *ext;
2238	uint32_t idx, i;
2239	uint16_t seq;
2240	int error;
2241
2242	error = 0;
2243	hv = sc->sc_hostvar;
2244	if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
2245		device_printf(sc->sc_dev,
2246		    "%s : out of free cmd descriptors for command 0x%02x\n",
2247		    __func__, id);
2248		return (ENOBUFS);
2249	}
2250
2251	bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
2252	    sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_POSTWRITE);
2253	idx = sc->sc_cmdring.lastwrite;
2254	cmd = (struct txp_cmd_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
2255	bzero(cmd, sizeof(*cmd));
2256
2257	cmd->cmd_numdesc = in_extn;
2258	seq = sc->sc_seq++;
2259	cmd->cmd_seq = htole16(seq);
2260	cmd->cmd_id = htole16(id);
2261	cmd->cmd_par1 = htole16(in1);
2262	cmd->cmd_par2 = htole32(in2);
2263	cmd->cmd_par3 = htole32(in3);
2264	cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
2265	    (wait == TXP_CMD_WAIT ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;
2266
2267	idx += sizeof(struct txp_cmd_desc);
2268	if (idx == sc->sc_cmdring.size)
2269		idx = 0;
2270
2271	for (i = 0; i < in_extn; i++) {
2272		ext = (struct txp_ext_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
2273		bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
2274		in_extp++;
2275		idx += sizeof(struct txp_cmd_desc);
2276		if (idx == sc->sc_cmdring.size)
2277			idx = 0;
2278	}
2279
2280	sc->sc_cmdring.lastwrite = idx;
2281	bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
2282	    sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_PREWRITE);
2283	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2284	    sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_PREREAD |
2285	    BUS_DMASYNC_PREWRITE);
2286	WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
2287	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
2288
2289	if (wait == TXP_CMD_NOWAIT)
2290		return (0);
2291
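	/*
	 * Poll the host variables for a response ring update and let
	 * txp_response() match the response against the command id and
	 * the sequence number assigned above.
	 */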
2292	for (i = 0; i < TXP_TIMEOUT; i++) {
2293		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2294		    sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_POSTREAD |
2295		    BUS_DMASYNC_POSTWRITE);
2296		if (le32toh(hv->hv_resp_read_idx) !=
2297		    le32toh(hv->hv_resp_write_idx)) {
2298			error = txp_response(sc, id, seq, rspp);
2299			bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2300			    sc->sc_cdata.txp_hostvar_map,
2301			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2302			if (error != 0)
2303				return (error);
2304			if (*rspp != NULL)
2305				break;
2306		}
2307		DELAY(50);
2308	}
2309	if (i == TXP_TIMEOUT) {
2310		device_printf(sc->sc_dev, "command 0x%02x timed out\n", id);
2311		error = ETIMEDOUT;
2312	}
2313
2314	return (error);
2315}
2316
2317static int
2318txp_response(struct txp_softc *sc, uint16_t id, uint16_t seq,
2319    struct txp_rsp_desc **rspp)
2320{
2321	struct txp_hostvar *hv;
2322	struct txp_rsp_desc *rsp;
2323	uint32_t ridx;
2324
2325	bus_dmamap_sync(sc->sc_cdata.txp_rspring_tag,
2326	    sc->sc_cdata.txp_rspring_map, BUS_DMASYNC_POSTREAD);
2327	hv = sc->sc_hostvar;
2328	ridx = le32toh(hv->hv_resp_read_idx);
2329	while (ridx != le32toh(hv->hv_resp_write_idx)) {
2330		rsp = (struct txp_rsp_desc *)(((uint8_t *)sc->sc_rspring.base) + ridx);
2331
2332		if (id == le16toh(rsp->rsp_id) &&
2333		    le16toh(rsp->rsp_seq) == seq) {
2334			*rspp = (struct txp_rsp_desc *)malloc(
2335			    sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
2336			    M_DEVBUF, M_NOWAIT);
2337			if (*rspp == NULL) {
2338				device_printf(sc->sc_dev, "%s : command 0x%02x "
2339				    "memory allocation failure\n",
2340				    __func__, id);
2341				return (ENOMEM);
2342			}
2343			txp_rsp_fixup(sc, rsp, *rspp);
2344			return (0);
2345		}
2346
2347		if ((rsp->rsp_flags & RSP_FLAGS_ERROR) != 0) {
2348			device_printf(sc->sc_dev,
2349			    "%s : command 0x%02x response error!\n", __func__,
2350			    le16toh(rsp->rsp_id));
2351			txp_rsp_fixup(sc, rsp, NULL);
2352			ridx = le32toh(hv->hv_resp_read_idx);
2353			continue;
2354		}
2355
2356		/*
2357		 * The following unsolicited responses are handled during
2358		 * processing of TXP_CMD_READ_STATISTICS, which requires a
2359		 * response. The driver abuses that command to detect media
2360		 * status changes.
2361		 * TXP_CMD_FILTER_DEFINE is not an unsolicited response, but
2362		 * we don't process the response ring in the interrupt handler,
2363		 * so the command has to be ignored here; otherwise an unknown
2364		 * command message would be printed.
2365		 */
2366		switch (le16toh(rsp->rsp_id)) {
2367		case TXP_CMD_CYCLE_STATISTICS:
2368		case TXP_CMD_FILTER_DEFINE:
2369			break;
2370		case TXP_CMD_MEDIA_STATUS_READ:
2371			if ((le16toh(rsp->rsp_par1) & 0x0800) == 0) {
2372				sc->sc_flags |= TXP_FLAG_LINK;
2373				if_link_state_change(sc->sc_ifp,
2374				    LINK_STATE_UP);
2375			} else {
2376				sc->sc_flags &= ~TXP_FLAG_LINK;
2377				if_link_state_change(sc->sc_ifp,
2378				    LINK_STATE_DOWN);
2379			}
2380			break;
2381		case TXP_CMD_HELLO_RESPONSE:
2382			/*
2383			 * The driver should respond to the hello message, but
2384			 * TXP_CMD_READ_STATISTICS is already issued every hz
2385			 * tick, so there is no need to send an explicit
2386			 * command here.
2387			 */
2388			device_printf(sc->sc_dev, "%s : hello\n", __func__);
2389			break;
2390		default:
2391			device_printf(sc->sc_dev,
2392			    "%s : unknown command 0x%02x\n", __func__,
2393			    le16toh(rsp->rsp_id));
2394		}
2395		txp_rsp_fixup(sc, rsp, NULL);
2396		ridx = le32toh(hv->hv_resp_read_idx);
2397	}
2398
2399	return (0);
2400}
2401
2402static void
2403txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
2404    struct txp_rsp_desc *dst)
2405{
2406	struct txp_rsp_desc *src;
2407	struct txp_hostvar *hv;
2408	uint32_t i, ridx;
2409
2410	src = rsp;
2411	hv = sc->sc_hostvar;
2412	ridx = le32toh(hv->hv_resp_read_idx);
2413
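	/*
	 * Copy the response and any trailing extension descriptors out
	 * of the ring, wrapping at the ring end, then advance the
	 * host's read index past the consumed entries.
	 */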
2414	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
2415		if (dst != NULL)
2416			bcopy(src, dst++, sizeof(struct txp_rsp_desc));
2417		ridx += sizeof(struct txp_rsp_desc);
2418		if (ridx == sc->sc_rspring.size) {
2419			src = sc->sc_rspring.base;
2420			ridx = 0;
2421		} else
2422			src++;
2423		sc->sc_rspring.lastwrite = ridx;
2424	}
2425
2426	hv->hv_resp_read_idx = htole32(ridx);
2427}
2428
2429static int
2430txp_cmd_desc_numfree(struct txp_softc *sc)
2431{
2432	struct txp_hostvar *hv;
2433	struct txp_boot_record *br;
2434	uint32_t widx, ridx, nfree;
2435
2436	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2437	    sc->sc_cdata.txp_hostvar_map,
2438	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2439	hv = sc->sc_hostvar;
2440	br = sc->sc_boot;
2441	widx = sc->sc_cmdring.lastwrite;
2442	ridx = le32toh(hv->hv_cmd_read_idx);
2443
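	/*
	 * Compute the free space in the command ring, always keeping
	 * one descriptor slot in reserve so a completely full ring is
	 * never mistaken for an empty one.
	 */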
2444	if (widx == ridx) {
2445		/* Ring is completely free */
2446		nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
2447	} else {
2448		if (widx > ridx)
2449			nfree = le32toh(br->br_cmd_siz) -
2450			    (widx - ridx + sizeof(struct txp_cmd_desc));
2451		else
2452			nfree = ridx - widx - sizeof(struct txp_cmd_desc);
2453	}
2454
2455	return (nfree / sizeof(struct txp_cmd_desc));
2456}
2457
2458static int
2459txp_sleep(struct txp_softc *sc, int capenable)
2460{
2461	uint16_t events;
2462	int error;
2463
2464	events = 0;
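	/* Bit 0 of the wakeup events mask apparently selects magic packets. */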
2465	if ((capenable & IFCAP_WOL_MAGIC) != 0)
2466		events |= 0x01;
2467	error = txp_command(sc, TXP_CMD_ENABLE_WAKEUP_EVENTS, events, 0, 0,
2468	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
2469	if (error == 0) {
2470		/* Go to sleep. */
2471		error = txp_command(sc, TXP_CMD_GOTO_SLEEP, 0, 0, 0, NULL,
2472		    NULL, NULL, TXP_CMD_NOWAIT);
2473		if (error == 0) {
2474			error = txp_wait(sc, STAT_SLEEPING);
2475			if (error != 0)
2476				device_printf(sc->sc_dev,
2477				    "unable to enter sleep state\n");
2478		}
2479	}
2480
2481	return (error);
2482}
2483
2484static void
2485txp_stop(struct txp_softc *sc)
2486{
2487	struct ifnet *ifp;
2488
2489	TXP_LOCK_ASSERT(sc);
2490	ifp = sc->sc_ifp;
2491
2492	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2493		return;
2494
2495	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
2496	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
2497
2498	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2499	sc->sc_flags &= ~TXP_FLAG_LINK;
2500
2501	callout_stop(&sc->sc_tick);
2502
2503	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
2504	    TXP_CMD_NOWAIT);
2505	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
2506	    TXP_CMD_NOWAIT);
2507	/* Save statistics for later use. */
2508	txp_stats_save(sc);
2509	/* Halt controller. */
2510	txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL,
2511	    TXP_CMD_NOWAIT);
2512
2513	if (txp_wait(sc, STAT_HALTED) != 0)
2514		device_printf(sc->sc_dev, "controller halt timed out!\n");
2515	/* Reclaim Tx/Rx buffers. */
2516	if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
2517	    TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
2518		txp_tx_reclaim(sc, &sc->sc_txhir);
2519	if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
2520	    TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
2521		txp_tx_reclaim(sc, &sc->sc_txlor);
2522	txp_rxring_empty(sc);
2523
2524	txp_init_rings(sc);
2525	/* Reset controller and make it reload sleep image. */
2526	txp_reset(sc);
2527	/* Let controller boot from sleep image. */
2528	if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
2529		device_printf(sc->sc_dev, "could not boot sleep image\n");
2530	txp_sleep(sc, 0);
2531}
2532
2533static void
2534txp_watchdog(struct txp_softc *sc)
2535{
2536	struct ifnet *ifp;
2537
2538	TXP_LOCK_ASSERT(sc);
2539
2540	if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
2541		return;
2542
2543	ifp = sc->sc_ifp;
2544	if_printf(ifp, "watchdog timeout -- resetting\n");
2545	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2546	txp_stop(sc);
2547	txp_init_locked(sc);
2548}
2549
2550static int
2551txp_ifmedia_upd(struct ifnet *ifp)
2552{
2553	struct txp_softc *sc = ifp->if_softc;
2554	struct ifmedia *ifm = &sc->sc_ifmedia;
2555	uint16_t new_xcvr;
2556
2557	TXP_LOCK(sc);
2558	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
2559		TXP_UNLOCK(sc);
2560		return (EINVAL);
2561	}
2562
2563	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
2564		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2565			new_xcvr = TXP_XCVR_10_FDX;
2566		else
2567			new_xcvr = TXP_XCVR_10_HDX;
2568	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
2569		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2570			new_xcvr = TXP_XCVR_100_FDX;
2571		else
2572			new_xcvr = TXP_XCVR_100_HDX;
2573	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
2574		new_xcvr = TXP_XCVR_AUTO;
2575	} else {
2576		TXP_UNLOCK(sc);
2577		return (EINVAL);
2578	}
2579
2580	/* nothing to do */
2581	if (sc->sc_xcvr == new_xcvr) {
2582		TXP_UNLOCK(sc);
2583		return (0);
2584	}
2585
2586	txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
2587	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
2588	sc->sc_xcvr = new_xcvr;
2589	TXP_UNLOCK(sc);
2590
2591	return (0);
2592}
2593
2594static void
2595txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2596{
2597	struct txp_softc *sc = ifp->if_softc;
2598	struct ifmedia *ifm = &sc->sc_ifmedia;
2599	uint16_t bmsr, bmcr, anar, anlpar;
2600
2601	ifmr->ifm_status = IFM_AVALID;
2602	ifmr->ifm_active = IFM_ETHER;
2603
2604	TXP_LOCK(sc);
2605	/* Check whether firmware is running. */
2606	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2607		goto bail;
2608	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
2609	    &bmsr, NULL, NULL, TXP_CMD_WAIT))
2610		goto bail;
2611	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
2612	    &bmsr, NULL, NULL, TXP_CMD_WAIT))
2613		goto bail;
2614
2615	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
2616	    &bmcr, NULL, NULL, TXP_CMD_WAIT))
2617		goto bail;
2618
2619	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
2620	    &anlpar, NULL, NULL, TXP_CMD_WAIT))
2621		goto bail;
2622
2623	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANAR, 0,
2624	    &anar, NULL, NULL, TXP_CMD_WAIT))
2625		goto bail;
2626	TXP_UNLOCK(sc);
2627
2628	if (bmsr & BMSR_LINK)
2629		ifmr->ifm_status |= IFM_ACTIVE;
2630
2631	if (bmcr & BMCR_ISO) {
2632		ifmr->ifm_active |= IFM_NONE;
2633		ifmr->ifm_status = 0;
2634		return;
2635	}
2636
2637	if (bmcr & BMCR_LOOP)
2638		ifmr->ifm_active |= IFM_LOOP;
2639
2640	if (bmcr & BMCR_AUTOEN) {
2641		if ((bmsr & BMSR_ACOMP) == 0) {
2642			ifmr->ifm_active |= IFM_NONE;
2643			return;
2644		}
2645
2646		anlpar &= anar;
2647		if (anlpar & ANLPAR_TX_FD)
2648			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
2649		else if (anlpar & ANLPAR_T4)
2650			ifmr->ifm_active |= IFM_100_T4;
2651		else if (anlpar & ANLPAR_TX)
2652			ifmr->ifm_active |= IFM_100_TX;
2653		else if (anlpar & ANLPAR_10_FD)
2654			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
2655		else if (anlpar & ANLPAR_10)
2656			ifmr->ifm_active |= IFM_10_T;
2657		else
2658			ifmr->ifm_active |= IFM_NONE;
2659	} else
2660		ifmr->ifm_active = ifm->ifm_cur->ifm_media;
2661	return;
2662
2663bail:
2664	TXP_UNLOCK(sc);
2665	ifmr->ifm_active |= IFM_NONE;
2666	ifmr->ifm_status &= ~IFM_AVALID;
2667}
2668
2669#ifdef TXP_DEBUG
2670static void
2671txp_show_descriptor(void *d)
2672{
2673	struct txp_cmd_desc *cmd = d;
2674	struct txp_rsp_desc *rsp = d;
2675	struct txp_tx_desc *txd = d;
2676	struct txp_frag_desc *frgd = d;
2677
2678	switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
2679	case CMD_FLAGS_TYPE_CMD:
2680		/* command descriptor */
2681		printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2682		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
2683		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
2684		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
2685		break;
2686	case CMD_FLAGS_TYPE_RESP:
2687		/* response descriptor */
2688		printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2689		    rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id),
2690		    le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1),
2691		    le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3));
2692		break;
2693	case CMD_FLAGS_TYPE_DATA:
2694		/* data header (assuming tx for now) */
2695		printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
2696		    txd->tx_flags, txd->tx_numdesc, le16toh(txd->tx_totlen),
2697		    le32toh(txd->tx_addrlo), le32toh(txd->tx_addrhi),
2698		    le32toh(txd->tx_pflags));
2699		break;
2700	case CMD_FLAGS_TYPE_FRAG:
2701		/* fragment descriptor */
2702		printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
2703		    frgd->frag_flags, frgd->frag_rsvd1, le16toh(frgd->frag_len),
2704		    le32toh(frgd->frag_addrlo), le32toh(frgd->frag_addrhi),
2705		    le32toh(frgd->frag_rsvd2));
2706		break;
2707	default:
2708		printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2709		    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
2710		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
2711		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
2712		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
2713		break;
2714	}
2715}
2716#endif
2717
2718static void
2719txp_set_filter(struct txp_softc *sc)
2720{
2721	struct ifnet *ifp;
2722	uint32_t crc, mchash[2];
2723	uint16_t filter;
2724	struct ifmultiaddr *ifma;
2725	int mcnt;
2726
2727	TXP_LOCK_ASSERT(sc);
2728
2729	ifp = sc->sc_ifp;
2730	filter = TXP_RXFILT_DIRECT;
2731	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2732		filter |= TXP_RXFILT_BROADCAST;
2733	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2734		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2735			filter |= TXP_RXFILT_ALLMULTI;
2736		if ((ifp->if_flags & IFF_PROMISC) != 0)
2737			filter = TXP_RXFILT_PROMISC;
2738		goto setit;
2739	}
2740
2741	mchash[0] = mchash[1] = 0;
2742	mcnt = 0;
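	/*
	 * Build the 64-bit multicast hash: the low 6 bits of the
	 * big-endian CRC of each address select one bit across the
	 * two 32-bit hash words.
	 */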
2743	if_maddr_rlock(ifp);
2744	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2745		if (ifma->ifma_addr->sa_family != AF_LINK)
2746			continue;
2747		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2748		    ifma->ifma_addr), ETHER_ADDR_LEN);
2749		crc &= 0x3f;
2750		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2751		mcnt++;
2752	}
2753	if_maddr_runlock(ifp);
2754
2755	if (mcnt > 0) {
2756		filter |= TXP_RXFILT_HASHMULTI;
2757		txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE, 2, mchash[0],
2758		    mchash[1], NULL, NULL, NULL, TXP_CMD_NOWAIT);
2759	}
2760
2761setit:
2762	txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
2763	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
2764}
2765
2766static int
2767txp_set_capabilities(struct txp_softc *sc)
2768{
2769	struct ifnet *ifp;
2770	uint32_t rxcap, txcap;
2771
2772	TXP_LOCK_ASSERT(sc);
2773
2774	rxcap = txcap = 0;
2775	ifp = sc->sc_ifp;
2776	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) {
2777		if ((ifp->if_hwassist & CSUM_IP) != 0)
2778			txcap |= OFFLOAD_IPCKSUM;
2779		if ((ifp->if_hwassist & CSUM_TCP) != 0)
2780			txcap |= OFFLOAD_TCPCKSUM;
2781		if ((ifp->if_hwassist & CSUM_UDP) != 0)
2782			txcap |= OFFLOAD_UDPCKSUM;
2783		rxcap = txcap;
2784	}
2785	if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
2786		rxcap &= ~(OFFLOAD_IPCKSUM | OFFLOAD_TCPCKSUM |
2787		    OFFLOAD_UDPCKSUM);
2788	if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2789		rxcap |= OFFLOAD_VLAN;
2790		txcap |= OFFLOAD_VLAN;
2791	}
2792
2793	/* Tell firmware new offload configuration. */
2794	return (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, txcap, rxcap, NULL,
2795	    NULL, NULL, TXP_CMD_NOWAIT));
2796}
2797
2798static void
2799txp_stats_save(struct txp_softc *sc)
2800{
2801	struct txp_rsp_desc *rsp;
2802
2803	TXP_LOCK_ASSERT(sc);
2804
2805	rsp = NULL;
2806	if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
2807	    &rsp, TXP_CMD_WAIT))
2808		goto out;
2809	if (rsp->rsp_numdesc != 6)
2810		goto out;
2811	txp_stats_update(sc, rsp);
2812out:
2813	if (rsp != NULL)
2814		free(rsp, M_DEVBUF);
2815	bcopy(&sc->sc_stats, &sc->sc_ostats, sizeof(struct txp_hw_stats));
2816}
2817
2818static void
2819txp_stats_update(struct txp_softc *sc, struct txp_rsp_desc *rsp)
2820{
2821	struct txp_hw_stats *ostats, *stats;
2822	struct txp_ext_desc *ext;
2823
2824	TXP_LOCK_ASSERT(sc);
2825
2826	ext = (struct txp_ext_desc *)(rsp + 1);
2827	ostats = &sc->sc_ostats;
2828	stats = &sc->sc_stats;
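	/*
	 * The firmware counters restart from zero when the controller
	 * is reset, so add each reading to the totals that
	 * txp_stats_save() preserved in sc_ostats.
	 */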
2829	stats->tx_frames = ostats->tx_frames + le32toh(rsp->rsp_par2);
2830	stats->tx_bytes = ostats->tx_bytes + (uint64_t)le32toh(rsp->rsp_par3) +
2831	    ((uint64_t)le32toh(ext[0].ext_1) << 32);
2832	stats->tx_deferred = ostats->tx_deferred + le32toh(ext[0].ext_2);
2833	stats->tx_late_colls = ostats->tx_late_colls + le32toh(ext[0].ext_3);
2834	stats->tx_colls = ostats->tx_colls + le32toh(ext[0].ext_4);
2835	stats->tx_carrier_lost = ostats->tx_carrier_lost +
2836	    le32toh(ext[1].ext_1);
2837	stats->tx_multi_colls = ostats->tx_multi_colls +
2838	    le32toh(ext[1].ext_2);
2839	stats->tx_excess_colls = ostats->tx_excess_colls +
2840	    le32toh(ext[1].ext_3);
2841	stats->tx_fifo_underruns = ostats->tx_fifo_underruns +
2842	    le32toh(ext[1].ext_4);
2843	stats->tx_mcast_oflows = ostats->tx_mcast_oflows +
2844	    le32toh(ext[2].ext_1);
2845	stats->tx_filtered = ostats->tx_filtered + le32toh(ext[2].ext_2);
2846	stats->rx_frames = ostats->rx_frames + le32toh(ext[2].ext_3);
2847	stats->rx_bytes = ostats->rx_bytes + (uint64_t)le32toh(ext[2].ext_4) +
2848	    ((uint64_t)le32toh(ext[3].ext_1) << 32);
2849	stats->rx_fifo_oflows = ostats->rx_fifo_oflows + le32toh(ext[3].ext_2);
2850	stats->rx_badssd = ostats->rx_badssd + le32toh(ext[3].ext_3);
2851	stats->rx_crcerrs = ostats->rx_crcerrs + le32toh(ext[3].ext_4);
2852	stats->rx_lenerrs = ostats->rx_lenerrs + le32toh(ext[4].ext_1);
2853	stats->rx_bcast_frames = ostats->rx_bcast_frames +
2854	    le32toh(ext[4].ext_2);
2855	stats->rx_mcast_frames = ostats->rx_mcast_frames +
2856	    le32toh(ext[4].ext_3);
2857	stats->rx_oflows = ostats->rx_oflows + le32toh(ext[4].ext_4);
2858	stats->rx_filtered = ostats->rx_filtered + le32toh(ext[5].ext_1);
2859}
2860
2861static uint64_t
2862txp_get_counter(struct ifnet *ifp, ift_counter cnt)
2863{
2864	struct txp_softc *sc;
2865	struct txp_hw_stats *stats;
2866
2867	sc = if_getsoftc(ifp);
2868	stats = &sc->sc_stats;
2869
2870	switch (cnt) {
2871	case IFCOUNTER_IERRORS:
2872		return (stats->rx_fifo_oflows + stats->rx_badssd +
2873		    stats->rx_crcerrs + stats->rx_lenerrs + stats->rx_oflows);
2874	case IFCOUNTER_OERRORS:
2875		return (stats->tx_deferred + stats->tx_carrier_lost +
2876		    stats->tx_fifo_underruns + stats->tx_mcast_oflows);
2877	case IFCOUNTER_COLLISIONS:
2878		return (stats->tx_late_colls + stats->tx_multi_colls +
2879		    stats->tx_excess_colls);
2880	case IFCOUNTER_OPACKETS:
2881		return (stats->tx_frames);
2882	case IFCOUNTER_IPACKETS:
2883		return (stats->rx_frames);
2884	default:
2885		return (if_get_counter_default(ifp, cnt));
2886	}
2887}
2888
2889#define	TXP_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
2890	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2891
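/*
 * Pick the 64-bit statistics sysctl helper available on this
 * FreeBSD version: SYSCTL_ADD_UQUAD on 9.x and newer,
 * SYSCTL_ADD_QUAD on 8.x, and a plain unsigned long otherwise.
 */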
2892#if __FreeBSD_version >= 900030
2893#define	TXP_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
2894	    SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2895#elif __FreeBSD_version > 800000
2896#define	TXP_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
2897	    SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2898#else
2899#define	TXP_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
2900	    SYSCTL_ADD_ULONG(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2901#endif
2902
2903static void
2904txp_sysctl_node(struct txp_softc *sc)
2905{
2906	struct sysctl_ctx_list *ctx;
2907	struct sysctl_oid_list *child, *parent;
2908	struct sysctl_oid *tree;
2909	struct txp_hw_stats *stats;
2910	int error;
2911
2912	stats = &sc->sc_stats;
2913	ctx = device_get_sysctl_ctx(sc->sc_dev);
2914	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
2915	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
2916	    CTLTYPE_INT | CTLFLAG_RW, &sc->sc_process_limit, 0,
2917	    sysctl_hw_txp_proc_limit, "I",
2918	    "max number of Rx events to process");
2919	/* Pull in device tunables. */
2920	sc->sc_process_limit = TXP_PROC_DEFAULT;
2921	error = resource_int_value(device_get_name(sc->sc_dev),
2922	    device_get_unit(sc->sc_dev), "process_limit",
2923	    &sc->sc_process_limit);
2924	if (error == 0) {
2925		if (sc->sc_process_limit < TXP_PROC_MIN ||
2926		    sc->sc_process_limit > TXP_PROC_MAX) {
2927			device_printf(sc->sc_dev,
2928			    "process_limit value out of range; "
2929			    "using default: %d\n", TXP_PROC_DEFAULT);
2930			sc->sc_process_limit = TXP_PROC_DEFAULT;
2931		}
2932	}
2933	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
2934	    NULL, "TXP statistics");
2935	parent = SYSCTL_CHILDREN(tree);
2936
2937	/* Tx statistics. */
2938	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2939	    NULL, "Tx MAC statistics");
2940	child = SYSCTL_CHILDREN(tree);
2941
2942	TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
2943	    &stats->tx_frames, "Frames");
2944	TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
2945	    &stats->tx_bytes, "Octets");
2946	TXP_SYSCTL_STAT_ADD32(ctx, child, "deferred",
2947	    &stats->tx_deferred, "Deferred frames");
2948	TXP_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
2949	    &stats->tx_late_colls, "Late collisions");
2950	TXP_SYSCTL_STAT_ADD32(ctx, child, "colls",
2951	    &stats->tx_colls, "Collisions");
2952	TXP_SYSCTL_STAT_ADD32(ctx, child, "carrier_lost",
2953	    &stats->tx_carrier_lost, "Carrier lost");
2954	TXP_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
2955	    &stats->tx_multi_colls, "Multiple collisions");
2956	TXP_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
2957	    &stats->tx_excess_colls, "Excessive collisions");
2958	TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_underruns",
2959	    &stats->tx_fifo_underruns, "FIFO underruns");
2960	TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_oflows",
2961	    &stats->tx_mcast_oflows, "Multicast overflows");
2962	TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
2963	    &stats->tx_filtered, "Filtered frames");
2964
2965	/* Rx statistics. */
2966	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2967	    NULL, "Rx MAC statistics");
2968	child = SYSCTL_CHILDREN(tree);
2969
2970	TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
2971	    &stats->rx_frames, "Frames");
2972	TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
2973	    &stats->rx_bytes, "Octets");
2974	TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2975	    &stats->rx_fifo_oflows, "FIFO overflows");
2976	TXP_SYSCTL_STAT_ADD32(ctx, child, "badssd",
2977	    &stats->rx_badssd, "Bad SSD");
2978	TXP_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
2979	    &stats->rx_crcerrs, "CRC errors");
2980	TXP_SYSCTL_STAT_ADD32(ctx, child, "lenerrs",
2981	    &stats->rx_lenerrs, "Length errors");
2982	TXP_SYSCTL_STAT_ADD32(ctx, child, "bcast_frames",
2983	    &stats->rx_bcast_frames, "Broadcast frames");
2984	TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_frames",
2985	    &stats->rx_mcast_frames, "Multicast frames");
2986	TXP_SYSCTL_STAT_ADD32(ctx, child, "oflows",
2987	    &stats->rx_oflows, "Overflows");
2988	TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
2989	    &stats->rx_filtered, "Filtered frames");
2990}
2991
2992#undef TXP_SYSCTL_STAT_ADD32
2993#undef TXP_SYSCTL_STAT_ADD64
2994
2995static int
2996sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2997{
2998	int error, value;
2999
3000	if (arg1 == NULL)
3001		return (EINVAL);
3002	value = *(int *)arg1;
3003	error = sysctl_handle_int(oidp, &value, 0, req);
3004	if (error || req->newptr == NULL)
3005		return (error);
3006	if (value < low || value > high)
3007		return (EINVAL);
3008	*(int *)arg1 = value;
3009
3010	return (0);
3011}
3012
3013static int
3014sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS)
3015{
3016	return (sysctl_int_range(oidp, arg1, arg2, req,
3017	    TXP_PROC_MIN, TXP_PROC_MAX));
3018}
3019