1/*	$OpenBSD: if_txp.c,v 1.48 2001/06/27 06:34:50 kjc Exp $	*/
2
3/*-
4 * Copyright (c) 2001
5 *	Jason L. Wright <jason@thought.net>, Theo de Raadt, and
6 *	Aaron Campbell <aaron@monkey.org>.  All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by Jason L. Wright,
19 *	Theo de Raadt and Aaron Campbell.
20 * 4. Neither the name of the author nor the names of any co-contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
25 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
34 * THE POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: stable/11/sys/dev/txp/if_txp.c 347962 2019-05-18 20:43:13Z brooks $");
39
40/*
41 * Driver for 3c990 (Typhoon) Ethernet ASIC
42 */
43#include <sys/param.h>
44#include <sys/systm.h>
45#include <sys/bus.h>
46#include <sys/endian.h>
47#include <sys/kernel.h>
48#include <sys/lock.h>
49#include <sys/malloc.h>
50#include <sys/mbuf.h>
51#include <sys/module.h>
52#include <sys/mutex.h>
53#include <sys/queue.h>
54#include <sys/rman.h>
55#include <sys/socket.h>
56#include <sys/sockio.h>
57#include <sys/sysctl.h>
58#include <sys/taskqueue.h>
59
60#include <net/bpf.h>
61#include <net/if.h>
62#include <net/if_var.h>
63#include <net/if_arp.h>
64#include <net/ethernet.h>
65#include <net/if_dl.h>
66#include <net/if_media.h>
67#include <net/if_types.h>
68#include <net/if_vlan_var.h>
69
70#include <netinet/in.h>
71#include <netinet/in_systm.h>
72#include <netinet/ip.h>
73
74#include <dev/mii/mii.h>
75
76#include <dev/pci/pcireg.h>
77#include <dev/pci/pcivar.h>
78
79#include <machine/bus.h>
80#include <machine/in_cksum.h>
81
82#include <dev/txp/if_txpreg.h>
83#include <dev/txp/3c990img.h>
84
85MODULE_DEPEND(txp, pci, 1, 1, 1);
86MODULE_DEPEND(txp, ether, 1, 1, 1);
87
88/*
89 * XXX Known Typhoon firmware issues.
90 *
91 * 1. It seems that firmware has Tx TCP/UDP checksum offloading bug.
92 *    The firmware hangs when it's told to compute TCP/UDP checksum.
93 *    I'm not sure whether the firmware requires special alignment to
94 *    do checksum offloading but datasheet says nothing about that.
95 * 2. Datasheet says nothing for maximum number of fragmented
96 *    descriptors supported. Experimentation shows up to 16 fragment
97 *    descriptors are supported in the firmware. For TSO case, upper
98 *    stack can send 64KB sized IP datagram plus link header size(
99 *    ethernet header + VLAN tag)  frame but controller can handle up
100 *    to 64KB frame given that PAGE_SIZE is 4KB(i.e. 16 * PAGE_SIZE).
101 *    Because frames that need TSO operation of hardware can be
102 *    larger than 64KB I disabled TSO capability. TSO operation for
103 *    less than or equal to 16 fragment descriptors works without
104 *    problems, though.
105 * 3. VLAN hardware tag stripping is always enabled in the firmware
106 *    even if it's explicitly told to not strip the tag. It's
107 *    possible to add the tag back in Rx handler if VLAN hardware
108 *    tag is not active but I didn't try that as it would be
109 *    layering violation.
110 * 4. TXP_CMD_RECV_BUFFER_CONTROL does not work as expected in
111 *    datasheet such that driver should handle the alignment
112 *    restriction by copying received frame to align the frame on
113 *    32bit boundary on strict-alignment architectures. This adds a
114 *    lot of CPU burden and it effectively reduce Rx performance on
115 *    strict-alignment architectures(e.g. sparc64, arm and mips).
116 *
117 * Unfortunately it seems that 3Com have no longer interests in
118 * releasing fixed firmware so we may have to live with these bugs.
119 */
120
121#define	TXP_CSUM_FEATURES	(CSUM_IP)
122
123/*
124 * Various supported device vendors/types and their names.
125 */
126static struct txp_type txp_devs[] = {
127	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_95,
128	    "3Com 3cR990-TX-95 Etherlink with 3XP Processor" },
129	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_97,
130	    "3Com 3cR990-TX-97 Etherlink with 3XP Processor" },
131	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_TXM,
132	    "3Com 3cR990B-TXM Etherlink with 3XP Processor" },
133	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_95,
134	    "3Com 3cR990-SRV-95 Etherlink Server with 3XP Processor" },
135	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_97,
136	    "3Com 3cR990-SRV-97 Etherlink Server with 3XP Processor" },
137	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_SRV,
138	    "3Com 3cR990B-SRV Etherlink Server with 3XP Processor" },
139	{ 0, 0, NULL }
140};
141
142static int txp_probe(device_t);
143static int txp_attach(device_t);
144static int txp_detach(device_t);
145static int txp_shutdown(device_t);
146static int txp_suspend(device_t);
147static int txp_resume(device_t);
148static int txp_intr(void *);
149static void txp_int_task(void *, int);
150static void txp_tick(void *);
151static int txp_ioctl(struct ifnet *, u_long, caddr_t);
152static uint64_t txp_get_counter(struct ifnet *, ift_counter);
153static void txp_start(struct ifnet *);
154static void txp_start_locked(struct ifnet *);
155static int txp_encap(struct txp_softc *, struct txp_tx_ring *, struct mbuf **);
156static void txp_stop(struct txp_softc *);
157static void txp_init(void *);
158static void txp_init_locked(struct txp_softc *);
159static void txp_watchdog(struct txp_softc *);
160
161static int txp_reset(struct txp_softc *);
162static int txp_boot(struct txp_softc *, uint32_t);
163static int txp_sleep(struct txp_softc *, int);
164static int txp_wait(struct txp_softc *, uint32_t);
165static int txp_download_fw(struct txp_softc *);
166static int txp_download_fw_wait(struct txp_softc *);
167static int txp_download_fw_section(struct txp_softc *,
168    struct txp_fw_section_header *, int);
169static int txp_alloc_rings(struct txp_softc *);
170static void txp_init_rings(struct txp_softc *);
171static int txp_dma_alloc(struct txp_softc *, char *, bus_dma_tag_t *,
172    bus_size_t, bus_size_t, bus_dmamap_t *, void **, bus_size_t, bus_addr_t *);
173static void txp_dma_free(struct txp_softc *, bus_dma_tag_t *, bus_dmamap_t,
174    void **, bus_addr_t *);
175static void txp_free_rings(struct txp_softc *);
176static int txp_rxring_fill(struct txp_softc *);
177static void txp_rxring_empty(struct txp_softc *);
178static void txp_set_filter(struct txp_softc *);
179
180static int txp_cmd_desc_numfree(struct txp_softc *);
181static int txp_command(struct txp_softc *, uint16_t, uint16_t, uint32_t,
182    uint32_t, uint16_t *, uint32_t *, uint32_t *, int);
183static int txp_ext_command(struct txp_softc *, uint16_t, uint16_t,
184    uint32_t, uint32_t, struct txp_ext_desc *, uint8_t,
185    struct txp_rsp_desc **, int);
186static int txp_response(struct txp_softc *, uint16_t, uint16_t,
187    struct txp_rsp_desc **);
188static void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
189    struct txp_rsp_desc *);
190static int txp_set_capabilities(struct txp_softc *);
191
192static void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
193static int txp_ifmedia_upd(struct ifnet *);
194#ifdef TXP_DEBUG
195static void txp_show_descriptor(void *);
196#endif
197static void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *);
198static void txp_rxbuf_reclaim(struct txp_softc *);
199#ifndef __NO_STRICT_ALIGNMENT
200static __inline void txp_fixup_rx(struct mbuf *);
201#endif
202static int txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *, int);
203static void txp_stats_save(struct txp_softc *);
204static void txp_stats_update(struct txp_softc *, struct txp_rsp_desc *);
205static void txp_sysctl_node(struct txp_softc *);
206static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
207static int sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS);
208
209static int prefer_iomap = 0;
210TUNABLE_INT("hw.txp.prefer_iomap", &prefer_iomap);
211
212static device_method_t txp_methods[] = {
213        /* Device interface */
214	DEVMETHOD(device_probe,		txp_probe),
215	DEVMETHOD(device_attach,	txp_attach),
216	DEVMETHOD(device_detach,	txp_detach),
217	DEVMETHOD(device_shutdown,	txp_shutdown),
218	DEVMETHOD(device_suspend,	txp_suspend),
219	DEVMETHOD(device_resume,	txp_resume),
220
221	{ NULL, NULL }
222};
223
224static driver_t txp_driver = {
225	"txp",
226	txp_methods,
227	sizeof(struct txp_softc)
228};
229
230static devclass_t txp_devclass;
231
232DRIVER_MODULE(txp, pci, txp_driver, txp_devclass, 0, 0);
233
234static int
235txp_probe(device_t dev)
236{
237	struct txp_type *t;
238
239	t = txp_devs;
240
241	while (t->txp_name != NULL) {
242		if ((pci_get_vendor(dev) == t->txp_vid) &&
243		    (pci_get_device(dev) == t->txp_did)) {
244			device_set_desc(dev, t->txp_name);
245			return (BUS_PROBE_DEFAULT);
246		}
247		t++;
248	}
249
250	return (ENXIO);
251}
252
253static int
254txp_attach(device_t dev)
255{
256	struct txp_softc *sc;
257	struct ifnet *ifp;
258	struct txp_rsp_desc *rsp;
259	uint16_t p1;
260	uint32_t p2, reg;
261	int error = 0, pmc, rid;
262	uint8_t eaddr[ETHER_ADDR_LEN], *ver;
263
264	sc = device_get_softc(dev);
265	sc->sc_dev = dev;
266
267	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
268	    MTX_DEF);
269	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
270	TASK_INIT(&sc->sc_int_task, 0, txp_int_task, sc);
271	TAILQ_INIT(&sc->sc_busy_list);
272	TAILQ_INIT(&sc->sc_free_list);
273
274	ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
275	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
276	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL);
277	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
278	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
279	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL);
280	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
281	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
282
283	pci_enable_busmaster(dev);
284	/* Prefer memory space register mapping over IO space. */
285	if (prefer_iomap == 0) {
286		sc->sc_res_id = PCIR_BAR(1);
287		sc->sc_res_type = SYS_RES_MEMORY;
288	} else {
289		sc->sc_res_id = PCIR_BAR(0);
290		sc->sc_res_type = SYS_RES_IOPORT;
291	}
292	sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
293	    &sc->sc_res_id, RF_ACTIVE);
294	if (sc->sc_res == NULL && prefer_iomap == 0) {
295		sc->sc_res_id = PCIR_BAR(0);
296		sc->sc_res_type = SYS_RES_IOPORT;
297		sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
298		    &sc->sc_res_id, RF_ACTIVE);
299	}
300	if (sc->sc_res == NULL) {
301		device_printf(dev, "couldn't map ports/memory\n");
302		ifmedia_removeall(&sc->sc_ifmedia);
303		mtx_destroy(&sc->sc_mtx);
304		return (ENXIO);
305	}
306
307	/* Enable MWI. */
308	reg = pci_read_config(dev, PCIR_COMMAND, 2);
309	reg |= PCIM_CMD_MWRICEN;
310	pci_write_config(dev, PCIR_COMMAND, reg, 2);
311	/* Check cache line size. */
312	reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
313	reg <<= 4;
314	if (reg == 0 || (reg % 16) != 0)
315		device_printf(sc->sc_dev,
316		    "invalid cache line size : %u\n", reg);
317
318	/* Allocate interrupt */
319	rid = 0;
320	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
321	    RF_SHAREABLE | RF_ACTIVE);
322
323	if (sc->sc_irq == NULL) {
324		device_printf(dev, "couldn't map interrupt\n");
325		error = ENXIO;
326		goto fail;
327	}
328
329	if ((error = txp_alloc_rings(sc)) != 0)
330		goto fail;
331	txp_init_rings(sc);
332	txp_sysctl_node(sc);
333	/* Reset controller and make it reload sleep image. */
334	if (txp_reset(sc) != 0) {
335		error = ENXIO;
336		goto fail;
337	}
338
339	/* Let controller boot from sleep image. */
340	if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
341		device_printf(sc->sc_dev, "could not boot sleep image\n");
342		error = ENXIO;
343		goto fail;
344	}
345
346	/* Get station address. */
347	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
348	    &p1, &p2, NULL, TXP_CMD_WAIT)) {
349		error = ENXIO;
350		goto fail;
351	}
352
353	p1 = le16toh(p1);
354	eaddr[0] = ((uint8_t *)&p1)[1];
355	eaddr[1] = ((uint8_t *)&p1)[0];
356	p2 = le32toh(p2);
357	eaddr[2] = ((uint8_t *)&p2)[3];
358	eaddr[3] = ((uint8_t *)&p2)[2];
359	eaddr[4] = ((uint8_t *)&p2)[1];
360	eaddr[5] = ((uint8_t *)&p2)[0];
361
362	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
363	if (ifp == NULL) {
364		device_printf(dev, "can not allocate ifnet structure\n");
365		error = ENOSPC;
366		goto fail;
367	}
368
369	/*
370	 * Show sleep image version information which may help to
371	 * diagnose sleep image specific issues.
372	 */
373	rsp = NULL;
374	if (txp_ext_command(sc, TXP_CMD_VERSIONS_READ, 0, 0, 0, NULL, 0,
375	    &rsp, TXP_CMD_WAIT)) {
376		device_printf(dev, "can not read sleep image version\n");
377		error = ENXIO;
378		goto fail;
379	}
380	if (rsp->rsp_numdesc == 0) {
381		p2 = le32toh(rsp->rsp_par2) & 0xFFFF;
382		device_printf(dev, "Typhoon 1.0 sleep image (2000/%02u/%02u)\n",
383		    p2 >> 8, p2 & 0xFF);
384	} else if (rsp->rsp_numdesc == 2) {
385		p2 = le32toh(rsp->rsp_par2);
386		ver = (uint8_t *)(rsp + 1);
387		/*
388		 * Even if datasheet says the command returns a NULL
389		 * terminated version string, explicitly terminate
390		 * the string. Given that several bugs of firmware
391		 * I can't trust this simple one.
392		 */
393		ver[25] = '\0';
394		device_printf(dev,
395		    "Typhoon 1.1+ sleep image %02u.%03u.%03u %s\n",
396		    p2 >> 24, (p2 >> 12) & 0xFFF, p2 & 0xFFF, ver);
397	} else {
398		p2 = le32toh(rsp->rsp_par2);
399		device_printf(dev,
400		    "Unknown Typhoon sleep image version: %u:0x%08x\n",
401		    rsp->rsp_numdesc, p2);
402	}
403	if (rsp != NULL)
404		free(rsp, M_DEVBUF);
405
406	sc->sc_xcvr = TXP_XCVR_AUTO;
407	txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
408	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
409	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);
410
411	ifp->if_softc = sc;
412	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
413	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
414	ifp->if_ioctl = txp_ioctl;
415	ifp->if_start = txp_start;
416	ifp->if_init = txp_init;
417	ifp->if_get_counter = txp_get_counter;
418	ifp->if_snd.ifq_drv_maxlen = TX_ENTRIES - 1;
419	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
420	IFQ_SET_READY(&ifp->if_snd);
421	/*
422	 * It's possible to read firmware's offload capability but
423	 * we have not downloaded the firmware yet so announce
424	 * working capability here. We're not interested in IPSec
425	 * capability and due to the lots of firmware bug we can't
426	 * advertise the whole capability anyway.
427	 */
428	ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM;
429	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
430		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
431	/* Enable all capabilities. */
432	ifp->if_capenable = ifp->if_capabilities;
433
434	ether_ifattach(ifp, eaddr);
435
436	/* VLAN capability setup. */
437	ifp->if_capabilities |= IFCAP_VLAN_MTU;
438	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
439	ifp->if_capenable = ifp->if_capabilities;
440	/* Tell the upper layer(s) we support long frames. */
441	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
442
443	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
444	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
445
446	/* Create local taskq. */
447	sc->sc_tq = taskqueue_create_fast("txp_taskq", M_WAITOK,
448	    taskqueue_thread_enqueue, &sc->sc_tq);
449	if (sc->sc_tq == NULL) {
450		device_printf(dev, "could not create taskqueue.\n");
451		ether_ifdetach(ifp);
452		error = ENXIO;
453		goto fail;
454	}
455	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
456	    device_get_nameunit(sc->sc_dev));
457
458	/* Put controller into sleep. */
459	if (txp_sleep(sc, 0) != 0) {
460		ether_ifdetach(ifp);
461		error = ENXIO;
462		goto fail;
463	}
464
465	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
466	    txp_intr, NULL, sc, &sc->sc_intrhand);
467
468	if (error != 0) {
469		ether_ifdetach(ifp);
470		device_printf(dev, "couldn't set up interrupt handler.\n");
471		goto fail;
472	}
473
474	gone_by_fcp101_dev(dev);
475
476	return (0);
477
478fail:
479	if (error != 0)
480		txp_detach(dev);
481	return (error);
482}
483
484static int
485txp_detach(device_t dev)
486{
487	struct txp_softc *sc;
488	struct ifnet *ifp;
489
490	sc = device_get_softc(dev);
491
492	ifp = sc->sc_ifp;
493	if (device_is_attached(dev)) {
494		TXP_LOCK(sc);
495		sc->sc_flags |= TXP_FLAG_DETACH;
496		txp_stop(sc);
497		TXP_UNLOCK(sc);
498		callout_drain(&sc->sc_tick);
499		taskqueue_drain(sc->sc_tq, &sc->sc_int_task);
500		ether_ifdetach(ifp);
501	}
502	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
503
504	ifmedia_removeall(&sc->sc_ifmedia);
505	if (sc->sc_intrhand != NULL)
506		bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
507	if (sc->sc_irq != NULL)
508		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
509	if (sc->sc_res != NULL)
510		bus_release_resource(dev, sc->sc_res_type, sc->sc_res_id,
511		    sc->sc_res);
512	if (sc->sc_ifp != NULL) {
513		if_free(sc->sc_ifp);
514		sc->sc_ifp = NULL;
515	}
516	txp_free_rings(sc);
517	mtx_destroy(&sc->sc_mtx);
518
519	return (0);
520}
521
522static int
523txp_reset(struct txp_softc *sc)
524{
525	uint32_t r;
526	int i;
527
528	/* Disable interrupts. */
529	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
530	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
531	/* Ack all pending interrupts. */
532	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
533
534	r = 0;
535	WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
536	DELAY(1000);
537	WRITE_REG(sc, TXP_SRR, 0);
538
539	/* Should wait max 6 seconds. */
540	for (i = 0; i < 6000; i++) {
541		r = READ_REG(sc, TXP_A2H_0);
542		if (r == STAT_WAITING_FOR_HOST_REQUEST)
543			break;
544		DELAY(1000);
545	}
546
547	if (r != STAT_WAITING_FOR_HOST_REQUEST)
548		device_printf(sc->sc_dev, "reset hung\n");
549
550	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
551	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
552	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
553
554	/*
555	 * Give more time to complete loading sleep image before
556	 * trying to boot from sleep image.
557	 */
558	DELAY(5000);
559
560	return (0);
561}
562
563static int
564txp_boot(struct txp_softc *sc, uint32_t state)
565{
566
567	/* See if it's waiting for boot, and try to boot it. */
568	if (txp_wait(sc, state) != 0) {
569		device_printf(sc->sc_dev, "not waiting for boot\n");
570		return (ENXIO);
571	}
572
573	WRITE_REG(sc, TXP_H2A_2, TXP_ADDR_HI(sc->sc_ldata.txp_boot_paddr));
574	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
575	WRITE_REG(sc, TXP_H2A_1, TXP_ADDR_LO(sc->sc_ldata.txp_boot_paddr));
576	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
577	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
578	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
579
580	/* See if it booted. */
581	if (txp_wait(sc, STAT_RUNNING) != 0) {
582		device_printf(sc->sc_dev, "firmware not running\n");
583		return (ENXIO);
584	}
585
586	/* Clear TX and CMD ring write registers. */
587	WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
588	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
589	WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
590	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
591	WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
592	TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
593	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
594	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
595
596	return (0);
597}
598
599static int
600txp_download_fw(struct txp_softc *sc)
601{
602	struct txp_fw_file_header *fileheader;
603	struct txp_fw_section_header *secthead;
604	int sect;
605	uint32_t error, ier, imr;
606
607	TXP_LOCK_ASSERT(sc);
608
609	error = 0;
610	ier = READ_REG(sc, TXP_IER);
611	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);
612
613	imr = READ_REG(sc, TXP_IMR);
614	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);
615
616	if (txp_wait(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
617		device_printf(sc->sc_dev, "not waiting for host request\n");
618		error = ETIMEDOUT;
619		goto fail;
620	}
621
622	/* Ack the status. */
623	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
624
625	fileheader = (struct txp_fw_file_header *)tc990image;
626	if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
627		device_printf(sc->sc_dev, "firmware invalid magic\n");
628		goto fail;
629	}
630
631	/* Tell boot firmware to get ready for image. */
632	WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr));
633	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
634	WRITE_REG(sc, TXP_H2A_2, le32toh(fileheader->hmac[0]));
635	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
636	WRITE_REG(sc, TXP_H2A_3, le32toh(fileheader->hmac[1]));
637	TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
638	WRITE_REG(sc, TXP_H2A_4, le32toh(fileheader->hmac[2]));
639	TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
640	WRITE_REG(sc, TXP_H2A_5, le32toh(fileheader->hmac[3]));
641	TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
642	WRITE_REG(sc, TXP_H2A_6, le32toh(fileheader->hmac[4]));
643	TXP_BARRIER(sc, TXP_H2A_6, 4, BUS_SPACE_BARRIER_WRITE);
644	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);
645	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
646
647	if (txp_download_fw_wait(sc)) {
648		device_printf(sc->sc_dev, "firmware wait failed, initial\n");
649		error = ETIMEDOUT;
650		goto fail;
651	}
652
653	secthead = (struct txp_fw_section_header *)(((uint8_t *)tc990image) +
654	    sizeof(struct txp_fw_file_header));
655
656	for (sect = 0; sect < le32toh(fileheader->nsections); sect++) {
657		if ((error = txp_download_fw_section(sc, secthead, sect)) != 0)
658			goto fail;
659		secthead = (struct txp_fw_section_header *)
660		    (((uint8_t *)secthead) + le32toh(secthead->nbytes) +
661		    sizeof(*secthead));
662	}
663
664	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);
665	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
666
667	if (txp_wait(sc, STAT_WAITING_FOR_BOOT) != 0) {
668		device_printf(sc->sc_dev, "not waiting for boot\n");
669		error = ETIMEDOUT;
670		goto fail;
671	}
672
673fail:
674	WRITE_REG(sc, TXP_IER, ier);
675	WRITE_REG(sc, TXP_IMR, imr);
676
677	return (error);
678}
679
680static int
681txp_download_fw_wait(struct txp_softc *sc)
682{
683	uint32_t i;
684
685	TXP_LOCK_ASSERT(sc);
686
687	for (i = 0; i < TXP_TIMEOUT; i++) {
688		if ((READ_REG(sc, TXP_ISR) & TXP_INT_A2H_0) != 0)
689			break;
690		DELAY(50);
691	}
692
693	if (i == TXP_TIMEOUT) {
694		device_printf(sc->sc_dev, "firmware wait failed comm0\n");
695		return (ETIMEDOUT);
696	}
697
698	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
699
700	if (READ_REG(sc, TXP_A2H_0) != STAT_WAITING_FOR_SEGMENT) {
701		device_printf(sc->sc_dev, "firmware not waiting for segment\n");
702		return (ETIMEDOUT);
703	}
704	return (0);
705}
706
707static int
708txp_download_fw_section(struct txp_softc *sc,
709    struct txp_fw_section_header *sect, int sectnum)
710{
711	bus_dma_tag_t sec_tag;
712	bus_dmamap_t sec_map;
713	bus_addr_t sec_paddr;
714	uint8_t *sec_buf;
715	int rseg, err = 0;
716	struct mbuf m;
717	uint16_t csum;
718
719	TXP_LOCK_ASSERT(sc);
720
721	/* Skip zero length sections. */
722	if (le32toh(sect->nbytes) == 0)
723		return (0);
724
725	/* Make sure we aren't past the end of the image. */
726	rseg = ((uint8_t *)sect) - ((uint8_t *)tc990image);
727	if (rseg >= sizeof(tc990image)) {
728		device_printf(sc->sc_dev,
729		    "firmware invalid section address, section %d\n", sectnum);
730		return (EIO);
731	}
732
733	/* Make sure this section doesn't go past the end. */
734	rseg += le32toh(sect->nbytes);
735	if (rseg >= sizeof(tc990image)) {
736		device_printf(sc->sc_dev, "firmware truncated section %d\n",
737		    sectnum);
738		return (EIO);
739	}
740
741	sec_tag = NULL;
742	sec_map = NULL;
743	sec_buf = NULL;
744	/* XXX */
745	TXP_UNLOCK(sc);
746	err = txp_dma_alloc(sc, "firmware sections", &sec_tag, sizeof(uint32_t),
747	    0, &sec_map, (void **)&sec_buf, le32toh(sect->nbytes), &sec_paddr);
748	TXP_LOCK(sc);
749	if (err != 0)
750		goto bail;
751	bcopy(((uint8_t *)sect) + sizeof(*sect), sec_buf,
752	    le32toh(sect->nbytes));
753
754	/*
755	 * dummy up mbuf and verify section checksum
756	 */
757	m.m_type = MT_DATA;
758	m.m_next = m.m_nextpkt = NULL;
759	m.m_len = le32toh(sect->nbytes);
760	m.m_data = sec_buf;
761	m.m_flags = 0;
762	csum = in_cksum(&m, le32toh(sect->nbytes));
763	if (csum != sect->cksum) {
764		device_printf(sc->sc_dev,
765		    "firmware section %d, bad cksum (expected 0x%x got 0x%x)\n",
766		    sectnum, le16toh(sect->cksum), csum);
767		err = EIO;
768		goto bail;
769	}
770
771	bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_PREWRITE);
772
773	WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes));
774	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
775	WRITE_REG(sc, TXP_H2A_2, le16toh(sect->cksum));
776	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
777	WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr));
778	TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
779	WRITE_REG(sc, TXP_H2A_4, TXP_ADDR_HI(sec_paddr));
780	TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
781	WRITE_REG(sc, TXP_H2A_5, TXP_ADDR_LO(sec_paddr));
782	TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
783	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);
784	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
785
786	if (txp_download_fw_wait(sc)) {
787		device_printf(sc->sc_dev,
788		    "firmware wait failed, section %d\n", sectnum);
789		err = ETIMEDOUT;
790	}
791
792	bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_POSTWRITE);
793bail:
794	txp_dma_free(sc, &sec_tag, sec_map, (void **)&sec_buf, &sec_paddr);
795	return (err);
796}
797
798static int
799txp_intr(void *vsc)
800{
801	struct txp_softc *sc;
802	uint32_t status;
803
804	sc = vsc;
805	status = READ_REG(sc, TXP_ISR);
806	if ((status & TXP_INT_LATCH) == 0)
807		return (FILTER_STRAY);
808	WRITE_REG(sc, TXP_ISR, status);
809	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
810	taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
811
812	return (FILTER_HANDLED);
813}
814
815static void
816txp_int_task(void *arg, int pending)
817{
818	struct txp_softc *sc;
819	struct ifnet *ifp;
820	struct txp_hostvar *hv;
821	uint32_t isr;
822	int more;
823
824	sc = (struct txp_softc *)arg;
825
826	TXP_LOCK(sc);
827	ifp = sc->sc_ifp;
828	hv = sc->sc_hostvar;
829	isr = READ_REG(sc, TXP_ISR);
830	if ((isr & TXP_INT_LATCH) != 0)
831		WRITE_REG(sc, TXP_ISR, isr);
832
833	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
834		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
835		    sc->sc_cdata.txp_hostvar_map,
836		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
837		more = 0;
838		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
839			more += txp_rx_reclaim(sc, &sc->sc_rxhir,
840			    sc->sc_process_limit);
841		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
842			more += txp_rx_reclaim(sc, &sc->sc_rxlor,
843			    sc->sc_process_limit);
844		/*
845		 * XXX
846		 * It seems controller is not smart enough to handle
847		 * FIFO overflow conditions under heavy network load.
848		 * No matter how often new Rx buffers are passed to
849		 * controller the situation didn't change. Maybe
850		 * flow-control would be the only way to mitigate the
851		 * issue but firmware does not have commands that
852		 * control the threshold of emitting pause frames.
853		 */
854		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
855			txp_rxbuf_reclaim(sc);
856		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
857		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
858			txp_tx_reclaim(sc, &sc->sc_txhir);
859		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
860		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
861			txp_tx_reclaim(sc, &sc->sc_txlor);
862		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
863		    sc->sc_cdata.txp_hostvar_map,
864		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
865		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
866			txp_start_locked(sc->sc_ifp);
867		if (more != 0 || READ_REG(sc, TXP_ISR & TXP_INT_LATCH) != 0) {
868			taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
869			TXP_UNLOCK(sc);
870			return;
871		}
872	}
873
874	/* Re-enable interrupts. */
875	WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
876	TXP_UNLOCK(sc);
877}
878
879#ifndef __NO_STRICT_ALIGNMENT
880static __inline void
881txp_fixup_rx(struct mbuf *m)
882{
883	int i;
884	uint16_t *src, *dst;
885
886	src = mtod(m, uint16_t *);
887	dst = src - (TXP_RXBUF_ALIGN - ETHER_ALIGN) / sizeof *src;
888
889	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
890		*dst++ = *src++;
891
892	m->m_data -= TXP_RXBUF_ALIGN - ETHER_ALIGN;
893}
894#endif
895
896static int
897txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r, int count)
898{
899	struct ifnet *ifp;
900	struct txp_rx_desc *rxd;
901	struct mbuf *m;
902	struct txp_rx_swdesc *sd;
903	uint32_t roff, woff, rx_stat, prog;
904
905	TXP_LOCK_ASSERT(sc);
906
907	ifp = sc->sc_ifp;
908
909	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
910	    BUS_DMASYNC_POSTWRITE);
911
912	roff = le32toh(*r->r_roff);
913	woff = le32toh(*r->r_woff);
914	rxd = r->r_desc + roff / sizeof(struct txp_rx_desc);
915	for (prog = 0; roff != woff; prog++, count--) {
916		if (count <= 0)
917			break;
918		bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));
919		KASSERT(sd != NULL, ("%s: Rx desc ring corrupted", __func__));
920		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
921		    BUS_DMASYNC_POSTREAD);
922		bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
923		m = sd->sd_mbuf;
924		KASSERT(m != NULL, ("%s: Rx buffer ring corrupted", __func__));
925		sd->sd_mbuf = NULL;
926		TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
927		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
928		if ((rxd->rx_flags & RX_FLAGS_ERROR) != 0) {
929			if (bootverbose)
930				device_printf(sc->sc_dev, "Rx error %u\n",
931				    le32toh(rxd->rx_stat) & RX_ERROR_MASK);
932			m_freem(m);
933			goto next;
934		}
935
936		m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len);
937		m->m_pkthdr.rcvif = ifp;
938#ifndef __NO_STRICT_ALIGNMENT
939		txp_fixup_rx(m);
940#endif
941		rx_stat = le32toh(rxd->rx_stat);
942		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
943			if ((rx_stat & RX_STAT_IPCKSUMBAD) != 0)
944				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
945			else if ((rx_stat & RX_STAT_IPCKSUMGOOD) != 0)
946				m->m_pkthdr.csum_flags |=
947				    CSUM_IP_CHECKED|CSUM_IP_VALID;
948
949			if ((rx_stat & RX_STAT_TCPCKSUMGOOD) != 0 ||
950			    (rx_stat & RX_STAT_UDPCKSUMGOOD) != 0) {
951				m->m_pkthdr.csum_flags |=
952				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
953				m->m_pkthdr.csum_data = 0xffff;
954			}
955		}
956
957		/*
958		 * XXX
959		 * Typhoon has a firmware bug that VLAN tag is always
960		 * stripped out even if it is told to not remove the tag.
961		 * Therefore don't check if_capenable here.
962		 */
963		if (/* (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && */
964		    (rx_stat & RX_STAT_VLAN) != 0) {
965			m->m_pkthdr.ether_vtag =
966			    bswap16((le32toh(rxd->rx_vlan) >> 16));
967			m->m_flags |= M_VLANTAG;
968		}
969
970		TXP_UNLOCK(sc);
971		(*ifp->if_input)(ifp, m);
972		TXP_LOCK(sc);
973
974next:
975		roff += sizeof(struct txp_rx_desc);
976		if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
977			roff = 0;
978			rxd = r->r_desc;
979		} else
980			rxd++;
981		prog++;
982	}
983
984	if (prog == 0)
985		return (0);
986
987	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
988	    BUS_DMASYNC_PREWRITE);
989	*r->r_roff = le32toh(roff);
990
991	return (count > 0 ? 0 : EAGAIN);
992}
993
994static void
995txp_rxbuf_reclaim(struct txp_softc *sc)
996{
997	struct txp_hostvar *hv;
998	struct txp_rxbuf_desc *rbd;
999	struct txp_rx_swdesc *sd;
1000	bus_dma_segment_t segs[1];
1001	int nsegs, prod, prog;
1002	uint32_t cons;
1003
1004	TXP_LOCK_ASSERT(sc);
1005
1006	hv = sc->sc_hostvar;
1007	cons = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx));
1008	prod = sc->sc_rxbufprod;
1009	TXP_DESC_INC(prod, RXBUF_ENTRIES);
1010	if (prod == cons)
1011		return;
1012
1013	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1014	    sc->sc_cdata.txp_rxbufs_map,
1015	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1016
1017	for (prog = 0; prod != cons; prog++) {
1018		sd = TAILQ_FIRST(&sc->sc_free_list);
1019		if (sd == NULL)
1020			break;
1021		rbd = sc->sc_rxbufs + prod;
1022		bcopy((u_long *)&rbd->rb_vaddrlo, &sd, sizeof(sd));
1023		sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1024		if (sd->sd_mbuf == NULL)
1025			break;
1026		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
1027#ifndef __NO_STRICT_ALIGNMENT
1028		m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
1029#endif
1030		if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
1031		    sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0) != 0) {
1032			m_freem(sd->sd_mbuf);
1033			sd->sd_mbuf = NULL;
1034			break;
1035		}
1036		KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
1037		    nsegs));
1038		TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1039		TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
1040		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1041		    BUS_DMASYNC_PREREAD);
1042		rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
1043		rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
1044		TXP_DESC_INC(prod, RXBUF_ENTRIES);
1045	}
1046
1047	if (prog == 0)
1048		return;
1049	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1050	    sc->sc_cdata.txp_rxbufs_map,
1051	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1052	prod = (prod + RXBUF_ENTRIES - 1) % RXBUF_ENTRIES;
1053	sc->sc_rxbufprod = prod;
1054	hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(prod));
1055}
1056
1057/*
1058 * Reclaim mbufs and entries from a transmit ring.
1059 */
1060static void
1061txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r)
1062{
1063	struct ifnet *ifp;
1064	uint32_t idx;
1065	uint32_t cons, cnt;
1066	struct txp_tx_desc *txd;
1067	struct txp_swdesc *sd;
1068
1069	TXP_LOCK_ASSERT(sc);
1070
1071	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
1072	    BUS_DMASYNC_POSTWRITE);
1073	ifp = sc->sc_ifp;
1074	idx = TXP_OFFSET2IDX(le32toh(*(r->r_off)));
1075	cons = r->r_cons;
1076	cnt = r->r_cnt;
1077	txd = r->r_desc + cons;
1078	sd = sc->sc_txd + cons;
1079
1080	for (cnt = r->r_cnt; cons != idx && cnt > 0; cnt--) {
1081		if ((txd->tx_flags & TX_FLAGS_TYPE_M) == TX_FLAGS_TYPE_DATA) {
1082			if (sd->sd_mbuf != NULL) {
1083				bus_dmamap_sync(sc->sc_cdata.txp_tx_tag,
1084				    sd->sd_map, BUS_DMASYNC_POSTWRITE);
1085				bus_dmamap_unload(sc->sc_cdata.txp_tx_tag,
1086				    sd->sd_map);
1087				m_freem(sd->sd_mbuf);
1088				sd->sd_mbuf = NULL;
1089				txd->tx_addrlo = 0;
1090				txd->tx_addrhi = 0;
1091				txd->tx_flags = 0;
1092			}
1093		}
1094		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1095
1096		if (++cons == TX_ENTRIES) {
1097			txd = r->r_desc;
1098			cons = 0;
1099			sd = sc->sc_txd;
1100		} else {
1101			txd++;
1102			sd++;
1103		}
1104	}
1105
1106	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
1107	    BUS_DMASYNC_PREWRITE);
1108	r->r_cons = cons;
1109	r->r_cnt = cnt;
1110	if (cnt == 0)
1111		sc->sc_watchdog_timer = 0;
1112}
1113
1114static int
1115txp_shutdown(device_t dev)
1116{
1117
1118	return (txp_suspend(dev));
1119}
1120
1121static int
1122txp_suspend(device_t dev)
1123{
1124	struct txp_softc *sc;
1125	struct ifnet *ifp;
1126	uint8_t *eaddr;
1127	uint16_t p1;
1128	uint32_t p2;
1129	int pmc;
1130	uint16_t pmstat;
1131
1132	sc = device_get_softc(dev);
1133
1134	TXP_LOCK(sc);
1135	ifp = sc->sc_ifp;
1136	txp_stop(sc);
1137	txp_init_rings(sc);
1138	/* Reset controller and make it reload sleep image. */
1139	txp_reset(sc);
1140	/* Let controller boot from sleep image. */
1141	if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
1142		device_printf(sc->sc_dev, "couldn't boot sleep image\n");
1143
1144	/* Set station address. */
1145	eaddr = IF_LLADDR(sc->sc_ifp);
1146	p1 = 0;
1147	((uint8_t *)&p1)[1] = eaddr[0];
1148	((uint8_t *)&p1)[0] = eaddr[1];
1149	p1 = le16toh(p1);
1150	((uint8_t *)&p2)[3] = eaddr[2];
1151	((uint8_t *)&p2)[2] = eaddr[3];
1152	((uint8_t *)&p2)[1] = eaddr[4];
1153	((uint8_t *)&p2)[0] = eaddr[5];
1154	p2 = le32toh(p2);
1155	txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0, NULL, NULL,
1156	    NULL, TXP_CMD_WAIT);
1157	txp_set_filter(sc);
1158	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
1159	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
1160	txp_sleep(sc, sc->sc_ifp->if_capenable);
1161	if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
1162		/* Request PME. */
1163		pmstat = pci_read_config(sc->sc_dev,
1164		    pmc + PCIR_POWER_STATUS, 2);
1165		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1166		if ((ifp->if_capenable & IFCAP_WOL) != 0)
1167			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1168		pci_write_config(sc->sc_dev,
1169		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1170	}
1171	TXP_UNLOCK(sc);
1172
1173	return (0);
1174}
1175
1176static int
1177txp_resume(device_t dev)
1178{
1179	struct txp_softc *sc;
1180	int pmc;
1181	uint16_t pmstat;
1182
1183	sc = device_get_softc(dev);
1184
1185	TXP_LOCK(sc);
1186	if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
1187		/* Disable PME and clear PME status. */
1188		pmstat = pci_read_config(sc->sc_dev,
1189		    pmc + PCIR_POWER_STATUS, 2);
1190		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
1191			pmstat &= ~PCIM_PSTAT_PMEENABLE;
1192			pci_write_config(sc->sc_dev,
1193			    pmc + PCIR_POWER_STATUS, pmstat, 2);
1194		}
1195	}
1196	if ((sc->sc_ifp->if_flags & IFF_UP) != 0)
1197		txp_init_locked(sc);
1198	TXP_UNLOCK(sc);
1199
1200	return (0);
1201}
1202
1203struct txp_dmamap_arg {
1204	bus_addr_t	txp_busaddr;
1205};
1206
1207static void
1208txp_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1209{
1210	struct txp_dmamap_arg *ctx;
1211
1212	if (error != 0)
1213		return;
1214
1215	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1216
1217	ctx = (struct txp_dmamap_arg *)arg;
1218	ctx->txp_busaddr = segs[0].ds_addr;
1219}
1220
1221static int
1222txp_dma_alloc(struct txp_softc *sc, char *type, bus_dma_tag_t *tag,
1223    bus_size_t alignment, bus_size_t boundary, bus_dmamap_t *map, void **buf,
1224    bus_size_t size, bus_addr_t *paddr)
1225{
1226	struct txp_dmamap_arg ctx;
1227	int error;
1228
1229	/* Create DMA block tag. */
1230	error = bus_dma_tag_create(
1231	    sc->sc_cdata.txp_parent_tag,	/* parent */
1232	    alignment, boundary,	/* algnmnt, boundary */
1233	    BUS_SPACE_MAXADDR,		/* lowaddr */
1234	    BUS_SPACE_MAXADDR,		/* highaddr */
1235	    NULL, NULL,			/* filter, filterarg */
1236	    size,			/* maxsize */
1237	    1,				/* nsegments */
1238	    size,			/* maxsegsize */
1239	    0,				/* flags */
1240	    NULL, NULL,			/* lockfunc, lockarg */
1241	    tag);
1242	if (error != 0) {
1243		device_printf(sc->sc_dev,
1244		    "could not create DMA tag for %s.\n", type);
1245		return (error);
1246	}
1247
1248	*paddr = 0;
1249	/* Allocate DMA'able memory and load the DMA map. */
1250	error = bus_dmamem_alloc(*tag, buf, BUS_DMA_WAITOK | BUS_DMA_ZERO |
1251	    BUS_DMA_COHERENT, map);
1252	if (error != 0) {
1253		device_printf(sc->sc_dev,
1254		    "could not allocate DMA'able memory for %s.\n", type);
1255		return (error);
1256	}
1257
1258	ctx.txp_busaddr = 0;
1259	error = bus_dmamap_load(*tag, *map, *(uint8_t **)buf,
1260	    size, txp_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1261	if (error != 0 || ctx.txp_busaddr == 0) {
1262		device_printf(sc->sc_dev,
1263		    "could not load DMA'able memory for %s.\n", type);
1264		return (error);
1265	}
1266	*paddr = ctx.txp_busaddr;
1267
1268	return (0);
1269}
1270
1271static void
1272txp_dma_free(struct txp_softc *sc, bus_dma_tag_t *tag, bus_dmamap_t map,
1273    void **buf, bus_addr_t *paddr)
1274{
1275
1276	if (*tag != NULL) {
1277		if (*paddr != 0)
1278			bus_dmamap_unload(*tag, map);
1279		if (buf != NULL)
1280			bus_dmamem_free(*tag, *(uint8_t **)buf, map);
1281		*(uint8_t **)buf = NULL;
1282		*paddr = 0;
1283		bus_dma_tag_destroy(*tag);
1284		*tag = NULL;
1285	}
1286}
1287
1288static int
1289txp_alloc_rings(struct txp_softc *sc)
1290{
1291	struct txp_boot_record *boot;
1292	struct txp_ldata *ld;
1293	struct txp_swdesc *txd;
1294	struct txp_rxbuf_desc *rbd;
1295	struct txp_rx_swdesc *sd;
1296	int error, i;
1297
1298	ld = &sc->sc_ldata;
1299	boot = ld->txp_boot;
1300
1301	/* boot record */
1302	sc->sc_boot = boot;
1303
1304	/*
1305	 * Create parent ring/DMA block tag.
1306	 * Datasheet says that all ring addresses and descriptors
1307	 * support 64bits addressing. However the controller is
1308	 * known to have no support DAC so limit DMA address space
1309	 * to 32bits.
1310	 */
1311	error = bus_dma_tag_create(
1312	    bus_get_dma_tag(sc->sc_dev), /* parent */
1313	    1, 0,			/* algnmnt, boundary */
1314	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1315	    BUS_SPACE_MAXADDR,		/* highaddr */
1316	    NULL, NULL,			/* filter, filterarg */
1317	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1318	    0,				/* nsegments */
1319	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1320	    0,				/* flags */
1321	    NULL, NULL,			/* lockfunc, lockarg */
1322	    &sc->sc_cdata.txp_parent_tag);
1323	if (error != 0) {
1324		device_printf(sc->sc_dev, "could not create parent DMA tag.\n");
1325		return (error);
1326	}
1327
1328	/* Boot record. */
1329	error = txp_dma_alloc(sc, "boot record",
1330	    &sc->sc_cdata.txp_boot_tag, sizeof(uint32_t), 0,
1331	    &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
1332	    sizeof(struct txp_boot_record),
1333	    &sc->sc_ldata.txp_boot_paddr);
1334	if (error != 0)
1335		return (error);
1336	boot = sc->sc_ldata.txp_boot;
1337	sc->sc_boot = boot;
1338
1339	/* Host variables. */
1340	error = txp_dma_alloc(sc, "host variables",
1341	    &sc->sc_cdata.txp_hostvar_tag, sizeof(uint32_t), 0,
1342	    &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
1343	    sizeof(struct txp_hostvar),
1344	    &sc->sc_ldata.txp_hostvar_paddr);
1345	if (error != 0)
1346		return (error);
1347	boot->br_hostvar_lo =
1348	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_hostvar_paddr));
1349	boot->br_hostvar_hi =
1350	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_hostvar_paddr));
1351	sc->sc_hostvar = sc->sc_ldata.txp_hostvar;
1352
1353	/* Hi priority tx ring. */
1354	error = txp_dma_alloc(sc, "hi priority tx ring",
1355	    &sc->sc_cdata.txp_txhiring_tag, sizeof(struct txp_tx_desc), 0,
1356	    &sc->sc_cdata.txp_txhiring_map, (void **)&sc->sc_ldata.txp_txhiring,
1357	    sizeof(struct txp_tx_desc) * TX_ENTRIES,
1358	    &sc->sc_ldata.txp_txhiring_paddr);
1359	if (error != 0)
1360		return (error);
1361	boot->br_txhipri_lo =
1362	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txhiring_paddr));
1363	boot->br_txhipri_hi =
1364	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txhiring_paddr));
1365	boot->br_txhipri_siz =
1366	    htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
1367	sc->sc_txhir.r_tag = sc->sc_cdata.txp_txhiring_tag;
1368	sc->sc_txhir.r_map = sc->sc_cdata.txp_txhiring_map;
1369	sc->sc_txhir.r_reg = TXP_H2A_1;
1370	sc->sc_txhir.r_desc = sc->sc_ldata.txp_txhiring;
1371	sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
1372	sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
1373
1374	/* Low priority tx ring. */
1375	error = txp_dma_alloc(sc, "low priority tx ring",
1376	    &sc->sc_cdata.txp_txloring_tag, sizeof(struct txp_tx_desc), 0,
1377	    &sc->sc_cdata.txp_txloring_map, (void **)&sc->sc_ldata.txp_txloring,
1378	    sizeof(struct txp_tx_desc) * TX_ENTRIES,
1379	    &sc->sc_ldata.txp_txloring_paddr);
1380	if (error != 0)
1381		return (error);
1382	boot->br_txlopri_lo =
1383	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txloring_paddr));
1384	boot->br_txlopri_hi =
1385	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txloring_paddr));
1386	boot->br_txlopri_siz =
1387	    htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
1388	sc->sc_txlor.r_tag = sc->sc_cdata.txp_txloring_tag;
1389	sc->sc_txlor.r_map = sc->sc_cdata.txp_txloring_map;
1390	sc->sc_txlor.r_reg = TXP_H2A_3;
1391	sc->sc_txlor.r_desc = sc->sc_ldata.txp_txloring;
1392	sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
1393	sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;
1394
1395	/* High priority rx ring. */
1396	error = txp_dma_alloc(sc, "hi priority rx ring",
1397	    &sc->sc_cdata.txp_rxhiring_tag,
1398	    roundup(sizeof(struct txp_rx_desc), 16), 0,
1399	    &sc->sc_cdata.txp_rxhiring_map, (void **)&sc->sc_ldata.txp_rxhiring,
1400	    sizeof(struct txp_rx_desc) * RX_ENTRIES,
1401	    &sc->sc_ldata.txp_rxhiring_paddr);
1402	if (error != 0)
1403		return (error);
1404	boot->br_rxhipri_lo =
1405	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxhiring_paddr));
1406	boot->br_rxhipri_hi =
1407	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxhiring_paddr));
1408	boot->br_rxhipri_siz =
1409	    htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
1410	sc->sc_rxhir.r_tag = sc->sc_cdata.txp_rxhiring_tag;
1411	sc->sc_rxhir.r_map = sc->sc_cdata.txp_rxhiring_map;
1412	sc->sc_rxhir.r_desc = sc->sc_ldata.txp_rxhiring;
1413	sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
1414	sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
1415
1416	/* Low priority rx ring. */
1417	error = txp_dma_alloc(sc, "low priority rx ring",
1418	    &sc->sc_cdata.txp_rxloring_tag,
1419	    roundup(sizeof(struct txp_rx_desc), 16), 0,
1420	    &sc->sc_cdata.txp_rxloring_map, (void **)&sc->sc_ldata.txp_rxloring,
1421	    sizeof(struct txp_rx_desc) * RX_ENTRIES,
1422	    &sc->sc_ldata.txp_rxloring_paddr);
1423	if (error != 0)
1424		return (error);
1425	boot->br_rxlopri_lo =
1426	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxloring_paddr));
1427	boot->br_rxlopri_hi =
1428	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxloring_paddr));
1429	boot->br_rxlopri_siz =
1430	    htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
1431	sc->sc_rxlor.r_tag = sc->sc_cdata.txp_rxloring_tag;
1432	sc->sc_rxlor.r_map = sc->sc_cdata.txp_rxloring_map;
1433	sc->sc_rxlor.r_desc = sc->sc_ldata.txp_rxloring;
1434	sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
1435	sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
1436
1437	/* Command ring. */
1438	error = txp_dma_alloc(sc, "command ring",
1439	    &sc->sc_cdata.txp_cmdring_tag, sizeof(struct txp_cmd_desc), 0,
1440	    &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
1441	    sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
1442	    &sc->sc_ldata.txp_cmdring_paddr);
1443	if (error != 0)
1444		return (error);
1445	boot->br_cmd_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_cmdring_paddr));
1446	boot->br_cmd_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_cmdring_paddr));
1447	boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
1448	sc->sc_cmdring.base = sc->sc_ldata.txp_cmdring;
1449	sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
1450	sc->sc_cmdring.lastwrite = 0;
1451
1452	/* Response ring. */
1453	error = txp_dma_alloc(sc, "response ring",
1454	    &sc->sc_cdata.txp_rspring_tag, sizeof(struct txp_rsp_desc), 0,
1455	    &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
1456	    sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
1457	    &sc->sc_ldata.txp_rspring_paddr);
1458	if (error != 0)
1459		return (error);
1460	boot->br_resp_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rspring_paddr));
1461	boot->br_resp_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rspring_paddr));
1462	boot->br_resp_siz = htole32(RSP_ENTRIES * sizeof(struct txp_rsp_desc));
1463	sc->sc_rspring.base = sc->sc_ldata.txp_rspring;
1464	sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
1465	sc->sc_rspring.lastwrite = 0;
1466
1467	/* Receive buffer ring. */
1468	error = txp_dma_alloc(sc, "receive buffer ring",
1469	    &sc->sc_cdata.txp_rxbufs_tag, sizeof(struct txp_rxbuf_desc), 0,
1470	    &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
1471	    sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
1472	    &sc->sc_ldata.txp_rxbufs_paddr);
1473	if (error != 0)
1474		return (error);
1475	boot->br_rxbuf_lo =
1476	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxbufs_paddr));
1477	boot->br_rxbuf_hi =
1478	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxbufs_paddr));
1479	boot->br_rxbuf_siz =
1480	    htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
1481	sc->sc_rxbufs = sc->sc_ldata.txp_rxbufs;
1482
1483	/* Zero ring. */
1484	error = txp_dma_alloc(sc, "zero buffer",
1485	    &sc->sc_cdata.txp_zero_tag, sizeof(uint32_t), 0,
1486	    &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
1487	    sizeof(uint32_t), &sc->sc_ldata.txp_zero_paddr);
1488	if (error != 0)
1489		return (error);
1490	boot->br_zero_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_zero_paddr));
1491	boot->br_zero_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_zero_paddr));
1492
1493	bus_dmamap_sync(sc->sc_cdata.txp_boot_tag, sc->sc_cdata.txp_boot_map,
1494	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1495
1496	/* Create Tx buffers. */
1497	error = bus_dma_tag_create(
1498	    sc->sc_cdata.txp_parent_tag,	/* parent */
1499	    1, 0,			/* algnmnt, boundary */
1500	    BUS_SPACE_MAXADDR,		/* lowaddr */
1501	    BUS_SPACE_MAXADDR,		/* highaddr */
1502	    NULL, NULL,			/* filter, filterarg */
1503	    MCLBYTES * TXP_MAXTXSEGS,	/* maxsize */
1504	    TXP_MAXTXSEGS,		/* nsegments */
1505	    MCLBYTES,			/* maxsegsize */
1506	    0,				/* flags */
1507	    NULL, NULL,			/* lockfunc, lockarg */
1508	    &sc->sc_cdata.txp_tx_tag);
1509	if (error != 0) {
1510		device_printf(sc->sc_dev, "could not create Tx DMA tag.\n");
1511		goto fail;
1512	}
1513
1514	/* Create tag for Rx buffers. */
1515	error = bus_dma_tag_create(
1516	    sc->sc_cdata.txp_parent_tag,	/* parent */
1517	    TXP_RXBUF_ALIGN, 0,		/* algnmnt, boundary */
1518	    BUS_SPACE_MAXADDR,		/* lowaddr */
1519	    BUS_SPACE_MAXADDR,		/* highaddr */
1520	    NULL, NULL,			/* filter, filterarg */
1521	    MCLBYTES,			/* maxsize */
1522	    1,				/* nsegments */
1523	    MCLBYTES,			/* maxsegsize */
1524	    0,				/* flags */
1525	    NULL, NULL,			/* lockfunc, lockarg */
1526	    &sc->sc_cdata.txp_rx_tag);
1527	if (error != 0) {
1528		device_printf(sc->sc_dev, "could not create Rx DMA tag.\n");
1529		goto fail;
1530	}
1531
1532	/* Create DMA maps for Tx buffers. */
1533	for (i = 0; i < TX_ENTRIES; i++) {
1534		txd = &sc->sc_txd[i];
1535		txd->sd_mbuf = NULL;
1536		txd->sd_map = NULL;
1537		error = bus_dmamap_create(sc->sc_cdata.txp_tx_tag, 0,
1538		    &txd->sd_map);
1539		if (error != 0) {
1540			device_printf(sc->sc_dev,
1541			    "could not create Tx dmamap.\n");
1542			goto fail;
1543		}
1544	}
1545
1546	/* Create DMA maps for Rx buffers. */
1547	for (i = 0; i < RXBUF_ENTRIES; i++) {
1548		sd = malloc(sizeof(struct txp_rx_swdesc), M_DEVBUF,
1549		    M_NOWAIT | M_ZERO);
1550		if (sd == NULL) {
1551			error = ENOMEM;
1552			goto fail;
1553		}
1554		/*
1555		 * The virtual address part of descriptor is not used
1556		 * by hardware so use that to save an ring entry. We
1557		 * need bcopy here otherwise the address wouldn't be
1558		 * valid on big-endian architectures.
1559		 */
1560		rbd = sc->sc_rxbufs + i;
1561		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
1562		sd->sd_mbuf = NULL;
1563		sd->sd_map = NULL;
1564		error = bus_dmamap_create(sc->sc_cdata.txp_rx_tag, 0,
1565		    &sd->sd_map);
1566		if (error != 0) {
1567			device_printf(sc->sc_dev,
1568			    "could not create Rx dmamap.\n");
1569			goto fail;
1570		}
1571		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
1572	}
1573
1574fail:
1575	return (error);
1576}
1577
1578static void
1579txp_init_rings(struct txp_softc *sc)
1580{
1581
1582	bzero(sc->sc_ldata.txp_hostvar, sizeof(struct txp_hostvar));
1583	bzero(sc->sc_ldata.txp_zero, sizeof(uint32_t));
1584	sc->sc_txhir.r_cons = 0;
1585	sc->sc_txhir.r_prod = 0;
1586	sc->sc_txhir.r_cnt = 0;
1587	sc->sc_txlor.r_cons = 0;
1588	sc->sc_txlor.r_prod = 0;
1589	sc->sc_txlor.r_cnt = 0;
1590	sc->sc_cmdring.lastwrite = 0;
1591	sc->sc_rspring.lastwrite = 0;
1592	sc->sc_rxbufprod = 0;
1593	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1594	    sc->sc_cdata.txp_hostvar_map,
1595	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1596}
1597
1598static int
1599txp_wait(struct txp_softc *sc, uint32_t state)
1600{
1601	uint32_t reg;
1602	int i;
1603
1604	for (i = 0; i < TXP_TIMEOUT; i++) {
1605		reg = READ_REG(sc, TXP_A2H_0);
1606		if (reg == state)
1607			break;
1608		DELAY(50);
1609	}
1610
1611	return (i == TXP_TIMEOUT ? ETIMEDOUT : 0);
1612}
1613
1614static void
1615txp_free_rings(struct txp_softc *sc)
1616{
1617	struct txp_swdesc *txd;
1618	struct txp_rx_swdesc *sd;
1619	int i;
1620
1621	/* Tx buffers. */
1622	if (sc->sc_cdata.txp_tx_tag != NULL) {
1623		for (i = 0; i < TX_ENTRIES; i++) {
1624			txd = &sc->sc_txd[i];
1625			if (txd->sd_map != NULL) {
1626				bus_dmamap_destroy(sc->sc_cdata.txp_tx_tag,
1627				    txd->sd_map);
1628				txd->sd_map = NULL;
1629			}
1630		}
1631		bus_dma_tag_destroy(sc->sc_cdata.txp_tx_tag);
1632		sc->sc_cdata.txp_tx_tag = NULL;
1633	}
1634	/* Rx buffers. */
1635	if (sc->sc_cdata.txp_rx_tag != NULL) {
1636		if (sc->sc_rxbufs != NULL) {
1637			KASSERT(TAILQ_FIRST(&sc->sc_busy_list) == NULL,
1638			    ("%s : still have busy Rx buffers", __func__));
1639			while ((sd = TAILQ_FIRST(&sc->sc_free_list)) != NULL) {
1640				TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1641				if (sd->sd_map != NULL) {
1642					bus_dmamap_destroy(
1643					    sc->sc_cdata.txp_rx_tag,
1644					    sd->sd_map);
1645					sd->sd_map = NULL;
1646				}
1647				free(sd, M_DEVBUF);
1648			}
1649		}
1650		bus_dma_tag_destroy(sc->sc_cdata.txp_rx_tag);
1651		sc->sc_cdata.txp_rx_tag = NULL;
1652	}
1653
1654	/* Hi priority Tx ring. */
1655	txp_dma_free(sc, &sc->sc_cdata.txp_txhiring_tag,
1656	    sc->sc_cdata.txp_txhiring_map,
1657	    (void **)&sc->sc_ldata.txp_txhiring,
1658	    &sc->sc_ldata.txp_txhiring_paddr);
1659	/* Low priority Tx ring. */
1660	txp_dma_free(sc, &sc->sc_cdata.txp_txloring_tag,
1661	    sc->sc_cdata.txp_txloring_map,
1662	    (void **)&sc->sc_ldata.txp_txloring,
1663	    &sc->sc_ldata.txp_txloring_paddr);
1664	/* Hi priority Rx ring. */
1665	txp_dma_free(sc, &sc->sc_cdata.txp_rxhiring_tag,
1666	    sc->sc_cdata.txp_rxhiring_map,
1667	    (void **)&sc->sc_ldata.txp_rxhiring,
1668	    &sc->sc_ldata.txp_rxhiring_paddr);
1669	/* Low priority Rx ring. */
1670	txp_dma_free(sc, &sc->sc_cdata.txp_rxloring_tag,
1671	    sc->sc_cdata.txp_rxloring_map,
1672	    (void **)&sc->sc_ldata.txp_rxloring,
1673	    &sc->sc_ldata.txp_rxloring_paddr);
1674	/* Receive buffer ring. */
1675	txp_dma_free(sc, &sc->sc_cdata.txp_rxbufs_tag,
1676	    sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
1677	    &sc->sc_ldata.txp_rxbufs_paddr);
1678	/* Command ring. */
1679	txp_dma_free(sc, &sc->sc_cdata.txp_cmdring_tag,
1680	    sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
1681	    &sc->sc_ldata.txp_cmdring_paddr);
1682	/* Response ring. */
1683	txp_dma_free(sc, &sc->sc_cdata.txp_rspring_tag,
1684	    sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
1685	    &sc->sc_ldata.txp_rspring_paddr);
1686	/* Zero ring. */
1687	txp_dma_free(sc, &sc->sc_cdata.txp_zero_tag,
1688	    sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
1689	    &sc->sc_ldata.txp_zero_paddr);
1690	/* Host variables. */
1691	txp_dma_free(sc, &sc->sc_cdata.txp_hostvar_tag,
1692	    sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
1693	    &sc->sc_ldata.txp_hostvar_paddr);
1694	/* Boot record. */
1695	txp_dma_free(sc, &sc->sc_cdata.txp_boot_tag,
1696	    sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
1697	    &sc->sc_ldata.txp_boot_paddr);
1698
1699	if (sc->sc_cdata.txp_parent_tag != NULL) {
1700		bus_dma_tag_destroy(sc->sc_cdata.txp_parent_tag);
1701		sc->sc_cdata.txp_parent_tag = NULL;
1702	}
1703
1704}
1705
1706static int
1707txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1708{
1709	struct txp_softc *sc = ifp->if_softc;
1710	struct ifreq *ifr = (struct ifreq *)data;
1711	int capenable, error = 0, mask;
1712
1713	switch(command) {
1714	case SIOCSIFFLAGS:
1715		TXP_LOCK(sc);
1716		if ((ifp->if_flags & IFF_UP) != 0) {
1717			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1718				if (((ifp->if_flags ^ sc->sc_if_flags)
1719				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1720					txp_set_filter(sc);
1721			} else {
1722				if ((sc->sc_flags & TXP_FLAG_DETACH) == 0)
1723					txp_init_locked(sc);
1724			}
1725		} else {
1726			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1727				txp_stop(sc);
1728		}
1729		sc->sc_if_flags = ifp->if_flags;
1730		TXP_UNLOCK(sc);
1731		break;
1732	case SIOCADDMULTI:
1733	case SIOCDELMULTI:
1734		/*
1735		 * Multicast list has changed; set the hardware
1736		 * filter accordingly.
1737		 */
1738		TXP_LOCK(sc);
1739		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1740			txp_set_filter(sc);
1741		TXP_UNLOCK(sc);
1742		break;
1743	case SIOCSIFCAP:
1744		TXP_LOCK(sc);
1745		capenable = ifp->if_capenable;
1746		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1747		if ((mask & IFCAP_TXCSUM) != 0 &&
1748		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
1749			ifp->if_capenable ^= IFCAP_TXCSUM;
1750			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1751				ifp->if_hwassist |= TXP_CSUM_FEATURES;
1752			else
1753				ifp->if_hwassist &= ~TXP_CSUM_FEATURES;
1754		}
1755		if ((mask & IFCAP_RXCSUM) != 0 &&
1756		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
1757			ifp->if_capenable ^= IFCAP_RXCSUM;
1758		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1759		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
1760			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1761		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1762		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0)
1763			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1764		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
1765		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
1766			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1767		if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
1768			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
1769		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
1770			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
1771		if (capenable != ifp->if_capenable)
1772			txp_set_capabilities(sc);
1773		TXP_UNLOCK(sc);
1774		VLAN_CAPABILITIES(ifp);
1775		break;
1776	case SIOCGIFMEDIA:
1777	case SIOCSIFMEDIA:
1778		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
1779		break;
1780	default:
1781		error = ether_ioctl(ifp, command, data);
1782		break;
1783	}
1784
1785	return (error);
1786}
1787
1788static int
1789txp_rxring_fill(struct txp_softc *sc)
1790{
1791	struct txp_rxbuf_desc *rbd;
1792	struct txp_rx_swdesc *sd;
1793	bus_dma_segment_t segs[1];
1794	int error, i, nsegs;
1795
1796	TXP_LOCK_ASSERT(sc);
1797
1798	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1799	    sc->sc_cdata.txp_rxbufs_map,
1800	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1801
1802	for (i = 0; i < RXBUF_ENTRIES; i++) {
1803		sd = TAILQ_FIRST(&sc->sc_free_list);
1804		if (sd == NULL)
1805			return (ENOMEM);
1806		rbd = sc->sc_rxbufs + i;
1807		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
1808		KASSERT(sd->sd_mbuf == NULL,
1809		    ("%s : Rx buffer ring corrupted", __func__));
1810		sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1811		if (sd->sd_mbuf == NULL)
1812			return (ENOMEM);
1813		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
1814#ifndef __NO_STRICT_ALIGNMENT
1815		m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
1816#endif
1817		if ((error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
1818		    sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0)) != 0) {
1819			m_freem(sd->sd_mbuf);
1820			sd->sd_mbuf = NULL;
1821			return (error);
1822		}
1823		KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
1824		    nsegs));
1825		TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1826		TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
1827		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1828		    BUS_DMASYNC_PREREAD);
1829		rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
1830		rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
1831	}
1832
1833	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1834	    sc->sc_cdata.txp_rxbufs_map,
1835	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1836	sc->sc_rxbufprod = RXBUF_ENTRIES - 1;
1837	sc->sc_hostvar->hv_rx_buf_write_idx =
1838	    htole32(TXP_IDX2OFFSET(RXBUF_ENTRIES - 1));
1839
1840	return (0);
1841}
1842
1843static void
1844txp_rxring_empty(struct txp_softc *sc)
1845{
1846	struct txp_rx_swdesc *sd;
1847	int cnt;
1848
1849	TXP_LOCK_ASSERT(sc);
1850
1851	if (sc->sc_rxbufs == NULL)
1852		return;
1853	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1854	    sc->sc_cdata.txp_hostvar_map,
1855	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1856
1857	/* Release allocated Rx buffers. */
1858	cnt = 0;
1859	while ((sd = TAILQ_FIRST(&sc->sc_busy_list)) != NULL) {
1860		TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
1861		KASSERT(sd->sd_mbuf != NULL,
1862		    ("%s : Rx buffer ring corrupted", __func__));
1863		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1864		    BUS_DMASYNC_POSTREAD);
1865		bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
1866		m_freem(sd->sd_mbuf);
1867		sd->sd_mbuf = NULL;
1868		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
1869		cnt++;
1870	}
1871}
1872
1873static void
1874txp_init(void *xsc)
1875{
1876	struct txp_softc *sc;
1877
1878	sc = xsc;
1879	TXP_LOCK(sc);
1880	txp_init_locked(sc);
1881	TXP_UNLOCK(sc);
1882}
1883
1884static void
1885txp_init_locked(struct txp_softc *sc)
1886{
1887	struct ifnet *ifp;
1888	uint8_t *eaddr;
1889	uint16_t p1;
1890	uint32_t p2;
1891	int error;
1892
1893	TXP_LOCK_ASSERT(sc);
1894	ifp = sc->sc_ifp;
1895
1896	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1897		return;
1898
1899	/* Initialize ring structure. */
1900	txp_init_rings(sc);
1901	/* Wakeup controller. */
1902	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_WAKEUP);
1903	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
1904	/*
1905	 * It seems that earlier NV image can go back to online from
1906	 * wakeup command but newer ones require controller reset.
1907	 * So jut reset controller again.
1908	 */
1909	if (txp_reset(sc) != 0)
1910		goto init_fail;
1911	/* Download firmware. */
1912	error = txp_download_fw(sc);
1913	if (error != 0) {
1914		device_printf(sc->sc_dev, "could not download firmware.\n");
1915		goto init_fail;
1916	}
1917	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1918	    sc->sc_cdata.txp_hostvar_map,
1919	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1920	if ((error = txp_rxring_fill(sc)) != 0) {
1921		device_printf(sc->sc_dev, "no memory for Rx buffers.\n");
1922		goto init_fail;
1923	}
1924	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1925	    sc->sc_cdata.txp_hostvar_map,
1926	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1927	if (txp_boot(sc, STAT_WAITING_FOR_BOOT) != 0) {
1928		device_printf(sc->sc_dev, "could not boot firmware.\n");
1929		goto init_fail;
1930	}
1931
1932	/*
1933	 * Quite contrary to Typhoon T2 software functional specification,
1934	 * it seems that TXP_CMD_RECV_BUFFER_CONTROL command is not
1935	 * implemented in the firmware. This means driver should have to
1936	 * handle misaligned frames on alignment architectures. AFAIK this
1937	 * is the only controller manufactured by 3Com that has this stupid
1938	 * bug. 3Com should fix this.
1939	 */
1940	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
1941	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1942		goto init_fail;
1943	/* Undocumented command(interrupt coalescing disable?) - From Linux. */
1944	if (txp_command(sc, TXP_CMD_FILTER_DEFINE, 0, 0, 0, NULL, NULL, NULL,
1945	    TXP_CMD_NOWAIT) != 0)
1946		goto init_fail;
1947
1948	/* Set station address. */
1949	eaddr = IF_LLADDR(sc->sc_ifp);
1950	p1 = 0;
1951	((uint8_t *)&p1)[1] = eaddr[0];
1952	((uint8_t *)&p1)[0] = eaddr[1];
1953	p1 = le16toh(p1);
1954	((uint8_t *)&p2)[3] = eaddr[2];
1955	((uint8_t *)&p2)[2] = eaddr[3];
1956	((uint8_t *)&p2)[1] = eaddr[4];
1957	((uint8_t *)&p2)[0] = eaddr[5];
1958	p2 = le32toh(p2);
1959	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0,
1960	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1961		goto init_fail;
1962
1963	txp_set_filter(sc);
1964	txp_set_capabilities(sc);
1965
1966	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
1967	    NULL, NULL, NULL, TXP_CMD_NOWAIT))
1968		goto init_fail;
1969	if (txp_command(sc, TXP_CMD_XCVR_SELECT, sc->sc_xcvr, 0, 0,
1970	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1971		goto init_fail;
1972	if (txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
1973	    TXP_CMD_NOWAIT) != 0)
1974		goto init_fail;
1975	if (txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
1976	    TXP_CMD_NOWAIT) != 0)
1977		goto init_fail;
1978
1979	/* Ack all pending interrupts and enable interrupts. */
1980	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
1981	WRITE_REG(sc, TXP_IER, TXP_INTRS);
1982	WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
1983
1984	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1985	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1986
1987	callout_reset(&sc->sc_tick, hz, txp_tick, sc);
1988	return;
1989
1990init_fail:
1991	txp_rxring_empty(sc);
1992	txp_init_rings(sc);
1993	txp_reset(sc);
1994	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
1995}
1996
1997static void
1998txp_tick(void *vsc)
1999{
2000	struct txp_softc *sc;
2001	struct ifnet *ifp;
2002	struct txp_rsp_desc *rsp;
2003	struct txp_ext_desc *ext;
2004	int link;
2005
2006	sc = vsc;
2007	TXP_LOCK_ASSERT(sc);
2008	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2009	    sc->sc_cdata.txp_hostvar_map,
2010	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2011	txp_rxbuf_reclaim(sc);
2012	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2013	    sc->sc_cdata.txp_hostvar_map,
2014	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2015
2016	ifp = sc->sc_ifp;
2017	rsp = NULL;
2018
2019	link = sc->sc_flags & TXP_FLAG_LINK;
2020	if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
2021	    &rsp, TXP_CMD_WAIT))
2022		goto out;
2023	if (rsp->rsp_numdesc != 6)
2024		goto out;
2025	txp_stats_update(sc, rsp);
2026	if (link == 0 && (sc->sc_flags & TXP_FLAG_LINK) != 0) {
2027		ext = (struct txp_ext_desc *)(rsp + 1);
2028		/* Update baudrate with resolved speed. */
2029		if ((ext[5].ext_2 & 0x02) != 0)
2030			ifp->if_baudrate = IF_Mbps(100);
2031		else
2032			ifp->if_baudrate = IF_Mbps(10);
2033	}
2034
2035out:
2036	if (rsp != NULL)
2037		free(rsp, M_DEVBUF);
2038	txp_watchdog(sc);
2039	callout_reset(&sc->sc_tick, hz, txp_tick, sc);
2040}
2041
2042static void
2043txp_start(struct ifnet *ifp)
2044{
2045	struct txp_softc *sc;
2046
2047	sc = ifp->if_softc;
2048	TXP_LOCK(sc);
2049	txp_start_locked(ifp);
2050	TXP_UNLOCK(sc);
2051}
2052
2053static void
2054txp_start_locked(struct ifnet *ifp)
2055{
2056	struct txp_softc *sc;
2057	struct mbuf *m_head;
2058	int enq;
2059
2060	sc = ifp->if_softc;
2061	TXP_LOCK_ASSERT(sc);
2062
2063	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2064	   IFF_DRV_RUNNING || (sc->sc_flags & TXP_FLAG_LINK) == 0)
2065		return;
2066
2067	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
2068		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2069		if (m_head == NULL)
2070			break;
2071		/*
2072		 * Pack the data into the transmit ring. If we
2073		 * don't have room, set the OACTIVE flag and wait
2074		 * for the NIC to drain the ring.
2075		 * ATM only Hi-ring is used.
2076		 */
2077		if (txp_encap(sc, &sc->sc_txhir, &m_head)) {
2078			if (m_head == NULL)
2079				break;
2080			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2081			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2082			break;
2083		}
2084
2085		/*
2086		 * If there's a BPF listener, bounce a copy of this frame
2087		 * to him.
2088		 */
2089		ETHER_BPF_MTAP(ifp, m_head);
2090
2091		/* Send queued frame. */
2092		WRITE_REG(sc, sc->sc_txhir.r_reg,
2093		    TXP_IDX2OFFSET(sc->sc_txhir.r_prod));
2094	}
2095
2096	if (enq > 0) {
2097		/* Set a timeout in case the chip goes out to lunch. */
2098		sc->sc_watchdog_timer = TXP_TX_TIMEOUT;
2099	}
2100}
2101
2102static int
2103txp_encap(struct txp_softc *sc, struct txp_tx_ring *r, struct mbuf **m_head)
2104{
2105	struct txp_tx_desc *first_txd;
2106	struct txp_frag_desc *fxd;
2107	struct txp_swdesc *sd;
2108	struct mbuf *m;
2109	bus_dma_segment_t txsegs[TXP_MAXTXSEGS];
2110	int error, i, nsegs;
2111
2112	TXP_LOCK_ASSERT(sc);
2113
2114	M_ASSERTPKTHDR((*m_head));
2115
2116	m = *m_head;
2117	first_txd = r->r_desc + r->r_prod;
2118	sd = sc->sc_txd + r->r_prod;
2119
2120	error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag, sd->sd_map,
2121	    *m_head, txsegs, &nsegs, 0);
2122	if (error == EFBIG) {
2123		m = m_collapse(*m_head, M_NOWAIT, TXP_MAXTXSEGS);
2124		if (m == NULL) {
2125			m_freem(*m_head);
2126			*m_head = NULL;
2127			return (ENOMEM);
2128		}
2129		*m_head = m;
2130		error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag,
2131		    sd->sd_map, *m_head, txsegs, &nsegs, 0);
2132		if (error != 0) {
2133			m_freem(*m_head);
2134			*m_head = NULL;
2135			return (error);
2136		}
2137	} else if (error != 0)
2138		return (error);
2139	if (nsegs == 0) {
2140		m_freem(*m_head);
2141		*m_head = NULL;
2142		return (EIO);
2143	}
2144
2145	/* Check descriptor overrun. */
2146	if (r->r_cnt + nsegs >= TX_ENTRIES - TXP_TXD_RESERVED) {
2147		bus_dmamap_unload(sc->sc_cdata.txp_tx_tag, sd->sd_map);
2148		return (ENOBUFS);
2149	}
2150	bus_dmamap_sync(sc->sc_cdata.txp_tx_tag, sd->sd_map,
2151	    BUS_DMASYNC_PREWRITE);
2152	sd->sd_mbuf = m;
2153
2154	first_txd->tx_flags = TX_FLAGS_TYPE_DATA;
2155	first_txd->tx_numdesc = 0;
2156	first_txd->tx_addrlo = 0;
2157	first_txd->tx_addrhi = 0;
2158	first_txd->tx_totlen = 0;
2159	first_txd->tx_pflags = 0;
2160	r->r_cnt++;
2161	TXP_DESC_INC(r->r_prod, TX_ENTRIES);
2162
2163	/* Configure Tx IP/TCP/UDP checksum offload. */
2164	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2165		first_txd->tx_pflags |= htole32(TX_PFLAGS_IPCKSUM);
2166#ifdef notyet
2167	/* XXX firmware bug. */
2168	if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2169		first_txd->tx_pflags |= htole32(TX_PFLAGS_TCPCKSUM);
2170	if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2171		first_txd->tx_pflags |= htole32(TX_PFLAGS_UDPCKSUM);
2172#endif
2173
2174	/* Configure VLAN hardware tag insertion. */
2175	if ((m->m_flags & M_VLANTAG) != 0)
2176		first_txd->tx_pflags |=
2177		    htole32(TX_PFLAGS_VLAN | TX_PFLAGS_PRIO |
2178		    (bswap16(m->m_pkthdr.ether_vtag) << TX_PFLAGS_VLANTAG_S));
2179
2180	for (i = 0; i < nsegs; i++) {
2181		fxd = (struct txp_frag_desc *)(r->r_desc + r->r_prod);
2182		fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG | TX_FLAGS_VALID;
2183		fxd->frag_rsvd1 = 0;
2184		fxd->frag_len = htole16(txsegs[i].ds_len);
2185		fxd->frag_addrhi = htole32(TXP_ADDR_HI(txsegs[i].ds_addr));
2186		fxd->frag_addrlo = htole32(TXP_ADDR_LO(txsegs[i].ds_addr));
2187		fxd->frag_rsvd2 = 0;
2188		first_txd->tx_numdesc++;
2189		r->r_cnt++;
2190		TXP_DESC_INC(r->r_prod, TX_ENTRIES);
2191	}
2192
2193	/* Lastly set valid flag. */
2194	first_txd->tx_flags |= TX_FLAGS_VALID;
2195
2196	/* Sync descriptors. */
2197	bus_dmamap_sync(r->r_tag, r->r_map,
2198	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2199
2200	return (0);
2201}
2202
2203/*
2204 * Handle simple commands sent to the typhoon
2205 */
2206static int
2207txp_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
2208    uint32_t in3, uint16_t *out1, uint32_t *out2, uint32_t *out3, int wait)
2209{
2210	struct txp_rsp_desc *rsp;
2211
2212	rsp = NULL;
2213	if (txp_ext_command(sc, id, in1, in2, in3, NULL, 0, &rsp, wait) != 0) {
2214		device_printf(sc->sc_dev, "command 0x%02x failed\n", id);
2215		return (-1);
2216	}
2217
2218	if (wait == TXP_CMD_NOWAIT)
2219		return (0);
2220
2221	KASSERT(rsp != NULL, ("rsp is NULL!\n"));
2222	if (out1 != NULL)
2223		*out1 = le16toh(rsp->rsp_par1);
2224	if (out2 != NULL)
2225		*out2 = le32toh(rsp->rsp_par2);
2226	if (out3 != NULL)
2227		*out3 = le32toh(rsp->rsp_par3);
2228	free(rsp, M_DEVBUF);
2229	return (0);
2230}
2231
2232static int
2233txp_ext_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
2234    uint32_t in3, struct txp_ext_desc *in_extp, uint8_t in_extn,
2235    struct txp_rsp_desc **rspp, int wait)
2236{
2237	struct txp_hostvar *hv;
2238	struct txp_cmd_desc *cmd;
2239	struct txp_ext_desc *ext;
2240	uint32_t idx, i;
2241	uint16_t seq;
2242	int error;
2243
2244	error = 0;
2245	hv = sc->sc_hostvar;
2246	if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
2247		device_printf(sc->sc_dev,
2248		    "%s : out of free cmd descriptors for command 0x%02x\n",
2249		    __func__, id);
2250		return (ENOBUFS);
2251	}
2252
2253	bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
2254	    sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_POSTWRITE);
2255	idx = sc->sc_cmdring.lastwrite;
2256	cmd = (struct txp_cmd_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
2257	bzero(cmd, sizeof(*cmd));
2258
2259	cmd->cmd_numdesc = in_extn;
2260	seq = sc->sc_seq++;
2261	cmd->cmd_seq = htole16(seq);
2262	cmd->cmd_id = htole16(id);
2263	cmd->cmd_par1 = htole16(in1);
2264	cmd->cmd_par2 = htole32(in2);
2265	cmd->cmd_par3 = htole32(in3);
2266	cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
2267	    (wait == TXP_CMD_WAIT ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;
2268
2269	idx += sizeof(struct txp_cmd_desc);
2270	if (idx == sc->sc_cmdring.size)
2271		idx = 0;
2272
2273	for (i = 0; i < in_extn; i++) {
2274		ext = (struct txp_ext_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
2275		bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
2276		in_extp++;
2277		idx += sizeof(struct txp_cmd_desc);
2278		if (idx == sc->sc_cmdring.size)
2279			idx = 0;
2280	}
2281
2282	sc->sc_cmdring.lastwrite = idx;
2283	bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
2284	    sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_PREWRITE);
2285	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2286	    sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_PREREAD |
2287	    BUS_DMASYNC_PREWRITE);
2288	WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
2289	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
2290
2291	if (wait == TXP_CMD_NOWAIT)
2292		return (0);
2293
2294	for (i = 0; i < TXP_TIMEOUT; i++) {
2295		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2296		    sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_POSTREAD |
2297		    BUS_DMASYNC_POSTWRITE);
2298		if (le32toh(hv->hv_resp_read_idx) !=
2299		    le32toh(hv->hv_resp_write_idx)) {
2300			error = txp_response(sc, id, seq, rspp);
2301			bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2302			    sc->sc_cdata.txp_hostvar_map,
2303			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2304			if (error != 0)
2305				return (error);
2306 			if (*rspp != NULL)
2307				break;
2308		}
2309		DELAY(50);
2310	}
2311	if (i == TXP_TIMEOUT) {
2312		device_printf(sc->sc_dev, "command 0x%02x timedout\n", id);
2313		error = ETIMEDOUT;
2314	}
2315
2316	return (error);
2317}
2318
2319static int
2320txp_response(struct txp_softc *sc, uint16_t id, uint16_t seq,
2321    struct txp_rsp_desc **rspp)
2322{
2323	struct txp_hostvar *hv;
2324	struct txp_rsp_desc *rsp;
2325	uint32_t ridx;
2326
2327	bus_dmamap_sync(sc->sc_cdata.txp_rspring_tag,
2328	    sc->sc_cdata.txp_rspring_map, BUS_DMASYNC_POSTREAD);
2329	hv = sc->sc_hostvar;
2330	ridx = le32toh(hv->hv_resp_read_idx);
2331	while (ridx != le32toh(hv->hv_resp_write_idx)) {
2332		rsp = (struct txp_rsp_desc *)(((uint8_t *)sc->sc_rspring.base) + ridx);
2333
2334		if (id == le16toh(rsp->rsp_id) &&
2335		    le16toh(rsp->rsp_seq) == seq) {
2336			*rspp = (struct txp_rsp_desc *)malloc(
2337			    sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
2338			    M_DEVBUF, M_NOWAIT);
2339			if (*rspp == NULL) {
2340				device_printf(sc->sc_dev,"%s : command 0x%02x "
2341				    "memory allocation failure\n",
2342				    __func__, id);
2343				return (ENOMEM);
2344			}
2345			txp_rsp_fixup(sc, rsp, *rspp);
2346			return (0);
2347		}
2348
2349		if ((rsp->rsp_flags & RSP_FLAGS_ERROR) != 0) {
2350			device_printf(sc->sc_dev,
2351			    "%s : command 0x%02x response error!\n", __func__,
2352			    le16toh(rsp->rsp_id));
2353			txp_rsp_fixup(sc, rsp, NULL);
2354			ridx = le32toh(hv->hv_resp_read_idx);
2355			continue;
2356		}
2357
2358		/*
2359		 * The following unsolicited responses are handled during
2360		 * processing of TXP_CMD_READ_STATISTICS which requires
2361		 * response. Driver abuses the command to detect media
2362		 * status change.
2363		 * TXP_CMD_FILTER_DEFINE is not an unsolicited response
2364		 * but we don't process response ring in interrupt handler
2365		 * so we have to ignore this command here, otherwise
2366		 * unknown command message would be printed.
2367		 */
2368		switch (le16toh(rsp->rsp_id)) {
2369		case TXP_CMD_CYCLE_STATISTICS:
2370		case TXP_CMD_FILTER_DEFINE:
2371			break;
2372		case TXP_CMD_MEDIA_STATUS_READ:
2373			if ((le16toh(rsp->rsp_par1) & 0x0800) == 0) {
2374				sc->sc_flags |= TXP_FLAG_LINK;
2375				if_link_state_change(sc->sc_ifp,
2376				    LINK_STATE_UP);
2377			} else {
2378				sc->sc_flags &= ~TXP_FLAG_LINK;
2379				if_link_state_change(sc->sc_ifp,
2380				    LINK_STATE_DOWN);
2381			}
2382			break;
2383		case TXP_CMD_HELLO_RESPONSE:
2384			/*
2385			 * Driver should repsond to hello message but
2386			 * TXP_CMD_READ_STATISTICS is issued for every
2387			 * hz, therefore there is no need to send an
2388			 * explicit command here.
2389			 */
2390			device_printf(sc->sc_dev, "%s : hello\n", __func__);
2391			break;
2392		default:
2393			device_printf(sc->sc_dev,
2394			    "%s : unknown command 0x%02x\n", __func__,
2395			    le16toh(rsp->rsp_id));
2396		}
2397		txp_rsp_fixup(sc, rsp, NULL);
2398		ridx = le32toh(hv->hv_resp_read_idx);
2399	}
2400
2401	return (0);
2402}
2403
2404static void
2405txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
2406    struct txp_rsp_desc *dst)
2407{
2408	struct txp_rsp_desc *src;
2409	struct txp_hostvar *hv;
2410	uint32_t i, ridx;
2411
2412	src = rsp;
2413	hv = sc->sc_hostvar;
2414	ridx = le32toh(hv->hv_resp_read_idx);
2415
2416	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
2417		if (dst != NULL)
2418			bcopy(src, dst++, sizeof(struct txp_rsp_desc));
2419		ridx += sizeof(struct txp_rsp_desc);
2420		if (ridx == sc->sc_rspring.size) {
2421			src = sc->sc_rspring.base;
2422			ridx = 0;
2423		} else
2424			src++;
2425		sc->sc_rspring.lastwrite = ridx;
2426	}
2427
2428	hv->hv_resp_read_idx = htole32(ridx);
2429}
2430
2431static int
2432txp_cmd_desc_numfree(struct txp_softc *sc)
2433{
2434	struct txp_hostvar *hv;
2435	struct txp_boot_record *br;
2436	uint32_t widx, ridx, nfree;
2437
2438	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2439	    sc->sc_cdata.txp_hostvar_map,
2440	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2441	hv = sc->sc_hostvar;
2442	br = sc->sc_boot;
2443	widx = sc->sc_cmdring.lastwrite;
2444	ridx = le32toh(hv->hv_cmd_read_idx);
2445
2446	if (widx == ridx) {
2447		/* Ring is completely free */
2448		nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
2449	} else {
2450		if (widx > ridx)
2451			nfree = le32toh(br->br_cmd_siz) -
2452			    (widx - ridx + sizeof(struct txp_cmd_desc));
2453		else
2454			nfree = ridx - widx - sizeof(struct txp_cmd_desc);
2455	}
2456
2457	return (nfree / sizeof(struct txp_cmd_desc));
2458}
2459
2460static int
2461txp_sleep(struct txp_softc *sc, int capenable)
2462{
2463	uint16_t events;
2464	int error;
2465
2466	events = 0;
2467	if ((capenable & IFCAP_WOL_MAGIC) != 0)
2468		events |= 0x01;
2469	error = txp_command(sc, TXP_CMD_ENABLE_WAKEUP_EVENTS, events, 0, 0,
2470	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
2471	if (error == 0) {
2472		/* Goto sleep. */
2473		error = txp_command(sc, TXP_CMD_GOTO_SLEEP, 0, 0, 0, NULL,
2474		    NULL, NULL, TXP_CMD_NOWAIT);
2475		if (error == 0) {
2476			error = txp_wait(sc, STAT_SLEEPING);
2477			if (error != 0)
2478				device_printf(sc->sc_dev,
2479				    "unable to enter into sleep\n");
2480		}
2481	}
2482
2483	return (error);
2484}
2485
2486static void
2487txp_stop(struct txp_softc *sc)
2488{
2489	struct ifnet *ifp;
2490
2491	TXP_LOCK_ASSERT(sc);
2492	ifp = sc->sc_ifp;
2493
2494	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2495		return;
2496
2497	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
2498	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
2499
2500	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2501	sc->sc_flags &= ~TXP_FLAG_LINK;
2502
2503	callout_stop(&sc->sc_tick);
2504
2505	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
2506	    TXP_CMD_NOWAIT);
2507	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
2508	    TXP_CMD_NOWAIT);
2509	/* Save statistics for later use. */
2510	txp_stats_save(sc);
2511	/* Halt controller. */
2512	txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL,
2513	    TXP_CMD_NOWAIT);
2514
2515	if (txp_wait(sc, STAT_HALTED) != 0)
2516		device_printf(sc->sc_dev, "controller halt timedout!\n");
2517	/* Reclaim Tx/Rx buffers. */
2518	if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
2519	    TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
2520		txp_tx_reclaim(sc, &sc->sc_txhir);
2521	if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
2522	    TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
2523		txp_tx_reclaim(sc, &sc->sc_txlor);
2524	txp_rxring_empty(sc);
2525
2526	txp_init_rings(sc);
2527	/* Reset controller and make it reload sleep image. */
2528	txp_reset(sc);
2529	/* Let controller boot from sleep image. */
2530	if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
2531		device_printf(sc->sc_dev, "could not boot sleep image\n");
2532	txp_sleep(sc, 0);
2533}
2534
2535static void
2536txp_watchdog(struct txp_softc *sc)
2537{
2538	struct ifnet *ifp;
2539
2540	TXP_LOCK_ASSERT(sc);
2541
2542	if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
2543		return;
2544
2545	ifp = sc->sc_ifp;
2546	if_printf(ifp, "watchdog timeout -- resetting\n");
2547	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2548	txp_stop(sc);
2549	txp_init_locked(sc);
2550}
2551
2552static int
2553txp_ifmedia_upd(struct ifnet *ifp)
2554{
2555	struct txp_softc *sc = ifp->if_softc;
2556	struct ifmedia *ifm = &sc->sc_ifmedia;
2557	uint16_t new_xcvr;
2558
2559	TXP_LOCK(sc);
2560	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
2561		TXP_UNLOCK(sc);
2562		return (EINVAL);
2563	}
2564
2565	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
2566		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2567			new_xcvr = TXP_XCVR_10_FDX;
2568		else
2569			new_xcvr = TXP_XCVR_10_HDX;
2570	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
2571		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2572			new_xcvr = TXP_XCVR_100_FDX;
2573		else
2574			new_xcvr = TXP_XCVR_100_HDX;
2575	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
2576		new_xcvr = TXP_XCVR_AUTO;
2577	} else {
2578		TXP_UNLOCK(sc);
2579		return (EINVAL);
2580	}
2581
2582	/* nothing to do */
2583	if (sc->sc_xcvr == new_xcvr) {
2584		TXP_UNLOCK(sc);
2585		return (0);
2586	}
2587
2588	txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
2589	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
2590	sc->sc_xcvr = new_xcvr;
2591	TXP_UNLOCK(sc);
2592
2593	return (0);
2594}
2595
2596static void
2597txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2598{
2599	struct txp_softc *sc = ifp->if_softc;
2600	struct ifmedia *ifm = &sc->sc_ifmedia;
2601	uint16_t bmsr, bmcr, anar, anlpar;
2602
2603	ifmr->ifm_status = IFM_AVALID;
2604	ifmr->ifm_active = IFM_ETHER;
2605
2606	TXP_LOCK(sc);
2607	/* Check whether firmware is running. */
2608	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2609		goto bail;
2610	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
2611	    &bmsr, NULL, NULL, TXP_CMD_WAIT))
2612		goto bail;
2613	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
2614	    &bmsr, NULL, NULL, TXP_CMD_WAIT))
2615		goto bail;
2616
2617	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
2618	    &bmcr, NULL, NULL, TXP_CMD_WAIT))
2619		goto bail;
2620
2621	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
2622	    &anlpar, NULL, NULL, TXP_CMD_WAIT))
2623		goto bail;
2624
2625	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANAR, 0,
2626	    &anar, NULL, NULL, TXP_CMD_WAIT))
2627		goto bail;
2628	TXP_UNLOCK(sc);
2629
2630	if (bmsr & BMSR_LINK)
2631		ifmr->ifm_status |= IFM_ACTIVE;
2632
2633	if (bmcr & BMCR_ISO) {
2634		ifmr->ifm_active |= IFM_NONE;
2635		ifmr->ifm_status = 0;
2636		return;
2637	}
2638
2639	if (bmcr & BMCR_LOOP)
2640		ifmr->ifm_active |= IFM_LOOP;
2641
2642	if (bmcr & BMCR_AUTOEN) {
2643		if ((bmsr & BMSR_ACOMP) == 0) {
2644			ifmr->ifm_active |= IFM_NONE;
2645			return;
2646		}
2647
2648		anlpar &= anar;
2649		if (anlpar & ANLPAR_TX_FD)
2650			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
2651		else if (anlpar & ANLPAR_T4)
2652			ifmr->ifm_active |= IFM_100_T4;
2653		else if (anlpar & ANLPAR_TX)
2654			ifmr->ifm_active |= IFM_100_TX;
2655		else if (anlpar & ANLPAR_10_FD)
2656			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
2657		else if (anlpar & ANLPAR_10)
2658			ifmr->ifm_active |= IFM_10_T;
2659		else
2660			ifmr->ifm_active |= IFM_NONE;
2661	} else
2662		ifmr->ifm_active = ifm->ifm_cur->ifm_media;
2663	return;
2664
2665bail:
2666	TXP_UNLOCK(sc);
2667	ifmr->ifm_active |= IFM_NONE;
2668	ifmr->ifm_status &= ~IFM_AVALID;
2669}
2670
2671#ifdef TXP_DEBUG
2672static void
2673txp_show_descriptor(void *d)
2674{
2675	struct txp_cmd_desc *cmd = d;
2676	struct txp_rsp_desc *rsp = d;
2677	struct txp_tx_desc *txd = d;
2678	struct txp_frag_desc *frgd = d;
2679
2680	switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
2681	case CMD_FLAGS_TYPE_CMD:
2682		/* command descriptor */
2683		printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2684		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
2685		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
2686		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
2687		break;
2688	case CMD_FLAGS_TYPE_RESP:
2689		/* response descriptor */
2690		printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2691		    rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id),
2692		    le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1),
2693		    le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3));
2694		break;
2695	case CMD_FLAGS_TYPE_DATA:
2696		/* data header (assuming tx for now) */
2697		printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
2698		    txd->tx_flags, txd->tx_numdesc, le16toh(txd->tx_totlen),
2699		    le32toh(txd->tx_addrlo), le32toh(txd->tx_addrhi),
2700		    le32toh(txd->tx_pflags));
2701		break;
2702	case CMD_FLAGS_TYPE_FRAG:
2703		/* fragment descriptor */
2704		printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
2705		    frgd->frag_flags, frgd->frag_rsvd1, le16toh(frgd->frag_len),
2706		    le32toh(frgd->frag_addrlo), le32toh(frgd->frag_addrhi),
2707		    le32toh(frgd->frag_rsvd2));
2708		break;
2709	default:
2710		printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2711		    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
2712		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
2713		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
2714		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
2715		break;
2716	}
2717}
2718#endif
2719
2720static void
2721txp_set_filter(struct txp_softc *sc)
2722{
2723	struct ifnet *ifp;
2724	uint32_t crc, mchash[2];
2725	uint16_t filter;
2726	struct ifmultiaddr *ifma;
2727	int mcnt;
2728
2729	TXP_LOCK_ASSERT(sc);
2730
2731	ifp = sc->sc_ifp;
2732	filter = TXP_RXFILT_DIRECT;
2733	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2734		filter |= TXP_RXFILT_BROADCAST;
2735	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2736		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2737			filter |= TXP_RXFILT_ALLMULTI;
2738		if ((ifp->if_flags & IFF_PROMISC) != 0)
2739			filter = TXP_RXFILT_PROMISC;
2740		goto setit;
2741	}
2742
2743	mchash[0] = mchash[1] = 0;
2744	mcnt = 0;
2745	if_maddr_rlock(ifp);
2746	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2747		if (ifma->ifma_addr->sa_family != AF_LINK)
2748			continue;
2749		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2750		    ifma->ifma_addr), ETHER_ADDR_LEN);
2751		crc &= 0x3f;
2752		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2753		mcnt++;
2754	}
2755	if_maddr_runlock(ifp);
2756
2757	if (mcnt > 0) {
2758		filter |= TXP_RXFILT_HASHMULTI;
2759		txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE, 2, mchash[0],
2760		    mchash[1], NULL, NULL, NULL, TXP_CMD_NOWAIT);
2761	}
2762
2763setit:
2764	txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
2765	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
2766}
2767
2768static int
2769txp_set_capabilities(struct txp_softc *sc)
2770{
2771	struct ifnet *ifp;
2772	uint32_t rxcap, txcap;
2773
2774	TXP_LOCK_ASSERT(sc);
2775
2776	rxcap = txcap = 0;
2777	ifp = sc->sc_ifp;
2778	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) {
2779		if ((ifp->if_hwassist & CSUM_IP) != 0)
2780			txcap |= OFFLOAD_IPCKSUM;
2781		if ((ifp->if_hwassist & CSUM_TCP) != 0)
2782			txcap |= OFFLOAD_TCPCKSUM;
2783		if ((ifp->if_hwassist & CSUM_UDP) != 0)
2784			txcap |= OFFLOAD_UDPCKSUM;
2785		rxcap = txcap;
2786	}
2787	if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
2788		rxcap &= ~(OFFLOAD_IPCKSUM | OFFLOAD_TCPCKSUM |
2789		    OFFLOAD_UDPCKSUM);
2790	if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2791		rxcap |= OFFLOAD_VLAN;
2792		txcap |= OFFLOAD_VLAN;
2793	}
2794
2795	/* Tell firmware new offload configuration. */
2796	return (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, txcap, rxcap, NULL,
2797	    NULL, NULL, TXP_CMD_NOWAIT));
2798}
2799
2800static void
2801txp_stats_save(struct txp_softc *sc)
2802{
2803	struct txp_rsp_desc *rsp;
2804
2805	TXP_LOCK_ASSERT(sc);
2806
2807	rsp = NULL;
2808	if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
2809	    &rsp, TXP_CMD_WAIT))
2810		goto out;
2811	if (rsp->rsp_numdesc != 6)
2812		goto out;
2813	txp_stats_update(sc, rsp);
2814out:
2815	if (rsp != NULL)
2816		free(rsp, M_DEVBUF);
2817	bcopy(&sc->sc_stats, &sc->sc_ostats, sizeof(struct txp_hw_stats));
2818}
2819
2820static void
2821txp_stats_update(struct txp_softc *sc, struct txp_rsp_desc *rsp)
2822{
2823	struct txp_hw_stats *ostats, *stats;
2824	struct txp_ext_desc *ext;
2825
2826	TXP_LOCK_ASSERT(sc);
2827
2828	ext = (struct txp_ext_desc *)(rsp + 1);
2829	ostats = &sc->sc_ostats;
2830	stats = &sc->sc_stats;
2831	stats->tx_frames = ostats->tx_frames + le32toh(rsp->rsp_par2);
2832	stats->tx_bytes = ostats->tx_bytes + (uint64_t)le32toh(rsp->rsp_par3) +
2833	    ((uint64_t)le32toh(ext[0].ext_1) << 32);
2834	stats->tx_deferred = ostats->tx_deferred + le32toh(ext[0].ext_2);
2835	stats->tx_late_colls = ostats->tx_late_colls + le32toh(ext[0].ext_3);
2836	stats->tx_colls = ostats->tx_colls + le32toh(ext[0].ext_4);
2837	stats->tx_carrier_lost = ostats->tx_carrier_lost +
2838	    le32toh(ext[1].ext_1);
2839	stats->tx_multi_colls = ostats->tx_multi_colls +
2840	    le32toh(ext[1].ext_2);
2841	stats->tx_excess_colls = ostats->tx_excess_colls +
2842	    le32toh(ext[1].ext_3);
2843	stats->tx_fifo_underruns = ostats->tx_fifo_underruns +
2844	    le32toh(ext[1].ext_4);
2845	stats->tx_mcast_oflows = ostats->tx_mcast_oflows +
2846	    le32toh(ext[2].ext_1);
2847	stats->tx_filtered = ostats->tx_filtered + le32toh(ext[2].ext_2);
2848	stats->rx_frames = ostats->rx_frames + le32toh(ext[2].ext_3);
2849	stats->rx_bytes = ostats->rx_bytes + (uint64_t)le32toh(ext[2].ext_4) +
2850	    ((uint64_t)le32toh(ext[3].ext_1) << 32);
2851	stats->rx_fifo_oflows = ostats->rx_fifo_oflows + le32toh(ext[3].ext_2);
2852	stats->rx_badssd = ostats->rx_badssd + le32toh(ext[3].ext_3);
2853	stats->rx_crcerrs = ostats->rx_crcerrs + le32toh(ext[3].ext_4);
2854	stats->rx_lenerrs = ostats->rx_lenerrs + le32toh(ext[4].ext_1);
2855	stats->rx_bcast_frames = ostats->rx_bcast_frames +
2856	    le32toh(ext[4].ext_2);
2857	stats->rx_mcast_frames = ostats->rx_mcast_frames +
2858	    le32toh(ext[4].ext_3);
2859	stats->rx_oflows = ostats->rx_oflows + le32toh(ext[4].ext_4);
2860	stats->rx_filtered = ostats->rx_filtered + le32toh(ext[5].ext_1);
2861}
2862
2863static uint64_t
2864txp_get_counter(struct ifnet *ifp, ift_counter cnt)
2865{
2866	struct txp_softc *sc;
2867	struct txp_hw_stats *stats;
2868
2869	sc = if_getsoftc(ifp);
2870	stats = &sc->sc_stats;
2871
2872	switch (cnt) {
2873	case IFCOUNTER_IERRORS:
2874		return (stats->rx_fifo_oflows + stats->rx_badssd +
2875		    stats->rx_crcerrs + stats->rx_lenerrs + stats->rx_oflows);
2876	case IFCOUNTER_OERRORS:
2877		return (stats->tx_deferred + stats->tx_carrier_lost +
2878		    stats->tx_fifo_underruns + stats->tx_mcast_oflows);
2879	case IFCOUNTER_COLLISIONS:
2880		return (stats->tx_late_colls + stats->tx_multi_colls +
2881		    stats->tx_excess_colls);
2882	case IFCOUNTER_OPACKETS:
2883		return (stats->tx_frames);
2884	case IFCOUNTER_IPACKETS:
2885		return (stats->rx_frames);
2886	default:
2887		return (if_get_counter_default(ifp, cnt));
2888	}
2889}
2890
2891#define	TXP_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
2892	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2893
2894#if __FreeBSD_version >= 900030
2895#define	TXP_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
2896	    SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2897#elif __FreeBSD_version > 800000
2898#define	TXP_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
2899	    SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2900#else
2901#define	TXP_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
2902	    SYSCTL_ADD_ULONG(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2903#endif
2904
2905static void
2906txp_sysctl_node(struct txp_softc *sc)
2907{
2908	struct sysctl_ctx_list *ctx;
2909	struct sysctl_oid_list *child, *parent;
2910	struct sysctl_oid *tree;
2911	struct txp_hw_stats *stats;
2912	int error;
2913
2914	stats = &sc->sc_stats;
2915	ctx = device_get_sysctl_ctx(sc->sc_dev);
2916	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
2917	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
2918	    CTLTYPE_INT | CTLFLAG_RW, &sc->sc_process_limit, 0,
2919	    sysctl_hw_txp_proc_limit, "I",
2920	    "max number of Rx events to process");
2921	/* Pull in device tunables. */
2922	sc->sc_process_limit = TXP_PROC_DEFAULT;
2923	error = resource_int_value(device_get_name(sc->sc_dev),
2924	    device_get_unit(sc->sc_dev), "process_limit",
2925	    &sc->sc_process_limit);
2926	if (error == 0) {
2927		if (sc->sc_process_limit < TXP_PROC_MIN ||
2928		    sc->sc_process_limit > TXP_PROC_MAX) {
2929			device_printf(sc->sc_dev,
2930			    "process_limit value out of range; "
2931			    "using default: %d\n", TXP_PROC_DEFAULT);
2932			sc->sc_process_limit = TXP_PROC_DEFAULT;
2933		}
2934	}
2935	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
2936	    NULL, "TXP statistics");
2937	parent = SYSCTL_CHILDREN(tree);
2938
2939	/* Tx statistics. */
2940	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2941	    NULL, "Tx MAC statistics");
2942	child = SYSCTL_CHILDREN(tree);
2943
2944	TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
2945	    &stats->tx_frames, "Frames");
2946	TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
2947	    &stats->tx_bytes, "Octets");
2948	TXP_SYSCTL_STAT_ADD32(ctx, child, "deferred",
2949	    &stats->tx_deferred, "Deferred frames");
2950	TXP_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
2951	    &stats->tx_late_colls, "Late collisions");
2952	TXP_SYSCTL_STAT_ADD32(ctx, child, "colls",
2953	    &stats->tx_colls, "Collisions");
2954	TXP_SYSCTL_STAT_ADD32(ctx, child, "carrier_lost",
2955	    &stats->tx_carrier_lost, "Carrier lost");
2956	TXP_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
2957	    &stats->tx_multi_colls, "Multiple collisions");
2958	TXP_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
2959	    &stats->tx_excess_colls, "Excessive collisions");
2960	TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_underruns",
2961	    &stats->tx_fifo_underruns, "FIFO underruns");
2962	TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_oflows",
2963	    &stats->tx_mcast_oflows, "Multicast overflows");
2964	TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
2965	    &stats->tx_filtered, "Filtered frames");
2966
2967	/* Rx statistics. */
2968	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2969	    NULL, "Rx MAC statistics");
2970	child = SYSCTL_CHILDREN(tree);
2971
2972	TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
2973	    &stats->rx_frames, "Frames");
2974	TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
2975	    &stats->rx_bytes, "Octets");
2976	TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2977	    &stats->rx_fifo_oflows, "FIFO overflows");
2978	TXP_SYSCTL_STAT_ADD32(ctx, child, "badssd",
2979	    &stats->rx_badssd, "Bad SSD");
2980	TXP_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
2981	    &stats->rx_crcerrs, "CRC errors");
2982	TXP_SYSCTL_STAT_ADD32(ctx, child, "lenerrs",
2983	    &stats->rx_lenerrs, "Length errors");
2984	TXP_SYSCTL_STAT_ADD32(ctx, child, "bcast_frames",
2985	    &stats->rx_bcast_frames, "Broadcast frames");
2986	TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_frames",
2987	    &stats->rx_mcast_frames, "Multicast frames");
2988	TXP_SYSCTL_STAT_ADD32(ctx, child, "oflows",
2989	    &stats->rx_oflows, "Overflows");
2990	TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
2991	    &stats->rx_filtered, "Filtered frames");
2992}
2993
2994#undef TXP_SYSCTL_STAT_ADD32
2995#undef TXP_SYSCTL_STAT_ADD64
2996
2997static int
2998sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2999{
3000	int error, value;
3001
3002	if (arg1 == NULL)
3003		return (EINVAL);
3004	value = *(int *)arg1;
3005	error = sysctl_handle_int(oidp, &value, 0, req);
3006	if (error || req->newptr == NULL)
3007		return (error);
3008	if (value < low || value > high)
3009		return (EINVAL);
3010        *(int *)arg1 = value;
3011
3012        return (0);
3013}
3014
3015static int
3016sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS)
3017{
3018	return (sysctl_int_range(oidp, arg1, arg2, req,
3019	    TXP_PROC_MIN, TXP_PROC_MAX));
3020}
3021