if_hme.c revision 151639
1/*- 2 * Copyright (c) 1999 The NetBSD Foundation, Inc. 3 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Paul Kranenburg. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by the NetBSD 20 * Foundation, Inc. and its contributors. 21 * 4. Neither the name of The NetBSD Foundation nor the names of its 22 * contributors may be used to endorse or promote products derived 23 * from this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 * 37 * from: NetBSD: hme.c,v 1.29 2002/05/05 03:02:38 thorpej Exp 38 */ 39 40#include <sys/cdefs.h> 41__FBSDID("$FreeBSD: head/sys/dev/hme/if_hme.c 151639 2005-10-25 03:56:21Z yongari $"); 42 43/* 44 * HME Ethernet module driver. 45 * 46 * The HME is e.g. part of the PCIO PCI multi function device. 47 * It supports TX gathering and TX and RX checksum offloading. 48 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2 49 * for this offset: mbuf clusters are usually on about 2^11 boundaries, 2 bytes 50 * are skipped to make sure the header after the ethernet header is aligned on a 51 * natural boundary, so this ensures minimal wastage in the most common case. 52 * 53 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the 54 * maximum packet size (this is not verified). Buffers starting on odd 55 * boundaries must be mapped so that the burst can start on a natural boundary. 56 * 57 * STP2002QFP-UG says that Ethernet hardware supports TCP checksum offloading. 58 * In reality, we can do the same technique for UDP datagram too. However, 59 * the hardware doesn't compensate the checksum for UDP datagram which can yield 60 * to 0x0. As a safe guard, UDP checksum offload is disabled by default. It 61 * can be reactivated by setting special link option link0 with ifconfig(8). 62 */ 63#define HME_CSUM_FEATURES (CSUM_TCP) 64#define HMEDEBUG 65#define KTR_HME KTR_CT2 /* XXX */ 66 67#include <sys/param.h> 68#include <sys/systm.h> 69#include <sys/bus.h> 70#include <sys/endian.h> 71#include <sys/kernel.h> 72#include <sys/module.h> 73#include <sys/ktr.h> 74#include <sys/mbuf.h> 75#include <sys/malloc.h> 76#include <sys/socket.h> 77#include <sys/sockio.h> 78 79#include <net/bpf.h> 80#include <net/ethernet.h> 81#include <net/if.h> 82#include <net/if_arp.h> 83#include <net/if_dl.h> 84#include <net/if_media.h> 85#include <net/if_types.h> 86#include <net/if_vlan_var.h> 87 88#include <netinet/in.h> 89#include <netinet/in_systm.h> 90#include <netinet/ip.h> 91#include <netinet/tcp.h> 92#include <netinet/udp.h> 93 94#include <dev/mii/mii.h> 95#include <dev/mii/miivar.h> 96 97#include <machine/bus.h> 98 99#include <dev/hme/if_hmereg.h> 100#include <dev/hme/if_hmevar.h> 101 102static void hme_start(struct ifnet *); 103static void hme_start_locked(struct ifnet *); 104static void hme_stop(struct hme_softc *); 105static int hme_ioctl(struct ifnet *, u_long, caddr_t); 106static void hme_tick(void *); 107static void hme_watchdog(struct ifnet *); 108static void hme_init(void *); 109static void hme_init_locked(struct hme_softc *); 110static int hme_add_rxbuf(struct hme_softc *, unsigned int, int); 111static int hme_meminit(struct hme_softc *); 112static int hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t, 113 u_int32_t, u_int32_t); 114static void hme_mifinit(struct hme_softc *); 115static void hme_setladrf(struct hme_softc *, int); 116 117static int hme_mediachange(struct ifnet *); 118static void hme_mediastatus(struct ifnet *, struct ifmediareq *); 119 120static int hme_load_txmbuf(struct hme_softc *, struct mbuf **); 121static void hme_read(struct hme_softc *, int, int, u_int32_t); 122static void hme_eint(struct hme_softc *, u_int); 123static void hme_rint(struct hme_softc *); 124static void hme_tint(struct hme_softc *); 125static void hme_txcksum(struct mbuf *, u_int32_t *); 126static void hme_rxcksum(struct mbuf *, u_int32_t); 127 128static void hme_cdma_callback(void *, bus_dma_segment_t *, int, int); 129 130devclass_t hme_devclass; 131 132static int hme_nerr; 133 134DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0); 135MODULE_DEPEND(hme, miibus, 1, 1, 1); 136 137#define HME_SPC_READ_4(spc, sc, offs) \ 138 bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \ 139 (offs)) 140#define HME_SPC_WRITE_4(spc, sc, offs, v) \ 141 bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \ 142 (offs), (v)) 143 144#define HME_SEB_READ_4(sc, offs) HME_SPC_READ_4(seb, (sc), (offs)) 145#define HME_SEB_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(seb, (sc), (offs), (v)) 146#define HME_ERX_READ_4(sc, offs) HME_SPC_READ_4(erx, (sc), (offs)) 147#define HME_ERX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(erx, (sc), (offs), (v)) 148#define HME_ETX_READ_4(sc, offs) HME_SPC_READ_4(etx, (sc), (offs)) 149#define HME_ETX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(etx, (sc), (offs), (v)) 150#define HME_MAC_READ_4(sc, offs) HME_SPC_READ_4(mac, (sc), (offs)) 151#define HME_MAC_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mac, (sc), (offs), (v)) 152#define HME_MIF_READ_4(sc, offs) HME_SPC_READ_4(mif, (sc), (offs)) 153#define HME_MIF_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mif, (sc), (offs), (v)) 154 155#define HME_MAXERR 5 156#define HME_WHINE(dev, ...) do { \ 157 if (hme_nerr++ < HME_MAXERR) \ 158 device_printf(dev, __VA_ARGS__); \ 159 if (hme_nerr == HME_MAXERR) { \ 160 device_printf(dev, "too may errors; not reporting any " \ 161 "more\n"); \ 162 } \ 163} while(0) 164 165/* Support oversized VLAN frames. */ 166#define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) 167 168int 169hme_config(struct hme_softc *sc) 170{ 171 struct ifnet *ifp; 172 struct mii_softc *child; 173 bus_size_t size; 174 int error, rdesc, tdesc, i; 175 176 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 177 if (ifp == NULL) 178 return (ENOSPC); 179 180 /* 181 * HME common initialization. 182 * 183 * hme_softc fields that must be initialized by the front-end: 184 * 185 * the DMA bus tag: 186 * sc_dmatag 187 * 188 * the bus handles, tags and offsets (splitted for SBus compatability): 189 * sc_seb{t,h,o} (Shared Ethernet Block registers) 190 * sc_erx{t,h,o} (Receiver Unit registers) 191 * sc_etx{t,h,o} (Transmitter Unit registers) 192 * sc_mac{t,h,o} (MAC registers) 193 * sc_mif{t,h,o} (Management Interface registers) 194 * 195 * the maximum bus burst size: 196 * sc_burst 197 * 198 */ 199 200 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0); 201 202 /* Make sure the chip is stopped. */ 203 HME_LOCK(sc); 204 hme_stop(sc); 205 HME_UNLOCK(sc); 206 207 /* 208 * Allocate DMA capable memory 209 * Buffer descriptors must be aligned on a 2048 byte boundary; 210 * take this into account when calculating the size. Note that 211 * the maximum number of descriptors (256) occupies 2048 bytes, 212 * so we allocate that much regardless of HME_N*DESC. 213 */ 214 size = 4096; 215 216 error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, 217 BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1, 218 BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag); 219 if (error) 220 goto fail_ifnet; 221 222 error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0, 223 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 224 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, busdma_lock_mutex, 225 &sc->sc_lock, &sc->sc_cdmatag); 226 if (error) 227 goto fail_ptag; 228 229 error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0, 230 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 231 HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, 232 NULL, NULL, &sc->sc_rdmatag); 233 if (error) 234 goto fail_ctag; 235 236 error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0, 237 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 238 HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, 239 NULL, NULL, &sc->sc_tdmatag); 240 if (error) 241 goto fail_rtag; 242 243 /* Allocate control/TX DMA buffer */ 244 error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase, 245 0, &sc->sc_cdmamap); 246 if (error != 0) { 247 device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error); 248 goto fail_ttag; 249 } 250 251 /* Load the buffer */ 252 sc->sc_rb.rb_dmabase = 0; 253 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap, 254 sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 || 255 sc->sc_rb.rb_dmabase == 0) { 256 device_printf(sc->sc_dev, "DMA buffer map load error %d\n", 257 error); 258 goto fail_free; 259 } 260 CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase, 261 sc->sc_rb.rb_dmabase); 262 263 /* 264 * Prepare the RX descriptors. rdesc serves as marker for the last 265 * processed descriptor and may be used later on. 266 */ 267 for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) { 268 sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL; 269 error = bus_dmamap_create(sc->sc_rdmatag, 0, 270 &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap); 271 if (error != 0) 272 goto fail_rxdesc; 273 } 274 error = bus_dmamap_create(sc->sc_rdmatag, 0, 275 &sc->sc_rb.rb_spare_dmamap); 276 if (error != 0) 277 goto fail_rxdesc; 278 /* Same for the TX descs. */ 279 for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) { 280 sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL; 281 error = bus_dmamap_create(sc->sc_tdmatag, 0, 282 &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap); 283 if (error != 0) 284 goto fail_txdesc; 285 } 286 287 sc->sc_csum_features = HME_CSUM_FEATURES; 288 /* Initialize ifnet structure. */ 289 ifp->if_softc = sc; 290 if_initname(ifp, device_get_name(sc->sc_dev), 291 device_get_unit(sc->sc_dev)); 292 ifp->if_mtu = ETHERMTU; 293 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 294 ifp->if_start = hme_start; 295 ifp->if_ioctl = hme_ioctl; 296 ifp->if_init = hme_init; 297 ifp->if_watchdog = hme_watchdog; 298 IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ); 299 ifp->if_snd.ifq_drv_maxlen = HME_NTXQ; 300 IFQ_SET_READY(&ifp->if_snd); 301 302 hme_mifinit(sc); 303 304 if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange, 305 hme_mediastatus)) != 0) { 306 device_printf(sc->sc_dev, "phy probe failed: %d\n", error); 307 goto fail_rxdesc; 308 } 309 sc->sc_mii = device_get_softc(sc->sc_miibus); 310 311 /* 312 * Walk along the list of attached MII devices and 313 * establish an `MII instance' to `phy number' 314 * mapping. We'll use this mapping in media change 315 * requests to determine which phy to use to program 316 * the MIF configuration register. 317 */ 318 for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL; 319 child = LIST_NEXT(child, mii_list)) { 320 /* 321 * Note: we support just two PHYs: the built-in 322 * internal device and an external on the MII 323 * connector. 324 */ 325 if (child->mii_phy > 1 || child->mii_inst > 1) { 326 device_printf(sc->sc_dev, "cannot accommodate " 327 "MII device %s at phy %d, instance %d\n", 328 device_get_name(child->mii_dev), 329 child->mii_phy, child->mii_inst); 330 continue; 331 } 332 333 sc->sc_phys[child->mii_inst] = child->mii_phy; 334 } 335 336 /* Attach the interface. */ 337 ether_ifattach(ifp, sc->sc_enaddr); 338 339 /* 340 * Tell the upper layer(s) we support long frames/checksum offloads. 341 */ 342 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 343 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; 344 ifp->if_hwassist |= sc->sc_csum_features; 345 ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; 346 return (0); 347 348fail_txdesc: 349 for (i = 0; i < tdesc; i++) { 350 bus_dmamap_destroy(sc->sc_tdmatag, 351 sc->sc_rb.rb_txdesc[i].htx_dmamap); 352 } 353 bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap); 354fail_rxdesc: 355 for (i = 0; i < rdesc; i++) { 356 bus_dmamap_destroy(sc->sc_rdmatag, 357 sc->sc_rb.rb_rxdesc[i].hrx_dmamap); 358 } 359 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap); 360fail_free: 361 bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap); 362fail_ttag: 363 bus_dma_tag_destroy(sc->sc_tdmatag); 364fail_rtag: 365 bus_dma_tag_destroy(sc->sc_rdmatag); 366fail_ctag: 367 bus_dma_tag_destroy(sc->sc_cdmatag); 368fail_ptag: 369 bus_dma_tag_destroy(sc->sc_pdmatag); 370fail_ifnet: 371 if_free(ifp); 372 return (error); 373} 374 375void 376hme_detach(struct hme_softc *sc) 377{ 378 struct ifnet *ifp = sc->sc_ifp; 379 int i; 380 381 HME_LOCK(sc); 382 hme_stop(sc); 383 HME_UNLOCK(sc); 384 callout_drain(&sc->sc_tick_ch); 385 ether_ifdetach(ifp); 386 if_free(ifp); 387 device_delete_child(sc->sc_dev, sc->sc_miibus); 388 389 for (i = 0; i < HME_NTXQ; i++) { 390 bus_dmamap_destroy(sc->sc_tdmatag, 391 sc->sc_rb.rb_txdesc[i].htx_dmamap); 392 } 393 bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap); 394 for (i = 0; i < HME_NRXDESC; i++) { 395 bus_dmamap_destroy(sc->sc_rdmatag, 396 sc->sc_rb.rb_rxdesc[i].hrx_dmamap); 397 } 398 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD); 399 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTWRITE); 400 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap); 401 bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap); 402 bus_dma_tag_destroy(sc->sc_tdmatag); 403 bus_dma_tag_destroy(sc->sc_rdmatag); 404 bus_dma_tag_destroy(sc->sc_cdmatag); 405 bus_dma_tag_destroy(sc->sc_pdmatag); 406} 407 408void 409hme_suspend(struct hme_softc *sc) 410{ 411 412 HME_LOCK(sc); 413 hme_stop(sc); 414 HME_UNLOCK(sc); 415} 416 417void 418hme_resume(struct hme_softc *sc) 419{ 420 struct ifnet *ifp = sc->sc_ifp; 421 422 HME_LOCK(sc); 423 if ((ifp->if_flags & IFF_UP) != 0) 424 hme_init_locked(sc); 425 HME_UNLOCK(sc); 426} 427 428static void 429hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) 430{ 431 struct hme_softc *sc = (struct hme_softc *)xsc; 432 433 if (error != 0) 434 return; 435 KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count")); 436 sc->sc_rb.rb_dmabase = segs[0].ds_addr; 437} 438 439static void 440hme_tick(void *arg) 441{ 442 struct hme_softc *sc = arg; 443 struct ifnet *ifp; 444 445 HME_LOCK_ASSERT(sc, MA_OWNED); 446 447 ifp = sc->sc_ifp; 448 /* 449 * Unload collision counters 450 */ 451 ifp->if_collisions += 452 HME_MAC_READ_4(sc, HME_MACI_NCCNT) + 453 HME_MAC_READ_4(sc, HME_MACI_FCCNT) + 454 HME_MAC_READ_4(sc, HME_MACI_EXCNT) + 455 HME_MAC_READ_4(sc, HME_MACI_LTCNT); 456 457 /* 458 * then clear the hardware counters. 459 */ 460 HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0); 461 HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0); 462 HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0); 463 HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0); 464 465 mii_tick(sc->sc_mii); 466 467 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc); 468} 469 470static void 471hme_stop(struct hme_softc *sc) 472{ 473 u_int32_t v; 474 int n; 475 476 callout_stop(&sc->sc_tick_ch); 477 sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 478 479 /* Reset transmitter and receiver */ 480 HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX | 481 HME_SEB_RESET_ERX); 482 483 for (n = 0; n < 20; n++) { 484 v = HME_SEB_READ_4(sc, HME_SEBI_RESET); 485 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0) 486 return; 487 DELAY(20); 488 } 489 490 device_printf(sc->sc_dev, "hme_stop: reset failed\n"); 491} 492 493/* 494 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the 495 * ring for subsequent use. 496 */ 497static __inline void 498hme_discard_rxbuf(struct hme_softc *sc, int ix) 499{ 500 501 /* 502 * Dropped a packet, reinitialize the descriptor and turn the 503 * ownership back to the hardware. 504 */ 505 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN | 506 HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix]))); 507} 508 509static int 510hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold) 511{ 512 struct hme_rxdesc *rd; 513 struct mbuf *m; 514 bus_dma_segment_t segs[1]; 515 bus_dmamap_t map; 516 uintptr_t b; 517 int a, unmap, nsegs; 518 519 rd = &sc->sc_rb.rb_rxdesc[ri]; 520 unmap = rd->hrx_m != NULL; 521 if (unmap && keepold) { 522 /* 523 * Reinitialize the descriptor flags, as they may have been 524 * altered by the hardware. 525 */ 526 hme_discard_rxbuf(sc, ri); 527 return (0); 528 } 529 if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL) 530 return (ENOBUFS); 531 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 532 b = mtod(m, uintptr_t); 533 /* 534 * Required alignment boundary. At least 16 is needed, but since 535 * the mapping must be done in a way that a burst can start on a 536 * natural boundary we might need to extend this. 537 */ 538 a = imax(HME_MINRXALIGN, sc->sc_burst); 539 /* 540 * Make sure the buffer suitably aligned. The 2 byte offset is removed 541 * when the mbuf is handed up. XXX: this ensures at least 16 byte 542 * alignment of the header adjacent to the ethernet header, which 543 * should be sufficient in all cases. Nevertheless, this second-guesses 544 * ALIGN(). 545 */ 546 m_adj(m, roundup2(b, a) - b); 547 if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap, 548 m, segs, &nsegs, 0) != 0) { 549 m_freem(m); 550 return (ENOBUFS); 551 } 552 /* If nsegs is wrong then the stack is corrupt */ 553 KASSERT(nsegs == 1, ("Too many segments returned!")); 554 if (unmap) { 555 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, 556 BUS_DMASYNC_POSTREAD); 557 bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap); 558 } 559 map = rd->hrx_dmamap; 560 rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap; 561 sc->sc_rb.rb_spare_dmamap = map; 562 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD); 563 HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, segs[0].ds_addr); 564 rd->hrx_m = m; 565 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN | 566 HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd))); 567 return (0); 568} 569 570static int 571hme_meminit(struct hme_softc *sc) 572{ 573 struct hme_ring *hr = &sc->sc_rb; 574 struct hme_txdesc *td; 575 bus_addr_t dma; 576 caddr_t p; 577 unsigned int i; 578 int error; 579 580 p = hr->rb_membase; 581 dma = hr->rb_dmabase; 582 583 /* 584 * Allocate transmit descriptors 585 */ 586 hr->rb_txd = p; 587 hr->rb_txddma = dma; 588 p += HME_NTXDESC * HME_XD_SIZE; 589 dma += HME_NTXDESC * HME_XD_SIZE; 590 /* We have reserved descriptor space until the next 2048 byte boundary.*/ 591 dma = (bus_addr_t)roundup((u_long)dma, 2048); 592 p = (caddr_t)roundup((u_long)p, 2048); 593 594 /* 595 * Allocate receive descriptors 596 */ 597 hr->rb_rxd = p; 598 hr->rb_rxddma = dma; 599 p += HME_NRXDESC * HME_XD_SIZE; 600 dma += HME_NRXDESC * HME_XD_SIZE; 601 /* Again move forward to the next 2048 byte boundary.*/ 602 dma = (bus_addr_t)roundup((u_long)dma, 2048); 603 p = (caddr_t)roundup((u_long)p, 2048); 604 605 /* 606 * Initialize transmit buffer descriptors 607 */ 608 for (i = 0; i < HME_NTXDESC; i++) { 609 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0); 610 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0); 611 } 612 613 STAILQ_INIT(&sc->sc_rb.rb_txfreeq); 614 STAILQ_INIT(&sc->sc_rb.rb_txbusyq); 615 for (i = 0; i < HME_NTXQ; i++) { 616 td = &sc->sc_rb.rb_txdesc[i]; 617 if (td->htx_m != NULL) { 618 bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap, 619 BUS_DMASYNC_POSTWRITE); 620 bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap); 621 m_freem(td->htx_m); 622 td->htx_m = NULL; 623 } 624 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q); 625 } 626 627 /* 628 * Initialize receive buffer descriptors 629 */ 630 for (i = 0; i < HME_NRXDESC; i++) { 631 error = hme_add_rxbuf(sc, i, 1); 632 if (error != 0) 633 return (error); 634 } 635 636 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREREAD); 637 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE); 638 639 hr->rb_tdhead = hr->rb_tdtail = 0; 640 hr->rb_td_nbusy = 0; 641 hr->rb_rdtail = 0; 642 CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd, 643 hr->rb_txddma); 644 CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd, 645 hr->rb_rxddma); 646 CTR2(KTR_HME, "rx entry 1: flags %x, address %x", 647 *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4)); 648 CTR2(KTR_HME, "tx entry 1: flags %x, address %x", 649 *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4)); 650 return (0); 651} 652 653static int 654hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val, 655 u_int32_t clr, u_int32_t set) 656{ 657 int i = 0; 658 659 val &= ~clr; 660 val |= set; 661 HME_MAC_WRITE_4(sc, reg, val); 662 if (clr == 0 && set == 0) 663 return (1); /* just write, no bits to wait for */ 664 do { 665 DELAY(100); 666 i++; 667 val = HME_MAC_READ_4(sc, reg); 668 if (i > 40) { 669 /* After 3.5ms, we should have been done. */ 670 device_printf(sc->sc_dev, "timeout while writing to " 671 "MAC configuration register\n"); 672 return (0); 673 } 674 } while ((val & clr) != 0 && (val & set) != set); 675 return (1); 676} 677 678/* 679 * Initialization of interface; set up initialization block 680 * and transmit/receive descriptor rings. 681 */ 682static void 683hme_init(void *xsc) 684{ 685 struct hme_softc *sc = (struct hme_softc *)xsc; 686 687 HME_LOCK(sc); 688 hme_init_locked(sc); 689 HME_UNLOCK(sc); 690} 691 692static void 693hme_init_locked(struct hme_softc *sc) 694{ 695 struct ifnet *ifp = sc->sc_ifp; 696 u_int8_t *ea; 697 u_int32_t n, v; 698 699 HME_LOCK_ASSERT(sc, MA_OWNED); 700 /* 701 * Initialization sequence. The numbered steps below correspond 702 * to the sequence outlined in section 6.3.5.1 in the Ethernet 703 * Channel Engine manual (part of the PCIO manual). 704 * See also the STP2002-STQ document from Sun Microsystems. 705 */ 706 707 /* step 1 & 2. Reset the Ethernet Channel */ 708 hme_stop(sc); 709 710 /* Re-initialize the MIF */ 711 hme_mifinit(sc); 712 713#if 0 714 /* Mask all MIF interrupts, just in case */ 715 HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff); 716#endif 717 718 /* step 3. Setup data structures in host memory */ 719 if (hme_meminit(sc) != 0) { 720 device_printf(sc->sc_dev, "out of buffers; init aborted."); 721 return; 722 } 723 724 /* step 4. TX MAC registers & counters */ 725 HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0); 726 HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0); 727 HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0); 728 HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0); 729 HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE); 730 731 /* Load station MAC address */ 732 ea = IFP2ENADDR(sc->sc_ifp); 733 HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]); 734 HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]); 735 HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]); 736 737 /* 738 * Init seed for backoff 739 * (source suggested by manual: low 10 bits of MAC address) 740 */ 741 v = ((ea[4] << 8) | ea[5]) & 0x3fff; 742 HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v); 743 744 745 /* Note: Accepting power-on default for other MAC registers here.. */ 746 747 /* step 5. RX MAC registers & counters */ 748 hme_setladrf(sc, 0); 749 750 /* step 6 & 7. Program Descriptor Ring Base Addresses */ 751 HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma); 752 /* Transmit Descriptor ring size: in increments of 16 */ 753 HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1); 754 755 HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma); 756 HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE); 757 758 /* step 8. Global Configuration & Interrupt Mask */ 759 HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 760 ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/ 761 HME_SEB_STAT_HOSTTOTX | 762 HME_SEB_STAT_RXTOHOST | 763 HME_SEB_STAT_TXALL | 764 HME_SEB_STAT_TXPERR | 765 HME_SEB_STAT_RCNTEXP | 766 HME_SEB_STAT_ALL_ERRORS )); 767 768 switch (sc->sc_burst) { 769 default: 770 v = 0; 771 break; 772 case 16: 773 v = HME_SEB_CFG_BURST16; 774 break; 775 case 32: 776 v = HME_SEB_CFG_BURST32; 777 break; 778 case 64: 779 v = HME_SEB_CFG_BURST64; 780 break; 781 } 782 /* 783 * Blindly setting 64bit transfers may hang PCI cards(Cheerio?). 784 * Allowing 64bit transfers breaks TX checksum offload as well. 785 * Don't know this comes from hardware bug or driver's DMAing 786 * scheme. 787 * 788 * if (sc->sc_pci == 0) 789 * v |= HME_SEB_CFG_64BIT; 790 */ 791 HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v); 792 793 /* step 9. ETX Configuration: use mostly default values */ 794 795 /* Enable DMA */ 796 v = HME_ETX_READ_4(sc, HME_ETXI_CFG); 797 v |= HME_ETX_CFG_DMAENABLE; 798 HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v); 799 800 /* step 10. ERX Configuration */ 801 v = HME_ERX_READ_4(sc, HME_ERXI_CFG); 802 803 /* Encode Receive Descriptor ring size: four possible values */ 804 v &= ~HME_ERX_CFG_RINGSIZEMSK; 805 switch (HME_NRXDESC) { 806 case 32: 807 v |= HME_ERX_CFG_RINGSIZE32; 808 break; 809 case 64: 810 v |= HME_ERX_CFG_RINGSIZE64; 811 break; 812 case 128: 813 v |= HME_ERX_CFG_RINGSIZE128; 814 break; 815 case 256: 816 v |= HME_ERX_CFG_RINGSIZE256; 817 break; 818 default: 819 printf("hme: invalid Receive Descriptor ring size\n"); 820 break; 821 } 822 823 /* Enable DMA, fix RX first byte offset. */ 824 v &= ~HME_ERX_CFG_FBO_MASK; 825 v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT); 826 /* RX TCP/UDP checksum offset */ 827 n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2; 828 n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK; 829 v |= n; 830 CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v); 831 HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v); 832 833 /* step 11. XIF Configuration */ 834 v = HME_MAC_READ_4(sc, HME_MACI_XIF); 835 v |= HME_MAC_XIF_OE; 836 /* If an external transceiver is connected, enable its MII drivers */ 837 if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0) 838 v |= HME_MAC_XIF_MIIENABLE; 839 CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v); 840 HME_MAC_WRITE_4(sc, HME_MACI_XIF, v); 841 842 /* step 12. RX_MAC Configuration Register */ 843 v = HME_MAC_READ_4(sc, HME_MACI_RXCFG); 844 v |= HME_MAC_RXCFG_ENABLE; 845 v &= ~(HME_MAC_RXCFG_DCRCS); 846 CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v); 847 HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v); 848 849 /* step 13. TX_MAC Configuration Register */ 850 v = HME_MAC_READ_4(sc, HME_MACI_TXCFG); 851 v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP); 852 CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v); 853 HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v); 854 855 /* step 14. Issue Transmit Pending command */ 856 857#ifdef HMEDEBUG 858 /* Debug: double-check. */ 859 CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, " 860 "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING), 861 HME_ETX_READ_4(sc, HME_ETXI_RSIZE), 862 HME_ERX_READ_4(sc, HME_ERXI_RING), 863 HME_MAC_READ_4(sc, HME_MACI_RXSIZE)); 864 CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x", 865 HME_SEB_READ_4(sc, HME_SEBI_IMASK), 866 HME_ERX_READ_4(sc, HME_ERXI_CFG), 867 HME_ETX_READ_4(sc, HME_ETXI_CFG)); 868 CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x", 869 HME_MAC_READ_4(sc, HME_MACI_RXCFG), 870 HME_MAC_READ_4(sc, HME_MACI_TXCFG)); 871#endif 872 873 /* Set the current media. */ 874 /* 875 * mii_mediachg(sc->sc_mii); 876 */ 877 878 /* Start the one second timer. */ 879 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc); 880 881 ifp->if_drv_flags |= IFF_DRV_RUNNING; 882 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 883 ifp->if_timer = 0; 884 hme_start_locked(ifp); 885} 886 887/* TX TCP/UDP checksum */ 888static void 889hme_txcksum(struct mbuf *m, u_int32_t *cflags) 890{ 891 struct ip *ip; 892 u_int32_t offset, offset2; 893 caddr_t p; 894 895 for(; m && m->m_len == 0; m = m->m_next) 896 ; 897 if (m == NULL || m->m_len < ETHER_HDR_LEN) { 898 printf("hme_txcksum: m_len < ETHER_HDR_LEN\n"); 899 return; /* checksum will be corrupted */ 900 } 901 if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) { 902 if (m->m_len != ETHER_HDR_LEN) { 903 printf("hme_txcksum: m_len != ETHER_HDR_LEN\n"); 904 return; /* checksum will be corrupted */ 905 } 906 /* XXX */ 907 for(m = m->m_next; m && m->m_len == 0; m = m->m_next) 908 ; 909 if (m == NULL) 910 return; /* checksum will be corrupted */ 911 ip = mtod(m, struct ip *); 912 } else { 913 p = mtod(m, caddr_t); 914 p += ETHER_HDR_LEN; 915 ip = (struct ip *)p; 916 } 917 offset2 = m->m_pkthdr.csum_data; 918 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN; 919 *cflags = offset << HME_XD_TXCKSUM_SSHIFT; 920 *cflags |= ((offset + offset2) << HME_XD_TXCKSUM_OSHIFT); 921 *cflags |= HME_XD_TXCKSUM; 922} 923 924/* 925 * Routine to dma map an mbuf chain, set up the descriptor rings accordingly and 926 * start the transmission. 927 * Returns 0 on success, -1 if there were not enough free descriptors to map 928 * the packet, or an errno otherwise. 929 * 930 * XXX: this relies on the fact that segments returned by bus_dmamap_load_mbuf() 931 * are readable from the nearest burst boundary on (i.e. potentially before 932 * ds_addr) to the first boundary beyond the end. This is usually a safe 933 * assumption to make, but is not documented. 934 */ 935static int 936hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0) 937{ 938 struct hme_txdesc *htx; 939 struct mbuf *m, *n; 940 caddr_t txd; 941 int i, pci, si, ri, nseg; 942 u_int32_t flags, cflags = 0; 943 int error = 0; 944 945 if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL) 946 return (-1); 947 m = *m0; 948 if ((m->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) 949 hme_txcksum(m, &cflags); 950 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap, 951 m, sc->sc_rb.rb_txsegs, &nseg, 0); 952 if (error == EFBIG) { 953 n = m_defrag(m, M_DONTWAIT); 954 if (n == NULL) { 955 m_freem(m); 956 m = NULL; 957 return (ENOMEM); 958 } 959 m = n; 960 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap, 961 m, sc->sc_rb.rb_txsegs, &nseg, 0); 962 if (error != 0) { 963 m_freem(m); 964 m = NULL; 965 return (error); 966 } 967 } else if (error != 0) 968 return (error); 969 if (nseg == 0) { 970 m_freem(m); 971 m = NULL; 972 return (EIO); 973 } 974 if (sc->sc_rb.rb_td_nbusy + nseg >= HME_NTXDESC) { 975 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap); 976 /* retry with m_defrag(9)? */ 977 return (-2); 978 } 979 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE); 980 981 si = ri = sc->sc_rb.rb_tdhead; 982 txd = sc->sc_rb.rb_txd; 983 pci = sc->sc_pci; 984 CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri, 985 HME_XD_GETFLAGS(pci, txd, ri)); 986 for (i = 0; i < nseg; i++) { 987 /* Fill the ring entry. */ 988 flags = HME_XD_ENCODE_TSIZE(sc->sc_rb.rb_txsegs[i].ds_len); 989 if (i == 0) 990 flags |= HME_XD_SOP | cflags; 991 else 992 flags |= HME_XD_OWN | cflags; 993 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)", 994 ri, si, flags); 995 HME_XD_SETADDR(pci, txd, ri, sc->sc_rb.rb_txsegs[i].ds_addr); 996 HME_XD_SETFLAGS(pci, txd, ri, flags); 997 sc->sc_rb.rb_td_nbusy++; 998 htx->htx_lastdesc = ri; 999 ri = (ri + 1) % HME_NTXDESC; 1000 } 1001 sc->sc_rb.rb_tdhead = ri; 1002 1003 /* set EOP on the last descriptor */ 1004 ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC; 1005 flags = HME_XD_GETFLAGS(pci, txd, ri); 1006 flags |= HME_XD_EOP; 1007 CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si, 1008 flags); 1009 HME_XD_SETFLAGS(pci, txd, ri, flags); 1010 1011 /* Turn the first descriptor ownership to the hme */ 1012 flags = HME_XD_GETFLAGS(pci, txd, si); 1013 flags |= HME_XD_OWN; 1014 CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)", 1015 ri, flags); 1016 HME_XD_SETFLAGS(pci, txd, si, flags); 1017 1018 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q); 1019 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q); 1020 htx->htx_m = m; 1021 1022 /* start the transmission. */ 1023 HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP); 1024 1025 return (0); 1026} 1027 1028/* 1029 * Pass a packet to the higher levels. 1030 */ 1031static void 1032hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags) 1033{ 1034 struct ifnet *ifp = sc->sc_ifp; 1035 struct mbuf *m; 1036 1037 if (len <= sizeof(struct ether_header) || 1038 len > HME_MAX_FRAMESIZE) { 1039#ifdef HMEDEBUG 1040 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n", 1041 len); 1042#endif 1043 ifp->if_ierrors++; 1044 hme_discard_rxbuf(sc, ix); 1045 return; 1046 } 1047 1048 m = sc->sc_rb.rb_rxdesc[ix].hrx_m; 1049 CTR1(KTR_HME, "hme_read: len %d", len); 1050 1051 if (hme_add_rxbuf(sc, ix, 0) != 0) { 1052 /* 1053 * hme_add_rxbuf will leave the old buffer in the ring until 1054 * it is sure that a new buffer can be mapped. If it can not, 1055 * drop the packet, but leave the interface up. 1056 */ 1057 ifp->if_iqdrops++; 1058 hme_discard_rxbuf(sc, ix); 1059 return; 1060 } 1061 1062 ifp->if_ipackets++; 1063 1064 m->m_pkthdr.rcvif = ifp; 1065 m->m_pkthdr.len = m->m_len = len + HME_RXOFFS; 1066 m_adj(m, HME_RXOFFS); 1067 /* RX TCP/UDP checksum */ 1068 if (ifp->if_capenable & IFCAP_RXCSUM) 1069 hme_rxcksum(m, flags); 1070 /* Pass the packet up. */ 1071 HME_UNLOCK(sc); 1072 (*ifp->if_input)(ifp, m); 1073 HME_LOCK(sc); 1074} 1075 1076static void 1077hme_start(struct ifnet *ifp) 1078{ 1079 struct hme_softc *sc = ifp->if_softc; 1080 1081 HME_LOCK(sc); 1082 hme_start_locked(ifp); 1083 HME_UNLOCK(sc); 1084} 1085 1086static void 1087hme_start_locked(struct ifnet *ifp) 1088{ 1089 struct hme_softc *sc = (struct hme_softc *)ifp->if_softc; 1090 struct mbuf *m; 1091 int error, enq = 0; 1092 1093 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1094 IFF_DRV_RUNNING) 1095 return; 1096 1097 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 1098 sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) { 1099 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 1100 if (m == NULL) 1101 break; 1102 1103 error = hme_load_txmbuf(sc, &m); 1104 if (error != 0) { 1105 if (m == NULL) 1106 break; 1107 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1108 IFQ_DRV_PREPEND(&ifp->if_snd, m); 1109 break; 1110 } 1111 enq++; 1112 BPF_MTAP(ifp, m); 1113 } 1114 1115 /* Set watchdog timer if a packet was queued */ 1116 if (enq > 0) { 1117 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, 1118 BUS_DMASYNC_PREWRITE); 1119 ifp->if_timer = 5; 1120 } 1121} 1122 1123/* 1124 * Transmit interrupt. 1125 */ 1126static void 1127hme_tint(struct hme_softc *sc) 1128{ 1129 caddr_t txd; 1130 struct ifnet *ifp = sc->sc_ifp; 1131 struct hme_txdesc *htx; 1132 unsigned int ri, txflags; 1133 1134 txd = sc->sc_rb.rb_txd; 1135 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq); 1136 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD); 1137 /* Fetch current position in the transmit ring */ 1138 for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) { 1139 if (sc->sc_rb.rb_td_nbusy <= 0) { 1140 CTR0(KTR_HME, "hme_tint: not busy!"); 1141 break; 1142 } 1143 1144 txflags = HME_XD_GETFLAGS(sc->sc_pci, txd, ri); 1145 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags); 1146 1147 if ((txflags & HME_XD_OWN) != 0) 1148 break; 1149 1150 CTR0(KTR_HME, "hme_tint: not owned"); 1151 --sc->sc_rb.rb_td_nbusy; 1152 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1153 1154 /* Complete packet transmitted? */ 1155 if ((txflags & HME_XD_EOP) == 0) 1156 continue; 1157 1158 KASSERT(htx->htx_lastdesc == ri, 1159 ("hme_tint: ring indices skewed: %d != %d!", 1160 htx->htx_lastdesc, ri)); 1161 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, 1162 BUS_DMASYNC_POSTWRITE); 1163 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap); 1164 1165 ifp->if_opackets++; 1166 m_freem(htx->htx_m); 1167 htx->htx_m = NULL; 1168 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q); 1169 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q); 1170 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq); 1171 } 1172 /* Turn off watchdog if hme(4) transmitted queued packet */ 1173 ifp->if_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0; 1174 1175 /* Update ring */ 1176 sc->sc_rb.rb_tdtail = ri; 1177 1178 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 1179 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1180 hme_start_locked(ifp); 1181} 1182 1183/* 1184 * RX TCP/UDP checksum 1185 */ 1186static void 1187hme_rxcksum(struct mbuf *m, u_int32_t flags) 1188{ 1189 struct ether_header *eh; 1190 struct ip *ip; 1191 struct udphdr *uh; 1192 int32_t hlen, len, pktlen; 1193 u_int16_t cksum, *opts; 1194 u_int32_t temp32; 1195 1196 pktlen = m->m_pkthdr.len; 1197 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 1198 return; 1199 eh = mtod(m, struct ether_header *); 1200 if (eh->ether_type != htons(ETHERTYPE_IP)) 1201 return; 1202 ip = (struct ip *)(eh + 1); 1203 if (ip->ip_v != IPVERSION) 1204 return; 1205 1206 hlen = ip->ip_hl << 2; 1207 pktlen -= sizeof(struct ether_header); 1208 if (hlen < sizeof(struct ip)) 1209 return; 1210 if (ntohs(ip->ip_len) < hlen) 1211 return; 1212 if (ntohs(ip->ip_len) != pktlen) 1213 return; 1214 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) 1215 return; /* can't handle fragmented packet */ 1216 1217 switch (ip->ip_p) { 1218 case IPPROTO_TCP: 1219 if (pktlen < (hlen + sizeof(struct tcphdr))) 1220 return; 1221 break; 1222 case IPPROTO_UDP: 1223 if (pktlen < (hlen + sizeof(struct udphdr))) 1224 return; 1225 uh = (struct udphdr *)((caddr_t)ip + hlen); 1226 if (uh->uh_sum == 0) 1227 return; /* no checksum */ 1228 break; 1229 default: 1230 return; 1231 } 1232 1233 cksum = ~(flags & HME_XD_RXCKSUM); 1234 /* checksum fixup for IP options */ 1235 len = hlen - sizeof(struct ip); 1236 if (len > 0) { 1237 opts = (u_int16_t *)(ip + 1); 1238 for (; len > 0; len -= sizeof(u_int16_t), opts++) { 1239 temp32 = cksum - *opts; 1240 temp32 = (temp32 >> 16) + (temp32 & 65535); 1241 cksum = temp32 & 65535; 1242 } 1243 } 1244 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 1245 m->m_pkthdr.csum_data = cksum; 1246} 1247 1248/* 1249 * Receive interrupt. 1250 */ 1251static void 1252hme_rint(struct hme_softc *sc) 1253{ 1254 caddr_t xdr = sc->sc_rb.rb_rxd; 1255 struct ifnet *ifp = sc->sc_ifp; 1256 unsigned int ri, len; 1257 int progress = 0; 1258 u_int32_t flags; 1259 1260 /* 1261 * Process all buffers with valid data. 1262 */ 1263 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD); 1264 for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) { 1265 flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri); 1266 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags); 1267 if ((flags & HME_XD_OWN) != 0) 1268 break; 1269 1270 progress++; 1271 if ((flags & HME_XD_OFL) != 0) { 1272 device_printf(sc->sc_dev, "buffer overflow, ri=%d; " 1273 "flags=0x%x\n", ri, flags); 1274 ifp->if_ierrors++; 1275 hme_discard_rxbuf(sc, ri); 1276 } else { 1277 len = HME_XD_DECODE_RSIZE(flags); 1278 hme_read(sc, ri, len, flags); 1279 } 1280 } 1281 if (progress) { 1282 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, 1283 BUS_DMASYNC_PREWRITE); 1284 } 1285 sc->sc_rb.rb_rdtail = ri; 1286} 1287 1288static void 1289hme_eint(struct hme_softc *sc, u_int status) 1290{ 1291 1292 if ((status & HME_SEB_STAT_MIFIRQ) != 0) { 1293 device_printf(sc->sc_dev, "XXXlink status changed\n"); 1294 return; 1295 } 1296 1297 /* check for fatal errors that needs reset to unfreeze DMA engine */ 1298 if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) { 1299 HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status); 1300 hme_init_locked(sc); 1301 } 1302} 1303 1304void 1305hme_intr(void *v) 1306{ 1307 struct hme_softc *sc = (struct hme_softc *)v; 1308 u_int32_t status; 1309 1310 HME_LOCK(sc); 1311 status = HME_SEB_READ_4(sc, HME_SEBI_STAT); 1312 CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status); 1313 1314 if ((status & HME_SEB_STAT_ALL_ERRORS) != 0) 1315 hme_eint(sc, status); 1316 1317 if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0) 1318 hme_tint(sc); 1319 1320 if ((status & HME_SEB_STAT_RXTOHOST) != 0) 1321 hme_rint(sc); 1322 HME_UNLOCK(sc); 1323} 1324 1325 1326static void 1327hme_watchdog(struct ifnet *ifp) 1328{ 1329 struct hme_softc *sc = ifp->if_softc; 1330#ifdef HMEDEBUG 1331 u_int32_t status; 1332#endif 1333 1334 HME_LOCK(sc); 1335#ifdef HMEDEBUG 1336 status = HME_SEB_READ_4(sc, HME_SEBI_STAT); 1337 CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status); 1338#endif 1339 device_printf(sc->sc_dev, "device timeout\n"); 1340 ++ifp->if_oerrors; 1341 1342 hme_init_locked(sc); 1343 HME_UNLOCK(sc); 1344} 1345 1346/* 1347 * Initialize the MII Management Interface 1348 */ 1349static void 1350hme_mifinit(struct hme_softc *sc) 1351{ 1352 u_int32_t v; 1353 1354 /* Configure the MIF in frame mode */ 1355 v = HME_MIF_READ_4(sc, HME_MIFI_CFG); 1356 v &= ~HME_MIF_CFG_BBMODE; 1357 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); 1358} 1359 1360/* 1361 * MII interface 1362 */ 1363int 1364hme_mii_readreg(device_t dev, int phy, int reg) 1365{ 1366 struct hme_softc *sc = device_get_softc(dev); 1367 int n; 1368 u_int32_t v; 1369 1370 /* Select the desired PHY in the MIF configuration register */ 1371 v = HME_MIF_READ_4(sc, HME_MIFI_CFG); 1372 /* Clear PHY select bit */ 1373 v &= ~HME_MIF_CFG_PHY; 1374 if (phy == HME_PHYAD_EXTERNAL) 1375 /* Set PHY select bit to get at external device */ 1376 v |= HME_MIF_CFG_PHY; 1377 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); 1378 1379 /* Construct the frame command */ 1380 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | 1381 HME_MIF_FO_TAMSB | 1382 (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) | 1383 (phy << HME_MIF_FO_PHYAD_SHIFT) | 1384 (reg << HME_MIF_FO_REGAD_SHIFT); 1385 1386 HME_MIF_WRITE_4(sc, HME_MIFI_FO, v); 1387 for (n = 0; n < 100; n++) { 1388 DELAY(1); 1389 v = HME_MIF_READ_4(sc, HME_MIFI_FO); 1390 if (v & HME_MIF_FO_TALSB) { 1391 return (v & HME_MIF_FO_DATA); 1392 } 1393 } 1394 1395 device_printf(sc->sc_dev, "mii_read timeout\n"); 1396 return (0); 1397} 1398 1399int 1400hme_mii_writereg(device_t dev, int phy, int reg, int val) 1401{ 1402 struct hme_softc *sc = device_get_softc(dev); 1403 int n; 1404 u_int32_t v; 1405 1406 /* Select the desired PHY in the MIF configuration register */ 1407 v = HME_MIF_READ_4(sc, HME_MIFI_CFG); 1408 /* Clear PHY select bit */ 1409 v &= ~HME_MIF_CFG_PHY; 1410 if (phy == HME_PHYAD_EXTERNAL) 1411 /* Set PHY select bit to get at external device */ 1412 v |= HME_MIF_CFG_PHY; 1413 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); 1414 1415 /* Construct the frame command */ 1416 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | 1417 HME_MIF_FO_TAMSB | 1418 (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) | 1419 (phy << HME_MIF_FO_PHYAD_SHIFT) | 1420 (reg << HME_MIF_FO_REGAD_SHIFT) | 1421 (val & HME_MIF_FO_DATA); 1422 1423 HME_MIF_WRITE_4(sc, HME_MIFI_FO, v); 1424 for (n = 0; n < 100; n++) { 1425 DELAY(1); 1426 v = HME_MIF_READ_4(sc, HME_MIFI_FO); 1427 if (v & HME_MIF_FO_TALSB) 1428 return (1); 1429 } 1430 1431 device_printf(sc->sc_dev, "mii_write timeout\n"); 1432 return (0); 1433} 1434 1435void 1436hme_mii_statchg(device_t dev) 1437{ 1438 struct hme_softc *sc = device_get_softc(dev); 1439 int instance; 1440 int phy; 1441 u_int32_t v; 1442 1443 instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media); 1444 phy = sc->sc_phys[instance]; 1445#ifdef HMEDEBUG 1446 if (sc->sc_debug) 1447 printf("hme_mii_statchg: status change: phy = %d\n", phy); 1448#endif 1449 1450 /* Select the current PHY in the MIF configuration register */ 1451 v = HME_MIF_READ_4(sc, HME_MIFI_CFG); 1452 v &= ~HME_MIF_CFG_PHY; 1453 if (phy == HME_PHYAD_EXTERNAL) 1454 v |= HME_MIF_CFG_PHY; 1455 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); 1456 1457 /* Set the MAC Full Duplex bit appropriately */ 1458 v = HME_MAC_READ_4(sc, HME_MACI_TXCFG); 1459 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0)) 1460 return; 1461 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 1462 v |= HME_MAC_TXCFG_FULLDPLX; 1463 else 1464 v &= ~HME_MAC_TXCFG_FULLDPLX; 1465 HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v); 1466 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE)) 1467 return; 1468} 1469 1470static int 1471hme_mediachange(struct ifnet *ifp) 1472{ 1473 struct hme_softc *sc = ifp->if_softc; 1474 int error; 1475 1476 HME_LOCK(sc); 1477 error = mii_mediachg(sc->sc_mii); 1478 HME_UNLOCK(sc); 1479 return (error); 1480} 1481 1482static void 1483hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1484{ 1485 struct hme_softc *sc = ifp->if_softc; 1486 1487 HME_LOCK(sc); 1488 if ((ifp->if_flags & IFF_UP) == 0) { 1489 HME_UNLOCK(sc); 1490 return; 1491 } 1492 1493 mii_pollstat(sc->sc_mii); 1494 ifmr->ifm_active = sc->sc_mii->mii_media_active; 1495 ifmr->ifm_status = sc->sc_mii->mii_media_status; 1496 HME_UNLOCK(sc); 1497} 1498 1499/* 1500 * Process an ioctl request. 1501 */ 1502static int 1503hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1504{ 1505 struct hme_softc *sc = ifp->if_softc; 1506 struct ifreq *ifr = (struct ifreq *)data; 1507 int error = 0; 1508 1509 switch (cmd) { 1510 case SIOCSIFFLAGS: 1511 HME_LOCK(sc); 1512 if ((ifp->if_flags & IFF_UP) == 0 && 1513 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1514 /* 1515 * If interface is marked down and it is running, then 1516 * stop it. 1517 */ 1518 hme_stop(sc); 1519 } else if ((ifp->if_flags & IFF_UP) != 0 && 1520 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1521 /* 1522 * If interface is marked up and it is stopped, then 1523 * start it. 1524 */ 1525 hme_init_locked(sc); 1526 } else if ((ifp->if_flags & IFF_UP) != 0) { 1527 /* 1528 * Reset the interface to pick up changes in any other 1529 * flags that affect hardware registers. 1530 */ 1531 hme_init_locked(sc); 1532 } 1533 if ((ifp->if_flags & IFF_LINK0) != 0) 1534 sc->sc_csum_features |= CSUM_UDP; 1535 else 1536 sc->sc_csum_features &= ~CSUM_UDP; 1537 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 1538 ifp->if_hwassist = sc->sc_csum_features; 1539#ifdef HMEDEBUG 1540 sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0; 1541#endif 1542 HME_UNLOCK(sc); 1543 break; 1544 1545 case SIOCADDMULTI: 1546 case SIOCDELMULTI: 1547 HME_LOCK(sc); 1548 hme_setladrf(sc, 1); 1549 HME_UNLOCK(sc); 1550 error = 0; 1551 break; 1552 case SIOCGIFMEDIA: 1553 case SIOCSIFMEDIA: 1554 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); 1555 break; 1556 case SIOCSIFCAP: 1557 HME_LOCK(sc); 1558 ifp->if_capenable = ifr->ifr_reqcap; 1559 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 1560 ifp->if_hwassist = sc->sc_csum_features; 1561 else 1562 ifp->if_hwassist = 0; 1563 HME_UNLOCK(sc); 1564 break; 1565 default: 1566 error = ether_ioctl(ifp, cmd, data); 1567 break; 1568 } 1569 1570 return (error); 1571} 1572 1573/* 1574 * Set up the logical address filter. 1575 */ 1576static void 1577hme_setladrf(struct hme_softc *sc, int reenable) 1578{ 1579 struct ifnet *ifp = sc->sc_ifp; 1580 struct ifmultiaddr *inm; 1581 u_int32_t crc; 1582 u_int32_t hash[4]; 1583 u_int32_t macc; 1584 1585 HME_LOCK_ASSERT(sc, MA_OWNED); 1586 /* Clear hash table */ 1587 hash[3] = hash[2] = hash[1] = hash[0] = 0; 1588 1589 /* Get current RX configuration */ 1590 macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG); 1591 1592 /* 1593 * Disable the receiver while changing it's state as the documentation 1594 * mandates. 1595 * We then must wait until the bit clears in the register. This should 1596 * take at most 3.5ms. 1597 */ 1598 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0)) 1599 return; 1600 /* Disable the hash filter before writing to the filter registers. */ 1601 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 1602 HME_MAC_RXCFG_HENABLE, 0)) 1603 return; 1604 1605 /* make RXMAC really SIMPLEX */ 1606 macc |= HME_MAC_RXCFG_ME; 1607 if (reenable) 1608 macc |= HME_MAC_RXCFG_ENABLE; 1609 else 1610 macc &= ~HME_MAC_RXCFG_ENABLE; 1611 1612 if ((ifp->if_flags & IFF_PROMISC) != 0) { 1613 /* Turn on promiscuous mode; turn off the hash filter */ 1614 macc |= HME_MAC_RXCFG_PMISC; 1615 macc &= ~HME_MAC_RXCFG_HENABLE; 1616 ifp->if_flags |= IFF_ALLMULTI; 1617 goto chipit; 1618 } 1619 1620 /* Turn off promiscuous mode; turn on the hash filter */ 1621 macc &= ~HME_MAC_RXCFG_PMISC; 1622 macc |= HME_MAC_RXCFG_HENABLE; 1623 1624 /* 1625 * Set up multicast address filter by passing all multicast addresses 1626 * through a crc generator, and then using the high order 6 bits as an 1627 * index into the 64 bit logical address filter. The high order bit 1628 * selects the word, while the rest of the bits select the bit within 1629 * the word. 1630 */ 1631 1632 IF_ADDR_LOCK(sc->sc_ifp); 1633 TAILQ_FOREACH(inm, &sc->sc_ifp->if_multiaddrs, ifma_link) { 1634 if (inm->ifma_addr->sa_family != AF_LINK) 1635 continue; 1636 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *) 1637 inm->ifma_addr), ETHER_ADDR_LEN); 1638 1639 /* Just want the 6 most significant bits. */ 1640 crc >>= 26; 1641 1642 /* Set the corresponding bit in the filter. */ 1643 hash[crc >> 4] |= 1 << (crc & 0xf); 1644 } 1645 IF_ADDR_UNLOCK(sc->sc_ifp); 1646 1647 ifp->if_flags &= ~IFF_ALLMULTI; 1648 1649chipit: 1650 /* Now load the hash table into the chip */ 1651 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]); 1652 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]); 1653 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]); 1654 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]); 1655 hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0, 1656 macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE | 1657 HME_MAC_RXCFG_ME)); 1658} 1659