/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * from: NetBSD: hme.c,v 1.20 2000/12/14 06:27:25 thorpej Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/hme/if_hme.c 127238 2004-03-20 20:12:13Z mdodd $");

/*
 * HME Ethernet module driver.
 *
 * The HME is e.g. part of the PCIO PCI multi function device.
 * It supports TX gathering and TX and RX checksum offloading.
 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
 * for this offset: mbuf clusters are usually on about 2^11 boundaries, 2 bytes
 * are skipped to make sure the header after the ethernet header is aligned on
 * a natural boundary, so this ensures minimal wastage in the most common case.
 *
 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
 * maximum packet size (this is not verified). Buffers starting on odd
 * boundaries must be mapped so that the burst can start on a natural boundary.
 *
 * Checksumming is not yet supported.
 */
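
/*
 * To make the offset arithmetic above concrete: with mbuf clusters on
 * 2048 byte boundaries and HME_RXOFFS (2) bytes skipped, the 14 byte
 * ethernet header occupies offsets 2..15 of the buffer, so the protocol
 * header that follows starts at offset 16, a naturally aligned address.
 */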

#define HMEDEBUG
#define KTR_HME		KTR_CT2		/* XXX */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <dev/hme/if_hmereg.h>
#include <dev/hme/if_hmevar.h>

static void	hme_start(struct ifnet *);
static void	hme_stop(struct hme_softc *);
static int	hme_ioctl(struct ifnet *, u_long, caddr_t);
static void	hme_tick(void *);
static void	hme_watchdog(struct ifnet *);
static void	hme_init(void *);
static int	hme_add_rxbuf(struct hme_softc *, unsigned int, int);
static int	hme_meminit(struct hme_softc *);
static int	hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
    u_int32_t, u_int32_t);
static void	hme_mifinit(struct hme_softc *);
static void	hme_reset(struct hme_softc *);
static void	hme_setladrf(struct hme_softc *, int);

static int	hme_mediachange(struct ifnet *);
static void	hme_mediastatus(struct ifnet *, struct ifmediareq *);

static int	hme_load_txmbuf(struct hme_softc *, struct mbuf *);
static void	hme_read(struct hme_softc *, int, int);
static void	hme_eint(struct hme_softc *, u_int);
static void	hme_rint(struct hme_softc *);
static void	hme_tint(struct hme_softc *);

static void	hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
static void	hme_rxdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void	hme_txdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);

devclass_t hme_devclass;

static int hme_nerr;

DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(hme, miibus, 1, 1, 1);

#define	HME_SPC_READ_4(spc, sc, offs) \
	bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (sc)->sc_ ## spc ## o + (offs))
#define	HME_SPC_WRITE_4(spc, sc, offs, v) \
	bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (sc)->sc_ ## spc ## o + (offs), (v))

#define	HME_SEB_READ_4(sc, offs)	HME_SPC_READ_4(seb, (sc), (offs))
#define	HME_SEB_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(seb, (sc), (offs), (v))
#define	HME_ERX_READ_4(sc, offs)	HME_SPC_READ_4(erx, (sc), (offs))
#define	HME_ERX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(erx, (sc), (offs), (v))
#define	HME_ETX_READ_4(sc, offs)	HME_SPC_READ_4(etx, (sc), (offs))
#define	HME_ETX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(etx, (sc), (offs), (v))
#define	HME_MAC_READ_4(sc, offs)	HME_SPC_READ_4(mac, (sc), (offs))
#define	HME_MAC_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mac, (sc), (offs), (v))
#define	HME_MIF_READ_4(sc, offs)	HME_SPC_READ_4(mif, (sc), (offs))
#define	HME_MIF_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mif, (sc), (offs), (v))
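
/*
 * As an example of the token pasting above, HME_SEB_READ_4(sc, offs)
 * expands to
 *	bus_space_read_4((sc)->sc_sebt, (sc)->sc_sebh, (sc)->sc_sebo + (offs))
 * so each register bank is addressed through the tag/handle/offset
 * triple that the front-end stored in the softc.
 */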

#define	HME_MAXERR	5
#define	HME_WHINE(dev, ...) do {					\
	if (hme_nerr++ < HME_MAXERR)					\
		device_printf(dev, __VA_ARGS__);			\
	if (hme_nerr == HME_MAXERR) {					\
		device_printf(dev, "too many errors; not reporting "	\
		    "any more\n");					\
	}								\
} while(0)

int
hme_config(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;
	bus_size_t size;
	int error, rdesc, tdesc, i;

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the DMA bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles, tags and offsets (split for SBus compatibility):
	 *	sc_seb{t,h,o}	(Shared Ethernet Block registers)
	 *	sc_erx{t,h,o}	(Receiver Unit registers)
	 *	sc_etx{t,h,o}	(Transmitter Unit registers)
	 *	sc_mac{t,h,o}	(MAC registers)
	 *	sc_mif{t,h,o}	(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 */

	/* Make sure the chip is stopped. */
	hme_stop(sc);

	/*
	 * Allocate DMA capable memory.
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of HME_N*DESC.
	 */
	size = 4096;

	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag);
	if (error)
		return (error);

	error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, busdma_lock_mutex,
	    &Giant, &sc->sc_cdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->sc_rdmatag);
	if (error)
		goto fail_ctag;

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	/* Allocate the control/TX DMA buffer. */
	error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
	    0, &sc->sc_cdmamap);
	if (error != 0) {
		device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
		goto fail_ttag;
	}

	/* Load the buffer. */
	sc->sc_rb.rb_dmabase = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
	    sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
	    sc->sc_rb.rb_dmabase == 0) {
		device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
		    error);
		goto fail_free;
	}
	CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
	    sc->sc_rb.rb_dmabase);
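
	/*
	 * A sketch of how hme_meminit() lays out this 4096 byte area:
	 * the TX descriptor ring starts at offset 0 and the RX ring at
	 * the next 2048 byte boundary. Since 256 descriptors occupy 2048
	 * bytes, each ring fits its region regardless of HME_N*DESC.
	 */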

	/*
	 * Prepare the RX descriptors. rdesc serves as marker for the last
	 * processed descriptor and may be used later on.
	 */
	for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
		sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
		error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
		if (error != 0)
			goto fail_rxdesc;
	}
	error = bus_dmamap_create(sc->sc_rdmatag, 0,
	    &sc->sc_rb.rb_spare_dmamap);
	if (error != 0)
		goto fail_rxdesc;
	/* Same for the TX descs. */
	for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
		sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
		error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
		if (error != 0)
			goto fail_txdesc;
	}

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_init = hme_init;
	ifp->if_output = ether_output;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_snd.ifq_maxlen = HME_NTXQ;

	hme_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
	    hme_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_rxdesc;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping. We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	    child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}
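
	/*
	 * The instance -> phy mapping built above is consulted by
	 * hme_mii_statchg() to pick the PHY that must be selected in the
	 * MIF configuration register when the media state changes.
	 */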

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);

	callout_init(&sc->sc_tick_ch, 0);
	return (0);

fail_txdesc:
	for (i = 0; i < tdesc; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
fail_rxdesc:
	for (i = 0; i < rdesc; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
fail_free:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}

void
hme_detach(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	ether_ifdetach(ifp);
	hme_stop(sc);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < HME_NTXQ; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
	for (i = 0; i < HME_NRXDESC; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
hme_suspend(struct hme_softc *sc)
{

	hme_stop(sc);
}

void
hme_resume(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	if ((ifp->if_flags & IFF_UP) != 0)
		hme_init(ifp);
}

static void
hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
	sc->sc_rb.rb_dmabase = segs[0].ds_addr;
}

static void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}

static void
hme_reset(struct hme_softc *sc)
{
	int s;

	s = splnet();
	hme_init(sc);
	splx(s);
}

static void
hme_stop(struct hme_softc *sc)
{
	u_int32_t v;
	int n;

	callout_stop(&sc->sc_tick_ch);

	/* Reset transmitter and receiver. */
	HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
	    HME_SEB_RESET_ERX);

	for (n = 0; n < 20; n++) {
		v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			return;
		DELAY(20);
	}

	device_printf(sc->sc_dev, "hme_stop: reset failed\n");
}

static void
hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
    bus_size_t totsize, int error)
{
	bus_addr_t *a = xsc;

	KASSERT(nsegs == 1, ("hme_rxdma_callback: multiple segments!"));
	if (error != 0)
		return;
	*a = segs[0].ds_addr;
}
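
/*
 * A worked example of the alignment done in hme_add_rxbuf() below,
 * assuming sc_burst is 64 and HME_MINRXALIGN is the documented minimum
 * of 16: a = max(16, 64) = 64, and for a cluster whose data address b
 * ends in 0x02, m_adj() skips roundup2(b, 64) - b = 62 bytes so that
 * the DMA burst can start on a natural boundary.
 */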

/*
 * Discard the contents of an mbuf in the RX ring, freeing the buffer in
 * the ring for subsequent use.
 */
static __inline void
hme_discard_rxbuf(struct hme_softc *sc, int ix)
{

	/*
	 * Dropped a packet, reinitialize the descriptor and turn the
	 * ownership back to the hardware.
	 */
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
}

static int
hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
{
	struct hme_rxdesc *rd;
	struct mbuf *m;
	bus_addr_t ba;
	bus_dmamap_t map;
	uintptr_t b;
	int a, unmap;

	rd = &sc->sc_rb.rb_rxdesc[ri];
	unmap = rd->hrx_m != NULL;
	if (unmap && keepold) {
		/*
		 * Reinitialize the descriptor flags, as they may have been
		 * altered by the hardware.
		 */
		hme_discard_rxbuf(sc, ri);
		return (0);
	}
	if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	b = mtod(m, uintptr_t);
	/*
	 * Required alignment boundary. At least 16 is needed, but since
	 * the mapping must be done in a way that a burst can start on a
	 * natural boundary we might need to extend this.
	 */
	a = max(HME_MINRXALIGN, sc->sc_burst);
	/*
	 * Make sure the buffer is suitably aligned. The 2 byte offset is
	 * removed when the mbuf is handed up. XXX: this ensures at least
	 * 16 byte alignment of the header adjacent to the ethernet header,
	 * which should be sufficient in all cases. Nevertheless, this
	 * second-guesses ALIGN().
	 */
	m_adj(m, roundup2(b, a) - b);
	if (bus_dmamap_load_mbuf(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
	    m, hme_rxdma_callback, &ba, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	if (unmap) {
		bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
	}
	map = rd->hrx_dmamap;
	rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
	sc->sc_rb.rb_spare_dmamap = map;
	bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
	HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, ba);
	rd->hrx_m = m;
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
	return (0);
}

static int
hme_meminit(struct hme_softc *sc)
{
	struct hme_ring *hr = &sc->sc_rb;
	struct hme_txdesc *td;
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	int error;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors.
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_NTXDESC * HME_XD_SIZE;
	dma += HME_NTXDESC * HME_XD_SIZE;
	/*
	 * We have reserved descriptor space until the next 2048 byte
	 * boundary.
	 */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors.
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_NRXDESC * HME_XD_SIZE;
	dma += HME_NRXDESC * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize transmit buffer descriptors.
	 */
	for (i = 0; i < HME_NTXDESC; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
	}
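
	/*
	 * TX bookkeeping uses two queues: rb_txfreeq holds the software
	 * descriptors available to hme_load_txmbuf(), while rb_txbusyq
	 * holds those with a transmission in flight; hme_tint() moves
	 * finished entries back to the free queue.
	 */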
	STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
	STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
	for (i = 0; i < HME_NTXQ; i++) {
		td = &sc->sc_rb.rb_txdesc[i];
		if (td->htx_m != NULL) {
			m_freem(td->htx_m);
			bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
			td->htx_m = NULL;
		}
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
	}

	/*
	 * Initialize receive buffer descriptors.
	 */
	for (i = 0; i < HME_NRXDESC; i++) {
		error = hme_add_rxbuf(sc, i, 1);
		if (error != 0)
			return (error);
	}

	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE);

	hr->rb_tdhead = hr->rb_tdtail = 0;
	hr->rb_td_nbusy = 0;
	hr->rb_rdtail = 0;
	CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
	    hr->rb_txddma);
	CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
	    hr->rb_rxddma);
	CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
	CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
	return (0);
}

static int
hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
    u_int32_t clr, u_int32_t set)
{
	int i = 0;

	val &= ~clr;
	val |= set;
	HME_MAC_WRITE_4(sc, reg, val);
	if (clr == 0 && set == 0)
		return (1);	/* just write, no bits to wait for */
	do {
		DELAY(100);
		i++;
		val = HME_MAC_READ_4(sc, reg);
		if (i > 40) {
			/* After 3.5ms, we should have been done. */
			device_printf(sc->sc_dev, "timeout while writing to "
			    "MAC configuration register\n");
			return (0);
		}
	} while ((val & clr) != 0 && (val & set) != set);
	return (1);
}
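
/*
 * hme_mac_bitflip() is the helper used whenever a MAC state change
 * requires the unit to be quiesced first: hme_setladrf(), for example,
 * clears HME_MAC_RXCFG_ENABLE and HME_MAC_RXCFG_HENABLE with it before
 * rewriting the hash table, and sets them again afterwards, polling for
 * completion after each write.
 */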

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
hme_init(void *xsc)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int8_t *ea;
	u_int32_t v;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel. */
	hme_stop(sc);

	/* Re-initialize the MIF. */
	hme_mifinit(sc);

#if 0
	/* Mask all MIF interrupts, just in case. */
	HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Set up data structures in host memory. */
	if (hme_meminit(sc) != 0) {
		device_printf(sc->sc_dev, "out of buffers; init aborted.");
		return;
	}

	/* step 4. TX MAC registers & counters */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, ETHER_MAX_LEN);

	/* Load the station MAC address. */
	ea = sc->sc_arpcom.ac_enaddr;
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);

	/* Note: Accepting power-on default for other MAC registers here. */

	/* step 5. RX MAC registers & counters */
	hme_setladrf(sc, 0);

	/* step 6 & 7. Program Descriptor Ring Base Addresses. */
	HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	/* Transmit Descriptor ring size: in increments of 16 */
	HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);

	HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, ETHER_MAX_LEN);

	/* step 8. Global Configuration & Interrupt Mask */
	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
	    ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
	    HME_SEB_STAT_HOSTTOTX |
	    HME_SEB_STAT_RXTOHOST |
	    HME_SEB_STAT_TXALL |
	    HME_SEB_STAT_TXPERR |
	    HME_SEB_STAT_RCNTEXP |
	    HME_SEB_STAT_ALL_ERRORS));

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values. */

	/* Enable DMA. */
	v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);

	/* step 10. ERX Configuration */
	v = HME_ERX_READ_4(sc, HME_ERXI_CFG);

	/* Encode Receive Descriptor ring size: four possible values. */
	v &= ~HME_ERX_CFG_RINGSIZEMSK;
	switch (HME_NRXDESC) {
	case 32:
		v |= HME_ERX_CFG_RINGSIZE32;
		break;
	case 64:
		v |= HME_ERX_CFG_RINGSIZE64;
		break;
	case 128:
		v |= HME_ERX_CFG_RINGSIZE128;
		break;
	case 256:
		v |= HME_ERX_CFG_RINGSIZE256;
		break;
	default:
		printf("hme: invalid Receive Descriptor ring size\n");
		break;
	}

	/* Enable DMA, fix RX first byte offset. */
	v &= ~HME_ERX_CFG_FBO_MASK;
	v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
	CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
	HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
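
	/*
	 * The first byte offset programmed above makes the chip store
	 * each frame HME_RXOFFS (2) bytes into its buffer; hme_read()
	 * strips those bytes again with m_adj() before handing the mbuf
	 * up the stack.
	 */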

	/* step 11. XIF Configuration */
	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	/* If an external transceiver is connected, enable its MII drivers. */
	if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
		v |= HME_MAC_XIF_MIIENABLE;
	CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	v &= ~(HME_MAC_RXCFG_DCRCS);
	CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command. */

#ifdef HMEDEBUG
	/* Debug: double-check. */
	CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
	    "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
	    HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
	    HME_ERX_READ_4(sc, HME_ERXI_RING),
	    HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
	CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
	    HME_SEB_READ_4(sc, HME_SEBI_IMASK),
	    HME_ERX_READ_4(sc, HME_ERXI_CFG),
	    HME_ETX_READ_4(sc, HME_ETXI_CFG));
	CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
	    HME_MAC_READ_4(sc, HME_MACI_RXCFG),
	    HME_MAC_READ_4(sc, HME_MACI_TXCFG));
#endif

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	hme_start(ifp);
}

struct hme_txdma_arg {
	struct hme_softc	*hta_sc;
	struct hme_txdesc	*hta_htx;
	int			hta_ndescs;
};

/*
 * XXX: this relies on the fact that segments returned by
 * bus_dmamap_load_mbuf() are readable from the nearest burst boundary on
 * (i.e. potentially before ds_addr) to the first boundary beyond the end.
 * This is usually a safe assumption to make, but is not documented.
 */
static void
hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
    bus_size_t totsz, int error)
{
	struct hme_txdma_arg *ta = xsc;
	struct hme_txdesc *htx;
	bus_size_t len = 0;
	caddr_t txd;
	u_int32_t flags = 0;
	int i, tdhead, pci;

	if (error != 0)
		return;

	tdhead = ta->hta_sc->sc_rb.rb_tdhead;
	pci = ta->hta_sc->sc_pci;
	txd = ta->hta_sc->sc_rb.rb_txd;
	htx = ta->hta_htx;

	if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
		ta->hta_ndescs = -1;
		return;
	}
	ta->hta_ndescs = nsegs;

	for (i = 0; i < nsegs; i++) {
		if (segs[i].ds_len == 0)
			continue;

		/* Fill the ring entry. */
		flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
		if (len == 0)
			flags |= HME_XD_SOP;
		if (len + segs[i].ds_len == totsz)
			flags |= HME_XD_EOP;
		CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
		    "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags,
		    (u_int)segs[i].ds_addr);
		HME_XD_SETFLAGS(pci, txd, tdhead, flags);
		HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr);

		ta->hta_sc->sc_rb.rb_td_nbusy++;
		htx->htx_lastdesc = tdhead;
		tdhead = (tdhead + 1) % HME_NTXDESC;
		len += segs[i].ds_len;
	}
	ta->hta_sc->sc_rb.rb_tdhead = tdhead;
	KASSERT((flags & HME_XD_EOP) != 0,
	    ("hme_txdma_callback: missed end of packet!"));
}
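
/*
 * Note that hme_txdma_callback() deliberately leaves HME_XD_OWN clear
 * in every flag word it writes; hme_load_txmbuf() below then grants
 * ownership to the chip in reverse order, from the last descriptor of
 * the chain back to the first, so the hardware can never see a
 * partially constructed chain.
 */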

/*
 * Routine to DMA map an mbuf chain, set up the descriptor rings
 * accordingly and start the transmission.
 * Returns 0 on success, -1 if there were not enough free descriptors to map
 * the packet, or an errno otherwise.
 */
static int
hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0)
{
	struct hme_txdma_arg cba;
	struct hme_txdesc *td;
	int error, si, ri;
	u_int32_t flags;

	si = sc->sc_rb.rb_tdhead;
	if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
		return (-1);
	td->htx_m = m0;
	cba.hta_sc = sc;
	cba.hta_htx = td;
	if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap,
	    m0, hme_txdma_callback, &cba, 0)) != 0)
		goto fail;
	if (cba.hta_ndescs == -1) {
		error = -1;
		goto fail;
	}
	bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
	STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q);

	/* Turn descriptor ownership over to the hme, back to front. */
	ri = sc->sc_rb.rb_tdhead;
	CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)",
	    ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri));
	do {
		ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
		    HME_XD_OWN;
		CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
		    ri, si, flags);
		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags);
	} while (ri != si);

	/* Start the transmission. */
	HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
	return (0);
fail:
	bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
	return (error);
}

/*
 * Pass a packet to the higher levels.
 */
static void
hme_read(struct hme_softc *sc, int ix, int len)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {
#ifdef HMEDEBUG
		HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
		    len);
#endif
		ifp->if_ierrors++;
		hme_discard_rxbuf(sc, ix);
		return;
	}

	m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
	CTR1(KTR_HME, "hme_read: len %d", len);

	if (hme_add_rxbuf(sc, ix, 0) != 0) {
		/*
		 * hme_add_rxbuf will leave the old buffer in the ring until
		 * it is sure that a new buffer can be mapped. If it can not,
		 * drop the packet, but leave the interface up.
		 */
		ifp->if_iqdrops++;
		hme_discard_rxbuf(sc, ix);
		return;
	}

	ifp->if_ipackets++;

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
	m_adj(m, HME_RXOFFS);
	/* Pass the packet up. */
	(*ifp->if_input)(ifp, m);
}

static void
hme_start(struct ifnet *ifp)
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	struct mbuf *m;
	int error, enq = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	error = 0;
	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		error = hme_load_txmbuf(sc, m);
		if (error == -1) {
			ifp->if_flags |= IFF_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m);
			break;
		} else if (error > 0) {
			printf("hme_start: error %d while loading mbuf\n",
			    error);
		} else {
			enq = 1;
			BPF_MTAP(ifp, m);
		}
	}

	if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1)
		ifp->if_flags |= IFF_OACTIVE;
	/* Set the watchdog timer if a packet was queued. */
	if (enq) {
		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
		    BUS_DMASYNC_PREWRITE);
		ifp->if_timer = 5;
	}
}
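
/*
 * hme_start() above depends on the -1 convention of hme_load_txmbuf():
 * on descriptor exhaustion the mbuf is prepended back onto the send
 * queue and IFF_OACTIVE is set; hme_tint() clears the flag again once
 * descriptors have been reclaimed.
 */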

/*
 * Transmit interrupt.
 */
static void
hme_tint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct hme_txdesc *htx;
	unsigned int ri, txflags;

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);

	htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	/* Fetch current position in the transmit ring */
	for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
		if (sc->sc_rb.rb_td_nbusy <= 0) {
			CTR0(KTR_HME, "hme_tint: not busy!");
			break;
		}

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
		CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);

		if ((txflags & HME_XD_OWN) != 0)
			break;

		CTR0(KTR_HME, "hme_tint: not owned");
		--sc->sc_rb.rb_td_nbusy;
		ifp->if_flags &= ~IFF_OACTIVE;

		/* Complete packet transmitted? */
		if ((txflags & HME_XD_EOP) == 0)
			continue;

		KASSERT(htx->htx_lastdesc == ri,
		    ("hme_tint: ring indices skewed: %d != %d!",
		    htx->htx_lastdesc, ri));
		bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);

		ifp->if_opackets++;
		m_freem(htx->htx_m);
		htx->htx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
		htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
	}
	/* Turn off watchdog */
	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	/* Update ring */
	sc->sc_rb.rb_tdtail = ri;

	hme_start(ifp);

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;
}
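
/*
 * The second if_timer check at the end of hme_tint() is not redundant:
 * the hme_start() call in between may queue fresh packets and re-arm
 * the watchdog, so the timer may only stay off if the ring is still
 * completely idle afterwards.
 */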

/*
 * Receive interrupt.
 */
static void
hme_rint(struct hme_softc *sc)
{
	caddr_t xdr = sc->sc_rb.rb_rxd;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	unsigned int ri, len;
	int progress = 0;
	u_int32_t flags;

	/*
	 * Process all buffers with valid data.
	 */
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
		CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
		if ((flags & HME_XD_OWN) != 0)
			break;

		progress++;
		if ((flags & HME_XD_OFL) != 0) {
			device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
			    "flags=0x%x\n", ri, flags);
			ifp->if_ierrors++;
			hme_discard_rxbuf(sc, ri);
		} else {
			len = HME_XD_DECODE_RSIZE(flags);
			hme_read(sc, ri, len);
		}
	}
	if (progress) {
		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	sc->sc_rb.rb_rdtail = ri;
}

static void
hme_eint(struct hme_softc *sc, u_int status)
{

	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed\n");
		return;
	}

	HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
}

void
hme_intr(void *v)
{
	struct hme_softc *sc = (struct hme_softc *)v;
	u_int32_t status;

	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
	CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		hme_rint(sc);
}

static void
hme_watchdog(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
#ifdef HMEDEBUG
	u_int32_t status;

	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
	CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);
#endif
	device_printf(sc->sc_dev, "device timeout\n");
	++ifp->if_oerrors;

	hme_reset(sc);
}

/*
 * Initialize the MII Management Interface.
 */
static void
hme_mifinit(struct hme_softc *sc)
{
	u_int32_t v;

	/* Configure the MIF in frame mode. */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_BBMODE;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
}

/*
 * MII interface
 */
int
hme_mii_readreg(device_t dev, int phy, int reg)
{
	struct hme_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register. */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	/* Clear the PHY select bit. */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* Set the PHY select bit to get at the external device. */
		v |= HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Construct the frame command. */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);

	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return (v & HME_MIF_FO_DATA);
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}
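
/*
 * hme_mii_writereg() below follows the same MIF frame protocol as the
 * read side above: a frame command is composed, written to the frame
 * output register, and HME_MIF_FO_TALSB is polled (for up to roughly
 * 100us here) to detect completion of the cycle.
 */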

int
hme_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct hme_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register. */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	/* Clear the PHY select bit. */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* Set the PHY select bit to get at the external device. */
		v |= HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Construct the frame command. */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}

void
hme_mii_statchg(device_t dev)
{
	struct hme_softc *sc = device_get_softc(dev);
	int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change: phy = %d\n", phy);
#endif

	/* Select the current PHY in the MIF configuration register. */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Set the MAC Full Duplex bit appropriately. */
	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
		return;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= HME_MAC_TXCFG_FULLDPLX;
	else
		v &= ~HME_MAC_TXCFG_FULLDPLX;
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
		return;
}

static int
hme_mediachange(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;

	return (mii_mediachg(sc->sc_mii));
}

static void
hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hme_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
}

/*
 * Process an ioctl request.
 */
static int
hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If the interface is marked down and it is running,
			 * then stop it.
			 */
			hme_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If the interface is marked up and it is stopped,
			 * then start it.
			 */
			hme_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			hme_init(sc);
		}
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		hme_setladrf(sc, 1);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	splx(s);
	return (error);
}

/*
 * Set up the logical address filter.
 */
static void
hme_setladrf(struct hme_softc *sc, int reenable)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ifmultiaddr *inm;
	struct sockaddr_dl *sdl;
	u_char *cp;
	u_int32_t crc;
	u_int32_t hash[4];
	u_int32_t macc;
	int len;

	/* Clear the hash table. */
	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	/* Get the current RX configuration. */
	macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);

	/*
	 * Disable the receiver while changing its state as the documentation
	 * mandates.
	 * We then must wait until the bit clears in the register. This should
	 * take at most 3.5ms.
	 */
	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
		return;
	/* Disable the hash filter before writing to the filter registers. */
	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
	    HME_MAC_RXCFG_HENABLE, 0))
		return;

	if (reenable)
		macc |= HME_MAC_RXCFG_ENABLE;
	else
		macc &= ~HME_MAC_RXCFG_ENABLE;

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter. */
		macc |= HME_MAC_RXCFG_PMISC;
		macc &= ~HME_MAC_RXCFG_HENABLE;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter. */
	macc &= ~HME_MAC_RXCFG_PMISC;
	macc |= HME_MAC_RXCFG_HENABLE;

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a crc generator, and then using the high order
	 * 6 bits as an index into the 64 bit logical address filter. The
	 * high order bits select the word, while the rest of the bits
	 * select the bit within the word.
	 */
	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		sdl = (struct sockaddr_dl *)inm->ifma_addr;
		cp = LLADDR(sdl);
		crc = 0xffffffff;
		for (len = sdl->sdl_alen; --len >= 0;) {
			int octet = *cp++;
			int i;

#define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else {
					crc >>= 1;
				}
				octet >>= 1;
			}
		}
		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Now load the hash table into the chip. */
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
	hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
	    macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE));
}