if_hme.c revision 111119
/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * from: NetBSD: hme.c,v 1.20 2000/12/14 06:27:25 thorpej Exp
 *
 * $FreeBSD: head/sys/dev/hme/if_hme.c 111119 2003-02-19 05:47:46Z imp $
 */

/*
 * HME Ethernet module driver.
 *
 * The HME is e.g. part of the PCIO PCI multi function device.
 * It supports TX gathering and TX and RX checksum offloading.
 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
 * for this offset: mbuf clusters are usually on about 2^11 boundaries, 2 bytes
 * are skipped to make sure the header after the ethernet header is aligned on
 * a natural boundary, so this ensures minimal wastage in the most common case.
 *
 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
 * maximum packet size (this is not verified). Buffers starting on odd
 * boundaries must be mapped so that the burst can start on a natural boundary.
 *
 * Checksumming is not yet supported.
 */
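
/*
 * Worked example of the offset choice above (illustrative address): a
 * cluster starting on a 2^11 boundary, say at 0x12345800, is used with a
 * first byte offset of 2 (HME_RXOFFS), so the frame starts at 0x12345802;
 * the 14 byte ethernet header then ends at 0x12345810, leaving the
 * following (e.g. IP) header on a 16 byte boundary.
 */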

#define HMEDEBUG
#define KTR_HME		KTR_CT2		/* XXX */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <hme/if_hmereg.h>
#include <hme/if_hmevar.h>

static void	hme_start(struct ifnet *);
static void	hme_stop(struct hme_softc *);
static int	hme_ioctl(struct ifnet *, u_long, caddr_t);
static void	hme_tick(void *);
static void	hme_watchdog(struct ifnet *);
static void	hme_init(void *);
static int	hme_add_rxbuf(struct hme_softc *, unsigned int, int);
static int	hme_meminit(struct hme_softc *);
static int	hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
    u_int32_t, u_int32_t);
static void	hme_mifinit(struct hme_softc *);
static void	hme_reset(struct hme_softc *);
static void	hme_setladrf(struct hme_softc *, int);

static int	hme_mediachange(struct ifnet *);
static void	hme_mediastatus(struct ifnet *, struct ifmediareq *);

static int	hme_load_txmbuf(struct hme_softc *, struct mbuf *);
static void	hme_read(struct hme_softc *, int, int);
static void	hme_eint(struct hme_softc *, u_int);
static void	hme_rint(struct hme_softc *);
static void	hme_tint(struct hme_softc *);

static void	hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
static void	hme_rxdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void	hme_txdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);

devclass_t hme_devclass;

static int hme_nerr;

DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(hme, miibus, 1, 1, 1);

#define	HME_SPC_READ_4(spc, sc, offs) \
	bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (sc)->sc_ ## spc ## o + (offs))
#define	HME_SPC_WRITE_4(spc, sc, offs, v) \
	bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (sc)->sc_ ## spc ## o + (offs), (v))

#define	HME_SEB_READ_4(sc, offs)	HME_SPC_READ_4(seb, (sc), (offs))
#define	HME_SEB_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(seb, (sc), (offs), (v))
#define	HME_ERX_READ_4(sc, offs)	HME_SPC_READ_4(erx, (sc), (offs))
#define	HME_ERX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(erx, (sc), (offs), (v))
#define	HME_ETX_READ_4(sc, offs)	HME_SPC_READ_4(etx, (sc), (offs))
#define	HME_ETX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(etx, (sc), (offs), (v))
#define	HME_MAC_READ_4(sc, offs)	HME_SPC_READ_4(mac, (sc), (offs))
#define	HME_MAC_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mac, (sc), (offs), (v))
#define	HME_MIF_READ_4(sc, offs)	HME_SPC_READ_4(mif, (sc), (offs))
#define	HME_MIF_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mif, (sc), (offs), (v))
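
/*
 * For reference, the token pasting above resolves to the per-block bus
 * space fields that the front-end initializes (see the comment at the top
 * of hme_config() below); e.g. HME_SEB_READ_4(sc, HME_SEBI_STAT) expands
 * to:
 *
 *	bus_space_read_4((sc)->sc_sebt, (sc)->sc_sebh,
 *	    (sc)->sc_sebo + (HME_SEBI_STAT))
 */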

#define	HME_MAXERR	5
#define	HME_WHINE(dev, ...) do {					\
	if (hme_nerr++ < HME_MAXERR)					\
		device_printf(dev, __VA_ARGS__);			\
	if (hme_nerr == HME_MAXERR) {					\
		device_printf(dev, "too many errors; not reporting any "\
		    "more\n");						\
	}								\
} while(0)

int
hme_config(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;
	bus_size_t size;
	int error, rdesc, tdesc, i;

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the dma bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles, tags and offsets (split for SBus compatibility):
	 *	sc_seb{t,h,o}	(Shared Ethernet Block registers)
	 *	sc_erx{t,h,o}	(Receiver Unit registers)
	 *	sc_etx{t,h,o}	(Transmitter Unit registers)
	 *	sc_mac{t,h,o}	(MAC registers)
	 *	sc_mif{t,h,o}	(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 */

	/* Make sure the chip is stopped. */
	hme_stop(sc);

	/*
	 * Allocate DMA capable memory.
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of HME_N*DESC.
	 */
	size = 4096;

	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &sc->sc_pdmatag);
	if (error)
		return (error);

	error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, &sc->sc_cdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    &sc->sc_rdmatag);
	if (error)
		goto fail_ctag;

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	/* Allocate control/TX DMA buffer */
	error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
	    0, &sc->sc_cdmamap);
	if (error != 0) {
		device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
		goto fail_ttag;
	}

	/* Load the buffer */
	sc->sc_rb.rb_dmabase = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
	    sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
	    sc->sc_rb.rb_dmabase == 0) {
		device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
		    error);
		goto fail_free;
	}
	CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
	    sc->sc_rb.rb_dmabase);

	/*
	 * Prepare the RX descriptors. rdesc serves as marker for the last
	 * processed descriptor and may be used later on.
	 */
	for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
		sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
		error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
		if (error != 0)
			goto fail_rxdesc;
	}
	error = bus_dmamap_create(sc->sc_rdmatag, 0,
	    &sc->sc_rb.rb_spare_dmamap);
	if (error != 0)
		goto fail_rxdesc;
	/* Same for the TX descs. */
	for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
		sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
		error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
		if (error != 0)
			goto fail_txdesc;
	}

	device_printf(sc->sc_dev, "Ethernet address:");
	for (i = 0; i < 6; i++)
		printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);
	printf("\n");

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	ifp->if_unit = device_get_unit(sc->sc_dev);
	ifp->if_name = "hme";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_init = hme_init;
	ifp->if_output = ether_output;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_snd.ifq_maxlen = HME_NTXQ;

	hme_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
	    hme_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_rxdesc;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping. We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	    child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}
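
	/*
	 * The sc_phys[] map built above is consulted by hme_mii_statchg(),
	 * which translates the MII instance of the currently selected media
	 * back into a phy number in order to program the PHY select bit in
	 * the MIF configuration register.
	 */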

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);

	callout_init(&sc->sc_tick_ch, 0);
	return (0);

fail_txdesc:
	for (i = 0; i < tdesc; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
fail_rxdesc:
	for (i = 0; i < rdesc; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
fail_free:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}

void
hme_detach(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	ether_ifdetach(ifp);
	hme_stop(sc);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < HME_NTXQ; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
	for (i = 0; i < HME_NRXDESC; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
hme_suspend(struct hme_softc *sc)
{

	hme_stop(sc);
}

void
hme_resume(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	if ((ifp->if_flags & IFF_UP) != 0)
		hme_init(ifp);
}

static void
hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
	sc->sc_rb.rb_dmabase = segs[0].ds_addr;
}

static void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}

static void
hme_reset(struct hme_softc *sc)
{
	int s;

	s = splnet();
	hme_init(sc);
	splx(s);
}

static void
hme_stop(struct hme_softc *sc)
{
	u_int32_t v;
	int n;

	callout_stop(&sc->sc_tick_ch);

	/* Reset transmitter and receiver */
	HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
	    HME_SEB_RESET_ERX);

	for (n = 0; n < 20; n++) {
		v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			return;
		DELAY(20);
	}

	device_printf(sc->sc_dev, "hme_stop: reset failed\n");
}

static void
hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
    bus_size_t totsize, int error)
{
	bus_addr_t *a = xsc;

	KASSERT(nsegs == 1, ("hme_rxdma_callback: multiple segments!"));
	if (error != 0)
		return;
	*a = segs[0].ds_addr;
}
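
/*
 * A note on the load-callback pattern used with hme_cdma_callback() above:
 * hme_config() preinitializes rb_dmabase to 0 and rechecks it after
 * bus_dmamap_load(), so a load that fails or defers the callback leaves
 * the sentinel in place and is treated as an error instead of silently
 * using a bad bus address.
 */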

/*
 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
 * ring for subsequent use.
 */
static __inline void
hme_discard_rxbuf(struct hme_softc *sc, int ix)
{

	/*
	 * Dropped a packet, reinitialize the descriptor and turn the
	 * ownership back to the hardware.
	 */
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
}

static int
hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
{
	struct hme_rxdesc *rd;
	struct mbuf *m;
	bus_addr_t ba;
	bus_dmamap_t map;
	uintptr_t b;
	int a, unmap;

	rd = &sc->sc_rb.rb_rxdesc[ri];
	unmap = rd->hrx_m != NULL;
	if (unmap && keepold) {
		/*
		 * Reinitialize the descriptor flags, as they may have been
		 * altered by the hardware.
		 */
		hme_discard_rxbuf(sc, ri);
		return (0);
	}
	if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	b = mtod(m, uintptr_t);
	/*
	 * Required alignment boundary. At least 16 is needed, but since
	 * the mapping must be done in a way that a burst can start on a
	 * natural boundary we might need to extend this.
	 */
	a = max(HME_MINRXALIGN, sc->sc_burst);
	/*
	 * Make sure the buffer is suitably aligned. The 2 byte offset is
	 * removed when the mbuf is handed up. XXX: this ensures at least 16
	 * byte alignment of the header adjacent to the ethernet header, which
	 * should be sufficient in all cases. Nevertheless, this second-guesses
	 * ALIGN().
	 */
	m_adj(m, roundup2(b, a) - b);
	if (bus_dmamap_load_mbuf(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
	    m, hme_rxdma_callback, &ba, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	if (unmap) {
		bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
	}
	map = rd->hrx_dmamap;
	rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
	sc->sc_rb.rb_spare_dmamap = map;
	bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
	HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, ba);
	rd->hrx_m = m;
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
	return (0);
}
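
/*
 * Worked example for the m_adj() in hme_add_rxbuf() (hypothetical
 * numbers): with sc_burst = 64 and a buffer starting at b = 0x12345810,
 * a = 64 and roundup2(b, a) - b = 0x12345840 - 0x12345810 = 48, so 48
 * bytes are trimmed and the mapped buffer starts on a burst boundary. In
 * the common case the cluster is already suitably aligned and nothing is
 * trimmed.
 */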

static int
hme_meminit(struct hme_softc *sc)
{
	struct hme_ring *hr = &sc->sc_rb;
	struct hme_txdesc *td;
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	int error;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_NTXDESC * HME_XD_SIZE;
	dma += HME_NTXDESC * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_NRXDESC * HME_XD_SIZE;
	dma += HME_NRXDESC * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize transmit buffer descriptors
	 */
	for (i = 0; i < HME_NTXDESC; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
	}

	STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
	STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
	for (i = 0; i < HME_NTXQ; i++) {
		td = &sc->sc_rb.rb_txdesc[i];
		if (td->htx_m != NULL) {
			m_freem(td->htx_m);
			bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
			td->htx_m = NULL;
		}
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
	}

	/*
	 * Initialize receive buffer descriptors
	 */
	for (i = 0; i < HME_NRXDESC; i++) {
		error = hme_add_rxbuf(sc, i, 1);
		if (error != 0)
			return (error);
	}

	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE);

	hr->rb_tdhead = hr->rb_tdtail = 0;
	hr->rb_td_nbusy = 0;
	hr->rb_rdtail = 0;
	CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
	    hr->rb_txddma);
	CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
	    hr->rb_rxddma);
	CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
	CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
	return (0);
}

static int
hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
    u_int32_t clr, u_int32_t set)
{
	int i = 0;

	val &= ~clr;
	val |= set;
	HME_MAC_WRITE_4(sc, reg, val);
	if (clr == 0 && set == 0)
		return (1);	/* just write, no bits to wait for */
	do {
		DELAY(100);
		i++;
		val = HME_MAC_READ_4(sc, reg);
		if (i > 40) {
			/* After 3.5ms, we should have been done. */
			device_printf(sc->sc_dev, "timeout while writing to "
			    "MAC configuration register\n");
			return (0);
		}
	} while ((val & clr) != 0 && (val & set) != set);
	return (1);
}
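
/*
 * Typical use of hme_mac_bitflip() (a sketch mirroring hme_setladrf()
 * below): clear the receiver enable bit and wait for the hardware to let
 * it read back as zero before touching the filter registers:
 *
 *	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 *	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, v, HME_MAC_RXCFG_ENABLE, 0))
 *		return;		(timed out)
 */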

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
hme_init(void *xsc)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int8_t *ea;
	u_int32_t v;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

#if 0
	/* Mask all MIF interrupts, just in case */
	HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Setup data structures in host memory */
	if (hme_meminit(sc) != 0) {
		device_printf(sc->sc_dev, "out of buffers; init aborted.\n");
		return;
	}

	/* step 4. TX MAC registers & counters */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, ETHER_MAX_LEN);

	/* Load station MAC address */
	ea = sc->sc_arpcom.ac_enaddr;
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);

	/* Note: Accepting power-on default for other MAC registers here. */

	/* step 5. RX MAC registers & counters */
	hme_setladrf(sc, 0);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	/* Transmit Descriptor ring size: in increments of 16 */
	HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);

	HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, ETHER_MAX_LEN);

	/* step 8. Global Configuration & Interrupt Mask */
	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
	    ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
		HME_SEB_STAT_HOSTTOTX |
		HME_SEB_STAT_RXTOHOST |
		HME_SEB_STAT_TXALL |
		HME_SEB_STAT_TXPERR |
		HME_SEB_STAT_RCNTEXP |
		HME_SEB_STAT_ALL_ERRORS));

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);

	/* step 10. ERX Configuration */
	v = HME_ERX_READ_4(sc, HME_ERXI_CFG);

	/* Encode Receive Descriptor ring size: four possible values */
	v &= ~HME_ERX_CFG_RINGSIZEMSK;
	switch (HME_NRXDESC) {
	case 32:
		v |= HME_ERX_CFG_RINGSIZE32;
		break;
	case 64:
		v |= HME_ERX_CFG_RINGSIZE64;
		break;
	case 128:
		v |= HME_ERX_CFG_RINGSIZE128;
		break;
	case 256:
		v |= HME_ERX_CFG_RINGSIZE256;
		break;
	default:
		printf("hme: invalid Receive Descriptor ring size\n");
		break;
	}

	/* Enable DMA, fix RX first byte offset. */
	v &= ~HME_ERX_CFG_FBO_MASK;
	v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
	CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
	HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	/* If an external transceiver is connected, enable its MII drivers */
	if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
		v |= HME_MAC_XIF_MIIENABLE;
	CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	v &= ~(HME_MAC_RXCFG_DCRCS);
	CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

#ifdef HMEDEBUG
	/* Debug: double-check. */
	CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
	    "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
	    HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
	    HME_ERX_READ_4(sc, HME_ERXI_RING),
	    HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
	CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
	    HME_SEB_READ_4(sc, HME_SEBI_IMASK),
	    HME_ERX_READ_4(sc, HME_ERXI_CFG),
	    HME_ETX_READ_4(sc, HME_ETXI_CFG));
	CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
	    HME_MAC_READ_4(sc, HME_MACI_RXCFG),
	    HME_MAC_READ_4(sc, HME_MACI_TXCFG));
#endif

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	hme_start(ifp);
}

struct hme_txdma_arg {
	struct hme_softc	*hta_sc;
	struct hme_txdesc	*hta_htx;
	int			hta_ndescs;
};

/*
 * XXX: this relies on the fact that segments returned by
 * bus_dmamap_load_mbuf() are readable from the nearest burst boundary on
 * (i.e. potentially before ds_addr) to the first boundary beyond the end.
 * This is usually a safe assumption to make, but is not documented.
 */
static void
hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
    bus_size_t totsz, int error)
{
	struct hme_txdma_arg *ta = xsc;
	struct hme_txdesc *htx;
	bus_size_t len = 0;
	caddr_t txd;
	u_int32_t flags = 0;
	int i, tdhead, pci;

	if (error != 0)
		return;

	tdhead = ta->hta_sc->sc_rb.rb_tdhead;
	pci = ta->hta_sc->sc_pci;
	txd = ta->hta_sc->sc_rb.rb_txd;
	htx = ta->hta_htx;

	if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
		ta->hta_ndescs = -1;
		return;
	}
	ta->hta_ndescs = nsegs;

	for (i = 0; i < nsegs; i++) {
		if (segs[i].ds_len == 0)
			continue;

		/* Fill the ring entry. */
		flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
		if (len == 0)
			flags |= HME_XD_SOP;
		if (len + segs[i].ds_len == totsz)
			flags |= HME_XD_EOP;
		CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
		    "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags,
		    (u_int)segs[i].ds_addr);
		HME_XD_SETFLAGS(pci, txd, tdhead, flags);
		HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr);

		ta->hta_sc->sc_rb.rb_td_nbusy++;
		htx->htx_lastdesc = tdhead;
		tdhead = (tdhead + 1) % HME_NTXDESC;
		len += segs[i].ds_len;
	}
	ta->hta_sc->sc_rb.rb_tdhead = tdhead;
	KASSERT((flags & HME_XD_EOP) != 0,
	    ("hme_txdma_callback: missed end of packet!"));
}
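
/*
 * Result convention for the callback above: hta_ndescs is set to the
 * number of segments consumed, or to -1 when not enough free descriptors
 * are available. hme_load_txmbuf() turns the -1 into its own -1 return,
 * which hme_start() answers by setting IFF_OACTIVE and requeueing the
 * mbuf for a later attempt.
 */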

/*
 * Routine to DMA map an mbuf chain, set up the descriptor rings
 * accordingly and start the transmission.
 * Returns 0 on success, -1 if there were not enough free descriptors to map
 * the packet, or an errno otherwise.
 */
static int
hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0)
{
	struct hme_txdma_arg cba;
	struct hme_txdesc *td;
	int error, si, ri;
	u_int32_t flags;

	si = sc->sc_rb.rb_tdhead;
	if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
		return (-1);
	td->htx_m = m0;
	cba.hta_sc = sc;
	cba.hta_htx = td;
	if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap,
	    m0, hme_txdma_callback, &cba, 0)) != 0)
		goto fail;
	if (cba.hta_ndescs == -1) {
		error = -1;
		goto fail;
	}
	bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
	STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q);

	/* Turn descriptor ownership over to the HME, back to front. */
	ri = sc->sc_rb.rb_tdhead;
	CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)",
	    ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri));
	do {
		ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
		    HME_XD_OWN;
		CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
		    ri, si, flags);
		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags);
	} while (ri != si);

	/* Start the transmission. */
	HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
	return (0);
fail:
	bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
	return (error);
}

/*
 * Pass a packet to the higher levels.
 */
static void
hme_read(struct hme_softc *sc, int ix, int len)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {
#ifdef HMEDEBUG
		HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
		    len);
#endif
		ifp->if_ierrors++;
		hme_discard_rxbuf(sc, ix);
		return;
	}

	m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
	CTR1(KTR_HME, "hme_read: len %d", len);

	if (hme_add_rxbuf(sc, ix, 0) != 0) {
		/*
		 * hme_add_rxbuf will leave the old buffer in the ring until
		 * it is sure that a new buffer can be mapped. If it can not,
		 * drop the packet, but leave the interface up.
		 */
		ifp->if_iqdrops++;
		hme_discard_rxbuf(sc, ix);
		return;
	}

	ifp->if_ipackets++;

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
	m_adj(m, HME_RXOFFS);
	/* Pass the packet up. */
	(*ifp->if_input)(ifp, m);
}

static void
hme_start(struct ifnet *ifp)
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	struct mbuf *m;
	int error, enq = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	error = 0;
	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		error = hme_load_txmbuf(sc, m);
		if (error == -1) {
			ifp->if_flags |= IFF_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m);
			break;
		} else if (error > 0) {
			printf("hme_start: error %d while loading mbuf\n",
			    error);
		} else {
			enq = 1;
			BPF_MTAP(ifp, m);
		}
	}

	if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1)
		ifp->if_flags |= IFF_OACTIVE;
	/* Set watchdog timer if a packet was queued */
	if (enq) {
		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
		    BUS_DMASYNC_PREWRITE);
		ifp->if_timer = 5;
	}
}

/*
 * Transmit interrupt.
 */
static void
hme_tint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct hme_txdesc *htx;
	unsigned int ri, txflags;

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);

	htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	/* Fetch current position in the transmit ring */
	for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
		if (sc->sc_rb.rb_td_nbusy <= 0) {
			CTR0(KTR_HME, "hme_tint: not busy!");
			break;
		}

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
		CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);

		if ((txflags & HME_XD_OWN) != 0)
			break;

		CTR0(KTR_HME, "hme_tint: not owned");
		--sc->sc_rb.rb_td_nbusy;
		ifp->if_flags &= ~IFF_OACTIVE;

		/* Complete packet transmitted? */
		if ((txflags & HME_XD_EOP) == 0)
			continue;

		KASSERT(htx->htx_lastdesc == ri,
		    ("hme_tint: ring indices skewed: %d != %d!",
		    htx->htx_lastdesc, ri));
		bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);

		ifp->if_opackets++;
		m_freem(htx->htx_m);
		htx->htx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
		htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
	}
	/* Turn off watchdog */
	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	/* Update ring */
	sc->sc_rb.rb_tdtail = ri;

	hme_start(ifp);

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;
}

/*
 * Receive interrupt.
 */
static void
hme_rint(struct hme_softc *sc)
{
	caddr_t xdr = sc->sc_rb.rb_rxd;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	unsigned int ri, len;
	int progress = 0;
	u_int32_t flags;

	/*
	 * Process all buffers with valid data.
	 */
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
		CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
		if ((flags & HME_XD_OWN) != 0)
			break;

		progress++;
		if ((flags & HME_XD_OFL) != 0) {
			device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
			    "flags=0x%x\n", ri, flags);
			ifp->if_ierrors++;
			hme_discard_rxbuf(sc, ri);
		} else {
			len = HME_XD_DECODE_RSIZE(flags);
			hme_read(sc, ri, len);
		}
	}
	if (progress) {
		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	sc->sc_rb.rb_rdtail = ri;
}

static void
hme_eint(struct hme_softc *sc, u_int status)
{

	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed\n");
		return;
	}

	HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
}

void
hme_intr(void *v)
{
	struct hme_softc *sc = (struct hme_softc *)v;
	u_int32_t status;

	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
	CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		hme_rint(sc);
}

static void
hme_watchdog(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
#ifdef HMEDEBUG
	u_int32_t status;

	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
	CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);
#endif
	device_printf(sc->sc_dev, "device timeout\n");
	++ifp->if_oerrors;

	hme_reset(sc);
}

/*
 * Initialize the MII Management Interface
 */
static void
hme_mifinit(struct hme_softc *sc)
{
	u_int32_t v;

	/* Configure the MIF in frame mode */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_BBMODE;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
}

/*
 * MII interface
 */
int
hme_mii_readreg(device_t dev, int phy, int reg)
{
	struct hme_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	/* Clear PHY select bit */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);

	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return (v & HME_MIF_FO_DATA);
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}

int
hme_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct hme_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	/* Clear PHY select bit */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}

void
hme_mii_statchg(device_t dev)
{
	struct hme_softc *sc = device_get_softc(dev);
	int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change: phy = %d\n", phy);
#endif

	/* Select the current PHY in the MIF configuration register */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Set the MAC Full Duplex bit appropriately */
	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
		return;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= HME_MAC_TXCFG_FULLDPLX;
	else
		v &= ~HME_MAC_TXCFG_FULLDPLX;
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
		return;
}

static int
hme_mediachange(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;

	return (mii_mediachg(sc->sc_mii));
}

static void
hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hme_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
}

/*
 * Process an ioctl request.
 */
static int
hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			hme_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			hme_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			hme_init(sc);
		}
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		hme_setladrf(sc, 1);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	splx(s);
	return (error);
}

/*
 * Set up the logical address filter.
 */
static void
hme_setladrf(struct hme_softc *sc, int reenable)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ifmultiaddr *inm;
	struct sockaddr_dl *sdl;
	u_char *cp;
	u_int32_t crc;
	u_int32_t hash[4];
	u_int32_t macc;
	int len;

	/* Clear hash table */
	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	/* Get current RX configuration */
	macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);

	/*
	 * Disable the receiver while changing its state as the documentation
	 * mandates.
	 * We then must wait until the bit clears in the register. This should
	 * take at most 3.5ms.
	 */
	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
		return;
	/* Disable the hash filter before writing to the filter registers. */
	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
	    HME_MAC_RXCFG_HENABLE, 0))
		return;

	if (reenable)
		macc |= HME_MAC_RXCFG_ENABLE;
	else
		macc &= ~HME_MAC_RXCFG_ENABLE;

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter */
		macc |= HME_MAC_RXCFG_PMISC;
		macc &= ~HME_MAC_RXCFG_HENABLE;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter */
	macc &= ~HME_MAC_RXCFG_PMISC;
	macc |= HME_MAC_RXCFG_HENABLE;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 6 bits as an
	 * index into the 64 bit logical address filter. The two high order
	 * bits select the word, while the rest of the bits select the bit
	 * within the word.
	 */

	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		sdl = (struct sockaddr_dl *)inm->ifma_addr;
		cp = LLADDR(sdl);
		crc = 0xffffffff;
		for (len = sdl->sdl_alen; --len >= 0;) {
			int octet = *cp++;
			int i;

#define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else {
					crc >>= 1;
				}
				octet >>= 1;
			}
		}
		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Now load the hash table into the chip */
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
	hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
	    macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE));
}
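
/*
 * Worked example of the bit selection in hme_setladrf() (illustrative
 * value): if the six most significant CRC bits are 0x27 (100111 binary),
 * hash[0x27 >> 4] selects word 2 and 1 << (0x27 & 0xf) selects bit 7,
 * i.e. bit 7 of HME_MACI_HASHTAB2.
 */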