1/* $NetBSD: sunxi_emac.c,v 1.37 2022/09/18 15:44:29 thorpej Exp $ */ 2 3/*- 4 * Copyright (c) 2016-2017 Jared McNeill <jmcneill@invisible.ca> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
 */

/*
 * Allwinner Gigabit Ethernet MAC (EMAC) controller
 */

#include "opt_net_mpsafe.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sunxi_emac.c,v 1.37 2022/09/18 15:44:29 thorpej Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/callout.h>
#include <sys/gpio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>

#include <dev/mii/miivar.h>

#include <dev/fdt/fdtvar.h>
#include <dev/fdt/syscon.h>

#include <arm/sunxi/sunxi_emac.h>

#ifdef NET_MPSAFE
#define	EMAC_MPSAFE		1
#define	CALLOUT_FLAGS		CALLOUT_MPSAFE
#define	FDT_INTR_FLAGS		FDT_INTR_MPSAFE
#else
#define	CALLOUT_FLAGS		0
#define	FDT_INTR_FLAGS		0
#endif

#define	EMAC_IFNAME	"emac%d"

#define	EMAC_LOCK(sc)		mutex_enter(&(sc)->mtx)
#define	EMAC_UNLOCK(sc)		mutex_exit(&(sc)->mtx)
#define	EMAC_ASSERT_LOCKED(sc)	KASSERT(mutex_owned(&(sc)->mtx))

/*
 * DMA descriptor ring geometry.  The ring index macros below rely on the
 * descriptor counts being powers of two (wrap via mask, not modulo).
 */
#define	DESC_ALIGN		sizeof(struct sunxi_emac_desc)
#define	TX_DESC_COUNT		1024
#define	TX_DESC_SIZE		(sizeof(struct sunxi_emac_desc) * TX_DESC_COUNT)
#define	RX_DESC_COUNT		256
#define	RX_DESC_SIZE		(sizeof(struct sunxi_emac_desc) * RX_DESC_COUNT)

/* Byte offset of descriptor n within a ring, and wrapping index arithmetic. */
#define	DESC_OFF(n)		((n) * sizeof(struct sunxi_emac_desc))
#define	TX_NEXT(n)		(((n) + 1) & (TX_DESC_COUNT - 1))
#define	TX_SKIP(n, o)		(((n) + (o)) & (TX_DESC_COUNT - 1))
#define	RX_NEXT(n)		(((n) + 1) & (RX_DESC_COUNT - 1))

/* Maximum DMA segments per TX packet; start_locked keeps this many spare. */
#define	TX_MAX_SEGS		128

#define	SOFT_RST_RETRY	1000
#define	MII_BUSY_RETRY	1000
#define	MDIO_FREQ	2500000

#define	BURST_LEN_DEFAULT	8
#define	RX_TX_PRI_DEFAULT	0
#define	PAUSE_TIME_DEFAULT	0x400

/* syscon EMAC clock register */
#define	EMAC_CLK_REG		0x30
#define	EMAC_CLK_EPHY_ADDR	(0x1f << 20)	/* H3 */
#define	EMAC_CLK_EPHY_ADDR_SHIFT 20
#define	EMAC_CLK_EPHY_LED_POL	(1 << 17)	/* H3 */
#define	EMAC_CLK_EPHY_SHUTDOWN	(1 << 16)	/* H3 */
#define	EMAC_CLK_EPHY_SELECT	(1 << 15)	/* H3 */
#define	EMAC_CLK_RMII_EN	(1 << 13)
#define	EMAC_CLK_ETXDC		(0x7 << 10)
#define	EMAC_CLK_ETXDC_SHIFT	10
#define	EMAC_CLK_ERXDC		(0x1f << 5)
#define	EMAC_CLK_ERXDC_SHIFT	5
#define	EMAC_CLK_PIT		(0x1 << 2)
#define	EMAC_CLK_PIT_MII	(0 << 2)
#define	EMAC_CLK_PIT_RGMII	(1 << 2)
#define	EMAC_CLK_SRC		(0x3 << 0)
#define	EMAC_CLK_SRC_MII	(0 << 0)
#define	EMAC_CLK_SRC_EXT_RGMII	(1 << 0)
#define	EMAC_CLK_SRC_RGMII	(2 << 0)

/* Burst length of RX and TX DMA transfers */
static int sunxi_emac_burst_len = BURST_LEN_DEFAULT;

/* RX / TX DMA priority. If 1, RX DMA has priority over TX DMA. */
static int sunxi_emac_rx_tx_pri = RX_TX_PRI_DEFAULT;

/* Pause time field in the transmitted control frame */
static int sunxi_emac_pause_time = PAUSE_TIME_DEFAULT;

/* Supported EMAC variants; selects SoC-specific syscon/PHY handling. */
enum sunxi_emac_type {
	EMAC_A64 = 1,
	EMAC_A83T,
	EMAC_H3,
	EMAC_H6,
};

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "allwinner,sun8i-a83t-emac",	.value = EMAC_A83T },
	{ .compat = "allwinner,sun8i-h3-emac",		.value = EMAC_H3 },
	{ .compat = "allwinner,sun8i-v3s-emac",		.value = EMAC_H3 },
	{ .compat = "allwinner,sun50i-a64-emac",	.value = EMAC_A64 },
	{ .compat = "allwinner,sun50i-h6-emac",		.value = EMAC_H6 },
	DEVICE_COMPAT_EOL
};

/* Per-slot buffer state: the DMA map plus the mbuf currently loaded (if any). */
struct sunxi_emac_bufmap {
	bus_dmamap_t map;
	struct mbuf *mbuf;
};

/*
 * TX descriptor ring.  cur is the producer index, next the consumer
 * (completion) index, queued the number of in-use descriptors.
 */
struct sunxi_emac_txring {
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_dma_segment_t desc_dmaseg;
	struct sunxi_emac_desc *desc_ring;
	bus_addr_t desc_ring_paddr;
	bus_dma_tag_t buf_tag;
	struct sunxi_emac_bufmap buf_map[TX_DESC_COUNT];
	u_int cur, next, queued;
};

/* RX descriptor ring; cur is the next descriptor to be examined. */
struct sunxi_emac_rxring {
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_dma_segment_t
desc_dmaseg;
	struct sunxi_emac_desc *desc_ring;
	bus_addr_t desc_ring_paddr;
	bus_dma_tag_t buf_tag;
	struct sunxi_emac_bufmap buf_map[RX_DESC_COUNT];
	u_int cur;
};

/* Per-device driver state. */
struct sunxi_emac_softc {
	device_t dev;
	int phandle;
	enum sunxi_emac_type type;
	bus_space_tag_t bst;
	bus_dma_tag_t dmat;

	bus_space_handle_t bsh;
	struct clk *clk_ahb;
	struct clk *clk_ephy;
	struct fdtbus_reset *rst_ahb;
	struct fdtbus_reset *rst_ephy;
	struct fdtbus_regulator *reg_phy;
	struct fdtbus_gpio_pin *pin_reset;

	struct syscon *syscon;

	int phy_id;

	kmutex_t mtx;		/* serializes register access and ring state */
	struct ethercom ec;
	struct mii_data mii;
	callout_t stat_ch;	/* periodic mii_tick */
	void *ih;
	u_int mdc_div_ratio_m;	/* MDC divider, derived from AHB clock */

	struct sunxi_emac_txring tx;
	struct sunxi_emac_rxring rx;
};

#define	RD4(sc, reg)		\
	bus_space_read_4((sc)->bst, (sc)->bsh, (reg))
#define	WR4(sc, reg, val)	\
	bus_space_write_4((sc)->bst, (sc)->bsh, (reg), (val))

/*
 * Read a 16-bit PHY register via the EMAC's MDIO interface.
 * Busy-waits (10us steps, MII_BUSY_RETRY attempts) for MII_BUSY to clear.
 * Returns 0 on success with *val filled in, or ETIMEDOUT.
 */
static int
sunxi_emac_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct sunxi_emac_softc *sc = device_private(dev);
	int retry;

	WR4(sc, EMAC_MII_CMD,
	    (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
	    (phy << PHY_ADDR_SHIFT) |
	    (reg << PHY_REG_ADDR_SHIFT) |
	    MII_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) {
			*val = RD4(sc, EMAC_MII_DATA) & 0xffff;
			break;
		}
		delay(10);
	}

	if (retry == 0) {
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}

/*
 * Write a 16-bit PHY register via MDIO; same busy-wait discipline as
 * the read path.  Returns 0 or ETIMEDOUT.
 */
static int
sunxi_emac_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct sunxi_emac_softc *sc = device_private(dev);
	int retry;

	WR4(sc, EMAC_MII_DATA, val);
	WR4(sc, EMAC_MII_CMD,
	    (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
	    (phy << PHY_ADDR_SHIFT) |
	    (reg << PHY_REG_ADDR_SHIFT) |
	    MII_WR | MII_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0)
			break;
		delay(10);
	}

	if (retry == 0) {
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}

/*
 * Program MAC speed/duplex and flow-control registers from the current
 * MII media status.  Pause time is only set for full-duplex links.
 */
static void
sunxi_emac_update_link(struct sunxi_emac_softc *sc)
{
	struct mii_data *mii = &sc->mii;
	uint32_t val;

	val = RD4(sc, EMAC_BASIC_CTL_0);
	val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT;
	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT;
	else
		val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= BASIC_CTL_DUPLEX;

	WR4(sc, EMAC_BASIC_CTL_0, val);

	val = RD4(sc, EMAC_RX_CTL_0);
	val &= ~RX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		val |= RX_FLOW_CTL_EN;
	WR4(sc, EMAC_RX_CTL_0, val);

	val = RD4(sc, EMAC_TX_FLOW_CTL);
	val &= ~(PAUSE_TIME | TX_FLOW_CTL_EN);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		val |= TX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= sunxi_emac_pause_time << PAUSE_TIME_SHIFT;
	WR4(sc, EMAC_TX_FLOW_CTL, val);
}

/* MII status-change callback: re-sync MAC registers with the new media. */
static void
sunxi_emac_mii_statchg(struct ifnet *ifp)
{
	struct sunxi_emac_softc * const sc = ifp->if_softc;

	sunxi_emac_update_link(sc);
}

/*
 * Sync descriptors [start, end) of a ring of `total` entries, handling
 * the case where the range wraps past the end of the ring.
 */
static void
sunxi_emac_dma_sync(struct sunxi_emac_softc *sc, bus_dma_tag_t dmat,
    bus_dmamap_t map, int start, int end, int total, int flags)
{
	if (end > start) {
		bus_dmamap_sync(dmat, map, DESC_OFF(start),
DESC_OFF(end) - DESC_OFF(start), flags);
	} else {
		/* Wrapped range: sync [start, total) then [0, end). */
		bus_dmamap_sync(dmat, map, DESC_OFF(start),
		    DESC_OFF(total) - DESC_OFF(start), flags);
		if (DESC_OFF(end) - DESC_OFF(0) > 0)
			bus_dmamap_sync(dmat, map, DESC_OFF(0),
			    DESC_OFF(end) - DESC_OFF(0), flags);
	}
}

/*
 * Initialize TX descriptor `index`.  With paddr/len zero the slot is
 * cleared and one queued entry is released; otherwise the descriptor is
 * handed to the hardware (TX_DESC_CTL set last) and queued is bumped.
 */
static void
sunxi_emac_setup_txdesc(struct sunxi_emac_softc *sc, int index, int flags,
    bus_addr_t paddr, u_int len)
{
	uint32_t status, size;

	if (paddr == 0 || len == 0) {
		status = 0;
		size = 0;
		--sc->tx.queued;
	} else {
		status = TX_DESC_CTL;
		size = flags | len;
		++sc->tx.queued;
	}

	sc->tx.desc_ring[index].addr = htole32((uint32_t)paddr);
	sc->tx.desc_ring[index].size = htole32(size);
	sc->tx.desc_ring[index].status = htole32(status);
}

/*
 * Load mbuf chain `m` into the TX ring starting at `index`, one
 * descriptor per DMA segment, with checksum-offload flags as requested
 * by the packet header.  Returns the number of descriptors consumed,
 * 0 on a transient load failure (caller should retry later), or -1 when
 * the packet has too many segments and must be dropped by the caller.
 */
static int
sunxi_emac_setup_txbuf(struct sunxi_emac_softc *sc, int index, struct mbuf *m)
{
	bus_dma_segment_t *segs;
	int error, nsegs, cur, i, flags;
	u_int csum_flags;

	error = bus_dmamap_load_mbuf(sc->tx.buf_tag,
	    sc->tx.buf_map[index].map, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		device_printf(sc->dev,
		    "TX packet needs too many DMA segments, dropping...\n");
		/* Caller will dequeue and free packet. */
		return -1;
	}
	if (error != 0)
		return 0;

	segs = sc->tx.buf_map[index].map->dm_segs;
	nsegs = sc->tx.buf_map[index].map->dm_nsegs;

	flags = TX_FIR_DESC;
	if ((m->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0) {
		if ((m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) != 0)
			csum_flags = TX_CHECKSUM_CTL_FULL;
		else
			csum_flags = TX_CHECKSUM_CTL_IP;
		flags |= (csum_flags << TX_CHECKSUM_CTL_SHIFT);
	}

	/* Only the first descriptor records the mbuf for later freeing. */
	for (cur = index, i = 0; i < nsegs; i++) {
		sc->tx.buf_map[cur].mbuf = (i == 0 ? m : NULL);
		if (i == nsegs - 1)
			flags |= TX_LAST_DESC | TX_INT_CTL;

		sunxi_emac_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len);
		flags &= ~TX_FIR_DESC;
		cur = TX_NEXT(cur);
	}

	bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[index].map,
	    0, sc->tx.buf_map[index].map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return nsegs;
}

/*
 * Initialize RX descriptor `index` to receive into `paddr`, and chain
 * it to the next ring slot.  Ownership passes to hardware (RX_DESC_CTL
 * is written last).
 */
static void
sunxi_emac_setup_rxdesc(struct sunxi_emac_softc *sc, int index,
    bus_addr_t paddr)
{
	uint32_t status, size;

	status = RX_DESC_CTL;
	size = MCLBYTES - 1;

	sc->rx.desc_ring[index].addr = htole32((uint32_t)paddr);
	sc->rx.desc_ring[index].size = htole32(size);
	sc->rx.desc_ring[index].next =
	    htole32(sc->rx.desc_ring_paddr + DESC_OFF(RX_NEXT(index)));
	sc->rx.desc_ring[index].status = htole32(status);
}

/*
 * Attach mbuf cluster `m` to RX slot `index`: align the payload, load
 * the DMA map, and publish the descriptor.  Returns a bus_dma error.
 */
static int
sunxi_emac_setup_rxbuf(struct sunxi_emac_softc *sc, int index, struct mbuf *m)
{
	int error;

	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->rx.buf_tag,
	    sc->rx.buf_map[index].map, m, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0)
		return error;

	bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
	    0, sc->rx.buf_map[index].map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->rx.buf_map[index].mbuf = m;
	sunxi_emac_setup_rxdesc(sc, index,
	    sc->rx.buf_map[index].map->dm_segs[0].ds_addr);

	return 0;
}

/* Allocate a packet-header mbuf with a cluster; NULL on failure. */
static struct mbuf *
sunxi_emac_alloc_mbufcl(struct sunxi_emac_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return m;
}

/*
 * Drain the interface send queue into the TX ring and kick TX DMA.
 * Called with the softc mutex held.
 */
static void
sunxi_emac_start_locked(struct sunxi_emac_softc *sc)
{
	struct ifnet *ifp = &sc->ec.ec_if;
	struct mbuf *m;
	uint32_t val;
	int cnt, nsegs, start;

	EMAC_ASSERT_LOCKED(sc);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	for (cnt =
0, start = sc->tx.cur; ; cnt++) {
		/* Keep TX_MAX_SEGS descriptors free for the next packet. */
		if (sc->tx.queued >= TX_DESC_COUNT - TX_MAX_SEGS) {
			break;
		}

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		nsegs = sunxi_emac_setup_txbuf(sc, sc->tx.cur, m);
		if (__predict_false(nsegs <= 0)) {
			if (nsegs == -1) {
				/*
				 * We're being asked to discard this packet,
				 * but we can try to continue.
				 */
				IFQ_DEQUEUE(&ifp->if_snd, m);
				m_freem(m);
				continue;
			}
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m);
		bpf_mtap(ifp, m, BPF_D_OUT);

		sc->tx.cur = TX_SKIP(sc->tx.cur, nsegs);
	}

	if (cnt != 0) {
		sunxi_emac_dma_sync(sc, sc->tx.desc_tag, sc->tx.desc_map,
		    start, sc->tx.cur, TX_DESC_COUNT,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Start and run TX DMA */
		val = RD4(sc, EMAC_TX_CTL_1);
		WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_START);
	}
}

/* if_start entry point: take the lock and run the locked variant. */
static void
sunxi_emac_start(struct ifnet *ifp)
{
	struct sunxi_emac_softc *sc = ifp->if_softc;

	EMAC_LOCK(sc);
	sunxi_emac_start_locked(sc);
	EMAC_UNLOCK(sc);
}

/* Periodic callout: drive mii_tick once a second while running. */
static void
sunxi_emac_tick(void *softc)
{
	struct sunxi_emac_softc *sc = softc;
	struct mii_data *mii = &sc->mii;
#ifndef EMAC_MPSAFE
	int s = splnet();
#endif

	EMAC_LOCK(sc);
	mii_tick(mii);
	callout_schedule(&sc->stat_ch, hz);
	EMAC_UNLOCK(sc);

#ifndef EMAC_MPSAFE
	splx(s);
#endif
}

/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

/*
 * Program the RX frame filter: promiscuous, all-multicast, or a CRC-based
 * multicast hash, plus the station (unicast) address.  Called with the
 * softc mutex held.
 */
static void
sunxi_emac_setup_rxfilter(struct sunxi_emac_softc *sc)
{
	struct ethercom *ec = &sc->ec;
	struct ifnet *ifp = &ec->ec_if;
	uint32_t val, crc, hashreg, hashbit, hash[2], machi, maclo;
	struct ether_multi *enm;
	struct ether_multistep step;
	const uint8_t *eaddr;

	EMAC_ASSERT_LOCKED(sc);

	val = 0;
	hash[0] = hash[1] = 0;

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		val |= DIS_ADDR_FILTER;
	else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		val |= RX_ALL_MULTICAST;
		hash[0] = hash[1] = ~0;
	} else {
		val |= HASH_MULTICAST;
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			/* Hash = top 6 bits of the bit-reversed inverted CRC. */
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			crc &= 0x7f;
			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

	/* Write our unicast address */
	eaddr = CLLADDR(ifp->if_sadl);
	machi = (eaddr[5] << 8) | eaddr[4];
	maclo = (eaddr[3] << 24) | (eaddr[2] << 16) | (eaddr[1] << 8) |
	    (eaddr[0] << 0);
	WR4(sc, EMAC_ADDR_HIGH(0), machi);
	WR4(sc, EMAC_ADDR_LOW(0), maclo);

	/* Multicast hash filters */
	WR4(sc, EMAC_RX_HASH_0, hash[1]);
	WR4(sc, EMAC_RX_HASH_1, hash[0]);

	/* RX frame filter config */
	WR4(sc, EMAC_RX_FRM_FLT, val);
}

static void
sunxi_emac_enable_intr(struct sunxi_emac_softc *sc)
{
	/* Enable interrupts */
	WR4(sc, EMAC_INT_EN, RX_INT_EN | TX_INT_EN | TX_BUF_UA_INT_EN);
}

static void
sunxi_emac_disable_intr(struct sunxi_emac_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, EMAC_INT_EN, 0);
}

#ifdef SUNXI_EMAC_DEBUG
/* Dump the main MAC/DMA registers to the console (debug builds only). */
static void
sunxi_emac_dump_regs(struct sunxi_emac_softc *sc)
{
	static const struct {
		const char *name;
		u_int reg;
	} regs[] = {
		{ "BASIC_CTL_0",	EMAC_BASIC_CTL_0 },
		{ "BASIC_CTL_1",	EMAC_BASIC_CTL_1 },
		{ "INT_STA",		EMAC_INT_STA },
		{ "INT_EN",		EMAC_INT_EN },
		{ "TX_CTL_0",		EMAC_TX_CTL_0 },
		{ "TX_CTL_1",		EMAC_TX_CTL_1 },
{ "TX_FLOW_CTL",	EMAC_TX_FLOW_CTL },
		{ "TX_DMA_LIST",	EMAC_TX_DMA_LIST },
		{ "RX_CTL_0",		EMAC_RX_CTL_0 },
		{ "RX_CTL_1",		EMAC_RX_CTL_1 },
		{ "RX_DMA_LIST",	EMAC_RX_DMA_LIST },
		{ "RX_FRM_FLT",		EMAC_RX_FRM_FLT },
		{ "RX_HASH_0",		EMAC_RX_HASH_0 },
		{ "RX_HASH_1",		EMAC_RX_HASH_1 },
		{ "MII_CMD",		EMAC_MII_CMD },
		{ "ADDR_HIGH0",		EMAC_ADDR_HIGH(0) },
		{ "ADDR_LOW0",		EMAC_ADDR_LOW(0) },
		{ "TX_DMA_STA",		EMAC_TX_DMA_STA },
		{ "TX_DMA_CUR_DESC",	EMAC_TX_DMA_CUR_DESC },
		{ "TX_DMA_CUR_BUF",	EMAC_TX_DMA_CUR_BUF },
		{ "RX_DMA_STA",		EMAC_RX_DMA_STA },
		{ "RX_DMA_CUR_DESC",	EMAC_RX_DMA_CUR_DESC },
		{ "RX_DMA_CUR_BUF",	EMAC_RX_DMA_CUR_BUF },
		{ "RGMII_STA",		EMAC_RGMII_STA },
	};
	u_int n;

	for (n = 0; n < __arraycount(regs); n++)
		device_printf(sc->dev, " %-20s %08x\n", regs[n].name,
		    RD4(sc, regs[n].reg));
}
#endif

/*
 * Soft-reset the EMAC core and wait for the reset bit to self-clear.
 * Returns 0 or ETIMEDOUT.
 */
static int
sunxi_emac_reset(struct sunxi_emac_softc *sc)
{
	int retry;

	/* Soft reset all registers and logic */
	WR4(sc, EMAC_BASIC_CTL_1, BASIC_CTL_SOFT_RST);

	/* Wait for soft reset bit to self-clear */
	for (retry = SOFT_RST_RETRY; retry > 0; retry--) {
		if ((RD4(sc, EMAC_BASIC_CTL_1) & BASIC_CTL_SOFT_RST) == 0)
			break;
		delay(10);
	}
	if (retry == 0) {
		aprint_debug_dev(sc->dev, "soft reset timed out\n");
#ifdef SUNXI_EMAC_DEBUG
		sunxi_emac_dump_regs(sc);
#endif
		return ETIMEDOUT;
	}

	return 0;
}

/*
 * Bring the interface up: reset the core, install descriptor rings,
 * program the RX filter and DMA settings, and enable TX/RX.  Called
 * with the softc mutex held; a no-op if already running.
 */
static int
sunxi_emac_init_locked(struct sunxi_emac_softc *sc)
{
	struct ifnet *ifp = &sc->ec.ec_if;
	struct mii_data *mii = &sc->mii;
	uint32_t val;

	EMAC_ASSERT_LOCKED(sc);

	if ((ifp->if_flags & IFF_RUNNING) != 0)
		return 0;

	/* Soft reset EMAC core */
	sunxi_emac_reset(sc);

	/* Write transmit and receive descriptor base address registers */
	WR4(sc, EMAC_TX_DMA_LIST, sc->tx.desc_ring_paddr);
	WR4(sc, EMAC_RX_DMA_LIST, sc->rx.desc_ring_paddr);

	sunxi_emac_setup_rxfilter(sc);

	/* Configure DMA burst length and priorities */
	val = sunxi_emac_burst_len << BASIC_CTL_BURST_LEN_SHIFT;
	if (sunxi_emac_rx_tx_pri)
		val |= BASIC_CTL_RX_TX_PRI;
	WR4(sc, EMAC_BASIC_CTL_1, val);

	/* Enable interrupts */
	sunxi_emac_enable_intr(sc);

	/* Enable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_EN | TX_MD | TX_NEXT_FRAME);

	/* Enable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val | RX_DMA_EN | RX_MD);

	/* Enable transmitter */
	val = RD4(sc, EMAC_TX_CTL_0);
	WR4(sc, EMAC_TX_CTL_0, val | TX_EN);

	/* Enable receiver */
	val = RD4(sc, EMAC_RX_CTL_0);
	WR4(sc, EMAC_RX_CTL_0, val | RX_EN | CHECK_CRC);

	ifp->if_flags |= IFF_RUNNING;

	mii_mediachg(mii);
	callout_schedule(&sc->stat_ch, hz);

	return 0;
}

/* if_init entry point: take the lock and run the locked variant. */
static int
sunxi_emac_init(struct ifnet *ifp)
{
	struct sunxi_emac_softc *sc = ifp->if_softc;
	int error;

	EMAC_LOCK(sc);
	error = sunxi_emac_init_locked(sc);
	EMAC_UNLOCK(sc);

	return error;
}

/*
 * Bring the interface down: stop the callout, the MII layer, both DMA
 * engines, and the MAC.  Called with the softc mutex held.
 */
static void
sunxi_emac_stop_locked(struct sunxi_emac_softc *sc, int disable)
{
	struct ifnet *ifp = &sc->ec.ec_if;
	uint32_t val;

	EMAC_ASSERT_LOCKED(sc);

	callout_stop(&sc->stat_ch);

	mii_down(&sc->mii);

	/* Stop transmit DMA and flush data in the TX FIFO */
	val = RD4(sc, EMAC_TX_CTL_1);
	val &= ~TX_DMA_EN;
	val |= FLUSH_TX_FIFO;
	WR4(sc, EMAC_TX_CTL_1, val);

	/* Disable transmitter */
	val = RD4(sc, EMAC_TX_CTL_0);
	WR4(sc, EMAC_TX_CTL_0, val & ~TX_EN);

	/* Disable receiver */
	val = RD4(sc, EMAC_RX_CTL_0);
	WR4(sc, EMAC_RX_CTL_0, val & ~RX_EN);

	/* Disable interrupts */
	sunxi_emac_disable_intr(sc);

	/* Disable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val & ~TX_DMA_EN);

	/* Disable
receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val & ~RX_DMA_EN);

	ifp->if_flags &= ~IFF_RUNNING;
}

/* if_stop entry point: take the lock and run the locked variant. */
static void
sunxi_emac_stop(struct ifnet *ifp, int disable)
{
	struct sunxi_emac_softc * const sc = ifp->if_softc;

	EMAC_LOCK(sc);
	sunxi_emac_stop_locked(sc, disable);
	EMAC_UNLOCK(sc);
}

/*
 * Harvest completed RX descriptors: hand received frames to the stack,
 * refill each slot with a fresh cluster, and return ownership to the
 * hardware.  Returns the number of packets delivered.
 */
static int
sunxi_emac_rxintr(struct sunxi_emac_softc *sc)
{
	struct ifnet *ifp = &sc->ec.ec_if;
	int error, index, len, npkt;
	struct mbuf *m, *m0;
	uint32_t status;

	npkt = 0;

	for (index = sc->rx.cur; ; index = RX_NEXT(index)) {
		sunxi_emac_dma_sync(sc, sc->rx.desc_tag, sc->rx.desc_map,
		    index, index + 1, RX_DESC_COUNT,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* RX_DESC_CTL set means the hardware still owns this slot. */
		status = le32toh(sc->rx.desc_ring[index].status);
		if ((status & RX_DESC_CTL) != 0)
			break;

		bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
		    0, sc->rx.buf_map[index].map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rx.buf_tag, sc->rx.buf_map[index].map);

		/*
		 * NOTE(review): when len == 0 the old mbuf in buf_map is
		 * neither enqueued nor freed before being replaced below —
		 * looks like a potential mbuf leak; confirm.
		 */
		len = (status & RX_FRM_LEN) >> RX_FRM_LEN_SHIFT;
		if (len != 0) {
			m = sc->rx.buf_map[index].mbuf;
			m_set_rcvif(m, ifp);
			m->m_flags |= M_HASFCS;
			m->m_pkthdr.len = len;
			m->m_len = len;
			m->m_nextpkt = NULL;

			if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) != 0 &&
			    (status & RX_FRM_TYPE) != 0) {
				m->m_pkthdr.csum_flags = M_CSUM_IPv4 |
				    M_CSUM_TCPv4 | M_CSUM_UDPv4;
				if ((status & RX_HEADER_ERR) != 0)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
				if ((status & RX_PAYLOAD_ERR) != 0)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}

			++npkt;

			if_percpuq_enqueue(ifp->if_percpuq, m);
		}

		if ((m0 = sunxi_emac_alloc_mbufcl(sc)) != NULL) {
			error = sunxi_emac_setup_rxbuf(sc, index, m0);
			if (error != 0) {
				/* XXX hole in RX ring */
			}
		} else
			if_statinc(ifp, if_ierrors);

		sunxi_emac_dma_sync(sc, sc->rx.desc_tag, sc->rx.desc_map,
		    index, index + 1,
		    RX_DESC_COUNT, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}

	sc->rx.cur = index;

	return npkt;
}

/*
 * Reclaim completed TX descriptors: free transmitted mbufs, clear the
 * descriptors, and advance the consumer index.  Called with the softc
 * mutex held.
 */
static void
sunxi_emac_txintr(struct sunxi_emac_softc *sc)
{
	struct ifnet *ifp = &sc->ec.ec_if;
	struct sunxi_emac_bufmap *bmap;
	struct sunxi_emac_desc *desc;
	uint32_t status;
	int i;

	EMAC_ASSERT_LOCKED(sc);

	for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
		KASSERT(sc->tx.queued > 0 && sc->tx.queued <= TX_DESC_COUNT);
		sunxi_emac_dma_sync(sc, sc->tx.desc_tag, sc->tx.desc_map,
		    i, i + 1, TX_DESC_COUNT,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->tx.desc_ring[i];
		status = le32toh(desc->status);
		/* Stop at the first descriptor still owned by hardware. */
		if ((status & TX_DESC_CTL) != 0)
			break;
		bmap = &sc->tx.buf_map[i];
		if (bmap->mbuf != NULL) {
			bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
			    0, bmap->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}

		sunxi_emac_setup_txdesc(sc, i, 0, 0, 0);
		sunxi_emac_dma_sync(sc, sc->tx.desc_tag, sc->tx.desc_map,
		    i, i + 1, TX_DESC_COUNT,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if_statinc(ifp, if_opackets);
	}

	sc->tx.next = i;
}

/*
 * Interrupt handler: acknowledge the status register, then service RX
 * and TX completions and restart transmission if needed.
 */
static int
sunxi_emac_intr(void *arg)
{
	struct sunxi_emac_softc *sc = arg;
	struct ifnet *ifp = &sc->ec.ec_if;
	uint32_t val;

	EMAC_LOCK(sc);

	val = RD4(sc, EMAC_INT_STA);
	WR4(sc, EMAC_INT_STA, val);

	if (val & RX_INT)
		sunxi_emac_rxintr(sc);

	if (val & (TX_INT | TX_BUF_UA_INT)) {
		sunxi_emac_txintr(sc);
		if_schedule_deferred_start(ifp);
	}

	EMAC_UNLOCK(sc);

	return 1;
}

/*
 * Interface ioctl handler.  Defers to ether_ioctl(), then refreshes
 * the RX filter (or reinitializes) when the stack asks for a reset.
 */
static int
sunxi_emac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct sunxi_emac_softc *sc = ifp->if_softc;
	int error, s;
#ifndef EMAC_MPSAFE
	s = splnet();
#endif

	switch (cmd) {
	default:
#ifdef EMAC_MPSAFE
		s = splnet();
#endif
		error = ether_ioctl(ifp, cmd, data);
#ifdef EMAC_MPSAFE
		splx(s);
#endif
		if (error != ENETRESET)
			break;

		error = 0;

		/*
		 * ENETRESET: capability changes require a full reinit;
		 * multicast list changes only need the RX filter reloaded
		 * (and only while running); anything else is a no-op.
		 */
		if (cmd == SIOCSIFCAP)
			error = if_init(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if ((ifp->if_flags & IFF_RUNNING) != 0) {
			EMAC_LOCK(sc);
			sunxi_emac_setup_rxfilter(sc);
			EMAC_UNLOCK(sc);
		}
		break;
	}

#ifndef EMAC_MPSAFE
	splx(s);
#endif

	return error;
}

/*
 * Decide whether this EMAC uses the SoC-internal PHY, either via the
 * legacy "allwinner,use-internal-phy" property or by checking the
 * compatible string of the phy-handle's parent MDIO node.
 */
static bool
sunxi_emac_has_internal_phy(struct sunxi_emac_softc *sc)
{
	static const struct device_compatible_entry mdio_internal_compat[] = {
		{ .compat = "allwinner,sun8i-h3-mdio-internal" },
		DEVICE_COMPAT_EOL
	};
	int phy;

	/* Non-standard property, for compatible with old dts files */
	if (of_hasprop(sc->phandle, "allwinner,use-internal-phy"))
		return true;

	phy = fdtbus_get_phandle(sc->phandle, "phy-handle");
	if (phy == -1)
		return false;

	/* For internal PHY, check compatible string of parent node */
	return of_compatible_match(OF_parent(phy), mdio_internal_compat);
}

/*
 * Program the syscon EMAC clock register for the configured phy-mode
 * (MII/RMII/RGMII), TX/RX clock delays, and (on H3/H6) internal PHY
 * selection.  Returns 0; absence of "phy-mode" leaves syscon untouched.
 */
static int
sunxi_emac_setup_phy(struct sunxi_emac_softc *sc)
{
	uint32_t reg, tx_delay, rx_delay;
	const char *phy_type;

	phy_type = fdtbus_get_string(sc->phandle, "phy-mode");
	if (phy_type == NULL)
		return 0;

	aprint_debug_dev(sc->dev, "PHY type: %s\n", phy_type);

	syscon_lock(sc->syscon);
	reg = syscon_read_4(sc->syscon, EMAC_CLK_REG);

	/* strncmp: matches "rgmii" and its variants (rgmii-id etc.). */
	reg &= ~(EMAC_CLK_PIT | EMAC_CLK_SRC | EMAC_CLK_RMII_EN);
	if (strncmp(phy_type, "rgmii", 5) == 0)
		reg |= EMAC_CLK_PIT_RGMII | EMAC_CLK_SRC_RGMII;
	else if (strcmp(phy_type, "rmii") == 0)
		reg |= EMAC_CLK_RMII_EN;
	else
		reg |= EMAC_CLK_PIT_MII | EMAC_CLK_SRC_MII;

	/* Delay fields: *-delay-ps is in picoseconds, tx/rx-delay in units. */
	if (of_getprop_uint32(sc->phandle, "allwinner,tx-delay-ps",
	    &tx_delay) == 0) {
		reg &= ~EMAC_CLK_ETXDC;
		reg |= ((tx_delay / 100) << EMAC_CLK_ETXDC_SHIFT);
	} else if (of_getprop_uint32(sc->phandle, "tx-delay", &tx_delay) == 0) {
		reg &= ~EMAC_CLK_ETXDC;
		reg |= (tx_delay << EMAC_CLK_ETXDC_SHIFT);
	}
	if (of_getprop_uint32(sc->phandle, "allwinner,rx-delay-ps",
	    &rx_delay) == 0) {
		reg &= ~EMAC_CLK_ERXDC;
		reg |= ((rx_delay / 100) << EMAC_CLK_ERXDC_SHIFT);
	} else if (of_getprop_uint32(sc->phandle, "rx-delay", &rx_delay) == 0) {
		reg &= ~EMAC_CLK_ERXDC;
		reg |= (rx_delay << EMAC_CLK_ERXDC_SHIFT);
	}

	if (sc->type == EMAC_H3 || sc->type == EMAC_H6) {
		if (sunxi_emac_has_internal_phy(sc)) {
			reg |= EMAC_CLK_EPHY_SELECT;
			reg &= ~EMAC_CLK_EPHY_SHUTDOWN;
			if (of_hasprop(sc->phandle,
			    "allwinner,leds-active-low"))
				reg |= EMAC_CLK_EPHY_LED_POL;
			else
				reg &= ~EMAC_CLK_EPHY_LED_POL;

			/* Set internal PHY addr to 1 */
			reg &= ~EMAC_CLK_EPHY_ADDR;
			reg |= (1 << EMAC_CLK_EPHY_ADDR_SHIFT);
		} else {
			reg &= ~EMAC_CLK_EPHY_SELECT;
		}
	}

	aprint_debug_dev(sc->dev, "EMAC clock: 0x%08x\n", reg);

	syscon_write_4(sc->syscon, EMAC_CLK_REG, reg);
	syscon_unlock(sc->syscon);

	return 0;
}

/*
 * Turn on clocks, resets and regulators acquired earlier, and compute
 * the MDC divider from the AHB clock rate.  Returns 0 or an errno.
 */
static int
sunxi_emac_setup_resources(struct sunxi_emac_softc *sc)
{
	u_int freq;
	int error, div;

	/* Configure PHY for MII or RGMII mode */
	if (sunxi_emac_setup_phy(sc) != 0)
		return ENXIO;

	/* Enable clocks */
	error = clk_enable(sc->clk_ahb);
	if (error != 0) {
		aprint_error_dev(sc->dev, "cannot enable ahb clock\n");
		return error;
	}

	if (sc->clk_ephy != NULL) {
		error = clk_enable(sc->clk_ephy);
		if (error != 0) {
			aprint_error_dev(sc->dev, "cannot enable ephy clock\n");
			return error;
		}
	}

	/* De-assert reset */
error = fdtbus_reset_deassert(sc->rst_ahb); 1073 if (error != 0) { 1074 aprint_error_dev(sc->dev, "cannot de-assert ahb reset\n"); 1075 return error; 1076 } 1077 if (sc->rst_ephy != NULL) { 1078 error = fdtbus_reset_deassert(sc->rst_ephy); 1079 if (error != 0) { 1080 aprint_error_dev(sc->dev, 1081 "cannot de-assert ephy reset\n"); 1082 return error; 1083 } 1084 } 1085 1086 /* Enable PHY regulator if applicable */ 1087 if (sc->reg_phy != NULL) { 1088 error = fdtbus_regulator_enable(sc->reg_phy); 1089 if (error != 0) { 1090 aprint_error_dev(sc->dev, 1091 "cannot enable PHY regulator\n"); 1092 return error; 1093 } 1094 } 1095 1096 /* Determine MDC clock divide ratio based on AHB clock */ 1097 freq = clk_get_rate(sc->clk_ahb); 1098 if (freq == 0) { 1099 aprint_error_dev(sc->dev, "cannot get AHB clock frequency\n"); 1100 return ENXIO; 1101 } 1102 div = freq / MDIO_FREQ; 1103 if (div <= 16) 1104 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_16; 1105 else if (div <= 32) 1106 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_32; 1107 else if (div <= 64) 1108 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_64; 1109 else if (div <= 128) 1110 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_128; 1111 else { 1112 aprint_error_dev(sc->dev, 1113 "cannot determine MDC clock divide ratio\n"); 1114 return ENXIO; 1115 } 1116 1117 aprint_debug_dev(sc->dev, "AHB frequency %u Hz, MDC div: 0x%x\n", 1118 freq, sc->mdc_div_ratio_m); 1119 1120 return 0; 1121} 1122 1123static void 1124sunxi_emac_get_eaddr(struct sunxi_emac_softc *sc, uint8_t *eaddr) 1125{ 1126 uint32_t maclo, machi; 1127#if notyet 1128 u_char rootkey[16]; 1129#endif 1130 1131 machi = RD4(sc, EMAC_ADDR_HIGH(0)) & 0xffff; 1132 maclo = RD4(sc, EMAC_ADDR_LOW(0)); 1133 1134 if (maclo == 0xffffffff && machi == 0xffff) { 1135#if notyet 1136 /* MAC address in hardware is invalid, create one */ 1137 if (aw_sid_get_rootkey(rootkey) == 0 && 1138 (rootkey[3] | rootkey[12] | rootkey[13] | rootkey[14] | 1139 rootkey[15]) != 0) { 1140 /* MAC address is derived from the root key 
in SID */ 1141 maclo = (rootkey[13] << 24) | (rootkey[12] << 16) | 1142 (rootkey[3] << 8) | 0x02; 1143 machi = (rootkey[15] << 8) | rootkey[14]; 1144 } else { 1145#endif 1146 /* Create one */ 1147 maclo = 0x00f2 | (cprng_strong32() & 0xffff0000); 1148 machi = cprng_strong32() & 0xffff; 1149#if notyet 1150 } 1151#endif 1152 } 1153 1154 eaddr[0] = maclo & 0xff; 1155 eaddr[1] = (maclo >> 8) & 0xff; 1156 eaddr[2] = (maclo >> 16) & 0xff; 1157 eaddr[3] = (maclo >> 24) & 0xff; 1158 eaddr[4] = machi & 0xff; 1159 eaddr[5] = (machi >> 8) & 0xff; 1160} 1161 1162static int 1163sunxi_emac_phy_reset(struct sunxi_emac_softc *sc) 1164{ 1165 uint32_t delay_prop[3]; 1166 int pin_value; 1167 1168 if (sc->pin_reset == NULL) 1169 return 0; 1170 1171 if (OF_getprop(sc->phandle, "allwinner,reset-delays-us", delay_prop, 1172 sizeof(delay_prop)) <= 0) 1173 return ENXIO; 1174 1175 pin_value = of_hasprop(sc->phandle, "allwinner,reset-active-low"); 1176 1177 fdtbus_gpio_write(sc->pin_reset, pin_value); 1178 delay(htole32(delay_prop[0])); 1179 fdtbus_gpio_write(sc->pin_reset, !pin_value); 1180 delay(htole32(delay_prop[1])); 1181 fdtbus_gpio_write(sc->pin_reset, pin_value); 1182 delay(htole32(delay_prop[2])); 1183 1184 return 0; 1185} 1186 1187static int 1188sunxi_emac_setup_dma(struct sunxi_emac_softc *sc) 1189{ 1190 struct mbuf *m; 1191 int error, nsegs, i; 1192 1193 /* Setup TX ring */ 1194 sc->tx.buf_tag = sc->tx.desc_tag = sc->dmat; 1195 error = bus_dmamap_create(sc->dmat, TX_DESC_SIZE, 1, TX_DESC_SIZE, 0, 1196 BUS_DMA_WAITOK, &sc->tx.desc_map); 1197 if (error) 1198 return error; 1199 error = bus_dmamem_alloc(sc->dmat, TX_DESC_SIZE, DESC_ALIGN, 0, 1200 &sc->tx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK); 1201 if (error) 1202 return error; 1203 error = bus_dmamem_map(sc->dmat, &sc->tx.desc_dmaseg, nsegs, 1204 TX_DESC_SIZE, (void *)&sc->tx.desc_ring, 1205 BUS_DMA_WAITOK); 1206 if (error) 1207 return error; 1208 error = bus_dmamap_load(sc->dmat, sc->tx.desc_map, sc->tx.desc_ring, 1209 
TX_DESC_SIZE, NULL, BUS_DMA_WAITOK); 1210 if (error) 1211 return error; 1212 sc->tx.desc_ring_paddr = sc->tx.desc_map->dm_segs[0].ds_addr; 1213 1214 memset(sc->tx.desc_ring, 0, TX_DESC_SIZE); 1215 bus_dmamap_sync(sc->dmat, sc->tx.desc_map, 0, TX_DESC_SIZE, 1216 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1217 1218 for (i = 0; i < TX_DESC_COUNT; i++) 1219 sc->tx.desc_ring[i].next = 1220 htole32(sc->tx.desc_ring_paddr + DESC_OFF(TX_NEXT(i))); 1221 1222 sc->tx.queued = TX_DESC_COUNT; 1223 for (i = 0; i < TX_DESC_COUNT; i++) { 1224 error = bus_dmamap_create(sc->tx.buf_tag, MCLBYTES, 1225 TX_MAX_SEGS, MCLBYTES, 0, BUS_DMA_WAITOK, 1226 &sc->tx.buf_map[i].map); 1227 if (error != 0) { 1228 device_printf(sc->dev, "cannot create TX buffer map\n"); 1229 return error; 1230 } 1231 sunxi_emac_setup_txdesc(sc, i, 0, 0, 0); 1232 } 1233 1234 /* Setup RX ring */ 1235 sc->rx.buf_tag = sc->rx.desc_tag = sc->dmat; 1236 error = bus_dmamap_create(sc->dmat, RX_DESC_SIZE, 1, RX_DESC_SIZE, 0, 1237 BUS_DMA_WAITOK, &sc->rx.desc_map); 1238 if (error) 1239 return error; 1240 error = bus_dmamem_alloc(sc->dmat, RX_DESC_SIZE, DESC_ALIGN, 0, 1241 &sc->rx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK); 1242 if (error) 1243 return error; 1244 error = bus_dmamem_map(sc->dmat, &sc->rx.desc_dmaseg, nsegs, 1245 RX_DESC_SIZE, (void *)&sc->rx.desc_ring, 1246 BUS_DMA_WAITOK); 1247 if (error) 1248 return error; 1249 error = bus_dmamap_load(sc->dmat, sc->rx.desc_map, sc->rx.desc_ring, 1250 RX_DESC_SIZE, NULL, BUS_DMA_WAITOK); 1251 if (error) 1252 return error; 1253 sc->rx.desc_ring_paddr = sc->rx.desc_map->dm_segs[0].ds_addr; 1254 1255 memset(sc->rx.desc_ring, 0, RX_DESC_SIZE); 1256 1257 for (i = 0; i < RX_DESC_COUNT; i++) { 1258 error = bus_dmamap_create(sc->rx.buf_tag, MCLBYTES, 1259 RX_DESC_COUNT, MCLBYTES, 0, BUS_DMA_WAITOK, 1260 &sc->rx.buf_map[i].map); 1261 if (error != 0) { 1262 device_printf(sc->dev, "cannot create RX buffer map\n"); 1263 return error; 1264 } 1265 if ((m = sunxi_emac_alloc_mbufcl(sc)) == 
NULL) { 1266 device_printf(sc->dev, "cannot allocate RX mbuf\n"); 1267 return ENOMEM; 1268 } 1269 error = sunxi_emac_setup_rxbuf(sc, i, m); 1270 if (error != 0) { 1271 device_printf(sc->dev, "cannot create RX buffer\n"); 1272 return error; 1273 } 1274 } 1275 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, 1276 0, sc->rx.desc_map->dm_mapsize, 1277 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1278 1279 return 0; 1280} 1281 1282static int 1283sunxi_emac_get_resources(struct sunxi_emac_softc *sc) 1284{ 1285 const int phandle = sc->phandle; 1286 bus_addr_t addr, size; 1287 1288 /* Map EMAC registers */ 1289 if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) { 1290 aprint_error_dev(sc->dev, "unable to get registers\n"); 1291 return ENXIO; 1292 } 1293 if (bus_space_map(sc->bst, addr, size, 0, &sc->bsh) != 0) { 1294 aprint_error_dev(sc->dev, "unable to map registers\n"); 1295 return ENXIO; 1296 } 1297 1298 /* Get SYSCON registers */ 1299 sc->syscon = fdtbus_syscon_acquire(phandle, "syscon"); 1300 if (sc->syscon == NULL) { 1301 aprint_error_dev(sc->dev, "unable to acquire syscon\n"); 1302 return ENXIO; 1303 } 1304 1305 /* The "ahb"/"stmmaceth" clock and reset is required */ 1306 if ((sc->clk_ahb = fdtbus_clock_get(phandle, "ahb")) == NULL && 1307 (sc->clk_ahb = fdtbus_clock_get(phandle, "stmmaceth")) == NULL) { 1308 aprint_error_dev(sc->dev, "unable to get clock\n"); 1309 return ENXIO; 1310 } 1311 if ((sc->rst_ahb = fdtbus_reset_get(phandle, "ahb")) == NULL && 1312 (sc->rst_ahb = fdtbus_reset_get(phandle, "stmmaceth")) == NULL) { 1313 aprint_error_dev(sc->dev, "unable to get reset\n"); 1314 return ENXIO; 1315 } 1316 1317 /* Internal PHY clock and reset are optional properties. 
*/ 1318 sc->clk_ephy = fdtbus_clock_get(phandle, "ephy"); 1319 if (sc->clk_ephy == NULL) { 1320 int phy_phandle = fdtbus_get_phandle(phandle, "phy-handle"); 1321 if (phy_phandle != -1) 1322 sc->clk_ephy = fdtbus_clock_get_index(phy_phandle, 0); 1323 } 1324 sc->rst_ephy = fdtbus_reset_get(phandle, "ephy"); 1325 if (sc->rst_ephy == NULL) { 1326 int phy_phandle = fdtbus_get_phandle(phandle, "phy-handle"); 1327 if (phy_phandle != -1) 1328 sc->rst_ephy = fdtbus_reset_get_index(phy_phandle, 0); 1329 } 1330 1331 /* Regulator is optional */ 1332 sc->reg_phy = fdtbus_regulator_acquire(phandle, "phy-supply"); 1333 1334 /* Reset GPIO is optional */ 1335 sc->pin_reset = fdtbus_gpio_acquire(sc->phandle, 1336 "allwinner,reset-gpio", GPIO_PIN_OUTPUT); 1337 1338 return 0; 1339} 1340 1341static int 1342sunxi_emac_get_phyid(struct sunxi_emac_softc *sc) 1343{ 1344 bus_addr_t addr; 1345 int phy_phandle; 1346 1347 phy_phandle = fdtbus_get_phandle(sc->phandle, "phy"); 1348 if (phy_phandle == -1) 1349 phy_phandle = fdtbus_get_phandle(sc->phandle, "phy-handle"); 1350 if (phy_phandle == -1) 1351 return MII_PHY_ANY; 1352 1353 if (fdtbus_get_reg(phy_phandle, 0, &addr, NULL) != 0) 1354 return MII_PHY_ANY; 1355 1356 return (int)addr; 1357} 1358 1359static int 1360sunxi_emac_match(device_t parent, cfdata_t cf, void *aux) 1361{ 1362 struct fdt_attach_args * const faa = aux; 1363 1364 return of_compatible_match(faa->faa_phandle, compat_data); 1365} 1366 1367static void 1368sunxi_emac_attach(device_t parent, device_t self, void *aux) 1369{ 1370 struct fdt_attach_args * const faa = aux; 1371 struct sunxi_emac_softc * const sc = device_private(self); 1372 const int phandle = faa->faa_phandle; 1373 struct mii_data *mii = &sc->mii; 1374 struct ifnet *ifp = &sc->ec.ec_if; 1375 uint8_t eaddr[ETHER_ADDR_LEN]; 1376 char intrstr[128]; 1377 1378 sc->dev = self; 1379 sc->phandle = phandle; 1380 sc->bst = faa->faa_bst; 1381 sc->dmat = faa->faa_dmat; 1382 sc->type = of_compatible_lookup(phandle, 
compat_data)->value; 1383 sc->phy_id = sunxi_emac_get_phyid(sc); 1384 1385 aprint_naive("\n"); 1386 aprint_normal(": EMAC\n"); 1387 1388 if (sunxi_emac_get_resources(sc) != 0) { 1389 aprint_error_dev(self, 1390 "cannot allocate resources for device\n"); 1391 return; 1392 } 1393 if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) { 1394 aprint_error_dev(self, "cannot decode interrupt\n"); 1395 return; 1396 } 1397 1398 mutex_init(&sc->mtx, MUTEX_DEFAULT, IPL_NET); 1399 callout_init(&sc->stat_ch, CALLOUT_FLAGS); 1400 callout_setfunc(&sc->stat_ch, sunxi_emac_tick, sc); 1401 1402 /* Setup clocks and regulators */ 1403 if (sunxi_emac_setup_resources(sc) != 0) 1404 return; 1405 1406 /* Read MAC address before resetting the chip */ 1407 sunxi_emac_get_eaddr(sc, eaddr); 1408 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr)); 1409 1410 /* Reset PHY if necessary */ 1411 if (sunxi_emac_phy_reset(sc) != 0) { 1412 aprint_error_dev(self, "failed to reset PHY\n"); 1413 return; 1414 } 1415 1416 /* Setup DMA descriptors */ 1417 if (sunxi_emac_setup_dma(sc) != 0) { 1418 aprint_error_dev(self, "failed to setup DMA descriptors\n"); 1419 return; 1420 } 1421 1422 /* Install interrupt handler */ 1423 sc->ih = fdtbus_intr_establish_xname(phandle, 0, IPL_NET, 1424 FDT_INTR_FLAGS, sunxi_emac_intr, sc, device_xname(self)); 1425 if (sc->ih == NULL) { 1426 aprint_error_dev(self, "failed to establish interrupt on %s\n", 1427 intrstr); 1428 return; 1429 } 1430 aprint_normal_dev(self, "interrupting on %s\n", intrstr); 1431 1432 /* Setup ethernet interface */ 1433 ifp->if_softc = sc; 1434 snprintf(ifp->if_xname, IFNAMSIZ, EMAC_IFNAME, device_unit(self)); 1435 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1436#ifdef EMAC_MPSAFE 1437 ifp->if_extflags = IFEF_MPSAFE; 1438#endif 1439 ifp->if_start = sunxi_emac_start; 1440 ifp->if_ioctl = sunxi_emac_ioctl; 1441 ifp->if_init = sunxi_emac_init; 1442 ifp->if_stop = sunxi_emac_stop; 1443 ifp->if_capabilities = 
IFCAP_CSUM_IPv4_Rx | 1444 IFCAP_CSUM_IPv4_Tx | 1445 IFCAP_CSUM_TCPv4_Rx | 1446 IFCAP_CSUM_TCPv4_Tx | 1447 IFCAP_CSUM_UDPv4_Rx | 1448 IFCAP_CSUM_UDPv4_Tx; 1449 ifp->if_capenable = ifp->if_capabilities; 1450 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); 1451 IFQ_SET_READY(&ifp->if_snd); 1452 1453 /* 802.1Q VLAN-sized frames are supported */ 1454 sc->ec.ec_capabilities |= ETHERCAP_VLAN_MTU; 1455 1456 /* Attach MII driver */ 1457 sc->ec.ec_mii = mii; 1458 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); 1459 mii->mii_ifp = ifp; 1460 mii->mii_readreg = sunxi_emac_mii_readreg; 1461 mii->mii_writereg = sunxi_emac_mii_writereg; 1462 mii->mii_statchg = sunxi_emac_mii_statchg; 1463 mii_attach(self, mii, 0xffffffff, sc->phy_id, MII_OFFSET_ANY, 1464 MIIF_DOPAUSE); 1465 1466 if (LIST_EMPTY(&mii->mii_phys)) { 1467 aprint_error_dev(self, "no PHY found!\n"); 1468 return; 1469 } 1470 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 1471 1472 /* Attach interface */ 1473 if_attach(ifp); 1474 if_deferred_start_init(ifp, NULL); 1475 1476 /* Attach ethernet interface */ 1477 ether_ifattach(ifp, eaddr); 1478} 1479 1480CFATTACH_DECL_NEW(sunxi_emac, sizeof(struct sunxi_emac_softc), 1481 sunxi_emac_match, sunxi_emac_attach, NULL, NULL); 1482