1// SPDX-License-Identifier: GPL-2.0 2/* 3 * Copyright (c) 2016, NVIDIA CORPORATION. 4 * 5 * Portions based on U-Boot's rtl8169.c. 6 */ 7 8/* 9 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of 10 * Service) IP block. The IP supports multiple options for bus type, clocking/ 11 * reset structure, and feature list. 12 * 13 * The driver is written such that generic core logic is kept separate from 14 * configuration-specific logic. Code that interacts with configuration- 15 * specific resources is split out into separate functions to avoid polluting 16 * common code. If/when this driver is enhanced to support multiple 17 * configurations, the core code should be adapted to call all configuration- 18 * specific functions through function pointers, with the definition of those 19 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data 20 * field. 21 * 22 * The following configurations are currently supported: 23 * tegra186: 24 * NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an 25 * AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and 26 * supports a single RGMII PHY. This configuration also has SW control over 27 * all clock and reset signals to the HW block. 28 */ 29 30#define LOG_CATEGORY UCLASS_ETH 31 32#include <common.h> 33#include <clk.h> 34#include <cpu_func.h> 35#include <dm.h> 36#include <errno.h> 37#include <eth_phy.h> 38#include <log.h> 39#include <malloc.h> 40#include <memalign.h> 41#include <miiphy.h> 42#include <net.h> 43#include <netdev.h> 44#include <phy.h> 45#include <reset.h> 46#include <wait_bit.h> 47#include <asm/cache.h> 48#include <asm/gpio.h> 49#include <asm/io.h> 50#ifdef CONFIG_ARCH_IMX8M 51#include <asm/arch/clock.h> 52#include <asm/mach-imx/sys_proto.h> 53#endif 54#include <linux/delay.h> 55#include <linux/printk.h> 56 57#include "dwc_eth_qos.h" 58 59/* 60 * TX and RX descriptors are 16 bytes. 
This causes problems with the cache
 * maintenance on CPUs where the cache-line size exceeds the size of these
 * descriptors. What will happen is that when the driver receives a packet
 * it will be immediately requeued for the hardware to reuse. The CPU will
 * therefore need to flush the cache-line containing the descriptor, which
 * will cause all other descriptors in the same cache-line to be flushed
 * along with it. If one of those descriptors had been written to by the
 * device those changes (and the associated packet) will be lost.
 *
 * To work around this, we make use of non-cached memory if available. If
 * descriptors are mapped uncached there's no need to manually flush them
 * or invalidate them.
 *
 * Note that this only applies to descriptors. The packet data buffers do
 * not have the same constraints since they are 1536 bytes large, so they
 * are unlikely to share cache-lines.
 */

/* Allocate one descriptor ring (TX or RX), aligned for cache maintenance. */
static void *eqos_alloc_descs(struct eqos_priv *eqos, unsigned int num)
{
	return memalign(ARCH_DMA_MINALIGN, num * eqos->desc_size);
}

/* Free a ring allocated by eqos_alloc_descs(). */
static void eqos_free_descs(void *descs)
{
	free(descs);
}

/*
 * Return a pointer to descriptor @num of the RX ring (rx == true) or the
 * TX ring (rx == false). Descriptors are desc_size bytes apart, which may
 * include padding beyond sizeof(struct eqos_desc).
 */
static struct eqos_desc *eqos_get_desc(struct eqos_priv *eqos,
				       unsigned int num, bool rx)
{
	return (rx ? eqos->rx_descs : eqos->tx_descs) +
	       (num * eqos->desc_size);
}

/* Invalidate the cache-line(s) holding one descriptor before the CPU reads it. */
void eqos_inval_desc_generic(void *desc)
{
	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
				  ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

/* Flush the cache-line(s) holding one descriptor so the DMA engine sees it. */
void eqos_flush_desc_generic(void *desc)
{
	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
				  ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

/* Invalidate a packet buffer (Tegra186 variant). */
static void eqos_inval_buffer_tegra186(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

/* Invalidate a packet buffer, rounded out to cache-line boundaries. */
void eqos_inval_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

/* Flush a packet buffer (Tegra186 variant). */
static void eqos_flush_buffer_tegra186(void *buf, size_t size)
{
	flush_cache((unsigned long)buf, size);
}

/* Flush a packet buffer, rounded out to cache-line boundaries. */
void eqos_flush_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

/* Wait (up to 1s) for the MDIO interface to go idle (GB bit clear). */
static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
{
	return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
				 EQOS_MAC_MDIO_ADDRESS_GB, false,
				 1000000, true);
}

/*
 * MII bus read op: read @mdio_reg of PHY @mdio_addr through the MAC's
 * built-in MDIO master. Returns the register value, or a negative error
 * code if the bus does not go idle.
 */
static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			  int mdio_reg)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr,
	      mdio_reg);

	ret = 
eqos_mdio_wait_idle(eqos); 161 if (ret) { 162 pr_err("MDIO not idle at entry\n"); 163 return ret; 164 } 165 166 val = readl(&eqos->mac_regs->mdio_address); 167 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 168 EQOS_MAC_MDIO_ADDRESS_C45E; 169 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 170 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 171 (eqos->config->config_mac_mdio << 172 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 173 (EQOS_MAC_MDIO_ADDRESS_GOC_READ << 174 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 175 EQOS_MAC_MDIO_ADDRESS_GB; 176 writel(val, &eqos->mac_regs->mdio_address); 177 178 udelay(eqos->config->mdio_wait); 179 180 ret = eqos_mdio_wait_idle(eqos); 181 if (ret) { 182 pr_err("MDIO read didn't complete\n"); 183 return ret; 184 } 185 186 val = readl(&eqos->mac_regs->mdio_data); 187 val &= EQOS_MAC_MDIO_DATA_GD_MASK; 188 189 debug("%s: val=%x\n", __func__, val); 190 191 return val; 192} 193 194static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad, 195 int mdio_reg, u16 mdio_val) 196{ 197 struct eqos_priv *eqos = bus->priv; 198 u32 val; 199 int ret; 200 201 debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev, 202 mdio_addr, mdio_reg, mdio_val); 203 204 ret = eqos_mdio_wait_idle(eqos); 205 if (ret) { 206 pr_err("MDIO not idle at entry\n"); 207 return ret; 208 } 209 210 writel(mdio_val, &eqos->mac_regs->mdio_data); 211 212 val = readl(&eqos->mac_regs->mdio_address); 213 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 214 EQOS_MAC_MDIO_ADDRESS_C45E; 215 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 216 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 217 (eqos->config->config_mac_mdio << 218 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 219 (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE << 220 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 221 EQOS_MAC_MDIO_ADDRESS_GB; 222 writel(val, &eqos->mac_regs->mdio_address); 223 224 udelay(eqos->config->mdio_wait); 225 226 ret = eqos_mdio_wait_idle(eqos); 227 if (ret) { 228 pr_err("MDIO read didn't complete\n"); 229 return ret; 230 } 231 
232 return 0; 233} 234 235static int eqos_start_clks_tegra186(struct udevice *dev) 236{ 237#ifdef CONFIG_CLK 238 struct eqos_priv *eqos = dev_get_priv(dev); 239 int ret; 240 241 debug("%s(dev=%p):\n", __func__, dev); 242 243 ret = clk_enable(&eqos->clk_slave_bus); 244 if (ret < 0) { 245 pr_err("clk_enable(clk_slave_bus) failed: %d\n", ret); 246 goto err; 247 } 248 249 ret = clk_enable(&eqos->clk_master_bus); 250 if (ret < 0) { 251 pr_err("clk_enable(clk_master_bus) failed: %d\n", ret); 252 goto err_disable_clk_slave_bus; 253 } 254 255 ret = clk_enable(&eqos->clk_rx); 256 if (ret < 0) { 257 pr_err("clk_enable(clk_rx) failed: %d\n", ret); 258 goto err_disable_clk_master_bus; 259 } 260 261 ret = clk_enable(&eqos->clk_ptp_ref); 262 if (ret < 0) { 263 pr_err("clk_enable(clk_ptp_ref) failed: %d\n", ret); 264 goto err_disable_clk_rx; 265 } 266 267 ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000); 268 if (ret < 0) { 269 pr_err("clk_set_rate(clk_ptp_ref) failed: %d\n", ret); 270 goto err_disable_clk_ptp_ref; 271 } 272 273 ret = clk_enable(&eqos->clk_tx); 274 if (ret < 0) { 275 pr_err("clk_enable(clk_tx) failed: %d\n", ret); 276 goto err_disable_clk_ptp_ref; 277 } 278#endif 279 280 debug("%s: OK\n", __func__); 281 return 0; 282 283#ifdef CONFIG_CLK 284err_disable_clk_ptp_ref: 285 clk_disable(&eqos->clk_ptp_ref); 286err_disable_clk_rx: 287 clk_disable(&eqos->clk_rx); 288err_disable_clk_master_bus: 289 clk_disable(&eqos->clk_master_bus); 290err_disable_clk_slave_bus: 291 clk_disable(&eqos->clk_slave_bus); 292err: 293 debug("%s: FAILED: %d\n", __func__, ret); 294 return ret; 295#endif 296} 297 298static int eqos_stop_clks_tegra186(struct udevice *dev) 299{ 300#ifdef CONFIG_CLK 301 struct eqos_priv *eqos = dev_get_priv(dev); 302 303 debug("%s(dev=%p):\n", __func__, dev); 304 305 clk_disable(&eqos->clk_tx); 306 clk_disable(&eqos->clk_ptp_ref); 307 clk_disable(&eqos->clk_rx); 308 clk_disable(&eqos->clk_master_bus); 309 clk_disable(&eqos->clk_slave_bus); 310#endif 311 312 
debug("%s: OK\n", __func__);
	return 0;
}

/*
 * Pulse the PHY GPIO reset and the EQoS reset control (Tegra186). The PHY
 * reset is released before the controller reset is cycled.
 */
static int eqos_start_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d\n", ret);
		return ret;
	}

	udelay(2);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d\n", ret);
		return ret;
	}

	ret = reset_assert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_assert() failed: %d\n", ret);
		return ret;
	}

	udelay(2);

	ret = reset_deassert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_deassert() failed: %d\n", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

/* Put the EQoS block and the PHY back into reset (Tegra186). */
static int eqos_stop_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert(&eqos->reset_ctl);
	dm_gpio_set_value(&eqos->phy_reset_gpio, 1);

	return 0;
}

/*
 * Run automatic pad calibration (Tegra186). The pad power-down override
 * is released again on both the success and failure paths.
 */
static int eqos_calibrate_pads_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	udelay(1);

	setbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);

	/* Wait for calibration to become active, then to complete. */
	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
	if (ret) {
		pr_err("calibrate didn't start\n");
		goto failed;
	}

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false);
	if (ret) {
		pr_err("calibrate didn't finish\n");
		goto failed;
	}

	ret = 0;

failed:
	clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	debug("%s: returns %d\n", __func__, ret);

	return ret;
}

/* Disable automatic pad calibration (Tegra186). */
static int eqos_disable_calibration_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_ENABLE);

	return 0;
}

/* Rate of the clock driving the MAC's microsecond tick counter (Tegra186). */
static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_slave_bus);
#else
	return 0;
#endif
}

/* Set the MAC to full-duplex operation. */
static int eqos_set_full_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	return 0;
}

/* Set the MAC to half-duplex operation. */
static int eqos_set_half_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

/* Select GMII (1 Gbit/s) port speed: clear PS and FES. */
static int eqos_set_gmii_speed(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

/* Select MII 100 Mbit/s port speed: set PS and FES. */
static int eqos_set_mii_speed_100(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | 
EQOS_MAC_CONFIGURATION_FES); 474 475 return 0; 476} 477 478static int eqos_set_mii_speed_10(struct udevice *dev) 479{ 480 struct eqos_priv *eqos = dev_get_priv(dev); 481 482 debug("%s(dev=%p):\n", __func__, dev); 483 484 clrsetbits_le32(&eqos->mac_regs->configuration, 485 EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS); 486 487 return 0; 488} 489 490static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev) 491{ 492#ifdef CONFIG_CLK 493 struct eqos_priv *eqos = dev_get_priv(dev); 494 ulong rate; 495 int ret; 496 497 debug("%s(dev=%p):\n", __func__, dev); 498 499 switch (eqos->phy->speed) { 500 case SPEED_1000: 501 rate = 125 * 1000 * 1000; 502 break; 503 case SPEED_100: 504 rate = 25 * 1000 * 1000; 505 break; 506 case SPEED_10: 507 rate = 2.5 * 1000 * 1000; 508 break; 509 default: 510 pr_err("invalid speed %d\n", eqos->phy->speed); 511 return -EINVAL; 512 } 513 514 ret = clk_set_rate(&eqos->clk_tx, rate); 515 if (ret < 0) { 516 pr_err("clk_set_rate(tx_clk, %lu) failed: %d\n", rate, ret); 517 return ret; 518 } 519#endif 520 521 return 0; 522} 523 524static int eqos_adjust_link(struct udevice *dev) 525{ 526 struct eqos_priv *eqos = dev_get_priv(dev); 527 int ret; 528 bool en_calibration; 529 530 debug("%s(dev=%p):\n", __func__, dev); 531 532 if (eqos->phy->duplex) 533 ret = eqos_set_full_duplex(dev); 534 else 535 ret = eqos_set_half_duplex(dev); 536 if (ret < 0) { 537 pr_err("eqos_set_*_duplex() failed: %d\n", ret); 538 return ret; 539 } 540 541 switch (eqos->phy->speed) { 542 case SPEED_1000: 543 en_calibration = true; 544 ret = eqos_set_gmii_speed(dev); 545 break; 546 case SPEED_100: 547 en_calibration = true; 548 ret = eqos_set_mii_speed_100(dev); 549 break; 550 case SPEED_10: 551 en_calibration = false; 552 ret = eqos_set_mii_speed_10(dev); 553 break; 554 default: 555 pr_err("invalid speed %d\n", eqos->phy->speed); 556 return -EINVAL; 557 } 558 if (ret < 0) { 559 pr_err("eqos_set_*mii_speed*() failed: %d\n", ret); 560 return ret; 561 } 562 563 if 
(en_calibration) { 564 ret = eqos->config->ops->eqos_calibrate_pads(dev); 565 if (ret < 0) { 566 pr_err("eqos_calibrate_pads() failed: %d\n", 567 ret); 568 return ret; 569 } 570 } else { 571 ret = eqos->config->ops->eqos_disable_calibration(dev); 572 if (ret < 0) { 573 pr_err("eqos_disable_calibration() failed: %d\n", 574 ret); 575 return ret; 576 } 577 } 578 ret = eqos->config->ops->eqos_set_tx_clk_speed(dev); 579 if (ret < 0) { 580 pr_err("eqos_set_tx_clk_speed() failed: %d\n", ret); 581 return ret; 582 } 583 584 return 0; 585} 586 587static int eqos_write_hwaddr(struct udevice *dev) 588{ 589 struct eth_pdata *plat = dev_get_plat(dev); 590 struct eqos_priv *eqos = dev_get_priv(dev); 591 uint32_t val; 592 593 /* 594 * This function may be called before start() or after stop(). At that 595 * time, on at least some configurations of the EQoS HW, all clocks to 596 * the EQoS HW block will be stopped, and a reset signal applied. If 597 * any register access is attempted in this state, bus timeouts or CPU 598 * hangs may occur. This check prevents that. 599 * 600 * A simple solution to this problem would be to not implement 601 * write_hwaddr(), since start() always writes the MAC address into HW 602 * anyway. However, it is desirable to implement write_hwaddr() to 603 * support the case of SW that runs subsequent to U-Boot which expects 604 * the MAC address to already be programmed into the EQoS registers, 605 * which must happen irrespective of whether the U-Boot user (or 606 * scripts) actually made use of the EQoS device, and hence 607 * irrespective of whether start() was ever called. 608 * 609 * Note that this requirement by subsequent SW is not valid for 610 * Tegra186, and is likely not valid for any non-PCI instantiation of 611 * the EQoS HW block. This function is implemented solely as 612 * future-proofing with the expectation the driver will eventually be 613 * ported to some system where the expectation above is true. 
	 */
	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
		return 0;

	/* Update the MAC address */
	val = (plat->enetaddr[5] << 8) |
	      (plat->enetaddr[4]);
	writel(val, &eqos->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
	      (plat->enetaddr[2] << 16) |
	      (plat->enetaddr[1] << 8) |
	      (plat->enetaddr[0]);
	writel(val, &eqos->mac_regs->address0_low);

	return 0;
}

/*
 * Read the MAC address via the configuration-specific hook. Returns 0 if
 * the resulting address is valid, non-zero otherwise.
 */
static int eqos_read_rom_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	ret = eqos->config->ops->eqos_get_enetaddr(dev);
	if (ret < 0)
		return ret;

	return !is_valid_ethaddr(pdata->enetaddr);
}

/*
 * Look up the PHY address from the "phy-handle" DT property, recording the
 * PHY's ofnode in @priv. Returns the address ("reg", default 0), or
 * -ENODEV if there is no phy-handle.
 */
static int eqos_get_phy_addr(struct eqos_priv *priv, struct udevice *dev)
{
	struct ofnode_phandle_args phandle_args;
	int reg;

	if (dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
				       &phandle_args)) {
		debug("Failed to find phy-handle");
		return -ENODEV;
	}

	priv->phy_of_node = phandle_args.node;

	reg = ofnode_read_u32_default(phandle_args.node, "reg", 0);

	return reg;
}

/*
 * Bring the interface up: release resets, soft-reset the DMA, calibrate
 * pads, connect and start the PHY, then program the MTL, MAC, and DMA
 * sub-blocks and hand the descriptor rings to the hardware.
 */
static int eqos_start(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret, i;
	ulong rate;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;
	ulong desc_pad;
	ulong addr64;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->tx_desc_idx = 0;
	eqos->rx_desc_idx = 0;

	ret = eqos->config->ops->eqos_start_resets(dev);
	if (ret < 0) {
		pr_err("eqos_start_resets() failed: %d\n", ret);
		goto err;
	}

	udelay(10);

	eqos->reg_access_ok = true;

	/*
	 * Assert the SWR first, to actually reset the MAC and to latch in
	 * e.g. i.MX8M Plus GPR[1] content, which selects interface mode.
	 */
	setbits_le32(&eqos->dma_regs->mode, EQOS_DMA_MODE_SWR);

	ret = wait_for_bit_le32(&eqos->dma_regs->mode,
				EQOS_DMA_MODE_SWR, false,
				eqos->config->swr_wait, false);
	if (ret) {
		pr_err("EQOS_DMA_MODE_SWR stuck\n");
		goto err_stop_resets;
	}

	ret = eqos->config->ops->eqos_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("eqos_calibrate_pads() failed: %d\n", ret);
		goto err_stop_resets;
	}

	if (eqos->config->ops->eqos_get_tick_clk_rate) {
		rate = eqos->config->ops->eqos_get_tick_clk_rate(dev);

		/* Clock cycles per microsecond, minus one. */
		val = (rate / 1000000) - 1;
		writel(val, &eqos->mac_regs->us_tic_counter);
	}

	/*
	 * if PHY was already connected and configured,
	 * don't need to reconnect/reconfigure again
	 */
	if (!eqos->phy) {
		int addr = -1;
		ofnode fixed_node;

		if (IS_ENABLED(CONFIG_PHY_FIXED)) {
			fixed_node = ofnode_find_subnode(dev_ofnode(dev),
							 "fixed-link");
			if (ofnode_valid(fixed_node))
				eqos->phy = fixed_phy_create(dev_ofnode(dev));
		}

		if (!eqos->phy) {
			addr = eqos_get_phy_addr(eqos, dev);
			eqos->phy = phy_connect(eqos->mii, addr, dev,
						eqos->config->interface(dev));
		}

		if (!eqos->phy) {
			pr_err("phy_connect() failed\n");
			ret = -ENODEV;
			goto err_stop_resets;
		}

		if (eqos->max_speed) {
			ret = phy_set_supported(eqos->phy, eqos->max_speed);
			if (ret) {
				pr_err("phy_set_supported() failed: %d\n", ret);
				goto err_shutdown_phy;
			}
		}

		eqos->phy->node = eqos->phy_of_node;
		ret = phy_config(eqos->phy);
		if (ret < 0) {
			pr_err("phy_config() failed: %d\n", ret);
			goto err_shutdown_phy;
		}
	}

	ret = phy_startup(eqos->phy);
	if (ret < 0) {
		pr_err("phy_startup() failed: %d\n", ret);
		goto err_shutdown_phy;
	}

	if (!eqos->phy->link) {
		pr_err("No link\n");
		ret = -EAGAIN;
		goto err_shutdown_phy;
	}

	ret = eqos_adjust_link(dev);
	if (ret < 0) {
		
pr_err("eqos_adjust_link() failed: %d\n", ret);
		goto err_shutdown_phy;
	}

	/* Configure MTL */

	/* Enable Store and Forward mode for TX */
	/* Program Tx operating mode */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
		     (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit Queue weight */
	writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since no jumbo frame */
	setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
		     EQOS_MTL_RXQ0_OPERATION_MODE_RSF);

	/* Transmit/Receive queue fifo size; use all RAM for 1 queue */
	val = readl(&eqos->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/* r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting */
	tx_fifo_sz = 128 << tx_fifo_sz;
	rx_fifo_sz = 128 << rx_fifo_sz;

	/* Allow platform to override TX/RX fifo size */
	if (eqos->tx_fifo_sz)
		tx_fifo_sz = eqos->tx_fifo_sz;
	if (eqos->rx_fifo_sz)
		rx_fifo_sz = eqos->rx_fifo_sz;

	/* r/tqs is encoded as (n / 256) - 1 */
	tqs = tx_fifo_sz / 256 - 1;
	rqs = rx_fifo_sz / 256 - 1;

	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	/* Flow control used only if each channel gets 4KB or more FIFO */
	if (rqs >= ((4096 / 256) - 1)) {
		u32 rfd, rfa;

		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

		/*
		 * Set Threshold for Activating Flow Control space for min 2
		 * frames ie, (1500 * 1) = 1500 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control for space of
		 * min 1 frame (frame size 1500bytes) in receive fifo
		 */
		if (rqs == ((4096 / 256) - 1)) {
			/*
			 * This violates the above formula because of FIFO size
			 * limit therefore overflow may occur in spite of this.
			 */
			rfd = 0x3;	/* Full-3K */
			rfa = 0x1;	/* Full-1.5K */
		} else if (rqs == ((8192 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0xa;	/* Full-6K */
		} else if (rqs == ((16384 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x12;	/* Full-10K */
		} else {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x1E;	/* Full-16K */
		}

		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
				(rfd <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(rfa <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
	}

	/* Configure MAC */

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			eqos->config->config_mac <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Multicast and Broadcast Queue Enable */
	setbits_le32(&eqos->mac_regs->unused_0a4,
		     0x00100000);
	/* Enable promiscuous mode */
	setbits_le32(&eqos->mac_regs->unused_004[1],
		     0x1);

	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
	/* Assign priority for TX flow control */
	clrbits_le32(&eqos->mac_regs->txq_prty_map0,
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
	/* Assign priority for RX flow control */
	clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
	/* Enable flow control */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
		     EQOS_MAC_RX_FLOW_CTRL_RFE);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_GPSLCE |
			EQOS_MAC_CONFIGURATION_WD |
			EQOS_MAC_CONFIGURATION_JD |
			EQOS_MAC_CONFIGURATION_JE,
			EQOS_MAC_CONFIGURATION_CST |
			EQOS_MAC_CONFIGURATION_ACS);

	eqos_write_hwaddr(dev);

	/* Configure DMA */

	/* Enable OSP mode */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			EQOS_MAX_PACKET_SIZE <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	/* Inter-descriptor padding, in axi_bus_width units (DSL field). */
	desc_pad = (eqos->desc_size - sizeof(struct eqos_desc)) /
		   eqos->config->axi_bus_width;

	setbits_le32(&eqos->dma_regs->ch0_control,
		     EQOS_DMA_CH0_CONTROL_PBLX8 |
		     (desc_pad << EQOS_DMA_CH0_CONTROL_DSL_SHIFT));

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
	      EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
	      EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
	writel(val, &eqos->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(eqos->tx_descs, 0, eqos->desc_size * EQOS_DESCRIPTORS_TX);
	memset(eqos->rx_descs, 0, eqos->desc_size * EQOS_DESCRIPTORS_RX);

	for (i = 0; i < EQOS_DESCRIPTORS_TX; i++) {
		struct eqos_desc *tx_desc = eqos_get_desc(eqos, i, false);

		eqos->config->ops->eqos_flush_desc(tx_desc);
	}

	/* Hand every RX descriptor, pointing at its buffer, to the HW. */
	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
		struct eqos_desc *rx_desc = eqos_get_desc(eqos, i, true);

		addr64 = (ulong)(eqos->rx_dma_buf + (i * EQOS_MAX_PACKET_SIZE));
		rx_desc->des0 = lower_32_bits(addr64);
		rx_desc->des1 = upper_32_bits(addr64);
		rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
		mb();
		eqos->config->ops->eqos_flush_desc(rx_desc);
		eqos->config->ops->eqos_inval_buffer((void *)addr64, EQOS_MAX_PACKET_SIZE);
	}

	addr64 = (ulong)eqos_get_desc(eqos, 0, false);
	writel(upper_32_bits(addr64), &eqos->dma_regs->ch0_txdesc_list_haddress);
	writel(lower_32_bits(addr64), &eqos->dma_regs->ch0_txdesc_list_address);
	writel(EQOS_DESCRIPTORS_TX - 1,
	       &eqos->dma_regs->ch0_txdesc_ring_length);

	addr64 = (ulong)eqos_get_desc(eqos, 0, true);
	writel(upper_32_bits(addr64), &eqos->dma_regs->ch0_rxdesc_list_haddress);
	writel(lower_32_bits(addr64), 
&eqos->dma_regs->ch0_rxdesc_list_address); 983 writel(EQOS_DESCRIPTORS_RX - 1, 984 &eqos->dma_regs->ch0_rxdesc_ring_length); 985 986 /* Enable everything */ 987 setbits_le32(&eqos->dma_regs->ch0_tx_control, 988 EQOS_DMA_CH0_TX_CONTROL_ST); 989 setbits_le32(&eqos->dma_regs->ch0_rx_control, 990 EQOS_DMA_CH0_RX_CONTROL_SR); 991 setbits_le32(&eqos->mac_regs->configuration, 992 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 993 994 /* TX tail pointer not written until we need to TX a packet */ 995 /* 996 * Point RX tail pointer at last descriptor. Ideally, we'd point at the 997 * first descriptor, implying all descriptors were available. However, 998 * that's not distinguishable from none of the descriptors being 999 * available. 1000 */ 1001 last_rx_desc = (ulong)eqos_get_desc(eqos, EQOS_DESCRIPTORS_RX - 1, true); 1002 writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1003 1004 eqos->started = true; 1005 1006 debug("%s: OK\n", __func__); 1007 return 0; 1008 1009err_shutdown_phy: 1010 phy_shutdown(eqos->phy); 1011err_stop_resets: 1012 eqos->config->ops->eqos_stop_resets(dev); 1013err: 1014 pr_err("FAILED: %d\n", ret); 1015 return ret; 1016} 1017 1018static void eqos_stop(struct udevice *dev) 1019{ 1020 struct eqos_priv *eqos = dev_get_priv(dev); 1021 int i; 1022 1023 debug("%s(dev=%p):\n", __func__, dev); 1024 1025 if (!eqos->started) 1026 return; 1027 eqos->started = false; 1028 eqos->reg_access_ok = false; 1029 1030 /* Disable TX DMA */ 1031 clrbits_le32(&eqos->dma_regs->ch0_tx_control, 1032 EQOS_DMA_CH0_TX_CONTROL_ST); 1033 1034 /* Wait for TX all packets to drain out of MTL */ 1035 for (i = 0; i < 1000000; i++) { 1036 u32 val = readl(&eqos->mtl_regs->txq0_debug); 1037 u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) & 1038 EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK; 1039 u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS; 1040 if ((trcsts != 1) && (!txqsts)) 1041 break; 1042 } 1043 1044 /* Turn off MAC TX and RX */ 1045 
clrbits_le32(&eqos->mac_regs->configuration, 1046 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 1047 1048 /* Wait for all RX packets to drain out of MTL */ 1049 for (i = 0; i < 1000000; i++) { 1050 u32 val = readl(&eqos->mtl_regs->rxq0_debug); 1051 u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) & 1052 EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK; 1053 u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) & 1054 EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK; 1055 if ((!prxq) && (!rxqsts)) 1056 break; 1057 } 1058 1059 /* Turn off RX DMA */ 1060 clrbits_le32(&eqos->dma_regs->ch0_rx_control, 1061 EQOS_DMA_CH0_RX_CONTROL_SR); 1062 1063 if (eqos->phy) { 1064 phy_shutdown(eqos->phy); 1065 } 1066 eqos->config->ops->eqos_stop_resets(dev); 1067 1068 debug("%s: OK\n", __func__); 1069} 1070 1071static int eqos_send(struct udevice *dev, void *packet, int length) 1072{ 1073 struct eqos_priv *eqos = dev_get_priv(dev); 1074 struct eqos_desc *tx_desc; 1075 int i; 1076 1077 debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet, 1078 length); 1079 1080 memcpy(eqos->tx_dma_buf, packet, length); 1081 eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length); 1082 1083 tx_desc = eqos_get_desc(eqos, eqos->tx_desc_idx, false); 1084 eqos->tx_desc_idx++; 1085 eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX; 1086 1087 tx_desc->des0 = lower_32_bits((ulong)eqos->tx_dma_buf); 1088 tx_desc->des1 = upper_32_bits((ulong)eqos->tx_dma_buf); 1089 tx_desc->des2 = length; 1090 /* 1091 * Make sure that if HW sees the _OWN write below, it will see all the 1092 * writes to the rest of the descriptor too. 
	 */
	mb();
	/* Single-buffer frame: first and last descriptor bits both set */
	tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
	eqos->config->ops->eqos_flush_desc(tx_desc);

	/* Ring the doorbell: tail pointer = descriptor after the one queued */
	writel((ulong)eqos_get_desc(eqos, eqos->tx_desc_idx, false),
	       &eqos->dma_regs->ch0_txdesc_tail_pointer);

	for (i = 0; i < 1000000; i++) {
		eqos->config->ops->eqos_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
			return 0;
		udelay(1);
	}

	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}

/*
 * Receive one packet if available. If the hardware still owns the current
 * RX descriptor, returns -EAGAIN. Otherwise hands back a zero-copy pointer
 * into the RX DMA buffer region for this descriptor and the frame length
 * taken from des3. The descriptor is recycled later, in eqos_free_pkt().
 */
static int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *rx_desc;
	int length;

	rx_desc = eqos_get_desc(eqos, eqos->rx_desc_idx, true);
	/* Re-read descriptor from memory in case HW wrote it back */
	eqos->config->ops->eqos_inval_desc(rx_desc);
	if (rx_desc->des3 & EQOS_DESC3_OWN)
		return -EAGAIN;

	debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);

	/* Each descriptor has a fixed EQOS_MAX_PACKET_SIZE slot in the pool */
	*packetp = eqos->rx_dma_buf +
		   (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	/* Low 15 bits of des3 carry the received frame length */
	length = rx_desc->des3 & 0x7fff;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	eqos->config->ops->eqos_inval_buffer(*packetp, length);

	return length;
}

/*
 * Return a packet buffer (previously handed out by eqos_recv()) to the
 * hardware. Descriptors are recycled one cacheline-group at a time: only
 * when the last descriptor sharing a cacheline has been consumed is the
 * whole group re-armed and the RX tail pointer advanced. This avoids
 * flushing a cacheline that still contains descriptors owned by the HW
 * (see the comment above eqos_alloc_descs()). Packets must be freed in
 * the order they were received; anything else returns -EINVAL.
 */
static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	u32 idx, idx_mask = eqos->desc_per_cacheline - 1;
	uchar *packet_expected;
	struct eqos_desc *rx_desc = NULL;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	packet_expected = eqos->rx_dma_buf +
			  (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	eqos->config->ops->eqos_inval_buffer(packet, length);

	/* Only re-arm once the last descriptor of a cacheline group frees */
	if ((eqos->rx_desc_idx & idx_mask) == idx_mask) {
		for (idx = eqos->rx_desc_idx - idx_mask;
		     idx <= eqos->rx_desc_idx;
		     idx++) {
			ulong addr64;

			rx_desc = eqos_get_desc(eqos, idx, true);
			/* Clear the buffer address before handing back */
			rx_desc->des0 = 0;
			rx_desc->des1 = 0;
			mb();
			eqos->config->ops->eqos_flush_desc(rx_desc);
			/*
			 * NOTE(review): invalidates the same 'packet' span on
			 * every iteration rather than the per-idx buffer slot;
			 * presumably harmless belt-and-braces — confirm intent
			 * against upstream history before changing.
			 */
			eqos->config->ops->eqos_inval_buffer(packet, length);
			addr64 = (ulong)(eqos->rx_dma_buf + (idx * EQOS_MAX_PACKET_SIZE));
			rx_desc->des0 = lower_32_bits(addr64);
			rx_desc->des1 = upper_32_bits(addr64);
			rx_desc->des2 = 0;
			/*
			 * Make sure that if HW sees the _OWN write below,
			 * it will see all the writes to the rest of the
			 * descriptor too.
			 */
			mb();
			rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
			eqos->config->ops->eqos_flush_desc(rx_desc);
		}
		/* Advance tail past the whole re-armed group */
		writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);
	}

	eqos->rx_desc_idx++;
	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;

	return 0;
}

/*
 * Allocate the DMA descriptor rings and packet buffers shared by all EQoS
 * configurations. The per-descriptor stride is derived from the AXI bus
 * width so that descriptors either each occupy a full cacheline, or are
 * grouped so cache maintenance can be done safely (see eqos_free_pkt()).
 * Returns 0 or -ENOMEM, releasing partial allocations on failure.
 */
static int eqos_probe_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	unsigned int desc_step;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	/* Maximum distance between neighboring descriptors, in Bytes. */
	desc_step = sizeof(struct eqos_desc) +
		    EQOS_DMA_CH0_CONTROL_DSL_MASK * eqos->config->axi_bus_width;
	if (desc_step < ARCH_DMA_MINALIGN) {
		/*
		 * The EQoS hardware implementation cannot place one descriptor
		 * per cacheline, it is necessary to place multiple descriptors
		 * per cacheline in memory and do cache management carefully.
		 */
		/* Largest power-of-two step that still fits the constraint */
		eqos->desc_size = BIT(fls(desc_step) - 1);
	} else {
		eqos->desc_size = ALIGN(sizeof(struct eqos_desc),
					(unsigned int)ARCH_DMA_MINALIGN);
	}
	eqos->desc_per_cacheline = ARCH_DMA_MINALIGN / eqos->desc_size;

	eqos->tx_descs = eqos_alloc_descs(eqos, EQOS_DESCRIPTORS_TX);
	if (!eqos->tx_descs) {
		debug("%s: eqos_alloc_descs(tx) failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	eqos->rx_descs = eqos_alloc_descs(eqos, EQOS_DESCRIPTORS_RX);
	if (!eqos->rx_descs) {
		debug("%s: eqos_alloc_descs(rx) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_descs;
	}

	/* One bounce buffer for TX; RX gets one slot per descriptor */
	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
	if (!eqos->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);

	eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
	if (!eqos->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);

	/* Drop any stale cachelines covering the fresh RX buffer pool */
	eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf,
			EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX);

	debug("%s: OK\n", __func__);
	return 0;

err_free_tx_dma_buf:
	free(eqos->tx_dma_buf);
err_free_descs:
	eqos_free_descs(eqos->rx_descs);
err_free_tx_descs:
	eqos_free_descs(eqos->tx_descs);
err:

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

/*
 * Free everything allocated by eqos_probe_resources_core(), in reverse
 * order of allocation. Always succeeds.
 */
static int eqos_remove_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(eqos->rx_dma_buf);
	free(eqos->tx_dma_buf);
	eqos_free_descs(eqos->rx_descs);
	eqos_free_descs(eqos->tx_descs);

	debug("%s: OK\n", __func__);
	return 0;
}

/*
 * Acquire Tegra186-specific resources from the device tree: the "eqos"
 * reset, the PHY reset GPIO (requested as output, driven active — i.e.
 * PHY initially held in reset), and the five named clocks the block
 * needs. On failure, everything acquired so far is released via the
 * goto-cleanup chain; note the clocks themselves need no explicit free.
 */
static int eqos_probe_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
	if (ret) {
		pr_err("reset_get_by_name(rst) failed: %d\n", ret);
		return ret;
	}

	ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
				   &eqos->phy_reset_gpio,
				   GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	if (ret) {
		pr_err("gpio_request_by_name(phy reset) failed: %d\n", ret);
		goto err_free_reset_eqos;
	}

	ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
	if (ret) {
		pr_err("clk_get_by_name(slave_bus) failed: %d\n", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d\n", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d\n", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref);
	if (ret) {
		pr_err("clk_get_by_name(ptp_ref) failed: %d\n", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d\n", ret);
		goto err_free_gpio_phy_reset;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_gpio_phy_reset:
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
err_free_reset_eqos:
	reset_free(&eqos->reset_ctl);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

/*
 * NOTE(review): returns MII, yet the file header says the Tegra186
 * configuration "supports a single RGMII PHY" — confirm which is intended
 * before relying on this value.
 */
static phy_interface_t eqos_get_interface_tegra186(const struct udevice *dev)
{
	return PHY_INTERFACE_MODE_MII;
}

static int
eqos_remove_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	/* Release exactly what eqos_probe_resources_tegra186() acquired */
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
	reset_free(&eqos->reset_ctl);

	debug("%s: OK\n", __func__);
	return 0;
}

/*
 * Driver probe: map the MAC/MTL/DMA register regions, allocate the core
 * DMA resources, acquire configuration-specific resources, start the
 * clocks, and register (or reuse, with CONFIG_DM_ETH_PHY) the MDIO bus.
 * The PHY itself is connected later, in eqos_start(). Errors unwind in
 * reverse order via the goto chain.
 */
static int eqos_probe(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->dev = dev;
	/* .data of the matched eqos_ids[] entry is a struct eqos_config * */
	eqos->config = (void *)dev_get_driver_data(dev);

	eqos->regs = dev_read_addr(dev);
	if (eqos->regs == FDT_ADDR_T_NONE) {
		pr_err("dev_read_addr() failed\n");
		return -ENODEV;
	}
	/* Sub-block register windows at fixed offsets from the base */
	eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
	eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
	eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
	eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);

	/* Optional DT limit; 0 means "no limit configured" */
	eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0);

	ret = eqos_probe_resources_core(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources_core() failed: %d\n", ret);
		return ret;
	}

	ret = eqos->config->ops->eqos_probe_resources(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources() failed: %d\n", ret);
		goto err_remove_resources_core;
	}

	ret = eqos->config->ops->eqos_start_clks(dev);
	if (ret < 0) {
		pr_err("eqos_start_clks() failed: %d\n", ret);
		goto err_remove_resources_tegra;
	}

#ifdef CONFIG_DM_ETH_PHY
	eqos->mii = eth_phy_get_mdio_bus(dev);
#endif
	if (!eqos->mii) {
		/* No shared bus available: create our own MDIO bus */
		eqos->mii = mdio_alloc();
		if (!eqos->mii) {
			pr_err("mdio_alloc() failed\n");
			ret = -ENOMEM;
			goto err_stop_clks;
		}
		eqos->mii->read = eqos_mdio_read;
		eqos->mii->write = eqos_mdio_write;
		eqos->mii->priv = eqos;
		strcpy(eqos->mii->name, dev->name);

		ret = mdio_register(eqos->mii);
		if (ret < 0) {
			pr_err("mdio_register() failed: %d\n", ret);
			goto err_free_mdio;
		}
	}

#ifdef CONFIG_DM_ETH_PHY
	eth_phy_set_mdio_bus(dev, eqos->mii);
#endif

	debug("%s: OK\n", __func__);
	return 0;

err_free_mdio:
	mdio_free(eqos->mii);
err_stop_clks:
	eqos->config->ops->eqos_stop_clks(dev);
err_remove_resources_tegra:
	eqos->config->ops->eqos_remove_resources(dev);
err_remove_resources_core:
	eqos_remove_resources_core(dev);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

/* Driver remove: tear everything down in the reverse order of eqos_probe(). */
static int eqos_remove(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	mdio_unregister(eqos->mii);
	mdio_free(eqos->mii);
	eqos->config->ops->eqos_stop_clks(dev);
	eqos->config->ops->eqos_remove_resources(dev);

	eqos_remove_resources_core(dev);

	debug("%s: OK\n", __func__);
	return 0;
}

/* Do-nothing implementation for optional configuration-specific hooks. */
int eqos_null_ops(struct udevice *dev)
{
	return 0;
}

/* uclass ETH operations common to all supported configurations */
static const struct eth_ops eqos_ops = {
	.start = eqos_start,
	.stop = eqos_stop,
	.send = eqos_send,
	.recv = eqos_recv,
	.free_pkt = eqos_free_pkt,
	.write_hwaddr = eqos_write_hwaddr,
	.read_rom_hwaddr = eqos_read_rom_hwaddr,
};

/* Tegra186 implementations of the configuration-specific hooks */
static struct eqos_ops eqos_tegra186_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_tegra186,
	.eqos_flush_buffer = eqos_flush_buffer_tegra186,
	.eqos_probe_resources = eqos_probe_resources_tegra186,
	.eqos_remove_resources = eqos_remove_resources_tegra186,
	.eqos_stop_resets = eqos_stop_resets_tegra186,
	.eqos_start_resets = eqos_start_resets_tegra186,
	.eqos_stop_clks = eqos_stop_clks_tegra186,
	.eqos_start_clks = eqos_start_clks_tegra186,
	.eqos_calibrate_pads = eqos_calibrate_pads_tegra186,
	.eqos_disable_calibration = eqos_disable_calibration_tegra186,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186,
	.eqos_get_enetaddr = eqos_null_ops,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186
};

static const struct eqos_config __maybe_unused eqos_tegra186_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10,
	.swr_wait = 10,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35,
	.axi_bus_width = EQOS_AXI_WIDTH_128,
	.interface = eqos_get_interface_tegra186,
	.ops = &eqos_tegra186_ops
};

/*
 * Device-tree match table. Each entry's .data points at the eqos_config
 * for that SoC; eqos_probe() retrieves it via dev_get_driver_data().
 * Non-Tegra configs are defined in sibling files (see dwc_eth_qos.h).
 */
static const struct udevice_id eqos_ids[] = {
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_TEGRA186)
	{
		.compatible = "nvidia,tegra186-eqos",
		.data = (ulong)&eqos_tegra186_config
	},
#endif
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_STM32)
	{
		.compatible = "st,stm32mp13-dwmac",
		.data = (ulong)&eqos_stm32mp13_config
	},
	{
		.compatible = "st,stm32mp1-dwmac",
		.data = (ulong)&eqos_stm32mp15_config
	},
#endif
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_IMX)
	{
		.compatible = "nxp,imx8mp-dwmac-eqos",
		.data = (ulong)&eqos_imx_config
	},
	{
		.compatible = "nxp,imx93-dwmac-eqos",
		.data = (ulong)&eqos_imx_config
	},
#endif
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_ROCKCHIP)
	{
		.compatible = "rockchip,rk3568-gmac",
		.data = (ulong)&eqos_rockchip_config
	},
	{
		.compatible = "rockchip,rk3588-gmac",
		.data = (ulong)&eqos_rockchip_config
	},
#endif
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_QCOM)
	{
		.compatible = "qcom,qcs404-ethqos",
		.data = (ulong)&eqos_qcom_config
	},
#endif
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_STARFIVE)
	{
		.compatible = "starfive,jh7110-dwmac",
		.data = (ulong)&eqos_jh7110_config
	},
#endif
	{ }
};

1552U_BOOT_DRIVER(eth_eqos) = { 1553 .name = "eth_eqos", 1554 .id = UCLASS_ETH, 1555 .of_match = of_match_ptr(eqos_ids), 1556 .probe = eqos_probe, 1557 .remove = eqos_remove, 1558 .ops = &eqos_ops, 1559 .priv_auto = sizeof(struct eqos_priv), 1560 .plat_auto = sizeof(struct eth_pdata), 1561}; 1562