// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2017, 2023 NXP
 *
 * DPAA2 (ldpaa) Ethernet driver: drives a DPNI/DPMAC pair created through
 * the Management Complex (MC) and moves frames via QBMAN software portals.
 */

#include <common.h>
#include <cpu_func.h>
#include <dm/device_compat.h>
#include <fsl-mc/fsl_dpmac.h>
#include <fsl-mc/ldpaa_wriop.h>
#include <hwconfig.h>
#include <log.h>
#include <malloc.h>
#include <miiphy.h>
#include <net.h>
#include <phy.h>
#include <asm/io.h>
#include <asm/types.h>
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <asm/global_data.h>
#include <net/ldpaa_eth.h>
#include "ldpaa_eth.h"

#ifdef CONFIG_PHYLIB
/*
 * Connect the device to its PHY (resolved via driver model) and apply the
 * initial PHY configuration.  Does nothing if no PHY could be connected.
 */
static void init_phy(struct udevice *dev)
{
	struct ldpaa_eth_priv *priv = dev_get_priv(dev);

	priv->phy = dm_eth_phy_connect(dev);

	if (!priv->phy)
		return;

	phy_config(priv->phy);
}
#endif

/*
 * Read one full snapshot of the DPNI statistics pages from the MC into
 * @data, laid out as consecutive u64 counters in page order (pages 4 and 5
 * are skipped).  A page that fails to read contributes zeros.
 */
static void ldpaa_eth_collect_dpni_stats(struct udevice *dev, u64 *data)
{
	union dpni_statistics dpni_stats;
	/* Per-page payload sizes, used to know how many u64s each page holds */
	int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
		sizeof(dpni_stats.page_0),
		sizeof(dpni_stats.page_1),
		sizeof(dpni_stats.page_2),
		sizeof(dpni_stats.page_3),
		sizeof(dpni_stats.page_4),
		sizeof(dpni_stats.page_5),
		sizeof(dpni_stats.page_6),
	};
	int j, k, num_cnt, err, i = 0;

	for (j = 0; j <= 6; j++) {
		/* We're not interested in pages 4 & 5 for now */
		if (j == 4 || j == 5)
			continue;
		err = dpni_get_statistics(dflt_mc_io, MC_CMD_NO_FLAGS,
					  dflt_dpni->dpni_handle,
					  j, 0, &dpni_stats);
		if (err) {
			/* report zeros rather than stale data for this page */
			memset(&dpni_stats, 0, sizeof(dpni_stats));
			printf("dpni_get_stats(%d) failed\n", j);
		}

		num_cnt = dpni_stats_page_size[j] / sizeof(u64);
		for (k = 0; k < num_cnt; k++)
			*(data + i++) = dpni_stats.raw.counter[k];
	}
}

/* Accumulate one DPNI counter snapshot into the per-device running totals. */
static void ldpaa_eth_add_dpni_stats(struct udevice *dev, u64 *data)
{
	struct ldpaa_eth_priv *priv = dev_get_priv(dev);
	int i;

	for (i = 0; i < LDPAA_ETH_DPNI_NUM_STATS; i++)
		priv->dpni_stats[i] += data[i];
}

82static void ldpaa_eth_collect_dpmac_stats(struct udevice *dev, u64 *data) 83{ 84 struct ldpaa_eth_priv *priv = dev_get_priv(dev); 85 int err, i; 86 u64 value; 87 88 for (i = 0; i < LDPAA_ETH_DPMAC_NUM_STATS; i++) { 89 err = dpmac_get_counter(dflt_mc_io, MC_CMD_NO_FLAGS, 90 priv->dpmac_handle, i, 91 &value); 92 if (err) 93 printf("dpmac_get_counter(%d) failed\n", i); 94 95 *(data + i) = value; 96 } 97} 98 99static void ldpaa_eth_add_dpmac_stats(struct udevice *dev, u64 *data) 100{ 101 struct ldpaa_eth_priv *priv = dev_get_priv(dev); 102 int i; 103 104 for (i = 0; i < LDPAA_ETH_DPMAC_NUM_STATS; i++) 105 priv->dpmac_stats[i] += data[i]; 106} 107 108#ifdef DEBUG 109static void ldpaa_eth_dump_dpni_stats(struct udevice *dev, u64 *data) 110{ 111 int i; 112 113 printf("DPNI counters:\n"); 114 for (i = 0; i < LDPAA_ETH_DPNI_NUM_STATS; i++) 115 printf(" %s: %llu\n", ldpaa_eth_dpni_stat_strings[i], data[i]); 116} 117 118static void ldpaa_eth_dump_dpmac_stats(struct udevice *dev, u64 *data) 119{ 120 int i; 121 122 printf("DPMAC counters:\n"); 123 for (i = 0; i < LDPAA_ETH_DPMAC_NUM_STATS; i++) 124 printf(" %s: %llu\n", ldpaa_eth_dpmac_stat_strings[i], data[i]); 125} 126#endif 127 128static void ldpaa_eth_rx(struct ldpaa_eth_priv *priv, 129 const struct dpaa_fd *fd) 130{ 131 u64 fd_addr; 132 uint16_t fd_offset; 133 uint32_t fd_length; 134 struct ldpaa_fas *fas; 135 uint32_t status, err; 136 u32 timeo = (CONFIG_SYS_HZ * 2) / 1000; 137 u32 time_start; 138 struct qbman_release_desc releasedesc; 139 struct qbman_swp *swp = dflt_dpio->sw_portal; 140 141 fd_addr = ldpaa_fd_get_addr(fd); 142 fd_offset = ldpaa_fd_get_offset(fd); 143 fd_length = ldpaa_fd_get_len(fd); 144 145 debug("Rx frame:data addr=0x%p size=0x%x\n", (u64 *)fd_addr, fd_length); 146 147 if (fd->simple.frc & LDPAA_FD_FRC_FASV) { 148 /* Read the frame annotation status word and check for errors */ 149 fas = (struct ldpaa_fas *) 150 ((uint8_t *)(fd_addr) + 151 dflt_dpni->buf_layout.private_data_size); 152 status = 
le32_to_cpu(fas->status); 153 if (status & LDPAA_ETH_RX_ERR_MASK) { 154 printf("Rx frame error(s): 0x%08x\n", 155 status & LDPAA_ETH_RX_ERR_MASK); 156 goto error; 157 } else if (status & LDPAA_ETH_RX_UNSUPP_MASK) { 158 printf("Unsupported feature in bitmask: 0x%08x\n", 159 status & LDPAA_ETH_RX_UNSUPP_MASK); 160 goto error; 161 } 162 } 163 164 debug("Rx frame: To Upper layer\n"); 165 net_process_received_packet((uint8_t *)(fd_addr) + fd_offset, 166 fd_length); 167 168error: 169 flush_dcache_range(fd_addr, fd_addr + LDPAA_ETH_RX_BUFFER_SIZE); 170 qbman_release_desc_clear(&releasedesc); 171 qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid); 172 time_start = get_timer(0); 173 do { 174 /* Release buffer into the QBMAN */ 175 err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1); 176 } while (get_timer(time_start) < timeo && err == -EBUSY); 177 178 if (err == -EBUSY) 179 printf("Rx frame: QBMAN buffer release fails\n"); 180 181 return; 182} 183 184static int ldpaa_eth_pull_dequeue_rx(struct udevice *dev, 185 int flags, uchar **packetp) 186{ 187 struct ldpaa_eth_priv *priv = dev_get_priv(dev); 188 const struct ldpaa_dq *dq; 189 const struct dpaa_fd *fd; 190 int i = 5, err = 0, status; 191 u32 timeo = (CONFIG_SYS_HZ * 2) / 1000; 192 u32 time_start; 193 static struct qbman_pull_desc pulldesc; 194 struct qbman_swp *swp = dflt_dpio->sw_portal; 195 196 while (--i) { 197 qbman_pull_desc_clear(&pulldesc); 198 qbman_pull_desc_set_numframes(&pulldesc, 1); 199 qbman_pull_desc_set_fq(&pulldesc, priv->rx_dflt_fqid); 200 201 err = qbman_swp_pull(swp, &pulldesc); 202 if (err < 0) { 203 printf("Dequeue frames error:0x%08x\n", err); 204 continue; 205 } 206 207 time_start = get_timer(0); 208 209 do { 210 dq = qbman_swp_dqrr_next(swp); 211 } while (get_timer(time_start) < timeo && !dq); 212 213 if (dq) { 214 /* Check for valid frame. 
If not sent a consume 215 * confirmation to QBMAN otherwise give it to NADK 216 * application and then send consume confirmation to 217 * QBMAN. 218 */ 219 status = (uint8_t)ldpaa_dq_flags(dq); 220 if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) { 221 debug("Dequeue RX frames:"); 222 debug("No frame delivered\n"); 223 224 qbman_swp_dqrr_consume(swp, dq); 225 continue; 226 } 227 228 fd = ldpaa_dq_fd(dq); 229 230 /* Obtain FD and process it */ 231 ldpaa_eth_rx(priv, fd); 232 qbman_swp_dqrr_consume(swp, dq); 233 break; 234 } else { 235 err = -ENODATA; 236 debug("No DQRR entries\n"); 237 break; 238 } 239 } 240 241 return err; 242} 243 244static int ldpaa_eth_tx(struct udevice *dev, void *buf, int len) 245{ 246 struct ldpaa_eth_priv *priv = dev_get_priv(dev); 247 struct dpaa_fd fd; 248 u64 buffer_start; 249 int data_offset, err; 250 u32 timeo = (CONFIG_SYS_HZ * 10) / 1000; 251 u32 time_start; 252 struct qbman_swp *swp = dflt_dpio->sw_portal; 253 struct qbman_eq_desc ed; 254 struct qbman_release_desc releasedesc; 255 256 /* Setup the FD fields */ 257 memset(&fd, 0, sizeof(fd)); 258 259 data_offset = priv->tx_data_offset; 260 261 do { 262 err = qbman_swp_acquire(dflt_dpio->sw_portal, 263 dflt_dpbp->dpbp_attr.bpid, 264 &buffer_start, 1); 265 } while (err == -EBUSY); 266 267 if (err <= 0) { 268 printf("qbman_swp_acquire() failed\n"); 269 return -ENOMEM; 270 } 271 272 debug("TX data: malloc buffer start=0x%p\n", (u64 *)buffer_start); 273 274 memcpy(((uint8_t *)(buffer_start) + data_offset), buf, len); 275 276 flush_dcache_range(buffer_start, buffer_start + 277 LDPAA_ETH_RX_BUFFER_SIZE); 278 279 ldpaa_fd_set_addr(&fd, (u64)buffer_start); 280 ldpaa_fd_set_offset(&fd, (uint16_t)(data_offset)); 281 ldpaa_fd_set_bpid(&fd, dflt_dpbp->dpbp_attr.bpid); 282 ldpaa_fd_set_len(&fd, len); 283 284 fd.simple.ctrl = LDPAA_FD_CTRL_ASAL | LDPAA_FD_CTRL_PTA | 285 LDPAA_FD_CTRL_PTV1; 286 287 qbman_eq_desc_clear(&ed); 288 qbman_eq_desc_set_no_orp(&ed, 0); 289 qbman_eq_desc_set_qd(&ed, 
priv->tx_qdid, priv->tx_flow_id, 0); 290 291 time_start = get_timer(0); 292 293 while (get_timer(time_start) < timeo) { 294 err = qbman_swp_enqueue(swp, &ed, 295 (const struct qbman_fd *)(&fd)); 296 if (err != -EBUSY) 297 break; 298 } 299 300 if (err < 0) { 301 printf("error enqueueing Tx frame\n"); 302 goto error; 303 } 304 305 return err; 306 307error: 308 qbman_release_desc_clear(&releasedesc); 309 qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid); 310 time_start = get_timer(0); 311 do { 312 /* Release buffer into the QBMAN */ 313 err = qbman_swp_release(swp, &releasedesc, &buffer_start, 1); 314 } while (get_timer(time_start) < timeo && err == -EBUSY); 315 316 if (err == -EBUSY) 317 printf("TX data: QBMAN buffer release fails\n"); 318 319 return err; 320} 321 322static int ldpaa_get_dpmac_state(struct ldpaa_eth_priv *priv, 323 struct dpmac_link_state *state) 324{ 325 phy_interface_t enet_if; 326 struct phy_device *phydev = NULL; 327 int err; 328 329 /* let's start off with maximum capabilities */ 330 enet_if = wriop_get_enet_if(priv->dpmac_id); 331 switch (enet_if) { 332 case PHY_INTERFACE_MODE_XGMII: 333 state->rate = SPEED_10000; 334 break; 335 default: 336 state->rate = SPEED_1000; 337 break; 338 } 339 340 state->up = 1; 341 state->options |= DPMAC_LINK_OPT_AUTONEG; 342 phydev = priv->phy; 343 344 if (phydev) { 345 err = phy_startup(phydev); 346 if (err) { 347 printf("%s: Could not initialize\n", phydev->dev->name); 348 state->up = 0; 349 } else if (phydev->link) { 350 state->rate = min(state->rate, (uint32_t)phydev->speed); 351 if (!phydev->duplex) 352 state->options |= DPMAC_LINK_OPT_HALF_DUPLEX; 353 if (!phydev->autoneg) 354 state->options &= ~DPMAC_LINK_OPT_AUTONEG; 355 } else { 356 state->up = 0; 357 } 358 } 359 360 if (!phydev) 361 state->options &= ~DPMAC_LINK_OPT_AUTONEG; 362 363 if (!state->up) { 364 state->rate = 0; 365 state->options = 0; 366 return -ENOLINK; 367 } 368 369 return 0; 370} 371 372static int ldpaa_eth_open(struct 
udevice *dev) 373{ 374 struct eth_pdata *plat = dev_get_plat(dev); 375 struct ldpaa_eth_priv *priv = dev_get_priv(dev); 376 struct dpmac_link_state dpmac_link_state = { 0 }; 377#ifdef DEBUG 378 struct dpni_link_state link_state; 379#endif 380 int err = 0; 381 struct dpni_queue d_queue_cfg = { 0 }; 382 struct dpni_queue_id d_queue; 383 384 if (eth_is_active(dev)) 385 return 0; 386 387 if (get_mc_boot_status() != 0) { 388 printf("ERROR (MC is not booted)\n"); 389 return -ENODEV; 390 } 391 392 if (get_dpl_apply_status() == 0) { 393 printf("ERROR (DPL is deployed. No device available)\n"); 394 return -ENODEV; 395 } 396 397 /* DPMAC initialization */ 398 err = ldpaa_dpmac_setup(priv); 399 if (err < 0) 400 goto err_dpmac_setup; 401 402 err = ldpaa_get_dpmac_state(priv, &dpmac_link_state); 403 if (err < 0) 404 goto err_dpmac_bind; 405 406 /* DPMAC binding DPNI */ 407 err = ldpaa_dpmac_bind(priv); 408 if (err) 409 goto err_dpmac_bind; 410 411 /* DPNI initialization */ 412 err = ldpaa_dpni_setup(priv); 413 if (err < 0) 414 goto err_dpni_setup; 415 416 err = ldpaa_dpbp_setup(); 417 if (err < 0) 418 goto err_dpbp_setup; 419 420 /* DPNI binding DPBP */ 421 err = ldpaa_dpni_bind(priv); 422 if (err) 423 goto err_dpni_bind; 424 425 err = dpni_add_mac_addr(dflt_mc_io, MC_CMD_NO_FLAGS, 426 dflt_dpni->dpni_handle, plat->enetaddr, 0, 0, 0); 427 if (err) { 428 printf("dpni_add_mac_addr() failed\n"); 429 return err; 430 } 431 432 err = dpni_enable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle); 433 if (err < 0) { 434 printf("dpni_enable() failed\n"); 435 return err; 436 } 437 438 err = dpmac_set_link_state(dflt_mc_io, MC_CMD_NO_FLAGS, 439 priv->dpmac_handle, &dpmac_link_state); 440 if (err < 0) { 441 printf("dpmac_set_link_state() failed\n"); 442 return err; 443 } 444 445#ifdef DEBUG 446 printf("DPMAC link status: %d - ", dpmac_link_state.up); 447 dpmac_link_state.up == 0 ? printf("down\n") : 448 dpmac_link_state.up == 1 ? 
printf("up\n") : printf("error state\n"); 449 450 err = dpni_get_link_state(dflt_mc_io, MC_CMD_NO_FLAGS, 451 dflt_dpni->dpni_handle, &link_state); 452 if (err < 0) { 453 printf("dpni_get_link_state() failed\n"); 454 return err; 455 } 456 457 printf("DPNI link status: %d - ", link_state.up); 458 link_state.up == 0 ? printf("down\n") : 459 link_state.up == 1 ? printf("up\n") : printf("error state\n"); 460#endif 461 462 memset(&d_queue, 0, sizeof(struct dpni_queue)); 463 err = dpni_get_queue(dflt_mc_io, MC_CMD_NO_FLAGS, 464 dflt_dpni->dpni_handle, DPNI_QUEUE_RX, 465 0, 0, &d_queue_cfg, &d_queue); 466 if (err) { 467 printf("dpni_get_queue failed\n"); 468 goto err_get_queue; 469 } 470 471 priv->rx_dflt_fqid = d_queue.fqid; 472 473 err = dpni_get_qdid(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle, 474 DPNI_QUEUE_TX, &priv->tx_qdid); 475 if (err) { 476 printf("dpni_get_qdid() failed\n"); 477 goto err_qdid; 478 } 479 480 return dpmac_link_state.up; 481 482err_qdid: 483err_get_queue: 484 dpni_disable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle); 485err_dpni_bind: 486 ldpaa_dpbp_free(); 487err_dpbp_setup: 488 dpni_close(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle); 489err_dpni_setup: 490err_dpmac_bind: 491 dpmac_close(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpmac_handle); 492 dpmac_destroy(dflt_mc_io, 493 dflt_dprc_handle, 494 MC_CMD_NO_FLAGS, priv->dpmac_id); 495err_dpmac_setup: 496 return err; 497} 498 499static void ldpaa_eth_stop(struct udevice *dev) 500{ 501 struct ldpaa_eth_priv *priv = dev_get_priv(dev); 502 struct phy_device *phydev = NULL; 503 int err = 0; 504 u64 *data; 505 506 if (!eth_is_active(dev)) 507 return; 508 509 data = kzalloc(sizeof(u64) * LDPAA_ETH_DPNI_NUM_STATS, GFP_KERNEL); 510 if (data) { 511 ldpaa_eth_collect_dpni_stats(dev, data); 512 ldpaa_eth_add_dpni_stats(dev, data); 513#ifdef DEBUG 514 ldpaa_eth_dump_dpni_stats(dev, data); 515#endif 516 } 517 kfree(data); 518 519 data = kzalloc(sizeof(u64) * LDPAA_ETH_DPMAC_NUM_STATS, 
GFP_KERNEL); 520 if (data) { 521 ldpaa_eth_collect_dpmac_stats(dev, data); 522 ldpaa_eth_add_dpmac_stats(dev, data); 523#ifdef DEBUG 524 ldpaa_eth_dump_dpmac_stats(dev, data); 525#endif 526 } 527 kfree(data); 528 529 err = dprc_disconnect(dflt_mc_io, MC_CMD_NO_FLAGS, 530 dflt_dprc_handle, &dpmac_endpoint); 531 if (err < 0) 532 printf("dprc_disconnect() failed dpmac_endpoint\n"); 533 534 err = dpmac_close(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpmac_handle); 535 if (err < 0) 536 printf("dpmac_close() failed\n"); 537 538 err = dpmac_destroy(dflt_mc_io, 539 dflt_dprc_handle, 540 MC_CMD_NO_FLAGS, 541 priv->dpmac_id); 542 if (err < 0) 543 printf("dpmac_destroy() failed\n"); 544 545 /* Stop Tx and Rx traffic */ 546 err = dpni_disable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle); 547 if (err < 0) 548 printf("dpni_disable() failed\n"); 549 550 phydev = priv->phy; 551 if (phydev) 552 phy_shutdown(phydev); 553 554 /* Free DPBP handle and reset. */ 555 ldpaa_dpbp_free(); 556 557 dpni_reset(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle); 558 if (err < 0) 559 printf("dpni_reset() failed\n"); 560 561 dpni_close(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle); 562 if (err < 0) 563 printf("dpni_close() failed\n"); 564} 565 566static void ldpaa_dpbp_drain_cnt(int count) 567{ 568 uint64_t buf_array[7]; 569 void *addr; 570 int ret, i; 571 572 BUG_ON(count > 7); 573 574 do { 575 ret = qbman_swp_acquire(dflt_dpio->sw_portal, 576 dflt_dpbp->dpbp_attr.bpid, 577 buf_array, count); 578 if (ret < 0) { 579 printf("qbman_swp_acquire() failed\n"); 580 return; 581 } 582 for (i = 0; i < ret; i++) { 583 addr = (void *)buf_array[i]; 584 debug("Free: buffer addr =0x%p\n", addr); 585 free(addr); 586 } 587 } while (ret); 588} 589 590static void ldpaa_dpbp_drain(void) 591{ 592 int i; 593 for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7) 594 ldpaa_dpbp_drain_cnt(7); 595} 596 597static int ldpaa_bp_add_7(uint16_t bpid) 598{ 599 uint64_t buf_array[7]; 600 u8 *addr; 601 int i; 602 struct 
qbman_release_desc rd; 603 604 for (i = 0; i < 7; i++) { 605 addr = memalign(LDPAA_ETH_BUF_ALIGN, LDPAA_ETH_RX_BUFFER_SIZE); 606 if (!addr) { 607 printf("addr allocation failed\n"); 608 goto err_alloc; 609 } 610 memset(addr, 0x00, LDPAA_ETH_RX_BUFFER_SIZE); 611 flush_dcache_range((u64)addr, 612 (u64)(addr + LDPAA_ETH_RX_BUFFER_SIZE)); 613 614 buf_array[i] = (uint64_t)addr; 615 debug("Release: buffer addr =0x%p\n", addr); 616 } 617 618release_bufs: 619 /* In case the portal is busy, retry until successful. 620 * This function is guaranteed to succeed in a reasonable amount 621 * of time. 622 */ 623 624 do { 625 mdelay(1); 626 qbman_release_desc_clear(&rd); 627 qbman_release_desc_set_bpid(&rd, bpid); 628 } while (qbman_swp_release(dflt_dpio->sw_portal, &rd, buf_array, i)); 629 630 return i; 631 632err_alloc: 633 if (i) 634 goto release_bufs; 635 636 return 0; 637} 638 639static int ldpaa_dpbp_seed(uint16_t bpid) 640{ 641 int i; 642 int count; 643 644 for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7) { 645 count = ldpaa_bp_add_7(bpid); 646 if (count < 7) 647 printf("Buffer Seed= %d\n", count); 648 } 649 650 return 0; 651} 652 653static int ldpaa_dpbp_setup(void) 654{ 655 int err; 656 657 err = dpbp_open(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_attr.id, 658 &dflt_dpbp->dpbp_handle); 659 if (err) { 660 printf("dpbp_open() failed\n"); 661 goto err_open; 662 } 663 664 err = dpbp_enable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle); 665 if (err) { 666 printf("dpbp_enable() failed\n"); 667 goto err_enable; 668 } 669 670 err = dpbp_get_attributes(dflt_mc_io, MC_CMD_NO_FLAGS, 671 dflt_dpbp->dpbp_handle, 672 &dflt_dpbp->dpbp_attr); 673 if (err) { 674 printf("dpbp_get_attributes() failed\n"); 675 goto err_get_attr; 676 } 677 678 err = ldpaa_dpbp_seed(dflt_dpbp->dpbp_attr.bpid); 679 680 if (err) { 681 printf("Buffer seeding failed for DPBP %d (bpid=%d)\n", 682 dflt_dpbp->dpbp_attr.id, dflt_dpbp->dpbp_attr.bpid); 683 goto err_seed; 684 } 685 686 return 0; 687 688err_seed: 
689err_get_attr: 690 dpbp_disable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle); 691err_enable: 692 dpbp_close(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle); 693err_open: 694 return err; 695} 696 697static void ldpaa_dpbp_free(void) 698{ 699 ldpaa_dpbp_drain(); 700 dpbp_disable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle); 701 dpbp_reset(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle); 702 dpbp_close(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle); 703} 704 705static int ldpaa_dpmac_version_check(struct fsl_mc_io *mc_io, 706 struct ldpaa_eth_priv *priv) 707{ 708 int error; 709 uint16_t major_ver, minor_ver; 710 711 error = dpmac_get_api_version(dflt_mc_io, 0, 712 &major_ver, 713 &minor_ver); 714 if ((major_ver < DPMAC_VER_MAJOR) || 715 (major_ver == DPMAC_VER_MAJOR && minor_ver < DPMAC_VER_MINOR)) { 716 printf("DPMAC version mismatch found %u.%u,", 717 major_ver, minor_ver); 718 printf("supported version is %u.%u\n", 719 DPMAC_VER_MAJOR, DPMAC_VER_MINOR); 720 return error; 721 } 722 723 return error; 724} 725 726static int ldpaa_dpmac_setup(struct ldpaa_eth_priv *priv) 727{ 728 int err = 0; 729 struct dpmac_cfg dpmac_cfg; 730 731 dpmac_cfg.mac_id = priv->dpmac_id; 732 733 err = dpmac_create(dflt_mc_io, 734 dflt_dprc_handle, 735 MC_CMD_NO_FLAGS, &dpmac_cfg, 736 &priv->dpmac_id); 737 if (err) 738 printf("dpmac_create() failed\n"); 739 740 err = ldpaa_dpmac_version_check(dflt_mc_io, priv); 741 if (err < 0) { 742 printf("ldpaa_dpmac_version_check() failed: %d\n", err); 743 goto err_version_check; 744 } 745 746 err = dpmac_open(dflt_mc_io, 747 MC_CMD_NO_FLAGS, 748 priv->dpmac_id, 749 &priv->dpmac_handle); 750 if (err < 0) { 751 printf("dpmac_open() failed: %d\n", err); 752 goto err_open; 753 } 754 755 return err; 756 757err_open: 758err_version_check: 759 dpmac_destroy(dflt_mc_io, 760 dflt_dprc_handle, 761 MC_CMD_NO_FLAGS, priv->dpmac_id); 762 763 return err; 764} 765 766static int ldpaa_dpmac_bind(struct ldpaa_eth_priv *priv) 767{ 768 
int err = 0; 769 struct dprc_connection_cfg dprc_connection_cfg = { 770 /* If both rates are zero the connection */ 771 /* will be configured in "best effort" mode. */ 772 .committed_rate = 0, 773 .max_rate = 0 774 }; 775 776#ifdef DEBUG 777 struct dprc_endpoint dbg_endpoint; 778 int state = 0; 779#endif 780 781 memset(&dpmac_endpoint, 0, sizeof(struct dprc_endpoint)); 782 strcpy(dpmac_endpoint.type, "dpmac"); 783 dpmac_endpoint.id = priv->dpmac_id; 784 785 memset(&dpni_endpoint, 0, sizeof(struct dprc_endpoint)); 786 strcpy(dpni_endpoint.type, "dpni"); 787 dpni_endpoint.id = dflt_dpni->dpni_id; 788 789 err = dprc_connect(dflt_mc_io, MC_CMD_NO_FLAGS, 790 dflt_dprc_handle, 791 &dpmac_endpoint, 792 &dpni_endpoint, 793 &dprc_connection_cfg); 794 if (err) 795 printf("dprc_connect() failed\n"); 796 797#ifdef DEBUG 798 err = dprc_get_connection(dflt_mc_io, MC_CMD_NO_FLAGS, 799 dflt_dprc_handle, &dpni_endpoint, 800 &dbg_endpoint, &state); 801 printf("%s, DPMAC Type= %s\n", __func__, dbg_endpoint.type); 802 printf("%s, DPMAC ID= %d\n", __func__, dbg_endpoint.id); 803 printf("%s, DPMAC State= %d\n", __func__, state); 804 805 memset(&dbg_endpoint, 0, sizeof(struct dprc_endpoint)); 806 err = dprc_get_connection(dflt_mc_io, MC_CMD_NO_FLAGS, 807 dflt_dprc_handle, &dpmac_endpoint, 808 &dbg_endpoint, &state); 809 printf("%s, DPNI Type= %s\n", __func__, dbg_endpoint.type); 810 printf("%s, DPNI ID= %d\n", __func__, dbg_endpoint.id); 811 printf("%s, DPNI State= %d\n", __func__, state); 812#endif 813 return err; 814} 815 816static int ldpaa_dpni_setup(struct ldpaa_eth_priv *priv) 817{ 818 int err; 819 820 /* and get a handle for the DPNI this interface is associate with */ 821 err = dpni_open(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_id, 822 &dflt_dpni->dpni_handle); 823 if (err) { 824 printf("dpni_open() failed\n"); 825 goto err_open; 826 } 827 err = dpni_get_attributes(dflt_mc_io, MC_CMD_NO_FLAGS, 828 dflt_dpni->dpni_handle, 829 &dflt_dpni->dpni_attrs); 830 if (err) { 831 
printf("dpni_get_attributes() failed (err=%d)\n", err); 832 goto err_get_attr; 833 } 834 835 /* Configure our buffers' layout */ 836 dflt_dpni->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | 837 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | 838 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | 839 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN; 840 dflt_dpni->buf_layout.pass_parser_result = true; 841 dflt_dpni->buf_layout.pass_frame_status = true; 842 dflt_dpni->buf_layout.private_data_size = LDPAA_ETH_SWA_SIZE; 843 /* HW erratum mandates data alignment in multiples of 256 */ 844 dflt_dpni->buf_layout.data_align = LDPAA_ETH_BUF_ALIGN; 845 846 /* ...rx, ... */ 847 err = dpni_set_buffer_layout(dflt_mc_io, MC_CMD_NO_FLAGS, 848 dflt_dpni->dpni_handle, 849 DPNI_QUEUE_RX, &dflt_dpni->buf_layout); 850 if (err) { 851 printf("dpni_set_buffer_layout() failed"); 852 goto err_buf_layout; 853 } 854 855 /* ... tx, ... */ 856 /* remove Rx-only options */ 857 dflt_dpni->buf_layout.options &= ~(DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | 858 DPNI_BUF_LAYOUT_OPT_PARSER_RESULT); 859 err = dpni_set_buffer_layout(dflt_mc_io, MC_CMD_NO_FLAGS, 860 dflt_dpni->dpni_handle, 861 DPNI_QUEUE_TX, &dflt_dpni->buf_layout); 862 if (err) { 863 printf("dpni_set_buffer_layout() failed"); 864 goto err_buf_layout; 865 } 866 867 /* ... tx-confirm. */ 868 dflt_dpni->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE; 869 err = dpni_set_buffer_layout(dflt_mc_io, MC_CMD_NO_FLAGS, 870 dflt_dpni->dpni_handle, 871 DPNI_QUEUE_TX_CONFIRM, &dflt_dpni->buf_layout); 872 if (err) { 873 printf("dpni_set_buffer_layout() failed"); 874 goto err_buf_layout; 875 } 876 877 /* Now that we've set our tx buffer layout, retrieve the minimum 878 * required tx data offset. 
879 */ 880 err = dpni_get_tx_data_offset(dflt_mc_io, MC_CMD_NO_FLAGS, 881 dflt_dpni->dpni_handle, 882 &priv->tx_data_offset); 883 if (err) { 884 printf("dpni_get_tx_data_offset() failed\n"); 885 goto err_data_offset; 886 } 887 888 /* Warn in case TX data offset is not multiple of 64 bytes. */ 889 WARN_ON(priv->tx_data_offset % 64); 890 891 /* Accomodate SWA space. */ 892 priv->tx_data_offset += LDPAA_ETH_SWA_SIZE; 893 debug("priv->tx_data_offset=%d\n", priv->tx_data_offset); 894 895 return 0; 896 897err_data_offset: 898err_buf_layout: 899err_get_attr: 900 dpni_close(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle); 901err_open: 902 return err; 903} 904 905static int ldpaa_dpni_bind(struct ldpaa_eth_priv *priv) 906{ 907 struct dpni_pools_cfg pools_params; 908 struct dpni_queue tx_queue; 909 int err = 0; 910 911 memset(&pools_params, 0, sizeof(pools_params)); 912 pools_params.num_dpbp = 1; 913 pools_params.pools[0].dpbp_id = (uint16_t)dflt_dpbp->dpbp_attr.id; 914 pools_params.pools[0].buffer_size = LDPAA_ETH_RX_BUFFER_SIZE; 915 err = dpni_set_pools(dflt_mc_io, MC_CMD_NO_FLAGS, 916 dflt_dpni->dpni_handle, &pools_params); 917 if (err) { 918 printf("dpni_set_pools() failed\n"); 919 return err; 920 } 921 922 memset(&tx_queue, 0, sizeof(struct dpni_queue)); 923 924 err = dpni_set_queue(dflt_mc_io, MC_CMD_NO_FLAGS, 925 dflt_dpni->dpni_handle, 926 DPNI_QUEUE_TX, 0, 0, 0, &tx_queue); 927 928 if (err) { 929 printf("dpni_set_queue() failed\n"); 930 return err; 931 } 932 933 err = dpni_set_tx_confirmation_mode(dflt_mc_io, MC_CMD_NO_FLAGS, 934 dflt_dpni->dpni_handle, 935 0, DPNI_CONF_DISABLE); 936 if (err) { 937 printf("dpni_set_tx_confirmation_mode() failed\n"); 938 return err; 939 } 940 941 return 0; 942} 943 944static int ldpaa_eth_probe(struct udevice *dev) 945{ 946 struct ofnode_phandle_args phandle; 947 948 /* Nothing to do if there is no "phy-handle" in the DTS node */ 949 if (dev_read_phandle_with_args(dev, "phy-handle", NULL, 950 0, 0, &phandle)) { 951 return 0; 952 
} 953 954 init_phy(dev); 955 956 return 0; 957} 958 959uint32_t ldpaa_eth_get_dpmac_id(struct udevice *dev) 960{ 961 int port_node = dev_of_offset(dev); 962 963 return fdtdec_get_uint(gd->fdt_blob, port_node, "reg", -1); 964} 965 966static int ldpaa_eth_bind(struct udevice *dev) 967{ 968 uint32_t dpmac_id; 969 char eth_name[16]; 970 int phy_mode = -1; 971 972 phy_mode = dev_read_phy_mode(dev); 973 if (phy_mode == PHY_INTERFACE_MODE_NA) { 974 dev_err(dev, "incorrect phy mode\n"); 975 return -EINVAL; 976 } 977 978 dpmac_id = ldpaa_eth_get_dpmac_id(dev); 979 if (dpmac_id == -1) { 980 dev_err(dev, "missing reg field from the dpmac node\n"); 981 return -EINVAL; 982 } 983 984 sprintf(eth_name, "DPMAC%d@%s", dpmac_id, 985 phy_string_for_interface(phy_mode)); 986 device_set_name(dev, eth_name); 987 988 return 0; 989} 990 991static int ldpaa_eth_of_to_plat(struct udevice *dev) 992{ 993 struct ldpaa_eth_priv *priv = dev_get_priv(dev); 994 995 priv->dpmac_id = ldpaa_eth_get_dpmac_id(dev); 996 priv->phy_mode = dev_read_phy_mode(dev); 997 998 return 0; 999} 1000 1001static int ldpaa_eth_get_sset_count(struct udevice *dev) 1002{ 1003 return LDPAA_ETH_DPNI_NUM_STATS + LDPAA_ETH_DPMAC_NUM_STATS; 1004} 1005 1006static void ldpaa_eth_get_strings(struct udevice *dev, u8 *data) 1007{ 1008 u8 *p = data; 1009 int i; 1010 1011 for (i = 0; i < LDPAA_ETH_DPNI_NUM_STATS; i++) { 1012 strlcpy(p, ldpaa_eth_dpni_stat_strings[i], ETH_GSTRING_LEN); 1013 p += ETH_GSTRING_LEN; 1014 } 1015 1016 for (i = 0; i < LDPAA_ETH_DPMAC_NUM_STATS; i++) { 1017 strlcpy(p, ldpaa_eth_dpmac_stat_strings[i], ETH_GSTRING_LEN); 1018 p += ETH_GSTRING_LEN; 1019 } 1020} 1021 1022static void ldpaa_eth_get_stats(struct udevice *dev, u64 *data) 1023{ 1024 struct ldpaa_eth_priv *priv = dev_get_priv(dev); 1025 int i, j = 0; 1026 1027 for (i = 0; i < LDPAA_ETH_DPNI_NUM_STATS; i++) 1028 *(data + j++) = priv->dpni_stats[i]; 1029 1030 for (i = 0; i < LDPAA_ETH_DPMAC_NUM_STATS; i++) 1031 *(data + j++) = priv->dpmac_stats[i]; 1032} 

/* U-Boot ethernet uclass operations for the DPAA2 DPNI/DPMAC interface */
static const struct eth_ops ldpaa_eth_ops = {
	.start = ldpaa_eth_open,
	.send = ldpaa_eth_tx,
	.recv = ldpaa_eth_pull_dequeue_rx,
	.stop = ldpaa_eth_stop,
	.get_sset_count = ldpaa_eth_get_sset_count,
	.get_strings = ldpaa_eth_get_strings,
	.get_stats = ldpaa_eth_get_stats,
};

/* Matches the dpmac nodes exposed by the fsl-mc (Management Complex) bus */
static const struct udevice_id ldpaa_eth_of_ids[] = {
	{ .compatible = "fsl,qoriq-mc-dpmac" },
};

U_BOOT_DRIVER(ldpaa_eth) = {
	.name = LDPAA_ETH_DRIVER_NAME,
	.id = UCLASS_ETH,
	.of_match = ldpaa_eth_of_ids,
	.of_to_plat = ldpaa_eth_of_to_plat,
	.bind = ldpaa_eth_bind,
	.probe = ldpaa_eth_probe,
	.ops = &ldpaa_eth_ops,
	.priv_auto = sizeof(struct ldpaa_eth_priv),
	.plat_auto = sizeof(struct eth_pdata),
};