1/*
2 * @TAG(OTHER_GPL)
3 */
4// SPDX-License-Identifier: GPL-2.0
5/*
6 * Copyright (c) 2016, NVIDIA CORPORATION.
7 *
8 * Portions based on U-Boot's rtl8169.c.
9 */
10
11/*
12 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of
13 * Service) IP block. The IP supports multiple options for bus type, clocking/
14 * reset structure, and feature list.
15 *
16 * The driver is written such that generic core logic is kept separate from
17 * configuration-specific logic. Code that interacts with configuration-
18 * specific resources is split out into separate functions to avoid polluting
19 * common code. If/when this driver is enhanced to support multiple
20 * configurations, the core code should be adapted to call all configuration-
21 * specific functions through function pointers, with the definition of those
22 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data
23 * field.
24 *
25 * The following configurations are currently supported:
26 * tegra186:
27 *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an
28 *    AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and
29 *    supports a single RGMII PHY. This configuration also has SW control over
30 *    all clock and reset signals to the HW block.
31 */
32
33#include "miiphy.h"
34#include "net.h"
35#include "phy.h"
36
37#include "wait_bit.h"
38
39#include <platsupport/clock.h>
40#include <platsupport/io.h>
41#include <platsupportports/plat/gpio.h>
42#include <platsupport/gpio.h>
43#include <platsupport/reset.h>
44
45#include "../tx2.h"
46#include "tx2_configs.h"
47
48#include <string.h>
49#include <ethdrivers/helpers.h>
50
51#include "dwc_eth_qos.h"
52void eqos_dma_disable_rxirq(struct tx2_eth_data *dev)
53{
54    struct eqos_priv *eqos = (struct eqos_priv *)dev->eth_dev;
55    uint32_t regval;
56
57    regval = eqos->dma_regs->ch0_dma_ie;
58    regval &= ~DWCEQOS_DMA_CH0_IE_RIE;
59    eqos->dma_regs->ch0_dma_ie = regval;
60}
61
62void eqos_dma_enable_rxirq(struct tx2_eth_data *dev)
63{
64    struct eqos_priv *eqos = (struct eqos_priv *)dev->eth_dev;
65    uint32_t regval;
66
67    regval = eqos->dma_regs->ch0_dma_ie;
68    regval |= DWCEQOS_DMA_CH0_IE_RIE;
69    eqos->dma_regs->ch0_dma_ie = regval;
70}
71
72void eqos_dma_disable_txirq(struct tx2_eth_data *dev)
73{
74    struct eqos_priv *eqos = (struct eqos_priv *)dev->eth_dev;
75    uint32_t regval;
76
77    regval = eqos->dma_regs->ch0_dma_ie;
78    regval &= ~DWCEQOS_DMA_CH0_IE_TIE;
79    eqos->dma_regs->ch0_dma_ie = regval;
80}
81
82void eqos_dma_enable_txirq(struct tx2_eth_data *dev)
83{
84    struct eqos_priv *eqos = (struct eqos_priv *)dev->eth_dev;
85    uint32_t regval;
86
87    regval = eqos->dma_regs->ch0_dma_ie;
88    regval |= DWCEQOS_DMA_CH0_IE_TIE;
89    eqos->dma_regs->ch0_dma_ie = regval;
90}
91
/*
 * Acknowledge the channel-0 receive interrupt and advance the RX descriptor
 * tail pointer so the DMA knows how far it may fill the ring.
 *
 * NOTE(review): DMA status bits are typically write-1-to-clear; the |=
 * below performs a read-modify-write, which re-writes every status bit
 * that happened to be set — confirm this matches the W1C semantics of
 * REG_DWCEQOS_DMA_CH0_STA, otherwise it may ack unrelated events too.
 */
void eqos_set_rx_tail_pointer(struct tx2_eth_data *dev)
{
    struct eqos_priv *eqos = (struct eqos_priv *)dev->eth_dev;
    uint32_t *dma_status = (uint32_t *)(eqos->regs + REG_DWCEQOS_DMA_CH0_STA);
    *dma_status |= DWCEQOS_DMA_CH0_IS_RI;
    /* Number of descriptors currently handed to hardware. */
    size_t num_buffers_in_ring = dev->rx_size - dev->rx_remain;

    if (num_buffers_in_ring > 0) {
        /* NOTE(review): (rdh + num_buffers_in_ring) is not taken modulo
         * rx_size, so near the end of the ring this physical address can
         * point past the last descriptor — verify the HW treats the tail
         * pointer as exclusive/wrapping before changing anything. */
        uintptr_t last_rx_desc = (dev->rx_ring_phys + ((dev->rdh + num_buffers_in_ring) * sizeof(struct eqos_desc)));
        eqos->dma_regs->ch0_rxdesc_tail_pointer = last_rx_desc;
    }
}
104
/*
 * Demultiplex a channel-0 DMA interrupt.
 *
 * @dev: driver state (holds the eqos register mapping).
 * @irq: raw IRQ number (currently unused; channel 0 is assumed).
 *
 * Returns a bitmask of TX_IRQ and/or RX_IRQ describing which events were
 * pending, or 0 if neither was. The pending status bits are acknowledged
 * before returning.
 */
int eqos_handle_irq(struct tx2_eth_data *dev, int irq)
{
    struct eqos_priv *eqos = (struct eqos_priv *)dev->eth_dev;

    /* NOTE(review): the top-level DMA interrupt status is read through
     * dma_control[0]; presumably that array slot aliases the DMA_ISR
     * register — confirm against the layout in dwc_eth_qos.h. */
    uint32_t cause = eqos->dma_regs->dma_control[0];
    uint32_t *dma_status;
    int ret = 0;

    if (cause & DWCEQOS_DMA_IS_DC0IS) {
        dma_status = (uint32_t *)(eqos->regs + REG_DWCEQOS_DMA_CH0_STA);

        /* Transmit Interrupt */
        if (*dma_status & DWCEQOS_DMA_CH0_IS_TI) {
            ret |= TX_IRQ;
        }

        /* Receive Interrupt */
        if (*dma_status & DWCEQOS_DMA_CH0_IS_RI) {
            ret |= RX_IRQ;
        }

        /* Ack: writing the value just read back clears the set bits
         * (write-1-to-clear register). */
        *dma_status = *dma_status;
    }

    return ret;
}
132
/*
 * Poll (up to ~1s) for the GB (busy) bit of the MAC MDIO address register
 * to clear, i.e. for any in-flight MDIO transaction to finish.
 * Returns 0 when idle, or the wait_for_bit_le32() error on timeout.
 */
static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
{
    return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
                             EQOS_MAC_MDIO_ADDRESS_GB, false,
                             1000000, true);
}
139
/*
 * Read a PHY register over the MAC's MDIO interface.
 *
 * @bus:        mii bus; bus->priv is the eqos driver state.
 * @mdio_addr:  PHY address on the bus.
 * @mdio_devad: device address (unused — clause-22 style access).
 * @mdio_reg:   register number to read.
 *
 * Returns the (non-negative) 16-bit register value on success, or a
 * negative error if the bus never went idle.
 */
static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
                          int mdio_reg)
{
    struct eqos_priv *eqos = bus->priv;
    uint32_t val;
    int ret;

    ret = eqos_mdio_wait_idle(eqos);
    if (ret) {
        ZF_LOGF("MDIO not idle at entry");
        return ret;
    }

    /* Keep only the skip-address/clause-45 bits, then program the PHY
     * address, register, MDC clock range, READ opcode, and set GB (busy)
     * to start the transaction. */
    val = eqos->mac_regs->mdio_address;
    val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
           EQOS_MAC_MDIO_ADDRESS_C45E;
    val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
           (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
           (eqos->config->config_mac_mdio <<
            EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
           (EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
            EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
           EQOS_MAC_MDIO_ADDRESS_GB;
    eqos->mac_regs->mdio_address = val;

    /* Give the transaction time to start before polling for completion. */
    udelay(eqos->config->mdio_wait);

    ret = eqos_mdio_wait_idle(eqos);
    if (ret) {
        ZF_LOGF("MDIO read didn't complete");
        return ret;
    }

    /* The 16-bit result lives in the low half of the data register. */
    val = eqos->mac_regs->mdio_data;
    val &= EQOS_MAC_MDIO_DATA_GD_MASK;

    return val;
}
178
179static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
180                           int mdio_reg, u16 mdio_val)
181{
182    struct eqos_priv *eqos = bus->priv;
183    u32 val;
184    int ret;
185
186    ret = eqos_mdio_wait_idle(eqos);
187    if (ret) {
188        ZF_LOGF("MDIO not idle at entry");
189        return ret;
190    }
191
192    writel(mdio_val, &eqos->mac_regs->mdio_data);
193
194    val = readl(&eqos->mac_regs->mdio_address);
195    val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
196           EQOS_MAC_MDIO_ADDRESS_C45E;
197    val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
198           (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
199           (eqos->config->config_mac_mdio <<
200            EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
201           (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE <<
202            EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
203           EQOS_MAC_MDIO_ADDRESS_GB;
204    writel(val, &eqos->mac_regs->mdio_address);
205
206    udelay(eqos->config->mdio_wait);
207
208    ret = eqos_mdio_wait_idle(eqos);
209    if (ret) {
210        ZF_LOGF("MDIO read didn't complete");
211        return ret;
212    }
213
214    return 0;
215}
216
/*
 * Acquire and ungate all clocks the EQOS block needs on Tegra186:
 * the AXI CBB slave bus, the EQOS AXI master, RX, PTP reference, and TX
 * clocks. Clock handles are cached in *eqos for later rate queries/changes.
 *
 * Returns 0 on success, -ENODEV if a clock cannot be looked up, or -EIO
 * if a clock gate cannot be enabled. On failure, clocks enabled so far
 * are deliberately left running (no rollback is attempted here).
 */
static int eqos_start_clks_tegra186(struct eqos_priv *eqos)
{
    int ret;

    assert(clock_sys_valid(eqos->clock_sys));

    /* Slave (register) bus clock — also used as the tick clock source. */
    eqos->clk_slave_bus = clk_get_clock(eqos->clock_sys, CLK_AXI_CBB);
    if (eqos->clk_slave_bus == NULL) {
        ZF_LOGE("clk_get_clock failed CLK_SLAVE_BUS");
        return -ENODEV;
    }
    ret = clk_gate_enable(eqos->clock_sys, CLK_GATE_AXI_CBB, CLKGATE_ON);
    if (ret) {
        ZF_LOGE("Failed to enable CLK_GATE_AXI_CBB") ;
        return -EIO;
    }

    /* Master (DMA) bus clock. */
    ret = clk_gate_enable(eqos->clock_sys, CLK_GATE_EQOS_AXI, CLKGATE_ON);
    if (ret) {
        ZF_LOGE("Failed to enable CLK_GATE_EQOS_AXI");
        return -EIO;
    }

    /* Receive-path clock. */
    eqos->clk_rx = clk_get_clock(eqos->clock_sys, CLK_EQOS_RX_INPUT);
    if (eqos->clk_rx == NULL) {
        ZF_LOGE("clk_get_clock failed CLK_RX");
        return -ENODEV;
    }
    ret = clk_gate_enable(eqos->clock_sys, CLK_GATE_EQOS_RX, CLKGATE_ON);
    if (ret) {
        ZF_LOGE("Failed to enable CLK_GATE_EQOS_RX");
        return -EIO;
    }

    /* PTP timestamping reference clock. */
    eqos->clk_ptp_ref = clk_get_clock(eqos->clock_sys, CLK_EQOS_PTP_REF);
    if (eqos->clk_ptp_ref == NULL) {
        ZF_LOGE("clk_get_clock failed CLK_EQOS_PTP_REF");
        return -ENODEV;
    }
    ret = clk_gate_enable(eqos->clock_sys, CLK_GATE_EQOS_PTP_REF, CLKGATE_ON);
    if (ret) {
        ZF_LOGE("Failed to enable CLK_GATE_EQOS_PTP_REF");
        return -EIO;
    }

    /* Transmit-path clock; its rate is retuned on link speed changes
     * by eqos_set_tx_clk_speed_tegra186(). */
    eqos->clk_tx = clk_get_clock(eqos->clock_sys, CLK_EQOS_TX);
    if (eqos->clk_tx == NULL) {
        ZF_LOGE("clk_get_clock failed CLK_TX");
        return -ENODEV;
    }
    ret = clk_gate_enable(eqos->clock_sys, CLK_GATE_EQOS_TX, CLKGATE_ON);
    if (ret) {
        ZF_LOGE("Failed to enable CLK_GATE_EQOS_TX");
        return -EIO;
    }

    return 0;
}
275
/*
 * Run the Tegra186 automatic pad (drive strength) calibration for the
 * RGMII pins. Required after reset and whenever the link comes up at
 * 100/1000 Mbit. Returns 0 on success, or the wait error if calibration
 * never starts or never finishes.
 */
static int eqos_calibrate_pads_tegra186(struct eqos_priv *eqos)
{
    int ret;

    /* Power up the pad input stage for the duration of calibration. */
    eqos->tegra186_regs->sdmemcomppadctrl |= (EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

    udelay(1);

    eqos->tegra186_regs->auto_cal_config |= (EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);

    /* First wait for the engine to report ACTIVE (calibration started)... */
    ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
                            EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
    if (ret) {
        ZF_LOGE("calibrate didn't start");
        goto failed;
    }

    /* ...then for ACTIVE to clear again (calibration done). */
    ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
                            EQOS_AUTO_CAL_STATUS_ACTIVE, false, 100, false);
    if (ret) {
        ZF_LOGE("calibrate didn't finish");
        goto failed;
    }

    ret = 0;

/* Success deliberately falls through: the pad power bit must be cleared
 * on both the success and failure paths. */
failed:
    eqos->tegra186_regs->sdmemcomppadctrl &= ~(EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

    return ret;
}
307
308static int eqos_disable_calibration_tegra186(struct eqos_priv *eqos)
309{
310
311    eqos->tegra186_regs->auto_cal_config &= ~(EQOS_AUTO_CAL_CONFIG_ENABLE);
312
313    return 0;
314}
315
316static UNUSED freq_t eqos_get_tick_clk_rate_tegra186(struct eqos_priv *eqos)
317{
318    return clk_get_freq(eqos->clk_slave_bus);
319}
320
321
322static int eqos_set_full_duplex(struct eqos_priv *eqos)
323{
324
325    eqos->mac_regs->configuration |= (EQOS_MAC_CONFIGURATION_DM);
326
327    return 0;
328}
329
330static int eqos_set_half_duplex(struct eqos_priv *eqos)
331{
332
333    eqos->mac_regs->configuration &= ~(EQOS_MAC_CONFIGURATION_DM);
334
335    /* WAR: Flush TX queue when switching to half-duplex */
336    eqos->mtl_regs->txq0_operation_mode |= (EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);
337
338    return 0;
339}
340
341static int eqos_set_gmii_speed(struct eqos_priv *eqos)
342{
343
344    eqos->mac_regs->configuration &= ~(EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);
345
346    return 0;
347}
348
349static int eqos_set_mii_speed_100(struct eqos_priv *eqos)
350{
351
352    eqos->mac_regs->configuration |=
353        (EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);
354
355    return 0;
356}
357
358static int eqos_set_mii_speed_10(struct eqos_priv *eqos)
359{
360
361    eqos->mac_regs->configuration &= ~(EQOS_MAC_CONFIGURATION_FES);
362    eqos->mac_regs->configuration |= (EQOS_MAC_CONFIGURATION_PS);
363
364    return 0;
365}
366
367static int eqos_set_tx_clk_speed_tegra186(struct eqos_priv *eqos)
368{
369    ulong rate;
370    int ret;
371
372    switch (eqos->phy->speed) {
373    case SPEED_1000:
374        rate = 125 * 1000 * 1000;
375        break;
376    case SPEED_100:
377        rate = 25 * 1000 * 1000;
378        break;
379    case SPEED_10:
380        rate = 2.5 * 1000 * 1000;
381        break;
382    default:
383        ZF_LOGE("invalid speed %d", eqos->phy->speed);
384        return -EINVAL;
385    }
386
387    ret = clk_set_freq(eqos->clk_tx, rate);
388
389    if (ret < 0) {
390        ZF_LOGE("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
391        return ret;
392    }
393
394    return 0;
395}
396
/*
 * Reprogram the MAC, TX clock, and pad calibration to match the duplex
 * and speed the PHY negotiated. Must be called after phy_startup() has
 * established a link. Returns 0 on success or a negative error.
 */
static int eqos_adjust_link(struct eqos_priv *eqos)
{
    int ret;
    bool en_calibration;


    /* Duplex first: DM bit in the MAC configuration register. */
    if (eqos->phy->duplex) {
        ret = eqos_set_full_duplex(eqos);
    } else {
        ret = eqos_set_half_duplex(eqos);
    }
    if (ret < 0) {
        return ret;
    }

    /* Speed next; pad calibration is only needed at 100/1000 Mbit. */
    switch (eqos->phy->speed) {
    case SPEED_1000:
        en_calibration = true;
        ret = eqos_set_gmii_speed(eqos);
        break;
    case SPEED_100:

        en_calibration = true;
        ret = eqos_set_mii_speed_100(eqos);
        break;
    case SPEED_10:

        en_calibration = false;
        ret = eqos_set_mii_speed_10(eqos);
        break;
    default:
        return -EINVAL;
    }
    if (ret < 0) {
        return ret;
    }

    ret = eqos_set_tx_clk_speed_tegra186(eqos);
    if (ret < 0) {
        ZF_LOGE("eqos_set_tx_clk_speed() failed: %d", ret);
        return ret;
    }

    if (en_calibration) {
        ret = eqos_calibrate_pads_tegra186(eqos);
        if (ret < 0) {
            ZF_LOGE("eqos_calibrate_pads() failed: %d", ret);
            return ret;
        }
    } else {
        ret = eqos_disable_calibration_tegra186(eqos);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
455
/*
 * Queue one packet for transmission on DMA channel 0.
 *
 * @dev:    driver state; dev->tdt indexes the next free TX descriptor.
 * @packet: physical address of the frame (caller guarantees DMA-visible).
 * @length: frame length in bytes.
 *
 * Fills the descriptor, publishes it to hardware by setting OWN (after a
 * full barrier so the descriptor body is visible first), then advances the
 * TX tail pointer. Always returns 0 — the caller must ensure ring space.
 */
int eqos_send(struct tx2_eth_data *dev, void *packet, int length)
{
    struct eqos_priv *eqos = (struct eqos_priv *)dev->eth_dev;
    volatile struct eqos_desc *tx_desc;
    /* Request a TX-complete interrupt only every 32nd descriptor to
     * limit interrupt load. */
    uint32_t ioc = 0;
    if (dev->tdt % 32 == 0) {
        ioc = EQOS_DESC2_IOC;
    }
    tx_desc = &(dev->tx_ring[dev->tdt]);

    tx_desc->des0 = (uintptr_t)packet;
    tx_desc->des1 = 0;
    tx_desc->des2 = ioc | length;
    /* Single-buffer frame: both first- and last-descriptor flags. */
    tx_desc->des3 = EQOS_DESC3_FD | EQOS_DESC3_LD | length;

    __sync_synchronize();

    tx_desc->des3 |= EQOS_DESC3_OWN;

    /* NOTE(review): this takes the *virtual* address of ring slot tdt+1
     * and then adds another descriptor size (i.e. points at tdt+2), with
     * no wrap at the ring end — the tail pointer register normally wants
     * a physical address. Verify against the ring mapping before use. */
    eqos->dma_regs->ch0_txdesc_tail_pointer = (uintptr_t)(&(dev->tx_ring[dev->tdt + 1])) +
                                              sizeof(struct eqos_desc);

    return 0;
}
480
/* Static configuration for the Tegra186 instantiation of the EQOS IP. */
static const struct eqos_config eqos_tegra186_config = {
    .reg_access_always_ok = false,                       /* registers need clocks/resets first */
    .mdio_wait = 10,                                     /* delay (us) after kicking an MDIO op */
    .swr_wait = 10,                                      /* timeout for DMA soft reset to clear */
    .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, /* RX queue 0 enabled, DCB mode */
    .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35,   /* MDC clock range for 20-35 MHz CSR clock */
};
488
/*
 * Pulse the PHY reset GPIO and then the EQOS block reset.
 *
 * NOTE(review): gpio_set() is treated as "assert" and gpio_clr() as
 * "deassert" here (per the log messages) — confirm the reset line's
 * polarity on this board matches that assumption.
 *
 * Returns 0 on success; failures are fatal (ZF_LOGF) in practice.
 */
static int eqos_start_resets_tegra186(struct eqos_priv *eqos)
{
    int ret;

    ret = gpio_set(&eqos->gpio);
    if (ret < 0) {
        ZF_LOGF("dm_gpio_set_value(phy_reset, assert) failed: %d", ret);
        return ret;
    }

    /* Hold the PHY in reset briefly before releasing it. */
    udelay(2);

    ret = gpio_clr(&eqos->gpio);
    if (ret < 0) {
        ZF_LOGF("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret);
        return ret;
    }

    ret = reset_sys_assert(eqos->reset_sys, RESET_EQOS);
    if (ret < 0) {
        ZF_LOGF("reset_assert() failed: %d", ret);
        return ret;
    }

    /* Keep the block reset asserted briefly before releasing it. */
    udelay(2);

    ret = reset_sys_deassert(eqos->reset_sys, RESET_EQOS);
    if (ret < 0) {
        ZF_LOGF("reset_deassert() failed: %d", ret);
        return ret;
    }

    return 0;
}
523
/*
 * Full bring-up of the EQOS controller: clocks, resets, pad calibration,
 * PHY connect/config/startup, link adjustment, MTL queue configuration,
 * MAC address / flow-control programming, and finally DMA descriptor-ring
 * setup and start. Returns 0 on success, negative error otherwise.
 * The sequence and its ordering follow the U-Boot tegra186 eqos driver.
 */
int eqos_start(struct tx2_eth_data *d)
{
    struct eqos_priv *eqos = (struct eqos_priv *)d->eth_dev;
    int ret;
    u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;

    eqos->reg_access_ok = true;
    uint32_t *dma_ie;

    ret = eqos_start_clks_tegra186(eqos);
    if (ret) {
        ZF_LOGE("eqos_start_clks_tegra186 failed");
        goto err;
    }

    ret = eqos_start_resets_tegra186(eqos);
    if (ret) {
        ZF_LOGE("eqos_start_resets_tegra186 failed");
        goto err_stop_clks;
    }

    udelay(10);

    /* Wait for the DMA soft reset (triggered by the block reset) to clear. */
    ret = wait_for_bit_le32(&eqos->dma_regs->mode,
                            EQOS_DMA_MODE_SWR, false,
                            eqos->config->swr_wait, false);
    if (ret) {
        ZF_LOGE("EQOS_DMA_MODE_SWR stuck");
        goto err_stop_resets;
    }

    ret = eqos_calibrate_pads_tegra186(eqos);
    if (ret < 0) {
        ZF_LOGE("eqos_calibrate_pads() failed: %d", ret);
        goto err_stop_resets;
    }

    /*
     * if PHY was already connected and configured,
     * don't need to reconnect/reconfigure again
     */
    if (!eqos->phy) {
        eqos->phy = phy_connect(eqos->mii, 0, NULL, PHY_INTERFACE_MODE_MII);
        if (!eqos->phy) {
            ZF_LOGE("phy_connect() failed");
            goto err_stop_resets;
        }
        ret = phy_config(eqos->phy);
        if (ret < 0) {
            ZF_LOGE("phy_config() failed: %d", ret);
            goto err_shutdown_phy;
        }
    }
    ZF_LOGF_IF(!eqos->phy, "For some reason the phy is not on????");

    ret = phy_startup(eqos->phy);
    if (ret < 0) {
        ZF_LOGE("phy_startup() failed: %d", ret);
        goto err_shutdown_phy;
    }

    if (!eqos->phy->link) {
        ZF_LOGE("No link");
        goto err_shutdown_phy;
    }

    /* Match MAC duplex/speed/TX-clock/calibration to the negotiated link. */
    ret = eqos_adjust_link(eqos);
    if (ret < 0) {
        ZF_LOGE("eqos_adjust_link() failed: %d", ret);
        goto err_shutdown_phy;
    }

    /* Configure MTL */

    /* Flush TX queue */
    eqos->mtl_regs->txq0_operation_mode = (EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);

    /* NOTE(review): pointer arithmetic on uint32_t* scales by 4, so this
     * polls byte offset 0x3400, not 0xd00 — confirm which offset the
     * flush-complete poll is actually meant to read. Also has no timeout. */
    while (*((uint32_t *)eqos->regs + 0xd00));
    /* Enable Store and Forward mode for TX */
    eqos->mtl_regs->txq0_operation_mode = (EQOS_MTL_TXQ0_OPERATION_MODE_TSF);
    /* Program Tx operating mode */
    eqos->mtl_regs->txq0_operation_mode |= (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
                                            EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT);
    /* Transmit Queue weight */
    eqos->mtl_regs->txq0_quantum_weight = 0x10;

    /* Enable Store and Forward mode for RX, since no jumbo frame */
    eqos->mtl_regs->rxq0_operation_mode = (EQOS_MTL_RXQ0_OPERATION_MODE_RSF);

    /* Transmit/Receive queue fifo size; use all RAM for 1 queue */
    val = eqos->mac_regs->hw_feature1;
    tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
                 EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
    rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
                 EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

    /*
     * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
     * r/tqs is encoded as (n / 256) - 1.
     */
    tqs = (128 << tx_fifo_sz) / 256 - 1;
    rqs = (128 << rx_fifo_sz) / 256 - 1;

    eqos->mtl_regs->txq0_operation_mode &= ~(EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
                                             EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
    eqos->mtl_regs->txq0_operation_mode |=
        tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT;
    eqos->mtl_regs->rxq0_operation_mode &= ~(EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
                                             EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);
    eqos->mtl_regs->rxq0_operation_mode |=
        rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT;

    /* Flow control used only if each channel gets 4KB or more FIFO */
    if (rqs >= ((4096 / 256) - 1)) {
        u32 rfd, rfa;

        eqos->mtl_regs->rxq0_operation_mode |= (EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

        /*
         * Set Threshold for Activating Flow Contol space for min 2
         * frames ie, (1500 * 1) = 1500 bytes.
         *
         * Set Threshold for Deactivating Flow Contol for space of
         * min 1 frame (frame size 1500bytes) in receive fifo
         */
        if (rqs == ((4096 / 256) - 1)) {
            /*
             * This violates the above formula because of FIFO size
             * limit therefore overflow may occur inspite of this.
             */
            rfd = 0x3;  /* Full-3K */
            rfa = 0x1;  /* Full-1.5K */
        } else if (rqs == ((8192 / 256) - 1)) {
            rfd = 0x6;  /* Full-4K */
            rfa = 0xa;  /* Full-6K */
        } else if (rqs == ((16384 / 256) - 1)) {
            rfd = 0x6;  /* Full-4K */
            rfa = 0x12; /* Full-10K */
        } else {
            rfd = 0x6;  /* Full-4K */
            rfa = 0x1E; /* Full-16K */
        }

        eqos->mtl_regs->rxq0_operation_mode &= ~((EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
                                                  EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
                                                 (EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
                                                  EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
        eqos->mtl_regs->rxq0_operation_mode |= (rfd <<
                                                EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
                                               (rfa <<
                                                EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT);
    }

    /* NOTE(review): raw write to byte offset 0xc30 with magic 0x3020100 —
     * presumably an interrupt-mapping register; confirm against the TRM. */
    dma_ie = (uint32_t *)(eqos->regs + 0xc30);
    *dma_ie = 0x3020100;

    /* Configure MAC, not sure if L4T is the same */
    eqos->mac_regs->rxq_ctrl0 =
        (eqos->config->config_mac <<
         EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

    /* Set TX flow control parameters */
    /* Set Pause Time */
    eqos->mac_regs->q0_tx_flow_ctrl = (0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
    /* Assign priority for RX flow control */
    eqos->mac_regs->rxq_ctrl2 = (1 << EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);

    /* Enable flow control */
    eqos->mac_regs->q0_tx_flow_ctrl |= (EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);

    eqos->mac_regs->rx_flow_ctrl = (EQOS_MAC_RX_FLOW_CTRL_RFE);

    /* Disable giant-packet, watchdog, jabber, and jumbo handling. */
    eqos->mac_regs->configuration &=
        ~(EQOS_MAC_CONFIGURATION_GPSLCE |
          EQOS_MAC_CONFIGURATION_WD |
          EQOS_MAC_CONFIGURATION_JD |
          EQOS_MAC_CONFIGURATION_JE);

    /* PLSEN is set to 1 so that LPI is not initiated */
    // MAC_LPS_PLSEN_WR(1); << this macro below
    /* NOTE(review): the LPS register is reached via unused_0ac[9]; the
     * surrounding masks zero the reserved fields before setting PLSEN. */
    uint32_t v = eqos->mac_regs->unused_0ac[9];
    v = (v & (MAC_LPS_RES_WR_MASK_20)) | (((0) & (MAC_LPS_MASK_20)) << 20);
    v = (v & (MAC_LPS_RES_WR_MASK_10)) | (((0) & (MAC_LPS_MASK_10)) << 10);
    v = (v & (MAC_LPS_RES_WR_MASK_4)) | (((0) & (MAC_LPS_MASK_4)) << 4);
    v = ((v & MAC_LPS_PLSEN_WR_MASK) | ((1 & MAC_LPS_PLSEN_MASK) << 18));
    eqos->mac_regs->unused_0ac[9] = v;

    /* Update the MAC address (low register last commits the filter entry). */
    memcpy(eqos->enetaddr, TX2_DEFAULT_MAC, 6);
    uint32_t val1 = (eqos->enetaddr[5] << 8) | (eqos->enetaddr[4]);
    eqos->mac_regs->address0_high = val1;
    val1 = (eqos->enetaddr[3] << 24) | (eqos->enetaddr[2] << 16) |
           (eqos->enetaddr[1] << 8) | (eqos->enetaddr[0]);
    eqos->mac_regs->address0_low = val1;

    /* Clear assorted config bits, then enable transmitter and receiver. */
    eqos->mac_regs->configuration &= 0xffcfff7c;
    eqos->mac_regs->configuration |=  DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE;

    /* Configure DMA */
    /* Enable OSP mode */
    eqos->dma_regs->ch0_tx_control = EQOS_DMA_CH0_TX_CONTROL_OSP;

    /* RX buffer size. Must be a multiple of bus width */
    eqos->dma_regs->ch0_rx_control = (EQOS_MAX_PACKET_SIZE << EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

    eqos->dma_regs->ch0_control = (EQOS_DMA_CH0_CONTROL_PBLX8);

    /*
     * Burst length must be < 1/2 FIFO size.
     * FIFO size in tqs is encoded as (n / 256) - 1.
     * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
     * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
     */
    pbl = tqs + 1;
    if (pbl > 32) {
        pbl = 32;
    }
    eqos->dma_regs->ch0_tx_control &=
        ~(EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
          EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);
    eqos->dma_regs->ch0_tx_control |= (pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

    eqos->dma_regs->ch0_rx_control &=
        ~(EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
          EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);
    eqos->dma_regs->ch0_rx_control |= (1 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

    /* DMA performance configuration */
    val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
          EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
          EQOS_DMA_SYSBUS_MODE_BLEN8;
    eqos->dma_regs->sysbus_mode = val;

    /* Descriptor ring base addresses (physical) and lengths. */
    eqos->dma_regs->ch0_txdesc_list_haddress = 0;
    eqos->dma_regs->ch0_txdesc_list_address = d->tx_ring_phys;
    eqos->dma_regs->ch0_txdesc_ring_length = EQOS_DESCRIPTORS_TX - 1;

    eqos->dma_regs->ch0_rxdesc_list_haddress = 0;
    eqos->dma_regs->ch0_rxdesc_list_address = d->rx_ring_phys;
    eqos->dma_regs->ch0_rxdesc_ring_length = EQOS_DESCRIPTORS_RX - 1;

    /* Enable the channel interrupts this driver services. */
    eqos->dma_regs->ch0_dma_ie = 0;
    eqos->dma_regs->ch0_dma_ie = DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
                                 DWCEQOS_DMA_CH0_IE_NIE | DWCEQOS_DMA_CH0_IE_AIE |
                                 DWCEQOS_DMA_CH0_IE_FBEE | DWCEQOS_DMA_CH0_IE_RWTE;
    eqos->dma_regs->ch0_dma_rx_int_wd_timer = 120;
    udelay(100);

    /* Start the TX and RX DMA engines. */
    eqos->dma_regs->ch0_tx_control = EQOS_DMA_CH0_TX_CONTROL_ST;
    eqos->dma_regs->ch0_rx_control = EQOS_DMA_CH0_RX_CONTROL_SR;

    /* Cache the physical end of each ring for tail-pointer bookkeeping. */
    eqos->last_rx_desc = (d->rx_ring_phys + ((EQOS_DESCRIPTORS_RX) * (uintptr_t)(sizeof(struct eqos_desc))));
    eqos->last_tx_desc = (d->tx_ring_phys + ((EQOS_DESCRIPTORS_TX) * (uintptr_t)(sizeof(struct eqos_desc))));

    /* Disable MMC event counters */
    *(uint32_t *)(eqos->regs + REG_DWCEQOS_ETH_MMC_CONTROL) |= REG_DWCEQOS_MMC_CNTFREEZ;

    return 0;

err_shutdown_phy:
    phy_shutdown(eqos->phy);
err_stop_resets:
    // eqos_stop_resets_tegra186(dev);
err_stop_clks:
    // eqos_stop_clks_tegra186(dev);
err:
    return ret;
}
792
793static int hardware_interface_searcher(void *handler_data, void *interface_instance, char **properties)
794{
795
796    /* For now, just take the first one that appears, note that we pass the
797     * pointer of each individual subsystem as the cookie. */
798    *((void **) handler_data) = interface_instance;
799    return PS_INTERFACE_FOUND_MATCH;
800}
801
802static int tx2_initialise_hardware(struct eqos_priv *eqos)
803{
804    if (!eqos) {
805        ZF_LOGE("eqos is NULL");
806        return -EINVAL;
807    }
808
809    bool found_clock_interface = false;
810    bool found_reset_interface = false;
811    bool found_gpio_interface = false;
812
813    /* Check if a clock, reset, and gpio interface was registered, if not
814     * initialise them ourselves */
815    int error = ps_interface_find(&eqos->tx2_io_ops->interface_registration_ops,
816                                  PS_CLOCK_INTERFACE, hardware_interface_searcher, &eqos->clock_sys);
817    if (!error) {
818        found_clock_interface = true;
819    }
820
821    error = ps_interface_find(&eqos->tx2_io_ops->interface_registration_ops,
822                              PS_RESET_INTERFACE, hardware_interface_searcher, &eqos->reset_sys);
823    if (!error) {
824        found_reset_interface = true;
825    }
826
827    error = ps_interface_find(&eqos->tx2_io_ops->interface_registration_ops,
828                              PS_GPIO_INTERFACE, hardware_interface_searcher, &eqos->gpio_sys);
829    if (!error) {
830        found_gpio_interface = true;
831    }
832
833    if (found_clock_interface && found_reset_interface && found_gpio_interface) {
834        return 0;
835    }
836
837    if (!found_clock_interface) {
838        ZF_LOGW("Did not found a suitable clock interface, going to be initialising our own");
839        error = ps_calloc(&eqos->tx2_io_ops->malloc_ops, 1, sizeof(*(eqos->clock_sys)),
840                          (void **) &eqos->clock_sys);
841        if (error) {
842            /* Too early to be cleaning up anything */
843            return error;
844        }
845        error = clock_sys_init(eqos->tx2_io_ops, eqos->clock_sys);
846        if (error) {
847            goto fail;
848        }
849    }
850
851    if (!found_reset_interface) {
852        ZF_LOGW("Did not found a suitable reset interface, going to be initialising our own");
853        error = ps_calloc(&eqos->tx2_io_ops->malloc_ops, 1, sizeof(*(eqos->reset_sys)),
854                          (void **) &eqos->reset_sys);
855        if (error) {
856            goto fail;
857        }
858        error = reset_sys_init(eqos->tx2_io_ops, NULL, eqos->reset_sys);
859        if (error) {
860            goto fail;
861        }
862    }
863
864    if (!found_gpio_interface) {
865        ZF_LOGW("Did not found a suitable gpio interface, going to be initialising our own");
866        error = ps_calloc(&eqos->tx2_io_ops->malloc_ops, 1, sizeof(*(eqos->gpio_sys)),
867                          (void **) &eqos->gpio_sys);
868        if (error) {
869            goto fail;
870        }
871        error = gpio_sys_init(eqos->tx2_io_ops, eqos->gpio_sys);
872        if (error) {
873            goto fail;
874        }
875    }
876
877    return 0;
878
879fail:
880
881    if (eqos->clock_sys) {
882        ZF_LOGF_IF(ps_free(&eqos->tx2_io_ops->malloc_ops, sizeof(*(eqos->clock_sys)), eqos->clock_sys),
883                   "Failed to clean up the clock interface after a failed initialisation process");
884    }
885
886    if (eqos->reset_sys) {
887        ZF_LOGF_IF(ps_free(&eqos->tx2_io_ops->malloc_ops, sizeof(*(eqos->reset_sys)), eqos->reset_sys),
888                   "Failed to clean up the reset interface after a failed initialisation process");
889    }
890
891    if (eqos->gpio_sys) {
892        ZF_LOGF_IF(ps_free(&eqos->tx2_io_ops->malloc_ops, sizeof(*(eqos->gpio_sys)), eqos->gpio_sys),
893                   "Failed to clean up the gpio interface after a failed initialisation process");
894    }
895
896    return error;
897}
898
899void *tx2_initialise(uintptr_t base_addr, ps_io_ops_t *io_ops)
900{
901    struct eqos_priv *eqos;
902    int ret;
903
904    if (io_ops == NULL) {
905        return NULL;
906    }
907
908    eqos = calloc(1, sizeof(struct eqos_priv));
909    if (eqos == NULL) {
910        free(eqos);
911        return NULL;
912    }
913    eqos->tx2_io_ops = io_ops;
914
915    /* initialise miiphy */
916    miiphy_init();
917
918    /* initialise phy */
919    ret = phy_init();
920    if (ret != 0) {
921        ZF_LOGF("failed to initialise phy");
922    }
923
924    ret = tx2_initialise_hardware(eqos);
925    if (ret) {
926        return NULL;
927    }
928
929    /* initialise the phy reset gpio gpio */
930    ret = eqos->gpio_sys->init(eqos->gpio_sys, GPIO_PM4, GPIO_DIR_OUT, &eqos->gpio);
931    if (ret != 0) {
932        ZF_LOGF("failed to init phy reset gpio pin");
933    }
934
935    eqos->config = &eqos_tegra186_config;
936
937    eqos->regs = base_addr;
938
939    /* allocate register structs and mdio */
940    assert((eqos->regs >> 32) == 0);
941    eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
942    eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
943    eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
944    eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);
945
946    eqos->mii = mdio_alloc();
947    if (!eqos->mii) {
948        ZF_LOGF("Mdio alloc failed");
949        goto err;
950    }
951    eqos->mii->read = eqos_mdio_read;
952    eqos->mii->write = eqos_mdio_write;
953    eqos->mii->priv = eqos;
954    strcpy(eqos->mii->name, "mii\0");
955
956    ret = mdio_register(eqos->mii);
957    if (ret < 0) {
958        ZF_LOGE("Mdio register failed");
959        goto err_free_mdio;
960    }
961
962    return (void *)eqos;
963err_free_mdio:
964    mdio_free(eqos->mii);
965err:
966    ZF_LOGE("Tx2 initialise failed");
967    return NULL;
968}
969