/*
 * Copyright 2017, DornerWorks
 * Copyright 2017, Data61
 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
 * ABN 41 687 119 230.
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(DATA61_GPL)
 */

#include <platsupport/driver_module.h>
#include <platsupport/fdt.h>
#include <ethdrivers/gen_config.h>
#include <ethdrivers/imx6.h>
#include <ethdrivers/raw.h>
#include <ethdrivers/helpers.h>
#include <string.h>
#include <utils/util.h>
#include "enet.h"
#include "ocotp_ctrl.h"
#include "uboot/fec_mxc.h"
#include "uboot/miiphy.h"
#include "uboot/mx6qsabrelite.h"
#include "uboot/micrel.h"
#include "unimplemented.h"

#define DEFAULT_MAC "\x00\x19\xb8\x00\xf0\xa3"

#define BUF_SIZE MAX_PKT_SIZE
#define DMA_ALIGN 32

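/*
 * NOTE: this matches the ENET legacy (8-byte) buffer descriptor layout used by
 * the i.MX FEC/ENET uDMA: a 16-bit data length and a 16-bit status/control
 * halfword packed into the first word, followed by the 32-bit physical buffer
 * address.
 */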
struct descriptor {
    /* NOTE: little endian packing: len before stat */
#if BYTE_ORDER == LITTLE_ENDIAN
    uint16_t len;
    uint16_t stat;
#elif BYTE_ORDER == BIG_ENDIAN
    uint16_t stat;
    uint16_t len;
#else
#error Could not determine endianness
#endif
    uint32_t phys;
};

struct imx6_eth_data {
    struct enet *enet;
    uintptr_t tx_ring_phys;
    uintptr_t rx_ring_phys;
    volatile struct descriptor *tx_ring;
    volatile struct descriptor *rx_ring;
    unsigned int rx_size;
    unsigned int tx_size;
    void **rx_cookies;                   // Array (of rx_size elements) of type 'void *'
    unsigned int rx_remain;
    unsigned int tx_remain;
    void **tx_cookies;
    unsigned int *tx_lengths;
    /* track where the head and tail of the queues are for
     * enqueueing buffers / checking for completions */
    unsigned int rdt, rdh, tdt, tdh;
};
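/*
 * Ring indexing convention: rdt/tdt are the tails where the driver enqueues
 * new descriptors, rdh/tdh the heads where it reaps completions. head == tail
 * means the ring is empty, so the ring can never be filled completely;
 * rx_remain/tx_remain track how many descriptors are still free.
 */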

int setup_iomux_enet(ps_io_ops_t *io_ops);

/* Receive descriptor status */
#define RXD_EMPTY     BIT(15) /* Buffer has no data. Waiting for reception. */
#define RXD_OWN0      BIT(14) /* Receive software ownership. R/W by user */
#define RXD_WRAP      BIT(13) /* Next buffer is found in ENET_RDSR */
#define RXD_OWN1      BIT(12) /* Receive software ownership. R/W by user */
#define RXD_LAST      BIT(11) /* Last buffer in frame. Written by the uDMA. */
#define RXD_MISS      BIT( 8) /* Frame does not match MAC (promiscuous mode) */
#define RXD_BROADCAST BIT( 7) /* frame is a broadcast frame */
#define RXD_MULTICAST BIT( 6) /* frame is a multicast frame */
#define RXD_BADLEN    BIT( 5) /* Incoming frame was larger than RCR[MAX_FL] */
#define RXD_BADALIGN  BIT( 4) /* Frame length does not align to a byte */
#define RXD_CRCERR    BIT( 2) /* The frame has a CRC error */
#define RXD_OVERRUN   BIT( 1) /* FIFO overrun */
#define RXD_TRUNC     BIT( 0) /* Receive frame > TRUNC_FL */

#define RXD_ERROR    (RXD_BADLEN  | RXD_BADALIGN | RXD_CRCERR |\
                      RXD_OVERRUN | RXD_TRUNC)

/* Transmit descriptor status */
#define TXD_READY     BIT(15) /* buffer in use waiting to be transmitted */
#define TXD_OWN0      BIT(14) /* Transmit software ownership. R/W by user */
#define TXD_WRAP      BIT(13) /* Next buffer is found in ENET_TDSR */
#define TXD_OWN1      BIT(12) /* Transmit software ownership. R/W by user */
#define TXD_LAST      BIT(11) /* Last buffer in frame. Written by the user. */
#define TXD_ADDCRC    BIT(10) /* Append a CRC to the end of the frame */
#define TXD_ADDBADCRC BIT( 9) /* Append a bad CRC to the end of the frame */

static void low_level_init(struct eth_driver *driver, uint8_t *mac, int *mtu)
{
    struct imx6_eth_data *dev = (struct imx6_eth_data *)driver->eth_data;
    enet_get_mac(dev->enet, mac);
    *mtu = MAX_PKT_SIZE;
}

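/*
 * Allocate fresh receive buffers from the upper layer and post them to the RX
 * descriptor ring (marked RXD_EMPTY) until either the ring is full or the
 * allocator runs out of buffers, then re-arm RX DMA if there is work pending.
 */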
static void fill_rx_bufs(struct eth_driver *driver)
{
    struct imx6_eth_data *dev = (struct imx6_eth_data *)driver->eth_data;
    __sync_synchronize();
    while (dev->rx_remain > 0) {
        /* request a buffer */
        void *cookie = NULL;
        int next_rdt = (dev->rdt + 1) % dev->rx_size;

        // This fn ptr is either lwip_allocate_rx_buf or lwip_pbuf_allocate_rx_buf (in src/lwip.c)
        uintptr_t phys = driver->i_cb.allocate_rx_buf ? driver->i_cb.allocate_rx_buf(driver->cb_cookie, BUF_SIZE, &cookie) : 0;
        if (!phys) {
            // NOTE: This condition could happen if
            //       CONFIG_LIB_ETHDRIVER_NUM_PREALLOCATED_BUFFERS < CONFIG_LIB_ETHDRIVER_RX_DESC_COUNT
            break;
        }

        dev->rx_cookies[dev->rdt] = cookie;
        dev->rx_ring[dev->rdt].phys = phys;
        dev->rx_ring[dev->rdt].len = 0;

        __sync_synchronize();
        dev->rx_ring[dev->rdt].stat = RXD_EMPTY | (next_rdt == 0 ? RXD_WRAP : 0);
        dev->rdt = next_rdt;
        dev->rx_remain--;
    }
    __sync_synchronize();
    if (dev->rdt != dev->rdh && !enet_rx_enabled(dev->enet)) {
        enet_rx_enable(dev->enet);
    }
}

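/*
 * Mask all interrupt sources, acknowledge any stale events other than the ones
 * we care about, then enable only the RX frame, TX frame and bus error
 * interrupts.
 */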
static void enable_interrupts(struct imx6_eth_data *dev)
{
    struct enet *enet = dev->enet;
    assert(enet);
    enet_enable_events(enet, 0);
    enet_clr_events(enet, (uint32_t) ~(NETIRQ_RXF | NETIRQ_TXF | NETIRQ_EBERR));
    enet_enable_events(enet, (uint32_t)(NETIRQ_RXF | NETIRQ_TXF | NETIRQ_EBERR));
}

static void free_desc_ring(struct imx6_eth_data *dev, ps_dma_man_t *dma_man)
{
    if (dev->rx_ring) {
        dma_unpin_free(dma_man, (void *)dev->rx_ring, sizeof(struct descriptor) * dev->rx_size);
        dev->rx_ring = NULL;
    }
    if (dev->tx_ring) {
        dma_unpin_free(dma_man, (void *)dev->tx_ring, sizeof(struct descriptor) * dev->tx_size);
        dev->tx_ring = NULL;
    }
    if (dev->rx_cookies) {
        free(dev->rx_cookies);
        dev->rx_cookies = NULL;
    }
    if (dev->tx_cookies) {
        free(dev->tx_cookies);
        dev->tx_cookies = NULL;
    }
    if (dev->tx_lengths) {
        free(dev->tx_lengths);
        dev->tx_lengths = NULL;
    }
}

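/*
 * Allocate and pin DMA memory for both descriptor rings plus the bookkeeping
 * arrays, zero every descriptor, and mark the last descriptor of each ring
 * with the WRAP bit so the uDMA loops back to the ring base.
 */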
static int initialize_desc_ring(struct imx6_eth_data *dev, ps_dma_man_t *dma_man)
{
    dma_addr_t rx_ring = dma_alloc_pin(dma_man, sizeof(struct descriptor) * dev->rx_size, 0, DMA_ALIGN);
    if (!rx_ring.phys) {
        LOG_ERROR("Failed to allocate rx_ring");
        return -1;
    }
    dev->rx_ring = rx_ring.virt;
    dev->rx_ring_phys = rx_ring.phys;
    dma_addr_t tx_ring = dma_alloc_pin(dma_man, sizeof(struct descriptor) * dev->tx_size, 0, DMA_ALIGN);
    if (!tx_ring.phys) {
        LOG_ERROR("Failed to allocate tx_ring");
        free_desc_ring(dev, dma_man);
        return -1;
    }
    dev->tx_ring = tx_ring.virt;
    dev->tx_ring_phys = tx_ring.phys;
    ps_dma_cache_clean_invalidate(dma_man, rx_ring.virt, sizeof(struct descriptor) * dev->rx_size);
    ps_dma_cache_clean_invalidate(dma_man, tx_ring.virt, sizeof(struct descriptor) * dev->tx_size);
    dev->rx_cookies = malloc(sizeof(void *) * dev->rx_size);
    dev->tx_cookies = malloc(sizeof(void *) * dev->tx_size);
    dev->tx_lengths = malloc(sizeof(unsigned int) * dev->tx_size);
    if (!dev->rx_cookies || !dev->tx_cookies || !dev->tx_lengths) {
        LOG_ERROR("Failed to allocate descriptor ring bookkeeping arrays");
        /* free_desc_ring() releases whichever of the rings and arrays were
         * successfully allocated and NULLs the pointers, so nothing is freed
         * twice */
        free_desc_ring(dev, dma_man);
        return -1;
    }
    /* Remaining needs to be 2 less than size as we cannot actually enqueue size many descriptors,
     * since then the head and tail pointers would be equal, indicating empty. */
    dev->rx_remain = dev->rx_size - 2;
    dev->tx_remain = dev->tx_size - 2;

    dev->rdt = dev->rdh = dev->tdt = dev->tdh = 0;

    /* zero both rings */
    for (unsigned int i = 0; i < dev->tx_size; i++) {
        dev->tx_ring[i] = (struct descriptor) {
            .phys = 0,
            .len = 0,
            .stat = (i + 1 == dev->tx_size) ? TXD_WRAP : 0
        };
    }
    for (unsigned int i = 0; i < dev->rx_size; i++) {
        dev->rx_ring[i] = (struct descriptor) {
            .phys = 0,
            .len = 0,
            .stat = (i + 1 == dev->rx_size) ? RXD_WRAP : 0
        };
    }
    __sync_synchronize();

    return 0;
}

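/*
 * Walk the RX ring from rdh towards rdt, stopping at the first descriptor the
 * uDMA has not written back yet (still RXD_EMPTY), and hand each completed
 * buffer back to the upper layer via the rx_complete callback.
 */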
static void complete_rx(struct eth_driver *eth_driver)
{
    struct imx6_eth_data *dev = (struct imx6_eth_data *)eth_driver->eth_data;
    unsigned int rdt = dev->rdt;
    while (dev->rdh != rdt) {
        unsigned int status = dev->rx_ring[dev->rdh].stat;
        /* Ensure no memory references get ordered before we checked the descriptor was written back */
        __sync_synchronize();
        if (status & RXD_EMPTY) {
            /* not complete yet */
            break;
        }
        void *cookie = dev->rx_cookies[dev->rdh];
        unsigned int len = dev->rx_ring[dev->rdh].len;
        /* update rdh */
        dev->rdh = (dev->rdh + 1) % dev->rx_size;
        dev->rx_remain++;
        /* Give the buffers back */
        eth_driver->i_cb.rx_complete(eth_driver->cb_cookie, 1, &cookie, &len);
    }
    if (dev->rdt != dev->rdh && !enet_rx_enabled(dev->enet)) {
        enet_rx_enable(dev->enet);
    }
}

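/*
 * Reap completed transmissions: a frame occupies tx_lengths[tdh] consecutive
 * descriptors and is only finished once none of them still has TXD_READY set,
 * at which point the frame's cookie is returned through tx_complete.
 */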
static void complete_tx(struct eth_driver *driver)
{
    struct imx6_eth_data *dev = (struct imx6_eth_data *)driver->eth_data;
    while (dev->tdh != dev->tdt) {
        unsigned int i;
        for (i = 0; i < dev->tx_lengths[dev->tdh]; i++) {
            if (dev->tx_ring[(i + dev->tdh) % dev->tx_size].stat & TXD_READY) {
                /* not all parts complete */
                return;
            }
        }
        /* do not let memory loads happen before our checking of the descriptor write back */
        __sync_synchronize();
        /* increase where we believe tdh to be */
        void *cookie = dev->tx_cookies[dev->tdh];
        dev->tx_remain += dev->tx_lengths[dev->tdh];
        dev->tdh = (dev->tdh + dev->tx_lengths[dev->tdh]) % dev->tx_size;
        /* give the buffer back */
        driver->i_cb.tx_complete(driver->cb_cookie, cookie);
    }
    if (dev->tdh != dev->tdt && !enet_tx_enabled(dev->enet)) {
        enet_tx_enable(dev->enet);
    }
}

static void print_state(struct eth_driver *eth_driver)
{
    struct imx6_eth_data *eth_data = (struct imx6_eth_data *)eth_driver->eth_data;
    enet_print_mib(eth_data->enet);
}

static void handle_irq(struct eth_driver *driver, int irq)
{
    struct imx6_eth_data *eth_data = (struct imx6_eth_data *)driver->eth_data;
    struct enet *enet = eth_data->enet;
    uint32_t e;
    e = enet_clr_events(enet, NETIRQ_RXF | NETIRQ_TXF | NETIRQ_EBERR);
    if (e & NETIRQ_TXF) {
        complete_tx(driver);
    }
    if (e & NETIRQ_RXF) {
        complete_rx(driver);
        fill_rx_bufs(driver);
    }
    if (e & NETIRQ_EBERR) {
        printf("Error: System bus/uDMA\n");
        //ethif_print_state(netif_get_eth_driver(netif));
        assert(0);
        while (1);
    }
}

/* This is a platsupport IRQ interface handler wrapper for handle_irq() */
static void eth_irq_handle(void *data, ps_irq_acknowledge_fn_t acknowledge_fn, void *ack_data)
{
    ZF_LOGF_IF(data == NULL, "Passed in NULL for the data");
    struct eth_driver *driver = data;

    /* handle_irq() does not use the IRQ number, so just pass 0 */
    handle_irq(driver, 0);

    int error = acknowledge_fn(ack_data);
    if (error) {
        LOG_ERROR("Failed to acknowledge the Ethernet device's IRQ");
    }
}

static void raw_poll(struct eth_driver *driver)
{
    complete_rx(driver);
    complete_tx(driver);
    fill_rx_bufs(driver);
}

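/*
 * Queue a frame made up of 'num' scatter-gather fragments: each fragment gets
 * its own TX descriptor, TXD_READY is written only after the address/length
 * (and a barrier) so the uDMA never sees a half-written descriptor, and the
 * final fragment is tagged TXD_LAST | TXD_ADDCRC before TX is (re)enabled.
 */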
static int raw_tx(struct eth_driver *driver, unsigned int num, uintptr_t *phys, unsigned int *len, void *cookie)
{
    struct imx6_eth_data *dev = (struct imx6_eth_data *)driver->eth_data;
    struct enet *enet = dev->enet;
    /* Ensure we have room */
    if (dev->tx_remain < num) {
        /* try and complete some */
        complete_tx(driver);
        if (dev->tx_remain < num) {
            return ETHIF_TX_FAILED;
        }
    }
    unsigned int i;
    __sync_synchronize();
    for (i = 0; i < num; i++) {
        unsigned int ring = (dev->tdt + i) % dev->tx_size;
        dev->tx_ring[ring].len = len[i];
        dev->tx_ring[ring].phys = phys[i];
        __sync_synchronize();
        dev->tx_ring[ring].stat = TXD_READY | (ring + 1 == dev->tx_size ? TXD_WRAP : 0)
                                  | (i + 1 == num ? (TXD_ADDCRC | TXD_LAST) : 0);
    }
    dev->tx_cookies[dev->tdt] = cookie;
    dev->tx_lengths[dev->tdt] = num;
    dev->tdt = (dev->tdt + num) % dev->tx_size;
    dev->tx_remain -= num;
    __sync_synchronize();
    if (!enet_tx_enabled(enet)) {
        enet_tx_enable(enet);
    }

    return ETHIF_TX_ENQUEUED;
}

static void get_mac(struct eth_driver *driver, uint8_t *mac)
{
    struct enet *enet = ((struct imx6_eth_data *)driver->eth_data)->enet;
    enet_get_mac(enet, (unsigned char *)mac);
}

static struct raw_iface_funcs iface_fns = {
    .raw_handleIRQ = handle_irq,
    .print_state = print_state,
    .low_level_init = low_level_init,
    .raw_tx = raw_tx,
    .raw_poll = raw_poll,
    .get_mac = get_mac
};

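/*
 * Bring up the i.MX6 ENET: set up the descriptor rings, read the MAC address
 * from the OCOTP eFuses (falling back to DEFAULT_MAC), configure the IOMUX,
 * MII layer and PHY, then enable the controller, prime the RX ring and unmask
 * the interrupts.
 */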
int ethif_imx6_init(struct eth_driver *eth_driver, ps_io_ops_t io_ops, void *config)
{
    struct ocotp *ocotp = NULL;
    int err;
    struct enet *enet;
    struct imx6_eth_data *eth_data = NULL;
    uint8_t mac[6];

    if (config == NULL) {
        LOG_ERROR("Cannot get platform info; passed-in config pointer is NULL");
        goto error;
    }

    struct arm_eth_plat_config *plat_config = (struct arm_eth_plat_config *)config;

    /* zero-initialise so the error path can safely call free_desc_ring() on a
     * partially initialised structure */
    eth_data = (struct imx6_eth_data *)calloc(1, sizeof(struct imx6_eth_data));
    if (eth_data == NULL) {
        LOG_ERROR("Failed to allocate eth data struct");
        goto error;
    }

    eth_data->tx_size = CONFIG_LIB_ETHDRIVER_TX_DESC_COUNT;
    eth_data->rx_size = CONFIG_LIB_ETHDRIVER_RX_DESC_COUNT;
    eth_driver->eth_data = eth_data;
    eth_driver->dma_alignment = DMA_ALIGN;
    eth_driver->i_fn = iface_fns;

    err = initialize_desc_ring(eth_data, &io_ops.dma_manager);
    if (err) {
        LOG_ERROR("Failed to allocate descriptor rings");
        goto error;
    }

    /* initialise the eFuse controller so we can get a MAC address */
    ocotp = ocotp_init(&io_ops.io_mapper);
    if (!ocotp) {
        LOG_ERROR("Failed to initialize ocotp");
        goto error;
    }
    /* Initialise ethernet pins */
    err = setup_iomux_enet(&io_ops);
    if (err) {
        LOG_ERROR("Failed to setup iomux enet");
        goto error;
    }
    /* Initialise the phy library */
    miiphy_init();
    /* Initialise the phy */
    phy_micrel_init();
    /* Initialise the RGMII interface */
    enet = enet_init((struct desc_data) {
        .tx_phys = eth_data->tx_ring_phys, .rx_phys = eth_data->rx_ring_phys, .rx_bufsize = BUF_SIZE
    }, &io_ops);
    if (!enet) {
        LOG_ERROR("Failed to initialize RGMII");
        /* currently no way to properly clean up enet */
        assert(!"enet cannot be cleaned up");
        goto error;
    }
    eth_data->enet = enet;

    if (plat_config->prom_mode) {
        enet_prom_enable(enet);
    } else {
        enet_prom_disable(enet);
    }

    if (ocotp == NULL || ocotp_get_mac(ocotp, mac)) {
        memcpy(mac, DEFAULT_MAC, 6);
    }

    enet_set_mac(enet, mac);

    /* Connect the phy to the ethernet controller */
    unsigned phy_mask = 0xffffffff;
    if (fec_init(phy_mask, enet)) {
        LOG_ERROR("Failed to initialize fec");
        goto error;
    }

    /* Start the controller */
    enet_enable(enet);

    fill_rx_bufs(eth_driver);
    enable_interrupts(eth_data);

    /* done */
    return 0;
error:
    if (ocotp) {
        ocotp_free(ocotp, &io_ops.io_mapper);
    }
    if (eth_data) {
        /* release the descriptor rings before freeing the struct that owns them */
        free_desc_ring(eth_data, &io_ops.dma_manager);
        free(eth_data);
    }
    return -1;
}

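/* Arguments shared between the FDT register/IRQ walk callbacks below: the
 * mapped device registers, the IRQ id handed back by ps_irq_register(), and
 * the io_ops/driver needed to perform the mapping and registration. */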
typedef struct {
    void *addr;
    ps_io_ops_t *io_ops;
    struct eth_driver *eth_driver;
    int irq_id;
} callback_args_t;

static int allocate_register_callback(pmem_region_t pmem, unsigned curr_num, size_t num_regs, void *token)
{
    if (token == NULL) {
        ZF_LOGE("Expected a token!");
        return -EINVAL;
    }

    callback_args_t *args = token;
    if (curr_num == 0) {
        args->addr = ps_pmem_map(args->io_ops, pmem, false, PS_MEM_NORMAL);
        if (!args->addr) {
            ZF_LOGE("Failed to map the Ethernet device");
            return -EIO;
        }
    }

    return 0;
}

static int allocate_irq_callback(ps_irq_t irq, unsigned curr_num, size_t num_irqs, void *token)
{
    if (token == NULL) {
        ZF_LOGE("Expected a token!");
        return -EINVAL;
    }

    callback_args_t *args = token;
    if (curr_num == 0) {
        args->irq_id = ps_irq_register(&args->io_ops->irq_ops, irq, eth_irq_handle, args->eth_driver);
        if (args->irq_id < 0) {
            ZF_LOGE("Failed to register the Ethernet device's IRQ");
            return -EIO;
        }
    }

    return 0;
}

int ethif_imx_init_module(ps_io_ops_t *io_ops, const char *device_path)
{
    struct arm_eth_plat_config plat_config;
    struct eth_driver *eth_driver;

    int error = ps_calloc(&io_ops->malloc_ops, 1, sizeof(*eth_driver), (void **) &eth_driver);
    if (error) {
        ZF_LOGE("Failed to allocate memory for the Ethernet driver");
        return -ENOMEM;
    }

    ps_fdt_cookie_t *cookie = NULL;
    callback_args_t args = { .io_ops = io_ops, .eth_driver = eth_driver };
    error = ps_fdt_read_path(&io_ops->io_fdt, &io_ops->malloc_ops, device_path, &cookie);
    if (error) {
        ZF_LOGE("Failed to read the path of the Ethernet device");
        return -ENODEV;
    }

    error = ps_fdt_walk_registers(&io_ops->io_fdt, cookie, allocate_register_callback, &args);
    if (error) {
        ZF_LOGE("Failed to walk the Ethernet device's registers and allocate them");
        return -ENODEV;
    }

    error = ps_fdt_walk_irqs(&io_ops->io_fdt, cookie, allocate_irq_callback, &args);
    if (error) {
        ZF_LOGE("Failed to walk the Ethernet device's IRQs and allocate them");
        return -ENODEV;
    }

    error = ps_fdt_cleanup_cookie(&io_ops->malloc_ops, cookie);
    if (error) {
        ZF_LOGE("Failed to free the cookie used to allocate resources");
        return -ENODEV;
    }

    /* Setup the config and hand initialisation off to the proper
     * initialisation method */
    plat_config.buffer_addr = args.addr;
    plat_config.prom_mode = 1;

    error = ethif_imx6_init(eth_driver, *io_ops, &plat_config);
    if (error) {
        ZF_LOGE("Failed to initialise the Ethernet driver");
        return -ENODEV;
    }

    return ps_interface_register(&io_ops->interface_registration_ops, PS_ETHERNET_INTERFACE, eth_driver, NULL);
}

static const char *compatible_strings[] = {
    /* Other i.MX platforms may also be compatible, but the platforms that have
     * been tested are the SABRE Lite (i.MX6Quad) and the i.MX8MQ Evaluation Kit */
    "fsl,imx6q-fec",
    "fsl,imx8mq-fec",
    NULL
};
PS_DRIVER_MODULE_DEFINE(imx_fec, compatible_strings, ethif_imx_init_module);