1/*
2 * Copyright 2017, DornerWorks, Ltd.
3 * Copyright 2017, Data61
4 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
5 * ABN 41 687 119 230.
6 *
7 * This software may be distributed and modified according to the terms of
8 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
9 * See "LICENSE_GPLv2.txt" for details.
10 *
11 * @TAG(DATA61_GPL)
12 */
13
14#include "unimplemented.h"
15#include "io.h"
16#include <platsupport/driver_module.h>
17#include <platsupport/fdt.h>
18#include <ethdrivers/gen_config.h>
19#include <ethdrivers/zynq7000.h>
20#include <ethdrivers/raw.h>
21#include <ethdrivers/helpers.h>
22#include <string.h>
23#include <utils/util.h>
24#include "zynq_gem.h"
25#include "uboot/net.h"
26#include "uboot/miiphy.h"
27#include "uboot/phy.h"
28#include <stdio.h>
29
30#define BUF_SIZE MAX_PKT_SIZE
31
/* Per-instance driver state for the Zynq7000 GEM Ethernet controller. */
struct zynq7000_eth_data {
    struct eth_device *eth_dev;       /* u-boot device handle from zynq_gem_initialize() */
    uintptr_t tx_ring_phys;           /* physical address of the TX descriptor ring */
    uintptr_t rx_ring_phys;           /* physical address of the RX descriptor ring */
    volatile struct emac_bd *tx_ring; /* virtual mapping of the TX descriptor ring */
    volatile struct emac_bd *rx_ring; /* virtual mapping of the RX descriptor ring */
    unsigned int rx_size;             /* number of RX descriptors in the ring */
    unsigned int tx_size;             /* number of TX descriptors in the ring */
    void **rx_cookies;                /* per-RX-descriptor client cookie, returned via rx_complete */
    unsigned int rx_remain;           /* RX descriptors free for fill_rx_bufs() to populate */
    unsigned int tx_remain;           /* TX descriptors free for raw_tx() to use */
    void **tx_cookies;                /* per-TX-packet client cookie (indexed by first descriptor) */
    unsigned int *tx_lengths;         /* per-TX-packet descriptor count (indexed by first descriptor) */
    /* track where the head and tail of the queues are for
     * enqueueing buffers / checking for completions */
    unsigned int rdt, rdh, tdt, tdh;
};
49
50static void free_desc_ring(struct zynq7000_eth_data *dev, ps_dma_man_t *dma_man)
51{
52
53    if (dev->rx_ring != NULL) {
54        dma_unpin_free(dma_man, (void *)dev->rx_ring, sizeof(struct emac_bd) * dev->rx_size);
55        dev->rx_ring = NULL;
56    }
57
58    if (dev->tx_ring != NULL) {
59        dma_unpin_free(dma_man, (void *)dev->tx_ring, sizeof(struct emac_bd) * dev->tx_size);
60        dev->tx_ring = NULL;
61    }
62
63    if (dev->rx_cookies != NULL) {
64        free(dev->rx_cookies);
65        dev->rx_cookies = NULL;
66    }
67
68    if (dev->tx_cookies != NULL) {
69        free(dev->tx_cookies);
70        dev->tx_cookies = NULL;
71    }
72
73    if (dev->tx_lengths != NULL) {
74        free(dev->tx_lengths);
75        dev->tx_lengths = NULL;
76    }
77}
78
79static int initialize_desc_ring(struct zynq7000_eth_data *dev, ps_dma_man_t *dma_man)
80{
81    dma_addr_t rx_ring = dma_alloc_pin(dma_man, sizeof(struct emac_bd) * dev->rx_size, 0, ARCH_DMA_MINALIGN);
82    if (!rx_ring.phys) {
83        LOG_ERROR("Failed to allocate rx_ring");
84        return -1;
85    }
86
87    dev->rx_ring = rx_ring.virt;
88    dev->rx_ring_phys = rx_ring.phys;
89    dma_addr_t tx_ring = dma_alloc_pin(dma_man, sizeof(struct emac_bd) * dev->tx_size, 0, ARCH_DMA_MINALIGN);
90    if (!tx_ring.phys) {
91        LOG_ERROR("Failed to allocate tx_ring");
92        free_desc_ring(dev, dma_man);
93        return -1;
94    }
95
96    ps_dma_cache_clean_invalidate(dma_man, rx_ring.virt, sizeof(struct emac_bd) * dev->rx_size);
97    ps_dma_cache_clean_invalidate(dma_man, tx_ring.virt, sizeof(struct emac_bd) * dev->tx_size);
98
99    dev->rx_cookies = malloc(sizeof(void *) * dev->rx_size);
100    dev->tx_cookies = malloc(sizeof(void *) * dev->tx_size);
101    dev->tx_lengths = malloc(sizeof(unsigned int) * dev->tx_size);
102
103    if (dev->rx_cookies == NULL || dev->tx_cookies == NULL || dev->tx_lengths == NULL) {
104
105        if (dev->rx_cookies != NULL) {
106            free(dev->rx_cookies);
107        }
108
109        if (dev->tx_cookies != NULL) {
110            free(dev->tx_cookies);
111        }
112
113        if (dev->tx_lengths != NULL) {
114            free(dev->tx_lengths);
115        }
116
117        LOG_ERROR("Failed to malloc");
118        free_desc_ring(dev, dma_man);
119        return -1;
120    }
121
122    dev->tx_ring = tx_ring.virt;
123    dev->tx_ring_phys = tx_ring.phys;
124
125    /* Remaining needs to be 2 less than size as we cannot actually enqueue size many descriptors,
126     * since then the head and tail pointers would be equal, indicating empty. */
127    dev->rx_remain = dev->rx_size - 2;
128    dev->tx_remain = dev->tx_size - 2;
129
130    dev->rdt = dev->rdh = dev->tdt = dev->tdh = 0;
131
132    /* initialise both rings */
133    for (unsigned int i = 0; i < dev->tx_size; i++) {
134        dev->tx_ring[i] = (struct emac_bd) {
135            .addr = 0,
136            .status = ZYNQ_GEM_TXBUF_USED_MASK
137        };
138    }
139
140    dev->tx_ring[dev->tx_size - 1].status |= ZYNQ_GEM_TXBUF_WRAP_MASK;
141
142    for (unsigned int i = 0; i < dev->rx_size; i++) {
143        dev->rx_ring[i] = (struct emac_bd) {
144            .addr = ZYNQ_GEM_RXBUF_NEW_MASK,
145            .status = 0
146        };
147    }
148
149    dev->rx_ring[dev->rx_size - 1].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;
150
151    __sync_synchronize();
152
153    return 0;
154}
155
/* Request buffers from the client (via allocate_rx_buf) and attach them
 * to free RX descriptors until the ring is full or the client runs out
 * of buffers, then re-enable the receiver if new descriptors are
 * available. */
static void fill_rx_bufs(struct eth_driver *driver)
{

    struct zynq7000_eth_data *dev = (struct zynq7000_eth_data *)driver->eth_data;
    __sync_synchronize();

    while (dev->rx_remain > 0) {

        /* request a buffer */
        void *cookie = NULL;
        int next_rdt = (dev->rdt + 1) % dev->rx_size;

        /* allocate_rx_buf may be NULL before the client has registered callbacks */
        uintptr_t phys = driver->i_cb.allocate_rx_buf ? driver->i_cb.allocate_rx_buf(driver->cb_cookie, BUF_SIZE, &cookie) : 0;
        if (!phys) {
            break;
        }

        dev->rx_cookies[dev->rdt] = cookie;

        dev->rx_ring[dev->rdt].status = 0;

        /* Remove the used bit so the controller knows this descriptor is
         * available to be written to */
        dev->rx_ring[dev->rdt].addr &= ~(ZYNQ_GEM_RXBUF_NEW_MASK | ZYNQ_GEM_RXBUF_ADD_MASK);

        /* install the buffer's physical address (low bits are flag bits) */
        dev->rx_ring[dev->rdt].addr |= (phys & ZYNQ_GEM_RXBUF_ADD_MASK);

        /* make the descriptor visible to the controller before advancing the tail */
        __sync_synchronize();

        dev->rdt = next_rdt;
        dev->rx_remain--;
    }

    __sync_synchronize();

    /* if descriptors are now queued, make sure the receiver is running */
    if (dev->rdt != dev->rdh && !zynq_gem_recv_enabled(dev->eth_dev)) {
        zynq_gem_recv_enable(dev->eth_dev);
    }
}
195
196static void complete_rx(struct eth_driver *eth_driver)
197{
198
199    struct zynq7000_eth_data *dev = (struct zynq7000_eth_data *)eth_driver->eth_data;
200    unsigned int rdt = dev->rdt;
201
202    while (dev->rdh != rdt) {
203        unsigned int status = dev->rx_ring[dev->rdh].status;
204        unsigned int addr = dev->rx_ring[dev->rdh].addr;
205
206        /* Ensure no memory references get ordered before we checked the descriptor was written back */
207        __sync_synchronize();
208        if (!(addr & ZYNQ_GEM_RXBUF_NEW_MASK)) {
209            /* not complete yet */
210            break;
211        }
212
213        // TBD: Need to handle multiple buffers for single frame?
214        void *cookie = dev->rx_cookies[dev->rdh];
215        unsigned int len = status & ZYNQ_GEM_RXBUF_LEN_MASK;
216
217        /* update rdh */
218        dev->rdh = (dev->rdh + 1) % dev->rx_size;
219        dev->rx_remain++;
220
221        /* Give the buffers back */
222        eth_driver->i_cb.rx_complete(eth_driver->cb_cookie, 1, &cookie, &len);
223    }
224
225    if (dev->rdt != dev->rdh && !zynq_gem_recv_enabled(dev->eth_dev)) {
226        zynq_gem_recv_enabled(dev->eth_dev);
227    }
228}
229
/* Reclaim TX descriptors the controller has finished with, returning
 * each completed packet's cookie to the client via tx_complete. A packet
 * may span several descriptors (count recorded in tx_lengths at the
 * packet's first descriptor). */
static void complete_tx(struct eth_driver *driver)
{

    struct zynq7000_eth_data *dev = (struct zynq7000_eth_data *)driver->eth_data;

    while (dev->tdh != dev->tdt) {
        unsigned int i;

        for (i = 0; i < dev->tx_lengths[dev->tdh]; i++) {
            int ring_pos = (i + dev->tdh) % dev->tx_size;

            /* The controller only sets the USED bit on a packet's first
             * descriptor, so completion is judged from i == 0 alone. */
            if (i == 0 && !(dev->tx_ring[ring_pos].status & ZYNQ_GEM_TXBUF_USED_MASK)) {
                /* not all parts complete */
                return;
            }

            /* reset the descriptor to software-owned: keep only USED/WRAP,
             * then (re)assert USED */
            dev->tx_ring[ring_pos].status &= (ZYNQ_GEM_TXBUF_USED_MASK | ZYNQ_GEM_TXBUF_WRAP_MASK);
            dev->tx_ring[ring_pos].status |= ZYNQ_GEM_TXBUF_USED_MASK;
        }

        /* do not let memory loads happen before our checking of the descriptor write back */
        __sync_synchronize();

        /* increase TX Descriptor head */
        void *cookie = dev->tx_cookies[dev->tdh];
        dev->tx_remain += dev->tx_lengths[dev->tdh];
        dev->tdh = (dev->tdh + dev->tx_lengths[dev->tdh]) % dev->tx_size;

        /* give the buffer back */
        driver->i_cb.tx_complete(driver->cb_cookie, cookie);
    }

    /* NOTE(review): this condition looks unreachable — the loop above only
     * falls through when tdh == tdt (incomplete packets return early), so
     * the transmitter is never re-kicked here. Verify against the intended
     * behaviour of the equivalent imx6 driver. */
    if (dev->tdh != dev->tdt) {
        zynq_gem_start_send(dev->eth_dev);
    }
}
266
267static void handle_irq(struct eth_driver *driver, int irq)
268{
269    struct zynq7000_eth_data *eth_data = (struct zynq7000_eth_data *)driver->eth_data;
270    struct zynq_gem_regs *regs = (struct zynq_gem_regs *)eth_data->eth_dev->iobase;
271
272    // Clear Interrupts
273    u32 isr = readl(&regs->isr);
274    writel(isr, &regs->isr);
275
276    if (isr & ZYNQ_GEM_IXR_TXCOMPLETE) {
277        /* Clear TX Status register */
278        u32 val = readl(&regs->txsr);
279        writel(val, &regs->txsr);
280
281        complete_tx(driver);
282    }
283
284    if (isr & ZYNQ_GEM_IXR_FRAMERX) {
285        /* Clear RX Status register */
286        u32 val = readl(&regs->rxsr);
287        writel(val, &regs->rxsr);
288
289        complete_rx(driver);
290        fill_rx_bufs(driver);
291    }
292}
293
294/* This is a platsuport IRQ interface IRQ handler wrapper for handle_irq() */
295static void eth_irq_handle(void *data, ps_irq_acknowledge_fn_t acknowledge_fn, void *ack_data)
296{
297    ZF_LOGF_IF(data == NULL, "Passed in NULL for the data");
298    struct eth_driver *driver = data;
299
300    /* handle_irq doesn't really expect an IRQ number */
301    handle_irq(driver, 0);
302
303    int error = acknowledge_fn(ack_data);
304    if (error) {
305        LOG_ERROR("Failed to acknowledge the Ethernet device's IRQ");
306    }
307}
308
/* Debug hook required by the raw iface; not implemented for this driver. */
static void print_state(struct eth_driver *eth_driver)
{
    printf("Zynq7000: print_state not implemented\n");
}
313
/* raw iface init hook; unused here — hardware setup happens in
 * ethif_zynq7000_init() instead. mac/mtu are not filled in. */
static void low_level_init(struct eth_driver *driver, uint8_t *mac, int *mtu)
{
    printf("Zynq7000: low_level_init not implemented\n");
}
318
/* Enqueue one packet made of @num physical buffer fragments on the TX
 * ring and kick the transmitter.
 * Returns ETHIF_TX_ENQUEUED on success, or ETHIF_TX_FAILED when the
 * ring has no room even after attempting completions (the caller
 * retains ownership of the buffers in that case). */
static int raw_tx(struct eth_driver *driver, unsigned int num, uintptr_t *phys, unsigned int *len, void *cookie)
{

    struct zynq7000_eth_data *dev = (struct zynq7000_eth_data *)driver->eth_data;

    /* Ensure we have room */
    if (dev->tx_remain < num) {
        /* try and complete some */
        complete_tx(driver);

        if (dev->tx_remain < num) {
            return ETHIF_TX_FAILED;
        }
    }

    unsigned int i;
    __sync_synchronize();

    /* fill one descriptor per fragment, starting at the tail */
    for (i = 0; i < num; i++) {
        unsigned int ring = (dev->tdt + i) % dev->tx_size;
        dev->tx_ring[ring].addr = phys[i];
        /* clear USED (hand to hardware), the length field and LAST */
        dev->tx_ring[ring].status &= ~(ZYNQ_GEM_TXBUF_USED_MASK | ZYNQ_GEM_TXBUF_FRMLEN_MASK | ZYNQ_GEM_TXBUF_LAST_MASK);
        dev->tx_ring[ring].status |= (len[i] & ZYNQ_GEM_TXBUF_FRMLEN_MASK);
        if (i == (num - 1)) {
            /* mark the final fragment of the frame */
            dev->tx_ring[ring].status |= ZYNQ_GEM_TXBUF_LAST_MASK;
        }
    }

    /* bookkeeping is indexed by the packet's first descriptor */
    dev->tx_cookies[dev->tdt] = cookie;
    dev->tx_lengths[dev->tdt] = num;
    dev->tdt = (dev->tdt + num) % dev->tx_size;
    dev->tx_remain -= num;

    /* descriptors must be visible to the controller before it is started */
    __sync_synchronize();

    zynq_gem_start_send(dev->eth_dev);

    return ETHIF_TX_ENQUEUED;
}
358
/* Polling entry point: reap completed RX and TX descriptors, then top
 * the RX ring back up with fresh buffers freed by the RX completions. */
static void raw_poll(struct eth_driver *driver)
{
    complete_rx(driver);
    complete_tx(driver);
    fill_rx_bufs(driver);
}
365
366static void get_mac(struct eth_driver *driver, uint8_t *mac)
367{
368    struct eth_device *eth_dev = ((struct zynq7000_eth_data *)driver->eth_data)->eth_dev;
369    memcpy(mac, eth_dev->enetaddr, 6);
370}
371
372
/* Function table exported to the generic ethdriver layer. */
static struct raw_iface_funcs iface_fns = {
    .raw_handleIRQ = handle_irq,
    .print_state = print_state,
    .low_level_init = low_level_init,
    .raw_tx = raw_tx,
    .raw_poll = raw_poll,
    .get_mac = get_mac
};
381
382int ethif_zynq7000_init(struct eth_driver *eth_driver, ps_io_ops_t io_ops, void *config)
383{
384    int err;
385    struct arm_eth_plat_config *plat_config = (struct arm_eth_plat_config *)config;
386    struct zynq7000_eth_data *eth_data = NULL;
387    struct eth_device *eth_dev;
388
389    printf("ethif_zynq7000_init: Start\n");
390
391    eth_data = (struct zynq7000_eth_data *)malloc(sizeof(struct zynq7000_eth_data));
392    if (eth_data == NULL) {
393        LOG_ERROR("Failed to allocate eth data struct");
394        goto error;
395    }
396
397    if (config == NULL) {
398        LOG_ERROR("Cannot get platform info; Passed in Config Pointer NULL");
399        goto error;
400    }
401    uint32_t base_addr = (uint32_t)plat_config->buffer_addr;
402
403    eth_data->tx_size = CONFIG_LIB_ETHDRIVER_TX_DESC_COUNT;
404    eth_data->rx_size = CONFIG_LIB_ETHDRIVER_RX_DESC_COUNT;
405    eth_driver->eth_data = eth_data;
406    eth_driver->dma_alignment = ARCH_DMA_MINALIGN;
407    eth_driver->i_fn = iface_fns;
408
409    /* Initialize Descriptors */
410    err = initialize_desc_ring(eth_data, &io_ops.dma_manager);
411    if (err) {
412        LOG_ERROR("Failed to allocate descriptor rings");
413        goto error;
414    }
415
416#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
417    miiphy_init();
418#endif
419
420#ifdef CONFIG_PHYLIB
421    phy_init();
422#endif
423
424    zynq_set_gem_ioops(&io_ops);
425
426    eth_dev = (struct eth_device *)zynq_gem_initialize(base_addr,
427                                                       CONFIG_ZYNQ_GEM_PHY_ADDR0,
428                                                       CONFIG_ZYNQ_GEM_EMIO0);
429    if (NULL == eth_dev) {
430        LOG_ERROR("Failed to initialize Zynq Ethernet Device");
431        goto error;
432    }
433    eth_data->eth_dev = eth_dev;
434
435    struct zynq_gem_regs *regs = (struct zynq_gem_regs *)eth_dev->iobase;
436
437    /* Initialize the buffer descriptor registers */
438    writel((uint32_t)eth_data->tx_ring_phys, &regs->txqbase);
439    writel((uint32_t)eth_data->rx_ring_phys, &regs->rxqbase);
440
441    zynq_gem_init(eth_dev);
442
443    if (plat_config->prom_mode) {
444        zynq_gem_prom_enable(eth_dev);
445    } else {
446        zynq_gem_prom_disable(eth_dev);
447    }
448
449    fill_rx_bufs(eth_driver);
450
451    /* done */
452    return 0;
453error:
454    if (eth_data != NULL) {
455        free(eth_data);
456    }
457    free_desc_ring(eth_data, &io_ops.dma_manager);
458    return -1;
459}
460
/* State threaded through the FDT walk callbacks below. */
typedef struct {
    void *addr;                    /* mapped register base, set by allocate_register_callback() */
    ps_io_ops_t *io_ops;           /* I/O ops used for mapping and IRQ registration */
    struct eth_driver *eth_driver; /* driver instance passed to the IRQ handler */
    int irq_id;                    /* ID from ps_irq_register(), set by allocate_irq_callback() */
} callback_args_t;
467
468static int allocate_register_callback(pmem_region_t pmem, unsigned curr_num, size_t num_regs, void *token)
469{
470    if (token == NULL) {
471        ZF_LOGE("Expected a token!");
472        return -EINVAL;
473    }
474
475    callback_args_t *args = token;
476    if (curr_num == 0) {
477        args->addr = ps_pmem_map(args->io_ops, pmem, false, PS_MEM_NORMAL);
478        if (!args->addr) {
479            ZF_LOGE("Failed to map the Ethernet device");
480            return -EIO;
481        }
482    }
483
484    return 0;
485}
486
487static int allocate_irq_callback(ps_irq_t irq, unsigned curr_num, size_t num_irqs, void *token)
488{
489    if (token == NULL) {
490        ZF_LOGE("Expected a token!");
491        return -EINVAL;
492    }
493
494    callback_args_t *args = token;
495    if (curr_num == 0) {
496        args->irq_id = ps_irq_register(&args->io_ops->irq_ops, irq, eth_irq_handle, args->eth_driver);
497        if (args->irq_id < 0) {
498            ZF_LOGE("Failed to register the Ethernet device's IRQ");
499            return -EIO;
500        }
501    }
502
503    return 0;
504}
505
506int ethif_zynq7000_init_module(ps_io_ops_t *io_ops, const char *dev_path)
507{
508    struct arm_eth_plat_config plat_config;
509    struct eth_driver *eth_driver;
510
511    int error = ps_calloc(&io_ops->malloc_ops, 1, sizeof(*eth_driver), (void **) &eth_driver);
512    if (error) {
513        ZF_LOGE("Failed to allocate memory for the Ethernet driver");
514        return -ENOMEM;
515    }
516
517    ps_fdt_cookie_t *cookie = NULL;
518    callback_args_t args = { .io_ops = io_ops, .eth_driver = eth_driver };
519    error = ps_fdt_read_path(&io_ops->io_fdt, &io_ops->malloc_ops, (char *) dev_path, &cookie);
520    if (error) {
521        ZF_LOGE("Failed to read the path of the Ethernet device");
522        return -ENODEV;
523    }
524
525    error = ps_fdt_walk_registers(&io_ops->io_fdt, cookie, allocate_register_callback, &args);
526    if (error) {
527        ZF_LOGE("Failed to walk the Ethernet device's registers and allocate them");
528        return -ENODEV;
529    }
530
531    error = ps_fdt_walk_irqs(&io_ops->io_fdt, cookie, allocate_irq_callback, &args);
532    if (error) {
533        ZF_LOGE("Failed to walk the Ethernet device's IRQs and allocate them");
534        return -ENODEV;
535    }
536
537    error = ps_fdt_cleanup_cookie(&io_ops->malloc_ops, cookie);
538    if (error) {
539        ZF_LOGE("Failed to free the cookie used to allocate resources");
540        return -ENODEV;
541    }
542
543    /* Setup the config and hand initialisation off to the proper
544     * initialisation method */
545    plat_config.buffer_addr = args.addr;
546    plat_config.prom_mode = 1;
547
548    error = ethif_zynq7000_init(eth_driver, *io_ops, &plat_config);
549    if (error) {
550        ZF_LOGE("Failed to initialise the Ethernet driver");
551        return -ENODEV;
552    }
553
554    return ps_interface_register(&io_ops->interface_registration_ops, PS_ETHERNET_INTERFACE, eth_driver, NULL);
555}
556
/* Device-tree "compatible" strings this driver binds to (NULL-terminated). */
static const char *compatible_strings[] = {
    "cdns,zynq-gem",
    "cdns,gem",
    NULL
};
/* Register this driver with the platsupport driver-module framework. */
PS_DRIVER_MODULE_DEFINE(zynq7000_gem, compatible_strings, ethif_zynq7000_init_module);
563