/*
 * Copyright 2017, Data61
 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
 * ABN 41 687 119 230.
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(DATA61_GPL)
 */

#include <autoconf.h>
#include <ethdrivers/gen_config.h>
#include <lwip/gen_config.h>

#ifdef CONFIG_LIB_LWIP

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <ethdrivers/lwip.h>
#include <ethdrivers/helpers.h>
#include <lwip/netif.h>
#include <netif/etharp.h>
#include <lwip/stats.h>
#include <lwip/snmp.h>
#include "debug.h"

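/* Allocate and pin CONFIG_LIB_ETHDRIVER_NUM_PREALLOCATED_BUFFERS DMA buffers of
 * CONFIG_LIB_ETHDRIVER_PREALLOCATED_BUF_SIZE bytes each and record them in
 * iface->bufs, which is used as a stack of free buffers. On any failure all
 * partial allocations are released and iface->bufs is left NULL. */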
static void initialize_free_bufs(lwip_iface_t *iface)
{
    dma_addr_t *dma_bufs = NULL;
    dma_bufs = malloc(sizeof(dma_addr_t) * CONFIG_LIB_ETHDRIVER_NUM_PREALLOCATED_BUFFERS);
    if (!dma_bufs) {
        goto error;
    }
    memset(dma_bufs, 0, sizeof(dma_addr_t) * CONFIG_LIB_ETHDRIVER_NUM_PREALLOCATED_BUFFERS);
    iface->bufs = malloc(sizeof(dma_addr_t *) * CONFIG_LIB_ETHDRIVER_NUM_PREALLOCATED_BUFFERS);
    if (!iface->bufs) {
        goto error;
    }
    for (int i = 0; i < CONFIG_LIB_ETHDRIVER_NUM_PREALLOCATED_BUFFERS; i++) {
        dma_bufs[i] = dma_alloc_pin(&iface->dma_man, CONFIG_LIB_ETHDRIVER_PREALLOCATED_BUF_SIZE, 1,
                                    iface->driver.dma_alignment);
        if (!dma_bufs[i].phys) {
            goto error;
        }
        ps_dma_cache_clean_invalidate(&iface->dma_man, dma_bufs[i].virt, CONFIG_LIB_ETHDRIVER_PREALLOCATED_BUF_SIZE);
        iface->bufs[i] = &dma_bufs[i];
    }
    iface->num_free_bufs = CONFIG_LIB_ETHDRIVER_NUM_PREALLOCATED_BUFFERS;
    return;
error:
    if (iface->bufs) {
        free(iface->bufs);
    }
    if (dma_bufs) {
        for (int i = 0; i < CONFIG_LIB_ETHDRIVER_NUM_PREALLOCATED_BUFFERS; i++) {
            if (dma_bufs[i].virt) {
                dma_unpin_free(&iface->dma_man, dma_bufs[i].virt, CONFIG_LIB_ETHDRIVER_PREALLOCATED_BUF_SIZE);
            }
        }
        free(dma_bufs);
    }
    iface->bufs = NULL;
}

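/* RX buffer allocation callback for the preallocated-pool mode. Pops a buffer
 * off the free stack (lazily creating the pool on first use), invalidates its
 * cache lines for the incoming DMA and returns its physical address; the
 * dma_addr_t is handed back through the cookie for the completion callback. */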
static uintptr_t lwip_allocate_rx_buf(void *iface, size_t buf_size, void **cookie)
{
    lwip_iface_t *lwip_iface = (lwip_iface_t *)iface;
    if (buf_size > CONFIG_LIB_ETHDRIVER_PREALLOCATED_BUF_SIZE) {
        LOG_ERROR("Requested RX buffer of size %zu which can never be fulfilled by preallocated buffers of size %d", buf_size,
                  CONFIG_LIB_ETHDRIVER_PREALLOCATED_BUF_SIZE);
        return 0;
    }
    if (lwip_iface->num_free_bufs == 0) {
        if (!lwip_iface->bufs) {
            initialize_free_bufs(lwip_iface);
            if (!lwip_iface->bufs) {
                LOG_ERROR("Failed lazy initialization of preallocated free buffers");
                return 0;
            }
        } else {
            return 0;
        }
    }
    lwip_iface->num_free_bufs--;
    dma_addr_t *buf = lwip_iface->bufs[lwip_iface->num_free_bufs];
    ps_dma_cache_invalidate(&lwip_iface->dma_man, buf->virt, buf_size);
    *cookie = (void *)buf;
    return buf->phys;
}

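/* Return a buffer (the dma_addr_t handed out as a cookie) to the free stack.
 * Despite the name this is also used by the RX path to give buffers back. */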
static void lwip_tx_complete(void *iface, void *cookie)
{
    lwip_iface_t *lwip_iface = (lwip_iface_t *)iface;
    lwip_iface->bufs[lwip_iface->num_free_bufs] = cookie;
    lwip_iface->num_free_bufs++;
}

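/* RX completion callback for the preallocated-pool mode. Copies the received
 * frame, which may span several DMA buffers, into a freshly allocated pbuf
 * chain, returns the DMA buffers to the pool and hands recognised ethertypes
 * to the lwip input function. */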
static void lwip_rx_complete(void *iface, unsigned int num_bufs, void **cookies, unsigned int *lens)
{
    struct pbuf *p;
    unsigned int len;
    lwip_iface_t *lwip_iface = (lwip_iface_t *)iface;
    unsigned int i;
    len = 0;
    for (i = 0; i < num_bufs; i++) {
        ps_dma_cache_invalidate(&lwip_iface->dma_man, ((dma_addr_t *)cookies[i])->virt, lens[i]);
        len += lens[i];
    }
#if ETH_PAD_SIZE
    len += ETH_PAD_SIZE; /* allow room for Ethernet padding */
#endif
    /* Get a buffer from the pool */
    p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);
    if (p == NULL) {
        /* no pbuf available; return the DMA buffers to the pool and drop the frame */
        for (i = 0; i < num_bufs; i++) {
            lwip_tx_complete(iface, cookies[i]);
        }
        return;
    }

#if ETH_PAD_SIZE
    pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
    len -= ETH_PAD_SIZE;
#endif

    /* fill the pbuf chain */
    struct pbuf *q = p;
    unsigned int copied = 0;
    unsigned int buf = 0;
    unsigned int buf_done = 0;
    unsigned int pbuf_done = 0;
    while (copied < len) {
        unsigned int next = MIN(q->len - pbuf_done, lens[buf] - buf_done);
        memcpy(q->payload + pbuf_done, ((dma_addr_t *)cookies[buf])->virt + buf_done, next);
        buf_done += next;
        pbuf_done += next;
        copied += next;
        if (buf_done == lens[buf]) {
            buf++;
            buf_done = 0;
        }
        if (pbuf_done == q->len) {
            q = q->next;
            pbuf_done = 0;
        }
    }

//    PKT_DEBUG(printf("Receiving packet\n"));
//    PKT_DEBUG(print_packet(COL_RX, p->payload, len));

#if ETH_PAD_SIZE
    pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif
    LINK_STATS_INC(link.recv);

    /* the frame has been copied out; give the DMA buffers back */
    for (i = 0; i < num_bufs; i++) {
        lwip_tx_complete(iface, cookies[i]);
    }

    struct eth_hdr *ethhdr;
    ethhdr = p->payload;

    switch (htons(ethhdr->type)) {
    /* IP or ARP packet? */
    case ETHTYPE_IP:
    case ETHTYPE_ARP:
#if PPPOE_SUPPORT
    /* PPPoE packet? */
    case ETHTYPE_PPPOEDISC:
    case ETHTYPE_PPPOE:
#endif /* PPPOE_SUPPORT */
        /* hand the full packet to tcpip_thread for processing */
        if (lwip_iface->netif->input(p, lwip_iface->netif) != ERR_OK) {
            LWIP_DEBUGF(NETIF_DEBUG, ("ethernetif_input: IP input error\n"));
            pbuf_free(p);
            p = NULL;
        }
        break;

    default:
        pbuf_free(p);
        break;
    }
}

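/* linkoutput for the preallocated-pool mode. Flattens the pbuf chain into a
 * single free DMA buffer, cleans the cache and submits it to the driver's
 * raw_tx; the buffer is returned to the pool on failure or completion. */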
static err_t ethif_link_output(struct netif *netif, struct pbuf *p)
{
    lwip_iface_t *iface = (lwip_iface_t *)netif->state;
    dma_addr_t buf;
    struct pbuf *q;
    int status;

#if ETH_PAD_SIZE
    pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif

    if (p->tot_len > CONFIG_LIB_ETHDRIVER_PREALLOCATED_BUF_SIZE || iface->num_free_bufs == 0) {
#if ETH_PAD_SIZE
        pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word before bailing out */
#endif
        return ERR_MEM;
    }
    iface->num_free_bufs--;
    dma_addr_t *orig_buf = iface->bufs[iface->num_free_bufs];
    buf = *orig_buf;

    char *pkt_pos = (char *)buf.virt;
    for (q = p; q != NULL; q = q->next) {
        memcpy(pkt_pos, q->payload, q->len);
        pkt_pos += q->len;
    }
    ps_dma_cache_clean(&iface->dma_man, buf.virt, p->tot_len);
//    PKT_DEBUG(cprintf(COL_TX, "Sending packet"));
//    PKT_DEBUG(print_packet(COL_TX, (void*)buf.virt, p->tot_len));

#if ETH_PAD_SIZE
    pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif

    unsigned int length = p->tot_len;
    status = iface->driver.i_fn.raw_tx(&iface->driver, 1, &buf.phys, &length, orig_buf);
    switch (status) {
    case ETHIF_TX_FAILED:
        lwip_tx_complete(iface, orig_buf);
        return ERR_WOULDBLOCK;
    case ETHIF_TX_COMPLETE:
        lwip_tx_complete(iface, orig_buf);
        /* fall through */
    case ETHIF_TX_ENQUEUED:
        break;
    }

    LINK_STATS_INC(link.xmit);

    return ERR_OK;
}

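/* RX buffer allocation callback for the zero-copy pbuf mode. Allocates a
 * single, unchained PBUF_RAM pbuf with headroom for the Ethernet pad and for
 * DMA alignment, aligns the payload, pins it and invalidates its cache lines;
 * the pbuf itself is the cookie. */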
static uintptr_t lwip_pbuf_allocate_rx_buf(void *iface, size_t buf_size, void **cookie)
{
    lwip_iface_t *lwip_iface = (lwip_iface_t *)iface;
#if ETH_PAD_SIZE
    buf_size += ETH_PAD_SIZE; /* allow room for Ethernet padding */
#endif
    /* add space for alignment */
    buf_size += lwip_iface->driver.dma_alignment;
    struct pbuf *p = pbuf_alloc(PBUF_RAW, buf_size, PBUF_RAM);
    if (!p) {
        return 0;
    }
    /* we cannot support chained pbufs when doing this */
    if (p->next) {
        pbuf_free(p);
        return 0;
    }
#if ETH_PAD_SIZE
    pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif
    uintptr_t new_payload = (uintptr_t)p->payload;
    /* round up to dma_alignment */
    new_payload = ROUND_UP(new_payload, lwip_iface->driver.dma_alignment);
    pbuf_header(p, -(new_payload - (uintptr_t)p->payload));
    uintptr_t phys = ps_dma_pin(&lwip_iface->dma_man, p->payload, buf_size);
    if (!phys) {
        pbuf_free(p);
        return 0;
    }
    ps_dma_cache_invalidate(&lwip_iface->dma_man, p->payload, buf_size);
    *cookie = p;
    return phys;
}

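/* TX completion callback for the zero-copy pbuf mode. Unpins every payload in
 * the chain in the same page-sized pieces ethif_pbuf_link_output pinned them
 * in, then drops the reference that was taken when the packet was queued. */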
static void lwip_pbuf_tx_complete(void *iface, void *cookie)
{
    lwip_iface_t *lwip_iface = (lwip_iface_t *)iface;
    struct pbuf *p = (struct pbuf *)cookie;
    for (; p; p = p->next) {
        uintptr_t loc = (uintptr_t)p->payload;
        uintptr_t end = (uintptr_t)p->payload + p->len;
        while (loc < end) {
            uintptr_t next = ROUND_UP(loc + 1, PAGE_SIZE_4K);
            if (next > end) {
                next = end;
            }
            ps_dma_unpin(&lwip_iface->dma_man, (void *)loc, next - loc);
            loc = next;
        }
    }
    pbuf_free(cookie);
}

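/* RX completion callback for the zero-copy pbuf mode. Trims each received
 * pbuf to its actual length, invalidates its cache lines and staples the
 * pieces into one chain, which is handed to the lwip input function if the
 * ethertype is recognised and freed otherwise. */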
static void lwip_pbuf_rx_complete(void *iface, unsigned int num_bufs, void **cookies, unsigned int *lens)
{
    struct pbuf *p = NULL;
    int i;
    lwip_iface_t *lwip_iface = (lwip_iface_t *)iface;

    assert(num_bufs > 0);
    /* staple all the bufs together; do it in reverse order for efficient
     * traversal of pbuf chains */
    for (i = num_bufs - 1; i >= 0; i--) {
        struct pbuf *q = (struct pbuf *)cookies[i];
        ps_dma_cache_invalidate(&lwip_iface->dma_man, q->payload, lens[i]);
        pbuf_realloc(q, lens[i]);
        if (p) {
            pbuf_cat(q, p);
        }
        p = q;
    }

#if ETH_PAD_SIZE
    pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif

    LINK_STATS_INC(link.recv);

    struct eth_hdr *ethhdr;
    ethhdr = p->payload;

    switch (htons(ethhdr->type)) {
    /* IP or ARP packet? */
    case ETHTYPE_IP:
    case ETHTYPE_ARP:
#if PPPOE_SUPPORT
    /* PPPoE packet? */
    case ETHTYPE_PPPOEDISC:
    case ETHTYPE_PPPOE:
#endif /* PPPOE_SUPPORT */
        /* hand the full packet to tcpip_thread for processing */
        if (lwip_iface->netif->input(p, lwip_iface->netif) != ERR_OK) {
            LWIP_DEBUGF(NETIF_DEBUG, ("ethernetif_input: IP input error\n"));
            LOG_INFO("failed to input packet");
            pbuf_free(p);
        }
        break;

    default:
        pbuf_free(p);
        break;
    }
}

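/* linkoutput for the zero-copy pbuf mode. Takes a reference on the pbuf, pins
 * each payload in page-sized pieces, cleans the caches and submits the
 * resulting scatter list to the driver's raw_tx; lwip_pbuf_tx_complete undoes
 * the pinning and the reference once transmission is done. */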
static err_t ethif_pbuf_link_output(struct netif *netif, struct pbuf *p)
{
    lwip_iface_t *iface = (lwip_iface_t *)netif->state;
    struct pbuf *q;
    int status;

    /* grab a reference to the pbuf */
    pbuf_ref(p);

#if ETH_PAD_SIZE
    pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif
    int max_frames = 0;

    /* work out how many pieces this buffer could potentially take up */
    for (q = p; q; q = q->next) {
        uintptr_t base = PAGE_ALIGN_4K((uintptr_t)q->payload);
        uintptr_t top = PAGE_ALIGN_4K((uintptr_t)q->payload + q->len - 1);
        max_frames += ((top - base) / PAGE_SIZE_4K) + 1;
    }
    int num_frames = 0;
    unsigned int lengths[max_frames];
    uintptr_t phys[max_frames];
    for (q = p; q; q = q->next) {
        uintptr_t loc = (uintptr_t)q->payload;
        uintptr_t end = (uintptr_t)q->payload + q->len;
        while (loc < end) {
            uintptr_t next = ROUND_UP(loc + 1, PAGE_SIZE_4K);
            if (next > end) {
                next = end;
            }
            lengths[num_frames] = next - loc;
            phys[num_frames] = ps_dma_pin(&iface->dma_man, (void *)loc, lengths[num_frames]);
            ps_dma_cache_clean(&iface->dma_man, (void *)loc, lengths[num_frames]);
            assert(phys[num_frames]);
            num_frames++;
            loc = next;
        }
    }

#if ETH_PAD_SIZE
    pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif

    status = iface->driver.i_fn.raw_tx(&iface->driver, num_frames, phys, lengths, p);
    switch (status) {
    case ETHIF_TX_FAILED:
        lwip_pbuf_tx_complete(iface, p);
        return ERR_WOULDBLOCK;
    case ETHIF_TX_COMPLETE:
        lwip_pbuf_tx_complete(iface, p);
        /* fall through */
    case ETHIF_TX_ENQUEUED:
        break;
    }

    LINK_STATS_INC(link.xmit);

    return ERR_OK;
}

static struct raw_iface_callbacks lwip_prealloc_callbacks = {
    .tx_complete = lwip_tx_complete,
    .rx_complete = lwip_rx_complete,
    .allocate_rx_buf = lwip_allocate_rx_buf
};

static struct raw_iface_callbacks lwip_pbuf_callbacks = {
    .tx_complete = lwip_pbuf_tx_complete,
    .rx_complete = lwip_pbuf_rx_complete,
    .allocate_rx_buf = lwip_pbuf_allocate_rx_buf
};

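/* netif init callback, installed as iface->ethif_init. Queries the driver for
 * its MAC address and MTU and wires up the ARP output and the linkoutput that
 * matches the buffering mode the interface was created with. */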
static err_t ethif_init(struct netif *netif)
{
    if (netif->state == NULL) {
        return ERR_ARG;
    }

    lwip_iface_t *iface = (lwip_iface_t *)netif->state;
    int mtu;
    iface->driver.i_fn.low_level_init(&iface->driver, netif->hwaddr, &mtu);
    netif->mtu = mtu;

    netif->hwaddr_len = ETHARP_HWADDR_LEN;
    netif->output = etharp_output;
    if (iface->bufs == NULL) {
        netif->linkoutput = ethif_pbuf_link_output;
    } else {
        netif->linkoutput = ethif_link_output;
    }

    NETIF_INIT_SNMP(netif, snmp_ifType_ethernet_csmacd,
                    LINK_SPEED_OF_YOUR_NETIF_IN_BPS);

    netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP |
                   NETIF_FLAG_LINK_UP | NETIF_FLAG_IGMP;

    iface->netif = netif;
    return ERR_OK;
}

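/* Initialise an lwip interface into caller-provided storage. If pbuf_dma is
 * given, the zero-copy pbuf callbacks are used with that DMA manager;
 * otherwise the preallocated-pool callbacks are used with the DMA manager
 * from io_ops. Returns iface on success and NULL on failure. */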
lwip_iface_t *ethif_new_lwip_driver_no_malloc(ps_io_ops_t io_ops, ps_dma_man_t *pbuf_dma, ethif_driver_init driver,
                                              void *driver_config, lwip_iface_t *iface)
{
    memset(iface, 0, sizeof(*iface));
    iface->driver.cb_cookie = iface;
    if (pbuf_dma) {
        iface->driver.i_cb = lwip_pbuf_callbacks;
        iface->dma_man = *pbuf_dma;
    } else {
        iface->driver.i_cb = lwip_prealloc_callbacks;
        iface->dma_man = io_ops.dma_manager;
    }
    int err;
    err = driver(&iface->driver, io_ops, driver_config);
    if (err) {
        goto error;
    }
    /* if the driver did not already cause it to happen, allocate the preallocated buffers */
    if (!pbuf_dma && !iface->bufs) {
        initialize_free_bufs(iface);
        if (iface->bufs == NULL) {
            LOG_ERROR("Failed to preallocate buffers");
            goto error;
        }
    }
    iface->ethif_init = ethif_init;
    return iface;
error:
    return NULL;
}

lwip_iface_t *ethif_new_lwip_driver(ps_io_ops_t io_ops, ps_dma_man_t *pbuf_dma, ethif_driver_init driver,
                                    void *driver_config)
{
    lwip_iface_t *ret;
    lwip_iface_t *iface = malloc(sizeof(*iface));
    if (!iface) {
        LOG_ERROR("Failed to malloc lwip interface");
        return NULL;
    }
    ret = ethif_new_lwip_driver_no_malloc(io_ops, pbuf_dma, driver, driver_config, iface);
    if (!ret) {
        free(iface);
    }
    return ret;
}

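/* Example of bringing up an interface with this glue code, as a minimal
 * sketch only: the driver entry point (ethif_imx6_init), the addresses and
 * the variable names below are illustrative assumptions, not part of this
 * file's API.
 *
 *     static struct netif net;
 *     lwip_iface_t *iface = ethif_new_lwip_driver(io_ops, NULL,
 *                                                 ethif_imx6_init, NULL);
 *     assert(iface);
 *     ip_addr_t ip, mask, gw;
 *     IP4_ADDR(&ip,   192, 168, 1, 2);
 *     IP4_ADDR(&mask, 255, 255, 255, 0);
 *     IP4_ADDR(&gw,   192, 168, 1, 1);
 *     netif_add(&net, &ip, &mask, &gw, iface, iface->ethif_init, ethernet_input);
 *     netif_set_default(&net);
 *     netif_set_up(&net);
 *
 * Passing a ps_dma_man_t as the second argument instead of NULL selects the
 * zero-copy pbuf mode.
 */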
#endif /* CONFIG_LIB_LWIP */