/*
 * Copyright (c) 2014 ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <stdio.h>
#include <sys/param.h>

#include <barrelfish/barrelfish.h>
#include <bulk_transfer/bulk_transfer.h>

#include "bulk_net_backend.h"
#include "bulk_net_transfer.h"

#if BULK_NET_ENABLE_DEBUG_TRANSF
#define BT_DEBUG_TRACE BULK_NET_TRACE
#define BT_DEBUG(fmt, msg...) BULK_NET_DEBUG(fmt, msg)
#else
#define BT_DEBUG(fmt, msg...) do {} while (0)
#define BT_DEBUG_TRACE do {} while (0)
#endif

#if BULK_NET_ENABLE_STATUS_TRANSF
#define BT_STATUS(fmt, msg...) BULK_NET_STATUS(fmt, msg)
#else
#define BT_STATUS(fmt, msg...) do {} while (0)
#endif

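/**
 * Binds a transfer control structure to its remote endpoint: performs the
 * common queue initialization, resolves the remote MAC address via ARP and
 * allocates a local port before precomputing the TX packet headers.
 *
 * @param tc    bulk network transfer control structure
 * @param tx_cb transmit-done callback, forwarded to the e10k queue
 * @param rx_cb receive callback, forwarded to the e10k queue
 */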
errval_t bulk_net_transfer_bind(struct bulk_net_control *tc,
                                void (*tx_cb)(struct bulk_e10k *bu,
                                              void *opaque),
                                void (*rx_cb)(struct bulk_e10k *bu,
                                              struct bulk_net_msgdesc *msg))
{
    errval_t err;

    err = bulk_net_transfer_init(tc, tx_cb, rx_cb);
    if (err_is_fail(err)) {
        return err;
    }

    err = bulk_e10k_arp_lookup(&tc->transfer, tc->r_ip, &tc->r_mac);
    if (err_is_fail(err)) {
        return err;
    }

    err = bulk_e10k_port_alloc(&tc->transfer, &tc->l_port);
    if (err_is_fail(err)) {
        return err;
    }

    bulk_net_transfer_update_tx_headers(tc);
    return err;
}

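/**
 * Sets up a transfer control structure for the exporting side: performs the
 * common queue initialization and registers the local port with the e10k
 * queue.
 *
 * @param tc    bulk network transfer control structure
 * @param tx_cb transmit-done callback, forwarded to the e10k queue
 * @param rx_cb receive callback, forwarded to the e10k queue
 */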
errval_t bulk_net_transfer_export(struct bulk_net_control *tc,
                                  void (*tx_cb)(struct bulk_e10k *bu,
                                                void *opaque),
                                  void (*rx_cb)(struct bulk_e10k *bu,
                                                struct bulk_net_msgdesc *msg))
{
    errval_t err;

    err = bulk_net_transfer_init(tc, tx_cb, rx_cb);
    if (err_is_fail(err)) {
        return err;
    }

    err = bulk_e10k_port_add(&tc->transfer, tc->l_port);
    if (err_is_fail(err)) {
        return err;
    }

    return err;
}

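/**
 * Common initialization for bind and export: brings up the e10k queue,
 * allocates frames for packet headers and internal buffers, initializes the
 * transmit/receive buffer stacks, pre-populates the receive queue and records
 * the local MAC and IP address.
 *
 * @param tc    bulk network transfer control structure
 * @param tx_cb transmit-done callback, forwarded to the e10k queue
 * @param rx_cb receive callback, forwarded to the e10k queue
 */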
errval_t bulk_net_transfer_init(struct bulk_net_control *tc,
                                void (*tx_cb)(struct bulk_e10k *bu,
                                              void *opaque),
                                void (*rx_cb)(struct bulk_e10k *bu,
                                              struct bulk_net_msgdesc *msg))
{
    errval_t err;
    size_t i;
    size_t n = BULK_NET_TRANSFER_NUM_DESCS - 1;
    struct receive_buffer *rb;
    struct transmit_buffer *tb;
    void *h_vbase, *i_vbase;
    uintptr_t h_pbase, i_pbase;

    tc->transfer.opaque = tc;

    err = bulk_e10k_init(&tc->transfer, tc->ws, tc->card, tc->queue,
                         tc->buffer_size,
                         BULK_NET_TRANSFER_NUM_DESCS,
                         rx_cb, tx_cb);
    if (err_is_fail(err)) {
        return err;
    }

    stack_alloc_init(&tc->rb_stack, n);
    stack_alloc_init(&tc->tb_stack, n);
    rb = calloc(n, sizeof(*rb));
    tc->tb = tb = calloc(n, sizeof(*tb));
    assert(rb != NULL && tb != NULL);

    err = allocmap_frame(E10K_HDRSZ * n * 2, &h_vbase, &h_pbase, NULL);
    assert(err_is_ok(err));
    err = allocmap_frame(BULK_NET_INTERNAL_BUFER_SIZE * n, &i_vbase, &i_pbase,
                         NULL);
    assert(err_is_ok(err));

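    /* Carve the header frame and the internal buffer frame into
     * per-descriptor slices and push the resulting receive/transmit
     * buffers onto their free stacks. */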
    for (i = 0; i < n; i++) {
        rb[i].hdr_virt = h_vbase;
        rb[i].hdr_phys = h_pbase;
        h_pbase += E10K_HDRSZ;
        h_vbase = (void *) ((uintptr_t) h_vbase + E10K_HDRSZ);

        tb[i].hdr_virt = h_vbase;
        tb[i].hdr_phys = h_pbase;
        tb[i].int_virt = i_vbase;
        tb[i].int_phys = i_pbase;
        h_pbase += E10K_HDRSZ;
        h_vbase = (void *) ((uintptr_t) h_vbase + E10K_HDRSZ);
        i_pbase += BULK_NET_INTERNAL_BUFER_SIZE;
        i_vbase = (void *) ((uintptr_t) i_vbase + BULK_NET_INTERNAL_BUFER_SIZE);

        stack_alloc_free(&tc->rb_stack, rb + i);
        stack_alloc_free(&tc->tb_stack, tb + i);
    }

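    /* Pre-populate the receive queue with temporary initialization buffers
     * (marked by rb->buffer == NULL). */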
    for (uint32_t j = 0; j < tc->buffer_count; ++j) {
        rb = stack_alloc_alloc(&tc->rb_stack);
        rb->buffer = NULL;
        err = allocmap_frame(tc->buffer_size, &rb->virt, &rb->phys, NULL);
        assert(err_is_ok(err));

        err = bulk_e10k_rx_add(&tc->transfer, rb->phys, rb->hdr_phys, rb);
        assert(err_is_ok(err));
    }
    tc->l_mac = tc->transfer.mac;
    bulk_e10k_ip_info(&tc->transfer, &tc->l_ip);
    return err;
}

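/**
 * Fills in the precomputed Ethernet/IPv4/UDP headers of all transmit buffers
 * using the local and remote MAC addresses, IP addresses and UDP ports stored
 * in the control structure.
 */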
void bulk_net_transfer_update_tx_headers(struct bulk_net_control *p)
{
    size_t i;
    struct packet_header *hdr;

    BT_DEBUG("Updating TX headers %"PRIx64"  %"PRIx64"   frst=%"PRIx64"\n",
             p->r_mac, p->l_mac,
             p->tb[BULK_NET_TRANSFER_NUM_DESCS - 2].hdr_phys);

    for (i = 0; i < BULK_NET_TRANSFER_NUM_DESCS - 1; i++) {
        hdr = p->tb[i].hdr_virt;
        memset(hdr, 0, sizeof(*hdr));
        memcpy(hdr->l2.dmac, &p->r_mac, 6);
        memcpy(hdr->l2.smac, &p->l_mac, 6);
        hdr->l2.type = htons(0x0800);       /* ethertype IPv4 */

        hdr->l3.ver_ihl = 5 | (4 << 4);     /* version 4, 20-byte header */
        hdr->l3.ttl = 64;
        hdr->l3.proto = 0x11;               /* UDP */
        hdr->l3.s_ip = htonl(p->l_ip);
        hdr->l3.d_ip = htonl(p->r_ip);

        hdr->l4.s_port = htons(p->l_port);
        hdr->l4.d_port = htons(p->r_port);
    }
}

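/**
 * Prepends the precomputed packet header as part 0 of an outgoing message
 * descriptor and sets the UDP and IP length fields according to the payload
 * size of the remaining parts.
 */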
void bulk_net_transfer_add_header(struct bulk_net_msgdesc *msg)
{
    struct transmit_buffer *tb = msg->parts[1].opaque;
    struct packet_header *h = tb->hdr_virt;
    size_t i;
    size_t len = 0;

    for (i = 1; i < BULK_NET_DESCLEN && msg->parts[i].size != 0; i++) {
        len += msg->parts[i].size;
    }

    msg->parts[0].phys = tb->hdr_phys;
    msg->parts[0].size = sizeof(*h);
    msg->parts[0].opaque = NULL;

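    /* UDP length: payload plus 8-byte UDP header; the IP total length
     * additionally includes the 20-byte IPv4 header. */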
    h->l4.len = htons(len + 8);
    h->l3.len = htons(len + 8 + 20);
}

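/**
 * Clamps the part sizes of a received message descriptor to the UDP payload
 * length, stripping any Ethernet padding from the trailing part(s).
 */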
void bulk_net_transfer_strip_padding(struct bulk_net_msgdesc *msg)
{
    struct receive_buffer *rb = msg->parts[0].opaque;
    struct packet_header *h = rb->hdr_virt;
    size_t len = ntohs(h->l4.len) - 8;
    size_t i;

    for (i = 1; i < BULK_NET_DESCLEN && msg->parts[i].size != 0; i++) {
        msg->parts[i].size = MIN(msg->parts[i].size, len);
        len -= msg->parts[i].size;
    }
}

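/**
 * Hands a receive buffer back to the receive queue. Buffers without an
 * associated bulk buffer (rb->buffer == NULL) are temporary initialization
 * buffers; freeing them is not implemented yet, so they are currently
 * re-enqueued as well (see TODO below).
 */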
void bulk_net_transfer_free_rb(struct bulk_net_control *tc,
                               struct receive_buffer *rb)
{
    if (rb->buffer == NULL) {
        // Temporary initialization buffer -> do not re-enqueue after
        // initialization is done
        if (false) {
            // TODO: free, currently leaking here
            stack_alloc_free(&tc->rb_stack, rb);
            return;
        }
    }

    bulk_e10k_rx_add(&tc->transfer, rb->phys, rb->hdr_phys, rb);
}

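/**
 * Releases all receive buffers referenced by a received message descriptor
 * back to the receive queue.
 */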
void bulk_net_transfer_free_rx(struct bulk_net_control *tc,
                               struct bulk_net_msgdesc *msg)
{
    size_t i;

    for (i = 1; i < BULK_NET_DESCLEN && msg->parts[i].size != 0; i++) {
        bulk_net_transfer_free_rb(tc, msg->parts[i].opaque);
    }
}
