1/**
2 * \file
3 * \brief Network server thread of the bulk server
4 */
5
6/*
7 * Copyright (c) 2014 ETH Zurich.
8 * All rights reserved.
9 *
10 * This file is distributed under the terms in the attached LICENSE file.
11 * If you do not find this file, copies can be found by writing to:
12 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
13 */
14
15#include <barrelfish/barrelfish.h>
16#include <barrelfish/waitset.h>
17
18#include <bulk_transfer/bulk_transfer.h>
19#include <bulk_transfer/bulk_net.h>
20
21#include <lwip/tcp.h>
22#include <lwip/init.h>
23#include <arpa/inet.h>
24
25#include "block_server.h"
26#include "network_common.h"
27#include "network_server.h"
28#include "block_storage.h"
29
30#if BULK_NET_BACKEND_PROXY
31#include <bulk_transfer/bulk_allocator.h>
32#include <bulk_transfer/bulk_net_proxy.h>
33#include <bulk_transfer/bulk_local.h>
34
35static struct bulk_allocator allocator_tx;
36static struct bulk_allocator allocator_rx;
37
38#endif
39
40static errval_t bulk_pool_assigned_cb(struct bulk_channel *channel,
41                                      struct bulk_pool *pool);
42static errval_t bulk_pool_removed_cb(struct bulk_channel *channel,
43                                     struct bulk_pool *pool);
44static void bulk_move_received(struct bulk_channel *channel,
45                               struct bulk_buffer *buffer,
46                               void *meta);
47static void bulk_buffer_received(struct bulk_channel *channel,
48                                 struct bulk_buffer *buffer,
49                                 void *meta);
50static void bulk_copy_received(struct bulk_channel *channel,
51                               struct bulk_buffer *buffer,
52                               void *meta);
53static void bulk_copy_released(struct bulk_channel *channel,
54                               struct bulk_buffer *buffer);
55static errval_t bulk_bind_received(struct bulk_channel *channel);
56
/* callbacks for the RX channel (data flowing client -> server, i.e. block
 * writes arrive here as move/copy operations) */
static struct bulk_channel_callbacks bulk_rx_cb = {
    .bind_received = bulk_bind_received,
    .pool_assigned = bulk_pool_assigned_cb,
    .pool_removed = bulk_pool_removed_cb,
    .move_received = bulk_move_received,
    .copy_received = bulk_copy_received };
63
/* callbacks for the TX channel (data flowing server -> client, i.e. block
 * reads are served here; buffers come back via buffer_received) */
static struct bulk_channel_callbacks bulk_tx_cb = {
    .bind_received = bulk_bind_received,
    .pool_assigned = bulk_pool_assigned_cb,
    .pool_removed = bulk_pool_removed_cb,
    .buffer_received = bulk_buffer_received,
    .copy_released = bulk_copy_released };
70
71static void block_send_status_msg(struct block_net_service *c,
72                                  enum block_net_msg_type req,
73                                  uint32_t reqid,
74                                  enum block_net_err stats)
75{
76
77    debug_printf("Sending status message\n");
78
79    uint16_t size = sizeof(struct block_net_msg);
80    struct block_net_msg *msg = malloc(size);
81    assert(msg);
82
83    msg->size = size;
84    msg->type = BLOCK_NET_MSG_STATUS;
85    msg->msg.status.code = stats;
86    msg->msg.status.req = req;
87    msg->msg.status.reqid = reqid;
88    err_t err;
89
90    err = tcp_write(c->tpcb, msg, size, TCP_WRITE_FLAG_COPY);
91    if (err != ERR_OK) {
92        /* TODO error handling */
93        debug_printf("ERROR: tcp_write returned with error %i\n", err);
94    }
95
96    err = tcp_output(c->tpcb);
97    if (err != ERR_OK) {
98        /* TODO: Error handling */
99        debug_printf("ERROR: tcp_output returned with error %i\n", err);
100    }
101
102    /* XXX: assume that the data has already been copied */
103    free(msg);
104}
105
106#if !BULK_NET_BACKEND_PROXY
107/**
108 *
109 */
110static void chan_bind_cb(void *arg, errval_t err, struct bulk_channel *channel)
111{
112    BS_NET_DEBUG_BULK("chan=%p, sucecss=%i", channel, (err == SYS_ERR_OK));
113
114    struct block_net_service *c = (struct block_net_service *) arg;
115
116    if (err_is_fail(err)) {
117        /* there is some thing wrong */
118
119    }
120    c->bound++;
121
122    if (c->bound != 2) {
123        return;
124    }
125    /* both channels are bound we can send a reply back */
126    block_send_status_msg(c, BLOCK_NET_MSG_INIT, 0, SYS_ERR_OK);
127}
128#endif
129
130static errval_t bulk_pool_assigned_cb(struct bulk_channel *channel,
131                                      struct bulk_pool *pool)
132{
133    BS_NET_DEBUG_BULK("chan=%p, pool=%p", channel, pool);
134
135    /* buffers must hold a whole block a once */
136    if (pool->buffer_size < block_storage_get_block_size()) {
137        return BULK_TRANSFER_ALLOC_BUFFER_SIZE;
138    }
139
140    return SYS_ERR_OK;
141}
142
143static errval_t bulk_pool_removed_cb(struct bulk_channel *channel,
144                                     struct bulk_pool *pool)
145{
146    BS_NET_DEBUG_BULK("chan=%p, pool=%p", channel, pool);
147    return SYS_ERR_OK;
148}
149
150static errval_t bulk_bind_received(struct bulk_channel *channel)
151{
152    debug_printf("APP: bind received");
153    return SYS_ERR_OK;
154}
155
156static void bulk_move_received(struct bulk_channel *channel,
157                               struct bulk_buffer *buffer,
158                               void *meta)
159{
160    BS_NET_DEBUG_BULK("chan=%p, buf=%p", channel, buffer);
161
162    struct block_net_service*c = channel->user_state;
163
164    errval_t err;
165
166    struct bs_meta_data *bs_meta = (struct bs_meta_data*) meta;
167    err = block_storage_write(bs_meta->block_id, buffer->address);
168    if (err_is_fail(err)) {
169        block_send_status_msg(c, BLOCK_NET_MSG_WRITE, bs_meta->req_id, err);
170        debug_printf("Failed to update the block!");
171    }
172    err = bulk_channel_pass(channel, buffer, meta, BULK_CONT_NOP);
173    if (err_is_fail(err)) {
174        debug_printf("could not pass back the buffer");
175    }
176#if !BLOCK_BENCH_ENABLE
177    block_send_status_msg(c, BLOCK_NET_MSG_WRITE, bs_meta->req_id, SYS_ERR_OK);
178#endif
179}
180
181static void bulk_buffer_received(struct bulk_channel *channel,
182                                 struct bulk_buffer *buffer,
183                                 void *meta)
184{
185    BS_NET_DEBUG_BULK("chan=%p, buf=%p", channel, buffer);
186    /* add the buffer to the list */
187#if BULK_NET_BACKEND_PROXY
188    errval_t err = bulk_alloc_return_buffer(&allocator_tx, buffer);
189    if (err_is_fail(err)) {
190        debug_printf("ERROR: failed to return buffer");
191    }
192#else
193    block_server_insert_buffer(&bs_bulk_buffers, buffer, channel);
194#endif
195}
196
197/**
198 * callback when a buffer arrives on the bulk channel. this triggers the update
199 * of the block in the block store
200 */
201static void bulk_copy_received(struct bulk_channel *channel,
202                               struct bulk_buffer *buffer,
203                               void *meta)
204{
205    BS_NET_DEBUG_BULK("chan=%p, buf=%p", channel, buffer);
206
207    errval_t err;
208
209    struct bs_meta_data *bs_meta = (struct bs_meta_data*) meta;
210    err = block_storage_write(bs_meta->block_id, buffer->address);
211    if (err_is_fail(err)) {
212        debug_printf("Failed to update the block!");
213    }
214    err = bulk_channel_release(channel, buffer, BULK_CONT_NOP);
215    if (err_is_fail(err)) {
216        debug_printf("could not pass back the buffer");
217    }
218}
219
/**
 * \brief invoked when the peer releases a copy of a buffer
 *
 * \param channel the channel the release arrived on
 * \param buffer  the buffer whose copy was released
 *
 * The server does not hand out copies on the TX channel, so receiving a
 * copy-release notification indicates a protocol violation.
 */
static void bulk_copy_released(struct bulk_channel *channel,
                               struct bulk_buffer *buffer)
{
    BS_NET_DEBUG_BULK("chan=%p, buf=%p", channel, buffer);

    /* fixed typo in the assertion message ("shoud") */
    assert(!"this should not happen... ");
}
227
228/**
229 * \brief handles block read request of a connected client
230 */
231static err_t handle_block_read(struct block_net_service*c,
232                               struct tcp_pcb *tpcb,
233                               size_t start_block,
234                               size_t count,
235                               uint32_t reqid,
236                               struct bulk_continuation cont)
237{
238    BS_NET_DEBUG_TRACE
239
240    errval_t err;
241
242    struct bulk_channel *chan = &c->tx_chan;
243
244    struct bs_meta_data *meta_data = malloc(
245                    count * sizeof(struct bs_meta_data));
246    assert(meta_data);
247
248    for (uint32_t i = 0; i < count; ++i) {
249        /* TODO: specify a pool */
250#if BULK_NET_BACKEND_PROXY
251        struct bulk_buffer *buf = bulk_alloc_new_buffer(&allocator_tx);
252#else
253        struct bulk_buffer *buf = block_server_get_buffer(&bs_bulk_buffers,
254                        &c->tx_chan);
255#endif
256        if (!buf) {
257            debug_printf("ERROR: Has no buffers left...(i=%i)\n", i);
258            block_send_status_msg(c, BLOCK_NET_MSG_READ, reqid,
259                                  BLOCK_NET_ERR_NO_BUFS);
260            free(meta_data);
261            return ERR_BUF;
262        }
263
264        err = block_storage_read(start_block + i, buf->address);
265        if (err_is_fail(err)) {
266            debug_printf("ERROR: block id is out of range: %i",
267                         (uint32_t) (start_block + count));
268            block_send_status_msg(c, BLOCK_NET_MSG_READ, reqid,
269                                  BLOCK_NET_ERR_BLOCK_ID);
270        }
271        meta_data[i].block_id = start_block + i;
272        meta_data[i].req_id = reqid;
273        meta_data[i].cont = cont;
274        BS_NET_DEBUG_BULK("bulk_channel_move: chan=%p, buf=%p\n", chan, buf);
275        err = bulk_channel_move(chan, buf, meta_data + i, BULK_CONT_NOP);
276        if (err_is_fail(err)) {
277            block_send_status_msg(c, BLOCK_NET_MSG_READ, reqid, err);
278            debug_printf("channel move failed");
279        }
280    }
281
282    /* XXX: assume that the meta data has been copied... */
283    free(meta_data);
284
285    return ERR_OK;
286}
287
288#if BULK_NET_BACKEND_PROXY
289
290static uint32_t pools_assigned = 0;
291static uint32_t tx_bufs_other_side = 0;
292
293static volatile uint32_t wait_cond = 0;
294
295static inline void wait_for_condition(void)
296{
297    while (wait_cond) {
298        messages_wait_and_handle_next();
299    }
300}
301
302static void pool_assigned_cb(void *arg,
303                             errval_t err,
304                             struct bulk_channel *channel)
305{
306    debug_printf("pool_assigned_cb: sucess=%i\n", (err == SYS_ERR_OK));
307    pools_assigned++;
308
309    if (pools_assigned == 2) {
310        wait_cond = 0;
311    }
312}
313
314static void buffer_passed_cb(void *arg,
315                             errval_t err,
316                             struct bulk_channel *channel)
317{
318    tx_bufs_other_side++;
319    if (tx_bufs_other_side == BULK_NET_PROXY_NUMBUFS) {
320        wait_cond = 0;
321    }
322}
323
/**
 * \brief invoked when one of the two bulk net proxies is connected
 *
 * \param proxy the proxy that connected; its user_state holds the client
 *
 * Once BOTH proxies are connected (c->bound == 2), this performs the rest
 * of the setup synchronously: allocate the RX/TX pools, assign them to the
 * channels, pass all RX buffers to the peer, then send the INIT status
 * reply to the client.
 */
static void net_proxy_connected_cb(struct bulk_net_proxy *proxy)
{
    errval_t err;

    struct block_net_service *c = (struct block_net_service *) proxy->user_state;
    c->bound++;

    BS_NET_DEBUG_BULK(" > Channel Connected. #conn=%i", c->bound);

    if (c->bound != 2) {
        /* wait for the second proxy connection */
        return;
    }

    BS_NET_DEBUG_BULK("%s", "all channels connected. allocating pools\n");
    err = bulk_alloc_init(&allocator_tx, BULK_NET_PROXY_NUMBUFS,
                          BULK_NET_PROXY_BUFSIZE, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Failed to allocate pool\n");
    }

    err = bulk_alloc_init(&allocator_rx, BULK_NET_PROXY_NUMBUFS,
                          BULK_NET_PROXY_BUFSIZE, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Failed to allocate pool");
    }
    /* the same continuation is reused for both pool assignments; the
     * callback counts to two before clearing the wait condition */
    struct bulk_continuation cont = {
        .handler = pool_assigned_cb,
        .arg = NULL, };

    BS_NET_DEBUG_BULK("%s", " > Assigning pools to channels");
    err = bulk_channel_assign_pool(&c->rx_chan, allocator_rx.pool, cont);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to assign pool to channel");
    }
    err = bulk_channel_assign_pool(&c->tx_chan, allocator_tx.pool, cont);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Failed to assign pool to channel");
    }

    /* block until pool_assigned_cb has seen both acknowledgements */
    wait_cond = 1;
    wait_for_condition();

    BS_NET_DEBUG_BULK("%s", "Passing buffer to RX channel");
    cont.handler = buffer_passed_cb;
    struct bulk_buffer *buf = bulk_alloc_new_buffer(&allocator_rx);
    wait_cond = 1;
    /* drain the RX allocator: every buffer is handed to the peer so it can
     * send block data to us; loop ends when the allocator is empty */
    while (buf) {
        err = bulk_channel_pass(&c->rx_chan, buf, NULL, cont);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "failed to pass the buffer");
        }
        buf = bulk_alloc_new_buffer(&allocator_rx);
        thread_yield();
    }

    /* block until buffer_passed_cb has counted all buffers */
    wait_for_condition();

    BS_NET_DEBUG_BULK("%s", "All buffers passed.");

    block_send_status_msg(c, BLOCK_NET_MSG_INIT, 0, BLOCK_NET_ERR_OK);

    BS_NET_DEBUG_BULK("%s", "Initialization done.\n\n\n");
}
387#endif
388
389/**
390 * \brief handles the initialization of a new bulk channel
391 */
392static err_t handle_init(struct block_net_service *c,
393                         struct tcp_pcb *tpcb,
394                         struct bulk_net_endpoint_descriptor* rx_ep,
395                         struct bulk_net_endpoint_descriptor* tx_ep)
396{
397    BS_NET_DEBUG_TRACE
398
399    errval_t err;
400
401    if (c->rx_chan.state != BULK_STATE_UNINITIALIZED
402
403    || c->tx_chan.state != BULK_STATE_UNINITIALIZED) {
404        /* this is an error, already initialized */
405        debug_printf("Notice: channels already initialized.\n");
406        return ERR_OK;
407    }
408    c->bound = 0;
409#if BULK_NET_BACKEND_PROXY
410    BS_NET_DEBUG_NET("%s", "creating bulk net proxy channel");
411
412    debug_printf("initializing endpoints\n");
413    bulk_local_init_endpoint(&c->rx_ep, NULL);
414    bulk_local_init_endpoint(&c->tx_ep, NULL);
415
416    struct bulk_channel_setup chan_setup = {
417        .direction = BULK_DIRECTION_TX,
418        .role = BULK_ROLE_MASTER,
419        .trust = BULK_TRUST_FULL,
420        .meta_size = sizeof(struct bs_meta_data),
421        .waitset = get_default_waitset(),
422        .user_state = c, };
423
424    err = bulk_channel_create(&c->tx_chan,
425                              (struct bulk_endpoint_descriptor *) &c->tx_ep,
426                              &bulk_tx_cb, &chan_setup);
427    if (err_is_fail(err)) {
428        bulk_channel_destroy(&c->tx_chan, BULK_CONT_NOP);
429        debug_printf("ERROR: Failed to create the TX channel\n");
430        return err;
431    }
432
433    chan_setup.direction = BULK_DIRECTION_RX;
434    err = bulk_channel_create(&c->rx_chan,
435                              (struct bulk_endpoint_descriptor *) &c->rx_ep,
436                              &bulk_rx_cb, &chan_setup);
437    if (err_is_fail(err)) {
438        bulk_channel_destroy(&c->tx_chan, BULK_CONT_NOP);
439        debug_printf("ERROR: Failed to create the RX channel\n");
440        return err;
441    }
442
443    bulk_local_init_endpoint(&c->rx_p_ep, &c->rx_chan);
444    bulk_local_init_endpoint(&c->tx_p_ep, &c->tx_chan);
445
446    c->tx_proxy.user_state = c;
447    c->rx_proxy.user_state = c;
448    /* XXX: tx_ep->ip.addr */
449
450    BS_NET_DEBUG_BULK("bulk net proxy connect RX port=%i\n", tx_ep->port);
451    err = bulk_net_proxy_connect(&c->tx_proxy, &c->tx_p_ep.generic,
452                                 c->tx_chan.waitset, BLOCK_SIZE, "e10k",
453                                 BLOCK_NET_TX_QUEUE,
454                                 ntohl(tx_ep->ip.addr), tx_ep->port,
455                                 net_proxy_connected_cb);
456    if (err_is_fail(err)) {
457        debug_printf("ERROR: failed to create net proxy\n");
458        return err;
459    }
460
461    BS_NET_DEBUG_BULK("bulk net proxy connect RX port=%i\n", rx_ep->port);
462    err = bulk_net_proxy_connect(&c->rx_proxy, &c->rx_p_ep.generic,
463                                 c->rx_chan.waitset, BLOCK_SIZE, "e10k",
464                                 BLOCK_NET_RX_QUEUE,
465                                 ntohl(tx_ep->ip.addr), rx_ep->port,
466                                 net_proxy_connected_cb);
467    if (err_is_fail(err)) {
468        debug_printf("ERROR: failed to create net proxy\n");
469        return err;
470    }
471
472#else
473    struct bulk_net_ep_setup ep_setup = {
474        .port = rx_ep->port,
475        .ip.addr = ntohl(rx_ep->ip.addr),
476        .queue = BLOCK_NET_RX_QUEUE,
477        .max_queues = BLOCK_NET_MAX_QUEUES,
478        .buffer_size = BLOCK_SIZE,
479        .buffer_count = BLOCK_NET_BUFFER_COUNT,
480        .no_copy = BULK_NET_BACKEND_NOCOPY};
481
482    /* create the RX endpoint */
483    err = bulk_net_ep_create_remote(&c->rx_ep, &ep_setup);
484    assert(!err_is_fail(err));
485
486    ep_setup.port = tx_ep->port;
487    ep_setup.queue = BLOCK_NET_TX_QUEUE;
488
489    /* create the TX endpoint */
490    err = bulk_net_ep_create_remote(&c->tx_ep, &ep_setup);
491    assert(!err_is_fail(err));
492
493    struct bulk_channel_bind_params params = {
494        .role = BULK_ROLE_GENERIC,
495        .user_state = c,
496        .waitset = get_default_waitset(),
497        .trust = BULK_TRUST_FULL};
498
499    struct bulk_continuation cont = {
500        .arg = c,
501        .handler = chan_bind_cb};
502
503    err = bulk_channel_bind(&c->rx_chan,
504                    (struct bulk_endpoint_descriptor *) &c->rx_ep,
505                    &bulk_rx_cb, &params, cont);
506    if (err_is_fail(err)) {
507        return err;
508    }
509
510    err = bulk_channel_bind(&c->tx_chan,
511                    (struct bulk_endpoint_descriptor *) &c->tx_ep,
512                    &bulk_tx_cb, &params, cont);
513    if (err_is_fail(err)) {
514        /* TODO: teardown channel */
515        return err;
516    }
517#endif
518    BS_NET_DEBUG_NET("%s", "handle init done.");
519    return ERR_OK;
520}
521
522/**
523 * \brief handles the reply of an error in case of unkown request
524 */
525static err_t handle_bad_request(struct block_net_service *c,
526                                struct tcp_pcb *tpcb)
527{
528    BS_NET_DEBUG_TRACE
529
530    return ERR_OK;
531}
532
533#if 0
534/**
535 * \brief handler for disconnect requests
536 */
537static err_t handle_disconnect(struct block_net_service *c, struct tcp_pcb *tpcb)
538{
539    // free up resources
540
541    // close the network connection
542    assert(!"NYI: block_net_init");
543    return ERR_OK;
544}
545
546/**
547 * \brief handles the connection event of a new network block server clients
548 */
549static err_t client_connect_cb(void)
550{
551    // setup data structurs for the newly connected client
552
553    assert(!"NYI: block_net_init");
554    return SYS_ERR_OK;
555}
556#endif
557
558static struct tcp_pcb *server_pcb;
559
560static err_t bs_net_recv(void *arg,
561                         struct tcp_pcb *tpcb,
562                         struct pbuf *p,
563                         err_t err)
564{
565    BS_NET_DEBUG_TRACE
566
567    if (p == NULL) {
568        /* connection closed. clean up and EXIT the program. */
569        tcp_close(tpcb);
570        assert(!"NYI: cleaning up the resources!");
571        exit(EXIT_SUCCESS);
572    }
573
574    if ((err != ERR_OK) || !p) {
575        /* there was an error.. */
576    }
577
578    struct block_net_msg *msg = (struct block_net_msg *) p->payload;
579
580    if (p->tot_len != msg->size) {
581        /* some thing wrong... */
582    }
583
584    struct block_net_service *c = (struct block_net_service *) arg;
585    err_t reterr;
586
587    switch (msg->type) {
588        case BLOCK_NET_MSG_INIT:
589            reterr = handle_init(c, tpcb, &msg->msg.setup.rx_ep,
590                                 &msg->msg.setup.tx_ep);
591            break;
592        case BLOCK_NET_MSG_READ:
593            reterr = handle_block_read(c, tpcb, msg->msg.read.block_id,
594                                       msg->msg.read.count,
595                                       msg->msg.read.req_id,
596                                       msg->msg.read.cont);
597            break;
598        default:
599            debug_printf("Received unknown request.");
600            reterr = handle_bad_request(c, tpcb);
601            break;
602    }
603
604    tcp_recved(tpcb, p->tot_len);
605
606    pbuf_free(p);
607
608    return ERR_OK;
609}
610
611/*
612 * This function is called periodically from TCP.
613 * and is also responsible for taking care of stale connections.
614 */
615static err_t bs_net_poll(void *arg, struct tcp_pcb *tpcb)
616{
617    // BS_NET_DEBUG_TRACE
618
619    return ERR_OK;
620}
621
622static void bs_net_err(void *arg, err_t err)
623{
624    BS_NET_DEBUG_TRACE
625
626    assert(!"NYI: bs_net_err");
627}
628
629static err_t bs_net_accept(void *arg, struct tcp_pcb *tpcb, err_t err)
630{
631    BS_NET_DEBUG_TRACE
632
633#if TCP_LISTEN_BACKLOG
634    /* Decrease the listen backlog counter */
635    struct tcp_pcb_listen *lpcb = (struct tcp_pcb_listen*)arg;
636    tcp_accepted(lpcb);
637#endif
638    tcp_setprio(tpcb, TCP_PRIO_NORMAL);
639
640    struct block_net_service *c = malloc(sizeof(struct block_net_service));
641    if (!c) {
642        debug_printf("Failed to allocate memory for client struct\n");
643        return ERR_MEM;
644    }
645
646    memset(c, 0, sizeof(*c));
647
648    c->tpcb = tpcb;
649
650    tcp_arg(tpcb, c);
651
652    tcp_recv(tpcb, bs_net_recv);
653    tcp_err(tpcb, bs_net_err);
654    tcp_poll(tpcb, bs_net_poll, 4);
655
656    return ERR_OK;
657}
658
659/**
660 * \brief initializes the network server of the block service
661 */
662errval_t block_net_init(uint16_t port)
663{
664    BS_NET_DEBUG_TRACE
665
666    BS_NET_DEBUG_NET("%s", "initializing lwip...");
667    if (lwip_init("e10k", 1) == false) {
668        debug_printf("ERROR: lwip_init_auto failed!\n");
669        return 1;
670    }
671
672    server_pcb = tcp_new();
673    if (server_pcb == NULL) {
674        return LWIP_ERR_MEM;
675    }
676
677    BS_NET_DEBUG_NET("binding to port %i", port);
678    err_t e = tcp_bind(server_pcb, IP_ADDR_ANY, port);
679    if (e != ERR_OK) {
680        if (e == ERR_USE) {
681            assert(!"TODO: Change the port. port is in use\n");
682        }
683        printf("ERROR: tcp_bind failed!\n");
684        return 2;
685    }
686
687    return SYS_ERR_OK;
688}
689
690static bool server_running = false;
691
692/**
693 * \brief starts the network server of block service to accept requests
694 */
695errval_t block_net_start(void)
696{
697    BS_NET_DEBUG_TRACE
698
699    errval_t err;
700
701    server_pcb = tcp_listen(server_pcb);
702    if (server_pcb == NULL) {
703        printf("ERROR: tcp_listen failed!\n");
704        return 1;
705    }
706
707    tcp_arg(server_pcb, server_pcb);
708    tcp_accept(server_pcb, bs_net_accept);
709
710    server_running = true;
711
712    struct waitset *ws = get_default_waitset();
713    while (true) {
714        err = event_dispatch_non_block(ws);
715        if (err != LIB_ERR_NO_EVENT) {
716            if (err_is_fail(err)) {
717                DEBUG_ERR(err, "in event_dispatch");
718                break;
719            }
720        }
721
722        wrapper_perform_lwip_work();
723        /*err = event_dispatch(ws);
724         if (err_is_fail(err)) {
725         DEBUG_ERR(err, "in event_dispatch");
726         break;
727         }*/
728    }
729
730    return SYS_ERR_OK;
731}
732
733/**
734 * \brief stops the request handling of the network block service
735 */
736errval_t block_net_stop(void)
737{
738    BS_NET_DEBUG_TRACE
739
740    server_running = false;
741    return SYS_ERR_OK;
742}
743
744/**
745 * \brief lookup of the block server connection based on the requested block
746 *
747 * The client may be connected to multiple network block servers. The request
748 * needs to be forwarded to the correct block server based in the requested
749 * block id.
750 *
751 * XXX: Supply the block server ID instead? or just say there is one block server?
752 */
753struct block_net_server *block_net_server_lookup(size_t block_start)
754{
755    assert(!"NYI: block_net_server_lookup");
756    return NULL;
757}
758