/**
 * \file  block_server_client.c
 * \brief block server client domain
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <stdio.h>
#include <string.h>
#include <assert.h>

#include <barrelfish/barrelfish.h>
#include <barrelfish/sys_debug.h>
#include <bench/bench.h>

/* includes for IP addresses and the lwIP wrappers */
#include <lwip/inet.h>
#include <lwip/init.h>

/* bulk transfer facilities */
#include <bulk_transfer/bulk_transfer.h>
#include <bulk_transfer/bulk_allocator.h>

/* local includes of the block server client */
#include "block_server.h"
#include "local_server.h"
#include "network_common.h"
#include "network_client.h"

/* ======================= run net test facility =========================== */
#if BLOCK_ENABLE_NETWORKING
#if BLOCK_BENCH_ENABLE

static struct bulk_channel *tx_chan;
static struct bulk_channel *rx_chan;

/* counters for test progress */

static volatile uint32_t num_read = 0;
static volatile uint32_t num_written = 0;

//#define BS_TEST_DEBUG(fmt, msg...) debug_printf("%s: "fmt"\n", __func__, msg)
#define BS_TEST_DEBUG(fmt, msg...) do{}while(0)
#define BS_TEST_CTRL(fmt, msg...) debug_printf("TEST-CTRL: "fmt"\n", msg)

/* the pool allocators of the two channels */
static struct bulk_allocator allocator_tx;
static struct bulk_allocator allocator_rx;

/* all-zero metadata used when passing buffers back */
struct bs_meta_data zero_meta;

/* state variables for the events */
static uint32_t tx_bufs_other_side = 0;
static uint32_t pools_assigned = 0;

/* conditional waiting facility */
static volatile uint32_t wait_cond = 0;

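/**
 * \brief spins until a bulk continuation callback clears wait_cond
 *
 * While waiting, events on both bulk channels and on the default waitset are
 * dispatched and the lwIP wrapper gets a chance to do its work, so that the
 * callback which clears the flag can actually be delivered.
 */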
static inline void wait_for_condition(void)
{
    BS_TEST_DEBUG("%s", "Waiting for condition...");
    struct waitset *ws = get_default_waitset();
    while (wait_cond) {
        event_dispatch_non_block(rx_chan->waitset);
        event_dispatch_non_block(tx_chan->waitset);
        event_dispatch_non_block(ws);
        wrapper_perform_lwip_work();
    }
}

/* ------------------------ callbacks for bulk events ---------------------- */

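/**
 * \brief bulk continuation invoked when a pool assignment has been acknowledged
 *
 * Clears wait_cond so that run_test() can proceed with the next setup step.
 */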
static void pool_assigned_cb(void *arg,
                             errval_t err,
                             struct bulk_channel *channel)
{
    BS_TEST_DEBUG(" > channel=%p, success=%i", channel, (err == SYS_ERR_OK));

    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to assign the pool. cannot run test");
    }

    pools_assigned++;

    if (pools_assigned == 2) {
        BS_TEST_CTRL("%s", "both pools added.");
    }

    /* signal the waiting test driver */
    wait_cond = 0;
}

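/**
 * \brief bulk continuation invoked when a passed buffer has been accepted
 *        by the other side
 *
 * Counts the buffers handed over to the server and clears wait_cond once all
 * BLOCK_BENCH_NUMBUFS buffers have been passed.
 */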
static void buffer_passed_cb(void *arg,
                             errval_t err,
                             struct bulk_channel *channel)
{
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to pass the buffer.");
    }

    tx_bufs_other_side++;

    BS_TEST_DEBUG(" > channel=%p, success=%i, tx=%i", channel,
                  (err == SYS_ERR_OK), tx_bufs_other_side);

    if (tx_bufs_other_side == BLOCK_BENCH_NUMBUFS) {
        BS_TEST_CTRL("%s", "all bufs passed. Signaling condition");
        wait_cond = 0;
    }
}

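/**
 * \brief handles a status reply received from the network block server
 *
 * During the benchmark only successful BLOCK_NET_MSG_WRITE replies are
 * expected; anything else is reported as a wrong status reply.
 */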
errval_t testrun_handle_status(enum block_net_msg_type req,
                               uint32_t reqid,
                               enum block_net_err stats)
{
    if (stats != BLOCK_NET_ERR_OK || req != BLOCK_NET_MSG_WRITE) {
        BS_TEST_DEBUG(" > WRONG STATUS REPLY: reqid=%i", reqid);
        return SYS_ERR_OK;
    }

    return SYS_ERR_OK;
}

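/**
 * \brief invoked when the server passes a buffer back after consuming it
 *
 * Each returned buffer corresponds to a completed write request; the buffer
 * is returned to the TX allocator so it can be reused.
 */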
void testrun_bulk_buffer_received(struct bulk_channel *channel,
                                  struct bulk_buffer *buffer,
                                  void *meta)
{
    BS_TEST_DEBUG(" buffer = %p", buffer);
    num_written++;
    bulk_alloc_return_buffer(&allocator_tx, buffer);
}

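/**
 * \brief invoked when the server moves a buffer with read data to this client
 *
 * Verifies that the payload matches the pattern derived from the block id,
 * counts the completed read and passes the buffer back to the server.
 */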
void testrun_bulk_move_received(struct bulk_channel *channel,
                                struct bulk_buffer *buffer,
                                void *meta)
{
    errval_t err;

    struct bs_meta_data *bsmeta = (struct bs_meta_data *) meta;
    unsigned char *data = buffer->address;

    /* blocks were written with the pattern (block_id + 1) % 256 */
    for (uint32_t i = 0; i < BLOCK_BENCH_BUFSIZE; ++i) {
        if (data[i] != (unsigned char) ((bsmeta->block_id + 1) % 256)) {
            BS_TEST_CTRL(" > ERROR: wrong data [%i | %i ]", data[i],
                         (unsigned char) ((bsmeta->block_id + 1) % 256));
            break;
        }
    }

    num_read++;
    BS_TEST_DEBUG(" > block received. value=%i", data[0]);

    err = bulk_channel_pass(channel, buffer, &zero_meta, BULK_CONT_NOP);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Failed to pass back the buffer");
    }
}

/* ------------------------ test control ----------------------------------- */
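/**
 * \brief executes the write and read benchmarks against the block server
 *
 * \param txc           bulk channel used to move write data to the server
 * \param rxc           bulk channel on which the server returns read data
 * \param block_service connection handle passed to block_net_read()
 */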
void run_test(struct bulk_channel *txc, struct bulk_channel *rxc,
              void *block_service)
{
    tx_chan = txc;
    rx_chan = rxc;

    BS_TEST_CTRL("%s", "test started...");

    errval_t err;

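    /*
     * Two pools are allocated: buffers from the TX pool carry the write
     * payloads to the server, while the RX pool is assigned to the RX channel
     * and its buffers are handed to the server so it can return read data.
     */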
    BS_TEST_DEBUG("Allocating two pools of size %i",
                  BLOCK_BENCH_NUMBUFS * BLOCK_BENCH_BUFSIZE);
    err = bulk_alloc_init(&allocator_tx, BLOCK_BENCH_NUMBUFS,
                          BLOCK_BENCH_BUFSIZE, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to allocate the TX pool.");
    }

    err = bulk_alloc_init(&allocator_rx, BLOCK_BENCH_NUMBUFS,
                          BLOCK_BENCH_BUFSIZE, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to allocate the RX pool.");
    }

    BS_TEST_DEBUG("%s", "Assigning pools to the channels");

    struct bulk_continuation cont = {
        .handler = pool_assigned_cb,
        .arg = NULL,
    };

    wait_cond = 1;

    err = bulk_channel_assign_pool(rx_chan, allocator_rx.pool, cont);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to assign pool to the RX channel.");
    }

    wait_for_condition();

    wait_cond = 1;

    err = bulk_channel_assign_pool(tx_chan, allocator_tx.pool, cont);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to assign pool to the TX channel.");
    }

    wait_for_condition();

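    /*
     * Pre-populate the server with RX buffers: every buffer of the RX pool is
     * passed over the RX channel so the server has buffers available to move
     * read data into.
     */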
    BS_TEST_CTRL("%s", "passing buffers over the RX channel.");

    /* from now on the continuation signals completed buffer passes */
    cont.handler = buffer_passed_cb;

    wait_cond = 1;

    struct bulk_buffer *buf;
    for (uint32_t i = 0; i < BLOCK_BENCH_NUMBUFS; ++i) {
        buf = bulk_alloc_new_buffer(&allocator_rx);
        assert(buf != NULL); /* a freshly allocated pool must not run dry */
        err = bulk_channel_pass(rx_chan, buf, NULL, cont);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "failed to pass the buffer");
        }
        /* drain pending events on the RX channel */
        do {
            err = event_dispatch_non_block(rx_chan->waitset);
        } while (err_is_ok(err));
    }

    wait_for_condition();

    BS_TEST_CTRL("%s", "All prepared.");

    BS_TEST_CTRL("%s", "Start with Benchmarks (Writing)");

    cycles_t tsc_start;
    cycles_t results[2];
    uint64_t tscperus;
    bench_ctl_t *ctl;

    err = sys_debug_get_tsc_per_ms(&tscperus);
    assert(err_is_ok(err));
    tscperus /= 1000;   /* TSC ticks per millisecond -> ticks per microsecond */

    ctl = bench_ctl_init(BENCH_MODE_FIXEDRUNS, 2, BLOCK_BENCH_NUMRUNS);

    struct bs_meta_data meta = {
        .block_id = 0,
        .req_id = 1,
    };

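    /*
     * Write benchmark: each run issues BLOCK_BENCH_NUMREQUESTS block writes by
     * moving one TX buffer per request. results[0] records the cycles needed
     * to issue all requests, results[1] additionally includes waiting until
     * every write has been acknowledged (buffer returned, counted in
     * num_written).
     */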
    do {
        BS_TEST_DEBUG("%s", ">>  Starting run");
        num_written = 0;
        tsc_start = rdtsc();
        for (uint32_t i = 0; i < BLOCK_BENCH_NUMREQUESTS; ++i) {
            meta.block_id = (i % BLOCK_COUNT);
            BS_TEST_DEBUG("Writing of block %i, data=%i",
                          (uint32_t) meta.block_id, (i + 1) % 256);
            buf = bulk_alloc_new_buffer(&allocator_tx);
            if (buf == NULL) {
                /* no free buffer: retry this request after yielding */
                BS_TEST_DEBUG("%s", "no buffers. waiting.");
                --i;
                thread_yield();
                continue;
            }
            memset(buf->address, (i + 1) % 256, BLOCK_BENCH_BUFSIZE);
            err = bulk_channel_move(tx_chan, buf, &meta, cont);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "Failed to move block");
            }
            do {
                err = event_dispatch_non_block(tx_chan->waitset);
            } while (err_is_ok(err));
            meta.req_id++;
        }
        results[0] = rdtsc() - tsc_start;
        while (num_written < BLOCK_BENCH_NUMREQUESTS) {
            event_dispatch(tx_chan->waitset);
        }
        results[1] = rdtsc() - tsc_start;
        do {
            err = event_dispatch_non_block(tx_chan->waitset);
        } while (err_is_ok(err));
    } while (!bench_ctl_add_run(ctl, results));


    //bench_ctl_dump_csv(ctl, "", tscperus);
    bench_ctl_dump_analysis(ctl, 0, "", tscperus);
    bench_ctl_dump_analysis(ctl, 1, "", tscperus);
    BS_TEST_CTRL("%s", "Benchmark Finished");

    printf("\n\n");
    BS_TEST_CTRL("%s", "Start with Benchmarks (READING)");

    ctl = bench_ctl_init(BENCH_MODE_FIXEDRUNS, 2, BLOCK_BENCH_NUMRUNS);

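    /*
     * Read benchmark: each run issues BLOCK_BENCH_NUMBATCH_REQUESTS batched
     * read requests of BLOCK_BENCH_READ_BATCHSIZE blocks each via
     * block_net_read(). results[0] records the cycles needed to issue the
     * requests, results[1] additionally includes waiting until all requested
     * blocks have arrived on the RX channel (counted in num_read).
     */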
    do {
        num_read = 0;
        BS_TEST_DEBUG("%s", ">>  Starting run");
        wrapper_perform_lwip_work();
        tsc_start = rdtsc();
        for (uint32_t i = 0; i < BLOCK_BENCH_NUMBATCH_REQUESTS; ++i) {
            meta.block_id = (i % BLOCK_COUNT);
            BS_TEST_DEBUG("Reading of blocks [%i, %i]",
                          (uint32_t) meta.block_id,
                          (uint32_t) meta.block_id + BLOCK_BENCH_READ_BATCHSIZE);
            err = block_net_read(block_service, (uint32_t) meta.block_id,
                                 BLOCK_BENCH_READ_BATCHSIZE,
                                 meta.req_id, cont);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "Failed to issue the read request");
            }
            meta.req_id++;
        }
        results[0] = rdtsc() - tsc_start;

        while (num_read < (BLOCK_BENCH_READ_BATCHSIZE * BLOCK_BENCH_NUMBATCH_REQUESTS)) {
            event_dispatch(rx_chan->waitset);
        }
        results[1] = rdtsc() - tsc_start;

        do {
            err = event_dispatch_non_block(rx_chan->waitset);
        } while (err_is_ok(err));
    } while (!bench_ctl_add_run(ctl, results));


    // bench_ctl_dump_csv(ctl, "", tscperus);
    bench_ctl_dump_analysis(ctl, 0, "", tscperus);
    bench_ctl_dump_analysis(ctl, 1, "", tscperus);
    BS_TEST_CTRL("%s", "Test run finished.");
}
#endif // BLOCK_BENCH_ENABLE
#endif // BLOCK_ENABLE_NETWORKING