1/*
2 * Copyright (c) 2014 ETH Zurich.
3 * All rights reserved.
4 *
5 * This file is distributed under the terms in the attached LICENSE file.
6 * If you do not find this file, copies can be found by writing to:
7 * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
8 */
9#include <barrelfish/barrelfish.h>
10#include <barrelfish/spawn_client.h>
11
12#include <flounder/flounder_txqueue.h>
13#include <spawndomain/spawndomain.h>
14
15#include <xeon_phi/xeon_phi.h>
16#include <xeon_phi/xeon_phi_client.h>
17
18#include <bomp_internal.h>
19#include <xomp/xomp.h>
20#include <xomp_debug.h>
21#include <xomp_gateway.h>
22#include <xomp_gateway_client.h>
23
24#if XOMP_BENCH_WORKER_EN
25#include <bench/bench.h>
26#endif
27
28#include <if/xomp_defs.h>
29
/// XOMP control channel binding to the master (valid once is_bound is set)
static struct xomp_binding *xbinding;

/// flag indicating if the client is bound to the master; written by the
/// bind callback, polled from the message loop in xomp_worker_init
static volatile uint8_t is_bound = 0x0;

/// messaging frame capability (handed to us in the ARGCN slot at spawn)
static struct capref msgframe;

/// virtual address where the messaging frame is mapped
static void *msgbuf;

/// pointer to the thread local storage (located inside the mapped frame)
static void *tls;

/// service iref of the master (non-zero only if this is a local worker)
static iref_t svc_iref;

/// our own worker id (may carry the XOMP_WID_GATEWAY_FLAG bit)
static xomp_wid_t worker_id;

/// Flounder TX messaging queue used for all outgoing responses
static struct tx_queue txq;
53
#if XOMP_WORKER_ENABLE_DMA

#include <dma/dma.h>
#include <dma/dma_request.h>
#include <dma/client/dma_client_device.h>
#include <dma/dma_manager_client.h>

#ifdef __k1om__
/// dma device type (on the Xeon Phi card: the card's own DMA engine)
static dma_dev_type_t dma_device_type = DMA_DEV_TYPE_XEON_PHI;
#else
/// dma device type
// NOTE(review): the host-side IOAT device type is commented out; host code
// paths referencing dma_device_type are compiled out (see __k1om__ guards)
//static dma_dev_type_t dma_device_type = DMA_DEV_TYPE_IOAT;
#endif
/// dma client device handle used for frame-replication memcpy requests
static struct dma_client_device *dma_dev;
#endif
71
/**
 * \brief Flounder TX queue message state representation
 *
 * Extends the generic txq_msg_st with the per-message arguments needed by
 * the send handlers below (currently only the done notification payload).
 */
struct xomp_msg_st {
    struct txq_msg_st common;

    /* union of arguments */
    union {
        struct {
            uint64_t arg;   ///< argument echoed back in done_with_arg
            uint64_t id;    ///< work item id the notification refers to
        } done_notify;
    } args;
};
86
#if XOMP_WORKER_ENABLE_DMA

/// completion flag for the in-flight replication DMA transfer; written by
/// the DMA callback, polled from the message loop in replicate_frame()
static volatile uint8_t dma_replication_done = 0;

/**
 * \brief completion callback for the replication DMA request
 *
 * \param err  outcome of the DMA transfer (panics on failure)
 * \param id   id of the completed DMA request (unused)
 * \param arg  user argument (always NULL here)
 */
static void dma_replication_cb(errval_t err,
                               dma_req_id_t id,
                               void *arg)
{
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "dma transfer failed\n");
    }
    dma_replication_done = 1;
}

#endif
102
103
/**
 * \brief replicates the contents of a frame into a newly allocated frame
 *        using the DMA engine
 *
 * \param addr   virtual address associated with the frame
 *               NOTE(review): currently unused inside this function
 * \param frame  in: capability of the source frame
 *               out: capability of the freshly allocated replica
 *
 * \returns SYS_ERR_OK on success, errval on failure
 *
 * NOTE(review): on the error paths below the allocated replica frame and
 * the DMA memory registrations are not cleaned up — potential leak.
 */
static errval_t replicate_frame(lvaddr_t addr, struct capref *frame)
{
    errval_t err;

#if XOMP_BENCH_WORKER_EN
    cycles_t repl_timer = bench_tsc();
#endif

    /* obtain physical base and size of the source frame */
    struct frame_identity id;
    err = frame_identify(*frame, &id);
    if (err_is_fail(err)) {
        return err;
    }

    XWR_DEBUG("Replicating frame: [%016lx]\n", id.base);

    /* allocate a replica of at least the same size */
    struct capref replicate;
    err = frame_alloc(&replicate, id.bytes, NULL);
    if (err_is_fail(err)) {
        return err;
    }

    XWR_DEBUG("registering memory with DMA service\n");

#if XOMP_BENCH_WORKER_EN
    cycles_t register_timer = bench_tsc();
#endif
    /* both source and destination must be registered with the DMA driver */
    err = dma_register_memory((struct dma_device *) dma_dev, *frame);
    if (err_is_fail(err)) {
        return err;
    }

    err = dma_register_memory((struct dma_device *) dma_dev, replicate);
    if (err_is_fail(err)) {
        return err;
    }

#if XOMP_BENCH_WORKER_EN
    cycles_t register_timer_end = bench_tsc();
#endif

    /* memcpy request: source is the original frame; dst filled in below */
    struct dma_req_setup setup = {
        .done_cb = dma_replication_cb,
        .cb_arg = NULL,
        .args = {
            .memcpy = {
                .src = id.base,
                .bytes = id.bytes
            }
        }
    };

    err = frame_identify(replicate, &id);
    if (err_is_fail(err)) {
        return err;
    }
    setup.args.memcpy.dst = id.base;

    dma_replication_done = 0x0;

    XWR_DEBUG("DMA request for replication\n");

    err = dma_request_memcpy((struct dma_device *)dma_dev, &setup, NULL);
    if (err_is_fail(err)) {
        return err;
    }

    /* spin on the completion flag set by dma_replication_cb */
    while (!dma_replication_done) {
        messages_wait_and_handle_next();
    }

    XWR_DEBUG("Replication done.\n");

    /* hand the replica back to the caller */
    *frame = replicate;

#if XOMP_BENCH_WORKER_EN
    cycles_t timer_end = bench_tsc();
    debug_printf("%lx replication took %lu cycles, %lu ms\n", worker_id,
                 timer_end - repl_timer, bench_tsc_to_ms(timer_end - repl_timer));
    debug_printf("%lx register mem took %lu cycles, %lu ms\n", worker_id,
                 register_timer_end - register_timer, bench_tsc_to_ms(register_timer_end - register_timer));
#endif

    return SYS_ERR_OK;
}
189
190#ifdef __k1om__
191
192/*
193 * ----------------------------------------------------------------------------
194 * Xeon Phi Channel callbacks
195 * ----------------------------------------------------------------------------
196 */
/**
 * \brief Xeon Phi channel open callback: maps a frame sent by another domain
 *
 * \param domain   domain id of the sender
 * \param usrdata  user supplied data; for SHARED_RW/REPL_RW frames this is
 *                 the fixed virtual address the frame must be mapped at
 * \param frame    capability of the frame to be mapped
 * \param type     xomp_frame_type_t describing how to map the frame
 *
 * \returns SYS_ERR_OK on success, errval on failure
 */
static errval_t msg_open_cb(xphi_dom_id_t domain,
                            uint64_t usrdata,
                            struct capref frame,
                            uint8_t type)
{
    errval_t err;

    uint32_t map_flags = 0x0;
    lvaddr_t addr = 0x0;   // 0 means "map anywhere"

    struct frame_identity id;
    err = frame_identify(frame, &id);
    if (err_is_fail(err)) {
        return err;
    }

    XWI_DEBUG("msg_open_cb: from domid:%lx, usrdata:%lx, frame:%lx\n", domain,
              usrdata, id.base);

    switch ((xomp_frame_type_t) type) {
        case XOMP_FRAME_TYPE_MSG:
            map_flags = VREGION_FLAGS_READ_WRITE;
            break;
        case XOMP_FRAME_TYPE_SHARED_RW:
            addr = (lvaddr_t) usrdata;
            map_flags = VREGION_FLAGS_READ_WRITE;
            break;
        case XOMP_FRAME_TYPE_SHARED_RO:
            map_flags = VREGION_FLAGS_READ;
            break;
        case XOMP_FRAME_TYPE_REPL_RW:
            map_flags = VREGION_FLAGS_READ_WRITE;
#if XOMP_WORKER_ENABLE_DMA
            /* DMA path: replicate into a local frame, then map the replica
             * at the requested address via the common code below */
            addr = (lvaddr_t) usrdata;
            err = replicate_frame(addr, &frame);
            if (err_is_fail(err)) {
                return err;
            }
            err = frame_identify(frame, &id);
#else
            /* non-DMA path: map the replica at the requested address now;
             * the original frame is mapped anywhere below and its contents
             * are copied over with memcpy after the switch */
            struct capref replicate;
            err = frame_alloc(&replicate, id.bytes, NULL);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "failed to allocate replicate frame\n");
                return err;
            }
            err = vspace_map_one_frame_fixed_attr((lvaddr_t) usrdata, id.bytes,
                                                  replicate, map_flags, NULL, NULL);
            if (err_is_fail(err)) {
                return err;
            }
            err = frame_identify(replicate, &id);
#endif
            if (err_is_fail(err)) {
                return err;
            }
            break;
        default:
            /* NOTE(review): relies on the USER_PANIC macro expansion to be
             * statement-like despite the missing ';' */
            USER_PANIC("unknown type: %u", type)
            break;
    }
    if (addr) {
        if (worker_id & XOMP_WID_GATEWAY_FLAG) {
            /* gateway workers additionally export the frame so that other
             * workers can request it via xomp_gateway_get_memory() */
            XWR_DEBUG("registering memory with gateway: [%016lx]\n", addr);
            err = xomp_gateway_mem_insert(frame, addr);
            if (err_is_fail(err)) {
                /* todo: cleanup */
                return err;
            }
        }
        err = vspace_map_one_frame_fixed_attr(addr, id.bytes, frame,
                                              map_flags, NULL, NULL);
    } else {
        err = vspace_map_one_frame_attr((void **) &addr, id.bytes, frame,
                                        map_flags, NULL, NULL);
    }
    if (err_is_fail(err)) {
        return err;
    }

#if !XOMP_WORKER_ENABLE_DMA
    /* copy the original frame (mapped at addr) into the replica at usrdata */
    if ((xomp_frame_type_t) type == XOMP_FRAME_TYPE_REPL_RW) {
        memcpy((void *)usrdata, (void *)addr, id.bytes);
    }
#endif

    XWI_DEBUG("msg_open_cb: frame [%016lx] mapped @ [%016lx, %016lx]\n", id.base,
              addr, addr + id.bytes);

    if ((xomp_frame_type_t) type == XOMP_FRAME_TYPE_MSG) {
        USER_PANIC("NYI: initializing messaging");
    }

    return SYS_ERR_OK;
}
292
293
/// Xeon Phi channel callback table; only the open callback is needed here
static struct xeon_phi_callbacks callbacks = {
    .open = msg_open_cb
};
297#endif
298
299/*
300 * ----------------------------------------------------------------------------
301 * XOMP channel send handlers
302 * ----------------------------------------------------------------------------
303 */
304
305
306
307static errval_t gw_req_memory_response_tx(struct txq_msg_st *msg_st)
308{
309    return xomp_gw_req_memory_response__tx(msg_st->queue->binding, TXQCONT(msg_st),
310                                           msg_st->err);
311}
312
313static errval_t add_memory_response_tx(struct txq_msg_st *msg_st)
314{
315    return xomp_add_memory_response__tx(msg_st->queue->binding, TXQCONT(msg_st),
316                                        msg_st->err);
317}
318
319static errval_t done_notify_tx(struct txq_msg_st *msg_st)
320{
321    struct xomp_msg_st *st = (struct xomp_msg_st *) msg_st;
322
323    return xomp_done_notify__tx(msg_st->queue->binding, TXQCONT(msg_st),
324                                st->args.done_notify.id, msg_st->err);
325}
326
327static errval_t done_with_arg_tx(struct txq_msg_st *msg_st)
328{
329    struct xomp_msg_st *st = (struct xomp_msg_st *) msg_st;
330
331    return xomp_done_with_arg__tx(msg_st->queue->binding, TXQCONT(msg_st),
332                                  st->args.done_notify.id,
333                                  st->args.done_notify.arg, msg_st->err);
334}
335
336
337/*
338 * ----------------------------------------------------------------------------
339 * XOMP channel receive handlers
340 * ----------------------------------------------------------------------------
341 */
342
343
344static void gw_req_memory_call_rx(struct xomp_binding *b,
345                                  uint64_t addr,
346                                  uint8_t type)
347{
348    XWI_DEBUG("gw_req_memory_call_rx: addr:%lx, tyep: %u\n", addr, type);
349
350#if XOMP_BENCH_WORKER_EN
351    cycles_t mem_timer = bench_tsc();
352#endif
353
354    struct txq_msg_st *msg_st = txq_msg_st_alloc(&txq);
355    assert(msg_st != NULL);
356
357    struct capref frame;
358    if (type == XOMP_FRAME_TYPE_REPL_RW) {
359        type = XOMP_FRAME_TYPE_SHARED_RW;
360    }
361    assert(!(worker_id & XOMP_WID_GATEWAY_FLAG));
362
363    msg_st->send = gw_req_memory_response_tx;
364    msg_st->cleanup = NULL;
365
366    XWR_DEBUG("Requesting frame from gateway: [%016lx]\n", usrdata);
367
368    msg_st->err = xomp_gateway_get_memory(addr, &frame);
369    if (err_is_fail(msg_st->err)) {
370        txq_send(msg_st);
371        return;
372    }
373
374    vregion_flags_t map_flags;
375
376    switch ((xomp_frame_type_t) type) {
377        case XOMP_FRAME_TYPE_MSG:
378            map_flags = VREGION_FLAGS_READ_WRITE;
379            break;
380        case XOMP_FRAME_TYPE_SHARED_RW:
381        case XOMP_FRAME_TYPE_REPL_RW:
382            map_flags = VREGION_FLAGS_READ_WRITE;
383            break;
384        case XOMP_FRAME_TYPE_SHARED_RO:
385            map_flags = VREGION_FLAGS_READ;
386            break;
387        default:
388            USER_PANIC("unknown type: %u", type)
389            break;
390    }
391
392    struct frame_identity id;
393    msg_st->err = frame_identify(frame, &id);
394    if (err_is_fail(msg_st->err)) {
395        txq_send(msg_st);
396        return;
397    }
398
399    if (addr) {
400        msg_st->err = vspace_map_one_frame_fixed_attr(addr, id.bytes,
401                                                      frame, map_flags, NULL, NULL);
402    } else {
403        void *map_addr;
404        msg_st->err = vspace_map_one_frame_attr(&map_addr, id.bytes,
405                                                frame, map_flags, NULL, NULL);
406    }
407
408#if XOMP_BENCH_WORKER_EN
409    mem_timer = bench_tsc() - mem_timer;
410    debug_printf("%lx mem request %016lx took  %lu cycles, %lu ms\n", worker_id,
411                 addr, mem_timer, bench_tsc_to_ms(mem_timer));
412#endif
413
414    txq_send(msg_st);
415}
416
417static void add_memory_call_rx(struct xomp_binding *b,
418                               struct capref frame,
419                               uint64_t addr,
420                               uint8_t type)
421{
422    XWI_DEBUG("add_memory_call_rx: addr:%lx, tyep: %u\n", addr, type);
423
424    struct txq_msg_st *msg_st = txq_msg_st_alloc(&txq);
425    assert(msg_st != NULL);
426
427    msg_st->send = add_memory_response_tx;
428    msg_st->cleanup = NULL;
429
430    uint32_t map_flags = 0x0;
431
432    switch ((xomp_frame_type_t) type) {
433        case XOMP_FRAME_TYPE_MSG:
434            map_flags = VREGION_FLAGS_READ_WRITE;
435            break;
436        case XOMP_FRAME_TYPE_SHARED_RW:
437            map_flags = VREGION_FLAGS_READ_WRITE;
438            break;
439        case XOMP_FRAME_TYPE_SHARED_RO:
440            map_flags = VREGION_FLAGS_READ;
441            break;
442        default:
443            USER_PANIC("unknown type: %u", type)
444            break;
445    }
446    struct frame_identity id;
447    msg_st->err = frame_identify(frame, &id);
448    if(err_is_fail(msg_st->err)) {
449        txq_send(msg_st);
450        return;
451    }
452
453#if XOMP_WORKER_ENABLE_DMA
454    if (0) {
455        // todo: replicate frame on the same node if needed..
456        replicate_frame(addr, &frame);
457    }
458#endif
459
460#if XOMP_BENCH_WORKER_EN
461    cycles_t map_start = bench_tsc();
462#endif
463    if (addr) {
464        msg_st->err = vspace_map_one_frame_fixed_attr(addr, id.bytes,
465                                                      frame, map_flags, NULL, NULL);
466    } else {
467        void *map_addr;
468        msg_st->err = vspace_map_one_frame_attr(&map_addr, id.bytes,
469                                                frame, map_flags, NULL, NULL);
470    }
471#if XOMP_BENCH_WORKER_EN
472    cycles_t timer_end = bench_tsc();
473    debug_printf("%lx mem map %016lx took  %lu cycles, %lu ms\n", worker_id, addr,
474                     timer_end - map_start, bench_tsc_to_ms(timer_end - map_start));
475#endif
476
477    txq_send(msg_st);
478}
479
/**
 * \brief RX handler: execute a work item on behalf of the master
 *
 * \param b      XOMP binding the request arrived on
 * \param fn     function address, or function-table index if the
 *               XOMP_FN_INDEX_FLAG bit is set
 * \param arg    optional argument; if non-zero, the reply carries it back
 * \param id     id of this work item, echoed in the done notification
 * \param flags  work flags (currently unused here)
 *
 * Always sends a done_notify / done_with_arg reply, with the error code on
 * failure of the symbol lookup.
 */
static void do_work_rx(struct xomp_binding *b,
                       uint64_t fn,
                       uint64_t arg,
                       uint64_t id,
                       uint64_t flags)
{
    errval_t err;

    XWP_DEBUG("do_work_rx: fn:%lx, id:%lx\n", fn, id);

#if XOMP_BENCH_WORKER_EN
    cycles_t work_timer = bench_tsc();
#endif

    struct txq_msg_st *msg_st = txq_msg_st_alloc(&txq);
    assert(msg_st != NULL);

    msg_st->err = SYS_ERR_OK;

    /* the work descriptor lives at the start of the TLS area of the frame */
    struct bomp_work *work = tls;

    XWP_DEBUG("do_work_rx: threadid = %u, nthreads = %u\n", work->thread_id,
              work->num_threads);

    g_bomp_state->num_threads = work->num_threads;

    struct xomp_msg_st *st = (struct xomp_msg_st *) msg_st;
    st->args.done_notify.id = id;

    /* choose the reply flavor: with or without the argument */
    if (arg) {
        msg_st->send = done_with_arg_tx;
        st->args.done_notify.arg = arg;
    } else {
        msg_st->send = done_notify_tx;
    }

    /* resolve a function-table index into a real address if needed */
    if (fn & XOMP_FN_INDEX_FLAG) {
        uint32_t idx = fn & ~XOMP_FN_INDEX_FLAG;
        char *fn_name;
        err = spawn_symval_lookup_idx(idx, &fn_name, &fn);
        if (err_is_fail(err)) {
            msg_st->err = err;
            txq_send(msg_st);
            return;
        }
        XWP_DEBUG("do_work_rx: function index %u -> %s\n", idx, fn_name);
    }

    xomp_worker_fn_t fnct = (xomp_worker_fn_t) fn;
    XWP_DEBUG("do_work_rx: calling fnct %p with argument %p\n", fnct, work->data);

    /* NOTE(review): 'num_vtreads' — presumably the number of virtual threads
     * this worker emulates; confirm against struct bomp_work (sibling code
     * above uses 'num_threads') */
    for (uint32_t i = 0; i < work->num_vtreads; ++i) {
        fnct(work->data);
        work->thread_id++;
    }



#if XOMP_BENCH_WORKER_EN
    work_timer = bench_tsc() - work_timer;
    debug_printf("%lx work took %lu cycles, %lu ms\n", worker_id, work_timer,
                 bench_tsc_to_ms(work_timer));
#endif

    txq_send(msg_st);
}
546
/// receive handler table installed on the binding to the master
static struct xomp_rx_vtbl rx_vtbl = {
    .gw_req_memory_call = gw_req_memory_call_rx,
    .add_memory_call = add_memory_call_rx,
    .do_work = do_work_rx
};
552
553/*
554 * ----------------------------------------------------------------------------
555 * XOMP channel connect handler
556 * ----------------------------------------------------------------------------
557 */
558
559/**
560 * \brief XOMP channel connect callback called by the Flounder backend
561 *
562 * \param st    Supplied worker state
563 * \param err   outcome of the connect attempt
564 * \param xb    XOMP Flounder binding
565 */
static void master_bind_cb(void *st,
                           errval_t err,
                           struct xomp_binding *xb)
{
    XWI_DEBUG("bound to master: %s\n", err_getstring(err));

    /* initialize the TX queue before exposing the binding */
    txq_init(&txq, xb, xb->waitset, (txq_register_fn_t) xb->register_send,
             sizeof(struct xomp_msg_st));

    xb->rx_vtbl = rx_vtbl;
    xbinding = xb;

    /* must be set last: xomp_worker_init polls this flag and assumes the
     * binding is fully initialized once it is raised */
    is_bound = 0x1;
}
580
581/*
582 * ============================================================================
583 * Public Interface
584 * ============================================================================
585 */
586
587/**
588 * \brief parses the command line arguments to extract
589 *
590 * \param argc  argument count
591 * \param argv  argument values
592 * \param wid   returns the XOMP worker ID
593 *
594 * \returns SYS_ERR_OK iff the command line arguments were parsed succecssfully
595 *          XOMP_ERR_INVALID_WORKER_ARGS if there were no XOMP worker argumetnts
596 *          errval on error
597 *
598 */
599errval_t xomp_worker_parse_cmdline(uint8_t argc,
600                                   char *argv[],
601                                   xomp_wid_t *wid)
602{
603    XWI_DEBUG("xomp_worker_parse_cmdline\n");
604
605    xomp_wid_t retwid = 0;
606    uint8_t parsed = 0;
607    uint8_t is_worker = 0x0;
608    iref_t iref = 0x0;
609    for (uint32_t i = 1; argv[i] != NULL; ++i) {
610        if (strcmp(XOMP_WORKER_ARG, argv[i]) == 0) {
611            parsed++;
612            is_worker = 0x1;
613        } else if (strncmp("--wid=", argv[i], 6) == 0) {
614            retwid = strtoul(argv[i] + 6, NULL, 16);
615            parsed++;
616        } else if (strncmp("--iref=", argv[i], 7) == 0) {
617            iref = strtoul(argv[i] + 7, NULL, 16);
618            parsed++;
619        }
620    }
621
622    if (!is_worker) {
623        return XOMP_ERR_BAD_INVOCATION;
624    }
625
626    if (parsed < 2) {
627        return XOMP_ERR_INVALID_WORKER_ARGS;
628    }
629
630    if (iref) {
631        if (parsed != 3) {
632            return XOMP_ERR_INVALID_WORKER_ARGS;
633        }
634        svc_iref = iref;
635    }
636
637    if (wid) {
638        *wid = retwid;
639    }
640
641    return SYS_ERR_OK;
642}
643
644/**
645 * \brief initializes the XOMP worker library
646 *
647 * \param wid   Xomp worker id
648 *
649 * \returns SYS_ERR_OK on success
650 *          errval on failure
651 */
652errval_t xomp_worker_init(xomp_wid_t wid)
653{
654    errval_t err;
655
656    worker_id = wid;
657
658    XWI_DEBUG("initializing worker {%016lx} iref:%u\n", worker_id, svc_iref);
659
660#if XOMP_BENCH_WORKER_EN
661    bench_init();
662#endif
663
664    struct capref frame = {
665        .cnode = cnode_root,
666        .slot = ROOTCN_SLOT_ARGCN
667    };
668
669    struct frame_identity id;
670    err = frame_identify(frame, &id);
671    if (err_is_fail(err)) {
672        return err_push(err, XOMP_ERR_INVALID_MSG_FRAME);
673    }
674
675    size_t frame_size = 0;
676
677    if (svc_iref) {
678        frame_size = XOMP_TLS_SIZE;
679    } else {
680        frame_size = XOMP_FRAME_SIZE;
681        err = spawn_symval_cache_init(0);
682        if (err_is_fail(err)) {
683            return err;
684        }
685    }
686
687    if (id.bytes < XOMP_TLS_SIZE) {
688        return XOMP_ERR_INVALID_MSG_FRAME;
689    }
690
691    msgframe = frame;
692
693    err = vspace_map_one_frame(&msgbuf, frame_size, frame, NULL, NULL);
694    if (err_is_fail(err)) {
695        err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
696    }
697    if (svc_iref) {
698        tls = msgbuf;
699    } else {
700        tls = ((uint8_t *) msgbuf) + XOMP_MSG_FRAME_SIZE;
701    }
702
703    XWI_DEBUG("messaging frame mapped: [%016lx] @ [%016lx]\n", id.base,
704              (lvaddr_t )msgbuf);
705
706    struct bomp_thread_local_data *tlsinfo = malloc(sizeof(*tlsinfo));
707    tlsinfo->thr = thread_self();
708    tlsinfo->work = (struct bomp_work *) tls;
709    tlsinfo->work->data = tlsinfo->work + 1;
710    g_bomp_state->backend.set_tls(tlsinfo);
711
712#ifdef __k1om__
713    if (worker_id & XOMP_WID_GATEWAY_FLAG) {
714        err = xomp_gateway_init();
715    } else {
716        if (!svc_iref) {
717            err = xomp_gateway_bind_svc();
718        } else {
719            err = SYS_ERR_OK;
720        }
721    }
722    if (err_is_fail(err)) {
723        return err;
724    }
725#endif
726
727#ifdef __k1om__
728    if (!svc_iref) {
729        err = xeon_phi_client_init(disp_xeon_phi_id());
730        if (err_is_fail(err)) {
731            err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
732        }
733
734        xeon_phi_client_set_callbacks(&callbacks);
735    }
736#endif
737
738    struct waitset *ws = get_default_waitset();
739
740// XXX: disabling DMA on the host as there is no replication used at this moment
741#if XOMP_WORKER_ENABLE_DMA && defined(__k1om__)
742    /* XXX: use lib numa */
743
744#ifndef __k1om__
745    uint8_t numanode = 0;
746    if (disp_get_core_id() > 20) {
747        numanode = 1;
748    }
749
750    err = dma_manager_wait_for_driver(dma_device_type, numanode);
751    if (err_is_fail(err)) {
752        USER_PANIC_ERR(err, "could not wait for the DMA driver");
753    }
754#endif
755    char svc_name[30];
756#ifdef __k1om__
757    snprintf(svc_name, 30, "%s", XEON_PHI_DMA_SERVICE_NAME);
758#else
759    snprintf(svc_name, 30, "%s.%u", IOAT_DMA_SERVICE_NAME, numanode);
760#endif
761
762    struct dma_client_info dma_info = {
763        .type = DMA_CLIENT_INFO_TYPE_NAME,
764        .device_type = dma_device_type,
765        .args.name = svc_name
766    };
767    err = dma_client_device_init(&dma_info, &dma_dev);
768    if (err_is_fail(err)) {
769        USER_PANIC_ERR(err, "DMA device initialization");
770    }
771#endif
772
773    if (svc_iref) {
774        err = xomp_bind(svc_iref, master_bind_cb, NULL, ws,
775                        IDC_EXPORT_FLAGS_DEFAULT);
776    } else {
777        struct xomp_frameinfo fi = {
778            .sendbase = id.base,
779            .inbuf = ((uint8_t *) msgbuf) + XOMP_MSG_CHAN_SIZE,
780            .inbufsize = XOMP_MSG_CHAN_SIZE,
781            .outbuf = ((uint8_t *) msgbuf),
782            .outbufsize = XOMP_MSG_CHAN_SIZE
783        };
784        err = xomp_connect(&fi, master_bind_cb, NULL, ws,
785        IDC_EXPORT_FLAGS_DEFAULT);
786    }
787
788    if (err_is_fail(err)) {
789        /* TODO: Clean up */
790        return err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
791    }
792
793    XWI_DEBUG("Waiting until bound to master...\n");
794
795    while (!is_bound) {
796        messages_wait_and_handle_next();
797    }
798
799    if (xbinding == NULL) {
800        return XOMP_ERR_WORKER_INIT_FAILED;
801    }
802
803    return SYS_ERR_OK;
804}
805