/**
 * \file
 * \brief Bidirectional UMP channel implementation
 */

/*
 * Copyright (c) 2009, 2010, 2012, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <string.h>
#include <barrelfish/barrelfish.h>
#include <barrelfish/ump_chan.h>
#include <barrelfish/idc_export.h>
#include <if/monitor_defs.h>
#include <barrelfish/waitset.h>
#include <barrelfish/waitset_chan.h>
#include "waitset_chan_priv.h"

#define UMP_MAP_ATTR VREGION_FLAGS_READ_WRITE

#ifndef CONFIG_INTERCONNECT_DRIVER_UMP
#error "This file shouldn't be compiled without CONFIG_INTERCONNECT_DRIVER_UMP"
#endif

/**
 * \brief Initialise a new UMP channel
 *
 * Most code should use ump_chan_bind() or ump_chan_accept() instead.
 *
 * \param uc Storage for channel state
 * \param inbuf Pointer to incoming message buffer
 * \param inbufsize Size of inbuf in bytes (must be a multiple of #UMP_MSG_BYTES)
 * \param outbuf Pointer to outgoing message buffer
 * \param outbufsize Size of outbuf in bytes (must be a multiple of #UMP_MSG_BYTES)
 */
errval_t ump_chan_init(struct ump_chan *uc,
                       volatile void *inbuf, size_t inbufsize,
                       volatile void *outbuf, size_t outbufsize)
{
    assert(uc != NULL);
    errval_t err;

    err = ump_endpoint_init(&uc->endpoint, inbuf, inbufsize);
    if (err_is_fail(err)) {
        return err;
    }

    err = ump_chan_state_init(&uc->send_chan, outbuf, outbufsize, UMP_OUTGOING);
    if (err_is_fail(err)) {
        return err;
    }

    uc->max_send_msgs = outbufsize / UMP_MSG_BYTES;
    uc->max_recv_msgs = inbufsize / UMP_MSG_BYTES;

    memset(&uc->cap_handlers, 0, sizeof(uc->cap_handlers));
    uc->iref = 0;
    uc->monitor_binding = get_monitor_binding(); // TODO: expose non-default binding to caller
    waitset_chanstate_init(&uc->send_waitset, CHANTYPE_OTHER);

    uc->prev = NULL;
    uc->next = NULL;

    return SYS_ERR_OK;
}
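
/*
 * Example (illustrative only): a minimal sketch of initialising a channel over
 * a shared buffer that is already mapped at `buf`, using the first half for
 * receive and the second half for send. The buffer, its size and the 50/50
 * split are assumptions of this sketch, not requirements of ump_chan_init().
 *
 *   struct ump_chan uc;
 *   size_t half = bufsize / 2;   // each half must be a multiple of UMP_MSG_BYTES
 *   errval_t err = ump_chan_init(&uc, buf, half, (char *)buf + half, half);
 *   if (err_is_fail(err)) {
 *       USER_PANIC_ERR(err, "ump_chan_init failed");
 *   }
 */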

/// Destroy the local state associated with a given channel
void ump_chan_destroy(struct ump_chan *uc)
{
    ump_endpoint_destroy(&uc->endpoint);
}

/// Handler for UMP bind reply messages from the Monitor
static void bind_ump_reply_handler(struct monitor_binding *b, uintptr_t mon_id,
                                   uintptr_t conn_id, errval_t success,
                                   struct capref notify)
{
    struct ump_chan *uc = (void *)conn_id;

    assert(uc->connstate == UMP_BIND_WAIT);

    if (err_is_ok(success)) { /* bind succeeded */
        uc->connstate = UMP_CONNECTED;
        uc->monitor_id = mon_id;
    } else { /* bind failed */
        uc->connstate = UMP_DISCONNECTED;
        /* TODO: delete endpoint, destroy local_cap */
    }

    /* either way, tell the user what happened */
    assert(uc->bind_continuation.handler != NULL);
    uc->bind_continuation.handler(uc->bind_continuation.st, success, uc, notify);
}

struct bind_ump_reply_state {
    struct monitor_binding *b;
    struct ump_chan *uc;
    struct monitor_bind_ump_reply_monitor__tx_args args;
    struct event_queue_node qnode;
};

static void send_bind_reply(void *arg)
{
    struct bind_ump_reply_state *st = arg;
    struct monitor_binding *b = st->b;
    errval_t err;

    // send back a bind success/failure message to the monitor
    err = b->tx_vtbl.bind_ump_reply_monitor(b, NOP_CONT, st->args.mon_id,
                                            st->args.conn_id, st->args.err,
                                            st->args.notify);
    if (err_is_ok(err)) {
        event_mutex_unlock(&b->mutex);
        free(st);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        err = b->register_send(b, b->waitset, MKCONT(send_bind_reply, st));
        assert(err_is_ok(err)); // shouldn't fail, as we hold the mutex
    } else {
        event_mutex_unlock(&b->mutex);
        USER_PANIC_ERR(err, "failed sending back reply to UMP bind request;"
                       " request dropped!");
        if (st->uc != NULL) {
            ump_chan_destroy(st->uc);
            // FIXME: how do we tell the binding about this!?
        }
        free(st);
    }
}

/// Handler for UMP bind request messages from the Monitor
static void bind_ump_service_request_handler(struct monitor_binding *b,
                                             uintptr_t service_id,
                                             uintptr_t mon_id,
                                             struct capref frame,
                                             size_t channel_length_in,
                                             size_t channel_length_out,
                                             struct capref notify_cap)
{
    struct idc_export *e = (void *)service_id;
    errval_t err;

    // call the binding's connect handler
    if (e->ump_connect_callback != NULL) {
        err = e->ump_connect_callback(e->connect_cb_st, b, mon_id, frame,
                                      channel_length_in, channel_length_out,
                                      notify_cap);
    } else {
        err = LIB_ERR_NO_UMP_BIND_HANDLER;
    }

    if (err_is_fail(err)) {
        ump_chan_send_bind_reply(b, NULL, err, mon_id, NULL_CAP);
    } else {
        // on success, the binding's connect callback is responsible for sending the reply
    }
}

/**
 * \brief Send a reply to a UMP bind request back to the monitor
 *
 * \param mb Monitor binding on which to send the reply
 * \param uc Channel state (must be non-NULL on success, NULL on failure)
 * \param err Error code to report to the binding party
 * \param monitor_id Monitor's connection ID for this channel
 * \param notify_cap Capability to use for notifications, or #NULL_CAP
 */
void ump_chan_send_bind_reply(struct monitor_binding *mb,
                              struct ump_chan *uc, errval_t err,
                              uintptr_t monitor_id, struct capref notify_cap)
{
    struct bind_ump_reply_state *st = malloc(sizeof(struct bind_ump_reply_state));
    assert(st != NULL);

    if (err_is_ok(err)) {
        assert(uc != NULL);
    } else {
        assert(uc == NULL);
    }

    st->b = mb;
    st->uc = uc;
    st->args.err = err;
    st->args.mon_id = monitor_id;
    st->args.conn_id = err_is_ok(err) ? (uintptr_t)uc : 0;
    st->args.notify = notify_cap;

    // wait for the ability to use the monitor binding
    event_mutex_enqueue_lock(&mb->mutex, &st->qnode,
                             MKCLOSURE(send_bind_reply, st));
}

static void send_bind_cont(void *arg)
{
    struct ump_chan *uc = arg;
    struct monitor_binding *b = uc->monitor_binding;
    errval_t err;

    /* Send bind request to the monitor */
    assert(uc->monitor_binding == b);
    assert(b->tx_vtbl.bind_ump_client_request);
    err = b->tx_vtbl.bind_ump_client_request(b, NOP_CONT, uc->iref,
                                             (uintptr_t)uc, uc->frame,
                                             uc->inchanlen, uc->outchanlen,
                                             uc->notify_cap);
    if (err_is_ok(err)) { // request sent ok
        event_mutex_unlock(&b->mutex);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // register to retry
        err = b->register_send(b, b->waitset, MKCONT(send_bind_cont, uc));
        assert(err_is_ok(err)); // we hold the monitor binding mutex
    } else { // permanent failure sending message
        event_mutex_unlock(&b->mutex);
        uc->bind_continuation.handler(uc->bind_continuation.st,
                                      err_push(err, LIB_ERR_BIND_UMP_REQ),
                                      NULL, NULL_CAP);
    }
}

/**
 * \brief Initialise a new UMP channel and initiate a binding
 *
 * \param uc Storage for channel state
 * \param cont Continuation for bind completion/failure
 * \param qnode Storage for an event queue node (used for queuing bind request)
 * \param iref IREF to which to bind
 * \param monitor_binding Monitor binding to use
 * \param inchanlen Size of incoming channel, in bytes (rounded up to a multiple of #UMP_MSG_BYTES)
 * \param outchanlen Size of outgoing channel, in bytes (rounded up to a multiple of #UMP_MSG_BYTES)
 * \param notify_cap Capability to use for notifications, or #NULL_CAP
 */
errval_t ump_chan_bind(struct ump_chan *uc, struct ump_bind_continuation cont,
                       struct event_queue_node *qnode, iref_t iref,
                       struct monitor_binding *monitor_binding,
                       size_t inchanlen, size_t outchanlen,
                       struct capref notify_cap)
{
    errval_t err;

    // round up channel sizes to a multiple of the message size
    inchanlen = ROUND_UP(inchanlen, UMP_MSG_BYTES);
    outchanlen = ROUND_UP(outchanlen, UMP_MSG_BYTES);

    // compute the size of the frame needed and allocate it
    size_t framesize = inchanlen + outchanlen;
    err = frame_alloc(&uc->frame, framesize, &framesize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }

    // map it in
    void *buf;
    err = vspace_map_one_frame_attr(&buf, framesize, uc->frame, UMP_MAP_ATTR,
                                    NULL, &uc->vregion);
    if (err_is_fail(err)) {
        cap_destroy(uc->frame);
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    // initialise channel state
    err = ump_chan_init(uc, buf, inchanlen, (char *)buf + inchanlen, outchanlen);
    if (err_is_fail(err)) {
        vregion_destroy(uc->vregion);
        cap_destroy(uc->frame);
        return err;
    }

    // IDs for tracing
    struct frame_identity id;
    err = frame_identify(uc->frame, &id);
    if (err_is_fail(err)) {
        vregion_destroy(uc->vregion);
        cap_destroy(uc->frame);
        return err_push(err, LIB_ERR_FRAME_IDENTIFY);
    }
    uc->recvid = (uintptr_t)id.base;
    uc->sendid = (uintptr_t)(id.base + inchanlen);

    // store bind args
    uc->bind_continuation = cont;
    uc->monitor_binding = monitor_binding;
    uc->iref = iref;
    uc->inchanlen = inchanlen;
    uc->outchanlen = outchanlen;
    uc->notify_cap = notify_cap;

    // wait for the ability to use the monitor binding
    uc->connstate = UMP_BIND_WAIT;
    event_mutex_enqueue_lock(&monitor_binding->mutex, qnode,
                             MKCLOSURE(send_bind_cont, uc));

    return SYS_ERR_OK;
}
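
/*
 * Example (illustrative only): a minimal sketch of binding to an exported UMP
 * service. The iref, the channel sizes and the bind_cont() callback are
 * assumptions of this sketch, not part of this file's API. The callback
 * signature follows the use of bind_continuation in bind_ump_reply_handler()
 * above.
 *
 *   static void bind_cont(void *st, errval_t err, struct ump_chan *uc,
 *                         struct capref notify)
 *   {
 *       if (err_is_fail(err)) {
 *           USER_PANIC_ERR(err, "UMP bind failed");
 *       }
 *       // uc->connstate is now UMP_CONNECTED; start sending/receiving
 *   }
 *
 *   static struct event_queue_node qnode;
 *   struct ump_bind_continuation cont = { .handler = bind_cont, .st = NULL };
 *   errval_t err = ump_chan_bind(&uc, cont, &qnode, iref,
 *                                get_monitor_binding(),
 *                                64 * UMP_MSG_BYTES, 64 * UMP_MSG_BYTES,
 *                                NULL_CAP);
 */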

/**
 * \brief Initialise a new UMP channel to accept an incoming binding request
 *
 * \param uc Storage for channel state
 * \param mon_id Monitor's connection ID for this channel
 * \param frame Frame capability containing the channel
 * \param inchanlen Size of incoming channel, in bytes (must be a multiple of #UMP_MSG_BYTES)
 * \param outchanlen Size of outgoing channel, in bytes (must be a multiple of #UMP_MSG_BYTES)
 */
errval_t ump_chan_accept(struct ump_chan *uc, uintptr_t mon_id,
                         struct capref frame, size_t inchanlen,
                         size_t outchanlen)
{
    errval_t err;

    uc->monitor_id = mon_id;
    uc->frame = frame;

    // identify the frame so we can check its size below
    struct frame_identity frameid;
    err = frame_identify(frame, &frameid);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_IDENTIFY);
    }

    // IDs for tracing
    uc->recvid = (uintptr_t)(frameid.base + outchanlen);
    uc->sendid = (uintptr_t)frameid.base;

    // check that the frame is big enough
    if (frameid.bytes < inchanlen + outchanlen) {
        return LIB_ERR_UMP_FRAME_OVERFLOW;
    }

    // map it in
    void *buf;
    err = vspace_map_one_frame_attr(&buf, frameid.bytes, frame, UMP_MAP_ATTR,
                                    NULL, &uc->vregion);
    if (err_is_fail(err)) {
        cap_destroy(uc->frame);
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    // initialise channel state
    err = ump_chan_init(uc, (char *)buf + outchanlen, inchanlen, buf, outchanlen);
    if (err_is_fail(err)) {
        vregion_destroy(uc->vregion);
        cap_destroy(uc->frame);
        return err;
    }

    /* mark connected */
    uc->connstate = UMP_CONNECTED;
    return SYS_ERR_OK;
}
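
/*
 * Example (illustrative only): a minimal sketch of an idc_export UMP connect
 * callback that accepts the incoming binding and then sends the reply itself,
 * as expected by bind_ump_service_request_handler() above. The callback name,
 * the heap allocation and the error handling are assumptions of this sketch.
 *
 *   static errval_t my_ump_connect_cb(void *st, struct monitor_binding *mb,
 *                                     uintptr_t mon_id, struct capref frame,
 *                                     size_t inchanlen, size_t outchanlen,
 *                                     struct capref notify_cap)
 *   {
 *       struct ump_chan *uc = malloc(sizeof(*uc));
 *       if (uc == NULL) {
 *           return LIB_ERR_MALLOC_FAIL; // handler above sends the failure reply
 *       }
 *       errval_t err = ump_chan_accept(uc, mon_id, frame, inchanlen, outchanlen);
 *       if (err_is_fail(err)) {
 *           free(uc);
 *           return err;                 // handler above sends the failure reply
 *       }
 *       ump_chan_send_bind_reply(mb, uc, SYS_ERR_OK, mon_id, NULL_CAP);
 *       return SYS_ERR_OK;
 *   }
 */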

/**
 * \brief Register an event handler to be notified when this channel can
 *        accept another outgoing message
 *
 * \param uc UMP channel
 * \param ws Waitset on which to register the event
 * \param closure Event handler to run once sending is possible again
 */
errval_t ump_chan_register_send(struct ump_chan *uc, struct waitset *ws,
                                struct event_closure closure)
{
    assert(uc != NULL);
    assert(ws != NULL);

    errval_t err = waitset_chan_register(ws, &uc->send_waitset, closure);
    assert(err_is_ok(err));

    // enqueue in the list of channels with a registered event to retry sending
    assert(uc->next == NULL && uc->prev == NULL);
    dispatcher_handle_t handle = disp_disable();
    struct dispatcher_generic *dp = get_dispatcher_generic(handle);
    if (dp->ump_send_events_list == NULL) {
        dp->ump_send_events_list = uc;
        uc->next = uc->prev = uc;
    } else {
        uc->prev = dp->ump_send_events_list->prev;
        uc->next = dp->ump_send_events_list;
        uc->prev->next = uc;
        uc->next->prev = uc;
    }
    disp_enable(handle);

    return err;
}
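
/*
 * Example (illustrative only): a minimal sketch of retrying a send once the
 * channel has space again. retry_send() and its use of the default waitset
 * are assumptions of this sketch.
 *
 *   static void retry_send(void *arg)
 *   {
 *       struct ump_chan *uc = arg;
 *       if (!ump_chan_can_send(uc)) {
 *           // still full: re-register and wait for the next trigger
 *           errval_t err = ump_chan_register_send(uc, get_default_waitset(),
 *                                                 MKCONT(retry_send, uc));
 *           assert(err_is_ok(err));
 *           return;
 *       }
 *       // ... construct and send the pending message here ...
 *   }
 */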

/// Trigger send events on all UMP channels that can now send again; runs with the dispatcher disabled
void ump_channels_retry_send_disabled(dispatcher_handle_t handle)
{
    struct dispatcher_generic *dp = get_dispatcher_generic(handle);
    struct ump_chan *uc, *first = dp->ump_send_events_list, *next;
    errval_t err;

    for (uc = first; uc != NULL; uc = next) {
        next = uc->next;
        assert(next != NULL);
        bool cs = ump_chan_can_send(uc);
        if (cs) {
            // unlink the channel from the circular list of waiting senders
            if (uc->next == uc) {
                dp->ump_send_events_list = NULL;
            } else {
                uc->prev->next = uc->next;
                uc->next->prev = uc->prev;
                if (dp->ump_send_events_list == uc) {
                    dp->ump_send_events_list = next;
                    first = next;
                }
            }
            uc->next = uc->prev = NULL;
            err = waitset_chan_trigger_disabled(&uc->send_waitset, handle);
            assert_disabled(err_is_ok(err)); // shouldn't fail
        }
        if (next == first) {
            break; // wrapped around the list
        }
    }
}

/// Initialise the UMP channel driver
void ump_init(void)
{
    struct monitor_binding *mcb = get_monitor_binding();
    mcb->rx_vtbl.bind_ump_reply_client = bind_ump_reply_handler;
    mcb->rx_vtbl.bind_ump_service_request = bind_ump_service_request_handler;
}

/// Return the waitset channel on which the given monitor binding receives messages
struct waitset_chanstate *monitor_bind_get_receiving_chanstate(struct monitor_binding *b)
{
    return b->get_receiving_chanstate(b);
}