1/*
2 * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
3 * All rights reserved.
4 *
5 * This file is distributed under the terms in the attached LICENSE file.
6 * If you do not find this file, copies can be found by writing to:
7 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
8 */
9
10#include <barrelfish/barrelfish.h>
11
12#include <bulk_transfer/bulk_transfer.h>
13#include <bulk_transfer/bulk_sm.h>
14
15//#include "../../helpers.h"
16#include "../../bulk_pool.h"
17#include "bulk_sm_impl.h"
18
19// Pool assignment --------------------------------------------------------
20
21/**
22 * Data to identify this pool_assign transaction on RPC client side.
23 */
24struct c_assign_pool_data {
25    struct bulk_channel      *channel;      ///< channel under consideration
26    struct bulk_pool         *pool;         ///< pool to be added to channel
27    struct bulk_continuation continuation;  ///< continuation after completion
28    bulk_ctrl_pool_t         flounder_pool; ///< flounder representation of pool
29};
30
31/**
32 * Data to identify this pool_assign transaction on RPC server side.
33 */
34struct s_assign_pool_data {
35    struct bulk_channel *channel;
36    uint64_t            id;
37    errval_t            err;
38};
39
40/**
41 * Reply handler for pool_assign RPC call. On success, pool is added to channel.
42 */
43void bulk_sm_assign_pool_rx_response(
44        struct bulk_ctrl_binding *b,
45        uint64_t                 error,
46        uint64_t                 id)
47{
48    struct bulk_channel *channel     = VOID2CHANNEL(b->st);
49    struct c_assign_pool_data *cdata = (struct c_assign_pool_data*) id;
50
51    // if accepted, add pool to channel
52    errval_t r_error = (errval_t) error;
53    if (err_is_ok(r_error)) {
54        errval_t err = bulk_pool_assign(cdata->pool, channel);
55        assert(err_is_ok(err));
56    }
57
58    // cleanup storage
59    struct bulk_continuation cont = cdata->continuation;
60    free(cdata);
61
62    // notify user
63    bulk_continuation_call(cont, r_error, channel);
64}
65
66/**
67 * Reply handler for pool_assign RPC call.
68 */
69
70static void bulk_sm_assign_pool_reply_sent(void *a)
71{
72    struct s_assign_pool_data *sdata = a;
73    free(sdata);
74}
75
76static errval_t bulk_sm_assign_pool_send_reply(void *a)
77{
78    struct s_assign_pool_data *sdata   = a;
79    struct bulk_channel       *channel = sdata->channel;
80    struct bulk_ctrl_binding  *b       = CHANNEL_BINDING(channel);
81
82    struct event_closure txcont = MKCONT(bulk_sm_assign_pool_reply_sent, sdata);
83
84    errval_t err = bulk_ctrl_assign_pool_response__tx(b, txcont,
85            sdata->err, sdata->id);
86
87    return err;
88}
89
90/**
91 * Receive handler for pool_assign RPC call on peer side. Asks user for
92 * confirmation of pool and replies to the original side.
93 */
94void bulk_sm_assign_pool_rx_call(
95        struct bulk_ctrl_binding *b,
96        bulk_ctrl_pool_t         pool,
97        uint64_t                 id)
98{
99    struct bulk_channel *channel = VOID2CHANNEL(b->st);
100    errval_t err;
101
102    // Allocte transaction data
103    struct s_assign_pool_data *sdata =
104        malloc(sizeof(struct s_assign_pool_data));
105    if (!sdata) {
106        // not much we can do now.
107        USER_PANIC("No memory to serve assign_pool request.\n");
108    }
109
110    sdata->channel = channel;
111    sdata->id      = id;
112
113    // Create library representation from pool information recevied by flounder.
114    struct bulk_pool *new_pool;
115
116    //first look up if we already know this pool from some other channel
117    bool first_assignment = 0;
118    struct bulk_pool_id pool_id = {
119        .machine = pool.pool_id_machine,
120        .dom     = pool.pool_id_dom,
121        .local   = pool.pool_id_local
122    };
123    new_pool = bulk_pool_domain_list_get(&pool_id);
124
125    if (new_pool != NULL){
126        //we already know that pool -> do nothing
127    } else {
128        first_assignment = 1;
129        err = create_pool_from_flounder(&new_pool, &pool);
130        if (err_is_fail(err)) {
131            sdata->err = err;
132            bulk_sm_assign_pool_send_reply(sdata);
133        }
134        //map the pool into our vspace
135        err = bulk_pool_map(new_pool);
136        if (err_is_fail(err)) {
137            DEBUG_ERR(err, "bulk_pool_map failed in bulk_sm_assign_pool_rx_call");
138            debug_printf("err: %s\n", err_getstring(err));
139            sdata->err = err_push(err, BULK_TRANSFER_POOL_MAP);
140            bulk_sm_assign_pool_send_reply(sdata);
141        }
142    }
143
144
145    // request permission to assign pool from application
146    assert(channel->callbacks->pool_assigned);
147
148    err = channel->callbacks->pool_assigned(channel, new_pool);
149    if (err_is_fail(err)) {
150        // application doesn't accept this pool. that's fine.
151        sdata->err = err;
152        if (first_assignment){
153            err = bulk_pool_unmap(new_pool);
154            err = bulk_pool_dealloc(new_pool);
155            bulk_pool_domain_list_remove(new_pool);
156        }
157
158    } else {
159        sdata->err = SYS_ERR_OK;
160
161        err = bulk_pool_assign(new_pool, channel);
162        assert(err_is_ok(err));
163        if (first_assignment){
164            err = bulk_pool_domain_list_insert(new_pool);
165            assert(err_is_ok(err));
166        }
167    }
168
169    // done
170    bulk_sm_flounder_send_fifo_msg_with_arg(channel,
171                                            bulk_sm_assign_pool_send_reply,
172                                            sdata);
173}
174
175/**
176 * Send handler for pool_assign RPC call.
177 */
178static errval_t bulk_sm_assign_pool_send_request(void *a)
179{
180    struct c_assign_pool_data  *cdata   = a;
181    struct bulk_channel        *channel = cdata->channel;
182    struct bulk_ctrl_binding   *b       = CHANNEL_BINDING(channel);
183
184    struct event_closure txcont = MKCONT(bulk_sm_flounder_msg_sent_debug_cb,
185            "bulk_sm_assign_pool sent");
186
187    errval_t err = bulk_ctrl_assign_pool_call__tx(b, txcont,
188            cdata->flounder_pool, (uint64_t)cdata);
189
190    return err;
191}
192
193/**
194 * Entry point from the public interface bulk_channel_assign_pool function.
195 * Reqeusts confirmation from the other endpoint.
196 */
197errval_t bulk_sm_assign_pool(
198        struct bulk_channel      *channel,
199        struct bulk_pool         *pool,
200        struct bulk_continuation cont)
201{
202    struct c_assign_pool_data *cdata =
203        malloc(sizeof(struct c_assign_pool_data));
204    if (!cdata) {
205        return BULK_TRANSFER_MEM;
206    }
207
208    cdata->channel      = channel;
209    cdata->continuation = cont;
210    cdata->pool         = pool;
211    generate_pool_for_flounder(pool, &cdata->flounder_pool);
212
213    bulk_sm_flounder_send_fifo_msg_with_arg(channel,
214                                            bulk_sm_assign_pool_send_request,
215                                            cdata);
216
217    return SYS_ERR_OK;
218}
219
220// Pool removal -----------------------------------------------------------
221
/**
 * Entry point from the public interface bulk_channel_remove_pool function.
 * Not yet implemented: always aborts via the failed assertion below.
 */
errval_t bulk_sm_remove_pool(
        struct bulk_channel      *channel,
        struct bulk_pool         *pool,
        struct bulk_continuation cont)
{
    assert(!"NYI");
    return SYS_ERR_OK;
}
230