1/**
2 * \file
3 * \brief Generic bulk data transfer mechanism
4 */
5
6/*
7 * Copyright (c) 2013, ETH Zurich.
8 * All rights reserved.
9 *
10 * This file is distributed under the terms in the attached LICENSE file.
11 * If you do not find this file, copies can be found by writing to:
12 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
13 */
14
15#ifndef BULK_TRANSFER_H
16#define BULK_TRANSFER_H
17
18#include <barrelfish/barrelfish.h>
19#include <barrelfish/waitset.h>
20
21/**
22 * Specifies the direction of data flow over a channel.
23 */
24enum bulk_channel_direction {
25    BULK_DIRECTION_TX,  ///< This side of the channel is the data source
26    BULK_DIRECTION_RX   ///< This side of the channel is the data sink
27};
28
29/**
30 * The role of the domain with respect to the channel.
31 *
32 * 1) Creation: upon channel creation the role can either be given or generic
33 * 2) Binding: The roles are given either Master-Slave or Slave-Master
34 */
35enum bulk_channel_role {
36    BULK_ROLE_GENERIC,  ///< the role of this endpoint depends on the binding side
37    BULK_ROLE_MASTER,   ///< this endpoint is the channel master
38    BULK_ROLE_SLAVE     ///< this endpoint is the channel slave
39};
40
41/**
42 * the trust levels of the channel
43 */
44enum bulk_trust_level {
45    BULK_TRUST_UNINITIALIZED, ///< trust level is not initialized
46    BULK_TRUST_NONE,          ///< untrusted case, policies are enforced
47    BULK_TRUST_HALF,          ///< same as untrusted, but no revocation of caps
48    BULK_TRUST_FULL           ///< complete trust, no unmapping
49};
50
51/**
52 *
53 */
54enum bulk_channel_state {
55    BULK_STATE_UNINITIALIZED,  ///< channel not initialized, no endpoint assigned
56    BULK_STATE_INITIALIZED,    ///< local endpoint assigned, ready for binding
57    BULK_STATE_BINDING,        ///< binding is in progress
58    BULK_STATE_BIND_NEGOTIATE, ///< channel properties are negotiated (role, trust)
59    BULK_STATE_CONNECTED,      ///< binding is completed and ready for use
60    BULK_STATE_TEARDOWN,       ///< teardown is initiated
61    BULK_STATE_CLOSED          ///< the channel has been closed
62};
63
64/* forward declarations */
65struct bulk_channel;
66struct bulk_channel_constraints;
67struct bulk_pool;
68struct bulk_pool_list;
69struct bulk_buffer;
70
71
72/**
73 * continuation to make the interface asynchronous
74 */
75struct bulk_continuation {
76    void (*handler)(void *arg, errval_t err, struct bulk_channel *channel);
77    void *arg;
78};
79
80#define MK_BULK_CONT(h,a) ((struct bulk_continuation) {.handler=(h), .arg=(a)})
81#define BULK_CONT_NOP     MK_BULK_CONT(NULL, NULL)
82
83/**
84 * Helper function to call a bulk continuation with given arguments.
85 */
86static inline void bulk_continuation_call(struct bulk_continuation cont,
87                                          errval_t                 err,
88                                          struct bulk_channel      *channel)
89{
90    if (cont.handler) {
91        cont.handler(cont.arg, err, channel);
92    }
93}
94
95
96/**
97 * Function pointers provided by an implementation of the bulk transfer
98 * mechanism over a specific backend. Functions correspond closely to the
99 * public interface.
100 *
101 * XXX: do we want to give a pointer to the closure or the closure itself?
102 *      the event_closure just has two fields, so it may be reasonable to do so.
103 *      - RA
104 */
105struct bulk_implementation {
106    errval_t (*channel_create)(struct bulk_channel  *channel);
107
108    errval_t (*channel_bind)(struct bulk_channel  *channel,
109                             struct bulk_continuation cont);
110
111    errval_t (*channel_destroy)(struct bulk_channel  *channel);
112
113    errval_t (*assign_pool)(struct bulk_channel *channel,
114                            struct bulk_pool    *pool,
115                            struct bulk_continuation cont);
116
117    errval_t (*remove_pool)(struct bulk_channel *channel,
118                            struct bulk_pool    *pool,
119                            struct bulk_continuation cont);
120
121    errval_t (*move)(struct bulk_channel  *channel,
122                     struct bulk_buffer   *buffer,
123                     void                 *meta,
124                     struct bulk_continuation cont);
125
126    errval_t (*copy)(struct bulk_channel  *channel,
127                     struct bulk_buffer   *buffer,
128                     void                 *meta,
129                     struct bulk_continuation cont);
130
131    errval_t (*release)(struct bulk_channel  *channel,
132                        struct bulk_buffer   *buffer,
133                        struct bulk_continuation cont);
134
135    errval_t (*pass)(struct bulk_channel  *channel,
136                     struct bulk_buffer   *buffer,
137                     void                 *meta,
138                     struct bulk_continuation cont);
139    /* XXX: remove ? */
140    errval_t (*request)(struct bulk_channel  *channel,
141                        size_t                count,
142                        struct bulk_continuation cont);
143};
144
145/**
146 * specifies constraints on the channel. This involves limiting the supported
147 * memory range or alignment requirements.
148 */
149struct bulk_channel_constraints {
150    uintptr_t mem_range_min;    ///< minimum physical address supported
151    uintptr_t mem_range_max;    ///< maximum physical address supported
152    uintptr_t men_align;        ///< minimum memory alignment constraint
153};
154
/**
 * Callbacks invoked by the backend to deliver channel events to the
 * application. All callbacks receive the channel they occurred on.
 */
struct bulk_channel_callbacks {
    /**
     * For exporting side: other endpoint successfully bound.
     * @return an error value vetoes the bind.
     */
    errval_t (*bind_received)(struct bulk_channel *channel);

    /**
     * The other side wants to teardown the channel.
     * For initiating side: teardown completed.
     * For other side: teardown initiated.
     */
    void (*teardown_received)(struct bulk_channel *channel);

    /**
     * The other endpoint requests to assign a new pool to this channel.
     * @return If an error value is returned, the pool is not assigned and the
     *         error code is sent to the other side (veto).
     */
    errval_t (*pool_assigned)(struct bulk_channel *channel,
                              struct bulk_pool    *pool);

    /**
     * The other endpoint wants to remove a pool from this channel.
     * @return an error value vetoes the removal.
     */
    errval_t (*pool_removed)(struct bulk_channel *channel,
                             struct bulk_pool    *pool);

    /** Incoming moved buffer (sink side): data and ownership received. */
    void (*move_received)(struct bulk_channel *channel,
                          struct bulk_buffer  *buffer,
                          void                *meta);

    /** Incoming passed buffer (source side): ownership returned. */
    void (*buffer_received)(struct bulk_channel *channel,
                            struct bulk_buffer  *buffer,
                            void                *meta);

    /** Incoming copied buffer (sink side). */
    void (*copy_received)(struct bulk_channel *channel,
                          struct bulk_buffer  *buffer,
                          void                *meta);

    /** Released copied buffer (source side). */
    void (*copy_released)(struct bulk_channel *channel,
                          struct bulk_buffer  *buffer);

    /** The other endpoint ran out of buffers and requests more buffers. */
     /*
      * XXX: Its a point of argument to have this on the bulk interface or
      *      to push it to the service level. Also: maybe need to specify the
      *      pool id here.
      *      - RA
      * */
    void (*request_received)(struct bulk_channel *channel,
                             size_t               count);
};
212
213
/** Handle/Representation for one end of a bulk transfer channel */
struct bulk_channel {
    /** callbacks for the channel events */
    struct bulk_channel_callbacks    *callbacks;
    /** the local endpoint for this channel */
    struct bulk_endpoint_descriptor *ep;
    /** the current channel state */
    enum bulk_channel_state          state;
    /** pool allocators */
    // struct bulk_pool_allocator      *pool_allocators;
    /** ordered list of assigned pools to this channel (sorted by pool id) */
    struct bulk_pool_list           *pools;
    /** the direction of data flow */
    enum bulk_channel_direction      direction;
    /** role of this side of the channel */
    enum bulk_channel_role           role;
    /** the trust level of this channel */
    enum bulk_trust_level            trust;
    /** constraints of this channel */
    struct bulk_channel_constraints  constraints;
    /** the size of the transmitted meta information per bulk transfer */
    size_t                           meta_size;
    /** the waitset on which events for this channel are dispatched */
    struct waitset                  *waitset;
    /** pointer to user specific state for this channel */
    void                            *user_state;
    /** implementation specific data (owned by the backend) */
    /*
     * XXX: maybe we want to have an abstract channel and specific channel
     *      as with the endpoints here as well ?
     *      - RA
     */
    void                            *impl_data;
};
248
249/**
250 * generic bulk endpoint
251 *
252 * This serves as an abstract representation of an endpoint. This data structure
253 * must be part of the implementation specific endpoint struct.
254 */
255struct bulk_endpoint_descriptor {
256    /** Pointer to backend-function pointers for this endpoint */
257    struct bulk_implementation *f;
258    /** TODO: are there more generic endpoint information ? */
259};
260
261
262/**
263    this struct represents the pool id which consists of the domain id of the
264    allocator and the domain local allocation counter
265    TODO: ensure system wide uniquenes also between different machines
266 */
267struct bulk_pool_id {
268    uint32_t    machine;
269    uint32_t    dom;//warning: disp_get_domain_id() is core-local
270    uint32_t    local;
271};
272
273
274/**
275 * represents the state of a buffer
276 */
277enum bulk_buffer_state {
278    BULK_BUFFER_INVALID,    ///< the buffer is not present XXX: name?
279    BULK_BUFFER_READ_ONLY,  ///< the buffer is mapped read only
280    BULK_BUFFER_RO_OWNED,   ///< the buffer is copied first
281    BULK_BUFFER_READ_WRITE  ///< the buffer is mapped read write
282};
283
284/**
285 * The bulk pool is a continuous region in (virtual) memory that consists of
286 * equally sized buffers.
287 */
288struct bulk_pool {
289    /** TODO: find a unique id*/
290    struct bulk_pool_id     id;
291    /** the base address of the pool */
292    lvaddr_t                 base_address;
293    /** the size of a single buffer in bytes */
294    size_t                   buffer_size;
295    /**  pool trust level depending on first assignment */
296    enum bulk_trust_level    trust;
297    /** capability for the entire pool */
298    struct capref            pool_cap;
299    /** the maximum number of buffers in this pool */
300    size_t                   num_buffers;
301    /** array of the buffers for this pool (pre allocated) */
302    struct bulk_buffer     **buffers;
303};
304
305/**
306 * a list of bulk pools assigned to a channel, keep the list ordered by the id
307 */
308struct bulk_pool_list {
309    struct bulk_pool_list *next;    ///< pointer to the next element
310    struct bulk_pool      *pool;    ///< the pool
311};
312
313/**
314 * a bulk buffer is the base unit for bulk data transfer in the system
315 */
316struct bulk_buffer {
317    /** the virtual address of the buffer */
318    void                     *address;
319    /** the physical address */
320    uintptr_t                 phys;
321    /** XXX: maybe we have to use the pool_id here */
322    struct bulk_pool         *pool;
323    /** index of this buffer within the pool's array of buffers */
324    uint32_t                  bufferid;
325    /** capability for this buffer */
326    struct capref             cap;
327    /** offset in the capability  */
328    lpaddr_t                  cap_offset;
329    /** state of the buffer */
330    enum bulk_buffer_state    state;
331    /** local refrence counting */
332    uint32_t                  local_ref_count;
333};
334
335
336/*
337 * ---------------------------------------------------------------------------
338 * Channel Management >>>
339 */
340
341/**
342 * setup parameters for creating a new bulk channel
343 */
344struct bulk_channel_setup {
345    /** Channel direction (RX/TX) */
346    enum bulk_channel_direction       direction;
347    /** Endpoint role (master/slave) */
348    enum bulk_channel_role            role;
349    /** trust level for this channel */
350    enum bulk_trust_level             trust;
351    /** */
352    struct bulk_channel_constraints   constraints;
353    /** Size of metadata to be passed along with transfers and passed buffers. */
354    size_t                            meta_size;
355    /** Waitset on which events for this channel will be dispatched */
356    struct waitset                   *waitset;
357    /** */
358    void                             *user_state;
359};
360
361/**
362 * parameters used on binding ot a channel
363 */
364struct bulk_channel_bind_params {
365    /** Endpoint role (master/slave) */
366    enum bulk_channel_role            role;
367    /** trust level for this channel */
368    enum bulk_trust_level             trust;
369    /** the channel constraints */
370    struct bulk_channel_constraints   constraints;
371    /** Waitset on which events for this channel will be dispatched */
372    struct waitset                   *waitset;
373    /** user state for the channel */
374    void                             *user_state;
375};
376
377
378/**
379 * Create a new channel.
380 *
381 * @param channel   Pointer to unused channel handle
382 * @param ep_desc   Description of endpoint to bind to
383 * @param callbacks Callbacks for events on this channel
384 * @param setup     struct containing the setup information
385 */
386errval_t bulk_channel_create(struct bulk_channel              *channel,
387                             struct bulk_endpoint_descriptor  *ep_desc,
388                             struct bulk_channel_callbacks    *callbacks,
389                             struct bulk_channel_setup        *setup);
390
391/**
392 * Bind to an existing unbound channel.
393 *
394 * @param channel   Pointer to unused channel handle
395 * @param ep_desc   Description of endpoint to bind to
396 * @param callbacks Callbacks for events on this channel
397 * @param params    parameters for the binding process
398 *
399 * There is the bind done callback that serves as a continuation for this.
400 */
401errval_t bulk_channel_bind(struct bulk_channel              *channel,
402                           struct bulk_endpoint_descriptor  *remote_ep_desc,
403                           struct bulk_channel_callbacks    *callbacks,
404                           struct bulk_channel_bind_params  *params,
405                           struct bulk_continuation cont);
406
407
408/**
409 * Assign a pool to a channel.
410 *
411 * @param channel Channel
412 * @param pool    Pool to assign (must not be assigned to this channel yet)
413 *
414 * * There is the pool assigned callback that serves as a continuation for this.
415 */
416errval_t bulk_channel_assign_pool(struct bulk_channel *channel,
417                                  struct bulk_pool    *pool,
418                                  struct bulk_continuation cont);
419
420/**
421 * Remove a pool from a channel
422 *
423 * @param channel Channel
424 * @param pool    Pool to remove (must be previously assigned to the channel)
425 *
426 */
427errval_t bulk_channel_remove_pool(struct bulk_channel       *channel,
428                                  struct bulk_pool          *pool,
429                                  struct bulk_continuation   cont);
430
431/**
432 * Free a channel
433 *
434 * @param channel        Channel to be freed
435 */
436errval_t bulk_channel_destroy(struct bulk_channel      *channel,
437                              struct bulk_continuation cont);
438
439/*
440 * ---------------------------------------------------------------------------
441 * <<< Channel Management
442 */
443
444
445
446/**
447 * Move buffer on the channel. Data and ownership are passed to the other
448 * endpoint. After the other endpoint is done with the respective buffer, it can
449 * pass it back.
450 *
451 * @param channel Channel, this endpoint must be source
452 * @param buffer  Buffer, must hold ownership and belong to a pool on this
453 *                channel
454 * @param meta    Pointer to metadata to be passed along with the data
455 *                (channel-wide meta_size is used).
456 * @param cont    event continuation
457 */
458errval_t bulk_channel_move(struct bulk_channel      *channel,
459                           struct bulk_buffer       *buffer,
460                           void                     *meta,
461                           struct bulk_continuation  cont);
462
463/**
464 * Pass buffer ownership to the other endpoint, the buffer contents are not
465 * guaranteed to be transported.
466 *
467 * @param channel Channel
468 * @param buffer  Buffer, must hold ownership and belong to a pool on this
469 *                channel
470 * @param meta    Pointer to metadata to be passed along with the buffer
471 *                (channel-wide meta_size is used).
472 * @param cont    event continuation
473 */
474errval_t bulk_channel_pass(struct bulk_channel      *channel,
475                           struct bulk_buffer       *buffer,
476                           void                     *meta,
477                           struct bulk_continuation  cont);
478
479/**
480 * Copy buffer to other endpoint.
481 *
482 * @param channel Channel, this endpoint must be source
483 * @param buffer  Buffer, must belong to a pool on this channel. Must hold
484 *                ownersihp, or hold a copy of this buffer.
485 * @param meta    Pointer to metadata to be passed along with the buffer
486 *                (channel-wide meta_size is used).
487 * @param cont    event continuation
488 */
489errval_t bulk_channel_copy(struct bulk_channel      *channel,
490                           struct bulk_buffer       *buffer,
491                           void                     *meta,
492                           struct bulk_continuation  cont);
493/**
494 * Release copy received over channel. Must only be called after all outgoing
495 * copies from this domain of the same buffer have been released.
496 *
497 * @param channel Channel, this endpoint must be sink
498 * @param buffer  Buffer, must have received it as a copy over this channel, all
499 *                outgoing copies must have been released.
500 * @param cont    event continuation
501 */
502errval_t bulk_channel_release(struct bulk_channel       *channel,
503                              struct bulk_buffer        *buffer,
504                              struct bulk_continuation   cont);
505
506
507
508#endif /* BULK_TRANSFER_H */
509
510