/**
 * \file
 * \brief Unidirectional bulk data transfer via shared memory
 */

/*
 * Copyright (c) 2013, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>

#include <bulk_transfer/bulk_transfer.h>

#include "bulk_pool.h"
#include "bulk_buffer.h"

/**
 * Create a new channel.
 *
 * Copies the setup parameters into the channel handle and hands the channel
 * to the endpoint backend via its channel_create() operation.
 *
 * @param channel       Pointer to unused channel handle
 * @param local_ep_desc Description of the local endpoint to create the
 *                      channel on
 * @param callbacks     Callbacks for events on this channel
 * @param setup         Struct containing the setup information
 *
 * @return SYS_ERR_OK on success, or whatever the backend's channel_create
 *         operation returns on failure.
 */
errval_t bulk_channel_create(struct bulk_channel *channel,
                             struct bulk_endpoint_descriptor *local_ep_desc,
                             struct bulk_channel_callbacks *callbacks,
                             struct bulk_channel_setup *setup)
{
    channel->state = BULK_STATE_UNINITIALIZED;
    /*
     * XXX: do we somehow track that this endpoint has not been assigned to
     * a channel before?
     */
    channel->ep = local_ep_desc;
    channel->callbacks = callbacks;

    channel->direction = setup->direction;
    channel->role = setup->role;
    channel->trust = setup->trust;
    channel->constraints = setup->constraints;
    channel->meta_size = setup->meta_size;
    channel->waitset = setup->waitset;
    channel->user_state = setup->user_state;
    channel->pools = NULL;

    /* the backend is expected to advance the channel state on success */
    return local_ep_desc->f->channel_create(channel);
}
57 * 58 * @param channel Pointer to unused channel handle 59 * @param ep_desc Description of the remote endpoint to bind to 60 * @param callbacks Callbacks for events on this channel 61 * @param params bind parameters 62 */ 63errval_t bulk_channel_bind(struct bulk_channel *channel, 64 struct bulk_endpoint_descriptor *remote_ep_desc, 65 struct bulk_channel_callbacks *callbacks, 66 struct bulk_channel_bind_params *params, 67 struct bulk_continuation cont) 68{ 69 if (channel->state != BULK_STATE_UNINITIALIZED) { 70 return BULK_TRANSFER_CHAN_STATE; 71 } 72 73 channel->state = BULK_STATE_UNINITIALIZED; 74 channel->ep = remote_ep_desc; 75 channel->callbacks = callbacks; 76 77 channel->role = params->role; 78 channel->trust = params->trust; 79 channel->constraints = params->constraints; 80 channel->waitset = params->waitset; 81 channel->user_state = params->user_state; 82 channel->pools = NULL; 83 84 return remote_ep_desc->f->channel_bind(channel, cont); 85} 86 87/** 88 * Free a channel 89 * 90 * @param channel Channel to be freed 91 * @param free_resources Flag if the resources i.e. pools also should be freed 92 */ 93errval_t bulk_channel_destroy(struct bulk_channel *channel, 94 struct bulk_continuation cont) 95{ 96 assert(!"NYI: bulk_channel_destroy"); 97 switch (channel->state) { 98 case BULK_STATE_UNINITIALIZED: 99 return SYS_ERR_OK; 100 break; 101 case BULK_STATE_INITIALIZED: 102 103 break; 104 case BULK_STATE_BINDING: 105 106 break; 107 108 case BULK_STATE_CONNECTED: 109 break; 110 111 case BULK_STATE_TEARDOWN: 112 break; 113 114 case BULK_STATE_CLOSED: 115 116 break; 117 default: 118 return BULK_TRANSFER_CHAN_STATE; 119 break; 120 } 121 return SYS_ERR_OK; 122} 123 124/** 125 * Assign a pool to a channel. 
126 * 127 * @param channel Channel 128 * @param pool Pool to assign (must not be assigned to this channel yet) 129 */ 130errval_t bulk_channel_assign_pool(struct bulk_channel *channel, 131 struct bulk_pool *pool, 132 struct bulk_continuation cont) 133{ 134 assert(channel); 135 assert(pool); 136 137 if (channel->state != BULK_STATE_CONNECTED || !(channel->ep)) { 138 return BULK_TRANSFER_CHAN_STATE; 139 } 140 141 if (pool->trust == BULK_TRUST_UNINITIALIZED) { 142 /* this pool has never been assigned to a channel */ 143 pool->trust = channel->trust; 144 } 145 146 /* a channel must not span trusted and non trusted channels */ 147 if (channel->trust != pool->trust) { 148 return BULK_TRANSFER_CHAN_TRUST; 149 } 150 151 if (bulk_pool_is_assigned(pool, channel)) { 152 debug_printf("bulk_channel_assign_pool: pool already assigned to channel\n"); 153 /* 154 * XXX: do we treat this as a no-op or should we return an 155 * BULK_TRANSFER_POOL_ALREADY_ASSIGNED ? 156 * - RA 157 */ 158 return SYS_ERR_OK; 159 } 160 161 /* 162 * Note, the pool can only be added to the list of pools, once the other 163 * side has acked the assignment request. 164 */ 165 166 return channel->ep->f->assign_pool(channel, pool, cont); 167} 168 169/** 170 * Remove a pool from a channel 171 * 172 * @param channel Channel 173 * @param pool Pool to remove (must be previously assigned to the channel) 174 * 175 */ 176errval_t bulk_channel_remove_pool(struct bulk_channel *channel, 177 struct bulk_pool *pool, 178 struct bulk_continuation cont) 179{ 180 assert(!"NYI: removing a pool from a channel"); 181 182 assert(channel); 183 assert(pool); 184 185 if (channel->state != BULK_STATE_CONNECTED) { 186 return BULK_TRANSFER_CHAN_STATE; 187 } 188 189 if (!bulk_pool_is_assigned(pool, channel)) { 190 /* 191 * XXX: if there is no such pool on this channel simply return 192 * or do we want to indicate an error here? 
193 * BULK_TRANSFER_POOL_NOT_ASSIGNED 194 * - RA 195 */ 196 return SYS_ERR_OK; 197 } 198 199 struct bulk_pool_list *list = channel->pools; 200 struct bulk_pool_list *prev = NULL; 201 while (list) { 202 if (bulk_pool_cmp_id(&list->pool->id, &pool->id) == 0) { 203 break; 204 } 205 prev = list; 206 list = list->next; 207 } 208 if (prev == NULL) { 209 channel->pools = list->next; 210 } else { 211 prev->next = list->next; 212 } 213 214 free(list); 215 216 /* 217 * TODO: we may want to track the channels which this pool was used, 218 * so if the last reference is removed, we can unmap the pool 219 */ 220 221 return channel->ep->f->remove_pool(channel, pool, cont); 222} 223 224errval_t bulk_channel_move(struct bulk_channel *channel, 225 struct bulk_buffer *buffer, 226 void *meta, 227 struct bulk_continuation cont) 228{ 229 assert(channel); 230 assert(buffer); 231 232 errval_t err; 233 234 if (channel->state != BULK_STATE_CONNECTED) { 235 return BULK_TRANSFER_CHAN_STATE; 236 } 237 238 if (channel->direction != BULK_DIRECTION_TX) { 239 return BULK_TRANSFER_CHAN_DIRECTION; 240 } 241 242 if (!bulk_pool_is_assigned(buffer->pool, channel)) { 243 return BULK_TRANSFER_POOL_NOT_ASSIGNED; 244 } 245 246 if (!bulk_buffer_is_owner(buffer)) { 247 return BULK_TRANSFER_BUFFER_NOT_OWNED; 248 } 249 250 err = bulk_buffer_change_state(buffer, BULK_BUFFER_INVALID); 251 if (err_is_fail(err)) { 252 /* 253 * XXX: what do we do if the unmap fails? 
254 */ 255 USER_PANIC_ERR(err, "failed to change the buffer state"); 256 } 257 258 return channel->ep->f->move(channel, buffer, meta, cont); 259} 260 261/** 262 * 263 */ 264errval_t bulk_channel_pass(struct bulk_channel *channel, 265 struct bulk_buffer *buffer, 266 void *meta, 267 struct bulk_continuation cont) 268{ 269 assert(channel); 270 assert(buffer); 271 272 errval_t err; 273 274 if (channel->state != BULK_STATE_CONNECTED) { 275 return BULK_TRANSFER_CHAN_STATE; 276 } 277 278 if (!bulk_pool_is_assigned(buffer->pool, channel)) { 279 return BULK_TRANSFER_POOL_NOT_ASSIGNED; 280 } 281 282 if (!bulk_buffer_is_owner(buffer)) { 283 return BULK_TRANSFER_BUFFER_NOT_OWNED; 284 } 285 286 err = bulk_buffer_change_state(buffer, BULK_BUFFER_INVALID); 287 if (err_is_fail(err)) { 288 /* 289 * XXX: what do we do if the unmap fails? 290 */ 291 USER_PANIC_ERR(err, "failed to change the buffer state"); 292 } 293 294 return channel->ep->f->pass(channel, buffer, meta, cont); 295} 296 297/** 298 * 299 */ 300errval_t bulk_channel_copy(struct bulk_channel *channel, 301 struct bulk_buffer *buffer, 302 void *meta, 303 struct bulk_continuation cont) 304{ 305 assert(channel); 306 assert(buffer); 307 308 errval_t err; 309 310 if (channel->state != BULK_STATE_CONNECTED) { 311 return BULK_TRANSFER_CHAN_STATE; 312 } 313 314 if (channel->direction != BULK_DIRECTION_TX) { 315 return BULK_TRANSFER_CHAN_DIRECTION; 316 } 317 318 if (!bulk_pool_is_assigned(buffer->pool, channel)) { 319 return BULK_TRANSFER_POOL_NOT_ASSIGNED; 320 } 321 322 if (!bulk_buffer_is_valid(buffer)) { 323 return BULK_TRANSFER_BUFFER_INVALID; 324 } 325 326 enum bulk_buffer_state new_state = BULK_BUFFER_READ_ONLY; 327 if (bulk_buffer_is_owner(buffer)) { 328 new_state = BULK_BUFFER_RO_OWNED; 329 } 330 331 buffer->local_ref_count++; 332 333 err = bulk_buffer_change_state(buffer, new_state); 334 if (err_is_fail(err)) { 335 return BULK_TRANSFER_BUFFER_STATE; 336 } 337 338 return channel->ep->f->copy(channel, buffer, meta, cont); 
339} 340 341/** 342 * 343 */ 344errval_t bulk_channel_release(struct bulk_channel *channel, 345 struct bulk_buffer *buffer, 346 struct bulk_continuation cont) 347{ 348 assert(channel); 349 assert(buffer); 350 351 errval_t err; 352 353 if (channel->state != BULK_STATE_CONNECTED) { 354 return BULK_TRANSFER_CHAN_STATE; 355 } 356 357 if (!bulk_buffer_is_copy(buffer)) { 358 return BULK_TRANSFER_BUFFER_NOT_A_COPY; 359 } 360 361 if (!bulk_buffer_is_owner(buffer) && !bulk_buffer_can_release(buffer)) { 362 return BULK_TRANSFER_BUFFER_REFCOUNT; 363 } 364 365 err = bulk_buffer_change_state(buffer, BULK_BUFFER_INVALID); 366 if (err_is_fail(err)) { 367 USER_PANIC_ERR(err, "failed to change the buffer state"); 368 } 369 370 return channel->ep->f->release(channel, buffer, cont); 371} 372 373