/*
 * Copyright (c) 2014 ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */
#include <string.h>
#include <barrelfish/barrelfish.h>
#include <barrelfish/nameservice_client.h>
#include <flounder/flounder_txqueue.h>
#include <if/dma_defs.h>

#include <dma_internal.h>
#include <dma/dma_service.h>

#include <debug.h>

/**
 * DMA service state that will be assigned to each client connection
 * (stored in binding->st by the connect callback)
 */
struct dma_svc_st
{
    void *usr_st;                ///< user state returned by the connect callback
    struct tx_queue queue;       ///< transmit queue serializing replies on this binding
    struct dma_binding *binding; ///< the Flounder binding of this connection
};

/**
 * enumeration of all possible states of the service exportation process
 */
enum dma_svc_state
{
    DMA_SVC_STATE_INVALID,
    DMA_SVC_STATE_EXPORTING,
    DMA_SVC_STATE_EXPORT_OK,
    DMA_SVC_STATE_EXPORT_FAIL,
    DMA_SVC_STATE_NS_REGISTERING,
    DMA_SVC_STATE_NS_REGISTER_OK,
    DMA_SVC_STATE_NS_REGISTER_FAIL,
    DMA_SVC_STATE_RUNNING
};

/// represents the current state of the exporting process
static enum dma_svc_state dma_svc_state = DMA_SVC_STATE_EXPORTING;

/// error while exporting
static errval_t dma_svc_err;

/// our own iref of the exported service
static iref_t dma_svc_iref;

/// registered callbacks (set by dma_service_init*; NULL until then)
struct dma_service_cb *event_handlers;

/*
 * ----------------------------------------------------------------------------
 * Reply State Cache
 * ----------------------------------------------------------------------------
 */

struct dma_svc_reply_st
{
    struct txq_msg_st common;    ///< generic txq message state; the casts in
                                 ///< this file rely on it being the first field
    /* union of arguments */
    union
    {
        /* request handling */
        struct
        {
            dma_id_t id;         ///< id of the DMA request being acknowledged
        } request;
    } args;
};

/*
 * ----------------------------------------------------------------------------
 * Memory: registration
 *
---------------------------------------------------------------------------- 81 */ 82 83static errval_t dma_register_response_tx(struct txq_msg_st *msg_st) 84{ 85 struct dma_binding *b = msg_st->queue->binding; 86 87 return dma_register__response__tx(b, TXQCONT(msg_st), msg_st->err); 88} 89 90static void dma_register_call_rx(struct dma_binding *_binding, 91 struct capref memory) 92{ 93 dma_svc_handle_t svc_handle = _binding->st; 94 95 struct txq_msg_st *msg_st = txq_msg_st_alloc(&svc_handle->queue); 96 if (msg_st == NULL) { 97 USER_PANIC("ran out of reply state resources\n"); 98 } 99 100 msg_st->send = dma_register_response_tx; 101 102 if (event_handlers->addregion) { 103 event_handlers->addregion(svc_handle, memory); 104 } else { 105 msg_st->err = DMA_ERR_SVC_REJECT; 106 } 107 108 txq_send(msg_st); 109} 110 111/* 112 * ---------------------------------------------------------------------------- 113 * Memory: de-registration 114 * ---------------------------------------------------------------------------- 115 */ 116 117static errval_t dma_deregister_response_tx(struct txq_msg_st *msg_st) 118{ 119 struct dma_binding *b = msg_st->queue->binding; 120 121 return dma_deregister_response__tx(b, TXQCONT(msg_st), msg_st->err); 122} 123 124static void dma_deregister_call_rx(struct dma_binding *_binding, 125 struct capref memory) 126{ 127 dma_svc_handle_t svc_handle = _binding->st; 128 129 struct txq_msg_st *msg_st = txq_msg_st_alloc(&svc_handle->queue); 130 if (msg_st == NULL) { 131 USER_PANIC("ran out of reply state resources\n"); 132 } 133 134 msg_st->send = dma_deregister_response_tx; 135 136 if (event_handlers->removeregion) { 137 event_handlers->removeregion(svc_handle, memory); 138 } else { 139 msg_st->err = DMA_ERR_SVC_REJECT; 140 } 141 142 txq_send(msg_st); 143} 144 145/* 146 * ---------------------------------------------------------------------------- 147 * Transfer Control: START 148 * ---------------------------------------------------------------------------- 
149 */ 150 151static errval_t dma_memcpy_response_tx(struct txq_msg_st *msg_st) 152{ 153 struct dma_svc_reply_st *st = (struct dma_svc_reply_st *) msg_st; 154 struct dma_binding *b = msg_st->queue->binding; 155 156 return dma_memcpy_response__tx(b, TXQCONT(msg_st), msg_st->err, 157 st->args.request.id); 158} 159 160static void dma_memcpy_call_rx(struct dma_binding *_binding, 161 uint64_t src, 162 uint64_t dst, 163 uint64_t length) 164{ 165 166 DMASVC_DEBUG("memcopy request [0x%016lx]->[0x%016lx] of size 0x%lx\n", src, dst, 167 length); 168 169 dma_svc_handle_t svc_handle = _binding->st; 170 171 struct txq_msg_st *msg_st = txq_msg_st_alloc(&svc_handle->queue); 172 if (msg_st == NULL) { 173 USER_PANIC("ran out of reply state resources\n"); 174 } 175 176 msg_st->send = dma_memcpy_response_tx; 177 178 struct dma_svc_reply_st *reply = (struct dma_svc_reply_st *) msg_st; 179 180 if (event_handlers->memcpy) { 181 msg_st->err = event_handlers->memcpy(svc_handle, dst, src, length, 182 &reply->args.request.id); 183 } else { 184 msg_st->err = DMA_ERR_SVC_REJECT; 185 } 186 187 txq_send(msg_st); 188} 189 190struct dma_rx_vtbl dma_rx_vtbl = { 191 .register__call = dma_register_call_rx, 192 .deregister_call = dma_deregister_call_rx, 193 .memcpy_call = dma_memcpy_call_rx, 194}; 195 196/* 197 * ---------------------------------------------------------------------------- 198 * Transmission of done notifications 199 * ---------------------------------------------------------------------------- 200 */ 201 202static errval_t dma_done_tx(struct txq_msg_st *msg_st) 203{ 204 struct dma_svc_reply_st *st = (struct dma_svc_reply_st *) msg_st; 205 struct dma_binding *b = msg_st->queue->binding; 206 207 return dma_done__tx(b, TXQCONT(msg_st), st->args.request.id, msg_st->err); 208} 209 210/* 211 * ---------------------------------------------------------------------------- 212 * Service export and connect handling 213 * 
---------------------------------------------------------------------------- 214 */ 215 216struct export_arg 217{ 218 errval_t err; 219 void *user_st; 220}; 221 222 223static errval_t svc_connect_cb(void *st, 224 struct dma_binding *binding) 225{ 226 errval_t err; 227 228 DMASVC_DEBUG("New connection to the DMA service\n"); 229 230 if (event_handlers->connect == NULL) { 231 /* the service is not interested in new connections (anymore) */ 232 return DMA_ERR_SVC_REJECT; 233 } 234 235 struct dma_svc_st *state = malloc(sizeof(*state)); 236 if (state == NULL) { 237 return LIB_ERR_MALLOC_FAIL; 238 } 239 240 txq_init(&state->queue, binding, binding->waitset, 241 (txq_register_fn_t) binding->register_send, 242 sizeof(struct dma_svc_reply_st)); 243 244 err = event_handlers->connect(st, &state->usr_st); 245 if (err_is_fail(err)) { 246 /* reject the connection */ 247 DMASVC_DEBUG("application rejected the connection: %s\n", 248 err_getstring(err)); 249 free(state); 250 return DMA_ERR_SVC_REJECT; 251 } 252 253 state->binding = binding; 254 binding->st = state; 255 binding->rx_vtbl = dma_rx_vtbl; 256 257 return SYS_ERR_OK; 258} 259 260static void svc_export_cb(void *st, 261 errval_t err, 262 iref_t iref) 263{ 264 dma_svc_err = err; 265 266 if (err_is_fail(err)) { 267 dma_svc_state = DMA_SVC_STATE_EXPORT_FAIL; 268 return; 269 } 270 271 dma_svc_iref = iref; 272 dma_svc_state = DMA_SVC_STATE_EXPORT_OK; 273} 274 275/* 276 * ============================================================================ 277 * Public Interface 278 * ============================================================================ 279 */ 280 281/** 282 * \brief initializes the DMA service and registers with the DMA manager 283 * 284 * \param cb Callback function pointers 285 * \param arg Argument passed to the connect callback 286 * \param svc_iref Returns the exported iref 287 * 288 * \returns SYS_ERR_OK on success 289 * errval on error 290 */ 291errval_t dma_service_init(struct dma_service_cb *cb, 292 void 
*arg, 293 iref_t *svc_iref) 294{ 295 errval_t err; 296 297 DMASVC_DEBUG("Initializing DMA service...\n"); 298 299 300 err = dma_export(arg, svc_export_cb, svc_connect_cb, 301 get_default_waitset(), 302 IDC_EXPORT_FLAGS_DEFAULT); 303 if (err_is_fail(err)) { 304 return err; 305 } 306 307 while (dma_svc_state == DMA_SVC_STATE_EXPORTING) { 308 messages_wait_and_handle_next(); 309 } 310 311 if (dma_svc_state == DMA_SVC_STATE_EXPORT_FAIL) { 312 return dma_svc_err; 313 } 314 315 dma_svc_state = DMA_SVC_STATE_RUNNING; 316 event_handlers = cb; 317 318 if (svc_iref) { 319 *svc_iref = dma_svc_iref; 320 } 321 322 DMASVC_DEBUG("DMA service up and running.\n"); 323 324 return SYS_ERR_OK; 325} 326 327/** 328 * \brief initializes the DMA service and exports it to the nameservice 329 * 330 * \param svc_name The name of the service for nameservice registration 331 * \param cb Callback function pointers 332 * \param arg Argument passed to the connect callback 333 * \param svc_iref Returns the exported iref 334 * 335 * \returns SYS_ERR_OK on success 336 * errval on error 337 */ 338errval_t dma_service_init_with_name(char *svc_name, 339 struct dma_service_cb *cb, 340 void *arg, 341 iref_t *svc_iref) 342{ 343 errval_t err; 344 345 DMASVC_DEBUG("Initializing DMA service...\n"); 346 347 err = dma_export(arg, svc_export_cb, svc_connect_cb, 348 get_default_waitset(), 349 IDC_EXPORT_FLAGS_DEFAULT); 350 if (err_is_fail(err)) { 351 return err; 352 } 353 354 while (dma_svc_state == DMA_SVC_STATE_EXPORTING) { 355 messages_wait_and_handle_next(); 356 } 357 358 if (dma_svc_state == DMA_SVC_STATE_EXPORT_FAIL) { 359 return dma_svc_err; 360 } 361 362 dma_svc_state = DMA_SVC_STATE_NS_REGISTERING; 363 364 DMASVC_DEBUG("Registering service [%s] with iref [0x%"PRIxIREF"]\n", svc_name, 365 dma_svc_iref); 366 367 err = nameservice_register(svc_name, dma_svc_iref); 368 if (err_is_fail(err)) { 369 dma_svc_state = DMA_SVC_STATE_NS_REGISTER_FAIL; 370 return err; 371 } 372 373 event_handlers = cb; 374 *svc_iref 
= dma_svc_iref; 375 376 DMASVC_DEBUG("DMA service up and running.\n"); 377 378 dma_svc_state = DMA_SVC_STATE_RUNNING; 379 380 return SYS_ERR_OK; 381} 382 383/** 384 * \brief sends a done notification about the transfer that has completed 385 * 386 * \param binding DMA binding 387 * \param err Outcome of the transfer 388 * \param id The id of the completed transfer 389 * 390 * \returns SYS_ERR_OK on success 391 * errval on error 392 */ 393errval_t dma_service_send_done(dma_svc_handle_t svc_handle, 394 errval_t err, 395 dma_req_id_t id) 396{ 397 struct txq_msg_st *msg_st = txq_msg_st_alloc(&svc_handle->queue); 398 if (msg_st == NULL) { 399 USER_PANIC("ran out of reply state resources\n"); 400 } 401 402 msg_st->err = err; 403 msg_st->send = dma_done_tx; 404 405 struct dma_svc_reply_st *reply = (struct dma_svc_reply_st *) msg_st; 406 407 reply->args.request.id = id; 408 409 txq_send(msg_st); 410 411 return SYS_ERR_OK; 412} 413