/**
 * \file
 * \brief Unidirectional bulk data transfer via shared memory
 */

/*
 * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */
#include <string.h>

#include <barrelfish/barrelfish.h>

#include <bulk_transfer/bulk_transfer.h>
#include <bulk_transfer/bulk_allocator.h>

#include "bulk_pool.h"
#include "bulk_buffer.h"


/**
 * Initializes a new bulk allocator with a pool and allocates memory for it.
 *
 * @param alloc         pointer to an unused allocator handle
 * @param buffer_count  the number of buffers to allocate
 * @param buffer_size   the size of a single buffer
 * @param constraints   memory requirements for this pool or NULL if none
 *
 * @return SYS_ERR_OK on success, error value on failure
 */
errval_t bulk_alloc_init(struct bulk_allocator *alloc,
                         size_t buffer_count,
                         size_t buffer_size,
                         struct bulk_pool_constraints *constraints)
{
    assert(alloc);

    if (!bulk_buffer_check_size(buffer_size)) {
        return BULK_TRANSFER_ALLOC_BUFFER_SIZE;
    }

    if (buffer_count == 0) {
        return BULK_TRANSFER_ALLOC_BUFFER_COUNT;
    }

    errval_t err;

    size_t pool_size = buffer_count * buffer_size;

    err = bulk_pool_alloc(&alloc->pool, buffer_count, buffer_size);
    if (err_is_fail(err)) {
        return err;
    }

    /* reserve a virtual memory range for the pool */

    /* TODO: how to set the physical alignment constraints ? */

    /* set ram affinity */
    uint64_t minbase, maxlimit;
    if ((constraints != NULL) && (constraints->range_min != 0)
                    && (constraints->range_max + 1) != 0) {
        ram_get_affinity(&minbase, &maxlimit);
        ram_set_affinity(constraints->range_min, constraints->range_max);
    }
    size_t ret_size;
    err = frame_alloc(&alloc->pool->pool_cap, pool_size, &ret_size);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }

    /* restore ram affinity */
    if ((constraints != NULL) && (constraints->range_min != 0)
                    && (constraints->range_max + 1) != 0) {
        ram_set_affinity(minbase, maxlimit);
    }

    if (constraints != NULL) {
        alloc->pool->trust = constraints->trust;
    }

    /*
     * in the trusted case, we copy the pool_cap into all buffers and map
     * them with offsets,
     * in the untrusted case, we split the cap into smaller frames and map those
     */
    err = bulk_pool_map(alloc->pool);
    if (err_is_fail(err)) {
        /* TODO: error handling */
        return err;
    }

    alloc->mngs = calloc(buffer_count, sizeof(*alloc->mngs));
    if (alloc->mngs == NULL) {
        return LIB_ERR_MALLOC_FAIL;
    }

    /* build up the free list from the freshly mapped buffers */
    alloc->free_buffers = NULL;
    for (size_t i = 0; i < buffer_count; ++i) {
        struct bulk_buffer *buf = alloc->pool->buffers[i];
        /* setup the management structure for the free list */
        struct bulk_buffer_mng *le = alloc->mngs + i;
        le->buffer = buf;
        le->next = alloc->free_buffers;
        alloc->free_buffers = le;
    }
    alloc->num_free = buffer_count;

    return SYS_ERR_OK;
}
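
/*
 * A minimal usage sketch (not part of the original code): setting up an
 * allocator with 32 buffers of 4 KiB each and no placement constraints.
 * The surrounding application setup is an assumption for illustration only.
 *
 *   struct bulk_allocator alloc;
 *   errval_t err = bulk_alloc_init(&alloc, 32, 4096, NULL);
 *   if (err_is_fail(err)) {
 *       USER_PANIC_ERR(err, "failed to initialize the bulk allocator");
 *   }
 */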

/**
 * Creates a new allocator based on the supplied capability. It creates as
 * many buffers of size buffer_size as fit into the capability.
 *
 * @param alloc         an unused allocator handle
 * @param buffer_size   the size of a single buffer
 * @param frame         capability for backing the bulk pool
 */
errval_t bulk_alloc_init_from_cap(struct bulk_allocator *alloc,
                                  size_t buffer_size,
                                  struct capref *frame)
{
    assert(!"NYI: bulk_alloc_init_from_cap");
    return SYS_ERR_OK;
}

/**
 * Frees up the bulk allocator and its pool.
 *
 * @param alloc handle to a bulk allocator to be freed
 */
errval_t bulk_alloc_free(struct bulk_allocator *alloc)
{
    assert(!"NYI: bulk_alloc_free");

    /*
     * PERFORMING INTERNAL TESTS
     */
    debug_printf("PERFORMING INTERNAL TESTS...\n");
    assert(alloc->num_free == 1);

    struct bulk_buffer *buf = bulk_alloc_new_buffer(alloc);

    buf = alloc->pool->buffers[4];
    assert(buf);

    errval_t err;

    volatile char *e;

    debug_printf("ABOUT TO UNMAP\n");
    err = bulk_buffer_unmap(buf);
    if (err_is_fail(err)) {
        debug_printf("BUFFER UNMAP FAILED!\n");
    }

    debug_printf("ABOUT TO MAP AGAIN\n");
    err = bulk_buffer_map(buf);
    if (err_is_fail(err)) {
        debug_printf("BUFFER MAP FAILED\n");
    }
    debug_printf("ABOUT CHECKING....\n");
    e = buf->address;
    for (int i = 0; i < 4096; ++i) {
        e[i] = 1;
    }

#if 0
    debug_printf("ABOUT TO UNMAP\n");
    err = bulk_buffer_unmap(buf);
    if (err_is_fail(err)) {
        debug_printf("BUFFER UNMAP FAILED!\n");
    }

    debug_printf("ABOUT TO CRASH....\n");
    e = buf->address;
    for (int i = 0; i < 4096; ++i) {
        e[i] = 1;
    }
#endif
    debug_printf("ABOUT TO CHANGE STATE:\n");
    err = bulk_buffer_change_state(buf, BULK_BUFFER_READ_ONLY);
    if (err_is_fail(err)) {
        debug_printf("change state failed\n");
    }

    e = buf->address;
    debug_printf("ABOUT TO READ: %p\n", e);
    for (int i = 0; i < 4096; ++i) {
        if (e[i] != 1) {
            debug_printf("ERROR: WRONG VALUE!\n");
            break;
        }
    }
    debug_printf("ABOUT TO WRITE: %p\n", e);
    for (int i = 0; i < 4096; ++i) {
        if (!(i % 1024)) {
            debug_printf("writing %p\n", &e[i]);
        }
        e[i] = 2;
    }

    for (int i = 0; i < 4096; ++i) {
        if (e[i] != 2) {
            debug_printf("ERROR: WRONG VALUE!\n");
            break;
        }
    }

    assert(!"NYI: bulk_alloc_free");
    return SYS_ERR_OK;
}

/**
 * Gets a new bulk buffer from the allocator.
 *
 * @param   alloc   the allocator handle to allocate the buffer from
 *
 * @return  pointer to a bulk_buffer on success
 *          NULL if there are no buffers left
 *
 */
struct bulk_buffer *bulk_alloc_new_buffer(struct bulk_allocator *alloc)
{
    assert(alloc);

    if (alloc->num_free == 0) {
        return NULL;
    }

    struct bulk_buffer *buf = alloc->free_buffers->buffer;
    alloc->free_buffers = alloc->free_buffers->next;
    alloc->num_free--;

    /*
     * XXX: do we want to have a special state for being "not allocated"
     *      i.e. maybe set the state to invalid, even though mapped r/w?
     */
    assert(buf->state == BULK_BUFFER_READ_WRITE);

    return buf;
}

/**
 * Returns a buffer to the allocator. The buffer must belong to the
 * allocator's pool.
 *
 * @param alloc     the allocator to hand the buffer back to
 * @param buffer    the buffer to hand back to the allocator
 */
errval_t bulk_alloc_return_buffer(struct bulk_allocator *alloc,
                                  struct bulk_buffer *buffer)
{
    assert(alloc);

    if (buffer == NULL || buffer->state != BULK_BUFFER_READ_WRITE) {
        /* only read-write i.e. owned buffers can be handed back */
        return BULK_TRANSFER_BUFFER_STATE;
    }

    if (buffer->pool != alloc->pool) {
        /* buffers can only be returned to the allocator of their own pool */
        return BULK_TRANSFER_POOL_INVALD;
    }

    struct bulk_buffer_mng *bm = alloc->mngs + buffer->bufferid;
    bm->buffer = buffer;
    bm->next = alloc->free_buffers;
    alloc->free_buffers = bm;
    alloc->num_free++;

    return SYS_ERR_OK;
}
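
/*
 * A minimal usage sketch (not part of the original code), continuing the
 * bulk_alloc_init() example above: take a buffer from the allocator, use it,
 * and hand it back while it is still in the read-write state. The surrounding
 * application logic is an assumption for illustration only.
 *
 *   struct bulk_buffer *buf = bulk_alloc_new_buffer(&alloc);
 *   if (buf == NULL) {
 *       USER_PANIC("no free buffers left in the pool");
 *   }
 *   memset(buf->address, 0, 4096);
 *   err = bulk_alloc_return_buffer(&alloc, buf);
 *   if (err_is_fail(err)) {
 *       USER_PANIC_ERR(err, "failed to return the buffer");
 *   }
 */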