/**
 * \file
 * \brief Managing the pinned memory for vspace metadata
 *
 * Warning: This code is coupled with the code in slot_alloc/ and pmap_*.
 *
 * The slabs required for the various lists in the vspace and memobj code
 * are backed by this memobj. This memory is pinned since the frames are
 * not tracked and cannot be mapped into multiple vregions.
 *
 * If the slabs maintained in the state run out of memory, the region needs
 * to be grown. This file then requires one slot for the frame capability
 * and additional slots to create the mappings; the exact number can be
 * calculated by referring to the pmap_* code.
 *
 * Growing requires one slot from this file.
 */

/*
 * Copyright (c) 2010, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>
#include <barrelfish/core_state_arch.h>
#include "vspace_internal.h"


/**
 * \brief Initialize the pinned region
 *
 * Allocates a region of virtual address space and initializes its state.
 */
errval_t vspace_pinned_init(void)
{
    errval_t err;

    struct pinned_state *state = get_current_pinned_state();
    struct vspace *vspace = get_current_vspace();

    err = memobj_create_pinned(&state->memobj,
                               VSPACE_PINNED_SIZE, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_CREATE_PINNED);
    }

    err = vregion_map(&state->vregion, vspace,
                      (struct memobj*)&state->memobj, 0, VSPACE_PINNED_SIZE,
                      VREGION_FLAGS_READ_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VREGION_MAP);
    }

    state->offset = 0;
    thread_mutex_init(&state->mutex);
    slab_init(&state->vregion_list_slab, VSPACE_PINNED_UNIT *
              sizeof(struct vregion_list), NULL);
    slab_init(&state->frame_list_slab, VSPACE_PINNED_UNIT *
              sizeof(struct memobj_frame_list), NULL);

    return SYS_ERR_OK;
}
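/*
 * Usage sketch (illustrative; the call site below is an assumption and not
 * part of this file): vspace_pinned_init() is expected to run once during
 * per-dispatcher vspace setup, before any code path that needs pinned slab
 * memory. The surrounding function name is hypothetical.
 *
 *   static errval_t setup_vspace_metadata(void)
 *   {
 *       errval_t err = vspace_pinned_init();
 *       if (err_is_fail(err)) {
 *           return err; // propagate; callers may wrap this with err_push()
 *       }
 *       // From here on, vspace_pinned_alloc() can back the vregion and
 *       // memobj frame lists.
 *       return SYS_ERR_OK;
 *   }
 */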

// Amount of memory to allocate when we have to refill one of the slab
// allocators backing the pinned vspace state
#define PINNED_REFILL_SIZE (64 * BASE_PAGE_SIZE)
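// For example, assuming a 4 KiB BASE_PAGE_SIZE (the value is
// architecture-dependent), one refill maps 64 * 4 KiB = 256 KiB of pinned
// memory, which slab_grow() below adds to the selected slab allocator in a
// single piece; each block of that slab then holds VSPACE_PINNED_UNIT list
// entries.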

/**
 * \brief Allocate some slabs
 *
 * \param retbuf     Pointer to return the allocated memory
 * \param slab_type  Type of slab the memory is allocated for
 *
 * Since this region is used to back specific slabs,
 * only those types of slabs can be allocated.
 */
errval_t vspace_pinned_alloc(void **retbuf, enum slab_type slab_type)
{
    errval_t err;
    struct pinned_state *state = get_current_pinned_state();

    // Select slab type
    struct slab_allocator *slab;
    switch (slab_type) {
    case VREGION_LIST:
        slab = &state->vregion_list_slab;
        break;
    case FRAME_LIST:
        slab = &state->frame_list_slab;
        break;
    default:
        return LIB_ERR_VSPACE_PINNED_INVALID_TYPE;
    }

    // memobj->fill() can recurse into vspace_pinned_alloc(), so we need to
    // acquire this lock in nested mode
    thread_mutex_lock_nested(&state->mutex);

    // Try allocating
    static bool is_refilling = false;
    void *buf = slab_alloc(slab);
    // If we're unlucky, this function is called again from inside
    // memobj->fill() while we are refilling the slab allocator. Refilling
    // while there is still one slab left should eliminate the recursive
    // refilling, and the is_refilling flag makes sure that we hand out the
    // last slab if we're already refilling. -SG,2016-11-08.
    if (slab_freecount(slab) <= 1 && !is_refilling) {
        is_refilling = true;
        // Out of memory, grow
        struct capref frame;
        size_t alloc_size = PINNED_REFILL_SIZE;
        err = frame_alloc(&frame, alloc_size, &alloc_size);
        if (err_no(err) == LIB_ERR_RAM_ALLOC_MS_CONSTRAINTS) {
            // Handle early refills, before the memserv connection is established
            alloc_size = BASE_PAGE_SIZE;
            err = frame_alloc(&frame, alloc_size, &alloc_size);
        }
        if (err_is_fail(err)) {
            thread_mutex_unlock(&state->mutex);
            DEBUG_ERR(err, "frame_alloc in vspace_pinned_alloc");
            return err_push(err, LIB_ERR_FRAME_ALLOC);
        }
        err = state->memobj.m.f.fill((struct memobj*)&state->memobj,
                                     state->offset, frame,
                                     alloc_size);
        if (err_is_fail(err)) {
            thread_mutex_unlock(&state->mutex);
            DEBUG_ERR(err, "memobj_fill in vspace_pinned_alloc: offset=%"
                      PRIuGENVADDR, state->offset);
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }

        genvaddr_t gvaddr = vregion_get_base_addr(&state->vregion) +
            state->offset;
        void *slab_buf = (void*)vspace_genvaddr_to_lvaddr(gvaddr);
        slab_grow(slab, slab_buf, alloc_size);
        state->offset += alloc_size;

        // Try again if the first allocation attempt failed
        if (buf == NULL) {
            buf = slab_alloc(slab);
        }
        state->refill_count++;
        is_refilling = false;
    }

    thread_mutex_unlock(&state->mutex);

    if (buf == NULL) {
        return LIB_ERR_SLAB_ALLOC_FAIL;
    } else {
        *retbuf = buf;
        return SYS_ERR_OK;
    }
}
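/*
 * Usage sketch (illustrative; modelled on how the memobj/vregion code is
 * expected to refill its metadata slabs, not copied from it): a caller asks
 * for one block of the matching slab type and grows its own slab allocator
 * with it. The helper name refill_frame_list_slab() is hypothetical.
 *
 *   static errval_t refill_frame_list_slab(struct slab_allocator *slabs)
 *   {
 *       void *buf;
 *       errval_t err = vspace_pinned_alloc(&buf, FRAME_LIST);
 *       if (err_is_fail(err)) {
 *           return err; // callers typically wrap this in their own error code
 *       }
 *       // One block provides VSPACE_PINNED_UNIT frame-list entries, matching
 *       // the block size set up in vspace_pinned_init().
 *       slab_grow(slabs, buf,
 *                 VSPACE_PINNED_UNIT * sizeof(struct memobj_frame_list));
 *       return SYS_ERR_OK;
 *   }
 */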