1#include "internal.h"
2#include "delete_int.h"
3#include "monitor_debug.h"
4#include <monitor_invocations.h>
5#include <caplock.h>
6#include <barrelfish/event_queue.h>
7#include <barrelfish/slot_alloc.h>
8
// File-scope state of the delete/revoke "stepping" state machine.
// All of this is effectively singleton state for the local monitor.
static struct event_queue trigger_queue;        // queue that drives delete_steps_cont
static bool triggered;                          // true while stepping mode is active
static bool enqueued;                           // true while trigger_qn is in trigger_queue
static int suspended;                           // pause/resume nesting depth; steps run only at 0
static struct event_queue_node trigger_qn;      // single reusable queue node for step events
static struct event_closure step_closure;       // closure wrapping delete_steps_cont
static struct event_queue_node caplock_qn;      // queue node used when waiting on a locked cap
static struct delete_st delete_step_st;         // state for last-owned-cap deletions
static struct capref delcap;                    // scratch slot the kernel fills during delete steps
static struct event_queue delete_queue;         // queue on which completed waiters' continuations run
// FIFO list of operations waiting for the current stepping round to finish.
static struct delete_queue_node *pending_head, *pending_tail;

static void delete_steps_cont(void *st);
static void delete_steps_clear(void *st);
static void delete_queue_notify(void);
24
/**
 * \brief Return the waitset the delete stepping machinery was initialized
 *        with, or NULL if delete_steps_init() has not run yet.
 */
struct waitset*
delete_steps_get_waitset(void)
{
    return delete_queue.waitset;
}
30
31void
32delete_steps_init(struct waitset *ws)
33{
34    DEBUG_CAPOPS("%s\n", __FUNCTION__);
35    errval_t err;
36
37    struct waitset *myws = delete_steps_get_waitset();
38    if (myws != NULL) {
39        DEBUG_CAPOPS("delete stepping already initialized with ws=%p, doing nothing\n", myws);
40        return;
41    }
42
43    event_queue_init(&trigger_queue, ws, EVENT_QUEUE_CONTINUOUS);
44    triggered = false;
45    enqueued = false;
46    suspended = 0;
47    step_closure = MKCLOSURE(delete_steps_cont, NULL);
48
49    event_queue_init(&delete_queue, ws, EVENT_QUEUE_CONTINUOUS);
50    pending_head = pending_tail = NULL;
51
52    delete_step_st.wait = false;
53    delete_step_st.result_handler = NULL;
54    err = slot_alloc(&delcap);
55    PANIC_IF_ERR(err, "allocating delete_steps slot");
56    delete_step_st.capref = get_cap_domref(delcap);
57    err = slot_alloc(&delete_step_st.newcap);
58    PANIC_IF_ERR(err, "allocating delete_steps new cap slot");
59}
60
61void
62delete_steps_trigger(void)
63{
64    DEBUG_CAPOPS("%s\n", __FUNCTION__);
65    if (!triggered) {
66        triggered = true;
67        if (!suspended && !enqueued) {
68            event_queue_add(&trigger_queue, &trigger_qn, step_closure);
69            enqueued = true;
70        }
71    }
72}
73
74void
75delete_steps_pause(void)
76{
77    DEBUG_CAPOPS("%s: called from %p\n", __FUNCTION__,
78            __builtin_return_address(0));
79    suspended++;
80}
81
82void
83delete_steps_resume(void)
84{
85    DEBUG_CAPOPS("%s\n", __FUNCTION__);
86    assert(suspended > 0);
87    suspended--;
88    if (!suspended) {
89        DEBUG_CAPOPS("%s: !suspended, continuing\n", __FUNCTION__);
90        event_queue_add(&trigger_queue, &trigger_qn, step_closure);
91        enqueued = true;
92    }
93}
94
/**
 * \brief Result callback for the last-owned-cap deletion started in
 *        delete_steps_cont(); resumes the paused stepping machine.
 *
 * NOTE(review): \p st is unused, and \p status is only consumed by the
 * assert, so both are unreferenced under NDEBUG — presumably intentional
 * as deletion of the last copy is not expected to fail here; confirm.
 */
static void
delete_steps_delete_result(errval_t status, void *st)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);
    assert(err_is_ok(status));
    delete_steps_resume();
}
102
103static void
104delete_steps_cont(void *st)
105{
106    DEBUG_CAPOPS("%s\n", __FUNCTION__);
107    errval_t err;
108    assert(triggered);
109    assert(enqueued);
110    enqueued = false;
111    if (suspended) {
112        DEBUG_CAPOPS("%s: suspended (%d); return\n", __FUNCTION__, suspended);
113        return;
114    }
115
116    err = monitor_delete_step(delcap);
117    if (err_no(err) == SYS_ERR_CAP_LOCKED) {
118        // XXX
119        DEBUG_CAPOPS("%s: cap locked\n", __FUNCTION__);
120        caplock_wait(get_cap_domref(NULL_CAP), &caplock_qn, step_closure);
121    }
122    if (err_no(err) == SYS_ERR_DELETE_LAST_OWNED) {
123        DEBUG_CAPOPS("%s: deleting last owned\n", __FUNCTION__);
124        assert(!delete_step_st.result_handler);
125        delete_step_st.result_handler = delete_steps_delete_result;
126        delete_step_st.st = NULL;
127        capops_delete_int(&delete_step_st);
128    }
129    else if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
130        DEBUG_CAPOPS("%s: cap not found, starting clear step\n", __FUNCTION__);
131        delete_steps_clear(st);
132    }
133    else if (err_is_fail(err)) {
134        USER_PANIC_ERR(err, "while performing delete steps");
135    }
136    else {
137        if (err_no(err) == SYS_ERR_RAM_CAP_CREATED) {
138            DEBUG_CAPOPS("%s: sending reclaimed RAM to memserv.\n", __FUNCTION__);
139            send_new_ram_cap(delcap);
140        }
141        if (!enqueued) {
142            DEBUG_CAPOPS("%s: !enqueued, adding to queue\n", __FUNCTION__);
143            event_queue_add(&trigger_queue, &trigger_qn, step_closure);
144            enqueued = true;
145        }
146    }
147    DEBUG_CAPOPS("%s: done\n", __FUNCTION__);
148}
149
150static void
151delete_steps_clear(void *st)
152{
153    DEBUG_CAPOPS("%s\n", __FUNCTION__);
154    errval_t err;
155    while (true) {
156        err = monitor_clear_step(delcap);
157        if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
158            break;
159        }
160        else if (err_is_fail(err)) {
161            USER_PANIC_ERR(err, "while performing clear steps");
162        }
163        else if (err_no(err) == SYS_ERR_RAM_CAP_CREATED) {
164            DEBUG_CAPOPS("%s: sending reclaimed RAM to memserv.\n", __FUNCTION__);
165            send_new_ram_cap(delcap);
166        }
167    }
168    DEBUG_CAPOPS("%s: finished, calling delete_queue_notify\n", __FUNCTION__);
169    triggered = false;
170    delete_queue_notify();
171}
172
173void
174delete_queue_wait(struct delete_queue_node *qn, struct event_closure cont)
175{
176    DEBUG_CAPOPS("%s\n", __FUNCTION__);
177    // enqueue the node in the list of pending events
178    if (!pending_head) {
179        assert(!pending_tail);
180        pending_head = pending_tail = qn;
181        qn->next = NULL;
182    }
183    else {
184        assert(pending_tail);
185        assert(!pending_tail->next);
186        pending_tail->next = qn;
187        pending_tail = qn;
188        qn->next = NULL;
189    }
190    qn->cont = cont;
191
192    // trigger "stepping" mode of the delete/revoke state machine
193    delete_steps_trigger();
194}
195
196static void
197delete_queue_notify(void)
198{
199    DEBUG_CAPOPS("%s\n", __FUNCTION__);
200    // this should only be triggered when the "stepping" mode of the
201    // delete/revoke state machine completes, so a notify without any
202    // operations pending would be very strange and is probably a bug
203    assert(pending_head);
204    assert(pending_tail);
205
206    // extract the contents of the queue of currently pending delete operations
207    struct delete_queue_node *curr = pending_head;
208    pending_head = pending_tail = NULL;
209
210    // put them all in the event queue so they are executed
211    for ( ; curr; curr = curr->next) {
212        DEBUG_CAPOPS("%s: adding %p to ev q.\n", __FUNCTION__, curr);
213        event_queue_add(&delete_queue, &curr->qn, curr->cont);
214    }
215}
216
217