/**
 * \file
 * \brief Arch-generic system calls implementation.
 */

/*
 * Copyright (c) 2007-2010,2012, ETH Zurich.
 * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <kernel.h>
#include <stdio.h>
#include <string.h>
#include <syscall.h>
#include <barrelfish_kpi/syscalls.h>
#include <capabilities.h>
#include <cap_predicates.h>
#include <coreboot.h>
#include <mdb/mdb.h>
#include <mdb/mdb_tree.h>
#include <dispatch.h>
#include <distcaps.h>
#include <wakeup.h>
#include <paging_kernel_helper.h>
#include <paging_kernel_arch.h>
#include <exec.h>
#include <irq.h>
#include <trace/trace.h>
#include <trace_definitions/trace_defs.h>
#include <kcb.h>
#include <useraccess.h>
#include <systime.h>

errval_t sys_print(const char *str, size_t length)
{
    /* FIXME: check that string is mapped and accessible to caller! */
    printf("%.*s", (int)length, str);
    return SYS_ERR_OK;
}

/* FIXME: lots of missing argument checks in this function */
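/**
 * \brief Set up a new dispatcher.
 *
 * \param to     Dispatcher capability to set up
 * \param cptr   Capability address of the new CSpace root (L1 CNode) in the
 *               caller's CSpace; 0 for VM guest domains
 * \param level  Level/depth at which to look up \p cptr
 * \param vptr   Capability address of the VSpace root, relative to the new
 *               CSpace root
 * \param dptr   Capability address of the dispatcher frame, relative to the
 *               new CSpace root
 * \param run    Make the dispatcher runnable once it is set up
 * \param odptr  Capability address (in the caller's CSpace) of a dispatcher
 *               whose domain ID the new dispatcher inherits
 */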
struct sysret
sys_dispatcher_setup(struct capability *to, capaddr_t cptr, uint8_t level,
                     capaddr_t vptr, capaddr_t dptr, bool run, capaddr_t odptr)
{
    errval_t err = SYS_ERR_OK;
    assert(to->type == ObjType_Dispatcher);
    struct dcb *dcb = to->u.dispatcher.dcb;
    assert(dcb != dcb_current);

    lpaddr_t lpaddr;

    /* 0. Handle sys_dispatcher_setup for guest domains */
    if (cptr == 0x0) {
        assert(dcb->is_vm_guest);
        assert(vptr == 0x0);
        assert(dptr == 0x0);
        assert(odptr == 0x0);
        if (!dcb->is_vm_guest || vptr != 0x0 || dptr != 0x0 || odptr != 0x0) {
            return SYSRET(SYS_ERR_DISP_NOT_RUNNABLE);
        }
        if (run) {
            // Dispatchers run disabled the first time
            dcb->disabled = 1;
            make_runnable(dcb);
        }
        return SYSRET(SYS_ERR_OK);
    }

    assert(!dcb->is_vm_guest);
    assert(cptr != 0x0);
    assert(vptr != 0x0);
    assert(dptr != 0x0);
    assert(odptr != 0x0);

    if (cptr == 0x0 || vptr == 0x0 || dptr == 0x0 || odptr == 0x0) {
        return SYSRET(SYS_ERR_DISP_NOT_RUNNABLE);
    }

    /* 1. set cspace root */
    struct cte *root;
    err = caps_lookup_slot(&dcb_current->cspace.cap, cptr, level,
                           &root, CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        debug(SUBSYS_CAPS, "caps_lookup_slot for croot=%"PRIxCADDR", level=%d: %"PRIuERRV"\n", cptr, level, err);
        return SYSRET(err_push(err, SYS_ERR_DISP_CSPACE_ROOT));
    }
    if (root->cap.type != ObjType_L1CNode) {
        return SYSRET(err_push(err, SYS_ERR_DISP_CSPACE_INVALID));
    }
    err = caps_copy_to_cte(&dcb->cspace, root, false, 0, 0);
    if (err_is_fail(err)) {
        debug(SUBSYS_CAPS, "caps_copy_to_cte for croot: %"PRIuERRV"\n", err);
        return SYSRET(err_push(err, SYS_ERR_DISP_CSPACE_ROOT));
    }

    /* 2. set vspace root */
    struct capability *vroot;
    err = caps_lookup_cap(&root->cap, vptr, CNODE_TYPE_COUNT, &vroot, CAPRIGHTS_WRITE);
    if (err_is_fail(err)) {
        debug(SUBSYS_CAPS, "caps_lookup_cap for vroot=%"PRIxCADDR": %"PRIuERRV"\n", vptr, err);
        return SYSRET(err_push(err, SYS_ERR_DISP_VSPACE_ROOT));
    }

    // Insert as dispatcher's VSpace root
    if (!type_is_vroot(vroot->type)) {
        return SYSRET(SYS_ERR_DISP_VSPACE_INVALID);
    }
    dcb->vspace = gen_phys_to_local_phys(get_address(vroot));

    /* 3. set dispatcher frame pointer */
    struct cte *dispcte;
    err = caps_lookup_slot(&root->cap, dptr, CNODE_TYPE_COUNT, &dispcte,
                           CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DISP_FRAME));
    }
    struct capability *dispcap = &dispcte->cap;
    if (dispcap->type != ObjType_Frame) {
        return SYSRET(SYS_ERR_DISP_FRAME_INVALID);
    }
    if (get_size(dispcap) < DISPATCHER_FRAME_SIZE) {
        return SYSRET(SYS_ERR_DISP_FRAME_SIZE);
    }
    /* FIXME: check rights? */

    lpaddr = gen_phys_to_local_phys(get_address(dispcap));
    dcb->disp = local_phys_to_mem(lpaddr);
    // Copy the cap to dcb also
    err = caps_copy_to_cte(&dcb->disp_cte, dispcte, false, 0, 0);
    // If copy fails, something wrong in kernel
    assert(err_is_ok(err));

    /* 5. Make runnable if desired */
    if (run) {
        if (dcb->vspace == 0 || dcb->disp == 0 || dcb->cspace.cap.type != ObjType_L1CNode) {
            return SYSRET(err_push(err, SYS_ERR_DISP_NOT_RUNNABLE));
        }

        // XXX: dispatchers run disabled the first time they start
        dcb->disabled = 1;
        //printf("DCB: %p %.*s\n", dcb, DISP_NAME_LEN, dcb->disp->name);
        make_runnable(dcb);
    }

    /* 6. Copy domain ID off given dispatcher */
    // XXX: We generally pass the current dispatcher as odisp, see e.g.
    // lib/spawndomain/spawn.c:spawn_run().  In that case the new domain gets
    // the same domain id as the domain doing the spawning. cf. T271
    // -SG, 2016-07-21.
    struct capability *odisp;
    err = caps_lookup_cap(&dcb_current->cspace.cap, odptr, CNODE_TYPE_COUNT,
                          &odisp, CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DISP_OCAP_LOOKUP));
    }
    if (odisp->type != ObjType_Dispatcher) {
        return SYSRET(SYS_ERR_DISP_OCAP_TYPE);
    }
    dcb->domain_id = odisp->u.dispatcher.dcb->domain_id;

    /* 7. (HACK) Set current core id */
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(dcb->disp);
    disp->curr_core_id = my_core_id;

    /* 8. Enable tracing for new domain */
    err = trace_new_application(disp->name, (uintptr_t) dcb);

    if (err == TRACE_ERR_NO_BUFFER) {
        // Try to use the boot buffer.
        trace_new_boot_application(disp->name, (uintptr_t) dcb);
    }

    // Setup systime frequency
    disp->systime_frequency = systime_frequency;

    return SYSRET(SYS_ERR_OK);
}

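/**
 * \brief Set the scheduling parameters of a dispatcher.
 *
 * Only has an effect when the kernel is built with CONFIG_SCHEDULER_RBED.
 *
 * \param to        Dispatcher capability to modify
 * \param type      Task type, in the range TASK_TYPE_BEST_EFFORT to
 *                  TASK_TYPE_HARD_REALTIME
 * \param deadline  Scheduling deadline
 * \param wcet      Worst-case execution time; must not exceed deadline or period
 * \param period    Period of the task
 * \param release   Release time; 0 means release immediately (systime_now())
 * \param weight    Weight; must be non-zero for best-effort tasks
 */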
struct sysret
sys_dispatcher_properties(struct capability *to,
                          enum task_type type, unsigned long deadline,
                          unsigned long wcet, unsigned long period,
                          unsigned long release, unsigned short weight)
{
    assert(to->type == ObjType_Dispatcher);

#ifdef CONFIG_SCHEDULER_RBED
    struct dcb *dcb = to->u.dispatcher.dcb;

    assert(type >= TASK_TYPE_BEST_EFFORT && type <= TASK_TYPE_HARD_REALTIME);
    assert(wcet <= deadline);
    assert(wcet <= period);
    assert(type != TASK_TYPE_BEST_EFFORT || weight > 0);

    trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_SCHED_REMOVE,
                152);
    scheduler_remove(dcb);

    /* Set task properties */
    dcb->type = type;
    dcb->deadline = deadline;
    dcb->wcet = wcet;
    dcb->period = period;
    dcb->release_time = (release == 0) ? systime_now() : release;
    dcb->weight = weight;

    make_runnable(dcb);
#endif

    return SYSRET(SYS_ERR_OK);
}

/**
 * \brief Retype (parts of) a capability into one or more new capabilities.
 *
 * \param root                  Source CSpace root cnode to invoke
 * \param source_croot          Source capability cspace root
 * \param source_cptr           Source capability cptr
 * \param offset                Offset into source capability from which to retype
 * \param type                  Type to retype to
 * \param objsize               Object size for variable-sized types
 * \param count                 Number of objects to create
 * \param dest_cspace_cptr      Destination CSpace cnode cptr relative to
 *                              source cspace root
 * \param dest_cnode_cptr       Destination cnode cptr
 * \param dest_cnode_level      Level/depth of destination cnode
 * \param dest_slot             Destination slot number
 * \param from_monitor          Whether the request originated from the monitor
 */
struct sysret
sys_retype(struct capability *root, capaddr_t source_croot, capaddr_t source_cptr,
           gensize_t offset, enum objtype type, gensize_t objsize, size_t count,
           capaddr_t dest_cspace_cptr, capaddr_t dest_cnode_cptr,
           uint8_t dest_cnode_level, cslot_t dest_slot, bool from_monitor)
{
    errval_t err;

    /* Parameter checking */
    if (type == ObjType_Null || type >= ObjType_Num) {
        return SYSRET(SYS_ERR_ILLEGAL_DEST_TYPE);
    }

    /* Lookup source cspace root cnode */
    struct capability *source_root;
    err = caps_lookup_cap(root, source_croot, 2, &source_root, CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_SOURCE_ROOTCN_LOOKUP));
    }
    /* Source capability */
    struct cte *source_cte;
    // XXX: level from where
    err = caps_lookup_slot(source_root, source_cptr, 2, &source_cte,
                           CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_SOURCE_CAP_LOOKUP));
    }
    assert(source_cte != NULL);

    /* Destination cspace root cnode in source cspace */
    struct capability *dest_cspace_root;
    // XXX: level from where?
    err = caps_lookup_cap(root, dest_cspace_cptr, 2,
                          &dest_cspace_root, CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_ROOTCN_LOOKUP));
    }
    /* dest_cspace_root must be L1 CNode */
    if (dest_cspace_root->type != ObjType_L1CNode) {
        return SYSRET(SYS_ERR_CNODE_TYPE);
    }

    /* Destination cnode in destination cspace */
    struct capability *dest_cnode_cap;
    err = caps_lookup_cap(dest_cspace_root, dest_cnode_cptr, dest_cnode_level,
                          &dest_cnode_cap, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_CNODE_LOOKUP));
    }

    /* check that destination cnode is actually a cnode */
    if (dest_cnode_cap->type != ObjType_L1CNode &&
        dest_cnode_cap->type != ObjType_L2CNode) {
        debug(SUBSYS_CAPS, "destcn type: %d\n", dest_cnode_cap->type);
        return SYSRET(SYS_ERR_DEST_CNODE_INVALID);
    }

    return SYSRET(caps_retype(type, objsize, count, dest_cnode_cap, dest_slot,
                              source_cte, offset, from_monitor));
}

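/**
 * \brief Create a new capability from scratch in an existing CNode slot.
 *
 * Only a small set of capability types (currently just ObjType_ID) may be
 * created this way at runtime.
 *
 * \param root             CSpace root cnode to invoke
 * \param type             Type of the capability to create
 * \param objsize          Object size for variable-sized types
 * \param dest_cnode_cptr  Destination cnode cptr
 * \param dest_level       Level/depth of destination cnode
 * \param dest_slot        Destination slot number
 */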
struct sysret sys_create(struct capability *root, enum objtype type,
                         size_t objsize, capaddr_t dest_cnode_cptr,
                         uint8_t dest_level, cslot_t dest_slot)
{
    errval_t err;
    uint8_t size = 0;
    genpaddr_t base = 0;

    /* Parameter checking */
    if (type == ObjType_Null || type >= ObjType_Num) {
        return SYSRET(SYS_ERR_ILLEGAL_DEST_TYPE);
    }

    /* Destination CNode */
    struct capability *dest_cnode_cap;
    err = caps_lookup_cap(root, dest_cnode_cptr, dest_level,
                          &dest_cnode_cap, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_CNODE_LOOKUP));
    }

    /* Destination slot */
    struct cte *dest_cte;
    dest_cte = caps_locate_slot(get_address(dest_cnode_cap), dest_slot);
    if (dest_cte->cap.type != ObjType_Null) {
        return SYSRET(SYS_ERR_SLOTS_IN_USE);
    }

    /* List capabilities allowed to be created at runtime. */
    switch(type) {

    case ObjType_ID:
        break;

    // only certain types of capabilities can be created at runtime
    default:
        return SYSRET(SYS_ERR_TYPE_NOT_CREATABLE);
    }

    return SYSRET(caps_create_new(type, base, size, objsize, my_core_id, dest_cte));
}

/**
 * Common code for copy and mint operations; the two differ only in the mint
 * flag and the parameters passed along.
 *
 * \param root              Source cspace root cnode
 * \param dest_cspace_cptr  Destination cspace root cnode cptr in source cspace
 * \param destcn_cptr       Destination cnode cptr relative to destination cspace
 * \param dest_slot         Destination slot
 * \param source_croot_ptr  Source cspace root cnode cptr in the invoking cspace
 * \param source_cptr       Source capability cptr relative to source cspace
 * \param destcn_level      Level/depth of destination cnode
 * \param source_level      Level/depth of source cap
 * \param param1            First parameter for mint
 * \param param2            Second parameter for mint
 * \param mint              Call is a minting operation
 */
struct sysret
sys_copy_or_mint(struct capability *root, capaddr_t dest_cspace_cptr,
                 capaddr_t destcn_cptr, cslot_t dest_slot,
                 capaddr_t source_croot_ptr, capaddr_t source_cptr,
                 uint8_t destcn_level, uint8_t source_level,
                 uintptr_t param1, uintptr_t param2, bool mint)
{
    errval_t err;

    if (!mint) {
        param1 = param2 = 0;
    }

    if (root->type != ObjType_L1CNode) {
        debug(SUBSYS_CAPS, "%s: root->type = %d\n", __FUNCTION__, root->type);
        return SYSRET(SYS_ERR_CNODE_NOT_ROOT);
    }
    assert(root->type == ObjType_L1CNode);

    /* Lookup source cspace in our cspace */
    struct capability *src_croot;
    err = caps_lookup_cap(root, source_croot_ptr, 2, &src_croot,
                          CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_SOURCE_ROOTCN_LOOKUP));
    }
    if (src_croot->type != ObjType_L1CNode) {
        debug(SUBSYS_CAPS, "%s: src rootcn type = %d\n", __FUNCTION__, src_croot->type);
        return SYSRET(SYS_ERR_CNODE_NOT_ROOT);
    }
    /* Lookup source cap in source cspace */
    struct cte *src_cap;
    err = caps_lookup_slot(src_croot, source_cptr, source_level, &src_cap,
                           CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_SOURCE_CAP_LOOKUP));
    }

    /* Destination cspace root cnode in source cspace */
    struct capability *dest_cspace_root;
    // XXX: level from where?
    err = caps_lookup_cap(root, dest_cspace_cptr, 2, &dest_cspace_root, CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_ROOTCN_LOOKUP));
    }
    /* dest_cspace_root must be L1 CNode */
    if (dest_cspace_root->type != ObjType_L1CNode) {
        debug(SUBSYS_CAPS, "%s: dest rootcn type = %d\n", __FUNCTION__, dest_cspace_root->type);
        return SYSRET(SYS_ERR_CNODE_TYPE);
    }

    /* Destination cnode in destination cspace */
    struct cte *dest_cnode_cap;
    err = caps_lookup_slot(dest_cspace_root, destcn_cptr, destcn_level,
                           &dest_cnode_cap, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_CNODE_LOOKUP));
    }

    /* Perform copy */
    if (dest_cnode_cap->cap.type == ObjType_L1CNode ||
        dest_cnode_cap->cap.type == ObjType_L2CNode)
    {
        return SYSRET(caps_copy_to_cnode(dest_cnode_cap, dest_slot, src_cap,
                                         mint, param1, param2));
    } else {
        return SYSRET(SYS_ERR_DEST_TYPE_INVALID);
    }
}

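/**
 * \brief Install a mapping into a page table (VNode).
 *
 * \param ptable            Page-table capability to map into
 * \param slot              Entry in \p ptable at which the mapping starts
 * \param source_root_cptr  CSpace root of the source capability, looked up in
 *                          the caller's CSpace
 * \param source_cptr       Source capability cptr, relative to that root
 * \param source_level      Level/depth of the source capability
 * \param flags             Mapping flags
 * \param offset            Offset into the source capability
 * \param pte_count         Number of page-table entries to install
 * \param mapping_crootptr  CSpace root for the mapping capability, looked up
 *                          in the caller's CSpace
 * \param mapping_cnptr     CNode (L2) that receives the mapping capability
 * \param mapping_cn_level  Level/depth of that CNode
 * \param mapping_slot      Slot for the mapping capability
 */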
struct sysret
sys_map(struct capability *ptable, cslot_t slot, capaddr_t source_root_cptr,
        capaddr_t source_cptr, uint8_t source_level, uintptr_t flags,
        uintptr_t offset, uintptr_t pte_count, capaddr_t mapping_crootptr,
        capaddr_t mapping_cnptr, uint8_t mapping_cn_level, cslot_t mapping_slot)
{
    assert (type_is_vnode(ptable->type));

    errval_t err;

    /* XXX: TODO: make root explicit argument for sys_map() */
    struct capability *root = &dcb_current->cspace.cap;

    if (!(ptable->rights & CAPRIGHTS_WRITE)) {
        return SYSRET(SYS_ERR_DEST_CAP_RIGHTS);
    }

    /* Lookup source root cn cap in own cspace */
    struct capability *src_root;
    err = caps_lookup_cap(root, source_root_cptr, source_level, &src_root,
                          CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_SOURCE_ROOTCN_LOOKUP));
    }
    if (src_root->type != ObjType_L1CNode) {
        return SYSRET(SYS_ERR_CNODE_NOT_ROOT);
    }
    /* Lookup source cap in source cspace */
    struct cte *src_cte;
    err = caps_lookup_slot(src_root, source_cptr, source_level, &src_cte,
                           CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_SOURCE_CAP_LOOKUP));
    }

    /* Lookup mapping cspace root in our cspace */
    struct capability *mapping_croot;
    err = caps_lookup_cap(root, mapping_crootptr, 2, &mapping_croot,
                          CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_ROOTCN_LOOKUP));
    }

    /* Lookup mapping slot in dest cspace */
    struct cte *mapping_cnode_cte;
    err = caps_lookup_slot(mapping_croot, mapping_cnptr, mapping_cn_level,
                           &mapping_cnode_cte, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_CNODE_LOOKUP));
    }

    if (mapping_cnode_cte->cap.type != ObjType_L2CNode) {
        return SYSRET(SYS_ERR_DEST_TYPE_INVALID);
    }

    struct cte *mapping_cte = caps_locate_slot(get_address(&mapping_cnode_cte->cap),
                                               mapping_slot);
    if (mapping_cte->cap.type != ObjType_Null) {
        return SYSRET(SYS_ERR_SLOT_IN_USE);
    }

    /* Perform map */
    // XXX: this does not check if we do have CAPRIGHTS_READ_WRITE on
    // the destination cap (the page table we're inserting into)
    return SYSRET(caps_copy_to_vnode(cte_for_cap(ptable), slot, src_cte, flags,
                                     offset, pte_count, mapping_cte));
}

struct sysret
sys_copy_remap(struct capability *ptable, cslot_t slot, capaddr_t source_cptr,
               int source_level, uintptr_t flags, uintptr_t offset,
               uintptr_t pte_count, capaddr_t mapping_cnptr,
               uint8_t mapping_cn_level, cslot_t mapping_slot)
{
    assert (type_is_vnode(ptable->type));

    errval_t err;

    /* Lookup source cap */
    struct capability *root = &dcb_current->cspace.cap;
    struct cte *src_cte;
    err = caps_lookup_slot(root, source_cptr, source_level, &src_cte,
                           CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_SOURCE_CAP_LOOKUP));
    }

    /* Lookup slot for mapping in our cspace */
    struct cte *mapping_cnode_cte;
    err = caps_lookup_slot(root, mapping_cnptr, mapping_cn_level,
                           &mapping_cnode_cte, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_CNODE_LOOKUP));
    }

    if (mapping_cnode_cte->cap.type != ObjType_L2CNode) {
        return SYSRET(SYS_ERR_DEST_TYPE_INVALID);
    }

    struct cte *mapping_cte = caps_locate_slot(get_address(&mapping_cnode_cte->cap),
                                               mapping_slot);
    if (mapping_cte->cap.type != ObjType_Null) {
        return SYSRET(SYS_ERR_SLOT_IN_USE);
    }

    /* Perform map */
    // XXX: this does not check if we do have CAPRIGHTS_READ_WRITE on
    // the destination cap (the page table we're inserting into)
    return SYSRET(paging_copy_remap(cte_for_cap(ptable), slot, src_cte, flags,
                                    offset, pte_count, mapping_cte));
}

struct sysret sys_delete(struct capability *root, capaddr_t cptr, uint8_t level)
{
    errval_t err;
    struct cte *slot;
    err = caps_lookup_slot(root, cptr, level, &slot, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }

    err = caps_delete(slot);
    return SYSRET(err);
}

struct sysret sys_revoke(struct capability *root, capaddr_t cptr, uint8_t level)
{
    errval_t err;
    struct cte *slot;
    err = caps_lookup_slot(root, cptr, level, &slot, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }

    err = caps_revoke(slot);
    return SYSRET(err);
}

struct sysret sys_get_state(struct capability *root, capaddr_t cptr, uint8_t level)
{
    errval_t err;
    struct cte *slot;
    err = caps_lookup_slot(root, cptr, level, &slot, CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }

    distcap_state_t state = distcap_get_state(slot);
    return (struct sysret) { .error = SYS_ERR_OK, .value = state };
}

struct sysret sys_get_size_l1cnode(struct capability *root)
{
    assert(root->type == ObjType_L1CNode);

    return (struct sysret) { .error = SYS_ERR_OK,
        .value = root->u.l1cnode.allocated_bytes};
}


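/**
 * \brief Replace the caller's CSpace root (L1 CNode) with a new one.
 *
 * Copies all occupied slots from the old root cnode into the new one, leaves
 * a copy of the old root cnode cap in the given return slot, and updates the
 * root cnode references held by the dispatcher and the task cnode.
 *
 * \param root          Current CSpace root cnode (must be an L1 CNode)
 * \param newroot_cptr  Capability address of the new L1 CNode
 * \param retcn_cptr    CNode in which to return the old root cnode cap
 * \param retslot       Slot in that CNode for the old root cnode cap
 */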
struct sysret sys_resize_l1cnode(struct capability *root, capaddr_t newroot_cptr,
                                 capaddr_t retcn_cptr, cslot_t retslot)
{
    errval_t err;

    if (root->type != ObjType_L1CNode) {
        return SYSRET(SYS_ERR_RESIZE_NOT_L1);
    }
    assert(root->type == ObjType_L1CNode);

    // Lookup new L1 CNode cap
    struct cte *newroot;
    err = caps_lookup_slot(root, newroot_cptr, 2, &newroot, CAPRIGHTS_ALLRIGHTS);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }
    if (newroot->cap.type != ObjType_L1CNode) {
        return SYSRET(SYS_ERR_INVALID_SOURCE_TYPE);
    }
    // TODO: check size of new CNode

    // Lookup slot for returning RAM of old CNode
    struct capability *retcn;
    err = caps_lookup_cap(root, retcn_cptr, 1, &retcn, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }
    struct cte *ret = caps_locate_slot(get_address(retcn), retslot);
    if (ret->cap.type != ObjType_Null) {
        return SYSRET(SYS_ERR_SLOT_IN_USE);
    }

    // Copy over caps from old root cnode to new root cnode
    cslot_t root_slots = cnode_get_slots(root);
    cslot_t newroot_slots = cnode_get_slots(&newroot->cap);
    for (cslot_t i = 0; i < min(root_slots, newroot_slots); i++) {
        struct cte *src = caps_locate_slot(get_address(root), i);
        if (src->cap.type == ObjType_Null) {
            // skip empty slots in old root cnode
            continue;
        }
        struct cte *dest = caps_locate_slot(get_address(&newroot->cap), i);
        if (dest->cap.type != ObjType_Null) {
            // fail if slot in destination cnode occupied
            return SYSRET(SYS_ERR_SLOT_IN_USE);
        }
        // do proper cap copy
        err = caps_copy_to_cte(dest, src, false, 0, 0);
        if (err_is_fail(err)) {
            return SYSRET(err);
        }
    }

    // Copy the old root cnode into the ret slot, so that we can delete the
    // copies in the task cnode and the dispatcher that we need to update.
    err = caps_copy_to_cte(ret, cte_for_cap(root), false, 0, 0);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }

    // Set new root cnode in dispatcher
    err = caps_delete(&dcb_current->cspace);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }
    err = caps_copy_to_cte(&dcb_current->cspace, newroot, false, 0, 0);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }

    // Set new root cnode in task cnode
    struct cte *taskcn = caps_locate_slot(get_address(&newroot->cap),
                                          ROOTCN_SLOT_TASKCN);
    struct cte *rootcn_cap = caps_locate_slot(get_address(&taskcn->cap),
                                              TASKCN_SLOT_ROOTCN);
    assert(rootcn_cap == cte_for_cap(root));
    err = caps_delete(rootcn_cap);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }
    err = caps_copy_to_cte(rootcn_cap, newroot, false, 0, 0);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }

    return SYSRET(SYS_ERR_OK);
}

/**
 * \brief return redacted 'struct capability' for given capability
 */
struct sysret sys_identify_cap(struct capability *root, capaddr_t cptr,
                               uint8_t level, struct capability *out)
{
    errval_t err;
    if (!access_ok(ACCESS_WRITE, (lvaddr_t)out, sizeof(*out))) {
        return SYSRET(SYS_ERR_INVALID_USER_BUFFER);
    }

    if (root->type != ObjType_L1CNode) {
        return SYSRET(SYS_ERR_CNODE_NOT_ROOT);
    }

    struct capability *thecap;
    // XXX: what's the correct caprights here?
    err = caps_lookup_cap(root, cptr, level, &thecap, CAPRIGHTS_ALLRIGHTS);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }

    memcpy(out, thecap, sizeof(*out));

    redact_capability(out);

    return SYSRET(SYS_ERR_OK);
}

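/**
 * \brief Yield the CPU.
 *
 * Must be called while the current dispatcher is disabled.  If the dispatcher
 * has no outstanding work, no unread LMP messages and no missed wakeup, it is
 * removed from the run queue; otherwise it merely gives up the rest of its
 * timeslice.
 *
 * \param target  Capability address of a dispatcher or LMP endpoint to yield
 *                to (directed yield), or CPTR_NULL for an undirected yield
 */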
struct sysret sys_yield(capaddr_t target)
{
    dispatcher_handle_t handle = dcb_current->disp;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);

    debug(SUBSYS_DISPATCH, "%.*s yields%s\n", DISP_NAME_LEN, disp->name,
          !disp->haswork && disp->lmp_delivered == disp->lmp_seen
           ? " and is removed from the runq" : "");

    if (dcb_current->disabled == false) {
        printk(LOG_ERR, "SYSCALL_YIELD while enabled\n");
        dump_dispatcher(disp);
        return SYSRET(SYS_ERR_CALLER_ENABLED);
    }

    struct capability *yield_to = NULL;
    if (target != CPTR_NULL) {
        errval_t err;

        /* directed yield */
        err = caps_lookup_cap(&dcb_current->cspace.cap, target, 2,
                              &yield_to, CAPRIGHTS_READ);
        if (err_is_fail(err)) {
            return SYSRET(err);
        } else if (yield_to == NULL ||
                   (yield_to->type != ObjType_EndPointLMP
                    && yield_to->type != ObjType_Dispatcher)) {
            return SYSRET(SYS_ERR_INVALID_YIELD_TARGET);
        }
        /* FIXME: check rights? */
    }

    // Since we've done a yield, we explicitly ensure that the
    // dispatcher is upcalled the next time (on the understanding that
    // this is what the dispatcher wants), otherwise why call yield?
    dcb_current->disabled = false;

    // Remove from queue when no work and no more messages and no missed wakeup
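    // (a wakeup time that has already passed counts as a missed wakeup, so in
    // that case the dispatcher stays on the run queue instead of being parked)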
    systime_t wakeup = disp->wakeup;
    if (!disp->haswork && disp->lmp_delivered == disp->lmp_seen
        && (wakeup == 0 || wakeup > (systime_now() + kcb_current->kernel_off))) {

        trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_SCHED_REMOVE,
            (uint32_t)(lvaddr_t)dcb_current & 0xFFFFFFFF);
        trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_SCHED_REMOVE,
                151);

        scheduler_remove(dcb_current);
        if (wakeup != 0) {
            wakeup_set(dcb_current, wakeup);
        }
    } else {
        // Otherwise yield for the timeslice
        scheduler_yield(dcb_current);
    }

    if (yield_to != NULL) {
        struct dcb *target_dcb = NULL;
        if (yield_to->type == ObjType_EndPointLMP) {
            target_dcb = yield_to->u.endpointlmp.listener;
        } else if (yield_to->type == ObjType_Dispatcher) {
            target_dcb = yield_to->u.dispatcher.dcb;
        } else {
            panic("invalid type in yield cap");
        }

        trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_YIELD,
            (uint32_t)(lvaddr_t)target_dcb & 0xFFFFFFFF);
        make_runnable(target_dcb);
        dispatch(target_dcb);
    } else {
//        trace_event(TRACE_SUBSYS_BNET, TRACE_EVENT_BNET_YIELD,
//            0);

        /* undirected yield */
        dispatch(schedule());
    }

    panic("Yield returned!");
}

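/**
 * \brief Suspend the calling dispatcher.
 *
 * Must be called while the current dispatcher is disabled.
 *
 * \param do_halt  If true, halt this core; otherwise switch to the next KCB
 *                 on this core and resume scheduling there
 */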
struct sysret sys_suspend(bool do_halt)
{
    dispatcher_handle_t handle = dcb_current->disp;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);

    debug(SUBSYS_DISPATCH, "%.*s suspends (halt: %d)\n", DISP_NAME_LEN, disp->name, do_halt);

    if (dcb_current->disabled == false) {
        printk(LOG_ERR, "SYSCALL_SUSPEND while enabled\n");
        return SYSRET(SYS_ERR_CALLER_ENABLED);
    }

    dcb_current->disabled = false;

    if (do_halt) {
        //printf("%s:%s:%d: before halt of core (%"PRIuCOREID")\n",
        //       __FILE__, __FUNCTION__, __LINE__, my_core_id);
        halt();
    } else {
        // Note: this only works when invoked from within the KCB
        // that is currently running
        printk(LOG_NOTE, "in sys_suspend(<no_halt>)!\n");
        printk(LOG_NOTE, "calling switch_kcb!\n");
        struct kcb *next = kcb_current->next;
        kcb_current->next = NULL;
        switch_kcb(next);
        // enable kcb scheduler
        printk(LOG_NOTE, "enabling kcb scheduler!\n");
        kcb_sched_suspended = false;
        // schedule something in the other kcb
        dispatch(schedule());
    }

    panic("Suspend returned!");
}


/**
 * The format of the returned ID is:
 *
 * --------------------------------------------------------------------
 * |             0 (unused) | coreid |         core_local_id          |
 * --------------------------------------------------------------------
 * 63                        39       31                              0 Bit
 *
 */
struct sysret sys_idcap_identify(struct capability *cap, idcap_id_t *id)
{
    STATIC_ASSERT_SIZEOF(coreid_t, 1);

    idcap_id_t coreid = (idcap_id_t) cap->u.id.coreid;
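    // The core id occupies bits 39..32 and the core-local id bits 31..0,
    // matching the layout described above.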
    *id = coreid << 32 | cap->u.id.core_local_id;

    return SYSRET(SYS_ERR_OK);
}

/**
 * Calls the correct handler function to spawn an app core.
 *
 * At the moment spawn_core_handlers is set up per
 * architecture inside text_init(), usually found in init.c.
 *
 * \note Generally the x86 terms BSP and APP core are used
 * throughout Barrelfish to distinguish between the bootstrap core (BSP)
 * and application cores (APP).
 *
 * \param  target   Identifier of the core which we want to boot
 * \param  cpu_type Architecture of the core.
 * \param  entry    Entry point for code to start execution.
 * \param  context  Opaque argument passed through to the spawn handler.
 *
 * \retval SYS_ERR_OK Core successfully booted.
 * \retval SYS_ERR_ARCHITECTURE_NOT_SUPPORTED No handler registered for
 *     the specified cpu_type.
 * \retval SYS_ERR_CORE_NOT_FOUND Core failed to boot.
 */
struct sysret sys_monitor_spawn_core(hwid_t target, enum cpu_type cpu_type,
                                     genvaddr_t entry, genpaddr_t context)
{
    errval_t err;

    assert(cpu_type < CPU_TYPE_NUM);
    // TODO(gz): assert core_id valid
    // TODO(gz): assert entry range?

    if (cpu_type >= CPU_TYPE_NUM) {
        return SYSRET(SYS_ERR_ARCHITECTURE_NOT_SUPPORTED);
    }

    coreboot_start_fn_t start_fn = coreboot_get_spawn_handler(cpu_type);

    if (start_fn == NULL) {
        return SYSRET(SYS_ERR_ARCHITECTURE_NOT_SUPPORTED);
    }

    err = start_fn(target, entry, context);
    if (err_is_fail(err)) {
        err = err_push(err, SYS_ERR_CORE_NOT_FOUND);
    }
    return SYSRET(err);
}

struct sysret sys_kernel_add_kcb(struct kcb *new_kcb)
{
    kcb_add(new_kcb);

    // update kernel_now offset
    new_kcb->kernel_off -= systime_now();
    // reset scheduler statistics
    scheduler_reset_time();
    // update current core id of all domains
    kcb_update_core_id(new_kcb);
    // upcall domains with registered interrupts to tell them to re-register
    irq_table_notify_domains(new_kcb);

    return SYSRET(SYS_ERR_OK);
}

struct sysret sys_kernel_remove_kcb(struct kcb *to_remove)
{
    return SYSRET(kcb_remove(to_remove));
}

struct sysret sys_kernel_suspend_kcb_sched(bool suspend)
{
    printk(LOG_NOTE, "in kernel_suspend_kcb_sched invocation!\n");
    kcb_sched_suspended = suspend;
    return SYSRET(SYS_ERR_OK);
}

struct sysret sys_handle_kcb_identify(struct capability *to, struct frame_identity *fi)
{
    // Return with physical base address of frame
    // XXX: pack size into bottom bits of base address
    assert(to->type == ObjType_KernelControlBlock);
    lvaddr_t vkcb = (lvaddr_t) to->u.kernelcontrolblock.kcb;
    assert((vkcb & BASE_PAGE_MASK) == 0);

    if (!access_ok(ACCESS_WRITE, (lvaddr_t)fi, sizeof(struct frame_identity))) {
        return SYSRET(SYS_ERR_INVALID_USER_BUFFER);
    }

    fi->base = get_address(to);
    fi->bytes = get_size(to);

    return SYSRET(SYS_ERR_OK);
}