/*
 * Copyright (c) 2007-2011, ETH Zurich.
 * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */

/*
 * "Code" to calculate offsets of structure fields for use in assembly code.
 *
 * This is pretty ugly. GCC gives us the handy offsetof() function for
 * computing the byte offset of a member within a struct, but there is no easy
 * way to get that value out to standalone assembly code; hence this file.
 *
 * This file is compiled to assembly, but the result is never assembled.
 * Instead, we trick GCC (via its inline asm syntax) into emitting a #define
 * for each offset we need. A postprocessing script (grep) then extracts those
 * #defines and creates a header file suitable for inclusion into standalone
 * assembly code.
 *
 * As an added bonus, we can use this to do compile-time assertion checking on
 * statically-known values (e.g. struct sizes).
 */
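
/*
 * Illustrative sketch of the trick (not literal compiler output): a
 * declaration such as
 *
 *     DECL(DCB_DISP, struct dcb, disp);
 *
 * below expands via EMITX() to an __asm statement, so the compiler's assembly
 * output contains a line roughly of the form
 *
 *     #define OFFSETOF_DCB_DISP <byte offset of disp within struct dcb>
 *
 * (the operand may carry an architecture-specific immediate prefix), which the
 * grep-based postprocessing step turns into an entry in the generated header.
 */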

/* GCC builtin offsetof */
// #define offsetof(TYPE, MEMBER)  __builtin_offsetof(TYPE, MEMBER)

/* macro to generate the fake assembly output that results in a #define */
#define EMITX(NAMESTR, VALUE) \
    __asm("\n#define " NAMESTR "\t%[val]\n" \
          :: [val] "n" (VALUE));

/* macro to emit a "constant" value */
#define EMIT(NAME, VALUE) EMITX(#NAME, VALUE)

/* macro to emit an offset */
#define DECL(NAME, TYPE, MEMBER) \
    EMITX("OFFSETOF_" #NAME, offsetof(TYPE, MEMBER))

/* macro to emit an offset limit (e.g. stack limit) */
#define DECL_LIMIT(NAME, TYPE, MEMBER) \
    EMITX("OFFSETOF_" #NAME, (offsetof(TYPE, MEMBER) + sizeof(((TYPE*)0)->MEMBER)))
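
/*
 * For example, DECL_LIMIT(DISP_PRIV_STACK_LIMIT, struct dispatcher_generic,
 * stack) below emits OFFSETOF_DISP_PRIV_STACK_LIMIT as the offset of the first
 * byte *past* the stack member (its offset plus its size), i.e. the limit/end
 * of that area (handy, for instance, as the initial value of a downward-growing
 * stack pointer).
 */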

#if 0
// XXX This is nonsense code: if it's determinable statically then use
// a static assert.  If it isn't, then we want to know here, since there
// is going to be no dynamic test with this code.

/* macro to generate the fake assembly output that results in a compile-time
 * error if any of our structures are too big */
#define XXASSERT(EXP, EXPSTR, LINE)                             \
    if (!(EXP)) {                                               \
        __asm("\n#error Compile-time assertion failure: "       \
              EXPSTR ", " __FILE__ ":" #LINE "\n");             \
    }
#define XASSERT(EXP,EXPSTR,LINE) XXASSERT(EXP, EXPSTR, LINE)
#define ASSERT(EXP) XASSERT(EXP, #EXP, __LINE__)
#endif

#include <barrelfish/static_assert.h>
#define ASSERT(EXP) STATIC_ASSERT(EXP, #EXP)

/* XXX: kernel and user includes together */
#include <deputy/nodeputy.h>
#include <kernel.h>
#include <dispatch.h> // XXX: from kernel include dir
#include <barrelfish/barrelfish.h>
#include <barrelfish/lmp_endpoints.h>

#include <barrelfish/dispatcher_arch.h>

#ifdef __arm__
#include <boot_protocol.h>
#include <barrelfish_kpi/arm_core_data.h>
#endif

#ifdef __aarch64__
#include <barrelfish_kpi/arm_core_data.h>
#endif

/* wrap everything inside a dummy function, to keep the compiler happy */
#ifdef __ICC
int main(void)
#else
void dummy(void);
void dummy(void)
#endif
{
    /* preamble */
    __asm("\n#ifndef ASMOFFSETS_H\n#define ASMOFFSETS_H\n");
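    /*
     * Sketch of the overall shape of the emitted output: the preamble above
     * and the footer at the end of this function bracket all the #defines, so
     * the generated header looks roughly like
     *
     *     #ifndef ASMOFFSETS_H
     *     #define ASMOFFSETS_H
     *     #define OFFSETOF_DCB_DISP ...
     *     ...
     *     #endif
     */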
    DECL(DCB_DISP, struct dcb, disp);
    DECL(DCB_DISABLED, struct dcb, disabled);
    // XXX: Assumes cap is first member of struct cte
    DECL(DCB_CSPACE_CAP, struct dcb, cspace.cap);
    DECL(DCB_VSPACE, struct dcb, vspace);
    DECL(DCB_IS_VM_GUEST, struct dcb, is_vm_guest);
    DECL(DCB_RR_PREV, struct dcb, prev);
    DECL(DCB_RBED_NEXT, struct dcb, next);

    DECL(CAP_TYPE, struct capability, type);
    DECL(CAP_ENDPOINT_EPOFFSET, struct capability, u.endpoint.epoffset);
    DECL(CAP_ENDPOINT_EPBUFLEN, struct capability, u.endpoint.epbuflen);
    DECL(CAP_ENDPOINT_LISTENER, struct capability, u.endpoint.listener);

    DECL(CAP_L1CNODE_CNODE, struct capability, u.l1cnode.cnode);
    DECL(CAP_L2CNODE_CNODE, struct capability, u.l2cnode.cnode);
    DECL(CAP_L1CNODE_ALLOCATED_BYTES, struct capability, u.l1cnode.allocated_bytes);

    DECL(DISP_DISABLED, struct dispatcher_shared_generic, disabled);
    DECL(DISP_RUN, struct dispatcher_shared_generic, dispatcher_run);
    DECL(DISP_LRPC, struct dispatcher_shared_generic, dispatcher_lrpc);
    DECL(DISP_UDISP, struct dispatcher_shared_generic, udisp);
    DECL(DISP_LMP_DELIVERED, struct dispatcher_shared_generic, lmp_delivered);
    DECL(DISP_SYSTIME, struct dispatcher_shared_generic, systime);

    DECL_LIMIT(DISP_PRIV_STACK_LIMIT, struct dispatcher_generic, stack);
    DECL_LIMIT(DISP_PRIV_TRAP_STACK_LIMIT, struct dispatcher_generic, trap_stack);

#if defined (__x86_64__) || defined(__k1om__)
    DECL(DISP_X86_64_CRIT_PC_LOW, struct dispatcher_shared_x86_64, crit_pc_low);
    DECL(DISP_X86_64_CRIT_PC_HIGH, struct dispatcher_shared_x86_64, crit_pc_high);
    DECL(DISP_X86_64_LDT_BASE, struct dispatcher_shared_x86_64, ldt_base);
    DECL(DISP_X86_64_LDT_NPAGES, struct dispatcher_shared_x86_64, ldt_npages);
    EMIT(LDT_LO_SEL, LDT_LO_SEL);
    EMIT(LDT_HI_SEL, LDT_HI_SEL);
    EMIT(LDT_SELECTOR, GSEL(LDT_LO_SEL, SEL_UPL));
    DECL(DISP_X86_64_ENABLED_AREA, struct dispatcher_shared_x86_64, enabled_save_area);
    DECL(DISP_X86_64_DISABLED_AREA, struct dispatcher_shared_x86_64, disabled_save_area);
    DECL(DISP_X86_64_TRAP_AREA, struct dispatcher_shared_x86_64, trap_save_area);
#endif

#if defined __i386__
    DECL(DISP_X86_32_CRIT_PC_LOW, struct dispatcher_shared_x86_32, crit_pc_low);
    DECL(DISP_X86_32_CRIT_PC_HIGH, struct dispatcher_shared_x86_32, crit_pc_high);
    DECL(DISP_X86_32_ENABLED_AREA, struct dispatcher_shared_x86_32, enabled_save_area);
    DECL(DISP_X86_32_DISABLED_AREA, struct dispatcher_shared_x86_32, disabled_save_area);
    DECL(DISP_X86_32_TRAP_AREA, struct dispatcher_shared_x86_32, trap_save_area);
#endif

#if defined(__arm__)
    DECL(DISP_CRIT_PC_LOW, struct dispatcher_shared_arm, crit_pc_low);
    DECL(DISP_CRIT_PC_HIGH, struct dispatcher_shared_arm, crit_pc_high);
    DECL(DISP_ENABLED_AREA, struct dispatcher_shared_arm, enabled_save_area);
    DECL(DISP_DISABLED_AREA, struct dispatcher_shared_arm, disabled_save_area);
    DECL(DISP_TRAP_AREA, struct dispatcher_shared_arm, trap_save_area);
    DECL(DISP_GENERIC, struct dispatcher_arm, generic);
    DECL(BOOT_TARGET_MPID, struct armv7_boot_record, target_mpid);
    DECL(COREDATA_GOT_BASE, struct arm_core_data, got_base);
    EMIT(SIZEOF_BOOT_RECORD, sizeof(struct armv7_boot_record));
#endif // __arm__

#if defined(__aarch64__)
    DECL(DISP_CRIT_PC_LOW, struct dispatcher_shared_aarch64, crit_pc_low);
    DECL(DISP_CRIT_PC_HIGH, struct dispatcher_shared_aarch64, crit_pc_high);
    DECL(DISP_ENABLED_AREA, struct dispatcher_shared_aarch64, enabled_save_area);
    DECL(DISP_DISABLED_AREA, struct dispatcher_shared_aarch64, disabled_save_area);
    DECL(DISP_TRAP_AREA, struct dispatcher_shared_aarch64, trap_save_area);
    DECL(DISP_GENERIC, struct dispatcher_aarch64, generic);
    DECL(COREDATA_KERNEL_STACK, struct armv8_core_data, cpu_driver_stack);
#endif // __aarch64__

    DECL(LMP_ENDPOINT_DELIVERED, struct lmp_endpoint_kern, delivered);
    DECL(LMP_ENDPOINT_CONSUMED, struct lmp_endpoint_kern, consumed);
    DECL(LMP_ENDPOINT_KERNPART, struct lmp_endpoint, k);

    EMIT(OBJTYPE_ENDPOINT, ObjType_EndPoint);
    EMIT(OBJTYPE_L1CNODE, ObjType_L1CNode);
    EMIT(OBJTYPE_L2CNODE, ObjType_L2CNode);

    // register offsets in save areas
#if defined (__x86_64__) || defined(__k1om__)
    DECL(RAX_REG, struct registers_x86_64, rax);
    DECL(RSP_REG, struct registers_x86_64, rsp);
    DECL(RIP_REG, struct registers_x86_64, rip);
    DECL(EFLAGS_REG, struct registers_x86_64, eflags);
    DECL(FS_REG, struct registers_x86_64, fs);
    DECL(GS_REG, struct registers_x86_64, gs);
    DECL(FXSAVE_AREA, struct registers_x86_64, fxsave_area);
#elif defined __i386__
    DECL(FS_REG, struct registers_x86_32, fs);
    DECL(GS_REG, struct registers_x86_32, gs);
#endif /* __x86_64__ || __k1om__ || __i386__ */

    // error codes needed in LRPC path
    EMIT(SYS_ERR_OK, SYS_ERR_OK);
    EMIT(SYS_ERR_CAP_NOT_FOUND, SYS_ERR_CAP_NOT_FOUND);
    EMIT(SYS_ERR_LMP_TARGET_DISABLED, SYS_ERR_LMP_TARGET_DISABLED);
    EMIT(SYS_ERR_LMP_BUF_OVERFLOW, SYS_ERR_LMP_BUF_OVERFLOW);
    EMIT(SYS_ERR_LRPC_SLOT_INVALID, SYS_ERR_LRPC_SLOT_INVALID);
    EMIT(SYS_ERR_LRPC_NOT_ENDPOINT, SYS_ERR_LRPC_NOT_ENDPOINT);
    EMIT(SYS_ERR_LRPC_NOT_L1, SYS_ERR_LRPC_NOT_L1);
    EMIT(SYS_ERR_LRPC_NOT_L2, SYS_ERR_LRPC_NOT_L2);

    /* sanity-check the sizes of various structures, so we break the build if
     * they don't fit their expected limits */
#if defined (__x86_64__) || defined(__k1om__)
    ASSERT(sizeof(struct dispatcher_x86_64) <= (1 << DISPATCHER_FRAME_BITS));
#elif defined __i386__
    ASSERT(sizeof(struct dispatcher_x86_32) <= (1 << DISPATCHER_FRAME_BITS));
#elif defined __arm__
    ASSERT(sizeof(struct dispatcher_arm) <= (1 << DISPATCHER_FRAME_BITS));
#elif defined __aarch64__
    ASSERT(sizeof(struct dispatcher_aarch64) <= (1 << DISPATCHER_FRAME_BITS));
#else
#error "Define architecture"
#endif
    ASSERT(sizeof(struct cte) <= (1UL << OBJBITS_CTE));
    ASSERT(sizeof(struct dcb) <= OBJSIZE_DISPATCHER);

    union lmp_recv_header rcvheader;
    EMIT(SIZEOF_LMP_RECV_HEADER, sizeof(rcvheader));
    EMIT(SIZEOF_LMP_RECV_HEADER_RAW, sizeof(rcvheader.raw));
    ASSERT(sizeof(rcvheader) == sizeof(rcvheader.raw));

    EMIT(SIZEOF_STRUCT_SYSRET, sizeof(struct sysret));

    /* footer */
    __asm("\n#endif /* ASMOFFSETS_H */\n");
}