Lines Matching refs:stack

27 // Allocates and maps a kernel stack with one page of padding before and after the mapping.
31 LTRACEF("allocating %s stack\n", unsafe ? "unsafe" : "safe");
37 // Create a VMO for our stack
42 TRACEF("error allocating %s stack for thread\n",
46 const char* name = unsafe ? "unsafe-stack" : "safe-stack";
49 // create a vmar with enough padding for a page before and after the stack
70 LTRACEF("%s stack vmar at %#" PRIxPTR "\n",
86 LTRACEF("%s stack mapping at %#" PRIxPTR "\n",
89 // fault in all the pages so we don't demand-fault the stack later
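
The fragments above (lines 27-89) all come from one static helper that builds a single guarded kernel stack: create a VMO, wrap it in a sub-VMAR sized with a guard page of padding on each side, map the stack into the middle, and pre-fault every page. A minimal sketch of that flow, assuming the Zircon-era kernel VM API (VmObjectPaged::Create, CreateSubVmar, CreateVmMapping, MapRange); the helper name allocate_vmar, the parameter names, and the exact flag sets are reconstructions, and error-path destruction of the sub-VMAR is omitted for brevity:

    static zx_status_t allocate_vmar(bool unsafe,
                                     fbl::RefPtr<VmMapping>* out_mapping,
                                     fbl::RefPtr<VmAddressRegion>* out_vmar) {
        LTRACEF("allocating %s stack\n", unsafe ? "unsafe" : "safe");

        // Create a VMO to back the stack pages.
        fbl::RefPtr<VmObject> vmo;
        zx_status_t status =
            VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, DEFAULT_STACK_SIZE, &vmo);
        if (status != ZX_OK) {
            TRACEF("error allocating %s stack for thread\n", unsafe ? "unsafe" : "safe");
            return status;
        }
        const char* name = unsafe ? "unsafe-stack" : "safe-stack";
        vmo->set_name(name, strlen(name));

        // Size the sub-VMAR for the stack plus one unmapped guard page on
        // each side, so a stack overrun faults instead of silently
        // corrupting whatever is mapped next.
        const size_t padding = PAGE_SIZE;
        fbl::RefPtr<VmAddressRegion> vmar;
        status = VmAspace::kernel_aspace()->RootVmar()->CreateSubVmar(
            0, DEFAULT_STACK_SIZE + 2 * padding, 0,
            VMAR_FLAG_CAN_MAP_SPECIFIC | VMAR_FLAG_CAN_MAP_READ | VMAR_FLAG_CAN_MAP_WRITE,
            unsafe ? "unsafe_kstack_vmar" : "kstack_vmar", &vmar);
        if (status != ZX_OK)
            return status;
        LTRACEF("%s stack vmar at %#" PRIxPTR "\n",
                unsafe ? "unsafe" : "safe", vmar->base());

        // Map the stack exactly one guard page into the sub-VMAR.
        fbl::RefPtr<VmMapping> mapping;
        status = vmar->CreateVmMapping(
            padding, DEFAULT_STACK_SIZE, 0, VMAR_FLAG_SPECIFIC, fbl::move(vmo), 0,
            ARCH_MMU_FLAG_PERM_READ | ARCH_MMU_FLAG_PERM_WRITE,
            unsafe ? "unsafe_kstack" : "kstack", &mapping);
        if (status != ZX_OK)
            return status;
        LTRACEF("%s stack mapping at %#" PRIxPTR "\n",
                unsafe ? "unsafe" : "safe", mapping->base());

        // Commit and map every page now so the stack never demand-faults.
        status = mapping->MapRange(0, DEFAULT_STACK_SIZE, true);
        if (status != ZX_OK)
            return status;

        *out_mapping = fbl::move(mapping);
        *out_vmar = fbl::move(vmar);
        return ZX_OK;
    }
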
104 zx_status_t vm_allocate_kstack(kstack_t* stack) {
105 DEBUG_ASSERT(stack->base == 0);
106 DEBUG_ASSERT(stack->size == 0);
107 DEBUG_ASSERT(stack->top == 0);
108 DEBUG_ASSERT(stack->vmar == nullptr);
110 DEBUG_ASSERT(stack->unsafe_base == 0);
111 DEBUG_ASSERT(stack->unsafe_vmar == nullptr);
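
vm_allocate_kstack begins by asserting that the kstack_t is empty. From the fields the listing touches, the struct plausibly looks like the sketch below; the gap between line 108 and line 110 suggests the safe-stack members (and their asserts) are compiled in only under __has_feature(safe_stack). This layout is a reconstruction, not verbatim source:

    typedef struct kstack {
        vaddr_t base;  // lowest mapped address of the stack
        size_t size;   // size of the stack mapping
        vaddr_t top;   // initial stack pointer: base + DEFAULT_STACK_SIZE
        void* vmar;    // owning sub-VMAR, stashed as a leaked raw reference
    #if __has_feature(safe_stack)
        vaddr_t unsafe_base;  // base of the SafeStack "unsafe" stack
        void* unsafe_vmar;    // its owning sub-VMAR, likewise leaked
    #endif
    } kstack_t;
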
120 stack->size = mapping->size();
121 stack->base = mapping->base();
122 stack->top = mapping->base() + DEFAULT_STACK_SIZE;
125 stack->vmar = vmar.leak_ref();
130 vm_free_kstack(stack);
133 stack->size = mapping->size();
134 stack->unsafe_base = mapping->base();
137 stack->unsafe_vmar = vmar.leak_ref();
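
The leak_ref() calls carry the ownership trick: because kstack_t stores the VMAR as a plain void*, the RefPtr deliberately leaks its counted reference into that raw pointer, to be reclaimed later by vm_free_kstack. A sketch of how the fragments at lines 104-137 fit together, reusing the hypothetical allocate_vmar helper from above:

    zx_status_t vm_allocate_kstack(kstack_t* stack) {
        DEBUG_ASSERT(stack->base == 0);
        // ... remaining emptiness asserts as listed above ...

        // Allocate and map the normal ("safe") stack.
        fbl::RefPtr<VmMapping> mapping;
        fbl::RefPtr<VmAddressRegion> vmar;
        zx_status_t status = allocate_vmar(false, &mapping, &vmar);
        if (status != ZX_OK)
            return status;
        stack->size = mapping->size();
        stack->base = mapping->base();
        stack->top = mapping->base() + DEFAULT_STACK_SIZE;
        // Leak the reference into the raw pointer; vm_free_kstack re-adopts it.
        stack->vmar = vmar.leak_ref();

    #if __has_feature(safe_stack)
        // Allocate the SafeStack "unsafe" stack the same way.
        status = allocate_vmar(true, &mapping, &vmar);
        if (status != ZX_OK) {
            vm_free_kstack(stack);  // roll back the stack allocated above
            return status;
        }
        stack->size = mapping->size();
        stack->unsafe_base = mapping->base();
        stack->unsafe_vmar = vmar.leak_ref();
    #endif
        return ZX_OK;
    }
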
143 zx_status_t vm_free_kstack(kstack_t* stack) {
144 stack->base = 0;
145 stack->size = 0;
146 stack->top = 0;
148 if (stack->vmar != nullptr) {
150 fbl::internal::MakeRefPtrNoAdopt(static_cast<VmAddressRegion*>(stack->vmar));
155 stack->vmar = nullptr;
159 stack->unsafe_base = 0;
161 if (stack->unsafe_vmar != nullptr) {
163 fbl::internal::MakeRefPtrNoAdopt(static_cast<VmAddressRegion*>(stack->unsafe_vmar));
168 stack->unsafe_vmar = nullptr;
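
fbl::internal::MakeRefPtrNoAdopt is the inverse of leak_ref(): it wraps the raw pointer back into a RefPtr without bumping the reference count, so the reference leaked at allocation time is dropped when the local RefPtr goes out of scope. Destroying the sub-VMAR unmaps the stack and its guard pages in one step. A sketch of vm_free_kstack, with the same caveats as above:

    zx_status_t vm_free_kstack(kstack_t* stack) {
        stack->base = 0;
        stack->size = 0;
        stack->top = 0;

        if (stack->vmar != nullptr) {
            // Re-adopt the reference leaked by vm_allocate_kstack, then
            // destroy the sub-VMAR, tearing down the stack mapping inside it.
            fbl::RefPtr<VmAddressRegion> vmar =
                fbl::internal::MakeRefPtrNoAdopt(static_cast<VmAddressRegion*>(stack->vmar));
            zx_status_t status = vmar->Destroy();
            if (status != ZX_OK)
                return status;
            stack->vmar = nullptr;
        }

    #if __has_feature(safe_stack)
        stack->unsafe_base = 0;
        if (stack->unsafe_vmar != nullptr) {
            fbl::RefPtr<VmAddressRegion> vmar = fbl::internal::MakeRefPtrNoAdopt(
                static_cast<VmAddressRegion*>(stack->unsafe_vmar));
            zx_status_t status = vmar->Destroy();
            if (status != ZX_OK)
                return status;
            stack->unsafe_vmar = nullptr;
        }
    #endif
        return ZX_OK;
    }
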