Lines Matching refs:stack

29  *	Kernel stack management routines.
74 static unsigned int stack_new_count; /* total new stack allocations */
132 * The next field is at the base of the stack,
133 * so the low end is left unsullied.
135 #define stack_next(stack) \
136 (*((vm_offset_t *)((stack) + kernel_stack_size) - 1))
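
The stack_next() macro above threads the free list through the stacks themselves: the link lives in the last pointer-sized slot at the high end of a free stack (its base, since kernel stacks grow downward), so no separate list nodes are needed and the low end stays untouched. A minimal userspace sketch of the same intrusive free list, with illustrative names and sizes rather than the kernel's:

    #include <stdint.h>

    #define KSTACK_SIZE 16384   /* stand-in for kernel_stack_size */

    /* The link is stored in the last pointer-sized slot of the stack. */
    #define stack_next(stack) \
            (*((uintptr_t *)((stack) + KSTACK_SIZE) - 1))

    static uintptr_t stack_free_list;   /* head of the free list, 0 when empty */

    static void push_stack(uintptr_t stack)
    {
            stack_next(stack) = stack_free_list;
            stack_free_list = stack;
    }

    static uintptr_t pop_stack(void)
    {
            uintptr_t stack = stack_free_list;
            if (stack != 0)
                    stack_free_list = stack_next(stack);
            return stack;
    }
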
170 panic("stack_init: stack size %p not a multiple of page size %d\n",
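
The panic above is stack_init()'s sanity check that the configured stack size is page-aligned. The listing does not show the test itself; a common form, assuming a power-of-two page size, is a low-bits mask along these lines (a sketch, not the kernel's exact code):

    #include <stddef.h>

    #define PAGE_SZ 4096    /* stand-in for PAGE_SIZE */

    /* Any remainder modulo a power-of-two page size shows up in the
     * low bits, so a single mask detects a misaligned stack size. */
    static int page_aligned(size_t size)
    {
            return (size & (PAGE_SZ - 1)) == 0;
    }
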
180 * Allocate a stack for a thread, may
181 * block.
187 vm_offset_t stack;
194 stack = stack_free_list;
195 if (stack != 0) {
196 stack_free_list = stack_next(stack);
208 if (stack == 0) {
211 * Request guard pages on either side of the stack. Ask
212 * kernel_memory_allocate() for two extra pages to account
213 * for these.
217 if (kernel_memory_allocate(kernel_map, &stack,
225 * The stack address that comes back is the address of the lower
226 * guard page. Skip past it to get the actual stack base address.
229 stack += PAGE_SIZE;
231 return stack;
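
When the free list comes up empty, the allocation path maps a fresh region of kernel_stack_size plus two pages, with a guard page at each end, and then steps past the lower guard so callers only ever see the usable base. A userspace analogue of that layout and arithmetic, using mmap/mprotect rather than the kernel's kernel_memory_allocate() with guard flags:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static uintptr_t stack_alloc_guarded(size_t stack_size)
    {
            size_t page = (size_t)sysconf(_SC_PAGESIZE);
            size_t total = stack_size + 2 * page;   /* stack + two guard pages */

            void *region = mmap(NULL, total, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (region == MAP_FAILED) {
                    perror("mmap");
                    exit(1);
            }

            /* Make the first and last pages inaccessible so any overrun
             * or underrun faults immediately instead of corrupting memory. */
            mprotect(region, page, PROT_NONE);
            mprotect((char *)region + page + stack_size, page, PROT_NONE);

            /* As in the kernel code above: the raw address is the lower
             * guard page, so skip past it to the usable stack base. */
            return (uintptr_t)region + page;
    }
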
255 * Detach and free the stack for a thread.
261 vm_offset_t stack = machine_stack_detach(thread);
263 assert(stack);
264 if (stack != thread->reserved_stack) {
266 stack_free_stack(stack);
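
On the free side, the stack is first detached from the thread; a stack the thread has explicitly reserved stays bound to it, and only other stacks are recycled. A sketch with a stand-in thread type (the kernel uses machine_stack_detach() and stack_free_stack()):

    #include <assert.h>

    struct thread_sketch {
            uintptr_t kernel_stack;     /* currently attached stack, if any */
            uintptr_t reserved_stack;   /* pre-reserved stack, 0 if none */
    };

    static void stack_free_sketch(struct thread_sketch *thread)
    {
            uintptr_t stack = thread->kernel_stack;   /* models machine_stack_detach() */
            thread->kernel_stack = 0;

            assert(stack);
            /* A reserved stack stays with its thread; recycle anything else. */
            if (stack != thread->reserved_stack)
                    push_stack(stack);   /* free-list push from the earlier sketch */
    }
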
282 vm_offset_t stack)
290 stack_next(stack) = cache->free;
291 cache->free = stack;
296 stack_next(stack) = stack_free_list;
297 stack_free_list = stack;
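
Freed stacks go first into a small per-processor cache, which needs no lock because the code runs at splsched on that processor; only when the cache is full does the stack overflow onto the global free list under stack_lock(). A sketch of the two-level pool, with the cache size and lock stand-ins as assumptions:

    #define STACK_CACHE_MAX 2    /* assumed per-CPU cache capacity */

    struct stack_cache_sketch {
            uintptr_t free;       /* local list, linked via stack_next() */
            unsigned int count;
    };

    static void lock_global(void)   { /* models stack_lock() */ }
    static void unlock_global(void) { /* models stack_unlock() */ }

    static void stack_free_stack_sketch(struct stack_cache_sketch *cache,
                                        uintptr_t stack)
    {
            if (cache->count < STACK_CACHE_MAX) {
                    /* Fast path: per-CPU cache, no shared state touched. */
                    stack_next(stack) = cache->free;
                    cache->free = stack;
                    cache->count++;
            } else {
                    /* Overflow to the shared list under the global lock. */
                    lock_global();
                    push_stack(stack);
                    unlock_global();
            }
    }
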
309 * Non-blocking attempt to allocate a
310 * stack for a thread.
321 vm_offset_t stack;
324 stack = cache->free;
325 if (stack != 0) {
327 cache->free = stack_next(stack);
333 stack = stack_free_list;
334 if (stack != 0) {
336 stack_free_list = stack_next(stack);
344 if (stack != 0 || (stack = thread->reserved_stack) != 0) {
345 machine_stack_attach(thread, stack);
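
The non-blocking attempt probes three sources in order, as the listing shows: the per-processor cache, then the global free list, and finally the thread's reserved stack. Only if all three come up empty does the caller have to fall back to the blocking allocator. A sketch reusing the pieces above:

    static int stack_alloc_try_sketch(struct stack_cache_sketch *cache,
                                      struct thread_sketch *thread)
    {
            uintptr_t stack = cache->free;

            if (stack != 0) {
                    cache->free = stack_next(stack);   /* per-CPU cache hit */
                    cache->count--;
            } else if (stack_free_list != 0) {
                    lock_global();
                    stack = pop_stack();   /* global list, rechecked under the lock */
                    unlock_global();
            }

            if (stack != 0 || (stack = thread->reserved_stack) != 0) {
                    thread->kernel_stack = stack;   /* models machine_stack_attach() */
                    return 1;                       /* success without blocking */
            }
            return 0;                               /* caller must take the blocking path */
    }
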
365 vm_offset_t stack;
375 stack = stack_free_list;
376 stack_free_list = stack_next(stack);
382 * Get the stack base address, then decrement by one page
383 * to account for the lower guard page. Add two extra pages
384 * to the size to account for the guard pages on both ends
385 * that were originally requested when the stack was allocated
389 stack = (vm_offset_t)vm_map_trunc_page(stack);
390 stack -= PAGE_SIZE;
393 stack,
394 stack + kernel_stack_size + (2*PAGE_SIZE),
398 stack = 0;
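
Teardown reverses the allocation arithmetic: truncate the cached address to a page boundary, back up one page to reach the lower guard, and remove kernel_stack_size plus both guard pages in a single unmap. Paired with the stack_alloc_guarded() sketch above, the userspace analogue is:

    /* Counterpart to stack_alloc_guarded(): recover the start of the
     * original mapping from the usable base, then unmap the whole
     * region, guard pages included. */
    static void stack_free_guarded(uintptr_t stack, size_t stack_size)
    {
            size_t page = (size_t)sysconf(_SC_PAGESIZE);
            void *region = (void *)(stack - page);   /* back over the lower guard */

            munmap(region, stack_size + 2 * page);
    }
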
420 * Limits stack collection to once per
421 * computation period.
495 * Return info on stack usage for threads in a specific processor set