/*
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/10.2/lib/libkse/thread/thr_stack.c 174112 2007-11-30 17:20:29Z deischen $
 */

#include "namespace.h"
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

/* Spare thread stack. */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached
 * to avoid additional complexity managing mmap()ed stack regions.  Spare
 * stacks are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)	dstackq = LIST_HEAD_INITIALIZER(dstackq);

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed
 * stack regions.  This list is unordered, since ordering on both stack
 * size and guard size would be more trouble than it's worth.  Stacks are
 * allocated from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)	mstackq = LIST_HEAD_INITIALIZER(mstackq);
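
/*
 * A concrete illustration of the cache policy (example numbers only,
 * assuming 4 KiB pages): if mstackq holds one spare stack recorded as
 * stacksize 64 KiB / guardsize 4 KiB, a request that rounds up to
 * 64 KiB / 4 KiB reuses it, while a request that rounds up to
 * 64 KiB / 8 KiB matches nothing and falls through to a fresh mmap().
 * Both the stack size and the guard size must match exactly; there is
 * no best-fit search and no splitting of larger spares.
 */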

/*
 * Base address of the last stack allocated (including its red zone, if
 * there is one).  Stacks are allocated contiguously, starting beyond the
 * top of the main stack.  When a new stack is created, a red zone is
 * typically created (actually, the red zone is mapped with PROT_NONE) above
 * the top of the stack, such that the stack will not be able to grow all
 * the way to the bottom of the next stack.  This isn't fool-proof.  It is
 * possible for a stack to grow by a large amount, such that it grows into
 * the next stack, and as long as the memory within the red zone is never
 * accessed, nothing will prevent one thread stack from trouncing all over
 * the next.
 *
 * low memory
 *     . . . . . . . . . . . . . . . . . .
 *   |                                   |
 *   |             stack 3               | start of 3rd thread stack
 *   +-----------------------------------+
 *   |                                   |
 *   |       Red Zone (guard page)       | red zone for 2nd thread
 *   |                                   |
 *   +-----------------------------------+
 *   |  stack 2 - PTHREAD_STACK_DEFAULT  | top of 2nd thread stack
 *   |                                   |
 *   |                                   |
 *   |                                   |
 *   |                                   |
 *   |             stack 2               |
 *   +-----------------------------------+ <-- start of 2nd thread stack
 *   |                                   |
 *   |             Red Zone              | red zone for 1st thread
 *   |                                   |
 *   +-----------------------------------+
 *   |  stack 1 - PTHREAD_STACK_DEFAULT  | top of 1st thread stack
 *   |                                   |
 *   |                                   |
 *   |                                   |
 *   |                                   |
 *   |             stack 1               |
 *   +-----------------------------------+ <-- start of 1st thread stack
 *   |                                   |     (initial value of last_stack)
 *   |             Red Zone              |
 *   |                                   | red zone for main thread
 *   +-----------------------------------+
 *   | USRSTACK - PTHREAD_STACK_INITIAL  | top of main thread stack
 *   |                                   | ^
 *   |                                   | |
 *   |                                   | |
 *   |                                   | | stack growth
 *   |                                   |
 *   +-----------------------------------+ <-- start of main thread stack
 *                                             (USRSTACK)
 * high memory
 *
 */
static void *last_stack = NULL;

/*
 * Round size up to the nearest multiple of
 * _thr_page_size.
 */
static inline size_t
round_up(size_t size)
{
	if (size % _thr_page_size != 0)
		size = ((size / _thr_page_size) + 1) *
		    _thr_page_size;
	return (size);
}
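
/*
 * A worked example of the rounding (illustrative numbers only, assuming
 * _thr_page_size is 4096):
 *
 *	round_up(4096) == 4096    already page-aligned, returned unchanged
 *	round_up(5000) == 8192    (5000 / 4096 + 1) * 4096
 *	round_up(1)    == 4096    even one byte consumes a whole page
 */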

int
_thr_stack_alloc(struct pthread_attr *attr)
{
	struct stack *spare_stack;
	struct kse *curkse;
	kse_critical_t crit;
	size_t stacksize;
	size_t guardsize;
	char *stackaddr;

	/*
	 * Round up stack size to nearest multiple of _thr_page_size so
	 * that mmap() will work.  If the stack size is not an even
	 * multiple, we end up initializing things such that there is
	 * unused space above the beginning of the stack, so the stack
	 * sits snugly against its guard.
	 */
	stacksize = round_up(attr->stacksize_attr);
	guardsize = round_up(attr->guardsize_attr);

	attr->stackaddr_attr = NULL;
	attr->flags &= ~THR_STACK_USER;

	/*
	 * Use the garbage collector lock for synchronization of the
	 * spare stack lists and allocations from usrstack.
	 */
	crit = _kse_critical_enter();
	curkse = _get_curkse();
	KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
	/*
	 * If the stack and guard sizes are default, try to allocate a stack
	 * from the default-size stack cache:
	 */
	if ((stacksize == _thr_stack_default) &&
	    (guardsize == _thr_guard_default)) {
		if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
			/* Use the spare stack. */
			LIST_REMOVE(spare_stack, qe);
			attr->stackaddr_attr = spare_stack->stackaddr;
		}
	}
	/*
	 * The user specified a non-default stack and/or guard size, so try to
	 * allocate a stack from the non-default size stack cache, using the
	 * rounded up stack size (stacksize) in the search:
	 */
	else {
		LIST_FOREACH(spare_stack, &mstackq, qe) {
			if (spare_stack->stacksize == stacksize &&
			    spare_stack->guardsize == guardsize) {
				LIST_REMOVE(spare_stack, qe);
				attr->stackaddr_attr = spare_stack->stackaddr;
				break;
			}
		}
	}
	if (attr->stackaddr_attr != NULL) {
		/* A cached stack was found.  Release the lock. */
		KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
		_kse_critical_leave(crit);
	}
	else {
		/* Allocate a stack from usrstack. */
		if (last_stack == NULL)
			last_stack = (void *)((uintptr_t)_usrstack -
			    (uintptr_t)_thr_stack_initial -
			    (uintptr_t)_thr_guard_default);

		/* Allocate a new stack. */
		stackaddr = (void *)((uintptr_t)last_stack -
		    (uintptr_t)stacksize - (uintptr_t)guardsize);

		/*
		 * Even if stack allocation fails, we don't want to try to
		 * use this location again, so unconditionally decrement
		 * last_stack.  Under normal operating conditions, the most
		 * likely reason for an mmap() error is a stack overflow of
		 * the adjacent thread stack.
		 */
		last_stack = (void *)((uintptr_t)last_stack -
		    (uintptr_t)(stacksize + guardsize));

		/* Release the lock before mmap'ing it. */
		KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
		_kse_critical_leave(crit);

		/*
		 * Map the stack and guard page together, and split the
		 * guard page from the allocated space:
		 */
		if ((stackaddr = mmap(stackaddr, stacksize + guardsize,
		    PROT_READ | PROT_WRITE, MAP_STACK,
		    -1, 0)) != MAP_FAILED &&
		    (guardsize == 0 ||
		    mprotect(stackaddr, guardsize, PROT_NONE) == 0)) {
			stackaddr += guardsize;
		} else {
			if (stackaddr != MAP_FAILED)
				munmap(stackaddr, stacksize + guardsize);
			stackaddr = NULL;
		}
		attr->stackaddr_attr = stackaddr;
	}
	if (attr->stackaddr_attr != NULL)
		return (0);
	else
		return (-1);
}

/* This function must be called with _thread_list_lock held. */
void
_thr_stack_free(struct pthread_attr *attr)
{
	struct stack *spare_stack;

	if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
	    && (attr->stackaddr_attr != NULL)) {
		spare_stack = (struct stack *)((uintptr_t)attr->stackaddr_attr
		    + (uintptr_t)attr->stacksize_attr - sizeof(struct stack));
		spare_stack->stacksize = round_up(attr->stacksize_attr);
		spare_stack->guardsize = round_up(attr->guardsize_attr);
		spare_stack->stackaddr = attr->stackaddr_attr;

		if (spare_stack->stacksize == _thr_stack_default &&
		    spare_stack->guardsize == _thr_guard_default) {
			/* Default stack/guard size. */
			LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
		} else {
			/* Non-default stack/guard size. */
			LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
		}
		attr->stackaddr_attr = NULL;
	}
}
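
/*
 * A minimal usage sketch (not part of this file): the thread-creation
 * path fills in a struct pthread_attr and lets this allocator find or
 * map a stack.  Field and global names are those used above; the exact
 * error handling of a real caller is elided, and the EAGAIN return is
 * only an assumption about what that caller would do.
 *
 *	struct pthread_attr attr;
 *
 *	attr.stacksize_attr = _thr_stack_default;
 *	attr.guardsize_attr = _thr_guard_default;
 *	attr.flags = 0;
 *	if (_thr_stack_alloc(&attr) != 0)
 *		return (EAGAIN);          no address space for a stack
 *
 *	...the new thread runs with its stack at attr.stackaddr_attr...
 *
 *	When the thread is reaped, the stack goes back on a spare queue;
 *	per the comment above, _thread_list_lock must be held here:
 *
 *	KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
 *	_thr_stack_free(&attr);
 *	KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
 */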