/*
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_stack.c 112918 2003-04-01 03:46:29Z jeff $
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/user.h>
#include <stdlib.h>
#include <pthread.h>
#include "thr_private.h"

/* Spare thread stack. */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};
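
/*
 * Note that a cached stack's "struct stack" header is stored inside the
 * stack memory itself rather than in separately allocated memory (see
 * _thread_stack_free()).  A minimal sketch of the layout, assuming an
 * illustrative 64 KB (65536 byte) stack mapped at "stack":
 *
 *	struct stack *hdr;
 *
 *	hdr = (struct stack *)(stack + 65536 - sizeof(struct stack));
 *
 * The header occupies the last few words at the high end of the stack,
 * which is harmless because that memory is unused while the stack sits
 * in a cache.
 */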

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached
 * to avoid the additional complexity of managing mmap()ed stack regions.
 * Spare stacks are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)	_dstackq = LIST_HEAD_INITIALIZER(_dstackq);

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid the additional complexity of managing
 * mmap()ed stack regions.  This list is unordered, since ordering on both
 * stack size and guard size would be more trouble than it's worth.  Stacks
 * are allocated from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)	_mstackq = LIST_HEAD_INITIALIZER(_mstackq);

/*
 * Base address of the last stack allocated (including its red zone, if there is
 * one).  Stacks are allocated contiguously, starting beyond the top of the main
 * stack.  When a new stack is created, a red zone is typically created
 * (actually, the red zone is simply left unmapped) above the top of the stack,
 * such that the stack will not be able to grow all the way to the bottom of the
 * next stack.  This isn't fool-proof.  It is possible for a stack to grow by a
 * large amount, such that it grows into the next stack, and as long as the
 * memory within the red zone is never accessed, nothing will prevent one thread
 * stack from trouncing all over the next.
 *
 * low memory
 *     . . . . . . . . . . . . . . . . . .
 *    |                                   |
 *    |             stack 3               | start of 3rd thread stack
 *    +-----------------------------------+
 *    |                                   |
 *    |       Red Zone (guard page)       | red zone for 2nd thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 2 - PTHREAD_STACK_DEFAULT  | top of 2nd thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 2               |
 *    +-----------------------------------+ <-- start of 2nd thread stack
 *    |                                   |
 *    |       Red Zone                    | red zone for 1st thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 1 - PTHREAD_STACK_DEFAULT  | top of 1st thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 1               |
 *    +-----------------------------------+ <-- start of 1st thread stack
 *    |                                   |   (initial value of last_stack)
 *    |       Red Zone                    |
 *    |                                   | red zone for main thread
 *    +-----------------------------------+
 *    | USRSTACK - PTHREAD_STACK_INITIAL  | top of main thread stack
 *    |                                   | ^
 *    |                                   | |
 *    |                                   | |
 *    |                                   | | stack growth
 *    |                                   |
 *    +-----------------------------------+ <-- start of main thread stack
 *                                              (USRSTACK)
 * high memory
 *
 */
static void *	last_stack;
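
/*
 * A worked example of the layout above, using illustrative (not
 * platform-accurate) values: assume USRSTACK = 0xc0000000,
 * PTHREAD_STACK_INITIAL = 0x100000 and _pthread_guard_default = 0x1000.
 * The first allocation then initializes:
 *
 *	last_stack = 0xc0000000 - 0x100000 - 0x1000 = 0xbfeff000
 *
 * and the first thread stack occupies the stack_size bytes just below
 * that address, growing down (toward lower addresses) into its own
 * unmapped red zone.
 */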

void *
_thread_stack_alloc(size_t stacksize, size_t guardsize)
{
	void		*stack = NULL;
	struct stack	*spare_stack;
	size_t		stack_size;

	/*
	 * Round up stack size to nearest multiple of _pthread_page_size,
	 * so that mmap() will work.  If the stack size is not an even
	 * multiple, we end up initializing things such that there is unused
	 * space above the beginning of the stack, so the stack sits snugly
	 * against its guard.
	 */
	if (stacksize % _pthread_page_size != 0)
		stack_size = ((stacksize / _pthread_page_size) + 1) *
		    _pthread_page_size;
	else
		stack_size = stacksize;
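
	/*
	 * For example, with a hypothetical 4096-byte page size, a requested
	 * stacksize of 70000 bytes rounds up to ((70000 / 4096) + 1) * 4096 =
	 * 73728, the next page boundary, while an exact multiple such as
	 * 65536 is left unchanged.
	 */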

	/*
	 * If the stack and guard sizes are default, try to allocate a stack
	 * from the default-size stack cache:
	 */
	if (stack_size == PTHREAD_STACK_DEFAULT &&
	    guardsize == _pthread_guard_default) {
		/*
		 * Use the garbage collector mutex for synchronization of the
		 * spare stack list.
		 */
		if (pthread_mutex_lock(&_gc_mutex) != 0)
			PANIC("Cannot lock gc mutex");

		if ((spare_stack = LIST_FIRST(&_dstackq)) != NULL) {
			/* Use the spare stack. */
			LIST_REMOVE(spare_stack, qe);
			stack = spare_stack->stackaddr;
		}

		/* Unlock the garbage collector mutex. */
		if (pthread_mutex_unlock(&_gc_mutex) != 0)
			PANIC("Cannot unlock gc mutex");
	}
	/*
	 * The user specified a non-default stack and/or guard size, so try to
	 * allocate a stack from the non-default size stack cache, using the
	 * rounded up stack size (stack_size) in the search:
	 */
	else {
		/*
		 * Use the garbage collector mutex for synchronization of the
		 * spare stack list.
		 */
		if (pthread_mutex_lock(&_gc_mutex) != 0)
			PANIC("Cannot lock gc mutex");

		LIST_FOREACH(spare_stack, &_mstackq, qe) {
			if (spare_stack->stacksize == stack_size &&
			    spare_stack->guardsize == guardsize) {
				LIST_REMOVE(spare_stack, qe);
				stack = spare_stack->stackaddr;
				break;
			}
		}

		/* Unlock the garbage collector mutex. */
		if (pthread_mutex_unlock(&_gc_mutex) != 0)
			PANIC("Cannot unlock gc mutex");
	}

	/* If a stack was not found in a cache, allocate a new one: */
	if (stack == NULL) {
		if (last_stack == NULL)
			last_stack = _usrstack - PTHREAD_STACK_INITIAL -
			    _pthread_guard_default;

		/* Compute the address for the new stack. */
		stack = last_stack - stack_size;

		/*
		 * Even if stack allocation fails, we don't want to try to use
		 * this location again, so unconditionally decrement
		 * last_stack.  Under normal operating conditions, the most
		 * likely reason for an mmap() error is a stack overflow of the
		 * adjacent thread stack.
		 */
		last_stack -= (stack_size + guardsize);

		/* Map the stack; the guard region is simply left unmapped. */
		if (mmap(stack, stack_size, PROT_READ | PROT_WRITE, MAP_STACK,
		    -1, 0) == MAP_FAILED)
			stack = NULL;
	}

	return (stack);
}
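
/*
 * A minimal usage sketch for _thread_stack_alloc(); the caller shown here
 * is hypothetical (the real callers live elsewhere in the library):
 *
 *	void *stack;
 *
 *	stack = _thread_stack_alloc(PTHREAD_STACK_DEFAULT,
 *	    _pthread_guard_default);
 *	if (stack == NULL)
 *		return (EAGAIN);
 *
 * No locking is required of the caller; _gc_mutex is taken internally
 * while the spare stack caches are examined.
 */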

/* This function must be called with _gc_mutex held. */
void
_thread_stack_free(void *stack, size_t stacksize, size_t guardsize)
{
	struct stack	*spare_stack;

	/* The cache header is stored at the high end of the stack itself. */
	spare_stack = (stack + stacksize - sizeof(struct stack));
	/* Round stacksize up to nearest multiple of _pthread_page_size. */
	if (stacksize % _pthread_page_size != 0) {
		spare_stack->stacksize =
		    ((stacksize / _pthread_page_size) + 1) *
		    _pthread_page_size;
	} else
		spare_stack->stacksize = stacksize;
	spare_stack->guardsize = guardsize;
	spare_stack->stackaddr = stack;

	if (spare_stack->stacksize == PTHREAD_STACK_DEFAULT &&
	    spare_stack->guardsize == _pthread_guard_default) {
		/* Default stack/guard size. */
		LIST_INSERT_HEAD(&_dstackq, spare_stack, qe);
	} else {
		/* Non-default stack/guard size. */
		LIST_INSERT_HEAD(&_mstackq, spare_stack, qe);
	}
}
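
/*
 * A minimal sketch of returning a stack to a cache; the caller shown here
 * is hypothetical, but note that, unlike _thread_stack_alloc(), the caller
 * of _thread_stack_free() must already hold _gc_mutex:
 *
 *	if (pthread_mutex_lock(&_gc_mutex) != 0)
 *		PANIC("Cannot lock gc mutex");
 *	_thread_stack_free(stack, stacksize, guardsize);
 *	if (pthread_mutex_unlock(&_gc_mutex) != 0)
 *		PANIC("Cannot unlock gc mutex");
 */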