/*
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libkse/thread/thr_stack.c 80021 2001-07-20 04:23:11Z jasone $
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/user.h>
#include <stdlib.h>
#include <pthread.h>
#include "pthread_private.h"

/* Spare thread stack. */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};
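
/*
 * Note: _thread_stack_free() stores this record inside the cached stack
 * region itself (at the region's high end), so caching a stack requires
 * no separate allocation for the bookkeeping.
 */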

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached to
 * avoid additional complexity managing mmap()ed stack regions.  Spare stacks
 * are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)	_dstackq = LIST_HEAD_INITIALIZER(_dstackq);

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed stack
 * regions.  This list is unordered, since ordering on both stack size and guard
 * size would be more trouble than it's worth.  Stacks are allocated from this
 * cache on a first size match basis.
 */
static LIST_HEAD(, stack)	_mstackq = LIST_HEAD_INITIALIZER(_mstackq);

/*
 * Base address of the last stack allocated (including its red zone, if there is
 * one).  Stacks are allocated contiguously, starting beyond the top of the main
 * stack.  When a new stack is created, a red zone is typically created
 * (actually, the red zone is simply left unmapped) above the top of the stack,
 * such that the stack will not be able to grow all the way to the bottom of the
 * next stack.  This isn't fool-proof.  It is possible for a stack to grow by a
 * large amount, such that it grows into the next stack, and as long as the
 * memory within the red zone is never accessed, nothing will prevent one thread
 * stack from trouncing all over the next.
 *
 * low memory
 *     . . . . . . . . . . . . . . . . . .
 *    |                                   |
 *    |             stack 3               | start of 3rd thread stack
 *    +-----------------------------------+
 *    |                                   |
 *    |       Red Zone (guard page)       | red zone for 2nd thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 2 - PTHREAD_STACK_DEFAULT  | top of 2nd thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 2               |
 *    +-----------------------------------+ <-- start of 2nd thread stack
 *    |                                   |
 *    |       Red Zone                    | red zone for 1st thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 1 - PTHREAD_STACK_DEFAULT  | top of 1st thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 1               |
 *    +-----------------------------------+ <-- start of 1st thread stack
 *    |                                   |   (initial value of last_stack)
 *    |       Red Zone                    |
 *    |                                   | red zone for main thread
 *    +-----------------------------------+
 *    | USRSTACK - PTHREAD_STACK_INITIAL  | top of main thread stack
 *    |                                   | ^
 *    |                                   | |
 *    |                                   | |
 *    |                                   | | stack growth
 *    |                                   |
 *    +-----------------------------------+ <-- start of main thread stack
 *                                              (USRSTACK)
 * high memory
 *
 */
static void *	last_stack = (void *) USRSTACK - PTHREAD_STACK_INITIAL
		    - PTHREAD_GUARD_DEFAULT;
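
/*
 * Worked example (hypothetical numbers, for illustration only): with
 * USRSTACK = 0xc0000000, PTHREAD_STACK_INITIAL = 0x100000 (1MB), and
 * PTHREAD_GUARD_DEFAULT = 0x1000 (one 4KB page), last_stack starts out at
 * 0xc0000000 - 0x100000 - 0x1000 = 0xbfeff000, and the first thread stack
 * is carved out immediately below that address.
 */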

void *
_thread_stack_alloc(size_t stacksize, size_t guardsize)
{
	void		*stack = NULL;
	struct stack	*spare_stack;
	size_t		stack_size;

	/*
	 * Round up stack size to nearest multiple of PAGE_SIZE, so that mmap()
	 * will work.  If the stack size is not an even multiple, we end up
	 * initializing things such that there is unused space above the
	 * beginning of the stack, so the stack sits snugly against its guard.
	 */
	if (stacksize % PAGE_SIZE != 0)
		stack_size = ((stacksize / PAGE_SIZE) + 1) * PAGE_SIZE;
	else
		stack_size = stacksize;

	/*
	 * If the stack and guard sizes are default, try to allocate a stack
	 * from the default-size stack cache:
	 */
	if (stack_size == PTHREAD_STACK_DEFAULT &&
	    guardsize == PTHREAD_GUARD_DEFAULT) {
		/*
		 * Use the garbage collector mutex for synchronization of the
		 * spare stack list.
		 */
		if (pthread_mutex_lock(&_gc_mutex) != 0)
			PANIC("Cannot lock gc mutex");

		if ((spare_stack = LIST_FIRST(&_dstackq)) != NULL) {
			/* Use the spare stack. */
			LIST_REMOVE(spare_stack, qe);
			stack = spare_stack->stackaddr;
		}

		/* Unlock the garbage collector mutex. */
		if (pthread_mutex_unlock(&_gc_mutex) != 0)
			PANIC("Cannot unlock gc mutex");
	}
	/*
	 * The user specified a non-default stack and/or guard size, so try to
	 * allocate a stack from the non-default size stack cache, using the
	 * rounded up stack size (stack_size) in the search:
	 */
	else {
		/*
		 * Use the garbage collector mutex for synchronization of the
		 * spare stack list.
		 */
		if (pthread_mutex_lock(&_gc_mutex) != 0)
			PANIC("Cannot lock gc mutex");

		LIST_FOREACH(spare_stack, &_mstackq, qe) {
			if (spare_stack->stacksize == stack_size &&
			    spare_stack->guardsize == guardsize) {
				LIST_REMOVE(spare_stack, qe);
				stack = spare_stack->stackaddr;
				break;
			}
		}

		/* Unlock the garbage collector mutex. */
		if (pthread_mutex_unlock(&_gc_mutex) != 0)
			PANIC("Cannot unlock gc mutex");
	}

	/* Check if a stack was not allocated from a stack cache: */
	if (stack == NULL) {
		/* Allocate a new stack. */
		stack = last_stack - stack_size;

		/*
		 * Even if stack allocation fails, we don't want to try to use
		 * this location again, so unconditionally decrement
		 * last_stack.  Under normal operating conditions, the most
		 * likely reason for an mmap() error is a stack overflow of the
		 * adjacent thread stack.
		 */
		last_stack -= (stack_size + guardsize);

		/* Map the stack, leaving the guard region unmapped: */
		if (mmap(stack, stack_size, PROT_READ | PROT_WRITE, MAP_STACK,
		    -1, 0) == MAP_FAILED)
			stack = NULL;
	}

	return (stack);
}

/* This function must be called with _gc_mutex held. */
void
_thread_stack_free(void *stack, size_t stacksize, size_t guardsize)
{
	struct stack	*spare_stack;

	spare_stack = (stack + stacksize - sizeof(struct stack));
	/* Round stacksize up to nearest multiple of PAGE_SIZE. */
	if (stacksize % PAGE_SIZE != 0) {
		spare_stack->stacksize = ((stacksize / PAGE_SIZE) + 1) *
		    PAGE_SIZE;
	} else
		spare_stack->stacksize = stacksize;
	spare_stack->guardsize = guardsize;
	spare_stack->stackaddr = stack;

	if (spare_stack->stacksize == PTHREAD_STACK_DEFAULT &&
	    spare_stack->guardsize == PTHREAD_GUARD_DEFAULT) {
		/* Default stack/guard size. */
		LIST_INSERT_HEAD(&_dstackq, spare_stack, qe);
	} else {
		/* Non-default stack/guard size. */
		LIST_INSERT_HEAD(&_mstackq, spare_stack, qe);
	}
}
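
/*
 * Illustrative sketch (not part of the library): how a caller might pair
 * the two functions above.  The struct example_thread type and its stack
 * field are hypothetical; _thread_stack_alloc(), _thread_stack_free(),
 * _gc_mutex, PANIC(), and the size constants come from this file and
 * pthread_private.h.  Kept under #if 0 so it is never compiled.
 */
#if 0
struct example_thread {
	void	*stack;	/* Hypothetical thread record for this sketch. */
};

static int
example_stack_create(struct example_thread *thr)
{
	/* Ask for a default-size stack with the default guard. */
	thr->stack = _thread_stack_alloc(PTHREAD_STACK_DEFAULT,
	    PTHREAD_GUARD_DEFAULT);
	return (thr->stack == NULL ? -1 : 0);
}

static void
example_stack_destroy(struct example_thread *thr)
{
	/* _thread_stack_free() requires _gc_mutex to be held. */
	if (pthread_mutex_lock(&_gc_mutex) != 0)
		PANIC("Cannot lock gc mutex");
	_thread_stack_free(thr->stack, PTHREAD_STACK_DEFAULT,
	    PTHREAD_GUARD_DEFAULT);
	if (pthread_mutex_unlock(&_gc_mutex) != 0)
		PANIC("Cannot unlock gc mutex");
}
#endif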
236