umem_fork.c revision 1219:f89f56c2d9ac
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "c_synonyms.h"
#include "umem_base.h"
#include "vmem_base.h"

#include <unistd.h>

/*
 * The following functions are for pre- and post-fork1(2) handling.  See
 * "Lock Ordering" in lib/libumem/common/umem.c for the lock ordering used.
 */

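/*
 * Acquire all of a cache's locks: each per-CPU lock first, then the
 * depot lock, then the cache lock itself.
 */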
static void
umem_lockup_cache(umem_cache_t *cp)
{
	int idx;
	int ncpus = cp->cache_cpu_mask + 1;

	for (idx = 0; idx < ncpus; idx++)
		(void) mutex_lock(&cp->cache_cpu[idx].cc_lock);

	(void) mutex_lock(&cp->cache_depot_lock);
	(void) mutex_lock(&cp->cache_lock);
}

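/*
 * Drop the cache and depot locks, then the per-CPU locks taken by
 * umem_lockup_cache().
 */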
static void
umem_release_cache(umem_cache_t *cp)
{
	int idx;
	int ncpus = cp->cache_cpu_mask + 1;

	(void) mutex_unlock(&cp->cache_lock);
	(void) mutex_unlock(&cp->cache_depot_lock);

	for (idx = 0; idx < ncpus; idx++)
		(void) mutex_unlock(&cp->cache_cpu[idx].cc_lock);
}

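/*
 * Acquire a log header's per-CPU locks, then the header lock itself.
 * A NULL header (log not in use) is ignored.
 */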
static void
umem_lockup_log_header(umem_log_header_t *lhp)
{
	int idx;
	if (lhp == NULL)
		return;
	for (idx = 0; idx < umem_max_ncpus; idx++)
		(void) mutex_lock(&lhp->lh_cpu[idx].clh_lock);

	(void) mutex_lock(&lhp->lh_lock);
}

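/*
 * Undo umem_lockup_log_header(): drop the header lock, then the
 * per-CPU locks.
 */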
static void
umem_release_log_header(umem_log_header_t *lhp)
{
	int idx;
	if (lhp == NULL)
		return;

	(void) mutex_unlock(&lhp->lh_lock);

	for (idx = 0; idx < umem_max_ncpus; idx++)
		(void) mutex_unlock(&lhp->lh_cpu[idx].clh_lock);
}

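/*
 * pthread_atfork() "prepare" handler: acquires every umem and vmem lock
 * so that the child of fork1(2) does not inherit locks held by threads
 * that will not exist in the child.
 */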
static void
umem_lockup(void)
{
	umem_cache_t *cp;

	(void) mutex_lock(&umem_init_lock);
	/*
	 * If another thread is busy initializing the library, we must
	 * wait for it to complete (by calling umem_init()) before allowing
	 * the fork() to proceed.
	 */
	if (umem_ready == UMEM_READY_INITING && umem_init_thr != thr_self()) {
		(void) mutex_unlock(&umem_init_lock);
		(void) umem_init();
		(void) mutex_lock(&umem_init_lock);
	}

	vmem_lockup();
	vmem_sbrk_lockup();

	(void) mutex_lock(&umem_cache_lock);
	(void) mutex_lock(&umem_update_lock);
	(void) mutex_lock(&umem_flags_lock);

	umem_lockup_cache(&umem_null_cache);
	for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
	    cp = cp->cache_prev)
		umem_lockup_cache(cp);

	umem_lockup_log_header(umem_transaction_log);
	umem_lockup_log_header(umem_content_log);
	umem_lockup_log_header(umem_failure_log);
	umem_lockup_log_header(umem_slab_log);

	(void) cond_broadcast(&umem_update_cv);
}

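/*
 * Common post-fork handler: releases every lock taken by umem_lockup(),
 * in reverse order.  When called in the child (as_child != 0) and an
 * update or reap was in progress in some other thread of the parent,
 * that worker no longer exists, so its state is reset and any caches it
 * was working on are put back on the update list.
 */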
static void
umem_do_release(int as_child)
{
	umem_cache_t *cp;
	int cleanup_update = 0;

	/*
	 * Clean up the update state if we are the child process and
	 * another thread was processing updates.
	 */
	if (as_child) {
		if (umem_update_thr != thr_self()) {
			umem_update_thr = 0;
			cleanup_update = 1;
		}
		if (umem_st_update_thr != thr_self()) {
			umem_st_update_thr = 0;
			cleanup_update = 1;
		}
	}

	if (cleanup_update) {
		umem_reaping = UMEM_REAP_DONE;

		for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
		    cp = cp->cache_next) {
			if (cp->cache_uflags & UMU_NOTIFY)
				cp->cache_uflags &= ~UMU_NOTIFY;

			/*
			 * If the cache is active, we just re-add it to
			 * the update list.  This will re-do any active
			 * updates on the cache, but that won't break
			 * anything.
			 *
			 * The worst that can happen is a cache has
			 * its magazines rescaled twice, instead of once.
			 */
			if (cp->cache_uflags & UMU_ACTIVE) {
				umem_cache_t *cnext, *cprev;

				ASSERT(cp->cache_unext == NULL &&
				    cp->cache_uprev == NULL);

				cp->cache_uflags &= ~UMU_ACTIVE;
				cp->cache_unext = cnext = &umem_null_cache;
				cp->cache_uprev = cprev =
				    umem_null_cache.cache_uprev;
				cnext->cache_uprev = cp;
				cprev->cache_unext = cp;
			}
		}
	}

	umem_release_log_header(umem_slab_log);
	umem_release_log_header(umem_failure_log);
	umem_release_log_header(umem_content_log);
	umem_release_log_header(umem_transaction_log);

	for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
	    cp = cp->cache_next)
		umem_release_cache(cp);
	umem_release_cache(&umem_null_cache);

	(void) mutex_unlock(&umem_flags_lock);
	(void) mutex_unlock(&umem_update_lock);
	(void) mutex_unlock(&umem_cache_lock);

	vmem_sbrk_release();
	vmem_release();

	(void) mutex_unlock(&umem_init_lock);
}

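/*
 * pthread_atfork() "parent" handler: just drop the locks.
 */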
static void
umem_release(void)
{
	umem_do_release(0);
}

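/*
 * pthread_atfork() "child" handler: drop the locks and clean up any
 * update state inherited from the parent.
 */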
static void
umem_release_child(void)
{
	umem_do_release(1);
}

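/*
 * Registers the handlers above with pthread_atfork().
 */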
void
umem_forkhandler_init(void)
{
	/*
	 * There is no way to unregister these atfork functions,
	 * but we don't need to.  The dynamic linker and libc take
	 * care of unregistering them if/when the library is unloaded.
	 */
	(void) pthread_atfork(umem_lockup, umem_release, umem_release_child);
}