vm_pager.c (12662) -> vm_pager.c (12767)
1/*
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)vm_pager.c 8.6 (Berkeley) 1/12/94
37 *
38 *
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
41 *
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43 *
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
49 *
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 *
54 * Carnegie Mellon requests users of this software to return to
55 *
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * $Id: vm_pager.c,v 1.18 1995/11/20 12:19:19 phk Exp $
64 * $Id: vm_pager.c,v 1.19 1995/12/07 12:48:26 davidg Exp $
65 */
66
67/*
68 * Paging space routine stubs. Emulates a matchmaker-like interface
69 * for builtin pagers.
70 */
71
72#include <sys/param.h>
73#include <sys/systm.h>
74#include <sys/proc.h>
75#include <sys/malloc.h>
76#include <sys/buf.h>
77#include <sys/ucred.h>
78
79#include <vm/vm.h>
80#include <vm/vm_param.h>
81#include <vm/vm_prot.h>
82#include <vm/vm_object.h>
83#include <vm/vm_page.h>
84#include <vm/vm_kern.h>
85#include <vm/vm_pager.h>
86#include <vm/vm_extern.h>
87
88extern struct pagerops defaultpagerops;
89extern struct pagerops swappagerops;
90extern struct pagerops vnodepagerops;
91extern struct pagerops devicepagerops;
92
93static struct pagerops *pagertab[] = {
94 &defaultpagerops, /* OBJT_DEFAULT */
95 &swappagerops, /* OBJT_SWAP */
96 &vnodepagerops, /* OBJT_VNODE */
97 &devicepagerops, /* OBJT_DEVICE */
98};
99static int npagers = sizeof(pagertab) / sizeof(pagertab[0]);
100
101/*
102 * Kernel address space for mapping pages.
103 * Used by pagers where KVAs are needed for IO.
104 *
105 * XXX needs to be large enough to support the number of pending async
106 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
107 * (MAXPHYS == 64k) if you want to get the most efficiency.
108 */
109#define PAGER_MAP_SIZE (8 * 1024 * 1024)
110
111int pager_map_size = PAGER_MAP_SIZE;
112vm_map_t pager_map;
113boolean_t pager_map_wanted;
114int bswneeded;
115vm_offset_t swapbkva; /* swap buffers kva */
116
117void
118vm_pager_init()
119{
120 struct pagerops **pgops;
121
122 /*
123 * Initialize known pagers
124 */
125 for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
126 if (pgops && ((*pgops)->pgo_init != NULL))
127 (*(*pgops)->pgo_init) ();
128}
129
130void
131vm_pager_bufferinit()
132{
133 struct buf *bp;
134 int i;
135
136 bp = swbuf;
137 /*
138 * Now set up swap and physical I/O buffer headers.
139 */
140 for (i = 0; i < nswbuf - 1; i++, bp++) {
141 TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
142 bp->b_rcred = bp->b_wcred = NOCRED;
143 bp->b_vnbufs.le_next = NOLIST;
144 }
145 bp->b_rcred = bp->b_wcred = NOCRED;
146 bp->b_vnbufs.le_next = NOLIST;
147 bp->b_actf = NULL;
148
149 swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS);
150 if (!swapbkva)
151 panic("Not enough pager_map VM space for physical buffers");
152}
153
154/*
155 * Allocate an instance of a pager of the given type.
156 * Size, protection and offset parameters are passed in for pagers that
157 * need to perform page-level validation (e.g. the device pager).
158 */
159vm_object_t
160vm_pager_allocate(type, handle, size, prot, off)
161 objtype_t type;
162 void *handle;
163 vm_size_t size;
164 vm_prot_t prot;
165 vm_offset_t off;
165 vm_ooffset_t off;
166{
167 struct pagerops *ops;
168
169 ops = pagertab[type];
170 if (ops)
171 return ((*ops->pgo_alloc) (handle, size, prot, off));
172 return (NULL);
173}
174
175void
176vm_pager_deallocate(object)
177 vm_object_t object;
178{
179 (*pagertab[object->type]->pgo_dealloc) (object);
180}
181
182
183int
184vm_pager_get_pages(object, m, count, reqpage)
185 vm_object_t object;
186 vm_page_t *m;
187 int count;
188 int reqpage;
189{
190 return ((*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage));
191}
192
193int
194vm_pager_put_pages(object, m, count, sync, rtvals)
195 vm_object_t object;
196 vm_page_t *m;
197 int count;
198 boolean_t sync;
199 int *rtvals;
200{
201 return ((*pagertab[object->type]->pgo_putpages)(object, m, count, sync, rtvals));
202}
203
204boolean_t
205vm_pager_has_page(object, offset, before, after)
206 vm_object_t object;
207 vm_offset_t offset;
207 vm_pindex_t offset;
208 int *before;
209 int *after;
210{
211 return ((*pagertab[object->type]->pgo_haspage) (object, offset, before, after));
212}
213
214/*
215 * Called by pageout daemon before going back to sleep.
 216 * Gives pagers a chance to clean up any completed async paging operations.
217 */
218void
219vm_pager_sync()
220{
221 struct pagerops **pgops;
222
223 for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
224 if (pgops && ((*pgops)->pgo_sync != NULL))
225 (*(*pgops)->pgo_sync) ();
226}
227
228vm_offset_t
229vm_pager_map_page(m)
230 vm_page_t m;
231{
232 vm_offset_t kva;
233
234 kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
235 pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
236 return (kva);
237}
238
239void
240vm_pager_unmap_page(kva)
241 vm_offset_t kva;
242{
243 pmap_kremove(kva);
244 kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
245}
246
247vm_object_t
248vm_pager_object_lookup(pg_list, handle)
249 register struct pagerlst *pg_list;
250 void *handle;
251{
252 register vm_object_t object;
253
254 for (object = pg_list->tqh_first; object != NULL; object = object->pager_object_list.tqe_next)
255 if (object->handle == handle)
256 return (object);
257 return (NULL);
258}
259
260/*
261 * This routine loses a reference to the object -
262 * thus a reference must be gained before calling.
263 */
264int
265pager_cache(object, should_cache)
266 vm_object_t object;
267 boolean_t should_cache;
268{
269 if (object == NULL)
270 return (KERN_INVALID_ARGUMENT);
271
272 if (should_cache)
273 object->flags |= OBJ_CANPERSIST;
274 else
275 object->flags &= ~OBJ_CANPERSIST;
276
277 vm_object_deallocate(object);
278
279 return (KERN_SUCCESS);
280}
281
282/*
283 * allocate a physical buffer
284 */
285struct buf *
286getpbuf()
287{
288 int s;
289 struct buf *bp;
290
291 s = splbio();
292 /* get a bp from the swap buffer header pool */
293 while ((bp = bswlist.tqh_first) == NULL) {
294 bswneeded = 1;
295 tsleep(&bswneeded, PVM, "wswbuf", 0);
296 }
297 TAILQ_REMOVE(&bswlist, bp, b_freelist);
298 splx(s);
299
300 bzero(bp, sizeof *bp);
301 bp->b_rcred = NOCRED;
302 bp->b_wcred = NOCRED;
303 bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
304 bp->b_vnbufs.le_next = NOLIST;
305 return bp;
306}
307
308/*
309 * allocate a physical buffer, if one is available
310 */
311struct buf *
312trypbuf()
313{
314 int s;
315 struct buf *bp;
316
317 s = splbio();
318 if ((bp = bswlist.tqh_first) == NULL) {
319 splx(s);
320 return NULL;
321 }
322 TAILQ_REMOVE(&bswlist, bp, b_freelist);
323 splx(s);
324
325 bzero(bp, sizeof *bp);
326 bp->b_rcred = NOCRED;
327 bp->b_wcred = NOCRED;
328 bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
329 bp->b_vnbufs.le_next = NOLIST;
330 return bp;
331}
332
333/*
334 * release a physical buffer
335 */
336void
337relpbuf(bp)
338 struct buf *bp;
339{
340 int s;
341
342 s = splbio();
343
344 if (bp->b_rcred != NOCRED) {
345 crfree(bp->b_rcred);
346 bp->b_rcred = NOCRED;
347 }
348 if (bp->b_wcred != NOCRED) {
349 crfree(bp->b_wcred);
350 bp->b_wcred = NOCRED;
351 }
352 if (bp->b_vp)
353 pbrelvp(bp);
354
355 if (bp->b_flags & B_WANTED)
356 wakeup(bp);
357
358 TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
359
360 if (bswneeded) {
361 bswneeded = 0;
362 wakeup(&bswneeded);
363 }
364 splx(s);
365}