/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	ipc/ipc_entry.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Primitive functions to manipulate translation entries.
 */

#include <mach_debug.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>
#include <ipc/port.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_port.h>
#include <string.h>

/*
 *	Routine:	ipc_entry_lookup
 *	Purpose:
 *		Searches for an entry, given its name.
 *	Conditions:
 *		The space must be read or write locked throughout.
 *		The space must be active.
 */

ipc_entry_t
ipc_entry_lookup(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	mach_port_index_t index;
	ipc_entry_t entry;

	assert(is_active(space));

	index = MACH_PORT_INDEX(name);
	if (index < space->is_table_size) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name) ||
		    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	} else {
		entry = IE_NULL;
	}

	assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
	return entry;
}
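
/*
 *	Usage sketch (illustrative; not part of the original source):
 *	a port name packs a table index together with a generation
 *	number, so a stale name whose slot has since been reused fails
 *	the generation check above.  A typical caller, holding the
 *	space lock, treats a null result as an invalid name:
 *
 *		entry = ipc_entry_lookup(space, name);
 *		if (entry == IE_NULL) {
 *			is_write_unlock(space);
 *			return KERN_INVALID_NAME;
 *		}
 */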

/*
 *	Routine:	ipc_entries_hold
 *	Purpose:
 *		Verifies that there are at least 'entries_needed'
 *		members on the free list.
 *	Conditions:
 *		The space is write-locked and active throughout.
 *		An object may be locked.  Will not allocate memory.
 *	Returns:
 *		KERN_SUCCESS		Free entries were found.
 *		KERN_NO_SPACE		Not enough free entries.
 */

kern_return_t
ipc_entries_hold(
	ipc_space_t		space,
	uint32_t		entries_needed)
{
	ipc_entry_t table;
	mach_port_index_t next_free = 0;
	uint32_t i;

	assert(is_active(space));

	table = &space->is_table[0];

	for (i = 0; i < entries_needed; i++) {
		next_free = table[next_free].ie_next;
		if (next_free == 0) {
			return KERN_NO_SPACE;
		}
		assert(next_free < space->is_table_size);
		assert(table[next_free].ie_object == IO_NULL);
	}
	return KERN_SUCCESS;
}
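
/*
 *	Usage sketch (illustrative; not part of the original source):
 *	holding entries first lets a caller confirm up front, with the
 *	space write-locked and checked active, that every claim below
 *	is guaranteed to succeed.  The names and entries are locals of
 *	the hypothetical caller:
 *
 *		is_write_lock(space);
 *		kr = ipc_entries_hold(space, 2);
 *		if (kr != KERN_SUCCESS) {
 *			is_write_unlock(space);
 *			return kr;
 *		}
 *		kr = ipc_entry_claim(space, &name1, &entry1);
 *		assert(kr == KERN_SUCCESS);
 *		kr = ipc_entry_claim(space, &name2, &entry2);
 *		assert(kr == KERN_SUCCESS);
 */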

/*
 *	Routine:	ipc_entry_claim
 *	Purpose:
 *		Take formal ownership of a held entry.
 *	Conditions:
 *		The space is write-locked and active throughout.
 *		An object may be locked.  Will not allocate memory.
 *
 *	Note: The returned entry must be marked as modified before
 *	      releasing the space lock.
 */

kern_return_t
ipc_entry_claim(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	ipc_entry_t entry;
	ipc_entry_t table;
	mach_port_index_t first_free;
	mach_port_gen_t gen;
	mach_port_name_t new_name;

	table = &space->is_table[0];

	first_free = table->ie_next;
	assert(first_free != 0);

	entry = &table[first_free];
	table->ie_next = entry->ie_next;
	space->is_table_free--;

	assert(table->ie_next < space->is_table_size);

	/*
	 *	Initialize the new entry.  We need only
	 *	increment the generation number and clear ie_request.
	 */
	gen = IE_BITS_NEW_GEN(entry->ie_bits);
	entry->ie_bits = gen;
	entry->ie_request = IE_REQ_NONE;

	/*
	 *	The new name can't be MACH_PORT_NULL because index
	 *	is non-zero.  It can't be MACH_PORT_DEAD because
	 *	the table isn't allowed to grow big enough.
	 *	(See comment in ipc/ipc_table.h.)
	 */
	new_name = MACH_PORT_MAKE(first_free, gen);
	assert(MACH_PORT_VALID(new_name));
	*namep = new_name;
	*entryp = entry;

	return KERN_SUCCESS;
}
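
/*
 *	Usage sketch (illustrative; not part of the original source):
 *	per the note above, a caller fills in the claimed entry and
 *	marks it modified before dropping the space lock.  The object
 *	and type shown are stand-ins for whatever right is being
 *	created:
 *
 *		kr = ipc_entry_claim(space, &name, &entry);
 *		if (kr == KERN_SUCCESS) {
 *			entry->ie_object = object;
 *			entry->ie_bits |= MACH_PORT_TYPE_RECEIVE;
 *			ipc_entry_modified(space, name, entry);
 *		}
 *		is_write_unlock(space);
 */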

/*
 *	Routine:	ipc_entry_get
 *	Purpose:
 *		Tries to allocate an entry out of the space.
 *	Conditions:
 *		The space is write-locked and active throughout.
 *		An object may be locked.  Will not allocate memory.
 *	Returns:
 *		KERN_SUCCESS		A free entry was found.
 *		KERN_NO_SPACE		No entry allocated.
 */

kern_return_t
ipc_entry_get(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	kern_return_t kr;

	kr = ipc_entries_hold(space, 1);
	if (KERN_SUCCESS != kr)
		return kr;

	return ipc_entry_claim(space, namep, entryp);
}

/*
 *	Routine:	ipc_entry_alloc
 *	Purpose:
 *		Allocate an entry out of the space.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		An entry was allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory for an entry.
 */

kern_return_t
ipc_entry_alloc(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	kern_return_t kr;

	is_write_lock(space);

	for (;;) {
		if (!is_active(space)) {
			is_write_unlock(space);
			return KERN_INVALID_TASK;
		}

		kr = ipc_entry_get(space, namep, entryp);
		if (kr == KERN_SUCCESS)
			return kr;

		kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
		if (kr != KERN_SUCCESS)
			return kr; /* space is unlocked */
	}
}
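
/*
 *	Usage sketch (illustrative; not part of the original source):
 *	unlike ipc_entry_get(), this routine takes the space lock
 *	itself and grows the table on demand, so a caller needs no
 *	retry loop of its own; on success it just publishes the entry
 *	and unlocks.  The dead-name type here is only an example:
 *
 *		kr = ipc_entry_alloc(space, &name, &entry);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *		entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME;
 *		ipc_entry_modified(space, name, entry);
 *		is_write_unlock(space);
 */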

/*
 *	Routine:	ipc_entry_alloc_name
 *	Purpose:
 *		Allocates/finds an entry with a specific name.
 *		If an existing entry is returned, its type will be nonzero.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		Found existing entry with same name.
 *		KERN_SUCCESS		Allocated a new entry.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 *		KERN_FAILURE		Couldn't allocate requested name.
 */

kern_return_t
ipc_entry_alloc_name(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		*entryp)
{
	mach_port_index_t index = MACH_PORT_INDEX(name);
	mach_port_gen_t gen = MACH_PORT_GEN(name);
	kern_return_t kr;

	assert(MACH_PORT_VALID(name));

	is_write_lock(space);

	for (;;) {
		ipc_entry_t entry;

		if (!is_active(space)) {
			is_write_unlock(space);
			return KERN_INVALID_TASK;
		}

		/*
		 *	If we are under the table cutoff,
		 *	there are four cases:
		 *		1) The entry is reserved (index 0)
		 *		2) The entry is in use, for the same name
		 *		3) The entry is in use, for a different name
		 *		4) The entry is free
		 *	For a task with a "fast" IPC space, we disallow
		 *	cases 1) and 3), because ports cannot be renamed.
		 */
		if (index < space->is_table_size) {
			ipc_entry_t table = space->is_table;

			entry = &table[index];

			if (index == 0) {
				/* case #1 -- the entry is reserved */
				assert(!IE_BITS_TYPE(entry->ie_bits));
				assert(!IE_BITS_GEN(entry->ie_bits));
				is_write_unlock(space);
				return KERN_FAILURE;
			} else if (IE_BITS_TYPE(entry->ie_bits)) {
				if (IE_BITS_GEN(entry->ie_bits) == gen) {
					/* case #2 -- the entry is in use, for the same name */
					*entryp = entry;
					return KERN_SUCCESS;
				} else {
					/* case #3 -- the entry is in use, for a different name */
					/* Collisions are not allowed */
					is_write_unlock(space);
					return KERN_FAILURE;
				}
			} else {
				mach_port_index_t free_index, next_index;

				/*
				 *	case #4 -- the entry is free
				 *	Rip the entry out of the free list.
				 */

				for (free_index = 0;
				     (next_index = table[free_index].ie_next)
							!= index;
				     free_index = next_index)
					continue;

				table[free_index].ie_next =
					table[next_index].ie_next;
				space->is_table_free--;

				/* mark the previous entry modified - reconstructing the name */
				ipc_entry_modified(space,
						   MACH_PORT_MAKE(free_index,
							IE_BITS_GEN(table[free_index].ie_bits)),
						   &table[free_index]);

				entry->ie_bits = gen;
				entry->ie_request = IE_REQ_NONE;
				*entryp = entry;

				assert(entry->ie_object == IO_NULL);
				return KERN_SUCCESS;
			}
		}

		/*
		 *	We grow the table so that the name
		 *	index fits in the array space.
		 *	Because the space will be unlocked,
		 *	we must restart.
		 */
		kr = ipc_entry_grow_table(space, index + 1);
		assert(kr != KERN_NO_SPACE);
		if (kr != KERN_SUCCESS) {
			/* space is unlocked */
			return kr;
		}
		continue;
	}
}
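
/*
 *	Usage sketch (illustrative; not part of the original source):
 *	callers on the mach_port_allocate_name() path must distinguish
 *	the two KERN_SUCCESS outcomes documented above, typically by
 *	checking the type bits of the returned entry:
 *
 *		kr = ipc_entry_alloc_name(space, name, &entry);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *		if (IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_NONE) {
 *			is_write_unlock(space);
 *			return KERN_NAME_EXISTS;
 *		}
 */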

/*
 *	Routine:	ipc_entry_dealloc
 *	Purpose:
 *		Deallocates an entry from a space.
 *	Conditions:
 *		The space must be write-locked throughout.
 *		The space must be active.
 */

void
ipc_entry_dealloc(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t index;

	assert(is_active(space));
	assert(entry->ie_object == IO_NULL);
	assert(entry->ie_request == IE_REQ_NONE);

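	/*
	 *	Belt and suspenders: asserts compile away in release
	 *	kernels, so keep a hard check that no port-death
	 *	request is still registered on this entry.
	 */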
#if 1
	if (entry->ie_request != IE_REQ_NONE)
		panic("ipc_entry_dealloc()\n");
#endif

	index = MACH_PORT_INDEX(name);
	table = space->is_table;
	size = space->is_table_size;

	if ((index < size) && (entry == &table[index])) {
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
		entry->ie_bits &= IE_BITS_GEN_MASK;
		entry->ie_next = table->ie_next;
		table->ie_next = index;
		space->is_table_free++;
	} else {
		/*
		 *	The entry does not match the name.  That should
		 *	never happen: development builds trip the asserts
		 *	below, and release builds leave the entry alone.
		 */
		assert(index < size);
		assert(entry == &table[index]);
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
	}
	ipc_entry_modified(space, name, entry);
}
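
/*
 *	Usage sketch (illustrative; not part of the original source):
 *	a caller clears the object and request before handing the
 *	entry back, matching the conditions asserted above:
 *
 *		entry->ie_object = IO_NULL;
 *		entry->ie_request = IE_REQ_NONE;
 *		ipc_entry_dealloc(space, name, entry);
 *		is_write_unlock(space);
 */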

/*
 *	Routine:	ipc_entry_modified
 *	Purpose:
 *		Note that an entry was modified in a space.
 *	Conditions:
 *		Assumes exclusive write access to the space,
 *		either through a write lock or being the cleaner
 *		on an inactive space.
 */

void
ipc_entry_modified(
	ipc_space_t		space,
	mach_port_name_t	name,
	__assert_only ipc_entry_t entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t index;

	index = MACH_PORT_INDEX(name);
	table = space->is_table;
	size = space->is_table_size;

	assert(index < size);
	assert(entry == &table[index]);

	assert(space->is_low_mod <= size);
	assert(space->is_high_mod < size);

	if (index < space->is_low_mod)
		space->is_low_mod = index;
	if (index > space->is_high_mod)
		space->is_high_mod = index;
}
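
/*
 *	Worked example (added for illustration):
 *	ipc_entry_grow_table() resets the window to an empty state,
 *	is_low_mod == table size and is_high_mod == 0.  Marking entry 7
 *	and then entry 3 modified moves the bounds to is_low_mod == 3
 *	and is_high_mod == 7, so the copier rescans exactly indices
 *	3 through 7.
 */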
485
486#define IPC_ENTRY_GROW_STATS 1
487#if IPC_ENTRY_GROW_STATS
488static uint64_t ipc_entry_grow_count = 0;
489static uint64_t ipc_entry_grow_rescan = 0;
490static uint64_t ipc_entry_grow_rescan_max = 0;
491static uint64_t ipc_entry_grow_rescan_entries = 0;
492static uint64_t ipc_entry_grow_rescan_entries_max = 0;
493static uint64_t	ipc_entry_grow_freelist_entries = 0;
494static uint64_t	ipc_entry_grow_freelist_entries_max = 0;
495#endif

/*
 *	Routine:	ipc_entry_grow_table
 *	Purpose:
 *		Grows the table in a space.
 *	Conditions:
 *		The space must be write-locked and active before.
 *		If successful, the space is also returned locked.
 *		On failure, the space is returned unlocked.
 *		Allocates memory.
 *	Returns:
 *		KERN_SUCCESS		Grew the table.
 *		KERN_SUCCESS		Somebody else grew the table.
 *		KERN_SUCCESS		The space died.
 *		KERN_NO_SPACE		Table has maximum size already.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate a new table.
 */

kern_return_t
ipc_entry_grow_table(
	ipc_space_t		space,
	ipc_table_elems_t	target_size)
{
	ipc_entry_num_t osize, size, nsize, psize;
	ipc_entry_t otable, table;
	ipc_table_size_t oits, its, nits;
	mach_port_index_t i, free_index;
	mach_port_index_t low_mod, hi_mod;
	ipc_table_index_t sanity;
#if IPC_ENTRY_GROW_STATS
	uint64_t rescan_count = 0;
#endif

	assert(is_active(space));

	if (is_growing(space)) {
		/*
		 *	Somebody else is growing the table.
		 *	We just wait for them to finish.
		 */
		is_write_sleep(space);
		return KERN_SUCCESS;
	}

	otable = space->is_table;

	its = space->is_table_next;
	size = its->its_size;

	/*
	 *	Since is_table_next points to the next natural size
	 *	we can identify the current size entry.
	 */
	oits = its - 1;
	osize = oits->its_size;

	/*
	 *	If there is no target size, then the new size is simply
	 *	specified by is_table_next.  If there is a target
	 *	size, then search for the next entry.
	 */
	if (target_size != ITS_SIZE_NONE) {
		if (target_size <= osize) {
			/* the space is locked */
			return KERN_SUCCESS;
		}

		psize = osize;
		while ((psize != size) && (target_size > size)) {
			psize = size;
			its++;
			size = its->its_size;
		}
		if (psize == size) {
			is_write_unlock(space);
			return KERN_NO_SPACE;
		}
	}

	if (osize == size) {
		is_write_unlock(space);
		return KERN_NO_SPACE;
	}

	nits = its + 1;
	nsize = nits->its_size;
	assert((osize < size) && (size <= nsize));

	/*
	 *	We'll attempt to grow the table.
	 *
	 *	Because we will be copying without the space lock, reset
	 *	the is_low_mod mark to just beyond the end of the current
	 *	table.  Modification of entries (other than hashes) will
	 *	bump this downward, and we only have to reprocess entries
	 *	above that mark.  Eventually, we'll get done.
	 */
	is_start_growing(space);
	space->is_low_mod = osize;
	space->is_high_mod = 0;
#if IPC_ENTRY_GROW_STATS
	ipc_entry_grow_count++;
#endif
	is_write_unlock(space);

	table = it_entries_alloc(its);
	if (table == IE_NULL) {
		is_write_lock(space);
		is_done_growing(space);
		is_write_unlock(space);
		thread_wakeup((event_t) space);
		return KERN_RESOURCE_SHORTAGE;
	}

	/* initialize new entries (free chain in ascending index order) */
	for (i = osize; i < size; i++) {
		table[i].ie_object = IO_NULL;
		table[i].ie_bits = IE_BITS_GEN_MASK;
		table[i].ie_index = 0;
		table[i].ie_next = i + 1;
	}
	table[size-1].ie_next = 0;
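	/*
	 *	This chain is still private to the current thread; it is
	 *	spliced onto the space's freelist only after the copy of
	 *	the old entries settles (below, table[free_index].ie_next
	 *	is pointed at the first new index, osize).
	 */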

	/* zero the slots in the new table that will receive the old entries */
	memset((void *)table, 0, osize * sizeof(*table));

	low_mod = 0;
	hi_mod = osize - 1;
 rescan:
	/*
	 *	Within the range of the table that changed, determine what we
	 *	have to take action on.  For each entry, take a snapshot of the
	 *	corresponding entry in the old table (so it won't change
	 *	during this iteration).  The snapshot may not be self-consistent
	 *	(if we caught it in the middle of being changed), so be very
	 *	cautious with the values.
	 */
	for (i = low_mod; i <= hi_mod; i++) {
		ipc_entry_t entry = &table[i];
		struct ipc_entry osnap = otable[i];

		if (entry->ie_object != osnap.ie_object ||
		    IE_BITS_TYPE(entry->ie_bits) != IE_BITS_TYPE(osnap.ie_bits)) {

			if (entry->ie_object != IO_NULL &&
			    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND)
				ipc_hash_table_delete(table, size, entry->ie_object, i, entry);

			entry->ie_object = osnap.ie_object;
			entry->ie_bits = osnap.ie_bits;
			entry->ie_request = osnap.ie_request; /* or ie_next */

			if (entry->ie_object != IO_NULL &&
			    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND)
				ipc_hash_table_insert(table, size, entry->ie_object, i, entry);
		} else {
			assert(entry->ie_object == osnap.ie_object);
			entry->ie_bits = osnap.ie_bits;
			entry->ie_request = osnap.ie_request; /* or ie_next */
		}
	}
	table[0].ie_next = otable[0].ie_next;  /* always rebase the freelist */

	/*
	 *	Find the end of the freelist (should be short).  But be careful:
	 *	the list items can change, so only follow through truly free
	 *	entries (no problem stopping short in those cases, because we'll
	 *	rescan).
	 */
	free_index = 0;
	for (sanity = 0; sanity < osize; sanity++) {
		if (table[free_index].ie_object != IO_NULL)
			break;
		i = table[free_index].ie_next;
		if (i == 0 || i >= osize)
			break;
		free_index = i;
	}
#if IPC_ENTRY_GROW_STATS
	ipc_entry_grow_freelist_entries += sanity;
	if (sanity > ipc_entry_grow_freelist_entries_max)
		ipc_entry_grow_freelist_entries_max = sanity;
#endif

	is_write_lock(space);

	/*
	 *	We need to do a wakeup on the space,
	 *	to rouse waiting threads.  We defer
	 *	this until the space is unlocked,
	 *	because we don't want them to spin.
	 */

	if (!is_active(space)) {
		/*
		 *	The space died while it was unlocked.
		 */
		is_done_growing(space);
		is_write_unlock(space);
		thread_wakeup((event_t) space);
		it_entries_free(its, table);
		is_write_lock(space);
		return KERN_SUCCESS;
	}

	/* If the space changed while unlocked, go back and process the changes */
	if (space->is_low_mod < osize) {
		assert(space->is_high_mod > 0);
		low_mod = space->is_low_mod;
		space->is_low_mod = osize;
		hi_mod = space->is_high_mod;
		space->is_high_mod = 0;
		is_write_unlock(space);
#if IPC_ENTRY_GROW_STATS
		rescan_count++;
		if (rescan_count > ipc_entry_grow_rescan_max)
			ipc_entry_grow_rescan_max = rescan_count;

		ipc_entry_grow_rescan++;
		ipc_entry_grow_rescan_entries += hi_mod - low_mod + 1;
		if (hi_mod - low_mod + 1 > ipc_entry_grow_rescan_entries_max)
			ipc_entry_grow_rescan_entries_max = hi_mod - low_mod + 1;
#endif
		goto rescan;
	}

	/* link the new entries onto the tail of the freelist */
	assert(table[free_index].ie_next == 0 &&
	       table[free_index].ie_object == IO_NULL);
	table[free_index].ie_next = osize;

	assert(space->is_table == otable);
	assert((space->is_table_next == its) ||
	    (target_size != ITS_SIZE_NONE));
	assert(space->is_table_size == osize);

	space->is_table = table;
	space->is_table_size = size;
	space->is_table_next = nits;
	space->is_table_free += size - osize;

	is_done_growing(space);
	is_write_unlock(space);

	thread_wakeup((event_t) space);

	/*
	 *	Now we need to free the old table.
	 */
	it_entries_free(oits, otable);
	is_write_lock(space);

	return KERN_SUCCESS;
}

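/*
 *	Grow protocol, in brief (summary comment added for clarity):
 *	the copy above runs without the space lock, so concurrent
 *	threads may legitimately mutate the old table meanwhile.  Every
 *	such mutation passes through ipc_entry_modified(), which widens
 *	the is_low_mod/is_high_mod window; the rescan loop repeats until
 *	that window comes back empty, and only then is the new table
 *	published under the write lock.
 */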