/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <vm/pmap.h>
#include <sys/uio_internal.h>
#include <kern/kalloc.h>

#include <kdebug.h>

#include <sys/kdebug.h>
#define DBG_UIO_COPYOUT 16
#define DBG_UIO_COPYIN  17

#if DEBUG
#include <kern/simple_lock.h>

static uint32_t		uio_t_count = 0;
#endif /* DEBUG */

#define IS_VALID_UIO_SEGFLG(segflg)  \
	( (segflg) == UIO_USERSPACE || \
	  (segflg) == UIO_SYSSPACE || \
	  (segflg) == UIO_USERSPACE32 || \
	  (segflg) == UIO_USERSPACE64 || \
	  (segflg) == UIO_SYSSPACE32 || \
	  (segflg) == UIO_USERISPACE || \
	  (segflg) == UIO_PHYS_USERSPACE || \
	  (segflg) == UIO_PHYS_SYSSPACE || \
	  (segflg) == UIO_USERISPACE32 || \
	  (segflg) == UIO_PHYS_USERSPACE32 || \
	  (segflg) == UIO_USERISPACE64 || \
	  (segflg) == UIO_PHYS_USERSPACE64 )

/*
 * Returns:	0			Success
 *	uiomove64:EFAULT
 *
 * Notes:	The first argument should be a caddr_t, but const poisoning
 *		for typedef'ed types doesn't work in gcc.
 */
int
uiomove(const char * cp, int n, uio_t uio)
{
	return uiomove64((const addr64_t)(uintptr_t)cp, n, uio);
}
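
/*
 * Usage sketch (added for illustration; not part of the original file).
 * A typical caller hands uiomove() a kernel buffer and lets the uio's
 * uio_rw direction decide whether bytes are copied out to or in from the
 * caller's iovecs.  The device read handler and message buffer below are
 * hypothetical.
 */
#if 0
static char example_msg[] = "hello from the kernel\n";

static int
example_read(__unused dev_t dev, struct uio *uio, __unused int ioflag)
{
	int	len = sizeof(example_msg) - 1;
	off_t	off = uio_offset(uio);

	if (off >= len) {
		return 0;	/* EOF */
	}
	/* uiomove caps the copy at uio_resid(uio) internally */
	return uiomove(&example_msg[off], (int)(len - off), uio);
}
#endif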

/*
 * Returns:	0			Success
 *		EFAULT
 *	copyout:EFAULT
 *	copyin:EFAULT
 *	copywithin:EFAULT
 *	copypv:EFAULT
 */
int
uiomove64(const addr64_t c_cp, int n, struct uio *uio)
{
	addr64_t cp = c_cp;
	uint64_t acnt;
	int error = 0;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
		panic("uiomove: mode");
#endif

#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) {
		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	while (n > 0 && uio_resid(uio)) {
		uio_update(uio, 0);
		acnt = uio_curriovlen(uio);
		if (acnt == 0) {
			continue;
		}
		if (n > 0 && acnt > (uint64_t)n)
			acnt = n;

		switch ((int) uio->uio_segflg) {

		case UIO_USERSPACE64:
		case UIO_USERISPACE64:
		case UIO_USERSPACE32:
		case UIO_USERISPACE32:
		case UIO_USERSPACE:
		case UIO_USERISPACE:
			// LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					     (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 0, 0);

				error = copyout(CAST_DOWN(caddr_t, cp), uio->uio_iovs.uiovp->iov_base, acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					     (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 0, 0);
			}
			else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					     (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 0, 0);

				error = copyin(uio->uio_iovs.uiovp->iov_base, CAST_DOWN(caddr_t, cp), acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					     (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 0, 0);
			}
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE32:
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				error = copywithin(CAST_DOWN(caddr_t, cp), CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base),
						   acnt);
			else
				error = copywithin(CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base), CAST_DOWN(caddr_t, cp),
						   acnt);
			break;

		case UIO_PHYS_USERSPACE64:
		case UIO_PHYS_USERSPACE32:
		case UIO_PHYS_USERSPACE:
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					     (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 1, 0);

				/* Copy physical to virtual */
				error = copypv((addr64_t)cp, uio->uio_iovs.uiovp->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error)
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					     (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 1, 0);
			}
			else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					     (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 1, 0);

				/* Copy virtual to physical */
				error = copypv(uio->uio_iovs.uiovp->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					     (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 1, 0);
			}
			if (error)
				return (error);
			break;

		case UIO_PHYS_SYSSPACE:
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					     (int)cp, (uintptr_t)uio->uio_iovs.kiovp->iov_base, acnt, 2, 0);

				/* Copy physical to virtual */
				error = copypv((addr64_t)cp, uio->uio_iovs.kiovp->iov_base, acnt, cppvKmap | cppvPsrc | cppvNoRefSrc);
				if (error)
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					     (int)cp, (uintptr_t)uio->uio_iovs.kiovp->iov_base, acnt, 2, 0);
			}
			else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					     (uintptr_t)uio->uio_iovs.kiovp->iov_base, (int)cp, acnt, 2, 0);

				/* Copy virtual to physical */
				error = copypv(uio->uio_iovs.kiovp->iov_base, (addr64_t)cp, acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					     (uintptr_t)uio->uio_iovs.kiovp->iov_base, (int)cp, acnt, 2, 0);
			}
			if (error)
				return (error);
			break;

		default:
			break;
		}
		uio_update(uio, acnt);
		cp += acnt;
		n -= acnt;
	}
	return (error);
}

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	if (uio_resid(uio) <= 0)
		panic("ureadc: non-positive resid");
	uio_update(uio, 0);
	if (uio->uio_iovcnt == 0)
		panic("ureadc: non-positive iovcnt");
	if (uio_curriovlen(uio) <= 0)
		panic("ureadc: non-positive iovlen");

	switch ((int) uio->uio_segflg) {

	case UIO_USERSPACE32:
	case UIO_USERSPACE:
	case UIO_USERISPACE32:
	case UIO_USERISPACE:
	case UIO_USERSPACE64:
	case UIO_USERISPACE64:
		if (subyte((user_addr_t)uio->uio_iovs.uiovp->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE32:
	case UIO_SYSSPACE:
		*(CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base)) = c;
		break;

	default:
		break;
	}
	uio_update(uio, 1);
	return (0);
}
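
/*
 * Usage sketch (added for illustration; not part of the original file).
 * ureadc() suits producers that emit one byte at a time, e.g. draining a
 * small kernel buffer into a read request.  The buffer and count below
 * are hypothetical.
 */
#if 0
static int
example_drain(struct uio *uio, const unsigned char *buf, int avail)
{
	int	i, error = 0;

	/* stop when the caller's buffers are full or we run out of data */
	for (i = 0; i < avail && uio_resid(uio) > 0; i++) {
		error = ureadc(buf[i], uio);
		if (error)
			break;
	}
	return error;
}
#endif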

/*
 * General routine to allocate a hash table.
 */
void *
hashinit(int elements, int type, u_long *hashmask)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad cnt");
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	MALLOC(hashtbl, struct generic *,
		hashsize * sizeof(*hashtbl), type, M_WAITOK|M_ZERO);
	if (hashtbl != NULL) {
		for (i = 0; i < hashsize; i++)
			LIST_INIT(&hashtbl[i]);
		*hashmask = hashsize - 1;
	}
	return (hashtbl);
}
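
/*
 * Usage sketch (added for illustration; not part of the original file).
 * hashinit() rounds the element count down to a power of two and hands back
 * the bucket array plus a mask, so callers index buckets with
 * (hashval & mask).  The entry type and table names below are hypothetical.
 */
#if 0
struct example_entry {
	LIST_ENTRY(example_entry)	ee_link;
	u_long				ee_key;
};
LIST_HEAD(example_head, example_entry);

static struct example_head	*example_tbl;
static u_long			example_mask;

static void
example_hash_setup(void)
{
	/* for 100 desired elements this yields 64 buckets and a mask of 63 */
	example_tbl = hashinit(100, M_TEMP, &example_mask);
}

static struct example_head *
example_bucket(u_long key)
{
	return &example_tbl[key & example_mask];
}
#endif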

/*
 * uio_resid - return the residual IO value for the given uio_t
 */
user_ssize_t uio_resid( uio_t a_uio )
{
#if DEBUG
	if (a_uio == NULL) {
		printf("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
/* 	if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
/* 		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
/* 	} */
#endif /* DEBUG */

	/* return 0 if there are no active iovecs */
	if (a_uio == NULL) {
		return( 0 );
	}

	return( a_uio->uio_resid_64 );
}

/*
 * uio_setresid - set the residual IO value for the given uio_t
 */
void uio_setresid( uio_t a_uio, user_ssize_t a_value )
{
#if DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
/* 	if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
/* 		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
/* 	} */
#endif /* DEBUG */

	if (a_uio == NULL) {
		return;
	}

	a_uio->uio_resid_64 = a_value;
	return;
}

/*
 * uio_curriovbase - return the base address of the current iovec associated
 *	with the given uio_t.  May return 0.
 */
user_addr_t uio_curriovbase( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return(0);
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		return(a_uio->uio_iovs.uiovp->iov_base);
	}
	return((user_addr_t)a_uio->uio_iovs.kiovp->iov_base);
}

/*
 * uio_curriovlen - return the length value of the current iovec associated
 *	with the given uio_t.
 */
user_size_t uio_curriovlen( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return(0);
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		return(a_uio->uio_iovs.uiovp->iov_len);
	}
	return((user_size_t)a_uio->uio_iovs.kiovp->iov_len);
}

/*
 * uio_setcurriovlen - set the length value of the current iovec associated
 *	with the given uio_t.
 */
__private_extern__ void uio_setcurriovlen( uio_t a_uio, user_size_t a_value )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		a_uio->uio_iovs.uiovp->iov_len = a_value;
	}
	else {
#if LP64_DEBUG
		if (a_value > 0xFFFFFFFFull) {
			panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
		}
#endif /* LP64_DEBUG */
		a_uio->uio_iovs.kiovp->iov_len = (size_t)a_value;
	}
	return;
}

/*
 * uio_iovcnt - return count of active iovecs for the given uio_t
 */
int uio_iovcnt( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(0);
	}

	return( a_uio->uio_iovcnt );
}

/*
 * uio_offset - return the current offset value for the given uio_t
 */
off_t uio_offset( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(0);
	}
	return( a_uio->uio_offset );
}

/*
 * uio_setoffset - set the current offset value for the given uio_t
 */
void uio_setoffset( uio_t a_uio, off_t a_offset )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}
	a_uio->uio_offset = a_offset;
	return;
}

/*
 * uio_rw - return the read / write flag for the given uio_t
 */
int uio_rw( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(-1);
	}
	return( a_uio->uio_rw );
}

/*
 * uio_setrw - set the read / write flag for the given uio_t
 */
void uio_setrw( uio_t a_uio, int a_value )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return;
	}

#if LP64_DEBUG
	if (!(a_value == UIO_READ || a_value == UIO_WRITE)) {
		panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_value == UIO_READ || a_value == UIO_WRITE) {
		a_uio->uio_rw = a_value;
	}
	return;
}

/*
 * uio_isuserspace - return a non-zero value if the address space
 * flag is for a user address space (could be 32 or 64 bit).
 */
int uio_isuserspace( uio_t a_uio )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return(0);
	}

	if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) {
		return( 1 );
	}
	return( 0 );
}


/*
 * uio_create - create an uio_t.
 * 	Space is allocated to hold up to a_iovcount number of iovecs.  The uio_t
 *	is not fully initialized until all iovecs are added using uio_addiov calls.
 *	a_iovcount is the maximum number of iovecs you may add.
 */
uio_t uio_create( int a_iovcount,		/* number of iovecs */
		  off_t a_offset,		/* current offset */
		  int a_spacetype,		/* type of address space */
		  int a_iodirection )		/* read or write flag */
{
	void *		my_buf_p;
	size_t		my_size;
	uio_t		my_uio;

	my_size = UIO_SIZEOF(a_iovcount);
	my_buf_p = kalloc(my_size);
	my_uio = uio_createwithbuffer( a_iovcount,
				       a_offset,
				       a_spacetype,
				       a_iodirection,
				       my_buf_p,
				       my_size );
	if (my_uio != 0) {
		/* leave a note that we allocated this uio_t */
		my_uio->uio_flags |= UIO_FLAGS_WE_ALLOCED;
#if DEBUG
		(void)hw_atomic_add(&uio_t_count, 1);
#endif
	}

	return( my_uio );
}
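
/*
 * Usage sketch (added for illustration; not part of the original file).
 * The usual lifecycle is uio_create(), one or more uio_addiov() calls to
 * describe the caller's buffers, the data movement itself, then uio_free().
 * The user address, length, and function name below are hypothetical.
 */
#if 0
static int
example_copy_to_user(user_addr_t user_buf, user_size_t user_len,
		     const char *kern_buf, int kern_len)
{
	uio_t	auio;
	int	error;

	/*
	 * UIO_USERSPACE64 assumes a 64-bit user process; UIO_READ means the
	 * data flows from the kernel out to the user buffer.
	 */
	auio = uio_create(1, 0, UIO_USERSPACE64, UIO_READ);
	if (auio == NULL) {
		return ENOMEM;
	}
	if (uio_addiov(auio, user_buf, user_len) != 0) {
		uio_free(auio);
		return EINVAL;
	}
	error = uiomove(kern_buf, kern_len, auio);
	uio_free(auio);
	return error;
}
#endif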


/*
 * uio_createwithbuffer - create an uio_t.
 * 	Create a uio_t using the given buffer.  The uio_t
 *	is not fully initialized until all iovecs are added using uio_addiov calls.
 *	a_iovcount is the maximum number of iovecs you may add.
 *	This call may fail if the given buffer is not large enough.
 */
__private_extern__ uio_t
uio_createwithbuffer( int a_iovcount,		/* number of iovecs */
		      off_t a_offset,		/* current offset */
		      int a_spacetype,		/* type of address space */
		      int a_iodirection,	/* read or write flag */
		      void *a_buf_p,		/* pointer to a uio_t buffer */
		      size_t a_buffer_size )	/* size of uio_t buffer */
{
	uio_t		my_uio = (uio_t) a_buf_p;
	size_t		my_size;

	my_size = UIO_SIZEOF(a_iovcount);
	if (a_buffer_size < my_size) {
#if DEBUG
		panic("%s :%d - a_buffer_size is too small\n", __FILE__, __LINE__);
#endif /* DEBUG */
		return( NULL );
	}
	my_size = a_buffer_size;

#if DEBUG
	if (my_uio == 0) {
		panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__);
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("%s :%d - invalid address space type\n", __FILE__, __LINE__);
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__);
	}
	if (a_iovcount > UIO_MAXIOV) {
		panic("%s :%d - invalid a_iovcount\n", __FILE__, __LINE__);
	}
#endif /* DEBUG */

	bzero(my_uio, my_size);
	my_uio->uio_size = my_size;

	/*
	 * we use uio_segflg to indicate if the uio_t is the new format or
	 * old (pre LP64 support) legacy format
	 * This switch statement should canonicalize incoming space type
	 * to one of UIO_USERSPACE32/64, UIO_PHYS_USERSPACE32/64, or
	 * UIO_SYSSPACE/UIO_PHYS_SYSSPACE
	 */
	switch (a_spacetype) {
	case UIO_USERSPACE:
		my_uio->uio_segflg = UIO_USERSPACE32;
		break;
	case UIO_SYSSPACE32:
		my_uio->uio_segflg = UIO_SYSSPACE;
		break;
	case UIO_PHYS_USERSPACE:
		my_uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;
	default:
		my_uio->uio_segflg = a_spacetype;
		break;
	}

	if (a_iovcount > 0) {
		my_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)my_uio) + sizeof(struct uio));
	}
	else {
		my_uio->uio_iovs.uiovp = NULL;
	}

	my_uio->uio_max_iovs = a_iovcount;
	my_uio->uio_offset = a_offset;
	my_uio->uio_rw = a_iodirection;
	my_uio->uio_flags = UIO_FLAGS_INITED;

	return( my_uio );
}
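
/*
 * Usage sketch (added for illustration; not part of the original file).
 * Callers that want to avoid a heap allocation can carve the uio out of a
 * caller-supplied buffer sized with UIO_SIZEOF(); this is the common
 * "uio on the stack" pattern.  The addresses and names below are
 * hypothetical.
 */
#if 0
static int
example_stack_uio(user_addr_t user_buf, user_size_t user_len,
		  char *kbuf, int klen)
{
	char	uio_buf[UIO_SIZEOF(1)];
	uio_t	auio;

	auio = uio_createwithbuffer(1, 0, UIO_USERSPACE64, UIO_WRITE,
				    &uio_buf[0], sizeof(uio_buf));
	if (auio == NULL || uio_addiov(auio, user_buf, user_len) != 0) {
		return EINVAL;
	}
	/* UIO_WRITE: data flows from the user buffer into kbuf */
	return uiomove(kbuf, klen, auio);
}
#endif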

/*
 * uio_spacetype - return the address space type for the given uio_t
 */
__private_extern__ int uio_spacetype( uio_t a_uio )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return(-1);
	}

	return( a_uio->uio_segflg );
}

/*
 * uio_iovsaddr - get the address of the iovec array for the given uio_t.
 * This returns the location of the iovecs within the uio.
 * NOTE - for compatibility mode we just return the current value in uio_iovs
 * which will increase as the IO is completed and is NOT embedded within the
 * uio, it is a separate array of one or more iovecs.
 */
__private_extern__ struct user_iovec * uio_iovsaddr( uio_t a_uio )
{
	struct user_iovec *	my_addr;

	if (a_uio == NULL) {
		return(NULL);
	}

	if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) {
		/* we need this for compatibility mode. */
		my_addr = (struct user_iovec *) a_uio->uio_iovs.uiovp;
	}
	else {
#if DEBUG
		panic("uio_iovsaddr called for UIO_SYSSPACE request");
#endif
		my_addr = 0;
	}
	return(my_addr);
}

/*
 * uio_reset - reset an uio_t.
 * 	Reset the given uio_t to initial values.  The uio_t is not fully initialized
 * 	until all iovecs are added using uio_addiov calls.
 *	The a_iovcount value passed to uio_create is the maximum number of
 *	iovecs you may add.
 */
void uio_reset( uio_t a_uio,
		off_t a_offset,			/* current offset */
		int a_spacetype,		/* type of address space */
		int a_iodirection )		/* read or write flag */
{
	vm_size_t	my_size;
	int		my_max_iovs;
	u_int32_t	my_old_flags;

#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("%s :%d - invalid address space type\n", __FILE__, __LINE__);
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	my_size = a_uio->uio_size;
	my_old_flags = a_uio->uio_flags;
	my_max_iovs = a_uio->uio_max_iovs;
	bzero(a_uio, my_size);
	a_uio->uio_size = my_size;

	/*
	 * we use uio_segflg to indicate if the uio_t is the new format or
	 * old (pre LP64 support) legacy format
	 * This switch statement should canonicalize incoming space type
	 * to one of UIO_USERSPACE32/64, UIO_PHYS_USERSPACE32/64, or
	 * UIO_SYSSPACE/UIO_PHYS_SYSSPACE
	 */
	switch (a_spacetype) {
	case UIO_USERSPACE:
		a_uio->uio_segflg = UIO_USERSPACE32;
		break;
	case UIO_SYSSPACE32:
		a_uio->uio_segflg = UIO_SYSSPACE;
		break;
	case UIO_PHYS_USERSPACE:
		a_uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;
	default:
		a_uio->uio_segflg = a_spacetype;
		break;
	}

	if (my_max_iovs > 0) {
		a_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)a_uio) + sizeof(struct uio));
	}
	else {
		a_uio->uio_iovs.uiovp = NULL;
	}

	a_uio->uio_max_iovs = my_max_iovs;
	a_uio->uio_offset = a_offset;
	a_uio->uio_rw = a_iodirection;
	a_uio->uio_flags = my_old_flags;

	return;
}

/*
 * uio_free - free a uio_t allocated via uio_create.  This also frees all
 * 	associated iovecs.
 */
void uio_free( uio_t a_uio )
{
#if DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - passing NULL uio_t\n", __FILE__, __LINE__);
	}
#endif /* DEBUG */

	if (a_uio != NULL && (a_uio->uio_flags & UIO_FLAGS_WE_ALLOCED) != 0) {
#if DEBUG
		if (hw_atomic_sub(&uio_t_count, 1) == UINT_MAX)
			panic("%s :%d - uio_t_count underflow\n", __FILE__, __LINE__);
#endif
		kfree(a_uio, a_uio->uio_size);
	}
}

/*
 * uio_addiov - add an iovec to the given uio_t.  You may call this up to
 * 	the a_iovcount number that was passed to uio_create.  This call will
 * 	increment the residual IO count as iovecs are added to the uio_t.
 *	Returns 0 if the add was successful, else non-zero.
 */
int uio_addiov( uio_t a_uio, user_addr_t a_baseaddr, user_size_t a_length )
{
	int			i;

	if (a_uio == NULL) {
#if DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* DEBUG */
		return(-1);
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.uiovp[i].iov_len == 0 && a_uio->uio_iovs.uiovp[i].iov_base == 0) {
				a_uio->uio_iovs.uiovp[i].iov_len = a_length;
				a_uio->uio_iovs.uiovp[i].iov_base = a_baseaddr;
				a_uio->uio_iovcnt++;
				a_uio->uio_resid_64 += a_length;
				return( 0 );
			}
		}
	}
	else {
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.kiovp[i].iov_len == 0 && a_uio->uio_iovs.kiovp[i].iov_base == 0) {
				a_uio->uio_iovs.kiovp[i].iov_len = (u_int64_t)a_length;
				a_uio->uio_iovs.kiovp[i].iov_base = (u_int64_t)a_baseaddr;
				a_uio->uio_iovcnt++;
				a_uio->uio_resid_64 += a_length;
				return( 0 );
			}
		}
	}

	return( -1 );
}
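
/*
 * Usage sketch (added for illustration; not part of the original file).
 * A uio created for more than one iovec can describe a scatter/gather
 * request; each uio_addiov() call appends one segment and grows the
 * residual count.  The header/payload addresses and the function name
 * below are hypothetical.
 */
#if 0
static uio_t
example_gather(user_addr_t hdr, user_size_t hdr_len,
	       user_addr_t payload, user_size_t payload_len)
{
	uio_t	auio;

	auio = uio_create(2, 0, UIO_USERSPACE64, UIO_WRITE);
	if (auio == NULL) {
		return NULL;
	}
	if (uio_addiov(auio, hdr, hdr_len) != 0 ||
	    uio_addiov(auio, payload, payload_len) != 0) {
		uio_free(auio);
		return NULL;
	}
	/* uio_resid() now reports hdr_len + payload_len */
	return auio;
}
#endif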

/*
 * uio_getiov - get iovec data associated with the given uio_t.  Use
 *  a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)).
 *  a_baseaddr_p and a_length_p may be NULL.
 * 	returns -1 when a_index is >= uio_t.uio_iovcnt or invalid uio_t.
 *	returns 0 when data is returned.
 */
int uio_getiov( uio_t a_uio,
		int a_index,
		user_addr_t * a_baseaddr_p,
		user_size_t * a_length_p )
{
	if (a_uio == NULL) {
#if DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* DEBUG */
		return(-1);
	}
	if ( a_index < 0 || a_index >= a_uio->uio_iovcnt) {
		return(-1);
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		if (a_baseaddr_p != NULL) {
			*a_baseaddr_p = a_uio->uio_iovs.uiovp[a_index].iov_base;
		}
		if (a_length_p != NULL) {
			*a_length_p = a_uio->uio_iovs.uiovp[a_index].iov_len;
		}
	}
	else {
		if (a_baseaddr_p != NULL) {
			*a_baseaddr_p = a_uio->uio_iovs.kiovp[a_index].iov_base;
		}
		if (a_length_p != NULL) {
			*a_length_p = a_uio->uio_iovs.kiovp[a_index].iov_len;
		}
	}

	return( 0 );
}
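
/*
 * Usage sketch (added for illustration; not part of the original file).
 * uio_iovcnt() and uio_getiov() let a caller walk the remaining iovecs
 * without reaching into the uio's internal layout.
 */
#if 0
static user_ssize_t
example_total_remaining(uio_t auio)
{
	user_addr_t	base;
	user_size_t	len;
	user_ssize_t	total = 0;
	int		i;

	for (i = 0; i < uio_iovcnt(auio); i++) {
		if (uio_getiov(auio, i, &base, &len) == 0) {
			total += len;
		}
	}
	/* for a freshly built uio this should match uio_resid(auio) */
	return total;
}
#endif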

/*
 * uio_calculateresid - runs through all iovecs associated with this
 *	uio_t and calculates (and sets) the residual IO count.
 */
__private_extern__ int uio_calculateresid( uio_t a_uio )
{
	int			i;
	u_int64_t		resid = 0;

	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return EINVAL;
	}

	a_uio->uio_iovcnt = a_uio->uio_max_iovs;
	if (UIO_IS_USER_SPACE(a_uio)) {
		a_uio->uio_resid_64 = 0;
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.uiovp[i].iov_len != 0 && a_uio->uio_iovs.uiovp[i].iov_base != 0) {
				if (a_uio->uio_iovs.uiovp[i].iov_len > LONG_MAX)
					return EINVAL;
				resid += a_uio->uio_iovs.uiovp[i].iov_len;
				if (resid > LONG_MAX)
					return EINVAL;
			}
		}
		a_uio->uio_resid_64 = resid;

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.uiovp++;
			}
		}
	}
	else {
		a_uio->uio_resid_64 = 0;
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.kiovp[i].iov_len != 0 && a_uio->uio_iovs.kiovp[i].iov_base != 0) {
				if (a_uio->uio_iovs.kiovp[i].iov_len > LONG_MAX)
					return EINVAL;
				resid += a_uio->uio_iovs.kiovp[i].iov_len;
				if (resid > LONG_MAX)
					return EINVAL;
			}
		}
		a_uio->uio_resid_64 = resid;

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.kiovp++;
			}
		}
	}

	return 0;
}

/*
 * uio_update - update the given uio_t for a_count of completed IO.
 *	This call decrements the current iovec length and residual IO value
 *	and increments the current iovec base address and offset value.
 *	If the current iovec length is 0 then advance to the next
 *	iovec (if any).
 * 	If the a_count passed in is 0, then only do the advancement
 *	over any 0 length iovec's.
 */
void uio_update( uio_t a_uio, user_size_t a_count )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
	if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
		panic("%s :%d - invalid count value \n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return;
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		/*
		 * if a_count == 0, then we are asking to skip over
		 * any empty iovs
		 */
		if (a_count) {
			if (a_count > a_uio->uio_iovs.uiovp->iov_len) {
				a_uio->uio_iovs.uiovp->iov_base += a_uio->uio_iovs.uiovp->iov_len;
				a_uio->uio_iovs.uiovp->iov_len = 0;
			}
			else {
				a_uio->uio_iovs.uiovp->iov_base += a_count;
				a_uio->uio_iovs.uiovp->iov_len -= a_count;
			}
			if (a_uio->uio_resid_64 < 0) {
				a_uio->uio_resid_64 = 0;
			}
			if (a_count > (user_size_t)a_uio->uio_resid_64) {
				a_uio->uio_offset += a_uio->uio_resid_64;
				a_uio->uio_resid_64 = 0;
			}
			else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid_64 -= a_count;
			}
		}
		/*
		 * advance to next iovec if current one is totally consumed
		 */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.uiovp++;
			}
		}
	}
	else {
		/*
		 * if a_count == 0, then we are asking to skip over
		 * any empty iovs
		 */
		if (a_count) {
			if (a_count > a_uio->uio_iovs.kiovp->iov_len) {
				a_uio->uio_iovs.kiovp->iov_base += a_uio->uio_iovs.kiovp->iov_len;
				a_uio->uio_iovs.kiovp->iov_len = 0;
			}
			else {
				a_uio->uio_iovs.kiovp->iov_base += a_count;
				a_uio->uio_iovs.kiovp->iov_len -= a_count;
			}
			if (a_uio->uio_resid_64 < 0) {
				a_uio->uio_resid_64 = 0;
			}
			if (a_count > (user_size_t)a_uio->uio_resid_64) {
				a_uio->uio_offset += a_uio->uio_resid_64;
				a_uio->uio_resid_64 = 0;
			}
			else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid_64 -= a_count;
			}
		}
		/*
		 * advance to next iovec if current one is totally consumed
		 */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.kiovp++;
			}
		}
	}
	return;
}
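
/*
 * Usage sketch (added for illustration; not part of the original file).
 * Code that moves data by some means other than uiomove() (for example a
 * hypothetical DMA helper) can still keep the uio bookkeeping honest by
 * calling uio_update() with the number of bytes actually transferred.
 */
#if 0
static int
example_consume(uio_t auio)
{
	user_size_t	chunk;

	while (uio_resid(auio) > 0 && uio_iovcnt(auio) > 0) {
		uio_update(auio, 0);		/* skip any zero-length iovecs */
		chunk = uio_curriovlen(auio);
		if (chunk == 0) {
			break;
		}
		/* ... transfer 'chunk' bytes starting at uio_curriovbase(auio) ... */
		uio_update(auio, chunk);	/* advance base, len, offset, resid */
	}
	return 0;
}
#endif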

/*
 * uio_pushback - undo uncommitted I/O by subtracting from the
 * current base address and offset, and incrementing the residual
 * IO. If the UIO was previously exhausted, this call will panic.
 * New code should not use this functionality.
 */
__private_extern__ void uio_pushback( uio_t a_uio, user_size_t a_count )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
	if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
		panic("%s :%d - invalid count value \n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_count == 0) {
		return;
	}

	if (a_uio->uio_iovcnt < 1) {
		panic("Invalid uio for pushback");
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		a_uio->uio_iovs.uiovp->iov_base -= a_count;
		a_uio->uio_iovs.uiovp->iov_len += a_count;
	}
	else {
		a_uio->uio_iovs.kiovp->iov_base -= a_count;
		a_uio->uio_iovs.kiovp->iov_len += a_count;
	}

	a_uio->uio_offset -= a_count;
	a_uio->uio_resid_64 += a_count;

	return;
}


/*
 * uio_duplicate - allocate a new uio and make a copy of the given uio_t.
 *	may return NULL.
 */
uio_t uio_duplicate( uio_t a_uio )
{
	uio_t		my_uio;
	int		i;

	if (a_uio == NULL) {
		return(NULL);
	}

	my_uio = (uio_t) kalloc(a_uio->uio_size);
	if (my_uio == 0) {
		panic("%s :%d - allocation failed\n", __FILE__, __LINE__);
	}

	bcopy((void *)a_uio, (void *)my_uio, a_uio->uio_size);
	/* need to set our iovec pointer to point to first active iovec */
	if (my_uio->uio_max_iovs > 0) {
		my_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)my_uio) + sizeof(struct uio));

		/* advance to first nonzero iovec */
		if (my_uio->uio_iovcnt > 0) {
			for ( i = 0; i < my_uio->uio_max_iovs; i++ ) {
				if (UIO_IS_USER_SPACE(a_uio)) {
					if (my_uio->uio_iovs.uiovp->iov_len != 0) {
						break;
					}
					my_uio->uio_iovs.uiovp++;
				}
				else {
					if (my_uio->uio_iovs.kiovp->iov_len != 0) {
						break;
					}
					my_uio->uio_iovs.kiovp++;
				}
			}
		}
	}

	my_uio->uio_flags = UIO_FLAGS_WE_ALLOCED | UIO_FLAGS_INITED;
#if DEBUG
	(void)hw_atomic_add(&uio_t_count, 1);
#endif

	return(my_uio);
}

int copyin_user_iovec_array(user_addr_t uaddr, int spacetype, int count, struct user_iovec *dst)
{
	size_t size_of_iovec = ( spacetype == UIO_USERSPACE64 ? sizeof(struct user64_iovec) : sizeof(struct user32_iovec));
	int error;
	int i;

	// copyin to the front of "dst", without regard for putting records in the right places
	error = copyin(uaddr, dst, count * size_of_iovec);
	if (error)
		return (error);

	// now, unpack the entries in reverse order, so we don't overwrite anything
	for (i = count - 1; i >= 0; i--) {
		if (spacetype == UIO_USERSPACE64) {
			struct user64_iovec iovec = ((struct user64_iovec *)dst)[i];
			dst[i].iov_base = iovec.iov_base;
			dst[i].iov_len = iovec.iov_len;
		} else {
			struct user32_iovec iovec = ((struct user32_iovec *)dst)[i];
			dst[i].iov_base = iovec.iov_base;
			dst[i].iov_len = iovec.iov_len;
		}
	}

	return (0);
}
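
/*
 * Usage sketch (added for illustration; not part of the original file).
 * A syscall handling a readv/writev-style request can pull the caller's
 * iovec array straight into the uio's own iovec storage and then let
 * uio_calculateresid() total things up.  This is roughly the pattern,
 * with error handling trimmed and all names invented for the example.
 */
#if 0
static int
example_build_user_uio(user_addr_t user_iovp, int iovcnt, int is64bit,
		       off_t offset, int rw, uio_t *uio_out)
{
	uio_t			auio;
	struct user_iovec	*iovp;
	int			spacetype = is64bit ? UIO_USERSPACE64 : UIO_USERSPACE32;
	int			error;

	auio = uio_create(iovcnt, offset, spacetype, rw);
	if (auio == NULL) {
		return ENOMEM;
	}
	iovp = uio_iovsaddr(auio);
	error = copyin_user_iovec_array(user_iovp, spacetype, iovcnt, iovp);
	if (error == 0) {
		error = uio_calculateresid(auio);
	}
	if (error) {
		uio_free(auio);
		return error;
	}
	*uio_out = auio;
	return 0;
}
#endif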