/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <wait.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/file.h>
#include <sys/door.h>

/*
 * These leading-underbar symbols exist because mistakes were made
 * in the past that put them into non-SUNWprivate versions of
 * the libc mapfiles.  They should be eliminated, but oh well...
 */
#pragma weak _fork = fork
#pragma weak _read = read
#pragma weak _write = write
#pragma weak _getmsg = getmsg
#pragma weak _getpmsg = getpmsg
#pragma weak _putmsg = putmsg
#pragma weak _putpmsg = putpmsg
#pragma weak _sleep = sleep
#pragma weak _close = close
#pragma weak _creat = creat
#pragma weak _fcntl = fcntl
#pragma weak _fsync = fsync
#pragma weak _lockf = lockf
#pragma weak _msgrcv = msgrcv
#pragma weak _msgsnd = msgsnd
#pragma weak _msync = msync
#pragma weak _open = open
#pragma weak _openat = openat
#pragma weak _pause = pause
#pragma weak _readv = readv
#pragma weak _sigpause = sigpause
#pragma weak _sigsuspend = sigsuspend
#pragma weak _tcdrain = tcdrain
#pragma weak _waitid = waitid
#pragma weak _writev = writev

#if !defined(_LP64)
#pragma weak _creat64 = creat64
#pragma weak _lockf64 = lockf64
#pragma weak _open64 = open64
#pragma weak _openat64 = openat64
#pragma weak _pread64 = pread64
#pragma weak _pwrite64 = pwrite64
#endif

/*
 * These are SUNWprivate, but they are being used by Sun Studio libcollector.
 */
#pragma weak _fork1 = fork1
#pragma weak _forkall = forkall

/*
 * atfork_lock protects the pthread_atfork() data structures.
 *
 * fork_lock does double-duty.  Not only does it (and atfork_lock)
 * serialize calls to fork() and forkall(), but it also serializes calls
 * to thr_suspend() and thr_continue() (because fork() and forkall() also
 * suspend and continue other threads and they want no competition).
 *
 * Functions called in dlopen()ed L10N objects can do anything, including
 * call malloc() and free().  Such calls are not fork-safe when protected
 * by an ordinary mutex that is acquired in libc's prefork processing
 * because, with an interposed malloc library present, there would be a
 * lock ordering violation: the pthread_atfork() prefork function in the
 * interposition library would acquire its malloc lock(s) before libc's
 * prefork functions acquire the ordinary mutex in libc.
 *
 * Within libc, calls to malloc() and free() are fork-safe if the calls
 * are made while holding no other libc locks.  This covers almost all
 * of libc's malloc() and free() calls.  For those libc code paths, such
 * as the above-mentioned L10N calls, that require serialization and that
 * may call malloc() or free(), libc uses callout_lock_enter() to perform
 * the serialization.  This works because callout_lock is not acquired as
 * part of running the pthread_atfork() prefork handlers (to avoid the
 * lock ordering violation described above).  Rather, it is simply
 * reinitialized in postfork1_child() to cover the case that some
 * now-defunct thread might have been suspended while holding it.
 */

void
fork_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_lock(&curthread->ul_uberdata->fork_lock);
}

void
fork_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_unlock(&curthread->ul_uberdata->fork_lock);
}

/*
 * Use cancel_safe_mutex_lock() to protect against being cancelled while
 * holding callout_lock and calling outside of libc (via L10N plugins).
 * We will honor a pending cancellation request when callout_lock_exit()
 * is called, by calling cancel_safe_mutex_unlock().
 */
void
callout_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_lock(&curthread->ul_uberdata->callout_lock);
}

void
callout_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_unlock(&curthread->ul_uberdata->callout_lock);
}

pid_t
forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal lmutex_lock()-acquired locks while invoking them.
	 * We hold only udp->atfork_lock to protect the atfork linkages.
	 * If one of these pthread_atfork() functions attempts to fork
	 * or to call pthread_atfork(), libc will detect the error and
	 * fail the call with EDEADLK.  Otherwise, the pthread_atfork()
	 * functions are free to do anything they please (except they
	 * will not receive any signals).
	 */
	(void) mutex_lock(&udp->atfork_lock);

	/*
	 * Posix (SUSv3) requires fork() to be async-signal-safe.
	 * This cannot be made to happen with fork handlers in place
	 * (they grab locks).  To be in nominal compliance, don't run
	 * any fork handlers if we are called within a signal context.
	 * This leaves the child process in a questionable state with
	 * respect to its locks, but at least the parent process does
	 * not become deadlocked due to the calling thread attempting
	 * to acquire a lock that it already owns.
	 */
	if (self->ul_siglink == NULL)
		_prefork_handler();

	/*
	 * Block every other thread attempting thr_suspend() or thr_continue().
	 */
	(void) mutex_lock(&udp->fork_lock);

	/*
	 * Block all signals.
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no lmutex_lock()-acquired library
	 * locks are held while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		/* reset the library's data structures to reflect one thread */
		unregister_locks();
		postfork1_child();
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		if (self->ul_siglink == NULL)
			_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		if (self->ul_siglink == NULL)
			_postfork_parent_handler();
	}

	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}

/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork1 = fork
pid_t
fork(void)
{
	return (forkx(0));
}

/*
 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
 */
pid_t
forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;
	(void) mutex_lock(&udp->atfork_lock);
	(void) mutex_lock(&udp->fork_lock);
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		unregister_locks();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	(void) mutex_unlock(&udp->fork_lock);
	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}

pid_t
forkall(void)
{
	return (forkallx(0));
}

/*
 * For the implementation of cancellation at cancellation points.
 * PROLOGUE acts on any pending cancellation request (or sets 'abort'
 * so the wrapper can fail with EINTR when cancellation cannot be
 * taken here); EPILOGUE restores the saved async-cancellation state.
 */
#define	PROLOGUE							\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	int abort = 0;							\
	if (nocancel == 0) {						\
		self->ul_save_async = self->ul_cancel_async;		\
		if (!self->ul_cancel_disabled) {			\
			self->ul_cancel_async = 1;			\
			if (self->ul_cancel_pending)			\
				pthread_exit(PTHREAD_CANCELED);		\
		}							\
		self->ul_sp = stkptr();					\
	} else if (self->ul_cancel_pending &&				\
	    !self->ul_cancel_disabled) {				\
		set_cancel_eintr_flag(self);				\
		abort = 1;						\
	}

#define	EPILOGUE							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to execute the call as a tail call, which saves a register
 * window on sparc and slightly (not much) improves the code for
 * x86/x64 compilations.
 */
#define	PERFORM(function_call)						\
	PROLOGUE							\
	if (abort) {							\
		*self->ul_errnop = EINTR;				\
		return (-1);						\
	}								\
	if (nocancel)							\
		return (function_call);					\
	rv = function_call;						\
	EPILOGUE							\
	return (rv);
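
/*
 * Typical use of PERFORM() (see read() below, for example): the wrapper
 * declares an extern prototype for the raw system call trap and a local
 * 'rv' of the appropriate type, then expands PERFORM(), which supplies
 * the cancellation prologue/epilogue and the return statement:
 *
 *	ssize_t
 *	read(int fd, void *buf, size_t size)
 *	{
 *		extern ssize_t __read(int, void *, size_t);
 *		ssize_t rv;
 *
 *		PERFORM(__read(fd, buf, size))
 *	}
 */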

/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask)						\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	if (!self->ul_vfork) {						\
		if (sigmask) {						\
			block_all_signals(self);			\
			self->ul_tmpmask = *sigmask;			\
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;			\
		}							\
		if (nocancel == 0) {					\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					pthread_exit(PTHREAD_CANCELED);	\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}							\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourselves.
 */
#define	EPILOGUE_MASK							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
	if (self->ul_sigsuspend) {					\
		self->ul_sigsuspend = 0;				\
		restore_signals(self);					\
	}								\
}

/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue =
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |
	    self->ul_critical | self->ul_sigdefer) != 0;
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	} else if (self->ul_cancel_pending &&
	    !self->ul_cancel_disabled) {
		set_cancel_eintr_flag(self);
	}
}

void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}

/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	if (abort)
		return (EINTR);
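	/*
	 * Retry the wait if it is interrupted by a signal, but not if
	 * the interruption is due to an active cancellation request.
	 */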
	while ((error = __lwp_wait(tid, found)) == EINTR && !cancel_active())
		continue;
	EPILOGUE
	return (error);
}

ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t __read(int, void *, size_t);
	ssize_t rv;

	PERFORM(__read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t __write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(__write(fd, buf, size))
}

int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int __getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(__getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int __getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(__getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags))
}

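/*
 * The __xpg4_* variants below pass the MSG_XPG4 flag so the kernel
 * applies the XPG4 semantics for putmsg()/putpmsg().
 */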
int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags))
}

int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

int
nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = abort? EINTR : __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}

int
clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = abort? EINTR : __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}

unsigned int
sleep(unsigned int sec)
{
	unsigned int rem = 0;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	if (nanosleep(&ts, &tsr) == -1 && errno == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
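		/* round the unslept remainder to the nearest second */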
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}

int
usleep(useconds_t usec)
{
	timespec_t ts;

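	/* convert microseconds to a timespec; interruption is silently ignored */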
	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	(void) nanosleep(&ts, NULL);
	return (0);
}

int
close(int fildes)
{
	extern void _aio_close(int);
	extern int __close(int);
	int rv;

	/*
	 * If we call _aio_close() while in a critical region,
	 * we will draw an ASSERT() failure, so don't do it.
	 * No calls to close() from within libc need _aio_close();
	 * only the application's calls to close() need this,
	 * and such calls are never from a libc critical region.
	 */
	if (curthread->ul_critical == 0)
		_aio_close(fildes);
	PERFORM(__close(fildes))
}

int
door_call(int d, door_arg_t *params)
{
	extern int __door_call(int, door_arg_t *);
	int rv;

	PERFORM(__door_call(d, params))
}

int
fcntl(int fildes, int cmd, ...)
{
	extern int __fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
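	/*
	 * Only F_SETLKW can block indefinitely, so it is the only
	 * fcntl() command treated as a cancellation point; everything
	 * else goes straight to the system call.
	 */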
	if (cmd != F_SETLKW)
		return (__fcntl(fildes, cmd, arg));
	PERFORM(__fcntl(fildes, cmd, arg))
}

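/*
 * fdatasync() and fsync() both funnel into the __fdsync() trap; the
 * flag selects data-only (FDSYNC) vs. data-plus-metadata (FSYNC)
 * synchronization.
 */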
int
fdatasync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, FDSYNC))
}

int
fsync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, FSYNC))
}

int
lockf(int fildes, int function, off_t size)
{
	extern int __lockf(int, int, off_t);
	int rv;

	PERFORM(__lockf(fildes, function, size))
}

#if !defined(_LP64)
int
lockf64(int fildes, int function, off64_t size)
{
	extern int __lockf64(int, int, off64_t);
	int rv;

	PERFORM(__lockf64(fildes, function, size))
}
#endif	/* !_LP64 */

ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t __msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(__msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int __msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(__msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(caddr_t addr, size_t len, int flags)
{
	extern int __msync(caddr_t, size_t, int);
	int rv;

	PERFORM(__msync(addr, len, flags))
}

int
openat(int fd, const char *path, int oflag, ...)
{
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat(fd, path, oflag, mode))
}

int
open(const char *path, int oflag, ...)
{
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open(path, oflag, mode))
}

int
creat(const char *path, mode_t mode)
{
	return (open(path, O_WRONLY | O_CREAT | O_TRUNC, mode));
}

#if !defined(_LP64)
int
openat64(int fd, const char *path, int oflag, ...)
{
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat64(fd, path, oflag, mode))
}

int
open64(const char *path, int oflag, ...)
{
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open64(path, oflag, mode))
}

int
creat64(const char *path, mode_t mode)
{
	return (open64(path, O_WRONLY | O_CREAT | O_TRUNC, mode));
}
#endif	/* !_LP64 */

int
pause(void)
{
	extern int __pause(void);
	int rv;

	PERFORM(__pause())
}

ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__readv(fildes, iov, iovcnt))
}

int
sigpause(int sig)
{
	extern int __sigpause(int);
	int rv;

	PERFORM(__sigpause(sig))
}

int
sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}

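/*
 * Like sigsuspend(), pollsys() may install a temporary signal mask
 * for the duration of the call, so it uses the _MASK variants of the
 * cancellation prologue/epilogue.
 */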
int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}

int
sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	if (abort) {
		*self->ul_errnop = EINTR;
		sig = -1;
	} else {
		sig = __sigtimedwait(set, &info, timeout);
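		/*
		 * If we consumed the internal cancellation signal
		 * (sent by the kernel or by another LWP), act on the
		 * cancellation request and make this call appear to
		 * have been interrupted rather than returning
		 * SIGCANCEL to the caller.
		 */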
		if (sig == SIGCANCEL &&
		    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
			do_sigcancel();
			*self->ul_errnop = EINTR;
			sig = -1;
		}
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) memcpy(infop, &info, sizeof (*infop));
	return (sig);
}

int
sigwait(sigset_t *set)
{
	return (sigtimedwait(set, NULL, NULL));
}

int
sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	return (sigtimedwait(set, info, NULL));
}

int
sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);
	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}

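/*
 * Cancellable wrappers around the socket system call traps.
 */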
int
_so_accept(int sock, struct sockaddr *addr, uint_t *addrlen, int version)
{
	extern int __so_accept(int, struct sockaddr *, uint_t *, int);
	int rv;

	PERFORM(__so_accept(sock, addr, addrlen, version))
}

int
_so_connect(int sock, struct sockaddr *addr, uint_t addrlen, int version)
{
	extern int __so_connect(int, struct sockaddr *, uint_t, int);
	int rv;

	PERFORM(__so_connect(sock, addr, addrlen, version))
}

int
_so_recv(int sock, void *buf, size_t len, int flags)
{
	extern int __so_recv(int, void *, size_t, int);
	int rv;

	PERFORM(__so_recv(sock, buf, len, flags))
}

int
_so_recvfrom(int sock, void *buf, size_t len, int flags,
    struct sockaddr *addr, int *addrlen)
{
	extern int __so_recvfrom(int, void *, size_t, int,
	    struct sockaddr *, int *);
	int rv;

	PERFORM(__so_recvfrom(sock, buf, len, flags, addr, addrlen))
}

int
_so_recvmsg(int sock, struct msghdr *msg, int flags)
{
	extern int __so_recvmsg(int, struct msghdr *, int);
	int rv;

	PERFORM(__so_recvmsg(sock, msg, flags))
}

int
_so_send(int sock, const void *buf, size_t len, int flags)
{
	extern int __so_send(int, const void *, size_t, int);
	int rv;

	PERFORM(__so_send(sock, buf, len, flags))
}

int
_so_sendmsg(int sock, const struct msghdr *msg, int flags)
{
	extern int __so_sendmsg(int, const struct msghdr *, int);
	int rv;

	PERFORM(__so_sendmsg(sock, msg, flags))
}

int
_so_sendto(int sock, const void *buf, size_t len, int flags,
    const struct sockaddr *addr, int *addrlen)
{
	extern int __so_sendto(int, const void *, size_t, int,
	    const struct sockaddr *, int *);
	int rv;

	PERFORM(__so_sendto(sock, buf, len, flags, addr, addrlen))
}

int
tcdrain(int fildes)
{
	extern int __tcdrain(int);
	int rv;

	PERFORM(__tcdrain(fildes))
}

int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int __waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

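	/*
	 * A WNOHANG wait never blocks, so bypass the cancellation
	 * machinery and call straight through.
	 */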
	if (options & WNOHANG)
		return (__waitid(idtype, id, infop, options));
	PERFORM(__waitid(idtype, id, infop, options))
}

ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__writev(fildes, iov, iovcnt))
}