/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Centralized authorisation framework.
 */

#include <sys/appleapiopts.h>
#include <sys/param.h>	/* XXX trim includes */
#include <sys/acct.h>
#include <sys/systm.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/malloc.h>
#include <sys/vnode_internal.h>
#include <sys/kauth.h>
#include <sys/stat.h>

#include <bsm/audit_kernel.h>

#include <sys/mount.h>
#include <sys/sysproto.h>
#include <mach/message.h>
#include <mach/host_security.h>

#include <kern/locks.h>


/*
 * Authorization scopes.
 */

lck_grp_t *kauth_lck_grp;
static lck_mtx_t *kauth_scope_mtx;
#define KAUTH_SCOPELOCK()	lck_mtx_lock(kauth_scope_mtx);
#define KAUTH_SCOPEUNLOCK()	lck_mtx_unlock(kauth_scope_mtx);

/*
 * We support listeners for scopes that have not been registered yet.
 * If a listener comes in for a scope that is not active we hang the listener
 * off our kauth_dangling_listeners list and once the scope becomes active we
 * remove it from kauth_dangling_listeners and add it to the active scope.
 */
struct kauth_listener {
	TAILQ_ENTRY(kauth_listener)	kl_link;
	const char *				kl_identifier;
	kauth_scope_callback_t		kl_callback;
	void *						kl_idata;
};

/* XXX - kauth_todo - there is a race if a scope listener is removed while we
 * are in the kauth_authorize_action code path.  We intentionally do not take
 * a scope lock in order to get the best possible performance.  We will fix this
 * post-Tiger.
 * Until the race is fixed our kext clients are responsible for all active
 * requests that may be in their callback code or on the way to their callback
 * code before they free kauth_listener.kl_callback or kauth_listener.kl_idata.
 * We keep copies of these in our kauth_local_listener in an attempt to limit
 * our exposure to the unlisten race.
 */
struct kauth_local_listener {
	kauth_listener_t			kll_listenerp;
	kauth_scope_callback_t		kll_callback;
	void *						kll_idata;
};
typedef struct kauth_local_listener *kauth_local_listener_t;

static TAILQ_HEAD(,kauth_listener) kauth_dangling_listeners;

/*
 * Scope listeners need to be reworked to be dynamic.
 * We intentionally used a static table to avoid locking issues with linked
 * lists.  The listeners may be called quite often.
 * XXX - kauth_todo
 */
#define KAUTH_SCOPE_MAX_LISTENERS  15

struct kauth_scope {
	TAILQ_ENTRY(kauth_scope)	ks_link;
	volatile struct kauth_local_listener  ks_listeners[KAUTH_SCOPE_MAX_LISTENERS];
	const char *				ks_identifier;
	kauth_scope_callback_t		ks_callback;
	void *						ks_idata;
	u_int						ks_flags;
};

/* values for kauth_scope.ks_flags */
#define KS_F_HAS_LISTENERS		(1 << 0)

static TAILQ_HEAD(,kauth_scope)	kauth_scopes;

static int kauth_add_callback_to_scope(kauth_scope_t sp, kauth_listener_t klp);
static void	kauth_scope_init(void) __attribute__((section("__TEXT, initcode")));
static kauth_scope_t kauth_alloc_scope(const char *identifier, kauth_scope_callback_t callback, void *idata);
static kauth_listener_t kauth_alloc_listener(const char *identifier, kauth_scope_callback_t callback, void *idata);
#if 0
static int	kauth_scope_valid(kauth_scope_t scope);
#endif

kauth_scope_t	kauth_scope_process;
static int	kauth_authorize_process_callback(kauth_cred_t _credential, void *_idata, kauth_action_t _action,
    uintptr_t arg0, uintptr_t arg1, __unused uintptr_t arg2, __unused uintptr_t arg3);
kauth_scope_t	kauth_scope_generic;
static int	kauth_authorize_generic_callback(kauth_cred_t _credential, void *_idata, kauth_action_t _action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
kauth_scope_t	kauth_scope_fileop;

extern int 		cansignal(struct proc *, kauth_cred_t, struct proc *, int, int);
extern char *	get_pathbuff(void);
extern void		release_pathbuff(char *path);

/*
 * Initialization.
 */
void
kauth_init(void)
{
	lck_grp_attr_t	*grp_attributes;

	TAILQ_INIT(&kauth_scopes);
	TAILQ_INIT(&kauth_dangling_listeners);

	/* set up our lock group */
	grp_attributes = lck_grp_attr_alloc_init();
	kauth_lck_grp = lck_grp_alloc_init("kauth", grp_attributes);
	lck_grp_attr_free(grp_attributes);

	/* bring up kauth subsystem components */
	kauth_cred_init();
	kauth_identity_init();
	kauth_groups_init();
	kauth_scope_init();
	kauth_resolver_init();

	/* can't alloc locks after this */
	lck_grp_free(kauth_lck_grp);
	kauth_lck_grp = NULL;
}

static void
kauth_scope_init(void)
{
	kauth_scope_mtx = lck_mtx_alloc_init(kauth_lck_grp, 0 /*LCK_ATTR_NULL*/);
	kauth_scope_process = kauth_register_scope(KAUTH_SCOPE_PROCESS, kauth_authorize_process_callback, NULL);
	kauth_scope_generic = kauth_register_scope(KAUTH_SCOPE_GENERIC, kauth_authorize_generic_callback, NULL);
	kauth_scope_fileop = kauth_register_scope(KAUTH_SCOPE_FILEOP, NULL, NULL);
}

/*
 * Scope registration.
 */

static kauth_scope_t
kauth_alloc_scope(const char *identifier, kauth_scope_callback_t callback, void *idata)
{
	kauth_scope_t	sp;

	/*
	 * Allocate and populate the scope structure.
	 */
	MALLOC(sp, kauth_scope_t, sizeof(*sp), M_KAUTH, M_WAITOK);
	if (sp == NULL)
		return(NULL);
	bzero(&sp->ks_listeners, sizeof(sp->ks_listeners));
	sp->ks_flags = 0;
	sp->ks_identifier = identifier;
	sp->ks_idata = idata;
	sp->ks_callback = callback;
	return(sp);
}

static kauth_listener_t
kauth_alloc_listener(const char *identifier, kauth_scope_callback_t callback, void *idata)
{
	kauth_listener_t lsp;

	/*
	 * Allocate and populate the listener structure.
	 */
	MALLOC(lsp, kauth_listener_t, sizeof(*lsp), M_KAUTH, M_WAITOK);
	if (lsp == NULL)
		return(NULL);
	lsp->kl_identifier = identifier;
	lsp->kl_idata = idata;
	lsp->kl_callback = callback;
	return(lsp);
}

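/*
 * Register a new scope.  Returns NULL if allocation fails or a scope with
 * the same identifier is already registered.  Any listeners parked on the
 * dangling list for this identifier are moved onto the new scope's listener
 * table.
 */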
kauth_scope_t
kauth_register_scope(const char *identifier, kauth_scope_callback_t callback, void *idata)
{
	kauth_scope_t		sp, tsp;
	kauth_listener_t	klp;

	if ((sp = kauth_alloc_scope(identifier, callback, idata)) == NULL)
		return(NULL);

	/*
	 * Lock the list and insert.
	 */
	KAUTH_SCOPELOCK();
	TAILQ_FOREACH(tsp, &kauth_scopes, ks_link) {
		/* duplicate! */
		if (strncmp(tsp->ks_identifier, identifier,
					strlen(tsp->ks_identifier) + 1) == 0) {
			KAUTH_SCOPEUNLOCK();
			FREE(sp, M_KAUTH);
			return(NULL);
		}
	}
	TAILQ_INSERT_TAIL(&kauth_scopes, sp, ks_link);

	/*
	 * Look for listeners waiting for this scope, move them to the active scope
	 * listener table.
	 * Note that we have to restart the scan every time we remove an entry
	 * from the list, since we can't remove the current item from the list.
	 */
restart:
	TAILQ_FOREACH(klp, &kauth_dangling_listeners, kl_link) {
		if (strncmp(klp->kl_identifier, sp->ks_identifier,
					strlen(klp->kl_identifier) + 1) == 0) {
			/* found a match on the dangling listener list.  add it to
			 * the active scope.
			 */
			if (kauth_add_callback_to_scope(sp, klp) == 0) {
				TAILQ_REMOVE(&kauth_dangling_listeners, klp, kl_link);
			}
			else {
#if 0
				printf("%s - failed to add listener to scope \"%s\" \n", __FUNCTION__, sp->ks_identifier);
#endif
				break;
			}
			goto restart;
		}
	}

	KAUTH_SCOPEUNLOCK();
	return(sp);
}


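/*
 * Deregister a scope.  Listeners still attached to the scope are moved back
 * to the dangling list (so they can re-attach if the scope is registered
 * again later) and the scope structure is freed.
 */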
void
kauth_deregister_scope(kauth_scope_t scope)
{
	int		i;

	KAUTH_SCOPELOCK();

	TAILQ_REMOVE(&kauth_scopes, scope, ks_link);

	/* relocate listeners back to the waiting list */
	for (i = 0; i < KAUTH_SCOPE_MAX_LISTENERS; i++) {
		if (scope->ks_listeners[i].kll_listenerp != NULL) {
			TAILQ_INSERT_TAIL(&kauth_dangling_listeners, scope->ks_listeners[i].kll_listenerp, kl_link);
			scope->ks_listeners[i].kll_listenerp = NULL;
			/*
			 * XXX - kauth_todo - WARNING, do not clear kll_callback or
			 * kll_idata here.  they are part of our scope unlisten race hack
			 */
		}
	}
	KAUTH_SCOPEUNLOCK();
	FREE(scope, M_KAUTH);

	return;
}

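/*
 * Register a listener on the named scope.  If the scope is already active
 * the listener is attached to it immediately; otherwise it is parked on the
 * dangling list until the scope is registered.  Returns a handle for use
 * with kauth_unlisten_scope(), or NULL if allocation fails or the scope's
 * listener table is full.
 */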
kauth_listener_t
kauth_listen_scope(const char *identifier, kauth_scope_callback_t callback, void *idata)
{
	kauth_listener_t klp;
	kauth_scope_t	sp;

	if ((klp = kauth_alloc_listener(identifier, callback, idata)) == NULL)
		return(NULL);

	/*
	 * Lock the scope list and check to see whether this scope already exists.
	 */
	KAUTH_SCOPELOCK();
	TAILQ_FOREACH(sp, &kauth_scopes, ks_link) {
		if (strncmp(sp->ks_identifier, identifier,
					strlen(sp->ks_identifier) + 1) == 0) {
			/* scope exists, add it to scope listener table */
			if (kauth_add_callback_to_scope(sp, klp) == 0) {
				KAUTH_SCOPEUNLOCK();
				return(klp);
			}
			/* table already full */
			KAUTH_SCOPEUNLOCK();
			FREE(klp, M_KAUTH);
			return(NULL);
		}
	}

	/* scope doesn't exist, put on waiting list. */
	TAILQ_INSERT_TAIL(&kauth_dangling_listeners, klp, kl_link);

	KAUTH_SCOPEUNLOCK();

	return(klp);
}

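/*
 * Remove a previously registered listener and free it.  The listener may be
 * attached to an active scope or still waiting on the dangling list; note
 * that for an active scope only the kll_listenerp slot is cleared here, as
 * described in the unlisten race comments above.
 */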
void
kauth_unlisten_scope(kauth_listener_t listener)
{
	kauth_scope_t		sp;
	kauth_listener_t 	klp;
	int					i, listener_count, do_free;

	KAUTH_SCOPELOCK();

	/* search the active scopes for this listener */
	TAILQ_FOREACH(sp, &kauth_scopes, ks_link) {
		do_free = 0;
		if ((sp->ks_flags & KS_F_HAS_LISTENERS) != 0) {
			listener_count = 0;
			for (i = 0; i < KAUTH_SCOPE_MAX_LISTENERS; i++) {
				if (sp->ks_listeners[i].kll_listenerp == listener) {
					sp->ks_listeners[i].kll_listenerp = NULL;
					do_free = 1;
					/*
					 * XXX - kauth_todo - WARNING, do not clear kll_callback or
					 * kll_idata here.  they are part of our scope unlisten race hack
					 */
				}
				else if (sp->ks_listeners[i].kll_listenerp != NULL) {
					listener_count++;
				}
			}
			if (do_free) {
				if (listener_count == 0) {
					sp->ks_flags &= ~KS_F_HAS_LISTENERS;
				}
				KAUTH_SCOPEUNLOCK();
				FREE(listener, M_KAUTH);
				return;
			}
		}
	}

	/* if not active, check the dangling list */
	TAILQ_FOREACH(klp, &kauth_dangling_listeners, kl_link) {
		if (klp == listener) {
			TAILQ_REMOVE(&kauth_dangling_listeners, klp, kl_link);
			KAUTH_SCOPEUNLOCK();
			FREE(listener, M_KAUTH);
			return;
		}
	}

	KAUTH_SCOPEUNLOCK();
	return;
}

/*
 * Authorization requests.
 *
 * Returns:	0			Success
 *		EPERM			Operation not permitted
 *
 * Imputed:	*arg3, modified		Callback return - depends on callback
 *					modification of *arg3, if any
 */
int
kauth_authorize_action(kauth_scope_t scope, kauth_cred_t credential, kauth_action_t action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
{
	int result, ret, i;

	/* ask the scope */
	if (scope->ks_callback != NULL)
		result = scope->ks_callback(credential, scope->ks_idata, action, arg0, arg1, arg2, arg3);
	else
		result = KAUTH_RESULT_DEFER;

	/* check with listeners */
	if ((scope->ks_flags & KS_F_HAS_LISTENERS) != 0) {
		for (i = 0; i < KAUTH_SCOPE_MAX_LISTENERS; i++) {
			/* XXX - kauth_todo - there is a race here if a listener is removed; we will fix this post-Tiger.
			 * Until the race is fixed our kext clients are responsible for all active requests that may
			 * be in their callbacks or on the way to their callbacks before they free kl_callback or kl_idata.
			 * We keep copies of these in our kauth_local_listener in an attempt to limit our exposure to
			 * the unlisten race.
			 */
			if (scope->ks_listeners[i].kll_listenerp == NULL ||
				scope->ks_listeners[i].kll_callback == NULL)
				continue;

			ret = scope->ks_listeners[i].kll_callback(
					credential, scope->ks_listeners[i].kll_idata,
					action, arg0, arg1, arg2, arg3);
			if ((ret == KAUTH_RESULT_DENY) ||
				(result == KAUTH_RESULT_DEFER))
				result = ret;
		}
	}

	/* we need an explicit allow, or the auth fails */
	/* XXX need a mechanism for auth failure to be signalled vs. denial */
	return(result == KAUTH_RESULT_ALLOW ? 0 : EPERM);
}

/*
 * Default authorization handlers.
 */
int
kauth_authorize_allow(__unused kauth_cred_t credential, __unused void *idata, __unused kauth_action_t action,
     __unused uintptr_t arg0, __unused uintptr_t arg1, __unused uintptr_t arg2, __unused uintptr_t arg3)
{

	return(KAUTH_RESULT_ALLOW);
}

#if 0
/*
 * Debugging support.
 */
static int
kauth_scope_valid(kauth_scope_t scope)
{
	kauth_scope_t	sp;

	KAUTH_SCOPELOCK();
	TAILQ_FOREACH(sp, &kauth_scopes, ks_link) {
		if (sp == scope)
			break;
	}
	KAUTH_SCOPEUNLOCK();
	return((sp == NULL) ? 0 : 1);
}
#endif

/*
 * Process authorization scope.
 */

int
kauth_authorize_process(kauth_cred_t credential, kauth_action_t action, struct proc *process, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
{
	return(kauth_authorize_action(kauth_scope_process, credential, action, (uintptr_t)process, arg1, arg2, arg3));
}

static int
kauth_authorize_process_callback(kauth_cred_t credential, __unused void *idata, kauth_action_t action,
    uintptr_t arg0, uintptr_t arg1, __unused uintptr_t arg2, __unused uintptr_t arg3)
{
	switch(action) {
	case KAUTH_PROCESS_CANSIGNAL:
		panic("KAUTH_PROCESS_CANSIGNAL not implemented");
		/* XXX credential wrong here */
		/* arg0 - process to signal
		 * arg1 - signal to send the process
		 */
		if (cansignal(current_proc(), credential, (struct proc *)arg0, (int)arg1, 0))
			return(KAUTH_RESULT_ALLOW);
		break;
	case KAUTH_PROCESS_CANTRACE:
		/* current_proc() - process that will do the tracing
		 * arg0 - process to be traced
		 * arg1 - pointer to int - reason (errno) for denial
		 */
		if (cantrace(current_proc(), credential, (proc_t)arg0, (int *)arg1))
			return(KAUTH_RESULT_ALLOW);
		break;
	}

	/* no explicit result, so defer to others in the chain */
	return(KAUTH_RESULT_DEFER);
}

/*
 * File system operation authorization scope.  This is really only a notification
 * of the file system operation, not an authorization check.  Thus the result is
 * not relevant.
 * arguments passed to KAUTH_FILEOP_OPEN listeners
 *		arg0 is pointer to vnode (vnode *) for given user path.
 *		arg1 is pointer to path (char *) passed in to open.
 * arguments passed to KAUTH_FILEOP_CLOSE listeners
 *		arg0 is pointer to vnode (vnode *) for file to be closed.
 *		arg1 is pointer to path (char *) of file to be closed.
 *		arg2 is close flags.
 * arguments passed to KAUTH_FILEOP_RENAME listeners
 *		arg0 is pointer to "from" path (char *).
 *		arg1 is pointer to "to" path (char *).
 * arguments passed to KAUTH_FILEOP_EXCHANGE listeners
 *		arg0 is pointer to file 1 path (char *).
 *		arg1 is pointer to file 2 path (char *).
 * arguments passed to KAUTH_FILEOP_EXEC listeners
 *		arg0 is pointer to vnode (vnode *) for executable.
 *		arg1 is pointer to path (char *) to executable.
 */
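
#if 0
/*
 * Illustrative sketch only (not compiled in): roughly how a kext might
 * observe fileop notifications using the argument conventions documented
 * above.  The listener function and the start/stop helpers are hypothetical
 * names; only kauth_listen_scope()/kauth_unlisten_scope() and the
 * KAUTH_SCOPE_FILEOP identifier come from the kauth interfaces themselves.
 */
static kauth_listener_t example_fileop_listener_handle;

static int
example_fileop_listener(__unused kauth_cred_t credential, __unused void *idata,
    kauth_action_t action, __unused uintptr_t arg0, uintptr_t arg1,
    __unused uintptr_t arg2, __unused uintptr_t arg3)
{
	if (action == KAUTH_FILEOP_OPEN) {
		/* for OPEN, arg0 is the vnode and arg1 is the resolved path */
		printf("fileop: open of %s\n", (const char *)arg1);
	}
	/* the fileop scope is notification-only; the result is ignored */
	return(KAUTH_RESULT_DEFER);
}

static void
example_fileop_listener_start(void)
{
	example_fileop_listener_handle = kauth_listen_scope(KAUTH_SCOPE_FILEOP,
	    example_fileop_listener, NULL);
}

static void
example_fileop_listener_stop(void)
{
	if (example_fileop_listener_handle != NULL)
		kauth_unlisten_scope(example_fileop_listener_handle);
}
#endif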

int
kauth_authorize_fileop_has_listeners(void)
{
	/*
	 * return 1 if we have any listeners for the fileop scope
	 * otherwise return 0
	 */
	if ((kauth_scope_fileop->ks_flags & KS_F_HAS_LISTENERS) != 0) {
		return(1);
	}
	return (0);
}

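/*
 * Notify any fileop-scope listeners of an operation.  For OPEN, CLOSE and
 * EXEC the vnode in arg0 is resolved to a path which is passed to listeners
 * as arg1; the close flags that arrive in arg1 are shifted to arg2 for
 * CLOSE.  This scope is notification-only, so the return value is always 0.
 */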
int
kauth_authorize_fileop(kauth_cred_t credential, kauth_action_t action, uintptr_t arg0, uintptr_t arg1)
{
	char 		*namep = NULL;
	int			name_len;
	uintptr_t	arg2 = 0;

	/* we do not have a primary handler for the fileop scope so bail out if
	 * there are no listeners.
	 */
	if ((kauth_scope_fileop->ks_flags & KS_F_HAS_LISTENERS) == 0) {
		return(0);
	}

	if (action == KAUTH_FILEOP_OPEN || action == KAUTH_FILEOP_CLOSE || action == KAUTH_FILEOP_EXEC) {
		/* get path to the given vnode as a convenience to our listeners.
		 */
		namep = get_pathbuff();
		name_len = MAXPATHLEN;
		if (vn_getpath((vnode_t)arg0, namep, &name_len) != 0) {
			release_pathbuff(namep);
			return(0);
		}
		if (action == KAUTH_FILEOP_CLOSE) {
			arg2 = arg1;  /* close has some flags that come in via arg1 */
		}
		arg1 = (uintptr_t)namep;
	}
	kauth_authorize_action(kauth_scope_fileop, credential, action, arg0, arg1, arg2, 0);

	if (namep != NULL) {
		release_pathbuff(namep);
	}

	return(0);
}

/*
 * Generic authorization scope.
 */

int
kauth_authorize_generic(kauth_cred_t credential, kauth_action_t action)
{
	if (credential == NULL)
		panic("auth against NULL credential");

	return(kauth_authorize_action(kauth_scope_generic, credential, action, 0, 0, 0, 0));

}

static int
kauth_authorize_generic_callback(kauth_cred_t credential, __unused void *idata, kauth_action_t action,
     __unused uintptr_t arg0, __unused uintptr_t arg1, __unused uintptr_t arg2, __unused uintptr_t arg3)
{
	switch(action) {
	case KAUTH_GENERIC_ISSUSER:
		/* XXX == 0 ? */
		return((kauth_cred_getuid(credential) == 0) ?
		    KAUTH_RESULT_ALLOW : KAUTH_RESULT_DENY);
		break;
	}

	/* no explicit result, so defer to others in the chain */
	return(KAUTH_RESULT_DEFER);
}

/*
 * ACL evaluator.
 *
 * Determines whether the credential has the requested rights for an object secured by the supplied
 * ACL.
 *
 * Evaluation proceeds from the top down, with access denied if any ACE denies any of the requested
 * rights, or granted if all of the requested rights are satisfied by the ACEs so far.
 */
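
#if 0
/*
 * Illustrative sketch only (not compiled in): how a caller might drive the
 * evaluator.  The helper name, its arguments and the error mapping are
 * hypothetical, and it assumes the evaluation context is a struct
 * kauth_acl_eval as declared in <sys/kauth.h>; the fields set here are the
 * ones kauth_acl_evaluate() consumes below.
 */
static int
example_acl_check(kauth_cred_t cred, kauth_acl_t acl, uint32_t rights, int is_owner)
{
	struct kauth_acl_eval eval;
	int error;

	bzero(&eval, sizeof(eval));
	eval.ae_requested = rights;			/* rights the caller wants */
	eval.ae_acl = &acl->acl_ace[0];			/* ACEs to walk, top down */
	eval.ae_count = acl->acl_entrycount;
	eval.ae_options = is_owner ? KAUTH_AEVAL_IS_OWNER : 0;
	/* ae_exp_g* would be set to the scope's generic-rights expansion masks */

	if ((error = kauth_acl_evaluate(cred, &eval)) != 0)
		return(error);
	/* ae_result is now ALLOW, DENY or DEFER; a real caller would fall
	 * back to other authorization checks on DEFER rather than allowing. */
	return((eval.ae_result == KAUTH_RESULT_ALLOW) ? 0 : EACCES);
}
#endif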
int
kauth_acl_evaluate(kauth_cred_t cred, kauth_acl_eval_t eval)
{
	int applies, error, i;
	kauth_ace_t ace;
	guid_t guid;
	uint32_t rights;
	int wkguid;

	/* always allowed to do nothing */
	if (eval->ae_requested == 0) {
		eval->ae_result = KAUTH_RESULT_ALLOW;
		return(0);
	}

	eval->ae_residual = eval->ae_requested;
	eval->ae_found_deny = FALSE;

	/*
	 * Get our guid for comparison purposes.
	 */
	if ((error = kauth_cred_getguid(cred, &guid)) != 0) {
		eval->ae_result = KAUTH_RESULT_DENY;
		KAUTH_DEBUG("    ACL - can't get credential GUID (%d), ACL denied", error);
		return(error);
	}

	KAUTH_DEBUG("    ACL - %d entries, initial residual %x", eval->ae_count, eval->ae_residual);
	for (i = 0, ace = eval->ae_acl; i < eval->ae_count; i++, ace++) {

		/*
		 * Skip inherit-only entries.
		 */
		if (ace->ace_flags & KAUTH_ACE_ONLY_INHERIT)
			continue;

		/*
		 * Expand generic rights, if appropriate.
		 */
		rights = ace->ace_rights;
		if (rights & KAUTH_ACE_GENERIC_ALL)
			rights |= eval->ae_exp_gall;
		if (rights & KAUTH_ACE_GENERIC_READ)
			rights |= eval->ae_exp_gread;
		if (rights & KAUTH_ACE_GENERIC_WRITE)
			rights |= eval->ae_exp_gwrite;
		if (rights & KAUTH_ACE_GENERIC_EXECUTE)
			rights |= eval->ae_exp_gexec;

		/*
		 * Determine whether this entry applies to the current request.  This
		 * saves us checking the GUID if the entry has nothing to do with what
		 * we're currently doing.
		 */
		switch(ace->ace_flags & KAUTH_ACE_KINDMASK) {
		case KAUTH_ACE_PERMIT:
			if (!(eval->ae_residual & rights))
				continue;
			break;
		case KAUTH_ACE_DENY:
			if (!(eval->ae_requested & rights))
				continue;
			eval->ae_found_deny = TRUE;
			break;
		default:
			/* we don't recognise this ACE, skip it */
			continue;
		}

		/*
		 * Verify whether this entry applies to the credential.
		 */
		wkguid = kauth_wellknown_guid(&ace->ace_applicable);
		switch(wkguid) {
		case KAUTH_WKG_OWNER:
			applies = eval->ae_options & KAUTH_AEVAL_IS_OWNER;
			break;
		case KAUTH_WKG_GROUP:
			applies = eval->ae_options & KAUTH_AEVAL_IN_GROUP;
			break;
		/* we short-circuit these here rather than wasting time calling the group membership code */
		case KAUTH_WKG_EVERYBODY:
			applies = 1;
			break;
		case KAUTH_WKG_NOBODY:
			applies = 0;
			break;

		default:
			/* check to see whether it's exactly us, or a group we are a member of */
			applies = kauth_guid_equal(&guid, &ace->ace_applicable);
			KAUTH_DEBUG("    ACL - ACE applicable " K_UUID_FMT " caller " K_UUID_FMT " %smatched",
			    K_UUID_ARG(ace->ace_applicable), K_UUID_ARG(guid), applies ? "" : "not ");

			if (!applies) {
				error = kauth_cred_ismember_guid(cred, &ace->ace_applicable, &applies);
				/*
				 * If we can't resolve group membership, we have to limit misbehaviour.
				 * If the ACE is an 'allow' ACE, assume the cred is not a member (avoid
				 * granting excess access).  If the ACE is a 'deny' ACE, assume the cred
				 * is a member (avoid failing to deny).
				 */
				if (error != 0) {
					KAUTH_DEBUG("    ACL[%d] - can't get membership, making pessimistic assumption", i);
					switch(ace->ace_flags & KAUTH_ACE_KINDMASK) {
					case KAUTH_ACE_PERMIT:
						applies = 0;
						break;
					case KAUTH_ACE_DENY:
						applies = 1;
						break;
					}
				} else {
					KAUTH_DEBUG("    ACL - %s group member", applies ? "is" : "not");
				}
			} else {
				KAUTH_DEBUG("    ACL - entry matches caller");
			}
		}
		if (!applies)
			continue;

		/*
		 * Apply ACE to outstanding rights.
		 */
		switch(ace->ace_flags & KAUTH_ACE_KINDMASK) {
		case KAUTH_ACE_PERMIT:
			/* satisfy any rights that this ACE grants */
			eval->ae_residual = eval->ae_residual & ~rights;
			KAUTH_DEBUG("    ACL[%d] - rights %x leave residual %x", i, rights, eval->ae_residual);
			/* all rights satisfied? */
			if (eval->ae_residual == 0) {
				eval->ae_result = KAUTH_RESULT_ALLOW;
				return(0);
			}
			break;
		case KAUTH_ACE_DENY:
			/* deny the request if any of the requested rights is denied */
			if (eval->ae_requested & rights) {
				KAUTH_DEBUG("    ACL[%d] - denying based on %x", i, rights);
				eval->ae_result = KAUTH_RESULT_DENY;
				return(0);
			}
			break;
		default:
			KAUTH_DEBUG("    ACL - unknown entry kind %d", ace->ace_flags & KAUTH_ACE_KINDMASK);
			break;
		}
	}
	/* if not permitted, defer to other modes of authorisation */
	eval->ae_result = KAUTH_RESULT_DEFER;
	return(0);
}

/*
 * Perform ACL inheritance and umask-ACL handling.
 *
 * Entries are inherited from the ACL on dvp.  A caller-supplied
 * ACL is in initial, and the result is output into product.
 * If the process has a umask ACL and one is not supplied, we use
 * the umask ACL.
 * If isdir is set, the resultant ACL is for a directory, otherwise it is for a file.
 */
int
kauth_acl_inherit(vnode_t dvp, kauth_acl_t initial, kauth_acl_t *product, int isdir, vfs_context_t ctx)
{
	int	entries, error, index;
	unsigned int i;
	struct vnode_attr dva;
	kauth_acl_t inherit, result;

	/*
	 * Fetch the ACL from the directory.  This should never fail.
	 * Note that we don't manage inheritance when the remote server is
	 * doing authorization, since this means server enforcement of
	 * inheritance semantics; we just want to compose the initial
	 * ACL and any inherited ACE entries from the container object.
	 *
	 * XXX TODO: <rdar://3634665> wants a "umask ACL" from the process.
	 */
	inherit = NULL;
	if ((dvp != NULL) && !vfs_authopaque(vnode_mount(dvp))) {
		VATTR_INIT(&dva);
		VATTR_WANTED(&dva, va_acl);
		if ((error = vnode_getattr(dvp, &dva, ctx)) != 0) {
			KAUTH_DEBUG("    ERROR - could not get parent directory ACL for inheritance");
			return(error);
		}
		if (VATTR_IS_SUPPORTED(&dva, va_acl))
			inherit = dva.va_acl;
	}

	/*
	 * Compute the number of entries in the result ACL by scanning the
	 * input lists.
	 */
	entries = 0;
	if (inherit != NULL) {
		for (i = 0; i < inherit->acl_entrycount; i++) {
			if (inherit->acl_ace[i].ace_flags & (isdir ? KAUTH_ACE_DIRECTORY_INHERIT : KAUTH_ACE_FILE_INHERIT))
				entries++;
		}
	}

	if (initial == NULL) {
		/*
		 * XXX 3634665 TODO: if the initial ACL is not specified by
		 * XXX the caller, fetch the umask ACL from the process,
		 * and use it in place of "initial".
		 */
	}

	if (initial != NULL) {
		if (initial->acl_entrycount != KAUTH_FILESEC_NOACL)
			entries += initial->acl_entrycount;
		else
			initial = NULL;
	}

	/*
	 * If there is no initial ACL, and no inheritable entries, the
	 * object should be created with no ACL at all.
	 * Note that this differs from the case where the initial ACL
	 * is empty, in which case the object must also have an empty ACL.
	 */
	if ((entries == 0) && (initial == NULL)) {
		*product = NULL;
		error = 0;
		goto out;
	}

	/*
	 * Allocate the result buffer.
	 */
	if ((result = kauth_acl_alloc(entries)) == NULL) {
		KAUTH_DEBUG("    ERROR - could not allocate %d-entry result buffer for inherited ACL", entries);
		error = ENOMEM;
		goto out;
	}

	/*
	 * Composition is simply:
	 *  - initial
	 *  - inherited
	 */
	index = 0;
	if (initial != NULL) {
		for (i = 0; i < initial->acl_entrycount; i++)
			result->acl_ace[index++] = initial->acl_ace[i];
		KAUTH_DEBUG("    INHERIT - applied %d initial entries", index);
	}
	if (inherit != NULL) {
		for (i = 0; i < inherit->acl_entrycount; i++) {
			/*
			 * Inherit onto this object?  We inherit only if
			 * the target object is a container object and the
			 * KAUTH_ACE_DIRECTORY_INHERIT bit is set, OR if
			 * the target object is not a container, and
			 * the KAUTH_ACE_FILE_INHERIT bit is set.
			 */
			if (inherit->acl_ace[i].ace_flags & (isdir ? KAUTH_ACE_DIRECTORY_INHERIT : KAUTH_ACE_FILE_INHERIT)) {
				result->acl_ace[index] = inherit->acl_ace[i];
				result->acl_ace[index].ace_flags |= KAUTH_ACE_INHERITED;
				/*
				 * We do not re-inherit inheritance flags
				 * if the ACE from the container has a
				 * KAUTH_ACE_LIMIT_INHERIT, OR if the new
				 * object is not itself a container (since
				 * inheritance is always container-based).
				 */
				if ((result->acl_ace[index].ace_flags & KAUTH_ACE_LIMIT_INHERIT) || !isdir) {
					result->acl_ace[index].ace_flags &=
					    ~(KAUTH_ACE_INHERIT_CONTROL_FLAGS);
				}
				index++;
			}
		}
	}
	result->acl_entrycount = index;
	*product = result;
	KAUTH_DEBUG("    INHERIT - product ACL has %d entries", index);
	error = 0;
out:
	if (inherit != NULL)
		kauth_acl_free(inherit);
	return(error);
}

/*
 * Optimistically copy in a kauth_filesec structure
 *
 * Parameters:	xsecurity		user space kauth_filesec_t
 *		xsecdestpp		pointer to kauth_filesec_t to be
 *					modified to contain a pointer to
 *					an allocated copy of the user
 *					space argument
 *
 * Returns:	0			Success
 *		ENOMEM			Insufficient memory for the copy.
 *		EINVAL			The user space data was invalid, or
 *					there were too many ACE entries.
 *		EFAULT			The user space address was invalid;
 *					this may mean 'fsec_entrycount' in
 *					the user copy is corrupt/incorrect.
 *
 * Implicit returns: xsecdestpp, modified (only if successful!)
 *
 * Notes:	The returned kauth_filesec_t is in host byte order
 *
 *		The caller is responsible for freeing the returned
 *		kauth_filesec_t in the success case using the function
 *		kauth_filesec_free()
 *
 *		Our largest initial guess is 32; this needs to move to
 *		a manifest constant in <sys/kauth.h>.
 */
int
kauth_copyinfilesec(user_addr_t xsecurity, kauth_filesec_t *xsecdestpp)
{
	user_addr_t uaddr, known_bound;
	int error;
	kauth_filesec_t fsec;
	u_int32_t count;
	size_t copysize;

	error = 0;
	fsec = NULL;

	/*
	 * Make a guess at the size of the filesec.  We start with the base
	 * pointer, and look at how much room is left on the page, clipped
	 * to a sensible upper bound.  If it turns out this isn't enough,
	 * we'll size based on the actual ACL contents and come back again.
	 *
	 * The upper bound must be less than KAUTH_ACL_MAX_ENTRIES.  The
	 * value here is fairly arbitrary.  It's ok to have a zero count.
	 */
	known_bound = xsecurity + sizeof(struct kauth_filesec);
	uaddr = mach_vm_round_page(known_bound);
	count = (uaddr - known_bound) / sizeof(struct kauth_ace);
	if (count > 32)
		count = 32;
restart:
	if ((fsec = kauth_filesec_alloc(count)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	copysize = KAUTH_FILESEC_SIZE(count);
	if ((error = copyin(xsecurity, (caddr_t)fsec, copysize)) != 0)
		goto out;

	/* validate the filesec header */
	if (fsec->fsec_magic != KAUTH_FILESEC_MAGIC) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Is there an ACL payload, and is it too big?
	 */
	if ((fsec->fsec_entrycount != KAUTH_FILESEC_NOACL) &&
	    (fsec->fsec_entrycount > count)) {
		if (fsec->fsec_entrycount > KAUTH_ACL_MAX_ENTRIES) {
			/* XXX This should be E2BIG */
			error = EINVAL;
			goto out;
		}
		count = fsec->fsec_entrycount;
		kauth_filesec_free(fsec);
		goto restart;
	}

out:
	if (error) {
		if (fsec)
			kauth_filesec_free(fsec);
	} else {
		*xsecdestpp = fsec;
	}
	return(error);
}

/*
 * Allocate a block of memory containing a filesec structure, immediately
 * followed by 'count' kauth_ace structures.
 *
 * Parameters:	count			Number of kauth_ace structures needed
 *
 * Returns:	!NULL			A pointer to the allocated block
 *		NULL			Invalid 'count' or insufficient memory
 *
 * Notes:	Returned memory area assumes that the structures are packed
 *		densely, so this function may only be used by code that also
 *		assumes no padding following structures.
 *
 *		The returned structure must be freed by the caller using the
 *		function kauth_filesec_free(), in case we decide to use an
 *		allocation mechanism that is aware of the object size at some
 *		point, since the object size is only available by introspecting
 *		the object itself.
 */
kauth_filesec_t
kauth_filesec_alloc(int count)
{
	kauth_filesec_t	fsp;

	/* if the caller hasn't given us a valid size hint, assume the worst */
	if ((count < 0) || (count > KAUTH_ACL_MAX_ENTRIES))
		return(NULL);

	MALLOC(fsp, kauth_filesec_t, KAUTH_FILESEC_SIZE(count), M_KAUTH, M_WAITOK);
	if (fsp != NULL) {
		fsp->fsec_magic = KAUTH_FILESEC_MAGIC;
		fsp->fsec_owner = kauth_null_guid;
		fsp->fsec_group = kauth_null_guid;
		fsp->fsec_entrycount = KAUTH_FILESEC_NOACL;
		fsp->fsec_flags = 0;
	}
	return(fsp);
}

/*
 * Free a kauth_filesec_t that was previously allocated, either by a direct
 * call to kauth_filesec_alloc() or by calling a function that calls it.
 *
 * Parameters:	fsp			kauth_filesec_t to free
 *
 * Returns:	(void)
 *
 * Notes:	The kauth_filesec_t to be freed is assumed to be in host
 *		byte order so that this function can introspect it in the
 *		future to determine its size, if necessary.
 */
void
kauth_filesec_free(kauth_filesec_t fsp)
{
#ifdef KAUTH_DEBUG_ENABLE
	if (fsp == KAUTH_FILESEC_NONE)
		panic("freeing KAUTH_FILESEC_NONE");
	if (fsp == KAUTH_FILESEC_WANTED)
		panic("freeing KAUTH_FILESEC_WANTED");
#endif
	FREE(fsp, M_KAUTH);
}

/*
 * Set the endianness of a filesec and an ACL; if 'acl' is NULL, use the
 * ACL interior to 'fsec' instead.  If the endianness doesn't change, then
 * this function will have no effect.
 *
 * Parameters:	kendian			The endianness to set; this is either
 *					KAUTH_ENDIAN_HOST or KAUTH_ENDIAN_DISK.
 *		fsec			The filesec to convert.
 *		acl			The ACL to convert (optional)
 *
 * Returns:	(void)
 *
 * Notes:	We use ntohl() because it has a transitive property on Intel
 *		machines and no effect on PPC machines.  This guarantees us
 *		that the swapping only occurs if the endianness is wrong.
 */
void
kauth_filesec_acl_setendian(int kendian, kauth_filesec_t fsec, kauth_acl_t acl)
{
	uint32_t	compare_magic = KAUTH_FILESEC_MAGIC;
	uint32_t	invert_magic = ntohl(KAUTH_FILESEC_MAGIC);
	uint32_t	compare_acl_entrycount;
	uint32_t	i;

	if (compare_magic == invert_magic)
		return;

	/* If no ACL, use ACL interior to 'fsec' instead */
	if (acl == NULL)
		acl = &fsec->fsec_acl;

	compare_acl_entrycount = acl->acl_entrycount;

	/*
	 * Only convert what needs to be converted, and only if the arguments
	 * are valid.  The following switch and tests effectively reject
	 * conversions on invalid magic numbers as a desirable side effect.
	 */
	switch(kendian) {
	case KAUTH_ENDIAN_HOST:		/* not in host, convert to host */
		if (fsec->fsec_magic != invert_magic)
			return;
		/* acl_entrycount is byteswapped */
		compare_acl_entrycount = ntohl(acl->acl_entrycount);
		break;
	case KAUTH_ENDIAN_DISK:		/* not in disk, convert to disk */
		if (fsec->fsec_magic != compare_magic)
			return;
		break;
	default:			/* bad argument */
		return;
	}

	/* We are go for conversion */
	fsec->fsec_magic = ntohl(fsec->fsec_magic);
	acl->acl_entrycount = ntohl(acl->acl_entrycount);
	if (compare_acl_entrycount != KAUTH_FILESEC_NOACL) {
		acl->acl_flags = ntohl(acl->acl_flags);

		/* swap ACE rights and flags */
		for (i = 0; i < compare_acl_entrycount; i++) {
			acl->acl_ace[i].ace_flags = ntohl(acl->acl_ace[i].ace_flags);
			acl->acl_ace[i].ace_rights = ntohl(acl->acl_ace[i].ace_rights);
		}
	}
}


/*
 * Allocate an ACL buffer.
 */
kauth_acl_t
kauth_acl_alloc(int count)
{
	kauth_acl_t	aclp;

	/* if the caller hasn't given us a valid size hint, assume the worst */
	if ((count < 0) || (count > KAUTH_ACL_MAX_ENTRIES))
		return(NULL);

	MALLOC(aclp, kauth_acl_t, KAUTH_ACL_SIZE(count), M_KAUTH, M_WAITOK);
	if (aclp != NULL) {
		aclp->acl_entrycount = 0;
		aclp->acl_flags = 0;
	}
	return(aclp);
}

void
kauth_acl_free(kauth_acl_t aclp)
{
	FREE(aclp, M_KAUTH);
}


/*
 * WARNING - caller must hold KAUTH_SCOPELOCK
 */
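/*
 * Install the listener in the first free slot of the scope's static listener
 * table, copying its callback and idata into the kauth_local_listener entry,
 * and mark the scope as having listeners.  Returns 0 on success, or ENOSPC
 * if all KAUTH_SCOPE_MAX_LISTENERS slots are in use.
 */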
static int kauth_add_callback_to_scope(kauth_scope_t sp, kauth_listener_t klp)
{
	int		i;

	for (i = 0; i < KAUTH_SCOPE_MAX_LISTENERS; i++) {
		if (sp->ks_listeners[i].kll_listenerp == NULL) {
			sp->ks_listeners[i].kll_callback = klp->kl_callback;
			sp->ks_listeners[i].kll_idata = klp->kl_idata;
			sp->ks_listeners[i].kll_listenerp = klp;
			sp->ks_flags |= KS_F_HAS_LISTENERS;
			return(0);
		}
	}
	return(ENOSPC);
}