subr_acl_posix1e.c: diff between revisions 107839 and 107849
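The only substantive change between these two revisions is mechanical: every syscall handler in this file stops reading its arguments through the SCARG() macro and dereferences the uap argument structure directly (for example, SCARG(uap, path) becomes uap->path). Below is an illustrative sketch of the two styles; it is not part of the diff, and both the argument-struct layout and the SCARG() expansion shown are simplifying assumptions (the real definitions live in the kernel headers and can differ per architecture).

    /*
     * Illustrative sketch only -- not part of subr_acl_posix1e.c.
     * The struct below is a simplified stand-in for the real
     * __acl_get_file_args, and SCARG() is assumed to expand to a
     * plain member access, as on architectures without argument
     * padding.
     */
    #include <stdio.h>

    struct acl_get_file_args_sketch {
            const char      *path;
            int             type;
            void            *aclp;
    };

    #define SCARG(p, field) ((p)->field)    /* assumed expansion */

    int
    main(void)
    {
            struct acl_get_file_args_sketch args = { "/tmp/f", 0, NULL };
            struct acl_get_file_args_sketch *uap = &args;

            /* Argument access as written in revision 107839: */
            printf("old style: %s\n", SCARG(uap, path));

            /* Argument access as written in revision 107849: */
            printf("new style: %s\n", uap->path);

            return (0);
    }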
1/*-
2 * Copyright (c) 1999-2001 Robert N. M. Watson
3 * All rights reserved.
4 *
5 * This software was developed by Robert Watson for the TrustedBSD Project.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD: head/sys/kern/subr_acl_posix1e.c 107839 2002-12-13 22:41:47Z alfred $
28 * $FreeBSD: head/sys/kern/subr_acl_posix1e.c 107849 2002-12-14 01:56:26Z alfred $
29 */
30/*
31 * Developed by the TrustedBSD Project.
32 * Support for POSIX.1e access control lists.
33 */
34
35#include "opt_mac.h"
36
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/sysproto.h>
40#include <sys/kernel.h>
41#include <sys/mac.h>
42#include <sys/malloc.h>
43#include <sys/vnode.h>
44#include <sys/lock.h>
45#include <sys/mutex.h>
46#include <sys/namei.h>
47#include <sys/file.h>
48#include <sys/proc.h>
49#include <sys/sysent.h>
50#include <sys/errno.h>
51#include <sys/stat.h>
52#include <sys/acl.h>
53
54MALLOC_DEFINE(M_ACL, "acl", "access control list");
55
56static int vacl_set_acl(struct thread *td, struct vnode *vp,
57 acl_type_t type, struct acl *aclp);
58static int vacl_get_acl(struct thread *td, struct vnode *vp,
59 acl_type_t type, struct acl *aclp);
60static int vacl_aclcheck(struct thread *td, struct vnode *vp,
61 acl_type_t type, struct acl *aclp);
62
63/*
64 * Implement a version of vaccess() that understands POSIX.1e ACL semantics.
65 * Return 0 on success, else an errno value. Should be merged into
66 * vaccess() eventually.
67 */
68int
69vaccess_acl_posix1e(enum vtype type, uid_t file_uid, gid_t file_gid,
70 struct acl *acl, mode_t acc_mode, struct ucred *cred, int *privused)
71{
72 struct acl_entry *acl_other, *acl_mask;
73 mode_t dac_granted;
74 mode_t cap_granted;
75 mode_t acl_mask_granted;
76 int group_matched, i;
77
78 /*
79 * Look for a normal, non-privileged way to access the file/directory
80 * as requested. If it exists, go with that. Otherwise, attempt
81 * to use privileges granted via cap_granted. In some cases,
82 * which privileges to use may be ambiguous due to "best match",
83 * in which case fall back on first match for the time being.
84 */
85 if (privused != NULL)
86 *privused = 0;
87
88 /*
89 * Determine privileges now, but don't apply until we've found
90 * a DAC entry that matches but has failed to allow access.
91 */
92#ifndef CAPABILITIES
93 if (suser_cred(cred, PRISON_ROOT) == 0)
94 cap_granted = VALLPERM;
95 else
96 cap_granted = 0;
97#else
98 cap_granted = 0;
99
100 if (type == VDIR) {
101 if ((acc_mode & VEXEC) && !cap_check(cred, NULL,
102 CAP_DAC_READ_SEARCH, PRISON_ROOT))
103 cap_granted |= VEXEC;
104 } else {
105 if ((acc_mode & VEXEC) && !cap_check(cred, NULL,
106 CAP_DAC_EXECUTE, PRISON_ROOT))
107 cap_granted |= VEXEC;
108 }
109
110 if ((acc_mode & VREAD) && !cap_check(cred, NULL, CAP_DAC_READ_SEARCH,
111 PRISON_ROOT))
112 cap_granted |= VREAD;
113
114 if (((acc_mode & VWRITE) || (acc_mode & VAPPEND)) &&
115 !cap_check(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT))
116 cap_granted |= (VWRITE | VAPPEND);
117
118 if ((acc_mode & VADMIN) && !cap_check(cred, NULL, CAP_FOWNER,
119 PRISON_ROOT))
120 cap_granted |= VADMIN;
121#endif /* CAPABILITIES */
122
123 /*
124 * The owner matches if the effective uid associated with the
125 * credential matches that of the ACL_USER_OBJ entry. While we're
126 * doing the first scan, also cache the location of the ACL_MASK
127 * and ACL_OTHER entries, preventing some future iterations.
128 */
129 acl_mask = acl_other = NULL;
130 for (i = 0; i < acl->acl_cnt; i++) {
131 switch (acl->acl_entry[i].ae_tag) {
132 case ACL_USER_OBJ:
133 if (file_uid != cred->cr_uid)
134 break;
135 dac_granted = 0;
136 dac_granted |= VADMIN;
137 if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
138 dac_granted |= VEXEC;
139 if (acl->acl_entry[i].ae_perm & ACL_READ)
140 dac_granted |= VREAD;
141 if (acl->acl_entry[i].ae_perm & ACL_WRITE)
142 dac_granted |= (VWRITE | VAPPEND);
143 if ((acc_mode & dac_granted) == acc_mode)
144 return (0);
145 if ((acc_mode & (dac_granted | cap_granted)) ==
146 acc_mode) {
147 if (privused != NULL)
148 *privused = 1;
149 return (0);
150 }
151 goto error;
152
153 case ACL_MASK:
154 acl_mask = &acl->acl_entry[i];
155 break;
156
157 case ACL_OTHER:
158 acl_other = &acl->acl_entry[i];
159 break;
160
161 default:
162 break;
163 }
164 }
165
166 /*
167 * An ACL_OTHER entry should always exist in a valid access
168 * ACL. If it doesn't, then generate a serious failure. For now,
169 * this means a debugging message and EPERM, but in the future
170 * should probably be a panic.
171 */
172 if (acl_other == NULL) {
173 /*
174 * XXX This should never happen
175 */
176 printf("vaccess_acl_posix1e: ACL_OTHER missing\n");
177 return (EPERM);
178 }
179
180 /*
181 * Checks against ACL_USER, ACL_GROUP_OBJ, and ACL_GROUP fields
182 * are masked by an ACL_MASK entry, if any. As such, first identify
183 * the ACL_MASK field, then iterate through identifying potential
184 * user matches, then group matches. If there is no ACL_MASK,
185 * assume that the mask allows all requests to succeed.
186 */
187 if (acl_mask != NULL) {
188 acl_mask_granted = 0;
189 if (acl_mask->ae_perm & ACL_EXECUTE)
190 acl_mask_granted |= VEXEC;
191 if (acl_mask->ae_perm & ACL_READ)
192 acl_mask_granted |= VREAD;
193 if (acl_mask->ae_perm & ACL_WRITE)
194 acl_mask_granted |= (VWRITE | VAPPEND);
195 } else
196 acl_mask_granted = VEXEC | VREAD | VWRITE | VAPPEND;
197
198 /*
199 * Iterate through user ACL entries. Do checks twice, first
200 * without privilege, and then if a match is found but failed,
201 * a second time with privilege.
202 */
203
204 /*
205 * Check ACL_USER ACL entries.
206 */
207 for (i = 0; i < acl->acl_cnt; i++) {
208 switch (acl->acl_entry[i].ae_tag) {
209 case ACL_USER:
210 if (acl->acl_entry[i].ae_id != cred->cr_uid)
211 break;
212 dac_granted = 0;
213 if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
214 dac_granted |= VEXEC;
215 if (acl->acl_entry[i].ae_perm & ACL_READ)
216 dac_granted |= VREAD;
217 if (acl->acl_entry[i].ae_perm & ACL_WRITE)
218 dac_granted |= (VWRITE | VAPPEND);
219 dac_granted &= acl_mask_granted;
220 if ((acc_mode & dac_granted) == acc_mode)
221 return (0);
222 if ((acc_mode & (dac_granted | cap_granted)) !=
223 acc_mode)
224 goto error;
225
226 if (privused != NULL)
227 *privused = 1;
228 return (0);
229 }
230 }
231
232 /*
233 * Group match is best-match, not first-match, so find a
234 * "best" match. Iterate across, testing each potential group
235 * match. Make sure we keep track of whether we found a match
236 * or not, so that we know if we should try again with any
237 * available privilege, or if we should move on to ACL_OTHER.
238 */
239 group_matched = 0;
240 for (i = 0; i < acl->acl_cnt; i++) {
241 switch (acl->acl_entry[i].ae_tag) {
242 case ACL_GROUP_OBJ:
243 if (!groupmember(file_gid, cred))
244 break;
245 dac_granted = 0;
246 if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
247 dac_granted |= VEXEC;
248 if (acl->acl_entry[i].ae_perm & ACL_READ)
249 dac_granted |= VREAD;
250 if (acl->acl_entry[i].ae_perm & ACL_WRITE)
251 dac_granted |= (VWRITE | VAPPEND);
252 dac_granted &= acl_mask_granted;
253
254 if ((acc_mode & dac_granted) == acc_mode)
255 return (0);
256
257 group_matched = 1;
258 break;
259
260 case ACL_GROUP:
261 if (!groupmember(acl->acl_entry[i].ae_id, cred))
262 break;
263 dac_granted = 0;
264 if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
265 dac_granted |= VEXEC;
266 if (acl->acl_entry[i].ae_perm & ACL_READ)
267 dac_granted |= VREAD;
268 if (acl->acl_entry[i].ae_perm & ACL_WRITE)
269 dac_granted |= (VWRITE | VAPPEND);
270 dac_granted &= acl_mask_granted;
271
272 if ((acc_mode & dac_granted) == acc_mode)
273 return (0);
274
275 group_matched = 1;
276 break;
277
278 default:
279 break;
280 }
281 }
282
283 if (group_matched == 1) {
284 /*
285 * There was a match, but it did not grant rights via
286 * pure DAC. Try again, this time with privilege.
287 */
288 for (i = 0; i < acl->acl_cnt; i++) {
289 switch (acl->acl_entry[i].ae_tag) {
290 case ACL_GROUP_OBJ:
291 if (!groupmember(file_gid, cred))
292 break;
293 dac_granted = 0;
294 if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
295 dac_granted |= VEXEC;
296 if (acl->acl_entry[i].ae_perm & ACL_READ)
297 dac_granted |= VREAD;
298 if (acl->acl_entry[i].ae_perm & ACL_WRITE)
299 dac_granted |= (VWRITE | VAPPEND);
300 dac_granted &= acl_mask_granted;
301
302 if ((acc_mode & (dac_granted | cap_granted)) !=
303 acc_mode)
304 break;
305
306 if (privused != NULL)
307 *privused = 1;
308 return (0);
309
310 case ACL_GROUP:
311 if (!groupmember(acl->acl_entry[i].ae_id,
312 cred))
313 break;
314 dac_granted = 0;
315 if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
316 dac_granted |= VEXEC;
317 if (acl->acl_entry[i].ae_perm & ACL_READ)
318 dac_granted |= VREAD;
319 if (acl->acl_entry[i].ae_perm & ACL_WRITE)
320 dac_granted |= (VWRITE | VAPPEND);
321 dac_granted &= acl_mask_granted;
322
323 if ((acc_mode & (dac_granted | cap_granted)) !=
324 acc_mode)
325 break;
326
327 if (privused != NULL)
328 *privused = 1;
329 return (0);
330
331 default:
332 break;
333 }
334 }
335 /*
336 * Even with privilege, group membership was not sufficient.
337 * Return failure.
338 */
339 goto error;
340 }
341
342 /*
343 * Fall back on ACL_OTHER. ACL_MASK is not applied to ACL_OTHER.
344 */
345 dac_granted = 0;
346 if (acl_other->ae_perm & ACL_EXECUTE)
347 dac_granted |= VEXEC;
348 if (acl_other->ae_perm & ACL_READ)
349 dac_granted |= VREAD;
350 if (acl_other->ae_perm & ACL_WRITE)
351 dac_granted |= (VWRITE | VAPPEND);
352
353 if ((acc_mode & dac_granted) == acc_mode)
354 return (0);
355 if ((acc_mode & (dac_granted | cap_granted)) == acc_mode) {
356 if (privused != NULL)
357 *privused = 1;
358 return (0);
359 }
360
361error:
362 return ((acc_mode & VADMIN) ? EPERM : EACCES);
363}
364
365/*
366 * For the purposes of filesystems maintaining the _OBJ entries in an
367 * inode with a mode_t field, this routine converts a mode_t entry
368 * to an acl_perm_t.
369 */
370acl_perm_t
371acl_posix1e_mode_to_perm(acl_tag_t tag, mode_t mode)
372{
373 acl_perm_t perm = 0;
374
375 switch(tag) {
376 case ACL_USER_OBJ:
377 if (mode & S_IXUSR)
378 perm |= ACL_EXECUTE;
379 if (mode & S_IRUSR)
380 perm |= ACL_READ;
381 if (mode & S_IWUSR)
382 perm |= ACL_WRITE;
383 return (perm);
384
385 case ACL_GROUP_OBJ:
386 if (mode & S_IXGRP)
387 perm |= ACL_EXECUTE;
388 if (mode & S_IRGRP)
389 perm |= ACL_READ;
390 if (mode & S_IWGRP)
391 perm |= ACL_WRITE;
392 return (perm);
393
394 case ACL_OTHER:
395 if (mode & S_IXOTH)
396 perm |= ACL_EXECUTE;
397 if (mode & S_IROTH)
398 perm |= ACL_READ;
399 if (mode & S_IWOTH)
400 perm |= ACL_WRITE;
401 return (perm);
402
403 default:
404 printf("acl_posix1e_mode_to_perm: invalid tag (%d)\n", tag);
405 return (0);
406 }
407}
408
409/*
410 * Given inode information (uid, gid, mode), return an acl entry of the
411 * appropriate type.
412 */
413struct acl_entry
414acl_posix1e_mode_to_entry(acl_tag_t tag, uid_t uid, gid_t gid, mode_t mode)
415{
416 struct acl_entry acl_entry;
417
418 acl_entry.ae_tag = tag;
419 acl_entry.ae_perm = acl_posix1e_mode_to_perm(tag, mode);
420 switch(tag) {
421 case ACL_USER_OBJ:
422 acl_entry.ae_id = uid;
423 break;
424
425 case ACL_GROUP_OBJ:
426 acl_entry.ae_id = gid;
427 break;
428
429 case ACL_OTHER:
430 acl_entry.ae_id = ACL_UNDEFINED_ID;
431 break;
432
433 default:
434 acl_entry.ae_id = ACL_UNDEFINED_ID;
435 printf("acl_posix1e_mode_to_entry: invalid tag (%d)\n", tag);
436 }
437
438 return (acl_entry);
439}
440
441/*
442 * Utility function to generate a file mode given appropriate ACL entries.
443 */
444mode_t
445acl_posix1e_perms_to_mode(struct acl_entry *acl_user_obj_entry,
446 struct acl_entry *acl_group_obj_entry, struct acl_entry *acl_other_entry)
447{
448 mode_t mode;
449
450 mode = 0;
451 if (acl_user_obj_entry->ae_perm & ACL_EXECUTE)
452 mode |= S_IXUSR;
453 if (acl_user_obj_entry->ae_perm & ACL_READ)
454 mode |= S_IRUSR;
455 if (acl_user_obj_entry->ae_perm & ACL_WRITE)
456 mode |= S_IWUSR;
457 if (acl_group_obj_entry->ae_perm & ACL_EXECUTE)
458 mode |= S_IXGRP;
459 if (acl_group_obj_entry->ae_perm & ACL_READ)
460 mode |= S_IRGRP;
461 if (acl_group_obj_entry->ae_perm & ACL_WRITE)
462 mode |= S_IWGRP;
463 if (acl_other_entry->ae_perm & ACL_EXECUTE)
464 mode |= S_IXOTH;
465 if (acl_other_entry->ae_perm & ACL_READ)
466 mode |= S_IROTH;
467 if (acl_other_entry->ae_perm & ACL_WRITE)
468 mode |= S_IWOTH;
469
470 return (mode);
471}
472
473/*
474 * Perform a syntactic check of the ACL, sufficient to allow an
475 * implementing filesystem to determine if it should accept this and
476 * rely on the POSIX.1e ACL properties.
477 */
478int
479acl_posix1e_check(struct acl *acl)
480{
481 int num_acl_user_obj, num_acl_user, num_acl_group_obj, num_acl_group;
482 int num_acl_mask, num_acl_other, i;
483
484 /*
485 * Verify that the number of entries does not exceed the maximum
486 * defined for acl_t.
487 * Verify that the correct number of various sorts of ae_tags are
488 * present:
489 * Exactly one ACL_USER_OBJ
490 * Exactly one ACL_GROUP_OBJ
491 * Exactly one ACL_OTHER
492 * If any ACL_USER or ACL_GROUP entries appear, then exactly one
493 * ACL_MASK entry must also appear.
494 * Verify that all ae_perm entries are in ACL_PERM_BITS.
495 * Verify all ae_tag entries are understood by this implementation.
496 * Note: Does not check for uniqueness of qualifier (ae_id) field.
497 */
498 num_acl_user_obj = num_acl_user = num_acl_group_obj = num_acl_group =
499 num_acl_mask = num_acl_other = 0;
500 if (acl->acl_cnt > ACL_MAX_ENTRIES || acl->acl_cnt < 0)
501 return (EINVAL);
502 for (i = 0; i < acl->acl_cnt; i++) {
503 /*
504 * Check for a valid tag.
505 */
506 switch(acl->acl_entry[i].ae_tag) {
507 case ACL_USER_OBJ:
508 acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
509 if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
510 return (EINVAL);
511 num_acl_user_obj++;
512 break;
513 case ACL_GROUP_OBJ:
514 acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
515 if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
516 return (EINVAL);
517 num_acl_group_obj++;
518 break;
519 case ACL_USER:
520 if (acl->acl_entry[i].ae_id == ACL_UNDEFINED_ID)
521 return (EINVAL);
522 num_acl_user++;
523 break;
524 case ACL_GROUP:
525 if (acl->acl_entry[i].ae_id == ACL_UNDEFINED_ID)
526 return (EINVAL);
527 num_acl_group++;
528 break;
529 case ACL_OTHER:
530 acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
531 if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
532 return (EINVAL);
533 num_acl_other++;
534 break;
535 case ACL_MASK:
536 acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
537 if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
538 return (EINVAL);
539 num_acl_mask++;
540 break;
541 default:
542 return (EINVAL);
543 }
544 /*
545 * Check for valid perm entries.
546 */
547 if ((acl->acl_entry[i].ae_perm | ACL_PERM_BITS) !=
548 ACL_PERM_BITS)
549 return (EINVAL);
550 }
551 if ((num_acl_user_obj != 1) || (num_acl_group_obj != 1) ||
552 (num_acl_other != 1) || (num_acl_mask != 0 && num_acl_mask != 1))
553 return (EINVAL);
554 if (((num_acl_group != 0) || (num_acl_user != 0)) &&
555 (num_acl_mask != 1))
556 return (EINVAL);
557 return (0);
558}
559
560/*
561 * These calls wrap the real vnode operations, and are called by the
562 * syscall code once the syscall has converted the path or file
563 * descriptor to a vnode (unlocked). The aclp pointer is assumed
564 * still to point to userland, so this should not be consumed within
565 * the kernel except by syscall code. Other code should directly
566 * invoke VOP_{SET,GET}ACL.
567 */
568
569/*
570 * Given a vnode, set its ACL.
571 */
572static int
573vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
574 struct acl *aclp)
575{
576 struct acl inkernacl;
577 struct mount *mp;
578 int error;
579
580 error = copyin(aclp, &inkernacl, sizeof(struct acl));
581 if (error)
582 return(error);
583 error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
584 if (error != 0)
585 return (error);
586 VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
587 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
588#ifdef MAC
589 error = mac_check_vnode_setacl(td->td_ucred, vp, type, &inkernacl);
590 if (error != 0)
591 goto out;
592#endif
593 error = VOP_SETACL(vp, type, &inkernacl, td->td_ucred, td);
594#ifdef MAC
595out:
596#endif
597 VOP_UNLOCK(vp, 0, td);
598 vn_finished_write(mp);
599 return(error);
600}
601
602/*
603 * Given a vnode, get its ACL.
604 */
605static int
606vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
607 struct acl *aclp)
608{
609 struct acl inkernelacl;
610 int error;
611
612 VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
613 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
614#ifdef MAC
615 error = mac_check_vnode_getacl(td->td_ucred, vp, type);
616 if (error != 0)
617 goto out;
618#endif
619 error = VOP_GETACL(vp, type, &inkernelacl, td->td_ucred, td);
620#ifdef MAC
621out:
622#endif
623 VOP_UNLOCK(vp, 0, td);
624 if (error == 0)
625 error = copyout(&inkernelacl, aclp, sizeof(struct acl));
626 return (error);
627}
628
629/*
630 * Given a vnode, delete its ACL.
631 */
632static int
633vacl_delete(struct thread *td, struct vnode *vp, acl_type_t type)
634{
635 struct mount *mp;
636 int error;
637
638 error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
639 if (error)
640 return (error);
641 VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
642 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
643#ifdef MAC
644 error = mac_check_vnode_deleteacl(td->td_ucred, vp, type);
645 if (error)
646 goto out;
647#endif
648 error = VOP_SETACL(vp, type, 0, td->td_ucred, td);
649#ifdef MAC
650out:
651#endif
652 VOP_UNLOCK(vp, 0, td);
653 vn_finished_write(mp);
654 return (error);
655}
656
657/*
658 * Given a vnode, check whether an ACL is appropriate for it
659 */
660static int
661vacl_aclcheck(struct thread *td, struct vnode *vp, acl_type_t type,
662 struct acl *aclp)
663{
664 struct acl inkernelacl;
665 int error;
666
667 error = copyin(aclp, &inkernelacl, sizeof(struct acl));
668 if (error)
669 return(error);
670 error = VOP_ACLCHECK(vp, type, &inkernelacl, td->td_ucred, td);
671 return (error);
672}
673
674/*
675 * syscalls -- convert the path/fd to a vnode, and call vacl_whatever.
676 * Don't need to lock, as the vacl_ code will get/release any locks
677 * required.
678 */
679
680/*
681 * Given a file path, get an ACL for it
682 *
683 * MPSAFE
684 */
685int
686__acl_get_file(struct thread *td, struct __acl_get_file_args *uap)
687{
688 struct nameidata nd;
689 int error;
690
691 mtx_lock(&Giant);
692 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
692 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, td);
693 error = namei(&nd);
694 if (error == 0) {
695 error = vacl_get_acl(td, nd.ni_vp, SCARG(uap, type),
696 SCARG(uap, aclp));
695 error = vacl_get_acl(td, nd.ni_vp, uap->type,
696 uap->aclp);
697 NDFREE(&nd, 0);
698 }
699 mtx_unlock(&Giant);
700 return (error);
701}
702
703/*
704 * Given a file path, set an ACL for it
705 *
706 * MPSAFE
707 */
708int
709__acl_set_file(struct thread *td, struct __acl_set_file_args *uap)
710{
711 struct nameidata nd;
712 int error;
713
714 mtx_lock(&Giant);
715 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
715 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, td);
716 error = namei(&nd);
717 if (error == 0) {
718 error = vacl_set_acl(td, nd.ni_vp, SCARG(uap, type),
719 SCARG(uap, aclp));
718 error = vacl_set_acl(td, nd.ni_vp, uap->type,
719 uap->aclp);
720 NDFREE(&nd, 0);
721 }
722 mtx_unlock(&Giant);
723 return (error);
724}
725
726/*
727 * Given a file descriptor, get an ACL for it
728 *
729 * MPSAFE
730 */
731int
732__acl_get_fd(struct thread *td, struct __acl_get_fd_args *uap)
733{
734 struct file *fp;
735 int error;
736
737 mtx_lock(&Giant);
738 error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
738 error = getvnode(td->td_proc->p_fd, uap->filedes, &fp);
739 if (error == 0) {
740 error = vacl_get_acl(td, (struct vnode *)fp->f_data,
741 SCARG(uap, type), SCARG(uap, aclp));
741 uap->type, uap->aclp);
742 fdrop(fp, td);
743 }
744 mtx_unlock(&Giant);
745 return (error);
746}
747
748/*
749 * Given a file descriptor, set an ACL for it
750 *
751 * MPSAFE
752 */
753int
754__acl_set_fd(struct thread *td, struct __acl_set_fd_args *uap)
755{
756 struct file *fp;
757 int error;
758
759 mtx_lock(&Giant);
760 error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
760 error = getvnode(td->td_proc->p_fd, uap->filedes, &fp);
761 if (error == 0) {
762 error = vacl_set_acl(td, (struct vnode *)fp->f_data,
763 SCARG(uap, type), SCARG(uap, aclp));
763 uap->type, uap->aclp);
764 fdrop(fp, td);
765 }
766 mtx_unlock(&Giant);
767 return (error);
768}
769
770/*
771 * Given a file path, delete an ACL from it.
772 *
773 * MPSAFE
774 */
775int
776__acl_delete_file(struct thread *td, struct __acl_delete_file_args *uap)
777{
778 struct nameidata nd;
779 int error;
780
781 mtx_lock(&Giant);
782 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
782 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, td);
783 error = namei(&nd);
784 if (error == 0) {
785 error = vacl_delete(td, nd.ni_vp, SCARG(uap, type));
785 error = vacl_delete(td, nd.ni_vp, uap->type);
786 NDFREE(&nd, 0);
787 }
788 mtx_unlock(&Giant);
789 return (error);
790}
791
792/*
793 * Given a file path, delete an ACL from it.
794 *
795 * MPSAFE
796 */
797int
798__acl_delete_fd(struct thread *td, struct __acl_delete_fd_args *uap)
799{
800 struct file *fp;
801 int error;
802
803 mtx_lock(&Giant);
804 error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
804 error = getvnode(td->td_proc->p_fd, uap->filedes, &fp);
805 if (error == 0) {
806 error = vacl_delete(td, (struct vnode *)fp->f_data,
807 SCARG(uap, type));
807 uap->type);
808 fdrop(fp, td);
809 }
810 mtx_unlock(&Giant);
811 return (error);
812}
813
814/*
815 * Given a file path, check an ACL for it
816 *
817 * MPSAFE
818 */
819int
820__acl_aclcheck_file(struct thread *td, struct __acl_aclcheck_file_args *uap)
821{
822 struct nameidata nd;
823 int error;
824
825 mtx_lock(&Giant);
826 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
826 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, td);
827 error = namei(&nd);
828 if (error == 0) {
829 error = vacl_aclcheck(td, nd.ni_vp, SCARG(uap, type),
830 SCARG(uap, aclp));
829 error = vacl_aclcheck(td, nd.ni_vp, uap->type,
830 uap->aclp);
831 NDFREE(&nd, 0);
832 }
833 mtx_unlock(&Giant);
834 return (error);
835}
836
837/*
838 * Given a file descriptor, check an ACL for it
839 *
840 * MPSAFE
841 */
842int
843__acl_aclcheck_fd(struct thread *td, struct __acl_aclcheck_fd_args *uap)
844{
845 struct file *fp;
846 int error;
847
848 mtx_lock(&Giant);
849 error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
849 error = getvnode(td->td_proc->p_fd, uap->filedes, &fp);
850 if (error == 0) {
851 error = vacl_aclcheck(td, (struct vnode *)fp->f_data,
852 SCARG(uap, type), SCARG(uap, aclp));
852 uap->type, uap->aclp);
853 fdrop(fp, td);
854 }
855 mtx_unlock(&Giant);
856 return (error);
857}