vfs_acl.c (90202) vfs_acl.c (91406)
1/*-
2 * Copyright (c) 1999-2001 Robert N. M. Watson
3 * All rights reserved.
4 *
5 * This software was developed by Robert Watson for the TrustedBSD Project.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD: head/sys/kern/vfs_acl.c 90202 2002-02-04 17:58:15Z rwatson $
28 * $FreeBSD: head/sys/kern/vfs_acl.c 91406 2002-02-27 18:32:23Z jhb $
29 */
30/*
31 * Developed by the TrustedBSD Project.
32 * Support for POSIX.1e access control lists.
33 */
34
35#include "opt_cap.h"
36
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/sysproto.h>
40#include <sys/kernel.h>
41#include <sys/malloc.h>
42#include <sys/vnode.h>
43#include <sys/lock.h>
44#include <sys/mutex.h>
45#include <sys/namei.h>
46#include <sys/file.h>
47#include <sys/proc.h>
48#include <sys/sysent.h>
49#include <sys/errno.h>
50#include <sys/stat.h>
51#include <sys/acl.h>
52
53MALLOC_DEFINE(M_ACL, "acl", "access control list");
54
55static int vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
56 struct acl *aclp);
57static int vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
58 struct acl *aclp);
59static int vacl_aclcheck(struct thread *td, struct vnode *vp,
60 acl_type_t type, struct acl *aclp);
61
62/*
63 * Implement a version of vaccess() that understands POSIX.1e ACL semantics.
64 * Return 0 on success, else an errno value. Should be merged into
65 * vaccess() eventually.
66 */
67int
68vaccess_acl_posix1e(enum vtype type, uid_t file_uid, gid_t file_gid,
69 struct acl *acl, mode_t acc_mode, struct ucred *cred, int *privused)
70{
71 struct acl_entry *acl_other, *acl_mask;
72 mode_t dac_granted;
73 mode_t cap_granted;
74 mode_t acl_mask_granted;
75 int group_matched, i;
76
77 /*
78 * Look for a normal, non-privileged way to access the file/directory
79 * as requested. If it exists, go with that. Otherwise, attempt
80 * to use privileges granted via cap_granted. In some cases,
81 * which privileges to use may be ambiguous due to "best match",
82 * in which case fall back on first match for the time being.
83 */
84 if (privused != NULL)
85 *privused = 0;
86
87 /*
88 * Determine privileges now, but don't apply until we've found
89 * a DAC entry that matches but has failed to allow access.
90 */
91#ifndef CAPABILITIES
92 if (suser_xxx(cred, NULL, PRISON_ROOT) == 0)
93 cap_granted = (VEXEC | VREAD | VWRITE | VADMIN);
94 else
95 cap_granted = 0;
96#else
97 cap_granted = 0;
98
99 if (type == VDIR) {
100 if ((acc_mode & VEXEC) && !cap_check(cred, NULL,
101 CAP_DAC_READ_SEARCH, PRISON_ROOT))
102 cap_granted |= VEXEC;
103 } else {
104 if ((acc_mode & VEXEC) && !cap_check(cred, NULL,
105 CAP_DAC_EXECUTE, PRISON_ROOT))
106 cap_granted |= VEXEC;
107 }
108
109 if ((acc_mode & VREAD) && !cap_check(cred, NULL, CAP_DAC_READ_SEARCH,
110 PRISON_ROOT))
111 cap_granted |= VREAD;
112
113 if ((acc_mode & VWRITE) && !cap_check(cred, NULL, CAP_DAC_WRITE,
114 PRISON_ROOT))
115 cap_granted |= VWRITE;
116
117 if ((acc_mode & VADMIN) && !cap_check(cred, NULL, CAP_FOWNER,
118 PRISON_ROOT))
119 cap_granted |= VADMIN;
120#endif /* CAPABILITIES */
121
122 /*
123 * The owner matches if the effective uid associated with the
124 * credential matches that of the ACL_USER_OBJ entry. While we're
125 * doing the first scan, also cache the location of the ACL_MASK
126 * and ACL_OTHER entries, preventing some future iterations.
127 */
128 acl_mask = acl_other = NULL;
129 for (i = 0; i < acl->acl_cnt; i++) {
130 switch (acl->acl_entry[i].ae_tag) {
131 case ACL_USER_OBJ:
132 if (file_uid != cred->cr_uid)
133 break;
134 dac_granted = 0;
135 dac_granted |= VADMIN;
136 if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
137 dac_granted |= VEXEC;
138 if (acl->acl_entry[i].ae_perm & ACL_READ)
139 dac_granted |= VREAD;
140 if (acl->acl_entry[i].ae_perm & ACL_WRITE)
141 dac_granted |= VWRITE;
142 if ((acc_mode & dac_granted) == acc_mode)
143 return (0);
144 if ((acc_mode & (dac_granted | cap_granted)) ==
145 acc_mode) {
146 if (privused != NULL)
147 *privused = 1;
148 return (0);
149 }
150 goto error;
151
152 case ACL_MASK:
153 acl_mask = &acl->acl_entry[i];
154 break;
155
156 case ACL_OTHER:
157 acl_other = &acl->acl_entry[i];
158 break;
159
160 default:
161 }
162 }
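	/*
	 * Note that the owner case above never falls through: when the
	 * effective uid matches the ACL_USER_OBJ entry, access is decided
	 * right there (granted, granted via privilege, or refused through
	 * the error label), so the ACL_USER, group, and ACL_OTHER checks
	 * below apply only to non-owners.
	 */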
163
164 /*
165 * An ACL_OTHER entry should always exist in a valid access
166 * ACL. If it doesn't, then generate a serious failure. For now,
167 * this means a debugging message and EPERM, but in the future
168 * should probably be a panic.
169 */
170 if (acl_other == NULL) {
171 /*
172 * XXX This should never happen
173 */
174 printf("vaccess_acl_posix1e: ACL_OTHER missing\n");
175 return (EPERM);
176 }
177
178 /*
179 * Checks against ACL_USER, ACL_GROUP_OBJ, and ACL_GROUP fields
180 * are masked by an ACL_MASK entry, if any. As such, first identify
181 * the ACL_MASK field, then iterate through identifying potential
182 * user matches, then group matches. If there is no ACL_MASK,
183 * assume that the mask allows all requests to succeed.
184 */
185 if (acl_mask != NULL) {
186 acl_mask_granted = 0;
187 if (acl_mask->ae_perm & ACL_EXECUTE)
188 acl_mask_granted |= VEXEC;
189 if (acl_mask->ae_perm & ACL_READ)
190 acl_mask_granted |= VREAD;
191 if (acl_mask->ae_perm & ACL_WRITE)
192 acl_mask_granted |= VWRITE;
193 } else
194 acl_mask_granted = VEXEC | VREAD | VWRITE;
195
196 /*
197 * Iterate through user ACL entries. Do checks twice, first
198 * without privilege, and then if a match is found but failed,
199 * a second time with privilege.
200 */
201
202 /*
203 * Check ACL_USER ACL entries.
204 */
205 for (i = 0; i < acl->acl_cnt; i++) {
206 switch (acl->acl_entry[i].ae_tag) {
207 case ACL_USER:
208 if (acl->acl_entry[i].ae_id != cred->cr_uid)
209 break;
210 dac_granted = 0;
211 if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
212 dac_granted |= VEXEC;
213 if (acl->acl_entry[i].ae_perm & ACL_READ)
214 dac_granted |= VREAD;
215 if (acl->acl_entry[i].ae_perm & ACL_WRITE)
216 dac_granted |= VWRITE;
217 dac_granted &= acl_mask_granted;
218 if ((acc_mode & dac_granted) == acc_mode)
219 return (0);
220 if ((acc_mode & (dac_granted | cap_granted)) !=
221 acc_mode)
222 goto error;
223
224 if (privused != NULL)
225 *privused = 1;
226 return (0);
227 }
228 }
229
230 /*
231 * Group match is best-match, not first-match, so find a
232 * "best" match. Iterate across, testing each potential group
233 * match. Make sure we keep track of whether we found a match
234 * or not, so that we know if we should try again with any
235 * available privilege, or if we should move on to ACL_OTHER.
236 */
237 group_matched = 0;
238 for (i = 0; i < acl->acl_cnt; i++) {
239 switch (acl->acl_entry[i].ae_tag) {
240 case ACL_GROUP_OBJ:
241 if (!groupmember(file_gid, cred))
242 break;
243 dac_granted = 0;
244 if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
245 dac_granted |= VEXEC;
246 if (acl->acl_entry[i].ae_perm & ACL_READ)
247 dac_granted |= VREAD;
248 if (acl->acl_entry[i].ae_perm & ACL_WRITE)
249 dac_granted |= VWRITE;
250 dac_granted &= acl_mask_granted;
251
252 if ((acc_mode & dac_granted) == acc_mode)
253 return (0);
254
255 group_matched = 1;
256 break;
257
258 case ACL_GROUP:
259 if (!groupmember(acl->acl_entry[i].ae_id, cred))
260 break;
261 dac_granted = 0;
262 if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
263 dac_granted |= VEXEC;
264 if (acl->acl_entry[i].ae_perm & ACL_READ)
265 dac_granted |= VREAD;
266 if (acl->acl_entry[i].ae_perm & ACL_WRITE)
267 dac_granted |= VWRITE;
268 dac_granted &= acl_mask_granted;
269
270 if ((acc_mode & dac_granted) == acc_mode)
271 return (0);
272
273 group_matched = 1;
274 break;
275
276 default:
277 }
278 }
279
280 if (group_matched == 1) {
281 /*
282 * There was a match, but it did not grant rights via
283 * pure DAC. Try again, this time with privilege.
284 */
285 for (i = 0; i < acl->acl_cnt; i++) {
286 switch (acl->acl_entry[i].ae_tag) {
287 case ACL_GROUP_OBJ:
288 if (!groupmember(file_gid, cred))
289 break;
290 dac_granted = 0;
291 if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
292 dac_granted |= VEXEC;
293 if (acl->acl_entry[i].ae_perm & ACL_READ)
294 dac_granted |= VREAD;
295 if (acl->acl_entry[i].ae_perm & ACL_WRITE)
296 dac_granted |= VWRITE;
297 dac_granted &= acl_mask_granted;
298
299 if ((acc_mode & (dac_granted | cap_granted)) !=
300 acc_mode)
301 break;
302
303 if (privused != NULL)
304 *privused = 1;
305 return (0);
306
307 case ACL_GROUP:
308 if (!groupmember(acl->acl_entry[i].ae_id,
309 cred))
310 break;
311 dac_granted = 0;
312 if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
313 dac_granted |= VEXEC;
314 if (acl->acl_entry[i].ae_perm & ACL_READ)
315 dac_granted |= VREAD;
316 if (acl->acl_entry[i].ae_perm & ACL_WRITE)
317 dac_granted |= VWRITE;
318 dac_granted &= acl_mask_granted;
319
320 if ((acc_mode & (dac_granted | cap_granted)) !=
321 acc_mode)
322 break;
323
324 if (privused != NULL)
325 *privused = 1;
326 return (0);
327
328 default:
329 }
330 }
331 /*
332 * Even with privilege, group membership was not sufficient.
333 * Return failure.
334 */
335 goto error;
336 }
337
338 /*
339 * Fall back on ACL_OTHER. ACL_MASK is not applied to ACL_OTHER.
340 */
341 dac_granted = 0;
342 if (acl_other->ae_perm & ACL_EXECUTE)
343 dac_granted |= VEXEC;
344 if (acl_other->ae_perm & ACL_READ)
345 dac_granted |= VREAD;
346 if (acl_other->ae_perm & ACL_WRITE)
347 dac_granted |= VWRITE;
348
349 if ((acc_mode & dac_granted) == acc_mode)
350 return (0);
351 if ((acc_mode & (dac_granted | cap_granted)) == acc_mode) {
352 if (privused != NULL)
353 *privused = 1;
354 return (0);
355 }
356
357error:
358 return ((acc_mode & VADMIN) ? EPERM : EACCES);
359}
360
361/*
362 * For the purposes of file systems maintaining the _OBJ entries in an
363 * inode with a mode_t field, this routine converts a mode_t entry
364 * to an acl_perm_t.
365 */
366acl_perm_t
367acl_posix1e_mode_to_perm(acl_tag_t tag, mode_t mode)
368{
369 acl_perm_t perm = 0;
370
371 switch(tag) {
372 case ACL_USER_OBJ:
373 if (mode & S_IXUSR)
374 perm |= ACL_EXECUTE;
375 if (mode & S_IRUSR)
376 perm |= ACL_READ;
377 if (mode & S_IWUSR)
378 perm |= ACL_WRITE;
379 return (perm);
380
381 case ACL_GROUP_OBJ:
382 if (mode & S_IXGRP)
383 perm |= ACL_EXECUTE;
384 if (mode & S_IRGRP)
385 perm |= ACL_READ;
386 if (mode & S_IWGRP)
387 perm |= ACL_WRITE;
388 return (perm);
389
390 case ACL_OTHER:
391 if (mode & S_IXOTH)
392 perm |= ACL_EXECUTE;
393 if (mode & S_IROTH)
394 perm |= ACL_READ;
395 if (mode & S_IWOTH)
396 perm |= ACL_WRITE;
397 return (perm);
398
399 default:
400 printf("acl_posix1e_mode_to_perm: invalid tag (%d)\n", tag);
401 return (0);
402 }
403}
404
405/*
406 * Given inode information (uid, gid, mode), return an acl entry of the
407 * appropriate type.
408 */
409struct acl_entry
410acl_posix1e_mode_to_entry(acl_tag_t tag, uid_t uid, gid_t gid, mode_t mode)
411{
412 struct acl_entry acl_entry;
413
414 acl_entry.ae_tag = tag;
415 acl_entry.ae_perm = acl_posix1e_mode_to_perm(tag, mode);
416 switch(tag) {
417 case ACL_USER_OBJ:
418 acl_entry.ae_id = uid;
419 break;
420
421 case ACL_GROUP_OBJ:
422 acl_entry.ae_id = gid;
423 break;
424
425 case ACL_OTHER:
426 acl_entry.ae_id = ACL_UNDEFINED_ID;
427 break;
428
429 default:
430 acl_entry.ae_id = ACL_UNDEFINED_ID;
431 printf("acl_posix1e_mode_to_entry: invalid tag (%d)\n", tag);
432 }
433
434 return (acl_entry);
435}
436
437/*
438 * Utility function to generate a file mode given appropriate ACL entries.
439 */
440mode_t
441acl_posix1e_perms_to_mode(struct acl_entry *acl_user_obj_entry,
442 struct acl_entry *acl_group_obj_entry, struct acl_entry *acl_other_entry)
443{
444 mode_t mode;
445
446 mode = 0;
447 if (acl_user_obj_entry->ae_perm & ACL_EXECUTE)
448 mode |= S_IXUSR;
449 if (acl_user_obj_entry->ae_perm & ACL_READ)
450 mode |= S_IRUSR;
451 if (acl_user_obj_entry->ae_perm & ACL_WRITE)
452 mode |= S_IWUSR;
453 if (acl_group_obj_entry->ae_perm & ACL_EXECUTE)
454 mode |= S_IXGRP;
455 if (acl_group_obj_entry->ae_perm & ACL_READ)
456 mode |= S_IRGRP;
457 if (acl_group_obj_entry->ae_perm & ACL_WRITE)
458 mode |= S_IWGRP;
459 if (acl_other_entry->ae_perm & ACL_EXECUTE)
460 mode |= S_IXOTH;
461 if (acl_other_entry->ae_perm & ACL_READ)
462 mode |= S_IROTH;
463 if (acl_other_entry->ae_perm & ACL_WRITE)
464 mode |= S_IWOTH;
465
466 return (mode);
467}
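/*
 * For illustration, a traditional 0644 mode round-trips through the two
 * helpers above: acl_posix1e_mode_to_perm() yields ACL_READ | ACL_WRITE
 * for ACL_USER_OBJ and ACL_READ for ACL_GROUP_OBJ and ACL_OTHER, and
 * feeding the three resulting entries back into
 * acl_posix1e_perms_to_mode() reconstructs
 * S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH (0644).
 */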
468
469/*
470 * Perform a syntactic check of the ACL, sufficient to allow an
471 * implementing file system to determine if it should accept this and
472 * rely on the POSIX.1e ACL properties.
473 */
474int
475acl_posix1e_check(struct acl *acl)
476{
477 int num_acl_user_obj, num_acl_user, num_acl_group_obj, num_acl_group;
478 int num_acl_mask, num_acl_other, i;
479
480 /*
481 * Verify that the number of entries does not exceed the maximum
482 * defined for acl_t.
483 * Verify that the correct number of various sorts of ae_tags are
484 * present:
485 * Exactly one ACL_USER_OBJ
486 * Exactly one ACL_GROUP_OBJ
487 * Exactly one ACL_OTHER
488 * If any ACL_USER or ACL_GROUP entries appear, then exactly one
489 * ACL_MASK entry must also appear.
490 * Verify that all ae_perm entries are in ACL_PERM_BITS.
491 * Verify all ae_tag entries are understood by this implementation.
492 * Note: Does not check for uniqueness of qualifier (ae_id) field.
493 */
494 num_acl_user_obj = num_acl_user = num_acl_group_obj = num_acl_group =
495 num_acl_mask = num_acl_other = 0;
496 if (acl->acl_cnt > ACL_MAX_ENTRIES || acl->acl_cnt < 0)
497 return (EINVAL);
498 for (i = 0; i < acl->acl_cnt; i++) {
499 /*
500 * Check for a valid tag.
501 */
502 switch(acl->acl_entry[i].ae_tag) {
503 case ACL_USER_OBJ:
504 acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
505 if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
506 return (EINVAL);
507 num_acl_user_obj++;
508 break;
509 case ACL_GROUP_OBJ:
510 acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
511 if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
512 return (EINVAL);
513 num_acl_group_obj++;
514 break;
515 case ACL_USER:
516 if (acl->acl_entry[i].ae_id == ACL_UNDEFINED_ID)
517 return (EINVAL);
518 num_acl_user++;
519 break;
520 case ACL_GROUP:
521 if (acl->acl_entry[i].ae_id == ACL_UNDEFINED_ID)
522 return (EINVAL);
523 num_acl_group++;
524 break;
525 case ACL_OTHER:
526 acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
527 if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
528 return (EINVAL);
529 num_acl_other++;
530 break;
531 case ACL_MASK:
532 acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
533 if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
534 return (EINVAL);
535 num_acl_mask++;
536 break;
537 default:
538 return (EINVAL);
539 }
540 /*
541 * Check for valid perm entries.
542 */
543 if ((acl->acl_entry[i].ae_perm | ACL_PERM_BITS) !=
544 ACL_PERM_BITS)
545 return (EINVAL);
546 }
547 if ((num_acl_user_obj != 1) || (num_acl_group_obj != 1) ||
548 (num_acl_other != 1) || (num_acl_mask != 0 && num_acl_mask != 1))
549 return (EINVAL);
550 if (((num_acl_group != 0) || (num_acl_user != 0)) &&
551 (num_acl_mask != 1))
552 return (EINVAL);
553 return (0);
554}
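/*
 * For illustration, the smallest ACL that passes the check above has
 * exactly three entries -- one ACL_USER_OBJ, one ACL_GROUP_OBJ, and one
 * ACL_OTHER, each with an undefined qualifier and permissions drawn from
 * ACL_PERM_BITS.  Adding any ACL_USER or ACL_GROUP entry (with a defined
 * ae_id) then requires exactly one ACL_MASK entry as well.
 */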
555
556/*
557 * These calls wrap the real vnode operations, and are called by the
558 * syscall code once the syscall has converted the path or file
559 * descriptor to a vnode (unlocked). The aclp pointer is assumed
560 * still to point to userland, so this should not be consumed within
561 * the kernel except by syscall code. Other code should directly
562 * invoke VOP_{SET,GET}ACL.
563 */
564
565/*
566 * Given a vnode, set its ACL.
567 */
568static int
569vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
570 struct acl *aclp)
571{
572 struct acl inkernacl;
573 struct mount *mp;
574 int error;
575
576 error = copyin(aclp, &inkernacl, sizeof(struct acl));
577 if (error)
578 return(error);
579 error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
580 if (error != 0)
581 return (error);
582 VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
582 VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
583 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
584 error = VOP_SETACL(vp, type, &inkernacl, td->td_proc->p_ucred, td);
584 error = VOP_SETACL(vp, type, &inkernacl, td->td_ucred, td);
585 VOP_UNLOCK(vp, 0, td);
586 vn_finished_write(mp);
587 return(error);
588}
589
590/*
591 * Given a vnode, get its ACL.
592 */
593static int
594vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
595 struct acl *aclp)
596{
597 struct acl inkernelacl;
598 int error;
599
600 VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
600 VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
601 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
602 error = VOP_GETACL(vp, type, &inkernelacl, td->td_proc->p_ucred, td);
602 error = VOP_GETACL(vp, type, &inkernelacl, td->td_ucred, td);
603 VOP_UNLOCK(vp, 0, td);
604 if (error == 0)
605 error = copyout(&inkernelacl, aclp, sizeof(struct acl));
606 return (error);
607}
608
609/*
610 * Given a vnode, delete its ACL.
611 */
612static int
613vacl_delete(struct thread *td, struct vnode *vp, acl_type_t type)
614{
615 struct mount *mp;
616 int error;
617
618 error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
619 if (error)
620 return (error);
621 VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
621 VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
622 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
623 error = VOP_SETACL(vp, ACL_TYPE_DEFAULT, 0, td->td_proc->p_ucred,
624 td);
623 error = VOP_SETACL(vp, ACL_TYPE_DEFAULT, 0, td->td_ucred, td);
625 VOP_UNLOCK(vp, 0, td);
626 vn_finished_write(mp);
627 return (error);
628}
629
630/*
631 * Given a vnode, check whether an ACL is appropriate for it
632 */
633static int
634vacl_aclcheck(struct thread *td, struct vnode *vp, acl_type_t type,
635 struct acl *aclp)
636{
637 struct acl inkernelacl;
638 int error;
639
640 error = copyin(aclp, &inkernelacl, sizeof(struct acl));
641 if (error)
642 return(error);
643 error = VOP_ACLCHECK(vp, type, &inkernelacl, td->td_proc->p_ucred,
644 td);
642 error = VOP_ACLCHECK(vp, type, &inkernelacl, td->td_ucred, td);
645 return (error);
646}
647
648/*
649 * syscalls -- convert the path/fd to a vnode, and call vacl_whatever.
650 * Don't need to lock, as the vacl_ code will get/release any locks
651 * required.
652 */
653
654/*
655 * Given a file path, get an ACL for it
656 *
657 * MPSAFE
658 */
659int
660__acl_get_file(struct thread *td, struct __acl_get_file_args *uap)
661{
662 struct nameidata nd;
663 int error;
664
665 mtx_lock(&Giant);
666 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
667 error = namei(&nd);
668 if (error == 0) {
669 error = vacl_get_acl(td, nd.ni_vp, SCARG(uap, type),
670 SCARG(uap, aclp));
671 NDFREE(&nd, 0);
672 }
673 mtx_unlock(&Giant);
674 return (error);
675}
676
677/*
678 * Given a file path, set an ACL for it
679 *
680 * MPSAFE
681 */
682int
683__acl_set_file(struct thread *td, struct __acl_set_file_args *uap)
684{
685 struct nameidata nd;
686 int error;
687
688 mtx_lock(&Giant);
689 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
690 error = namei(&nd);
691 if (error == 0) {
692 error = vacl_set_acl(td, nd.ni_vp, SCARG(uap, type),
693 SCARG(uap, aclp));
694 NDFREE(&nd, 0);
695 }
696 mtx_unlock(&Giant);
697 return (error);
698}
699
700/*
701 * Given a file descriptor, get an ACL for it
702 *
703 * MPSAFE
704 */
705int
706__acl_get_fd(struct thread *td, struct __acl_get_fd_args *uap)
707{
708 struct file *fp;
709 int error;
710
711 mtx_lock(&Giant);
712 error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
713 if (error == 0) {
714 error = vacl_get_acl(td, (struct vnode *)fp->f_data,
715 SCARG(uap, type), SCARG(uap, aclp));
716 fdrop(fp, td);
717 }
718 mtx_unlock(&Giant);
719 return (error);
720}
721
722/*
723 * Given a file descriptor, set an ACL for it
724 *
725 * MPSAFE
726 */
727int
728__acl_set_fd(struct thread *td, struct __acl_set_fd_args *uap)
729{
730 struct file *fp;
731 int error;
732
733 mtx_lock(&Giant);
734 error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
735 if (error == 0) {
736 error = vacl_set_acl(td, (struct vnode *)fp->f_data,
737 SCARG(uap, type), SCARG(uap, aclp));
738 fdrop(fp, td);
739 }
740 mtx_unlock(&Giant);
741 return (error);
742}
743
744/*
745 * Given a file path, delete an ACL from it.
746 *
747 * MPSAFE
748 */
749int
750__acl_delete_file(struct thread *td, struct __acl_delete_file_args *uap)
751{
752 struct nameidata nd;
753 int error;
754
755 mtx_lock(&Giant);
756 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
757 error = namei(&nd);
758 if (error == 0) {
759 error = vacl_delete(td, nd.ni_vp, SCARG(uap, type));
760 NDFREE(&nd, 0);
761 }
762 mtx_unlock(&Giant);
763 return (error);
764}
765
766/*
767 * Given a file path, delete an ACL from it.
768 *
769 * MPSAFE
770 */
771int
772__acl_delete_fd(struct thread *td, struct __acl_delete_fd_args *uap)
773{
774 struct file *fp;
775 int error;
776
777 mtx_lock(&Giant);
778 error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
779 if (error == 0) {
780 error = vacl_delete(td, (struct vnode *)fp->f_data,
781 SCARG(uap, type));
782 fdrop(fp, td);
783 }
784 mtx_unlock(&Giant);
785 return (error);
786}
787
788/*
789 * Given a file path, check an ACL for it
790 *
791 * MPSAFE
792 */
793int
794__acl_aclcheck_file(struct thread *td, struct __acl_aclcheck_file_args *uap)
795{
796 struct nameidata nd;
797 int error;
798
799 mtx_lock(&Giant);
800 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
801 error = namei(&nd);
802 if (error == 0) {
803 error = vacl_aclcheck(td, nd.ni_vp, SCARG(uap, type),
804 SCARG(uap, aclp));
805 NDFREE(&nd, 0);
806 }
807 mtx_unlock(&Giant);
808 return (error);
809}
810
811/*
812 * Given a file descriptor, check an ACL for it
813 *
814 * MPSAFE
815 */
816int
817__acl_aclcheck_fd(struct thread *td, struct __acl_aclcheck_fd_args *uap)
818{
819 struct file *fp;
820 int error;
821
822 mtx_lock(&Giant);
823 error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
824 if (error == 0) {
825 error = vacl_aclcheck(td, (struct vnode *)fp->f_data,
826 SCARG(uap, type), SCARG(uap, aclp));
827 fdrop(fp, td);
828 }
829 mtx_unlock(&Giant);
830 return (error);
831}