1/*	$OpenBSD: kern_unveil.c,v 1.55 2022/12/05 23:18:37 deraadt Exp $	*/
2
3/*
4 * Copyright (c) 2017-2019 Bob Beck <beck@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <sys/param.h>
20
21#include <sys/acct.h>
22#include <sys/mount.h>
23#include <sys/filedesc.h>
24#include <sys/proc.h>
25#include <sys/namei.h>
26#include <sys/vnode.h>
27#include <sys/types.h>
28#include <sys/malloc.h>
29#include <sys/tree.h>
30#include <sys/lock.h>
31
32#include <sys/syscall.h>
33#include <sys/syscallargs.h>
34#include <sys/systm.h>
35
36#include <sys/pledge.h>
37
38struct unvname {
39	char 			*un_name;
40	size_t 			un_namesize;
41	u_char			un_flags;
42	RBT_ENTRY(unvnmae)	un_rbt;
43};
44
45RBT_HEAD(unvname_rbt, unvname);
46
47struct unveil {
48	struct vnode		*uv_vp;
49	ssize_t			uv_cover;
50	struct unvname_rbt	uv_names;
51	struct rwlock		uv_lock;
52	u_char			uv_flags;
53};
54
55/* #define DEBUG_UNVEIL */
56#ifdef DEBUG_UNVEIL
57#define	DPRINTF(x...)	do { printf(x); } while (0)
58#else
59#define	DPRINTF(x...)
60#endif
61
62#define UNVEIL_MAX_VNODES	128
63#define UNVEIL_MAX_NAMES	128
64
65static inline int
66unvname_compare(const struct unvname *n1, const struct unvname *n2)
67{
68	if (n1->un_namesize == n2->un_namesize)
69		return (memcmp(n1->un_name, n2->un_name, n1->un_namesize));
70	else
71		return (n1->un_namesize - n2->un_namesize);
72}
73
74struct unvname *
75unvname_new(const char *name, size_t size, u_char flags)
76{
77	struct unvname *ret = malloc(sizeof(struct unvname), M_PROC, M_WAITOK);
78	ret->un_name = malloc(size, M_PROC, M_WAITOK);
79	memcpy(ret->un_name, name, size);
80	ret->un_namesize = size;
81	ret->un_flags = flags;
82	return ret;
83}
84
85void
86unvname_delete(struct unvname *name)
87{
88	free(name->un_name, M_PROC, name->un_namesize);
89	free(name, M_PROC, sizeof(struct unvname));
90}
91
92RBT_PROTOTYPE(unvname_rbt, unvname, un_rbt, unvname_compare);
93RBT_GENERATE(unvname_rbt, unvname, un_rbt, unvname_compare);
94
95int
96unveil_delete_names(struct unveil *uv)
97{
98	struct unvname *unvn, *next;
99	int ret = 0;
100
101	rw_enter_write(&uv->uv_lock);
102	RBT_FOREACH_SAFE(unvn, unvname_rbt, &uv->uv_names, next) {
103		RBT_REMOVE(unvname_rbt, &uv->uv_names, unvn);
104		unvname_delete(unvn);
105		ret++;
106	}
107	rw_exit_write(&uv->uv_lock);
108
109	DPRINTF("deleted %d names\n", ret);
110	return ret;
111}
112
113int
114unveil_add_name_unlocked(struct unveil *uv, char *name, u_char flags)
115{
116	struct unvname *unvn;
117
118	unvn = unvname_new(name, strlen(name) + 1, flags);
119	if (RBT_INSERT(unvname_rbt, &uv->uv_names, unvn) != NULL) {
120		/* Name already present. */
121		unvname_delete(unvn);
122		return 0;
123	}
124
125	DPRINTF("added name %s underneath vnode %p\n", name, uv->uv_vp);
126	return 1;
127}
128
129int
130unveil_add_name(struct unveil *uv, char *name, u_char flags)
131{
132	int ret;
133
134	rw_enter_write(&uv->uv_lock);
135	ret = unveil_add_name_unlocked(uv, name, flags);
136	rw_exit_write(&uv->uv_lock);
137	return ret;
138}
139
140struct unvname *
141unveil_namelookup(struct unveil *uv, char *name)
142{
143	struct unvname n, *ret = NULL;
144
145	rw_enter_read(&uv->uv_lock);
146
147	DPRINTF("%s: looking up name %s (%p) in vnode %p\n",
148	    __func__, name, name, uv->uv_vp);
149
150	KASSERT(uv->uv_vp != NULL);
151
152	n.un_name = name;
153	n.un_namesize = strlen(name) + 1;
154
155	ret = RBT_FIND(unvname_rbt, &uv->uv_names, &n);
156
157	rw_exit_read(&uv->uv_lock);
158
159	DPRINTF("%s: %s name %s in vnode %p\n", __func__,
160	    (ret == NULL) ? "no match for" : "matched",
161	    name, uv->uv_vp);
162	return ret;
163}
164
165void
166unveil_destroy(struct process *ps)
167{
168	size_t i;
169
170	for (i = 0; ps->ps_uvpaths != NULL && i < ps->ps_uvvcount; i++) {
171		struct unveil *uv = ps->ps_uvpaths + i;
172
173		struct vnode *vp = uv->uv_vp;
174		/* skip any vnodes zapped by unveil_removevnode */
175		if (vp != NULL) {
176			vp->v_uvcount--;
177
178			DPRINTF("unveil: %s(%d): removing vnode %p uvcount %d "
179			    "in position %ld\n",
180			    ps->ps_comm, ps->ps_pid, vp, vp->v_uvcount, i);
181			vrele(vp);
182		}
183		ps->ps_uvncount -= unveil_delete_names(uv);
184		uv->uv_vp = NULL;
185		uv->uv_flags = 0;
186	}
187
188	KASSERT(ps->ps_uvncount == 0);
189	free(ps->ps_uvpaths, M_PROC, UNVEIL_MAX_VNODES *
190	    sizeof(struct unveil));
191	ps->ps_uvvcount = 0;
192	ps->ps_uvpaths = NULL;
193}
194
195void
196unveil_copy(struct process *parent, struct process *child)
197{
198	size_t i;
199
200	child->ps_uvdone = parent->ps_uvdone;
201	if (parent->ps_uvvcount == 0)
202		return;
203
204	child->ps_uvpaths = mallocarray(UNVEIL_MAX_VNODES,
205	    sizeof(struct unveil), M_PROC, M_WAITOK|M_ZERO);
206
207	child->ps_uvncount = 0;
208	for (i = 0; parent->ps_uvpaths != NULL && i < parent->ps_uvvcount;
209	     i++) {
210		struct unveil *from = parent->ps_uvpaths + i;
211		struct unveil *to = child->ps_uvpaths + i;
212		struct unvname *unvn, *next;
213
214		to->uv_vp = from->uv_vp;
215		if (to->uv_vp != NULL) {
216			vref(to->uv_vp);
217			to->uv_vp->v_uvcount++;
218		}
219		rw_init(&to->uv_lock, "unveil");
220		RBT_INIT(unvname_rbt, &to->uv_names);
221		rw_enter_read(&from->uv_lock);
222		RBT_FOREACH_SAFE(unvn, unvname_rbt, &from->uv_names, next) {
223			if (unveil_add_name_unlocked(&child->ps_uvpaths[i],
224				    unvn->un_name, unvn->un_flags))
225				child->ps_uvncount++;
226		}
227		rw_exit_read(&from->uv_lock);
228		to->uv_flags = from->uv_flags;
229		to->uv_cover = from->uv_cover;
230	}
231	child->ps_uvvcount = parent->ps_uvvcount;
232}
233
234/*
235 * Walk up from vnode dp, until we find a matching unveil, or the root vnode
236 * returns -1 if no unveil to be found above dp or if dp is the root vnode.
237 */
238ssize_t
239unveil_find_cover(struct vnode *dp, struct proc *p)
240{
241	struct vnode *vp = NULL, *parent = NULL, *root;
242	ssize_t ret = -1;
243	int error;
244
245	/* use the correct root to stop at, chrooted or not.. */
246	root = p->p_fd->fd_rdir ? p->p_fd->fd_rdir : rootvnode;
247	vp = dp;
248
249	while (vp != root) {
250		struct componentname cn = {
251			.cn_nameiop = LOOKUP,
252			.cn_flags = ISLASTCN | ISDOTDOT | RDONLY,
253			.cn_proc = p,
254			.cn_cred = p->p_ucred,
255			.cn_pnbuf = NULL,
256			.cn_nameptr = "..",
257			.cn_namelen = 2,
258			.cn_consume = 0
259		};
260
261		/*
262		 * If we are at the root of a filesystem, and we are
263		 * still mounted somewhere, take the .. in the above
264		 * filesystem.
265		 */
266		if (vp != root && (vp->v_flag & VROOT)) {
267			if (vp->v_mount == NULL)
268				return -1;
269			vp = vp->v_mount->mnt_vnodecovered ?
270			    vp->v_mount->mnt_vnodecovered : vp;
271		}
272
273		if (vget(vp, LK_EXCLUSIVE|LK_RETRY) != 0)
274			return -1;
275		/* Get parent vnode of vp using lookup of '..' */
276		/* This returns with vp unlocked but ref'ed*/
277		error = VOP_LOOKUP(vp, &parent, &cn);
278		if (error) {
279			if (!(cn.cn_flags & PDIRUNLOCK))
280				vput(vp);
281			else {
282				/*
283				 * This corner case should not happen because
284				 * we have not set LOCKPARENT in the flags
285				 */
286				DPRINTF("vnode %p PDIRUNLOCK on error\n", vp);
287				vrele(vp);
288			}
289			break;
290		}
291
292		vrele(vp);
293		(void) unveil_lookup(parent, p->p_p, &ret);
294		vput(parent);
295
296		if (ret >= 0)
297			break;
298
299		if (vp == parent) {
300			ret = -1;
301			break;
302		}
303		vp = parent;
304		parent = NULL;
305	}
306	return ret;
307}
308
309
310struct unveil *
311unveil_lookup(struct vnode *vp, struct process *pr, ssize_t *position)
312{
313	struct unveil *uv = pr->ps_uvpaths;
314	ssize_t i;
315
316	if (position != NULL)
317		*position = -1;
318
319	if (vp->v_uvcount == 0)
320		return NULL;
321
322	for (i = 0; i < pr->ps_uvvcount; i++) {
323		if (vp == uv[i].uv_vp) {
324			KASSERT(uv[i].uv_vp->v_uvcount > 0);
325			KASSERT(uv[i].uv_vp->v_usecount > 0);
326			if (position != NULL)
327				*position = i;
328			return &uv[i];
329		}
330	}
331	return NULL;
332}
333
334int
335unveil_parsepermissions(const char *permissions, u_char *perms)
336{
337	size_t i = 0;
338	char c;
339
340	*perms = UNVEIL_USERSET;
341	while ((c = permissions[i++]) != '\0') {
342		switch (c) {
343		case 'r':
344			*perms |= UNVEIL_READ;
345			break;
346		case 'w':
347			*perms |= UNVEIL_WRITE;
348			break;
349		case 'x':
350			*perms |= UNVEIL_EXEC;
351			break;
352		case 'c':
353			*perms |= UNVEIL_CREATE;
354			break;
355		default:
356			return -1;
357		}
358	}
359	return 0;
360}
361
362int
363unveil_setflags(u_char *flags, u_char nflags)
364{
365#if 0
366	if (((~(*flags)) & nflags) != 0) {
367		DPRINTF("Flags escalation %llX -> %llX\n", *flags, nflags);
368		return 1;
369	}
370#endif
371	*flags = nflags;
372	return 1;
373}
374
375struct unveil *
376unveil_add_vnode(struct proc *p, struct vnode *vp)
377{
378	struct process *pr = p->p_p;
379	struct unveil *uv = NULL;
380	ssize_t i;
381
382	KASSERT(pr->ps_uvvcount < UNVEIL_MAX_VNODES);
383
384	uv = &pr->ps_uvpaths[pr->ps_uvvcount++];
385	rw_init(&uv->uv_lock, "unveil");
386	RBT_INIT(unvname_rbt, &uv->uv_names);
387	uv->uv_vp = vp;
388	uv->uv_flags = 0;
389
390	/* find out what we are covered by */
391	uv->uv_cover = unveil_find_cover(vp, p);
392
393	/*
394	 * Find anyone covered by what we are covered by
395	 * and re-check what covers them (we could have
396	 * interposed a cover)
397	 */
398	for (i = 0; i < pr->ps_uvvcount - 1; i++) {
399		if (pr->ps_uvpaths[i].uv_cover == uv->uv_cover)
400			pr->ps_uvpaths[i].uv_cover =
401			    unveil_find_cover(pr->ps_uvpaths[i].uv_vp, p);
402	}
403
404	return (uv);
405}
406
407int
408unveil_add(struct proc *p, struct nameidata *ndp, const char *permissions)
409{
410	struct process *pr = p->p_p;
411	struct vnode *vp;
412	struct unveil *uv;
413	int directory_add;
414	int ret = EINVAL;
415	u_char flags;
416
417	KASSERT(ISSET(ndp->ni_cnd.cn_flags, HASBUF)); /* must have SAVENAME */
418
419	if (unveil_parsepermissions(permissions, &flags) == -1)
420		goto done;
421
422	if (pr->ps_uvpaths == NULL) {
423		pr->ps_uvpaths = mallocarray(UNVEIL_MAX_VNODES,
424		    sizeof(struct unveil), M_PROC, M_WAITOK|M_ZERO);
425	}
426
427	if (pr->ps_uvvcount >= UNVEIL_MAX_VNODES ||
428	    pr->ps_uvncount >= UNVEIL_MAX_NAMES) {
429		ret = E2BIG;
430		goto done;
431	}
432
433	/* Are we a directory? or something else */
434	directory_add = ndp->ni_vp != NULL && ndp->ni_vp->v_type == VDIR;
435
436	if (directory_add)
437		vp = ndp->ni_vp;
438	else
439		vp = ndp->ni_dvp;
440
441	KASSERT(vp->v_type == VDIR);
442	vref(vp);
443	vp->v_uvcount++;
444	if ((uv = unveil_lookup(vp, pr, NULL)) != NULL) {
445		/*
446		 * We already have unveiled this directory
447		 * vnode
448		 */
449		vp->v_uvcount--;
450		vrele(vp);
451
452		/*
453		 * If we are adding a directory which was already
454		 * unveiled containing only specific terminals,
455		 * unrestrict it.
456		 */
457		if (directory_add) {
458			DPRINTF("unveil: %s(%d): updating directory vnode %p"
459			    " to unrestricted uvcount %d\n",
460			    pr->ps_comm, pr->ps_pid, vp, vp->v_uvcount);
461
462			if (!unveil_setflags(&uv->uv_flags, flags))
463				ret = EPERM;
464			else
465				ret = 0;
466			goto done;
467		}
468
469		/*
470		 * If we are adding a terminal that is already unveiled, just
471		 * replace the flags and we are done
472		 */
473		if (!directory_add) {
474			struct unvname *tname;
475			if ((tname = unveil_namelookup(uv,
476			    ndp->ni_cnd.cn_nameptr)) != NULL) {
477				DPRINTF("unveil: %s(%d): changing flags for %s"
478				    "in vnode %p, uvcount %d\n",
479				    pr->ps_comm, pr->ps_pid, tname->un_name, vp,
480				    vp->v_uvcount);
481
482				if (!unveil_setflags(&tname->un_flags, flags))
483					ret = EPERM;
484				else
485					ret = 0;
486				goto done;
487			}
488		}
489
490	} else {
491		/*
492		 * New unveil involving this directory vnode.
493		 */
494		uv = unveil_add_vnode(p, vp);
495	}
496
497	/*
498	 * At this stage with have a unveil in uv with a vnode for a
499	 * directory. If the component we are adding is a directory,
500	 * we are done. Otherwise, we add the component name the name
501	 * list in uv.
502	 */
503
504	if (directory_add) {
505		uv->uv_flags = flags;
506		ret = 0;
507
508		DPRINTF("unveil: %s(%d): added unrestricted directory vnode %p"
509		    ", uvcount %d\n",
510		    pr->ps_comm, pr->ps_pid, vp, vp->v_uvcount);
511		goto done;
512	}
513
514	if (unveil_add_name(uv, ndp->ni_cnd.cn_nameptr, flags))
515		pr->ps_uvncount++;
516	ret = 0;
517
518	DPRINTF("unveil: %s(%d): added name %s beneath %s vnode %p,"
519	    " uvcount %d\n",
520	    pr->ps_comm, pr->ps_pid, ndp->ni_cnd.cn_nameptr,
521	    uv->uv_flags ? "unrestricted" : "restricted",
522	    vp, vp->v_uvcount);
523
524 done:
525	return ret;
526}
527
528/*
529 * XXX this will probably change.
530 * XXX collapse down later once debug surely unneeded
531 */
532int
533unveil_flagmatch(struct nameidata *ni, u_char flags)
534{
535	if (flags == 0) {
536		DPRINTF("All operations forbidden for 0 flags\n");
537		return 0;
538	}
539	if (ni->ni_unveil & UNVEIL_READ) {
540		if ((flags & UNVEIL_READ) == 0) {
541			DPRINTF("unveil lacks UNVEIL_READ\n");
542			return 0;
543		}
544	}
545	if (ni->ni_unveil & UNVEIL_WRITE) {
546		if ((flags & UNVEIL_WRITE) == 0) {
547			DPRINTF("unveil lacks UNVEIL_WRITE\n");
548			return 0;
549		}
550	}
551	if (ni->ni_unveil & UNVEIL_EXEC) {
552		if ((flags & UNVEIL_EXEC) == 0) {
553			DPRINTF("unveil lacks UNVEIL_EXEC\n");
554			return 0;
555		}
556	}
557	if (ni->ni_unveil & UNVEIL_CREATE) {
558		if ((flags & UNVEIL_CREATE) == 0) {
559			DPRINTF("unveil lacks UNVEIL_CREATE\n");
560			return 0;
561		}
562	}
563	return 1;
564}
565
566/*
567 * When traversing up towards the root figure out the proper unveil for
568 * the parent directory.
569 */
570struct unveil *
571unveil_covered(struct unveil *uv, struct vnode *dvp, struct proc *p)
572{
573	if (uv && uv->uv_vp == dvp) {
574		/* if at the root, chrooted or not, return the current uv */
575		if (dvp == (p->p_fd->fd_rdir ? p->p_fd->fd_rdir : rootvnode))
576			return uv;
577		if (uv->uv_cover >=0) {
578			KASSERT(uv->uv_cover < p->p_p->ps_uvvcount);
579			return &p->p_p->ps_uvpaths[uv->uv_cover];
580		}
581		return NULL;
582	}
583	return uv;
584}
585
586
587/*
588 * Start a relative path lookup. Ensure we find whatever unveil covered
589 * where we start from, either by having a saved current working directory
590 * unveil, or by walking up and finding a cover the hard way if we are
591 * doing a non AT_FDCWD relative lookup. Caller passes a NULL dp
592 * if we are using AT_FDCWD.
593 */
594void
595unveil_start_relative(struct proc *p, struct nameidata *ni, struct vnode *dp)
596{
597	struct process *pr = p->p_p;
598	struct unveil *uv = NULL;
599	ssize_t uvi;
600
601	if (pr->ps_uvpaths == NULL)
602		return;
603
604	uv = unveil_lookup(dp, pr, NULL);
605	if (uv == NULL) {
606		uvi = unveil_find_cover(dp, p);
607		if (uvi >= 0) {
608			KASSERT(uvi < pr->ps_uvvcount);
609			uv = &pr->ps_uvpaths[uvi];
610		}
611	}
612
613	/*
614	 * Store this match for later use. Flags are checked at the end.
615	 */
616	if (uv) {
617		DPRINTF("unveil: %s(%d): relative unveil at %p matches",
618		    pr->ps_comm, pr->ps_pid, uv);
619
620		ni->ni_unveil_match = uv;
621	}
622}
623
624/*
625 * unveil checking - for component directories in a namei lookup.
626 */
627void
628unveil_check_component(struct proc *p, struct nameidata *ni, struct vnode *dp)
629{
630	struct process *pr = p->p_p;
631	struct unveil *uv = NULL;
632
633	if (ni->ni_pledge == PLEDGE_UNVEIL || pr->ps_uvpaths == NULL)
634		return;
635	if (ni->ni_cnd.cn_flags & BYPASSUNVEIL)
636		return;
637
638	if (ni->ni_cnd.cn_flags & ISDOTDOT) {
639		/*
640		 * adjust unveil match as necessary
641		 */
642		uv = unveil_covered(ni->ni_unveil_match, dp, p);
643
644		/* clear the match when we DOTDOT above it */
645		if (ni->ni_unveil_match && ni->ni_unveil_match->uv_vp == dp)
646			ni->ni_unveil_match = NULL;
647	} else
648		uv = unveil_lookup(dp, pr, NULL);
649
650	if (uv != NULL) {
651		/* update match */
652		ni->ni_unveil_match = uv;
653
654		DPRINTF("unveil: %s(%d): component directory match for "
655		    "vnode %p\n", pr->ps_comm, pr->ps_pid, dp);
656	}
657}
658
659/*
660 * unveil checking - only done after namei lookup has succeeded on
661 * the last component of a namei lookup.
662 */
663int
664unveil_check_final(struct proc *p, struct nameidata *ni)
665{
666	struct process *pr = p->p_p;
667	struct unveil *uv = NULL, *nuv;
668	struct unvname *tname = NULL;
669
670	if (ni->ni_pledge == PLEDGE_UNVEIL || pr->ps_uvpaths == NULL)
671		return (0);
672
673	if (ni->ni_cnd.cn_flags & BYPASSUNVEIL) {
674		DPRINTF("unveil: %s(%d): BYPASSUNVEIL.\n",
675		    pr->ps_comm, pr->ps_pid);
676
677		return (0);
678	}
679
680	if (ni->ni_vp != NULL && ni->ni_vp->v_type == VDIR) {
681		/* We are matching a directory terminal component */
682		uv = unveil_lookup(ni->ni_vp, pr, NULL);
683		if (uv == NULL || (uv->uv_flags & UNVEIL_USERSET) == 0) {
684			DPRINTF("unveil: %s(%d) no match for vnode %p\n",
685			    pr->ps_comm, pr->ps_pid, ni->ni_vp);
686
687			if (uv != NULL)
688				ni->ni_unveil_match = uv;
689			goto done;
690		}
691		if (!unveil_flagmatch(ni, uv->uv_flags)) {
692			DPRINTF("unveil: %s(%d) flag mismatch for directory"
693			    " vnode %p\n",
694			    pr->ps_comm, pr->ps_pid, ni->ni_vp);
695
696			pr->ps_acflag |= AUNVEIL;
697			if (uv->uv_flags & UNVEIL_MASK)
698				return EACCES;
699			else
700				return ENOENT;
701
702		}
703		/* directory and flags match, success */
704		DPRINTF("unveil: %s(%d): matched directory \"%s\" at vnode %p\n",
705		    pr->ps_comm, pr->ps_pid, ni->ni_cnd.cn_nameptr,
706		    uv->uv_vp);
707
708		return (0);
709	}
710
711	/* Otherwise, we are matching a non-terminal component */
712	uv = unveil_lookup(ni->ni_dvp, pr, NULL);
713	if (uv == NULL) {
714		DPRINTF("unveil: %s(%d) no match for directory vnode %p\n",
715		    pr->ps_comm, pr->ps_pid, ni->ni_dvp);
716
717		goto done;
718	}
719	if ((tname = unveil_namelookup(uv, ni->ni_cnd.cn_nameptr)) == NULL) {
720		DPRINTF("unveil: %s(%d) no match for terminal '%s' in "
721		    "directory vnode %p\n",
722		    pr->ps_comm, pr->ps_pid,
723		    ni->ni_cnd.cn_nameptr, ni->ni_dvp);
724
725		/* no specific name, so check unveil directory flags */
726		if (!unveil_flagmatch(ni, uv->uv_flags)) {
727			DPRINTF("unveil: %s(%d) terminal "
728			    "'%s' flags mismatch in directory "
729			    "vnode %p\n",
730			    pr->ps_comm, pr->ps_pid,
731			    ni->ni_cnd.cn_nameptr, ni->ni_dvp);
732
733			/*
734			 * If dir has user set restrictions fail with
735			 * EACCES or ENOENT. Otherwise, use any covering
736			 * match that we found above this dir.
737			 */
738			if (uv->uv_flags & UNVEIL_USERSET) {
739				pr->ps_acflag |= AUNVEIL;
740				if (uv->uv_flags & UNVEIL_MASK)
741					return EACCES;
742				else
743					return ENOENT;
744			}
745			/* start backtrack from this node */
746			ni->ni_unveil_match = uv;
747			goto done;
748		}
749		/* directory flags match, success */
750		DPRINTF("unveil: %s(%d): matched \"%s\" underneath vnode %p\n",
751		    pr->ps_comm, pr->ps_pid, ni->ni_cnd.cn_nameptr,
752		    uv->uv_vp);
753
754		return (0);
755	}
756	if (!unveil_flagmatch(ni, tname->un_flags)) {
757		/* do flags match for matched name */
758		DPRINTF("unveil: %s(%d) flag mismatch for terminal '%s'\n",
759		    pr->ps_comm, pr->ps_pid, tname->un_name);
760
761		pr->ps_acflag |= AUNVEIL;
762		return EACCES;
763	}
764	/* name and flags match. success */
765	DPRINTF("unveil: %s(%d) matched terminal '%s'\n",
766	    pr->ps_comm, pr->ps_pid, tname->un_name);
767
768	return (0);
769
770done:
771	/*
772	 * last component did not match, check previous matches if
773	 * access is allowed or not.
774	 */
775	for (uv = ni->ni_unveil_match; uv != NULL; uv = nuv) {
776		if (unveil_flagmatch(ni, uv->uv_flags)) {
777			DPRINTF("unveil: %s(%d): matched \"%s\" underneath/at "
778			    "vnode %p\n", pr->ps_comm, pr->ps_pid,
779			    ni->ni_cnd.cn_nameptr, uv->uv_vp);
780
781			return (0);
782		}
783		/* if node has any flags set then this is an access violation */
784		if (uv->uv_flags & UNVEIL_USERSET) {
785			DPRINTF("unveil: %s(%d) flag mismatch for vnode %p\n",
786			    pr->ps_comm, pr->ps_pid, uv->uv_vp);
787
788			pr->ps_acflag |= AUNVEIL;
789			if (uv->uv_flags & UNVEIL_MASK)
790				return EACCES;
791			else
792				return ENOENT;
793		}
794
795		DPRINTF("unveil: %s(%d) check cover for vnode %p, uv_cover %zd\n",
796		    pr->ps_comm, pr->ps_pid, uv->uv_vp, uv->uv_cover);
797
798		nuv = unveil_covered(uv, uv->uv_vp, p);
799		if (nuv == uv)
800			break;
801	}
802	pr->ps_acflag |= AUNVEIL;
803	return ENOENT;
804}
805
806/*
807 * Scan all active processes to see if any of them have a unveil
808 * to this vnode. If so, NULL the vnode in their unveil list,
809 * vrele, drop the reference, and mark their unveil list
810 * as needing to have the hole shrunk the next time the process
811 * uses it for lookup.
812 */
813void
814unveil_removevnode(struct vnode *vp)
815{
816	struct process *pr;
817
818	if (vp->v_uvcount == 0)
819		return;
820
821	DPRINTF("%s: found vnode %p with count %d\n",
822	    __func__, vp, vp->v_uvcount);
823
824	vref(vp); /* make sure it is held till we are done */
825
826	LIST_FOREACH(pr, &allprocess, ps_list) {
827		struct unveil * uv;
828
829		if ((uv = unveil_lookup(vp, pr, NULL)) != NULL &&
830		    uv->uv_vp != NULL) {
831			uv->uv_vp = NULL;
832			uv->uv_flags = 0;
833
834			DPRINTF("%s: vnode %p now count %d\n",
835			    __func__, vp, vp->v_uvcount);
836
837			if (vp->v_uvcount > 0) {
838				vrele(vp);
839				vp->v_uvcount--;
840			} else
841				panic("vp %p, v_uvcount of %d should be 0",
842				    vp, vp->v_uvcount);
843		}
844	}
845	KASSERT(vp->v_uvcount == 0);
846
847	vrele(vp); /* release our ref */
848}
849