union_subr.c: revision 101308 → revision 103314
(at each change below, the deleted revision-101308 lines appear first, followed by the added revision-103314 lines)
1/*
2 * Copyright (c) 1994 Jan-Simon Pendry
3 * Copyright (c) 1994
4 * The Regents of the University of California. All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * Jan-Simon Pendry.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * @(#)union_subr.c 8.20 (Berkeley) 5/20/95
38 * $FreeBSD: head/sys/fs/unionfs/union_subr.c 101308 2002-08-04 10:29:36Z jeff $
38 * $FreeBSD: head/sys/fs/unionfs/union_subr.c 103314 2002-09-14 09:02:28Z njl $
39 */
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/fcntl.h>
44#include <sys/file.h>
45#include <sys/filedesc.h>
46#include <sys/kernel.h>
47#include <sys/lock.h>
48#include <sys/malloc.h>
49#include <sys/module.h>
50#include <sys/mount.h>
51#include <sys/mutex.h>
52#include <sys/namei.h>
53#include <sys/stat.h>
54#include <sys/vnode.h>
55
56#include <vm/vm.h>
57#include <vm/vm_extern.h> /* for vnode_pager_setsize */
58#include <vm/vm_object.h> /* for vm cache coherency */
59#include <vm/uma.h>
60
61#include <fs/unionfs/union.h>
62
63#include <sys/proc.h>
64
65extern int union_init(void);
66
67/* must be power of two, otherwise change UNION_HASH() */
68#define NHASH 32
69
70/* unsigned int ... */
71#define UNION_HASH(u, l) \
72 (((((uintptr_t) (u)) + ((uintptr_t) l)) >> 8) & (NHASH-1))
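A quick userland check of what this hash does: the two pointer values are summed, shifted right 8 bits, and masked into one of the NHASH (32) buckets. A minimal sketch with hypothetical addresses:

#include <stdint.h>
#include <stdio.h>

#define NHASH 32
#define UNION_HASH(u, l) \
	(((((uintptr_t) (u)) + ((uintptr_t) l)) >> 8) & (NHASH-1))

int
main(void)
{
	/* hypothetical vnode addresses, one page apart */
	void *upper = (void *)0xc0a01000UL;
	void *lower = (void *)0xc0a02000UL;

	printf("%lu\n", (unsigned long)UNION_HASH(upper, lower)); /* both layers */
	printf("%lu\n", (unsigned long)UNION_HASH(upper, NULL));  /* upper only */
	return (0);
}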
73
74static LIST_HEAD(unhead, union_node) unhead[NHASH];
75static int unvplock[NHASH];
76
77static void union_dircache_r(struct vnode *vp, struct vnode ***vppp,
78 int *cntp);
79static int union_list_lock(int ix);
80static void union_list_unlock(int ix);
81static int union_relookup(struct union_mount *um, struct vnode *dvp,
82 struct vnode **vpp,
83 struct componentname *cnp,
84 struct componentname *cn, char *path,
85 int pathlen);
86static void union_updatevp(struct union_node *un,
87 struct vnode *uppervp,
88 struct vnode *lowervp);
89static void union_newlower(struct union_node *, struct vnode *);
90static void union_newupper(struct union_node *, struct vnode *);
91static int union_copyfile(struct vnode *, struct vnode *,
92 struct ucred *, struct thread *);
93static int union_vn_create(struct vnode **, struct union_node *,
94 struct thread *);
95static int union_vn_close(struct vnode *, int, struct ucred *,
96 struct thread *);
97
98int
99union_init()
100{
101 int i;
102
103 for (i = 0; i < NHASH; i++)
104 LIST_INIT(&unhead[i]);
105 bzero((caddr_t)unvplock, sizeof(unvplock));
106 return (0);
107}
108
109static int
110union_list_lock(ix)
111 int ix;
112{
113 if (unvplock[ix] & UNVP_LOCKED) {
114 unvplock[ix] |= UNVP_WANT;
115 (void) tsleep((caddr_t) &unvplock[ix], PINOD, "unllck", 0);
116 return (1);
117 }
118 unvplock[ix] |= UNVP_LOCKED;
119 return (0);
120}
121
122static void
123union_list_unlock(ix)
124 int ix;
125{
126 unvplock[ix] &= ~UNVP_LOCKED;
127
128 if (unvplock[ix] & UNVP_WANT) {
129 unvplock[ix] &= ~UNVP_WANT;
130 wakeup((caddr_t) &unvplock[ix]);
131 }
132}
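union_list_lock() returns 1 whenever it had to sleep, because the lock word may have been taken again before the sleeper was rescheduled; callers must therefore retry until it returns 0. The pattern, as used throughout this file:

	while (union_list_lock(hash))
		continue;	/* slept: chain may have changed, retry */
	/* ... unhead[hash] may be examined or modified here ... */
	union_list_unlock(hash);	/* clears UNVP_LOCKED, wakes UNVP_WANT waiters */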
133
134/*
135 * union_updatevp:
136 *
137 * The uppervp, if not NULL, must be referenced and not locked by us
138 * The lowervp, if not NULL, must be referenced.
139 *
140 * if uppervp and lowervp match pointers already installed, nothing
141 * happens. The passed vp's (when matching) are not adjusted. This
142 * routine may only be called by union_newupper() and union_newlower().
143 */
144
145static void
146union_updatevp(un, uppervp, lowervp)
147 struct union_node *un;
148 struct vnode *uppervp;
149 struct vnode *lowervp;
150{
151 int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
152 int nhash = UNION_HASH(uppervp, lowervp);
153 int docache = (lowervp != NULLVP || uppervp != NULLVP);
154 int lhash, uhash;
155
156 /*
157 * Ensure locking is ordered from lower to higher
158 * to avoid deadlocks.
159 */
160 if (nhash < ohash) {
161 lhash = nhash;
162 uhash = ohash;
163 } else {
164 lhash = ohash;
165 uhash = nhash;
166 }
167
168 if (lhash != uhash) {
169 while (union_list_lock(lhash))
170 continue;
171 }
172
173 while (union_list_lock(uhash))
174 continue;
175
176 if (ohash != nhash || !docache) {
177 if (un->un_flags & UN_CACHED) {
178 un->un_flags &= ~UN_CACHED;
179 LIST_REMOVE(un, un_cache);
180 }
181 }
182
183 if (ohash != nhash)
184 union_list_unlock(ohash);
185
186 if (un->un_lowervp != lowervp) {
187 if (un->un_lowervp) {
188 vrele(un->un_lowervp);
189 if (un->un_path) {
190 free(un->un_path, M_TEMP);
191 un->un_path = 0;
192 }
193 }
194 un->un_lowervp = lowervp;
195 un->un_lowersz = VNOVAL;
196 }
197
198 if (un->un_uppervp != uppervp) {
199 if (un->un_uppervp)
200 vrele(un->un_uppervp);
201 un->un_uppervp = uppervp;
202 un->un_uppersz = VNOVAL;
203 }
204
205 if (docache && (ohash != nhash)) {
206 LIST_INSERT_HEAD(&unhead[nhash], un, un_cache);
207 un->un_flags |= UN_CACHED;
208 }
209
210 union_list_unlock(nhash);
211}
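To make the ordering concrete: suppose a node moves from bucket 5 (ohash) to bucket 2 (nhash). Then lhash = 2 and uhash = 5, so this call locks bucket 2 before bucket 5; a concurrent call moving some other node the opposite way computes the same (2, 5) order, so neither thread can sleep on a bucket the other already holds.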
212
213/*
214 * Set a new lowervp. The passed lowervp must be referenced and will be
215 * stored in the vp in a referenced state.
216 */
217
218static void
219union_newlower(un, lowervp)
220 struct union_node *un;
221 struct vnode *lowervp;
222{
223 union_updatevp(un, un->un_uppervp, lowervp);
224}
225
226/*
227 * Set a new uppervp. The passed uppervp must be locked and will be
228 * stored in the vp in a locked state. The caller should not unlock
229 * uppervp.
230 */
231
232static void
233union_newupper(un, uppervp)
234 struct union_node *un;
235 struct vnode *uppervp;
236{
237 union_updatevp(un, uppervp, un->un_lowervp);
238}
239
240/*
241 * Keep track of size changes in the underlying vnodes.
242 * If the size changes, then callback to the vm layer
243 * giving priority to the upper layer size.
244 */
245void
246union_newsize(vp, uppersz, lowersz)
247 struct vnode *vp;
248 off_t uppersz, lowersz;
249{
250 struct union_node *un;
251 off_t sz;
252
253 /* only interested in regular files */
254 if (vp->v_type != VREG)
255 return;
256
257 un = VTOUNION(vp);
258 sz = VNOVAL;
259
260 if ((uppersz != VNOVAL) && (un->un_uppersz != uppersz)) {
261 un->un_uppersz = uppersz;
262 if (sz == VNOVAL)
263 sz = un->un_uppersz;
264 }
265
266 if ((lowersz != VNOVAL) && (un->un_lowersz != lowersz)) {
267 un->un_lowersz = lowersz;
268 if (sz == VNOVAL)
269 sz = un->un_lowersz;
270 }
271
272 if (sz != VNOVAL) {
273 UDEBUG(("union: %s size now %ld\n",
274 (uppersz != VNOVAL ? "upper" : "lower"), (long)sz));
275 /*
276 * There is no need to change size of non-existent object
277 */
278 /* vnode_pager_setsize(vp, sz); */
279 }
280}
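Distilled, and ignoring the only-if-changed guards: when both layers report a size, the upper layer's wins. A hypothetical helper (not in the source) capturing the rule:

	static off_t
	union_pick_size(off_t uppersz, off_t lowersz)
	{
		if (uppersz != VNOVAL)
			return (uppersz);	/* upper layer has priority */
		return (lowersz);		/* may itself be VNOVAL */
	}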
281
282/*
283 * union_allocvp: allocate a union_node and associate it with a
284 * parent union_node and one or two vnodes.
285 *
286 * vpp Holds the returned vnode locked and referenced if no
287 * error occurs.
288 *
289 * mp Holds the mount point. mp may or may not be busied.
290 * allocvp makes no changes to mp.
291 *
292 * dvp Holds the parent union_node to the one we wish to create.
293 * XXX may only be used to traverse an uncopied lowervp-based
294 * tree? XXX
295 *
296 * dvp may or may not be locked. allocvp makes no changes
297 * to dvp.
298 *
299 * upperdvp Holds the parent vnode to uppervp, generally used along
300 * with path component information to create a shadow of
301 * lowervp when uppervp does not exist.
302 *
303 * upperdvp is referenced but unlocked on entry, and will be
304 * dereferenced on return.
305 *
306 * uppervp Holds the new uppervp vnode to be stored in the
307 * union_node we are allocating. uppervp is referenced but
308 * not locked, and will be dereferenced on return.
309 *
310 * lowervp Holds the new lowervp vnode to be stored in the
311 * union_node we are allocating. lowervp is referenced but
312 * not locked, and will be dereferenced on return.
313 *
314 * cnp Holds path component information to be coupled with
315 * lowervp and upperdvp to allow unionfs to create an uppervp
 316 * later on. Only used if lowervp is valid. The contents
 317 * of cnp are only valid for the duration of the call.
318 *
319 * docache Determine whether this node should be entered in the
320 * cache or whether it should be destroyed as soon as possible.
321 *
322 * all union_nodes are maintained on a singly-linked
323 * list. new nodes are only allocated when they cannot
324 * be found on this list. entries on the list are
325 * removed when the vfs reclaim entry is called.
326 *
327 * a single lock is kept for the entire list. this is
328 * needed because the getnewvnode() function can block
329 * waiting for a vnode to become free, in which case there
330 * may be more than one process trying to get the same
331 * vnode. this lock is only taken if we are going to
332 * call getnewvnode, since the kernel itself is single-threaded.
333 *
334 * if an entry is found on the list, then call vget() to
335 * take a reference. this is done because there may be
 336 * zero references to it and so it needs to be removed from
337 * the vnode free list.
338 */
339
340int
341union_allocvp(vpp, mp, dvp, upperdvp, cnp, uppervp, lowervp, docache)
342 struct vnode **vpp;
343 struct mount *mp;
344 struct vnode *dvp; /* parent union vnode */
345 struct vnode *upperdvp; /* parent vnode of uppervp */
346 struct componentname *cnp; /* may be null */
347 struct vnode *uppervp; /* may be null */
348 struct vnode *lowervp; /* may be null */
349 int docache;
350{
351 int error;
352 struct union_node *un = 0;
353 struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
354 struct thread *td = (cnp) ? cnp->cn_thread : curthread;
355 int hash = 0;
356 int vflag;
357 int try;
358
359 if (uppervp == NULLVP && lowervp == NULLVP)
360 panic("union: unidentifiable allocation");
361
362 if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) {
363 vrele(lowervp);
364 lowervp = NULLVP;
365 }
366
367 /* detect the root vnode (and aliases) */
368 vflag = 0;
369 if ((uppervp == um->um_uppervp) &&
370 ((lowervp == NULLVP) || lowervp == um->um_lowervp)) {
371 if (lowervp == NULLVP) {
372 lowervp = um->um_lowervp;
373 if (lowervp != NULLVP)
374 VREF(lowervp);
375 }
376 vflag = VV_ROOT;
377 }
378
379loop:
380 if (!docache) {
381 un = 0;
382 } else for (try = 0; try < 3; try++) {
383 switch (try) {
384 case 0:
385 if (lowervp == NULLVP)
386 continue;
387 hash = UNION_HASH(uppervp, lowervp);
388 break;
389
390 case 1:
391 if (uppervp == NULLVP)
392 continue;
393 hash = UNION_HASH(uppervp, NULLVP);
394 break;
395
396 case 2:
397 if (lowervp == NULLVP)
398 continue;
399 hash = UNION_HASH(NULLVP, lowervp);
400 break;
401 }
402
403 while (union_list_lock(hash))
404 continue;
405
406 LIST_FOREACH(un, &unhead[hash], un_cache) {
407 if ((un->un_lowervp == lowervp ||
408 un->un_lowervp == NULLVP) &&
409 (un->un_uppervp == uppervp ||
410 un->un_uppervp == NULLVP) &&
411 (UNIONTOV(un)->v_mount == mp)) {
412 if (vget(UNIONTOV(un), 0,
413 cnp ? cnp->cn_thread : NULL)) {
414 union_list_unlock(hash);
415 goto loop;
416 }
417 break;
418 }
419 }
420
421 union_list_unlock(hash);
422
423 if (un)
424 break;
425 }
426
427 if (un) {
428 /*
429 * Obtain a lock on the union_node. Everything is unlocked
430 * except for dvp, so check that case. If they match, our
431 * new un is already locked. Otherwise we have to lock our
432 * new un.
433 *
434 * A potential deadlock situation occurs when we are holding
435 * one lock while trying to get another. We must follow
436 * strict ordering rules to avoid it. We try to locate dvp
437 * by scanning up from un_vnode, since the most likely
438 * scenario is un being under dvp.
439 */
440
441 if (dvp && un->un_vnode != dvp) {
442 struct vnode *scan = un->un_vnode;
443
444 do {
445 scan = VTOUNION(scan)->un_pvp;
446 } while (scan && scan->v_tag == VT_UNION && scan != dvp);
446 } while (scan && scan->v_op == union_vnodeop_p &&
447 scan != dvp);
447 if (scan != dvp) {
448 /*
449 * our new un is above dvp (we never saw dvp
450 * while moving up the tree).
451 */
452 VREF(dvp);
453 VOP_UNLOCK(dvp, 0, td);
454 error = vn_lock(un->un_vnode, LK_EXCLUSIVE, td);
455 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
456 vrele(dvp);
457 } else {
458 /*
459 * our new un is under dvp
460 */
461 error = vn_lock(un->un_vnode, LK_EXCLUSIVE, td);
462 }
463 } else if (dvp == NULLVP) {
464 /*
465 * dvp is NULL, we need to lock un.
466 */
467 error = vn_lock(un->un_vnode, LK_EXCLUSIVE, td);
468 } else {
469 /*
470 * dvp == un->un_vnode, we are already locked.
471 */
472 error = 0;
473 }
474
475 if (error)
476 goto loop;
477
478 /*
479 * At this point, the union_node is locked and referenced.
480 *
481 * uppervp is locked and referenced or NULL, lowervp is
482 * referenced or NULL.
483 */
484 UDEBUG(("Modify existing un %p vn %p upper %p(refs %d) -> %p(refs %d)\n",
485 un, un->un_vnode, un->un_uppervp,
486 (un->un_uppervp ? un->un_uppervp->v_usecount : -99),
487 uppervp,
488 (uppervp ? uppervp->v_usecount : -99)
489 ));
490
491 if (uppervp != un->un_uppervp) {
492 KASSERT(uppervp == NULL || uppervp->v_usecount > 0, ("union_allocvp: too few refs %d (at least 1 required) on uppervp", uppervp->v_usecount));
493 union_newupper(un, uppervp);
494 } else if (uppervp) {
495 KASSERT(uppervp->v_usecount > 1, ("union_allocvp: too few refs %d (at least 2 required) on uppervp", uppervp->v_usecount));
496 vrele(uppervp);
497 }
498
499 /*
500 * Save information about the lower layer.
501 * This needs to keep track of pathname
502 * and directory information which union_vn_create
503 * might need.
504 */
505 if (lowervp != un->un_lowervp) {
506 union_newlower(un, lowervp);
507 if (cnp && (lowervp != NULLVP)) {
508 un->un_path = malloc(cnp->cn_namelen+1,
509 M_TEMP, M_WAITOK);
510 bcopy(cnp->cn_nameptr, un->un_path,
511 cnp->cn_namelen);
512 un->un_path[cnp->cn_namelen] = '\0';
513 }
514 } else if (lowervp) {
515 vrele(lowervp);
516 }
517
518 /*
519 * and upperdvp
520 */
521 if (upperdvp != un->un_dirvp) {
522 if (un->un_dirvp)
523 vrele(un->un_dirvp);
524 un->un_dirvp = upperdvp;
525 } else if (upperdvp) {
526 vrele(upperdvp);
527 }
528
529 *vpp = UNIONTOV(un);
530 return (0);
531 }
532
533 if (docache) {
534 /*
535 * otherwise lock the vp list while we call getnewvnode
536 * since that can block.
537 */
538 hash = UNION_HASH(uppervp, lowervp);
539
540 if (union_list_lock(hash))
541 goto loop;
542 }
543
544 /*
 545 * Create a new node rather than replace the old node
546 */
547
548 error = getnewvnode(VT_UNION, mp, union_vnodeop_p, vpp);
549 error = getnewvnode("union", mp, union_vnodeop_p, vpp);
549 if (error) {
550 /*
551 * If an error occurs clear out vnodes.
552 */
553 if (lowervp)
554 vrele(lowervp);
555 if (uppervp)
556 vrele(uppervp);
557 if (upperdvp)
558 vrele(upperdvp);
559 *vpp = NULL;
560 goto out;
561 }
562
563 MALLOC((*vpp)->v_data, void *, sizeof(struct union_node),
564 M_TEMP, M_WAITOK);
565
566 ASSERT_VOP_LOCKED(*vpp, "union_allocvp");
567 (*vpp)->v_vflag |= vflag;
568 if (uppervp)
569 (*vpp)->v_type = uppervp->v_type;
570 else
571 (*vpp)->v_type = lowervp->v_type;
572
573 un = VTOUNION(*vpp);
574 bzero(un, sizeof(*un));
575
576 lockinit(&un->un_lock, PVFS, "unlock", VLKTIMEOUT, 0);
577 vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, td);
578
579 un->un_vnode = *vpp;
580 un->un_uppervp = uppervp;
581 un->un_uppersz = VNOVAL;
582 un->un_lowervp = lowervp;
583 un->un_lowersz = VNOVAL;
584 un->un_dirvp = upperdvp;
585 un->un_pvp = dvp; /* only parent dir in new allocation */
586 if (dvp != NULLVP)
587 VREF(dvp);
588 un->un_dircache = 0;
589 un->un_openl = 0;
590
591 if (cnp && (lowervp != NULLVP)) {
592 un->un_path = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
593 bcopy(cnp->cn_nameptr, un->un_path, cnp->cn_namelen);
594 un->un_path[cnp->cn_namelen] = '\0';
595 } else {
596 un->un_path = 0;
597 un->un_dirvp = NULL;
598 }
599
600 if (docache) {
601 LIST_INSERT_HEAD(&unhead[hash], un, un_cache);
602 un->un_flags |= UN_CACHED;
603 }
604
605out:
606 if (docache)
607 union_list_unlock(hash);
608
609 return (error);
610}
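One detail of the lookup loop worth spelling out: a node may have been entered in the cache before both of its layers were known, so up to three keys are probed in order:

	/*
	 *   try 0: UNION_HASH(uppervp, lowervp)  filed with both layers
	 *   try 1: UNION_HASH(uppervp, NULLVP)   filed when only upper was known
	 *   try 2: UNION_HASH(NULLVP, lowervp)   filed when only lower was known
	 */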
611
612int
613union_freevp(vp)
614 struct vnode *vp;
615{
616 struct union_node *un = VTOUNION(vp);
617
618 if (un->un_flags & UN_CACHED) {
619 un->un_flags &= ~UN_CACHED;
620 LIST_REMOVE(un, un_cache);
621 }
622
623 if (un->un_pvp != NULLVP) {
624 vrele(un->un_pvp);
625 un->un_pvp = NULL;
626 }
627 if (un->un_uppervp != NULLVP) {
628 vrele(un->un_uppervp);
629 un->un_uppervp = NULL;
630 }
631 if (un->un_lowervp != NULLVP) {
632 vrele(un->un_lowervp);
633 un->un_lowervp = NULL;
634 }
635 if (un->un_dirvp != NULLVP) {
636 vrele(un->un_dirvp);
637 un->un_dirvp = NULL;
638 }
639 if (un->un_path) {
640 free(un->un_path, M_TEMP);
641 un->un_path = NULL;
642 }
643 lockdestroy(&un->un_lock);
644
645 FREE(vp->v_data, M_TEMP);
646 vp->v_data = 0;
647
648 return (0);
649}
650
651/*
652 * copyfile. copy the vnode (fvp) to the vnode (tvp)
653 * using a sequence of reads and writes. both (fvp)
654 * and (tvp) are locked on entry and exit.
655 *
656 * fvp and tvp are both exclusive locked on call, but their refcount's
657 * haven't been bumped at all.
658 */
659static int
660union_copyfile(fvp, tvp, cred, td)
661 struct vnode *fvp;
662 struct vnode *tvp;
663 struct ucred *cred;
664 struct thread *td;
665{
666 char *buf;
667 struct uio uio;
668 struct iovec iov;
669 int error = 0;
670
671 /*
672 * strategy:
673 * allocate a buffer of size MAXBSIZE.
674 * loop doing reads and writes, keeping track
675 * of the current uio offset.
676 * give up at the first sign of trouble.
677 */
678
679 bzero(&uio, sizeof(uio));
680
681 uio.uio_td = td;
682 uio.uio_segflg = UIO_SYSSPACE;
683 uio.uio_offset = 0;
684
685 VOP_LEASE(fvp, td, cred, LEASE_READ);
686 VOP_LEASE(tvp, td, cred, LEASE_WRITE);
687
688 buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);
689
690 /* ugly loop follows... */
691 do {
692 off_t offset = uio.uio_offset;
693 int count;
694 int bufoffset;
695
696 /*
697 * Setup for big read
698 */
699 uio.uio_iov = &iov;
700 uio.uio_iovcnt = 1;
701 iov.iov_base = buf;
702 iov.iov_len = MAXBSIZE;
703 uio.uio_resid = iov.iov_len;
704 uio.uio_rw = UIO_READ;
705
706 if ((error = VOP_READ(fvp, &uio, 0, cred)) != 0)
707 break;
708
709 /*
710 * Get bytes read, handle read eof case and setup for
711 * write loop
712 */
713 if ((count = MAXBSIZE - uio.uio_resid) == 0)
714 break;
715 bufoffset = 0;
716
717 /*
718 * Write until an error occurs or our buffer has been
719 * exhausted, then update the offset for the next read.
720 */
721 while (bufoffset < count) {
722 uio.uio_iov = &iov;
723 uio.uio_iovcnt = 1;
724 iov.iov_base = buf + bufoffset;
725 iov.iov_len = count - bufoffset;
726 uio.uio_offset = offset + bufoffset;
727 uio.uio_rw = UIO_WRITE;
728 uio.uio_resid = iov.iov_len;
729
730 if ((error = VOP_WRITE(tvp, &uio, 0, cred)) != 0)
731 break;
732 bufoffset += (count - bufoffset) - uio.uio_resid;
733 }
734 uio.uio_offset = offset + bufoffset;
735 } while (error == 0);
736
737 free(buf, M_TEMP);
738 return (error);
739}
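The same structure as a self-contained userland sketch over file descriptors: one big read, then a short-write-safe drain of the buffer, mirroring the bufoffset accounting above (error handling abbreviated):

#include <unistd.h>

static int
copy_fd(int ffd, int tfd)
{
	char buf[65536];	/* stand-in for MAXBSIZE */
	ssize_t count, n, bufoffset;

	for (;;) {
		count = read(ffd, buf, sizeof(buf));
		if (count <= 0)
			return (count < 0 ? -1 : 0);	/* error or EOF */
		/* write(2) may be short: loop until the buffer drains */
		for (bufoffset = 0; bufoffset < count; bufoffset += n) {
			n = write(tfd, buf + bufoffset, count - bufoffset);
			if (n < 0)
				return (-1);
		}
	}
}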
740
741/*
742 *
743 * un's vnode is assumed to be locked on entry and remains locked on exit.
744 */
745
746int
747union_copyup(un, docopy, cred, td)
748 struct union_node *un;
749 int docopy;
750 struct ucred *cred;
751 struct thread *td;
752{
753 int error;
754 struct mount *mp;
755 struct vnode *lvp, *uvp;
756
757 /*
758 * If the user does not have read permission, the vnode should not
759 * be copied to upper layer.
760 */
761 vn_lock(un->un_lowervp, LK_EXCLUSIVE | LK_RETRY, td);
762 error = VOP_ACCESS(un->un_lowervp, VREAD, cred, td);
763 VOP_UNLOCK(un->un_lowervp, 0, td);
764 if (error)
765 return (error);
766
767 if ((error = vn_start_write(un->un_dirvp, &mp, V_WAIT | PCATCH)) != 0)
768 return (error);
769 if ((error = union_vn_create(&uvp, un, td)) != 0) {
770 vn_finished_write(mp);
771 return (error);
772 }
773
774 lvp = un->un_lowervp;
775
776 KASSERT(uvp->v_usecount > 0, ("copy: uvp refcount 0: %d", uvp->v_usecount));
777 if (docopy) {
778 /*
 779 * XXX - should not ignore errors
780 * from VOP_CLOSE
781 */
782 vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY, td);
783 error = VOP_OPEN(lvp, FREAD, cred, td);
784 if (error == 0 && vn_canvmio(lvp) == TRUE)
785 error = vfs_object_create(lvp, td, cred);
786 if (error == 0) {
787 error = union_copyfile(lvp, uvp, cred, td);
788 VOP_UNLOCK(lvp, 0, td);
789 (void) VOP_CLOSE(lvp, FREAD, cred, td);
790 }
791 if (error == 0)
792 UDEBUG(("union: copied up %s\n", un->un_path));
793
794 }
795 VOP_UNLOCK(uvp, 0, td);
796 vn_finished_write(mp);
797 union_newupper(un, uvp);
798 KASSERT(uvp->v_usecount > 0, ("copy: uvp refcount 0: %d", uvp->v_usecount));
799 union_vn_close(uvp, FWRITE, cred, td);
800 KASSERT(uvp->v_usecount > 0, ("copy: uvp refcount 0: %d", uvp->v_usecount));
801 /*
802 * Subsequent IOs will go to the top layer, so
803 * call close on the lower vnode and open on the
804 * upper vnode to ensure that the filesystem keeps
 805 * its reference counts right. This doesn't do
806 * the right thing with (cred) and (FREAD) though.
807 * Ignoring error returns is not right, either.
808 */
809 if (error == 0) {
810 int i;
811
812 for (i = 0; i < un->un_openl; i++) {
813 (void) VOP_CLOSE(lvp, FREAD, cred, td);
814 (void) VOP_OPEN(uvp, FREAD, cred, td);
815 }
816 if (un->un_openl) {
817 if (vn_canvmio(uvp) == TRUE)
818 error = vfs_object_create(uvp, td, cred);
819 }
820 un->un_openl = 0;
821 }
822
823 return (error);
824
825}
826
827/*
828 * union_relookup:
829 *
830 * dvp should be locked on entry and will be locked on return. No
831 * net change in the ref count will occur.
832 *
833 * If an error is returned, *vpp will be invalid, otherwise it
834 * will hold a locked, referenced vnode. If *vpp == dvp then
835 * remember that only one exclusive lock is held.
836 */
837
838static int
839union_relookup(um, dvp, vpp, cnp, cn, path, pathlen)
840 struct union_mount *um;
841 struct vnode *dvp;
842 struct vnode **vpp;
843 struct componentname *cnp;
844 struct componentname *cn;
845 char *path;
846 int pathlen;
847{
848 int error;
849
850 /*
851 * A new componentname structure must be faked up because
852 * there is no way to know where the upper level cnp came
853 * from or what it is being used for. This must duplicate
854 * some of the work done by NDINIT, some of the work done
855 * by namei, some of the work done by lookup and some of
856 * the work done by VOP_LOOKUP when given a CREATE flag.
857 * Conclusion: Horrible.
858 */
859 cn->cn_namelen = pathlen;
860 cn->cn_pnbuf = uma_zalloc(namei_zone, M_WAITOK);
861 bcopy(path, cn->cn_pnbuf, cn->cn_namelen);
862 cn->cn_pnbuf[cn->cn_namelen] = '\0';
863
864 cn->cn_nameiop = CREATE;
865 cn->cn_flags = (LOCKPARENT|LOCKLEAF|HASBUF|SAVENAME|ISLASTCN);
866 cn->cn_thread = cnp->cn_thread;
867 if (um->um_op == UNMNT_ABOVE)
868 cn->cn_cred = cnp->cn_cred;
869 else
870 cn->cn_cred = um->um_cred;
871 cn->cn_nameptr = cn->cn_pnbuf;
872 cn->cn_consume = cnp->cn_consume;
873
874 VREF(dvp);
875 VOP_UNLOCK(dvp, 0, cnp->cn_thread);
876
877 /*
878 * Pass dvp unlocked and referenced on call to relookup().
879 *
880 * If an error occurs, dvp will be returned unlocked and dereferenced.
881 */
882
883 if ((error = relookup(dvp, vpp, cn)) != 0) {
884 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_thread);
885 return(error);
886 }
887
888 /*
889 * If no error occurs, dvp will be returned locked with the reference
890 * left as before, and vpp will be returned referenced and locked.
891 *
892 * We want to return with dvp as it was passed to us, so we get
893 * rid of our reference.
894 */
895 vrele(dvp);
896 return (0);
897}
898
899/*
900 * Create a shadow directory in the upper layer.
901 * The new vnode is returned locked.
902 *
 903 * (um) points to the union mount structure for access to
 904 * the mounting process's credentials.
905 * (dvp) is the directory in which to create the shadow directory,
906 * it is locked (but not ref'd) on entry and return.
907 * (cnp) is the componentname to be created.
908 * (vpp) is the returned newly created shadow directory, which
909 * is returned locked and ref'd
910 */
911int
912union_mkshadow(um, dvp, cnp, vpp)
913 struct union_mount *um;
914 struct vnode *dvp;
915 struct componentname *cnp;
916 struct vnode **vpp;
917{
918 int error;
919 struct vattr va;
920 struct thread *td = cnp->cn_thread;
921 struct componentname cn;
922 struct mount *mp;
923
924 if ((error = vn_start_write(dvp, &mp, V_WAIT | PCATCH)) != 0)
925 return (error);
926 if ((error = union_relookup(um, dvp, vpp, cnp, &cn,
927 cnp->cn_nameptr, cnp->cn_namelen)) != 0) {
928 vn_finished_write(mp);
929 return (error);
930 }
931
932 if (*vpp) {
933 if (cn.cn_flags & HASBUF) {
934 uma_zfree(namei_zone, cn.cn_pnbuf);
935 cn.cn_flags &= ~HASBUF;
936 }
937 if (dvp == *vpp)
938 vrele(*vpp);
939 else
940 vput(*vpp);
941 vn_finished_write(mp);
942 *vpp = NULLVP;
943 return (EEXIST);
944 }
945
946 /*
947 * policy: when creating the shadow directory in the
948 * upper layer, create it owned by the user who did
949 * the mount, group from parent directory, and mode
950 * 777 modified by umask (ie mostly identical to the
951 * mkdir syscall). (jsp, kb)
952 */
953
954 VATTR_NULL(&va);
955 va.va_type = VDIR;
956 va.va_mode = um->um_cmode;
957
958 /* VOP_LEASE: dvp is locked */
959 VOP_LEASE(dvp, td, cn.cn_cred, LEASE_WRITE);
960
961 error = VOP_MKDIR(dvp, vpp, &cn, &va);
962 if (cn.cn_flags & HASBUF) {
963 uma_zfree(namei_zone, cn.cn_pnbuf);
964 cn.cn_flags &= ~HASBUF;
965 }
966 /*vput(dvp);*/
967 vn_finished_write(mp);
968 return (error);
969}
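Concretely: with the usual umask of 022, the policy above yields shadow directories created 0755 (0777 masked by the umask), owned by the user who performed the mount, with the group taken from the parent directory.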
970
971/*
972 * Create a whiteout entry in the upper layer.
973 *
 974 * (um) points to the union mount structure for access to
 975 * the mounting process's credentials.
976 * (dvp) is the directory in which to create the whiteout.
977 * it is locked on entry and return.
978 * (cnp) is the componentname to be created.
979 */
980int
981union_mkwhiteout(um, dvp, cnp, path)
982 struct union_mount *um;
983 struct vnode *dvp;
984 struct componentname *cnp;
985 char *path;
986{
987 int error;
988 struct thread *td = cnp->cn_thread;
989 struct vnode *wvp;
990 struct componentname cn;
991 struct mount *mp;
992
993 if ((error = vn_start_write(dvp, &mp, V_WAIT | PCATCH)) != 0)
994 return (error);
995 error = union_relookup(um, dvp, &wvp, cnp, &cn, path, strlen(path));
996 if (error) {
997 vn_finished_write(mp);
998 return (error);
999 }
1000
1001 if (wvp) {
1002 if (cn.cn_flags & HASBUF) {
1003 uma_zfree(namei_zone, cn.cn_pnbuf);
1004 cn.cn_flags &= ~HASBUF;
1005 }
1006 if (wvp == dvp)
1007 vrele(wvp);
1008 else
1009 vput(wvp);
1010 vn_finished_write(mp);
1011 return (EEXIST);
1012 }
1013
1014 /* VOP_LEASE: dvp is locked */
1015 VOP_LEASE(dvp, td, td->td_ucred, LEASE_WRITE);
1016
1017 error = VOP_WHITEOUT(dvp, &cn, CREATE);
1018 if (cn.cn_flags & HASBUF) {
1019 uma_zfree(namei_zone, cn.cn_pnbuf);
1020 cn.cn_flags &= ~HASBUF;
1021 }
1022 vn_finished_write(mp);
1023 return (error);
1024}
1025
1026/*
1027 * union_vn_create: creates and opens a new shadow file
1028 * on the upper union layer. this function is similar
1029 * in spirit to calling vn_open but it avoids calling namei().
1030 * the problem with calling namei is that a) it locks too many
1031 * things, and b) it doesn't start at the "right" directory,
1032 * whereas relookup is told where to start.
1033 *
1034 * On entry, the vnode associated with un is locked. It remains locked
1035 * on return.
1036 *
1037 * If no error occurs, *vpp contains a locked referenced vnode for your
 1038 * use. If an error occurs, *vpp is undefined.
1039 */
1040static int
1041union_vn_create(vpp, un, td)
1042 struct vnode **vpp;
1043 struct union_node *un;
1044 struct thread *td;
1045{
1046 struct vnode *vp;
1047 struct ucred *cred = td->td_ucred;
1048 struct vattr vat;
1049 struct vattr *vap = &vat;
1050 int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
1051 int error;
1052 int cmode;
1053 struct componentname cn;
1054
1055 *vpp = NULLVP;
1056 FILEDESC_LOCK(td->td_proc->p_fd);
1057 cmode = UN_FILEMODE & ~td->td_proc->p_fd->fd_cmask;
1058 FILEDESC_UNLOCK(td->td_proc->p_fd);
1059
1060 /*
1061 * Build a new componentname structure (for the same
 1062 * reasons outlined in union_mkshadow).
1063 * The difference here is that the file is owned by
1064 * the current user, rather than by the person who
1065 * did the mount, since the current user needs to be
1066 * able to write the file (that's why it is being
1067 * copied in the first place).
1068 */
1069 cn.cn_namelen = strlen(un->un_path);
1070 cn.cn_pnbuf = uma_zalloc(namei_zone, M_WAITOK);
1071 bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1);
1072 cn.cn_nameiop = CREATE;
1073 cn.cn_flags = (LOCKPARENT|LOCKLEAF|HASBUF|SAVENAME|ISLASTCN);
1074 cn.cn_thread = td;
1075 cn.cn_cred = td->td_ucred;
1076 cn.cn_nameptr = cn.cn_pnbuf;
1077 cn.cn_consume = 0;
1078
1079 /*
1080 * Pass dvp unlocked and referenced on call to relookup().
1081 *
1082 * If an error occurs, dvp will be returned unlocked and dereferenced.
1083 */
1084 VREF(un->un_dirvp);
1085 error = relookup(un->un_dirvp, &vp, &cn);
1086 if (error)
1087 return (error);
1088
1089 /*
1090 * If no error occurs, dvp will be returned locked with the reference
1091 * left as before, and vpp will be returned referenced and locked.
1092 */
1093 if (vp) {
1094 vput(un->un_dirvp);
1095 if (cn.cn_flags & HASBUF) {
1096 uma_zfree(namei_zone, cn.cn_pnbuf);
1097 cn.cn_flags &= ~HASBUF;
1098 }
1099 if (vp == un->un_dirvp)
1100 vrele(vp);
1101 else
1102 vput(vp);
1103 return (EEXIST);
1104 }
1105
1106 /*
1107 * Good - there was no race to create the file
1108 * so go ahead and create it. The permissions
1109 * on the file will be 0666 modified by the
1110 * current user's umask. Access to the file, while
1111 * it is unioned, will require access to the top *and*
1112 * bottom files. Access when not unioned will simply
1113 * require access to the top-level file.
1114 * TODO: confirm choice of access permissions.
1115 */
1116 VATTR_NULL(vap);
1117 vap->va_type = VREG;
1118 vap->va_mode = cmode;
1119 VOP_LEASE(un->un_dirvp, td, cred, LEASE_WRITE);
1120 error = VOP_CREATE(un->un_dirvp, &vp, &cn, vap);
1121 if (cn.cn_flags & HASBUF) {
1122 uma_zfree(namei_zone, cn.cn_pnbuf);
1123 cn.cn_flags &= ~HASBUF;
1124 }
1125 vput(un->un_dirvp);
1126 if (error)
1127 return (error);
1128
1129 error = VOP_OPEN(vp, fmode, cred, td);
1130 if (error == 0 && vn_canvmio(vp) == TRUE)
1131 error = vfs_object_create(vp, td, cred);
1132 if (error) {
1133 vput(vp);
1134 return (error);
1135 }
1136 vp->v_writecount++;
1137 *vpp = vp;
1138 return (0);
1139}
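For the cmode computed at the top of this function: UN_FILEMODE is the 0666 default mentioned in the comment below, so a typical umask of 022 yields shadow files created 0644. A one-line check:

#include <stdio.h>

int
main(void)
{
	printf("0%o\n", (unsigned)(0666 & ~022));	/* prints 0644 */
	return (0);
}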
1140
1141static int
1142union_vn_close(vp, fmode, cred, td)
1143 struct vnode *vp;
1144 int fmode;
1145 struct ucred *cred;
1146 struct thread *td;
1147{
1148
1149 if (fmode & FWRITE)
1150 --vp->v_writecount;
1151 return (VOP_CLOSE(vp, fmode, cred, td));
1152}
1153
1154#if 0
1155
1156/*
1157 * union_removed_upper:
1158 *
1159 * called with union_node unlocked. XXX
1160 */
1161
1162void
1163union_removed_upper(un)
1164 struct union_node *un;
1165{
1166 struct thread *td = curthread; /* XXX */
1167 struct vnode **vpp;
1168
1169 /*
1170 * Do not set the uppervp to NULLVP. If lowervp is NULLVP,
1171 * union node will have neither uppervp nor lowervp. We remove
 1172 * the union node from the cache, so that it will not be referenced.
1173 */
1174 union_newupper(un, NULLVP);
1175 if (un->un_dircache != 0) {
1176 for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
1177 vrele(*vpp);
1178 free(un->un_dircache, M_TEMP);
1179 un->un_dircache = 0;
1180 }
1181
1182 if (un->un_flags & UN_CACHED) {
1183 un->un_flags &= ~UN_CACHED;
1184 LIST_REMOVE(un, un_cache);
1185 }
1186}
1187
1188#endif
1189
1190/*
1191 * determine whether a whiteout is needed
1192 * during a remove/rmdir operation.
1193 */
1194int
1195union_dowhiteout(un, cred, td)
1196 struct union_node *un;
1197 struct ucred *cred;
1198 struct thread *td;
1199{
1200 struct vattr va;
1201
1202 if (un->un_lowervp != NULLVP)
1203 return (1);
1204
1205 if (VOP_GETATTR(un->un_uppervp, &va, cred, td) == 0 &&
1206 (va.va_flags & OPAQUE))
1207 return (1);
1208
1209 return (0);
1210}
1211
1212static void
1213union_dircache_r(vp, vppp, cntp)
1214 struct vnode *vp;
1215 struct vnode ***vppp;
1216 int *cntp;
1217{
1218 struct union_node *un;
1219
1220 if (vp->v_op != union_vnodeop_p) {
1221 if (vppp) {
1222 VREF(vp);
1223 *(*vppp)++ = vp;
1224 if (--(*cntp) == 0)
1225 panic("union: dircache table too small");
1226 } else {
1227 (*cntp)++;
1228 }
1229
1230 return;
1231 }
1232
1233 un = VTOUNION(vp);
1234 if (un->un_uppervp != NULLVP)
1235 union_dircache_r(un->un_uppervp, vppp, cntp);
1236 if (un->un_lowervp != NULLVP)
1237 union_dircache_r(un->un_lowervp, vppp, cntp);
1238}
1239
1240struct vnode *
1241union_dircache(vp, td)
1242 struct vnode *vp;
1243 struct thread *td;
1244{
1245 int cnt;
1246 struct vnode *nvp;
1247 struct vnode **vpp;
1248 struct vnode **dircache;
1249 struct union_node *un;
1250 int error;
1251
1252 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1253 dircache = VTOUNION(vp)->un_dircache;
1254
1255 nvp = NULLVP;
1256
1257 if (dircache == NULL) {
1258 cnt = 0;
1259 union_dircache_r(vp, 0, &cnt);
1260 cnt++;
1261 dircache = malloc(cnt * sizeof(struct vnode *),
1262 M_TEMP, M_WAITOK);
1263 vpp = dircache;
1264 union_dircache_r(vp, &vpp, &cnt);
1265 *vpp = NULLVP;
1266 vpp = dircache + 1;
1267 } else {
1268 vpp = dircache;
1269 do {
1270 if (*vpp++ == VTOUNION(vp)->un_uppervp)
1271 break;
1272 } while (*vpp != NULLVP);
1273 }
1274
1275 if (*vpp == NULLVP)
1276 goto out;
1277
1278 /*vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, td);*/
1279 UDEBUG(("ALLOCVP-3 %p ref %d\n", *vpp, (*vpp ? (*vpp)->v_usecount : -99)));
1280 VREF(*vpp);
1281 error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, NULL, *vpp, NULLVP, 0);
1282 UDEBUG(("ALLOCVP-3B %p ref %d\n", nvp, (*vpp ? (*vpp)->v_usecount : -99)));
1283 if (error)
1284 goto out;
1285
1286 VTOUNION(vp)->un_dircache = 0;
1287 un = VTOUNION(nvp);
1288 un->un_dircache = dircache;
1289
1290out:
1291 VOP_UNLOCK(vp, 0, td);
1292 return (nvp);
1293}
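Reading union_dircache() together with union_dircache_r() above: the dircache array is built in two passes. Pass 1 (vppp == NULL) only counts the upper/lower layer vnodes reachable from vp; one extra slot is then reserved for the terminator; pass 2 takes a reference on each vnode, fills the array, and NULLVP-terminates it, panicking if the count went stale in between.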
1294
1295/*
1296 * Module glue to remove #ifdef UNION from vfs_syscalls.c
1297 */
1298static int
1299union_dircheck(struct thread *td, struct vnode **vp, struct file *fp)
1300{
1301 int error = 0;
1302
1303 if ((*vp)->v_op == union_vnodeop_p) {
1304 struct vnode *lvp;
1305
1306 lvp = union_dircache(*vp, td);
1307 if (lvp != NULLVP) {
1308 struct vattr va;
1309
1310 /*
1311 * If the directory is opaque,
1312 * then don't show lower entries
1313 */
1314 error = VOP_GETATTR(*vp, &va, fp->f_cred, td);
1315 if (va.va_flags & OPAQUE) {
1316 vput(lvp);
1317 lvp = NULL;
1318 }
1319 }
1320
1321 if (lvp != NULLVP) {
1322 error = VOP_OPEN(lvp, FREAD, fp->f_cred, td);
1323 if (error == 0 && vn_canvmio(lvp) == TRUE)
1324 error = vfs_object_create(lvp, td, fp->f_cred);
1325 if (error) {
1326 vput(lvp);
1327 return (error);
1328 }
1329 VOP_UNLOCK(lvp, 0, td);
1330 FILE_LOCK(fp);
1331 fp->f_data = (caddr_t) lvp;
1332 fp->f_offset = 0;
1333 FILE_UNLOCK(fp);
1334 error = vn_close(*vp, FREAD, fp->f_cred, td);
1335 if (error)
1336 return (error);
1337 *vp = lvp;
1338 return -1; /* goto unionread */
1339 }
1340 }
1341 return error;
1342}
1343
1344static int
1345union_modevent(module_t mod, int type, void *data)
1346{
1347 switch (type) {
1348 case MOD_LOAD:
1349 union_dircheckp = union_dircheck;
1350 break;
1351 case MOD_UNLOAD:
1352 union_dircheckp = NULL;
1353 break;
1354 default:
1355 break;
1356 }
1357 return 0;
1358}
1359
1360static moduledata_t union_mod = {
1361 "union_dircheck",
1362 union_modevent,
1363 NULL
1364};
1365
1366DECLARE_MODULE(union_dircheck, union_mod, SI_SUB_VFS, SI_ORDER_ANY);
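How the glue is consumed on the other side (the caller lives in vfs_syscalls.c, not shown here): the getdirentries path calls through union_dircheckp when it is non-NULL, and the -1 return above asks it to restart the directory read on the substituted vnode, per the "goto unionread" comment. A sketch of the assumed caller:

	if (union_dircheckp != NULL) {
		error = (*union_dircheckp)(td, &vp, fp);
		if (error == -1)
			goto unionread;	/* reread from the replacement vnode */
		if (error)
			return (error);
	}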
550 if (error) {
551 /*
552 * If an error occurs clear out vnodes.
553 */
554 if (lowervp)
555 vrele(lowervp);
556 if (uppervp)
557 vrele(uppervp);
558 if (upperdvp)
559 vrele(upperdvp);
560 *vpp = NULL;
561 goto out;
562 }
563
564 MALLOC((*vpp)->v_data, void *, sizeof(struct union_node),
565 M_TEMP, M_WAITOK);
566
567 ASSERT_VOP_LOCKED(*vpp, "union_allocvp");
568 (*vpp)->v_vflag |= vflag;
569 if (uppervp)
570 (*vpp)->v_type = uppervp->v_type;
571 else
572 (*vpp)->v_type = lowervp->v_type;
573
574 un = VTOUNION(*vpp);
575 bzero(un, sizeof(*un));
576
577 lockinit(&un->un_lock, PVFS, "unlock", VLKTIMEOUT, 0);
578 vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, td);
579
580 un->un_vnode = *vpp;
581 un->un_uppervp = uppervp;
582 un->un_uppersz = VNOVAL;
583 un->un_lowervp = lowervp;
584 un->un_lowersz = VNOVAL;
585 un->un_dirvp = upperdvp;
586 un->un_pvp = dvp; /* only parent dir in new allocation */
587 if (dvp != NULLVP)
588 VREF(dvp);
589 un->un_dircache = 0;
590 un->un_openl = 0;
591
592 if (cnp && (lowervp != NULLVP)) {
593 un->un_path = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
594 bcopy(cnp->cn_nameptr, un->un_path, cnp->cn_namelen);
595 un->un_path[cnp->cn_namelen] = '\0';
596 } else {
597 un->un_path = 0;
598 un->un_dirvp = NULL;
599 }
600
601 if (docache) {
602 LIST_INSERT_HEAD(&unhead[hash], un, un_cache);
603 un->un_flags |= UN_CACHED;
604 }
605
606out:
607 if (docache)
608 union_list_unlock(hash);
609
610 return (error);
611}
612
613int
614union_freevp(vp)
615 struct vnode *vp;
616{
617 struct union_node *un = VTOUNION(vp);
618
619 if (un->un_flags & UN_CACHED) {
620 un->un_flags &= ~UN_CACHED;
621 LIST_REMOVE(un, un_cache);
622 }
623
624 if (un->un_pvp != NULLVP) {
625 vrele(un->un_pvp);
626 un->un_pvp = NULL;
627 }
628 if (un->un_uppervp != NULLVP) {
629 vrele(un->un_uppervp);
630 un->un_uppervp = NULL;
631 }
632 if (un->un_lowervp != NULLVP) {
633 vrele(un->un_lowervp);
634 un->un_lowervp = NULL;
635 }
636 if (un->un_dirvp != NULLVP) {
637 vrele(un->un_dirvp);
638 un->un_dirvp = NULL;
639 }
640 if (un->un_path) {
641 free(un->un_path, M_TEMP);
642 un->un_path = NULL;
643 }
644 lockdestroy(&un->un_lock);
645
646 FREE(vp->v_data, M_TEMP);
647 vp->v_data = 0;
648
649 return (0);
650}
651
652/*
653 * copyfile. copy the vnode (fvp) to the vnode (tvp)
654 * using a sequence of reads and writes. both (fvp)
655 * and (tvp) are locked on entry and exit.
656 *
657 * fvp and tvp are both exclusive locked on call, but their refcount's
658 * haven't been bumped at all.
659 */
660static int
661union_copyfile(fvp, tvp, cred, td)
662 struct vnode *fvp;
663 struct vnode *tvp;
664 struct ucred *cred;
665 struct thread *td;
666{
667 char *buf;
668 struct uio uio;
669 struct iovec iov;
670 int error = 0;
671
672 /*
673 * strategy:
674 * allocate a buffer of size MAXBSIZE.
675 * loop doing reads and writes, keeping track
676 * of the current uio offset.
677 * give up at the first sign of trouble.
678 */
679
680 bzero(&uio, sizeof(uio));
681
682 uio.uio_td = td;
683 uio.uio_segflg = UIO_SYSSPACE;
684 uio.uio_offset = 0;
685
686 VOP_LEASE(fvp, td, cred, LEASE_READ);
687 VOP_LEASE(tvp, td, cred, LEASE_WRITE);
688
689 buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);
690
691 /* ugly loop follows... */
692 do {
693 off_t offset = uio.uio_offset;
694 int count;
695 int bufoffset;
696
697 /*
698 * Setup for big read
699 */
700 uio.uio_iov = &iov;
701 uio.uio_iovcnt = 1;
702 iov.iov_base = buf;
703 iov.iov_len = MAXBSIZE;
704 uio.uio_resid = iov.iov_len;
705 uio.uio_rw = UIO_READ;
706
707 if ((error = VOP_READ(fvp, &uio, 0, cred)) != 0)
708 break;
709
710 /*
711 * Get bytes read, handle read eof case and setup for
712 * write loop
713 */
714 if ((count = MAXBSIZE - uio.uio_resid) == 0)
715 break;
716 bufoffset = 0;
717
718 /*
719 * Write until an error occurs or our buffer has been
720 * exhausted, then update the offset for the next read.
721 */
722 while (bufoffset < count) {
723 uio.uio_iov = &iov;
724 uio.uio_iovcnt = 1;
725 iov.iov_base = buf + bufoffset;
726 iov.iov_len = count - bufoffset;
727 uio.uio_offset = offset + bufoffset;
728 uio.uio_rw = UIO_WRITE;
729 uio.uio_resid = iov.iov_len;
730
731 if ((error = VOP_WRITE(tvp, &uio, 0, cred)) != 0)
732 break;
733 bufoffset += (count - bufoffset) - uio.uio_resid;
734 }
735 uio.uio_offset = offset + bufoffset;
736 } while (error == 0);
737
738 free(buf, M_TEMP);
739 return (error);
740}
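/*
 * Editor's sketch (not part of the original source): union_copyfile()'s
 * read/write strategy in self-contained userland form. The essential
 * detail mirrored here is the inner write loop: a single write (like a
 * single VOP_WRITE above) may consume less than was read, so the code
 * advances a buffer offset until the chunk drains or an error occurs.
 * The 64KB buffer and the copy_fd name are illustrative stand-ins for
 * MAXBSIZE and the kernel routine.
 */
#if 0
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

static int
copy_fd(int ffd, int tfd)
{
	char *buf;
	ssize_t n, w;
	size_t off;
	int error = 0;

	if ((buf = malloc(65536)) == NULL)
		return (ENOMEM);
	for (;;) {
		n = read(ffd, buf, 65536);	/* "big read" */
		if (n < 0) {
			error = errno;
			break;
		}
		if (n == 0)			/* EOF: all done */
			break;
		/* Write until this chunk is exhausted; writes may be short. */
		for (off = 0; off < (size_t)n; off += (size_t)w) {
			w = write(tfd, buf + off, (size_t)n - off);
			if (w < 0) {
				error = errno;
				goto out;
			}
		}
	}
out:
	free(buf);
	return (error);
}
#endif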
741
742/*
743 * union_copyup:
744 * un's vnode is assumed to be locked on entry and remains locked on exit.
745 */
746
747int
748union_copyup(un, docopy, cred, td)
749 struct union_node *un;
750 int docopy;
751 struct ucred *cred;
752 struct thread *td;
753{
754 int error;
755 struct mount *mp;
756 struct vnode *lvp, *uvp;
757
758 /*
759 * If the user does not have read permission, the vnode should not
760 * be copied to the upper layer.
761 */
762 vn_lock(un->un_lowervp, LK_EXCLUSIVE | LK_RETRY, td);
763 error = VOP_ACCESS(un->un_lowervp, VREAD, cred, td);
764 VOP_UNLOCK(un->un_lowervp, 0, td);
765 if (error)
766 return (error);
767
768 if ((error = vn_start_write(un->un_dirvp, &mp, V_WAIT | PCATCH)) != 0)
769 return (error);
770 if ((error = union_vn_create(&uvp, un, td)) != 0) {
771 vn_finished_write(mp);
772 return (error);
773 }
774
775 lvp = un->un_lowervp;
776
777 KASSERT(uvp->v_usecount > 0, ("copy: uvp refcount 0: %d", uvp->v_usecount));
778 if (docopy) {
779 /*
780 * XXX - should not ignore errors
781 * from VOP_CLOSE.
782 */
783 vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY, td);
784 error = VOP_OPEN(lvp, FREAD, cred, td);
785 if (error == 0 && vn_canvmio(lvp) == TRUE)
786 error = vfs_object_create(lvp, td, cred);
787 if (error == 0) {
788 error = union_copyfile(lvp, uvp, cred, td);
789 VOP_UNLOCK(lvp, 0, td);
790 (void) VOP_CLOSE(lvp, FREAD, cred, td);
791 }
792 if (error == 0)
793 UDEBUG(("union: copied up %s\n", un->un_path));
794
795 }
796 VOP_UNLOCK(uvp, 0, td);
797 vn_finished_write(mp);
798 union_newupper(un, uvp);
799 KASSERT(uvp->v_usecount > 0, ("copy: uvp refcount 0: %d", uvp->v_usecount));
800 union_vn_close(uvp, FWRITE, cred, td);
801 KASSERT(uvp->v_usecount > 0, ("copy: uvp refcount 0: %d", uvp->v_usecount));
802 /*
803 * Subsequent IOs will go to the top layer, so
804 * call close on the lower vnode and open on the
805 * upper vnode to ensure that the filesystem keeps
806 * its reference counts right. This doesn't do
807 * the right thing with (cred) and (FREAD) though.
808 * Ignoring error returns is not right, either.
809 */
810 if (error == 0) {
811 int i;
812
813 for (i = 0; i < un->un_openl; i++) {
814 (void) VOP_CLOSE(lvp, FREAD, cred, td);
815 (void) VOP_OPEN(uvp, FREAD, cred, td);
816 }
817 if (un->un_openl) {
818 if (vn_canvmio(uvp) == TRUE)
819 error = vfs_object_create(uvp, td, cred);
820 }
821 un->un_openl = 0;
822 }
823
824 return (error);
825
826}
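/*
 * Editor's sketch (not part of the original source): the open-count
 * rebalancing step at the end of union_copyup() in isolation. Every
 * open previously counted against the lower vnode (un_openl) is moved
 * to the new upper vnode so that each layer sees matching open/close
 * pairs. lower_close()/upper_open() are illustrative stand-ins for the
 * VOP_CLOSE(lvp, ...)/VOP_OPEN(uvp, ...) calls in the real code.
 */
#if 0
	for (i = 0; i < un->un_openl; i++) {
		lower_close(un);	/* VOP_CLOSE(lvp, FREAD, cred, td) */
		upper_open(un);		/* VOP_OPEN(uvp, FREAD, cred, td) */
	}
	un->un_openl = 0;		/* all opens now charged to the top */
#endif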
827
828/*
829 * union_relookup:
830 *
831 * dvp should be locked on entry and will be locked on return. No
832 * net change in the ref count will occur.
833 *
834 * If an error is returned, *vpp will be invalid; otherwise it
835 * will hold a locked, referenced vnode. If *vpp == dvp, then
836 * remember that only one exclusive lock is held.
837 */
838
839static int
840union_relookup(um, dvp, vpp, cnp, cn, path, pathlen)
841 struct union_mount *um;
842 struct vnode *dvp;
843 struct vnode **vpp;
844 struct componentname *cnp;
845 struct componentname *cn;
846 char *path;
847 int pathlen;
848{
849 int error;
850
851 /*
852 * A new componentname structure must be faked up because
853 * there is no way to know where the upper level cnp came
854 * from or what it is being used for. This must duplicate
855 * some of the work done by NDINIT, some of the work done
856 * by namei, some of the work done by lookup and some of
857 * the work done by VOP_LOOKUP when given a CREATE flag.
858 * Conclusion: Horrible.
859 */
860 cn->cn_namelen = pathlen;
861 cn->cn_pnbuf = uma_zalloc(namei_zone, M_WAITOK);
862 bcopy(path, cn->cn_pnbuf, cn->cn_namelen);
863 cn->cn_pnbuf[cn->cn_namelen] = '\0';
864
865 cn->cn_nameiop = CREATE;
866 cn->cn_flags = (LOCKPARENT|LOCKLEAF|HASBUF|SAVENAME|ISLASTCN);
867 cn->cn_thread = cnp->cn_thread;
868 if (um->um_op == UNMNT_ABOVE)
869 cn->cn_cred = cnp->cn_cred;
870 else
871 cn->cn_cred = um->um_cred;
872 cn->cn_nameptr = cn->cn_pnbuf;
873 cn->cn_consume = cnp->cn_consume;
874
875 VREF(dvp);
876 VOP_UNLOCK(dvp, 0, cnp->cn_thread);
877
878 /*
879 * Pass dvp unlocked and referenced on call to relookup().
880 *
881 * If an error occurs, dvp will be returned unlocked and dereferenced.
882 */
883
884 if ((error = relookup(dvp, vpp, cn)) != 0) {
885 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_thread);
886 return (error);
887 }
888
889 /*
890 * If no error occurs, dvp will be returned locked with the reference
891 * left as before, and vpp will be returned referenced and locked.
892 *
893 * We want to return with dvp as it was passed to us, so we get
894 * rid of our reference.
895 */
896 vrele(dvp);
897 return (0);
898}
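/*
 * Editor's sketch (not part of the original source): the lock and
 * reference contract of union_relookup() from the caller's side,
 * restating the comments above as code. Whatever the outcome, dvp
 * comes back locked with no net refcount change; only *vpp depends on
 * the return value. Names are illustrative.
 */
#if 0
	error = union_relookup(um, dvp, &vp, cnp, &cn, path, pathlen);
	if (error == 0) {
		/* dvp: locked, refcount as before the call. */
		/* vp: locked and referenced; if vp == dvp, only one */
		/* exclusive lock is held between them. */
	} else {
		/* dvp: locked, refcount as before the call. vp: invalid. */
	}
#endif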
899
900/*
901 * Create a shadow directory in the upper layer.
902 * The new vnode is returned locked.
903 *
904 * (um) points to the union mount structure for access to
905 * the mounting process's credentials.
906 * (dvp) is the directory in which to create the shadow directory;
907 * it is locked (but not ref'd) on entry and return.
908 * (cnp) is the componentname to be created.
909 * (vpp) is the newly created shadow directory, which is
910 * returned locked and ref'd.
911 */
912int
913union_mkshadow(um, dvp, cnp, vpp)
914 struct union_mount *um;
915 struct vnode *dvp;
916 struct componentname *cnp;
917 struct vnode **vpp;
918{
919 int error;
920 struct vattr va;
921 struct thread *td = cnp->cn_thread;
922 struct componentname cn;
923 struct mount *mp;
924
925 if ((error = vn_start_write(dvp, &mp, V_WAIT | PCATCH)) != 0)
926 return (error);
927 if ((error = union_relookup(um, dvp, vpp, cnp, &cn,
928 cnp->cn_nameptr, cnp->cn_namelen)) != 0) {
929 vn_finished_write(mp);
930 return (error);
931 }
932
933 if (*vpp) {
934 if (cn.cn_flags & HASBUF) {
935 uma_zfree(namei_zone, cn.cn_pnbuf);
936 cn.cn_flags &= ~HASBUF;
937 }
938 if (dvp == *vpp)
939 vrele(*vpp);
940 else
941 vput(*vpp);
942 vn_finished_write(mp);
943 *vpp = NULLVP;
944 return (EEXIST);
945 }
946
947 /*
948 * Policy: when creating the shadow directory in the
949 * upper layer, create it owned by the user who did
950 * the mount, group from parent directory, and mode
951 * 777 modified by umask (i.e., mostly identical to the
952 * mkdir syscall). (jsp, kb)
953 */
954
955 VATTR_NULL(&va);
956 va.va_type = VDIR;
957 va.va_mode = um->um_cmode;
958
959 /* VOP_LEASE: dvp is locked */
960 VOP_LEASE(dvp, td, cn.cn_cred, LEASE_WRITE);
961
962 error = VOP_MKDIR(dvp, vpp, &cn, &va);
963 if (cn.cn_flags & HASBUF) {
964 uma_zfree(namei_zone, cn.cn_pnbuf);
965 cn.cn_flags &= ~HASBUF;
966 }
967 /*vput(dvp);*/
968 vn_finished_write(mp);
969 return (error);
970}
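/*
 * Editor's sketch (not part of the original source): the shadow
 * directory mode policy above, in userland terms. Per the policy
 * comment, um_cmode behaves like mode 0777 modified by the mounting
 * process's umask, i.e. the same mode a plain mkdir(2) with 0777 would
 * yield. The function name is illustrative.
 */
#if 0
#include <sys/types.h>

static mode_t
shadow_dir_mode(mode_t cur_umask)
{
	/* umask 022 -> 0755, umask 077 -> 0700, etc. */
	return (0777 & ~cur_umask);
}
#endif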
971
972/*
973 * Create a whiteout entry in the upper layer.
974 *
975 * (um) points to the union mount structure for access to
976 * the mounting process's credentials.
977 * (dvp) is the directory in which to create the whiteout;
978 * it is locked on entry and return.
979 * (cnp) is the componentname to be created.
980 */
981int
982union_mkwhiteout(um, dvp, cnp, path)
983 struct union_mount *um;
984 struct vnode *dvp;
985 struct componentname *cnp;
986 char *path;
987{
988 int error;
989 struct thread *td = cnp->cn_thread;
990 struct vnode *wvp;
991 struct componentname cn;
992 struct mount *mp;
993
994 if ((error = vn_start_write(dvp, &mp, V_WAIT | PCATCH)) != 0)
995 return (error);
996 error = union_relookup(um, dvp, &wvp, cnp, &cn, path, strlen(path));
997 if (error) {
998 vn_finished_write(mp);
999 return (error);
1000 }
1001
1002 if (wvp) {
1003 if (cn.cn_flags & HASBUF) {
1004 uma_zfree(namei_zone, cn.cn_pnbuf);
1005 cn.cn_flags &= ~HASBUF;
1006 }
1007 if (wvp == dvp)
1008 vrele(wvp);
1009 else
1010 vput(wvp);
1011 vn_finished_write(mp);
1012 return (EEXIST);
1013 }
1014
1015 /* VOP_LEASE: dvp is locked */
1016 VOP_LEASE(dvp, td, td->td_ucred, LEASE_WRITE);
1017
1018 error = VOP_WHITEOUT(dvp, &cn, CREATE);
1019 if (cn.cn_flags & HASBUF) {
1020 uma_zfree(namei_zone, cn.cn_pnbuf);
1021 cn.cn_flags &= ~HASBUF;
1022 }
1023 vn_finished_write(mp);
1024 return (error);
1025}
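/*
 * Editor's sketch (not part of the original source): what the whiteout
 * created above buys at lookup time, in pseudo-C. A whiteout in the
 * upper layer makes a name appear absent even though the lower layer
 * still holds it; without one, removing a file would let the lower
 * copy show through again. All names here are illustrative.
 */
#if 0
	/* Two-layer union lookup, illustrative only. */
	if (upper_has_whiteout(udvp, name))
		return (ENOENT);	/* the name was deleted via whiteout */
	error = upper_lookup(udvp, name, vpp);
	if (error != ENOENT)
		return (error);		/* found in the upper layer, or hard error */
	return (lower_lookup(ldvp, name, vpp));
#endif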
1026
1027/*
1028 * union_vn_create: creates and opens a new shadow file
1029 * on the upper union layer. This function is similar
1030 * in spirit to calling vn_open, but it avoids calling namei().
1031 * The problem with calling namei is that a) it locks too many
1032 * things, and b) it doesn't start at the "right" directory,
1033 * whereas relookup is told where to start.
1034 *
1035 * On entry, the vnode associated with un is locked. It remains locked
1036 * on return.
1037 *
1038 * If no error occurs, *vpp contains a locked, referenced vnode for your
1039 * use. If an error occurs, *vpp is undefined.
1040 */
1041static int
1042union_vn_create(vpp, un, td)
1043 struct vnode **vpp;
1044 struct union_node *un;
1045 struct thread *td;
1046{
1047 struct vnode *vp;
1048 struct ucred *cred = td->td_ucred;
1049 struct vattr vat;
1050 struct vattr *vap = &vat;
1051 int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
1052 int error;
1053 int cmode;
1054 struct componentname cn;
1055
1056 *vpp = NULLVP;
1057 FILEDESC_LOCK(td->td_proc->p_fd);
1058 cmode = UN_FILEMODE & ~td->td_proc->p_fd->fd_cmask;
1059 FILEDESC_UNLOCK(td->td_proc->p_fd);
1060
1061 /*
1062 * Build a new componentname structure (for the same
1063 * reasons outlined in union_mkshadow).
1064 * The difference here is that the file is owned by
1065 * the current user, rather than by the person who
1066 * did the mount, since the current user needs to be
1067 * able to write the file (that's why it is being
1068 * copied in the first place).
1069 */
1070 cn.cn_namelen = strlen(un->un_path);
1071 cn.cn_pnbuf = uma_zalloc(namei_zone, M_WAITOK);
1072 bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1);
1073 cn.cn_nameiop = CREATE;
1074 cn.cn_flags = (LOCKPARENT|LOCKLEAF|HASBUF|SAVENAME|ISLASTCN);
1075 cn.cn_thread = td;
1076 cn.cn_cred = td->td_ucred;
1077 cn.cn_nameptr = cn.cn_pnbuf;
1078 cn.cn_consume = 0;
1079
1080 /*
1081 * Pass dvp unlocked and referenced on call to relookup().
1082 *
1083 * If an error occurs, dvp will be returned unlocked and dereferenced.
1084 */
1085 VREF(un->un_dirvp);
1086 error = relookup(un->un_dirvp, &vp, &cn);
1087 if (error)
1088 return (error);
1089
1090 /*
1091 * If no error occurs, dvp will be returned locked with the reference
1092 * left as before, and vpp will be returned referenced and locked.
1093 */
1094 if (vp) {
1095 vput(un->un_dirvp);
1096 if (cn.cn_flags & HASBUF) {
1097 uma_zfree(namei_zone, cn.cn_pnbuf);
1098 cn.cn_flags &= ~HASBUF;
1099 }
1100 if (vp == un->un_dirvp)
1101 vrele(vp);
1102 else
1103 vput(vp);
1104 return (EEXIST);
1105 }
1106
1107 /*
1108 * Good - there was no race to create the file
1109 * so go ahead and create it. The permissions
1110 * on the file will be 0666 modified by the
1111 * current user's umask. Access to the file, while
1112 * it is unioned, will require access to the top *and*
1113 * bottom files. Access when not unioned will simply
1114 * require access to the top-level file.
1115 * TODO: confirm choice of access permissions.
1116 */
1117 VATTR_NULL(vap);
1118 vap->va_type = VREG;
1119 vap->va_mode = cmode;
1120 VOP_LEASE(un->un_dirvp, td, cred, LEASE_WRITE);
1121 error = VOP_CREATE(un->un_dirvp, &vp, &cn, vap);
1122 if (cn.cn_flags & HASBUF) {
1123 uma_zfree(namei_zone, cn.cn_pnbuf);
1124 cn.cn_flags &= ~HASBUF;
1125 }
1126 vput(un->un_dirvp);
1127 if (error)
1128 return (error);
1129
1130 error = VOP_OPEN(vp, fmode, cred, td);
1131 if (error == 0 && vn_canvmio(vp) == TRUE)
1132 error = vfs_object_create(vp, td, cred);
1133 if (error) {
1134 vput(vp);
1135 return (error);
1136 }
1137 vp->v_writecount++;
1138 *vpp = vp;
1139 return (0);
1140}
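/*
 * Editor's sketch (not part of the original source): the
 * create-exclusively semantics union_vn_create() relies on, in
 * userland terms. O_EXCL turns a lost create race into a visible
 * EEXIST instead of silently truncating a file someone else created,
 * which parallels the vp != NULL EEXIST path above. The function name
 * is illustrative; open(2) applies the process umask to 0666, just as
 * the code above masks UN_FILEMODE with fd_cmask.
 */
#if 0
#include <fcntl.h>

static int
create_shadow_file(const char *path)
{
	/* Returns an fd, or -1 with errno set (EEXIST on a lost race). */
	return (open(path, O_WRONLY | O_CREAT | O_TRUNC | O_EXCL, 0666));
}
#endif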
1141
1142static int
1143union_vn_close(vp, fmode, cred, td)
1144 struct vnode *vp;
1145 int fmode;
1146 struct ucred *cred;
1147 struct thread *td;
1148{
1149
1150 if (fmode & FWRITE)
1151 --vp->v_writecount;
1152 return (VOP_CLOSE(vp, fmode, cred, td));
1153}
1154
1155#if 0
1156
1157/*
1158 * union_removed_upper:
1159 *
1160 * called with union_node unlocked. XXX
1161 */
1162
1163void
1164union_removed_upper(un)
1165 struct union_node *un;
1166{
1167 struct thread *td = curthread; /* XXX */
1168 struct vnode **vpp;
1169
1170 /*
1171 * Do not set the uppervp to NULLVP. If lowervp is NULLVP,
1172 * the union node will have neither uppervp nor lowervp. We remove
1173 * the union node from the cache, so that it will not be referenced.
1174 */
1175 union_newupper(un, NULLVP);
1176 if (un->un_dircache != 0) {
1177 for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
1178 vrele(*vpp);
1179 free(un->un_dircache, M_TEMP);
1180 un->un_dircache = 0;
1181 }
1182
1183 if (un->un_flags & UN_CACHED) {
1184 un->un_flags &= ~UN_CACHED;
1185 LIST_REMOVE(un, un_cache);
1186 }
1187}
1188
1189#endif
1190
1191/*
1192 * Determine whether a whiteout is needed
1193 * during a remove/rmdir operation.
1194 */
1195int
1196union_dowhiteout(un, cred, td)
1197 struct union_node *un;
1198 struct ucred *cred;
1199 struct thread *td;
1200{
1201 struct vattr va;
1202
1203 if (un->un_lowervp != NULLVP)
1204 return (1);
1205
1206 if (VOP_GETATTR(un->un_uppervp, &va, cred, td) == 0 &&
1207 (va.va_flags & OPAQUE))
1208 return (1);
1209
1210 return (0);
1211}
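/*
 * Editor's note (sketch, not part of the original source):
 * union_dowhiteout()'s decision reduces to one boolean: a whiteout is
 * needed iff removing the name could let a lower-layer object show
 * through, which is the case when a lower vnode exists or when the
 * upper vnode is marked OPAQUE (lower entries are hidden, not absent).
 * The predicate names below are illustrative.
 */
#if 0
	need_whiteout = has_lower_vnode || upper_is_opaque;
#endif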
1212
1213static void
1214union_dircache_r(vp, vppp, cntp)
1215 struct vnode *vp;
1216 struct vnode ***vppp;
1217 int *cntp;
1218{
1219 struct union_node *un;
1220
1221 if (vp->v_op != union_vnodeop_p) {
1222 if (vppp) {
1223 VREF(vp);
1224 *(*vppp)++ = vp;
1225 if (--(*cntp) == 0)
1226 panic("union: dircache table too small");
1227 } else {
1228 (*cntp)++;
1229 }
1230
1231 return;
1232 }
1233
1234 un = VTOUNION(vp);
1235 if (un->un_uppervp != NULLVP)
1236 union_dircache_r(un->un_uppervp, vppp, cntp);
1237 if (un->un_lowervp != NULLVP)
1238 union_dircache_r(un->un_lowervp, vppp, cntp);
1239}
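/*
 * Editor's sketch (not part of the original source): the two-pass
 * count-then-fill pattern union_dircache() drives through
 * union_dircache_r(): one walk with no output array to count entries,
 * then allocate count+1 slots and walk again to fill them, terminating
 * with NULL. (The kernel version also counts down during the fill pass
 * so it can panic if the table would overflow.) Shown for a plain
 * linked list; all names are illustrative.
 */
#if 0
#include <stdlib.h>

struct item {
	struct item *next;
};

static void
collect_r(struct item *it, struct item ***outp, int *cntp)
{
	for (; it != NULL; it = it->next) {
		if (outp != NULL)
			*(*outp)++ = it;	/* pass 2: fill */
		else
			(*cntp)++;		/* pass 1: count */
	}
}

static struct item **
collect(struct item *head)
{
	struct item **tab, **p;
	int cnt = 0;

	collect_r(head, NULL, &cnt);
	tab = malloc((cnt + 1) * sizeof(*tab));
	if (tab == NULL)
		return (NULL);
	p = tab;
	collect_r(head, &p, &cnt);
	*p = NULL;			/* NULL-terminated, like un_dircache */
	return (tab);
}
#endif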
1240
1241struct vnode *
1242union_dircache(vp, td)
1243 struct vnode *vp;
1244 struct thread *td;
1245{
1246 int cnt;
1247 struct vnode *nvp;
1248 struct vnode **vpp;
1249 struct vnode **dircache;
1250 struct union_node *un;
1251 int error;
1252
1253 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1254 dircache = VTOUNION(vp)->un_dircache;
1255
1256 nvp = NULLVP;
1257
1258 if (dircache == NULL) {
1259 cnt = 0;
1260 union_dircache_r(vp, 0, &cnt);
1261 cnt++;
1262 dircache = malloc(cnt * sizeof(struct vnode *),
1263 M_TEMP, M_WAITOK);
1264 vpp = dircache;
1265 union_dircache_r(vp, &vpp, &cnt);
1266 *vpp = NULLVP;
1267 vpp = dircache + 1;
1268 } else {
1269 vpp = dircache;
1270 do {
1271 if (*vpp++ == VTOUNION(vp)->un_uppervp)
1272 break;
1273 } while (*vpp != NULLVP);
1274 }
1275
1276 if (*vpp == NULLVP)
1277 goto out;
1278
1279 /*vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, td);*/
1280 UDEBUG(("ALLOCVP-3 %p ref %d\n", *vpp, (*vpp ? (*vpp)->v_usecount : -99)));
1281 VREF(*vpp);
1282 error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, NULL, *vpp, NULLVP, 0);
1283 UDEBUG(("ALLOCVP-3B %p ref %d\n", nvp, (*vpp ? (*vpp)->v_usecount : -99)));
1284 if (error)
1285 goto out;
1286
1287 VTOUNION(vp)->un_dircache = 0;
1288 un = VTOUNION(nvp);
1289 un->un_dircache = dircache;
1290
1291out:
1292 VOP_UNLOCK(vp, 0, td);
1293 return (nvp);
1294}
1295
1296/*
1297 * Module glue to remove #ifdef UNION from vfs_syscalls.c
1298 */
1299static int
1300union_dircheck(struct thread *td, struct vnode **vp, struct file *fp)
1301{
1302 int error = 0;
1303
1304 if ((*vp)->v_op == union_vnodeop_p) {
1305 struct vnode *lvp;
1306
1307 lvp = union_dircache(*vp, td);
1308 if (lvp != NULLVP) {
1309 struct vattr va;
1310
1311 /*
1312 * If the directory is opaque,
1313 * then don't show lower entries.
1314 */
1315 error = VOP_GETATTR(*vp, &va, fp->f_cred, td);
1316 if (error == 0 && (va.va_flags & OPAQUE)) {
1317 vput(lvp);
1318 lvp = NULL;
1319 }
1320 }
1321
1322 if (lvp != NULLVP) {
1323 error = VOP_OPEN(lvp, FREAD, fp->f_cred, td);
1324 if (error == 0 && vn_canvmio(lvp) == TRUE)
1325 error = vfs_object_create(lvp, td, fp->f_cred);
1326 if (error) {
1327 vput(lvp);
1328 return (error);
1329 }
1330 VOP_UNLOCK(lvp, 0, td);
1331 FILE_LOCK(fp);
1332 fp->f_data = (caddr_t) lvp;
1333 fp->f_offset = 0;
1334 FILE_UNLOCK(fp);
1335 error = vn_close(*vp, FREAD, fp->f_cred, td);
1336 if (error)
1337 return (error);
1338 *vp = lvp;
1339 return (-1); /* goto unionread */
1340 }
1341 }
1342 return (error);
1343}
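/*
 * Editor's sketch (not part of the original source): how a
 * getdirentries()-style caller is expected to consume
 * union_dircheck()'s three-way return, matching the "goto unionread"
 * comment above. The unionread label and the helper are illustrative.
 */
#if 0
unionread:
	error = read_dir_entries(vp, fp, td);	/* illustrative helper */
	if (error == 0 && union_dircheckp != NULL) {
		error = (*union_dircheckp)(td, &vp, fp);
		if (error == -1)
			goto unionread;		/* vp was replaced; read again */
	}
	return (error);				/* 0 or a real errno */
#endif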
1344
1345static int
1346union_modevent(module_t mod, int type, void *data)
1347{
1348 switch (type) {
1349 case MOD_LOAD:
1350 union_dircheckp = union_dircheck;
1351 break;
1352 case MOD_UNLOAD:
1353 union_dircheckp = NULL;
1354 break;
1355 default:
1356 break;
1357 }
1358 return (0);
1359}
1360
1361static moduledata_t union_mod = {
1362 "union_dircheck",
1363 union_modevent,
1364 NULL
1365};
1366
1367DECLARE_MODULE(union_dircheck, union_mod, SI_SUB_VFS, SI_ORDER_ANY);
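/*
 * Editor's sketch (not part of the original source): the hook-pointer
 * pattern the module glue above implements. The core kernel declares a
 * nullable function pointer (union_dircheckp); the module fills it in
 * at MOD_LOAD and clears it at MOD_UNLOAD, so callers test the pointer
 * at run time instead of compiling behind #ifdef UNION. Generic shape,
 * with illustrative names:
 */
#if 0
/* In code that is always compiled: */
int (*hookp)(int) = NULL;

int
always_compiled_path(int arg)
{
	if (hookp != NULL)
		return ((*hookp)(arg));	/* module loaded: use the hook */
	return (0);			/* module absent: default behavior */
}
#endif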