coda_vnops.c revision 176307
/*-
 *             Coda: an Experimental Distributed File System
 *                              Release 3.1
 *
 *           Copyright (c) 1987-1998 Carnegie Mellon University
 *                          All Rights Reserved
 *
 * Permission  to  use, copy, modify and distribute this software and its
 * documentation is hereby granted,  provided  that  both  the  copyright
 * notice  and  this  permission  notice  appear  in  all  copies  of the
 * software, derivative works or  modified  versions,  and  any  portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University  in  all  documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
 * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
 * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie  Mellon  encourages  users  of  this  software  to return any
 * improvements or extensions that  they  make,  and  to  grant  Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 *  	@(#) src/sys/coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:14:52 rvb Exp $
 */
/*
 * Mach Operating System
 * Copyright (c) 1990 Carnegie-Mellon University
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda filesystem at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/fs/coda/coda_vnops.c 176307 2008-02-15 11:58:11Z rwatson $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/acct.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/file.h>		/* Must come after sys/malloc.h */
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <fs/coda/coda.h>
#include <fs/coda/cnode.h>
#include <fs/coda/coda_vnops.h>
#include <fs/coda/coda_venus.h>
#include <fs/coda/coda_opstats.h>
#include <fs/coda/coda_subr.h>
#include <fs/coda/coda_pioctl.h>

/*
 * These flags select various performance enhancements.
 */
static int coda_attr_cache = 1;		/* Set to cache attributes. */
static int coda_symlink_cache = 1;	/* Set to cache symbolic links. */
static int coda_access_cache = 1;	/* Set to cache some access checks. */

/*
 * Structure to keep track of vfs calls.
 */
static struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];

#define	MARK_ENTRY(op)		(coda_vnodeopstats[op].entries++)
#define	MARK_INT_SAT(op)	(coda_vnodeopstats[op].sat_intrn++)
#define	MARK_INT_FAIL(op)	(coda_vnodeopstats[op].unsat_intrn++)
#define	MARK_INT_GEN(op)	(coda_vnodeopstats[op].gen_intrn++)

/*
 * What we are delaying for in printf.
 */
int coda_printf_delay = 0;	/* In microseconds */
int coda_vnop_print_entry = 0;
static int coda_lockdebug = 0;

/*
 * Some FreeBSD details:
 *
 * codadev_modevent is called at boot time or module load time.
 */
#define	ENTRY do {							\
	if (coda_vnop_print_entry)					\
		myprintf(("Entered %s\n", __func__));			\
} while (0)

/*
 * Definition of the vnode operation vector.
 */
struct vop_vector coda_vnodeops = {
	.vop_default = &default_vnodeops,
	.vop_cachedlookup = coda_lookup,	/* uncached lookup */
	.vop_lookup = vfs_cache_lookup,		/* namecache lookup */
	.vop_create = coda_create,		/* create */
	.vop_open = coda_open,			/* open */
	.vop_close = coda_close,		/* close */
	.vop_access = coda_access,		/* access */
	.vop_getattr = coda_getattr,		/* getattr */
	.vop_setattr = coda_setattr,		/* setattr */
	.vop_read = coda_read,			/* read */
	.vop_write = coda_write,		/* write */
	.vop_ioctl = coda_ioctl,		/* ioctl */
	.vop_fsync = coda_fsync,		/* fsync */
	.vop_remove = coda_remove,		/* remove */
	.vop_link = coda_link,			/* link */
	.vop_rename = coda_rename,		/* rename */
	.vop_mkdir = coda_mkdir,		/* mkdir */
	.vop_rmdir = coda_rmdir,		/* rmdir */
	.vop_symlink = coda_symlink,		/* symlink */
	.vop_readdir = coda_readdir,		/* readdir */
	.vop_readlink = coda_readlink,		/* readlink */
	.vop_inactive = coda_inactive,		/* inactive */
	.vop_reclaim = coda_reclaim,		/* reclaim */
	.vop_lock1 = coda_lock,			/* lock */
	.vop_unlock = coda_unlock,		/* unlock */
	.vop_bmap = VOP_EOPNOTSUPP,		/* bmap */
	.vop_print = VOP_NULL,			/* print */
	.vop_islocked = coda_islocked,		/* islocked */
	.vop_pathconf = coda_pathconf,		/* pathconf */
	.vop_poll = vop_stdpoll,
	.vop_getpages = vop_stdgetpages,	/* pager intf.*/
	.vop_putpages = vop_stdputpages,	/* pager intf.*/
	.vop_getwritemount = vop_stdgetwritemount,
#if 0
	/* missing */
	.vop_cachedlookup = ufs_lookup,
	.vop_whiteout = ufs_whiteout,
#endif

};

static void	coda_print_vattr(struct vattr *attr);

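/*
 * Initialize the per-vnode-operation statistics counters updated by the
 * MARK_*() macros above.
 */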
int
coda_vnodeopstats_init(void)
{
	int i;

	for (i = 0; i < CODA_VNODEOPS_SIZE; i++) {
		coda_vnodeopstats[i].opcode = i;
		coda_vnodeopstats[i].entries = 0;
		coda_vnodeopstats[i].sat_intrn = 0;
		coda_vnodeopstats[i].unsat_intrn = 0;
		coda_vnodeopstats[i].gen_intrn = 0;
	}
	return (0);
}

/*
 * coda_open calls Venus, which returns an open file descriptor for the
 * cache file holding the data.  We get the vnode while we are still in the
 * context of the venus process in coda_psdev.c.  This vnode is then passed
 * back to the caller and opened.
 */
int
coda_open(struct vop_open_args *ap)
{

	/*
	 * FreeBSD can pass the O_EXCL flag in mode, even though the check
	 * has already happened.  Venus defensively assumes that if open is
	 * passed the EXCL, it must be a bug.  We strip the flag here.
	 */
	/* true args */
	struct vnode **vpp = &(ap->a_vp);
	struct cnode *cp = VTOC(*vpp);
	int flag = ap->a_mode & (~O_EXCL);
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	/* locals */
	int error;
	struct vnode *vp;

	MARK_ENTRY(CODA_OPEN_STATS);

	/*
	 * Check for open of control file.
	 */
	if (IS_CTL_VP(*vpp)) {
		/* XXX */
		/* if (WRITEABLE(flag)) */
		if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
			MARK_INT_FAIL(CODA_OPEN_STATS);
			return (EACCES);
		}
		MARK_INT_SAT(CODA_OPEN_STATS);
		return (0);
	}
	error = venus_open(vtomi((*vpp)), &cp->c_fid, flag, cred,
	    td->td_proc, &vp);
	if (error)
		return (error);
	CODADEBUG(CODA_OPEN, myprintf(("open: vp %p result %d\n", vp,
	    error)););

	/*
	 * Save the vnode pointer for the cache file.
	 */
	if (cp->c_ovp == NULL) {
		cp->c_ovp = vp;
	} else {
		if (cp->c_ovp != vp)
			panic("coda_open: cp->c_ovp != ITOV(ip)");
	}
	cp->c_ocount++;

	/*
	 * Flush the attribute cache if writing the file.
	 */
	if (flag & FWRITE) {
		cp->c_owrite++;
		cp->c_flags &= ~C_VATTR;
	}

	/*
	 * Open the cache file.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_OPEN(vp, flag, cred, td, NULL);
	if (error) {
		VOP_UNLOCK(vp, 0);
		printf("coda_open: VOP_OPEN on container failed %d\n", error);
		return (error);
	}
	(*vpp)->v_object = vp->v_object;
	VOP_UNLOCK(vp, 0);
	return (0);
}

/*
 * Close the cache file used for I/O and notify Venus.
 */
int
coda_close(struct vop_close_args *ap)
{
	/* true args */
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	int flag = ap->a_fflag;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	/* locals */
	int error;

	MARK_ENTRY(CODA_CLOSE_STATS);

	/*
	 * Check for close of control file.
	 */
	if (IS_CTL_VP(vp)) {
		MARK_INT_SAT(CODA_CLOSE_STATS);
		return (0);
	}
	if (cp->c_ovp) {
		vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
		/* Do errors matter here? */
		VOP_CLOSE(cp->c_ovp, flag, cred, td);
		vput(cp->c_ovp);
	}
#ifdef CODA_VERBOSE
	else
		printf("coda_close: NO container vp %p/cp %p\n", vp, cp);
#endif
	if (--cp->c_ocount == 0)
		cp->c_ovp = NULL;

	/*
	 * File was opened for write.
	 */
	if (flag & FWRITE)
		--cp->c_owrite;
	if (!IS_UNMOUNTING(cp))
		error = venus_close(vtomi(vp), &cp->c_fid, flag, cred,
		    td->td_proc);
	else
		error = ENODEV;
	CODADEBUG(CODA_CLOSE, myprintf(("close: result %d\n", error)););
	return (error);
}

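/*
 * coda_read() and coda_write() simply pass the request on to coda_rdwr(),
 * which performs the I/O against the locally cached container file.
 */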
int
coda_read(struct vop_read_args *ap)
{

	ENTRY;
	return (coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ, ap->a_ioflag,
	    ap->a_cred, ap->a_uio->uio_td));
}

int
coda_write(struct vop_write_args *ap)
{

	ENTRY;
	return (coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE, ap->a_ioflag,
	    ap->a_cred, ap->a_uio->uio_td));
}

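/*
 * Common read/write path: perform the I/O on the container vnode
 * (cp->c_ovp), opening it internally first if this is a page-level request
 * that arrives before an explicit open.
 */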
int
coda_rdwr(struct vnode *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
    struct ucred *cred, struct thread *td)
{
	/* upcall decl */
	/* NOTE: container file operation!!! */
	/* locals */
	struct cnode *cp = VTOC(vp);
	struct vnode *cfvp = cp->c_ovp;
	int opened_internally = 0;
	int error = 0;

	MARK_ENTRY(CODA_RDWR_STATS);
	CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %d, %lld, %d)\n",
	    rw, (void *)uiop->uio_iov->iov_base, uiop->uio_resid,
	    (long long)uiop->uio_offset, uiop->uio_segflg)););

	/*
	 * Check for rdwr of control object.
	 */
	if (IS_CTL_VP(vp)) {
		MARK_INT_FAIL(CODA_RDWR_STATS);
		return (EINVAL);
	}

	/*
	 * If file is not already open this must be a page {read,write}
	 * request and we should open it internally.
	 */
	if (cfvp == NULL) {
		opened_internally = 1;
		MARK_INT_GEN(CODA_OPEN_STATS);
		error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred,
		    td, NULL);
#ifdef CODA_VERBOSE
		printf("coda_rdwr: Internally Opening %p\n", vp);
#endif
		if (error) {
			printf("coda_rdwr: VOP_OPEN on container failed "
			    "%d\n", error);
			return (error);
		}
		cfvp = cp->c_ovp;
	}

	/*
	 * Have UFS handle the call.
	 */
	CODADEBUG(CODA_RDWR, myprintf(("indirect rdwr: fid = %s, refcnt = "
	    "%d\n", coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount)););
	vn_lock(cfvp, LK_EXCLUSIVE | LK_RETRY);
	if (rw == UIO_READ) {
		error = VOP_READ(cfvp, uiop, ioflag, cred);
	} else {
		error = VOP_WRITE(cfvp, uiop, ioflag, cred);
		/*
		 * ufs_write updates the vnode_pager_setsize for the
		 * vnode/object.
		 *
		 * XXX: Since we now share vm objects between layers, this is
		 * probably unnecessary.
		 */
		{
			struct vattr attr;

			if (VOP_GETATTR(cfvp, &attr, cred, td) == 0)
				vnode_pager_setsize(vp, attr.va_size);
		}
	}
	VOP_UNLOCK(cfvp, 0);
	if (error)
		MARK_INT_FAIL(CODA_RDWR_STATS);
	else
		MARK_INT_SAT(CODA_RDWR_STATS);

	/*
	 * Do an internal close if necessary.
	 */
	if (opened_internally) {
		MARK_INT_GEN(CODA_CLOSE_STATS);
		(void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred,
		    td);
	}

	/*
	 * Invalidate cached attributes if writing.
	 */
	if (rw == UIO_WRITE)
		cp->c_flags &= ~C_VATTR;
	return (error);
}

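/*
 * ioctl is supported only on the Coda control object; the request names a
 * target path in its PioctlData argument, which is looked up here and then
 * handed to Venus as a pioctl.
 */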
int
coda_ioctl(struct vop_ioctl_args *ap)
{
	/* true args */
	struct vnode *vp = ap->a_vp;
	int com = ap->a_command;
	caddr_t data = ap->a_data;
	int flag = ap->a_fflag;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	/* locals */
	int error;
	struct vnode *tvp;
	struct nameidata ndp;
	struct PioctlData *iap = (struct PioctlData *)data;

	MARK_ENTRY(CODA_IOCTL_STATS);
	CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path)););

	/*
	 * Don't check for operation on a dying object, for ctlvp it
	 * shouldn't matter.
	 *
	 * Must be control object to succeed.
	 */
	if (!IS_CTL_VP(vp)) {
		MARK_INT_FAIL(CODA_IOCTL_STATS);
		CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: vp != "
		    "ctlvp")););
		return (EOPNOTSUPP);
	}

	/*
	 * Look up the pathname.
	 *
	 * Should we use the name cache here? It would get it from lookupname
	 * sooner or later anyway, right?
	 */
	NDINIT(&ndp, LOOKUP, (iap->follow ? FOLLOW : NOFOLLOW),
	    UIO_USERSPACE, iap->path, td);
	error = namei(&ndp);
	tvp = ndp.ni_vp;
	if (error) {
		MARK_INT_FAIL(CODA_IOCTL_STATS);
		CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: lookup "
		    "returns %d\n", error)););
		return (error);
	}

	/*
	 * Make sure this is a coda style cnode, but it may be a different
	 * vfsp.
	 */
	if (tvp->v_op != &coda_vnodeops) {
		vrele(tvp);
		NDFREE(&ndp, NDF_ONLY_PNBUF);
		MARK_INT_FAIL(CODA_IOCTL_STATS);
		CODADEBUG(CODA_IOCTL,
		    myprintf(("coda_ioctl error: %s not a coda object\n",
		    iap->path)););
		return (EINVAL);
	}
	if (iap->vi.in_size > VC_MAXDATASIZE) {
		NDFREE(&ndp, 0);
		return (EINVAL);
	}
	error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag,
	    data, cred, td->td_proc);
	if (error)
		MARK_INT_FAIL(CODA_IOCTL_STATS);
	else
		CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n",
		    error)););
	vrele(tvp);
	NDFREE(&ndp, NDF_ONLY_PNBUF);
	return (error);
}

/*
 * To reduce the cost of a user-level venus, we cache attributes in the
 * kernel.  Each cnode has storage allocated for an attribute.  If c_vattr is
 * valid, return a reference to it.  Otherwise, get the attributes from venus
 * and store them in the cnode.  There is some question if this method is a
 * security leak.  But I think that in order to make this call, the user must
 * have done a lookup and opened the file, and therefore should already have
 * access.
 */
int
coda_getattr(struct vop_getattr_args *ap)
{
	/* true args */
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	/* locals */
	struct vnode *convp;
	int error, size;

	MARK_ENTRY(CODA_GETATTR_STATS);
	if (IS_UNMOUNTING(cp))
		return (ENODEV);

	/*
	 * Check for getattr of control object.
	 */
	if (IS_CTL_VP(vp)) {
		MARK_INT_FAIL(CODA_GETATTR_STATS);
		return (ENOENT);
	}

	/*
	 * Check to see if the attributes have already been cached.
	 */
	if (VALID_VATTR(cp)) {
		CODADEBUG(CODA_GETATTR, myprintf(("attr cache hit: %s\n",
		    coda_f2s(&cp->c_fid))););
		CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
		    coda_print_vattr(&cp->c_vattr););
		*vap = cp->c_vattr;
		MARK_INT_SAT(CODA_GETATTR_STATS);
		return (0);
	}
	error = venus_getattr(vtomi(vp), &cp->c_fid, cred, td->td_proc, vap);
	if (!error) {
		CODADEBUG(CODA_GETATTR, myprintf(("getattr miss %s: result "
		    "%d\n", coda_f2s(&cp->c_fid), error)););
		CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
		    coda_print_vattr(vap););

		/*
		 * XXX: Since we now share vm objects between layers, this is
		 * probably unnecessary.
		 */
		size = vap->va_size;
		convp = cp->c_ovp;
		if (convp != NULL)
			vnode_pager_setsize(convp, size);

		/*
		 * If not open for write, store attributes in cnode.
		 */
		if ((cp->c_owrite == 0) && (coda_attr_cache)) {
			cp->c_vattr = *vap;
			cp->c_flags |= C_VATTR;
		}
	}
	return (error);
}

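/*
 * Pass a setattr request on to Venus; on success, invalidate the cached
 * attributes and access rights for the vnode.
 */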
int
coda_setattr(struct vop_setattr_args *ap)
{
	/* true args */
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	/* locals */
	struct vnode *convp;
	int error, size;

	MARK_ENTRY(CODA_SETATTR_STATS);

	/*
	 * Check for setattr of control object.
	 */
	if (IS_CTL_VP(vp)) {
		MARK_INT_FAIL(CODA_SETATTR_STATS);
		return (ENOENT);
	}
	if (codadebug & CODADBGMSK(CODA_SETATTR))
		coda_print_vattr(vap);
	error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, td->td_proc);
	if (!error)
		cp->c_flags &= ~(C_VATTR | C_ACCCACHE);

	/*
	 * XXX: Since we now share vm objects between layers, this is
	 * probably unnecessary.
	 *
	 * XXX: Shouldn't we only be doing this "set" if C_VATTR remains
	 * valid after venus_setattr()?
	 */
	size = vap->va_size;
	convp = cp->c_ovp;
	if (size != VNOVAL && convp != NULL)
		vnode_pager_setsize(convp, size);
	CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)););
	return (error);
}

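/*
 * Check access rights, consulting the per-cnode access cache before making
 * an upcall to Venus.
 */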
int
coda_access(struct vop_access_args *ap)
{
	/* true args */
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	/* locals */
	int error;

	MARK_ENTRY(CODA_ACCESS_STATS);

	/*
	 * Check for access of control object.  Only read access is allowed
	 * on it.
	 */
	if (IS_CTL_VP(vp)) {
		/*
		 * Bogus hack - all will be marked as successes.
		 */
		MARK_INT_SAT(CODA_ACCESS_STATS);
		return (((mode & VREAD) && !(mode & (VWRITE | VEXEC)))
		    ? 0 : EACCES);
	}

	/*
	 * We maintain a one-entry LRU positive access cache with each cnode.
	 * In principle we could also track negative results, and for more
	 * than one uid, but we don't yet.  Venus is responsible for
	 * invalidating this cache as required.
	 */
	if (coda_access_cache && VALID_ACCCACHE(cp) &&
	    (cred->cr_uid == cp->c_cached_uid) &&
	    (mode & cp->c_cached_mode) == mode) {
		MARK_INT_SAT(CODA_ACCESS_STATS);
		return (0);
	}
	error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, td->td_proc);
	if (error == 0 && coda_access_cache) {
		/*-
		 * When we have a new successful request, we consider three
		 * cases:
		 *
		 * - No initialized access cache, in which case cache the
		 *   result.
		 * - Cached result for a different user, in which case we
		 *   replace the entry.
		 * - Cached result for the same user, in which case we add
		 *   any newly granted rights to the cached mode.
		 *
		 * XXXRW: If we ever move to something more interesting than
		 * uid-based token lookup, we'll need to change this.
		 */
		cp->c_flags |= C_ACCCACHE;
		if (cp->c_cached_uid != cred->cr_uid) {
			cp->c_cached_mode = mode;
			cp->c_cached_uid = cred->cr_uid;
		} else
			cp->c_cached_mode |= mode;
	}
	return (error);
}

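/*
 * Return the contents of a symbolic link, from the per-cnode symlink cache
 * when valid, and otherwise from Venus (optionally caching the result).
 */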
int
coda_readlink(struct vop_readlink_args *ap)
{
	/* true args */
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct uio *uiop = ap->a_uio;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_uio->uio_td;
	/* locals */
	int error;
	char *str;
	int len;

	MARK_ENTRY(CODA_READLINK_STATS);

	/*
	 * Check for readlink of control object.
	 */
	if (IS_CTL_VP(vp)) {
		MARK_INT_FAIL(CODA_READLINK_STATS);
		return (ENOENT);
	}
	if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
		/*
		 * Symlink was cached.
		 */
		uiop->uio_rw = UIO_READ;
		error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
		if (error)
			MARK_INT_FAIL(CODA_READLINK_STATS);
		else
			MARK_INT_SAT(CODA_READLINK_STATS);
		return (error);
	}
	error = venus_readlink(vtomi(vp), &cp->c_fid, cred, td != NULL ?
	    td->td_proc : NULL, &str, &len);
	if (!error) {
		uiop->uio_rw = UIO_READ;
		error = uiomove(str, len, uiop);
		if (coda_symlink_cache) {
			cp->c_symlink = str;
			cp->c_symlen = len;
			cp->c_flags |= C_SYMLINK;
		} else
			CODA_FREE(str, len);
	}
	CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",
	    error)););
	return (error);
}

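/*
 * Sync the container file.  The fsync upcall to Venus is currently disabled
 * (see below).
 */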
int
coda_fsync(struct vop_fsync_args *ap)
{
	/* true args */
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct thread *td = ap->a_td;
	/* locals */
	struct vnode *convp = cp->c_ovp;
	int error;

	MARK_ENTRY(CODA_FSYNC_STATS);

	/*
	 * Check for fsync on an unmounting object.
	 *
	 * XXX: Is this comment true on FreeBSD?  It seems likely, since
	 * unmounting is fairly non-atomic.
	 *
	 * The NetBSD kernel, in its infinite wisdom, can try to fsync after
	 * an unmount has been initiated.  This is a Bad Thing, which we have
	 * to avoid.  Not a legitimate failure for stats.
	 */
	if (IS_UNMOUNTING(cp))
		return (ENODEV);

	/*
	 * Check for fsync of control object.
	 */
	if (IS_CTL_VP(vp)) {
		MARK_INT_SAT(CODA_FSYNC_STATS);
		return (0);
	}
	if (convp != NULL) {
		vn_lock(convp, LK_EXCLUSIVE | LK_RETRY);
		VOP_FSYNC(convp, MNT_WAIT, td);
		VOP_UNLOCK(convp, 0);
	}

	/*
	 * We see fsyncs with usecount == 1 then usecount == 0.  For now we
	 * ignore them.
	 */
#if 0
	VI_LOCK(vp);
	if (!vp->v_usecount) {
		printf("coda_fsync on vnode %p with %d usecount.  "
		    "c_flags = %x (%x)\n", vp, vp->v_usecount, cp->c_flags,
		    cp->c_flags&C_PURGING);
	}
	VI_UNLOCK(vp);
#endif

	/*
	 * We can expect fsync on any vnode at all if venus is purging it.
	 * Venus can't very well answer the fsync request, now can it?
	 * Hopefully, it won't have to, because hopefully, venus preserves
	 * the (possibly untrue) invariant that it never purges an open
	 * vnode.  Hopefully.
	 */
	if (cp->c_flags & C_PURGING)
		return (0);

	/* XXX: needs research */
	return (0);
	error = venus_fsync(vtomi(vp), &cp->c_fid, td->td_proc);
	CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n", error)););
	return (error);
}

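/*
 * Called when the vnode's use count drops to zero: free any cached symlink
 * contents and, unless the volume is being unmounted, discard the vnode
 * with vgone().
 */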
int
coda_inactive(struct vop_inactive_args *ap)
{
	/*
	 * XXX - at the moment, inactive doesn't look at cred, and doesn't
	 * have a proc pointer.  Oops.
	 */
	/* true args */
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct ucred *cred __attribute__((unused)) = NULL;
	struct thread *td __attribute__((unused)) = curthread;
	/* upcall decl */
	/* locals */

	/*
	 * We don't need to send inactive to venus - DCS.
	 */
	MARK_ENTRY(CODA_INACTIVE_STATS);
	CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
	    coda_f2s(&cp->c_fid), vp->v_mount)););
	vp->v_object = NULL;

	/*
	 * If an array has been allocated to hold the symlink, deallocate it.
	 */
	if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
		if (cp->c_symlink == NULL)
			panic("coda_inactive: null symlink pointer in cnode");
		CODA_FREE(cp->c_symlink, cp->c_symlen);
		cp->c_flags &= ~C_SYMLINK;
		cp->c_symlen = 0;
	}

	/*
	 * Remove it from the table so it can't be found.
	 */
	coda_unsave(cp);
	if ((struct coda_mntinfo *)(vp->v_mount->mnt_data) == NULL) {
		myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p "
		    "wasn't dying\n", vp));
		panic("badness in coda_inactive\n");
	}
	if (IS_UNMOUNTING(cp)) {
#ifdef	DEBUG
		printf("coda_inactive: IS_UNMOUNTING use %d: vp %p, cp %p\n",
		    vrefcnt(vp), vp, cp);
		if (cp->c_ovp != NULL)
			printf("coda_inactive: cp->ovp != NULL use %d: vp "
			    "%p, cp %p\n", vrefcnt(vp), vp, cp);
#endif
	} else
		vgone(vp);
	MARK_INT_SAT(CODA_INACTIVE_STATS);
	return (0);
}

/*
 * Remote filesystem operations having to do with directory manipulation.
 */

/*
 * In FreeBSD, lookup returns the vnode locked.
 */
int
coda_lookup(struct vop_cachedlookup_args *ap)
{
	/* true args */
	struct vnode *dvp = ap->a_dvp;
	struct cnode *dcp = VTOC(dvp);
	struct vnode **vpp = ap->a_vpp;
	/*
	 * It looks as though ap->a_cnp->ni_cnd->cn_nameptr holds the rest of
	 * the string to xlate, and that we must try to get at least
	 * ap->a_cnp->ni_cnd->cn_namelen of those characters to match.  I
	 * could be wrong.
	 */
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	struct thread *td = cnp->cn_thread;
	/* locals */
	struct cnode *cp;
	const char *nm = cnp->cn_nameptr;
	int len = cnp->cn_namelen;
	CodaFid VFid;
	int vtype;
	int error = 0;

	MARK_ENTRY(CODA_LOOKUP_STATS);
	CODADEBUG(CODA_LOOKUP, myprintf(("lookup: %s in %s\n", nm,
	    coda_f2s(&dcp->c_fid))););

	/*
	 * Check for lookup of control object.
	 */
	if (IS_CTL_NAME(dvp, nm, len)) {
		*vpp = coda_ctlvp;
		vref(*vpp);
		MARK_INT_SAT(CODA_LOOKUP_STATS);
		goto exit;
	}
	if (len+1 > CODA_MAXNAMLEN) {
		MARK_INT_FAIL(CODA_LOOKUP_STATS);
		CODADEBUG(CODA_LOOKUP, myprintf(("name too long: lookup, "
		    "%s (%s)\n", coda_f2s(&dcp->c_fid), nm)););
		*vpp = NULL;
		error = EINVAL;
		goto exit;
	}

	error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred,
	    td->td_proc, &VFid, &vtype);
	if (error) {
		MARK_INT_FAIL(CODA_LOOKUP_STATS);
		CODADEBUG(CODA_LOOKUP, myprintf(("lookup error on %s "
		    "(%s)%d\n", coda_f2s(&dcp->c_fid), nm, error)););
		*vpp = NULL;
	} else {
		MARK_INT_SAT(CODA_LOOKUP_STATS);
		CODADEBUG(CODA_LOOKUP, myprintf(("lookup: %s type %o "
		    "result %d\n", coda_f2s(&VFid), vtype, error)););
		cp = make_coda_node(&VFid, dvp->v_mount, vtype);
		*vpp = CTOV(cp);

		/*
		 * Enter the new vnode in the namecache only if the top bit
		 * isn't set.
		 *
		 * And don't enter a new vnode for an invalid one!
		 */
		if (!(vtype & CODA_NOCACHE) && (cnp->cn_flags & MAKEENTRY))
			cache_enter(dvp, *vpp, cnp);
	}
exit:
	/*
	 * If we are creating, and this was the last name to be looked up,
	 * and the error was ENOENT, then there really shouldn't be an error
	 * and we can make the leaf NULL and return success.  Since this is
	 * supposed to work under Mach as well as FreeBSD, we're leaving this
	 * fn wrapped.  We also must tell lookup/namei that we need to save
	 * the last component of the name.  (Create will have to free the
	 * name buffer later...lucky us...).
	 */
	if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
	    && (cnp->cn_flags & ISLASTCN) && (error == ENOENT)) {
		error = EJUSTRETURN;
		cnp->cn_flags |= SAVENAME;
		*ap->a_vpp = NULL;
	}

	/*
	 * If we are removing, and we are at the last element, and we found
	 * it, then we need to keep the name around so that the removal will
	 * go ahead as planned.  Unfortunately, this will probably also lock
	 * the to-be-removed vnode, which may or may not be a good idea.
	 * I'll have to look at the bits of coda_remove to make sure.  We'll
	 * only save the name if we did in fact find the name, otherwise
	 * coda_remove won't have a chance to free the pathname.
	 */
	if ((cnp->cn_nameiop == DELETE) && (cnp->cn_flags & ISLASTCN)
	    && !error)
		cnp->cn_flags |= SAVENAME;

	/*
	 * If the lookup went well, we need to (potentially?) unlock the
	 * parent, and lock the child.  We are only responsible for checking
	 * to see if the parent is supposed to be unlocked before we return.
	 * We must always lock the child, provided there is one and either
	 * the parent isn't locked or the child isn't the same vnode as the
	 * parent.  Simple, huh?  We can never leave the parent locked unless
	 * we are ISLASTCN.
	 */
	if (!error || (error == EJUSTRETURN)) {
		if (cnp->cn_flags & ISDOTDOT) {
			VOP_UNLOCK(dvp, 0);
			/*
			 * The parent is unlocked.  As long as there is a
			 * child, lock it without bothering to check anything
			 * else.
			 */
			if (*ap->a_vpp)
				vn_lock(*ap->a_vpp, LK_EXCLUSIVE | LK_RETRY);
			vn_lock(dvp, LK_RETRY | LK_EXCLUSIVE);
		} else {
			/*
			 * The parent is locked, and may be the same as the
			 * child.  If different, go ahead and lock it.
			 */
			if (*ap->a_vpp && (*ap->a_vpp != dvp))
				vn_lock(*ap->a_vpp, LK_EXCLUSIVE | LK_RETRY);
		}
	} else {
		/*
		 * If the lookup failed, we need to ensure that the leaf is
		 * NULL.
		 *
		 * Don't change any locking?
		 */
		*ap->a_vpp = NULL;
	}
	return (error);
}

/*ARGSUSED*/
int
coda_create(struct vop_create_args *ap)
{
	/* true args */
	struct vnode *dvp = ap->a_dvp;
	struct cnode *dcp = VTOC(dvp);
	struct vattr *va = ap->a_vap;
	int exclusive = 1;
	int mode = ap->a_vap->va_mode;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	struct thread *td = cnp->cn_thread;
	/* locals */
	int error;
	struct cnode *cp;
	const char *nm = cnp->cn_nameptr;
	int len = cnp->cn_namelen;
	CodaFid VFid;
	struct vattr attr;

	MARK_ENTRY(CODA_CREATE_STATS);

	/*
	 * All creates are exclusive XXX.
	 *
	 * I'm assuming the 'mode' argument is the file mode bits XXX.
	 *
	 * Check for create of control object.
	 */
	if (IS_CTL_NAME(dvp, nm, len)) {
		*vpp = (struct vnode *)0;
		MARK_INT_FAIL(CODA_CREATE_STATS);
		return (EACCES);
	}
	error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive,
	    mode, va, cred, td->td_proc, &VFid, &attr);
	if (!error) {
		/*
		 * If this is an exclusive create, panic if the file already
		 * exists.
		 *
		 * Venus should have detected the file and reported EEXIST.
		 */
		if ((exclusive == 1) && (coda_find(&VFid) != NULL))
			panic("cnode existed for newly created file!");
		cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
		*vpp = CTOV(cp);

		/*
		 * Update va to reflect the new attributes.
		 */
		(*va) = attr;

		/*
		 * Update the attribute cache and mark it as valid.
		 */
		if (coda_attr_cache) {
			VTOC(*vpp)->c_vattr = attr;
			VTOC(*vpp)->c_flags |= C_VATTR;
		}

		/*
		 * Invalidate the parent's attr cache, the modification time
		 * has changed.
		 */
		VTOC(dvp)->c_flags &= ~C_VATTR;
		cache_enter(dvp, *vpp, cnp);
		CODADEBUG(CODA_CREATE, myprintf(("create: %s, result %d\n",
		    coda_f2s(&VFid), error)););
	} else {
		*vpp = (struct vnode *)0;
		CODADEBUG(CODA_CREATE, myprintf(("create error %d\n",
		    error)););
	}
	if (!error) {
		if (cnp->cn_flags & MAKEENTRY)
			cache_enter(dvp, *vpp, cnp);
		if (cnp->cn_flags & LOCKLEAF)
			vn_lock(*ap->a_vpp, LK_EXCLUSIVE | LK_RETRY);
	} else if (error == ENOENT) {
		/*
		 * XXXRW: We only enter a negative entry if ENOENT is
		 * returned, not other errors.  But will Venus invalidate dvp
		 * properly in all cases when new files appear via the
		 * network rather than a local operation?
		 */
		if (cnp->cn_flags & MAKEENTRY)
			cache_enter(dvp, NULL, cnp);
	}
	return (error);
}

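/*
 * Remove a file: invalidate cached attributes on both the directory and the
 * victim, ask Venus to perform the removal, and purge the name cache entry.
 */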
int
coda_remove(struct vop_remove_args *ap)
{
	/* true args */
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct cnode *cp = VTOC(dvp);
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	struct thread *td = cnp->cn_thread;
	/* locals */
	int error;
	const char *nm = cnp->cn_nameptr;
	int len = cnp->cn_namelen;
#if 0
	struct cnode *tp;
#endif

	MARK_ENTRY(CODA_REMOVE_STATS);
	CODADEBUG(CODA_REMOVE, myprintf(("remove: %s in %s\n", nm,
	    coda_f2s(&cp->c_fid))););

	/*
	 * Check for remove of control object.
	 */
	if (IS_CTL_NAME(dvp, nm, len)) {
		MARK_INT_FAIL(CODA_REMOVE_STATS);
		return (ENOENT);
	}

	/*
	 * Invalidate the parent's attr cache, the modification time has
	 * changed.  We don't yet know if the last reference to the file is
	 * being removed, but we do know the reference count on the child has
	 * changed, so invalidate its attr cache also.
	 */
	VTOC(dvp)->c_flags &= ~C_VATTR;
	VTOC(vp)->c_flags &= ~(C_VATTR | C_ACCCACHE);
	error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred,
	    td->td_proc);
	cache_purge(vp);
	CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n", error)););
	return (error);
}

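/*
 * Create a hard link via Venus and invalidate the cached attributes of both
 * the target directory and the linked vnode.
 */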
int
coda_link(struct vop_link_args *ap)
{
	/* true args */
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct vnode *tdvp = ap->a_tdvp;
	struct cnode *tdcp = VTOC(tdvp);
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	struct thread *td = cnp->cn_thread;
	/* locals */
	int error;
	const char *nm = cnp->cn_nameptr;
	int len = cnp->cn_namelen;

	MARK_ENTRY(CODA_LINK_STATS);

	if (codadebug & CODADBGMSK(CODA_LINK)) {
		myprintf(("nb_link:   vp fid: %s\n", coda_f2s(&cp->c_fid)));
1154176139Srwatson		myprintf(("nb_link: tdvp fid: %s)\n",
		    coda_f2s(&tdcp->c_fid)));
	}
	if (codadebug & CODADBGMSK(CODA_LINK)) {
		myprintf(("link:   vp fid: %s\n", coda_f2s(&cp->c_fid)));
		myprintf(("link: tdvp fid: %s\n", coda_f2s(&tdcp->c_fid)));
	}

	/*
	 * Check for link to/from control object.
	 */
	if (IS_CTL_NAME(tdvp, nm, len) || IS_CTL_VP(vp)) {
		MARK_INT_FAIL(CODA_LINK_STATS);
		return (EACCES);
	}
	error = venus_link(vtomi(vp), &cp->c_fid, &tdcp->c_fid, nm, len,
	    cred, td->td_proc);

	/*
	 * Invalidate the parent's attr cache, the modification time has
	 * changed.
	 */
	VTOC(tdvp)->c_flags &= ~C_VATTR;
	VTOC(vp)->c_flags &= ~C_VATTR;
	CODADEBUG(CODA_LINK, myprintf(("in link result %d\n", error)););
	return (error);
}

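/*
 * Rename: purge name cache entries for the affected directories and vnodes,
 * invalidate cached attributes, hand the operation to Venus, and release
 * the vnode references as required by the VOP_RENAME() protocol.
 */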
int
coda_rename(struct vop_rename_args *ap)
{
	/* true args */
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tvp = ap->a_tvp;
	struct vnode *odvp = ap->a_fdvp;
	struct cnode *odcp = VTOC(odvp);
	struct componentname *fcnp = ap->a_fcnp;
	struct vnode *ndvp = ap->a_tdvp;
	struct cnode *ndcp = VTOC(ndvp);
	struct componentname *tcnp = ap->a_tcnp;
	struct ucred *cred = fcnp->cn_cred;
	struct thread *td = fcnp->cn_thread;
	/* locals */
	int error;
	const char *fnm = fcnp->cn_nameptr;
	int flen = fcnp->cn_namelen;
	const char *tnm = tcnp->cn_nameptr;
	int tlen = tcnp->cn_namelen;

	MARK_ENTRY(CODA_RENAME_STATS);

	/*
	 * Check for rename involving control object.
	 */
	if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
		MARK_INT_FAIL(CODA_RENAME_STATS);
		return (EACCES);
	}

	/*
	 * Remove the entries for both source and target directories, which
	 * should catch references to the children.  Perhaps we could purge
	 * less?
	 */
	cache_purge(odvp);
	cache_purge(ndvp);

	/*
	 * Invalidate parent directories as modification times have changed.
	 * Invalidate access cache on renamed file as rights may have
	 * changed.
	 */
	VTOC(odvp)->c_flags &= ~C_VATTR;
	VTOC(ndvp)->c_flags &= ~C_VATTR;
	VTOC(fvp)->c_flags &= ~C_ACCCACHE;
	if (flen+1 > CODA_MAXNAMLEN) {
		MARK_INT_FAIL(CODA_RENAME_STATS);
		error = EINVAL;
		goto exit;
	}
	if (tlen+1 > CODA_MAXNAMLEN) {
		MARK_INT_FAIL(CODA_RENAME_STATS);
		error = EINVAL;
		goto exit;
	}
	error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm,
	    flen, tnm, tlen, cred, td->td_proc);
exit:
	CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n", error)););

	/*
	 * Update namecache to reflect that the names of various objects may
	 * have changed (or gone away entirely).
	 */
	cache_purge(fvp);
	cache_purge(tvp);

	/*
	 * Release parents first, then children.
	 */
	vrele(odvp);
	if (tvp) {
		if (tvp == ndvp)
			vrele(ndvp);
		else
			vput(ndvp);
		vput(tvp);
	} else
		vput(ndvp);
	vrele(fvp);
	return (error);
}

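/*
 * Create a directory via Venus, instantiate a cnode for the result, and
 * prime the name and attribute caches for the new directory.
 */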
int
coda_mkdir(struct vop_mkdir_args *ap)
{
	/* true args */
	struct vnode *dvp = ap->a_dvp;
	struct cnode *dcp = VTOC(dvp);
	struct componentname *cnp = ap->a_cnp;
	struct vattr *va = ap->a_vap;
	struct vnode **vpp = ap->a_vpp;
	struct ucred *cred = cnp->cn_cred;
	struct thread *td = cnp->cn_thread;
	/* locals */
	int error;
	const char *nm = cnp->cn_nameptr;
	int len = cnp->cn_namelen;
	struct cnode *cp;
	CodaFid VFid;
	struct vattr ova;

	MARK_ENTRY(CODA_MKDIR_STATS);

	/*
	 * Check for mkdir of control object.
	 */
	if (IS_CTL_NAME(dvp, nm, len)) {
		*vpp = (struct vnode *)0;
		MARK_INT_FAIL(CODA_MKDIR_STATS);
		return (EACCES);
	}
	if (len+1 > CODA_MAXNAMLEN) {
		*vpp = (struct vnode *)0;
		MARK_INT_FAIL(CODA_MKDIR_STATS);
		return (EACCES);
	}
	error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred,
	    td->td_proc, &VFid, &ova);
	if (!error) {
		if (coda_find(&VFid) != NULL)
			panic("cnode existed for newly created directory!");
		cp = make_coda_node(&VFid, dvp->v_mount, va->va_type);
		*vpp = CTOV(cp);

		/*
		 * Enter the new vnode in the Name Cache.
		 */
		if (cnp->cn_flags & MAKEENTRY)
			cache_enter(dvp, *vpp, cnp);

		/*
		 * Update the attr cache and mark as valid.
		 */
		if (coda_attr_cache) {
			VTOC(*vpp)->c_vattr = ova;
			VTOC(*vpp)->c_flags |= C_VATTR;
		}

		/*
		 * Invalidate the parent's attr cache, the modification time
		 * has changed.
		 */
		VTOC(dvp)->c_flags &= ~C_VATTR;
		vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
		CODADEBUG(CODA_MKDIR, myprintf(("mkdir: %s result %d\n",
		    coda_f2s(&VFid), error)););
	} else {
		*vpp = NULL;
		CODADEBUG(CODA_MKDIR, myprintf(("mkdir error %d\n", error)););
	}
	return (error);
}

133838625Srvbint
1339138290Sphkcoda_rmdir(struct vop_rmdir_args *ap)
134038625Srvb{
1341176139Srwatson	/* true args */
1342176233Srwatson	struct vnode *vp = ap->a_vp;
1343176139Srwatson	struct vnode *dvp = ap->a_dvp;
1344176139Srwatson	struct cnode *dcp = VTOC(dvp);
1345176139Srwatson	struct componentname  *cnp = ap->a_cnp;
1346176139Srwatson	struct ucred *cred = cnp->cn_cred;
1347176139Srwatson	struct thread *td = cnp->cn_thread;
1348176139Srwatson	/* locals */
1349176139Srwatson	int error;
1350176139Srwatson	const char *nm = cnp->cn_nameptr;
1351176139Srwatson	int len = cnp->cn_namelen;
1352176233Srwatson#if 0
1353176139Srwatson	struct cnode *cp;
1354176233Srwatson#endif
135538625Srvb
1356176139Srwatson	MARK_ENTRY(CODA_RMDIR_STATS);
135738625Srvb
1358176139Srwatson	/*
1359176139Srwatson	 * Check for rmdir of control object.
1360176139Srwatson	 */
1361176139Srwatson	if (IS_CTL_NAME(dvp, nm, len)) {
1362176139Srwatson		MARK_INT_FAIL(CODA_RMDIR_STATS);
1363176139Srwatson		return (ENOENT);
1364176139Srwatson	}
136538625Srvb
1366176139Srwatson	/*
1367176233Srwatson	 * Possibly somewhat conservative purging, perhaps we just need to
1368176233Srwatson	 * purge vp?
1369176139Srwatson	 */
1370176233Srwatson	cache_purge(dvp);
1371176233Srwatson	cache_purge(vp);
137238625Srvb
1373176139Srwatson	/*
1374176139Srwatson	 * Invalidate the parent's attr cache, the modification time has
1375176139Srwatson	 * changed.
1376176139Srwatson	 */
1377176139Srwatson	dcp->c_flags &= ~C_VATTR;
1378176139Srwatson	error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred,
1379176139Srwatson	    td->td_proc);
1380176139Srwatson	CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)););
1381176139Srwatson	return (error);
138238625Srvb}
138338625Srvb
138438625Srvbint
1385138290Sphkcoda_symlink(struct vop_symlink_args *ap)
138638625Srvb{
1387176139Srwatson	/* true args */
1388176139Srwatson	struct vnode *tdvp = ap->a_dvp;
1389176139Srwatson	struct cnode *tdcp = VTOC(tdvp);
1390176139Srwatson	struct componentname *cnp = ap->a_cnp;
1391176139Srwatson	struct vattr *tva = ap->a_vap;
1392176139Srwatson	char *path = ap->a_target;
1393176139Srwatson	struct ucred *cred = cnp->cn_cred;
1394176139Srwatson	struct thread *td = cnp->cn_thread;
1395176139Srwatson	struct vnode **vpp = ap->a_vpp;
1396176139Srwatson	/* locals */
1397176139Srwatson	int error;
139838625Srvb
1399176139Srwatson	/*-
1400176139Srwatson	 * XXX I'm assuming the following things about coda_symlink's
1401176139Srwatson	 * arguments:
1402176139Srwatson	 *       t(foo) is the new name/parent/etc being created.
1403176139Srwatson	 *       path is the contents of the new symlink.
1404176139Srwatson	 */
1405176139Srwatson	char *nm = cnp->cn_nameptr;
1406176139Srwatson	int len = cnp->cn_namelen;
1407176139Srwatson	int plen = strlen(path);
140838625Srvb
1409176139Srwatson	/*
1410176139Srwatson	 * Here's the strategy for the moment: perform the symlink, then do a
1411176139Srwatson	 * lookup to grab the resulting vnode.  I know this requires two
1412176139Srwatson	 * communications with Venus for a new symbolic link, but that's the
1413176139Srwatson	 * way the ball bounces.  I don't yet want to change the way the Mach
1414176139Srwatson	 * symlink works.  When Mach support is deprecated, we should change
1415176139Srwatson	 * symlink so that the common case returns the resultant vnode in a
1416176139Srwatson	 * vpp argument.
1417176139Srwatson	 */
1418176139Srwatson	MARK_ENTRY(CODA_SYMLINK_STATS);
141938625Srvb
1420176139Srwatson	/*
1421176139Srwatson	 * Check for symlink of control object.
1422176139Srwatson	 */
1423176139Srwatson	if (IS_CTL_NAME(tdvp, nm, len)) {
1424176139Srwatson		MARK_INT_FAIL(CODA_SYMLINK_STATS);
1425176139Srwatson		return (EACCES);
1426176139Srwatson	}
1427176139Srwatson	if (plen+1 > CODA_MAXPATHLEN) {
1428176139Srwatson		MARK_INT_FAIL(CODA_SYMLINK_STATS);
1429176139Srwatson		return (EINVAL);
1430176139Srwatson	}
1431176139Srwatson	if (len+1 > CODA_MAXNAMLEN) {
1432176139Srwatson		MARK_INT_FAIL(CODA_SYMLINK_STATS);
1433176139Srwatson		error = EINVAL;
1434176139Srwatson		goto exit;
1435176139Srwatson	}
1436176139Srwatson	error = venus_symlink(vtomi(tdvp), &tdcp->c_fid, path, plen, nm, len,
1437176139Srwatson	    tva, cred, td->td_proc);
143838625Srvb
1439176139Srwatson	/*
1440176139Srwatson	 * Invalidate the parent's attr cache, the modification time has
1441176139Srwatson	 * changed.
1442176139Srwatson	 */
1443176139Srwatson	tdcp->c_flags &= ~C_VATTR;
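	/*
	 * Venus does not hand back the new vnode, so look the name up again
	 * in order to fill in *vpp as VOP_SYMLINK() requires.
	 */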
1444176139Srwatson	if (error == 0)
1445176139Srwatson		error = VOP_LOOKUP(tdvp, vpp, cnp);
1446176139Srwatsonexit:
1447176139Srwatson	CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)););
1448176139Srwatson	return (error);
144938625Srvb}
145038625Srvb
145138625Srvb/*
145238625Srvb * Read directory entries.
1453176131Srwatson *
1454176139Srwatson * XXX: This forwards the operation straight to the cache vnode using
1455176131Srwatson * VOP_READDIR(), rather than calling venus_readdir().  Why?
145638625Srvb */
145738625Srvbint
1458138290Sphkcoda_readdir(struct vop_readdir_args *ap)
145938625Srvb{
1460176139Srwatson	/* true args */
1461176139Srwatson	struct vnode *vp = ap->a_vp;
1462176139Srwatson	struct cnode *cp = VTOC(vp);
1463176139Srwatson	struct uio *uiop = ap->a_uio;
1464176139Srwatson	struct ucred *cred = ap->a_cred;
1465176139Srwatson	int *eofflag = ap->a_eofflag;
1466176139Srwatson	u_long **cookies = ap->a_cookies;
1467176139Srwatson	int *ncookies = ap->a_ncookies;
1468176139Srwatson	struct thread *td = ap->a_uio->uio_td;
1469176139Srwatson	/* upcall decl */
1470176139Srwatson	/* locals */
1471176139Srwatson	int error = 0;
1472176139Srwatson	int opened_internally = 0;
147338625Srvb
1474176139Srwatson	MARK_ENTRY(CODA_READDIR_STATS);
1475176139Srwatson	CODADEBUG(CODA_READDIR, myprintf(("coda_readdir(%p, %d, %lld, %d)\n",
1476176139Srwatson	    (void *)uiop->uio_iov->iov_base, uiop->uio_resid,
1477176139Srwatson	    (long long)uiop->uio_offset, uiop->uio_segflg)););
147838625Srvb
1479176139Srwatson	/*
1480176139Srwatson	 * Check for readdir of control object.
1481176139Srwatson	 */
1482176139Srwatson	if (IS_CTL_VP(vp)) {
1483176139Srwatson		MARK_INT_FAIL(CODA_READDIR_STATS);
1484176139Srwatson		return (ENOENT);
1485176139Srwatson	}
148638625Srvb
1487176139Srwatson	/*
1488176139Srwatson	 * If directory is not already open do an "internal open" on it.
1489176139Srwatson	 *
1490176139Srwatson	 * XXX: Why would this happen?  For files, there's memory mapping,
1491176139Srwatson	 * execution, and other kernel access paths such as ktrace.  For
1492176139Srwatson	 * directories, it is less clear.
1493176139Srwatson	 */
149438625Srvb	if (cp->c_ovp == NULL) {
1495176139Srwatson		opened_internally = 1;
1496176139Srwatson		MARK_INT_GEN(CODA_OPEN_STATS);
1497176139Srwatson		error = VOP_OPEN(vp, FREAD, cred, td, NULL);
1498176139Srwatson		printf("coda_readdir: Internally Opening %p\n", vp);
1499176139Srwatson		if (error) {
1500176139Srwatson			printf("coda_readdir: VOP_OPEN on container failed "
1501176139Srwatson			   "%d\n", error);
1502176139Srwatson			return (error);
1503176139Srwatson		}
150438625Srvb	}
1505176139Srwatson
1506176139Srwatson	/*
1507176139Srwatson	 * Have UFS handle the call.
1508176139Srwatson	 */
1509176139Srwatson	CODADEBUG(CODA_READDIR, myprintf(("indirect readdir: fid = %s, "
1510176139Srwatson	    "refcnt = %d\n", coda_f2s(&cp->c_fid), vp->v_usecount)););
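	/*
	 * The container vnode must be locked across VOP_READDIR() on the
	 * underlying file system.
	 */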
1511176121Srwatson	vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
151238625Srvb	error = VOP_READDIR(cp->c_ovp, uiop, cred, eofflag, ncookies,
1513176139Srwatson	    cookies);
1514176118Srwatson	VOP_UNLOCK(cp->c_ovp, 0);
151538625Srvb	if (error)
1516176139Srwatson		MARK_INT_FAIL(CODA_READDIR_STATS);
151738625Srvb	else
1518176139Srwatson		MARK_INT_SAT(CODA_READDIR_STATS);
1519176139Srwatson
1520176139Srwatson	/*
1521176139Srwatson	 * Do an "internal close" if necessary.
1522176139Srwatson	 */
152338625Srvb	if (opened_internally) {
1524176139Srwatson		MARK_INT_GEN(CODA_CLOSE_STATS);
1525176139Srwatson		(void)VOP_CLOSE(vp, FREAD, cred, td);
152638625Srvb	}
1527176139Srwatson	return (error);
152838625Srvb}
152938625Srvb
153038625Srvbint
1531138290Sphkcoda_reclaim(struct vop_reclaim_args *ap)
153238625Srvb{
1533176139Srwatson	/* true args */
1534176139Srwatson	struct vnode *vp = ap->a_vp;
1535176139Srwatson	struct cnode *cp = VTOC(vp);
1536176139Srwatson	/* upcall decl */
1537176139Srwatson	/* locals */
153838625Srvb
1539176139Srwatson	/*
1540176139Srwatson	 * Forced unmount/flush will let vnodes with non-zero use be
1541176139Srwatson	 * destroyed!
1542176139Srwatson	 */
1543176139Srwatson	ENTRY;
154438625Srvb
1545176139Srwatson	if (IS_UNMOUNTING(cp)) {
154638625Srvb#ifdef	DEBUG
1547176139Srwatson		if (VTOC(vp)->c_ovp)
1548176139Srwatson			printf("coda_reclaim: c_ovp not void: vp %p, cp %p\n",
1549176139Srwatson			    vp, cp);
155238625Srvb#endif
1553176139Srwatson	} else {
1554176263Srwatson		if (prtactive && vp->v_usecount != 0)
1555176263Srwatson			vprint("coda_reclaim: pushing active", vp);
1556176139Srwatson	}
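	/*
	 * Purge any name cache entries, release the cnode, and detach it
	 * from the vnode before the vnode is recycled.
	 */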
1557176139Srwatson	cache_purge(vp);
1558176139Srwatson	coda_free(VTOC(vp));
1559176139Srwatson	vp->v_data = NULL;
1560176139Srwatson	vp->v_object = NULL;
1561176139Srwatson	return (0);
156238625Srvb}
156338625Srvb
156438625Srvbint
1565169671Skibcoda_lock(struct vop_lock1_args *ap)
156638625Srvb{
1567176139Srwatson	/* true args */
1568176139Srwatson	struct vnode *vp = ap->a_vp;
1569176139Srwatson	struct cnode *cp = VTOC(vp);
1570176139Srwatson	/* upcall decl */
1571176139Srwatson	/* locals */
157238625Srvb
1573176139Srwatson	ENTRY;
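	/*
	 * Make sure the vnode interlock is held and LK_INTERLOCK is set, so
	 * that vop_stdlock() will drop the interlock for us.
	 */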
1574176139Srwatson	if ((ap->a_flags & LK_INTERLOCK) == 0) {
1575176139Srwatson		VI_LOCK(vp);
1576176139Srwatson		ap->a_flags |= LK_INTERLOCK;
1577176139Srwatson	}
1578176139Srwatson	if (coda_lockdebug)
1579176139Srwatson		myprintf(("Attempting lock on %s\n", coda_f2s(&cp->c_fid)));
1580176139Srwatson	return (vop_stdlock(ap));
158138625Srvb}
158238625Srvb
158338625Srvbint
1584138290Sphkcoda_unlock(struct vop_unlock_args *ap)
158538625Srvb{
1586176139Srwatson	/* true args */
1587176139Srwatson	struct vnode *vp = ap->a_vp;
1588176139Srwatson	struct cnode *cp = VTOC(vp);
1589176139Srwatson	/* upcall decl */
1590176139Srwatson	/* locals */
159138625Srvb
1592176139Srwatson	ENTRY;
1593176139Srwatson	if (coda_lockdebug)
1594176139Srwatson		myprintf(("Attempting unlock on %s\n",
1595176139Srwatson		    coda_f2s(&cp->c_fid)));
1596176139Srwatson	return (vop_stdunlock(ap));
159738625Srvb}
159838625Srvb
159938625Srvbint
1600138290Sphkcoda_islocked(struct vop_islocked_args *ap)
160138625Srvb{
1602176139Srwatson	/* true args */
160338625Srvb
1604176139Srwatson	ENTRY;
1605176139Srwatson	return (vop_stdislocked(ap));
160638625Srvb}
160738625Srvb
1608176131Srwatsonstatic void
1609176131Srwatsoncoda_print_vattr(struct vattr *attr)
161038625Srvb{
1611176139Srwatson	char *typestr;
161238625Srvb
1613176139Srwatson	switch (attr->va_type) {
1614176139Srwatson	case VNON:
1615176139Srwatson		typestr = "VNON";
1616176139Srwatson		break;
161738625Srvb
1618176139Srwatson	case VREG:
1619176139Srwatson		typestr = "VREG";
1620176139Srwatson		break;
162138625Srvb
1622176139Srwatson	case VDIR:
1623176139Srwatson		typestr = "VDIR";
1624176139Srwatson		break;
162538625Srvb
1626176139Srwatson	case VBLK:
1627176139Srwatson		typestr = "VBLK";
1628176139Srwatson		break;
1629176139Srwatson
1630176139Srwatson	case VCHR:
1631176139Srwatson		typestr = "VCHR";
1632176139Srwatson		break;
1633176139Srwatson
1634176139Srwatson	case VLNK:
1635176139Srwatson		typestr = "VLNK";
1636176139Srwatson		break;
1637176139Srwatson
1638176139Srwatson	case VSOCK:
1639176139Srwatson		typestr = "VSCK";
1640176139Srwatson		break;
1641176139Srwatson
1642176139Srwatson	case VFIFO:
1643176139Srwatson		typestr = "VFFO";
1644176139Srwatson		break;
1645176139Srwatson
1646176139Srwatson	case VBAD:
1647176139Srwatson		typestr = "VBAD";
1648176139Srwatson		break;
1649176139Srwatson
1650176139Srwatson	default:
1651176139Srwatson		typestr = "????";
1652176139Srwatson		break;
1653176139Srwatson	}
1654176139Srwatson	myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1655176139Srwatson	    typestr, (int)attr->va_mode, (int)attr->va_uid,
1656176139Srwatson	    (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1657176139Srwatson	myprintf(("      fileid %d nlink %d size %d blocksize %d bytes %d\n",
1658176139Srwatson	    (int)attr->va_fileid, (int)attr->va_nlink, (int)attr->va_size,
1659176139Srwatson	    (int)attr->va_blocksize, (int)attr->va_bytes));
1660176139Srwatson	myprintf(("      gen %ld flags %ld vaflags %d\n", attr->va_gen,
1661176139Srwatson	    attr->va_flags, attr->va_vaflags));
1662176139Srwatson	myprintf(("      atime sec %d nsec %d\n", (int)attr->va_atime.tv_sec,
1663176139Srwatson	    (int)attr->va_atime.tv_nsec));
1664176139Srwatson	myprintf(("      mtime sec %d nsec %d\n", (int)attr->va_mtime.tv_sec,
1665176139Srwatson	    (int)attr->va_mtime.tv_nsec));
1666176139Srwatson	myprintf(("      ctime sec %d nsec %d\n", (int)attr->va_ctime.tv_sec,
1667176139Srwatson	    (int)attr->va_ctime.tv_nsec));
166838625Srvb}
166938625Srvb
1670176139Srwatson/*
1671176139Srwatson * How to print a ucred.
1672176139Srwatson */
167338625Srvbvoid
1674176131Srwatsoncoda_print_cred(struct ucred *cred)
167538625Srvb{
167638625Srvb	int i;
167738625Srvb
167838625Srvb	myprintf(("ref %d\tuid %d\n", cred->cr_ref, cred->cr_uid));
167938625Srvb	for (i = 0; i < cred->cr_ngroups; i++)
168038625Srvb		myprintf(("\tgroup %d: (%d)\n", i, cred->cr_groups[i]));
168138625Srvb	myprintf(("\n"));
168238625Srvb}
168338625Srvb
168438625Srvb/*
1685176139Srwatson * Return a cnode for the given fid.  If no cnode exists for this fid, create
1686176139Srwatson * one and put it in a table hashed by coda_f2i().  If the cnode for this fid
1687176139Srwatson * is already in the table, return it with an additional reference.  The
1688176139Srwatson * cnode will be flushed from the table when coda_inactive() calls
1689176139Srwatson * coda_unsave().
169038625Srvb */
169138625Srvbstruct cnode *
1692154647Srwatsonmake_coda_node(CodaFid *fid, struct mount *vfsp, short type)
169338625Srvb{
1694176139Srwatson	struct cnode *cp;
1695176307Srwatson	struct vnode *vp;
1696176139Srwatson	int err;
169738625Srvb
1698176307Srwatson	/*
1699176307Srwatson	 * XXXRW: This really needs a moderate amount of reworking.  We need
1700176307Srwatson	 * to properly tolerate failures of getnewvnode() and insmntque(),
1701176307Srwatson	 * and callers need to be able to accept an error back from
1702176307Srwatson	 * make_coda_node.  There may also be more general issues in how we
1703176307Srwatson	 * handle forced unmount.  Finally, if/when Coda loses its dependency
1704176307Srwatson	 * on Giant, the ordering of this needs rethinking.
1705176307Srwatson	 */
1706176307Srwatson	cp = coda_find(fid);
1707176307Srwatson	if (cp != NULL) {
1708176139Srwatson		vref(CTOV(cp));
1709176307Srwatson		return (cp);
1710176307Srwatson	}
1711176307Srwatson	cp = coda_alloc();
1712176307Srwatson	cp->c_fid = *fid;
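	/*
	 * Allocate a fresh vnode and cross-link it with the new cnode.
	 */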
1713176307Srwatson	err = getnewvnode("coda", vfsp, &coda_vnodeops, &vp);
1714176307Srwatson	if (err)
1715176307Srwatson		panic("coda: getnewvnode returned error %d\n", err);
1716176307Srwatson	vp->v_data = cp;
1717176307Srwatson	vp->v_type = type;
1718176307Srwatson	cp->c_vnode = vp;
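	/*
	 * Insert the cnode into the fid hash table so that later coda_find()
	 * calls return it, then attach the vnode to the mount point.
	 */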
1719176307Srwatson	coda_save(cp);
1720176307Srwatson	err = insmntque(vp, vfsp);
1721176307Srwatson	if (err != 0)
1722176307Srwatson		printf("coda: insmntque failed: error %d\n", err);
1723176139Srwatson	return (cp);
172438625Srvb}
1725111903Stjr
1726111903Stjrint
1727154647Srwatsoncoda_pathconf(struct vop_pathconf_args *ap)
1728111903Stjr{
1729111903Stjr
1730111903Stjr	switch (ap->a_name) {
1731111903Stjr	case _PC_NAME_MAX:
1732176156Srwatson		*ap->a_retval = CODA_MAXNAMLEN;
1733176156Srwatson		return (0);
1734176156Srwatson
1735111903Stjr	case _PC_PATH_MAX:
1736176156Srwatson		*ap->a_retval = CODA_MAXPATHLEN;
1737176156Srwatson		return (0);
1738176156Srwatson
1739111903Stjr	default:
1740176156Srwatson		return (vop_stdpathconf(ap));
1741111903Stjr	}
1742111903Stjr}
1743