nfs_vnops.c: revision 83651 vs. revision 84002 (changed lines are shown in pairs: "-" = deleted line from 83651, "+" = added line from 84002; unchanged context is shown once)
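Within the portion of the file shown here, apart from the updated $FreeBSD$ version ID on line 40, the only substantive change is the calling convention of the nfsm_build() macro: callers no longer pass the destination pointer as the first argument; instead nfsm_build() is used as an expression and its result is assigned to the pointer. A minimal before/after sketch, drawn from the changed lines in nfs3_access_otw() below (the macro itself is presumably defined in nfsm_subs.h, which is not part of this diff):

	u_int32_t *tl;

	/* revision 83651: macro stores the reserved buffer pointer through its first argument */
	nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
	*tl = txdr_unsigned(wmode);

	/* revision 84002: macro yields the reserved buffer pointer, caller assigns it */
	tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
	*tl = txdr_unsigned(wmode);

The same pattern is applied at lines 697 and 700 in nfs_setattrrpc().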
1/*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
37 */
38
39#include <sys/cdefs.h>
- 40 __FBSDID("$FreeBSD: head/sys/nfsclient/nfs_vnops.c 83651 2001-09-18 23:32:09Z peter $");
+ 40 __FBSDID("$FreeBSD: head/sys/nfsclient/nfs_vnops.c 84002 2001-09-27 02:33:36Z peter $");
41
42/*
43 * vnode op calls for Sun NFS version 2 and 3
44 */
45
46#include "opt_inet.h"
47
48#include <sys/param.h>
49#include <sys/kernel.h>
50#include <sys/systm.h>
51#include <sys/resourcevar.h>
52#include <sys/proc.h>
53#include <sys/mount.h>
54#include <sys/bio.h>
55#include <sys/buf.h>
56#include <sys/malloc.h>
57#include <sys/mbuf.h>
58#include <sys/namei.h>
59#include <sys/socket.h>
60#include <sys/vnode.h>
61#include <sys/dirent.h>
62#include <sys/fcntl.h>
63#include <sys/lockf.h>
64#include <sys/stat.h>
65#include <sys/sysctl.h>
66
67#include <vm/vm.h>
68#include <vm/vm_extern.h>
69
70#include <fs/fifofs/fifo.h>
71
72#include <nfs/rpcv2.h>
73#include <nfs/nfsproto.h>
74#include <nfsclient/nfs.h>
75#include <nfsclient/nfsnode.h>
76#include <nfsclient/nfsmount.h>
77#include <nfsclient/nfs_lock.h>
78#include <nfs/xdr_subs.h>
79#include <nfsclient/nfsm_subs.h>
80
81#include <net/if.h>
82#include <netinet/in.h>
83#include <netinet/in_var.h>
84
85/* Defs */
86#define TRUE 1
87#define FALSE 0
88
89/*
90 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
91 * calls are not in getblk() and brelse() so that they would not be necessary
92 * here.
93 */
94#ifndef B_VMIO
95#define vfs_busy_pages(bp, f)
96#endif
97
98static int nfsspec_read(struct vop_read_args *);
99static int nfsspec_write(struct vop_write_args *);
100static int nfsfifo_read(struct vop_read_args *);
101static int nfsfifo_write(struct vop_write_args *);
102static int nfsspec_close(struct vop_close_args *);
103static int nfsfifo_close(struct vop_close_args *);
104static int nfs_flush(struct vnode *, struct ucred *, int, struct thread *,
105 int);
106static int nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *,
107 struct thread *);
108static int nfs_lookup(struct vop_lookup_args *);
109static int nfs_create(struct vop_create_args *);
110static int nfs_mknod(struct vop_mknod_args *);
111static int nfs_open(struct vop_open_args *);
112static int nfs_close(struct vop_close_args *);
113static int nfs_access(struct vop_access_args *);
114static int nfs_getattr(struct vop_getattr_args *);
115static int nfs_setattr(struct vop_setattr_args *);
116static int nfs_read(struct vop_read_args *);
117static int nfs_fsync(struct vop_fsync_args *);
118static int nfs_remove(struct vop_remove_args *);
119static int nfs_link(struct vop_link_args *);
120static int nfs_rename(struct vop_rename_args *);
121static int nfs_mkdir(struct vop_mkdir_args *);
122static int nfs_rmdir(struct vop_rmdir_args *);
123static int nfs_symlink(struct vop_symlink_args *);
124static int nfs_readdir(struct vop_readdir_args *);
125static int nfs_strategy(struct vop_strategy_args *);
126static int nfs_lookitup(struct vnode *, const char *, int,
127 struct ucred *, struct thread *, struct nfsnode **);
128static int nfs_sillyrename(struct vnode *, struct vnode *,
129 struct componentname *);
130static int nfsspec_access(struct vop_access_args *);
131static int nfs_readlink(struct vop_readlink_args *);
132static int nfs_print(struct vop_print_args *);
133static int nfs_advlock(struct vop_advlock_args *);
134
135/*
136 * Global vfs data structures for nfs
137 */
138vop_t **nfsv2_vnodeop_p;
139static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
140 { &vop_default_desc, (vop_t *) vop_defaultop },
141 { &vop_access_desc, (vop_t *) nfs_access },
142 { &vop_advlock_desc, (vop_t *) nfs_advlock },
143 { &vop_close_desc, (vop_t *) nfs_close },
144 { &vop_create_desc, (vop_t *) nfs_create },
145 { &vop_fsync_desc, (vop_t *) nfs_fsync },
146 { &vop_getattr_desc, (vop_t *) nfs_getattr },
147 { &vop_getpages_desc, (vop_t *) nfs_getpages },
148 { &vop_putpages_desc, (vop_t *) nfs_putpages },
149 { &vop_inactive_desc, (vop_t *) nfs_inactive },
150 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
151 { &vop_lease_desc, (vop_t *) vop_null },
152 { &vop_link_desc, (vop_t *) nfs_link },
153 { &vop_lock_desc, (vop_t *) vop_sharedlock },
154 { &vop_lookup_desc, (vop_t *) nfs_lookup },
155 { &vop_mkdir_desc, (vop_t *) nfs_mkdir },
156 { &vop_mknod_desc, (vop_t *) nfs_mknod },
157 { &vop_open_desc, (vop_t *) nfs_open },
158 { &vop_print_desc, (vop_t *) nfs_print },
159 { &vop_read_desc, (vop_t *) nfs_read },
160 { &vop_readdir_desc, (vop_t *) nfs_readdir },
161 { &vop_readlink_desc, (vop_t *) nfs_readlink },
162 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
163 { &vop_remove_desc, (vop_t *) nfs_remove },
164 { &vop_rename_desc, (vop_t *) nfs_rename },
165 { &vop_rmdir_desc, (vop_t *) nfs_rmdir },
166 { &vop_setattr_desc, (vop_t *) nfs_setattr },
167 { &vop_strategy_desc, (vop_t *) nfs_strategy },
168 { &vop_symlink_desc, (vop_t *) nfs_symlink },
169 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
170 { &vop_write_desc, (vop_t *) nfs_write },
171 { NULL, NULL }
172};
173static struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
174 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
175VNODEOP_SET(nfsv2_vnodeop_opv_desc);
176
177/*
178 * Special device vnode ops
179 */
180vop_t **spec_nfsv2nodeop_p;
181static struct vnodeopv_entry_desc nfsv2_specop_entries[] = {
182 { &vop_default_desc, (vop_t *) spec_vnoperate },
183 { &vop_access_desc, (vop_t *) nfsspec_access },
184 { &vop_close_desc, (vop_t *) nfsspec_close },
185 { &vop_fsync_desc, (vop_t *) nfs_fsync },
186 { &vop_getattr_desc, (vop_t *) nfs_getattr },
187 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
188 { &vop_inactive_desc, (vop_t *) nfs_inactive },
189 { &vop_lock_desc, (vop_t *) vop_sharedlock },
190 { &vop_print_desc, (vop_t *) nfs_print },
191 { &vop_read_desc, (vop_t *) nfsspec_read },
192 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
193 { &vop_setattr_desc, (vop_t *) nfs_setattr },
194 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
195 { &vop_write_desc, (vop_t *) nfsspec_write },
196 { NULL, NULL }
197};
198static struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
199 { &spec_nfsv2nodeop_p, nfsv2_specop_entries };
200VNODEOP_SET(spec_nfsv2nodeop_opv_desc);
201
202vop_t **fifo_nfsv2nodeop_p;
203static struct vnodeopv_entry_desc nfsv2_fifoop_entries[] = {
204 { &vop_default_desc, (vop_t *) fifo_vnoperate },
205 { &vop_access_desc, (vop_t *) nfsspec_access },
206 { &vop_close_desc, (vop_t *) nfsfifo_close },
207 { &vop_fsync_desc, (vop_t *) nfs_fsync },
208 { &vop_getattr_desc, (vop_t *) nfs_getattr },
209 { &vop_inactive_desc, (vop_t *) nfs_inactive },
210 { &vop_islocked_desc, (vop_t *) vop_stdislocked },
211 { &vop_lock_desc, (vop_t *) vop_sharedlock },
212 { &vop_print_desc, (vop_t *) nfs_print },
213 { &vop_read_desc, (vop_t *) nfsfifo_read },
214 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
215 { &vop_setattr_desc, (vop_t *) nfs_setattr },
216 { &vop_unlock_desc, (vop_t *) vop_stdunlock },
217 { &vop_write_desc, (vop_t *) nfsfifo_write },
218 { NULL, NULL }
219};
220static struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
221 { &fifo_nfsv2nodeop_p, nfsv2_fifoop_entries };
222VNODEOP_SET(fifo_nfsv2nodeop_opv_desc);
223
224static int nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp,
225 struct componentname *cnp, struct vattr *vap);
226static int nfs_removerpc(struct vnode *dvp, const char *name, int namelen,
227 struct ucred *cred, struct thread *td);
228static int nfs_renamerpc(struct vnode *fdvp, const char *fnameptr,
229 int fnamelen, struct vnode *tdvp,
230 const char *tnameptr, int tnamelen,
231 struct ucred *cred, struct thread *td);
232static int nfs_renameit(struct vnode *sdvp, struct componentname *scnp,
233 struct sillyrename *sp);
234
235/*
236 * Global variables
237 */
238struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
239struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
240int nfs_numasync = 0;
241#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
242
243SYSCTL_DECL(_vfs_nfs);
244
245static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
246SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
247 &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
248
249static int nfsv3_commit_on_close = 0;
250SYSCTL_INT(_vfs_nfs, OID_AUTO, nfsv3_commit_on_close, CTLFLAG_RW,
251 &nfsv3_commit_on_close, 0, "write+commit on close, else only write");
252#if 0
253SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
254 &nfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
255
256SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
257 &nfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
258#endif
259
260#define NFSV3ACCESS_ALL (NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY \
261 | NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE \
262 | NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP)
263static int
264nfs3_access_otw(struct vnode *vp, int wmode, struct thread *td,
265 struct ucred *cred)
266{
267 const int v3 = 1;
268 u_int32_t *tl;
269 int error = 0, attrflag;
270
271 struct mbuf *mreq, *mrep, *md, *mb;
272 caddr_t bpos, dpos;
273 u_int32_t rmode;
274 struct nfsnode *np = VTONFS(vp);
275
276 nfsstats.rpccnt[NFSPROC_ACCESS]++;
277 mreq = nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
278 mb = mreq;
279 bpos = mtod(mb, caddr_t);
280 nfsm_fhtom(vp, v3);
- 281 	nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
+ 281 	tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
282 *tl = txdr_unsigned(wmode);
283 nfsm_request(vp, NFSPROC_ACCESS, td, cred);
284 nfsm_postop_attr(vp, attrflag);
285 if (!error) {
286 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
287 rmode = fxdr_unsigned(u_int32_t, *tl);
288 np->n_mode = rmode;
289 np->n_modeuid = cred->cr_uid;
290 np->n_modestamp = time_second;
291 }
292 m_freem(mrep);
293nfsmout:
294 return error;
295}
296
297/*
298 * nfs access vnode op.
299 * For nfs version 2, just return ok. File accesses may fail later.
300 * For nfs version 3, use the access rpc to check accessibility. If file modes
301 * are changed on the server, accesses might still fail later.
302 */
303static int
304nfs_access(struct vop_access_args *ap)
305{
306 struct vnode *vp = ap->a_vp;
307 int error = 0;
308 u_int32_t mode, wmode;
309 int v3 = NFS_ISV3(vp);
310 struct nfsnode *np = VTONFS(vp);
311
312 /*
313 * Disallow write attempts on filesystems mounted read-only;
314 * unless the file is a socket, fifo, or a block or character
315 * device resident on the filesystem.
316 */
317 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
318 switch (vp->v_type) {
319 case VREG:
320 case VDIR:
321 case VLNK:
322 return (EROFS);
323 default:
324 break;
325 }
326 }
327 /*
328 * For nfs v3, check to see if we have done this recently, and if
329 * so return our cached result instead of making an ACCESS call.
330 * If not, do an access rpc, otherwise you are stuck emulating
331 * ufs_access() locally using the vattr. This may not be correct,
332 * since the server may apply other access criteria such as
333 * client uid-->server uid mapping that we do not know about.
334 */
335 if (v3) {
336 if (ap->a_mode & VREAD)
337 mode = NFSV3ACCESS_READ;
338 else
339 mode = 0;
340 if (vp->v_type != VDIR) {
341 if (ap->a_mode & VWRITE)
342 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
343 if (ap->a_mode & VEXEC)
344 mode |= NFSV3ACCESS_EXECUTE;
345 } else {
346 if (ap->a_mode & VWRITE)
347 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
348 NFSV3ACCESS_DELETE);
349 if (ap->a_mode & VEXEC)
350 mode |= NFSV3ACCESS_LOOKUP;
351 }
352 /* XXX safety belt, only make blanket request if caching */
353 if (nfsaccess_cache_timeout > 0) {
354 wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY |
355 NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE |
356 NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP;
357 } else {
358 wmode = mode;
359 }
360
361 /*
362 * Does our cached result allow us to give a definite yes to
363 * this request?
364 */
365 if ((time_second < (np->n_modestamp + nfsaccess_cache_timeout)) &&
366 (ap->a_cred->cr_uid == np->n_modeuid) &&
367 ((np->n_mode & mode) == mode)) {
368 nfsstats.accesscache_hits++;
369 } else {
370 /*
371 * Either a no, or a don't know. Go to the wire.
372 */
373 nfsstats.accesscache_misses++;
374 error = nfs3_access_otw(vp, wmode, ap->a_td,ap->a_cred);
375 if (!error) {
376 if ((np->n_mode & mode) != mode) {
377 error = EACCES;
378 }
379 }
380 }
381 return (error);
382 } else {
383 if ((error = nfsspec_access(ap)) != 0)
384 return (error);
385
386 /*
387 * Attempt to prevent a mapped root from accessing a file
388 * which it shouldn't. We try to read a byte from the file
389 * if the user is root and the file is not zero length.
390 * After calling nfsspec_access, we should have the correct
391 * file size cached.
392 */
393 if (ap->a_cred->cr_uid == 0 && (ap->a_mode & VREAD)
394 && VTONFS(vp)->n_size > 0) {
395 struct iovec aiov;
396 struct uio auio;
397 char buf[1];
398
399 aiov.iov_base = buf;
400 aiov.iov_len = 1;
401 auio.uio_iov = &aiov;
402 auio.uio_iovcnt = 1;
403 auio.uio_offset = 0;
404 auio.uio_resid = 1;
405 auio.uio_segflg = UIO_SYSSPACE;
406 auio.uio_rw = UIO_READ;
407 auio.uio_td = ap->a_td;
408
409 if (vp->v_type == VREG)
410 error = nfs_readrpc(vp, &auio, ap->a_cred);
411 else if (vp->v_type == VDIR) {
412 char* bp;
413 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
414 aiov.iov_base = bp;
415 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
416 error = nfs_readdirrpc(vp, &auio, ap->a_cred);
417 free(bp, M_TEMP);
418 } else if (vp->v_type == VLNK)
419 error = nfs_readlinkrpc(vp, &auio, ap->a_cred);
420 else
421 error = EACCES;
422 }
423 return (error);
424 }
425}
426
427/*
428 * nfs open vnode op
429 * Check to see if the type is ok
430 * and that deletion is not in progress.
431 * For paged in text files, you will need to flush the page cache
432 * if consistency is lost.
433 */
434/* ARGSUSED */
435static int
436nfs_open(struct vop_open_args *ap)
437{
438 struct vnode *vp = ap->a_vp;
439 struct nfsnode *np = VTONFS(vp);
440 struct vattr vattr;
441 int error;
442
443 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
444#ifdef DIAGNOSTIC
445 printf("open eacces vtyp=%d\n", vp->v_type);
446#endif
447 return (EACCES);
448 }
449 /*
450 * Get a valid lease. If cached data is stale, flush it.
451 */
452 if (np->n_flag & NMODIFIED) {
453 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
454 ap->a_td, 1)) == EINTR)
455 return (error);
456 np->n_attrstamp = 0;
457 if (vp->v_type == VDIR)
458 np->n_direofoffset = 0;
459 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_td);
460 if (error)
461 return (error);
462 np->n_mtime = vattr.va_mtime.tv_sec;
463 } else {
464 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_td);
465 if (error)
466 return (error);
467 if (np->n_mtime != vattr.va_mtime.tv_sec) {
468 if (vp->v_type == VDIR)
469 np->n_direofoffset = 0;
470 if ((error = nfs_vinvalbuf(vp, V_SAVE,
471 ap->a_cred, ap->a_td, 1)) == EINTR)
472 return (error);
473 np->n_mtime = vattr.va_mtime.tv_sec;
474 }
475 }
476 np->n_attrstamp = 0; /* For Open/Close consistency */
477 return (0);
478}
479
480/*
481 * nfs close vnode op
482 * What an NFS client should do upon close after writing is a debatable issue.
483 * Most NFS clients push delayed writes to the server upon close, basically for
484 * two reasons:
485 * 1 - So that any write errors may be reported back to the client process
486 * doing the close system call. By far the two most likely errors are
487 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
488 * 2 - To put a worst case upper bound on cache inconsistency between
489 * multiple clients for the file.
490 * There is also a consistency problem for Version 2 of the protocol w.r.t.
491 * not being able to tell if other clients are writing a file concurrently,
492 * since there is no way of knowing if the changed modify time in the reply
493 * is only due to the write for this client.
494 * (NFS Version 3 provides weak cache consistency data in the reply that
495 * should be sufficient to detect and handle this case.)
496 *
497 * The current code does the following:
498 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
499 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
500 * or commit them (this satisfies 1 and 2 except for the
501 * case where the server crashes after this close but
502 * before the commit RPC, which is felt to be "good
503 * enough". Changing the last argument to nfs_flush() to
504 * a 1 would force a commit operation, if it is felt a
505 * commit is necessary now.
506 */
507/* ARGSUSED */
508static int
509nfs_close(struct vop_close_args *ap)
510{
511 struct vnode *vp = ap->a_vp;
512 struct nfsnode *np = VTONFS(vp);
513 int error = 0;
514
515 if (vp->v_type == VREG) {
516 if (np->n_flag & NMODIFIED) {
517 if (NFS_ISV3(vp)) {
518 /*
519 * Under NFSv3 we have dirty buffers to dispose of. We
520 * must flush them to the NFS server. We have the option
521 * of waiting all the way through the commit rpc or just
522 * waiting for the initial write. The default is to only
523 * wait through the initial write so the data is in the
524 * server's cache, which is roughly similar to the state
525 * a standard disk subsystem leaves the file in on close().
526 *
527 * We cannot clear the NMODIFIED bit in np->n_flag due to
528 * potential races with other processes, and certainly
529 * cannot clear it if we don't commit.
530 */
531 int cm = nfsv3_commit_on_close ? 1 : 0;
532 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_td, cm);
533 /* np->n_flag &= ~NMODIFIED; */
534 } else {
535 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_td, 1);
536 }
537 np->n_attrstamp = 0;
538 }
539 if (np->n_flag & NWRITEERR) {
540 np->n_flag &= ~NWRITEERR;
541 error = np->n_error;
542 }
543 }
544 return (error);
545}
546
547/*
548 * nfs getattr call from vfs.
549 */
550static int
551nfs_getattr(struct vop_getattr_args *ap)
552{
553 struct vnode *vp = ap->a_vp;
554 struct nfsnode *np = VTONFS(vp);
555 u_int32_t *tl;
556 caddr_t bpos, dpos;
557 int error = 0;
558 struct mbuf *mreq, *mrep, *md, *mb;
559 int v3 = NFS_ISV3(vp);
560
561 /*
562 * Update local times for special files.
563 */
564 if (np->n_flag & (NACC | NUPD))
565 np->n_flag |= NCHG;
566 /*
567 * First look in the cache.
568 */
569 if (nfs_getattrcache(vp, ap->a_vap) == 0)
570 return (0);
571
572 if (v3 && nfsaccess_cache_timeout > 0) {
573 nfsstats.accesscache_misses++;
574 nfs3_access_otw(vp, NFSV3ACCESS_ALL, ap->a_td, ap->a_cred);
575 if (nfs_getattrcache(vp, ap->a_vap) == 0)
576 return (0);
577 }
578
579 nfsstats.rpccnt[NFSPROC_GETATTR]++;
580 mreq = nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
581 mb = mreq;
582 bpos = mtod(mb, caddr_t);
583 nfsm_fhtom(vp, v3);
584 nfsm_request(vp, NFSPROC_GETATTR, ap->a_td, ap->a_cred);
585 if (!error) {
586 nfsm_loadattr(vp, ap->a_vap);
587 }
588 m_freem(mrep);
589nfsmout:
590 return (error);
591}
592
593/*
594 * nfs setattr call.
595 */
596static int
597nfs_setattr(struct vop_setattr_args *ap)
598{
599 struct vnode *vp = ap->a_vp;
600 struct nfsnode *np = VTONFS(vp);
601 struct vattr *vap = ap->a_vap;
602 int error = 0;
603 u_quad_t tsize;
604
605#ifndef nolint
606 tsize = (u_quad_t)0;
607#endif
608
609 /*
610 * Setting of flags is not supported.
611 */
612 if (vap->va_flags != VNOVAL)
613 return (EOPNOTSUPP);
614
615 /*
616 * Disallow write attempts if the filesystem is mounted read-only.
617 */
618 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
619 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
620 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
621 (vp->v_mount->mnt_flag & MNT_RDONLY))
622 return (EROFS);
623 if (vap->va_size != VNOVAL) {
624 switch (vp->v_type) {
625 case VDIR:
626 return (EISDIR);
627 case VCHR:
628 case VBLK:
629 case VSOCK:
630 case VFIFO:
631 if (vap->va_mtime.tv_sec == VNOVAL &&
632 vap->va_atime.tv_sec == VNOVAL &&
633 vap->va_mode == (mode_t)VNOVAL &&
634 vap->va_uid == (uid_t)VNOVAL &&
635 vap->va_gid == (gid_t)VNOVAL)
636 return (0);
637 vap->va_size = VNOVAL;
638 break;
639 default:
640 /*
641 * Disallow write attempts if the filesystem is
642 * mounted read-only.
643 */
644 if (vp->v_mount->mnt_flag & MNT_RDONLY)
645 return (EROFS);
646 vnode_pager_setsize(vp, vap->va_size);
647 if (np->n_flag & NMODIFIED) {
648 if (vap->va_size == 0)
649 error = nfs_vinvalbuf(vp, 0,
650 ap->a_cred, ap->a_td, 1);
651 else
652 error = nfs_vinvalbuf(vp, V_SAVE,
653 ap->a_cred, ap->a_td, 1);
654 if (error) {
655 vnode_pager_setsize(vp, np->n_size);
656 return (error);
657 }
658 }
659 tsize = np->n_size;
660 np->n_size = np->n_vattr.va_size = vap->va_size;
661 };
662 } else if ((vap->va_mtime.tv_sec != VNOVAL ||
663 vap->va_atime.tv_sec != VNOVAL) && (np->n_flag & NMODIFIED) &&
664 vp->v_type == VREG &&
665 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
666 ap->a_td, 1)) == EINTR)
667 return (error);
668 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_td);
669 if (error && vap->va_size != VNOVAL) {
670 np->n_size = np->n_vattr.va_size = tsize;
671 vnode_pager_setsize(vp, np->n_size);
672 }
673 return (error);
674}
675
676/*
677 * Do an nfs setattr rpc.
678 */
679static int
680nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred,
681 struct thread *td)
682{
683 struct nfsv2_sattr *sp;
684 caddr_t bpos, dpos;
685 u_int32_t *tl;
686 int error = 0, wccflag = NFSV3_WCCRATTR;
687 struct mbuf *mreq, *mrep, *md, *mb;
688 int v3 = NFS_ISV3(vp);
689
690 nfsstats.rpccnt[NFSPROC_SETATTR]++;
691 mreq = nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
692 mb = mreq;
693 bpos = mtod(mb, caddr_t);
694 nfsm_fhtom(vp, v3);
695 if (v3) {
696 nfsm_v3attrbuild(vap, TRUE);
- 697 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
+ 697 		tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
698 *tl = nfs_false;
699 } else {
- 700 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
+ 700 		sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
701 if (vap->va_mode == (mode_t)VNOVAL)
702 sp->sa_mode = nfs_xdrneg1;
703 else
704 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
705 if (vap->va_uid == (uid_t)VNOVAL)
706 sp->sa_uid = nfs_xdrneg1;
707 else
708 sp->sa_uid = txdr_unsigned(vap->va_uid);
709 if (vap->va_gid == (gid_t)VNOVAL)
710 sp->sa_gid = nfs_xdrneg1;
711 else
712 sp->sa_gid = txdr_unsigned(vap->va_gid);
713 sp->sa_size = txdr_unsigned(vap->va_size);
714 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
715 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
716 }
717 nfsm_request(vp, NFSPROC_SETATTR, td, cred);
718 if (v3) {
719 nfsm_wcc_data(vp, wccflag);
720 } else
721 nfsm_loadattr(vp, (struct vattr *)0);
722 m_freem(mrep);
723nfsmout:
724 return (error);
725}
726
727/*
728 * nfs lookup call, one step at a time...
729 * First look in cache
730 * If not found, unlock the directory nfsnode and do the rpc
731 */
732static int
733nfs_lookup(struct vop_lookup_args *ap)
734{
735 struct componentname *cnp = ap->a_cnp;
736 struct vnode *dvp = ap->a_dvp;
737 struct vnode **vpp = ap->a_vpp;
738 int flags = cnp->cn_flags;
739 struct vnode *newvp;
740 u_int32_t *tl;
741 struct nfsmount *nmp;
742 caddr_t bpos, dpos;
743 struct mbuf *mreq, *mrep, *md, *mb;
744 long len;
745 nfsfh_t *fhp;
746 struct nfsnode *np;
747 int lockparent, wantparent, error = 0, attrflag, fhsize;
748 int v3 = NFS_ISV3(dvp);
749 struct thread *td = cnp->cn_thread;
750
751 *vpp = NULLVP;
752 cnp->cn_flags &= ~PDIRUNLOCK;
753 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
754 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
755 return (EROFS);
756 if (dvp->v_type != VDIR)
757 return (ENOTDIR);
758 lockparent = flags & LOCKPARENT;
759 wantparent = flags & (LOCKPARENT|WANTPARENT);
760 nmp = VFSTONFS(dvp->v_mount);
761 np = VTONFS(dvp);
762 if ((error = cache_lookup(dvp, vpp, cnp)) && error != ENOENT) {
763 struct vattr vattr;
764 int vpid;
765
766 if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0) {
767 *vpp = NULLVP;
768 return (error);
769 }
770
771 newvp = *vpp;
772 vpid = newvp->v_id;
773 /*
774 * See the comment starting `Step through' in ufs/ufs_lookup.c
775 * for an explanation of the locking protocol
776 */
777 if (dvp == newvp) {
778 VREF(newvp);
779 error = 0;
780 } else if (flags & ISDOTDOT) {
781 VOP_UNLOCK(dvp, 0, td);
782 cnp->cn_flags |= PDIRUNLOCK;
783 error = vget(newvp, LK_EXCLUSIVE, td);
784 if (!error && lockparent && (flags & ISLASTCN)) {
785 error = vn_lock(dvp, LK_EXCLUSIVE, td);
786 if (error == 0)
787 cnp->cn_flags &= ~PDIRUNLOCK;
788 }
789 } else {
790 error = vget(newvp, LK_EXCLUSIVE, td);
791 if (!lockparent || error || !(flags & ISLASTCN)) {
792 VOP_UNLOCK(dvp, 0, td);
793 cnp->cn_flags |= PDIRUNLOCK;
794 }
795 }
796 if (!error) {
797 if (vpid == newvp->v_id) {
798 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, td)
799 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
800 nfsstats.lookupcache_hits++;
801 if (cnp->cn_nameiop != LOOKUP &&
802 (flags & ISLASTCN))
803 cnp->cn_flags |= SAVENAME;
804 return (0);
805 }
806 cache_purge(newvp);
807 }
808 vput(newvp);
809 if (lockparent && dvp != newvp && (flags & ISLASTCN))
810 VOP_UNLOCK(dvp, 0, td);
811 }
812 error = vn_lock(dvp, LK_EXCLUSIVE, td);
813 *vpp = NULLVP;
814 if (error) {
815 cnp->cn_flags |= PDIRUNLOCK;
816 return (error);
817 }
818 cnp->cn_flags &= ~PDIRUNLOCK;
819 }
820 error = 0;
821 newvp = NULLVP;
822 nfsstats.lookupcache_misses++;
823 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
824 len = cnp->cn_namelen;
825 mreq = nfsm_reqhead(dvp, NFSPROC_LOOKUP,
826 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
827 mb = mreq;
828 bpos = mtod(mb, caddr_t);
829 nfsm_fhtom(dvp, v3);
830 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
831 nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_thread, cnp->cn_cred);
832 if (error) {
833 nfsm_postop_attr(dvp, attrflag);
834 m_freem(mrep);
835 goto nfsmout;
836 }
837 nfsm_getfh(fhp, fhsize, v3);
838
839 /*
840 * Handle RENAME case...
841 */
842 if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) {
843 if (NFS_CMPFH(np, fhp, fhsize)) {
844 m_freem(mrep);
845 return (EISDIR);
846 }
847 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
848 if (error) {
849 m_freem(mrep);
850 return (error);
851 }
852 newvp = NFSTOV(np);
853 if (v3) {
854 nfsm_postop_attr(newvp, attrflag);
855 nfsm_postop_attr(dvp, attrflag);
856 } else
857 nfsm_loadattr(newvp, (struct vattr *)0);
858 *vpp = newvp;
859 m_freem(mrep);
860 cnp->cn_flags |= SAVENAME;
861 if (!lockparent) {
862 VOP_UNLOCK(dvp, 0, td);
863 cnp->cn_flags |= PDIRUNLOCK;
864 }
865 return (0);
866 }
867
868 if (flags & ISDOTDOT) {
869 VOP_UNLOCK(dvp, 0, td);
870 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
871 if (error) {
872 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
873 return (error);
874 }
875 newvp = NFSTOV(np);
876 if (lockparent && (flags & ISLASTCN)) {
877 error = vn_lock(dvp, LK_EXCLUSIVE, td);
878 if (error) {
879 cnp->cn_flags |= PDIRUNLOCK;
880 vput(newvp);
881 return (error);
882 }
883 } else
884 cnp->cn_flags |= PDIRUNLOCK;
885 } else if (NFS_CMPFH(np, fhp, fhsize)) {
886 VREF(dvp);
887 newvp = dvp;
888 } else {
889 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
890 if (error) {
891 m_freem(mrep);
892 return (error);
893 }
894 if (!lockparent || !(flags & ISLASTCN)) {
895 cnp->cn_flags |= PDIRUNLOCK;
896 VOP_UNLOCK(dvp, 0, td);
897 }
898 newvp = NFSTOV(np);
899 }
900 if (v3) {
901 nfsm_postop_attr(newvp, attrflag);
902 nfsm_postop_attr(dvp, attrflag);
903 } else
904 nfsm_loadattr(newvp, (struct vattr *)0);
905 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
906 cnp->cn_flags |= SAVENAME;
907 if ((cnp->cn_flags & MAKEENTRY) &&
908 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
909 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
910 cache_enter(dvp, newvp, cnp);
911 }
912 *vpp = newvp;
913 m_freem(mrep);
914nfsmout:
915 if (error) {
916 if (newvp != NULLVP) {
917 vrele(newvp);
918 *vpp = NULLVP;
919 }
920 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
921 (flags & ISLASTCN) && error == ENOENT) {
922 if (!lockparent) {
923 VOP_UNLOCK(dvp, 0, td);
924 cnp->cn_flags |= PDIRUNLOCK;
925 }
926 if (dvp->v_mount->mnt_flag & MNT_RDONLY)
927 error = EROFS;
928 else
929 error = EJUSTRETURN;
930 }
931 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
932 cnp->cn_flags |= SAVENAME;
933 }
934 return (error);
935}
936
937/*
938 * nfs read call.
939 * Just call nfs_bioread() to do the work.
940 */
941static int
942nfs_read(struct vop_read_args *ap)
943{
944 struct vnode *vp = ap->a_vp;
945
946 if (vp->v_type != VREG)
947 return (EPERM);
948 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
949}
950
951/*
952 * nfs readlink call
953 */
954static int
955nfs_readlink(struct vop_readlink_args *ap)
956{
957 struct vnode *vp = ap->a_vp;
958
959 if (vp->v_type != VLNK)
960 return (EINVAL);
961 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred));
962}
963
964/*
965 * Do a readlink rpc.
966 * Called by nfs_doio() from below the buffer cache.
967 */
968int
969nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
970{
971 u_int32_t *tl;
972 caddr_t bpos, dpos;
973 int error = 0, len, attrflag;
974 struct mbuf *mreq, *mrep, *md, *mb;
975 int v3 = NFS_ISV3(vp);
976
977 nfsstats.rpccnt[NFSPROC_READLINK]++;
978 mreq = nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
979 mb = mreq;
980 bpos = mtod(mb, caddr_t);
981 nfsm_fhtom(vp, v3);
982 nfsm_request(vp, NFSPROC_READLINK, uiop->uio_td, cred);
983 if (v3)
984 nfsm_postop_attr(vp, attrflag);
985 if (!error) {
986 nfsm_strsiz(len, NFS_MAXPATHLEN);
987 if (len == NFS_MAXPATHLEN) {
988 struct nfsnode *np = VTONFS(vp);
989 if (np->n_size && np->n_size < NFS_MAXPATHLEN)
990 len = np->n_size;
991 }
992 nfsm_mtouio(uiop, len);
993 }
994 m_freem(mrep);
995nfsmout:
996 return (error);
997}
998
999/*
1000 * nfs read rpc call
1001 * Ditto above
1002 */
1003int
1004nfs_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
1005{
1006 u_int32_t *tl;
1007 caddr_t bpos, dpos;
1008 struct mbuf *mreq, *mrep, *md, *mb;
1009 struct nfsmount *nmp;
1010 int error = 0, len, retlen, tsiz, eof, attrflag;
1011 int v3 = NFS_ISV3(vp);
1012
1013#ifndef nolint
1014 eof = 0;
1015#endif
1016 nmp = VFSTONFS(vp->v_mount);
1017 tsiz = uiop->uio_resid;
1018 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1019 return (EFBIG);
1020 while (tsiz > 0) {
1021 nfsstats.rpccnt[NFSPROC_READ]++;
1022 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1023 mreq = nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1024 mb = mreq;
1025 bpos = mtod(mb, caddr_t);
1026 nfsm_fhtom(vp, v3);
851 }
852 newvp = NFSTOV(np);
853 if (v3) {
854 nfsm_postop_attr(newvp, attrflag);
855 nfsm_postop_attr(dvp, attrflag);
856 } else
857 nfsm_loadattr(newvp, (struct vattr *)0);
858 *vpp = newvp;
859 m_freem(mrep);
860 cnp->cn_flags |= SAVENAME;
861 if (!lockparent) {
862 VOP_UNLOCK(dvp, 0, td);
863 cnp->cn_flags |= PDIRUNLOCK;
864 }
865 return (0);
866 }
867
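	/*
	 * For ".." the directory is unlocked before fetching the new
	 * vnode: ".." sits above dvp in the tree, so grabbing it while
	 * dvp is still locked would risk a lock-order reversal.
	 */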
868 if (flags & ISDOTDOT) {
869 VOP_UNLOCK(dvp, 0, td);
870 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
871 if (error) {
872 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
873 return (error);
874 }
875 newvp = NFSTOV(np);
876 if (lockparent && (flags & ISLASTCN)) {
877 error = vn_lock(dvp, LK_EXCLUSIVE, td);
878 if (error) {
879 cnp->cn_flags |= PDIRUNLOCK;
880 vput(newvp);
881 return (error);
882 }
883 } else
884 cnp->cn_flags |= PDIRUNLOCK;
885 } else if (NFS_CMPFH(np, fhp, fhsize)) {
886 VREF(dvp);
887 newvp = dvp;
888 } else {
889 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
890 if (error) {
891 m_freem(mrep);
892 return (error);
893 }
894 if (!lockparent || !(flags & ISLASTCN)) {
895 cnp->cn_flags |= PDIRUNLOCK;
896 VOP_UNLOCK(dvp, 0, td);
897 }
898 newvp = NFSTOV(np);
899 }
900 if (v3) {
901 nfsm_postop_attr(newvp, attrflag);
902 nfsm_postop_attr(dvp, attrflag);
903 } else
904 nfsm_loadattr(newvp, (struct vattr *)0);
905 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
906 cnp->cn_flags |= SAVENAME;
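	/*
	 * Record the ctime observed now so that a later name cache hit
	 * can be revalidated against the file's current ctime.
	 */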
907 if ((cnp->cn_flags & MAKEENTRY) &&
908 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
909 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
910 cache_enter(dvp, newvp, cnp);
911 }
912 *vpp = newvp;
913 m_freem(mrep);
914nfsmout:
915 if (error) {
916 if (newvp != NULLVP) {
917 vrele(newvp);
918 *vpp = NULLVP;
919 }
920 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
921 (flags & ISLASTCN) && error == ENOENT) {
922 if (!lockparent) {
923 VOP_UNLOCK(dvp, 0, td);
924 cnp->cn_flags |= PDIRUNLOCK;
925 }
926 if (dvp->v_mount->mnt_flag & MNT_RDONLY)
927 error = EROFS;
928 else
929 error = EJUSTRETURN;
930 }
931 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
932 cnp->cn_flags |= SAVENAME;
933 }
934 return (error);
935}
936
937/*
938 * nfs read call.
939 * Just call nfs_bioread() to do the work.
940 */
941static int
942nfs_read(struct vop_read_args *ap)
943{
944 struct vnode *vp = ap->a_vp;
945
946 if (vp->v_type != VREG)
947 return (EPERM);
948 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
949}
950
951/*
952 * nfs readlink call
953 */
954static int
955nfs_readlink(struct vop_readlink_args *ap)
956{
957 struct vnode *vp = ap->a_vp;
958
959 if (vp->v_type != VLNK)
960 return (EINVAL);
961 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred));
962}
963
964/*
965 * Do a readlink rpc.
966 * Called by nfs_doio() from below the buffer cache.
967 */
968int
969nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
970{
971 u_int32_t *tl;
972 caddr_t bpos, dpos;
973 int error = 0, len, attrflag;
974 struct mbuf *mreq, *mrep, *md, *mb;
975 int v3 = NFS_ISV3(vp);
976
977 nfsstats.rpccnt[NFSPROC_READLINK]++;
978 mreq = nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
979 mb = mreq;
980 bpos = mtod(mb, caddr_t);
981 nfsm_fhtom(vp, v3);
982 nfsm_request(vp, NFSPROC_READLINK, uiop->uio_td, cred);
983 if (v3)
984 nfsm_postop_attr(vp, attrflag);
985 if (!error) {
986 nfsm_strsiz(len, NFS_MAXPATHLEN);
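		/*
		 * A reply length of NFS_MAXPATHLEN may just mean the server
		 * padded its answer out to the maximum; if we hold a smaller
		 * cached size for this node, trust that instead.
		 */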
987 if (len == NFS_MAXPATHLEN) {
988 struct nfsnode *np = VTONFS(vp);
989 if (np->n_size && np->n_size < NFS_MAXPATHLEN)
990 len = np->n_size;
991 }
992 nfsm_mtouio(uiop, len);
993 }
994 m_freem(mrep);
995nfsmout:
996 return (error);
997}
998
999/*
1000 * nfs read rpc call
1001 * Ditto above
1002 */
1003int
1004nfs_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
1005{
1006 u_int32_t *tl;
1007 caddr_t bpos, dpos;
1008 struct mbuf *mreq, *mrep, *md, *mb;
1009 struct nfsmount *nmp;
1010 int error = 0, len, retlen, tsiz, eof, attrflag;
1011 int v3 = NFS_ISV3(vp);
1012
1013#ifndef nolint
1014 eof = 0;
1015#endif
1016 nmp = VFSTONFS(vp->v_mount);
1017 tsiz = uiop->uio_resid;
1018 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1019 return (EFBIG);
1020 while (tsiz > 0) {
1021 nfsstats.rpccnt[NFSPROC_READ]++;
1022 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1023 mreq = nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1024 mb = mreq;
1025 bpos = mtod(mb, caddr_t);
1026 nfsm_fhtom(vp, v3);
1027 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1027 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED * 3);
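		/*
		 * Encode the READ arguments: NFSv3 takes a 64-bit offset and
		 * a 32-bit count, while NFSv2 takes a 32-bit offset, a count
		 * and a third word (the unused totalcount, written as zero).
		 */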
1028 if (v3) {
1029 txdr_hyper(uiop->uio_offset, tl);
1030 *(tl + 2) = txdr_unsigned(len);
1031 } else {
1032 *tl++ = txdr_unsigned(uiop->uio_offset);
1033 *tl++ = txdr_unsigned(len);
1034 *tl = 0;
1035 }
1036 nfsm_request(vp, NFSPROC_READ, uiop->uio_td, cred);
1037 if (v3) {
1038 nfsm_postop_attr(vp, attrflag);
1039 if (error) {
1040 m_freem(mrep);
1041 goto nfsmout;
1042 }
1043 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1044 eof = fxdr_unsigned(int, *(tl + 1));
1045 } else
1046 nfsm_loadattr(vp, (struct vattr *)0);
1047 nfsm_strsiz(retlen, nmp->nm_rsize);
1048 nfsm_mtouio(uiop, retlen);
1049 m_freem(mrep);
1050 tsiz -= retlen;
1051 if (v3) {
1052 if (eof || retlen == 0)
1053 tsiz = 0;
1054 } else if (retlen < len)
1055 tsiz = 0;
1056 }
1057nfsmout:
1058 return (error);
1059}
1060
1061/*
1062 * nfs write call
1063 */
1064int
1065nfs_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
1066 int *iomode, int *must_commit)
1067{
1068 u_int32_t *tl;
1069 int32_t backup;
1070 caddr_t bpos, dpos;
1071 struct mbuf *mreq, *mrep, *md, *mb;
1072 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1073 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
1074 int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
1075
1076#ifndef DIAGNOSTIC
1077 if (uiop->uio_iovcnt != 1)
1078 panic("nfs: writerpc iovcnt > 1");
1079#endif
1080 *must_commit = 0;
1081 tsiz = uiop->uio_resid;
1082 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1083 return (EFBIG);
1084 while (tsiz > 0) {
1085 nfsstats.rpccnt[NFSPROC_WRITE]++;
1086 len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
1087 mreq = nfsm_reqhead(vp, NFSPROC_WRITE,
1088 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
1089 mb = mreq;
1090 bpos = mtod(mb, caddr_t);
1091 nfsm_fhtom(vp, v3);
1092 if (v3) {
1028 if (v3) {
1029 txdr_hyper(uiop->uio_offset, tl);
1030 *(tl + 2) = txdr_unsigned(len);
1031 } else {
1032 *tl++ = txdr_unsigned(uiop->uio_offset);
1033 *tl++ = txdr_unsigned(len);
1034 *tl = 0;
1035 }
1036 nfsm_request(vp, NFSPROC_READ, uiop->uio_td, cred);
1037 if (v3) {
1038 nfsm_postop_attr(vp, attrflag);
1039 if (error) {
1040 m_freem(mrep);
1041 goto nfsmout;
1042 }
1043 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1044 eof = fxdr_unsigned(int, *(tl + 1));
1045 } else
1046 nfsm_loadattr(vp, (struct vattr *)0);
1047 nfsm_strsiz(retlen, nmp->nm_rsize);
1048 nfsm_mtouio(uiop, retlen);
1049 m_freem(mrep);
1050 tsiz -= retlen;
1051 if (v3) {
1052 if (eof || retlen == 0)
1053 tsiz = 0;
1054 } else if (retlen < len)
1055 tsiz = 0;
1056 }
1057nfsmout:
1058 return (error);
1059}
1060
1061/*
1062 * nfs write call
1063 */
1064int
1065nfs_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
1066 int *iomode, int *must_commit)
1067{
1068 u_int32_t *tl;
1069 int32_t backup;
1070 caddr_t bpos, dpos;
1071 struct mbuf *mreq, *mrep, *md, *mb;
1072 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1073 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
1074 int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
1075
1076#ifndef DIAGNOSTIC
1077 if (uiop->uio_iovcnt != 1)
1078 panic("nfs: writerpc iovcnt > 1");
1079#endif
1080 *must_commit = 0;
1081 tsiz = uiop->uio_resid;
1082 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1083 return (EFBIG);
1084 while (tsiz > 0) {
1085 nfsstats.rpccnt[NFSPROC_WRITE]++;
1086 len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
1087 mreq = nfsm_reqhead(vp, NFSPROC_WRITE,
1088 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
1089 mb = mreq;
1090 bpos = mtod(mb, caddr_t);
1091 nfsm_fhtom(vp, v3);
1092 if (v3) {
1093 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1093 tl = nfsm_build(u_int32_t *, 5 * NFSX_UNSIGNED);
1094 txdr_hyper(uiop->uio_offset, tl);
1095 tl += 2;
1096 *tl++ = txdr_unsigned(len);
1097 *tl++ = txdr_unsigned(*iomode);
1098 *tl = txdr_unsigned(len);
1099 } else {
1100 u_int32_t x;
1101
1094 txdr_hyper(uiop->uio_offset, tl);
1095 tl += 2;
1096 *tl++ = txdr_unsigned(len);
1097 *tl++ = txdr_unsigned(*iomode);
1098 *tl = txdr_unsigned(len);
1099 } else {
1100 u_int32_t x;
1101
1102 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1102 tl = nfsm_build(u_int32_t *, 4 * NFSX_UNSIGNED);
1103 /* Set both "begin" and "current" to non-garbage. */
1104 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1105 *tl++ = x; /* "begin offset" */
1106 *tl++ = x; /* "current offset" */
1107 x = txdr_unsigned(len);
1108 *tl++ = x; /* total to this offset */
1109 *tl = x; /* size of this write */
1110 }
1111 nfsm_uiotom(uiop, len);
1112 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_td, cred);
1113 if (v3) {
1114 wccflag = NFSV3_WCCCHK;
1115 nfsm_wcc_data(vp, wccflag);
1116 if (!error) {
1117 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1118 + NFSX_V3WRITEVERF);
1119 rlen = fxdr_unsigned(int, *tl++);
1120 if (rlen == 0) {
1121 error = NFSERR_IO;
1122 m_freem(mrep);
1123 break;
1124 } else if (rlen < len) {
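					/*
					 * Short write: the server accepted
					 * only rlen bytes, so back the uio up
					 * and resend the remainder on the next
					 * pass through the loop.
					 */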
1125 backup = len - rlen;
1126 uiop->uio_iov->iov_base -= backup;
1127 uiop->uio_iov->iov_len += backup;
1128 uiop->uio_offset -= backup;
1129 uiop->uio_resid += backup;
1130 len = rlen;
1131 }
1132 commit = fxdr_unsigned(int, *tl++);
1133
1134 /*
1135 * Return the lowest commitment level
1136 * obtained by any of the RPCs.
1137 */
1138 if (committed == NFSV3WRITE_FILESYNC)
1139 committed = commit;
1140 else if (committed == NFSV3WRITE_DATASYNC &&
1141 commit == NFSV3WRITE_UNSTABLE)
1142 committed = commit;
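				/*
				 * Cache the write verifier from the reply.
				 * If it differs from the one already held
				 * (typically because the server rebooted),
				 * earlier unstable writes may have been
				 * lost, so tell the caller to re-commit
				 * its dirty buffers.
				 */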
1143 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
1144 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1145 NFSX_V3WRITEVERF);
1146 nmp->nm_state |= NFSSTA_HASWRITEVERF;
1147 } else if (bcmp((caddr_t)tl,
1148 (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
1149 *must_commit = 1;
1150 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1151 NFSX_V3WRITEVERF);
1152 }
1153 }
1154 } else
1155 nfsm_loadattr(vp, (struct vattr *)0);
1156 if (wccflag)
1157 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec;
1158 m_freem(mrep);
1159 if (error)
1160 break;
1161 tsiz -= len;
1162 }
1163nfsmout:
1164 if (vp->v_mount->mnt_flag & MNT_ASYNC)
1165 committed = NFSV3WRITE_FILESYNC;
1166 *iomode = committed;
1167 if (error)
1168 uiop->uio_resid = tsiz;
1169 return (error);
1170}
1171
1172/*
1173 * nfs mknod rpc
1174 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1175 * mode set to specify the file type and the size field used for rdev.
1176 */
1177static int
1178nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
1179 struct vattr *vap)
1180{
1181 struct nfsv2_sattr *sp;
1182 u_int32_t *tl;
1183 struct vnode *newvp = (struct vnode *)0;
1184 struct nfsnode *np = (struct nfsnode *)0;
1185 struct vattr vattr;
1186 caddr_t bpos, dpos;
1187 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1188 struct mbuf *mreq, *mrep, *md, *mb;
1189 u_int32_t rdev;
1190 int v3 = NFS_ISV3(dvp);
1191
1192 if (vap->va_type == VCHR || vap->va_type == VBLK)
1193 rdev = txdr_unsigned(vap->va_rdev);
1194 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1195 rdev = nfs_xdrneg1;
1196 else {
1197 return (EOPNOTSUPP);
1198 }
1199 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_thread)) != 0) {
1200 return (error);
1201 }
1202 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1203 mreq = nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1204 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1205 mb = mreq;
1206 bpos = mtod(mb, caddr_t);
1207 nfsm_fhtom(dvp, v3);
1208 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1209 if (v3) {
1103 /* Set both "begin" and "current" to non-garbage. */
1104 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1105 *tl++ = x; /* "begin offset" */
1106 *tl++ = x; /* "current offset" */
1107 x = txdr_unsigned(len);
1108 *tl++ = x; /* total to this offset */
1109 *tl = x; /* size of this write */
1110 }
1111 nfsm_uiotom(uiop, len);
1112 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_td, cred);
1113 if (v3) {
1114 wccflag = NFSV3_WCCCHK;
1115 nfsm_wcc_data(vp, wccflag);
1116 if (!error) {
1117 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1118 + NFSX_V3WRITEVERF);
1119 rlen = fxdr_unsigned(int, *tl++);
1120 if (rlen == 0) {
1121 error = NFSERR_IO;
1122 m_freem(mrep);
1123 break;
1124 } else if (rlen < len) {
1125 backup = len - rlen;
1126 uiop->uio_iov->iov_base -= backup;
1127 uiop->uio_iov->iov_len += backup;
1128 uiop->uio_offset -= backup;
1129 uiop->uio_resid += backup;
1130 len = rlen;
1131 }
1132 commit = fxdr_unsigned(int, *tl++);
1133
1134 /*
1135 * Return the lowest commitment level
1136 * obtained by any of the RPCs.
1137 */
1138 if (committed == NFSV3WRITE_FILESYNC)
1139 committed = commit;
1140 else if (committed == NFSV3WRITE_DATASYNC &&
1141 commit == NFSV3WRITE_UNSTABLE)
1142 committed = commit;
1143 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
1144 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1145 NFSX_V3WRITEVERF);
1146 nmp->nm_state |= NFSSTA_HASWRITEVERF;
1147 } else if (bcmp((caddr_t)tl,
1148 (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
1149 *must_commit = 1;
1150 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1151 NFSX_V3WRITEVERF);
1152 }
1153 }
1154 } else
1155 nfsm_loadattr(vp, (struct vattr *)0);
1156 if (wccflag)
1157 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec;
1158 m_freem(mrep);
1159 if (error)
1160 break;
1161 tsiz -= len;
1162 }
1163nfsmout:
1164 if (vp->v_mount->mnt_flag & MNT_ASYNC)
1165 committed = NFSV3WRITE_FILESYNC;
1166 *iomode = committed;
1167 if (error)
1168 uiop->uio_resid = tsiz;
1169 return (error);
1170}
1171
1172/*
1173 * nfs mknod rpc
1174 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1175 * mode set to specify the file type and the size field used for rdev.
1176 */
1177static int
1178nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
1179 struct vattr *vap)
1180{
1181 struct nfsv2_sattr *sp;
1182 u_int32_t *tl;
1183 struct vnode *newvp = (struct vnode *)0;
1184 struct nfsnode *np = (struct nfsnode *)0;
1185 struct vattr vattr;
1186 caddr_t bpos, dpos;
1187 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1188 struct mbuf *mreq, *mrep, *md, *mb;
1189 u_int32_t rdev;
1190 int v3 = NFS_ISV3(dvp);
1191
1192 if (vap->va_type == VCHR || vap->va_type == VBLK)
1193 rdev = txdr_unsigned(vap->va_rdev);
1194 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1195 rdev = nfs_xdrneg1;
1196 else {
1197 return (EOPNOTSUPP);
1198 }
1199 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_thread)) != 0) {
1200 return (error);
1201 }
1202 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1203 mreq = nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1204 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1205 mb = mreq;
1206 bpos = mtod(mb, caddr_t);
1207 nfsm_fhtom(dvp, v3);
1208 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1209 if (v3) {
1210 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1210 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
1211 *tl++ = vtonfsv3_type(vap->va_type);
1212 nfsm_v3attrbuild(vap, FALSE);
1213 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1211 *tl++ = vtonfsv3_type(vap->va_type);
1212 nfsm_v3attrbuild(vap, FALSE);
1213 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1214 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1214 tl = nfsm_build(u_int32_t *, 2 * NFSX_UNSIGNED);
1215 *tl++ = txdr_unsigned(umajor(vap->va_rdev));
1216 *tl = txdr_unsigned(uminor(vap->va_rdev));
1217 }
1218 } else {
1215 *tl++ = txdr_unsigned(umajor(vap->va_rdev));
1216 *tl = txdr_unsigned(uminor(vap->va_rdev));
1217 }
1218 } else {
1219 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1219 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
1220 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1221 sp->sa_uid = nfs_xdrneg1;
1222 sp->sa_gid = nfs_xdrneg1;
1223 sp->sa_size = rdev;
1224 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1225 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1226 }
1227 nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_thread, cnp->cn_cred);
1228 if (!error) {
1229 nfsm_mtofh(dvp, newvp, v3, gotvp);
1230 if (!gotvp) {
1231 if (newvp) {
1232 vput(newvp);
1233 newvp = (struct vnode *)0;
1234 }
1235 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1236 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, &np);
1237 if (!error)
1238 newvp = NFSTOV(np);
1239 }
1240 }
1241 if (v3)
1242 nfsm_wcc_data(dvp, wccflag);
1243 m_freem(mrep);
1244nfsmout:
1245 if (error) {
1246 if (newvp)
1247 vput(newvp);
1248 } else {
1249 if (cnp->cn_flags & MAKEENTRY)
1250 cache_enter(dvp, newvp, cnp);
1251 *vpp = newvp;
1252 }
1253 VTONFS(dvp)->n_flag |= NMODIFIED;
1254 if (!wccflag)
1255 VTONFS(dvp)->n_attrstamp = 0;
1256 return (error);
1257}
1258
1259/*
1260 * nfs mknod vop
1261 * just call nfs_mknodrpc() to do the work.
1262 */
1263/* ARGSUSED */
1264static int
1265nfs_mknod(struct vop_mknod_args *ap)
1266{
1267
1268 return nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap);
1269}
1270
1271static u_long create_verf;
1272/*
1273 * nfs file create call
1274 */
1275static int
1276nfs_create(struct vop_create_args *ap)
1277{
1278 struct vnode *dvp = ap->a_dvp;
1279 struct vattr *vap = ap->a_vap;
1280 struct componentname *cnp = ap->a_cnp;
1281 struct nfsv2_sattr *sp;
1282 u_int32_t *tl;
1283 struct nfsnode *np = (struct nfsnode *)0;
1284 struct vnode *newvp = (struct vnode *)0;
1285 caddr_t bpos, dpos;
1286 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1287 struct mbuf *mreq, *mrep, *md, *mb;
1288 struct vattr vattr;
1289 int v3 = NFS_ISV3(dvp);
1290
1291 /*
1292 * Oops, not for me..
1293 */
1294 if (vap->va_type == VSOCK)
1295 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1296
1297 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_thread)) != 0) {
1298 return (error);
1299 }
1300 if (vap->va_vaflags & VA_EXCLUSIVE)
1301 fmode |= O_EXCL;
1302again:
1303 nfsstats.rpccnt[NFSPROC_CREATE]++;
1304 mreq = nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1305 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1306 mb = mreq;
1307 bpos = mtod(mb, caddr_t);
1308 nfsm_fhtom(dvp, v3);
1309 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1310 if (v3) {
1220 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1221 sp->sa_uid = nfs_xdrneg1;
1222 sp->sa_gid = nfs_xdrneg1;
1223 sp->sa_size = rdev;
1224 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1225 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1226 }
1227 nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_thread, cnp->cn_cred);
1228 if (!error) {
1229 nfsm_mtofh(dvp, newvp, v3, gotvp);
1230 if (!gotvp) {
1231 if (newvp) {
1232 vput(newvp);
1233 newvp = (struct vnode *)0;
1234 }
1235 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1236 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, &np);
1237 if (!error)
1238 newvp = NFSTOV(np);
1239 }
1240 }
1241 if (v3)
1242 nfsm_wcc_data(dvp, wccflag);
1243 m_freem(mrep);
1244nfsmout:
1245 if (error) {
1246 if (newvp)
1247 vput(newvp);
1248 } else {
1249 if (cnp->cn_flags & MAKEENTRY)
1250 cache_enter(dvp, newvp, cnp);
1251 *vpp = newvp;
1252 }
1253 VTONFS(dvp)->n_flag |= NMODIFIED;
1254 if (!wccflag)
1255 VTONFS(dvp)->n_attrstamp = 0;
1256 return (error);
1257}
1258
1259/*
1260 * nfs mknod vop
1261 * just call nfs_mknodrpc() to do the work.
1262 */
1263/* ARGSUSED */
1264static int
1265nfs_mknod(struct vop_mknod_args *ap)
1266{
1267
1268 return nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap);
1269}
1270
1271static u_long create_verf;
1272/*
1273 * nfs file create call
1274 */
1275static int
1276nfs_create(struct vop_create_args *ap)
1277{
1278 struct vnode *dvp = ap->a_dvp;
1279 struct vattr *vap = ap->a_vap;
1280 struct componentname *cnp = ap->a_cnp;
1281 struct nfsv2_sattr *sp;
1282 u_int32_t *tl;
1283 struct nfsnode *np = (struct nfsnode *)0;
1284 struct vnode *newvp = (struct vnode *)0;
1285 caddr_t bpos, dpos;
1286 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1287 struct mbuf *mreq, *mrep, *md, *mb;
1288 struct vattr vattr;
1289 int v3 = NFS_ISV3(dvp);
1290
1291 /*
1292 * Oops, not for me..
1293 */
1294 if (vap->va_type == VSOCK)
1295 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1296
1297 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_thread)) != 0) {
1298 return (error);
1299 }
1300 if (vap->va_vaflags & VA_EXCLUSIVE)
1301 fmode |= O_EXCL;
1302again:
1303 nfsstats.rpccnt[NFSPROC_CREATE]++;
1304 mreq = nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1305 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1306 mb = mreq;
1307 bpos = mtod(mb, caddr_t);
1308 nfsm_fhtom(dvp, v3);
1309 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1310 if (v3) {
1311 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1311 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
1312 if (fmode & O_EXCL) {
1313 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1312 if (fmode & O_EXCL) {
1313 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1314 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1314 tl = nfsm_build(u_int32_t *, NFSX_V3CREATEVERF);
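			/*
			 * Construct the exclusive-create verifier: the host's
			 * primary IPv4 address when one is configured, plus an
			 * incrementing counter.
			 */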
1315#ifdef INET
1316 if (!TAILQ_EMPTY(&in_ifaddrhead))
1317 *tl++ = IA_SIN(TAILQ_FIRST(&in_ifaddrhead))->sin_addr.s_addr;
1318 else
1319#endif
1320 *tl++ = create_verf;
1321 *tl = ++create_verf;
1322 } else {
1323 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1324 nfsm_v3attrbuild(vap, FALSE);
1325 }
1326 } else {
1315#ifdef INET
1316 if (!TAILQ_EMPTY(&in_ifaddrhead))
1317 *tl++ = IA_SIN(TAILQ_FIRST(&in_ifaddrhead))->sin_addr.s_addr;
1318 else
1319#endif
1320 *tl++ = create_verf;
1321 *tl = ++create_verf;
1322 } else {
1323 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1324 nfsm_v3attrbuild(vap, FALSE);
1325 }
1326 } else {
1327 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1327 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
1328 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1329 sp->sa_uid = nfs_xdrneg1;
1330 sp->sa_gid = nfs_xdrneg1;
1331 sp->sa_size = 0;
1332 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1333 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1334 }
1335 nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_thread, cnp->cn_cred);
1336 if (!error) {
1337 nfsm_mtofh(dvp, newvp, v3, gotvp);
1338 if (!gotvp) {
1339 if (newvp) {
1340 vput(newvp);
1341 newvp = (struct vnode *)0;
1342 }
1343 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1344 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, &np);
1345 if (!error)
1346 newvp = NFSTOV(np);
1347 }
1348 }
1349 if (v3)
1350 nfsm_wcc_data(dvp, wccflag);
1351 m_freem(mrep);
1352nfsmout:
1353 if (error) {
1354 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
1355 fmode &= ~O_EXCL;
1356 goto again;
1357 }
1358 if (newvp)
1359 vput(newvp);
1360 } else if (v3 && (fmode & O_EXCL)) {
1361 /*
1362 * We are normally called with only a partially initialized
1363 * VAP. Since the NFSv3 spec says that the server may use the
1364 * file attributes to store the verifier, the spec requires
1365 * us to do a SETATTR RPC. FreeBSD servers store the verifier
1366 * in atime, but we can't really assume that all servers will,
1367 * so we ensure that our SETATTR sets both atime and mtime.
1368 */
1369 if (vap->va_mtime.tv_sec == VNOVAL)
1370 vfs_timestamp(&vap->va_mtime);
1371 if (vap->va_atime.tv_sec == VNOVAL)
1372 vap->va_atime = vap->va_mtime;
1373 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_thread);
1374 }
1375 if (!error) {
1376 if (cnp->cn_flags & MAKEENTRY)
1377 cache_enter(dvp, newvp, cnp);
1378 *ap->a_vpp = newvp;
1379 }
1380 VTONFS(dvp)->n_flag |= NMODIFIED;
1381 if (!wccflag)
1382 VTONFS(dvp)->n_attrstamp = 0;
1383 return (error);
1384}
1385
1386/*
1387 * nfs file remove call
1388 * To try and make nfs semantics closer to ufs semantics, a file that has
1389 * other processes using the vnode is renamed instead of removed and then
1390 * removed later on the last close.
1391 * - If v_usecount > 1
1392 * If a rename is not already in the works
1393 * call nfs_sillyrename() to set it up
1394 * else
1395 * do the remove rpc
1396 */
1397static int
1398nfs_remove(struct vop_remove_args *ap)
1399{
1400 struct vnode *vp = ap->a_vp;
1401 struct vnode *dvp = ap->a_dvp;
1402 struct componentname *cnp = ap->a_cnp;
1403 struct nfsnode *np = VTONFS(vp);
1404 int error = 0;
1405 struct vattr vattr;
1406
1407#ifndef DIAGNOSTIC
1408 if ((cnp->cn_flags & HASBUF) == 0)
1409 panic("nfs_remove: no name");
1410 if (vp->v_usecount < 1)
1411 panic("nfs_remove: bad v_usecount");
1412#endif
1413 if (vp->v_type == VDIR)
1414 error = EPERM;
1415 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1416 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_thread) == 0 &&
1417 vattr.va_nlink > 1)) {
1418 /*
1419 * Purge the name cache so that the chance of a lookup for
1420 * the name succeeding while the remove is in progress is
1421 * minimized. Without node locking it can still happen, such
1422 * that an I/O op returns ESTALE, but you can get that anyway
1423 * when another host removes the file, so it is tolerable.
1424 */
1425 cache_purge(vp);
1426 /*
1427 * throw away biocache buffers, mainly to avoid
1428 * unnecessary delayed writes later.
1429 */
1430 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_thread, 1);
1431 /* Do the rpc */
1432 if (error != EINTR)
1433 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1434 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread);
1435 /*
1436 * Kludge City: If the first reply to the remove rpc is lost,
1437 * the reply to the retransmitted request will be ENOENT
1438 * since the file was in fact removed.
1439 * Therefore, we cheat and return success.
1440 */
1441 if (error == ENOENT)
1442 error = 0;
1443 } else if (!np->n_sillyrename)
1444 error = nfs_sillyrename(dvp, vp, cnp);
1445 np->n_attrstamp = 0;
1446 return (error);
1447}
1448
1449/*
1450 * nfs file remove rpc called from nfs_inactive
1451 */
1452int
1453nfs_removeit(struct sillyrename *sp)
1454{
1455
1456 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1457 (struct thread *)0));
1458}
1459
1460/*
1461 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1462 */
1463static int
1464nfs_removerpc(struct vnode *dvp, const char *name, int namelen,
1465 struct ucred *cred, struct thread *td)
1466{
1467 u_int32_t *tl;
1468 caddr_t bpos, dpos;
1469 int error = 0, wccflag = NFSV3_WCCRATTR;
1470 struct mbuf *mreq, *mrep, *md, *mb;
1471 int v3 = NFS_ISV3(dvp);
1472
1473 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1474 mreq = nfsm_reqhead(dvp, NFSPROC_REMOVE,
1475 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1476 mb = mreq;
1477 bpos = mtod(mb, caddr_t);
1478 nfsm_fhtom(dvp, v3);
1479 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1480 nfsm_request(dvp, NFSPROC_REMOVE, td, cred);
1481 if (v3)
1482 nfsm_wcc_data(dvp, wccflag);
1483 m_freem(mrep);
1484nfsmout:
1485 VTONFS(dvp)->n_flag |= NMODIFIED;
1486 if (!wccflag)
1487 VTONFS(dvp)->n_attrstamp = 0;
1488 return (error);
1489}
1490
1491/*
1492 * nfs file rename call
1493 */
1494static int
1495nfs_rename(struct vop_rename_args *ap)
1496{
1497 struct vnode *fvp = ap->a_fvp;
1498 struct vnode *tvp = ap->a_tvp;
1499 struct vnode *fdvp = ap->a_fdvp;
1500 struct vnode *tdvp = ap->a_tdvp;
1501 struct componentname *tcnp = ap->a_tcnp;
1502 struct componentname *fcnp = ap->a_fcnp;
1503 int error;
1504
1505#ifndef DIAGNOSTIC
1506 if ((tcnp->cn_flags & HASBUF) == 0 ||
1507 (fcnp->cn_flags & HASBUF) == 0)
1508 panic("nfs_rename: no name");
1509#endif
1510 /* Check for cross-device rename */
1511 if ((fvp->v_mount != tdvp->v_mount) ||
1512 (tvp && (fvp->v_mount != tvp->v_mount))) {
1513 error = EXDEV;
1514 goto out;
1515 }
1516
1517 /*
1518 * We have to flush B_DELWRI data prior to renaming
1519 * the file. If we don't, the delayed-write buffers
1520 * can be flushed out later after the file has gone stale
1521 * under NFSV3. NFSV2 does not have this problem because
1522 * ( as far as I can tell ) it flushes dirty buffers more
1523 * often.
1524 */
1525
1526 VOP_FSYNC(fvp, fcnp->cn_cred, MNT_WAIT, fcnp->cn_thread);
1527 if (tvp)
1528 VOP_FSYNC(tvp, tcnp->cn_cred, MNT_WAIT, tcnp->cn_thread);
1529
1530 /*
1531 * If the tvp exists and is in use, sillyrename it before doing the
1532 * rename of the new file over it.
1533 * XXX Can't sillyrename a directory.
1534 */
1535 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1536 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1537 vput(tvp);
1538 tvp = NULL;
1539 }
1540
1541 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1542 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1543 tcnp->cn_thread);
1544
1545 if (fvp->v_type == VDIR) {
1546 if (tvp != NULL && tvp->v_type == VDIR)
1547 cache_purge(tdvp);
1548 cache_purge(fdvp);
1549 }
1550
1551out:
1552 if (tdvp == tvp)
1553 vrele(tdvp);
1554 else
1555 vput(tdvp);
1556 if (tvp)
1557 vput(tvp);
1558 vrele(fdvp);
1559 vrele(fvp);
1560 /*
1561 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1562 */
1563 if (error == ENOENT)
1564 error = 0;
1565 return (error);
1566}
1567
1568/*
1569 * nfs file rename rpc called from nfs_remove() above
1570 */
1571static int
1572nfs_renameit(struct vnode *sdvp, struct componentname *scnp,
1573 struct sillyrename *sp)
1574{
1575
1576 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen, sdvp,
1577 sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_thread));
1578}
1579
1580/*
1581 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1582 */
1583static int
1584nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen,
1585 struct vnode *tdvp, const char *tnameptr, int tnamelen, struct ucred *cred,
1586 struct thread *td)
1587{
1588 u_int32_t *tl;
1589 caddr_t bpos, dpos;
1590 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1591 struct mbuf *mreq, *mrep, *md, *mb;
1592 int v3 = NFS_ISV3(fdvp);
1593
1594 nfsstats.rpccnt[NFSPROC_RENAME]++;
1595 mreq = nfsm_reqhead(fdvp, NFSPROC_RENAME,
1596 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1597 nfsm_rndup(tnamelen));
1598 mb = mreq;
1599 bpos = mtod(mb, caddr_t);
1600 nfsm_fhtom(fdvp, v3);
1601 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1602 nfsm_fhtom(tdvp, v3);
1603 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1604 nfsm_request(fdvp, NFSPROC_RENAME, td, cred);
1605 if (v3) {
1606 nfsm_wcc_data(fdvp, fwccflag);
1607 nfsm_wcc_data(tdvp, twccflag);
1608 }
1609 m_freem(mrep);
1610nfsmout:
1611 VTONFS(fdvp)->n_flag |= NMODIFIED;
1612 VTONFS(tdvp)->n_flag |= NMODIFIED;
1613 if (!fwccflag)
1614 VTONFS(fdvp)->n_attrstamp = 0;
1615 if (!twccflag)
1616 VTONFS(tdvp)->n_attrstamp = 0;
1617 return (error);
1618}
1619
1620/*
1621 * nfs hard link create call
1622 */
1623static int
1624nfs_link(struct vop_link_args *ap)
1625{
1626 struct vnode *vp = ap->a_vp;
1627 struct vnode *tdvp = ap->a_tdvp;
1628 struct componentname *cnp = ap->a_cnp;
1629 u_int32_t *tl;
1630 caddr_t bpos, dpos;
1631 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1632 struct mbuf *mreq, *mrep, *md, *mb;
1633 int v3;
1634
1635 if (vp->v_mount != tdvp->v_mount) {
1636 return (EXDEV);
1637 }
1638
1639 /*
1640 * Push all writes to the server, so that the attribute cache
1641 * doesn't get "out of sync" with the server.
1642 * XXX There should be a better way!
1643 */
1644 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_thread);
1645
1646 v3 = NFS_ISV3(vp);
1647 nfsstats.rpccnt[NFSPROC_LINK]++;
1648 mreq = nfsm_reqhead(vp, NFSPROC_LINK,
1649 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1650 mb = mreq;
1651 bpos = mtod(mb, caddr_t);
1652 nfsm_fhtom(vp, v3);
1653 nfsm_fhtom(tdvp, v3);
1654 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1655 nfsm_request(vp, NFSPROC_LINK, cnp->cn_thread, cnp->cn_cred);
1656 if (v3) {
1657 nfsm_postop_attr(vp, attrflag);
1658 nfsm_wcc_data(tdvp, wccflag);
1659 }
1660 m_freem(mrep);
1661nfsmout:
1662 VTONFS(tdvp)->n_flag |= NMODIFIED;
1663 if (!attrflag)
1664 VTONFS(vp)->n_attrstamp = 0;
1665 if (!wccflag)
1666 VTONFS(tdvp)->n_attrstamp = 0;
1667 /*
1668 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1669 */
1670 if (error == EEXIST)
1671 error = 0;
1672 return (error);
1673}
1674
1675/*
1676 * nfs symbolic link create call
1677 */
1678static int
1679nfs_symlink(struct vop_symlink_args *ap)
1680{
1681 struct vnode *dvp = ap->a_dvp;
1682 struct vattr *vap = ap->a_vap;
1683 struct componentname *cnp = ap->a_cnp;
1684 struct nfsv2_sattr *sp;
1685 u_int32_t *tl;
1686 caddr_t bpos, dpos;
1687 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
1688 struct mbuf *mreq, *mrep, *md, *mb;
1689 struct vnode *newvp = (struct vnode *)0;
1690 int v3 = NFS_ISV3(dvp);
1691
1692 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
1693 slen = strlen(ap->a_target);
1694 mreq = nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
1695 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
1696 mb = mreq;
1697 bpos = mtod(mb, caddr_t);
1698 nfsm_fhtom(dvp, v3);
1699 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1700 if (v3) {
1701 nfsm_v3attrbuild(vap, FALSE);
1702 }
1703 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
1704 if (!v3) {
1328 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1329 sp->sa_uid = nfs_xdrneg1;
1330 sp->sa_gid = nfs_xdrneg1;
1331 sp->sa_size = 0;
1332 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1333 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1334 }
1335 nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_thread, cnp->cn_cred);
1336 if (!error) {
1337 nfsm_mtofh(dvp, newvp, v3, gotvp);
1338 if (!gotvp) {
1339 if (newvp) {
1340 vput(newvp);
1341 newvp = (struct vnode *)0;
1342 }
1343 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1344 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, &np);
1345 if (!error)
1346 newvp = NFSTOV(np);
1347 }
1348 }
1349 if (v3)
1350 nfsm_wcc_data(dvp, wccflag);
1351 m_freem(mrep);
1352nfsmout:
1353 if (error) {
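		/*
		 * If the server does not support exclusive create, fall
		 * back to an unchecked create and retry.
		 */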
1354 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
1355 fmode &= ~O_EXCL;
1356 goto again;
1357 }
1358 if (newvp)
1359 vput(newvp);
1360 } else if (v3 && (fmode & O_EXCL)) {
1361 /*
1362 * We are normally called with only a partially initialized
1363 * VAP. Since the NFSv3 spec says that the server may use the
1364 * file attributes to store the verifier, the spec requires
1365 * us to do a SETATTR RPC. FreeBSD servers store the verifier
1366 * in atime, but we can't really assume that all servers will,
1367 * so we ensure that our SETATTR sets both atime and mtime.
1368 */
1369 if (vap->va_mtime.tv_sec == VNOVAL)
1370 vfs_timestamp(&vap->va_mtime);
1371 if (vap->va_atime.tv_sec == VNOVAL)
1372 vap->va_atime = vap->va_mtime;
1373 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_thread);
1374 }
1375 if (!error) {
1376 if (cnp->cn_flags & MAKEENTRY)
1377 cache_enter(dvp, newvp, cnp);
1378 *ap->a_vpp = newvp;
1379 }
1380 VTONFS(dvp)->n_flag |= NMODIFIED;
1381 if (!wccflag)
1382 VTONFS(dvp)->n_attrstamp = 0;
1383 return (error);
1384}
1385
1386/*
1387 * nfs file remove call
1388 * To try and make nfs semantics closer to ufs semantics, a file that has
1389 * other processes using the vnode is renamed instead of removed and then
1390 * removed later on the last close.
1391 * - If v_usecount > 1
1392 * If a rename is not already in the works
1393 * call nfs_sillyrename() to set it up
1394 * else
1395 * do the remove rpc
1396 */
1397static int
1398nfs_remove(struct vop_remove_args *ap)
1399{
1400 struct vnode *vp = ap->a_vp;
1401 struct vnode *dvp = ap->a_dvp;
1402 struct componentname *cnp = ap->a_cnp;
1403 struct nfsnode *np = VTONFS(vp);
1404 int error = 0;
1405 struct vattr vattr;
1406
1407#ifndef DIAGNOSTIC
1408 if ((cnp->cn_flags & HASBUF) == 0)
1409 panic("nfs_remove: no name");
1410 if (vp->v_usecount < 1)
1411 panic("nfs_remove: bad v_usecount");
1412#endif
1413 if (vp->v_type == VDIR)
1414 error = EPERM;
1415 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1416 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_thread) == 0 &&
1417 vattr.va_nlink > 1)) {
1418 /*
1419 * Purge the name cache so that the chance of a lookup for
1420 * the name succeeding while the remove is in progress is
1421 * minimized. Without node locking it can still happen, such
1422 * that an I/O op returns ESTALE, but you can get that anyway
1423 * when another host removes the file, so it is tolerable.
1424 */
1425 cache_purge(vp);
1426 /*
1427 * throw away biocache buffers, mainly to avoid
1428 * unnecessary delayed writes later.
1429 */
1430 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_thread, 1);
1431 /* Do the rpc */
1432 if (error != EINTR)
1433 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1434 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread);
1435 /*
1436 * Kludge City: If the first reply to the remove rpc is lost,
1437 * the reply to the retransmitted request will be ENOENT
1438 * since the file was in fact removed.
1439 * Therefore, we cheat and return success.
1440 */
1441 if (error == ENOENT)
1442 error = 0;
1443 } else if (!np->n_sillyrename)
1444 error = nfs_sillyrename(dvp, vp, cnp);
1445 np->n_attrstamp = 0;
1446 return (error);
1447}
1448
1449/*
1450 * nfs file remove rpc called from nfs_inactive
1451 */
1452int
1453nfs_removeit(struct sillyrename *sp)
1454{
1455
1456 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1457 (struct thread *)0));
1458}
1459
1460/*
1461 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1462 */
1463static int
1464nfs_removerpc(struct vnode *dvp, const char *name, int namelen,
1465 struct ucred *cred, struct thread *td)
1466{
1467 u_int32_t *tl;
1468 caddr_t bpos, dpos;
1469 int error = 0, wccflag = NFSV3_WCCRATTR;
1470 struct mbuf *mreq, *mrep, *md, *mb;
1471 int v3 = NFS_ISV3(dvp);
1472
1473 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1474 mreq = nfsm_reqhead(dvp, NFSPROC_REMOVE,
1475 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1476 mb = mreq;
1477 bpos = mtod(mb, caddr_t);
1478 nfsm_fhtom(dvp, v3);
1479 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1480 nfsm_request(dvp, NFSPROC_REMOVE, td, cred);
1481 if (v3)
1482 nfsm_wcc_data(dvp, wccflag);
1483 m_freem(mrep);
1484nfsmout:
1485 VTONFS(dvp)->n_flag |= NMODIFIED;
1486 if (!wccflag)
1487 VTONFS(dvp)->n_attrstamp = 0;
1488 return (error);
1489}
1490
1491/*
1492 * nfs file rename call
1493 */
1494static int
1495nfs_rename(struct vop_rename_args *ap)
1496{
1497 struct vnode *fvp = ap->a_fvp;
1498 struct vnode *tvp = ap->a_tvp;
1499 struct vnode *fdvp = ap->a_fdvp;
1500 struct vnode *tdvp = ap->a_tdvp;
1501 struct componentname *tcnp = ap->a_tcnp;
1502 struct componentname *fcnp = ap->a_fcnp;
1503 int error;
1504
1505#ifndef DIAGNOSTIC
1506 if ((tcnp->cn_flags & HASBUF) == 0 ||
1507 (fcnp->cn_flags & HASBUF) == 0)
1508 panic("nfs_rename: no name");
1509#endif
1510 /* Check for cross-device rename */
1511 if ((fvp->v_mount != tdvp->v_mount) ||
1512 (tvp && (fvp->v_mount != tvp->v_mount))) {
1513 error = EXDEV;
1514 goto out;
1515 }
1516
1517 /*
1518 * We have to flush B_DELWRI data prior to renaming
1519 * the file. If we don't, the delayed-write buffers
1520 * can be flushed out later after the file has gone stale
1521 * under NFSV3. NFSV2 does not have this problem because
1522 * ( as far as I can tell ) it flushes dirty buffers more
1523 * often.
1524 */
1525
1526 VOP_FSYNC(fvp, fcnp->cn_cred, MNT_WAIT, fcnp->cn_thread);
1527 if (tvp)
1528 VOP_FSYNC(tvp, tcnp->cn_cred, MNT_WAIT, tcnp->cn_thread);
1529
1530 /*
1531 * If the tvp exists and is in use, sillyrename it before doing the
1532 * rename of the new file over it.
1533 * XXX Can't sillyrename a directory.
1534 */
1535 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1536 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1537 vput(tvp);
1538 tvp = NULL;
1539 }
1540
1541 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1542 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1543 tcnp->cn_thread);
1544
1545 if (fvp->v_type == VDIR) {
1546 if (tvp != NULL && tvp->v_type == VDIR)
1547 cache_purge(tdvp);
1548 cache_purge(fdvp);
1549 }
1550
1551out:
1552 if (tdvp == tvp)
1553 vrele(tdvp);
1554 else
1555 vput(tdvp);
1556 if (tvp)
1557 vput(tvp);
1558 vrele(fdvp);
1559 vrele(fvp);
1560 /*
1561 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1562 */
1563 if (error == ENOENT)
1564 error = 0;
1565 return (error);
1566}
1567
1568/*
1569 * nfs file rename rpc called from nfs_remove() above
1570 */
1571static int
1572nfs_renameit(struct vnode *sdvp, struct componentname *scnp,
1573 struct sillyrename *sp)
1574{
1575
1576 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen, sdvp,
1577 sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_thread));
1578}
1579
1580/*
1581 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1582 */
1583static int
1584nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen,
1585 struct vnode *tdvp, const char *tnameptr, int tnamelen, struct ucred *cred,
1586 struct thread *td)
1587{
1588 u_int32_t *tl;
1589 caddr_t bpos, dpos;
1590 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1591 struct mbuf *mreq, *mrep, *md, *mb;
1592 int v3 = NFS_ISV3(fdvp);
1593
1594 nfsstats.rpccnt[NFSPROC_RENAME]++;
1595 mreq = nfsm_reqhead(fdvp, NFSPROC_RENAME,
1596 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1597 nfsm_rndup(tnamelen));
1598 mb = mreq;
1599 bpos = mtod(mb, caddr_t);
1600 nfsm_fhtom(fdvp, v3);
1601 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1602 nfsm_fhtom(tdvp, v3);
1603 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1604 nfsm_request(fdvp, NFSPROC_RENAME, td, cred);
1605 if (v3) {
1606 nfsm_wcc_data(fdvp, fwccflag);
1607 nfsm_wcc_data(tdvp, twccflag);
1608 }
1609 m_freem(mrep);
1610nfsmout:
1611 VTONFS(fdvp)->n_flag |= NMODIFIED;
1612 VTONFS(tdvp)->n_flag |= NMODIFIED;
1613 if (!fwccflag)
1614 VTONFS(fdvp)->n_attrstamp = 0;
1615 if (!twccflag)
1616 VTONFS(tdvp)->n_attrstamp = 0;
1617 return (error);
1618}
1619
1620/*
1621 * nfs hard link create call
1622 */
1623static int
1624nfs_link(struct vop_link_args *ap)
1625{
1626 struct vnode *vp = ap->a_vp;
1627 struct vnode *tdvp = ap->a_tdvp;
1628 struct componentname *cnp = ap->a_cnp;
1629 u_int32_t *tl;
1630 caddr_t bpos, dpos;
1631 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1632 struct mbuf *mreq, *mrep, *md, *mb;
1633 int v3;
1634
1635 if (vp->v_mount != tdvp->v_mount) {
1636 return (EXDEV);
1637 }
1638
1639 /*
1640 * Push all writes to the server, so that the attribute cache
1641 * doesn't get "out of sync" with the server.
1642 * XXX There should be a better way!
1643 */
1644 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_thread);
1645
1646 v3 = NFS_ISV3(vp);
1647 nfsstats.rpccnt[NFSPROC_LINK]++;
1648 mreq = nfsm_reqhead(vp, NFSPROC_LINK,
1649 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1650 mb = mreq;
1651 bpos = mtod(mb, caddr_t);
1652 nfsm_fhtom(vp, v3);
1653 nfsm_fhtom(tdvp, v3);
1654 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1655 nfsm_request(vp, NFSPROC_LINK, cnp->cn_thread, cnp->cn_cred);
1656 if (v3) {
1657 nfsm_postop_attr(vp, attrflag);
1658 nfsm_wcc_data(tdvp, wccflag);
1659 }
1660 m_freem(mrep);
1661nfsmout:
1662 VTONFS(tdvp)->n_flag |= NMODIFIED;
1663 if (!attrflag)
1664 VTONFS(vp)->n_attrstamp = 0;
1665 if (!wccflag)
1666 VTONFS(tdvp)->n_attrstamp = 0;
1667 /*
1668 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1669 */
1670 if (error == EEXIST)
1671 error = 0;
1672 return (error);
1673}
1674
1675/*
1676 * nfs symbolic link create call
1677 */
1678static int
1679nfs_symlink(struct vop_symlink_args *ap)
1680{
1681 struct vnode *dvp = ap->a_dvp;
1682 struct vattr *vap = ap->a_vap;
1683 struct componentname *cnp = ap->a_cnp;
1684 struct nfsv2_sattr *sp;
1685 u_int32_t *tl;
1686 caddr_t bpos, dpos;
1687 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
1688 struct mbuf *mreq, *mrep, *md, *mb;
1689 struct vnode *newvp = (struct vnode *)0;
1690 int v3 = NFS_ISV3(dvp);
1691
1692 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
1693 slen = strlen(ap->a_target);
1694 mreq = nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
1695 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
1696 mb = mreq;
1697 bpos = mtod(mb, caddr_t);
1698 nfsm_fhtom(dvp, v3);
1699 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1700 if (v3) {
1701 nfsm_v3attrbuild(vap, FALSE);
1702 }
1703 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
1704 if (!v3) {
1705 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1705 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
1706 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
1707 sp->sa_uid = nfs_xdrneg1;
1708 sp->sa_gid = nfs_xdrneg1;
1709 sp->sa_size = nfs_xdrneg1;
1710 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1711 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1712 }
1713
1714 /*
1715 * Issue the NFS request and get the rpc response.
1716 *
1717 * Only NFSv3 responses returning an error of 0 actually return
1718 * a file handle that can be converted into newvp without having
1719 * to do an extra lookup rpc.
1720 */
1721 nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_thread, cnp->cn_cred);
1722 if (v3) {
1723 if (error == 0)
1724 nfsm_mtofh(dvp, newvp, v3, gotvp);
1725 nfsm_wcc_data(dvp, wccflag);
1726 }
1727
1728 /*
1729 * Error exits from the nfsm_* macros jump to the nfsmout label below; mrep is also freed.
1730 */
1731
1732 m_freem(mrep);
1733nfsmout:
1734
1735 /*
1736 * If we get an EEXIST error, silently convert it to no-error
1737 * in case of an NFS retry.
1738 */
1739 if (error == EEXIST)
1740 error = 0;
1741
1742 /*
1743 * If we do not have (or no longer have) an error, and we could
1744 * not extract the newvp from the response (because the request
1745 * was NFSv2 or the error was EEXIST), we have to do a lookup in
1746 * order to obtain a newvp to return.
1747 */
1748 if (error == 0 && newvp == NULL) {
1749 struct nfsnode *np = NULL;
1750
1751 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
1752 cnp->cn_cred, cnp->cn_thread, &np);
1753 if (!error)
1754 newvp = NFSTOV(np);
1755 }
1756 if (error) {
1757 if (newvp)
1758 vput(newvp);
1759 } else {
1760 *ap->a_vpp = newvp;
1761 }
1762 VTONFS(dvp)->n_flag |= NMODIFIED;
1763 if (!wccflag)
1764 VTONFS(dvp)->n_attrstamp = 0;
1765 return (error);
1766}
1767
1768/*
1769 * nfs make dir call
1770 */
1771static int
1772nfs_mkdir(struct vop_mkdir_args *ap)
1773{
1774 struct vnode *dvp = ap->a_dvp;
1775 struct vattr *vap = ap->a_vap;
1776 struct componentname *cnp = ap->a_cnp;
1777 struct nfsv2_sattr *sp;
1778 u_int32_t *tl;
1779 int len;
1780 struct nfsnode *np = (struct nfsnode *)0;
1781 struct vnode *newvp = (struct vnode *)0;
1782 caddr_t bpos, dpos;
1783 int error = 0, wccflag = NFSV3_WCCRATTR;
1784 int gotvp = 0;
1785 struct mbuf *mreq, *mrep, *md, *mb;
1786 struct vattr vattr;
1787 int v3 = NFS_ISV3(dvp);
1788
1789 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_thread)) != 0) {
1790 return (error);
1791 }
1792 len = cnp->cn_namelen;
1793 nfsstats.rpccnt[NFSPROC_MKDIR]++;
1794 mreq = nfsm_reqhead(dvp, NFSPROC_MKDIR,
1795 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
1796 mb = mreq;
1797 bpos = mtod(mb, caddr_t);
1798 nfsm_fhtom(dvp, v3);
1799 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
1800 if (v3) {
1801 nfsm_v3attrbuild(vap, FALSE);
1802 } else {
1706 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
1707 sp->sa_uid = nfs_xdrneg1;
1708 sp->sa_gid = nfs_xdrneg1;
1709 sp->sa_size = nfs_xdrneg1;
1710 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1711 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1712 }
1713
1714 /*
1715 * Issue the NFS request and get the rpc response.
1716 *
1717 * Only NFSv3 responses returning an error of 0 actually return
1718 * a file handle that can be converted into newvp without having
1719 * to do an extra lookup rpc.
1720 */
1721 nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_thread, cnp->cn_cred);
1722 if (v3) {
1723 if (error == 0)
1724 nfsm_mtofh(dvp, newvp, v3, gotvp);
1725 nfsm_wcc_data(dvp, wccflag);
1726 }
1727
1728 /*
1729 * Error exits from the nfsm_* macros jump to the nfsmout label below; mrep is also freed.
1730 */
1731
1732 m_freem(mrep);
1733nfsmout:
1734
1735 /*
1736 * If we get an EEXIST error, silently convert it to no-error
1737 * in case of an NFS retry.
1738 */
1739 if (error == EEXIST)
1740 error = 0;
1741
1742 /*
1743 * If we do not have (or no longer have) an error, and we could
1744 * not extract the newvp from the response (because the request
1745 * was NFSv2 or the error was EEXIST), we have to do a lookup in
1746 * order to obtain a newvp to return.
1747 */
1748 if (error == 0 && newvp == NULL) {
1749 struct nfsnode *np = NULL;
1750
1751 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
1752 cnp->cn_cred, cnp->cn_thread, &np);
1753 if (!error)
1754 newvp = NFSTOV(np);
1755 }
1756 if (error) {
1757 if (newvp)
1758 vput(newvp);
1759 } else {
1760 *ap->a_vpp = newvp;
1761 }
1762 VTONFS(dvp)->n_flag |= NMODIFIED;
1763 if (!wccflag)
1764 VTONFS(dvp)->n_attrstamp = 0;
1765 return (error);
1766}
1767
1768/*
1769 * nfs make dir call
1770 */
1771static int
1772nfs_mkdir(struct vop_mkdir_args *ap)
1773{
1774 struct vnode *dvp = ap->a_dvp;
1775 struct vattr *vap = ap->a_vap;
1776 struct componentname *cnp = ap->a_cnp;
1777 struct nfsv2_sattr *sp;
1778 u_int32_t *tl;
1779 int len;
1780 struct nfsnode *np = (struct nfsnode *)0;
1781 struct vnode *newvp = (struct vnode *)0;
1782 caddr_t bpos, dpos;
1783 int error = 0, wccflag = NFSV3_WCCRATTR;
1784 int gotvp = 0;
1785 struct mbuf *mreq, *mrep, *md, *mb;
1786 struct vattr vattr;
1787 int v3 = NFS_ISV3(dvp);
1788
1789 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_thread)) != 0) {
1790 return (error);
1791 }
1792 len = cnp->cn_namelen;
1793 nfsstats.rpccnt[NFSPROC_MKDIR]++;
1794 mreq = nfsm_reqhead(dvp, NFSPROC_MKDIR,
1795 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
1796 mb = mreq;
1797 bpos = mtod(mb, caddr_t);
1798 nfsm_fhtom(dvp, v3);
1799 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
1800 if (v3) {
1801 nfsm_v3attrbuild(vap, FALSE);
1802 } else {
1803 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1803 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
1804 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
1805 sp->sa_uid = nfs_xdrneg1;
1806 sp->sa_gid = nfs_xdrneg1;
1807 sp->sa_size = nfs_xdrneg1;
1808 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1809 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1810 }
1811 nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_thread, cnp->cn_cred);
1812 if (!error)
1813 nfsm_mtofh(dvp, newvp, v3, gotvp);
1814 if (v3)
1815 nfsm_wcc_data(dvp, wccflag);
1816 m_freem(mrep);
1817nfsmout:
1818 VTONFS(dvp)->n_flag |= NMODIFIED;
1819 if (!wccflag)
1820 VTONFS(dvp)->n_attrstamp = 0;
1821 /*
1822 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
1823 * if we can succeed in looking up the directory.
1824 */
1825 if (error == EEXIST || (!error && !gotvp)) {
1826 if (newvp) {
1827 vrele(newvp);
1828 newvp = (struct vnode *)0;
1829 }
1830 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
1831 cnp->cn_thread, &np);
1832 if (!error) {
1833 newvp = NFSTOV(np);
1834 if (newvp->v_type != VDIR)
1835 error = EEXIST;
1836 }
1837 }
1838 if (error) {
1839 if (newvp)
1840 vrele(newvp);
1841 } else
1842 *ap->a_vpp = newvp;
1843 return (error);
1844}
1845
1846/*
1847 * nfs remove directory call
1848 */
1849static int
1850nfs_rmdir(struct vop_rmdir_args *ap)
1851{
1852 struct vnode *vp = ap->a_vp;
1853 struct vnode *dvp = ap->a_dvp;
1854 struct componentname *cnp = ap->a_cnp;
1855 u_int32_t *tl;
1856 caddr_t bpos, dpos;
1857 int error = 0, wccflag = NFSV3_WCCRATTR;
1858 struct mbuf *mreq, *mrep, *md, *mb;
1859 int v3 = NFS_ISV3(dvp);
1860
1861 if (dvp == vp)
1862 return (EINVAL);
1863 nfsstats.rpccnt[NFSPROC_RMDIR]++;
1864 mreq = nfsm_reqhead(dvp, NFSPROC_RMDIR,
1865 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1866 mb = mreq;
1867 bpos = mtod(mb, caddr_t);
1868 nfsm_fhtom(dvp, v3);
1869 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1870 nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_thread, cnp->cn_cred);
1871 if (v3)
1872 nfsm_wcc_data(dvp, wccflag);
1873 m_freem(mrep);
1874nfsmout:
1875 VTONFS(dvp)->n_flag |= NMODIFIED;
1876 if (!wccflag)
1877 VTONFS(dvp)->n_attrstamp = 0;
1878 cache_purge(dvp);
1879 cache_purge(vp);
1880 /*
1881 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
1882 */
1883 if (error == ENOENT)
1884 error = 0;
1885 return (error);
1886}
1887
1888/*
1889 * nfs readdir call
1890 */
1891static int
1892nfs_readdir(struct vop_readdir_args *ap)
1893{
1894 struct vnode *vp = ap->a_vp;
1895 struct nfsnode *np = VTONFS(vp);
1896 struct uio *uio = ap->a_uio;
1897 int tresid, error;
1898 struct vattr vattr;
1899
1900 if (vp->v_type != VDIR)
1901 return (EPERM);
1902 /*
1903 * First, check for hit on the EOF offset cache
1904 */
1905 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
1906 (np->n_flag & NMODIFIED) == 0) {
1907 if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_td) == 0 &&
1908 np->n_mtime == vattr.va_mtime.tv_sec) {
1909 nfsstats.direofcache_hits++;
1910 return (0);
1911 }
1912 }
1913
1914 /*
1915 * Call nfs_bioread() to do the real work.
1916 */
1917 tresid = uio->uio_resid;
1918 error = nfs_bioread(vp, uio, 0, ap->a_cred);
1919
1920 if (!error && uio->uio_resid == tresid)
1921 nfsstats.direofcache_misses++;
1922 return (error);
1923}
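/*
 * Minimal sketch of the directory-EOF cache test in nfs_readdir() above:
 * the cached n_direofoffset only short-circuits the read when the
 * requested offset is at or beyond the cached EOF, the directory has not
 * been modified locally (NMODIFIED clear), and the server's mtime still
 * matches the cached one. All values below are made-up examples.
 */
#include <stdio.h>

int
main(void)
{
        long direofoffset = 1024;       /* cached directory EOF offset */
        long uio_offset = 1024;         /* caller reads at/after cached EOF */
        int modified = 0;               /* stands in for NMODIFIED */
        long cached_mtime = 1000;       /* np->n_mtime */
        long server_mtime = 1000;       /* va_mtime.tv_sec from GETATTR */

        if (direofoffset > 0 && uio_offset >= direofoffset &&
            !modified && cached_mtime == server_mtime)
                printf("direofcache hit: return 0, no readdir RPC needed\n");
        else
                printf("direofcache miss: fall through to nfs_bioread()\n");
        return (0);
}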
1924
1925/*
1926 * Readdir rpc call.
1927 * Called from below the buffer cache by nfs_doio().
1928 */
1929int
1930nfs_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
1931{
1932 int len, left;
1933 struct dirent *dp = NULL;
1934 u_int32_t *tl;
1935 caddr_t cp;
1936 nfsuint64 *cookiep;
1937 caddr_t bpos, dpos;
1938 struct mbuf *mreq, *mrep, *md, *mb;
1939 nfsuint64 cookie;
1940 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1941 struct nfsnode *dnp = VTONFS(vp);
1942 u_quad_t fileno;
1943 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
1944 int attrflag;
1945 int v3 = NFS_ISV3(vp);
1946
1947#ifndef DIAGNOSTIC
1948 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
1949 (uiop->uio_resid & (DIRBLKSIZ - 1)))
1950 panic("nfs readdirrpc bad uio");
1951#endif
1952
1953 /*
1954 * If there is no cookie, assume directory was stale.
1955 */
1956 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
1957 if (cookiep)
1958 cookie = *cookiep;
1959 else
1960 return (NFSERR_BAD_COOKIE);
1961 /*
1962 * Loop around doing readdir rpc's of size nm_readdirsize
1963 * truncated to a multiple of DIRBLKSIZ.
1964 * The stopping criteria is EOF or buffer full.
1965 */
1966 while (more_dirs && bigenough) {
1967 nfsstats.rpccnt[NFSPROC_READDIR]++;
1968 mreq = nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
1969 NFSX_READDIR(v3));
1970 mb = mreq;
1971 bpos = mtod(mb, caddr_t);
1972 nfsm_fhtom(vp, v3);
1973 if (v3) {

1974 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1974 tl = nfsm_build(u_int32_t *, 5 * NFSX_UNSIGNED);
1975 *tl++ = cookie.nfsuquad[0];
1976 *tl++ = cookie.nfsuquad[1];
1977 *tl++ = dnp->n_cookieverf.nfsuquad[0];
1978 *tl++ = dnp->n_cookieverf.nfsuquad[1];
1979 } else {
1980 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1980 tl = nfsm_build(u_int32_t *, 2 * NFSX_UNSIGNED);
1981 *tl++ = cookie.nfsuquad[0];
1982 }
1983 *tl = txdr_unsigned(nmp->nm_readdirsize);
1984 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_td, cred);
1985 if (v3) {
1986 nfsm_postop_attr(vp, attrflag);
1987 if (!error) {
1988 nfsm_dissect(tl, u_int32_t *,
1989 2 * NFSX_UNSIGNED);
1990 dnp->n_cookieverf.nfsuquad[0] = *tl++;
1991 dnp->n_cookieverf.nfsuquad[1] = *tl;
1992 } else {
1993 m_freem(mrep);
1994 goto nfsmout;
1995 }
1996 }
1997 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1998 more_dirs = fxdr_unsigned(int, *tl);
1999
2000 /* loop thru the dir entries, doctoring them to 4bsd form */
2001 while (more_dirs && bigenough) {
2002 if (v3) {
2003 nfsm_dissect(tl, u_int32_t *,
2004 3 * NFSX_UNSIGNED);
2005 fileno = fxdr_hyper(tl);
2006 len = fxdr_unsigned(int, *(tl + 2));
2007 } else {
2008 nfsm_dissect(tl, u_int32_t *,
2009 2 * NFSX_UNSIGNED);
2010 fileno = fxdr_unsigned(u_quad_t, *tl++);
2011 len = fxdr_unsigned(int, *tl);
2012 }
2013 if (len <= 0 || len > NFS_MAXNAMLEN) {
2014 error = EBADRPC;
2015 m_freem(mrep);
2016 goto nfsmout;
2017 }
2018 tlen = nfsm_rndup(len);
2019 if (tlen == len)
2020 tlen += 4; /* To ensure null termination */
2021 left = DIRBLKSIZ - blksiz;
2022 if ((tlen + DIRHDSIZ) > left) {
2023 dp->d_reclen += left;
2024 uiop->uio_iov->iov_base += left;
2025 uiop->uio_iov->iov_len -= left;
2026 uiop->uio_offset += left;
2027 uiop->uio_resid -= left;
2028 blksiz = 0;
2029 }
2030 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2031 bigenough = 0;
2032 if (bigenough) {
2033 dp = (struct dirent *)uiop->uio_iov->iov_base;
2034 dp->d_fileno = (int)fileno;
2035 dp->d_namlen = len;
2036 dp->d_reclen = tlen + DIRHDSIZ;
2037 dp->d_type = DT_UNKNOWN;
2038 blksiz += dp->d_reclen;
2039 if (blksiz == DIRBLKSIZ)
2040 blksiz = 0;
2041 uiop->uio_offset += DIRHDSIZ;
2042 uiop->uio_resid -= DIRHDSIZ;
2043 uiop->uio_iov->iov_base += DIRHDSIZ;
2044 uiop->uio_iov->iov_len -= DIRHDSIZ;
2045 nfsm_mtouio(uiop, len);
2046 cp = uiop->uio_iov->iov_base;
2047 tlen -= len;
2048 *cp = '\0'; /* null terminate */
2049 uiop->uio_iov->iov_base += tlen;
2050 uiop->uio_iov->iov_len -= tlen;
2051 uiop->uio_offset += tlen;
2052 uiop->uio_resid -= tlen;
2053 } else
2054 nfsm_adv(nfsm_rndup(len));
2055 if (v3) {
2056 nfsm_dissect(tl, u_int32_t *,
2057 3 * NFSX_UNSIGNED);
2058 } else {
2059 nfsm_dissect(tl, u_int32_t *,
2060 2 * NFSX_UNSIGNED);
2061 }
2062 if (bigenough) {
2063 cookie.nfsuquad[0] = *tl++;
2064 if (v3)
2065 cookie.nfsuquad[1] = *tl++;
2066 } else if (v3)
2067 tl += 2;
2068 else
2069 tl++;
2070 more_dirs = fxdr_unsigned(int, *tl);
2071 }
2072 /*
2073 * If at end of rpc data, get the eof boolean
2074 */
2075 if (!more_dirs) {
2076 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2077 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2078 }
2079 m_freem(mrep);
2080 }
2081 /*
2082 * Fill last record, iff any, out to a multiple of DIRBLKSIZ
2083 * by increasing d_reclen for the last record.
2084 */
2085 if (blksiz > 0) {
2086 left = DIRBLKSIZ - blksiz;
2087 dp->d_reclen += left;
2088 uiop->uio_iov->iov_base += left;
2089 uiop->uio_iov->iov_len -= left;
2090 uiop->uio_offset += left;
2091 uiop->uio_resid -= left;
2092 }
2093
2094 /*
2095 * We are now either at the end of the directory or have filled the
2096 * block.
2097 */
2098 if (bigenough)
2099 dnp->n_direofoffset = uiop->uio_offset;
2100 else {
2101 if (uiop->uio_resid > 0)
2102 printf("EEK! readdirrpc resid > 0\n");
2103 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2104 *cookiep = cookie;
2105 }
2106nfsmout:
2107 return (error);
2108}
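/*
 * Minimal sketch of the DIRBLKSIZ packing arithmetic used in
 * nfs_readdirrpc() above. DIRBLKSIZ is assumed to be 512 here and the
 * dirent header size is a placeholder; the real DIRHDSIZ is derived from
 * the struct dirent layout.
 */
#include <stdio.h>

#define SK_DIRBLKSIZ    512     /* assumed value of DIRBLKSIZ */
#define SK_DIRHDSIZ     12      /* placeholder, not the real DIRHDSIZ */

static int
sk_rndup(int n)
{
        return ((n + 3) & ~3);  /* round up to a 4-byte boundary */
}

int
main(void)
{
        int len = 13;                   /* example name length from the RPC */
        int tlen = sk_rndup(len);       /* 16: XDR-padded name length */
        int blksiz = 500;               /* bytes already used in this block */
        int left;

        if (tlen == len)
                tlen += 4;              /* leave room for NUL termination */
        left = SK_DIRBLKSIZ - blksiz;
        if (tlen + SK_DIRHDSIZ > left)
                printf("entry will not fit: grow prior d_reclen by %d "
                    "and start a new DIRBLKSIZ block\n", left);
        else
                printf("entry fits with %d bytes left in this block\n",
                    left - (tlen + SK_DIRHDSIZ));
        return (0);
}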
2109
2110/*
2111 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2112 */
2113int
2114nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
2115{
2116 int len, left;
2117 struct dirent *dp;
2118 u_int32_t *tl;
2119 caddr_t cp;
2120 struct vnode *newvp;
2121 nfsuint64 *cookiep;
2122 caddr_t bpos, dpos, dpossav1, dpossav2;
2123 struct mbuf *mreq, *mrep, *md, *mb, *mdsav1, *mdsav2;
2124 struct nameidata nami, *ndp = &nami;
2125 struct componentname *cnp = &ndp->ni_cnd;
2126 nfsuint64 cookie;
2127 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2128 struct nfsnode *dnp = VTONFS(vp), *np;
2129 nfsfh_t *fhp;
2130 u_quad_t fileno;
2131 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2132 int attrflag, fhsize;
2133
2134#ifndef nolint
2135 dp = (struct dirent *)0;
2136#endif
2137#ifndef DIAGNOSTIC
2138 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2139 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2140 panic("nfs readdirplusrpc bad uio");
2141#endif
2142 ndp->ni_dvp = vp;
2143 newvp = NULLVP;
2144
2145 /*
2146 * If there is no cookie, assume directory was stale.
2147 */
2148 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2149 if (cookiep)
2150 cookie = *cookiep;
2151 else
2152 return (NFSERR_BAD_COOKIE);
2153 /*
2154 * Loop around doing readdir rpc's of size nm_readdirsize
2155 * truncated to a multiple of DIRBLKSIZ.
2156 * The stopping criteria is EOF or buffer full.
2157 */
2158 while (more_dirs && bigenough) {
2159 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2160 mreq = nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
2161 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2162 mb = mreq;
2163 bpos = mtod(mb, caddr_t);
2164 nfsm_fhtom(vp, 1);
2165 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2165 tl = nfsm_build(u_int32_t *, 6 * NFSX_UNSIGNED);
2166 *tl++ = cookie.nfsuquad[0];
2167 *tl++ = cookie.nfsuquad[1];
2168 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2169 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2170 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2171 *tl = txdr_unsigned(nmp->nm_rsize);
2172 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_td, cred);
2173 nfsm_postop_attr(vp, attrflag);
2174 if (error) {
2175 m_freem(mrep);
2176 goto nfsmout;
2177 }
2178 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2179 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2180 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2181 more_dirs = fxdr_unsigned(int, *tl);
2182
2183 /* loop thru the dir entries, doctoring them to 4bsd form */
2184 while (more_dirs && bigenough) {
2185 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2186 fileno = fxdr_hyper(tl);
2187 len = fxdr_unsigned(int, *(tl + 2));
2188 if (len <= 0 || len > NFS_MAXNAMLEN) {
2189 error = EBADRPC;
2190 m_freem(mrep);
2191 goto nfsmout;
2192 }
2193 tlen = nfsm_rndup(len);
2194 if (tlen == len)
2195 tlen += 4; /* To ensure null termination*/
2196 left = DIRBLKSIZ - blksiz;
2197 if ((tlen + DIRHDSIZ) > left) {
2198 dp->d_reclen += left;
2199 uiop->uio_iov->iov_base += left;
2200 uiop->uio_iov->iov_len -= left;
2201 uiop->uio_offset += left;
2202 uiop->uio_resid -= left;
2203 blksiz = 0;
2204 }
2205 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2206 bigenough = 0;
2207 if (bigenough) {
2208 dp = (struct dirent *)uiop->uio_iov->iov_base;
2209 dp->d_fileno = (int)fileno;
2210 dp->d_namlen = len;
2211 dp->d_reclen = tlen + DIRHDSIZ;
2212 dp->d_type = DT_UNKNOWN;
2213 blksiz += dp->d_reclen;
2214 if (blksiz == DIRBLKSIZ)
2215 blksiz = 0;
2216 uiop->uio_offset += DIRHDSIZ;
2217 uiop->uio_resid -= DIRHDSIZ;
2218 uiop->uio_iov->iov_base += DIRHDSIZ;
2219 uiop->uio_iov->iov_len -= DIRHDSIZ;
2220 cnp->cn_nameptr = uiop->uio_iov->iov_base;
2221 cnp->cn_namelen = len;
2222 nfsm_mtouio(uiop, len);
2223 cp = uiop->uio_iov->iov_base;
2224 tlen -= len;
2225 *cp = '\0';
2226 uiop->uio_iov->iov_base += tlen;
2227 uiop->uio_iov->iov_len -= tlen;
2228 uiop->uio_offset += tlen;
2229 uiop->uio_resid -= tlen;
2230 } else
2231 nfsm_adv(nfsm_rndup(len));
2232 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2233 if (bigenough) {
2234 cookie.nfsuquad[0] = *tl++;
2235 cookie.nfsuquad[1] = *tl++;
2236 } else
2237 tl += 2;
2238
2239 /*
2240 * Since the attributes are before the file handle
2241 * (sigh), we must skip over the attributes and then
2242 * come back and get them.
2243 */
2244 attrflag = fxdr_unsigned(int, *tl);
2245 if (attrflag) {
2246 dpossav1 = dpos;
2247 mdsav1 = md;
2248 nfsm_adv(NFSX_V3FATTR);
2249 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2250 doit = fxdr_unsigned(int, *tl);
2251 if (doit) {
2252 nfsm_getfh(fhp, fhsize, 1);
2253 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2254 VREF(vp);
2255 newvp = vp;
2256 np = dnp;
2257 } else {
2258 error = nfs_nget(vp->v_mount, fhp,
2259 fhsize, &np);
2260 if (error)
2261 doit = 0;
2262 else
2263 newvp = NFSTOV(np);
2264 }
2265 }
2266 if (doit && bigenough) {
2267 dpossav2 = dpos;
2268 dpos = dpossav1;
2269 mdsav2 = md;
2270 md = mdsav1;
2271 nfsm_loadattr(newvp, (struct vattr *)0);
2272 dpos = dpossav2;
2273 md = mdsav2;
2274 dp->d_type =
2275 IFTODT(VTTOIF(np->n_vattr.va_type));
2276 ndp->ni_vp = newvp;
2277 cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
2278 }
2279 } else {
2280 /* Just skip over the file handle */
2281 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2282 i = fxdr_unsigned(int, *tl);
2283 nfsm_adv(nfsm_rndup(i));
2284 }
2285 if (newvp != NULLVP) {
2286 if (newvp == vp)
2287 vrele(newvp);
2288 else
2289 vput(newvp);
2290 newvp = NULLVP;
2291 }
2292 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2293 more_dirs = fxdr_unsigned(int, *tl);
2294 }
2295 /*
2296 * If at end of rpc data, get the eof boolean
2297 */
2298 if (!more_dirs) {
2299 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2300 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2301 }
2302 m_freem(mrep);
2303 }
2304 /*
2305 * Fill last record, iff any, out to a multiple of DIRBLKSIZ
2306 * by increasing d_reclen for the last record.
2307 */
2308 if (blksiz > 0) {
2309 left = DIRBLKSIZ - blksiz;
2310 dp->d_reclen += left;
2311 uiop->uio_iov->iov_base += left;
2312 uiop->uio_iov->iov_len -= left;
2313 uiop->uio_offset += left;
2314 uiop->uio_resid -= left;
2315 }
2316
2317 /*
2318 * We are now either at the end of the directory or have filled the
2319 * block.
2320 */
2321 if (bigenough)
2322 dnp->n_direofoffset = uiop->uio_offset;
2323 else {
2324 if (uiop->uio_resid > 0)
2325 printf("EEK! readdirplusrpc resid > 0\n");
2326 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2327 *cookiep = cookie;
2328 }
2329nfsmout:
2330 if (newvp != NULLVP) {
2331 if (newvp == vp)
2332 vrele(newvp);
2333 else
2334 vput(newvp);
2335 newvp = NULLVP;
2336 }
2337 return (error);
2338}
2339
2340/*
2341 * Silly rename. To make the stateless NFS filesystem look a little more
2342 * like "ufs", a remove of an active vnode is translated to a rename to a
2343 * funny looking filename that is removed by nfs_inactive on the nfsnode.
2344 * There is the potential for another process on a different client to
2345 * create the same funny name between when nfs_lookitup() fails and
2346 * nfs_rename() completes, but...
2347 */
2348static int
2349nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
2350{
2351 struct sillyrename *sp;
2352 struct nfsnode *np;
2353 int error;
2354 short pid;
2355
2356 cache_purge(dvp);
2357 np = VTONFS(vp);
2358#ifndef DIAGNOSTIC
2359 if (vp->v_type == VDIR)
2360 panic("nfs: sillyrename dir");
2361#endif
2362 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2363 M_NFSREQ, M_WAITOK);
2364 sp->s_cred = crdup(cnp->cn_cred);
2365 sp->s_dvp = dvp;
2366 VREF(dvp);
2367
2368 /* Fudge together a funny name */
2369 pid = cnp->cn_thread->td_proc->p_pid;
2370 sp->s_namlen = sprintf(sp->s_name, ".nfsA%04x4.4", pid);
2371
2372 /* Try lookitups until we get one that isn't there */
2373 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2374 cnp->cn_thread, (struct nfsnode **)0) == 0) {
2375 sp->s_name[4]++;
2376 if (sp->s_name[4] > 'z') {
2377 error = EINVAL;
2378 goto bad;
2379 }
2380 }
2381 error = nfs_renameit(dvp, cnp, sp);
2382 if (error)
2383 goto bad;
2384 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2385 cnp->cn_thread, &np);
2386 np->n_sillyrename = sp;
2387 return (0);
2388bad:
2389 vrele(sp->s_dvp);
2390 crfree(sp->s_cred);
2391 free((caddr_t)sp, M_NFSREQ);
2392 return (error);
2393}
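/*
 * Small sketch of the ".nfsA%04x4.4" silly-name generation and the retry
 * in nfs_sillyrename() above that bumps the fifth character ('A', 'B',
 * ...) until the name does not already exist; the pid is an arbitrary
 * example value.
 */
#include <stdio.h>

int
main(void)
{
        char name[32];
        short pid = 0x12ab;             /* example process id */
        int namlen, tries;

        namlen = sprintf(name, ".nfsA%04x4.4", pid);
        for (tries = 0; tries < 3; tries++) {
                printf("candidate %d: %s (len %d)\n", tries, name, namlen);
                name[4]++;              /* collision: try the next letter */
        }
        return (0);
}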
2394
2395/*
2396 * Look up a file name and optionally either update the file handle or
2397 * allocate an nfsnode, depending on the value of npp.
2398 * npp == NULL --> just do the lookup
2399 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2400 * handled too
2401 * *npp != NULL --> update the file handle in the vnode
2402 */
2403static int
2404nfs_lookitup(struct vnode *dvp, const char *name, int len, struct ucred *cred,
2405 struct thread *td, struct nfsnode **npp)
2406{
2407 u_int32_t *tl;
2408 struct vnode *newvp = (struct vnode *)0;
2409 struct nfsnode *np, *dnp = VTONFS(dvp);
2410 caddr_t bpos, dpos;
2411 int error = 0, fhlen, attrflag;
2412 struct mbuf *mreq, *mrep, *md, *mb;
2413 nfsfh_t *nfhp;
2414 int v3 = NFS_ISV3(dvp);
2415
2416 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2417 mreq = nfsm_reqhead(dvp, NFSPROC_LOOKUP,
2418 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2419 mb = mreq;
2420 bpos = mtod(mb, caddr_t);
2421 nfsm_fhtom(dvp, v3);
2422 nfsm_strtom(name, len, NFS_MAXNAMLEN);
2423 nfsm_request(dvp, NFSPROC_LOOKUP, td, cred);
2424 if (npp && !error) {
2425 nfsm_getfh(nfhp, fhlen, v3);
2426 if (*npp) {
2427 np = *npp;
2428 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2429 free((caddr_t)np->n_fhp, M_NFSBIGFH);
2430 np->n_fhp = &np->n_fh;
2431 } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
2432 np->n_fhp =(nfsfh_t *)malloc(fhlen, M_NFSBIGFH, M_WAITOK);
2433 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
2434 np->n_fhsize = fhlen;
2435 newvp = NFSTOV(np);
2436 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2437 VREF(dvp);
2438 newvp = dvp;
2439 } else {
2440 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2441 if (error) {
2442 m_freem(mrep);
2443 return (error);
2444 }
2445 newvp = NFSTOV(np);
2446 }
2447 if (v3) {
2448 nfsm_postop_attr(newvp, attrflag);
2449 if (!attrflag && *npp == NULL) {
2450 m_freem(mrep);
2451 if (newvp == dvp)
2452 vrele(newvp);
2453 else
2454 vput(newvp);
2455 return (ENOENT);
2456 }
2457 } else
2458 nfsm_loadattr(newvp, (struct vattr *)0);
2459 }
2460 m_freem(mrep);
2461nfsmout:
2462 if (npp && *npp == NULL) {
2463 if (error) {
2464 if (newvp) {
2465 if (newvp == dvp)
2466 vrele(newvp);
2467 else
2468 vput(newvp);
2469 }
2470 } else
2471 *npp = np;
2472 }
2473 return (error);
2474}
2475
2476/*
2477 * Nfs Version 3 commit rpc
2478 */
2479int
2480nfs_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred,
2481 struct thread *td)
2482{
2483 u_int32_t *tl;
2484 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2485 caddr_t bpos, dpos;
2486 int error = 0, wccflag = NFSV3_WCCRATTR;
2487 struct mbuf *mreq, *mrep, *md, *mb;
2488
2489 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0)
2490 return (0);
2491 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2492 mreq = nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
2493 mb = mreq;
2494 bpos = mtod(mb, caddr_t);
2495 nfsm_fhtom(vp, 1);
2496 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2496 tl = nfsm_build(u_int32_t *, 3 * NFSX_UNSIGNED);
2497 txdr_hyper(offset, tl);
2498 tl += 2;
2499 *tl = txdr_unsigned(cnt);
2500 nfsm_request(vp, NFSPROC_COMMIT, td, cred);
2501 nfsm_wcc_data(vp, wccflag);
2502 if (!error) {
2503 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2504 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
2505 NFSX_V3WRITEVERF)) {
2506 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
2507 NFSX_V3WRITEVERF);
2508 error = NFSERR_STALEWRITEVERF;
2509 }
2510 }
2511 m_freem(mrep);
2512nfsmout:
2513 return (error);
2514}
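/*
 * Standalone sketch of the write-verifier comparison done in nfs_commit()
 * above: when the verifier in the COMMIT reply differs from the one
 * cached for the mount, data previously written with an unstable WRITE
 * may have been lost across a server reboot, so NFSERR_STALEWRITEVERF
 * forces the dirty buffers to be written again. The 8-byte size matches
 * NFSX_V3WRITEVERF; the verifier contents are made-up examples.
 */
#include <stdio.h>
#include <string.h>

#define SK_WRITEVERF    8

int
main(void)
{
        unsigned char mount_verf[SK_WRITEVERF] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned char reply_verf[SK_WRITEVERF] = { 9, 9, 9, 9, 5, 6, 7, 8 };

        if (memcmp(mount_verf, reply_verf, SK_WRITEVERF) != 0) {
                memcpy(mount_verf, reply_verf, SK_WRITEVERF);
                printf("stale write verifier: re-issue uncommitted writes\n");
        } else
                printf("verifier unchanged: commit is stable\n");
        return (0);
}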
2515
2516/*
2517 * Strategy routine.
2518 * For async requests when nfsiod(s) are running, queue the request by
2519 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
2520 * request.
2521 */
2522static int
2523nfs_strategy(struct vop_strategy_args *ap)
2524{
2525 struct buf *bp = ap->a_bp;
2526 struct ucred *cr;
2527 struct thread *td;
2528 int error = 0;
2529
2530 KASSERT(!(bp->b_flags & B_DONE), ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
2531 KASSERT(BUF_REFCNT(bp) > 0, ("nfs_strategy: buffer %p not locked", bp));
2532
2533 if (bp->b_flags & B_PHYS)
2534 panic("nfs physio");
2535
2536 if (bp->b_flags & B_ASYNC)
2537 td = (struct thread *)0;
2538 else
2539 td = curthread; /* XXX */
2540
2541 if (bp->b_iocmd == BIO_READ)
2542 cr = bp->b_rcred;
2543 else
2544 cr = bp->b_wcred;
2545
2546 /*
2547 * If the op is asynchronous and an i/o daemon is waiting
2548 * queue the request, wake it up and wait for completion
2549 * otherwise just do it ourselves.
2550 */
2551 if ((bp->b_flags & B_ASYNC) == 0 ||
2552 nfs_asyncio(bp, NOCRED, td))
2553 error = nfs_doio(bp, cr, td);
2554 return (error);
2555}
2556
2557/*
2558 * fsync vnode op. Just call nfs_flush() with commit == 1.
2559 */
2560/* ARGSUSED */
2561static int
2562nfs_fsync(struct vop_fsync_args *ap)
2563{
2564
2565 return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_td, 1));
2566}
2567
2568/*
2569 * Flush all the blocks associated with a vnode.
2570 * Walk through the buffer pool and push any dirty pages
2571 * associated with the vnode.
2572 */
2573static int
2574nfs_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct thread *td,
2575 int commit)
2576{
2577 struct nfsnode *np = VTONFS(vp);
2578 struct buf *bp;
2579 int i;
2580 struct buf *nbp;
2581 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2582 int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2583 int passone = 1;
2584 u_quad_t off, endoff, toff;
2585 struct ucred* wcred = NULL;
2586 struct buf **bvec = NULL;
2587#ifndef NFS_COMMITBVECSIZ
2588#define NFS_COMMITBVECSIZ 20
2589#endif
2590 struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
2591 int bvecsize = 0, bveccount;
2592
2593 if (nmp->nm_flag & NFSMNT_INT)
2594 slpflag = PCATCH;
2595 if (!commit)
2596 passone = 0;
2597 /*
2598 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2599 * server, but has not been committed to stable storage on the server
2600 * yet. On the first pass, the byte range is worked out and the commit
2601 * rpc is done. On the second pass, nfs_writebp() is called to do the
2602 * job.
2603 */
2604again:
2605 off = (u_quad_t)-1;
2606 endoff = 0;
2607 bvecpos = 0;
2608 if (NFS_ISV3(vp) && commit) {
2609 s = splbio();
2610 /*
2611 * Count up how many buffers are waiting for a commit.
2612 */
2613 bveccount = 0;
2614 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2615 nbp = TAILQ_NEXT(bp, b_vnbufs);
2616 if (BUF_REFCNT(bp) == 0 &&
2617 (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
2618 == (B_DELWRI | B_NEEDCOMMIT))
2619 bveccount++;
2620 }
2621 /*
2622 * Allocate space to remember the list of bufs to commit. It is
2623 * important to use M_NOWAIT here to avoid a race with nfs_write.
2624 * If we can't get memory (for whatever reason), we will end up
2625 * committing the buffers one-by-one in the loop below.
2626 */
2627 if (bvec != NULL && bvec != bvec_on_stack)
2628 free(bvec, M_TEMP);
2629 if (bveccount > NFS_COMMITBVECSIZ) {
2630 bvec = (struct buf **)
2631 malloc(bveccount * sizeof(struct buf *),
2632 M_TEMP, M_NOWAIT);
2633 if (bvec == NULL) {
2634 bvec = bvec_on_stack;
2635 bvecsize = NFS_COMMITBVECSIZ;
2636 } else
2637 bvecsize = bveccount;
2638 } else {
2639 bvec = bvec_on_stack;
2640 bvecsize = NFS_COMMITBVECSIZ;
2641 }
2642 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2643 nbp = TAILQ_NEXT(bp, b_vnbufs);
2644 if (bvecpos >= bvecsize)
2645 break;
2646 if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
2647 (B_DELWRI | B_NEEDCOMMIT) ||
2648 BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT))
2649 continue;
2650 bremfree(bp);
2651 /*
2652 * Work out if all buffers are using the same cred
2653 * so we can deal with them all with one commit.
2654 *
2655 * NOTE: we are not clearing B_DONE here, so we have
2656 * to do it later on in this routine if we intend to
2657 * initiate I/O on the bp.
2658 *
2659 * Note: to avoid loopback deadlocks, we do not
2660 * assign b_runningbufspace.
2661 */
2662 if (wcred == NULL)
2663 wcred = bp->b_wcred;
2664 else if (wcred != bp->b_wcred)
2665 wcred = NOCRED;
2666 bp->b_flags |= B_WRITEINPROG;
2667 vfs_busy_pages(bp, 1);
2668
2669 /*
2670 * bp is protected by being locked, but nbp is not
2671 * and vfs_busy_pages() may sleep. We have to
2672 * recalculate nbp.
2673 */
2674 nbp = TAILQ_NEXT(bp, b_vnbufs);
2675
2676 /*
2677 * A list of these buffers is kept so that the
2678 * second loop knows which buffers have actually
2679 * been committed. This is necessary, since there
2680 * may be a race between the commit rpc and new
2681 * uncommitted writes on the file.
2682 */
2683 bvec[bvecpos++] = bp;
2684 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2685 bp->b_dirtyoff;
2686 if (toff < off)
2687 off = toff;
2688 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2689 if (toff > endoff)
2690 endoff = toff;
2691 }
2692 splx(s);
2693 }
2694 if (bvecpos > 0) {
2695 /*
2696 * Commit data on the server, as required.
2697 * If all bufs are using the same wcred, then use that with
2698 * one call for all of them, otherwise commit each one
2699 * separately.
2700 */
2701 if (wcred != NOCRED)
2702 retv = nfs_commit(vp, off, (int)(endoff - off),
2703 wcred, td);
2704 else {
2705 retv = 0;
2706 for (i = 0; i < bvecpos; i++) {
2707 off_t off, size;
2708 bp = bvec[i];
2709 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2710 bp->b_dirtyoff;
2711 size = (u_quad_t)(bp->b_dirtyend
2712 - bp->b_dirtyoff);
2713 retv = nfs_commit(vp, off, (int)size,
2714 bp->b_wcred, td);
2715 if (retv) break;
2716 }
2717 }
2718
2719 if (retv == NFSERR_STALEWRITEVERF)
2720 nfs_clearcommit(vp->v_mount);
2721
2722 /*
2723 * Now, either mark the blocks I/O done or mark the
2724 * blocks dirty, depending on whether the commit
2725 * succeeded.
2726 */
2727 for (i = 0; i < bvecpos; i++) {
2728 bp = bvec[i];
2729 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG | B_CLUSTEROK);
2730 if (retv) {
2731 /*
2732 * Error, leave B_DELWRI intact
2733 */
2734 vfs_unbusy_pages(bp);
2735 brelse(bp);
2736 } else {
2737 /*
2738 * Success, remove B_DELWRI ( bundirty() ).
2739 *
2740 * b_dirtyoff/b_dirtyend seem to be NFS
2741 * specific. We should probably move that
2742 * into bundirty(). XXX
2743 */
2744 s = splbio();
2745 vp->v_numoutput++;
2746 bp->b_flags |= B_ASYNC;
2747 bundirty(bp);
2748 bp->b_flags &= ~B_DONE;
2749 bp->b_ioflags &= ~BIO_ERROR;
2750 bp->b_dirtyoff = bp->b_dirtyend = 0;
2751 splx(s);
2752 bufdone(bp);
2753 }
2754 }
2755 }
2756
2757 /*
2758 * Start/do any write(s) that are required.
2759 */
2760loop:
2761 s = splbio();
2762 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2763 nbp = TAILQ_NEXT(bp, b_vnbufs);
2764 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
2765 if (waitfor != MNT_WAIT || passone)
2766 continue;
2767 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
2768 "nfsfsync", slpflag, slptimeo);
2769 splx(s);
2770 if (error == 0)
2771 panic("nfs_fsync: inconsistent lock");
2772 if (error == ENOLCK)
2773 goto loop;
2774 if (nfs_sigintr(nmp, (struct nfsreq *)0, td->td_proc)) {
2775 error = EINTR;
2776 goto done;
2777 }
2778 if (slpflag == PCATCH) {
2779 slpflag = 0;
2780 slptimeo = 2 * hz;
2781 }
2782 goto loop;
2783 }
2784 if ((bp->b_flags & B_DELWRI) == 0)
2785 panic("nfs_fsync: not dirty");
2786 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
2787 BUF_UNLOCK(bp);
2788 continue;
2789 }
2790 bremfree(bp);
2791 if (passone || !commit)
2792 bp->b_flags |= B_ASYNC;
2793 else
2794 bp->b_flags |= B_ASYNC | B_WRITEINPROG;
2795 splx(s);
2796 BUF_WRITE(bp);
2797 goto loop;
2798 }
2799 splx(s);
2800 if (passone) {
2801 passone = 0;
2802 goto again;
2803 }
2804 if (waitfor == MNT_WAIT) {
2805 while (vp->v_numoutput) {
2806 vp->v_flag |= VBWAIT;
2807 error = tsleep((caddr_t)&vp->v_numoutput,
2808 slpflag | (PRIBIO + 1), "nfsfsync", slptimeo);
2809 if (error) {
2810 if (nfs_sigintr(nmp, (struct nfsreq *)0, td->td_proc)) {
2811 error = EINTR;
2812 goto done;
2813 }
2814 if (slpflag == PCATCH) {
2815 slpflag = 0;
2816 slptimeo = 2 * hz;
2817 }
2818 }
2819 }
2820 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) && commit) {
2821 goto loop;
2822 }
2823 }
2824 if (np->n_flag & NWRITEERR) {
2825 error = np->n_error;
2826 np->n_flag &= ~NWRITEERR;
2827 }
2828done:
2829 if (bvec != NULL && bvec != bvec_on_stack)
2830 free(bvec, M_TEMP);
2831 return (error);
2832}
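/*
 * Tiny sketch of the commit-range computation in nfs_flush() above: the
 * single COMMIT covers from the lowest b_dirtyoff to the highest
 * b_dirtyend across the gathered buffers. Block numbers and dirty ranges
 * are made-up examples; DEV_BSIZE is assumed to be 512.
 */
#include <stdio.h>

#define SK_DEV_BSIZE    512

struct sk_buf {
        long    blkno;          /* logical block number */
        int     dirtyoff;       /* first dirty byte within the buffer */
        int     dirtyend;       /* one past the last dirty byte */
};

int
main(void)
{
        struct sk_buf bufs[] = { { 4, 100, 512 }, { 5, 0, 300 } };
        unsigned long off = (unsigned long)-1, endoff = 0, toff;
        int i;

        for (i = 0; i < 2; i++) {
                toff = (unsigned long)bufs[i].blkno * SK_DEV_BSIZE +
                    bufs[i].dirtyoff;
                if (toff < off)
                        off = toff;
                toff += (unsigned long)(bufs[i].dirtyend - bufs[i].dirtyoff);
                if (toff > endoff)
                        endoff = toff;
        }
        printf("commit offset %lu, count %lu\n", off, endoff - off);
        return (0);
}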
2833
2834/*
2835 * NFS advisory byte-level locks.
2836 */
2837static int
2838nfs_advlock(struct vop_advlock_args *ap)
2839{
2840
2841 return (nfs_dolock(ap));
2842}
2843
2844/*
2845 * Print out the contents of an nfsnode.
2846 */
2847static int
2848nfs_print(struct vop_print_args *ap)
2849{
2850 struct vnode *vp = ap->a_vp;
2851 struct nfsnode *np = VTONFS(vp);
2852
2853 printf("tag VT_NFS, fileid %ld fsid 0x%x",
2854 np->n_vattr.va_fileid, np->n_vattr.va_fsid);
2855 if (vp->v_type == VFIFO)
2856 fifo_printinfo(vp);
2857 printf("\n");
2858 return (0);
2859}
2860
2861/*
2862 * This is the "real" nfs::bwrite(struct buf*).
2863 * B_WRITEINPROG isn't set unless the force flag is set, and it
2864 * handles the B_NEEDCOMMIT flag.
2865 * We set B_CACHE if this is a VMIO buffer.
2866 */
2867int
2868nfs_writebp(struct buf *bp, int force, struct thread *td)
2869{
2870 int s;
2871 int oldflags = bp->b_flags;
2872#if 0
2873 int retv = 1;
2874 off_t off;
2875#endif
2876
2877 if (BUF_REFCNT(bp) == 0)
2878 panic("bwrite: buffer is not locked???");
2879
2880 if (bp->b_flags & B_INVAL) {
2881 brelse(bp);
2882 return(0);
2883 }
2884
2885 bp->b_flags |= B_CACHE;
2886
2887 /*
2888 * Undirty the bp. We will redirty it later if the I/O fails.
2889 */
2890
2891 s = splbio();
2892 bundirty(bp);
2893 bp->b_flags &= ~B_DONE;
2894 bp->b_ioflags &= ~BIO_ERROR;
2895 bp->b_iocmd = BIO_WRITE;
2896
2897 bp->b_vp->v_numoutput++;
2898 curthread->td_proc->p_stats->p_ru.ru_oublock++;
2899 splx(s);
2900
2901 /*
2902 * Note: to avoid loopback deadlocks, we do not
2903 * assign b_runningbufspace.
2904 */
2905 vfs_busy_pages(bp, 1);
2906
2907 if (force)
2908 bp->b_flags |= B_WRITEINPROG;
2909 BUF_KERNPROC(bp);
2910 BUF_STRATEGY(bp);
2911
2912 if( (oldflags & B_ASYNC) == 0) {
2913 int rtval = bufwait(bp);
2914
2915 if (oldflags & B_DELWRI) {
2916 s = splbio();
2917 reassignbuf(bp, bp->b_vp);
2918 splx(s);
2919 }
2920
2921 brelse(bp);
2922 return (rtval);
2923 }
2924
2925 return (0);
2926}
2927
2928/*
2929 * nfs special file access vnode op.
2930 * Essentially just get vattr and then imitate iaccess() since the device is
2931 * local to the client.
2932 */
2933static int
2934nfsspec_access(struct vop_access_args *ap)
2935{
2936 struct vattr *vap;
2937 gid_t *gp;
2938 struct ucred *cred = ap->a_cred;
2939 struct vnode *vp = ap->a_vp;
2940 mode_t mode = ap->a_mode;
2941 struct vattr vattr;
2942 int i;
2943 int error;
2944
2945 /*
2946 * Disallow write attempts on filesystems mounted read-only;
2947 * unless the file is a socket, fifo, or a block or character
2948 * device resident on the filesystem.
2949 */
2950 if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
2951 switch (vp->v_type) {
2952 case VREG:
2953 case VDIR:
2954 case VLNK:
2955 return (EROFS);
2956 default:
2957 break;
2958 }
2959 }
2960 /*
2961 * If you're the super-user,
2962 * you always get access.
2963 */
2964 if (cred->cr_uid == 0)
2965 return (0);
2966 vap = &vattr;
2967 error = VOP_GETATTR(vp, vap, cred, ap->a_td);
2968 if (error)
2969 return (error);
2970 /*
2971 * Access check is based on only one of owner, group, public.
2972 * If not owner, then check group. If not a member of the
2973 * group, then check public access.
2974 */
2975 if (cred->cr_uid != vap->va_uid) {
2976 mode >>= 3;
2977 gp = cred->cr_groups;
2978 for (i = 0; i < cred->cr_ngroups; i++, gp++)
2979 if (vap->va_gid == *gp)
2980 goto found;
2981 mode >>= 3;
2982found:
2983 ;
2984 }
2985 error = (vap->va_mode & mode) == mode ? 0 : EACCES;
2986 return (error);
2987}
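/*
 * Worked sketch of the owner/group/other shift in nfsspec_access() above:
 * the requested VREAD/VWRITE/VEXEC bits sit in the owner position and are
 * shifted right by 3 for the group check and by 3 again for "other".
 * The octal bit values and the example file mode are assumptions for
 * illustration.
 */
#include <stdio.h>

#define SK_VEXEC        0100    /* owner-class execute bit */
#define SK_VWRITE       0200    /* owner-class write bit */
#define SK_VREAD        0400    /* owner-class read bit */

int
main(void)
{
        unsigned int mode = SK_VREAD | SK_VWRITE;   /* request read+write */
        unsigned int va_mode = 0644;                /* file mode rw-r--r-- */
        int is_owner = 0, in_group = 0;             /* caller matches "other" */

        if (!is_owner) {
                mode >>= 3;                         /* check group bits */
                if (!in_group)
                        mode >>= 3;                 /* check "other" bits */
        }
        printf("access %s\n",
            (va_mode & mode) == mode ? "granted" : "denied (EACCES)");
        return (0);
}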
2988
2989/*
2990 * Read wrapper for special devices.
2991 */
2992static int
2993nfsspec_read(struct vop_read_args *ap)
2994{
2995 struct nfsnode *np = VTONFS(ap->a_vp);
2996
2997 /*
2998 * Set access flag.
2999 */
3000 np->n_flag |= NACC;
3001 getnanotime(&np->n_atim);
3002 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3003}
3004
3005/*
3006 * Write wrapper for special devices.
3007 */
3008static int
3009nfsspec_write(struct vop_write_args *ap)
3010{
3011 struct nfsnode *np = VTONFS(ap->a_vp);
3012
3013 /*
3014 * Set update flag.
3015 */
3016 np->n_flag |= NUPD;
3017 getnanotime(&np->n_mtim);
3018 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3019}
3020
3021/*
3022 * Close wrapper for special devices.
3023 *
3024 * Update the times on the nfsnode then do device close.
3025 */
3026static int
3027nfsspec_close(struct vop_close_args *ap)
3028{
3029 struct vnode *vp = ap->a_vp;
3030 struct nfsnode *np = VTONFS(vp);
3031 struct vattr vattr;
3032
3033 if (np->n_flag & (NACC | NUPD)) {
3034 np->n_flag |= NCHG;
3035 if (vp->v_usecount == 1 &&
3036 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3037 VATTR_NULL(&vattr);
3038 if (np->n_flag & NACC)
3039 vattr.va_atime = np->n_atim;
3040 if (np->n_flag & NUPD)
3041 vattr.va_mtime = np->n_mtim;
3042 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_td);
3043 }
3044 }
3045 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3046}
3047
3048/*
3049 * Read wrapper for fifos.
3050 */
3051static int
3052nfsfifo_read(struct vop_read_args *ap)
3053{
3054 struct nfsnode *np = VTONFS(ap->a_vp);
3055
3056 /*
3057 * Set access flag.
3058 */
3059 np->n_flag |= NACC;
3060 getnanotime(&np->n_atim);
3061 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3062}
3063
3064/*
3065 * Write wrapper for fifos.
3066 */
3067static int
3068nfsfifo_write(struct vop_write_args *ap)
3069{
3070 struct nfsnode *np = VTONFS(ap->a_vp);
3071
3072 /*
3073 * Set update flag.
3074 */
3075 np->n_flag |= NUPD;
3076 getnanotime(&np->n_mtim);
3077 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3078}
3079
3080/*
3081 * Close wrapper for fifos.
3082 *
3083 * Update the times on the nfsnode then do fifo close.
3084 */
3085static int
3086nfsfifo_close(struct vop_close_args *ap)
3087{
3088 struct vnode *vp = ap->a_vp;
3089 struct nfsnode *np = VTONFS(vp);
3090 struct vattr vattr;
3091 struct timespec ts;
3092
3093 if (np->n_flag & (NACC | NUPD)) {
3094 getnanotime(&ts);
3095 if (np->n_flag & NACC)
3096 np->n_atim = ts;
3097 if (np->n_flag & NUPD)
3098 np->n_mtim = ts;
3099 np->n_flag |= NCHG;
3100 if (vp->v_usecount == 1 &&
3101 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3102 VATTR_NULL(&vattr);
3103 if (np->n_flag & NACC)
3104 vattr.va_atime = np->n_atim;
3105 if (np->n_flag & NUPD)
3106 vattr.va_mtime = np->n_mtim;
3107 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_td);
3108 }
3109 }
3110 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3111}
2497 txdr_hyper(offset, tl);
2498 tl += 2;
2499 *tl = txdr_unsigned(cnt);
2500 nfsm_request(vp, NFSPROC_COMMIT, td, cred);
2501 nfsm_wcc_data(vp, wccflag);
2502 if (!error) {
2503 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2504 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
2505 NFSX_V3WRITEVERF)) {
2506 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
2507 NFSX_V3WRITEVERF);
2508 error = NFSERR_STALEWRITEVERF;
2509 }
2510 }
2511 m_freem(mrep);
2512nfsmout:
2513 return (error);
2514}
2515
2516/*
2517 * Strategy routine.
2518 * For async requests when nfsiod(s) are running, queue the request by
2519 * calling nfs_asyncio(), otherwise just all nfs_doio() to do the
2520 * request.
2521 */
2522static int
2523nfs_strategy(struct vop_strategy_args *ap)
2524{
2525 struct buf *bp = ap->a_bp;
2526 struct ucred *cr;
2527 struct thread *td;
2528 int error = 0;
2529
2530 KASSERT(!(bp->b_flags & B_DONE), ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
2531 KASSERT(BUF_REFCNT(bp) > 0, ("nfs_strategy: buffer %p not locked", bp));
2532
2533 if (bp->b_flags & B_PHYS)
2534 panic("nfs physio");
2535
2536 if (bp->b_flags & B_ASYNC)
2537 td = (struct thread *)0;
2538 else
2539 td = curthread; /* XXX */
2540
2541 if (bp->b_iocmd == BIO_READ)
2542 cr = bp->b_rcred;
2543 else
2544 cr = bp->b_wcred;
2545
2546 /*
2547 * If the op is asynchronous and an i/o daemon is waiting
2548 * queue the request, wake it up and wait for completion
2549 * otherwise just do it ourselves.
2550 */
2551 if ((bp->b_flags & B_ASYNC) == 0 ||
2552 nfs_asyncio(bp, NOCRED, td))
2553 error = nfs_doio(bp, cr, td);
2554 return (error);
2555}
2556
2557/*
2558 * fsync vnode op. Just call nfs_flush() with commit == 1.
2559 */
2560/* ARGSUSED */
2561static int
2562nfs_fsync(struct vop_fsync_args *ap)
2563{
2564
2565 return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_td, 1));
2566}
2567
2568/*
2569 * Flush all the blocks associated with a vnode.
2570 * Walk through the buffer pool and push any dirty pages
2571 * associated with the vnode.
2572 */
2573static int
2574nfs_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct thread *td,
2575 int commit)
2576{
2577 struct nfsnode *np = VTONFS(vp);
2578 struct buf *bp;
2579 int i;
2580 struct buf *nbp;
2581 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2582 int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2583 int passone = 1;
2584 u_quad_t off, endoff, toff;
2585 struct ucred* wcred = NULL;
2586 struct buf **bvec = NULL;
2587#ifndef NFS_COMMITBVECSIZ
2588#define NFS_COMMITBVECSIZ 20
2589#endif
2590 struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
2591 int bvecsize = 0, bveccount;
2592
2593 if (nmp->nm_flag & NFSMNT_INT)
2594 slpflag = PCATCH;
2595 if (!commit)
2596 passone = 0;
2597 /*
2598 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2599 * server, but nas not been committed to stable storage on the server
2600 * yet. On the first pass, the byte range is worked out and the commit
2601 * rpc is done. On the second pass, nfs_writebp() is called to do the
2602 * job.
2603 */
2604again:
2605 off = (u_quad_t)-1;
2606 endoff = 0;
2607 bvecpos = 0;
2608 if (NFS_ISV3(vp) && commit) {
2609 s = splbio();
2610 /*
2611 * Count up how many buffers waiting for a commit.
2612 */
2613 bveccount = 0;
2614 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2615 nbp = TAILQ_NEXT(bp, b_vnbufs);
2616 if (BUF_REFCNT(bp) == 0 &&
2617 (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
2618 == (B_DELWRI | B_NEEDCOMMIT))
2619 bveccount++;
2620 }
2621 /*
2622 * Allocate space to remember the list of bufs to commit. It is
2623 * important to use M_NOWAIT here to avoid a race with nfs_write.
2624 * If we can't get memory (for whatever reason), we will end up
2625 * committing the buffers one-by-one in the loop below.
2626 */
2627 if (bvec != NULL && bvec != bvec_on_stack)
2628 free(bvec, M_TEMP);
2629 if (bveccount > NFS_COMMITBVECSIZ) {
2630 bvec = (struct buf **)
2631 malloc(bveccount * sizeof(struct buf *),
2632 M_TEMP, M_NOWAIT);
2633 if (bvec == NULL) {
2634 bvec = bvec_on_stack;
2635 bvecsize = NFS_COMMITBVECSIZ;
2636 } else
2637 bvecsize = bveccount;
2638 } else {
2639 bvec = bvec_on_stack;
2640 bvecsize = NFS_COMMITBVECSIZ;
2641 }
2642 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2643 nbp = TAILQ_NEXT(bp, b_vnbufs);
2644 if (bvecpos >= bvecsize)
2645 break;
2646 if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
2647 (B_DELWRI | B_NEEDCOMMIT) ||
2648 BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT))
2649 continue;
2650 bremfree(bp);
2651 /*
2652 * Work out if all buffers are using the same cred
2653 * so we can deal with them all with one commit.
2654 *
2655 * NOTE: we are not clearing B_DONE here, so we have
2656 * to do it later on in this routine if we intend to
2657 * initiate I/O on the bp.
2658 *
2659 * Note: to avoid loopback deadlocks, we do not
2660 * assign b_runningbufspace.
2661 */
2662 if (wcred == NULL)
2663 wcred = bp->b_wcred;
2664 else if (wcred != bp->b_wcred)
2665 wcred = NOCRED;
2666 bp->b_flags |= B_WRITEINPROG;
2667 vfs_busy_pages(bp, 1);
2668
2669 /*
2670 * bp is protected by being locked, but nbp is not
2671 * and vfs_busy_pages() may sleep. We have to
2672 * recalculate nbp.
2673 */
2674 nbp = TAILQ_NEXT(bp, b_vnbufs);
2675
2676 /*
2677 * A list of these buffers is kept so that the
2678 * second loop knows which buffers have actually
2679 * been committed. This is necessary, since there
2680 * may be a race between the commit rpc and new
2681 * uncommitted writes on the file.
2682 */
2683 bvec[bvecpos++] = bp;
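			/*
			 * Widen [off, endoff) so the byte range handed to
			 * the commit RPC also covers this buffer's dirty
			 * region.
			 */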
2684 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2685 bp->b_dirtyoff;
2686 if (toff < off)
2687 off = toff;
2688 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2689 if (toff > endoff)
2690 endoff = toff;
2691 }
2692 splx(s);
2693 }
2694 if (bvecpos > 0) {
2695 /*
2696 * Commit data on the server, as required.
2697 * If all bufs are using the same wcred, then use that with
2698 * one call for all of them, otherwise commit each one
2699 * separately.
2700 */
2701 if (wcred != NOCRED)
2702 retv = nfs_commit(vp, off, (int)(endoff - off),
2703 wcred, td);
2704 else {
2705 retv = 0;
2706 for (i = 0; i < bvecpos; i++) {
2707 off_t off, size;
2708 bp = bvec[i];
2709 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2710 bp->b_dirtyoff;
2711 size = (u_quad_t)(bp->b_dirtyend
2712 - bp->b_dirtyoff);
2713 retv = nfs_commit(vp, off, (int)size,
2714 bp->b_wcred, td);
2715 if (retv) break;
2716 }
2717 }
2718
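		/*
		 * A stale write verifier means the server has rebooted
		 * since the data was written, so nothing it acknowledged
		 * with an unstable reply can be trusted; nfs_clearcommit()
		 * forces those buffers to be written again.
		 */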
2719 if (retv == NFSERR_STALEWRITEVERF)
2720 nfs_clearcommit(vp->v_mount);
2721
2722 /*
2723 * Now, either mark the blocks I/O done or mark the
2724 * blocks dirty, depending on whether the commit
2725 * succeeded.
2726 */
2727 for (i = 0; i < bvecpos; i++) {
2728 bp = bvec[i];
2729 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG | B_CLUSTEROK);
2730 if (retv) {
2731 /*
2732 * Error, leave B_DELWRI intact
2733 */
2734 vfs_unbusy_pages(bp);
2735 brelse(bp);
2736 } else {
2737 /*
2738 * Success, remove B_DELWRI ( bundirty() ).
2739 *
2740 * b_dirtyoff/b_dirtyend seem to be NFS
2741 * specific. We should probably move that
2742 * into bundirty(). XXX
2743 */
2744 s = splbio();
2745 vp->v_numoutput++;
2746 bp->b_flags |= B_ASYNC;
2747 bundirty(bp);
2748 bp->b_flags &= ~B_DONE;
2749 bp->b_ioflags &= ~BIO_ERROR;
2750 bp->b_dirtyoff = bp->b_dirtyend = 0;
2751 splx(s);
2752 bufdone(bp);
2753 }
2754 }
2755 }
2756
2757 /*
2758 * Start/do any write(s) that are required.
2759 */
2760loop:
2761 s = splbio();
2762 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2763 nbp = TAILQ_NEXT(bp, b_vnbufs);
2764 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
2765 if (waitfor != MNT_WAIT || passone)
2766 continue;
2767 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
2768 "nfsfsync", slpflag, slptimeo);
2769 splx(s);
2770 if (error == 0)
2771 panic("nfs_fsync: inconsistent lock");
2772 if (error == ENOLCK)
2773 goto loop;
2774 if (nfs_sigintr(nmp, (struct nfsreq *)0, td->td_proc)) {
2775 error = EINTR;
2776 goto done;
2777 }
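			/*
			 * After an interruptible sleep returns, drop PCATCH
			 * and poll with a two second timeout instead, so a
			 * pending non-fatal signal cannot make us spin here.
			 */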
2778 if (slpflag == PCATCH) {
2779 slpflag = 0;
2780 slptimeo = 2 * hz;
2781 }
2782 goto loop;
2783 }
2784 if ((bp->b_flags & B_DELWRI) == 0)
2785 panic("nfs_fsync: not dirty");
2786 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
2787 BUF_UNLOCK(bp);
2788 continue;
2789 }
2790 bremfree(bp);
2791 if (passone || !commit)
2792 bp->b_flags |= B_ASYNC;
2793 else
2794 bp->b_flags |= B_ASYNC | B_WRITEINPROG;
2795 splx(s);
2796 BUF_WRITE(bp);
2797 goto loop;
2798 }
2799 splx(s);
2800 if (passone) {
2801 passone = 0;
2802 goto again;
2803 }
2804 if (waitfor == MNT_WAIT) {
2805 while (vp->v_numoutput) {
2806 vp->v_flag |= VBWAIT;
2807 error = tsleep((caddr_t)&vp->v_numoutput,
2808 slpflag | (PRIBIO + 1), "nfsfsync", slptimeo);
2809 if (error) {
2810 if (nfs_sigintr(nmp, (struct nfsreq *)0, td->td_proc)) {
2811 error = EINTR;
2812 goto done;
2813 }
2814 if (slpflag == PCATCH) {
2815 slpflag = 0;
2816 slptimeo = 2 * hz;
2817 }
2818 }
2819 }
2820 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) && commit) {
2821 goto loop;
2822 }
2823 }
2824 if (np->n_flag & NWRITEERR) {
2825 error = np->n_error;
2826 np->n_flag &= ~NWRITEERR;
2827 }
2828done:
2829 if (bvec != NULL && bvec != bvec_on_stack)
2830 free(bvec, M_TEMP);
2831 return (error);
2832}
2833
2834/*
2835 * NFS advisory byte-level locks.
2836 */
2837static int
2838nfs_advlock(struct vop_advlock_args *ap)
2839{
2840
2841 return (nfs_dolock(ap));
2842}
2843
2844/*
2845 * Print out the contents of an nfsnode.
2846 */
2847static int
2848nfs_print(struct vop_print_args *ap)
2849{
2850 struct vnode *vp = ap->a_vp;
2851 struct nfsnode *np = VTONFS(vp);
2852
2853 printf("tag VT_NFS, fileid %ld fsid 0x%x",
2854 np->n_vattr.va_fileid, np->n_vattr.va_fsid);
2855 if (vp->v_type == VFIFO)
2856 fifo_printinfo(vp);
2857 printf("\n");
2858 return (0);
2859}
2860
2861/*
2862 * This is the "real" nfs::bwrite(struct buf*).
2863 * B_WRITEINPROG isn't set unless the force flag is set, and it
2864 * handles the B_NEEDCOMMIT flag.
2865 * We set B_CACHE if this is a VMIO buffer.
2866 */
2867int
2868nfs_writebp(struct buf *bp, int force, struct thread *td)
2869{
2870 int s;
2871 int oldflags = bp->b_flags;
2872#if 0
2873 int retv = 1;
2874 off_t off;
2875#endif
2876
2877 if (BUF_REFCNT(bp) == 0)
2878 panic("bwrite: buffer is not locked???");
2879
2880 if (bp->b_flags & B_INVAL) {
2881 brelse(bp);
2882 return(0);
2883 }
2884
2885 bp->b_flags |= B_CACHE;
2886
2887 /*
2888 * Undirty the bp. We will redirty it later if the I/O fails.
2889 */
2890
2891 s = splbio();
2892 bundirty(bp);
2893 bp->b_flags &= ~B_DONE;
2894 bp->b_ioflags &= ~BIO_ERROR;
2895 bp->b_iocmd = BIO_WRITE;
2896
2897 bp->b_vp->v_numoutput++;
2898 curthread->td_proc->p_stats->p_ru.ru_oublock++;
2899 splx(s);
2900
2901 /*
2902 * Note: to avoid loopback deadlocks, we do not
2903 * assign b_runningbufspace.
2904 */
2905 vfs_busy_pages(bp, 1);
2906
2907 if (force)
2908 bp->b_flags |= B_WRITEINPROG;
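	/*
	 * BUF_KERNPROC() hands ownership of the buffer lock to the kernel
	 * so the write can complete (and the lock be released) from a
	 * context other than this thread; BUF_STRATEGY() then starts the
	 * I/O.
	 */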
2909 BUF_KERNPROC(bp);
2910 BUF_STRATEGY(bp);
2911
2912	if ((oldflags & B_ASYNC) == 0) {
2913 int rtval = bufwait(bp);
2914
2915 if (oldflags & B_DELWRI) {
2916 s = splbio();
2917 reassignbuf(bp, bp->b_vp);
2918 splx(s);
2919 }
2920
2921 brelse(bp);
2922 return (rtval);
2923 }
2924
2925 return (0);
2926}
2927
2928/*
2929 * nfs special file access vnode op.
2930 * Essentially just get vattr and then imitate iaccess() since the device is
2931 * local to the client.
2932 */
2933static int
2934nfsspec_access(struct vop_access_args *ap)
2935{
2936 struct vattr *vap;
2937 gid_t *gp;
2938 struct ucred *cred = ap->a_cred;
2939 struct vnode *vp = ap->a_vp;
2940 mode_t mode = ap->a_mode;
2941 struct vattr vattr;
2942 int i;
2943 int error;
2944
2945 /*
2946 * Disallow write attempts on filesystems mounted read-only,
2947 * unless the file is a socket, fifo, or a block or character
2948 * device resident on the filesystem.
2949 */
2950 if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
2951 switch (vp->v_type) {
2952 case VREG:
2953 case VDIR:
2954 case VLNK:
2955 return (EROFS);
2956 default:
2957 break;
2958 }
2959 }
2960 /*
2961 * If you're the super-user,
2962 * you always get access.
2963 */
2964 if (cred->cr_uid == 0)
2965 return (0);
2966 vap = &vattr;
2967 error = VOP_GETATTR(vp, vap, cred, ap->a_td);
2968 if (error)
2969 return (error);
2970 /*
2971 * Access check is based on only one of owner, group, public.
2972 * If not owner, then check group. If not a member of the
2973 * group, then check public access.
2974 */
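	/*
	 * mode arrives expressed in the owner bits (e.g. VWRITE == S_IWUSR);
	 * each "mode >>= 3" below moves the request down to the group bits
	 * and then to the "other" bits of va_mode.
	 */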
2975 if (cred->cr_uid != vap->va_uid) {
2976 mode >>= 3;
2977 gp = cred->cr_groups;
2978 for (i = 0; i < cred->cr_ngroups; i++, gp++)
2979 if (vap->va_gid == *gp)
2980 goto found;
2981 mode >>= 3;
2982found:
2983 ;
2984 }
2985 error = (vap->va_mode & mode) == mode ? 0 : EACCES;
2986 return (error);
2987}
2988
2989/*
2990 * Read wrapper for special devices.
2991 */
2992static int
2993nfsspec_read(struct vop_read_args *ap)
2994{
2995 struct nfsnode *np = VTONFS(ap->a_vp);
2996
2997 /*
2998 * Set access flag.
2999 */
3000 np->n_flag |= NACC;
3001 getnanotime(&np->n_atim);
3002 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3003}
3004
3005/*
3006 * Write wrapper for special devices.
3007 */
3008static int
3009nfsspec_write(struct vop_write_args *ap)
3010{
3011 struct nfsnode *np = VTONFS(ap->a_vp);
3012
3013 /*
3014 * Set update flag.
3015 */
3016 np->n_flag |= NUPD;
3017 getnanotime(&np->n_mtim);
3018 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3019}
3020
3021/*
3022 * Close wrapper for special devices.
3023 *
3024 * Update the times on the nfsnode then do device close.
3025 */
3026static int
3027nfsspec_close(struct vop_close_args *ap)
3028{
3029 struct vnode *vp = ap->a_vp;
3030 struct nfsnode *np = VTONFS(vp);
3031 struct vattr vattr;
3032
3033 if (np->n_flag & (NACC | NUPD)) {
3034 np->n_flag |= NCHG;
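		/*
		 * On the last close of a writable mount, push the locally
		 * recorded access/modify times back to the server via a
		 * setattr.
		 */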
3035 if (vp->v_usecount == 1 &&
3036 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3037 VATTR_NULL(&vattr);
3038 if (np->n_flag & NACC)
3039 vattr.va_atime = np->n_atim;
3040 if (np->n_flag & NUPD)
3041 vattr.va_mtime = np->n_mtim;
3042 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_td);
3043 }
3044 }
3045 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3046}
3047
3048/*
3049 * Read wrapper for fifos.
3050 */
3051static int
3052nfsfifo_read(struct vop_read_args *ap)
3053{
3054 struct nfsnode *np = VTONFS(ap->a_vp);
3055
3056 /*
3057 * Set access flag.
3058 */
3059 np->n_flag |= NACC;
3060 getnanotime(&np->n_atim);
3061 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3062}
3063
3064/*
3065 * Write wrapper for fifos.
3066 */
3067static int
3068nfsfifo_write(struct vop_write_args *ap)
3069{
3070 struct nfsnode *np = VTONFS(ap->a_vp);
3071
3072 /*
3073 * Set update flag.
3074 */
3075 np->n_flag |= NUPD;
3076 getnanotime(&np->n_mtim);
3077 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3078}
3079
3080/*
3081 * Close wrapper for fifos.
3082 *
3083 * Update the times on the nfsnode then do fifo close.
3084 */
3085static int
3086nfsfifo_close(struct vop_close_args *ap)
3087{
3088 struct vnode *vp = ap->a_vp;
3089 struct nfsnode *np = VTONFS(vp);
3090 struct vattr vattr;
3091 struct timespec ts;
3092
3093 if (np->n_flag & (NACC | NUPD)) {
3094 getnanotime(&ts);
3095 if (np->n_flag & NACC)
3096 np->n_atim = ts;
3097 if (np->n_flag & NUPD)
3098 np->n_mtim = ts;
3099 np->n_flag |= NCHG;
3100 if (vp->v_usecount == 1 &&
3101 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3102 VATTR_NULL(&vattr);
3103 if (np->n_flag & NACC)
3104 vattr.va_atime = np->n_atim;
3105 if (np->n_flag & NUPD)
3106 vattr.va_mtime = np->n_mtim;
3107 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_td);
3108 }
3109 }
3110 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3111}