fuse_vnops.c: r242727 (deleted) -> r248084 (added)
1/*
2 * Copyright (c) 2007-2009 Google Inc. and Amit Singh
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met:
8 *
9 * * Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * * Redistributions in binary form must reproduce the above
12 * copyright notice, this list of conditions and the following disclaimer
13 * in the documentation and/or other materials provided with the
14 * distribution.
15 * * Neither the name of Google Inc. nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 * Copyright (C) 2005 Csaba Henk.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 *
43 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 */
55
56#include <sys/cdefs.h>
57__FBSDID("$FreeBSD: head/sys/fs/fuse/fuse_vnops.c 242727 2012-11-08 00:32:49Z attilio $");
57__FBSDID("$FreeBSD: head/sys/fs/fuse/fuse_vnops.c 248084 2013-03-09 02:32:23Z attilio $");
58
59#include <sys/types.h>
60#include <sys/module.h>
61#include <sys/systm.h>
62#include <sys/errno.h>
63#include <sys/param.h>
64#include <sys/kernel.h>
65#include <sys/conf.h>
66#include <sys/uio.h>
67#include <sys/malloc.h>
68#include <sys/queue.h>
69#include <sys/lock.h>
70#include <sys/mutex.h>
70#include <sys/rwlock.h>
71#include <sys/sx.h>
72#include <sys/proc.h>
73#include <sys/mount.h>
74#include <sys/vnode.h>
75#include <sys/namei.h>
76#include <sys/stat.h>
77#include <sys/unistd.h>
78#include <sys/filedesc.h>
79#include <sys/file.h>
80#include <sys/fcntl.h>
81#include <sys/dirent.h>
82#include <sys/bio.h>
83#include <sys/buf.h>
84#include <sys/sysctl.h>
85
86#include <vm/vm.h>
87#include <vm/vm_extern.h>
88#include <vm/pmap.h>
89#include <vm/vm_map.h>
90#include <vm/vm_page.h>
91#include <vm/vm_param.h>
92#include <vm/vm_object.h>
93#include <vm/vm_pager.h>
94#include <vm/vnode_pager.h>
95#include <vm/vm_object.h>
96
97#include "fuse.h"
98#include "fuse_file.h"
99#include "fuse_internal.h"
100#include "fuse_ipc.h"
101#include "fuse_node.h"
102#include "fuse_param.h"
103#include "fuse_io.h"
104
105#include <sys/priv.h>
106
107#define FUSE_DEBUG_MODULE VNOPS
108#include "fuse_debug.h"
109
110/* vnode ops */
111static vop_access_t fuse_vnop_access;
112static vop_close_t fuse_vnop_close;
113static vop_create_t fuse_vnop_create;
114static vop_fsync_t fuse_vnop_fsync;
115static vop_getattr_t fuse_vnop_getattr;
116static vop_inactive_t fuse_vnop_inactive;
117static vop_link_t fuse_vnop_link;
118static vop_lookup_t fuse_vnop_lookup;
119static vop_mkdir_t fuse_vnop_mkdir;
120static vop_mknod_t fuse_vnop_mknod;
121static vop_open_t fuse_vnop_open;
122static vop_read_t fuse_vnop_read;
123static vop_readdir_t fuse_vnop_readdir;
124static vop_readlink_t fuse_vnop_readlink;
125static vop_reclaim_t fuse_vnop_reclaim;
126static vop_remove_t fuse_vnop_remove;
127static vop_rename_t fuse_vnop_rename;
128static vop_rmdir_t fuse_vnop_rmdir;
129static vop_setattr_t fuse_vnop_setattr;
130static vop_strategy_t fuse_vnop_strategy;
131static vop_symlink_t fuse_vnop_symlink;
132static vop_write_t fuse_vnop_write;
133static vop_getpages_t fuse_vnop_getpages;
134static vop_putpages_t fuse_vnop_putpages;
135static vop_print_t fuse_vnop_print;
136
137struct vop_vector fuse_vnops = {
138 .vop_default = &default_vnodeops,
139 .vop_access = fuse_vnop_access,
140 .vop_close = fuse_vnop_close,
141 .vop_create = fuse_vnop_create,
142 .vop_fsync = fuse_vnop_fsync,
143 .vop_getattr = fuse_vnop_getattr,
144 .vop_inactive = fuse_vnop_inactive,
145 .vop_link = fuse_vnop_link,
146 .vop_lookup = fuse_vnop_lookup,
147 .vop_mkdir = fuse_vnop_mkdir,
148 .vop_mknod = fuse_vnop_mknod,
149 .vop_open = fuse_vnop_open,
150 .vop_pathconf = vop_stdpathconf,
151 .vop_read = fuse_vnop_read,
152 .vop_readdir = fuse_vnop_readdir,
153 .vop_readlink = fuse_vnop_readlink,
154 .vop_reclaim = fuse_vnop_reclaim,
155 .vop_remove = fuse_vnop_remove,
156 .vop_rename = fuse_vnop_rename,
157 .vop_rmdir = fuse_vnop_rmdir,
158 .vop_setattr = fuse_vnop_setattr,
159 .vop_strategy = fuse_vnop_strategy,
160 .vop_symlink = fuse_vnop_symlink,
161 .vop_write = fuse_vnop_write,
162 .vop_getpages = fuse_vnop_getpages,
163 .vop_putpages = fuse_vnop_putpages,
164 .vop_print = fuse_vnop_print,
165};
166
167static u_long fuse_lookup_cache_hits = 0;
168
169SYSCTL_ULONG(_vfs_fuse, OID_AUTO, lookup_cache_hits, CTLFLAG_RD,
170 &fuse_lookup_cache_hits, 0, "");
171
172static u_long fuse_lookup_cache_misses = 0;
173
174SYSCTL_ULONG(_vfs_fuse, OID_AUTO, lookup_cache_misses, CTLFLAG_RD,
175 &fuse_lookup_cache_misses, 0, "");
176
177int fuse_lookup_cache_enable = 1;
178
179SYSCTL_INT(_vfs_fuse, OID_AUTO, lookup_cache_enable, CTLFLAG_RW,
180 &fuse_lookup_cache_enable, 0, "");
181
182/*
 183 * XXX: This feature is highly experimental and can lead to instabilities;
 184 * it needs revisiting before being enabled by default.
185 */
186static int fuse_reclaim_revoked = 0;
187
188SYSCTL_INT(_vfs_fuse, OID_AUTO, reclaim_revoked, CTLFLAG_RW,
189 &fuse_reclaim_revoked, 0, "");
190
191int fuse_pbuf_freecnt = -1;
192
193#define fuse_vm_page_lock(m) vm_page_lock((m));
194#define fuse_vm_page_unlock(m) vm_page_unlock((m));
195#define fuse_vm_page_lock_queues() ((void)0)
196#define fuse_vm_page_unlock_queues() ((void)0)
197
198/*
199 struct vnop_access_args {
200 struct vnode *a_vp;
201#if VOP_ACCESS_TAKES_ACCMODE_T
202 accmode_t a_accmode;
203#else
204 int a_mode;
205#endif
206 struct ucred *a_cred;
207 struct thread *a_td;
208 };
209*/
210static int
211fuse_vnop_access(struct vop_access_args *ap)
212{
213 struct vnode *vp = ap->a_vp;
214 int accmode = ap->a_accmode;
215 struct ucred *cred = ap->a_cred;
216
217 struct fuse_access_param facp;
218 struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp));
219
220 int err;
221
222 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
223
224 if (fuse_isdeadfs(vp)) {
225 if (vnode_isvroot(vp)) {
226 return 0;
227 }
228 return ENXIO;
229 }
230 if (!(data->dataflags & FSESS_INITED)) {
231 if (vnode_isvroot(vp)) {
232 if (priv_check_cred(cred, PRIV_VFS_ADMIN, 0) ||
233 (fuse_match_cred(data->daemoncred, cred) == 0)) {
234 return 0;
235 }
236 }
237 return EBADF;
238 }
239 if (vnode_islnk(vp)) {
240 return 0;
241 }
242 bzero(&facp, sizeof(facp));
243
244 err = fuse_internal_access(vp, accmode, &facp, ap->a_td, ap->a_cred);
245 FS_DEBUG2G("err=%d accmode=0x%x\n", err, accmode);
246 return err;
247}
248
249/*
250 struct vnop_close_args {
251 struct vnode *a_vp;
252 int a_fflag;
253 struct ucred *a_cred;
254 struct thread *a_td;
255 };
256*/
257static int
258fuse_vnop_close(struct vop_close_args *ap)
259{
260 struct vnode *vp = ap->a_vp;
261 struct ucred *cred = ap->a_cred;
262 int fflag = ap->a_fflag;
263 fufh_type_t fufh_type;
264
265 fuse_trace_printf_vnop();
266
267 if (fuse_isdeadfs(vp)) {
268 return 0;
269 }
270 if (vnode_isdir(vp)) {
271 if (fuse_filehandle_valid(vp, FUFH_RDONLY)) {
272 fuse_filehandle_close(vp, FUFH_RDONLY, NULL, cred);
273 }
274 return 0;
275 }
276 if (fflag & IO_NDELAY) {
277 return 0;
278 }
279 fufh_type = fuse_filehandle_xlate_from_fflags(fflag);
280
281 if (!fuse_filehandle_valid(vp, fufh_type)) {
282 int i;
283
284 for (i = 0; i < FUFH_MAXTYPE; i++)
285 if (fuse_filehandle_valid(vp, i))
286 break;
287 if (i == FUFH_MAXTYPE)
288 panic("FUSE: fufh type %d found to be invalid in close"
289 " (fflag=0x%x)\n",
290 fufh_type, fflag);
291 }
292 if ((VTOFUD(vp)->flag & FN_SIZECHANGE) != 0) {
293 fuse_vnode_savesize(vp, cred);
294 }
295 return 0;
296}
297
298/*
299 struct vnop_create_args {
300 struct vnode *a_dvp;
301 struct vnode **a_vpp;
302 struct componentname *a_cnp;
303 struct vattr *a_vap;
304 };
305*/
306static int
307fuse_vnop_create(struct vop_create_args *ap)
308{
309 struct vnode *dvp = ap->a_dvp;
310 struct vnode **vpp = ap->a_vpp;
311 struct componentname *cnp = ap->a_cnp;
312 struct vattr *vap = ap->a_vap;
313 struct thread *td = cnp->cn_thread;
314 struct ucred *cred = cnp->cn_cred;
315
316 struct fuse_open_in *foi;
317 struct fuse_entry_out *feo;
318 struct fuse_dispatcher fdi;
319 struct fuse_dispatcher *fdip = &fdi;
320
321 int err;
322
323 struct mount *mp = vnode_mount(dvp);
324 uint64_t parentnid = VTOFUD(dvp)->nid;
325 mode_t mode = MAKEIMODE(vap->va_type, vap->va_mode);
326 uint64_t x_fh_id;
327 uint32_t x_open_flags;
328
329 fuse_trace_printf_vnop();
330
331 if (fuse_isdeadfs(dvp)) {
332 return ENXIO;
333 }
334 bzero(&fdi, sizeof(fdi));
335
336 /* XXX: Will we ever want devices ? */
337 if ((vap->va_type != VREG)) {
338 MPASS(vap->va_type != VFIFO);
339 goto bringup;
340 }
341 debug_printf("parent nid = %ju, mode = %x\n", (uintmax_t)parentnid,
342 mode);
343
344 fdisp_init(fdip, sizeof(*foi) + cnp->cn_namelen + 1);
345 if (!fsess_isimpl(mp, FUSE_CREATE)) {
346 debug_printf("eh, daemon doesn't implement create?\n");
347 return (EINVAL);
348 }
349 fdisp_make(fdip, FUSE_CREATE, vnode_mount(dvp), parentnid, td, cred);
350
351 foi = fdip->indata;
352 foi->mode = mode;
353 foi->flags = O_CREAT | O_RDWR;
354
355 memcpy((char *)fdip->indata + sizeof(*foi), cnp->cn_nameptr,
356 cnp->cn_namelen);
357 ((char *)fdip->indata)[sizeof(*foi) + cnp->cn_namelen] = '\0';
358
359 err = fdisp_wait_answ(fdip);
360
361 if (err) {
362 if (err == ENOSYS)
363 fsess_set_notimpl(mp, FUSE_CREATE);
364 debug_printf("create: got err=%d from daemon\n", err);
365 goto out;
366 }
367bringup:
368 feo = fdip->answ;
369
370 if ((err = fuse_internal_checkentry(feo, VREG))) {
371 goto out;
372 }
373 err = fuse_vnode_get(mp, feo->nodeid, dvp, vpp, cnp, VREG);
374 if (err) {
375 struct fuse_release_in *fri;
376 uint64_t nodeid = feo->nodeid;
377 uint64_t fh_id = ((struct fuse_open_out *)(feo + 1))->fh;
378
379 fdisp_init(fdip, sizeof(*fri));
380 fdisp_make(fdip, FUSE_RELEASE, mp, nodeid, td, cred);
381 fri = fdip->indata;
382 fri->fh = fh_id;
383 fri->flags = OFLAGS(mode);
384 fuse_insert_callback(fdip->tick, fuse_internal_forget_callback);
385 fuse_insert_message(fdip->tick);
386 return err;
387 }
388 ASSERT_VOP_ELOCKED(*vpp, "fuse_vnop_create");
389
390 fdip->answ = feo + 1;
391
392 x_fh_id = ((struct fuse_open_out *)(feo + 1))->fh;
393 x_open_flags = ((struct fuse_open_out *)(feo + 1))->open_flags;
394 fuse_filehandle_init(*vpp, FUFH_RDWR, NULL, x_fh_id);
395 fuse_vnode_open(*vpp, x_open_flags, td);
396 cache_purge_negative(dvp);
397
398out:
399 fdisp_destroy(fdip);
400 return err;
401}
402
403/*
404 * Our vnop_fsync roughly corresponds to the FUSE_FSYNC method. The Linux
405 * version of FUSE also has a FUSE_FLUSH method.
406 *
407 * On Linux, fsync() synchronizes a file's complete in-core state with that
408 * on disk. The call is not supposed to return until the system has completed
409 * that action or until an error is detected.
410 *
411 * Linux also has an fdatasync() call that is similar to fsync() but is not
412 * required to update the metadata such as access time and modification time.
413 */
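/*
 * For reference, a minimal sketch of the wire format involved -- assuming
 * the fuse_fsync_in layout of this FUSE protocol generation, not code taken
 * from this revision:
 *
 *	struct fuse_fsync_in {
 *		uint64_t	fh;		-- daemon-side file handle
 *		uint32_t	fsync_flags;	-- bit 0 set: fdatasync-like
 *		uint32_t	padding;
 *	};
 *
 * fuse_internal_fsync(), called from the loop below, is expected to build
 * and send one such message for each valid file handle of the vnode.
 */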
414
415/*
416 struct vnop_fsync_args {
417 struct vnodeop_desc *a_desc;
418 struct vnode * a_vp;
419 struct ucred * a_cred;
420 int a_waitfor;
421 struct thread * a_td;
422 };
423*/
424static int
425fuse_vnop_fsync(struct vop_fsync_args *ap)
426{
427 struct vnode *vp = ap->a_vp;
428 struct thread *td = ap->a_td;
429
430 struct fuse_filehandle *fufh;
431 struct fuse_vnode_data *fvdat = VTOFUD(vp);
432
433 int type, err = 0;
434
435 fuse_trace_printf_vnop();
436
437 if (fuse_isdeadfs(vp)) {
438 return 0;
439 }
440 if ((err = vop_stdfsync(ap)))
441 return err;
442
443 if (!fsess_isimpl(vnode_mount(vp),
444 (vnode_vtype(vp) == VDIR ? FUSE_FSYNCDIR : FUSE_FSYNC))) {
445 goto out;
446 }
447 for (type = 0; type < FUFH_MAXTYPE; type++) {
448 fufh = &(fvdat->fufh[type]);
449 if (FUFH_IS_VALID(fufh)) {
450 fuse_internal_fsync(vp, td, NULL, fufh);
451 }
452 }
453
454out:
455 return 0;
456}
457
458/*
459 struct vnop_getattr_args {
460 struct vnode *a_vp;
461 struct vattr *a_vap;
462 struct ucred *a_cred;
463 struct thread *a_td;
464 };
465*/
466static int
467fuse_vnop_getattr(struct vop_getattr_args *ap)
468{
469 struct vnode *vp = ap->a_vp;
470 struct vattr *vap = ap->a_vap;
471 struct ucred *cred = ap->a_cred;
472 struct thread *td = curthread;
473 struct fuse_vnode_data *fvdat = VTOFUD(vp);
474
475 int err = 0;
476 int dataflags;
477 struct fuse_dispatcher fdi;
478
479 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
480
481 dataflags = fuse_get_mpdata(vnode_mount(vp))->dataflags;
482
483 /* Note that we are not bailing out on a dead file system just yet. */
484
485 if (!(dataflags & FSESS_INITED)) {
486 if (!vnode_isvroot(vp)) {
487 fdata_set_dead(fuse_get_mpdata(vnode_mount(vp)));
488 err = ENOTCONN;
489 debug_printf("fuse_getattr b: returning ENOTCONN\n");
490 return err;
491 } else {
492 goto fake;
493 }
494 }
495 fdisp_init(&fdi, 0);
496 if ((err = fdisp_simple_putget_vp(&fdi, FUSE_GETATTR, vp, td, cred))) {
497 if ((err == ENOTCONN) && vnode_isvroot(vp)) {
498 /* see comment at similar place in fuse_statfs() */
499 fdisp_destroy(&fdi);
500 goto fake;
501 }
502 if (err == ENOENT) {
503 fuse_internal_vnode_disappear(vp);
504 }
505 goto out;
506 }
507 cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
508 if (vap != VTOVA(vp)) {
509 memcpy(vap, VTOVA(vp), sizeof(*vap));
510 }
511 if (vap->va_type != vnode_vtype(vp)) {
512 fuse_internal_vnode_disappear(vp);
513 err = ENOENT;
514 goto out;
515 }
516 if ((fvdat->flag & FN_SIZECHANGE) != 0)
517 vap->va_size = fvdat->filesize;
518
519 if (vnode_isreg(vp) && (fvdat->flag & FN_SIZECHANGE) == 0) {
520 /*
521 * This is for those cases when the file size changed without us
522 * knowing, and we want to catch up.
523 */
524 off_t new_filesize = ((struct fuse_attr_out *)
525 fdi.answ)->attr.size;
526
527 if (fvdat->filesize != new_filesize) {
528 fuse_vnode_setsize(vp, cred, new_filesize);
529 }
530 }
531 debug_printf("fuse_getattr e: returning 0\n");
532
533out:
534 fdisp_destroy(&fdi);
535 return err;
536
537fake:
538 bzero(vap, sizeof(*vap));
539 vap->va_type = vnode_vtype(vp);
540
541 return 0;
542}
543
544/*
545 struct vnop_inactive_args {
546 struct vnode *a_vp;
547 struct thread *a_td;
548 };
549*/
550static int
551fuse_vnop_inactive(struct vop_inactive_args *ap)
552{
553 struct vnode *vp = ap->a_vp;
554 struct thread *td = ap->a_td;
555
556 struct fuse_vnode_data *fvdat = VTOFUD(vp);
557 struct fuse_filehandle *fufh = NULL;
558
559 int type, need_flush = 1;
560
561 FS_DEBUG("inode=%ju\n", (uintmax_t)VTOI(vp));
562
563 for (type = 0; type < FUFH_MAXTYPE; type++) {
564 fufh = &(fvdat->fufh[type]);
565 if (FUFH_IS_VALID(fufh)) {
566 if (need_flush && vp->v_type == VREG) {
567 if ((VTOFUD(vp)->flag & FN_SIZECHANGE) != 0) {
568 fuse_vnode_savesize(vp, NULL);
569 }
570 if (fuse_data_cache_invalidate ||
571 (fvdat->flag & FN_REVOKED) != 0)
572 fuse_io_invalbuf(vp, td);
573 else
574 fuse_io_flushbuf(vp, MNT_WAIT, td);
575 need_flush = 0;
576 }
577 fuse_filehandle_close(vp, type, td, NULL);
578 }
579 }
580
581 if ((fvdat->flag & FN_REVOKED) != 0 && fuse_reclaim_revoked) {
582 vrecycle(vp);
583 }
584 return 0;
585}
586
587/*
588 struct vnop_link_args {
589 struct vnode *a_tdvp;
590 struct vnode *a_vp;
591 struct componentname *a_cnp;
592 };
593*/
594static int
595fuse_vnop_link(struct vop_link_args *ap)
596{
597 struct vnode *vp = ap->a_vp;
598 struct vnode *tdvp = ap->a_tdvp;
599 struct componentname *cnp = ap->a_cnp;
600
601 struct vattr *vap = VTOVA(vp);
602
603 struct fuse_dispatcher fdi;
604 struct fuse_entry_out *feo;
605 struct fuse_link_in fli;
606
607 int err;
608
609 fuse_trace_printf_vnop();
610
611 if (fuse_isdeadfs(vp)) {
612 return ENXIO;
613 }
614 if (vnode_mount(tdvp) != vnode_mount(vp)) {
615 return EXDEV;
616 }
617 if (vap->va_nlink >= FUSE_LINK_MAX) {
618 return EMLINK;
619 }
620 fli.oldnodeid = VTOI(vp);
621
622 fdisp_init(&fdi, 0);
623 fuse_internal_newentry_makerequest(vnode_mount(tdvp), VTOI(tdvp), cnp,
624 FUSE_LINK, &fli, sizeof(fli), &fdi);
625 if ((err = fdisp_wait_answ(&fdi))) {
626 goto out;
627 }
628 feo = fdi.answ;
629
630 err = fuse_internal_checkentry(feo, vnode_vtype(vp));
631out:
632 fdisp_destroy(&fdi);
633 return err;
634}
635
636/*
637 struct vnop_lookup_args {
638 struct vnodeop_desc *a_desc;
639 struct vnode *a_dvp;
640 struct vnode **a_vpp;
641 struct componentname *a_cnp;
642 };
643*/
644int
645fuse_vnop_lookup(struct vop_lookup_args *ap)
646{
647 struct vnode *dvp = ap->a_dvp;
648 struct vnode **vpp = ap->a_vpp;
649 struct componentname *cnp = ap->a_cnp;
650 struct thread *td = cnp->cn_thread;
651 struct ucred *cred = cnp->cn_cred;
652
653 int nameiop = cnp->cn_nameiop;
654 int flags = cnp->cn_flags;
655 int wantparent = flags & (LOCKPARENT | WANTPARENT);
656 int islastcn = flags & ISLASTCN;
657 struct mount *mp = vnode_mount(dvp);
658
659 int err = 0;
660 int lookup_err = 0;
661 struct vnode *vp = NULL;
662
663 struct fuse_dispatcher fdi;
664 enum fuse_opcode op;
665
666 uint64_t nid;
667 struct fuse_access_param facp;
668
669 FS_DEBUG2G("parent_inode=%ju - %*s\n",
670 (uintmax_t)VTOI(dvp), (int)cnp->cn_namelen, cnp->cn_nameptr);
671
672 if (fuse_isdeadfs(dvp)) {
673 *vpp = NULL;
674 return ENXIO;
675 }
676 if (!vnode_isdir(dvp)) {
677 return ENOTDIR;
678 }
679 if (islastcn && vfs_isrdonly(mp) && (nameiop != LOOKUP)) {
680 return EROFS;
681 }
682 /*
 683 * We do the access check prior to doing anything else only in the case
684 * when we are at fs root (we'd like to say, "we are at the first
685 * component", but that's not exactly the same... nevermind).
686 * See further comments at further access checks.
687 */
688
689 bzero(&facp, sizeof(facp));
690 if (vnode_isvroot(dvp)) { /* early permission check hack */
691 if ((err = fuse_internal_access(dvp, VEXEC, &facp, td, cred))) {
692 return err;
693 }
694 }
695 if (flags & ISDOTDOT) {
696 nid = VTOFUD(dvp)->parent_nid;
697 if (nid == 0) {
698 return ENOENT;
699 }
700 fdisp_init(&fdi, 0);
701 op = FUSE_GETATTR;
702 goto calldaemon;
703 } else if (cnp->cn_namelen == 1 && *(cnp->cn_nameptr) == '.') {
704 nid = VTOI(dvp);
705 fdisp_init(&fdi, 0);
706 op = FUSE_GETATTR;
707 goto calldaemon;
708 } else if (fuse_lookup_cache_enable) {
709 err = cache_lookup(dvp, vpp, cnp, NULL, NULL);
710 switch (err) {
711
712 case -1: /* positive match */
713 atomic_add_acq_long(&fuse_lookup_cache_hits, 1);
714 return 0;
715
716 case 0: /* no match in cache */
717 atomic_add_acq_long(&fuse_lookup_cache_misses, 1);
718 break;
719
720 case ENOENT: /* negative match */
721 /* fall through */
722 default:
723 return err;
724 }
725 }
726 nid = VTOI(dvp);
727 fdisp_init(&fdi, cnp->cn_namelen + 1);
728 op = FUSE_LOOKUP;
729
730calldaemon:
731 fdisp_make(&fdi, op, mp, nid, td, cred);
732
733 if (op == FUSE_LOOKUP) {
734 memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
735 ((char *)fdi.indata)[cnp->cn_namelen] = '\0';
736 }
737 lookup_err = fdisp_wait_answ(&fdi);
738
739 if ((op == FUSE_LOOKUP) && !lookup_err) { /* lookup call succeeded */
740 nid = ((struct fuse_entry_out *)fdi.answ)->nodeid;
741 if (!nid) {
742 /*
743 * zero nodeid is the same as "not found",
744 * but it's also cacheable (which we keep
 745 * on not doing, as of writing this)
746 */
747 lookup_err = ENOENT;
748 } else if (nid == FUSE_ROOT_ID) {
749 lookup_err = EINVAL;
750 }
751 }
752 if (lookup_err &&
753 (!fdi.answ_stat || lookup_err != ENOENT || op != FUSE_LOOKUP)) {
754 fdisp_destroy(&fdi);
755 return lookup_err;
756 }
757 /* lookup_err, if non-zero, must be ENOENT at this point */
758
759 if (lookup_err) {
760
761 if ((nameiop == CREATE || nameiop == RENAME) && islastcn
762 /* && directory dvp has not been removed */ ) {
763
764 if (vfs_isrdonly(mp)) {
765 err = EROFS;
766 goto out;
767 }
768#if 0 /* THINK_ABOUT_THIS */
769 if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
770 goto out;
771 }
772#endif
773
774 /*
775 * Possibly record the position of a slot in the
776 * directory large enough for the new component name.
777 * This can be recorded in the vnode private data for
778 * dvp. Set the SAVENAME flag to hold onto the
779 * pathname for use later in VOP_CREATE or VOP_RENAME.
780 */
781 cnp->cn_flags |= SAVENAME;
782
783 err = EJUSTRETURN;
784 goto out;
785 }
786 /* Consider inserting name into cache. */
787
788 /*
 789 * No, we can't use negative caching, as the fs
 790 * changes are out of our control.
 791 * A false positive's falseness turns out by itself as things
 792 * go by, but a false negative's doesn't.
 793 * (and aiding the caching mechanism with extra control
 794 * mechanisms comes quite close to defeating the whole
 795 * purpose of caching...)
796 */
797#if 0
798 if ((cnp->cn_flags & MAKEENTRY) && nameiop != CREATE) {
799 FS_DEBUG("inserting NULL into cache\n");
800 cache_enter(dvp, NULL, cnp);
801 }
802#endif
803 err = ENOENT;
804 goto out;
805
806 } else {
807
808 /* !lookup_err */
809
810 struct fuse_entry_out *feo = NULL;
811 struct fuse_attr *fattr = NULL;
812
813 if (op == FUSE_GETATTR) {
814 fattr = &((struct fuse_attr_out *)fdi.answ)->attr;
815 } else {
816 feo = (struct fuse_entry_out *)fdi.answ;
817 fattr = &(feo->attr);
818 }
819
820 /*
821 * If deleting, and at end of pathname, return parameters
822 * which can be used to remove file. If the wantparent flag
823 * isn't set, we return only the directory, otherwise we go on
824 * and lock the inode, being careful with ".".
825 */
826 if (nameiop == DELETE && islastcn) {
827 /*
828 * Check for write access on directory.
829 */
830 facp.xuid = fattr->uid;
831 facp.facc_flags |= FACCESS_STICKY;
832 err = fuse_internal_access(dvp, VWRITE, &facp, td, cred);
833 facp.facc_flags &= ~FACCESS_XQUERIES;
834
835 if (err) {
836 goto out;
837 }
838 if (nid == VTOI(dvp)) {
839 vref(dvp);
840 *vpp = dvp;
841 } else {
842 err = fuse_vnode_get(dvp->v_mount, nid, dvp,
843 &vp, cnp, IFTOVT(fattr->mode));
844 if (err)
845 goto out;
846 *vpp = vp;
847 }
848
849 /*
850 * Save the name for use in VOP_RMDIR and VOP_REMOVE
851 * later.
852 */
853 cnp->cn_flags |= SAVENAME;
854 goto out;
855
856 }
857 /*
858 * If rewriting (RENAME), return the inode and the
 859 * information required to rewrite the present directory.
860 * Must get inode of directory entry to verify it's a
861 * regular file, or empty directory.
862 */
863 if (nameiop == RENAME && wantparent && islastcn) {
864
865#if 0 /* THINK_ABOUT_THIS */
866 if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
867 goto out;
868 }
869#endif
870
871 /*
872 * Check for "."
873 */
874 if (nid == VTOI(dvp)) {
875 err = EISDIR;
876 goto out;
877 }
878 err = fuse_vnode_get(vnode_mount(dvp),
879 nid,
880 dvp,
881 &vp,
882 cnp,
883 IFTOVT(fattr->mode));
884 if (err) {
885 goto out;
886 }
887 *vpp = vp;
888 /*
889 * Save the name for use in VOP_RENAME later.
890 */
891 cnp->cn_flags |= SAVENAME;
892
893 goto out;
894 }
895 if (flags & ISDOTDOT) {
896 struct mount *mp;
897 int ltype;
898
899 /*
900 * Expanded copy of vn_vget_ino() so that
901 * fuse_vnode_get() can be used.
902 */
903 mp = dvp->v_mount;
904 ltype = VOP_ISLOCKED(dvp);
905 err = vfs_busy(mp, MBF_NOWAIT);
906 if (err != 0) {
907 vfs_ref(mp);
908 VOP_UNLOCK(dvp, 0);
909 err = vfs_busy(mp, 0);
910 vn_lock(dvp, ltype | LK_RETRY);
911 vfs_rel(mp);
912 if (err)
913 goto out;
914 if ((dvp->v_iflag & VI_DOOMED) != 0) {
915 err = ENOENT;
916 vfs_unbusy(mp);
917 goto out;
918 }
919 }
920 VOP_UNLOCK(dvp, 0);
921 err = fuse_vnode_get(vnode_mount(dvp),
922 nid,
923 NULL,
924 &vp,
925 cnp,
926 IFTOVT(fattr->mode));
927 vfs_unbusy(mp);
928 vn_lock(dvp, ltype | LK_RETRY);
929 if ((dvp->v_iflag & VI_DOOMED) != 0) {
930 if (err == 0)
931 vput(vp);
932 err = ENOENT;
933 }
934 if (err)
935 goto out;
936 *vpp = vp;
937 } else if (nid == VTOI(dvp)) {
938 vref(dvp);
939 *vpp = dvp;
940 } else {
941 err = fuse_vnode_get(vnode_mount(dvp),
942 nid,
943 dvp,
944 &vp,
945 cnp,
946 IFTOVT(fattr->mode));
947 if (err) {
948 goto out;
949 }
950 fuse_vnode_setparent(vp, dvp);
951 *vpp = vp;
952 }
953
954 if (op == FUSE_GETATTR) {
955 cache_attrs(*vpp, (struct fuse_attr_out *)fdi.answ);
956 } else {
957 cache_attrs(*vpp, (struct fuse_entry_out *)fdi.answ);
958 }
959
960 /* Insert name into cache if appropriate. */
961
962 /*
963 * Nooo, caching is evil. With caching, we can't avoid stale
964 * information taking over the playground (cached info is not
965 * just positive/negative, it does have qualitative aspects,
966 * too). And a (VOP/FUSE)_GETATTR is always thrown anyway, when
967 * walking down along cached path components, and that's not
968 * any cheaper than FUSE_LOOKUP. This might change with
969 * implementing kernel side attr caching, but... In Linux,
970 * lookup results are not cached, and the daemon is bombarded
971 * with FUSE_LOOKUPS on and on. This shows that by design, the
972 * daemon is expected to handle frequent lookup queries
973 * efficiently, do its caching in userspace, and so on.
974 *
975 * So just leave the name cache alone.
976 */
977
978 /*
979 * Well, now I know, Linux caches lookups, but with a
980 * timeout... So it's the same thing as attribute caching:
981 * we can deal with it when implement timeouts.
982 */
983#if 0
984 if (cnp->cn_flags & MAKEENTRY) {
985 cache_enter(dvp, *vpp, cnp);
986 }
987#endif
988 }
989out:
990 if (!lookup_err) {
991
992 /* No lookup error; need to clean up. */
993
994 if (err) { /* Found inode; exit with no vnode. */
995 if (op == FUSE_LOOKUP) {
996 fuse_internal_forget_send(vnode_mount(dvp), td, cred,
997 nid, 1);
998 }
999 fdisp_destroy(&fdi);
1000 return err;
1001 } else {
1002#ifndef NO_EARLY_PERM_CHECK_HACK
1003 if (!islastcn) {
1004 /*
1005 * We have the attributes of the next item
1006 * *now*, and it's a fact, and we do not
1007 * have to do extra work for it (ie, beg the
1008 * daemon), and it neither depends on such
1009 * accidental things like attr caching. So
1010 * the big idea: check credentials *now*,
1011 * not at the beginning of the next call to
1012 * lookup.
1013 *
1014 * The first item of the lookup chain (fs root)
1015 * won't be checked then here, of course, as
 1016 * it's never "the next". But go and see that
 1017 * the root is taken care of at the very
1018 * beginning of this function.
1019 *
1020 * Now, given we want to do the access check
1021 * this way, one might ask: so then why not
1022 * do the access check just after fetching
1023 * the inode and its attributes from the
1024 * daemon? Why bother with producing the
1025 * corresponding vnode at all if something
1026 * is not OK? We know what's the deal as
1027 * soon as we get those attrs... There is
1028 * one bit of info though not given us by
1029 * the daemon: whether his response is
 1030 * authoritative or not... His response should
1031 * be ignored if something is mounted over
1032 * the dir in question. But that can be
1033 * known only by having the vnode...
1034 */
1035 int tmpvtype = vnode_vtype(*vpp);
1036
1037 bzero(&facp, sizeof(facp));
1038 /*the early perm check hack */
1039 facp.facc_flags |= FACCESS_VA_VALID;
1040
1041 if ((tmpvtype != VDIR) && (tmpvtype != VLNK)) {
1042 err = ENOTDIR;
1043 }
1044 if (!err && !vnode_mountedhere(*vpp)) {
1045 err = fuse_internal_access(*vpp, VEXEC, &facp, td, cred);
1046 }
1047 if (err) {
1048 if (tmpvtype == VLNK)
1049 FS_DEBUG("weird, permission error with a symlink?\n");
1050 vput(*vpp);
1051 *vpp = NULL;
1052 }
1053 }
1054#endif
1055 }
1056 }
1057 fdisp_destroy(&fdi);
1058
1059 return err;
1060}
1061
1062/*
1063 struct vnop_mkdir_args {
1064 struct vnode *a_dvp;
1065 struct vnode **a_vpp;
1066 struct componentname *a_cnp;
1067 struct vattr *a_vap;
1068 };
1069*/
1070static int
1071fuse_vnop_mkdir(struct vop_mkdir_args *ap)
1072{
1073 struct vnode *dvp = ap->a_dvp;
1074 struct vnode **vpp = ap->a_vpp;
1075 struct componentname *cnp = ap->a_cnp;
1076 struct vattr *vap = ap->a_vap;
1077
1078 struct fuse_mkdir_in fmdi;
1079
1080 fuse_trace_printf_vnop();
1081
1082 if (fuse_isdeadfs(dvp)) {
1083 return ENXIO;
1084 }
1085 fmdi.mode = MAKEIMODE(vap->va_type, vap->va_mode);
1086
1087 return (fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKDIR, &fmdi,
1088 sizeof(fmdi), VDIR));
1089}
1090
1091/*
1092 struct vnop_mknod_args {
1093 struct vnode *a_dvp;
1094 struct vnode **a_vpp;
1095 struct componentname *a_cnp;
1096 struct vattr *a_vap;
1097 };
1098*/
1099static int
1100fuse_vnop_mknod(struct vop_mknod_args *ap)
1101{
1102
1103 return (EINVAL);
1104}
1105
1106
1107/*
1108 struct vnop_open_args {
1109 struct vnode *a_vp;
1110 int a_mode;
1111 struct ucred *a_cred;
1112 struct thread *a_td;
1113 int a_fdidx; / struct file *a_fp;
1114 };
1115*/
1116static int
1117fuse_vnop_open(struct vop_open_args *ap)
1118{
1119 struct vnode *vp = ap->a_vp;
1120 int mode = ap->a_mode;
1121 struct thread *td = ap->a_td;
1122 struct ucred *cred = ap->a_cred;
1123
1124 fufh_type_t fufh_type;
1125 struct fuse_vnode_data *fvdat;
1126
1127 int error, isdir = 0;
1128
1129 FS_DEBUG2G("inode=%ju mode=0x%x\n", (uintmax_t)VTOI(vp), mode);
1130
1131 if (fuse_isdeadfs(vp)) {
1132 return ENXIO;
1133 }
1134 fvdat = VTOFUD(vp);
1135
1136 if (vnode_isdir(vp)) {
1137 isdir = 1;
1138 }
1139 if (isdir) {
1140 fufh_type = FUFH_RDONLY;
1141 } else {
1142 fufh_type = fuse_filehandle_xlate_from_fflags(mode);
1143 }
1144
1145 if (fuse_filehandle_valid(vp, fufh_type)) {
1146 fuse_vnode_open(vp, 0, td);
1147 return 0;
1148 }
1149 error = fuse_filehandle_open(vp, fufh_type, NULL, td, cred);
1150
1151 return error;
1152}
1153
1154/*
1155 struct vnop_read_args {
1156 struct vnode *a_vp;
1157 struct uio *a_uio;
1158 int a_ioflag;
1159 struct ucred *a_cred;
1160 };
1161*/
1162static int
1163fuse_vnop_read(struct vop_read_args *ap)
1164{
1165 struct vnode *vp = ap->a_vp;
1166 struct uio *uio = ap->a_uio;
1167 int ioflag = ap->a_ioflag;
1168 struct ucred *cred = ap->a_cred;
1169
1170 FS_DEBUG2G("inode=%ju offset=%jd resid=%zd\n",
1171 (uintmax_t)VTOI(vp), uio->uio_offset, uio->uio_resid);
1172
1173 if (fuse_isdeadfs(vp)) {
1174 return ENXIO;
1175 }
1176 return fuse_io_dispatch(vp, uio, ioflag, cred);
1177}
1178
1179/*
1180 struct vnop_readdir_args {
1181 struct vnode *a_vp;
1182 struct uio *a_uio;
1183 struct ucred *a_cred;
1184 int *a_eofflag;
1185 int *ncookies;
1186 u_long **a_cookies;
1187 };
1188*/
1189static int
1190fuse_vnop_readdir(struct vop_readdir_args *ap)
1191{
1192 struct vnode *vp = ap->a_vp;
1193 struct uio *uio = ap->a_uio;
1194 struct ucred *cred = ap->a_cred;
1195
1196 struct fuse_filehandle *fufh = NULL;
1197 struct fuse_vnode_data *fvdat;
1198 struct fuse_iov cookediov;
1199
1200 int err = 0;
1201 int freefufh = 0;
1202
1203 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
1204
1205 if (fuse_isdeadfs(vp)) {
1206 return ENXIO;
1207 }
1208 if ( /* XXXIP ((uio_iovcnt(uio) > 1)) || */
1209 (uio_resid(uio) < sizeof(struct dirent))) {
1210 return EINVAL;
1211 }
1212 fvdat = VTOFUD(vp);
1213
1214 if (!fuse_filehandle_valid(vp, FUFH_RDONLY)) {
1215 FS_DEBUG("calling readdir() before open()");
1216 err = fuse_filehandle_open(vp, FUFH_RDONLY, &fufh, NULL, cred);
1217 freefufh = 1;
1218 } else {
1219 err = fuse_filehandle_get(vp, FUFH_RDONLY, &fufh);
1220 }
1221 if (err) {
1222 return (err);
1223 }
1224#define DIRCOOKEDSIZE FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + MAXNAMLEN + 1)
1225 fiov_init(&cookediov, DIRCOOKEDSIZE);
1226
1227 err = fuse_internal_readdir(vp, uio, fufh, &cookediov);
1228
1229 fiov_teardown(&cookediov);
1230 if (freefufh) {
1231 fuse_filehandle_close(vp, FUFH_RDONLY, NULL, cred);
1232 }
1233 return err;
1234}
1235
1236/*
1237 struct vnop_readlink_args {
1238 struct vnode *a_vp;
1239 struct uio *a_uio;
1240 struct ucred *a_cred;
1241 };
1242*/
1243static int
1244fuse_vnop_readlink(struct vop_readlink_args *ap)
1245{
1246 struct vnode *vp = ap->a_vp;
1247 struct uio *uio = ap->a_uio;
1248 struct ucred *cred = ap->a_cred;
1249
1250 struct fuse_dispatcher fdi;
1251 int err;
1252
1253 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
1254
1255 if (fuse_isdeadfs(vp)) {
1256 return ENXIO;
1257 }
1258 if (!vnode_islnk(vp)) {
1259 return EINVAL;
1260 }
1261 fdisp_init(&fdi, 0);
1262 err = fdisp_simple_putget_vp(&fdi, FUSE_READLINK, vp, curthread, cred);
1263 if (err) {
1264 goto out;
1265 }
1266 if (((char *)fdi.answ)[0] == '/' &&
1267 fuse_get_mpdata(vnode_mount(vp))->dataflags & FSESS_PUSH_SYMLINKS_IN) {
1268 char *mpth = vnode_mount(vp)->mnt_stat.f_mntonname;
1269
1270 err = uiomove(mpth, strlen(mpth), uio);
1271 }
1272 if (!err) {
1273 err = uiomove(fdi.answ, fdi.iosize, uio);
1274 }
1275out:
1276 fdisp_destroy(&fdi);
1277 return err;
1278}
1279
1280/*
1281 struct vnop_reclaim_args {
1282 struct vnode *a_vp;
1283 struct thread *a_td;
1284 };
1285*/
1286static int
1287fuse_vnop_reclaim(struct vop_reclaim_args *ap)
1288{
1289 struct vnode *vp = ap->a_vp;
1290 struct thread *td = ap->a_td;
1291
1292 struct fuse_vnode_data *fvdat = VTOFUD(vp);
1293 struct fuse_filehandle *fufh = NULL;
1294
1295 int type;
1296
1297 if (!fvdat) {
1298 panic("FUSE: no vnode data during recycling");
1299 }
1300 FS_DEBUG("inode=%ju\n", (uintmax_t)VTOI(vp));
1301
1302 for (type = 0; type < FUFH_MAXTYPE; type++) {
1303 fufh = &(fvdat->fufh[type]);
1304 if (FUFH_IS_VALID(fufh)) {
1305 printf("FUSE: vnode being reclaimed but fufh (type=%d) is valid",
1306 type);
1307 fuse_filehandle_close(vp, type, td, NULL);
1308 }
1309 }
1310
1311 if ((!fuse_isdeadfs(vp)) && (fvdat->nlookup)) {
1312 fuse_internal_forget_send(vnode_mount(vp), td, NULL, VTOI(vp),
1313 fvdat->nlookup);
1314 }
1315 fuse_vnode_setparent(vp, NULL);
1316 cache_purge(vp);
1317 vfs_hash_remove(vp);
1318 vnode_destroy_vobject(vp);
1319 fuse_vnode_destroy(vp);
1320
1321 return 0;
1322}
1323
1324/*
1325 struct vnop_remove_args {
1326 struct vnode *a_dvp;
1327 struct vnode *a_vp;
1328 struct componentname *a_cnp;
1329 };
1330*/
1331static int
1332fuse_vnop_remove(struct vop_remove_args *ap)
1333{
1334 struct vnode *dvp = ap->a_dvp;
1335 struct vnode *vp = ap->a_vp;
1336 struct componentname *cnp = ap->a_cnp;
1337
1338 int err;
1339
1340 FS_DEBUG2G("inode=%ju name=%*s\n",
1341 (uintmax_t)VTOI(vp), (int)cnp->cn_namelen, cnp->cn_nameptr);
1342
1343 if (fuse_isdeadfs(vp)) {
1344 return ENXIO;
1345 }
1346 if (vnode_isdir(vp)) {
1347 return EPERM;
1348 }
1349 cache_purge(vp);
1350
1351 err = fuse_internal_remove(dvp, vp, cnp, FUSE_UNLINK);
1352
1353 if (err == 0)
1354 fuse_internal_vnode_disappear(vp);
1355 return err;
1356}
1357
1358/*
1359 struct vnop_rename_args {
1360 struct vnode *a_fdvp;
1361 struct vnode *a_fvp;
1362 struct componentname *a_fcnp;
1363 struct vnode *a_tdvp;
1364 struct vnode *a_tvp;
1365 struct componentname *a_tcnp;
1366 };
1367*/
1368static int
1369fuse_vnop_rename(struct vop_rename_args *ap)
1370{
1371 struct vnode *fdvp = ap->a_fdvp;
1372 struct vnode *fvp = ap->a_fvp;
1373 struct componentname *fcnp = ap->a_fcnp;
1374 struct vnode *tdvp = ap->a_tdvp;
1375 struct vnode *tvp = ap->a_tvp;
1376 struct componentname *tcnp = ap->a_tcnp;
1377 struct fuse_data *data;
1378
1379 int err = 0;
1380
1381 FS_DEBUG2G("from: inode=%ju name=%*s -> to: inode=%ju name=%*s\n",
1382 (uintmax_t)VTOI(fvp), (int)fcnp->cn_namelen, fcnp->cn_nameptr,
1383 (uintmax_t)(tvp == NULL ? -1 : VTOI(tvp)),
1384 (int)tcnp->cn_namelen, tcnp->cn_nameptr);
1385
1386 if (fuse_isdeadfs(fdvp)) {
1387 return ENXIO;
1388 }
1389 if (fvp->v_mount != tdvp->v_mount ||
1390 (tvp && fvp->v_mount != tvp->v_mount)) {
1391 FS_DEBUG("cross-device rename: %s -> %s\n",
1392 fcnp->cn_nameptr, (tcnp != NULL ? tcnp->cn_nameptr : "(NULL)"));
1393 err = EXDEV;
1394 goto out;
1395 }
1396 cache_purge(fvp);
1397
1398 /*
 1399 * The FUSE library is expected to check that the target directory is
 1400 * not under the source directory in the file system tree.
 1401 * Linux performs this check at the VFS level.
1402 */
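	/*
	 * Illustrative example of the case in question (not from the original
	 * source): "mv /mnt/a /mnt/a/b" must fail with EINVAL, but neither
	 * this vnop nor the VFS layer above verifies that tdvp is not a
	 * descendant of fvp, so the daemon side is expected to reject such a
	 * rename.
	 */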
1403 data = fuse_get_mpdata(vnode_mount(tdvp));
1404 sx_xlock(&data->rename_lock);
1405 err = fuse_internal_rename(fdvp, fcnp, tdvp, tcnp);
1406 if (err == 0) {
1407 if (tdvp != fdvp)
1408 fuse_vnode_setparent(fvp, tdvp);
1409 if (tvp != NULL)
1410 fuse_vnode_setparent(tvp, NULL);
1411 }
1412 sx_unlock(&data->rename_lock);
1413
1414 if (tvp != NULL && tvp != fvp) {
1415 cache_purge(tvp);
1416 }
1417 if (vnode_isdir(fvp)) {
1418 if ((tvp != NULL) && vnode_isdir(tvp)) {
1419 cache_purge(tdvp);
1420 }
1421 cache_purge(fdvp);
1422 }
1423out:
1424 if (tdvp == tvp) {
1425 vrele(tdvp);
1426 } else {
1427 vput(tdvp);
1428 }
1429 if (tvp != NULL) {
1430 vput(tvp);
1431 }
1432 vrele(fdvp);
1433 vrele(fvp);
1434
1435 return err;
1436}
1437
1438/*
1439 struct vnop_rmdir_args {
1440 struct vnode *a_dvp;
1441 struct vnode *a_vp;
1442 struct componentname *a_cnp;
1443 } *ap;
1444*/
1445static int
1446fuse_vnop_rmdir(struct vop_rmdir_args *ap)
1447{
1448 struct vnode *dvp = ap->a_dvp;
1449 struct vnode *vp = ap->a_vp;
1450
1451 int err;
1452
1453 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
1454
1455 if (fuse_isdeadfs(vp)) {
1456 return ENXIO;
1457 }
1458 if (VTOFUD(vp) == VTOFUD(dvp)) {
1459 return EINVAL;
1460 }
1461 err = fuse_internal_remove(dvp, vp, ap->a_cnp, FUSE_RMDIR);
1462
1463 if (err == 0)
1464 fuse_internal_vnode_disappear(vp);
1465 return err;
1466}
1467
1468/*
1469 struct vnop_setattr_args {
1470 struct vnode *a_vp;
1471 struct vattr *a_vap;
1472 struct ucred *a_cred;
1473 struct thread *a_td;
1474 };
1475*/
1476static int
1477fuse_vnop_setattr(struct vop_setattr_args *ap)
1478{
1479 struct vnode *vp = ap->a_vp;
1480 struct vattr *vap = ap->a_vap;
1481 struct ucred *cred = ap->a_cred;
1482 struct thread *td = curthread;
1483
1484 struct fuse_dispatcher fdi;
1485 struct fuse_setattr_in *fsai;
1486 struct fuse_access_param facp;
1487
1488 int err = 0;
1489 enum vtype vtyp;
1490 int sizechanged = 0;
1491 uint64_t newsize = 0;
1492
1493 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
1494
1495 if (fuse_isdeadfs(vp)) {
1496 return ENXIO;
1497 }
1498 fdisp_init(&fdi, sizeof(*fsai));
1499 fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
1500 fsai = fdi.indata;
1501 fsai->valid = 0;
1502
1503 bzero(&facp, sizeof(facp));
1504
1505 facp.xuid = vap->va_uid;
1506 facp.xgid = vap->va_gid;
1507
1508 if (vap->va_uid != (uid_t)VNOVAL) {
1509 facp.facc_flags |= FACCESS_CHOWN;
1510 fsai->uid = vap->va_uid;
1511 fsai->valid |= FATTR_UID;
1512 }
1513 if (vap->va_gid != (gid_t)VNOVAL) {
1514 facp.facc_flags |= FACCESS_CHOWN;
1515 fsai->gid = vap->va_gid;
1516 fsai->valid |= FATTR_GID;
1517 }
1518 if (vap->va_size != VNOVAL) {
1519
1520 struct fuse_filehandle *fufh = NULL;
1521
1522 /*Truncate to a new value. */
1523 fsai->size = vap->va_size;
1524 sizechanged = 1;
1525 newsize = vap->va_size;
1526 fsai->valid |= FATTR_SIZE;
1527
1528 fuse_filehandle_getrw(vp, FUFH_WRONLY, &fufh);
1529 if (fufh) {
1530 fsai->fh = fufh->fh_id;
1531 fsai->valid |= FATTR_FH;
1532 }
1533 }
1534 if (vap->va_atime.tv_sec != VNOVAL) {
1535 fsai->atime = vap->va_atime.tv_sec;
1536 fsai->atimensec = vap->va_atime.tv_nsec;
1537 fsai->valid |= FATTR_ATIME;
1538 }
1539 if (vap->va_mtime.tv_sec != VNOVAL) {
1540 fsai->mtime = vap->va_mtime.tv_sec;
1541 fsai->mtimensec = vap->va_mtime.tv_nsec;
1542 fsai->valid |= FATTR_MTIME;
1543 }
1544 if (vap->va_mode != (mode_t)VNOVAL) {
1545 fsai->mode = vap->va_mode & ALLPERMS;
1546 fsai->valid |= FATTR_MODE;
1547 }
1548 if (!fsai->valid) {
1549 goto out;
1550 }
1551 vtyp = vnode_vtype(vp);
1552
1553 if (fsai->valid & FATTR_SIZE && vtyp == VDIR) {
1554 err = EISDIR;
1555 goto out;
1556 }
1557 if (vfs_isrdonly(vnode_mount(vp)) && (fsai->valid & ~FATTR_SIZE || vtyp == VREG)) {
1558 err = EROFS;
1559 goto out;
1560 }
1561 if (fsai->valid & ~FATTR_SIZE) {
1562 /*err = fuse_internal_access(vp, VADMIN, context, &facp); */
1563 /*XXX */
1564 err = 0;
1565 }
1566 facp.facc_flags &= ~FACCESS_XQUERIES;
1567
1568 if (err && !(fsai->valid & ~(FATTR_ATIME | FATTR_MTIME)) &&
1569 vap->va_vaflags & VA_UTIMES_NULL) {
1570 err = fuse_internal_access(vp, VWRITE, &facp, td, cred);
1571 }
1572 if (err)
1573 goto out;
1574 if ((err = fdisp_wait_answ(&fdi)))
1575 goto out;
1576 vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode);
1577
1578 if (vnode_vtype(vp) != vtyp) {
1579 if (vnode_vtype(vp) == VNON && vtyp != VNON) {
1580 debug_printf("FUSE: Dang! vnode_vtype is VNON and vtype isn't.\n");
1581 } else {
1582 /*
1583 * STALE vnode, ditch
1584 *
1585 * The vnode has changed its type "behind our back". There's
1586 * nothing really we can do, so let us just force an internal
1587 * revocation and tell the caller to try again, if interested.
1588 */
1589 fuse_internal_vnode_disappear(vp);
1590 err = EAGAIN;
1591 }
1592 }
1593 if (!err && !sizechanged) {
1594 cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
1595 }
1596out:
1597 fdisp_destroy(&fdi);
1598 if (!err && sizechanged) {
1599 fuse_vnode_setsize(vp, cred, newsize);
1600 VTOFUD(vp)->flag &= ~FN_SIZECHANGE;
1601 }
1602 return err;
1603}
1604
1605/*
1606 struct vnop_strategy_args {
1607 struct vnode *a_vp;
1608 struct buf *a_bp;
1609 };
1610*/
1611static int
1612fuse_vnop_strategy(struct vop_strategy_args *ap)
1613{
1614 struct vnode *vp = ap->a_vp;
1615 struct buf *bp = ap->a_bp;
1616
1617 fuse_trace_printf_vnop();
1618
1619 if (!vp || fuse_isdeadfs(vp)) {
1620 bp->b_ioflags |= BIO_ERROR;
1621 bp->b_error = ENXIO;
1622 bufdone(bp);
1623 return ENXIO;
1624 }
1625 if (bp->b_iocmd == BIO_WRITE)
1626 fuse_vnode_refreshsize(vp, NOCRED);
1627
1628 (void)fuse_io_strategy(vp, bp);
1629
1630 /*
 1631 * This is a dangerous function. If it returns an error, that might mean a
1632 * panic. We prefer pretty much anything over being forced to panic
1633 * by a malicious daemon (a demon?). So we just return 0 anyway. You
1634 * should never mind this: this function has its own error
1635 * propagation mechanism via the argument buffer, so
1636 * not-that-melodramatic residents of the call chain still will be
1637 * able to know what to do.
1638 */
1639 return 0;
1640}
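/*
 * Caller-side sketch of that propagation path (illustrative only, relying on
 * standard buf(9) conventions rather than code from this file):
 *
 *	VOP_STRATEGY(vp, bp);
 *	error = bufwait(bp);	-- picks up bp->b_error when BIO_ERROR is set
 */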
1641
1642
1643/*
1644 struct vnop_symlink_args {
1645 struct vnode *a_dvp;
1646 struct vnode **a_vpp;
1647 struct componentname *a_cnp;
1648 struct vattr *a_vap;
1649 char *a_target;
1650 };
1651*/
1652static int
1653fuse_vnop_symlink(struct vop_symlink_args *ap)
1654{
1655 struct vnode *dvp = ap->a_dvp;
1656 struct vnode **vpp = ap->a_vpp;
1657 struct componentname *cnp = ap->a_cnp;
1658 char *target = ap->a_target;
1659
1660 struct fuse_dispatcher fdi;
1661
1662 int err;
1663 size_t len;
1664
1665 FS_DEBUG2G("inode=%ju name=%*s\n",
1666 (uintmax_t)VTOI(dvp), (int)cnp->cn_namelen, cnp->cn_nameptr);
1667
1668 if (fuse_isdeadfs(dvp)) {
1669 return ENXIO;
1670 }
1671 /*
1672 * Unlike the other creator type calls, here we have to create a message
1673 * where the name of the new entry comes first, and the data describing
1674 * the entry comes second.
1675 * Hence we can't rely on our handy fuse_internal_newentry() routine,
1676 * but put together the message manually and just call the core part.
1677 */
1678
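	/*
	 * Buffer layout sketch (illustrative only, not part of the original
	 * source): for a symlink named <name> pointing at <target>, the
	 * FUSE_SYMLINK body assembled below is
	 *
	 *	| name bytes | '\0' | target bytes | '\0' |
	 *
	 * i.e. cn_namelen + 1 bytes of name followed by strlen(target) + 1
	 * bytes of target, matching the two memcpy() calls that follow.
	 */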
1679 len = strlen(target) + 1;
1680 fdisp_init(&fdi, len + cnp->cn_namelen + 1);
1681 fdisp_make_vp(&fdi, FUSE_SYMLINK, dvp, curthread, NULL);
1682
1683 memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
1684 ((char *)fdi.indata)[cnp->cn_namelen] = '\0';
1685 memcpy((char *)fdi.indata + cnp->cn_namelen + 1, target, len);
1686
1687 err = fuse_internal_newentry_core(dvp, vpp, cnp, VLNK, &fdi);
1688 fdisp_destroy(&fdi);
1689 return err;
1690}
1691
1692/*
1693 struct vnop_write_args {
1694 struct vnode *a_vp;
1695 struct uio *a_uio;
1696 int a_ioflag;
1697 struct ucred *a_cred;
1698 };
1699*/
1700static int
1701fuse_vnop_write(struct vop_write_args *ap)
1702{
1703 struct vnode *vp = ap->a_vp;
1704 struct uio *uio = ap->a_uio;
1705 int ioflag = ap->a_ioflag;
1706 struct ucred *cred = ap->a_cred;
1707
1708 fuse_trace_printf_vnop();
1709
1710 if (fuse_isdeadfs(vp)) {
1711 return ENXIO;
1712 }
1713 fuse_vnode_refreshsize(vp, cred);
1714
1715 return fuse_io_dispatch(vp, uio, ioflag, cred);
1716}
1717
1718/*
1719 struct vnop_getpages_args {
1720 struct vnode *a_vp;
1721 vm_page_t *a_m;
1722 int a_count;
1723 int a_reqpage;
1724 vm_ooffset_t a_offset;
1725 };
1726*/
1727static int
1728fuse_vnop_getpages(struct vop_getpages_args *ap)
1729{
1730 int i, error, nextoff, size, toff, count, npages;
1731 struct uio uio;
1732 struct iovec iov;
1733 vm_offset_t kva;
1734 struct buf *bp;
1735 struct vnode *vp;
1736 struct thread *td;
1737 struct ucred *cred;
1738 vm_page_t *pages;
1739
1740 FS_DEBUG2G("heh\n");
1741
1742 vp = ap->a_vp;
1743 KASSERT(vp->v_object, ("objectless vp passed to getpages"));
1744 td = curthread; /* XXX */
1745 cred = curthread->td_ucred; /* XXX */
1746 pages = ap->a_m;
1747 count = ap->a_count;
1748
1749 if (!fsess_opt_mmap(vnode_mount(vp))) {
1750 FS_DEBUG("called on non-cacheable vnode??\n");
1751 return (VM_PAGER_ERROR);
1752 }
1753 npages = btoc(count);
1754
1755 /*
1756 * If the requested page is partially valid, just return it and
1757 * allow the pager to zero-out the blanks. Partially valid pages
1758 * can only occur at the file EOF.
1759 */
1760
71#include <sys/sx.h>
72#include <sys/proc.h>
73#include <sys/mount.h>
74#include <sys/vnode.h>
75#include <sys/namei.h>
76#include <sys/stat.h>
77#include <sys/unistd.h>
78#include <sys/filedesc.h>
79#include <sys/file.h>
80#include <sys/fcntl.h>
81#include <sys/dirent.h>
82#include <sys/bio.h>
83#include <sys/buf.h>
84#include <sys/sysctl.h>
85
86#include <vm/vm.h>
87#include <vm/vm_extern.h>
88#include <vm/pmap.h>
89#include <vm/vm_map.h>
90#include <vm/vm_page.h>
91#include <vm/vm_param.h>
92#include <vm/vm_object.h>
93#include <vm/vm_pager.h>
94#include <vm/vnode_pager.h>
95#include <vm/vm_object.h>
96
97#include "fuse.h"
98#include "fuse_file.h"
99#include "fuse_internal.h"
100#include "fuse_ipc.h"
101#include "fuse_node.h"
102#include "fuse_param.h"
103#include "fuse_io.h"
104
105#include <sys/priv.h>
106
107#define FUSE_DEBUG_MODULE VNOPS
108#include "fuse_debug.h"
109
110/* vnode ops */
111static vop_access_t fuse_vnop_access;
112static vop_close_t fuse_vnop_close;
113static vop_create_t fuse_vnop_create;
114static vop_fsync_t fuse_vnop_fsync;
115static vop_getattr_t fuse_vnop_getattr;
116static vop_inactive_t fuse_vnop_inactive;
117static vop_link_t fuse_vnop_link;
118static vop_lookup_t fuse_vnop_lookup;
119static vop_mkdir_t fuse_vnop_mkdir;
120static vop_mknod_t fuse_vnop_mknod;
121static vop_open_t fuse_vnop_open;
122static vop_read_t fuse_vnop_read;
123static vop_readdir_t fuse_vnop_readdir;
124static vop_readlink_t fuse_vnop_readlink;
125static vop_reclaim_t fuse_vnop_reclaim;
126static vop_remove_t fuse_vnop_remove;
127static vop_rename_t fuse_vnop_rename;
128static vop_rmdir_t fuse_vnop_rmdir;
129static vop_setattr_t fuse_vnop_setattr;
130static vop_strategy_t fuse_vnop_strategy;
131static vop_symlink_t fuse_vnop_symlink;
132static vop_write_t fuse_vnop_write;
133static vop_getpages_t fuse_vnop_getpages;
134static vop_putpages_t fuse_vnop_putpages;
135static vop_print_t fuse_vnop_print;
136
137struct vop_vector fuse_vnops = {
138 .vop_default = &default_vnodeops,
139 .vop_access = fuse_vnop_access,
140 .vop_close = fuse_vnop_close,
141 .vop_create = fuse_vnop_create,
142 .vop_fsync = fuse_vnop_fsync,
143 .vop_getattr = fuse_vnop_getattr,
144 .vop_inactive = fuse_vnop_inactive,
145 .vop_link = fuse_vnop_link,
146 .vop_lookup = fuse_vnop_lookup,
147 .vop_mkdir = fuse_vnop_mkdir,
148 .vop_mknod = fuse_vnop_mknod,
149 .vop_open = fuse_vnop_open,
150 .vop_pathconf = vop_stdpathconf,
151 .vop_read = fuse_vnop_read,
152 .vop_readdir = fuse_vnop_readdir,
153 .vop_readlink = fuse_vnop_readlink,
154 .vop_reclaim = fuse_vnop_reclaim,
155 .vop_remove = fuse_vnop_remove,
156 .vop_rename = fuse_vnop_rename,
157 .vop_rmdir = fuse_vnop_rmdir,
158 .vop_setattr = fuse_vnop_setattr,
159 .vop_strategy = fuse_vnop_strategy,
160 .vop_symlink = fuse_vnop_symlink,
161 .vop_write = fuse_vnop_write,
162 .vop_getpages = fuse_vnop_getpages,
163 .vop_putpages = fuse_vnop_putpages,
164 .vop_print = fuse_vnop_print,
165};
166
167static u_long fuse_lookup_cache_hits = 0;
168
169SYSCTL_ULONG(_vfs_fuse, OID_AUTO, lookup_cache_hits, CTLFLAG_RD,
170 &fuse_lookup_cache_hits, 0, "");
171
172static u_long fuse_lookup_cache_misses = 0;
173
174SYSCTL_ULONG(_vfs_fuse, OID_AUTO, lookup_cache_misses, CTLFLAG_RD,
175 &fuse_lookup_cache_misses, 0, "");
176
177int fuse_lookup_cache_enable = 1;
178
179SYSCTL_INT(_vfs_fuse, OID_AUTO, lookup_cache_enable, CTLFLAG_RW,
180 &fuse_lookup_cache_enable, 0, "");
181
182/*
183 * XXX: This feature is highly experimental and can bring to instabilities,
184 * needs revisiting before to be enabled by default.
185 */
186static int fuse_reclaim_revoked = 0;
187
188SYSCTL_INT(_vfs_fuse, OID_AUTO, reclaim_revoked, CTLFLAG_RW,
189 &fuse_reclaim_revoked, 0, "");
190
191int fuse_pbuf_freecnt = -1;
192
193#define fuse_vm_page_lock(m) vm_page_lock((m));
194#define fuse_vm_page_unlock(m) vm_page_unlock((m));
195#define fuse_vm_page_lock_queues() ((void)0)
196#define fuse_vm_page_unlock_queues() ((void)0)
197
198/*
199 struct vnop_access_args {
200 struct vnode *a_vp;
201#if VOP_ACCESS_TAKES_ACCMODE_T
202 accmode_t a_accmode;
203#else
204 int a_mode;
205#endif
206 struct ucred *a_cred;
207 struct thread *a_td;
208 };
209*/
210static int
211fuse_vnop_access(struct vop_access_args *ap)
212{
213 struct vnode *vp = ap->a_vp;
214 int accmode = ap->a_accmode;
215 struct ucred *cred = ap->a_cred;
216
217 struct fuse_access_param facp;
218 struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp));
219
220 int err;
221
222 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
223
224 if (fuse_isdeadfs(vp)) {
225 if (vnode_isvroot(vp)) {
226 return 0;
227 }
228 return ENXIO;
229 }
230 if (!(data->dataflags & FSESS_INITED)) {
231 if (vnode_isvroot(vp)) {
232 if (priv_check_cred(cred, PRIV_VFS_ADMIN, 0) ||
233 (fuse_match_cred(data->daemoncred, cred) == 0)) {
234 return 0;
235 }
236 }
237 return EBADF;
238 }
239 if (vnode_islnk(vp)) {
240 return 0;
241 }
242 bzero(&facp, sizeof(facp));
243
244 err = fuse_internal_access(vp, accmode, &facp, ap->a_td, ap->a_cred);
245 FS_DEBUG2G("err=%d accmode=0x%x\n", err, accmode);
246 return err;
247}
248
249/*
250 struct vnop_close_args {
251 struct vnode *a_vp;
252 int a_fflag;
253 struct ucred *a_cred;
254 struct thread *a_td;
255 };
256*/
257static int
258fuse_vnop_close(struct vop_close_args *ap)
259{
260 struct vnode *vp = ap->a_vp;
261 struct ucred *cred = ap->a_cred;
262 int fflag = ap->a_fflag;
263 fufh_type_t fufh_type;
264
265 fuse_trace_printf_vnop();
266
267 if (fuse_isdeadfs(vp)) {
268 return 0;
269 }
270 if (vnode_isdir(vp)) {
271 if (fuse_filehandle_valid(vp, FUFH_RDONLY)) {
272 fuse_filehandle_close(vp, FUFH_RDONLY, NULL, cred);
273 }
274 return 0;
275 }
276 if (fflag & IO_NDELAY) {
277 return 0;
278 }
279 fufh_type = fuse_filehandle_xlate_from_fflags(fflag);
280
281 if (!fuse_filehandle_valid(vp, fufh_type)) {
282 int i;
283
284 for (i = 0; i < FUFH_MAXTYPE; i++)
285 if (fuse_filehandle_valid(vp, i))
286 break;
287 if (i == FUFH_MAXTYPE)
288 panic("FUSE: fufh type %d found to be invalid in close"
289 " (fflag=0x%x)\n",
290 fufh_type, fflag);
291 }
292 if ((VTOFUD(vp)->flag & FN_SIZECHANGE) != 0) {
293 fuse_vnode_savesize(vp, cred);
294 }
295 return 0;
296}
297
298/*
299 struct vnop_create_args {
300 struct vnode *a_dvp;
301 struct vnode **a_vpp;
302 struct componentname *a_cnp;
303 struct vattr *a_vap;
304 };
305*/
306static int
307fuse_vnop_create(struct vop_create_args *ap)
308{
309 struct vnode *dvp = ap->a_dvp;
310 struct vnode **vpp = ap->a_vpp;
311 struct componentname *cnp = ap->a_cnp;
312 struct vattr *vap = ap->a_vap;
313 struct thread *td = cnp->cn_thread;
314 struct ucred *cred = cnp->cn_cred;
315
316 struct fuse_open_in *foi;
317 struct fuse_entry_out *feo;
318 struct fuse_dispatcher fdi;
319 struct fuse_dispatcher *fdip = &fdi;
320
321 int err;
322
323 struct mount *mp = vnode_mount(dvp);
324 uint64_t parentnid = VTOFUD(dvp)->nid;
325 mode_t mode = MAKEIMODE(vap->va_type, vap->va_mode);
326 uint64_t x_fh_id;
327 uint32_t x_open_flags;
328
329 fuse_trace_printf_vnop();
330
331 if (fuse_isdeadfs(dvp)) {
332 return ENXIO;
333 }
334 bzero(&fdi, sizeof(fdi));
335
336	/* XXX: Will we ever want devices? */
337 if ((vap->va_type != VREG)) {
338 MPASS(vap->va_type != VFIFO);
339 goto bringup;
340 }
341 debug_printf("parent nid = %ju, mode = %x\n", (uintmax_t)parentnid,
342 mode);
343
344 fdisp_init(fdip, sizeof(*foi) + cnp->cn_namelen + 1);
345 if (!fsess_isimpl(mp, FUSE_CREATE)) {
346 debug_printf("eh, daemon doesn't implement create?\n");
347 return (EINVAL);
348 }
349 fdisp_make(fdip, FUSE_CREATE, vnode_mount(dvp), parentnid, td, cred);
350
351 foi = fdip->indata;
352 foi->mode = mode;
353 foi->flags = O_CREAT | O_RDWR;
354
355 memcpy((char *)fdip->indata + sizeof(*foi), cnp->cn_nameptr,
356 cnp->cn_namelen);
357 ((char *)fdip->indata)[sizeof(*foi) + cnp->cn_namelen] = '\0';
358
359 err = fdisp_wait_answ(fdip);
360
361 if (err) {
362 if (err == ENOSYS)
363 fsess_set_notimpl(mp, FUSE_CREATE);
364 debug_printf("create: got err=%d from daemon\n", err);
365 goto out;
366 }
367bringup:
368 feo = fdip->answ;
369
370 if ((err = fuse_internal_checkentry(feo, VREG))) {
371 goto out;
372 }
373 err = fuse_vnode_get(mp, feo->nodeid, dvp, vpp, cnp, VREG);
374 if (err) {
375 struct fuse_release_in *fri;
376 uint64_t nodeid = feo->nodeid;
377 uint64_t fh_id = ((struct fuse_open_out *)(feo + 1))->fh;
378
379 fdisp_init(fdip, sizeof(*fri));
380 fdisp_make(fdip, FUSE_RELEASE, mp, nodeid, td, cred);
381 fri = fdip->indata;
382 fri->fh = fh_id;
383 fri->flags = OFLAGS(mode);
384 fuse_insert_callback(fdip->tick, fuse_internal_forget_callback);
385 fuse_insert_message(fdip->tick);
386 return err;
387 }
388 ASSERT_VOP_ELOCKED(*vpp, "fuse_vnop_create");
389
390 fdip->answ = feo + 1;
391
392 x_fh_id = ((struct fuse_open_out *)(feo + 1))->fh;
393 x_open_flags = ((struct fuse_open_out *)(feo + 1))->open_flags;
394 fuse_filehandle_init(*vpp, FUFH_RDWR, NULL, x_fh_id);
395 fuse_vnode_open(*vpp, x_open_flags, td);
396 cache_purge_negative(dvp);
397
398out:
399 fdisp_destroy(fdip);
400 return err;
401}
402
403/*
404 * Our vnop_fsync roughly corresponds to the FUSE_FSYNC method. The Linux
405 * version of FUSE also has a FUSE_FLUSH method.
406 *
407 * On Linux, fsync() synchronizes a file's complete in-core state with that
408 * on disk. The call is not supposed to return until the system has completed
409 * that action or until an error is detected.
410 *
411 * Linux also has an fdatasync() call that is similar to fsync() but is not
412 * required to update the metadata such as access time and modification time.
413 */
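/*
 * Protocol note (a sketch, not relied on by the code below): a FUSE_FSYNC
 * request carries a struct fuse_fsync_in with the file handle and an
 * fsync_flags word; the low bit is the "datasync" hint, which is how a
 * daemon can offer fdatasync-like behaviour and skip flushing metadata.
 */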
414
415/*
416 struct vnop_fsync_args {
417 struct vnodeop_desc *a_desc;
418 struct vnode * a_vp;
419 struct ucred * a_cred;
420 int a_waitfor;
421 struct thread * a_td;
422 };
423*/
424static int
425fuse_vnop_fsync(struct vop_fsync_args *ap)
426{
427 struct vnode *vp = ap->a_vp;
428 struct thread *td = ap->a_td;
429
430 struct fuse_filehandle *fufh;
431 struct fuse_vnode_data *fvdat = VTOFUD(vp);
432
433 int type, err = 0;
434
435 fuse_trace_printf_vnop();
436
437 if (fuse_isdeadfs(vp)) {
438 return 0;
439 }
440 if ((err = vop_stdfsync(ap)))
441 return err;
442
443 if (!fsess_isimpl(vnode_mount(vp),
444 (vnode_vtype(vp) == VDIR ? FUSE_FSYNCDIR : FUSE_FSYNC))) {
445 goto out;
446 }
447 for (type = 0; type < FUFH_MAXTYPE; type++) {
448 fufh = &(fvdat->fufh[type]);
449 if (FUFH_IS_VALID(fufh)) {
450 fuse_internal_fsync(vp, td, NULL, fufh);
451 }
452 }
453
454out:
455 return 0;
456}
457
458/*
459 struct vnop_getattr_args {
460 struct vnode *a_vp;
461 struct vattr *a_vap;
462 struct ucred *a_cred;
463 struct thread *a_td;
464 };
465*/
466static int
467fuse_vnop_getattr(struct vop_getattr_args *ap)
468{
469 struct vnode *vp = ap->a_vp;
470 struct vattr *vap = ap->a_vap;
471 struct ucred *cred = ap->a_cred;
472 struct thread *td = curthread;
473 struct fuse_vnode_data *fvdat = VTOFUD(vp);
474
475 int err = 0;
476 int dataflags;
477 struct fuse_dispatcher fdi;
478
479 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
480
481 dataflags = fuse_get_mpdata(vnode_mount(vp))->dataflags;
482
483 /* Note that we are not bailing out on a dead file system just yet. */
484
485 if (!(dataflags & FSESS_INITED)) {
486 if (!vnode_isvroot(vp)) {
487 fdata_set_dead(fuse_get_mpdata(vnode_mount(vp)));
488 err = ENOTCONN;
489 debug_printf("fuse_getattr b: returning ENOTCONN\n");
490 return err;
491 } else {
492 goto fake;
493 }
494 }
495 fdisp_init(&fdi, 0);
496 if ((err = fdisp_simple_putget_vp(&fdi, FUSE_GETATTR, vp, td, cred))) {
497 if ((err == ENOTCONN) && vnode_isvroot(vp)) {
498 /* see comment at similar place in fuse_statfs() */
499 fdisp_destroy(&fdi);
500 goto fake;
501 }
502 if (err == ENOENT) {
503 fuse_internal_vnode_disappear(vp);
504 }
505 goto out;
506 }
507 cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
508 if (vap != VTOVA(vp)) {
509 memcpy(vap, VTOVA(vp), sizeof(*vap));
510 }
511 if (vap->va_type != vnode_vtype(vp)) {
512 fuse_internal_vnode_disappear(vp);
513 err = ENOENT;
514 goto out;
515 }
516 if ((fvdat->flag & FN_SIZECHANGE) != 0)
517 vap->va_size = fvdat->filesize;
518
519 if (vnode_isreg(vp) && (fvdat->flag & FN_SIZECHANGE) == 0) {
520 /*
521 * This is for those cases when the file size changed without us
522 * knowing, and we want to catch up.
523 */
524 off_t new_filesize = ((struct fuse_attr_out *)
525 fdi.answ)->attr.size;
526
527 if (fvdat->filesize != new_filesize) {
528 fuse_vnode_setsize(vp, cred, new_filesize);
529 }
530 }
531 debug_printf("fuse_getattr e: returning 0\n");
532
533out:
534 fdisp_destroy(&fdi);
535 return err;
536
537fake:
538 bzero(vap, sizeof(*vap));
539 vap->va_type = vnode_vtype(vp);
540
541 return 0;
542}
543
544/*
545 struct vnop_inactive_args {
546 struct vnode *a_vp;
547 struct thread *a_td;
548 };
549*/
550static int
551fuse_vnop_inactive(struct vop_inactive_args *ap)
552{
553 struct vnode *vp = ap->a_vp;
554 struct thread *td = ap->a_td;
555
556 struct fuse_vnode_data *fvdat = VTOFUD(vp);
557 struct fuse_filehandle *fufh = NULL;
558
559 int type, need_flush = 1;
560
561 FS_DEBUG("inode=%ju\n", (uintmax_t)VTOI(vp));
562
563 for (type = 0; type < FUFH_MAXTYPE; type++) {
564 fufh = &(fvdat->fufh[type]);
565 if (FUFH_IS_VALID(fufh)) {
566 if (need_flush && vp->v_type == VREG) {
567 if ((VTOFUD(vp)->flag & FN_SIZECHANGE) != 0) {
568 fuse_vnode_savesize(vp, NULL);
569 }
570 if (fuse_data_cache_invalidate ||
571 (fvdat->flag & FN_REVOKED) != 0)
572 fuse_io_invalbuf(vp, td);
573 else
574 fuse_io_flushbuf(vp, MNT_WAIT, td);
575 need_flush = 0;
576 }
577 fuse_filehandle_close(vp, type, td, NULL);
578 }
579 }
580
581 if ((fvdat->flag & FN_REVOKED) != 0 && fuse_reclaim_revoked) {
582 vrecycle(vp);
583 }
584 return 0;
585}
586
587/*
588 struct vnop_link_args {
589 struct vnode *a_tdvp;
590 struct vnode *a_vp;
591 struct componentname *a_cnp;
592 };
593*/
594static int
595fuse_vnop_link(struct vop_link_args *ap)
596{
597 struct vnode *vp = ap->a_vp;
598 struct vnode *tdvp = ap->a_tdvp;
599 struct componentname *cnp = ap->a_cnp;
600
601 struct vattr *vap = VTOVA(vp);
602
603 struct fuse_dispatcher fdi;
604 struct fuse_entry_out *feo;
605 struct fuse_link_in fli;
606
607 int err;
608
609 fuse_trace_printf_vnop();
610
611 if (fuse_isdeadfs(vp)) {
612 return ENXIO;
613 }
614 if (vnode_mount(tdvp) != vnode_mount(vp)) {
615 return EXDEV;
616 }
617 if (vap->va_nlink >= FUSE_LINK_MAX) {
618 return EMLINK;
619 }
620 fli.oldnodeid = VTOI(vp);
621
622 fdisp_init(&fdi, 0);
623 fuse_internal_newentry_makerequest(vnode_mount(tdvp), VTOI(tdvp), cnp,
624 FUSE_LINK, &fli, sizeof(fli), &fdi);
625 if ((err = fdisp_wait_answ(&fdi))) {
626 goto out;
627 }
628 feo = fdi.answ;
629
630 err = fuse_internal_checkentry(feo, vnode_vtype(vp));
631out:
632 fdisp_destroy(&fdi);
633 return err;
634}
635
636/*
637 struct vnop_lookup_args {
638 struct vnodeop_desc *a_desc;
639 struct vnode *a_dvp;
640 struct vnode **a_vpp;
641 struct componentname *a_cnp;
642 };
643*/
644int
645fuse_vnop_lookup(struct vop_lookup_args *ap)
646{
647 struct vnode *dvp = ap->a_dvp;
648 struct vnode **vpp = ap->a_vpp;
649 struct componentname *cnp = ap->a_cnp;
650 struct thread *td = cnp->cn_thread;
651 struct ucred *cred = cnp->cn_cred;
652
653 int nameiop = cnp->cn_nameiop;
654 int flags = cnp->cn_flags;
655 int wantparent = flags & (LOCKPARENT | WANTPARENT);
656 int islastcn = flags & ISLASTCN;
657 struct mount *mp = vnode_mount(dvp);
658
659 int err = 0;
660 int lookup_err = 0;
661 struct vnode *vp = NULL;
662
663 struct fuse_dispatcher fdi;
664 enum fuse_opcode op;
665
666 uint64_t nid;
667 struct fuse_access_param facp;
668
669 FS_DEBUG2G("parent_inode=%ju - %*s\n",
670 (uintmax_t)VTOI(dvp), (int)cnp->cn_namelen, cnp->cn_nameptr);
671
672 if (fuse_isdeadfs(dvp)) {
673 *vpp = NULL;
674 return ENXIO;
675 }
676 if (!vnode_isdir(dvp)) {
677 return ENOTDIR;
678 }
679 if (islastcn && vfs_isrdonly(mp) && (nameiop != LOOKUP)) {
680 return EROFS;
681 }
682 /*
683	 * We do the access check prior to doing anything else only in the case
684	 * when we are at the fs root (we'd like to say, "we are at the first
685	 * component", but that's not exactly the same... never mind).
686 * See further comments at further access checks.
687 */
688
689 bzero(&facp, sizeof(facp));
690 if (vnode_isvroot(dvp)) { /* early permission check hack */
691 if ((err = fuse_internal_access(dvp, VEXEC, &facp, td, cred))) {
692 return err;
693 }
694 }
695 if (flags & ISDOTDOT) {
696 nid = VTOFUD(dvp)->parent_nid;
697 if (nid == 0) {
698 return ENOENT;
699 }
700 fdisp_init(&fdi, 0);
701 op = FUSE_GETATTR;
702 goto calldaemon;
703 } else if (cnp->cn_namelen == 1 && *(cnp->cn_nameptr) == '.') {
704 nid = VTOI(dvp);
705 fdisp_init(&fdi, 0);
706 op = FUSE_GETATTR;
707 goto calldaemon;
708 } else if (fuse_lookup_cache_enable) {
709 err = cache_lookup(dvp, vpp, cnp, NULL, NULL);
710 switch (err) {
711
712 case -1: /* positive match */
713 atomic_add_acq_long(&fuse_lookup_cache_hits, 1);
714 return 0;
715
716 case 0: /* no match in cache */
717 atomic_add_acq_long(&fuse_lookup_cache_misses, 1);
718 break;
719
720 case ENOENT: /* negative match */
721 /* fall through */
722 default:
723 return err;
724 }
725 }
726 nid = VTOI(dvp);
727 fdisp_init(&fdi, cnp->cn_namelen + 1);
728 op = FUSE_LOOKUP;
729
730calldaemon:
731 fdisp_make(&fdi, op, mp, nid, td, cred);
732
733 if (op == FUSE_LOOKUP) {
734 memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
735 ((char *)fdi.indata)[cnp->cn_namelen] = '\0';
736 }
737 lookup_err = fdisp_wait_answ(&fdi);
738
739 if ((op == FUSE_LOOKUP) && !lookup_err) { /* lookup call succeeded */
740 nid = ((struct fuse_entry_out *)fdi.answ)->nodeid;
741 if (!nid) {
742 /*
743			 * A zero nodeid is the same as "not found",
744			 * but it's also cacheable (which we keep
745			 * on not doing as of this writing).
746 */
747 lookup_err = ENOENT;
748 } else if (nid == FUSE_ROOT_ID) {
749 lookup_err = EINVAL;
750 }
751 }
752 if (lookup_err &&
753 (!fdi.answ_stat || lookup_err != ENOENT || op != FUSE_LOOKUP)) {
754 fdisp_destroy(&fdi);
755 return lookup_err;
756 }
757 /* lookup_err, if non-zero, must be ENOENT at this point */
758
759 if (lookup_err) {
760
761 if ((nameiop == CREATE || nameiop == RENAME) && islastcn
762 /* && directory dvp has not been removed */ ) {
763
764 if (vfs_isrdonly(mp)) {
765 err = EROFS;
766 goto out;
767 }
768#if 0 /* THINK_ABOUT_THIS */
769 if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
770 goto out;
771 }
772#endif
773
774 /*
775 * Possibly record the position of a slot in the
776 * directory large enough for the new component name.
777 * This can be recorded in the vnode private data for
778 * dvp. Set the SAVENAME flag to hold onto the
779 * pathname for use later in VOP_CREATE or VOP_RENAME.
780 */
781 cnp->cn_flags |= SAVENAME;
782
783 err = EJUSTRETURN;
784 goto out;
785 }
786 /* Consider inserting name into cache. */
787
788 /*
789		 * No, we can't use negative caching, as the fs
790		 * changes are out of our control.
791		 * False positives reveal their falseness as things
792		 * go by, but false negatives don't.
793		 * (And aiding the caching mechanism with extra control
794		 * mechanisms comes quite close to defeating the whole
795		 * purpose of caching...)
796 */
797#if 0
798 if ((cnp->cn_flags & MAKEENTRY) && nameiop != CREATE) {
799 FS_DEBUG("inserting NULL into cache\n");
800 cache_enter(dvp, NULL, cnp);
801 }
802#endif
803 err = ENOENT;
804 goto out;
805
806 } else {
807
808 /* !lookup_err */
809
810 struct fuse_entry_out *feo = NULL;
811 struct fuse_attr *fattr = NULL;
812
813 if (op == FUSE_GETATTR) {
814 fattr = &((struct fuse_attr_out *)fdi.answ)->attr;
815 } else {
816 feo = (struct fuse_entry_out *)fdi.answ;
817 fattr = &(feo->attr);
818 }
819
820 /*
821 * If deleting, and at end of pathname, return parameters
822 * which can be used to remove file. If the wantparent flag
823 * isn't set, we return only the directory, otherwise we go on
824 * and lock the inode, being careful with ".".
825 */
826 if (nameiop == DELETE && islastcn) {
827 /*
828 * Check for write access on directory.
829 */
830 facp.xuid = fattr->uid;
831 facp.facc_flags |= FACCESS_STICKY;
832 err = fuse_internal_access(dvp, VWRITE, &facp, td, cred);
833 facp.facc_flags &= ~FACCESS_XQUERIES;
834
835 if (err) {
836 goto out;
837 }
838 if (nid == VTOI(dvp)) {
839 vref(dvp);
840 *vpp = dvp;
841 } else {
842 err = fuse_vnode_get(dvp->v_mount, nid, dvp,
843 &vp, cnp, IFTOVT(fattr->mode));
844 if (err)
845 goto out;
846 *vpp = vp;
847 }
848
849 /*
850 * Save the name for use in VOP_RMDIR and VOP_REMOVE
851 * later.
852 */
853 cnp->cn_flags |= SAVENAME;
854 goto out;
855
856 }
857 /*
858 * If rewriting (RENAME), return the inode and the
859 * information required to rewrite the present directory
860 * Must get inode of directory entry to verify it's a
861 * regular file, or empty directory.
862 */
863 if (nameiop == RENAME && wantparent && islastcn) {
864
865#if 0 /* THINK_ABOUT_THIS */
866 if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
867 goto out;
868 }
869#endif
870
871 /*
872 * Check for "."
873 */
874 if (nid == VTOI(dvp)) {
875 err = EISDIR;
876 goto out;
877 }
878 err = fuse_vnode_get(vnode_mount(dvp),
879 nid,
880 dvp,
881 &vp,
882 cnp,
883 IFTOVT(fattr->mode));
884 if (err) {
885 goto out;
886 }
887 *vpp = vp;
888 /*
889 * Save the name for use in VOP_RENAME later.
890 */
891 cnp->cn_flags |= SAVENAME;
892
893 goto out;
894 }
895 if (flags & ISDOTDOT) {
896 struct mount *mp;
897 int ltype;
898
899 /*
900 * Expanded copy of vn_vget_ino() so that
901 * fuse_vnode_get() can be used.
902 */
903 mp = dvp->v_mount;
904 ltype = VOP_ISLOCKED(dvp);
905 err = vfs_busy(mp, MBF_NOWAIT);
906 if (err != 0) {
907 vfs_ref(mp);
908 VOP_UNLOCK(dvp, 0);
909 err = vfs_busy(mp, 0);
910 vn_lock(dvp, ltype | LK_RETRY);
911 vfs_rel(mp);
912 if (err)
913 goto out;
914 if ((dvp->v_iflag & VI_DOOMED) != 0) {
915 err = ENOENT;
916 vfs_unbusy(mp);
917 goto out;
918 }
919 }
920 VOP_UNLOCK(dvp, 0);
921 err = fuse_vnode_get(vnode_mount(dvp),
922 nid,
923 NULL,
924 &vp,
925 cnp,
926 IFTOVT(fattr->mode));
927 vfs_unbusy(mp);
928 vn_lock(dvp, ltype | LK_RETRY);
929 if ((dvp->v_iflag & VI_DOOMED) != 0) {
930 if (err == 0)
931 vput(vp);
932 err = ENOENT;
933 }
934 if (err)
935 goto out;
936 *vpp = vp;
937 } else if (nid == VTOI(dvp)) {
938 vref(dvp);
939 *vpp = dvp;
940 } else {
941 err = fuse_vnode_get(vnode_mount(dvp),
942 nid,
943 dvp,
944 &vp,
945 cnp,
946 IFTOVT(fattr->mode));
947 if (err) {
948 goto out;
949 }
950 fuse_vnode_setparent(vp, dvp);
951 *vpp = vp;
952 }
953
954 if (op == FUSE_GETATTR) {
955 cache_attrs(*vpp, (struct fuse_attr_out *)fdi.answ);
956 } else {
957 cache_attrs(*vpp, (struct fuse_entry_out *)fdi.answ);
958 }
959
960 /* Insert name into cache if appropriate. */
961
962 /*
963 * Nooo, caching is evil. With caching, we can't avoid stale
964 * information taking over the playground (cached info is not
965 * just positive/negative, it does have qualitative aspects,
966 * too). And a (VOP/FUSE)_GETATTR is always thrown anyway, when
967 * walking down along cached path components, and that's not
968 * any cheaper than FUSE_LOOKUP. This might change with
969 * implementing kernel side attr caching, but... In Linux,
970 * lookup results are not cached, and the daemon is bombarded
971 * with FUSE_LOOKUPS on and on. This shows that by design, the
972 * daemon is expected to handle frequent lookup queries
973 * efficiently, do its caching in userspace, and so on.
974 *
975 * So just leave the name cache alone.
976 */
977
978 /*
979 * Well, now I know, Linux caches lookups, but with a
980 * timeout... So it's the same thing as attribute caching:
981		 * we can deal with it when we implement timeouts.
982 */
983#if 0
984 if (cnp->cn_flags & MAKEENTRY) {
985 cache_enter(dvp, *vpp, cnp);
986 }
987#endif
988 }
989out:
990 if (!lookup_err) {
991
992 /* No lookup error; need to clean up. */
993
994 if (err) { /* Found inode; exit with no vnode. */
995 if (op == FUSE_LOOKUP) {
996 fuse_internal_forget_send(vnode_mount(dvp), td, cred,
997 nid, 1);
998 }
999 fdisp_destroy(&fdi);
1000 return err;
1001 } else {
1002#ifndef NO_EARLY_PERM_CHECK_HACK
1003 if (!islastcn) {
1004 /*
1005 * We have the attributes of the next item
1006 * *now*, and it's a fact, and we do not
1007 * have to do extra work for it (ie, beg the
1008			 * daemon), and it doesn't depend on such
1009			 * accidental things as attr caching. So
1010 * the big idea: check credentials *now*,
1011 * not at the beginning of the next call to
1012 * lookup.
1013 *
1014 * The first item of the lookup chain (fs root)
1015			 * won't be checked here then, of course, as
1016			 * it's never "the next". But go and see that
1017			 * the root is taken care of at the very
1018			 * beginning of this function.
1019 *
1020 * Now, given we want to do the access check
1021 * this way, one might ask: so then why not
1022 * do the access check just after fetching
1023 * the inode and its attributes from the
1024 * daemon? Why bother with producing the
1025 * corresponding vnode at all if something
1026 * is not OK? We know what's the deal as
1027 * soon as we get those attrs... There is
1028 * one bit of info though not given us by
1029			 * the daemon: whether its response is
1030			 * authoritative or not... Its response should
1031 * be ignored if something is mounted over
1032 * the dir in question. But that can be
1033 * known only by having the vnode...
1034 */
1035 int tmpvtype = vnode_vtype(*vpp);
1036
1037 bzero(&facp, sizeof(facp));
1038 /*the early perm check hack */
1039 facp.facc_flags |= FACCESS_VA_VALID;
1040
1041 if ((tmpvtype != VDIR) && (tmpvtype != VLNK)) {
1042 err = ENOTDIR;
1043 }
1044 if (!err && !vnode_mountedhere(*vpp)) {
1045 err = fuse_internal_access(*vpp, VEXEC, &facp, td, cred);
1046 }
1047 if (err) {
1048 if (tmpvtype == VLNK)
1049 FS_DEBUG("weird, permission error with a symlink?\n");
1050 vput(*vpp);
1051 *vpp = NULL;
1052 }
1053 }
1054#endif
1055 }
1056 }
1057 fdisp_destroy(&fdi);
1058
1059 return err;
1060}
1061
1062/*
1063 struct vnop_mkdir_args {
1064 struct vnode *a_dvp;
1065 struct vnode **a_vpp;
1066 struct componentname *a_cnp;
1067 struct vattr *a_vap;
1068 };
1069*/
1070static int
1071fuse_vnop_mkdir(struct vop_mkdir_args *ap)
1072{
1073 struct vnode *dvp = ap->a_dvp;
1074 struct vnode **vpp = ap->a_vpp;
1075 struct componentname *cnp = ap->a_cnp;
1076 struct vattr *vap = ap->a_vap;
1077
1078 struct fuse_mkdir_in fmdi;
1079
1080 fuse_trace_printf_vnop();
1081
1082 if (fuse_isdeadfs(dvp)) {
1083 return ENXIO;
1084 }
1085 fmdi.mode = MAKEIMODE(vap->va_type, vap->va_mode);
1086
1087 return (fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKDIR, &fmdi,
1088 sizeof(fmdi), VDIR));
1089}
1090
1091/*
1092 struct vnop_mknod_args {
1093 struct vnode *a_dvp;
1094 struct vnode **a_vpp;
1095 struct componentname *a_cnp;
1096 struct vattr *a_vap;
1097 };
1098*/
1099static int
1100fuse_vnop_mknod(struct vop_mknod_args *ap)
1101{
1102
1103 return (EINVAL);
1104}
1105
1106
1107/*
1108 struct vnop_open_args {
1109 struct vnode *a_vp;
1110 int a_mode;
1111 struct ucred *a_cred;
1112 struct thread *a_td;
1113 int a_fdidx; / struct file *a_fp;
1114 };
1115*/
1116static int
1117fuse_vnop_open(struct vop_open_args *ap)
1118{
1119 struct vnode *vp = ap->a_vp;
1120 int mode = ap->a_mode;
1121 struct thread *td = ap->a_td;
1122 struct ucred *cred = ap->a_cred;
1123
1124 fufh_type_t fufh_type;
1125 struct fuse_vnode_data *fvdat;
1126
1127 int error, isdir = 0;
1128
1129 FS_DEBUG2G("inode=%ju mode=0x%x\n", (uintmax_t)VTOI(vp), mode);
1130
1131 if (fuse_isdeadfs(vp)) {
1132 return ENXIO;
1133 }
1134 fvdat = VTOFUD(vp);
1135
1136 if (vnode_isdir(vp)) {
1137 isdir = 1;
1138 }
1139 if (isdir) {
1140 fufh_type = FUFH_RDONLY;
1141 } else {
1142 fufh_type = fuse_filehandle_xlate_from_fflags(mode);
1143 }
1144
1145 if (fuse_filehandle_valid(vp, fufh_type)) {
1146 fuse_vnode_open(vp, 0, td);
1147 return 0;
1148 }
1149 error = fuse_filehandle_open(vp, fufh_type, NULL, td, cred);
1150
1151 return error;
1152}
1153
1154/*
1155 struct vnop_read_args {
1156 struct vnode *a_vp;
1157 struct uio *a_uio;
1158 int a_ioflag;
1159 struct ucred *a_cred;
1160 };
1161*/
1162static int
1163fuse_vnop_read(struct vop_read_args *ap)
1164{
1165 struct vnode *vp = ap->a_vp;
1166 struct uio *uio = ap->a_uio;
1167 int ioflag = ap->a_ioflag;
1168 struct ucred *cred = ap->a_cred;
1169
1170 FS_DEBUG2G("inode=%ju offset=%jd resid=%zd\n",
1171 (uintmax_t)VTOI(vp), uio->uio_offset, uio->uio_resid);
1172
1173 if (fuse_isdeadfs(vp)) {
1174 return ENXIO;
1175 }
1176 return fuse_io_dispatch(vp, uio, ioflag, cred);
1177}
1178
1179/*
1180 struct vnop_readdir_args {
1181 struct vnode *a_vp;
1182 struct uio *a_uio;
1183 struct ucred *a_cred;
1184 int *a_eofflag;
1185 int *ncookies;
1186 u_long **a_cookies;
1187 };
1188*/
1189static int
1190fuse_vnop_readdir(struct vop_readdir_args *ap)
1191{
1192 struct vnode *vp = ap->a_vp;
1193 struct uio *uio = ap->a_uio;
1194 struct ucred *cred = ap->a_cred;
1195
1196 struct fuse_filehandle *fufh = NULL;
1197 struct fuse_vnode_data *fvdat;
1198 struct fuse_iov cookediov;
1199
1200 int err = 0;
1201 int freefufh = 0;
1202
1203 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
1204
1205 if (fuse_isdeadfs(vp)) {
1206 return ENXIO;
1207 }
1208 if ( /* XXXIP ((uio_iovcnt(uio) > 1)) || */
1209 (uio_resid(uio) < sizeof(struct dirent))) {
1210 return EINVAL;
1211 }
1212 fvdat = VTOFUD(vp);
1213
1214 if (!fuse_filehandle_valid(vp, FUFH_RDONLY)) {
1215 FS_DEBUG("calling readdir() before open()");
1216 err = fuse_filehandle_open(vp, FUFH_RDONLY, &fufh, NULL, cred);
1217 freefufh = 1;
1218 } else {
1219 err = fuse_filehandle_get(vp, FUFH_RDONLY, &fufh);
1220 }
1221 if (err) {
1222 return (err);
1223 }
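	/*
	 * DIRCOOKEDSIZE below: enough room for one maximal directory entry,
	 * i.e. the fuse dirent header (FUSE_NAME_OFFSET) plus a MAXNAMLEN
	 * name and its terminating NUL, rounded up to FUSE's dirent
	 * alignment.
	 */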
1224#define DIRCOOKEDSIZE FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + MAXNAMLEN + 1)
1225 fiov_init(&cookediov, DIRCOOKEDSIZE);
1226
1227 err = fuse_internal_readdir(vp, uio, fufh, &cookediov);
1228
1229 fiov_teardown(&cookediov);
1230 if (freefufh) {
1231 fuse_filehandle_close(vp, FUFH_RDONLY, NULL, cred);
1232 }
1233 return err;
1234}
1235
1236/*
1237 struct vnop_readlink_args {
1238 struct vnode *a_vp;
1239 struct uio *a_uio;
1240 struct ucred *a_cred;
1241 };
1242*/
1243static int
1244fuse_vnop_readlink(struct vop_readlink_args *ap)
1245{
1246 struct vnode *vp = ap->a_vp;
1247 struct uio *uio = ap->a_uio;
1248 struct ucred *cred = ap->a_cred;
1249
1250 struct fuse_dispatcher fdi;
1251 int err;
1252
1253 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
1254
1255 if (fuse_isdeadfs(vp)) {
1256 return ENXIO;
1257 }
1258 if (!vnode_islnk(vp)) {
1259 return EINVAL;
1260 }
1261 fdisp_init(&fdi, 0);
1262 err = fdisp_simple_putget_vp(&fdi, FUSE_READLINK, vp, curthread, cred);
1263 if (err) {
1264 goto out;
1265 }
1266 if (((char *)fdi.answ)[0] == '/' &&
1267 fuse_get_mpdata(vnode_mount(vp))->dataflags & FSESS_PUSH_SYMLINKS_IN) {
1268 char *mpth = vnode_mount(vp)->mnt_stat.f_mntonname;
1269
1270 err = uiomove(mpth, strlen(mpth), uio);
1271 }
1272 if (!err) {
1273 err = uiomove(fdi.answ, fdi.iosize, uio);
1274 }
1275out:
1276 fdisp_destroy(&fdi);
1277 return err;
1278}
1279
1280/*
1281 struct vnop_reclaim_args {
1282 struct vnode *a_vp;
1283 struct thread *a_td;
1284 };
1285*/
1286static int
1287fuse_vnop_reclaim(struct vop_reclaim_args *ap)
1288{
1289 struct vnode *vp = ap->a_vp;
1290 struct thread *td = ap->a_td;
1291
1292 struct fuse_vnode_data *fvdat = VTOFUD(vp);
1293 struct fuse_filehandle *fufh = NULL;
1294
1295 int type;
1296
1297 if (!fvdat) {
1298 panic("FUSE: no vnode data during recycling");
1299 }
1300 FS_DEBUG("inode=%ju\n", (uintmax_t)VTOI(vp));
1301
1302 for (type = 0; type < FUFH_MAXTYPE; type++) {
1303 fufh = &(fvdat->fufh[type]);
1304 if (FUFH_IS_VALID(fufh)) {
1305 printf("FUSE: vnode being reclaimed but fufh (type=%d) is valid",
1306 type);
1307 fuse_filehandle_close(vp, type, td, NULL);
1308 }
1309 }
1310
1311 if ((!fuse_isdeadfs(vp)) && (fvdat->nlookup)) {
1312 fuse_internal_forget_send(vnode_mount(vp), td, NULL, VTOI(vp),
1313 fvdat->nlookup);
1314 }
1315 fuse_vnode_setparent(vp, NULL);
1316 cache_purge(vp);
1317 vfs_hash_remove(vp);
1318 vnode_destroy_vobject(vp);
1319 fuse_vnode_destroy(vp);
1320
1321 return 0;
1322}
1323
1324/*
1325 struct vnop_remove_args {
1326 struct vnode *a_dvp;
1327 struct vnode *a_vp;
1328 struct componentname *a_cnp;
1329 };
1330*/
1331static int
1332fuse_vnop_remove(struct vop_remove_args *ap)
1333{
1334 struct vnode *dvp = ap->a_dvp;
1335 struct vnode *vp = ap->a_vp;
1336 struct componentname *cnp = ap->a_cnp;
1337
1338 int err;
1339
1340 FS_DEBUG2G("inode=%ju name=%*s\n",
1341 (uintmax_t)VTOI(vp), (int)cnp->cn_namelen, cnp->cn_nameptr);
1342
1343 if (fuse_isdeadfs(vp)) {
1344 return ENXIO;
1345 }
1346 if (vnode_isdir(vp)) {
1347 return EPERM;
1348 }
1349 cache_purge(vp);
1350
1351 err = fuse_internal_remove(dvp, vp, cnp, FUSE_UNLINK);
1352
1353 if (err == 0)
1354 fuse_internal_vnode_disappear(vp);
1355 return err;
1356}
1357
1358/*
1359 struct vnop_rename_args {
1360 struct vnode *a_fdvp;
1361 struct vnode *a_fvp;
1362 struct componentname *a_fcnp;
1363 struct vnode *a_tdvp;
1364 struct vnode *a_tvp;
1365 struct componentname *a_tcnp;
1366 };
1367*/
1368static int
1369fuse_vnop_rename(struct vop_rename_args *ap)
1370{
1371 struct vnode *fdvp = ap->a_fdvp;
1372 struct vnode *fvp = ap->a_fvp;
1373 struct componentname *fcnp = ap->a_fcnp;
1374 struct vnode *tdvp = ap->a_tdvp;
1375 struct vnode *tvp = ap->a_tvp;
1376 struct componentname *tcnp = ap->a_tcnp;
1377 struct fuse_data *data;
1378
1379 int err = 0;
1380
1381 FS_DEBUG2G("from: inode=%ju name=%*s -> to: inode=%ju name=%*s\n",
1382 (uintmax_t)VTOI(fvp), (int)fcnp->cn_namelen, fcnp->cn_nameptr,
1383 (uintmax_t)(tvp == NULL ? -1 : VTOI(tvp)),
1384 (int)tcnp->cn_namelen, tcnp->cn_nameptr);
1385
1386 if (fuse_isdeadfs(fdvp)) {
1387 return ENXIO;
1388 }
1389 if (fvp->v_mount != tdvp->v_mount ||
1390 (tvp && fvp->v_mount != tvp->v_mount)) {
1391 FS_DEBUG("cross-device rename: %s -> %s\n",
1392 fcnp->cn_nameptr, (tcnp != NULL ? tcnp->cn_nameptr : "(NULL)"));
1393 err = EXDEV;
1394 goto out;
1395 }
1396 cache_purge(fvp);
1397
1398 /*
1399	 * The FUSE library is expected to check that the target directory is
1400	 * not under the source directory in the file system tree.
1401	 * Linux performs this check at the VFS level.
1402 */
1403 data = fuse_get_mpdata(vnode_mount(tdvp));
1404 sx_xlock(&data->rename_lock);
1405 err = fuse_internal_rename(fdvp, fcnp, tdvp, tcnp);
1406 if (err == 0) {
1407 if (tdvp != fdvp)
1408 fuse_vnode_setparent(fvp, tdvp);
1409 if (tvp != NULL)
1410 fuse_vnode_setparent(tvp, NULL);
1411 }
1412 sx_unlock(&data->rename_lock);
1413
1414 if (tvp != NULL && tvp != fvp) {
1415 cache_purge(tvp);
1416 }
1417 if (vnode_isdir(fvp)) {
1418 if ((tvp != NULL) && vnode_isdir(tvp)) {
1419 cache_purge(tdvp);
1420 }
1421 cache_purge(fdvp);
1422 }
1423out:
1424 if (tdvp == tvp) {
1425 vrele(tdvp);
1426 } else {
1427 vput(tdvp);
1428 }
1429 if (tvp != NULL) {
1430 vput(tvp);
1431 }
1432 vrele(fdvp);
1433 vrele(fvp);
1434
1435 return err;
1436}
1437
1438/*
1439 struct vnop_rmdir_args {
1440 struct vnode *a_dvp;
1441 struct vnode *a_vp;
1442 struct componentname *a_cnp;
1443 } *ap;
1444*/
1445static int
1446fuse_vnop_rmdir(struct vop_rmdir_args *ap)
1447{
1448 struct vnode *dvp = ap->a_dvp;
1449 struct vnode *vp = ap->a_vp;
1450
1451 int err;
1452
1453 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
1454
1455 if (fuse_isdeadfs(vp)) {
1456 return ENXIO;
1457 }
1458 if (VTOFUD(vp) == VTOFUD(dvp)) {
1459 return EINVAL;
1460 }
1461 err = fuse_internal_remove(dvp, vp, ap->a_cnp, FUSE_RMDIR);
1462
1463 if (err == 0)
1464 fuse_internal_vnode_disappear(vp);
1465 return err;
1466}
1467
1468/*
1469 struct vnop_setattr_args {
1470 struct vnode *a_vp;
1471 struct vattr *a_vap;
1472 struct ucred *a_cred;
1473 struct thread *a_td;
1474 };
1475*/
1476static int
1477fuse_vnop_setattr(struct vop_setattr_args *ap)
1478{
1479 struct vnode *vp = ap->a_vp;
1480 struct vattr *vap = ap->a_vap;
1481 struct ucred *cred = ap->a_cred;
1482 struct thread *td = curthread;
1483
1484 struct fuse_dispatcher fdi;
1485 struct fuse_setattr_in *fsai;
1486 struct fuse_access_param facp;
1487
1488 int err = 0;
1489 enum vtype vtyp;
1490 int sizechanged = 0;
1491 uint64_t newsize = 0;
1492
1493 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
1494
1495 if (fuse_isdeadfs(vp)) {
1496 return ENXIO;
1497 }
1498 fdisp_init(&fdi, sizeof(*fsai));
1499 fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
1500 fsai = fdi.indata;
1501 fsai->valid = 0;
1502
1503 bzero(&facp, sizeof(facp));
1504
1505 facp.xuid = vap->va_uid;
1506 facp.xgid = vap->va_gid;
1507
1508 if (vap->va_uid != (uid_t)VNOVAL) {
1509 facp.facc_flags |= FACCESS_CHOWN;
1510 fsai->uid = vap->va_uid;
1511 fsai->valid |= FATTR_UID;
1512 }
1513 if (vap->va_gid != (gid_t)VNOVAL) {
1514 facp.facc_flags |= FACCESS_CHOWN;
1515 fsai->gid = vap->va_gid;
1516 fsai->valid |= FATTR_GID;
1517 }
1518 if (vap->va_size != VNOVAL) {
1519
1520 struct fuse_filehandle *fufh = NULL;
1521
1522 /*Truncate to a new value. */
1523 fsai->size = vap->va_size;
1524 sizechanged = 1;
1525 newsize = vap->va_size;
1526 fsai->valid |= FATTR_SIZE;
1527
1528 fuse_filehandle_getrw(vp, FUFH_WRONLY, &fufh);
1529 if (fufh) {
1530 fsai->fh = fufh->fh_id;
1531 fsai->valid |= FATTR_FH;
1532 }
1533 }
1534 if (vap->va_atime.tv_sec != VNOVAL) {
1535 fsai->atime = vap->va_atime.tv_sec;
1536 fsai->atimensec = vap->va_atime.tv_nsec;
1537 fsai->valid |= FATTR_ATIME;
1538 }
1539 if (vap->va_mtime.tv_sec != VNOVAL) {
1540 fsai->mtime = vap->va_mtime.tv_sec;
1541 fsai->mtimensec = vap->va_mtime.tv_nsec;
1542 fsai->valid |= FATTR_MTIME;
1543 }
1544 if (vap->va_mode != (mode_t)VNOVAL) {
1545 fsai->mode = vap->va_mode & ALLPERMS;
1546 fsai->valid |= FATTR_MODE;
1547 }
1548 if (!fsai->valid) {
1549 goto out;
1550 }
1551 vtyp = vnode_vtype(vp);
1552
1553 if (fsai->valid & FATTR_SIZE && vtyp == VDIR) {
1554 err = EISDIR;
1555 goto out;
1556 }
1557 if (vfs_isrdonly(vnode_mount(vp)) && (fsai->valid & ~FATTR_SIZE || vtyp == VREG)) {
1558 err = EROFS;
1559 goto out;
1560 }
1561 if (fsai->valid & ~FATTR_SIZE) {
1562 /*err = fuse_internal_access(vp, VADMIN, context, &facp); */
1563 /*XXX */
1564 err = 0;
1565 }
1566 facp.facc_flags &= ~FACCESS_XQUERIES;
1567
1568 if (err && !(fsai->valid & ~(FATTR_ATIME | FATTR_MTIME)) &&
1569 vap->va_vaflags & VA_UTIMES_NULL) {
1570 err = fuse_internal_access(vp, VWRITE, &facp, td, cred);
1571 }
1572 if (err)
1573 goto out;
1574 if ((err = fdisp_wait_answ(&fdi)))
1575 goto out;
1576 vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode);
1577
1578 if (vnode_vtype(vp) != vtyp) {
1579 if (vnode_vtype(vp) == VNON && vtyp != VNON) {
1580 debug_printf("FUSE: Dang! vnode_vtype is VNON and vtype isn't.\n");
1581 } else {
1582 /*
1583 * STALE vnode, ditch
1584 *
1585 * The vnode has changed its type "behind our back". There's
1586 * nothing really we can do, so let us just force an internal
1587 * revocation and tell the caller to try again, if interested.
1588 */
1589 fuse_internal_vnode_disappear(vp);
1590 err = EAGAIN;
1591 }
1592 }
1593 if (!err && !sizechanged) {
1594 cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
1595 }
1596out:
1597 fdisp_destroy(&fdi);
1598 if (!err && sizechanged) {
1599 fuse_vnode_setsize(vp, cred, newsize);
1600 VTOFUD(vp)->flag &= ~FN_SIZECHANGE;
1601 }
1602 return err;
1603}
1604
1605/*
1606 struct vnop_strategy_args {
1607 struct vnode *a_vp;
1608 struct buf *a_bp;
1609 };
1610*/
1611static int
1612fuse_vnop_strategy(struct vop_strategy_args *ap)
1613{
1614 struct vnode *vp = ap->a_vp;
1615 struct buf *bp = ap->a_bp;
1616
1617 fuse_trace_printf_vnop();
1618
1619 if (!vp || fuse_isdeadfs(vp)) {
1620 bp->b_ioflags |= BIO_ERROR;
1621 bp->b_error = ENXIO;
1622 bufdone(bp);
1623 return ENXIO;
1624 }
1625 if (bp->b_iocmd == BIO_WRITE)
1626 fuse_vnode_refreshsize(vp, NOCRED);
1627
1628 (void)fuse_io_strategy(vp, bp);
1629
1630 /*
1631	 * This is a dangerous function. If it returns an error, that might mean a
1632	 * panic. We prefer pretty much anything over being forced to panic
1633	 * by a malicious daemon (a demon?). So we just return 0 anyway. You
1634	 * should never mind this: this function has its own error
1635	 * propagation mechanism via the argument buffer, so the
1636	 * not-that-melodramatic residents of the call chain will still be
1637 * able to know what to do.
1638 */
1639 return 0;
1640}
1641
1642
1643/*
1644 struct vnop_symlink_args {
1645 struct vnode *a_dvp;
1646 struct vnode **a_vpp;
1647 struct componentname *a_cnp;
1648 struct vattr *a_vap;
1649 char *a_target;
1650 };
1651*/
1652static int
1653fuse_vnop_symlink(struct vop_symlink_args *ap)
1654{
1655 struct vnode *dvp = ap->a_dvp;
1656 struct vnode **vpp = ap->a_vpp;
1657 struct componentname *cnp = ap->a_cnp;
1658 char *target = ap->a_target;
1659
1660 struct fuse_dispatcher fdi;
1661
1662 int err;
1663 size_t len;
1664
1665 FS_DEBUG2G("inode=%ju name=%*s\n",
1666 (uintmax_t)VTOI(dvp), (int)cnp->cn_namelen, cnp->cn_nameptr);
1667
1668 if (fuse_isdeadfs(dvp)) {
1669 return ENXIO;
1670 }
1671 /*
1672 * Unlike the other creator type calls, here we have to create a message
1673 * where the name of the new entry comes first, and the data describing
1674 * the entry comes second.
1675 * Hence we can't rely on our handy fuse_internal_newentry() routine,
1676 * but put together the message manually and just call the core part.
1677 */
1678
1679 len = strlen(target) + 1;
1680 fdisp_init(&fdi, len + cnp->cn_namelen + 1);
1681 fdisp_make_vp(&fdi, FUSE_SYMLINK, dvp, curthread, NULL);
1682
1683 memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
1684 ((char *)fdi.indata)[cnp->cn_namelen] = '\0';
1685 memcpy((char *)fdi.indata + cnp->cn_namelen + 1, target, len);
1686
1687 err = fuse_internal_newentry_core(dvp, vpp, cnp, VLNK, &fdi);
1688 fdisp_destroy(&fdi);
1689 return err;
1690}
1691
1692/*
1693 struct vnop_write_args {
1694 struct vnode *a_vp;
1695 struct uio *a_uio;
1696 int a_ioflag;
1697 struct ucred *a_cred;
1698 };
1699*/
1700static int
1701fuse_vnop_write(struct vop_write_args *ap)
1702{
1703 struct vnode *vp = ap->a_vp;
1704 struct uio *uio = ap->a_uio;
1705 int ioflag = ap->a_ioflag;
1706 struct ucred *cred = ap->a_cred;
1707
1708 fuse_trace_printf_vnop();
1709
1710 if (fuse_isdeadfs(vp)) {
1711 return ENXIO;
1712 }
1713 fuse_vnode_refreshsize(vp, cred);
1714
1715 return fuse_io_dispatch(vp, uio, ioflag, cred);
1716}
1717
1718/*
1719 struct vnop_getpages_args {
1720 struct vnode *a_vp;
1721 vm_page_t *a_m;
1722 int a_count;
1723 int a_reqpage;
1724 vm_ooffset_t a_offset;
1725 };
1726*/
1727static int
1728fuse_vnop_getpages(struct vop_getpages_args *ap)
1729{
1730 int i, error, nextoff, size, toff, count, npages;
1731 struct uio uio;
1732 struct iovec iov;
1733 vm_offset_t kva;
1734 struct buf *bp;
1735 struct vnode *vp;
1736 struct thread *td;
1737 struct ucred *cred;
1738 vm_page_t *pages;
1739
1740 FS_DEBUG2G("heh\n");
1741
1742 vp = ap->a_vp;
1743 KASSERT(vp->v_object, ("objectless vp passed to getpages"));
1744 td = curthread; /* XXX */
1745 cred = curthread->td_ucred; /* XXX */
1746 pages = ap->a_m;
1747 count = ap->a_count;
1748
1749 if (!fsess_opt_mmap(vnode_mount(vp))) {
1750 FS_DEBUG("called on non-cacheable vnode??\n");
1751 return (VM_PAGER_ERROR);
1752 }
1753 npages = btoc(count);
1754
1755 /*
1756 * If the requested page is partially valid, just return it and
1757 * allow the pager to zero-out the blanks. Partially valid pages
1758 * can only occur at the file EOF.
1759 */
1760
1761 VM_OBJECT_WLOCK(vp->v_object);
1762 fuse_vm_page_lock_queues();
1763 if (pages[ap->a_reqpage]->valid != 0) {
1764 for (i = 0; i < npages; ++i) {
1765 if (i != ap->a_reqpage) {
1766 fuse_vm_page_lock(pages[i]);
1767 vm_page_free(pages[i]);
1768 fuse_vm_page_unlock(pages[i]);
1769 }
1770 }
1771 fuse_vm_page_unlock_queues();
1772 VM_OBJECT_WUNLOCK(vp->v_object);
1773 return 0;
1774 }
1775 fuse_vm_page_unlock_queues();
1776 VM_OBJECT_WUNLOCK(vp->v_object);
1777
1778 /*
1779 * We use only the kva address for the buffer, but this is extremely
1780	 * convenient and fast.
1781 */
1782 bp = getpbuf(&fuse_pbuf_freecnt);
1783
1784 kva = (vm_offset_t)bp->b_data;
1785 pmap_qenter(kva, pages, npages);
1786 PCPU_INC(cnt.v_vnodein);
1787 PCPU_ADD(cnt.v_vnodepgsin, npages);
1788
1789 iov.iov_base = (caddr_t)kva;
1790 iov.iov_len = count;
1791 uio.uio_iov = &iov;
1792 uio.uio_iovcnt = 1;
1793 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
1794 uio.uio_resid = count;
1795 uio.uio_segflg = UIO_SYSSPACE;
1796 uio.uio_rw = UIO_READ;
1797 uio.uio_td = td;
1798
1799 error = fuse_io_dispatch(vp, &uio, IO_DIRECT, cred);
1800 pmap_qremove(kva, npages);
1801
1802 relpbuf(bp, &fuse_pbuf_freecnt);
1803
1804 if (error && (uio.uio_resid == count)) {
1805 FS_DEBUG("error %d\n", error);
1806 VM_OBJECT_WLOCK(vp->v_object);
1807 fuse_vm_page_lock_queues();
1808 for (i = 0; i < npages; ++i) {
1809 if (i != ap->a_reqpage) {
1810 fuse_vm_page_lock(pages[i]);
1811 vm_page_free(pages[i]);
1812 fuse_vm_page_unlock(pages[i]);
1813 }
1814 }
1815 fuse_vm_page_unlock_queues();
1816 VM_OBJECT_WUNLOCK(vp->v_object);
1817 return VM_PAGER_ERROR;
1818 }
1819 /*
1820 * Calculate the number of bytes read and validate only that number
1821 * of bytes. Note that due to pending writes, size may be 0. This
1822 * does not mean that the remaining data is invalid!
1823 */
1824
1825 size = count - uio.uio_resid;
1826 VM_OBJECT_WLOCK(vp->v_object);
1827 fuse_vm_page_lock_queues();
1828 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
1829 vm_page_t m;
1830
1831 nextoff = toff + PAGE_SIZE;
1832 m = pages[i];
1833
1834 if (nextoff <= size) {
1835 /*
1836 * Read operation filled an entire page
1837 */
1838 m->valid = VM_PAGE_BITS_ALL;
1839 KASSERT(m->dirty == 0,
1840 ("fuse_getpages: page %p is dirty", m));
1841 } else if (size > toff) {
1842 /*
1843 * Read operation filled a partial page.
1844 */
1845 m->valid = 0;
1846 vm_page_set_valid_range(m, 0, size - toff);
1847 KASSERT(m->dirty == 0,
1848 ("fuse_getpages: page %p is dirty", m));
1849 } else {
1850 /*
1851			 * Read operation was short. If no error occurred
1852 * we may have hit a zero-fill section. We simply
1853 * leave valid set to 0.
1854 */
1855 ;
1856 }
1857 if (i != ap->a_reqpage) {
1858 /*
1859 * Whether or not to leave the page activated is up in
1860 * the air, but we should put the page on a page queue
1861 * somewhere (it already is in the object). Result:
1862			 * It appears that empirical results show that
1863 * deactivating pages is best.
1864 */
1865
1866 /*
1867 * Just in case someone was asking for this page we
1868 * now tell them that it is ok to use.
1869 */
1870 if (!error) {
1871 if (m->oflags & VPO_WANTED) {
1872 fuse_vm_page_lock(m);
1873 vm_page_activate(m);
1874 fuse_vm_page_unlock(m);
1875 } else {
1876 fuse_vm_page_lock(m);
1877 vm_page_deactivate(m);
1878 fuse_vm_page_unlock(m);
1879 }
1880 vm_page_wakeup(m);
1881 } else {
1882 fuse_vm_page_lock(m);
1883 vm_page_free(m);
1884 fuse_vm_page_unlock(m);
1885 }
1886 }
1887 }
1888 fuse_vm_page_unlock_queues();
1889 VM_OBJECT_WUNLOCK(vp->v_object);
1890 return 0;
1891}
1892
1893/*
1894 struct vnop_putpages_args {
1895 struct vnode *a_vp;
1896 vm_page_t *a_m;
1897 int a_count;
1898 int a_sync;
1899 int *a_rtvals;
1900 vm_ooffset_t a_offset;
1901 };
1902*/
1903static int
1904fuse_vnop_putpages(struct vop_putpages_args *ap)
1905{
1906 struct uio uio;
1907 struct iovec iov;
1908 vm_offset_t kva;
1909 struct buf *bp;
1910 int i, error, npages, count;
1911 off_t offset;
1912 int *rtvals;
1913 struct vnode *vp;
1914 struct thread *td;
1915 struct ucred *cred;
1916 vm_page_t *pages;
1917 vm_ooffset_t fsize;
1918
1919 FS_DEBUG2G("heh\n");
1920
1921 vp = ap->a_vp;
1922 KASSERT(vp->v_object, ("objectless vp passed to putpages"));
1923 fsize = vp->v_object->un_pager.vnp.vnp_size;
1924 td = curthread; /* XXX */
1925 cred = curthread->td_ucred; /* XXX */
1926 pages = ap->a_m;
1927 count = ap->a_count;
1928 rtvals = ap->a_rtvals;
1929 npages = btoc(count);
1930 offset = IDX_TO_OFF(pages[0]->pindex);
1931
1932 if (!fsess_opt_mmap(vnode_mount(vp))) {
1933 FS_DEBUG("called on non-cacheable vnode??\n");
1934 }
1935 for (i = 0; i < npages; i++)
1936 rtvals[i] = VM_PAGER_AGAIN;
1937
1938 /*
1939 * When putting pages, do not extend file past EOF.
1940 */
1941
1942 if (offset + count > fsize) {
1943 count = fsize - offset;
1944 if (count < 0)
1945 count = 0;
1946 }
1947 /*
1948 * We use only the kva address for the buffer, but this is extremely
1949	 * convenient and fast.
1950 */
1951 bp = getpbuf(&fuse_pbuf_freecnt);
1952
1953 kva = (vm_offset_t)bp->b_data;
1954 pmap_qenter(kva, pages, npages);
1955 PCPU_INC(cnt.v_vnodeout);
1956 PCPU_ADD(cnt.v_vnodepgsout, count);
1957
1958 iov.iov_base = (caddr_t)kva;
1959 iov.iov_len = count;
1960 uio.uio_iov = &iov;
1961 uio.uio_iovcnt = 1;
1962 uio.uio_offset = offset;
1963 uio.uio_resid = count;
1964 uio.uio_segflg = UIO_SYSSPACE;
1965 uio.uio_rw = UIO_WRITE;
1966 uio.uio_td = td;
1967
1968 error = fuse_io_dispatch(vp, &uio, IO_DIRECT, cred);
1969
1970 pmap_qremove(kva, npages);
1971 relpbuf(bp, &fuse_pbuf_freecnt);
1972
1973 if (!error) {
1974 int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
1975
1976 for (i = 0; i < nwritten; i++) {
1977 rtvals[i] = VM_PAGER_OK;
1978 VM_OBJECT_WLOCK(pages[i]->object);
1979 vm_page_undirty(pages[i]);
1980 VM_OBJECT_WUNLOCK(pages[i]->object);
1981 }
1982 }
1983 return rtvals[0];
1984}
1985
1986/*
1987 struct vnop_print_args {
1988 struct vnode *a_vp;
1989 };
1990*/
1991static int
1992fuse_vnop_print(struct vop_print_args *ap)
1993{
1994 struct fuse_vnode_data *fvdat = VTOFUD(ap->a_vp);
1995
1996 printf("nodeid: %ju, parent nodeid: %ju, nlookup: %ju, flag: %#x\n",
1997 (uintmax_t)VTOILLU(ap->a_vp), (uintmax_t)fvdat->parent_nid,
1998 (uintmax_t)fvdat->nlookup,
1999 fvdat->flag);
2000
2001 return 0;
2002}