1/* 2 * 3 * Coda: an Experimental Distributed File System 4 * Release 3.1 5 * 6 * Copyright (c) 1987-1998 Carnegie Mellon University 7 * All Rights Reserved 8 * 9 * Permission to use, copy, modify and distribute this software and its 10 * documentation is hereby granted, provided that both the copyright 11 * notice and this permission notice appear in all copies of the 12 * software, derivative works or modified versions, and any portions 13 * thereof, and that both notices appear in supporting documentation, and 14 * that credit is given to Carnegie Mellon University in all documents 15 * and publicity pertaining to direct or indirect use of this code or its 16 * derivatives. 17 * 18 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS, 19 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS 20 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON 21 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER 22 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF 23 * ANY DERIVATIVE WORK. 24 * 25 * Carnegie Mellon encourages users of this software to return any 26 * improvements or extensions that they make, and to grant Carnegie 27 * Mellon the rights to redistribute these changes without encumbrance. 28 * 29 * @(#) src/sys/cfs/cfs_subr.c,v 1.1.1.1 1998/08/29 21:14:52 rvb Exp $ 30 * $Id: cfs_subr.c,v 1.2 1998/09/02 19:09:53 rvb Exp $ 31 * 32 */ 33 34/* 35 * Mach Operating System 36 * Copyright (c) 1989 Carnegie-Mellon University 37 * All rights reserved. The CMU software License Agreement specifies 38 * the terms and conditions for use and redistribution. 39 */ 40 41/* 42 * This code was written for the Coda file system at Carnegie Mellon 43 * University. Contributers include David Steere, James Kistler, and 44 * M. Satyanarayanan. 
*/ 45 46/* 47 * HISTORY 48 * $Log: cfs_subr.c,v $ 49 * Revision 1.2 1998/09/02 19:09:53 rvb 50 * Pass2 complete 51 * 52 * Revision 1.1.1.1 1998/08/29 21:14:52 rvb 53 * Very Preliminary Coda 54 * 55 * Revision 1.11 1998/08/28 18:12:18 rvb 56 * Now it also works on FreeBSD -current. This code will be 57 * committed to the FreeBSD -current and NetBSD -current 58 * trees. It will then be tailored to the particular platform 59 * by flushing conditional code. 60 * 61 * Revision 1.10 1998/08/18 17:05:16 rvb 62 * Don't use __RCSID now 63 * 64 * Revision 1.9 1998/08/18 16:31:41 rvb 65 * Sync the code for NetBSD -current; test on 1.3 later 66 * 67 * Revision 1.8 98/01/31 20:53:12 rvb 68 * First version that works on FreeBSD 2.2.5 69 * 70 * Revision 1.7 98/01/23 11:53:42 rvb
 71 * Bring RVB_CODA1_1 to HEAD
72 * 73 * Revision 1.6.2.3 98/01/23 11:21:05 rvb 74 * Sync with 2.2.5 75 * 76 * Revision 1.6.2.2 97/12/16 12:40:06 rvb 77 * Sync with 1.3 78 * 79 * Revision 1.6.2.1 97/12/06 17:41:21 rvb 80 * Sync with peters coda.h 81 * 82 * Revision 1.6 97/12/05 10:39:17 rvb 83 * Read CHANGES 84 * 85 * Revision 1.5.4.8 97/11/26 15:28:58 rvb 86 * Cant make downcall pbuf == union cfs_downcalls yet 87 * 88 * Revision 1.5.4.7 97/11/20 11:46:42 rvb 89 * Capture current cfs_venus 90 * 91 * Revision 1.5.4.6 97/11/18 10:27:16 rvb 92 * cfs_nbsd.c is DEAD!!!; integrated into cfs_vf/vnops.c 93 * cfs_nb_foo and cfs_foo are joined 94 * 95 * Revision 1.5.4.5 97/11/13 22:03:00 rvb 96 * pass2 cfs_NetBSD.h mt 97 * 98 * Revision 1.5.4.4 97/11/12 12:09:39 rvb 99 * reorg pass1 100 * 101 * Revision 1.5.4.3 97/11/06 21:02:38 rvb 102 * first pass at ^c ^z 103 * 104 * Revision 1.5.4.2 97/10/29 16:06:27 rvb 105 * Kill DYING 106 * 107 * Revision 1.5.4.1 97/10/28 23:10:16 rvb 108 * >64Meg; venus can be killed! 109 * 110 * Revision 1.5 97/08/05 11:08:17 lily
 111 * Removed cfsnc_replace, replaced it with a coda_find, unhash, and
112 * rehash. This fixes a cnode leak and a bug in which the fid is 113 * not actually replaced. (cfs_namecache.c, cfsnc.h, cfs_subr.c) 114 * 115 * Revision 1.4 96/12/12 22:10:59 bnoble 116 * Fixed the "downcall invokes venus operation" deadlock in all known cases. 117 * There may be more 118 * 119 * Revision 1.3 1996/12/05 16:20:15 bnoble 120 * Minor debugging aids 121 * 122 * Revision 1.2 1996/01/02 16:57:01 bnoble 123 * Added support for Coda MiniCache and raw inode calls (final commit) 124 * 125 * Revision 1.1.2.1 1995/12/20 01:57:27 bnoble
 126 * Added CODA-specific files
127 * 128 * Revision 3.1.1.1 1995/03/04 19:07:59 bnoble 129 * Branch for NetBSD port revisions 130 * 131 * Revision 3.1 1995/03/04 19:07:58 bnoble 132 * Bump to major revision 3 to prepare for NetBSD port 133 * 134 * Revision 2.8 1995/03/03 17:00:04 dcs 135 * Fixed kernel bug involving sleep and upcalls. Basically if you killed 136 * a job waiting on venus, the venus upcall queues got trashed. Depending 137 * on luck, you could kill the kernel or not. 138 * (mods to cfs_subr.c and cfs_mach.d) 139 * 140 * Revision 2.7 95/03/02 22:45:21 dcs 141 * Sun4 compatibility 142 * 143 * Revision 2.6 95/02/17 16:25:17 dcs 144 * These versions represent several changes: 145 * 1. Allow venus to restart even if outstanding references exist. 146 * 2. Have only one ctlvp per client, as opposed to one per mounted cfs device.d 147 * 3. Allow ody_expand to return many members, not just one. 148 * 149 * Revision 2.5 94/11/09 15:56:26 dcs 150 * Had the thread sleeping on the wrong thing! 151 * 152 * Revision 2.4 94/10/14 09:57:57 dcs 153 * Made changes 'cause sun4s have braindead compilers 154 * 155 * Revision 2.3 94/10/12 16:46:26 dcs 156 * Cleaned kernel/venus interface by removing XDR junk, plus 157 * so cleanup to allow this code to be more easily ported. 158 * 159 * Revision 1.2 92/10/27 17:58:22 lily 160 * merge kernel/latest and alpha/src/cfs 161 * 162 * Revision 2.4 92/09/30 14:16:26 mja 163 * Incorporated Dave Steere's fix for the GNU-Emacs bug.
 164 * Also, included his coda_flush routine in place of the former coda_nc_flush.
165 * [91/02/07 jjk] 166 * 167 * Added contributors blurb. 168 * [90/12/13 jjk] 169 * 170 * Hack to allow users to keep coda venus calls uninterruptible. THis 171 * basically prevents the Gnu-emacs bug from appearing, in which a call 172 * was being interrupted, and return EINTR, but gnu didn't check for the 173 * error and figured the file was buggered. 174 * [90/12/09 dcs] 175 * 176 * Revision 2.3 90/08/10 10:23:20 mrt 177 * Removed include of vm/vm_page.h as it no longer exists. 178 * [90/08/10 mrt] 179 * 180 * Revision 2.2 90/07/05 11:26:35 mrt 181 * Initialize name cache on first call to vcopen. 182 * [90/05/23 dcs] 183 * 184 * Created for the Coda File System. 185 * [90/05/23 dcs] 186 * 187 * Revision 1.5 90/05/31 17:01:35 dcs 188 * Prepare for merge with facilities kernel. 189 * 190 * Revision 1.2 90/03/19 15:56:25 dcs 191 * Initialize name cache on first call to vcopen. 192 * 193 * Revision 1.1 90/03/15 10:43:26 jjk 194 * Initial revision 195 * 196 */ 197 198/* NOTES: rvb
 199 * 1. Added coda_unmounting to mark all cnodes as being UNMOUNTING. This has to
200 * be done before dounmount is called. Because some of the routines that
 201 * dounmount calls before coda_unmounted might try to force flushes to venus.
202 * The vnode pager does this.
 203 * 2. coda_unmounting marks all cnodes scanning coda_cache.
204 * 3. cfs_checkunmounting (under DEBUG) checks all cnodes by chasing the vnodes 205 * under the /coda mount point.
 206 * 4. coda_cacheprint (under DEBUG) prints names with vnode/cnode address
207 */ 208
|
209#include <vcfs.h>
|
209#include <vcoda.h> |
210 211#include <sys/param.h> 212#include <sys/systm.h> 213#include <sys/proc.h> 214#include <sys/malloc.h> 215#include <sys/select.h> 216#include <sys/mount.h> 217 218#include <cfs/coda.h> 219#include <cfs/cnode.h> 220#include <cfs/cfs_subr.h> 221#include <cfs/cfsnc.h> 222
|
223int cfs_active = 0;
224int cfs_reuse = 0;
225int cfs_new = 0;
|
223int coda_active = 0; 224int coda_reuse = 0; 225int coda_new = 0; |
226
|
227struct cnode *cfs_freelist = NULL;
228struct cnode *cfs_cache[CFS_CACHESIZE];
|
227struct cnode *coda_freelist = NULL; 228struct cnode *coda_cache[CODA_CACHESIZE]; |
229
|
230#define cfshash(fid) (((fid)->Volume + (fid)->Vnode) & (CFS_CACHESIZE-1))
|
230#define coda_hash(fid) (((fid)->Volume + (fid)->Vnode) & (CODA_CACHESIZE-1)) |
231#define CNODE_NEXT(cp) ((cp)->c_next) 232#define ODD(vnode) ((vnode) & 0x1) 233 234/* 235 * Allocate a cnode. 236 */ 237struct cnode *
|
238cfs_alloc(void)
|
238coda_alloc(void) |
239{ 240 struct cnode *cp; 241
|
242 if (cfs_freelist) {
243 cp = cfs_freelist;
244 cfs_freelist = CNODE_NEXT(cp);
245 cfs_reuse++;
|
242 if (coda_freelist) { 243 cp = coda_freelist; 244 coda_freelist = CNODE_NEXT(cp); 245 coda_reuse++; |
246 } 247 else {
|
248 CFS_ALLOC(cp, struct cnode *, sizeof(struct cnode));
|
248 CODA_ALLOC(cp, struct cnode *, sizeof(struct cnode)); |
249 /* NetBSD vnodes don't have any Pager info in them ('cause there are 250 no external pagers, duh!) */ 251#define VNODE_VM_INFO_INIT(vp) /* MT */ 252 VNODE_VM_INFO_INIT(CTOV(cp));
|
253 cfs_new++;
|
253 coda_new++; |
254 } 255 bzero(cp, sizeof (struct cnode)); 256 257 return(cp); 258} 259 260/* 261 * Deallocate a cnode. 262 */ 263void
|
264cfs_free(cp)
|
264coda_free(cp) |
265 register struct cnode *cp; 266{ 267
|
268 CNODE_NEXT(cp) = cfs_freelist;
269 cfs_freelist = cp;
|
268 CNODE_NEXT(cp) = coda_freelist; 269 coda_freelist = cp; |
270} 271 272/* 273 * Put a cnode in the hash table 274 */ 275void
|
276cfs_save(cp)
|
276coda_save(cp) |
277 struct cnode *cp; 278{
|
279 CNODE_NEXT(cp) = cfs_cache[cfshash(&cp->c_fid)];
280 cfs_cache[cfshash(&cp->c_fid)] = cp;
|
279 CNODE_NEXT(cp) = coda_cache[coda_hash(&cp->c_fid)]; 280 coda_cache[coda_hash(&cp->c_fid)] = cp; |
281} 282 283/* 284 * Remove a cnode from the hash table 285 */ 286void
|
287cfs_unsave(cp)
|
287coda_unsave(cp) |
288 struct cnode *cp; 289{ 290 struct cnode *ptr; 291 struct cnode *ptrprev = NULL; 292
|
293 ptr = cfs_cache[cfshash(&cp->c_fid)];
|
293 ptr = coda_cache[coda_hash(&cp->c_fid)]; |
294 while (ptr != NULL) { 295 if (ptr == cp) { 296 if (ptrprev == NULL) {
|
297 cfs_cache[cfshash(&cp->c_fid)]
|
297 coda_cache[coda_hash(&cp->c_fid)] |
298 = CNODE_NEXT(ptr); 299 } else { 300 CNODE_NEXT(ptrprev) = CNODE_NEXT(ptr); 301 } 302 CNODE_NEXT(cp) = (struct cnode *)NULL; 303 304 return; 305 } 306 ptrprev = ptr; 307 ptr = CNODE_NEXT(ptr); 308 } 309} 310 311/* 312 * Lookup a cnode by fid. If the cnode is dying, it is bogus so skip it. 313 * NOTE: this allows multiple cnodes with same fid -- dcs 1/25/95 314 */ 315struct cnode *
|
316cfs_find(fid)
|
316coda_find(fid) |
317 ViceFid *fid; 318{ 319 struct cnode *cp; 320
|
321 cp = cfs_cache[cfshash(fid)];
|
321 cp = coda_cache[coda_hash(fid)]; |
322 while (cp) { 323 if ((cp->c_fid.Vnode == fid->Vnode) && 324 (cp->c_fid.Volume == fid->Volume) && 325 (cp->c_fid.Unique == fid->Unique) && 326 (!IS_UNMOUNTING(cp))) 327 {
|
328 cfs_active++;
|
328 coda_active++; |
329 return(cp); 330 } 331 cp = CNODE_NEXT(cp); 332 } 333 return(NULL); 334} 335 336/*
|
337 * cfs_kill is called as a side effect to vcopen. To prevent any
|
337 * coda_kill is called as a side effect to vcopen. To prevent any |
338 * cnodes left around from an earlier run of a venus or warden from 339 * causing problems with the new instance, mark any outstanding cnodes 340 * as dying. Future operations on these cnodes should fail (excepting
|
341 * cfs_inactive of course!). Since multiple venii/wardens can be
|
341 * coda_inactive of course!). Since multiple venii/wardens can be |
342 * running, only kill the cnodes for a particular entry in the
|
343 * cfs_mnttbl. -- DCS 12/1/94 */
|
343 * coda_mnttbl. -- DCS 12/1/94 */ |
344 345int
|
346cfs_kill(whoIam, dcstat)
|
346coda_kill(whoIam, dcstat) |
347 struct mount *whoIam; 348 enum dc_status dcstat; 349{ 350 int hash, count = 0; 351 struct cnode *cp; 352 353 /* 354 * Algorithm is as follows: 355 * Second, flush whatever vnodes we can from the name cache. 356 * 357 * Finally, step through whatever is left and mark them dying. 358 * This prevents any operation at all. 359 */ 360 361 /* This is slightly overkill, but should work. Eventually it'd be 362 * nice to only flush those entries from the namecache that 363 * reference a vnode in this vfs. */
|
364 cfsnc_flush(dcstat);
|
364 coda_nc_flush(dcstat); |
365
|
366 for (hash = 0; hash < CFS_CACHESIZE; hash++) {
367 for (cp = cfs_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) {
|
366 for (hash = 0; hash < CODA_CACHESIZE; hash++) { 367 for (cp = coda_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) { |
368 if (CTOV(cp)->v_mount == whoIam) { 369#ifdef DEBUG
|
370 printf("cfs_kill: vp %p, cp %p\n", CTOV(cp), cp);
|
370 printf("coda_kill: vp %p, cp %p\n", CTOV(cp), cp); |
371#endif 372 count++;
|
373 CFSDEBUG(CFS_FLUSH,
|
373 CODADEBUG(CODA_FLUSH, |
374 myprintf(("Live cnode fid %lx.%lx.%lx flags %d count %d\n", 375 (cp->c_fid).Volume, 376 (cp->c_fid).Vnode, 377 (cp->c_fid).Unique, 378 cp->c_flags, 379 CTOV(cp)->v_usecount)); ); 380 } 381 } 382 } 383 return count; 384} 385 386/* 387 * There are two reasons why a cnode may be in use, it may be in the 388 * name cache or it may be executing. 389 */ 390void
|
391cfs_flush(dcstat)
|
391coda_flush(dcstat) |
392 enum dc_status dcstat; 393{ 394 int hash; 395 struct cnode *cp; 396
|
397 cfs_clstat.ncalls++;
398 cfs_clstat.reqs[CFS_FLUSH]++;
|
397 coda_clstat.ncalls++; 398 coda_clstat.reqs[CODA_FLUSH]++; |
399
|
400 cfsnc_flush(dcstat); /* flush files from the name cache */
|
400 coda_nc_flush(dcstat); /* flush files from the name cache */ |
401
|
402 for (hash = 0; hash < CFS_CACHESIZE; hash++) {
403 for (cp = cfs_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) {
|
402 for (hash = 0; hash < CODA_CACHESIZE; hash++) { 403 for (cp = coda_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) { |
404 if (!ODD(cp->c_fid.Vnode)) /* only files can be executed */
|
405 cfs_vmflush(cp);
|
405 coda_vmflush(cp); |
406 } 407 } 408} 409 410/* 411 * As a debugging measure, print out any cnodes that lived through a 412 * name cache flush. 413 */ 414void
|
415cfs_testflush(void)
|
415coda_testflush(void) |
416{ 417 int hash; 418 struct cnode *cp; 419
|
420 for (hash = 0; hash < CFS_CACHESIZE; hash++) {
421 for (cp = cfs_cache[hash];
|
420 for (hash = 0; hash < CODA_CACHESIZE; hash++) { 421 for (cp = coda_cache[hash]; |
422 cp != NULL; 423 cp = CNODE_NEXT(cp)) { 424 myprintf(("Live cnode fid %lx.%lx.%lx count %d\n", 425 (cp->c_fid).Volume,(cp->c_fid).Vnode, 426 (cp->c_fid).Unique, CTOV(cp)->v_usecount)); 427 } 428 } 429} 430 431/* 432 * First, step through all cnodes and mark them unmounting. 433 * NetBSD kernels may try to fsync them now that venus 434 * is dead, which would be a bad thing. 435 * 436 */ 437void
|
438cfs_unmounting(whoIam)
|
438coda_unmounting(whoIam) |
439 struct mount *whoIam; 440{ 441 int hash; 442 struct cnode *cp; 443
|
444 for (hash = 0; hash < CFS_CACHESIZE; hash++) {
445 for (cp = cfs_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) {
|
444 for (hash = 0; hash < CODA_CACHESIZE; hash++) { 445 for (cp = coda_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) { |
446 if (CTOV(cp)->v_mount == whoIam) { 447 if (cp->c_flags & (C_LOCKED|C_WANTED)) {
|
448 printf("cfs_unmounting: Unlocking %p\n", cp);
|
448 printf("coda_unmounting: Unlocking %p\n", cp); |
449 cp->c_flags &= ~(C_LOCKED|C_WANTED); 450 wakeup((caddr_t) cp); 451 } 452 cp->c_flags |= C_UNMOUNTING; 453 } 454 } 455 } 456} 457 458#ifdef DEBUG
|
459void
460cfs_checkunmounting(mp)
|
459coda_checkunmounting(mp) |
460 struct mount *mp; 461{ 462 register struct vnode *vp, *nvp; 463 struct cnode *cp; 464 int count = 0, bad = 0; 465loop: 466 for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) { 467 if (vp->v_mount != mp) 468 goto loop; 469 nvp = vp->v_mntvnodes.le_next; 470 cp = VTOC(vp); 471 count++; 472 if (!(cp->c_flags & C_UNMOUNTING)) { 473 bad++; 474 printf("vp %p, cp %p missed\n", vp, cp); 475 cp->c_flags |= C_UNMOUNTING; 476 } 477 } 478} 479
|
481void
482cfs_cacheprint(whoIam)
|
480int 481coda_cacheprint(whoIam) |
482 struct mount *whoIam; 483{ 484 int hash; 485 struct cnode *cp; 486 int count = 0; 487
|
489 printf("cfs_cacheprint: cfs_ctlvp %p, cp %p", cfs_ctlvp, VTOC(cfs_ctlvp));
490 cfsnc_name(VTOC(cfs_ctlvp));
|
488 printf("coda_cacheprint: coda_ctlvp %p, cp %p", coda_ctlvp, VTOC(coda_ctlvp)); 489 coda_nc_name(coda_ctlvp); |
490 printf("\n"); 491
|
493 for (hash = 0; hash < CFS_CACHESIZE; hash++) {
494 for (cp = cfs_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) {
|
492 for (hash = 0; hash < CODA_CACHESIZE; hash++) { 493 for (cp = coda_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) { |
494 if (CTOV(cp)->v_mount == whoIam) {
|
496 printf("cfs_cacheprint: vp %p, cp %p", CTOV(cp), cp);
497 cfsnc_name(cp);
|
495 printf("coda_cacheprint: vp %p, cp %p", CTOV(cp), cp); 496 coda_nc_name(cp); |
497 printf("\n"); 498 count++; 499 } 500 } 501 }
|
503 printf("cfs_cacheprint: count %d\n", count);
|
502 printf("coda_cacheprint: count %d\n", count); |
503} 504#endif 505 506/* 507 * There are 6 cases where invalidations occur. The semantics of each 508 * is listed here. 509 *
|
511 * CFS_FLUSH -- flush all entries from the name cache and the cnode cache.
512 * CFS_PURGEUSER -- flush all entries from the name cache for a specific user
|
510 * CODA_FLUSH -- flush all entries from the name cache and the cnode cache. 511 * CODA_PURGEUSER -- flush all entries from the name cache for a specific user |
512 * This call is a result of token expiration. 513 * 514 * The next two are the result of callbacks on a file or directory.
|
516 * CFS_ZAPDIR -- flush the attributes for the dir from its cnode.
|
515 * CODA_ZAPDIR -- flush the attributes for the dir from its cnode. |
516 * Zap all children of this directory from the namecache.
|
518 * CFS_ZAPFILE -- flush the attributes for a file.
|
517 * CODA_ZAPFILE -- flush the attributes for a file. |
518 * 519 * The fifth is a result of Venus detecting an inconsistent file.
|
521 * CFS_PURGEFID -- flush the attribute for the file
|
520 * CODA_PURGEFID -- flush the attribute for the file |
521 * If it is a dir (odd vnode), purge its 522 * children from the namecache 523 * remove the file from the namecache. 524 * 525 * The sixth allows Venus to replace local fids with global ones 526 * during reintegration. 527 *
|
529 * CFS_REPLACE -- replace one ViceFid with another throughout the name cache
|
528 * CODA_REPLACE -- replace one ViceFid with another throughout the name cache |
529 */ 530 531int handleDownCall(opcode, out) 532 int opcode; union outputArgs *out; 533{ 534 int error; 535 536 /* Handle invalidate requests. */ 537 switch (opcode) {
|
539 case CFS_FLUSH : {
|
538 case CODA_FLUSH : { |
539
|
541 cfs_flush(IS_DOWNCALL);
|
540 coda_flush(IS_DOWNCALL); |
541
|
543 CFSDEBUG(CFS_FLUSH,cfs_testflush();) /* print remaining cnodes */
|
542 CODADEBUG(CODA_FLUSH,coda_testflush();) /* print remaining cnodes */ |
543 return(0); 544 } 545
|
547 case CFS_PURGEUSER : {
548 cfs_clstat.ncalls++;
549 cfs_clstat.reqs[CFS_PURGEUSER]++;
|
546 case CODA_PURGEUSER : { 547 coda_clstat.ncalls++; 548 coda_clstat.reqs[CODA_PURGEUSER]++; |
549 550 /* XXX - need to prevent fsync's */
|
552 cfsnc_purge_user(out->cfs_purgeuser.cred.cr_uid, IS_DOWNCALL);
|
551 coda_nc_purge_user(out->coda_purgeuser.cred.cr_uid, IS_DOWNCALL); |
552 return(0); 553 } 554
|
556 case CFS_ZAPFILE : {
|
555 case CODA_ZAPFILE : { |
556 struct cnode *cp; 557 558 error = 0;
|
560 cfs_clstat.ncalls++;
561 cfs_clstat.reqs[CFS_ZAPFILE]++;
|
559 coda_clstat.ncalls++; 560 coda_clstat.reqs[CODA_ZAPFILE]++; |
561
|
563 cp = cfs_find(&out->cfs_zapfile.CodaFid);
|
562 cp = coda_find(&out->coda_zapfile.CodaFid); |
563 if (cp != NULL) { 564 vref(CTOV(cp)); 565 566 cp->c_flags &= ~C_VATTR; 567 if (CTOV(cp)->v_flag & VTEXT)
|
569 error = cfs_vmflush(cp);
570 CFSDEBUG(CFS_ZAPFILE, myprintf(("zapfile: fid = (%lx.%lx.%lx),
|
568 error = coda_vmflush(cp); 569 CODADEBUG(CODA_ZAPFILE, myprintf(("zapfile: fid = (%lx.%lx.%lx), |
570 refcnt = %d, error = %d\n", 571 cp->c_fid.Volume, 572 cp->c_fid.Vnode, 573 cp->c_fid.Unique, 574 CTOV(cp)->v_usecount - 1, error));); 575 if (CTOV(cp)->v_usecount == 1) { 576 cp->c_flags |= C_PURGING; 577 } 578 vrele(CTOV(cp)); 579 } 580 581 return(error); 582 } 583
|
585 case CFS_ZAPDIR : {
|
584 case CODA_ZAPDIR : { |
585 struct cnode *cp; 586
|
588 cfs_clstat.ncalls++;
589 cfs_clstat.reqs[CFS_ZAPDIR]++;
|
587 coda_clstat.ncalls++; 588 coda_clstat.reqs[CODA_ZAPDIR]++; |
589
|
591 cp = cfs_find(&out->cfs_zapdir.CodaFid);
|
590 cp = coda_find(&out->coda_zapdir.CodaFid); |
591 if (cp != NULL) { 592 vref(CTOV(cp)); 593 594 cp->c_flags &= ~C_VATTR;
|
596 cfsnc_zapParentfid(&out->cfs_zapdir.CodaFid, IS_DOWNCALL);
|
595 coda_nc_zapParentfid(&out->coda_zapdir.CodaFid, IS_DOWNCALL); |
596
|
598 CFSDEBUG(CFS_ZAPDIR, myprintf(("zapdir: fid = (%lx.%lx.%lx),
|
597 CODADEBUG(CODA_ZAPDIR, myprintf(("zapdir: fid = (%lx.%lx.%lx), |
598 refcnt = %d\n",cp->c_fid.Volume, 599 cp->c_fid.Vnode, 600 cp->c_fid.Unique, 601 CTOV(cp)->v_usecount - 1));); 602 if (CTOV(cp)->v_usecount == 1) { 603 cp->c_flags |= C_PURGING; 604 } 605 vrele(CTOV(cp)); 606 } 607 608 return(0); 609 } 610
|
612 case CFS_ZAPVNODE : {
613 cfs_clstat.ncalls++;
614 cfs_clstat.reqs[CFS_ZAPVNODE]++;
|
611 case CODA_ZAPVNODE : { 612 coda_clstat.ncalls++; 613 coda_clstat.reqs[CODA_ZAPVNODE]++; |
614
|
616 myprintf(("CFS_ZAPVNODE: Called, but uniplemented\n"));
|
615 myprintf(("CODA_ZAPVNODE: Called, but uniplemented\n")); |
616 /* 617 * Not that below we must really translate the returned coda_cred to 618 * a netbsd cred. This is a bit muddled at present and the cfsnc_zapnode 619 * is further unimplemented, so punt! 620 * I suppose we could use just the uid. 621 */
|
623 /* cfsnc_zapvnode(&out->cfs_zapvnode.VFid, &out->cfs_zapvnode.cred,
|
622 /* coda_nc_zapvnode(&out->coda_zapvnode.VFid, &out->coda_zapvnode.cred, |
623 IS_DOWNCALL); */ 624 return(0); 625 } 626
|
628 case CFS_PURGEFID : {
|
627 case CODA_PURGEFID : { |
628 struct cnode *cp; 629 630 error = 0;
|
632 cfs_clstat.ncalls++;
633 cfs_clstat.reqs[CFS_PURGEFID]++;
|
631 coda_clstat.ncalls++; 632 coda_clstat.reqs[CODA_PURGEFID]++; |
633
|
635 cp = cfs_find(&out->cfs_purgefid.CodaFid);
|
634 cp = coda_find(&out->coda_purgefid.CodaFid); |
635 if (cp != NULL) { 636 vref(CTOV(cp));
|
638 if (ODD(out->cfs_purgefid.CodaFid.Vnode)) { /* Vnode is a directory */
639 cfsnc_zapParentfid(&out->cfs_purgefid.CodaFid,
|
637 if (ODD(out->coda_purgefid.CodaFid.Vnode)) { /* Vnode is a directory */ 638 coda_nc_zapParentfid(&out->coda_purgefid.CodaFid, |
639 IS_DOWNCALL); 640 } 641 cp->c_flags &= ~C_VATTR;
|
643 cfsnc_zapfid(&out->cfs_purgefid.CodaFid, IS_DOWNCALL);
644 if (!(ODD(out->cfs_purgefid.CodaFid.Vnode))
|
642 coda_nc_zapfid(&out->coda_purgefid.CodaFid, IS_DOWNCALL); 643 if (!(ODD(out->coda_purgefid.CodaFid.Vnode)) |
644 && (CTOV(cp)->v_flag & VTEXT)) { 645
|
647 error = cfs_vmflush(cp);
|
646 error = coda_vmflush(cp); |
647 }
|
649 CFSDEBUG(CFS_PURGEFID, myprintf(("purgefid: fid = (%lx.%lx.%lx), refcnt = %d, error = %d\n",
|
648 CODADEBUG(CODA_PURGEFID, myprintf(("purgefid: fid = (%lx.%lx.%lx), refcnt = %d, error = %d\n", |
649 cp->c_fid.Volume, cp->c_fid.Vnode, 650 cp->c_fid.Unique, 651 CTOV(cp)->v_usecount - 1, error));); 652 if (CTOV(cp)->v_usecount == 1) { 653 cp->c_flags |= C_PURGING; 654 } 655 vrele(CTOV(cp)); 656 } 657 return(error); 658 } 659
|
661 case CFS_REPLACE : {
|
660 case CODA_REPLACE : { |
661 struct cnode *cp = NULL; 662
|
664 cfs_clstat.ncalls++;
665 cfs_clstat.reqs[CFS_REPLACE]++;
|
663 coda_clstat.ncalls++; 664 coda_clstat.reqs[CODA_REPLACE]++; |
665
|
667 cp = cfs_find(&out->cfs_replace.OldFid);
|
666 cp = coda_find(&out->coda_replace.OldFid); |
667 if (cp != NULL) { 668 /* remove the cnode from the hash table, replace the fid, and reinsert */ 669 vref(CTOV(cp));
|
671 cfs_unsave(cp);
672 cp->c_fid = out->cfs_replace.NewFid;
673 cfs_save(cp);
|
670 coda_unsave(cp); 671 cp->c_fid = out->coda_replace.NewFid; 672 coda_save(cp); |
673
|
675 CFSDEBUG(CFS_REPLACE, myprintf(("replace: oldfid = (%lx.%lx.%lx), newfid = (%lx.%lx.%lx), cp = %p\n",
676 out->cfs_replace.OldFid.Volume,
677 out->cfs_replace.OldFid.Vnode,
678 out->cfs_replace.OldFid.Unique,
|
674 CODADEBUG(CODA_REPLACE, myprintf(("replace: oldfid = (%lx.%lx.%lx), newfid = (%lx.%lx.%lx), cp = %p\n", 675 out->coda_replace.OldFid.Volume, 676 out->coda_replace.OldFid.Vnode, 677 out->coda_replace.OldFid.Unique, |
678 cp->c_fid.Volume, cp->c_fid.Vnode, 679 cp->c_fid.Unique, cp));) 680 vrele(CTOV(cp)); 681 } 682 return (0); 683 } 684 default: 685 myprintf(("handleDownCall: unknown opcode %d\n", opcode)); 686 return (EINVAL); 687 } 688} 689
/* coda_grab_vnode: lives in either cfs_mach.c or cfs_nbsd.c */

/*
 * Flush VM pages backing a cnode.  Stubbed out on this platform:
 * always reports success.
 */
int
coda_vmflush(cp)
	struct cnode *cp;
{
	return 0;
}
|
704void cfs_debugon(void)
|
703void coda_debugon(void) |
704{
|
706 cfsdebug = -1;
707 cfsnc_debug = -1;
708 cfs_vnop_print_entry = 1;
709 cfs_psdev_print_entry = 1;
710 cfs_vfsop_print_entry = 1;
|
705 codadebug = -1; 706 coda_nc_debug = -1; 707 coda_vnop_print_entry = 1; 708 coda_psdev_print_entry = 1; 709 coda_vfsop_print_entry = 1; |
710} 711
|
713void cfs_debugoff(void)
|
712void coda_debugoff(void) |
713{
|
715 cfsdebug = 0;
716 cfsnc_debug = 0;
717 cfs_vnop_print_entry = 0;
718 cfs_psdev_print_entry = 0;
719 cfs_vfsop_print_entry = 0;
|
714 codadebug = 0; 715 coda_nc_debug = 0; 716 coda_vnop_print_entry = 0; 717 coda_psdev_print_entry = 0; 718 coda_vfsop_print_entry = 0; |
719} 720 721/* 722 * Utilities used by both client and server 723 * Standard levels: 724 * 0) no debugging 725 * 1) hard failures 726 * 2) soft failures 727 * 3) current test software 728 * 4) main procedure entry points 729 * 5) main procedure exit points 730 * 6) utility procedure entry points 731 * 7) utility procedure exit points 732 * 8) obscure procedure entry points 733 * 9) obscure procedure exit points 734 * 10) random stuff 735 * 11) all <= 1 736 * 12) all <= 2 737 * 13) all <= 3 738 * ... 739 */
|