1/*
2 * Copyright (c) 1997-2006 Erez Zadok
3 * Copyright (c) 1990 Jan-Simon Pendry
4 * Copyright (c) 1990 Imperial College of Science, Technology & Medicine
5 * Copyright (c) 1990 The Regents of the University of California.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * Jan-Simon Pendry at Imperial College, London.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgment:
21 *      This product includes software developed by the University of
22 *      California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 *    may be used to endorse or promote products derived from this software
25 *    without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 *
40 * File: am-utils/amd/nfs_subr.c
41 *
42 */
43
44#ifdef HAVE_CONFIG_H
45# include <config.h>
46#endif /* HAVE_CONFIG_H */
47#include <am_defs.h>
48#include <amd.h>
49
50/*
51 * Convert from UN*X to NFS error code.
52 * Some systems like linux define their own (see
53 * conf/mount/mount_linux.h).
54 */
55#ifndef nfs_error
56# define nfs_error(e) ((nfsstat)(e))
57#endif /* nfs_error */
58
59/*
60 * File Handle structure
61 *
62 * This is interpreted by indexing the exported array
63 * by fhh_id (for old-style filehandles), or by retrieving
64 * the node name from fhh_path (for new-style filehandles).
65 *
66 * The whole structure is mapped onto a standard fhandle_t
67 * when transmitted.
68 */
struct am_fh {
  u_int fhh_gen;				/* generation number of the am_node */
  union {
    struct {
      int fhh_type;				/* old or new am_fh: 0 => old-style (pid/id); non-zero => new-style (path overlays this union) */
      pid_t fhh_pid;				/* process id of the amd that issued the handle */
      int fhh_id;				/* map id: index into the exported-node array */
    } s;
    char fhh_path[NFS_FHSIZE-sizeof(u_int)];	/* path to am_node; may fill the array with no NUL (decoder adds one) */
  } u;
};
80
81
82/* forward declarations */
83/* converting am-filehandles to mount-points */
84static am_node *fh_to_mp3(am_nfs_fh *fhp, int *rp, int vop);
85static am_node *fh_to_mp(am_nfs_fh *fhp);
86static void count_map_entries(const am_node *mp, u_int *out_blocks, u_int *out_bfree, u_int *out_bavail);
87
88
89static char *
90do_readlink(am_node *mp, int *error_return)
91{
92  char *ln;
93
94  /*
95   * If there is a readlink method then use it,
96   * otherwise if a link exists use that,
97   * otherwise use the mount point.
98   */
99  if (mp->am_mnt->mf_ops->readlink) {
100    int retry = 0;
101    mp = (*mp->am_mnt->mf_ops->readlink) (mp, &retry);
102    if (mp == 0) {
103      *error_return = retry;
104      return 0;
105    }
106    /* reschedule_timeout_mp(); */
107  }
108
109  if (mp->am_link) {
110    ln = mp->am_link;
111  } else {
112    ln = mp->am_mnt->mf_mount;
113  }
114
115  return ln;
116}
117
118
119voidp
120nfsproc_null_2_svc(voidp argp, struct svc_req *rqstp)
121{
122  static char res;
123
124  return (voidp) &res;
125}
126
127
128nfsattrstat *
129nfsproc_getattr_2_svc(am_nfs_fh *argp, struct svc_req *rqstp)
130{
131  static nfsattrstat res;
132  am_node *mp;
133  int retry;
134  time_t now = clocktime(NULL);
135
136  if (amuDebug(D_TRACE))
137    plog(XLOG_DEBUG, "getattr:");
138
139  mp = fh_to_mp3(argp, &retry, VLOOK_CREATE);
140  if (mp == 0) {
141    if (amuDebug(D_TRACE))
142      plog(XLOG_DEBUG, "\tretry=%d", retry);
143
144    if (retry < 0) {
145      amd_stats.d_drops++;
146      return 0;
147    }
148    res.ns_status = nfs_error(retry);
149    return &res;
150  }
151
152  res = mp->am_attr;
153  if (amuDebug(D_TRACE))
154    plog(XLOG_DEBUG, "\tstat(%s), size = %d, mtime=%ld.%ld",
155	 mp->am_path,
156	 (int) res.ns_u.ns_attr_u.na_size,
157	 (long) res.ns_u.ns_attr_u.na_mtime.nt_seconds,
158	 (long) res.ns_u.ns_attr_u.na_mtime.nt_useconds);
159
160  /* Delay unmount of what was looked up */
161  if (mp->am_timeo_w < 4 * gopt.am_timeo_w)
162    mp->am_timeo_w += gopt.am_timeo_w;
163  mp->am_ttl = now + mp->am_timeo_w;
164
165  mp->am_stats.s_getattr++;
166  return &res;
167}
168
169
170nfsattrstat *
171nfsproc_setattr_2_svc(nfssattrargs *argp, struct svc_req *rqstp)
172{
173  static nfsattrstat res;
174
175  if (!fh_to_mp(&argp->sag_fhandle))
176    res.ns_status = nfs_error(ESTALE);
177  else
178    res.ns_status = nfs_error(EROFS);
179
180  return &res;
181}
182
183
184voidp
185nfsproc_root_2_svc(voidp argp, struct svc_req *rqstp)
186{
187  static char res;
188
189  return (voidp) &res;
190}
191
192
nfsdiropres *
nfsproc_lookup_2_svc(nfsdiropargs *argp, struct svc_req *rqstp)
{
  /*
   * NFS LOOKUP: resolve a name under an automount point, triggering a
   * mount via the map's lookup_child/mount_child methods if needed.
   * A NULL return tells the dispatcher to drop the request (no reply);
   * the client will retransmit, by which time the mount may be done.
   */
  static nfsdiropres res;
  am_node *mp;
  int retry;
  uid_t uid;
  gid_t gid;

  if (amuDebug(D_TRACE))
    plog(XLOG_DEBUG, "lookup:");

  /* finally, find the effective uid/gid from RPC request */
  if (getcreds(rqstp, &uid, &gid, nfsxprt) < 0)
    plog(XLOG_ERROR, "cannot get uid/gid from RPC credentials");
  /* publish caller credentials in the global uid/gid option strings */
  xsnprintf(opt_uid, sizeof(uid_str), "%d", (int) uid);
  xsnprintf(opt_gid, sizeof(gid_str), "%d", (int) gid);

  mp = fh_to_mp3(&argp->da_fhandle, &retry, VLOOK_CREATE);
  if (mp == 0) {
    if (retry < 0) {
      amd_stats.d_drops++;	/* negative code: drop, send no reply */
      return 0;
    }
    res.dr_status = nfs_error(retry);
  } else {
    int error;
    am_node *ap;
    if (amuDebug(D_TRACE))
      plog(XLOG_DEBUG, "\tlookup(%s, %s)", mp->am_path, argp->da_name);
    /* a node returned with error < 0 is handed on to mount_child */
    ap = mp->am_mnt->mf_ops->lookup_child(mp, argp->da_name, &error, VLOOK_CREATE);
    if (ap && error < 0)
      ap = mp->am_mnt->mf_ops->mount_child(ap, &error);
    if (ap == 0) {
      if (error < 0) {
	amd_stats.d_drops++;	/* mount still in progress: drop */
	return 0;
      }
      res.dr_status = nfs_error(error);
    } else {
      /*
       * XXX: EXPERIMENTAL! Delay unmount of what was looked up.  This
       * should reduce the chance for race condition between unmounting an
       * entry synchronously, and re-mounting it asynchronously.
       */
      if (ap->am_ttl < mp->am_ttl)
 	ap->am_ttl = mp->am_ttl;
      mp_to_fh(ap, &res.dr_u.dr_drok_u.drok_fhandle);
      res.dr_u.dr_drok_u.drok_attributes = ap->am_fattr;
      res.dr_status = NFS_OK;
    }
    mp->am_stats.s_lookup++;
    /* reschedule_timeout_mp(); */
  }

  return &res;
}
250
251
void
nfs_quick_reply(am_node *mp, int error)
{
  /*
   * Send a deferred reply to an earlier NFS lookup whose answer was
   * not ready at the time (e.g. a background mount that has since
   * completed).  mp->am_transp is the transport saved from the
   * original request; if it is NULL there is nothing to reply to.
   */
  SVCXPRT *transp = mp->am_transp;
  nfsdiropres res;
  xdrproc_t xdr_result = (xdrproc_t) xdr_diropres;

  /*
   * If there's a transp structure then we can reply to the client's
   * nfs lookup request.
   */
  if (transp) {
    if (error == 0) {
      /*
       * Construct a valid reply to a lookup request.  Same
       * code as in nfsproc_lookup_2_svc() above.
       */
      mp_to_fh(mp, &res.dr_u.dr_drok_u.drok_fhandle);
      res.dr_u.dr_drok_u.drok_attributes = mp->am_fattr;
      res.dr_status = NFS_OK;
    } else
      /*
       * Return the error that was passed to us.
       */
      res.dr_status = nfs_error(error);

    /*
     * Send off our reply
     */
    if (!svc_sendreply(transp, (XDRPROC_T_TYPE) xdr_result, (SVC_IN_ARG_TYPE) & res))
      svcerr_systemerr(transp);

    /*
     * Free up transp.  It's only used for one reply.
     * NOTE(review): assumes XFREE also resets mp->am_transp to NULL so
     * a later call becomes a no-op -- confirm the macro's semantics.
     */
    XFREE(mp->am_transp);
    dlog("Quick reply sent for %s", mp->am_mnt->mf_mount);
  }
}
291
292
nfsreadlinkres *
nfsproc_readlink_2_svc(am_nfs_fh *argp, struct svc_req *rqstp)
{
  /*
   * NFS READLINK: automount entries appear to clients as symlinks;
   * the target string is computed by do_readlink().  A NULL return
   * tells the dispatcher to drop the request without replying.
   */
  static nfsreadlinkres res;
  am_node *mp;
  int retry;

  if (amuDebug(D_TRACE))
    plog(XLOG_DEBUG, "readlink:");

  mp = fh_to_mp3(argp, &retry, VLOOK_CREATE);
  if (mp == 0) {
    /*
     * Shared error path: also entered via goto from the success branch
     * below when do_readlink() fails, with retry holding the error
     * code it stored.
     */
  readlink_retry:
    if (retry < 0) {
      amd_stats.d_drops++;
      return 0;
    }
    res.rlr_status = nfs_error(retry);
  } else {
    char *ln = do_readlink(mp, &retry);
    if (ln == 0)
      goto readlink_retry;
    res.rlr_status = NFS_OK;
    if (amuDebug(D_TRACE) && ln)
      plog(XLOG_DEBUG, "\treadlink(%s) = %s", mp->am_path, ln);
    res.rlr_u.rlr_data_u = ln;
    mp->am_stats.s_readlink++;
  }

  return &res;
}
324
325
nfsreadres *
nfsproc_read_2_svc(nfsreadargs *argp, struct svc_req *rqstp)
{
  /*
   * NFS READ: amd serves only a namespace of directories and
   * symlinks; file data reads are always refused with EACCES.
   */
  static nfsreadres res;

  memset((char *) &res, 0, sizeof(res));
  res.rr_status = nfs_error(EACCES);

  return &res;
}
336
337
338voidp
339nfsproc_writecache_2_svc(voidp argp, struct svc_req *rqstp)
340{
341  static char res;
342
343  return (voidp) &res;
344}
345
346
347nfsattrstat *
348nfsproc_write_2_svc(nfswriteargs *argp, struct svc_req *rqstp)
349{
350  static nfsattrstat res;
351
352  if (!fh_to_mp(&argp->wra_fhandle))
353    res.ns_status = nfs_error(ESTALE);
354  else
355    res.ns_status = nfs_error(EROFS);
356
357  return &res;
358}
359
360
361nfsdiropres *
362nfsproc_create_2_svc(nfscreateargs *argp, struct svc_req *rqstp)
363{
364  static nfsdiropres res;
365
366  if (!fh_to_mp(&argp->ca_where.da_fhandle))
367    res.dr_status = nfs_error(ESTALE);
368  else
369    res.dr_status = nfs_error(EROFS);
370
371  return &res;
372}
373
374
static nfsstat *
unlink_or_rmdir(nfsdiropargs *argp, struct svc_req *rqstp, int unlinkp)
{
  /*
   * Shared implementation of NFS REMOVE and RMDIR.  "Removing" an
   * automount entry forcibly times out its mount rather than touching
   * any real filesystem.  The unlinkp flag (TRUE for remove, FALSE
   * for rmdir) is currently unused -- both operations are identical.
   * A NULL return tells the dispatcher to drop the request.
   */
  static nfsstat res;
  int retry;

  am_node *mp = fh_to_mp3(&argp->da_fhandle, &retry, VLOOK_DELETE);
  if (mp == 0) {
    if (retry < 0) {
      amd_stats.d_drops++;
      return 0;
    }
    res = nfs_error(retry);
    goto out;
  }

  /* the parent handle must name a directory (an automount point) */
  if (mp->am_fattr.na_type != NFDIR) {
    res = nfs_error(ENOTDIR);
    goto out;
  }

  if (amuDebug(D_TRACE))
    plog(XLOG_DEBUG, "\tremove(%s, %s)", mp->am_path, argp->da_name);

  /* from here retry doubles as the error out-param of lookup_child */
  mp = mp->am_mnt->mf_ops->lookup_child(mp, argp->da_name, &retry, VLOOK_DELETE);
  if (mp == 0) {
    /*
     * Ignore retries...
     */
    if (retry < 0)
      retry = 0;
    /*
     * Usual NFS workaround...
     */
    else if (retry == ENOENT)
      retry = 0;
    res = nfs_error(retry);
  } else {
    forcibly_timeout_mp(mp);
    res = NFS_OK;
  }

out:
  return &res;
}
420
421
nfsstat *
nfsproc_remove_2_svc(nfsdiropargs *argp, struct svc_req *rqstp)
{
  /* NFS REMOVE: delegate to the shared remove/rmdir handler. */
  return unlink_or_rmdir(argp, rqstp, TRUE);
}
427
428
429nfsstat *
430nfsproc_rename_2_svc(nfsrenameargs *argp, struct svc_req *rqstp)
431{
432  static nfsstat res;
433
434  if (!fh_to_mp(&argp->rna_from.da_fhandle) || !fh_to_mp(&argp->rna_to.da_fhandle))
435    res = nfs_error(ESTALE);
436  /*
437   * If the kernel is doing clever things with referenced files
438   * then let it pretend...
439   */
440  else if (NSTREQ(argp->rna_to.da_name, ".nfs", 4))
441    res = NFS_OK;
442  /*
443   * otherwise a failure
444   */
445  else
446    res = nfs_error(EROFS);
447
448  return &res;
449}
450
451
452nfsstat *
453nfsproc_link_2_svc(nfslinkargs *argp, struct svc_req *rqstp)
454{
455  static nfsstat res;
456
457  if (!fh_to_mp(&argp->la_fhandle) || !fh_to_mp(&argp->la_to.da_fhandle))
458    res = nfs_error(ESTALE);
459  else
460    res = nfs_error(EROFS);
461
462  return &res;
463}
464
465
466nfsstat *
467nfsproc_symlink_2_svc(nfssymlinkargs *argp, struct svc_req *rqstp)
468{
469  static nfsstat res;
470
471  if (!fh_to_mp(&argp->sla_from.da_fhandle))
472    res = nfs_error(ESTALE);
473  else
474    res = nfs_error(EROFS);
475
476  return &res;
477}
478
479
480nfsdiropres *
481nfsproc_mkdir_2_svc(nfscreateargs *argp, struct svc_req *rqstp)
482{
483  static nfsdiropres res;
484
485  if (!fh_to_mp(&argp->ca_where.da_fhandle))
486    res.dr_status = nfs_error(ESTALE);
487  else
488    res.dr_status = nfs_error(EROFS);
489
490  return &res;
491}
492
493
nfsstat *
nfsproc_rmdir_2_svc(nfsdiropargs *argp, struct svc_req *rqstp)
{
  /* NFS RMDIR: delegate to the shared remove/rmdir handler. */
  return unlink_or_rmdir(argp, rqstp, FALSE);
}
499
500
501nfsreaddirres *
502nfsproc_readdir_2_svc(nfsreaddirargs *argp, struct svc_req *rqstp)
503{
504  static nfsreaddirres res;
505  static nfsentry e_res[MAX_READDIR_ENTRIES];
506  am_node *mp;
507  int retry;
508
509  if (amuDebug(D_TRACE))
510    plog(XLOG_DEBUG, "readdir:");
511
512  mp = fh_to_mp3(&argp->rda_fhandle, &retry, VLOOK_CREATE);
513  if (mp == 0) {
514    if (retry < 0) {
515      amd_stats.d_drops++;
516      return 0;
517    }
518    res.rdr_status = nfs_error(retry);
519  } else {
520    if (amuDebug(D_TRACE))
521      plog(XLOG_DEBUG, "\treaddir(%s)", mp->am_path);
522    res.rdr_status = nfs_error((*mp->am_mnt->mf_ops->readdir)
523			   (mp, argp->rda_cookie,
524			    &res.rdr_u.rdr_reply_u, e_res, argp->rda_count));
525    mp->am_stats.s_readdir++;
526  }
527
528  return &res;
529}
530
531
532nfsstatfsres *
533nfsproc_statfs_2_svc(am_nfs_fh *argp, struct svc_req *rqstp)
534{
535  static nfsstatfsres res;
536  am_node *mp;
537  int retry;
538  mntent_t mnt;
539
540  if (amuDebug(D_TRACE))
541    plog(XLOG_DEBUG, "statfs:");
542
543  mp = fh_to_mp3(argp, &retry, VLOOK_CREATE);
544  if (mp == 0) {
545    if (retry < 0) {
546      amd_stats.d_drops++;
547      return 0;
548    }
549    res.sfr_status = nfs_error(retry);
550  } else {
551    nfsstatfsokres *fp;
552    if (amuDebug(D_TRACE))
553      plog(XLOG_DEBUG, "\tstat_fs(%s)", mp->am_path);
554
555    /*
556     * just return faked up file system information
557     */
558    fp = &res.sfr_u.sfr_reply_u;
559
560    fp->sfrok_tsize = 1024;
561    fp->sfrok_bsize = 1024;
562
563    /* check if map is browsable and show_statfs_entries=yes  */
564    if ((gopt.flags & CFM_SHOW_STATFS_ENTRIES) &&
565	mp->am_mnt && mp->am_mnt->mf_mopts) {
566      mnt.mnt_opts = mp->am_mnt->mf_mopts;
567      if (amu_hasmntopt(&mnt, "browsable")) {
568	count_map_entries(mp,
569			  &fp->sfrok_blocks,
570			  &fp->sfrok_bfree,
571			  &fp->sfrok_bavail);
572      }
573    } else {
574      fp->sfrok_blocks = 0; /* set to 1 if you don't want empty automounts */
575      fp->sfrok_bfree = 0;
576      fp->sfrok_bavail = 0;
577    }
578
579    res.sfr_status = NFS_OK;
580    mp->am_stats.s_statfs++;
581  }
582
583  return &res;
584}
585
586
587/*
588 * count how many total entries there are in a map, and how many
589 * of them are in use.
590 */
591static void
592count_map_entries(const am_node *mp, u_int *out_blocks, u_int *out_bfree, u_int *out_bavail)
593{
594  u_int blocks, bfree, bavail, i;
595  mntfs *mf;
596  mnt_map *mmp;
597  kv *k;
598
599  blocks = bfree = bavail = 0;
600  if (!mp)
601    goto out;
602  mf = mp->am_mnt;
603  if (!mf)
604    goto out;
605  mmp = (mnt_map *) mf->mf_private;
606  if (!mmp)
607    goto out;
608
609  /* iterate over keys */
610  for (i = 0; i < NKVHASH; i++) {
611    for (k = mmp->kvhash[i]; k ; k = k->next) {
612      if (!k->key)
613	continue;
614      blocks++;
615      /*
616       * XXX: Need to count how many are actively in use and recompute
617       * bfree and bavail based on it.
618       */
619    }
620  }
621
622out:
623  *out_blocks = blocks;
624  *out_bfree = bfree;
625  *out_bavail = bavail;
626}
627
628
629/*
630 * Convert from file handle to automount node.
631 */
632static am_node *
633fh_to_mp3(am_nfs_fh *fhp, int *rp, int vop)
634{
635  struct am_fh *fp = (struct am_fh *) fhp;
636  am_node *ap = 0;
637
638  if (fp->u.s.fhh_type != 0) {
639    /* New filehandle type */
640    int len = sizeof(*fhp) - sizeof(fp->fhh_gen);
641    char *path = xmalloc(len+1);
642    /*
643     * Because fhp is treated as a filehandle we use memcpy
644     * instead of xstrlcpy.
645     */
646    memcpy(path, (char *) fp->u.fhh_path, len);
647    path[len] = '\0';
648    /* dlog("fh_to_mp3: new filehandle: %s", path); */
649
650    ap = path_to_exported_ap(path);
651    XFREE(path);
652  } else {
653    /* dlog("fh_to_mp3: old filehandle: %d", fp->u.s.fhh_id); */
654    /*
655     * Check process id matches
656     * If it doesn't then it is probably
657     * from an old kernel-cached filehandle
658     * which is now out of date.
659     */
660    if (fp->u.s.fhh_pid != get_server_pid()) {
661      dlog("fh_to_mp3: wrong pid %ld != my pid %ld",
662	   (long) fp->u.s.fhh_pid, get_server_pid());
663      goto drop;
664    }
665
666    /*
667     * Get hold of the supposed mount node
668     */
669    ap = get_exported_ap(fp->u.s.fhh_id);
670  }
671
672  /*
673   * Check the generation number in the node
674   * matches the one from the kernel.  If not
675   * then the old node has been timed out and
676   * a new one allocated.
677   */
678  if (ap != NULL && ap->am_gen != fp->fhh_gen)
679    ap = 0;
680
681  /*
682   * If it doesn't exists then drop the request
683   */
684  if (!ap)
685    goto drop;
686
687#if 0
688  /*
689   * If the node is hung then locate a new node
690   * for it.  This implements the replicated filesystem
691   * retries.
692   */
693  if (ap->am_mnt && FSRV_ISDOWN(ap->am_mnt->mf_server) && ap->am_parent) {
694    int error;
695    am_node *orig_ap = ap;
696
697    dlog("fh_to_mp3: %s (%s) is hung: lookup alternative file server",
698	 orig_ap->am_path, orig_ap->am_mnt->mf_info);
699
700    /*
701     * Update modify time of parent node.
702     * With any luck the kernel will re-stat
703     * the child node and get new information.
704     */
705    clocktime(&orig_ap->am_fattr.na_mtime);
706
707    /*
708     * Call the parent's lookup routine for an object
709     * with the same name.  This may return -1 in error
710     * if a mount is in progress.  In any case, if no
711     * mount node is returned the error code is propagated
712     * to the caller.
713     */
714    if (vop == VLOOK_CREATE) {
715      ap = orig_ap->am_parent->am_mnt->mf_ops->lookup_child(orig_ap->am_parent, orig_ap->am_name, &error, vop);
716      if (ap && error < 0)
717	ap = orig_ap->am_parent->am_mnt->mf_ops->mount_child(ap, &error);
718    } else {
719      ap = 0;
720      error = ESTALE;
721    }
722    if (ap == 0) {
723      if (error < 0 && amd_state == Finishing)
724	error = ENOENT;
725      *rp = error;
726      return 0;
727    }
728
729    /*
730     * Update last access to original node.  This
731     * avoids timing it out and so sending ESTALE
732     * back to the kernel.
733     * XXX - Not sure we need this anymore (jsp, 90/10/6).
734     */
735    new_ttl(orig_ap);
736
737  }
738#endif
739
740  /*
741   * Disallow references to objects being unmounted, unless
742   * they are automount points.
743   */
744  if (ap->am_mnt && (ap->am_mnt->mf_flags & MFF_UNMOUNTING) &&
745      !(ap->am_flags & AMF_ROOT)) {
746    if (amd_state == Finishing)
747      *rp = ENOENT;
748    else
749      *rp = -1;
750    return 0;
751  }
752  new_ttl(ap);
753
754drop:
755  if (!ap || !ap->am_mnt) {
756    /*
757     * If we are shutting down then it is likely
758     * that this node has disappeared because of
759     * a fast timeout.  To avoid things thrashing
760     * just pretend it doesn't exist at all.  If
761     * ESTALE is returned, some NFS clients just
762     * keep retrying (stupid or what - if it's
763     * stale now, what's it going to be in 5 minutes?)
764     */
765    if (amd_state == Finishing)
766      *rp = ENOENT;
767    else
768      *rp = ESTALE;
769    amd_stats.d_stale++;
770  }
771
772  return ap;
773}
774
775
776static am_node *
777fh_to_mp(am_nfs_fh *fhp)
778{
779  int dummy;
780
781  return fh_to_mp3(fhp, &dummy, VLOOK_CREATE);
782}
783
784
785/*
786 * Convert from automount node to file handle.
787 */
void
mp_to_fh(am_node *mp, am_nfs_fh *fhp)
{
  /*
   * Encode automount node mp into NFS filehandle *fhp.  If the node's
   * path fits after the generation number, a new-style (path-bearing)
   * handle is produced; otherwise fall back to the old-style
   * (pid, map-id, generation) triple.  Inverse of fh_to_mp3().
   */
  u_int pathlen;
  struct am_fh *fp = (struct am_fh *) fhp;

  /* zero-fill: leaves fhh_type == 0, which marks an old-style handle */
  memset((char *) fhp, 0, sizeof(am_nfs_fh));

  /* Store the generation number */
  fp->fhh_gen = mp->am_gen;

  pathlen = strlen(mp->am_path);
  if (pathlen <= sizeof(*fhp) - sizeof(fp->fhh_gen)) {
    /* dlog("mp_to_fh: new filehandle: %s", mp->am_path); */

    /*
     * Because fhp is treated as a filehandle we use memcpy instead of
     * xstrlcpy.  When the path exactly fills the array there is no
     * terminating NUL; fh_to_mp3() supplies one while decoding.
     */
    memcpy(fp->u.fhh_path, mp->am_path, pathlen); /* making a filehandle */
  } else {
    /*
     * Take the process id
     */
    fp->u.s.fhh_pid = get_server_pid();

    /*
     * ... the map number
     */
    fp->u.s.fhh_id = mp->am_mapno;

    /*
     * ... and the generation number (previously stored)
     * to make a "unique" triple that will never
     * be reallocated except across reboots (which doesn't matter)
     * or if we are unlucky enough to be given the same
     * pid as a previous amd (very unlikely).
     */
    /* dlog("mp_to_fh: old filehandle: %d", fp->u.s.fhh_id); */
  }
}
829