1/* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Rick Macklem at The University of Guelph. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)nfs_subs.c 8.3 (Berkeley) 1/4/94 37 * $Id: nfs_subs.c,v 1.3 1994/08/02 07:52:13 davidg Exp $ 38 */ 39 40/* 41 * These functions support the macros and help fiddle mbuf chains for 42 * the nfs op functions. They do things like create the rpc header and 43 * copy data between mbuf chains and uio lists. 44 */ 45#include <sys/param.h> 46#include <sys/proc.h> 47#include <sys/systm.h> 48#include <sys/kernel.h> 49#include <sys/mount.h> 50#include <sys/vnode.h> 51#include <sys/namei.h> 52#include <sys/mbuf.h> 53#include <sys/socket.h> 54#include <sys/stat.h>
| 1/* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Rick Macklem at The University of Guelph. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)nfs_subs.c 8.3 (Berkeley) 1/4/94 37 * $Id: nfs_subs.c,v 1.3 1994/08/02 07:52:13 davidg Exp $ 38 */ 39 40/* 41 * These functions support the macros and help fiddle mbuf chains for 42 * the nfs op functions. They do things like create the rpc header and 43 * copy data between mbuf chains and uio lists. 44 */ 45#include <sys/param.h> 46#include <sys/proc.h> 47#include <sys/systm.h> 48#include <sys/kernel.h> 49#include <sys/mount.h> 50#include <sys/vnode.h> 51#include <sys/namei.h> 52#include <sys/mbuf.h> 53#include <sys/socket.h> 54#include <sys/stat.h>
|
| 55#ifdef VFS_LKM 56#include <sys/sysent.h> 57#include <sys/syscall.h> 58#endif
|
55 56#include <nfs/rpcv2.h> 57#include <nfs/nfsv2.h> 58#include <nfs/nfsnode.h> 59#include <nfs/nfs.h> 60#include <nfs/xdr_subs.h> 61#include <nfs/nfsm_subs.h> 62#include <nfs/nfsmount.h> 63#include <nfs/nqnfs.h> 64#include <nfs/nfsrtt.h> 65 66#include <miscfs/specfs/specdev.h> 67 68#include <netinet/in.h> 69#ifdef ISO 70#include <netiso/iso.h> 71#endif 72 73#define TRUE 1 74#define FALSE 0 75 76/* 77 * Data items converted to xdr at startup, since they are constant 78 * This is kinda hokey, but may save a little time doing byte swaps 79 */ 80u_long nfs_procids[NFS_NPROCS]; 81u_long nfs_xdrneg1; 82u_long rpc_call, rpc_vers, rpc_reply, rpc_msgdenied, rpc_autherr, 83 rpc_mismatch, rpc_auth_unix, rpc_msgaccepted, rpc_rejectedcred, 84 rpc_auth_kerb; 85u_long nfs_vers, nfs_prog, nfs_true, nfs_false; 86 87/* And other global data */ 88static u_long nfs_xid = 0; 89enum vtype ntov_type[7] = { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON }; 90extern struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON]; 91extern struct nfsreq nfsreqh; 92extern int nqnfs_piggy[NFS_NPROCS]; 93extern struct nfsrtt nfsrtt; 94extern time_t nqnfsstarttime; 95extern u_long nqnfs_prog, nqnfs_vers; 96extern int nqsrv_clockskew; 97extern int nqsrv_writeslack; 98extern int nqsrv_maxlease; 99
| 59 60#include <nfs/rpcv2.h> 61#include <nfs/nfsv2.h> 62#include <nfs/nfsnode.h> 63#include <nfs/nfs.h> 64#include <nfs/xdr_subs.h> 65#include <nfs/nfsm_subs.h> 66#include <nfs/nfsmount.h> 67#include <nfs/nqnfs.h> 68#include <nfs/nfsrtt.h> 69 70#include <miscfs/specfs/specdev.h> 71 72#include <netinet/in.h> 73#ifdef ISO 74#include <netiso/iso.h> 75#endif 76 77#define TRUE 1 78#define FALSE 0 79 80/* 81 * Data items converted to xdr at startup, since they are constant 82 * This is kinda hokey, but may save a little time doing byte swaps 83 */ 84u_long nfs_procids[NFS_NPROCS]; 85u_long nfs_xdrneg1; 86u_long rpc_call, rpc_vers, rpc_reply, rpc_msgdenied, rpc_autherr, 87 rpc_mismatch, rpc_auth_unix, rpc_msgaccepted, rpc_rejectedcred, 88 rpc_auth_kerb; 89u_long nfs_vers, nfs_prog, nfs_true, nfs_false; 90 91/* And other global data */ 92static u_long nfs_xid = 0; 93enum vtype ntov_type[7] = { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON }; 94extern struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON]; 95extern struct nfsreq nfsreqh; 96extern int nqnfs_piggy[NFS_NPROCS]; 97extern struct nfsrtt nfsrtt; 98extern time_t nqnfsstarttime; 99extern u_long nqnfs_prog, nqnfs_vers; 100extern int nqsrv_clockskew; 101extern int nqsrv_writeslack; 102extern int nqsrv_maxlease; 103
|
| 104#ifdef VFS_LKM 105struct getfh_args; 106extern int getfh(struct proc *, struct getfh_args *, int *); 107struct nfssvc_args; 108extern int nfssvc(struct proc *, struct nfssvc_args *, int *); 109#endif 110
|
/*
 * Create the header for an rpc request packet
 * The hsiz is the size of the rest of the nfs request header.
 * (just used to decide if a cluster is a good idea)
 *
 * Returns the newly allocated mbuf that will head the request;
 * *bposp is set to the first free byte in it so the caller can keep
 * appending request data with the nfsm_* build macros.
 */
struct mbuf *
nfsm_reqh(vp, procid, hsiz, bposp)
	struct vnode *vp;	/* vnode the request is for; may be NULL */
	u_long procid;		/* NFS procedure number */
	int hsiz;		/* expected size of the rest of the header */
	caddr_t *bposp;		/* out: build position within returned mbuf */
{
	register struct mbuf *mb;
	register u_long *tl;
	register caddr_t bpos;
	struct mbuf *mb2;	/* scratch used implicitly by nfsm_build() */
	struct nfsmount *nmp;
	int nqflag;

	MGET(mb, M_WAIT, MT_DATA);
	/* Grab a cluster up front when the header won't fit a plain mbuf. */
	if (hsiz >= MINCLSIZE)
		MCLGET(mb, M_WAIT);
	mb->m_len = 0;
	bpos = mtod(mb, caddr_t);

	/*
	 * For NQNFS, add lease request.
	 * NOTE(review): nfsm_build() (nfsm_subs.h) appears to advance
	 * mb/bpos and use mb2 behind the scenes — confirm against the
	 * macro definition before touching these locals.
	 */
	if (vp) {
		nmp = VFSTONFS(vp->v_mount);
		if (nmp->nm_flag & NFSMNT_NQNFS) {
			nqflag = NQNFS_NEEDLEASE(vp, procid);
			if (nqflag) {
				/* Two words: lease request flag + term. */
				nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
				*tl++ = txdr_unsigned(nqflag);
				*tl = txdr_unsigned(nmp->nm_leaseterm);
			} else {
				/* Single zero word: no lease wanted. */
				nfsm_build(tl, u_long *, NFSX_UNSIGNED);
				*tl = 0;
			}
		}
	}
	/* Finally, return values */
	*bposp = bpos;
	return (mb);
}

/*
 * Build the RPC header and fill in the authorization info.
 * The authorization string argument is only used when the credentials
 * come from outside of the kernel.
 * Returns the head of the mbuf list.
 */
struct mbuf *
nfsm_rpchead(cr, nqnfs, procid, auth_type, auth_len, auth_str, mrest,
	mrest_len, mbp, xidp)
	register struct ucred *cr;	/* credentials to marshal */
	int nqnfs;			/* nonzero: use NQNFS prog/vers */
	int procid;			/* RPC procedure number */
	int auth_type;			/* RPCAUTH_UNIX or RPCAUTH_NQNFS */
	int auth_len;			/* length of the auth body */
	char *auth_str;			/* external auth string (NQNFS only) */
	struct mbuf *mrest;		/* rest of the request, appended */
	int mrest_len;			/* length of mrest chain */
	struct mbuf **mbp;		/* out: last mbuf of the header */
	u_long *xidp;			/* out: xid assigned (xdr form) */
{
	register struct mbuf *mb;
	register u_long *tl;
	register caddr_t bpos;
	register int i;
	struct mbuf *mreq, *mb2;	/* mb2 used implicitly by nfsm_build() */
	int siz, grpsiz, authsiz;

	/* Auth body is padded to a 4-byte boundary on the wire. */
	authsiz = nfsm_rndup(auth_len);
	if (auth_type == RPCAUTH_NQNFS)
		authsiz += 2 * NFSX_UNSIGNED;
	MGETHDR(mb, M_WAIT, MT_DATA);
	if ((authsiz + 10*NFSX_UNSIGNED) >= MINCLSIZE) {
		MCLGET(mb, M_WAIT);
	} else if ((authsiz + 10*NFSX_UNSIGNED) < MHLEN) {
		/* Leave leading space so a lower-layer header can prepend. */
		MH_ALIGN(mb, authsiz + 10*NFSX_UNSIGNED);
	} else {
		MH_ALIGN(mb, 8*NFSX_UNSIGNED);
	}
	mb->m_len = 0;
	mreq = mb;
	bpos = mtod(mb, caddr_t);

	/*
	 * First the RPC header.
	 */
	nfsm_build(tl, u_long *, 8*NFSX_UNSIGNED);
	/* xid 0 is reserved; skip it on wraparound. */
	if (++nfs_xid == 0)
		nfs_xid++;
	*tl++ = *xidp = txdr_unsigned(nfs_xid);
	*tl++ = rpc_call;
	*tl++ = rpc_vers;
	if (nqnfs) {
		*tl++ = txdr_unsigned(NQNFS_PROG);
		*tl++ = txdr_unsigned(NQNFS_VER1);
	} else {
		*tl++ = txdr_unsigned(NFS_PROG);
		*tl++ = txdr_unsigned(NFS_VER2);
	}
	*tl++ = txdr_unsigned(procid);

	/*
	 * And then the authorization cred.
	 */
	*tl++ = txdr_unsigned(auth_type);
	*tl = txdr_unsigned(authsiz);
	switch (auth_type) {
	case RPCAUTH_UNIX:
		nfsm_build(tl, u_long *, auth_len);
		*tl++ = 0;		/* stamp ?? */
		*tl++ = 0;		/* NULL hostname */
		*tl++ = txdr_unsigned(cr->cr_uid);
		*tl++ = txdr_unsigned(cr->cr_groups[0]);
		/* 5 fixed words precede the gid list in auth_len. */
		grpsiz = (auth_len >> 2) - 5;
		*tl++ = txdr_unsigned(grpsiz);
		for (i = 1; i <= grpsiz; i++)
			*tl++ = txdr_unsigned(cr->cr_groups[i]);
		break;
	case RPCAUTH_NQNFS:
		nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
		*tl++ = txdr_unsigned(cr->cr_uid);
		*tl = txdr_unsigned(auth_len);
		/* Copy the external auth string, growing the chain. */
		siz = auth_len;
		while (siz > 0) {
			if (M_TRAILINGSPACE(mb) == 0) {
				MGET(mb2, M_WAIT, MT_DATA);
				if (siz >= MINCLSIZE)
					MCLGET(mb2, M_WAIT);
				mb->m_next = mb2;
				mb = mb2;
				mb->m_len = 0;
				bpos = mtod(mb, caddr_t);
			}
			i = min(siz, M_TRAILINGSPACE(mb));
			bcopy(auth_str, bpos, i);
			mb->m_len += i;
			auth_str += i;
			bpos += i;
			siz -= i;
		}
		/* Zero-fill the XDR round-up padding. */
		if ((siz = (nfsm_rndup(auth_len) - auth_len)) > 0) {
			for (i = 0; i < siz; i++)
				*bpos++ = '\0';
			mb->m_len += siz;
		}
		break;
	};
	/* Null verifier. */
	nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
	*tl++ = txdr_unsigned(RPCAUTH_NULL);
	*tl = 0;
	mb->m_next = mrest;
	mreq->m_pkthdr.len = authsiz + 10*NFSX_UNSIGNED + mrest_len;
	mreq->m_pkthdr.rcvif = (struct ifnet *)0;
	*mbp = mb;
	return (mreq);
}

/*
 * copies mbuf chain to the uio scatter/gather list
 *
 * Advances *mrep / *dpos past the siz data bytes plus any XDR
 * round-up padding.  Returns 0, EFBIG if the uio runs out of iovecs,
 * or EBADRPC if the mbuf chain is exhausted early.
 */
int
nfsm_mbuftouio(mrep, uiop, siz, dpos)
	struct mbuf **mrep;		/* in/out: current mbuf in chain */
	register struct uio *uiop;	/* destination scatter/gather list */
	int siz;			/* number of data bytes to copy */
	caddr_t *dpos;			/* in/out: position within *mrep */
{
	register char *mbufcp, *uiocp;
	register int xfer, left, len;
	register struct mbuf *mp;
	long uiosiz, rem;
	int error = 0;

	mp = *mrep;
	mbufcp = *dpos;
	/* Bytes remaining in the current mbuf. */
	len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
	rem = nfsm_rndup(siz)-siz;
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
			return (EFBIG);
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			/* Step over empty mbufs to the next data. */
			while (len == 0) {
				mp = mp->m_next;
				if (mp == NULL)
					return (EBADRPC);
				mbufcp = mtod(mp, caddr_t);
				len = mp->m_len;
			}
			xfer = (left > len) ? len : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				(mbufcp, uiocp, xfer);
			else
#endif
			/*
			 * NOTE(review): copyout()'s return value is
			 * ignored here — a fault in the user buffer is
			 * silently dropped; confirm this is intended.
			 */
			if (uiop->uio_segflg == UIO_SYSSPACE)
				bcopy(mbufcp, uiocp, xfer);
			else
				copyout(mbufcp, uiocp, xfer);
			left -= xfer;
			len -= xfer;
			mbufcp += xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		/* Consume the iovec, or just the part of it we used. */
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base += uiosiz;
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	*dpos = mbufcp;
	*mrep = mp;
	/* Skip the XDR round-up padding, if any. */
	if (rem > 0) {
		if (len < rem)
			error = nfs_adv(mrep, dpos, rem, len);
		else
			*dpos += rem;
	}
	return (error);
}

/*
 * copies a uio scatter/gather list to an mbuf chain...
 *
 * Appends siz bytes (plus zeroed XDR round-up padding) to the chain
 * at *mq, allocating new mbufs as needed.  On return *mq points at
 * the last mbuf and *bpos at the next free byte within it.
 * Returns 0 or EINVAL if the uio runs out of iovecs.
 */
int
nfsm_uiotombuf(uiop, mq, siz, bpos)
	register struct uio *uiop;	/* source scatter/gather list */
	struct mbuf **mq;		/* in/out: tail of mbuf chain */
	int siz;			/* number of data bytes to copy */
	caddr_t *bpos;			/* out: build position in last mbuf */
{
	register char *uiocp;
	register struct mbuf *mp, *mp2;
	register int xfer, left, mlen;
	int uiosiz, clflg, rem;
	char *cp;

	if (siz > MLEN)		/* or should it >= MCLBYTES ?? */
		clflg = 1;
	else
		clflg = 0;
	rem = nfsm_rndup(siz)-siz;
	mp = mp2 = *mq;
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
			return (EINVAL);
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			mlen = M_TRAILINGSPACE(mp);
			if (mlen == 0) {
				/* Current mbuf full; append a new one. */
				MGET(mp, M_WAIT, MT_DATA);
				if (clflg)
					MCLGET(mp, M_WAIT);
				mp->m_len = 0;
				mp2->m_next = mp;
				mp2 = mp;
				mlen = M_TRAILINGSPACE(mp);
			}
			xfer = (left > mlen) ? mlen : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
			else
#endif
			/*
			 * NOTE(review): copyin()'s return value is
			 * ignored, as in nfsm_mbuftouio above — a user
			 * fault goes undetected; confirm intended.
			 */
			if (uiop->uio_segflg == UIO_SYSSPACE)
				bcopy(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
			else
				copyin(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
			mp->m_len += xfer;
			left -= xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		/* Consume the iovec, or just the part of it we used. */
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base += uiosiz;
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	/* Zero-fill the XDR round-up padding. */
	if (rem > 0) {
		if (rem > M_TRAILINGSPACE(mp)) {
			MGET(mp, M_WAIT, MT_DATA);
			mp->m_len = 0;
			mp2->m_next = mp;
		}
		cp = mtod(mp, caddr_t)+mp->m_len;
		for (left = 0; left < rem; left++)
			*cp++ = '\0';
		mp->m_len += rem;
		*bpos = cp;
	} else
		*bpos = mtod(mp, caddr_t)+mp->m_len;
	*mq = mp;
	return (0);
}

/*
 * Help break down an mbuf chain by setting the first siz bytes contiguous
 * pointed to by returned val.
 * This is used by the macros nfsm_dissect and nfsm_dissecton for tough
 * cases. (The macros use the vars.
 dpos and dpos2)
 */
int
nfsm_disct(mdp, dposp, siz, left, cp2)
	struct mbuf **mdp;	/* in/out: current mbuf in chain */
	caddr_t *dposp;		/* in/out: position within *mdp */
	int siz;		/* bytes that must be made contiguous */
	int left;		/* bytes remaining in the current mbuf */
	caddr_t *cp2;		/* out: pointer to the contiguous bytes */
{
	register struct mbuf *mp, *mp2;
	register int siz2, xfer;
	register caddr_t p;

	mp = *mdp;
	/* Skip any empty mbufs at the current position. */
	while (left == 0) {
		*mdp = mp = mp->m_next;
		if (mp == NULL)
			return (EBADRPC);
		left = mp->m_len;
		*dposp = mtod(mp, caddr_t);
	}
	if (left >= siz) {
		/* Already contiguous; just hand back the pointer. */
		*cp2 = *dposp;
		*dposp += siz;
	} else if (mp->m_next == NULL) {
		return (EBADRPC);
	} else if (siz > MHLEN) {
		/* Caller bug: can never gather more than one mbuf's worth. */
		panic("nfs S too big");
	} else {
		/*
		 * Splice in a fresh mbuf and gather the siz bytes into it
		 * from the remainder of this mbuf and its successors.
		 */
		MGET(mp2, M_WAIT, MT_DATA);
		mp2->m_next = mp->m_next;
		mp->m_next = mp2;
		mp->m_len -= left;
		mp = mp2;
		*cp2 = p = mtod(mp, caddr_t);
		bcopy(*dposp, p, left);		/* Copy what was left */
		siz2 = siz-left;
		p += left;
		mp2 = mp->m_next;
		/* Loop around copying up the siz2 bytes */
		while (siz2 > 0) {
			if (mp2 == NULL)
				return (EBADRPC);
			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
			if (xfer > 0) {
				bcopy(mtod(mp2, caddr_t), p, xfer);
				NFSMADV(mp2, xfer);
				mp2->m_len -= xfer;
				p += xfer;
				siz2 -= xfer;
			}
			if (siz2 > 0)
				mp2 = mp2->m_next;
		}
		mp->m_len = siz;
		*mdp = mp2;
		*dposp = mtod(mp2, caddr_t);
	}
	return (0);
}

/*
 * Advance the position in the mbuf chain.
 *
 * Skips offs bytes starting from *dposp (with 'left' bytes remaining
 * in the current mbuf).  Returns 0, or EBADRPC if the chain ends
 * before offs bytes have been skipped.
 */
int
nfs_adv(mdp, dposp, offs, left)
	struct mbuf **mdp;	/* in/out: current mbuf in chain */
	caddr_t *dposp;		/* in/out: position within *mdp */
	int offs;		/* bytes to advance */
	int left;		/* bytes remaining in current mbuf */
{
	register struct mbuf *m;
	register int s;

	m = *mdp;
	s = left;
	while (s < offs) {
		offs -= s;
		m = m->m_next;
		if (m == NULL)
			return (EBADRPC);
		s = m->m_len;
	}
	*mdp = m;
	*dposp = mtod(m, caddr_t)+offs;
	return (0);
}

/*
 * Copy a string into mbufs for the hard cases...
 *
 * Marshals the siz-byte string at cp as an XDR opaque (length word
 * followed by the data), appending new mbufs to *mb as required.
 * On return *mb is the last mbuf and *bpos the next free byte in it.
 */
int
nfsm_strtmbuf(mb, bpos, cp, siz)
	struct mbuf **mb;	/* in/out: tail of the mbuf chain */
	char **bpos;		/* in/out: build position in last mbuf */
	char *cp;		/* string data to marshal */
	long siz;		/* length of the string in bytes */
{
	register struct mbuf *m1 = 0, *m2;
	long left, xfer, len, tlen;
	u_long *tl;
	int putsize;	/* nonzero until the XDR length word is written */

	putsize = 1;
	m2 = *mb;
	left = M_TRAILINGSPACE(m2);
	if (left > 0) {
		/* Length word fits in the current mbuf. */
		tl = ((u_long *)(*bpos));
		*tl++ = txdr_unsigned(siz);
		putsize = 0;
		left -= NFSX_UNSIGNED;
		m2->m_len += NFSX_UNSIGNED;
		if (left > 0) {
			bcopy(cp, (caddr_t) tl, left);
			siz -= left;
			cp += left;
			m2->m_len += left;
			left = 0;
		}
	}
	/* Loop around adding mbufs */
	while (siz > 0) {
		MGET(m1, M_WAIT, MT_DATA);
		if (siz > MLEN)
			MCLGET(m1, M_WAIT);
		m1->m_len = NFSMSIZ(m1);
		m2->m_next = m1;
		m2 = m1;
		tl = mtod(m1, u_long *);
		tlen = 0;
		if (putsize) {
			/* Length word still owed; it leads this mbuf. */
			*tl++ = txdr_unsigned(siz);
			m1->m_len -= NFSX_UNSIGNED;
			tlen = NFSX_UNSIGNED;
			putsize = 0;
		}
		if (siz < m1->m_len) {
			/* Last chunk: round up and zero the pad word. */
			len = nfsm_rndup(siz);
			xfer = siz;
			if (xfer < len)
				*(tl+(xfer>>2)) = 0;
		} else {
			xfer = len = m1->m_len;
		}
		bcopy(cp, (caddr_t) tl, xfer);
		m1->m_len = len+tlen;
		siz -= xfer;
		cp += xfer;
	}
	*mb = m1;
	*bpos = mtod(m1, caddr_t)+m1->m_len;
	return (0);
}

/*
 * Called once to initialize data structures...
584 */ 585int 586nfs_init() 587{ 588 register int i; 589 590 nfsrtt.pos = 0; 591 rpc_vers = txdr_unsigned(RPC_VER2); 592 rpc_call = txdr_unsigned(RPC_CALL); 593 rpc_reply = txdr_unsigned(RPC_REPLY); 594 rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED); 595 rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED); 596 rpc_mismatch = txdr_unsigned(RPC_MISMATCH); 597 rpc_autherr = txdr_unsigned(RPC_AUTHERR); 598 rpc_rejectedcred = txdr_unsigned(AUTH_REJECTCRED); 599 rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX); 600 rpc_auth_kerb = txdr_unsigned(RPCAUTH_NQNFS); 601 nfs_vers = txdr_unsigned(NFS_VER2); 602 nfs_prog = txdr_unsigned(NFS_PROG); 603 nfs_true = txdr_unsigned(TRUE); 604 nfs_false = txdr_unsigned(FALSE); 605 /* Loop thru nfs procids */ 606 for (i = 0; i < NFS_NPROCS; i++) 607 nfs_procids[i] = txdr_unsigned(i); 608 /* Ensure async daemons disabled */ 609 for (i = 0; i < NFS_MAXASYNCDAEMON; i++) 610 nfs_iodwant[i] = (struct proc *)0; 611 TAILQ_INIT(&nfs_bufq); 612 nfs_xdrneg1 = txdr_unsigned(-1); 613 nfs_nhinit(); /* Init the nfsnode table */ 614 nfsrv_init(0); /* Init server data structures */ 615 nfsrv_initcache(); /* Init the server request cache */ 616 617 /* 618 * Initialize the nqnfs server stuff. 619 */ 620 if (nqnfsstarttime == 0) { 621 nqnfsstarttime = boottime.tv_sec + nqsrv_maxlease 622 + nqsrv_clockskew + nqsrv_writeslack; 623 NQLOADNOVRAM(nqnfsstarttime); 624 nqnfs_prog = txdr_unsigned(NQNFS_PROG); 625 nqnfs_vers = txdr_unsigned(NQNFS_VER1); 626 nqthead.th_head[0] = &nqthead; 627 nqthead.th_head[1] = &nqthead; 628 nqfhead = hashinit(NQLCHSZ, M_NQLEASE, &nqfheadhash); 629 } 630 631 /* 632 * Initialize reply list and start timer 633 */ 634 nfsreqh.r_prev = nfsreqh.r_next = &nfsreqh; 635 nfs_timer(); 636
/*
 * Create the header for an rpc request packet
 * The hsiz is the size of the rest of the nfs request header.
 * (just used to decide if a cluster is a good idea)
 *
 * Returns the newly allocated mbuf that will head the request;
 * *bposp is set to the first free byte in it so the caller can keep
 * appending request data with the nfsm_* build macros.
 */
struct mbuf *
nfsm_reqh(vp, procid, hsiz, bposp)
	struct vnode *vp;	/* vnode the request is for; may be NULL */
	u_long procid;		/* NFS procedure number */
	int hsiz;		/* expected size of the rest of the header */
	caddr_t *bposp;		/* out: build position within returned mbuf */
{
	register struct mbuf *mb;
	register u_long *tl;
	register caddr_t bpos;
	struct mbuf *mb2;	/* scratch used implicitly by nfsm_build() */
	struct nfsmount *nmp;
	int nqflag;

	MGET(mb, M_WAIT, MT_DATA);
	/* Grab a cluster up front when the header won't fit a plain mbuf. */
	if (hsiz >= MINCLSIZE)
		MCLGET(mb, M_WAIT);
	mb->m_len = 0;
	bpos = mtod(mb, caddr_t);

	/*
	 * For NQNFS, add lease request.
	 * NOTE(review): nfsm_build() (nfsm_subs.h) appears to advance
	 * mb/bpos and use mb2 behind the scenes — confirm against the
	 * macro definition before touching these locals.
	 */
	if (vp) {
		nmp = VFSTONFS(vp->v_mount);
		if (nmp->nm_flag & NFSMNT_NQNFS) {
			nqflag = NQNFS_NEEDLEASE(vp, procid);
			if (nqflag) {
				/* Two words: lease request flag + term. */
				nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
				*tl++ = txdr_unsigned(nqflag);
				*tl = txdr_unsigned(nmp->nm_leaseterm);
			} else {
				/* Single zero word: no lease wanted. */
				nfsm_build(tl, u_long *, NFSX_UNSIGNED);
				*tl = 0;
			}
		}
	}
	/* Finally, return values */
	*bposp = bpos;
	return (mb);
}

/*
 * Build the RPC header and fill in the authorization info.
 * The authorization string argument is only used when the credentials
 * come from outside of the kernel.
 * Returns the head of the mbuf list.
 */
struct mbuf *
nfsm_rpchead(cr, nqnfs, procid, auth_type, auth_len, auth_str, mrest,
	mrest_len, mbp, xidp)
	register struct ucred *cr;	/* credentials to marshal */
	int nqnfs;			/* nonzero: use NQNFS prog/vers */
	int procid;			/* RPC procedure number */
	int auth_type;			/* RPCAUTH_UNIX or RPCAUTH_NQNFS */
	int auth_len;			/* length of the auth body */
	char *auth_str;			/* external auth string (NQNFS only) */
	struct mbuf *mrest;		/* rest of the request, appended */
	int mrest_len;			/* length of mrest chain */
	struct mbuf **mbp;		/* out: last mbuf of the header */
	u_long *xidp;			/* out: xid assigned (xdr form) */
{
	register struct mbuf *mb;
	register u_long *tl;
	register caddr_t bpos;
	register int i;
	struct mbuf *mreq, *mb2;	/* mb2 used implicitly by nfsm_build() */
	int siz, grpsiz, authsiz;

	/* Auth body is padded to a 4-byte boundary on the wire. */
	authsiz = nfsm_rndup(auth_len);
	if (auth_type == RPCAUTH_NQNFS)
		authsiz += 2 * NFSX_UNSIGNED;
	MGETHDR(mb, M_WAIT, MT_DATA);
	if ((authsiz + 10*NFSX_UNSIGNED) >= MINCLSIZE) {
		MCLGET(mb, M_WAIT);
	} else if ((authsiz + 10*NFSX_UNSIGNED) < MHLEN) {
		/* Leave leading space so a lower-layer header can prepend. */
		MH_ALIGN(mb, authsiz + 10*NFSX_UNSIGNED);
	} else {
		MH_ALIGN(mb, 8*NFSX_UNSIGNED);
	}
	mb->m_len = 0;
	mreq = mb;
	bpos = mtod(mb, caddr_t);

	/*
	 * First the RPC header.
	 */
	nfsm_build(tl, u_long *, 8*NFSX_UNSIGNED);
	/* xid 0 is reserved; skip it on wraparound. */
	if (++nfs_xid == 0)
		nfs_xid++;
	*tl++ = *xidp = txdr_unsigned(nfs_xid);
	*tl++ = rpc_call;
	*tl++ = rpc_vers;
	if (nqnfs) {
		*tl++ = txdr_unsigned(NQNFS_PROG);
		*tl++ = txdr_unsigned(NQNFS_VER1);
	} else {
		*tl++ = txdr_unsigned(NFS_PROG);
		*tl++ = txdr_unsigned(NFS_VER2);
	}
	*tl++ = txdr_unsigned(procid);

	/*
	 * And then the authorization cred.
	 */
	*tl++ = txdr_unsigned(auth_type);
	*tl = txdr_unsigned(authsiz);
	switch (auth_type) {
	case RPCAUTH_UNIX:
		nfsm_build(tl, u_long *, auth_len);
		*tl++ = 0;		/* stamp ?? */
		*tl++ = 0;		/* NULL hostname */
		*tl++ = txdr_unsigned(cr->cr_uid);
		*tl++ = txdr_unsigned(cr->cr_groups[0]);
		/* 5 fixed words precede the gid list in auth_len. */
		grpsiz = (auth_len >> 2) - 5;
		*tl++ = txdr_unsigned(grpsiz);
		for (i = 1; i <= grpsiz; i++)
			*tl++ = txdr_unsigned(cr->cr_groups[i]);
		break;
	case RPCAUTH_NQNFS:
		nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
		*tl++ = txdr_unsigned(cr->cr_uid);
		*tl = txdr_unsigned(auth_len);
		/* Copy the external auth string, growing the chain. */
		siz = auth_len;
		while (siz > 0) {
			if (M_TRAILINGSPACE(mb) == 0) {
				MGET(mb2, M_WAIT, MT_DATA);
				if (siz >= MINCLSIZE)
					MCLGET(mb2, M_WAIT);
				mb->m_next = mb2;
				mb = mb2;
				mb->m_len = 0;
				bpos = mtod(mb, caddr_t);
			}
			i = min(siz, M_TRAILINGSPACE(mb));
			bcopy(auth_str, bpos, i);
			mb->m_len += i;
			auth_str += i;
			bpos += i;
			siz -= i;
		}
		/* Zero-fill the XDR round-up padding. */
		if ((siz = (nfsm_rndup(auth_len) - auth_len)) > 0) {
			for (i = 0; i < siz; i++)
				*bpos++ = '\0';
			mb->m_len += siz;
		}
		break;
	};
	/* Null verifier. */
	nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
	*tl++ = txdr_unsigned(RPCAUTH_NULL);
	*tl = 0;
	mb->m_next = mrest;
	mreq->m_pkthdr.len = authsiz + 10*NFSX_UNSIGNED + mrest_len;
	mreq->m_pkthdr.rcvif = (struct ifnet *)0;
	*mbp = mb;
	return (mreq);
}

/*
 * copies mbuf chain to the uio scatter/gather list
 *
 * Advances *mrep / *dpos past the siz data bytes plus any XDR
 * round-up padding.  Returns 0, EFBIG if the uio runs out of iovecs,
 * or EBADRPC if the mbuf chain is exhausted early.
 */
int
nfsm_mbuftouio(mrep, uiop, siz, dpos)
	struct mbuf **mrep;		/* in/out: current mbuf in chain */
	register struct uio *uiop;	/* destination scatter/gather list */
	int siz;			/* number of data bytes to copy */
	caddr_t *dpos;			/* in/out: position within *mrep */
{
	register char *mbufcp, *uiocp;
	register int xfer, left, len;
	register struct mbuf *mp;
	long uiosiz, rem;
	int error = 0;

	mp = *mrep;
	mbufcp = *dpos;
	/* Bytes remaining in the current mbuf. */
	len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
	rem = nfsm_rndup(siz)-siz;
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
			return (EFBIG);
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			/* Step over empty mbufs to the next data. */
			while (len == 0) {
				mp = mp->m_next;
				if (mp == NULL)
					return (EBADRPC);
				mbufcp = mtod(mp, caddr_t);
				len = mp->m_len;
			}
			xfer = (left > len) ? len : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				(mbufcp, uiocp, xfer);
			else
#endif
			/*
			 * NOTE(review): copyout()'s return value is
			 * ignored here — a fault in the user buffer is
			 * silently dropped; confirm this is intended.
			 */
			if (uiop->uio_segflg == UIO_SYSSPACE)
				bcopy(mbufcp, uiocp, xfer);
			else
				copyout(mbufcp, uiocp, xfer);
			left -= xfer;
			len -= xfer;
			mbufcp += xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		/* Consume the iovec, or just the part of it we used. */
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base += uiosiz;
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	*dpos = mbufcp;
	*mrep = mp;
	/* Skip the XDR round-up padding, if any. */
	if (rem > 0) {
		if (len < rem)
			error = nfs_adv(mrep, dpos, rem, len);
		else
			*dpos += rem;
	}
	return (error);
}

/*
 * copies a uio scatter/gather list to an mbuf chain...
 *
 * Appends siz bytes (plus zeroed XDR round-up padding) to the chain
 * at *mq, allocating new mbufs as needed.  On return *mq points at
 * the last mbuf and *bpos at the next free byte within it.
 * Returns 0 or EINVAL if the uio runs out of iovecs.
 */
int
nfsm_uiotombuf(uiop, mq, siz, bpos)
	register struct uio *uiop;	/* source scatter/gather list */
	struct mbuf **mq;		/* in/out: tail of mbuf chain */
	int siz;			/* number of data bytes to copy */
	caddr_t *bpos;			/* out: build position in last mbuf */
{
	register char *uiocp;
	register struct mbuf *mp, *mp2;
	register int xfer, left, mlen;
	int uiosiz, clflg, rem;
	char *cp;

	if (siz > MLEN)		/* or should it >= MCLBYTES ?? */
		clflg = 1;
	else
		clflg = 0;
	rem = nfsm_rndup(siz)-siz;
	mp = mp2 = *mq;
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
			return (EINVAL);
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			mlen = M_TRAILINGSPACE(mp);
			if (mlen == 0) {
				/* Current mbuf full; append a new one. */
				MGET(mp, M_WAIT, MT_DATA);
				if (clflg)
					MCLGET(mp, M_WAIT);
				mp->m_len = 0;
				mp2->m_next = mp;
				mp2 = mp;
				mlen = M_TRAILINGSPACE(mp);
			}
			xfer = (left > mlen) ? mlen : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
			else
#endif
			/*
			 * NOTE(review): copyin()'s return value is
			 * ignored, as in nfsm_mbuftouio above — a user
			 * fault goes undetected; confirm intended.
			 */
			if (uiop->uio_segflg == UIO_SYSSPACE)
				bcopy(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
			else
				copyin(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
			mp->m_len += xfer;
			left -= xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		/* Consume the iovec, or just the part of it we used. */
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base += uiosiz;
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	/* Zero-fill the XDR round-up padding. */
	if (rem > 0) {
		if (rem > M_TRAILINGSPACE(mp)) {
			MGET(mp, M_WAIT, MT_DATA);
			mp->m_len = 0;
			mp2->m_next = mp;
		}
		cp = mtod(mp, caddr_t)+mp->m_len;
		for (left = 0; left < rem; left++)
			*cp++ = '\0';
		mp->m_len += rem;
		*bpos = cp;
	} else
		*bpos = mtod(mp, caddr_t)+mp->m_len;
	*mq = mp;
	return (0);
}

/*
 * Help break down an mbuf chain by setting the first siz bytes contiguous
 * pointed to by returned val.
 * This is used by the macros nfsm_dissect and nfsm_dissecton for tough
 * cases. (The macros use the vars.
 dpos and dpos2)
 */
int
nfsm_disct(mdp, dposp, siz, left, cp2)
	struct mbuf **mdp;	/* in/out: current mbuf in chain */
	caddr_t *dposp;		/* in/out: position within *mdp */
	int siz;		/* bytes that must be made contiguous */
	int left;		/* bytes remaining in the current mbuf */
	caddr_t *cp2;		/* out: pointer to the contiguous bytes */
{
	register struct mbuf *mp, *mp2;
	register int siz2, xfer;
	register caddr_t p;

	mp = *mdp;
	/* Skip any empty mbufs at the current position. */
	while (left == 0) {
		*mdp = mp = mp->m_next;
		if (mp == NULL)
			return (EBADRPC);
		left = mp->m_len;
		*dposp = mtod(mp, caddr_t);
	}
	if (left >= siz) {
		/* Already contiguous; just hand back the pointer. */
		*cp2 = *dposp;
		*dposp += siz;
	} else if (mp->m_next == NULL) {
		return (EBADRPC);
	} else if (siz > MHLEN) {
		/* Caller bug: can never gather more than one mbuf's worth. */
		panic("nfs S too big");
	} else {
		/*
		 * Splice in a fresh mbuf and gather the siz bytes into it
		 * from the remainder of this mbuf and its successors.
		 */
		MGET(mp2, M_WAIT, MT_DATA);
		mp2->m_next = mp->m_next;
		mp->m_next = mp2;
		mp->m_len -= left;
		mp = mp2;
		*cp2 = p = mtod(mp, caddr_t);
		bcopy(*dposp, p, left);		/* Copy what was left */
		siz2 = siz-left;
		p += left;
		mp2 = mp->m_next;
		/* Loop around copying up the siz2 bytes */
		while (siz2 > 0) {
			if (mp2 == NULL)
				return (EBADRPC);
			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
			if (xfer > 0) {
				bcopy(mtod(mp2, caddr_t), p, xfer);
				NFSMADV(mp2, xfer);
				mp2->m_len -= xfer;
				p += xfer;
				siz2 -= xfer;
			}
			if (siz2 > 0)
				mp2 = mp2->m_next;
		}
		mp->m_len = siz;
		*mdp = mp2;
		*dposp = mtod(mp2, caddr_t);
	}
	return (0);
}

/*
 * Advance the position in the mbuf chain.
 *
 * Skips offs bytes starting from *dposp (with 'left' bytes remaining
 * in the current mbuf).  Returns 0, or EBADRPC if the chain ends
 * before offs bytes have been skipped.
 */
int
nfs_adv(mdp, dposp, offs, left)
	struct mbuf **mdp;	/* in/out: current mbuf in chain */
	caddr_t *dposp;		/* in/out: position within *mdp */
	int offs;		/* bytes to advance */
	int left;		/* bytes remaining in current mbuf */
{
	register struct mbuf *m;
	register int s;

	m = *mdp;
	s = left;
	while (s < offs) {
		offs -= s;
		m = m->m_next;
		if (m == NULL)
			return (EBADRPC);
		s = m->m_len;
	}
	*mdp = m;
	*dposp = mtod(m, caddr_t)+offs;
	return (0);
}

/*
 * Copy a string into mbufs for the hard cases...
 *
 * Marshals the siz-byte string at cp as an XDR opaque (length word
 * followed by the data), appending new mbufs to *mb as required.
 * On return *mb is the last mbuf and *bpos the next free byte in it.
 */
int
nfsm_strtmbuf(mb, bpos, cp, siz)
	struct mbuf **mb;	/* in/out: tail of the mbuf chain */
	char **bpos;		/* in/out: build position in last mbuf */
	char *cp;		/* string data to marshal */
	long siz;		/* length of the string in bytes */
{
	register struct mbuf *m1 = 0, *m2;
	long left, xfer, len, tlen;
	u_long *tl;
	int putsize;	/* nonzero until the XDR length word is written */

	putsize = 1;
	m2 = *mb;
	left = M_TRAILINGSPACE(m2);
	if (left > 0) {
		/* Length word fits in the current mbuf. */
		tl = ((u_long *)(*bpos));
		*tl++ = txdr_unsigned(siz);
		putsize = 0;
		left -= NFSX_UNSIGNED;
		m2->m_len += NFSX_UNSIGNED;
		if (left > 0) {
			bcopy(cp, (caddr_t) tl, left);
			siz -= left;
			cp += left;
			m2->m_len += left;
			left = 0;
		}
	}
	/* Loop around adding mbufs */
	while (siz > 0) {
		MGET(m1, M_WAIT, MT_DATA);
		if (siz > MLEN)
			MCLGET(m1, M_WAIT);
		m1->m_len = NFSMSIZ(m1);
		m2->m_next = m1;
		m2 = m1;
		tl = mtod(m1, u_long *);
		tlen = 0;
		if (putsize) {
			/* Length word still owed; it leads this mbuf. */
			*tl++ = txdr_unsigned(siz);
			m1->m_len -= NFSX_UNSIGNED;
			tlen = NFSX_UNSIGNED;
			putsize = 0;
		}
		if (siz < m1->m_len) {
			/* Last chunk: round up and zero the pad word. */
			len = nfsm_rndup(siz);
			xfer = siz;
			if (xfer < len)
				*(tl+(xfer>>2)) = 0;
		} else {
			xfer = len = m1->m_len;
		}
		bcopy(cp, (caddr_t) tl, xfer);
		m1->m_len = len+tlen;
		siz -= xfer;
		cp += xfer;
	}
	*mb = m1;
	*bpos = mtod(m1, caddr_t)+m1->m_len;
	return (0);
}

/*
 * Called once to initialize data structures...
 */
int
nfs_init()
{
	register int i;

	nfsrtt.pos = 0;
	/*
	 * Pre-convert the constant RPC and NFS protocol values to XDR
	 * (network byte order) form so they can be stored directly into
	 * request/reply headers.
	 */
	rpc_vers = txdr_unsigned(RPC_VER2);
	rpc_call = txdr_unsigned(RPC_CALL);
	rpc_reply = txdr_unsigned(RPC_REPLY);
	rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
	rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
	rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
	rpc_autherr = txdr_unsigned(RPC_AUTHERR);
	rpc_rejectedcred = txdr_unsigned(AUTH_REJECTCRED);
	rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
	rpc_auth_kerb = txdr_unsigned(RPCAUTH_NQNFS);
	nfs_vers = txdr_unsigned(NFS_VER2);
	nfs_prog = txdr_unsigned(NFS_PROG);
	nfs_true = txdr_unsigned(TRUE);
	nfs_false = txdr_unsigned(FALSE);
	/* Loop thru nfs procids */
	for (i = 0; i < NFS_NPROCS; i++)
		nfs_procids[i] = txdr_unsigned(i);
	/* Ensure async daemons disabled */
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
		nfs_iodwant[i] = (struct proc *)0;
	TAILQ_INIT(&nfs_bufq);
	nfs_xdrneg1 = txdr_unsigned(-1);
	nfs_nhinit();			/* Init the nfsnode table */
	nfsrv_init(0);			/* Init server data structures */
	nfsrv_initcache();		/* Init the server request cache */

	/*
	 * Initialize the nqnfs server stuff.
	 */
	if (nqnfsstarttime == 0) {
		/*
		 * Pad the lease start time with the maximum lease duration,
		 * clock skew and write slack so leases granted before a
		 * crash are guaranteed expired.
		 */
		nqnfsstarttime = boottime.tv_sec + nqsrv_maxlease
			+ nqsrv_clockskew + nqsrv_writeslack;
		NQLOADNOVRAM(nqnfsstarttime);
		nqnfs_prog = txdr_unsigned(NQNFS_PROG);
		nqnfs_vers = txdr_unsigned(NQNFS_VER1);
		nqthead.th_head[0] = &nqthead;
		nqthead.th_head[1] = &nqthead;
		nqfhead = hashinit(NQLCHSZ, M_NQLEASE, &nqfheadhash);
	}

	/*
	 * Initialize reply list and start timer
	 */
	nfsreqh.r_prev = nfsreqh.r_next = &nfsreqh;
	nfs_timer();

	/*
	 * Set up lease_check and lease_updatetime so that other parts
	 * of the system can call us, if we are loadable.
	 */
	lease_check = nfs_lease_check;
	lease_updatetime = nfs_lease_updatetime;
	vfsconf[MOUNT_NFS]->vfc_refcount++; /* make us non-unloadable */
#ifdef VFS_LKM
	/* Hook the NFS syscalls into the sysent table when built as an LKM. */
	sysent[SYS_nfssvc].sy_narg = 2;
	sysent[SYS_nfssvc].sy_call = nfssvc;
	sysent[SYS_getfh].sy_narg = 2;
	sysent[SYS_getfh].sy_call = getfh;
#endif

	return (0);
}

/*
 * Attribute cache routines.
 * nfs_loadattrcache() - loads or updates the cache contents from attributes
 *	that are on the mbuf list
 * nfs_getattrcache() - returns valid attributes if found in cache, returns
 *	error otherwise
 */

/*
 * Load the attribute cache (that lives in the nfsnode entry) with
 * the values on the mbuf list and
 * Iff vap not NULL
 *	copy the attributes to *vaper
 */
int
nfs_loadattrcache(vpp, mdp, dposp, vaper)
	struct vnode **vpp;
	struct mbuf **mdp;
	caddr_t *dposp;
	struct vattr *vaper;
{
	register struct vnode *vp = *vpp;
	register struct vattr *vap;
	register struct nfsv2_fattr *fp;
	extern int (**spec_nfsv2nodeop_p)();
	register struct nfsnode *np, *nq, **nhpp;
	register long t1;
	caddr_t dpos, cp2;
	int error = 0, isnq;
	struct mbuf *md;
	enum vtype vtyp;
	u_short vmode;
	long rdev;
	struct timespec mtime;
	struct vnode *nvp;

	md = *mdp;
	dpos = *dposp;
	/* Bytes of the wire fattr contiguous in the current mbuf. */
	t1 = (mtod(md, caddr_t) + md->m_len) - dpos;
	isnq = (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS);
	/* Make the whole fattr contiguous so it can be read through fp. */
	if (error = nfsm_disct(&md, &dpos, NFSX_FATTR(isnq), t1, &cp2))
		return (error);
	fp = (struct nfsv2_fattr *)cp2;
	vtyp = nfstov_type(fp->fa_type);
	vmode = fxdr_unsigned(u_short, fp->fa_mode);
	/* For VNON/VREG, derive the real type from the mode bits. */
	if (vtyp == VNON || vtyp == VREG)
		vtyp = IFTOVT(vmode);
	if (isnq) {
		rdev = fxdr_unsigned(long, fp->fa_nqrdev);
		fxdr_nqtime(&fp->fa_nqmtime, &mtime);
	} else {
		rdev = fxdr_unsigned(long, fp->fa_nfsrdev);
		fxdr_nfstime(&fp->fa_nfsmtime, &mtime);
	}
	/*
	 * If v_type == VNON it is a new node, so fill in the v_type,
	 * n_mtime fields. Check to see if it represents a special
	 * device, and if so, check for a possible alias. Once the
	 * correct vnode has been obtained, fill in the rest of the
	 * information.
	 */
	np = VTONFS(vp);
	if (vp->v_type == VNON) {
		/* A VCHR with an all-ones rdev is used to encode a FIFO. */
		if (vtyp == VCHR && rdev == 0xffffffff)
			vp->v_type = vtyp = VFIFO;
		else
			vp->v_type = vtyp;
		if (vp->v_type == VFIFO) {
			extern int (**fifo_nfsv2nodeop_p)();
			vp->v_op = fifo_nfsv2nodeop_p;
		}
		if (vp->v_type == VCHR || vp->v_type == VBLK) {
			vp->v_op = spec_nfsv2nodeop_p;
			if (nvp = checkalias(vp, (dev_t)rdev, vp->v_mount)) {
				/*
				 * Discard unneeded vnode, but save its nfsnode.
				 */
				/* Unlink np from the nfsnode hash chain. */
				if (nq = np->n_forw)
					nq->n_back = np->n_back;
				*np->n_back = nq;
				nvp->v_data = vp->v_data;
				vp->v_data = NULL;
				vp->v_op = spec_vnodeop_p;
				vrele(vp);
				vgone(vp);
				/*
				 * Reinitialize aliased node.
				 */
				/* Re-insert np at the head of its hash chain. */
				np->n_vnode = nvp;
				nhpp = (struct nfsnode **)nfs_hash(&np->n_fh);
				if (nq = *nhpp)
					nq->n_back = &np->n_forw;
				np->n_forw = nq;
				np->n_back = nhpp;
				*nhpp = np;
				*vpp = vp = nvp;
			}
		}
		np->n_mtime = mtime.ts_sec;
	}
	/* Decode the remaining attributes into the cached vattr. */
	vap = &np->n_vattr;
	vap->va_type = vtyp;
	vap->va_mode = (vmode & 07777);
	vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
	vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
	vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
	vap->va_rdev = (dev_t)rdev;
	vap->va_mtime = mtime;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	if (isnq) {
		fxdr_hyper(&fp->fa_nqsize, &vap->va_size);
		vap->va_blocksize = fxdr_unsigned(long, fp->fa_nqblocksize);
		fxdr_hyper(&fp->fa_nqbytes, &vap->va_bytes);
		vap->va_fileid = fxdr_unsigned(long, fp->fa_nqfileid);
		fxdr_nqtime(&fp->fa_nqatime, &vap->va_atime);
		vap->va_flags = fxdr_unsigned(u_long, fp->fa_nqflags);
		fxdr_nqtime(&fp->fa_nqctime, &vap->va_ctime);
		vap->va_gen = fxdr_unsigned(u_long, fp->fa_nqgen);
		fxdr_hyper(&fp->fa_nqfilerev, &vap->va_filerev);
	} else {
		vap->va_size = fxdr_unsigned(u_long, fp->fa_nfssize);
		vap->va_blocksize = fxdr_unsigned(long, fp->fa_nfsblocksize);
		vap->va_bytes = fxdr_unsigned(long, fp->fa_nfsblocks) * NFS_FABLKSIZE;
		vap->va_fileid = fxdr_unsigned(long, fp->fa_nfsfileid);
		fxdr_nfstime(&fp->fa_nfsatime, &vap->va_atime);
		vap->va_flags = 0;
		vap->va_ctime.ts_sec = fxdr_unsigned(long, fp->fa_nfsctime.nfs_sec);
		vap->va_ctime.ts_nsec = 0;
		/* NFSv2 has no generation number; the ctime usec slot is reused. */
		vap->va_gen = fxdr_unsigned(u_long, fp->fa_nfsctime.nfs_usec);
		vap->va_filerev = 0;
	}
	/*
	 * Reconcile the server's size with the locally cached size.  If we
	 * have modified the file locally, the larger of the two wins so
	 * local dirty data is not truncated away.
	 */
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (np->n_flag & NMODIFIED) {
				if (vap->va_size < np->n_size)
					vap->va_size = np->n_size;
				else
					np->n_size = vap->va_size;
			} else
				np->n_size = vap->va_size;
			vnode_pager_setsize(vp, (u_long)np->n_size);
		} else
			np->n_size = vap->va_size;
	}
	/* Timestamp the cache entry for nfs_getattrcache()'s validity test. */
	np->n_attrstamp = time.tv_sec;
	*dposp = dpos;
	*mdp = md;
	if (vaper != NULL) {
		bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
#ifdef notdef
		if ((np->n_flag & NMODIFIED) && np->n_size > vap->va_size)
		if (np->n_size > vap->va_size)
			vaper->va_size = np->n_size;
#endif
		/* Locally pending utimes-style changes override server times. */
		if (np->n_flag & NCHG) {
			if (np->n_flag & NACC) {
				vaper->va_atime.ts_sec = np->n_atim.tv_sec;
				vaper->va_atime.ts_nsec =
				    np->n_atim.tv_usec * 1000;
			}
			if (np->n_flag & NUPD) {
				vaper->va_mtime.ts_sec = np->n_mtim.tv_sec;
				vaper->va_mtime.ts_nsec =
				    np->n_mtim.tv_usec * 1000;
			}
		}
	}
	return (0);
}

/*
 * Check the time stamp
 * If the cache is valid, copy contents to *vap and return 0
 * otherwise return an error
 */
int
nfs_getattrcache(vp, vaper)
	register struct vnode *vp;
	struct vattr *vaper;
{
	register struct nfsnode *np = VTONFS(vp);
	register struct vattr *vap;

	/*
	 * Validity test: under NQNFS leasing, the cache is good while a
	 * read lease is cachable; otherwise fall back to the plain NFS
	 * attribute timeout.
	 */
	if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQLOOKLEASE) {
		if (!NQNFS_CKCACHABLE(vp, NQL_READ) || np->n_attrstamp == 0) {
			nfsstats.attrcache_misses++;
			return (ENOENT);
		}
	} else if ((time.tv_sec - np->n_attrstamp) >= NFS_ATTRTIMEO(np)) {
		nfsstats.attrcache_misses++;
		return (ENOENT);
	}
	nfsstats.attrcache_hits++;
	vap = &np->n_vattr;
	/* Same size reconciliation as in nfs_loadattrcache() above. */
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (np->n_flag & NMODIFIED) {
				if (vap->va_size < np->n_size)
					vap->va_size = np->n_size;
				else
					np->n_size = vap->va_size;
			} else
				np->n_size = vap->va_size;
			vnode_pager_setsize(vp, (u_long)np->n_size);
		} else
			np->n_size = vap->va_size;
	}
	bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(struct vattr));
#ifdef notdef
	if ((np->n_flag & NMODIFIED) == 0) {
		np->n_size = vaper->va_size;
		vnode_pager_setsize(vp, (u_long)np->n_size);
	} else if (np->n_size > vaper->va_size)
	if (np->n_size > vaper->va_size)
		vaper->va_size = np->n_size;
#endif
	/* Locally pending time changes override the cached server times. */
	if (np->n_flag & NCHG) {
		if (np->n_flag & NACC) {
			vaper->va_atime.ts_sec = np->n_atim.tv_sec;
			vaper->va_atime.ts_nsec = np->n_atim.tv_usec * 1000;
		}
		if (np->n_flag & NUPD) {
			vaper->va_mtime.ts_sec = np->n_mtim.tv_sec;
			vaper->va_mtime.ts_nsec = np->n_mtim.tv_usec * 1000;
		}
	}
	return (0);
}

/*
 * Set up nameidata for a lookup() call and do it
 */
int
nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, p)
	register struct nameidata *ndp;
	fhandle_t *fhp;
	int len;
	struct nfssvc_sock *slp;
	struct mbuf *nam;
	struct mbuf **mdp;
	caddr_t *dposp;
	struct proc *p;
{
	register int i, rem;
	register struct mbuf *md;
	register char *fromcp, *tocp;
	struct vnode *dp;
	int error, rdonly;
	struct componentname *cnp = &ndp->ni_cnd;

	MALLOC(cnp->cn_pnbuf, char *, len + 1, M_NAMEI, M_WAITOK);
	/*
	 * Copy the name from the mbuf list to ndp->ni_pnbuf
	 * and set the various ndp fields appropriately.
	 */
	fromcp = *dposp;
	tocp = cnp->cn_pnbuf;
	md = *mdp;
	rem = mtod(md, caddr_t) + md->m_len - fromcp;
	cnp->cn_hash = 0;
	for (i = 0; i < len; i++) {
		/* Current mbuf exhausted: move to the next one. */
		while (rem == 0) {
			md = md->m_next;
			if (md == NULL) {
				error = EBADRPC;
				goto out;
			}
			fromcp = mtod(md, caddr_t);
			rem = md->m_len;
		}
		/* A single component may not contain NUL or '/'. */
		if (*fromcp == '\0' || *fromcp == '/') {
			error = EINVAL;
			goto out;
		}
		cnp->cn_hash += (unsigned char)*fromcp;
		*tocp++ = *fromcp++;
		rem--;
	}
	*tocp = '\0';
	*mdp = md;
	*dposp = fromcp;
	/* Skip the XDR pad bytes that round the name to a word boundary. */
	len = nfsm_rndup(len)-len;
	if (len > 0) {
		if (rem >= len)
			*dposp += len;
		else if (error = nfs_adv(mdp, dposp, len, rem))
			goto out;
	}
	ndp->ni_pathlen = tocp - cnp->cn_pnbuf;
	cnp->cn_nameptr = cnp->cn_pnbuf;
	/*
	 * Extract and set starting directory.
	 */
	if (error = nfsrv_fhtovp(fhp, FALSE, &dp, ndp->ni_cnd.cn_cred, slp,
	    nam, &rdonly))
		goto out;
	if (dp->v_type != VDIR) {
		vrele(dp);
		error = ENOTDIR;
		goto out;
	}
	ndp->ni_startdir = dp;
	if (rdonly)
		cnp->cn_flags |= (NOCROSSMOUNT | RDONLY);
	else
		cnp->cn_flags |= NOCROSSMOUNT;
	/*
	 * And call lookup() to do the real work
	 */
	cnp->cn_proc = p;
	if (error = lookup(ndp))
		goto out;
	/*
	 * Check for encountering a symbolic link
	 */
	/* The server never follows symlinks; release and fail with EINVAL. */
	if (cnp->cn_flags & ISSYMLINK) {
		if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1)
			vput(ndp->ni_dvp);
		else
			vrele(ndp->ni_dvp);
		vput(ndp->ni_vp);
		ndp->ni_vp = NULL;
		error = EINVAL;
		goto out;
	}
	/*
	 * Check for saved name request
	 */
	/* Caller keeps the pathname buffer; it must FREE it later. */
	if (cnp->cn_flags & (SAVENAME | SAVESTART)) {
		cnp->cn_flags |= HASBUF;
		return (0);
	}
out:
	FREE(cnp->cn_pnbuf, M_NAMEI);
	return (error);
}

/*
 * A fiddled version of m_adj() that ensures null fill to a long
 * boundary and only trims off the back end
 */
void
nfsm_adj(mp, len, nul)
	struct mbuf *mp;
	register int len;
	int nul;
{
	register struct mbuf *m;
	register int count, i;
	register char *cp;

	/*
	 * Trim from tail.  Scan the mbuf chain,
	 * calculating its length and finding the last mbuf.
	 * If the adjustment only affects this mbuf, then just
	 * adjust and return.  Otherwise, rescan and truncate
	 * after the remaining size.
	 */
	count = 0;
	m = mp;
	for (;;) {
		count += m->m_len;
		if (m->m_next == (struct mbuf *)0)
			break;
		m = m->m_next;
	}
	if (m->m_len > len) {
		/* Trim fits entirely in the last mbuf. */
		m->m_len -= len;
		if (nul > 0) {
			/* Null-fill the final "nul" bytes. */
			cp = mtod(m, caddr_t)+m->m_len-nul;
			for (i = 0; i < nul; i++)
				*cp++ = '\0';
		}
		return;
	}
	count -= len;
	if (count < 0)
		count = 0;
	/*
	 * Correct length for chain is "count".
	 * Find the mbuf with last data, adjust its length,
	 * and toss data from remaining mbufs on chain.
	 */
	for (m = mp; m; m = m->m_next) {
		if (m->m_len >= count) {
			m->m_len = count;
			if (nul > 0) {
				cp = mtod(m, caddr_t)+m->m_len-nul;
				for (i = 0; i < nul; i++)
					*cp++ = '\0';
			}
			break;
		}
		count -= m->m_len;
	}
	/* Zero the lengths of all trailing mbufs past the cut point. */
	while (m = m->m_next)
		m->m_len = 0;
}

/*
 * nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
 * 	- look up fsid in mount list (if not found ret error)
 *	- get vp and export rights by calling VFS_FHTOVP()
 *	- if cred->cr_uid == 0 or MNT_EXPORTANON set it to credanon
 *	- if not lockflag unlock it with VOP_UNLOCK()
 */
int
nfsrv_fhtovp(fhp, lockflag, vpp, cred, slp, nam, rdonlyp)
	fhandle_t *fhp;
	int lockflag;
	struct vnode **vpp;
	struct ucred *cred;
	struct nfssvc_sock *slp;
	struct mbuf *nam;
	int *rdonlyp;
{
	register struct mount *mp;
	register struct nfsuid *uidp;
	register int i;
	struct ucred *credanon;
	int error, exflags;

	*vpp = (struct vnode *)0;
	if ((mp = getvfs(&fhp->fh_fsid)) == NULL)
		return (ESTALE);
	if (error = VFS_FHTOVP(mp, &fhp->fh_fid, nam, vpp, &exflags, &credanon))
		return (error);
	/*
	 * Check/setup credentials.
	 */
	if (exflags & MNT_EXKERB) {
		/*
		 * Kerberos export: look up a previously registered uid
		 * mapping on this socket; no mapping means the client has
		 * not authenticated, so reject the request.
		 */
		uidp = slp->ns_uidh[NUIDHASH(cred->cr_uid)];
		while (uidp) {
			if (uidp->nu_uid == cred->cr_uid)
				break;
			uidp = uidp->nu_hnext;
		}
		if (uidp) {
			cred->cr_uid = uidp->nu_cr.cr_uid;
			for (i = 0; i < uidp->nu_cr.cr_ngroups; i++)
				cred->cr_groups[i] = uidp->nu_cr.cr_groups[i];
		} else {
			vput(*vpp);
			return (NQNFS_AUTHERR);
		}
	} else if (cred->cr_uid == 0 || (exflags & MNT_EXPORTANON)) {
		/* Map root (or everyone, for anon exports) to the anon cred. */
		cred->cr_uid = credanon->cr_uid;
		for (i = 0; i < credanon->cr_ngroups && i < NGROUPS; i++)
			cred->cr_groups[i] = credanon->cr_groups[i];
	}
	if (exflags & MNT_EXRDONLY)
		*rdonlyp = 1;
	else
		*rdonlyp = 0;
	if (!lockflag)
		VOP_UNLOCK(*vpp);
	return (0);
}

/*
 * This function compares two net addresses by family and returns TRUE
 * if they are the same host.
 * If there is any doubt, return FALSE.
 * The AF_INET family is handled as a special case so that address mbufs
 * don't need to be saved to store "struct in_addr", which is only 4 bytes.
 */
int
netaddr_match(family, haddr, nam)
	int family;
	union nethostaddr *haddr;
	struct mbuf *nam;
{
	register struct sockaddr_in *inetaddr;

	switch (family) {
	case AF_INET:
		inetaddr = mtod(nam, struct sockaddr_in *);
		if (inetaddr->sin_family == AF_INET &&
		    inetaddr->sin_addr.s_addr == haddr->had_inetaddr)
			return (1);
		break;
#ifdef ISO
	case AF_ISO:
	    {
		register struct sockaddr_iso *isoaddr1, *isoaddr2;

		isoaddr1 = mtod(nam, struct sockaddr_iso *);
		isoaddr2 = mtod(haddr->had_nam, struct sockaddr_iso *);
		if (isoaddr1->siso_family == AF_ISO &&
		    isoaddr1->siso_nlen > 0 &&
		    isoaddr1->siso_nlen == isoaddr2->siso_nlen &&
		    SAME_ISOADDR(isoaddr1, isoaddr2))
			return (1);
		break;
	    }
#endif	/* ISO */
	default:
		break;
	};
	return (0);
}
	return (0);
}

/*
 * NOTE(review): everything below duplicates, byte for byte, definitions
 * that already appear earlier in this file (nfs_loadattrcache through
 * netaddr_match).  This looks like an accidental duplication -- a C
 * translation unit cannot contain two definitions of the same function.
 * Confirm against the original nfs_subs.c and remove one copy.
 */

/*
 * Attribute cache routines.
 * nfs_loadattrcache() - loads or updates the cache contents from attributes
 *	that are on the mbuf list
 * nfs_getattrcache() - returns valid attributes if found in cache, returns
 *	error otherwise
 */

/*
 * Load the attribute cache (that lives in the nfsnode entry) with
 * the values on the mbuf list and
 * Iff vap not NULL
 *	copy the attributes to *vaper
 */
int
nfs_loadattrcache(vpp, mdp, dposp, vaper)
	struct vnode **vpp;
	struct mbuf **mdp;
	caddr_t *dposp;
	struct vattr *vaper;
{
	register struct vnode *vp = *vpp;
	register struct vattr *vap;
	register struct nfsv2_fattr *fp;
	extern int (**spec_nfsv2nodeop_p)();
	register struct nfsnode *np, *nq, **nhpp;
	register long t1;
	caddr_t dpos, cp2;
	int error = 0, isnq;
	struct mbuf *md;
	enum vtype vtyp;
	u_short vmode;
	long rdev;
	struct timespec mtime;
	struct vnode *nvp;

	md = *mdp;
	dpos = *dposp;
	/* Bytes of the wire fattr contiguous in the current mbuf. */
	t1 = (mtod(md, caddr_t) + md->m_len) - dpos;
	isnq = (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS);
	/* Make the whole fattr contiguous so it can be read through fp. */
	if (error = nfsm_disct(&md, &dpos, NFSX_FATTR(isnq), t1, &cp2))
		return (error);
	fp = (struct nfsv2_fattr *)cp2;
	vtyp = nfstov_type(fp->fa_type);
	vmode = fxdr_unsigned(u_short, fp->fa_mode);
	/* For VNON/VREG, derive the real type from the mode bits. */
	if (vtyp == VNON || vtyp == VREG)
		vtyp = IFTOVT(vmode);
	if (isnq) {
		rdev = fxdr_unsigned(long, fp->fa_nqrdev);
		fxdr_nqtime(&fp->fa_nqmtime, &mtime);
	} else {
		rdev = fxdr_unsigned(long, fp->fa_nfsrdev);
		fxdr_nfstime(&fp->fa_nfsmtime, &mtime);
	}
	/*
	 * If v_type == VNON it is a new node, so fill in the v_type,
	 * n_mtime fields. Check to see if it represents a special
	 * device, and if so, check for a possible alias. Once the
	 * correct vnode has been obtained, fill in the rest of the
	 * information.
	 */
	np = VTONFS(vp);
	if (vp->v_type == VNON) {
		/* A VCHR with an all-ones rdev is used to encode a FIFO. */
		if (vtyp == VCHR && rdev == 0xffffffff)
			vp->v_type = vtyp = VFIFO;
		else
			vp->v_type = vtyp;
		if (vp->v_type == VFIFO) {
			extern int (**fifo_nfsv2nodeop_p)();
			vp->v_op = fifo_nfsv2nodeop_p;
		}
		if (vp->v_type == VCHR || vp->v_type == VBLK) {
			vp->v_op = spec_nfsv2nodeop_p;
			if (nvp = checkalias(vp, (dev_t)rdev, vp->v_mount)) {
				/*
				 * Discard unneeded vnode, but save its nfsnode.
				 */
				/* Unlink np from the nfsnode hash chain. */
				if (nq = np->n_forw)
					nq->n_back = np->n_back;
				*np->n_back = nq;
				nvp->v_data = vp->v_data;
				vp->v_data = NULL;
				vp->v_op = spec_vnodeop_p;
				vrele(vp);
				vgone(vp);
				/*
				 * Reinitialize aliased node.
				 */
				/* Re-insert np at the head of its hash chain. */
				np->n_vnode = nvp;
				nhpp = (struct nfsnode **)nfs_hash(&np->n_fh);
				if (nq = *nhpp)
					nq->n_back = &np->n_forw;
				np->n_forw = nq;
				np->n_back = nhpp;
				*nhpp = np;
				*vpp = vp = nvp;
			}
		}
		np->n_mtime = mtime.ts_sec;
	}
	/* Decode the remaining attributes into the cached vattr. */
	vap = &np->n_vattr;
	vap->va_type = vtyp;
	vap->va_mode = (vmode & 07777);
	vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
	vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
	vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
	vap->va_rdev = (dev_t)rdev;
	vap->va_mtime = mtime;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	if (isnq) {
		fxdr_hyper(&fp->fa_nqsize, &vap->va_size);
		vap->va_blocksize = fxdr_unsigned(long, fp->fa_nqblocksize);
		fxdr_hyper(&fp->fa_nqbytes, &vap->va_bytes);
		vap->va_fileid = fxdr_unsigned(long, fp->fa_nqfileid);
		fxdr_nqtime(&fp->fa_nqatime, &vap->va_atime);
		vap->va_flags = fxdr_unsigned(u_long, fp->fa_nqflags);
		fxdr_nqtime(&fp->fa_nqctime, &vap->va_ctime);
		vap->va_gen = fxdr_unsigned(u_long, fp->fa_nqgen);
		fxdr_hyper(&fp->fa_nqfilerev, &vap->va_filerev);
	} else {
		vap->va_size = fxdr_unsigned(u_long, fp->fa_nfssize);
		vap->va_blocksize = fxdr_unsigned(long, fp->fa_nfsblocksize);
		vap->va_bytes = fxdr_unsigned(long, fp->fa_nfsblocks) * NFS_FABLKSIZE;
		vap->va_fileid = fxdr_unsigned(long, fp->fa_nfsfileid);
		fxdr_nfstime(&fp->fa_nfsatime, &vap->va_atime);
		vap->va_flags = 0;
		vap->va_ctime.ts_sec = fxdr_unsigned(long, fp->fa_nfsctime.nfs_sec);
		vap->va_ctime.ts_nsec = 0;
		/* NFSv2 has no generation number; the ctime usec slot is reused. */
		vap->va_gen = fxdr_unsigned(u_long, fp->fa_nfsctime.nfs_usec);
		vap->va_filerev = 0;
	}
	/*
	 * Reconcile the server's size with the locally cached size.  If we
	 * have modified the file locally, the larger of the two wins so
	 * local dirty data is not truncated away.
	 */
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (np->n_flag & NMODIFIED) {
				if (vap->va_size < np->n_size)
					vap->va_size = np->n_size;
				else
					np->n_size = vap->va_size;
			} else
				np->n_size = vap->va_size;
			vnode_pager_setsize(vp, (u_long)np->n_size);
		} else
			np->n_size = vap->va_size;
	}
	/* Timestamp the cache entry for nfs_getattrcache()'s validity test. */
	np->n_attrstamp = time.tv_sec;
	*dposp = dpos;
	*mdp = md;
	if (vaper != NULL) {
		bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
#ifdef notdef
		if ((np->n_flag & NMODIFIED) && np->n_size > vap->va_size)
		if (np->n_size > vap->va_size)
			vaper->va_size = np->n_size;
#endif
		/* Locally pending utimes-style changes override server times. */
		if (np->n_flag & NCHG) {
			if (np->n_flag & NACC) {
				vaper->va_atime.ts_sec = np->n_atim.tv_sec;
				vaper->va_atime.ts_nsec =
				    np->n_atim.tv_usec * 1000;
			}
			if (np->n_flag & NUPD) {
				vaper->va_mtime.ts_sec = np->n_mtim.tv_sec;
				vaper->va_mtime.ts_nsec =
				    np->n_mtim.tv_usec * 1000;
			}
		}
	}
	return (0);
}

/*
 * Check the time stamp
 * If the cache is valid, copy contents to *vap and return 0
 * otherwise return an error
 */
int
nfs_getattrcache(vp, vaper)
	register struct vnode *vp;
	struct vattr *vaper;
{
	register struct nfsnode *np = VTONFS(vp);
	register struct vattr *vap;

	/*
	 * Validity test: under NQNFS leasing, the cache is good while a
	 * read lease is cachable; otherwise fall back to the plain NFS
	 * attribute timeout.
	 */
	if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQLOOKLEASE) {
		if (!NQNFS_CKCACHABLE(vp, NQL_READ) || np->n_attrstamp == 0) {
			nfsstats.attrcache_misses++;
			return (ENOENT);
		}
	} else if ((time.tv_sec - np->n_attrstamp) >= NFS_ATTRTIMEO(np)) {
		nfsstats.attrcache_misses++;
		return (ENOENT);
	}
	nfsstats.attrcache_hits++;
	vap = &np->n_vattr;
	/* Same size reconciliation as in nfs_loadattrcache() above. */
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (np->n_flag & NMODIFIED) {
				if (vap->va_size < np->n_size)
					vap->va_size = np->n_size;
				else
					np->n_size = vap->va_size;
			} else
				np->n_size = vap->va_size;
			vnode_pager_setsize(vp, (u_long)np->n_size);
		} else
			np->n_size = vap->va_size;
	}
	bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(struct vattr));
#ifdef notdef
	if ((np->n_flag & NMODIFIED) == 0) {
		np->n_size = vaper->va_size;
		vnode_pager_setsize(vp, (u_long)np->n_size);
	} else if (np->n_size > vaper->va_size)
	if (np->n_size > vaper->va_size)
		vaper->va_size = np->n_size;
#endif
	/* Locally pending time changes override the cached server times. */
	if (np->n_flag & NCHG) {
		if (np->n_flag & NACC) {
			vaper->va_atime.ts_sec = np->n_atim.tv_sec;
			vaper->va_atime.ts_nsec = np->n_atim.tv_usec * 1000;
		}
		if (np->n_flag & NUPD) {
			vaper->va_mtime.ts_sec = np->n_mtim.tv_sec;
			vaper->va_mtime.ts_nsec = np->n_mtim.tv_usec * 1000;
		}
	}
	return (0);
}

/*
 * Set up nameidata for a lookup() call and do it
 */
int
nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, p)
	register struct nameidata *ndp;
	fhandle_t *fhp;
	int len;
	struct nfssvc_sock *slp;
	struct mbuf *nam;
	struct mbuf **mdp;
	caddr_t *dposp;
	struct proc *p;
{
	register int i, rem;
	register struct mbuf *md;
	register char *fromcp, *tocp;
	struct vnode *dp;
	int error, rdonly;
	struct componentname *cnp = &ndp->ni_cnd;

	MALLOC(cnp->cn_pnbuf, char *, len + 1, M_NAMEI, M_WAITOK);
	/*
	 * Copy the name from the mbuf list to ndp->ni_pnbuf
	 * and set the various ndp fields appropriately.
	 */
	fromcp = *dposp;
	tocp = cnp->cn_pnbuf;
	md = *mdp;
	rem = mtod(md, caddr_t) + md->m_len - fromcp;
	cnp->cn_hash = 0;
	for (i = 0; i < len; i++) {
		/* Current mbuf exhausted: move to the next one. */
		while (rem == 0) {
			md = md->m_next;
			if (md == NULL) {
				error = EBADRPC;
				goto out;
			}
			fromcp = mtod(md, caddr_t);
			rem = md->m_len;
		}
		/* A single component may not contain NUL or '/'. */
		if (*fromcp == '\0' || *fromcp == '/') {
			error = EINVAL;
			goto out;
		}
		cnp->cn_hash += (unsigned char)*fromcp;
		*tocp++ = *fromcp++;
		rem--;
	}
	*tocp = '\0';
	*mdp = md;
	*dposp = fromcp;
	/* Skip the XDR pad bytes that round the name to a word boundary. */
	len = nfsm_rndup(len)-len;
	if (len > 0) {
		if (rem >= len)
			*dposp += len;
		else if (error = nfs_adv(mdp, dposp, len, rem))
			goto out;
	}
	ndp->ni_pathlen = tocp - cnp->cn_pnbuf;
	cnp->cn_nameptr = cnp->cn_pnbuf;
	/*
	 * Extract and set starting directory.
	 */
	if (error = nfsrv_fhtovp(fhp, FALSE, &dp, ndp->ni_cnd.cn_cred, slp,
	    nam, &rdonly))
		goto out;
	if (dp->v_type != VDIR) {
		vrele(dp);
		error = ENOTDIR;
		goto out;
	}
	ndp->ni_startdir = dp;
	if (rdonly)
		cnp->cn_flags |= (NOCROSSMOUNT | RDONLY);
	else
		cnp->cn_flags |= NOCROSSMOUNT;
	/*
	 * And call lookup() to do the real work
	 */
	cnp->cn_proc = p;
	if (error = lookup(ndp))
		goto out;
	/*
	 * Check for encountering a symbolic link
	 */
	/* The server never follows symlinks; release and fail with EINVAL. */
	if (cnp->cn_flags & ISSYMLINK) {
		if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1)
			vput(ndp->ni_dvp);
		else
			vrele(ndp->ni_dvp);
		vput(ndp->ni_vp);
		ndp->ni_vp = NULL;
		error = EINVAL;
		goto out;
	}
	/*
	 * Check for saved name request
	 */
	/* Caller keeps the pathname buffer; it must FREE it later. */
	if (cnp->cn_flags & (SAVENAME | SAVESTART)) {
		cnp->cn_flags |= HASBUF;
		return (0);
	}
out:
	FREE(cnp->cn_pnbuf, M_NAMEI);
	return (error);
}

/*
 * A fiddled version of m_adj() that ensures null fill to a long
 * boundary and only trims off the back end
 */
void
nfsm_adj(mp, len, nul)
	struct mbuf *mp;
	register int len;
	int nul;
{
	register struct mbuf *m;
	register int count, i;
	register char *cp;

	/*
	 * Trim from tail.  Scan the mbuf chain,
	 * calculating its length and finding the last mbuf.
	 * If the adjustment only affects this mbuf, then just
	 * adjust and return.  Otherwise, rescan and truncate
	 * after the remaining size.
	 */
	count = 0;
	m = mp;
	for (;;) {
		count += m->m_len;
		if (m->m_next == (struct mbuf *)0)
			break;
		m = m->m_next;
	}
	if (m->m_len > len) {
		/* Trim fits entirely in the last mbuf. */
		m->m_len -= len;
		if (nul > 0) {
			/* Null-fill the final "nul" bytes. */
			cp = mtod(m, caddr_t)+m->m_len-nul;
			for (i = 0; i < nul; i++)
				*cp++ = '\0';
		}
		return;
	}
	count -= len;
	if (count < 0)
		count = 0;
	/*
	 * Correct length for chain is "count".
	 * Find the mbuf with last data, adjust its length,
	 * and toss data from remaining mbufs on chain.
	 */
	for (m = mp; m; m = m->m_next) {
		if (m->m_len >= count) {
			m->m_len = count;
			if (nul > 0) {
				cp = mtod(m, caddr_t)+m->m_len-nul;
				for (i = 0; i < nul; i++)
					*cp++ = '\0';
			}
			break;
		}
		count -= m->m_len;
	}
	/* Zero the lengths of all trailing mbufs past the cut point. */
	while (m = m->m_next)
		m->m_len = 0;
}

/*
 * nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
 * 	- look up fsid in mount list (if not found ret error)
 *	- get vp and export rights by calling VFS_FHTOVP()
 *	- if cred->cr_uid == 0 or MNT_EXPORTANON set it to credanon
 *	- if not lockflag unlock it with VOP_UNLOCK()
 */
int
nfsrv_fhtovp(fhp, lockflag, vpp, cred, slp, nam, rdonlyp)
	fhandle_t *fhp;
	int lockflag;
	struct vnode **vpp;
	struct ucred *cred;
	struct nfssvc_sock *slp;
	struct mbuf *nam;
	int *rdonlyp;
{
	register struct mount *mp;
	register struct nfsuid *uidp;
	register int i;
	struct ucred *credanon;
	int error, exflags;

	*vpp = (struct vnode *)0;
	if ((mp = getvfs(&fhp->fh_fsid)) == NULL)
		return (ESTALE);
	if (error = VFS_FHTOVP(mp, &fhp->fh_fid, nam, vpp, &exflags, &credanon))
		return (error);
	/*
	 * Check/setup credentials.
	 */
	if (exflags & MNT_EXKERB) {
		/*
		 * Kerberos export: look up a previously registered uid
		 * mapping on this socket; no mapping means the client has
		 * not authenticated, so reject the request.
		 */
		uidp = slp->ns_uidh[NUIDHASH(cred->cr_uid)];
		while (uidp) {
			if (uidp->nu_uid == cred->cr_uid)
				break;
			uidp = uidp->nu_hnext;
		}
		if (uidp) {
			cred->cr_uid = uidp->nu_cr.cr_uid;
			for (i = 0; i < uidp->nu_cr.cr_ngroups; i++)
				cred->cr_groups[i] = uidp->nu_cr.cr_groups[i];
		} else {
			vput(*vpp);
			return (NQNFS_AUTHERR);
		}
	} else if (cred->cr_uid == 0 || (exflags & MNT_EXPORTANON)) {
		/* Map root (or everyone, for anon exports) to the anon cred. */
		cred->cr_uid = credanon->cr_uid;
		for (i = 0; i < credanon->cr_ngroups && i < NGROUPS; i++)
			cred->cr_groups[i] = credanon->cr_groups[i];
	}
	if (exflags & MNT_EXRDONLY)
		*rdonlyp = 1;
	else
		*rdonlyp = 0;
	if (!lockflag)
		VOP_UNLOCK(*vpp);
	return (0);
}

/*
 * This function compares two net addresses by family and returns TRUE
 * if they are the same host.
 * If there is any doubt, return FALSE.
 * The AF_INET family is handled as a special case so that address mbufs
 * don't need to be saved to store "struct in_addr", which is only 4 bytes.
 */
int
netaddr_match(family, haddr, nam)
	int family;
	union nethostaddr *haddr;
	struct mbuf *nam;
{
	register struct sockaddr_in *inetaddr;

	switch (family) {
	case AF_INET:
		inetaddr = mtod(nam, struct sockaddr_in *);
		if (inetaddr->sin_family == AF_INET &&
		    inetaddr->sin_addr.s_addr == haddr->had_inetaddr)
			return (1);
		break;
#ifdef ISO
	case AF_ISO:
	    {
		register struct sockaddr_iso *isoaddr1, *isoaddr2;

		isoaddr1 = mtod(nam, struct sockaddr_iso *);
		isoaddr2 = mtod(haddr->had_nam, struct sockaddr_iso *);
		if (isoaddr1->siso_family == AF_ISO &&
		    isoaddr1->siso_nlen > 0 &&
		    isoaddr1->siso_nlen == isoaddr2->siso_nlen &&
		    SAME_ISOADDR(isoaddr1, isoaddr2))
			return (1);
		break;
	    }
#endif	/* ISO */
	default:
		break;
	};
	return (0);
}
|