ufs_quota.c revision 111748
1/* 2 * Copyright (c) 1982, 1986, 1990, 1993, 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Robert Elz at The University of Melbourne. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
 *
 * @(#)ufs_quota.c	8.5 (Berkeley) 5/20/95
 * $FreeBSD: head/sys/ufs/ufs/ufs_quota.c 111748 2003-03-02 16:54:40Z des $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/vnode.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

static MALLOC_DEFINE(M_DQUOT, "UFS quota", "UFS quota entries");

/*
 * Quota name to error message mapping, indexed by quota type
 * (USRQUOTA / GRPQUOTA); used in the uprintf() diagnostics below.
 */
static char *quotatypes[] = INITQFNAMES;

static int chkdqchg(struct inode *, ufs2_daddr_t, struct ucred *, int);
static int chkiqchg(struct inode *, ino_t, struct ucred *, int);
static int dqget(struct vnode *,
	u_long, struct ufsmount *, int, struct dquot **);
static int dqsync(struct vnode *, struct dquot *);
static void dqflush(struct vnode *);

#ifdef DIAGNOSTIC
static void dqref(struct dquot *);
static void chkdquot(struct inode *);
#endif

/*
 * Set up the quotas for an inode.
 *
 * This routine completely defines the semantics of quotas.
 * If other criteria want to be used to establish quotas, the
 * MAXQUOTAS value in quotas.h should be increased, and the
 * additional dquots set up here.
 *
 * Fills in ip->i_dquot[] for the file's uid and gid.  Returns 0 on
 * success or a dqget() error; EINVAL from dqget() (quotas not enabled
 * for that type) is deliberately ignored.
 */
int
getinoquota(ip)
	struct inode *ip;
{
	struct ufsmount *ump;
	struct vnode *vp = ITOV(ip);
	int error;

	ump = VFSTOUFS(vp->v_mount);
	/*
	 * Set up the user quota based on file uid.
	 * EINVAL means that quotas are not enabled.
	 */
	if (ip->i_dquot[USRQUOTA] == NODQUOT &&
	    (error =
		dqget(vp, ip->i_uid, ump, USRQUOTA, &ip->i_dquot[USRQUOTA])) &&
	    error != EINVAL)
		return (error);
	/*
	 * Set up the group quota based on file gid.
	 * EINVAL means that quotas are not enabled.
	 */
	if (ip->i_dquot[GRPQUOTA] == NODQUOT &&
	    (error =
		dqget(vp, ip->i_gid, ump, GRPQUOTA, &ip->i_dquot[GRPQUOTA])) &&
	    error != EINVAL)
		return (error);
	return (0);
}

/*
 * Update disk usage, and take corrective action.
 *
 * "change" is the signed delta applied to dq_curblocks for each of the
 * inode's quotas.  A negative change credits blocks back (clamped at
 * zero) and cannot fail.  A positive change is checked against the
 * limits via chkdqchg() unless FORCE is set in "flags" or the
 * credential passes suser_cred().  Returns 0 or EDQUOT.
 */
int
chkdq(ip, change, cred, flags)
	struct inode *ip;
	ufs2_daddr_t change;
	struct ucred *cred;
	int flags;
{
	struct dquot *dq;
	ufs2_daddr_t ncurblocks;
	int i, error;

#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		/* Releasing blocks: credit each quota, clamping at zero. */
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			/* Sleep until a concurrent holder drops DQ_LOCK. */
			while (dq->dq_flags & DQ_LOCK) {
				dq->dq_flags |= DQ_WANT;
				(void) tsleep(dq, PINOD+1, "chkdq1", 0);
			}
			ncurblocks = dq->dq_curblocks + change;
			if (ncurblocks >= 0)
				dq->dq_curblocks = ncurblocks;
			else
				dq->dq_curblocks = 0;
			dq->dq_flags &= ~DQ_BLKS;
			dq->dq_flags |= DQ_MOD;
		}
		return (0);
	}
	/* Allocating blocks: enforce limits unless forced or privileged. */
	if ((flags & FORCE) == 0 && suser_cred(cred, 0)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			error = chkdqchg(ip, change, cred, i);
			if (error)
				return (error);
		}
	}
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		while (dq->dq_flags & DQ_LOCK) {
			dq->dq_flags |= DQ_WANT;
			(void) tsleep(dq, PINOD+1, "chkdq2", 0);
		}
		/* Reset timer when crossing soft limit */
		if (dq->dq_curblocks + change >= dq->dq_bsoftlimit &&
		    dq->dq_curblocks < dq->dq_bsoftlimit)
			dq->dq_btime = time_second +
			    VFSTOUFS(ITOV(ip)->v_mount)->um_btime[i];
		dq->dq_curblocks += change;
		dq->dq_flags |= DQ_MOD;
	}
	return (0);
}

/*
 * Check for a valid change to a users allocation.
 * Issue an error message if appropriate.
184 */ 185static int 186chkdqchg(ip, change, cred, type) 187 struct inode *ip; 188 ufs2_daddr_t change; 189 struct ucred *cred; 190 int type; 191{ 192 struct dquot *dq = ip->i_dquot[type]; 193 ufs2_daddr_t ncurblocks = dq->dq_curblocks + change; 194 195 /* 196 * If user would exceed their hard limit, disallow space allocation. 197 */ 198 if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) { 199 if ((dq->dq_flags & DQ_BLKS) == 0 && 200 ip->i_uid == cred->cr_uid) { 201 uprintf("\n%s: write failed, %s disk limit reached\n", 202 ITOV(ip)->v_mount->mnt_stat.f_mntonname, 203 quotatypes[type]); 204 dq->dq_flags |= DQ_BLKS; 205 } 206 return (EDQUOT); 207 } 208 /* 209 * If user is over their soft limit for too long, disallow space 210 * allocation. Reset time limit as they cross their soft limit. 211 */ 212 if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) { 213 if (dq->dq_curblocks < dq->dq_bsoftlimit) { 214 dq->dq_btime = time_second + 215 VFSTOUFS(ITOV(ip)->v_mount)->um_btime[type]; 216 if (ip->i_uid == cred->cr_uid) 217 uprintf("\n%s: warning, %s %s\n", 218 ITOV(ip)->v_mount->mnt_stat.f_mntonname, 219 quotatypes[type], "disk quota exceeded"); 220 return (0); 221 } 222 if (time_second > dq->dq_btime) { 223 if ((dq->dq_flags & DQ_BLKS) == 0 && 224 ip->i_uid == cred->cr_uid) { 225 uprintf("\n%s: write failed, %s %s\n", 226 ITOV(ip)->v_mount->mnt_stat.f_mntonname, 227 quotatypes[type], 228 "disk quota exceeded for too long"); 229 dq->dq_flags |= DQ_BLKS; 230 } 231 return (EDQUOT); 232 } 233 } 234 return (0); 235} 236 237/* 238 * Check the inode limit, applying corrective action. 
 */
/*
 * NOTE(review): "change" has the unsigned type ino_t, so the
 * "change < 0" release path below can never be taken as written (see
 * the existing XXX markers); a caller releasing inodes presumably
 * relies on unsigned wraparound in the additions below -- TODO
 * confirm against callers before making "change" signed.
 */
int
chkiq(ip, change, cred, flags)
	struct inode *ip;
	ino_t change;
	struct ucred *cred;
	int flags;
{
	struct dquot *dq;
	ino_t ncurinodes;
	int i, error;

#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	/* XXX: change is unsigned */
	if (change < 0) {
		/* Releasing inodes: credit each quota, clamping at zero. */
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			/* Sleep until a concurrent holder drops DQ_LOCK. */
			while (dq->dq_flags & DQ_LOCK) {
				dq->dq_flags |= DQ_WANT;
				(void) tsleep(dq, PINOD+1, "chkiq1", 0);
			}
			ncurinodes = dq->dq_curinodes + change;
			/* XXX: ncurinodes is unsigned */
			if (ncurinodes >= 0)
				dq->dq_curinodes = ncurinodes;
			else
				dq->dq_curinodes = 0;
			dq->dq_flags &= ~DQ_INODS;
			dq->dq_flags |= DQ_MOD;
		}
		return (0);
	}
	/* Allocating inodes: enforce limits unless forced or privileged. */
	if ((flags & FORCE) == 0 && suser_cred(cred, 0)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			error = chkiqchg(ip, change, cred, i);
			if (error)
				return (error);
		}
	}
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		while (dq->dq_flags & DQ_LOCK) {
			dq->dq_flags |= DQ_WANT;
			(void) tsleep(dq, PINOD+1, "chkiq2", 0);
		}
		/* Reset timer when crossing soft limit */
		if (dq->dq_curinodes + change >= dq->dq_isoftlimit &&
		    dq->dq_curinodes < dq->dq_isoftlimit)
			dq->dq_itime = time_second +
			    VFSTOUFS(ITOV(ip)->v_mount)->um_itime[i];
		dq->dq_curinodes += change;
		dq->dq_flags |= DQ_MOD;
	}
	return (0);
}

/*
 * Check for a valid change to a users allocation.
 * Issue an error message if appropriate.
307 */ 308static int 309chkiqchg(ip, change, cred, type) 310 struct inode *ip; 311 ino_t change; 312 struct ucred *cred; 313 int type; 314{ 315 struct dquot *dq = ip->i_dquot[type]; 316 ino_t ncurinodes = dq->dq_curinodes + change; 317 318 /* 319 * If user would exceed their hard limit, disallow inode allocation. 320 */ 321 if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) { 322 if ((dq->dq_flags & DQ_INODS) == 0 && 323 ip->i_uid == cred->cr_uid) { 324 uprintf("\n%s: write failed, %s inode limit reached\n", 325 ITOV(ip)->v_mount->mnt_stat.f_mntonname, 326 quotatypes[type]); 327 dq->dq_flags |= DQ_INODS; 328 } 329 return (EDQUOT); 330 } 331 /* 332 * If user is over their soft limit for too long, disallow inode 333 * allocation. Reset time limit as they cross their soft limit. 334 */ 335 if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) { 336 if (dq->dq_curinodes < dq->dq_isoftlimit) { 337 dq->dq_itime = time_second + 338 VFSTOUFS(ITOV(ip)->v_mount)->um_itime[type]; 339 if (ip->i_uid == cred->cr_uid) 340 uprintf("\n%s: warning, %s %s\n", 341 ITOV(ip)->v_mount->mnt_stat.f_mntonname, 342 quotatypes[type], "inode quota exceeded"); 343 return (0); 344 } 345 if (time_second > dq->dq_itime) { 346 if ((dq->dq_flags & DQ_INODS) == 0 && 347 ip->i_uid == cred->cr_uid) { 348 uprintf("\n%s: write failed, %s %s\n", 349 ITOV(ip)->v_mount->mnt_stat.f_mntonname, 350 quotatypes[type], 351 "inode quota exceeded for too long"); 352 dq->dq_flags |= DQ_INODS; 353 } 354 return (EDQUOT); 355 } 356 } 357 return (0); 358} 359 360#ifdef DIAGNOSTIC 361/* 362 * On filesystems with quotas enabled, it is an error for a file to change 363 * size and not to have a dquot structure associated with it. 
 */
static void
chkdquot(ip)
	struct inode *ip;
{
	struct ufsmount *ump = VFSTOUFS(ITOV(ip)->v_mount);
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		/* Skip quota types that are off or in transition. */
		if (ump->um_quotas[i] == NULLVP ||
		    (ump->um_qflags[i] & (QTF_OPENING|QTF_CLOSING)))
			continue;
		if (ip->i_dquot[i] == NODQUOT) {
			vprint("chkdquot: missing dquot", ITOV(ip));
			panic("chkdquot: missing dquot");
		}
	}
}
#endif

/*
 * Code to process quotactl commands.
 */

/*
 * Q_QUOTAON - set up a quota file for a particular filesystem.
 *
 * Opens "fname" (a user-space path) read/write as the quota file for
 * the given type, marks it VV_SYSTEM, records default grace times, and
 * attaches dquots to every vnode on the mount currently open for write.
 */
int
quotaon(td, mp, type, fname)
	struct thread *td;
	struct mount *mp;
	int type;
	caddr_t fname;
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, **vpp;
	struct vnode *nextvp;
	struct dquot *dq;
	int error, flags;
	struct nameidata nd;

	vpp = &ump->um_quotas[type];
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, td);
	flags = FREAD | FWRITE;
	error = vn_open(&nd, &flags, 0);
	if (error)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;
	VOP_UNLOCK(vp, 0, td);
	/* Quota files must be regular files. */
	if (vp->v_type != VREG) {
		(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
		return (EACCES);
	}
	/* A different quota file is already active: turn it off first. */
	if (*vpp != vp)
		quotaoff(td, mp, type);
	ump->um_qflags[type] |= QTF_OPENING;
	mp->mnt_flag |= MNT_QUOTA;
	/*
	 * NOTE(review): vp was unlocked just above, yet this asserts it
	 * is locked -- looks inconsistent; confirm against the VFS
	 * locking rules for modifying v_vflag.
	 */
	ASSERT_VOP_LOCKED(vp, "quotaon");
	vp->v_vflag |= VV_SYSTEM;
	*vpp = vp;
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
 */
	ump->um_cred[type] = crhold(td->td_ucred);
	/* Default grace periods; the id-0 dquot may override them. */
	ump->um_btime[type] = MAX_DQ_TIME;
	ump->um_itime[type] = MAX_IQ_TIME;
	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->um_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->um_itime[type] = dq->dq_itime;
		dqrele(NULLVP, dq);
	}
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquot's for inodes being modified.
	 *
	 * mntvnode_mtx is dropped around each vget(); if the list
	 * changes underneath us we restart the scan from the top.
	 */
	mtx_lock(&mntvnode_mtx);
again:
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nextvp) {
		if (vp->v_mount != mp)
			goto again;
		nextvp = TAILQ_NEXT(vp, v_nmntvnodes);

		mtx_unlock(&mntvnode_mtx);
		if (vget(vp, LK_EXCLUSIVE, td)) {
			mtx_lock(&mntvnode_mtx);
			goto again;
		}
		/* Only writable vnodes need dquots attached now. */
		if (vp->v_type == VNON || vp->v_writecount == 0) {
			vput(vp);
			mtx_lock(&mntvnode_mtx);
			continue;
		}
		error = getinoquota(VTOI(vp));
		vput(vp);
		mtx_lock(&mntvnode_mtx);
		if (error)
			break;
		/* List changed while unlocked: rescan. */
		if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
			goto again;
	}
	mtx_unlock(&mntvnode_mtx);
	ump->um_qflags[type] &= ~QTF_OPENING;
	if (error)
		quotaoff(td, mp, type);
	return (error);
}

/*
 * Q_QUOTAOFF - turn off disk quotas for a filesystem.
 *
 * Detaches the type's dquot from every vnode on the mount, flushes the
 * dquot cache, and closes the quota file.
 */
int
quotaoff(td, mp, type)
	struct thread *td;
	struct mount *mp;
	int type;
{
	struct vnode *vp;
	struct vnode *qvp, *nextvp;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *dq;
	struct inode *ip;
	int error;

	/* Nothing to do if this quota type was never turned on. */
	if ((qvp = ump->um_quotas[type]) == NULLVP)
		return (0);
	ump->um_qflags[type] |= QTF_CLOSING;
	/*
	 * Search vnodes associated with this mount point,
	 * deleting any references to quota file being closed.
 */
	mtx_lock(&mntvnode_mtx);
again:
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nextvp) {
		if (vp->v_mount != mp)
			goto again;
		nextvp = TAILQ_NEXT(vp, v_nmntvnodes);

		/* Drop the list lock around the per-vnode work. */
		mtx_unlock(&mntvnode_mtx);
		mtx_lock(&vp->v_interlock);
		if (vp->v_type == VNON) {
			mtx_unlock(&vp->v_interlock);
			mtx_lock(&mntvnode_mtx);
			continue;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			mtx_lock(&mntvnode_mtx);
			goto again;
		}
		ip = VTOI(vp);
		dq = ip->i_dquot[type];
		ip->i_dquot[type] = NODQUOT;
		dqrele(vp, dq);
		vput(vp);
		mtx_lock(&mntvnode_mtx);
		/* List changed while unlocked: rescan. */
		if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
			goto again;
	}
	mtx_unlock(&mntvnode_mtx);
	dqflush(qvp);
	ASSERT_VOP_LOCKED(qvp, "quotaoff");
	qvp->v_vflag &= ~VV_SYSTEM;
	error = vn_close(qvp, FREAD|FWRITE, td->td_ucred, td);
	ump->um_quotas[type] = NULLVP;
	crfree(ump->um_cred[type]);
	ump->um_cred[type] = NOCRED;
	ump->um_qflags[type] &= ~QTF_CLOSING;
	/* Clear MNT_QUOTA only when no quota type remains active. */
	for (type = 0; type < MAXQUOTAS; type++)
		if (ump->um_quotas[type] != NULLVP)
			break;
	if (type == MAXQUOTAS)
		mp->mnt_flag &= ~MNT_QUOTA;
	return (error);
}

/*
 * Q_GETQUOTA - return current values in a dqblk structure.
 *
 * "addr" is a user-space pointer to a struct dqblk; the id's dquot is
 * looked up (or read from the quota file) and copied out.
 */
int
getquota(mp, id, type, addr)
	struct mount *mp;
	u_long id;
	int type;
	caddr_t addr;
{
	struct dquot *dq;
	int error;

	error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq);
	if (error)
		return (error);
	error = copyout((caddr_t)&dq->dq_dqb, addr, sizeof (struct dqblk));
	dqrele(NULLVP, dq);
	return (error);
}

/*
 * Q_SETQUOTA - assign an entire dqblk structure.
 */
/*
 * Copies a struct dqblk in from user space ("addr") and installs it as
 * the id's limits, preserving the in-core current usage and adjusting
 * the grace timers and warning flags to match the new limits.
 */
int
setquota(mp, id, type, addr)
	struct mount *mp;
	u_long id;
	int type;
	caddr_t addr;
{
	struct dquot *dq;
	struct dquot *ndq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dqblk newlim;
	int error;

	error = copyin(addr, (caddr_t)&newlim, sizeof (struct dqblk));
	if (error)
		return (error);
	error = dqget(NULLVP, id, ump, type, &ndq);
	if (error)
		return (error);
	dq = ndq;
	/* Sleep until a concurrent holder drops DQ_LOCK. */
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+1, "setqta", 0);
	}
	/*
	 * Copy all but the current values.
	 * Reset time limit if previously had no soft limit or were
	 * under it, but now have a soft limit and are over it.
	 */
	newlim.dqb_curblocks = dq->dq_curblocks;
	newlim.dqb_curinodes = dq->dq_curinodes;
	if (dq->dq_id != 0) {
		newlim.dqb_btime = dq->dq_btime;
		newlim.dqb_itime = dq->dq_itime;
	}
	if (newlim.dqb_bsoftlimit &&
	    dq->dq_curblocks >= newlim.dqb_bsoftlimit &&
	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
		newlim.dqb_btime = time_second + ump->um_btime[type];
	if (newlim.dqb_isoftlimit &&
	    dq->dq_curinodes >= newlim.dqb_isoftlimit &&
	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
		newlim.dqb_itime = time_second + ump->um_itime[type];
	dq->dq_dqb = newlim;
	/* Back under a soft limit: re-arm the one-shot warnings. */
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	/* DQ_FAKE marks an entry with no limits at all to enforce. */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	else
		dq->dq_flags &= ~DQ_FAKE;
	dq->dq_flags |= DQ_MOD;
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SETUSE - set current inode and block usage.
 */
/*
 * Copies a struct dqblk in from user space ("addr") and installs only
 * its dqb_curblocks / dqb_curinodes as the id's current usage; limits
 * are left untouched.
 */
int
setuse(mp, id, type, addr)
	struct mount *mp;
	u_long id;
	int type;
	caddr_t addr;
{
	struct dquot *dq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *ndq;
	struct dqblk usage;
	int error;

	error = copyin(addr, (caddr_t)&usage, sizeof (struct dqblk));
	if (error)
		return (error);
	error = dqget(NULLVP, id, ump, type, &ndq);
	if (error)
		return (error);
	dq = ndq;
	/* Sleep until a concurrent holder drops DQ_LOCK. */
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+1, "setuse", 0);
	}
	/*
	 * Reset time limit if have a soft limit and were
	 * previously under it, but are now over it.
	 */
	if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
	    usage.dqb_curblocks >= dq->dq_bsoftlimit)
		dq->dq_btime = time_second + ump->um_btime[type];
	if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
	    usage.dqb_curinodes >= dq->dq_isoftlimit)
		dq->dq_itime = time_second + ump->um_itime[type];
	dq->dq_curblocks = usage.dqb_curblocks;
	dq->dq_curinodes = usage.dqb_curinodes;
	/* Back under a soft limit: re-arm the one-shot warnings. */
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	dq->dq_flags |= DQ_MOD;
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SYNC - sync quota files to disk.
 *
 * Walks every vnode on the mount and writes back any dquot still
 * marked DQ_MOD.  Returns 0.
 */
int
qsync(mp)
	struct mount *mp;
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct thread *td = curthread;		/* XXX */
	struct vnode *vp, *nextvp;
	struct dquot *dq;
	int i, error;

	/*
	 * Check if the mount point has any quotas.
	 * If not, simply return.
	 */
	for (i = 0; i < MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	if (i == MAXQUOTAS)
		return (0);
	/*
	 * Search vnodes associated with this mount point,
	 * synchronizing any modified dquot structures.
 */
	mtx_lock(&mntvnode_mtx);
again:
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nextvp) {
		if (vp->v_mount != mp)
			goto again;
		nextvp = TAILQ_NEXT(vp, v_nmntvnodes);
		/* Drop the list lock around the per-vnode work. */
		mtx_unlock(&mntvnode_mtx);
		mtx_lock(&vp->v_interlock);
		if (vp->v_type == VNON) {
			mtx_unlock(&vp->v_interlock);
			mtx_lock(&mntvnode_mtx);
			continue;
		}
		/* Non-blocking lock: skip busy vnodes, rescan on ENOENT. */
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
		if (error) {
			mtx_lock(&mntvnode_mtx);
			if (error == ENOENT)
				goto again;
			continue;
		}
		for (i = 0; i < MAXQUOTAS; i++) {
			dq = VTOI(vp)->i_dquot[i];
			if (dq != NODQUOT && (dq->dq_flags & DQ_MOD))
				dqsync(vp, dq);
		}
		vput(vp);
		mtx_lock(&mntvnode_mtx);
		/* List changed while unlocked: rescan. */
		if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
			goto again;
	}
	mtx_unlock(&mntvnode_mtx);
	return (0);
}

/*
 * Code pertaining to management of the in-core dquot data structures.
 */
/* Hash on the quota-file vnode pointer and the id within that file. */
#define DQHASH(dqvp, id) \
	(&dqhashtbl[((((intptr_t)(dqvp)) >> 8) + id) & dqhash])
static LIST_HEAD(dqhash, dquot) *dqhashtbl;
static u_long dqhash;

/*
 * Dquot free list: unreferenced but still-cached entries, recycled
 * from the head by dqget().
 */
#define	DQUOTINC	5	/* minimum free dquots desired */
static TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
static long numdquot, desireddquot = DQUOTINC;

/*
 * Initialize the quota system.
 */
void
dqinit()
{

	dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash);
	TAILQ_INIT(&dqfreelist);
}

/*
 * Shut down the quota system.
 */
void
dquninit()
{
	struct dquot *dq;

	hashdestroy(dqhashtbl, M_DQUOT, dqhash);
	/* Free every cached dquot remaining on the free list. */
	while ((dq = TAILQ_FIRST(&dqfreelist)) != NULL) {
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		free(dq, M_DQUOT);
	}
}

/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
static int
dqget(vp, id, ump, type, dqp)
	struct vnode *vp;
	u_long id;
	struct ufsmount *ump;
	int type;
	struct dquot **dqp;
{
	struct thread *td = curthread;		/* XXX */
	struct dquot *dq;
	struct dqhash *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	dqvp = ump->um_quotas[type];
	/* Quotas are not enabled (or are being turned off) for this type. */
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		*dqp = NODQUOT;
		return (EINVAL);
	}
	/*
	 * Check the cache first.
	 */
	dqh = DQHASH(dqvp, id);
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		/*
		 * Cache hit with no references. Take
		 * the structure off the free list.
		 */
		if (dq->dq_cnt == 0)
			TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		DQREF(dq);
		*dqp = dq;
		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	if (TAILQ_FIRST(&dqfreelist) == NODQUOT &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;
	if (numdquot < desireddquot) {
		dq = (struct dquot *)malloc(sizeof *dq, M_DQUOT,
		    M_WAITOK | M_ZERO);
		numdquot++;
	} else {
		/* At the cap: recycle the oldest unreferenced entry. */
		if ((dq = TAILQ_FIRST(&dqfreelist)) == NULL) {
			tablefull("dquot");
			*dqp = NODQUOT;
			return (EUSERS);
		}
		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
			panic("dqget: free dquot isn't");
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		/* Unhash it from its previous identity, if it had one. */
		if (dq->dq_ump != NULL)
			LIST_REMOVE(dq, dq_hash);
	}
	/*
	 * Initialize the contents of the dquot structure.
 */
	/* Lock the quota file unless the caller already holds it. */
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY, td);
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	DQREF(dq);
	/* DQ_LOCK makes concurrent users of this entry sleep until I/O ends. */
	dq->dq_flags = DQ_LOCK;
	dq->dq_id = id;
	dq->dq_ump = ump;
	dq->dq_type = type;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = (struct thread *)0;
	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
	/* Read past end of quota file: treat as an all-zero entry. */
	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
		bzero((caddr_t)&dq->dq_dqb, sizeof(struct dqblk));
	if (vp != dqvp)
		VOP_UNLOCK(dqvp, 0, td);
	if (dq->dq_flags & DQ_WANT)
		wakeup(dq);
	dq->dq_flags = 0;
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		LIST_REMOVE(dq, dq_hash);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0)
			dq->dq_btime = time_second + ump->um_btime[type];
		if (dq->dq_itime == 0)
			dq->dq_itime = time_second + ump->um_itime[type];
	}
	*dqp = dq;
	return (0);
}

#ifdef DIAGNOSTIC
/*
 * Obtain a reference to a dquot.
 */
static void
dqref(dq)
	struct dquot *dq;
{

	dq->dq_cnt++;
}
#endif

/*
 * Release a reference to a dquot.
 */
void
dqrele(vp, dq)
	struct vnode *vp;
	struct dquot *dq;
{

	if (dq == NODQUOT)
		return;
	if (dq->dq_cnt > 1) {
		dq->dq_cnt--;
		return;
	}
	/* Last reference: push any modifications to disk first. */
	if (dq->dq_flags & DQ_MOD)
		(void) dqsync(vp, dq);
	/* dqsync() may sleep; recheck in case a new reference appeared. */
	if (--dq->dq_cnt > 0)
		return;
	TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
}

/*
 * Update the disk quota in the quota file.
 *
 * Writes dq's dqblk back to its slot in the quota file; clears DQ_MOD
 * on success.  "vp" may be the quota file itself, in which case it is
 * already locked by the caller.
 */
static int
dqsync(vp, dq)
	struct vnode *vp;
	struct dquot *dq;
{
	struct thread *td = curthread;		/* XXX */
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	if (dq == NODQUOT)
		panic("dqsync: dquot");
	if ((dq->dq_flags & DQ_MOD) == 0)
		return (0);
	if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
		panic("dqsync: file");
	(void) vn_write_suspend_wait(dqvp, NULL, V_WAIT);
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY, td);
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+2, "dqsync", 0);
		/* Another thread may have synced it while we slept. */
		if ((dq->dq_flags & DQ_MOD) == 0) {
			if (vp != dqvp)
				VOP_UNLOCK(dqvp, 0, td);
			return (0);
		}
	}
	dq->dq_flags |= DQ_LOCK;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(dq->dq_id * sizeof (struct dqblk));
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = (struct thread *)0;
	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
	/* A short write with no error still failed to store the record. */
	if (auio.uio_resid && error == 0)
		error = EIO;
	if (dq->dq_flags & DQ_WANT)
		wakeup(dq);
	dq->dq_flags &= ~(DQ_MOD|DQ_LOCK|DQ_WANT);
	if (vp != dqvp)
		VOP_UNLOCK(dqvp, 0, td);
	return (error);
}

/*
 * Flush all entries from the cache for a particular vnode.
984 */ 985static void 986dqflush(vp) 987 struct vnode *vp; 988{ 989 struct dquot *dq, *nextdq; 990 struct dqhash *dqh; 991 992 /* 993 * Move all dquot's that used to refer to this quota 994 * file off their hash chains (they will eventually 995 * fall off the head of the free list and be re-used). 996 */ 997 for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) { 998 for (dq = LIST_FIRST(dqh); dq; dq = nextdq) { 999 nextdq = LIST_NEXT(dq, dq_hash); 1000 if (dq->dq_ump->um_quotas[dq->dq_type] != vp) 1001 continue; 1002 if (dq->dq_cnt) 1003 panic("dqflush: stray dquot"); 1004 LIST_REMOVE(dq, dq_hash); 1005 dq->dq_ump = (struct ufsmount *)0; 1006 } 1007 } 1008} 1009