1/*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1982, 1986, 1990, 1993, 1995 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * Robert Elz at The University of Melbourne. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * @(#)ufs_quota.c 8.5 (Berkeley) 5/20/95 35 */ 36 37#include <sys/cdefs.h> 38__FBSDID("$FreeBSD$"); 39 40#include "opt_ffs.h" 41 42#include <sys/param.h> 43#include <sys/systm.h> 44#include <sys/endian.h> 45#include <sys/fcntl.h> 46#include <sys/kernel.h> 47#include <sys/lock.h> 48#include <sys/malloc.h> 49#include <sys/mount.h> 50#include <sys/mutex.h> 51#include <sys/namei.h> 52#include <sys/priv.h> 53#include <sys/proc.h> 54#include <sys/socket.h> 55#include <sys/stat.h> 56#include <sys/sysctl.h> 57#include <sys/vnode.h> 58 59#include <ufs/ufs/extattr.h> 60#include <ufs/ufs/quota.h> 61#include <ufs/ufs/inode.h> 62#include <ufs/ufs/ufsmount.h> 63#include <ufs/ufs/ufs_extern.h> 64 65CTASSERT(sizeof(struct dqblk64) == sizeof(struct dqhdr64)); 66 67static int unprivileged_get_quota = 0; 68SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_get_quota, CTLFLAG_RW, 69 &unprivileged_get_quota, 0, 70 "Unprivileged processes may retrieve quotas for other uids and gids"); 71 72static MALLOC_DEFINE(M_DQUOT, "ufs_quota", "UFS quota entries"); 73 74/* 75 * Quota name to error message mapping. 
 */
static char *quotatypes[] = INITQFNAMES;

static int chkdqchg(struct inode *, ufs2_daddr_t, struct ucred *, int, int *);
static int chkiqchg(struct inode *, int, struct ucred *, int, int *);
static int dqopen(struct vnode *, struct ufsmount *, int);
static int dqget(struct vnode *,
	u_long, struct ufsmount *, int, struct dquot **);
static int dqsync(struct vnode *, struct dquot *);
static int dqflush(struct vnode *);
static int quotaoff1(struct thread *td, struct mount *mp, int type);
static int quotaoff_inchange(struct thread *td, struct mount *mp, int type);

/* conversion functions - from_to() */
static void dqb32_dq(const struct dqblk32 *, struct dquot *);
static void dqb64_dq(const struct dqblk64 *, struct dquot *);
static void dq_dqb32(const struct dquot *, struct dqblk32 *);
static void dq_dqb64(const struct dquot *, struct dqblk64 *);
static void dqb32_dqb64(const struct dqblk32 *, struct dqblk64 *);
static void dqb64_dqb32(const struct dqblk64 *, struct dqblk32 *);

#ifdef DIAGNOSTIC
static void dqref(struct dquot *);
static void chkdquot(struct inode *);
#endif

/*
 * Set up the quotas for an inode.
 *
 * This routine completely defines the semantics of quotas.
 * If other criterion want to be used to establish quotas, the
 * MAXQUOTAS value in quota.h should be increased, and the
 * additional dquots set up here.
 *
 * Returns 0 on success (including the cases where quotas are simply
 * not enabled, or the inode is exempt), or an errno value from dqget().
 */
int
getinoquota(struct inode *ip)
{
	struct ufsmount *ump;
	struct vnode *vp;
	int error;

	vp = ITOV(ip);

	/*
	 * Disk quotas must be turned off for system files.  Currently
	 * snapshot and quota files.
	 */
	if ((vp->v_vflag & VV_SYSTEM) != 0)
		return (0);
	/*
	 * XXX: Turn off quotas for files with a negative UID or GID.
	 * This prevents the creation of 100GB+ quota files.
	 */
	if ((int)ip->i_uid < 0 || (int)ip->i_gid < 0)
		return (0);
	ump = VFSTOUFS(vp->v_mount);
	/*
	 * Set up the user quota based on file uid.
	 * EINVAL means that quotas are not enabled.
	 */
	if ((error =
		dqget(vp, ip->i_uid, ump, USRQUOTA, &ip->i_dquot[USRQUOTA])) &&
	    error != EINVAL)
		return (error);
	/*
	 * Set up the group quota based on file gid.
	 * EINVAL means that quotas are not enabled.
	 */
	if ((error =
		dqget(vp, ip->i_gid, ump, GRPQUOTA, &ip->i_dquot[GRPQUOTA])) &&
	    error != EINVAL)
		return (error);
	return (0);
}

/*
 * Update disk usage, and take corrective action.
 *
 * "change" is the delta (in filesystem blocks) to apply to the inode's
 * user and group quotas; negative values release blocks unconditionally,
 * positive values are checked against the limits unless FORCE is set or
 * the credential holds PRIV_VFS_EXCEEDQUOTA.  Returns 0 on success or
 * EDQUOT when a limit would be exceeded (usage is rolled back in that
 * case).
 */
int
chkdq(struct inode *ip, ufs2_daddr_t change, struct ucred *cred, int flags)
{
	struct dquot *dq;
	ufs2_daddr_t ncurblocks;
	struct vnode *vp = ITOV(ip);
	int i, error, warn, do_check;

	MPASS(cred != NOCRED || (flags & FORCE) != 0);
	/*
	 * Disk quotas must be turned off for system files.  Currently
	 * snapshot and quota files.
	 */
	if ((vp->v_vflag & VV_SYSTEM) != 0)
		return (0);
	/*
	 * XXX: Turn off quotas for files with a negative UID or GID.
	 * This prevents the creation of 100GB+ quota files.
	 */
	if ((int)ip->i_uid < 0 || (int)ip->i_gid < 0)
		return (0);
#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		/*
		 * Releasing blocks: never fails, just clamp the usage
		 * at zero and clear any pending over-limit flag.
		 */
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			DQI_LOCK(dq);
			DQI_WAIT(dq, PINOD+1, "chkdq1");
			ncurblocks = dq->dq_curblocks + change;
			if (ncurblocks >= 0)
				dq->dq_curblocks = ncurblocks;
			else
				dq->dq_curblocks = 0;
			dq->dq_flags &= ~DQ_BLKS;
			dq->dq_flags |= DQ_MOD;
			DQI_UNLOCK(dq);
		}
		return (0);
	}
	/*
	 * Enforce limits only when not forced and the credential lacks
	 * the exceed-quota privilege (priv_check_cred() returns nonzero
	 * when the privilege is NOT granted).
	 */
	if ((flags & FORCE) == 0 &&
	    priv_check_cred(cred, PRIV_VFS_EXCEEDQUOTA, 0))
		do_check = 1;
	else
		do_check = 0;
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		warn = 0;
		DQI_LOCK(dq);
		DQI_WAIT(dq, PINOD+1, "chkdq2");
		if (do_check) {
			error = chkdqchg(ip, change, cred, i, &warn);
			if (error) {
				/*
				 * Roll back user quota changes when
				 * group quota failed.
				 */
				while (i > 0) {
					--i;
					dq = ip->i_dquot[i];
					if (dq == NODQUOT)
						continue;
					DQI_LOCK(dq);
					DQI_WAIT(dq, PINOD+1, "chkdq3");
					ncurblocks = dq->dq_curblocks - change;
					if (ncurblocks >= 0)
						dq->dq_curblocks = ncurblocks;
					else
						dq->dq_curblocks = 0;
					dq->dq_flags &= ~DQ_BLKS;
					dq->dq_flags |= DQ_MOD;
					DQI_UNLOCK(dq);
				}
				return (error);
			}
		}
		/* Reset timer when crossing soft limit */
		if (dq->dq_curblocks + change >= dq->dq_bsoftlimit &&
		    dq->dq_curblocks < dq->dq_bsoftlimit)
			dq->dq_btime = time_second + ITOUMP(ip)->um_btime[i];
		dq->dq_curblocks += change;
		dq->dq_flags |= DQ_MOD;
		DQI_UNLOCK(dq);
		if (warn)
			uprintf("\n%s: warning, %s disk quota exceeded\n",
			    ITOVFS(ip)->mnt_stat.f_mntonname,
			    quotatypes[i]);
	}
	return (0);
}

/*
 * Check for a valid change to a users allocation.
 * Issue an error message if appropriate.
 *
 * Called with the dquot interlock held; on the EDQUOT return paths the
 * interlock is dropped before returning (the caller's rollback loop
 * re-acquires per-dquot locks itself).  *warn is set when the user has
 * just crossed their soft limit and should be warned.
 */
static int
chkdqchg(struct inode *ip, ufs2_daddr_t change, struct ucred *cred,
    int type, int *warn)
{
	struct dquot *dq = ip->i_dquot[type];
	ufs2_daddr_t ncurblocks = dq->dq_curblocks + change;

	/*
	 * If user would exceed their hard limit, disallow space allocation.
	 */
	if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
		if ((dq->dq_flags & DQ_BLKS) == 0 &&
		    ip->i_uid == cred->cr_uid) {
			/* Only print the message once per over-limit event. */
			dq->dq_flags |= DQ_BLKS;
			DQI_UNLOCK(dq);
			uprintf("\n%s: write failed, %s disk limit reached\n",
			    ITOVFS(ip)->mnt_stat.f_mntonname,
			    quotatypes[type]);
			return (EDQUOT);
		}
		DQI_UNLOCK(dq);
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow space
	 * allocation.  Reset time limit as they cross their soft limit.
	 */
	if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
		if (dq->dq_curblocks < dq->dq_bsoftlimit) {
			dq->dq_btime = time_second + ITOUMP(ip)->um_btime[type];
			if (ip->i_uid == cred->cr_uid)
				*warn = 1;
			return (0);
		}
		if (time_second > dq->dq_btime) {
			if ((dq->dq_flags & DQ_BLKS) == 0 &&
			    ip->i_uid == cred->cr_uid) {
				dq->dq_flags |= DQ_BLKS;
				DQI_UNLOCK(dq);
				uprintf("\n%s: write failed, %s "
				    "disk quota exceeded for too long\n",
				    ITOVFS(ip)->mnt_stat.f_mntonname,
				    quotatypes[type]);
				return (EDQUOT);
			}
			DQI_UNLOCK(dq);
			return (EDQUOT);
		}
	}
	return (0);
}

/*
 * Check the inode limit, applying corrective action.
 */
int
chkiq(struct inode *ip, int change, struct ucred *cred, int flags)
{
	struct dquot *dq;
	int i, error, warn, do_check;

	MPASS(cred != NOCRED || (flags & FORCE) != 0);
#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		/*
		 * Releasing inodes: never fails, clamp the usage at
		 * zero and clear any pending over-limit flag.
		 */
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			DQI_LOCK(dq);
			DQI_WAIT(dq, PINOD+1, "chkiq1");
			if (dq->dq_curinodes >= -change)
				dq->dq_curinodes += change;
			else
				dq->dq_curinodes = 0;
			dq->dq_flags &= ~DQ_INODS;
			dq->dq_flags |= DQ_MOD;
			DQI_UNLOCK(dq);
		}
		return (0);
	}
	/*
	 * Enforce limits only when not forced and the credential lacks
	 * the exceed-quota privilege (priv_check_cred() returns nonzero
	 * when the privilege is NOT granted).
	 */
	if ((flags & FORCE) == 0 &&
	    priv_check_cred(cred, PRIV_VFS_EXCEEDQUOTA, 0))
		do_check = 1;
	else
		do_check = 0;
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		warn = 0;
		DQI_LOCK(dq);
		DQI_WAIT(dq, PINOD+1, "chkiq2");
		if (do_check) {
			error = chkiqchg(ip, change, cred, i, &warn);
			if (error) {
				/*
				 * Roll back user quota changes when
				 * group quota failed.
				 */
				while (i > 0) {
					--i;
					dq = ip->i_dquot[i];
					if (dq == NODQUOT)
						continue;
					DQI_LOCK(dq);
					DQI_WAIT(dq, PINOD+1, "chkiq3");
					if (dq->dq_curinodes >= change)
						dq->dq_curinodes -= change;
					else
						dq->dq_curinodes = 0;
					dq->dq_flags &= ~DQ_INODS;
					dq->dq_flags |= DQ_MOD;
					DQI_UNLOCK(dq);
				}
				return (error);
			}
		}
		/* Reset timer when crossing soft limit */
		if (dq->dq_curinodes + change >= dq->dq_isoftlimit &&
		    dq->dq_curinodes < dq->dq_isoftlimit)
			dq->dq_itime = time_second + ITOUMP(ip)->um_itime[i];
		dq->dq_curinodes += change;
		dq->dq_flags |= DQ_MOD;
		DQI_UNLOCK(dq);
		if (warn)
			uprintf("\n%s: warning, %s inode quota exceeded\n",
			    ITOVFS(ip)->mnt_stat.f_mntonname,
			    quotatypes[i]);
	}
	return (0);
}

/*
 * Check for a valid change to a users allocation.
 * Issue an error message if appropriate.
 *
 * Called with the dquot interlock held; on the EDQUOT return paths the
 * interlock is dropped before returning.  *warn is set when the user
 * has just crossed their soft limit and should be warned.
 */
static int
chkiqchg(struct inode *ip, int change, struct ucred *cred, int type, int *warn)
{
	struct dquot *dq = ip->i_dquot[type];
	ino_t ncurinodes = dq->dq_curinodes + change;

	/*
	 * If user would exceed their hard limit, disallow inode allocation.
	 */
	if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) {
		if ((dq->dq_flags & DQ_INODS) == 0 &&
		    ip->i_uid == cred->cr_uid) {
			/* Only print the message once per over-limit event. */
			dq->dq_flags |= DQ_INODS;
			DQI_UNLOCK(dq);
			uprintf("\n%s: write failed, %s inode limit reached\n",
			    ITOVFS(ip)->mnt_stat.f_mntonname,
			    quotatypes[type]);
			return (EDQUOT);
		}
		DQI_UNLOCK(dq);
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow inode
	 * allocation.  Reset time limit as they cross their soft limit.
	 */
	if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) {
		if (dq->dq_curinodes < dq->dq_isoftlimit) {
			dq->dq_itime = time_second + ITOUMP(ip)->um_itime[type];
			if (ip->i_uid == cred->cr_uid)
				*warn = 1;
			return (0);
		}
		if (time_second > dq->dq_itime) {
			if ((dq->dq_flags & DQ_INODS) == 0 &&
			    ip->i_uid == cred->cr_uid) {
				dq->dq_flags |= DQ_INODS;
				DQI_UNLOCK(dq);
				uprintf("\n%s: write failed, %s "
				    "inode quota exceeded for too long\n",
				    ITOVFS(ip)->mnt_stat.f_mntonname,
				    quotatypes[type]);
				return (EDQUOT);
			}
			DQI_UNLOCK(dq);
			return (EDQUOT);
		}
	}
	return (0);
}

#ifdef DIAGNOSTIC
/*
 * On filesystems with quotas enabled, it is an error for a file to change
 * size and not to have a dquot structure associated with it.
 */
static void
chkdquot(struct inode *ip)
{
	struct ufsmount *ump;
	struct vnode *vp;
	int i;

	ump = ITOUMP(ip);
	vp = ITOV(ip);

	/*
	 * Disk quotas must be turned off for system files.  Currently
	 * these are snapshots and quota files.
	 */
	if ((vp->v_vflag & VV_SYSTEM) != 0)
		return;
	/*
	 * XXX: Turn off quotas for files with a negative UID or GID.
	 * This prevents the creation of 100GB+ quota files.
	 */
	if ((int)ip->i_uid < 0 || (int)ip->i_gid < 0)
		return;

	UFS_LOCK(ump);
	for (i = 0; i < MAXQUOTAS; i++) {
		/* Skip quota types not enabled or in transition. */
		if (ump->um_quotas[i] == NULLVP ||
		    (ump->um_qflags[i] & (QTF_OPENING|QTF_CLOSING)))
			continue;
		if (ip->i_dquot[i] == NODQUOT) {
			UFS_UNLOCK(ump);
			vn_printf(ITOV(ip), "chkdquot: missing dquot ");
			panic("chkdquot: missing dquot");
		}
	}
	UFS_UNLOCK(ump);
}
#endif

/*
 * Code to process quotactl commands.
 */

/*
 * Q_QUOTAON - set up a quota file for a particular filesystem.
 */
int
quotaon(struct thread *td, struct mount *mp, int type, void *fname)
{
	struct ufsmount *ump;
	struct vnode *vp, **vpp;
	struct vnode *mvp;
	struct dquot *dq;
	int error, flags;
	struct nameidata nd;

	/*
	 * NOTE(review): the caller appears to pass mp vfs_busy'd; every
	 * error return below pairs with a vfs_unbusy()/vfs_rel() —
	 * confirm against the quotactl entry point.
	 */
	error = priv_check(td, PRIV_UFS_QUOTAON);
	if (error != 0) {
		vfs_unbusy(mp);
		return (error);
	}

	if ((mp->mnt_flag & MNT_RDONLY) != 0) {
		vfs_unbusy(mp);
		return (EROFS);
	}

	ump = VFSTOUFS(mp);
	dq = NODQUOT;

	/*
	 * Open the quota file named by the caller (userspace path).
	 * The mount must be unbusied across vn_open(); a reference
	 * keeps it from going away, and we re-busy afterwards.
	 */
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, td);
	flags = FREAD | FWRITE;
	vfs_ref(mp);
	vfs_unbusy(mp);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0) {
		vfs_rel(mp);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;
	error = vfs_busy(mp, MBF_NOWAIT);
	vfs_rel(mp);
	if (error == 0) {
		if (vp->v_type != VREG) {
			error = EACCES;
			vfs_unbusy(mp);
		}
	}
	if (error != 0) {
		VOP_UNLOCK(vp, 0);
		(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
		return (error);
	}

	UFS_LOCK(ump);
	if ((ump->um_qflags[type] & (QTF_OPENING|QTF_CLOSING)) != 0) {
		UFS_UNLOCK(ump);
		VOP_UNLOCK(vp, 0);
		(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
		vfs_unbusy(mp);
		return (EALREADY);
	}
	/* Claim the quota state transition for this type. */
	ump->um_qflags[type] |= QTF_OPENING|QTF_CLOSING;
	UFS_UNLOCK(ump);
	if ((error = dqopen(vp, ump, type)) != 0) {
		VOP_UNLOCK(vp, 0);
		UFS_LOCK(ump);
		ump->um_qflags[type] &= ~(QTF_OPENING|QTF_CLOSING);
		UFS_UNLOCK(ump);
		(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
		vfs_unbusy(mp);
		return (error);
	}
	VOP_UNLOCK(vp, 0);
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_QUOTA;
	mp->mnt_stat.f_flags |= MNT_QUOTA;
	MNT_IUNLOCK(mp);

	/* If a different quota file was active for this type, close it. */
	vpp = &ump->um_quotas[type];
	if (*vpp != vp)
		quotaoff1(td, mp, type);

	/*
	 * When the directory vnode containing the quota file is
	 * inactivated, due to the shared lookup of the quota file
	 * vput()ing the dvp, the qsyncvp() call for the containing
	 * directory would try to acquire the quota lock exclusive.
	 * At the same time, lookup already locked the quota vnode
	 * shared.  Mark the quota vnode lock as allowing recursion
	 * and automatically converting shared locks to exclusive.
	 *
	 * Also mark quota vnode as system.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vp->v_vflag |= VV_SYSTEM;
	VN_LOCK_AREC(vp);
	VN_LOCK_DSHARE(vp);
	VOP_UNLOCK(vp, 0);
	*vpp = vp;
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	ump->um_cred[type] = crhold(td->td_ucred);
	ump->um_btime[type] = MAX_DQ_TIME;
	ump->um_itime[type] = MAX_IQ_TIME;
	/* Grace periods may be overridden by the id-0 record in the file. */
	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->um_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->um_itime[type] = dq->dq_itime;
		dqrele(NULLVP, dq);
	}
	/*
	 * Allow the getdq from getinoquota below to read the quota
	 * from file.
	 */
	UFS_LOCK(ump);
	ump->um_qflags[type] &= ~QTF_CLOSING;
	UFS_UNLOCK(ump);
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquot's for inodes being modified.
	 */
again:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			/* vget() failed; restart the whole iteration. */
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto again;
		}
		if (vp->v_type == VNON || vp->v_writecount <= 0) {
			VOP_UNLOCK(vp, 0);
			vrele(vp);
			continue;
		}
		error = getinoquota(VTOI(vp));
		VOP_UNLOCK(vp, 0);
		vrele(vp);
		if (error) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			break;
		}
	}

	if (error)
		quotaoff_inchange(td, mp, type);
	UFS_LOCK(ump);
	ump->um_qflags[type] &= ~QTF_OPENING;
	KASSERT((ump->um_qflags[type] & QTF_CLOSING) == 0,
	    ("quotaon: leaking flags"));
	UFS_UNLOCK(ump);

	vfs_unbusy(mp);
	return (error);
}

/*
 * Main code to turn off disk quotas for a filesystem.  Does not change
 * flags.
 *
 * Expects QTF_CLOSING to already be set for this type (asserted below);
 * returns 0 when no quota file was active, otherwise the result of
 * dqflush()/vn_close().
 */
static int
quotaoff1(struct thread *td, struct mount *mp, int type)
{
	struct vnode *vp;
	struct vnode *qvp, *mvp;
	struct ufsmount *ump;
	struct dquot *dq;
	struct inode *ip;
	struct ucred *cr;
	int error;

	ump = VFSTOUFS(mp);

	UFS_LOCK(ump);
	KASSERT((ump->um_qflags[type] & QTF_CLOSING) != 0,
	    ("quotaoff1: flags are invalid"));
	if ((qvp = ump->um_quotas[type]) == NULLVP) {
		UFS_UNLOCK(ump);
		return (0);
	}
	cr = ump->um_cred[type];
	UFS_UNLOCK(ump);

	/*
	 * Search vnodes associated with this mount point,
	 * deleting any references to quota file being closed.
	 */
again:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			/* vget() failed; restart the whole iteration. */
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto again;
		}
		ip = VTOI(vp);
		dq = ip->i_dquot[type];
		ip->i_dquot[type] = NODQUOT;
		dqrele(vp, dq);
		VOP_UNLOCK(vp, 0);
		vrele(vp);
	}

	error = dqflush(qvp);
	if (error != 0)
		return (error);

	/*
	 * Clear um_quotas before closing the quota vnode to prevent
	 * access to the closed vnode from dqget/dqsync
	 */
	UFS_LOCK(ump);
	ump->um_quotas[type] = NULLVP;
	ump->um_cred[type] = NOCRED;
	UFS_UNLOCK(ump);

	vn_lock(qvp, LK_EXCLUSIVE | LK_RETRY);
	qvp->v_vflag &= ~VV_SYSTEM;
	VOP_UNLOCK(qvp, 0);
	error = vn_close(qvp, FREAD|FWRITE, td->td_ucred, td);
	crfree(cr);

	return (error);
}

/*
 * Wrapper for quotaoff1() that ensures the filesystem is write-suspended
 * around the operation, suspending and resuming it here if the caller
 * (e.g. unmount) has not already done so.
 */
static int
quotaoff_inchange1(struct thread *td, struct mount *mp, int type)
{
	int error;
	bool need_resume;

	/*
	 * mp is already suspended on unmount.  If not, suspend it, to
	 * avoid the situation where quotaoff operation eventually
	 * failing due to SU structures still keeping references on
	 * dquots, but vnode's references are already clean.  This
	 * would cause quota accounting leak and asserts otherwise.
	 * Note that the thread has already called vn_start_write().
	 */
	if (mp->mnt_susp_owner == td) {
		need_resume = false;
	} else {
		error = vfs_write_suspend_umnt(mp);
		if (error != 0)
			return (error);
		need_resume = true;
	}
	error = quotaoff1(td, mp, type);
	if (need_resume)
		vfs_write_resume(mp, VR_START_WRITE);
	return (error);
}

/*
 * Turns off quotas, assumes that ump->um_qflags are already checked
 * and QTF_CLOSING is set to indicate operation in progress.  Fixes
 * ump->um_qflags and mp->mnt_flag after.
 */
int
quotaoff_inchange(struct thread *td, struct mount *mp, int type)
{
	struct ufsmount *ump;
	int error, i;

	error = quotaoff_inchange1(td, mp, type);

	ump = VFSTOUFS(mp);
	UFS_LOCK(ump);
	ump->um_qflags[type] &= ~QTF_CLOSING;
	/* Drop MNT_QUOTA only once no quota type remains active. */
	for (i = 0; i < MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	if (i == MAXQUOTAS) {
		MNT_ILOCK(mp);
		mp->mnt_flag &= ~MNT_QUOTA;
		mp->mnt_stat.f_flags &= ~MNT_QUOTA;
		MNT_IUNLOCK(mp);
	}
	UFS_UNLOCK(ump);
	return (error);
}

/*
 * Q_QUOTAOFF - turn off disk quotas for a filesystem.
 */
int
quotaoff(struct thread *td, struct mount *mp, int type)
{
	struct ufsmount *ump;
	int error;

	error = priv_check(td, PRIV_UFS_QUOTAOFF);
	if (error)
		return (error);

	ump = VFSTOUFS(mp);
	UFS_LOCK(ump);
	if ((ump->um_qflags[type] & (QTF_OPENING|QTF_CLOSING)) != 0) {
		UFS_UNLOCK(ump);
		return (EALREADY);
	}
	ump->um_qflags[type] |= QTF_CLOSING;
	UFS_UNLOCK(ump);

	return (quotaoff_inchange(td, mp, type));
}

/*
 * Q_GETQUOTA - return current values in a dqblk structure.
 */
static int
_getquota(struct thread *td, struct mount *mp, u_long id, int type,
    struct dqblk64 *dqb)
{
	struct dquot *dq;
	int error;

	/*
	 * A user may always read their own quota; reading another id's
	 * quota requires PRIV_VFS_GETQUOTA unless the
	 * security.bsd.unprivileged_get_quota sysctl is enabled.
	 */
	switch (type) {
	case USRQUOTA:
		if ((td->td_ucred->cr_uid != id) && !unprivileged_get_quota) {
			error = priv_check(td, PRIV_VFS_GETQUOTA);
			if (error)
				return (error);
		}
		break;

	case GRPQUOTA:
		if (!groupmember(id, td->td_ucred) &&
		    !unprivileged_get_quota) {
			error = priv_check(td, PRIV_VFS_GETQUOTA);
			if (error)
				return (error);
		}
		break;

	default:
		return (EINVAL);
	}

	dq = NODQUOT;
	error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq);
	if (error)
		return (error);
	*dqb = dq->dq_dqb;
	dqrele(NULLVP, dq);
	return (error);
}

/*
 * Q_SETQUOTA - assign an entire dqblk structure.
 *
 * Installs the caller-supplied limits but preserves the live usage
 * counters (and, for non-zero ids, the existing expiry times).
 */
static int
_setquota(struct thread *td, struct mount *mp, u_long id, int type,
    struct dqblk64 *dqb)
{
	struct dquot *dq;
	struct dquot *ndq;
	struct ufsmount *ump;
	struct dqblk64 newlim;
	int error;

	error = priv_check(td, PRIV_VFS_SETQUOTA);
	if (error)
		return (error);

	newlim = *dqb;

	ndq = NODQUOT;
	ump = VFSTOUFS(mp);

	error = dqget(NULLVP, id, ump, type, &ndq);
	if (error)
		return (error);
	dq = ndq;
	DQI_LOCK(dq);
	DQI_WAIT(dq, PINOD+1, "setqta");
	/*
	 * Copy all but the current values.
	 * Reset time limit if previously had no soft limit or were
	 * under it, but now have a soft limit and are over it.
	 */
	newlim.dqb_curblocks = dq->dq_curblocks;
	newlim.dqb_curinodes = dq->dq_curinodes;
	if (dq->dq_id != 0) {
		newlim.dqb_btime = dq->dq_btime;
		newlim.dqb_itime = dq->dq_itime;
	}
	if (newlim.dqb_bsoftlimit &&
	    dq->dq_curblocks >= newlim.dqb_bsoftlimit &&
	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
		newlim.dqb_btime = time_second + ump->um_btime[type];
	if (newlim.dqb_isoftlimit &&
	    dq->dq_curinodes >= newlim.dqb_isoftlimit &&
	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
		newlim.dqb_itime = time_second + ump->um_itime[type];
	dq->dq_dqb = newlim;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	/* An entry with no limits at all only tracks usage (DQ_FAKE). */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	else
		dq->dq_flags &= ~DQ_FAKE;
	dq->dq_flags |= DQ_MOD;
	DQI_UNLOCK(dq);
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SETUSE - set current inode and block usage.
 *
 * Overwrites the usage counters only; limits are untouched.
 */
static int
_setuse(struct thread *td, struct mount *mp, u_long id, int type,
    struct dqblk64 *dqb)
{
	struct dquot *dq;
	struct ufsmount *ump;
	struct dquot *ndq;
	struct dqblk64 usage;
	int error;

	error = priv_check(td, PRIV_UFS_SETUSE);
	if (error)
		return (error);

	usage = *dqb;

	ump = VFSTOUFS(mp);
	ndq = NODQUOT;

	error = dqget(NULLVP, id, ump, type, &ndq);
	if (error)
		return (error);
	dq = ndq;
	DQI_LOCK(dq);
	DQI_WAIT(dq, PINOD+1, "setuse");
	/*
	 * Reset time limit if have a soft limit and were
	 * previously under it, but are now over it.
	 */
	if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
	    usage.dqb_curblocks >= dq->dq_bsoftlimit)
		dq->dq_btime = time_second + ump->um_btime[type];
	if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
	    usage.dqb_curinodes >= dq->dq_isoftlimit)
		dq->dq_itime = time_second + ump->um_itime[type];
	dq->dq_curblocks = usage.dqb_curblocks;
	dq->dq_curinodes = usage.dqb_curinodes;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	dq->dq_flags |= DQ_MOD;
	DQI_UNLOCK(dq);
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * 32-bit userland ABI wrapper: fetch the quota and convert the in-kernel
 * 64-bit dqblk to the legacy 32-bit layout before copyout.
 */
int
getquota32(struct thread *td, struct mount *mp, u_long id, int type, void *addr)
{
	struct dqblk32 dqb32;
	struct dqblk64 dqb64;
	int error;

	error = _getquota(td, mp, id, type, &dqb64);
	if (error)
		return (error);
	dqb64_dqb32(&dqb64, &dqb32);
	error = copyout(&dqb32, addr, sizeof(dqb32));
	return (error);
}

/*
 * 32-bit userland ABI wrapper: copyin a legacy 32-bit dqblk and widen it
 * before applying the limits.
 */
int
setquota32(struct thread *td, struct mount *mp, u_long id, int type, void *addr)
{
	struct dqblk32 dqb32;
	struct dqblk64 dqb64;
	int error;

	error = copyin(addr, &dqb32, sizeof(dqb32));
	if (error)
		return (error);
	dqb32_dqb64(&dqb32, &dqb64);
	error = _setquota(td, mp, id, type, &dqb64);
	return (error);
}

/*
 * 32-bit userland ABI wrapper for Q_SETUSE.
 */
int
setuse32(struct thread *td, struct mount *mp, u_long id, int type, void *addr)
{
	struct dqblk32 dqb32;
	struct dqblk64 dqb64;
	int error;

	error = copyin(addr, &dqb32, sizeof(dqb32));
	if (error)
		return (error);
	dqb32_dqb64(&dqb32, &dqb64);
	error = _setuse(td, mp, id, type, &dqb64);
	return (error);
}

/*
 * Native 64-bit Q_GETQUOTA entry point.
 */
int
getquota(struct thread *td, struct mount *mp, u_long id, int type, void *addr)
{
	struct dqblk64 dqb64;
	int error;

	error = _getquota(td, mp, id, type, &dqb64);
	if (error)
		return (error);
	error = copyout(&dqb64, addr, sizeof(dqb64));
	return (error);
}

/*
 * Native 64-bit Q_SETQUOTA entry point.
 */
int
setquota(struct thread *td, struct mount *mp, u_long id, int type, void *addr)
{
	struct dqblk64 dqb64;
	int error;

	error = copyin(addr, &dqb64, sizeof(dqb64));
	if (error)
		return (error);
	error = _setquota(td, mp, id, type, &dqb64);
	return (error);
}

/*
 * Native 64-bit Q_SETUSE entry point.
 */
int
setuse(struct thread *td, struct mount *mp, u_long id, int type, void *addr)
{
	struct dqblk64 dqb64;
	int error;

	error = copyin(addr, &dqb64, sizeof(dqb64));
	if (error)
		return (error);
	error = _setuse(td, mp, id, type, &dqb64);
	return (error);
}

/*
 * Q_GETQUOTASIZE - get bit-size of quota file fields
 */
int
getquotasize(struct thread *td, struct mount *mp, u_long id, int type,
    void *sizep)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	int bitsize;

	UFS_LOCK(ump);
	if (ump->um_quotas[type] == NULLVP ||
	    (ump->um_qflags[type] & QTF_CLOSING)) {
		UFS_UNLOCK(ump);
		return (EINVAL);
	}
	if ((ump->um_qflags[type] & QTF_64BIT) != 0)
		bitsize = 64;
	else
		bitsize = 32;
	UFS_UNLOCK(ump);
	return (copyout(&bitsize, sizep, sizeof(int)));
}

/*
 * Q_SYNC - sync quota files to disk.
 */
int
qsync(struct mount *mp)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct thread *td = curthread;		/* XXX */
	struct vnode *vp, *mvp;
	struct dquot *dq;
	int i, error;

	/*
	 * Check if the mount point has any quotas.
	 * If not, simply return.
	 */
	for (i = 0; i < MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	if (i == MAXQUOTAS)
		return (0);
	/*
	 * Search vnodes associated with this mount point,
	 * synchronizing any modified dquot structures.
	 */
again:
	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td);
		if (error) {
			/* ENOENT means the vnode was recycled; restart. */
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ACTIVE_ABORT(mp, mvp);
				goto again;
			}
			continue;
		}
		for (i = 0; i < MAXQUOTAS; i++) {
			dq = VTOI(vp)->i_dquot[i];
			if (dq != NODQUOT)
				dqsync(vp, dq);
		}
		vput(vp);
	}
	return (0);
}

/*
 * Sync quota file for given vnode to disk.
 */
int
qsyncvp(struct vnode *vp)
{
	struct ufsmount *ump = VFSTOUFS(vp->v_mount);
	struct dquot *dq;
	int i;

	/*
	 * Check if the mount point has any quotas.
	 * If not, simply return.
	 */
	for (i = 0; i < MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	if (i == MAXQUOTAS)
		return (0);
	/*
	 * Search quotas associated with this vnode
	 * synchronizing any modified dquot structures.
	 */
	for (i = 0; i < MAXQUOTAS; i++) {
		dq = VTOI(vp)->i_dquot[i];
		if (dq != NODQUOT)
			dqsync(vp, dq);
	}
	return (0);
}

/*
 * Code pertaining to management of the in-core dquot data structures.
 */
/* Hash on the quota file vnode pointer and the id within that file. */
#define	DQHASH(dqvp, id) \
	(&dqhashtbl[((((intptr_t)(dqvp)) >> 8) + id) & dqhash])
static LIST_HEAD(dqhash, dquot) *dqhashtbl;
static u_long dqhash;

/*
 * Dquot free list.
 */
#define	DQUOTINC	5	/* minimum free dquots desired */
static TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
static long numdquot, desireddquot = DQUOTINC;

/*
 * Lock to protect quota hash, dq free list and dq_cnt ref counters of
 * _all_ dqs.
 */
struct mtx dqhlock;

#define	DQH_LOCK()	mtx_lock(&dqhlock)
#define	DQH_UNLOCK()	mtx_unlock(&dqhlock)

static struct dquot *dqhashfind(struct dqhash *dqh, u_long id,
    struct vnode *dqvp);

/*
 * Initialize the quota system.
 */
void
dqinit(void)
{

	mtx_init(&dqhlock, "dqhlock", NULL, MTX_DEF);
	/* Size the dquot hash table to the system's desired vnode count. */
	dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash);
	TAILQ_INIT(&dqfreelist);
}

/*
 * Shut down the quota system.
 */
void
dquninit(void)
{
	struct dquot *dq;

	hashdestroy(dqhashtbl, M_DQUOT, dqhash);
	/* Free any dquots still parked on the free list. */
	while ((dq = TAILQ_FIRST(&dqfreelist)) != NULL) {
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		mtx_destroy(&dq->dq_lock);
		free(dq, M_DQUOT);
	}
	mtx_destroy(&dqhlock);
}

/*
 * Look up a dquot in the hash chain for the given id and quota file
 * vnode.  Must be called with dqhlock held; on a hit the dquot is
 * referenced (and pulled off the free list if it had no references)
 * before being returned.
 */
static struct dquot *
dqhashfind(struct dqhash *dqh, u_long id, struct vnode *dqvp)
{
	struct dquot *dq;

	mtx_assert(&dqhlock, MA_OWNED);
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		/*
		 * Cache hit with no references.  Take
		 * the structure off the free list.
		 */
		if (dq->dq_cnt == 0)
			TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		DQREF(dq);
		return (dq);
	}
	return (NODQUOT);
}

/*
 * Determine the quota file type.
 *
 * A 32-bit quota file is simply an array of struct dqblk32.
 *
 * A 64-bit quota file is a struct dqhdr64 followed by an array of struct
 * dqblk64.  The header contains various magic bits which allow us to be
 * reasonably confident that it is indeed a 64-bit quota file and not just
 * a 32-bit quota file that just happens to "look right".
 *
 */
static int
dqopen(struct vnode *vp, struct ufsmount *ump, int type)
{
	struct dqhdr64 dqh;
	struct iovec aiov;
	struct uio auio;
	int error;

	ASSERT_VOP_LOCKED(vp, "dqopen");
	/* Read what would be the 64-bit header from offset 0. */
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = &dqh;
	aiov.iov_len = sizeof(dqh);
	auio.uio_resid = sizeof(dqh);
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = (struct thread *)0;
	error = VOP_READ(vp, &auio, 0, ump->um_cred[type]);

	if (error != 0)
		return (error);
	/* Short read: file is smaller than a dqhdr64, can't be 64-bit. */
	if (auio.uio_resid > 0) {
		/* assume 32 bits */
		return (0);
	}

	UFS_LOCK(ump);
	if (strcmp(dqh.dqh_magic, Q_DQHDR64_MAGIC) == 0 &&
	    be32toh(dqh.dqh_version) == Q_DQHDR64_VERSION &&
	    be32toh(dqh.dqh_hdrlen) == (uint32_t)sizeof(struct dqhdr64) &&
	    be32toh(dqh.dqh_reclen) == (uint32_t)sizeof(struct dqblk64)) {
		/* XXX: what if the magic matches, but the sizes are wrong? */
		ump->um_qflags[type] |= QTF_64BIT;
	} else {
		ump->um_qflags[type] &= ~QTF_64BIT;
	}
	UFS_UNLOCK(ump);

	return (0);
}

/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
static int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
	uint8_t buf[sizeof(struct dqblk64)];
	off_t base, recsize;
	struct dquot *dq, *dq1;
	struct dqhash *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int dqvplocked, error;

#ifdef DEBUG_VFS_LOCKS
	if (vp != NULLVP)
		ASSERT_VOP_ELOCKED(vp, "dqget");
#endif

	/* Already have a dquot for this slot; nothing to do. */
	if (vp != NULLVP && *dqp != NODQUOT) {
		return (0);
	}

	/* XXX: Disallow negative id values to prevent the
	 * creation of 100GB+ quota data files.
 */
	if ((int)id < 0)
		return (EINVAL);

	UFS_LOCK(ump);
	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		*dqp = NODQUOT;
		UFS_UNLOCK(ump);
		return (EINVAL);
	}
	/* Hold the quota vnode across the lookup/IO below. */
	vref(dqvp);
	UFS_UNLOCK(ump);
	error = 0;
	dqvplocked = 0;

	/*
	 * Check the cache first.
	 */
	dqh = DQHASH(dqvp, id);
	DQH_LOCK();
	dq = dqhashfind(dqh, id, dqvp);
	if (dq != NULL) {
		DQH_UNLOCK();
		/*
		 * Wait for any in-progress read of this dquot to finish;
		 * dq_ump == NULL afterwards means that read failed.
		 */
hfound:		DQI_LOCK(dq);
		DQI_WAIT(dq, PINOD+1, "dqget");
		DQI_UNLOCK(dq);
		if (dq->dq_ump == NULL) {
			dqrele(vp, dq);
			dq = NODQUOT;
			error = EIO;
		}
		*dqp = dq;
		if (dqvplocked)
			vput(dqvp);
		else
			vrele(dqvp);
		return (error);
	}

	/*
	 * Quota vnode lock is before DQ_LOCK. Acquire dqvp lock there
	 * since new dq will appear on the hash chain DQ_LOCKed.
	 */
	if (vp != dqvp) {
		DQH_UNLOCK();
		vn_lock(dqvp, LK_SHARED | LK_RETRY);
		dqvplocked = 1;
		DQH_LOCK();
		/*
		 * Recheck the cache after sleep for quota vnode lock.
		 */
		dq = dqhashfind(dqh, id, dqvp);
		if (dq != NULL) {
			DQH_UNLOCK();
			goto hfound;
		}
	}

	/*
	 * Not in cache, allocate a new one or take it from the
	 * free list.
	 */
	if (TAILQ_FIRST(&dqfreelist) == NODQUOT &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;
	if (numdquot < desireddquot) {
		/* Grow: drop the hash lock to malloc, then re-validate. */
		numdquot++;
		DQH_UNLOCK();
		dq1 = malloc(sizeof *dq1, M_DQUOT, M_WAITOK | M_ZERO);
		mtx_init(&dq1->dq_lock, "dqlock", NULL, MTX_DEF);
		DQH_LOCK();
		/*
		 * Recheck the cache after sleep for memory.
		 */
		dq = dqhashfind(dqh, id, dqvp);
		if (dq != NULL) {
			/* Lost the race; discard the fresh allocation. */
			numdquot--;
			DQH_UNLOCK();
			mtx_destroy(&dq1->dq_lock);
			free(dq1, M_DQUOT);
			goto hfound;
		}
		dq = dq1;
	} else {
		/* Recycle the oldest unreferenced dquot. */
		if ((dq = TAILQ_FIRST(&dqfreelist)) == NULL) {
			DQH_UNLOCK();
			tablefull("dquot");
			*dqp = NODQUOT;
			if (dqvplocked)
				vput(dqvp);
			else
				vrele(dqvp);
			return (EUSERS);
		}
		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
			panic("dqget: free dquot isn't %p", dq);
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		if (dq->dq_ump != NULL)
			LIST_REMOVE(dq, dq_hash);
	}

	/*
	 * Dq is put into hash already locked to prevent parallel
	 * usage while it is being read from file.
	 */
	dq->dq_flags = DQ_LOCK;
	dq->dq_id = id;
	dq->dq_type = type;
	dq->dq_ump = ump;
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	DQREF(dq);
	DQH_UNLOCK();

	/*
	 * Read the requested quota record from the quota file, performing
	 * any necessary conversions.
	 */
	if (ump->um_qflags[type] & QTF_64BIT) {
		recsize = sizeof(struct dqblk64);
		base = sizeof(struct dqhdr64);
	} else {
		recsize = sizeof(struct dqblk32);
		base = 0;
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = buf;
	aiov.iov_len = recsize;
	auio.uio_resid = recsize;
	auio.uio_offset = base + id * recsize;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = (struct thread *)0;

	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
	/* A record past EOF (nothing read) means an all-zero quota entry. */
	if (auio.uio_resid == recsize && error == 0) {
		bzero(&dq->dq_dqb, sizeof(dq->dq_dqb));
	} else {
		if (ump->um_qflags[type] & QTF_64BIT)
			dqb64_dq((struct dqblk64 *)buf, dq);
		else
			dqb32_dq((struct dqblk32 *)buf, dq);
	}
	if (dqvplocked)
		vput(dqvp);
	else
		vrele(dqvp);
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		DQH_LOCK();
		/* Unhash so waiters in hfound see dq_ump == NULL (EIO). */
		dq->dq_ump = NULL;
		LIST_REMOVE(dq, dq_hash);
		DQH_UNLOCK();
		DQI_LOCK(dq);
		if (dq->dq_flags & DQ_WANT)
			wakeup(dq);
		dq->dq_flags = 0;
		DQI_UNLOCK(dq);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	DQI_LOCK(dq);
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0) {
			dq->dq_btime = time_second + ump->um_btime[type];
			if (dq->dq_bsoftlimit &&
			    dq->dq_curblocks >= dq->dq_bsoftlimit)
				dq->dq_flags |= DQ_MOD;
		}
		if (dq->dq_itime == 0) {
			dq->dq_itime = time_second + ump->um_itime[type];
			if (dq->dq_isoftlimit &&
			    dq->dq_curinodes >= dq->dq_isoftlimit)
				dq->dq_flags |= DQ_MOD;
		}
	}
	/* Drop DQ_LOCK and wake anyone who blocked on it in hfound. */
	DQI_WAKEUP(dq);
	DQI_UNLOCK(dq);
	*dqp = dq;
	return (0);
}

#ifdef DIAGNOSTIC
/*
 * Obtain a reference to a dquot.
 */
static void
dqref(struct dquot *dq)
{

	dq->dq_cnt++;
}
#endif

/*
 * Release a reference to a dquot.
 */
void
dqrele(struct vnode *vp, struct dquot *dq)
{

	if (dq == NODQUOT)
		return;
	DQH_LOCK();
	KASSERT(dq->dq_cnt > 0, ("Lost dq %p reference 1", dq));
	/* Fast path: not the last reference, just drop the count. */
	if (dq->dq_cnt > 1) {
		dq->dq_cnt--;
		DQH_UNLOCK();
		return;
	}
	DQH_UNLOCK();
sync:
	(void) dqsync(vp, dq);

	DQH_LOCK();
	KASSERT(dq->dq_cnt > 0, ("Lost dq %p reference 2", dq));
	/* Someone re-referenced the dq while we were syncing. */
	if (--dq->dq_cnt > 0) {
		DQH_UNLOCK();
		return;
	}

	/*
	 * The dq may become dirty after it is synced but before it is
	 * put to the free list. Checking the DQ_MOD there without
	 * locking dq should be safe since no other references to the
	 * dq exist.
	 */
	if ((dq->dq_flags & DQ_MOD) != 0) {
		dq->dq_cnt++;
		DQH_UNLOCK();
		goto sync;
	}
	TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
	DQH_UNLOCK();
}

/*
 * Update the disk quota in the quota file.
 */
static int
dqsync(struct vnode *vp, struct dquot *dq)
{
	uint8_t buf[sizeof(struct dqblk64)];
	off_t base, recsize;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;
	struct mount *mp;
	struct ufsmount *ump;

#ifdef DEBUG_VFS_LOCKS
	if (vp != NULL)
		ASSERT_VOP_ELOCKED(vp, "dqsync");
#endif

	mp = NULL;
	error = 0;
	if (dq == NODQUOT)
		panic("dqsync: dquot");
	/* dq_ump == NULL means this dq was orphaned by a failed read. */
	if ((ump = dq->dq_ump) == NULL)
		return (0);
	UFS_LOCK(ump);
	if ((dqvp = ump->um_quotas[dq->dq_type]) == NULLVP) {
		if (vp == NULL) {
			UFS_UNLOCK(ump);
			return (0);
		} else
			panic("dqsync: file");
	}
	vref(dqvp);
	UFS_UNLOCK(ump);

	/* Cheap check before taking locks: anything to write at all? */
	DQI_LOCK(dq);
	if ((dq->dq_flags & DQ_MOD) == 0) {
		DQI_UNLOCK(dq);
		vrele(dqvp);
		return (0);
	}
	DQI_UNLOCK(dq);

	(void) vn_start_secondary_write(dqvp, &mp, V_WAIT);
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);

	/* Recheck DQ_MOD: another thread may have synced while we slept. */
	DQI_LOCK(dq);
	DQI_WAIT(dq, PINOD+2, "dqsync");
	if ((dq->dq_flags & DQ_MOD) == 0)
		goto out;
	dq->dq_flags |= DQ_LOCK;
	DQI_UNLOCK(dq);

	/*
	 * Write the quota record to the quota file, performing any
	 * necessary conversions. See dqget() for additional details.
	 */
	if (ump->um_qflags[dq->dq_type] & QTF_64BIT) {
		dq_dqb64(dq, (struct dqblk64 *)buf);
		recsize = sizeof(struct dqblk64);
		base = sizeof(struct dqhdr64);
	} else {
		dq_dqb32(dq, (struct dqblk32 *)buf);
		recsize = sizeof(struct dqblk32);
		base = 0;
	}

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = buf;
	aiov.iov_len = recsize;
	auio.uio_resid = recsize;
	auio.uio_offset = base + dq->dq_id * recsize;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = (struct thread *)0;
	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
	/* A short write with no error is still a failure to persist. */
	if (auio.uio_resid && error == 0)
		error = EIO;

	DQI_LOCK(dq);
	DQI_WAKEUP(dq);
	dq->dq_flags &= ~DQ_MOD;
out:
	DQI_UNLOCK(dq);
	if (vp != dqvp)
		vput(dqvp);
	else
		vrele(dqvp);
	vn_finished_secondary_write(mp);
	return (error);
}

/*
 * Flush all entries from the cache for a particular vnode.
 */
static int
dqflush(struct vnode *vp)
{
	struct dquot *dq, *nextdq;
	struct dqhash *dqh;
	int error;

	/*
	 * Move all dquot's that used to refer to this quota
	 * file off their hash chains (they will eventually
	 * fall off the head of the free list and be re-used).
	 */
	error = 0;
	DQH_LOCK();
	/* dqhash is the table mask; walk chains from the last to the first. */
	for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
		for (dq = LIST_FIRST(dqh); dq; dq = nextdq) {
			nextdq = LIST_NEXT(dq, dq_hash);
			if (dq->dq_ump->um_quotas[dq->dq_type] != vp)
				continue;
			if (dq->dq_cnt)
				error = EBUSY;
			else {
				LIST_REMOVE(dq, dq_hash);
				dq->dq_ump = NULL;
			}
		}
	}
	DQH_UNLOCK();
	return (error);
}

/*
 * The following three functions are provided for the adjustment of
 * quotas by the soft updates code.
1683 */ 1684#ifdef SOFTUPDATES 1685/* 1686 * Acquire a reference to the quota structures associated with a vnode. 1687 * Return count of number of quota structures found. 1688 */ 1689int 1690quotaref(vp, qrp) 1691 struct vnode *vp; 1692 struct dquot **qrp; 1693{ 1694 struct inode *ip; 1695 struct dquot *dq; 1696 int i, found; 1697 1698 for (i = 0; i < MAXQUOTAS; i++) 1699 qrp[i] = NODQUOT; 1700 /* 1701 * Disk quotas must be turned off for system files. Currently 1702 * snapshot and quota files. 1703 */ 1704 if ((vp->v_vflag & VV_SYSTEM) != 0) 1705 return (0); 1706 /* 1707 * Iterate through and copy active quotas. 1708 */ 1709 found = 0; 1710 ip = VTOI(vp); 1711 mtx_lock(&dqhlock); 1712 for (i = 0; i < MAXQUOTAS; i++) { 1713 if ((dq = ip->i_dquot[i]) == NODQUOT) 1714 continue; 1715 DQREF(dq); 1716 qrp[i] = dq; 1717 found++; 1718 } 1719 mtx_unlock(&dqhlock); 1720 return (found); 1721} 1722 1723/* 1724 * Release a set of quota structures obtained from a vnode. 1725 */ 1726void 1727quotarele(qrp) 1728 struct dquot **qrp; 1729{ 1730 struct dquot *dq; 1731 int i; 1732 1733 for (i = 0; i < MAXQUOTAS; i++) { 1734 if ((dq = qrp[i]) == NODQUOT) 1735 continue; 1736 dqrele(NULL, dq); 1737 } 1738} 1739 1740/* 1741 * Adjust the number of blocks associated with a quota. 1742 * Positive numbers when adding blocks; negative numbers when freeing blocks. 
1743 */ 1744void 1745quotaadj(qrp, ump, blkcount) 1746 struct dquot **qrp; 1747 struct ufsmount *ump; 1748 int64_t blkcount; 1749{ 1750 struct dquot *dq; 1751 ufs2_daddr_t ncurblocks; 1752 int i; 1753 1754 if (blkcount == 0) 1755 return; 1756 for (i = 0; i < MAXQUOTAS; i++) { 1757 if ((dq = qrp[i]) == NODQUOT) 1758 continue; 1759 DQI_LOCK(dq); 1760 DQI_WAIT(dq, PINOD+1, "adjqta"); 1761 ncurblocks = dq->dq_curblocks + blkcount; 1762 if (ncurblocks >= 0) 1763 dq->dq_curblocks = ncurblocks; 1764 else 1765 dq->dq_curblocks = 0; 1766 if (blkcount < 0) 1767 dq->dq_flags &= ~DQ_BLKS; 1768 else if (dq->dq_curblocks + blkcount >= dq->dq_bsoftlimit && 1769 dq->dq_curblocks < dq->dq_bsoftlimit) 1770 dq->dq_btime = time_second + ump->um_btime[i]; 1771 dq->dq_flags |= DQ_MOD; 1772 DQI_UNLOCK(dq); 1773 } 1774} 1775#endif /* SOFTUPDATES */ 1776 1777/* 1778 * 32-bit / 64-bit conversion functions. 1779 * 1780 * 32-bit quota records are stored in native byte order. Attention must 1781 * be paid to overflow issues. 1782 * 1783 * 64-bit quota records are stored in network byte order. 1784 */ 1785 1786#define CLIP32(u64) (u64 > UINT32_MAX ? UINT32_MAX : (uint32_t)u64) 1787 1788/* 1789 * Convert 32-bit host-order structure to dquot. 1790 */ 1791static void 1792dqb32_dq(const struct dqblk32 *dqb32, struct dquot *dq) 1793{ 1794 1795 dq->dq_bhardlimit = dqb32->dqb_bhardlimit; 1796 dq->dq_bsoftlimit = dqb32->dqb_bsoftlimit; 1797 dq->dq_curblocks = dqb32->dqb_curblocks; 1798 dq->dq_ihardlimit = dqb32->dqb_ihardlimit; 1799 dq->dq_isoftlimit = dqb32->dqb_isoftlimit; 1800 dq->dq_curinodes = dqb32->dqb_curinodes; 1801 dq->dq_btime = dqb32->dqb_btime; 1802 dq->dq_itime = dqb32->dqb_itime; 1803} 1804 1805/* 1806 * Convert 64-bit network-order structure to dquot. 
1807 */ 1808static void 1809dqb64_dq(const struct dqblk64 *dqb64, struct dquot *dq) 1810{ 1811 1812 dq->dq_bhardlimit = be64toh(dqb64->dqb_bhardlimit); 1813 dq->dq_bsoftlimit = be64toh(dqb64->dqb_bsoftlimit); 1814 dq->dq_curblocks = be64toh(dqb64->dqb_curblocks); 1815 dq->dq_ihardlimit = be64toh(dqb64->dqb_ihardlimit); 1816 dq->dq_isoftlimit = be64toh(dqb64->dqb_isoftlimit); 1817 dq->dq_curinodes = be64toh(dqb64->dqb_curinodes); 1818 dq->dq_btime = be64toh(dqb64->dqb_btime); 1819 dq->dq_itime = be64toh(dqb64->dqb_itime); 1820} 1821 1822/* 1823 * Convert dquot to 32-bit host-order structure. 1824 */ 1825static void 1826dq_dqb32(const struct dquot *dq, struct dqblk32 *dqb32) 1827{ 1828 1829 dqb32->dqb_bhardlimit = CLIP32(dq->dq_bhardlimit); 1830 dqb32->dqb_bsoftlimit = CLIP32(dq->dq_bsoftlimit); 1831 dqb32->dqb_curblocks = CLIP32(dq->dq_curblocks); 1832 dqb32->dqb_ihardlimit = CLIP32(dq->dq_ihardlimit); 1833 dqb32->dqb_isoftlimit = CLIP32(dq->dq_isoftlimit); 1834 dqb32->dqb_curinodes = CLIP32(dq->dq_curinodes); 1835 dqb32->dqb_btime = CLIP32(dq->dq_btime); 1836 dqb32->dqb_itime = CLIP32(dq->dq_itime); 1837} 1838 1839/* 1840 * Convert dquot to 64-bit network-order structure. 1841 */ 1842static void 1843dq_dqb64(const struct dquot *dq, struct dqblk64 *dqb64) 1844{ 1845 1846 dqb64->dqb_bhardlimit = htobe64(dq->dq_bhardlimit); 1847 dqb64->dqb_bsoftlimit = htobe64(dq->dq_bsoftlimit); 1848 dqb64->dqb_curblocks = htobe64(dq->dq_curblocks); 1849 dqb64->dqb_ihardlimit = htobe64(dq->dq_ihardlimit); 1850 dqb64->dqb_isoftlimit = htobe64(dq->dq_isoftlimit); 1851 dqb64->dqb_curinodes = htobe64(dq->dq_curinodes); 1852 dqb64->dqb_btime = htobe64(dq->dq_btime); 1853 dqb64->dqb_itime = htobe64(dq->dq_itime); 1854} 1855 1856/* 1857 * Convert 64-bit host-order structure to 32-bit host-order structure. 
1858 */ 1859static void 1860dqb64_dqb32(const struct dqblk64 *dqb64, struct dqblk32 *dqb32) 1861{ 1862 1863 dqb32->dqb_bhardlimit = CLIP32(dqb64->dqb_bhardlimit); 1864 dqb32->dqb_bsoftlimit = CLIP32(dqb64->dqb_bsoftlimit); 1865 dqb32->dqb_curblocks = CLIP32(dqb64->dqb_curblocks); 1866 dqb32->dqb_ihardlimit = CLIP32(dqb64->dqb_ihardlimit); 1867 dqb32->dqb_isoftlimit = CLIP32(dqb64->dqb_isoftlimit); 1868 dqb32->dqb_curinodes = CLIP32(dqb64->dqb_curinodes); 1869 dqb32->dqb_btime = CLIP32(dqb64->dqb_btime); 1870 dqb32->dqb_itime = CLIP32(dqb64->dqb_itime); 1871} 1872 1873/* 1874 * Convert 32-bit host-order structure to 64-bit host-order structure. 1875 */ 1876static void 1877dqb32_dqb64(const struct dqblk32 *dqb32, struct dqblk64 *dqb64) 1878{ 1879 1880 dqb64->dqb_bhardlimit = dqb32->dqb_bhardlimit; 1881 dqb64->dqb_bsoftlimit = dqb32->dqb_bsoftlimit; 1882 dqb64->dqb_curblocks = dqb32->dqb_curblocks; 1883 dqb64->dqb_ihardlimit = dqb32->dqb_ihardlimit; 1884 dqb64->dqb_isoftlimit = dqb32->dqb_isoftlimit; 1885 dqb64->dqb_curinodes = dqb32->dqb_curinodes; 1886 dqb64->dqb_btime = dqb32->dqb_btime; 1887 dqb64->dqb_itime = dqb32->dqb_itime; 1888} 1889