14Srgrimes// SPDX-License-Identifier: GPL-2.0 24Srgrimes/* 34Srgrimes * Implementation of the diskquota system for the LINUX operating system. QUOTA 44Srgrimes * is implemented using the BSD system call interface as the means of 54Srgrimes * communication with the user level. This file contains the generic routines 64Srgrimes * called by the different filesystems on allocation of an inode or block. 74Srgrimes * These routines take care of the administration needed to have a consistent 84Srgrimes * diskquota tracking system. The ideas of both user and group quotas are based 94Srgrimes * on the Melbourne quota system as used on BSD derived systems. The internal 104Srgrimes * implementation is based on one of the several variants of the LINUX 114Srgrimes * inode-subsystem with added complexity of the diskquota system. 124Srgrimes * 134Srgrimes * Author: Marco van Wieringen <mvw@planets.elm.net> 144Srgrimes * 154Srgrimes * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96 164Srgrimes * 174Srgrimes * Revised list management to avoid races 184Srgrimes * -- Bill Hawes, <whawes@star.net>, 9/98 194Srgrimes * 204Srgrimes * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...(). 214Srgrimes * As the consequence the locking was moved from dquot_decr_...(), 224Srgrimes * dquot_incr_...() to calling functions. 234Srgrimes * invalidate_dquots() now writes modified dquots. 244Srgrimes * Serialized quota_off() and quota_on() for mount point. 254Srgrimes * Fixed a few bugs in grow_dquots(). 264Srgrimes * Fixed deadlock in write_dquot() - we no longer account quotas on 274Srgrimes * quota files 284Srgrimes * remove_dquot_ref() moved to inode.c - it now traverses through inodes 294Srgrimes * add_dquot_ref() restarts after blocking 30593Srgrimes * Added check for bogus uid and fixed check for group in quotactl. 
314Srgrimes * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99 324Srgrimes * 33115703Sobrien * Used struct list_head instead of own list struct 34115703Sobrien * Invalidation of referenced dquots is no longer possible 35115703Sobrien * Improved free_dquots list management 3679609Speter * Quota and i_blocks are now updated in one place to avoid races 3789980Sbde * Warnings are now delayed so we won't block in critical section 3885268Sbde * Write updated not to require dquot lock 3913225Swollman * Jan Kara, <jack@suse.cz>, 9/2000 402056Swollman * 412056Swollman * Added dynamic quota structure allocation 4245720Speter * Jan Kara <jack@suse.cz> 12/2000 4311865Sphk * 4476166Smarkm * Rewritten quota interface. Implemented new quota format and 4533281Sbde * formats registering. 4645720Speter * Jan Kara, <jack@suse.cz>, 2001,2002 4776166Smarkm * 4876166Smarkm * New SMP locking. 4976166Smarkm * Jan Kara, <jack@suse.cz>, 10/2002 50121986Sjhb * 5111865Sphk * Added journalled quota support, fix lock inversion problems 5245720Speter * Jan Kara, <jack@suse.cz>, 2003,2004 5345720Speter * 5422093Sbde * (C) Copyright 1994 - 1997 Marco van Wieringen 554478Sbde */ 5622093Sbde 574478Sbde#include <linux/errno.h> 583816Swollman#include <linux/kernel.h> 5925083Sjdp#include <linux/fs.h> 6030805Sbde#include <linux/mount.h> 6130805Sbde#include <linux/mm.h> 6226309Speter#include <linux/time.h> 632056Swollman#include <linux/types.h> 6430805Sbde#include <linux/string.h> 6545720Speter#include <linux/fcntl.h> 662056Swollman#include <linux/stat.h> 6730805Sbde#include <linux/tty.h> 68103409Smini#include <linux/file.h> 693816Swollman#include <linux/slab.h> 70121986Sjhb#include <linux/sysctl.h> 71181780Skmacy#include <linux/init.h> 72181780Skmacy#include <linux/module.h> 73186557Skmacy#include <linux/proc_fs.h> 74181780Skmacy#include <linux/security.h> 75181780Skmacy#include <linux/sched.h> 7689980Sbde#include <linux/cred.h> 7760008Swollman#include <linux/kmod.h> 7889980Sbde#include 
<linux/namei.h> 794Srgrimes#include <linux/capability.h> 80147741Sdelphij#include <linux/quotaops.h> 81103064Speter#include <linux/blkdev.h> 82103064Speter#include <linux/sched/mm.h> 83103064Speter#include "../internal.h" /* ugh */ 844Srgrimes 854Srgrimes#include <linux/uaccess.h> 864Srgrimes 874Srgrimes/* 88143063Sjoerg * There are five quota SMP locks: 894Srgrimes * * dq_list_lock protects all lists with quotas and quota formats. 90210521Sjkim * * dquot->dq_dqb_lock protects data from dq_dqb 91210518Sjkim * * inode->i_lock protects inode->i_blocks, i_bytes and also guards 92210518Sjkim * consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that 9335215Sbde * dquot_transfer() can stabilize amount it transfers 9435215Sbde * * dq_data_lock protects mem_dqinfo structures and modifications of dquot 95210517Sjkim * pointers in the inode 96210518Sjkim * * dq_state_lock protects modifications of quota state (on quotaon and 97210518Sjkim * quotaoff) and readers who care about latest values take it as well. 98210518Sjkim * 9982154Speter * The spinlock ordering is hence: 100210518Sjkim * dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock, 10179609Speter * dq_list_lock > dq_state_lock 102239993Skib * 10382154Speter * Note that some things (eg. sb pointer, type, id) doesn't change during 104143063Sjoerg * the life of the dquot structure and so needn't to be protected by a lock 1054Srgrimes * 106210521Sjkim * Operation accessing dquots via inode pointers are protected by dquot_srcu. 10793024Sbde * Operation of reading pointer needs srcu_read_lock(&dquot_srcu), and 10893024Sbde * synchronize_srcu(&dquot_srcu) is called after clearing pointers from 10993024Sbde * inode and before dropping dquot references to avoid use of dquots after 11093024Sbde * they are freed. dq_data_lock is used to serialize the pointer setting and 11193024Sbde * clearing operations. 
 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
 * inode is a quota file). Functions adding pointers from inode to dquots have
 * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
 * have to do all pointer modifications before dropping dq_data_lock. This makes
 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
 * then drops all pointers to dquots from an inode.
 *
 * Each dquot has its dq_lock mutex. Dquot is locked when it is being read to
 * memory (or space for it is being allocated) on the first dqget(), when it is
 * being written out, and when it is being released on the last dqput(). The
 * allocation and release operations are serialized by the dq_lock and by
 * checking the use count in dquot_release().
 *
 * Lock ordering (including related VFS locks) is the following:
 *   s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
DEFINE_STATIC_SRCU(dquot_srcu);

/* Woken up by dqput() when the last reference to a dquot is dropped;
 * waited on by invalidate_dquots(). */
static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);

/*
 * Rate-limited error reporting for the quota code. The message is prefixed
 * with the device name and the name of the reporting function.
 */
void __quota_error(struct super_block *sb, const char *func,
		   const char *fmt, ...)
{
	if (printk_ratelimit()) {
		va_list args;
		struct va_format vaf;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
		       sb->s_id, func, &vaf);

		va_end(args);
	}
}
EXPORT_SYMBOL(__quota_error);

#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
#endif
static struct quota_format_type *quota_formats;	/* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;

/* Add a quota format to the head of the list of registered formats. */
int register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
	return 0;
}
EXPORT_SYMBOL(register_quota_format);

/* Unlink a quota format from the list of registered formats, if present. */
void unregister_quota_format(struct quota_format_type *fmt)
{
	struct quota_format_type **actqf;

	spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt;
	     actqf = &(*actqf)->qf_next)
		;
	if (*actqf)
		*actqf = (*actqf)->qf_next;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(unregister_quota_format);

/*
 * Look up a registered quota format by id and take a reference on its
 * owning module. If the format is not registered (or its module is going
 * away), try to load the matching module from module_names[] and search
 * again. Returns NULL when no usable format is found.
 */
static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		spin_unlock(&dq_list_lock);

		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}

/* Drop the module reference taken by find_quota_format(). */
static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}

/*
 * Dquot List Management:
 * The quota code uses five lists for dquot management: the inuse_list,
 * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
 * A single dquot structure may be on some of those lists, depending on
 * its current state.
 *
 * All dquots are placed to the end of inuse_list when first created, and this
 * list is used for invalidate operation, which must look at every dquot.
 *
 * When the last reference of a dquot is dropped, the dquot is added to
 * releasing_dquots. We'll then queue work item which will call
 * synchronize_srcu() and after that perform the final cleanup of all the
 * dquots on the list.
Each cleaned up dquot is moved to free_dquots list.
 * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
 * struct.
 *
 * Unused and cleaned up dquots are in the free_dquots list and this list is
 * searched whenever we need an available dquot. Dquots are removed from the
 * list as soon as they are used again and dqstats.free_dquots gives the number
 * of dquots on the list. When dquot is invalidated it's completely released
 * from memory.
 *
 * Dirty dquots are added to the dqi_dirty_list of quota_info when mark
 * dirtied, and this list is searched when writing dirty dquots back to
 * quota file. Note that some filesystems do dirty dquot tracking on their
 * own (e.g. in a journal) and thus don't use dqi_dirty_list.
 *
 * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
 * mechanism to locate a specific dquot.
 */

static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
static LIST_HEAD(releasing_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;

struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);

static qsize_t inode_get_rsv_space(struct inode *inode);
static qsize_t __inode_get_rsv_space(struct inode *inode);
static int __dquot_initialize(struct inode *inode, int type);

static void quota_release_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);

/* Hash (superblock, qid) into an index into the dquot_hash[] array. */
static inline unsigned int
hashfn(const struct super_block *sb, struct kqid qid)
{
	unsigned int id = from_kqid(&init_user_ns, qid);
	int type = qid.type;
	unsigned long tmp;

	tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}

/*
 * Following list functions expect dq_list_lock to be held
 */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head;
	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
	hlist_add_head(&dquot->dq_hash, head);
}

static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}

/* Look up a dquot with the given identity on its hash chain. */
static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
				struct kqid qid)
{
	struct dquot *dquot;

	hlist_for_each_entry(dquot, dquot_hash+hashent,
			     dq_hash)
		if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
			return dquot;

	return NULL;
}

/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats_inc(DQST_FREE_DQUOTS);
}

/* Queue a dquot for deferred cleanup by quota_release_workfn(). */
static inline void put_releasing_dquots(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &releasing_dquots);
	set_bit(DQ_RELEASING_B, &dquot->dq_flags);
}

/* Remove a dquot from whichever of free_dquots / releasing_dquots it is on.
 * DQ_RELEASING_B tells us which list that was, so the right counter/flag
 * gets updated. */
static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
		dqstats_dec(DQST_FREE_DQUOTS);
	else
		clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
}

static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}

static inline void remove_inuse(struct dquot *dquot)
{
	dqstats_dec(DQST_ALLOC_DQUOTS);
	list_del(&dquot->dq_inuse);
}
/*
 * End of list functions needing dq_list_lock
 */

/* Wait for any current holder of dquot->dq_lock (reader, writer or release)
 * to finish: taking and immediately dropping the mutex acts as a barrier. */
static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}

static inline int dquot_active(struct dquot *dquot)
{
	return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
}

static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}

static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}

/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	if (!dquot_active(dquot))
		return 0;

	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (dquot_dirty(dquot))
		return 1;

	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_id.type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);

/* Dirtify all the dquots - this can block when journalling.
 * dquots[] is dereferenced via srcu_dereference(), so the caller must hold
 * srcu_read_lock(&dquot_srcu). Returns the first error encountered. */
static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
{
	int ret, err, cnt;
	struct dquot *dquot;

	ret = err = 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot)
			/* Even in case of error we have to continue */
			ret = mark_dquot_dirty(dquot);
		if (!err)
			err = ret;
	}
	return err;
}

static inline void dqput_all(struct dquot **dquot)
{
	unsigned int cnt;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		dqput(dquot[cnt]);
}

/* Clear the dirty flag (and unlink from dqi_dirty_list when list-based
 * dirtying is used). Returns 1 when the dquot was dirty before. */
static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);

	spin_lock(&dq_list_lock);
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		return 0;
	}
	list_del_init(&dquot->dq_dirty);
	spin_unlock(&dq_list_lock);
	return 1;
}

void mark_info_dirty(struct super_block *sb, int type)
{
	spin_lock(&dq_data_lock);
	sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
	spin_unlock(&dq_data_lock);
}
EXPORT_SYMBOL(mark_info_dirty);

/*
 *	Read dquot from disk and alloc space for it
 */

int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
		if (ret < 0)
			goto out_iolock;
	}
	/* Make sure flags update is visible after dquot has been filled */
	smp_mb__before_atomic();
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!dquot_active(dquot) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
					dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	/*
	 * Make sure flags update is visible after on-disk struct has been
	 * allocated. Paired with smp_rmb() in dqget().
	 */
	smp_mb__before_atomic();
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);

/*
 *	Write dquot to disk
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	if (!clear_dquot_dirty(dquot))
		goto out_lock;
	/* Inactive dquot can be only if there was error during read/init
	 * => we have better not writing it */
	if (dquot_active(dquot))
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	else
		ret = -EIO;
out_lock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);

/*
 *	Release dquot
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	/* Check whether we are not racing with some other dqget() */
	if (dquot_is_busy(dquot))
		goto out_dqlock;
	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
						dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_dqlock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);

void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);

/* Destroy a dquot via the filesystem's dquot_operations hook. */
static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}

/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	/* Finish pending deferred cleanup so releasing dquots settle. */
	flush_delayed_work(&quota_release_work);

	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_id.type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			atomic_inc(&dquot->dq_count);
			spin_unlock(&dq_list_lock);
			/*
			 * Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			wait_event(dquot_ref_wq,
				   atomic_read(&dquot->dq_count) == 1);
			dqput(dquot);
			/* At this moment dquot() need not exist (it could be
			 * reclaimed by prune_dqcache(). Hence we must
			 * restart. */
			goto restart;
		}
		/*
		 * The last user already dropped its reference but dquot didn't
		 * get fully cleaned up yet. Restart the scan which flushes the
		 * work cleaning up released dquots.
		 */
		if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
			spin_unlock(&dq_list_lock);
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}

/* Call callback for every active dquot on given filesystem */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!dquot_active(dquot))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqput(old_dquot);
		old_dquot = dquot;
		/*
		 * ->release_dquot() can be racing with us. Our reference
		 * protects us from new calls to it so just wait for any
		 * outstanding call and recheck the DQ_ACTIVE_B after that.
		 */
		wait_on_dquot(dquot);
		if (dquot_active(dquot)) {
			ret = fn(dquot, priv);
			if (ret < 0)
				goto out;
		}
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);

/* Write one dquot via the filesystem hook, reporting (rate-limited) errors. */
static inline int dquot_write_dquot(struct dquot *dquot)
{
	int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't write quota structure "
			    "(error %d). Quota may get out of sync!", ret);
		/* Clear dirty bit anyway to avoid infinite loop. */
		clear_dquot_dirty(dquot);
	}
	return ret;
}

/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
	struct list_head dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int err, ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		/* Move list away to avoid livelock.
		 */
		list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
		while (!list_empty(&dirty)) {
			dquot = list_first_entry(&dirty, struct dquot,
						 dq_dirty);

			WARN_ON(!dquot_active(dquot));
			/* If the dquot is releasing we should not touch it */
			if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
				spin_unlock(&dq_list_lock);
				flush_delayed_work(&quota_release_work);
				spin_lock(&dq_list_lock);
				continue;
			}

			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			err = dquot_write_dquot(dquot);
			if (err && !ret)
				ret = err;
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);

	return ret;
}
EXPORT_SYMBOL(dquot_writeback_dquots);

/* Write all dquot structures to disk and make them visible from userspace */
int dquot_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		return 0;

	/* This is not very clever (and fast) but currently I
	 * don't know about
	 * any other simple way of getting quota data to disk and we must get
	 * them there for userspace to be visible... */
	if (sb->s_op->sync_fs) {
		ret = sb->s_op->sync_fs(sb, 1);
		if (ret)
			return ret;
	}
	ret = sync_blockdev(sb->s_bdev);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}

	return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);

/* Shrinker scan callback: free up to sc->nr_to_scan unused dquots from the
 * free_dquots list, returning the number actually freed. */
static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dquot *dquot;
	unsigned long freed = 0;

	spin_lock(&dq_list_lock);
	while (!list_empty(&free_dquots) && sc->nr_to_scan) {
		dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
		sc->nr_to_scan--;
		freed++;
	}
	spin_unlock(&dq_list_lock);
	return freed;
}

/* Shrinker count callback: report reclaim pressure from the free dquot count. */
static unsigned long
dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return vfs_pressure_ratio(
	percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
}

/*
 * Safely release dquot and put reference to dquot.
 */
static void quota_release_workfn(struct work_struct *work)
{
	struct dquot *dquot;
	struct list_head rls_head;

	spin_lock(&dq_list_lock);
	/* Exchange the list head to avoid livelock. */
	list_replace_init(&releasing_dquots, &rls_head);
	spin_unlock(&dq_list_lock);
	synchronize_srcu(&dquot_srcu);

restart:
	spin_lock(&dq_list_lock);
	while (!list_empty(&rls_head)) {
		dquot = list_first_entry(&rls_head, struct dquot, dq_free);
		WARN_ON_ONCE(atomic_read(&dquot->dq_count));
		/*
		 * Note that DQ_RELEASING_B protects us from racing with
		 * invalidate_dquots() calls so we are safe to work with the
		 * dquot even after we drop dq_list_lock.
831103409Smini */ 832208833Skib if (dquot_dirty(dquot)) { 833208833Skib spin_unlock(&dq_list_lock); 834215865Skib /* Commit dquot before releasing */ 835215865Skib dquot_write_dquot(dquot); 836103409Smini goto restart; 837103409Smini } 838215865Skib if (dquot_active(dquot)) { 839208833Skib spin_unlock(&dq_list_lock); 840215865Skib dquot->dq_sb->dq_op->release_dquot(dquot); 841208833Skib goto restart; 842208833Skib } 843208833Skib /* Dquot is inactive and clean, now move it to free list */ 844209461Skib remove_free_dquot(dquot); 845208833Skib put_dquot_last(dquot); 846208833Skib } 847208833Skib spin_unlock(&dq_list_lock); 848209462Skib} 849208833Skib 850208833Skib/* 851208833Skib * Put reference to dquot 852208833Skib */ 853208833Skibvoid dqput(struct dquot *dquot) 854215865Skib{ 855215865Skib if (!dquot) 856215865Skib return; 857215865Skib#ifdef CONFIG_QUOTA_DEBUG 858215865Skib if (!atomic_read(&dquot->dq_count)) { 859209462Skib quota_error(dquot->dq_sb, "trying to free free dquot of %s %d", 860208833Skib quotatypes[dquot->dq_id.type], 861208833Skib from_kqid(&init_user_ns, dquot->dq_id)); 862209462Skib BUG(); 863208833Skib } 864215865Skib#endif 865208833Skib dqstats_inc(DQST_DROPS); 866208833Skib 867208833Skib spin_lock(&dq_list_lock); 86879609Speter if (atomic_read(&dquot->dq_count) > 1) { 86979781Stegge /* We have more than one user... nothing to do */ 87079609Speter atomic_dec(&dquot->dq_count); 87179609Speter /* Releasing dquot during quotaoff phase? */ 87279609Speter if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) && 87382154Speter atomic_read(&dquot->dq_count) == 1) 87482154Speter wake_up(&dquot_ref_wq); 87582154Speter spin_unlock(&dq_list_lock); 87682154Speter return; 87782154Speter } 87879609Speter 87979609Speter /* Need to release dquot? 
 */
	WARN_ON_ONCE(!list_empty(&dquot->dq_free));
	/* Last reference: hand the dquot to the delayed release worker. */
	put_releasing_dquots(dquot);
	atomic_dec(&dquot->dq_count);
	spin_unlock(&dq_list_lock);
	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
}
EXPORT_SYMBOL(dqput);

/* Default ->alloc_dquot: zero-initialized allocation from dquot_cachep. */
struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);

/*
 * Allocate and initialize a fresh dquot (not yet hashed or on any list).
 * The id is set to an invalid kqid; the caller fills in the real id.
 * Returns NULL on allocation failure.
 */
static struct dquot *get_empty_dquot(struct super_block *sb, int type)
{
	struct dquot *dquot;

	dquot = sb->dq_op->alloc_dquot(sb, type);
	if(!dquot)
		return NULL;

	mutex_init(&dquot->dq_lock);
	INIT_LIST_HEAD(&dquot->dq_free);
	INIT_LIST_HEAD(&dquot->dq_inuse);
	INIT_HLIST_NODE(&dquot->dq_hash);
	INIT_LIST_HEAD(&dquot->dq_dirty);
	dquot->dq_sb = sb;
	dquot->dq_id = make_kqid_invalid(type);
	atomic_set(&dquot->dq_count, 1);
	spin_lock_init(&dquot->dq_dqb_lock);

	return dquot;
}

/*
 * Get reference to dquot
 *
 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
 * destroying our dquot by:
 *   a) checking for quota flags under dq_list_lock and
 *   b) getting a reference to dquot before we release dq_list_lock
 */
struct dquot *dqget(struct super_block *sb, struct kqid qid)
{
	unsigned int hashent = hashfn(sb, qid);
	struct dquot *dquot, *empty = NULL;

	if (!qid_has_mapping(sb->s_user_ns, qid))
		return ERR_PTR(-EINVAL);

	if (!sb_has_quota_active(sb, qid.type))
		return ERR_PTR(-ESRCH);
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	/* Re-check under the locks - we may have raced with quotaoff. */
	if (!sb_has_quota_active(sb, qid.type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		dquot = ERR_PTR(-ESRCH);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, qid);
	if (!dquot) {
		if (!empty) {
			/* Allocate outside dq_list_lock and retry the lookup. */
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, qid.type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = qid;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
	} else {
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_CACHE_HITS);
		dqstats_inc(DQST_LOOKUPS);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 0 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!dquot_active(dquot)) {
		int err;

		err = sb->dq_op->acquire_dquot(dquot);
		if (err < 0) {
			dqput(dquot);
			dquot = ERR_PTR(err);
			goto out;
		}
	}
	/*
	 * Make sure following reads see filled structure - paired with
	 * smp_mb__before_atomic() in dquot_acquire().
	 */
	smp_rmb();
	/* Has somebody invalidated entry under us?
 */
	WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash));
out:
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
EXPORT_SYMBOL(dqget);

/* Return the filesystem-provided per-inode dquot pointer array. */
static inline struct dquot __rcu **i_dquot(struct inode *inode)
{
	return inode->i_sb->s_op->get_dquots(inode);
}

/*
 * Does this inode still need dquot pointers initialized for @type
 * (or for any type when @type == -1)?
 */
static int dqinit_needed(struct inode *inode, int type)
{
	struct dquot __rcu * const *dquots;
	int cnt;

	if (IS_NOQUOTA(inode))
		return 0;

	dquots = i_dquot(inode);
	if (type != -1)
		return !dquots[type];
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!dquots[cnt])
			return 1;
	return 0;
}

/* This routine is guarded by s_umount semaphore */
static int add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif
	int err = 0;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/* Skip dying/new inodes and those not open for write. */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    !atomic_read(&inode->i_writecount) ||
		    !dqinit_needed(inode, type)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

#ifdef CONFIG_QUOTA_DEBUG
		if (unlikely(inode_get_rsv_space(inode) > 0))
			reserved = 1;
#endif
		iput(old_inode);
		err = __dquot_initialize(inode, type);
		if (err) {
			iput(inode);
			goto out;
		}

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can be
		 * holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		old_inode = inode;
		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(old_inode);
out:
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		quota_error(sb, "Writes happened before quota was turned on "
			"thus quota information is probably inconsistent. "
			"Please run quotacheck(8)");
	}
#endif
	return err;
}

/*
 * Clear the dquot pointers of the given @type from every inode on @sb,
 * dropping the references they held.
 */
static void remove_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 * We have to scan also I_NEW inodes because they can already
		 * have quota pointer initialized. Luckily, we need to touch
		 * only quota pointers and these have separate locking
		 * (dq_data_lock).
 */
		spin_lock(&dq_data_lock);
		if (!IS_NOQUOTA(inode)) {
			struct dquot __rcu **dquots = i_dquot(inode);
			struct dquot *dquot = srcu_dereference_check(
				dquots[type], &dquot_srcu,
				lockdep_is_held(&dq_data_lock));

#ifdef CONFIG_QUOTA_DEBUG
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
#endif
			rcu_assign_pointer(dquots[type], NULL);
			if (dquot)
				dqput(dquot);
		}
		spin_unlock(&dq_data_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
			" was disabled thus quota information is probably "
			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
	}
#endif
}

/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
	if (sb->dq_op)
		remove_dquot_ref(sb, type);
}

/*
 * Release @number of reserved space from @dquot; clamps at zero (with a
 * warning) if more is freed than was reserved. Caller holds dq_dqb_lock.
 */
static inline
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_rsvspace >= number)
		dquot->dq_dqb.dqb_rsvspace -= number;
	else {
		WARN_ON_ONCE(1);
		dquot->dq_dqb.dqb_rsvspace = 0;
	}
	/* Back under the soft limit - reset grace time and warning bit. */
	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
	    dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time64_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

/* Decrease inode usage; clamps at zero unless DQUOT_NEGATIVE_USAGE is set. */
static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curinodes >= number)
		dquot->dq_dqb.dqb_curinodes -= number;
	else
		dquot->dq_dqb.dqb_curinodes = 0;
	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
		dquot->dq_dqb.dqb_itime = (time64_t) 0;
	clear_bit(DQ_INODES_B, &dquot->dq_flags);
}

/* Decrease space usage; clamps at zero unless DQUOT_NEGATIVE_USAGE is set. */
static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curspace >= number)
		dquot->dq_dqb.dqb_curspace -= number;
	else
		dquot->dq_dqb.dqb_curspace = 0;
	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
	    dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time64_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

/* A pending quota warning, filled by prepare_warning(), delivered later
 * by flush_warnings() outside of the locked sections. */
struct dquot_warn {
	struct super_block *w_sb;
	struct kqid w_dq_id;
	short w_type;
};

/*
 * Atomically test-and-set the per-dquot "warning already sent" bit for
 * hard/long-soft warnings; returns nonzero if it was already issued.
 */
static int warning_issued(struct dquot *dquot, const int warntype)
{
	int flag = (warntype == QUOTA_NL_BHARDWARN ||
		warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
		((warntype == QUOTA_NL_IHARDWARN ||
		warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);

	if (!flag)
		return 0;
	return test_and_set_bit(flag, &dquot->dq_flags);
}

#ifdef CONFIG_PRINT_QUOTA_WARNING
static int flag_print_warnings = 1;

/* Should this warning be printed to the current task's tty? */
static int need_print_warning(struct dquot_warn *warn)
{
	if (!flag_print_warnings)
		return 0;

	switch (warn->w_dq_id.type) {
	case USRQUOTA:
		return uid_eq(current_fsuid(), warn->w_dq_id.uid);
	case GRPQUOTA:
		return in_group_p(warn->w_dq_id.gid);
	case PRJQUOTA:
		return 1;
	}
	return 0;
}

/* Print warning to user which exceeded quota */
static void print_warning(struct dquot_warn *warn)
{
	char *msg = NULL;
	struct tty_struct *tty;
	int warntype = warn->w_type;

	/* "below limit" notifications are netlink-only - never printed. */
	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
		return;

	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, warn->w_sb->s_id);
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
	switch (warntype) {
	case QUOTA_NL_IHARDWARN:
		msg = " file limit reached.\r\n";
		break;
	case QUOTA_NL_ISOFTLONGWARN:
		msg = " file quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_ISOFTWARN:
		msg = " file quota exceeded.\r\n";
		break;
	case QUOTA_NL_BHARDWARN:
		msg = " block limit reached.\r\n";
		break;
	case QUOTA_NL_BSOFTLONGWARN:
		msg = " block quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_BSOFTWARN:
		msg = " block quota exceeded.\r\n";
		break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);
}
#endif

static void prepare_warning(struct dquot_warn
			    *warn, struct dquot *dquot,
			    int warntype)
{
	/* Record the warning for later delivery unless already issued. */
	if (warning_issued(dquot, warntype))
		return;
	warn->w_type = warntype;
	warn->w_sb = dquot->dq_sb;
	warn->w_dq_id = dquot->dq_id;
}

/*
 * Write warnings to the console and send warning messages over netlink.
 *
 * Note that this function can call into tty and networking code.
 */
static void flush_warnings(struct dquot_warn *warn)
{
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (warn[i].w_type == QUOTA_NL_NOWARN)
			continue;
#ifdef CONFIG_PRINT_QUOTA_WARNING
		print_warning(&warn[i]);
#endif
		quota_send_warning(warn[i].w_dq_id,
				   warn[i].w_sb->s_dev, warn[i].w_type);
	}
}

/* May the caller exceed hard limits? (CAP_SYS_RESOURCE, unless the old
 * quota format has root squashing enabled.) */
static int ignore_hardlimit(struct dquot *dquot)
{
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	return capable(CAP_SYS_RESOURCE) &&
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & DQF_ROOT_SQUASH));
}

/*
 * Account @inodes more inodes to @dquot, checking limits and preparing
 * (not delivering) any warning in @warn. Returns 0 or -EDQUOT.
 */
static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
			    struct dquot_warn *warn)
{
	qsize_t newinodes;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto add;

	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
		ret = -EDQUOT;
		goto out;
	}

	/* Over soft limit and the grace time has expired. */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
		ret = -EDQUOT;
		goto out;
	}

	/* First time over the soft limit - start the grace period. */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
		dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
	}
add:
	dquot->dq_dqb.dqb_curinodes = newinodes;

out:
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}

/*
 * Account @space used plus @rsv_space reserved to @dquot, checking limits
 * and preparing any warning in @warn. DQUOT_SPACE_NOFAIL forces success;
 * DQUOT_SPACE_WARN enables warning generation. Returns 0 or -EDQUOT.
 */
static int dquot_add_space(struct dquot *dquot, qsize_t space,
			   qsize_t rsv_space, unsigned int flags,
			   struct dquot_warn *warn)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto finish;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space + rsv_space;

	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
		ret = -EDQUOT;
		goto finish;
	}

	/* Over soft limit and the grace time has expired. */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
		ret = -EDQUOT;
		goto finish;
	}

	/* First time over the soft limit - start the grace period. */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (flags & DQUOT_SPACE_WARN) {
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
			dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
		} else {
			/*
			 * We don't allow preallocation to exceed softlimit so exceeding will
			 * be always printed
			 */
			ret = -EDQUOT;
			goto finish;
		}
	}
finish:
	/*
	 * We have to be careful and go through warning generation & grace time
	 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
	 * only here...
	 */
	if (flags & DQUOT_SPACE_NOFAIL)
		ret = 0;
	if (!ret) {
		dquot->dq_dqb.dqb_rsvspace += rsv_space;
		dquot->dq_dqb.dqb_curspace += space;
	}
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}

/* Which "usage dropped below limit" netlink event (if any) would freeing
 * @inodes inodes from @dquot trigger? */
static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
	qsize_t newinodes;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
		return QUOTA_NL_NOWARN;

	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
		return QUOTA_NL_ISOFTBELOW;
	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
		return QUOTA_NL_IHARDBELOW;
	return QUOTA_NL_NOWARN;
}

/* Which "usage dropped below limit" netlink event (if any) would freeing
 * @space bytes from @dquot trigger? */
static int info_bdq_free(struct dquot *dquot, qsize_t space)
{
	qsize_t tspace;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    tspace <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_NOWARN;

	if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_BSOFTBELOW;
	if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
	    tspace - space < dquot->dq_dqb.dqb_bhardlimit)
		return QUOTA_NL_BHARDBELOW;
	return QUOTA_NL_NOWARN;
}

/* Is quota accounting applicable to this inode at all (loaded and not
 * merely suspended, and the inode itself is not a quota file)? */
static int inode_quota_active(const struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (IS_NOQUOTA(inode))
		return 0;
	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
}

/*
 * Initialize quota pointers
 * in inode
 *
 * It is better to call this function outside of any transaction as it
 * might need a lot of space in journal for dquot structure allocation.
 */
static int __dquot_initialize(struct inode *inode, int type)
{
	int cnt, init_needed = 0;
	struct dquot __rcu **dquots;
	struct dquot *got[MAXQUOTAS] = {};
	struct super_block *sb = inode->i_sb;
	qsize_t rsv;
	int ret = 0;

	if (!inode_quota_active(inode))
		return 0;

	dquots = i_dquot(inode);

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct kqid qid;
		kprojid_t projid;
		int rc;
		struct dquot *dquot;

		if (type != -1 && cnt != type)
			continue;
		/*
		 * The i_dquot should have been initialized in most cases,
		 * we check it without locking here to avoid unnecessary
		 * dqget()/dqput() calls.
		 */
		if (dquots[cnt])
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		init_needed = 1;

		switch (cnt) {
		case USRQUOTA:
			qid = make_kqid_uid(inode->i_uid);
			break;
		case GRPQUOTA:
			qid = make_kqid_gid(inode->i_gid);
			break;
		case PRJQUOTA:
			rc = inode->i_sb->dq_op->get_projid(inode, &projid);
			if (rc)
				continue;
			qid = make_kqid_projid(projid);
			break;
		}
		dquot = dqget(sb, qid);
		if (IS_ERR(dquot)) {
			/* We raced with somebody turning quotas off... */
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		got[cnt] = dquot;
	}

	/* All required i_dquot has been initialized */
	if (!init_needed)
		return 0;

	spin_lock(&dq_data_lock);
	if (IS_NOQUOTA(inode))
		goto out_lock;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		/* We could race with quotaon or dqget() could have failed */
		if (!got[cnt])
			continue;
		if (!dquots[cnt]) {
			rcu_assign_pointer(dquots[cnt], got[cnt]);
			got[cnt] = NULL;
			/*
			 * Make quota reservation system happy if someone
			 * did a write before quota was turned on
			 */
			rsv = inode_get_rsv_space(inode);
			if (unlikely(rsv)) {
				struct dquot *dquot = srcu_dereference_check(
					dquots[cnt], &dquot_srcu,
					lockdep_is_held(&dq_data_lock));

				spin_lock(&inode->i_lock);
				/* Get reservation again under proper lock */
				rsv = __inode_get_rsv_space(inode);
				spin_lock(&dquot->dq_dqb_lock);
				dquot->dq_dqb.dqb_rsvspace += rsv;
				spin_unlock(&dquot->dq_dqb_lock);
				spin_unlock(&inode->i_lock);
			}
		}
	}
out_lock:
	spin_unlock(&dq_data_lock);
out_put:
	/* Drop unused references */
	dqput_all(got);

	return ret;
}

/* Initialize the inode's dquot pointers for all quota types. */
int dquot_initialize(struct inode *inode)
{
	return __dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);

/* Does the inode still lack a dquot pointer for some active quota type? */
bool dquot_initialize_needed(struct inode *inode)
{
	struct dquot __rcu **dquots;
	int i;

	if (!inode_quota_active(inode))
		return false;

	dquots = i_dquot(inode);
	for (i = 0; i < MAXQUOTAS; i++)
		if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
			return true;
	return false;
}
EXPORT_SYMBOL(dquot_initialize_needed);

1587/* 1588 * Release all quotas referenced by inode. 1589 * 1590 * This function only be called on inode free or converting 1591 * a file to quota file, no other users for the i_dquot in 1592 * both cases, so we needn't call synchronize_srcu() after 1593 * clearing i_dquot. 1594 */ 1595static void __dquot_drop(struct inode *inode) 1596{ 1597 int cnt; 1598 struct dquot __rcu **dquots = i_dquot(inode); 1599 struct dquot *put[MAXQUOTAS]; 1600 1601 spin_lock(&dq_data_lock); 1602 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1603 put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu, 1604 lockdep_is_held(&dq_data_lock)); 1605 rcu_assign_pointer(dquots[cnt], NULL); 1606 } 1607 spin_unlock(&dq_data_lock); 1608 dqput_all(put); 1609} 1610 1611void dquot_drop(struct inode *inode) 1612{ 1613 struct dquot __rcu * const *dquots; 1614 int cnt; 1615 1616 if (IS_NOQUOTA(inode)) 1617 return; 1618 1619 /* 1620 * Test before calling to rule out calls from proc and such 1621 * where we are not allowed to block. Note that this is 1622 * actually reliable test even without the lock - the caller 1623 * must assure that nobody can come after the DQUOT_DROP and 1624 * add quota pointers back anyway. 1625 */ 1626 dquots = i_dquot(inode); 1627 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1628 if (dquots[cnt]) 1629 break; 1630 } 1631 1632 if (cnt < MAXQUOTAS) 1633 __dquot_drop(inode); 1634} 1635EXPORT_SYMBOL(dquot_drop); 1636 1637/* 1638 * inode_reserved_space is managed internally by quota, and protected by 1639 * i_lock similar to i_blocks+i_bytes. 
1640 */ 1641static qsize_t *inode_reserved_space(struct inode * inode) 1642{ 1643 /* Filesystem must explicitly define it's own method in order to use 1644 * quota reservation interface */ 1645 BUG_ON(!inode->i_sb->dq_op->get_reserved_space); 1646 return inode->i_sb->dq_op->get_reserved_space(inode); 1647} 1648 1649static qsize_t __inode_get_rsv_space(struct inode *inode) 1650{ 1651 if (!inode->i_sb->dq_op->get_reserved_space) 1652 return 0; 1653 return *inode_reserved_space(inode); 1654} 1655 1656static qsize_t inode_get_rsv_space(struct inode *inode) 1657{ 1658 qsize_t ret; 1659 1660 if (!inode->i_sb->dq_op->get_reserved_space) 1661 return 0; 1662 spin_lock(&inode->i_lock); 1663 ret = __inode_get_rsv_space(inode); 1664 spin_unlock(&inode->i_lock); 1665 return ret; 1666} 1667 1668/* 1669 * This functions updates i_blocks+i_bytes fields and quota information 1670 * (together with appropriate checks). 1671 * 1672 * NOTE: We absolutely rely on the fact that caller dirties the inode 1673 * (usually helpers in quotaops.h care about this) and holds a handle for 1674 * the current transaction so that dquot write and inode write go into the 1675 * same transaction. 
 */

/*
 * This operation can block, but only after everything is updated
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	int reserve = flags & DQUOT_SPACE_RESERVE;
	struct dquot __rcu **dquots;
	struct dquot *dquot;

	/* No quota accounting - just update the inode counters. */
	if (!inode_quota_active(inode)) {
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) += number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_add_bytes(inode, number);
		}
		goto out;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		if (reserve) {
			ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]);
		} else {
			ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]);
		}
		if (ret) {
			/* Back out changes we already did */
			for (cnt--; cnt >= 0; cnt--) {
				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
				if (!dquot)
					continue;
				spin_lock(&dquot->dq_dqb_lock);
				if (reserve)
					dquot_free_reserved_space(dquot, number);
				else
					dquot_decr_space(dquot, number);
				spin_unlock(&dquot->dq_dqb_lock);
			}
			spin_unlock(&inode->i_lock);
			goto out_flush_warn;
		}
	}
	if (reserve)
		*inode_reserved_space(inode) += number;
	else
		__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);

	if (reserve)
		goto out_flush_warn;
	mark_all_dquot_dirty(dquots);
out_flush_warn:
	srcu_read_unlock(&dquot_srcu, index);
	/* Deliver warnings only after all locks are dropped. */
	flush_warnings(warn);
out:
	return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);

/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(struct inode *inode)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot __rcu * const *dquots;
	struct dquot *dquot;

	if (!inode_quota_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
		if (ret) {
			for (cnt--; cnt >= 0; cnt--) {
				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
				if (!dquot)
					continue;
				/* Back out changes we already did */
				spin_lock(&dquot->dq_dqb_lock);
				dquot_decr_inodes(dquot, 1);
				spin_unlock(&dquot->dq_dqb_lock);
			}
			goto warn_put_all;
		}
	}

warn_put_all:
	spin_unlock(&inode->i_lock);
	if (ret == 0)
		mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);

/*
 * Convert in-memory reserved quotas to real consumed quotas
 */
void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) -= number;
		__inode_add_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot) {
			spin_lock(&dquot->dq_dqb_lock);
			/* Never move more than is actually reserved. */
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
				number = dquot->dq_dqb.dqb_rsvspace;
			dquot->dq_dqb.dqb_curspace += number;
			dquot->dq_dqb.dqb_rsvspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	return;
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);

/*
 * Convert allocated space back to in-memory reserved quotas
 */
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) += number;
		__inode_sub_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot) {
			spin_lock(&dquot->dq_dqb_lock);
			/* Never move more than is actually allocated. */
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
				number = dquot->dq_dqb.dqb_curspace;
			dquot->dq_dqb.dqb_rsvspace += number;
			dquot->dq_dqb.dqb_curspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) += number;
	__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	return;
}
EXPORT_SYMBOL(dquot_reclaim_space_nodirty);

/*
 * This operation can block, but only after everything is updated
 */
void
__dquot_free_space(struct inode *inode, qsize_t number, int flags) 1884{ 1885 unsigned int cnt; 1886 struct dquot_warn warn[MAXQUOTAS]; 1887 struct dquot __rcu **dquots; 1888 struct dquot *dquot; 1889 int reserve = flags & DQUOT_SPACE_RESERVE, index; 1890 1891 if (!inode_quota_active(inode)) { 1892 if (reserve) { 1893 spin_lock(&inode->i_lock); 1894 *inode_reserved_space(inode) -= number; 1895 spin_unlock(&inode->i_lock); 1896 } else { 1897 inode_sub_bytes(inode, number); 1898 } 1899 return; 1900 } 1901 1902 dquots = i_dquot(inode); 1903 index = srcu_read_lock(&dquot_srcu); 1904 spin_lock(&inode->i_lock); 1905 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1906 int wtype; 1907 1908 warn[cnt].w_type = QUOTA_NL_NOWARN; 1909 dquot = srcu_dereference(dquots[cnt], &dquot_srcu); 1910 if (!dquot) 1911 continue; 1912 spin_lock(&dquot->dq_dqb_lock); 1913 wtype = info_bdq_free(dquot, number); 1914 if (wtype != QUOTA_NL_NOWARN) 1915 prepare_warning(&warn[cnt], dquot, wtype); 1916 if (reserve) 1917 dquot_free_reserved_space(dquot, number); 1918 else 1919 dquot_decr_space(dquot, number); 1920 spin_unlock(&dquot->dq_dqb_lock); 1921 } 1922 if (reserve) 1923 *inode_reserved_space(inode) -= number; 1924 else 1925 __inode_sub_bytes(inode, number); 1926 spin_unlock(&inode->i_lock); 1927 1928 if (reserve) 1929 goto out_unlock; 1930 mark_all_dquot_dirty(dquots); 1931out_unlock: 1932 srcu_read_unlock(&dquot_srcu, index); 1933 flush_warnings(warn); 1934} 1935EXPORT_SYMBOL(__dquot_free_space); 1936 1937/* 1938 * This operation can block, but only after everything is updated 1939 */ 1940void dquot_free_inode(struct inode *inode) 1941{ 1942 unsigned int cnt; 1943 struct dquot_warn warn[MAXQUOTAS]; 1944 struct dquot __rcu * const *dquots; 1945 struct dquot *dquot; 1946 int index; 1947 1948 if (!inode_quota_active(inode)) 1949 return; 1950 1951 dquots = i_dquot(inode); 1952 index = srcu_read_lock(&dquot_srcu); 1953 spin_lock(&inode->i_lock); 1954 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1955 int 
wtype; 1956 warn[cnt].w_type = QUOTA_NL_NOWARN; 1957 dquot = srcu_dereference(dquots[cnt], &dquot_srcu); 1958 if (!dquot) 1959 continue; 1960 spin_lock(&dquot->dq_dqb_lock); 1961 wtype = info_idq_free(dquot, 1); 1962 if (wtype != QUOTA_NL_NOWARN) 1963 prepare_warning(&warn[cnt], dquot, wtype); 1964 dquot_decr_inodes(dquot, 1); 1965 spin_unlock(&dquot->dq_dqb_lock); 1966 } 1967 spin_unlock(&inode->i_lock); 1968 mark_all_dquot_dirty(dquots); 1969 srcu_read_unlock(&dquot_srcu, index); 1970 flush_warnings(warn); 1971} 1972EXPORT_SYMBOL(dquot_free_inode); 1973 1974/* 1975 * Transfer the number of inode and blocks from one diskquota to an other. 1976 * On success, dquot references in transfer_to are consumed and references 1977 * to original dquots that need to be released are placed there. On failure, 1978 * references are kept untouched. 1979 * 1980 * This operation can block, but only after everything is updated 1981 * A transaction must be started when entering this function. 1982 * 1983 * We are holding reference on transfer_from & transfer_to, no need to 1984 * protect them by srcu_read_lock(). 
1985 */ 1986int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) 1987{ 1988 qsize_t cur_space; 1989 qsize_t rsv_space = 0; 1990 qsize_t inode_usage = 1; 1991 struct dquot __rcu **dquots; 1992 struct dquot *transfer_from[MAXQUOTAS] = {}; 1993 int cnt, index, ret = 0; 1994 char is_valid[MAXQUOTAS] = {}; 1995 struct dquot_warn warn_to[MAXQUOTAS]; 1996 struct dquot_warn warn_from_inodes[MAXQUOTAS]; 1997 struct dquot_warn warn_from_space[MAXQUOTAS]; 1998 1999 if (IS_NOQUOTA(inode)) 2000 return 0; 2001 2002 if (inode->i_sb->dq_op->get_inode_usage) { 2003 ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage); 2004 if (ret) 2005 return ret; 2006 } 2007 2008 /* Initialize the arrays */ 2009 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 2010 warn_to[cnt].w_type = QUOTA_NL_NOWARN; 2011 warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN; 2012 warn_from_space[cnt].w_type = QUOTA_NL_NOWARN; 2013 } 2014 2015 spin_lock(&dq_data_lock); 2016 spin_lock(&inode->i_lock); 2017 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ 2018 spin_unlock(&inode->i_lock); 2019 spin_unlock(&dq_data_lock); 2020 return 0; 2021 } 2022 cur_space = __inode_get_bytes(inode); 2023 rsv_space = __inode_get_rsv_space(inode); 2024 dquots = i_dquot(inode); 2025 /* 2026 * Build the transfer_from list, check limits, and update usage in 2027 * the target structures. 2028 */ 2029 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 2030 /* 2031 * Skip changes for same uid or gid or for turned off quota-type. 
2032 */ 2033 if (!transfer_to[cnt]) 2034 continue; 2035 /* Avoid races with quotaoff() */ 2036 if (!sb_has_quota_active(inode->i_sb, cnt)) 2037 continue; 2038 is_valid[cnt] = 1; 2039 transfer_from[cnt] = srcu_dereference_check(dquots[cnt], 2040 &dquot_srcu, lockdep_is_held(&dq_data_lock)); 2041 ret = dquot_add_inodes(transfer_to[cnt], inode_usage, 2042 &warn_to[cnt]); 2043 if (ret) 2044 goto over_quota; 2045 ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space, 2046 DQUOT_SPACE_WARN, &warn_to[cnt]); 2047 if (ret) { 2048 spin_lock(&transfer_to[cnt]->dq_dqb_lock); 2049 dquot_decr_inodes(transfer_to[cnt], inode_usage); 2050 spin_unlock(&transfer_to[cnt]->dq_dqb_lock); 2051 goto over_quota; 2052 } 2053 } 2054 2055 /* Decrease usage for source structures and update quota pointers */ 2056 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 2057 if (!is_valid[cnt]) 2058 continue; 2059 /* Due to IO error we might not have transfer_from[] structure */ 2060 if (transfer_from[cnt]) { 2061 int wtype; 2062 2063 spin_lock(&transfer_from[cnt]->dq_dqb_lock); 2064 wtype = info_idq_free(transfer_from[cnt], inode_usage); 2065 if (wtype != QUOTA_NL_NOWARN) 2066 prepare_warning(&warn_from_inodes[cnt], 2067 transfer_from[cnt], wtype); 2068 wtype = info_bdq_free(transfer_from[cnt], 2069 cur_space + rsv_space); 2070 if (wtype != QUOTA_NL_NOWARN) 2071 prepare_warning(&warn_from_space[cnt], 2072 transfer_from[cnt], wtype); 2073 dquot_decr_inodes(transfer_from[cnt], inode_usage); 2074 dquot_decr_space(transfer_from[cnt], cur_space); 2075 dquot_free_reserved_space(transfer_from[cnt], 2076 rsv_space); 2077 spin_unlock(&transfer_from[cnt]->dq_dqb_lock); 2078 } 2079 rcu_assign_pointer(dquots[cnt], transfer_to[cnt]); 2080 } 2081 spin_unlock(&inode->i_lock); 2082 spin_unlock(&dq_data_lock); 2083 2084 /* 2085 * These arrays are local and we hold dquot references so we don't need 2086 * the srcu protection but still take dquot_srcu to avoid warning in 2087 * mark_all_dquot_dirty(). 
2088 */ 2089 index = srcu_read_lock(&dquot_srcu); 2090 mark_all_dquot_dirty((struct dquot __rcu **)transfer_from); 2091 mark_all_dquot_dirty((struct dquot __rcu **)transfer_to); 2092 srcu_read_unlock(&dquot_srcu, index); 2093 2094 flush_warnings(warn_to); 2095 flush_warnings(warn_from_inodes); 2096 flush_warnings(warn_from_space); 2097 /* Pass back references to put */ 2098 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 2099 if (is_valid[cnt]) 2100 transfer_to[cnt] = transfer_from[cnt]; 2101 return 0; 2102over_quota: 2103 /* Back out changes we already did */ 2104 for (cnt--; cnt >= 0; cnt--) { 2105 if (!is_valid[cnt]) 2106 continue; 2107 spin_lock(&transfer_to[cnt]->dq_dqb_lock); 2108 dquot_decr_inodes(transfer_to[cnt], inode_usage); 2109 dquot_decr_space(transfer_to[cnt], cur_space); 2110 dquot_free_reserved_space(transfer_to[cnt], rsv_space); 2111 spin_unlock(&transfer_to[cnt]->dq_dqb_lock); 2112 } 2113 spin_unlock(&inode->i_lock); 2114 spin_unlock(&dq_data_lock); 2115 flush_warnings(warn_to); 2116 return ret; 2117} 2118EXPORT_SYMBOL(__dquot_transfer); 2119 2120/* Wrapper for transferring ownership of an inode for uid/gid only 2121 * Called from FSXXX_setattr() 2122 */ 2123int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode, 2124 struct iattr *iattr) 2125{ 2126 struct dquot *transfer_to[MAXQUOTAS] = {}; 2127 struct dquot *dquot; 2128 struct super_block *sb = inode->i_sb; 2129 int ret; 2130 2131 if (!inode_quota_active(inode)) 2132 return 0; 2133 2134 if (i_uid_needs_update(idmap, iattr, inode)) { 2135 kuid_t kuid = from_vfsuid(idmap, i_user_ns(inode), 2136 iattr->ia_vfsuid); 2137 2138 dquot = dqget(sb, make_kqid_uid(kuid)); 2139 if (IS_ERR(dquot)) { 2140 if (PTR_ERR(dquot) != -ESRCH) { 2141 ret = PTR_ERR(dquot); 2142 goto out_put; 2143 } 2144 dquot = NULL; 2145 } 2146 transfer_to[USRQUOTA] = dquot; 2147 } 2148 if (i_gid_needs_update(idmap, iattr, inode)) { 2149 kgid_t kgid = from_vfsgid(idmap, i_user_ns(inode), 2150 iattr->ia_vfsgid); 2151 2152 dquot = 
dqget(sb, make_kqid_gid(kgid)); 2153 if (IS_ERR(dquot)) { 2154 if (PTR_ERR(dquot) != -ESRCH) { 2155 ret = PTR_ERR(dquot); 2156 goto out_put; 2157 } 2158 dquot = NULL; 2159 } 2160 transfer_to[GRPQUOTA] = dquot; 2161 } 2162 ret = __dquot_transfer(inode, transfer_to); 2163out_put: 2164 dqput_all(transfer_to); 2165 return ret; 2166} 2167EXPORT_SYMBOL(dquot_transfer); 2168 2169/* 2170 * Write info of quota file to disk 2171 */ 2172int dquot_commit_info(struct super_block *sb, int type) 2173{ 2174 struct quota_info *dqopt = sb_dqopt(sb); 2175 2176 return dqopt->ops[type]->write_file_info(sb, type); 2177} 2178EXPORT_SYMBOL(dquot_commit_info); 2179 2180int dquot_get_next_id(struct super_block *sb, struct kqid *qid) 2181{ 2182 struct quota_info *dqopt = sb_dqopt(sb); 2183 2184 if (!sb_has_quota_active(sb, qid->type)) 2185 return -ESRCH; 2186 if (!dqopt->ops[qid->type]->get_next_id) 2187 return -ENOSYS; 2188 return dqopt->ops[qid->type]->get_next_id(sb, qid); 2189} 2190EXPORT_SYMBOL(dquot_get_next_id); 2191 2192/* 2193 * Definitions of diskquota operations. 2194 */ 2195const struct dquot_operations dquot_operations = { 2196 .write_dquot = dquot_commit, 2197 .acquire_dquot = dquot_acquire, 2198 .release_dquot = dquot_release, 2199 .mark_dirty = dquot_mark_dquot_dirty, 2200 .write_info = dquot_commit_info, 2201 .alloc_dquot = dquot_alloc, 2202 .destroy_dquot = dquot_destroy, 2203 .get_next_id = dquot_get_next_id, 2204}; 2205EXPORT_SYMBOL(dquot_operations); 2206 2207/* 2208 * Generic helper for ->open on filesystems supporting disk quotas. 
2209 */ 2210int dquot_file_open(struct inode *inode, struct file *file) 2211{ 2212 int error; 2213 2214 error = generic_file_open(inode, file); 2215 if (!error && (file->f_mode & FMODE_WRITE)) 2216 error = dquot_initialize(inode); 2217 return error; 2218} 2219EXPORT_SYMBOL(dquot_file_open); 2220 2221static void vfs_cleanup_quota_inode(struct super_block *sb, int type) 2222{ 2223 struct quota_info *dqopt = sb_dqopt(sb); 2224 struct inode *inode = dqopt->files[type]; 2225 2226 if (!inode) 2227 return; 2228 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { 2229 inode_lock(inode); 2230 inode->i_flags &= ~S_NOQUOTA; 2231 inode_unlock(inode); 2232 } 2233 dqopt->files[type] = NULL; 2234 iput(inode); 2235} 2236 2237/* 2238 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) 2239 */ 2240int dquot_disable(struct super_block *sb, int type, unsigned int flags) 2241{ 2242 int cnt; 2243 struct quota_info *dqopt = sb_dqopt(sb); 2244 2245 /* s_umount should be held in exclusive mode */ 2246 if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount))) 2247 up_read(&sb->s_umount); 2248 2249 /* Cannot turn off usage accounting without turning off limits, or 2250 * suspend quotas and simultaneously turn quotas off. */ 2251 if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED)) 2252 || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED | 2253 DQUOT_USAGE_ENABLED))) 2254 return -EINVAL; 2255 2256 /* 2257 * Skip everything if there's nothing to do. We have to do this because 2258 * sometimes we are called when fill_super() failed and calling 2259 * sync_fs() in such cases does no good. 
2260 */ 2261 if (!sb_any_quota_loaded(sb)) 2262 return 0; 2263 2264 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 2265 if (type != -1 && cnt != type) 2266 continue; 2267 if (!sb_has_quota_loaded(sb, cnt)) 2268 continue; 2269 2270 if (flags & DQUOT_SUSPENDED) { 2271 spin_lock(&dq_state_lock); 2272 dqopt->flags |= 2273 dquot_state_flag(DQUOT_SUSPENDED, cnt); 2274 spin_unlock(&dq_state_lock); 2275 } else { 2276 spin_lock(&dq_state_lock); 2277 dqopt->flags &= ~dquot_state_flag(flags, cnt); 2278 /* Turning off suspended quotas? */ 2279 if (!sb_has_quota_loaded(sb, cnt) && 2280 sb_has_quota_suspended(sb, cnt)) { 2281 dqopt->flags &= ~dquot_state_flag( 2282 DQUOT_SUSPENDED, cnt); 2283 spin_unlock(&dq_state_lock); 2284 vfs_cleanup_quota_inode(sb, cnt); 2285 continue; 2286 } 2287 spin_unlock(&dq_state_lock); 2288 } 2289 2290 /* We still have to keep quota loaded? */ 2291 if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED)) 2292 continue; 2293 2294 /* Note: these are blocking operations */ 2295 drop_dquot_ref(sb, cnt); 2296 invalidate_dquots(sb, cnt); 2297 /* 2298 * Now all dquots should be invalidated, all writes done so we 2299 * should be only users of the info. No locks needed. 2300 */ 2301 if (info_dirty(&dqopt->info[cnt])) 2302 sb->dq_op->write_info(sb, cnt); 2303 if (dqopt->ops[cnt]->free_file_info) 2304 dqopt->ops[cnt]->free_file_info(sb, cnt); 2305 put_quota_format(dqopt->info[cnt].dqi_format); 2306 dqopt->info[cnt].dqi_flags = 0; 2307 dqopt->info[cnt].dqi_igrace = 0; 2308 dqopt->info[cnt].dqi_bgrace = 0; 2309 dqopt->ops[cnt] = NULL; 2310 } 2311 2312 /* Skip syncing and setting flags if quota files are hidden */ 2313 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) 2314 goto put_inodes; 2315 2316 /* Sync the superblock so that buffers with quota data are written to 2317 * disk (and so userspace sees correct data afterwards). 
*/ 2318 if (sb->s_op->sync_fs) 2319 sb->s_op->sync_fs(sb, 1); 2320 sync_blockdev(sb->s_bdev); 2321 /* Now the quota files are just ordinary files and we can set the 2322 * inode flags back. Moreover we discard the pagecache so that 2323 * userspace sees the writes we did bypassing the pagecache. We 2324 * must also discard the blockdev buffers so that we see the 2325 * changes done by userspace on the next quotaon() */ 2326 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 2327 if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) { 2328 inode_lock(dqopt->files[cnt]); 2329 truncate_inode_pages(&dqopt->files[cnt]->i_data, 0); 2330 inode_unlock(dqopt->files[cnt]); 2331 } 2332 if (sb->s_bdev) 2333 invalidate_bdev(sb->s_bdev); 2334put_inodes: 2335 /* We are done when suspending quotas */ 2336 if (flags & DQUOT_SUSPENDED) 2337 return 0; 2338 2339 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 2340 if (!sb_has_quota_loaded(sb, cnt)) 2341 vfs_cleanup_quota_inode(sb, cnt); 2342 return 0; 2343} 2344EXPORT_SYMBOL(dquot_disable); 2345 2346int dquot_quota_off(struct super_block *sb, int type) 2347{ 2348 return dquot_disable(sb, type, 2349 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); 2350} 2351EXPORT_SYMBOL(dquot_quota_off); 2352 2353/* 2354 * Turn quotas on on a device 2355 */ 2356 2357static int vfs_setup_quota_inode(struct inode *inode, int type) 2358{ 2359 struct super_block *sb = inode->i_sb; 2360 struct quota_info *dqopt = sb_dqopt(sb); 2361 2362 if (is_bad_inode(inode)) 2363 return -EUCLEAN; 2364 if (!S_ISREG(inode->i_mode)) 2365 return -EACCES; 2366 if (IS_RDONLY(inode)) 2367 return -EROFS; 2368 if (sb_has_quota_loaded(sb, type)) 2369 return -EBUSY; 2370 2371 /* 2372 * Quota files should never be encrypted. They should be thought of as 2373 * filesystem metadata, not user data. 
New-style internal quota files 2374 * cannot be encrypted by users anyway, but old-style external quota 2375 * files could potentially be incorrectly created in an encrypted 2376 * directory, hence this explicit check. Some reasons why encrypted 2377 * quota files don't work include: (1) some filesystems that support 2378 * encryption don't handle it in their quota_read and quota_write, and 2379 * (2) cleaning up encrypted quota files at unmount would need special 2380 * consideration, as quota files are cleaned up later than user files. 2381 */ 2382 if (IS_ENCRYPTED(inode)) 2383 return -EINVAL; 2384 2385 dqopt->files[type] = igrab(inode); 2386 if (!dqopt->files[type]) 2387 return -EIO; 2388 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { 2389 /* We don't want quota and atime on quota files (deadlocks 2390 * possible) Also nobody should write to the file - we use 2391 * special IO operations which ignore the immutable bit. */ 2392 inode_lock(inode); 2393 inode->i_flags |= S_NOQUOTA; 2394 inode_unlock(inode); 2395 /* 2396 * When S_NOQUOTA is set, remove dquot references as no more 2397 * references can be added 2398 */ 2399 __dquot_drop(inode); 2400 } 2401 return 0; 2402} 2403 2404int dquot_load_quota_sb(struct super_block *sb, int type, int format_id, 2405 unsigned int flags) 2406{ 2407 struct quota_format_type *fmt = find_quota_format(format_id); 2408 struct quota_info *dqopt = sb_dqopt(sb); 2409 int error; 2410 2411 lockdep_assert_held_write(&sb->s_umount); 2412 2413 /* Just unsuspend quotas? */ 2414 if (WARN_ON_ONCE(flags & DQUOT_SUSPENDED)) 2415 return -EINVAL; 2416 2417 if (!fmt) 2418 return -ESRCH; 2419 if (!sb->dq_op || !sb->s_qcop || 2420 (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) { 2421 error = -EINVAL; 2422 goto out_fmt; 2423 } 2424 /* Filesystems outside of init_user_ns not yet supported */ 2425 if (sb->s_user_ns != &init_user_ns) { 2426 error = -EINVAL; 2427 goto out_fmt; 2428 } 2429 /* Usage always has to be set... 
*/ 2430 if (!(flags & DQUOT_USAGE_ENABLED)) { 2431 error = -EINVAL; 2432 goto out_fmt; 2433 } 2434 if (sb_has_quota_loaded(sb, type)) { 2435 error = -EBUSY; 2436 goto out_fmt; 2437 } 2438 2439 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { 2440 /* As we bypass the pagecache we must now flush all the 2441 * dirty data and invalidate caches so that kernel sees 2442 * changes from userspace. It is not enough to just flush 2443 * the quota file since if blocksize < pagesize, invalidation 2444 * of the cache could fail because of other unrelated dirty 2445 * data */ 2446 sync_filesystem(sb); 2447 invalidate_bdev(sb->s_bdev); 2448 } 2449 2450 error = -EINVAL; 2451 if (!fmt->qf_ops->check_quota_file(sb, type)) 2452 goto out_fmt; 2453 2454 dqopt->ops[type] = fmt->qf_ops; 2455 dqopt->info[type].dqi_format = fmt; 2456 dqopt->info[type].dqi_fmt_id = format_id; 2457 INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); 2458 error = dqopt->ops[type]->read_file_info(sb, type); 2459 if (error < 0) 2460 goto out_fmt; 2461 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) { 2462 spin_lock(&dq_data_lock); 2463 dqopt->info[type].dqi_flags |= DQF_SYS_FILE; 2464 spin_unlock(&dq_data_lock); 2465 } 2466 spin_lock(&dq_state_lock); 2467 dqopt->flags |= dquot_state_flag(flags, type); 2468 spin_unlock(&dq_state_lock); 2469 2470 error = add_dquot_ref(sb, type); 2471 if (error) 2472 dquot_disable(sb, type, 2473 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); 2474 2475 return error; 2476out_fmt: 2477 put_quota_format(fmt); 2478 2479 return error; 2480} 2481EXPORT_SYMBOL(dquot_load_quota_sb); 2482 2483/* 2484 * More powerful function for turning on quotas on given quota inode allowing 2485 * setting of individual quota flags 2486 */ 2487int dquot_load_quota_inode(struct inode *inode, int type, int format_id, 2488 unsigned int flags) 2489{ 2490 int err; 2491 2492 err = vfs_setup_quota_inode(inode, type); 2493 if (err < 0) 2494 return err; 2495 err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags); 2496 
if (err < 0) 2497 vfs_cleanup_quota_inode(inode->i_sb, type); 2498 return err; 2499} 2500EXPORT_SYMBOL(dquot_load_quota_inode); 2501 2502/* Reenable quotas on remount RW */ 2503int dquot_resume(struct super_block *sb, int type) 2504{ 2505 struct quota_info *dqopt = sb_dqopt(sb); 2506 int ret = 0, cnt; 2507 unsigned int flags; 2508 2509 /* s_umount should be held in exclusive mode */ 2510 if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount))) 2511 up_read(&sb->s_umount); 2512 2513 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 2514 if (type != -1 && cnt != type) 2515 continue; 2516 if (!sb_has_quota_suspended(sb, cnt)) 2517 continue; 2518 2519 spin_lock(&dq_state_lock); 2520 flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED | 2521 DQUOT_LIMITS_ENABLED, 2522 cnt); 2523 dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt); 2524 spin_unlock(&dq_state_lock); 2525 2526 flags = dquot_generic_flag(flags, cnt); 2527 ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id, 2528 flags); 2529 if (ret < 0) 2530 vfs_cleanup_quota_inode(sb, cnt); 2531 } 2532 2533 return ret; 2534} 2535EXPORT_SYMBOL(dquot_resume); 2536 2537int dquot_quota_on(struct super_block *sb, int type, int format_id, 2538 const struct path *path) 2539{ 2540 int error = security_quota_on(path->dentry); 2541 if (error) 2542 return error; 2543 /* Quota file not on the same filesystem? */ 2544 if (path->dentry->d_sb != sb) 2545 error = -EXDEV; 2546 else 2547 error = dquot_load_quota_inode(d_inode(path->dentry), type, 2548 format_id, DQUOT_USAGE_ENABLED | 2549 DQUOT_LIMITS_ENABLED); 2550 return error; 2551} 2552EXPORT_SYMBOL(dquot_quota_on); 2553 2554/* 2555 * This function is used when filesystem needs to initialize quotas 2556 * during mount time. 
2557 */ 2558int dquot_quota_on_mount(struct super_block *sb, char *qf_name, 2559 int format_id, int type) 2560{ 2561 struct dentry *dentry; 2562 int error; 2563 2564 dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name)); 2565 if (IS_ERR(dentry)) 2566 return PTR_ERR(dentry); 2567 2568 error = security_quota_on(dentry); 2569 if (!error) 2570 error = dquot_load_quota_inode(d_inode(dentry), type, format_id, 2571 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); 2572 2573 dput(dentry); 2574 return error; 2575} 2576EXPORT_SYMBOL(dquot_quota_on_mount); 2577 2578static int dquot_quota_enable(struct super_block *sb, unsigned int flags) 2579{ 2580 int ret; 2581 int type; 2582 struct quota_info *dqopt = sb_dqopt(sb); 2583 2584 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) 2585 return -ENOSYS; 2586 /* Accounting cannot be turned on while fs is mounted */ 2587 flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT); 2588 if (!flags) 2589 return -EINVAL; 2590 for (type = 0; type < MAXQUOTAS; type++) { 2591 if (!(flags & qtype_enforce_flag(type))) 2592 continue; 2593 /* Can't enforce without accounting */ 2594 if (!sb_has_quota_usage_enabled(sb, type)) { 2595 ret = -EINVAL; 2596 goto out_err; 2597 } 2598 if (sb_has_quota_limits_enabled(sb, type)) { 2599 ret = -EBUSY; 2600 goto out_err; 2601 } 2602 spin_lock(&dq_state_lock); 2603 dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type); 2604 spin_unlock(&dq_state_lock); 2605 } 2606 return 0; 2607out_err: 2608 /* Backout enforcement enablement we already did */ 2609 for (type--; type >= 0; type--) { 2610 if (flags & qtype_enforce_flag(type)) 2611 dquot_disable(sb, type, DQUOT_LIMITS_ENABLED); 2612 } 2613 /* Error code translation for better compatibility with XFS */ 2614 if (ret == -EBUSY) 2615 ret = -EEXIST; 2616 return ret; 2617} 2618 2619static int dquot_quota_disable(struct super_block *sb, unsigned int flags) 2620{ 2621 int ret; 2622 int type; 2623 struct quota_info *dqopt = sb_dqopt(sb); 2624 2625 
if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) 2626 return -ENOSYS; 2627 /* 2628 * We don't support turning off accounting via quotactl. In principle 2629 * quota infrastructure can do this but filesystems don't expect 2630 * userspace to be able to do it. 2631 */ 2632 if (flags & 2633 (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT)) 2634 return -EOPNOTSUPP; 2635 2636 /* Filter out limits not enabled */ 2637 for (type = 0; type < MAXQUOTAS; type++) 2638 if (!sb_has_quota_limits_enabled(sb, type)) 2639 flags &= ~qtype_enforce_flag(type); 2640 /* Nothing left? */ 2641 if (!flags) 2642 return -EEXIST; 2643 for (type = 0; type < MAXQUOTAS; type++) { 2644 if (flags & qtype_enforce_flag(type)) { 2645 ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED); 2646 if (ret < 0) 2647 goto out_err; 2648 } 2649 } 2650 return 0; 2651out_err: 2652 /* Backout enforcement disabling we already did */ 2653 for (type--; type >= 0; type--) { 2654 if (flags & qtype_enforce_flag(type)) { 2655 spin_lock(&dq_state_lock); 2656 dqopt->flags |= 2657 dquot_state_flag(DQUOT_LIMITS_ENABLED, type); 2658 spin_unlock(&dq_state_lock); 2659 } 2660 } 2661 return ret; 2662} 2663 2664/* Generic routine for getting common part of quota structure */ 2665static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di) 2666{ 2667 struct mem_dqblk *dm = &dquot->dq_dqb; 2668 2669 memset(di, 0, sizeof(*di)); 2670 spin_lock(&dquot->dq_dqb_lock); 2671 di->d_spc_hardlimit = dm->dqb_bhardlimit; 2672 di->d_spc_softlimit = dm->dqb_bsoftlimit; 2673 di->d_ino_hardlimit = dm->dqb_ihardlimit; 2674 di->d_ino_softlimit = dm->dqb_isoftlimit; 2675 di->d_space = dm->dqb_curspace + dm->dqb_rsvspace; 2676 di->d_ino_count = dm->dqb_curinodes; 2677 di->d_spc_timer = dm->dqb_btime; 2678 di->d_ino_timer = dm->dqb_itime; 2679 spin_unlock(&dquot->dq_dqb_lock); 2680} 2681 2682int dquot_get_dqblk(struct super_block *sb, struct kqid qid, 2683 struct qc_dqblk *di) 2684{ 2685 struct dquot *dquot; 2686 2687 dquot = dqget(sb, qid); 
/*
 * NOTE(review): this chunk begins in the middle of dquot_get_dqblk();
 * the function head lies above the visible region.
 */
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_dqblk);

/*
 * Get usage/limits for the id following *qid.  The filesystem's
 * ->get_next_id() callback advances *qid to the next existing id;
 * -ENOSYS is returned when the filesystem cannot iterate ids.
 * On success the dquot's data is copied into *di via do_get_dqblk().
 */
int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
		struct qc_dqblk *di)
{
	struct dquot *dquot;
	int err;

	if (!sb->dq_op->get_next_id)
		return -ENOSYS;
	err = sb->dq_op->get_next_id(sb, qid);
	if (err < 0)
		return err;
	dquot = dqget(sb, *qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_next_dqblk);

/* The set of qc_dqblk fields the generic VFS quota code knows how to set. */
#define VFS_QC_MASK \
	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
	 QC_SPC_TIMER | QC_INO_TIMER)

/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	/* Reject fields this generic implementation cannot set. */
	if (di->d_fieldmask & ~VFS_QC_MASK)
		return -EINVAL;

	/* New limits must be representable in the quota format on disk. */
	if (((di->d_fieldmask & QC_SPC_SOFT) &&
	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_SPC_HARD) &&
	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_INO_SOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
	    ((di->d_fieldmask & QC_INO_HARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
		return -ERANGE;

	spin_lock(&dquot->dq_dqb_lock);
	if (di->d_fieldmask & QC_SPACE) {
		/*
		 * d_space counts used + reserved space while dqb_curspace
		 * tracks only used space, so subtract the reservation.
		 */
		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_SOFT)
		dm->dqb_bsoftlimit = di->d_spc_softlimit;
	if (di->d_fieldmask & QC_SPC_HARD)
		dm->dqb_bhardlimit = di->d_spc_hardlimit;
	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_COUNT) {
		dm->dqb_curinodes = di->d_ino_count;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_SOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & QC_INO_HARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_TIMER) {
		dm->dqb_btime = di->d_spc_timer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_TIMER) {
		dm->dqb_itime = di->d_ino_timer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	/*
	 * Re-evaluate block grace state: back under the soft limit (or no
	 * soft limit) clears the grace timer and the exceeded flag; over
	 * the limit starts the grace period unless the caller supplied an
	 * explicit timer above.
	 */
	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
	}
	/* Same re-evaluation for the inode grace state. */
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes <= dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_INO_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
	}
	/* A dquot with no limits at all is marked as fake. */
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	mark_dquot_dirty(dquot);

	return 0;
}

/*
 * Set usage/limits for the dquot identified by qid.  Looks up (or loads)
 * the dquot, delegates validation and the actual update to do_set_dqblk()
 * and drops the reference.  Returns 0 or a negative errno.
 */
int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
		  struct qc_dqblk *di)
{
	struct dquot *dquot;
	int rc;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot)) {
		rc = PTR_ERR(dquot);
		goto out;
	}
	rc = do_set_dqblk(dquot, di);
	dqput(dquot);
out:
	return rc;
}
EXPORT_SYMBOL(dquot_set_dqblk);

/* Generic routine for getting common part of quota file information */
int dquot_get_state(struct super_block *sb, struct qc_state *state)
{
	struct mem_dqinfo *mi;
	struct qc_type_state *tstate;
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	memset(state, 0, sizeof(*state));
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!sb_has_quota_active(sb, type))
			continue;
		tstate = state->s_state + type;
		mi = sb_dqopt(sb)->info + type;
		tstate->flags = QCI_ACCT_ENABLED;
		/* dq_data_lock guards dqi_flags and the grace times. */
		spin_lock(&dq_data_lock);
		if (mi->dqi_flags & DQF_SYS_FILE)
			tstate->flags |= QCI_SYSFILE;
		if (mi->dqi_flags & DQF_ROOT_SQUASH)
			tstate->flags |= QCI_ROOT_SQUASH;
		if (sb_has_quota_limits_enabled(sb, type))
			tstate->flags |= QCI_LIMITS_ENFORCED;
		tstate->spc_timelimit = mi->dqi_bgrace;
		tstate->ino_timelimit = mi->dqi_igrace;
		/* Report the quota file's inode/size when one is in use. */
		if (dqopt->files[type]) {
			tstate->ino = dqopt->files[type]->i_ino;
			tstate->blocks = dqopt->files[type]->i_blocks;
		}
		tstate->nextents = 1;	/* We don't know... */
		spin_unlock(&dq_data_lock);
	}
	return 0;
}
EXPORT_SYMBOL(dquot_get_state);

/* Generic routine for setting common part of quota file information */
int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
{
	struct mem_dqinfo *mi;

	/* Warning counts and RT-space grace are not settable here. */
	if ((ii->i_fieldmask & QC_WARNS_MASK) ||
	    (ii->i_fieldmask & QC_RT_SPC_TIMER))
		return -EINVAL;
	if (!sb_has_quota_active(sb, type))
		return -ESRCH;
	mi = sb_dqopt(sb)->info + type;
	if (ii->i_fieldmask & QC_FLAGS) {
		/* Root squashing is only valid for the old VFS format. */
		if ((ii->i_flags & QCI_ROOT_SQUASH &&
		     mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
			return -EINVAL;
	}
	spin_lock(&dq_data_lock);
	if (ii->i_fieldmask & QC_SPC_TIMER)
		mi->dqi_bgrace = ii->i_spc_timelimit;
	if (ii->i_fieldmask & QC_INO_TIMER)
		mi->dqi_igrace = ii->i_ino_timelimit;
	if (ii->i_fieldmask & QC_FLAGS) {
		if (ii->i_flags & QCI_ROOT_SQUASH)
			mi->dqi_flags |= DQF_ROOT_SQUASH;
		else
			mi->dqi_flags &= ~DQF_ROOT_SQUASH;
	}
	spin_unlock(&dq_data_lock);
	mark_info_dirty(sb, type);
	/* Force write to disk */
	return sb->dq_op->write_info(sb, type);
}
EXPORT_SYMBOL(dquot_set_dqinfo);

/*
 * quotactl operations for filesystems keeping quota information in a
 * system file, wired to the generic dquot implementations above.
 */
const struct quotactl_ops dquot_quotactl_sysfile_ops = {
	.quota_enable	= dquot_quota_enable,
	.quota_disable	= dquot_quota_disable,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);

/*
 * Sysctl handler for the /proc quota statistics: sums the per-cpu counter
 * for the entry being read into the global dqstats.stat[] slot and lets
 * proc_doulongvec_minmax() format it.  Which counter is meant is derived
 * from the offset of table->data within dqstats.stat[].
 */
static int do_proc_dqstats(struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int type = (unsigned long *)table->data - dqstats.stat;
	s64 value = percpu_counter_sum(&dqstats.counter[type]);

	/*
	 * Filter negative values for non-monotonic counters
	 * (NOTE(review): presumably percpu sums can transiently go
	 * negative for these two — confirm against percpu_counter docs)
	 */
	if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
			  type == DQST_FREE_DQUOTS))
		value = 0;

	/* Update global table */
	dqstats.stat[type] = value;
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

/*
 * Sysctl table exposing the quota statistics (registered under "fs/quota"
 * in dquot_init()).  All counters are read-only; the "warnings" toggle is
 * writable when CONFIG_PRINT_QUOTA_WARNING is set.
 */
static struct ctl_table fs_dqstats_table[] = {
	{
		.procname	= "lookups",
		.data		= &dqstats.stat[DQST_LOOKUPS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "drops",
		.data		= &dqstats.stat[DQST_DROPS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "reads",
		.data		= &dqstats.stat[DQST_READS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "writes",
		.data		= &dqstats.stat[DQST_WRITES],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "cache_hits",
		.data		= &dqstats.stat[DQST_CACHE_HITS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "allocated_dquots",
		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "free_dquots",
		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "syncs",
		.data		= &dqstats.stat[DQST_SYNCS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	{
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
};

/*
 * Boot-time initialization of the quota subsystem: registers the sysctl
 * table, creates the dquot slab cache, the dquot hash table, the per-cpu
 * statistics counters and the dquot cache shrinker.  Any allocation
 * failure here panics — the quota core cannot operate without them.
 */
static int __init dquot_init(void)
{
	int i, ret;
	unsigned long nr_hash, order;
	struct shrinker *dqcache_shrinker;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_init("fs/quota", fs_dqstats_table);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_PANIC),
			NULL);

	/* order 0: the hash table occupies a single page. */
	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
		ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
		if (ret)
			panic("Cannot create dquot stat counters");
	}

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = ilog2(nr_hash);

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));

	dqcache_shrinker = shrinker_alloc(0, "dquota-cache");
	if (!dqcache_shrinker)
		panic("Cannot allocate dquot shrinker");

	dqcache_shrinker->count_objects = dqcache_shrink_count;
	dqcache_shrinker->scan_objects = dqcache_shrink_scan;

	shrinker_register(dqcache_shrinker);

	return 0;
}
fs_initcall(dquot_init);