// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_health.h"

STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);

/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it must not already be associated with
 * any transaction.
 */
void
xfs_trans_dqjoin(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(dqp->q_logitem.qli_dquot == dqp);

	/*
	 * Add the dquot log item to the transaction.
	 */
	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
}

/*
 * This is called to mark the dquot as needing to be logged when the
 * transaction is committed.  The dquot must already be associated with
 * the given transaction.
 * Note that it marks the entire transaction as dirty.  In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty.  However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim.  Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/* Upgrade the dquot to bigtime format if possible. */
	if (dqp->q_id != 0 &&
	    xfs_has_bigtime(tp->t_mountp) &&
	    !(dqp->q_type & XFS_DQTYPE_BIGTIME))
		dqp->q_type |= XFS_DQTYPE_BIGTIME;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
}

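/*
 * Example (illustrative sketch, not code called from this file): a caller
 * that wants to modify a dquot under an existing transaction follows the
 * lock/join/modify/log pattern, roughly as xfs_qm_scall_setqlim does:
 *
 *	xfs_dqlock(dqp);		// take the dquot lock
 *	xfs_trans_dqjoin(tp, dqp);	// attach the dquot to tp
 *	dqp->q_blk.hardlimit = new;	// modify incore fields ("new" is a
 *					// hypothetical value for this sketch)
 *	xfs_trans_log_dquot(tp, dqp);	// mark the item (and tp) dirty
 *
 * The transaction commit then takes care of writing the dquot back.
 */
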
/*
 * Carry forward whatever is left of the quota block reservation to the
 * new transaction.
 */
void
xfs_trans_dup_dqinfo(
	struct xfs_trans	*otp,
	struct xfs_trans	*ntp)
{
	struct xfs_dqtrx	*oq, *nq;
	int			i, j;
	struct xfs_dqtrx	*oqa, *nqa;
	uint64_t		blk_res_used;

	if (!otp->t_dqinfo)
		return;

	xfs_trans_alloc_dqinfo(ntp);

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		oqa = otp->t_dqinfo->dqs[j];
		nqa = ntp->t_dqinfo->dqs[j];
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			blk_res_used = 0;

			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
				blk_res_used = oq->qt_bcount_delta;

			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;

			/*
			 * Transfer whatever is left of the reservations.
			 */
			nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
			oq->qt_blk_res = blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
				oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;
		}
	}
}

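/*
 * Worked example (assumed numbers, for illustration only): if the old
 * transaction reserved qt_blk_res = 100 blocks and has so far consumed
 * qt_bcount_delta = 30 of them, the old transaction keeps the 30 used
 * blocks (its qt_blk_res becomes 30, matching its delta) and the new
 * transaction inherits the remaining 70 as its own reservation, with
 * all of its deltas starting at zero.
 */
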
#ifdef CONFIG_XFS_LIVE_HOOKS
/*
 * Use a static key here to reduce the overhead of quota live updates.  If the
 * compiler supports jump labels, the static branch will be replaced by a nop
 * sled when there are no hook users.  Online fsck is currently the only
 * caller, so this is a reasonable tradeoff.
 *
 * Note: Patching the kernel code requires taking the cpu hotplug lock.  Other
 * parts of the kernel allocate memory with that lock held, which means that
 * XFS callers cannot hold any locks that might be used by memory reclaim or
 * writeback when calling the static_branch_{inc,dec} functions.
 */
DEFINE_STATIC_XFS_HOOK_SWITCH(xfs_dqtrx_hooks_switch);

void
xfs_dqtrx_hook_disable(void)
{
	xfs_hooks_switch_off(&xfs_dqtrx_hooks_switch);
}

void
xfs_dqtrx_hook_enable(void)
{
	xfs_hooks_switch_on(&xfs_dqtrx_hooks_switch);
}

/* Schedule a transactional dquot update on behalf of an inode. */
void
xfs_trans_mod_ino_dquot(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	struct xfs_dquot		*dqp,
	unsigned int			field,
	int64_t				delta)
{
	xfs_trans_mod_dquot(tp, dqp, field, delta);

	if (xfs_hooks_switched_on(&xfs_dqtrx_hooks_switch)) {
		struct xfs_mod_ino_dqtrx_params	p = {
			.tx_id		= (uintptr_t)tp,
			.ino		= ip->i_ino,
			.q_type		= xfs_dquot_type(dqp),
			.q_id		= dqp->q_id,
			.delta		= delta
		};
		struct xfs_quotainfo	*qi = tp->t_mountp->m_quotainfo;

		xfs_hooks_call(&qi->qi_mod_ino_dqtrx_hooks, field, &p);
	}
}

/* Call the specified functions during a dquot counter update. */
int
xfs_dqtrx_hook_add(
	struct xfs_quotainfo	*qi,
	struct xfs_dqtrx_hook	*hook)
{
	int			error;

	/*
	 * Transactional dquot updates first call the mod hook when changes
	 * are attached to the transaction and then call the apply hook when
	 * those changes are committed (or canceled).
	 *
	 * The apply hook must be installed before the mod hook so that we
	 * never fail to catch the end of a quota update sequence.
	 */
	error = xfs_hooks_add(&qi->qi_apply_dqtrx_hooks, &hook->apply_hook);
	if (error)
		goto out;

	error = xfs_hooks_add(&qi->qi_mod_ino_dqtrx_hooks, &hook->mod_hook);
	if (error)
		goto out_apply;

	return 0;

out_apply:
	xfs_hooks_del(&qi->qi_apply_dqtrx_hooks, &hook->apply_hook);
out:
	return error;
}

/* Stop calling the specified function during a dquot counter update. */
void
xfs_dqtrx_hook_del(
	struct xfs_quotainfo	*qi,
	struct xfs_dqtrx_hook	*hook)
{
	/*
	 * The mod hook must be removed before the apply hook to avoid handing
	 * the hook consumer an incomplete update.  No hooks should be running
	 * after these functions return.
	 */
	xfs_hooks_del(&qi->qi_mod_ino_dqtrx_hooks, &hook->mod_hook);
	xfs_hooks_del(&qi->qi_apply_dqtrx_hooks, &hook->apply_hook);
}

/* Configure dquot update hook functions. */
void
xfs_dqtrx_hook_setup(
	struct xfs_dqtrx_hook	*hook,
	notifier_fn_t		mod_fn,
	notifier_fn_t		apply_fn)
{
	xfs_hook_setup(&hook->mod_hook, mod_fn);
	xfs_hook_setup(&hook->apply_hook, apply_fn);
}
#endif /* CONFIG_XFS_LIVE_HOOKS */

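/*
 * Example (illustrative sketch; my_mod_fn and my_apply_fn are hypothetical
 * notifier functions): a live-update consumer such as online fsck wires up
 * both notifiers and then registers them against a quotainfo:
 *
 *	struct xfs_dqtrx_hook	hook;
 *	int			error;
 *
 *	xfs_dqtrx_hook_setup(&hook, my_mod_fn, my_apply_fn);
 *	error = xfs_dqtrx_hook_add(qi, &hook);	// installs apply, then mod
 *	...
 *	xfs_dqtrx_hook_del(qi, &hook);		// removes mod, then apply
 *
 * The asymmetric add/del ordering mirrors the comments above, so that a
 * consumer never catches the start of an update sequence without also
 * being able to catch its end.
 */
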
/*
 * Wrap around mod_dquot to account for the user, group and project quotas.
 */
void
xfs_trans_mod_dquot_byino(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		field,
	int64_t		delta)
{
	xfs_mount_t	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_ON(mp) ||
	    xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return;

	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
		xfs_trans_mod_ino_dquot(tp, ip, ip->i_udquot, field, delta);
	if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
		xfs_trans_mod_ino_dquot(tp, ip, ip->i_gdquot, field, delta);
	if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
		xfs_trans_mod_ino_dquot(tp, ip, ip->i_pdquot, field, delta);
}

STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	int			i;
	struct xfs_dqtrx	*qa;

	switch (xfs_dquot_type(dqp)) {
	case XFS_DQTYPE_USER:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
		break;
	case XFS_DQTYPE_GROUP:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
		break;
	case XFS_DQTYPE_PROJ:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
		break;
	default:
		return NULL;
	}

	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
		if (qa[i].qt_dquot == NULL ||
		    qa[i].qt_dquot == dqp)
			return &qa[i];
	}

	return NULL;
}

/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	uint			field,
	int64_t			delta)
{
	struct xfs_dqtrx	*qtrx;

	ASSERT(tp);
	ASSERT(XFS_IS_QUOTA_ON(tp->t_mountp));

	if (!delta)
		return;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	ASSERT(qtrx);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;

	trace_xfs_trans_mod_dquot_before(qtrx);
	trace_xfs_trans_mod_dquot(tp, dqp, field, delta);

	switch (field) {
	/* regular disk blk reservation */
	case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += delta;
		break;

	/* inode reservation */
	case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += delta;
		break;

	/* disk blocks used. */
	case XFS_TRANS_DQ_BCOUNT:
		qtrx->qt_bcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

	/* Inode Count */
	case XFS_TRANS_DQ_ICOUNT:
		if (qtrx->qt_ino_res && delta > 0) {
			qtrx->qt_ino_res_used += delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

	/* rtblk reservation */
	case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += delta;
		break;

	/* rtblk count */
	case XFS_TRANS_DQ_RTBCOUNT:
		if (qtrx->qt_rtblk_res && delta > 0) {
			qtrx->qt_rtblk_res_used += delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;

	default:
		ASSERT(0);
	}

	trace_xfs_trans_mod_dquot_after(qtrx);
}

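/*
 * Example (illustrative sketch): a transaction that allocates 8 blocks
 * against a dquot typically records a reservation first and the actual
 * usage later, e.g.:
 *
 *	// reserve 8 blocks against the dquot up front
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_BLKS, 8);
 *	// later, count the blocks that were actually allocated
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_BCOUNT, 8);
 *
 * At commit time, xfs_trans_apply_dquot_deltas() below reconciles the
 * reservation against the recorded deltas.
 */
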
/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified.  We know that the
 * highest number of dquots of one type - usr, grp and prj - involved in a
 * transaction is 2 so we don't need to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
	struct xfs_trans	*tp,
	struct xfs_dqtrx	*q)
{
	ASSERT(q[0].qt_dquot != NULL);
	if (q[1].qt_dquot == NULL) {
		xfs_dqlock(q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
	} else {
		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[1].qt_dquot);
	}
}

/* Apply dqtrx changes to the quota reservation counters. */
static inline void
xfs_apply_quota_reservation_deltas(
	struct xfs_dquot_res	*res,
	uint64_t		reserved,
	int64_t			res_used,
	int64_t			count_delta)
{
	if (reserved != 0) {
		/*
		 * Subtle math here: If reserved > res_used (the normal case),
		 * we're simply subtracting the unused transaction quota
		 * reservation from the dquot reservation.
		 *
		 * If, however, res_used > reserved, then we have allocated
		 * more quota blocks than were reserved for the transaction.
		 * We must add that excess to the dquot reservation since it
		 * tracks (usage + resv) and by definition we didn't reserve
		 * that excess.
		 */
		res->reserved -= abs(reserved - res_used);
	} else if (count_delta != 0) {
		/*
		 * These blks were never reserved, either inside a transaction
		 * or outside one (in a delayed allocation). Also, this isn't
		 * always a negative number since we sometimes deliberately
		 * skip quota reservations.
		 */
		res->reserved += count_delta;
	}
}

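/*
 * Worked example (assumed numbers, for illustration only): with a dquot
 * reservation of res->reserved = 50 and a transaction that reserved 10
 * blocks but used only res_used = 4, the unused 6 blocks are returned
 * and res->reserved drops to 44.  If the transaction instead used 12
 * blocks (2 more than it reserved), the arithmetic above adds the
 * excess, leaving res->reserved at 52, so (usage + resv) stays accurate.
 */
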
#ifdef CONFIG_XFS_LIVE_HOOKS
/* Call downstream hooks now that it's time to apply dquot deltas. */
static inline void
xfs_trans_apply_dquot_deltas_hook(
	struct xfs_trans		*tp,
	struct xfs_dquot		*dqp)
{
	if (xfs_hooks_switched_on(&xfs_dqtrx_hooks_switch)) {
		struct xfs_apply_dqtrx_params	p = {
			.tx_id		= (uintptr_t)tp,
			.q_type		= xfs_dquot_type(dqp),
			.q_id		= dqp->q_id,
		};
		struct xfs_quotainfo	*qi = tp->t_mountp->m_quotainfo;

		xfs_hooks_call(&qi->qi_apply_dqtrx_hooks,
				XFS_APPLY_DQTRX_COMMIT, &p);
	}
}
#else
# define xfs_trans_apply_dquot_deltas_hook(tp, dqp)	((void)0)
#endif /* CONFIG_XFS_LIVE_HOOKS */

/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go through all the dquots belonging to this transaction and modify the
 * INCORE dquots to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * The dquots are still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	int64_t			totalbdelta;
	int64_t			totalrtbdelta;

	if (!tp->t_dqinfo)
		return;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];
		if (qa[0].qt_dquot == NULL)
			continue;

		/*
		 * Lock all of the dquots and join them to the transaction.
		 */
		xfs_trans_dqlockedjoin(tp, qa);

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			uint64_t	blk_res_used;

			qtrx = &qa[i];
			/*
			 * The array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			ASSERT(XFS_DQ_IS_LOCKED(dqp));

			xfs_trans_apply_dquot_deltas_hook(tp, dqp);

			/*
			 * Adjust the actual number of blocks used.
			 *
			 * Sometimes we intentionally don't make a block quota
			 * reservation, to be fair to users when the amount is
			 * small.  Delayed allocs, on the other hand, do make
			 * reservations, but outside of a transaction, so we
			 * have no idea how much was really reserved.
			 * Here we've accumulated both delayed allocation blks
			 * and non-delay blks.  The assumption is that the
			 * delayed ones are always reserved (outside of a
			 * transaction), and the others may or may not have
			 * quota reservations.
			 */
			totalbdelta = qtrx->qt_bcount_delta +
				qtrx->qt_delbcnt_delta;
			totalrtbdelta = qtrx->qt_rtbcount_delta +
				qtrx->qt_delrtb_delta;

			if (totalbdelta != 0 || totalrtbdelta != 0 ||
			    qtrx->qt_icount_delta != 0) {
				trace_xfs_trans_apply_dquot_deltas_before(dqp);
				trace_xfs_trans_apply_dquot_deltas(qtrx);
			}

#ifdef DEBUG
			if (totalbdelta < 0)
				ASSERT(dqp->q_blk.count >= -totalbdelta);

			if (totalrtbdelta < 0)
				ASSERT(dqp->q_rtb.count >= -totalrtbdelta);

			if (qtrx->qt_icount_delta < 0)
				ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta);
#endif
			if (totalbdelta)
				dqp->q_blk.count += totalbdelta;

			if (qtrx->qt_icount_delta)
				dqp->q_ino.count += qtrx->qt_icount_delta;

			if (totalrtbdelta)
				dqp->q_rtb.count += totalrtbdelta;

			if (totalbdelta != 0 || totalrtbdelta != 0 ||
			    qtrx->qt_icount_delta != 0)
				trace_xfs_trans_apply_dquot_deltas_after(dqp);

			/*
			 * Get any default limits in use.
			 * Start/reset the timer(s) if needed.
			 */
			if (dqp->q_id) {
				xfs_qm_adjust_dqlimits(dqp);
				xfs_qm_adjust_dqtimers(dqp);
			}

			dqp->q_flags |= XFS_DQFLAG_DIRTY;
			/*
			 * Add this to the list of items to get logged.
			 */
			xfs_trans_log_dquot(tp, dqp);
			/*
			 * Take off what's left of the original reservation.
			 * In case of delayed allocations, there's no
			 * reservation that a transaction structure knows of.
			 */
			blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta);
			xfs_apply_quota_reservation_deltas(&dqp->q_blk,
					qtrx->qt_blk_res, blk_res_used,
					qtrx->qt_bcount_delta);

			/*
			 * Adjust the RT reservation.
			 */
			xfs_apply_quota_reservation_deltas(&dqp->q_rtb,
					qtrx->qt_rtblk_res,
					qtrx->qt_rtblk_res_used,
					qtrx->qt_rtbcount_delta);

			/*
			 * Adjust the inode reservation.
			 */
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
			xfs_apply_quota_reservation_deltas(&dqp->q_ino,
					qtrx->qt_ino_res,
					qtrx->qt_ino_res_used,
					qtrx->qt_icount_delta);

			ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
			ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
			ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
		}
	}
}

#ifdef CONFIG_XFS_LIVE_HOOKS
/* Call downstream hooks now that it's time to cancel dquot deltas. */
static inline void
xfs_trans_unreserve_and_mod_dquots_hook(
	struct xfs_trans		*tp,
	struct xfs_dquot		*dqp)
{
	if (xfs_hooks_switched_on(&xfs_dqtrx_hooks_switch)) {
		struct xfs_apply_dqtrx_params	p = {
			.tx_id		= (uintptr_t)tp,
			.q_type		= xfs_dquot_type(dqp),
			.q_id		= dqp->q_id,
		};
		struct xfs_quotainfo	*qi = tp->t_mountp->m_quotainfo;

		xfs_hooks_call(&qi->qi_apply_dqtrx_hooks,
				XFS_APPLY_DQTRX_UNRESERVE, &p);
	}
}
#else
# define xfs_trans_unreserve_and_mod_dquots_hook(tp, dqp)	((void)0)
#endif /* CONFIG_XFS_LIVE_HOOKS */

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (i.e. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	bool			locked;

	if (!tp->t_dqinfo)
		return;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			xfs_trans_unreserve_and_mod_dquots_hook(tp, dqp);

			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 */
			locked = false;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = true;
				dqp->q_blk.reserved -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_ino.reserved -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_rtb.reserved -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);
		}
	}
}

STATIC void
xfs_quota_warn(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int			type)
{
	enum quota_type		qtype;

	switch (xfs_dquot_type(dqp)) {
	case XFS_DQTYPE_PROJ:
		qtype = PRJQUOTA;
		break;
	case XFS_DQTYPE_USER:
		qtype = USRQUOTA;
		break;
	case XFS_DQTYPE_GROUP:
		qtype = GRPQUOTA;
		break;
	default:
		return;
	}

	quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
			   mp->m_super->s_dev, type);
}

/*
 * Decide if we can make an additional reservation against a quota resource.
 * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal.
 *
 * Note that we assume that the numeric difference between the inode and block
 * warning codes will always be 3, since that's userspace ABI now.  We never
 * decrease the quota reservation here (a delta <= 0 returns QUOTA_NL_NOWARN
 * immediately), so the *BELOW warning codes are irrelevant.
 */
static inline int
xfs_dqresv_check(
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim,
	int64_t			delta,
	bool			*fatal)
{
	xfs_qcnt_t		hardlimit = res->hardlimit;
	xfs_qcnt_t		softlimit = res->softlimit;
	xfs_qcnt_t		total_count = res->reserved + delta;

	BUILD_BUG_ON(QUOTA_NL_BHARDWARN     != QUOTA_NL_IHARDWARN + 3);
	BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3);
	BUILD_BUG_ON(QUOTA_NL_BSOFTWARN     != QUOTA_NL_ISOFTWARN + 3);

	*fatal = false;
	if (delta <= 0)
		return QUOTA_NL_NOWARN;

	if (!hardlimit)
		hardlimit = qlim->hard;
	if (!softlimit)
		softlimit = qlim->soft;

	if (hardlimit && total_count > hardlimit) {
		*fatal = true;
		return QUOTA_NL_IHARDWARN;
	}

	if (softlimit && total_count > softlimit) {
		time64_t	now = ktime_get_real_seconds();

		if (res->timer != 0 && now > res->timer) {
			*fatal = true;
			return QUOTA_NL_ISOFTLONGWARN;
		}

		return QUOTA_NL_ISOFTWARN;
	}

	return QUOTA_NL_NOWARN;
}

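/*
 * Worked example (assumed numbers, for illustration only): with a soft
 * limit of 100 blocks, a hard limit of 120, and res->reserved = 95, a
 * delta of 10 pushes total_count to 105.  That is over the soft limit,
 * so the caller gets QUOTA_NL_ISOFTWARN (non-fatal) unless the grace
 * timer has already expired, in which case QUOTA_NL_ISOFTLONGWARN is
 * returned and marked fatal.  A delta of 30 would exceed the hard limit
 * and fail with QUOTA_NL_IHARDWARN immediately.
 */
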
/*
 * This reserves disk blocks and inodes against a dquot.
 * The dquot is locked here; flags indicate whether the block reservation
 * is for RT or regular blocks.
 * Sending in the XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot_res	*blkres;
	struct xfs_quota_limits	*qlim;

	xfs_dqlock(dqp);

	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));

	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		blkres = &dqp->q_blk;
		qlim = &defq->blk;
	} else {
		blkres = &dqp->q_rtb;
		qlim = &defq->rtb;
	}

	if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&
	    xfs_dquot_is_enforced(dqp)) {
		int		quota_nl;
		bool		fatal;

		/*
		 * The dquot is locked already.  See if we'd go over the
		 * hardlimit or exceed the timelimit if we were to reserve
		 * resources.
		 */
		quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			/*
			 * Quota block warning codes are 3 more than the inode
			 * codes, which we check above.
			 */
			xfs_quota_warn(mp, dqp, quota_nl + 3);
			if (fatal)
				goto error_return;
		}

		quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos,
				&fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			xfs_quota_warn(mp, dqp, quota_nl);
			if (fatal)
				goto error_return;
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_blk.reserved = q_blk.count + resv
	 */
	blkres->reserved += (xfs_qcnt_t)nblks;
	dqp->q_ino.reserved += (xfs_qcnt_t)ninos;

	/*
	 * Note the reservation amount in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		xfs_trans_mod_dquot(tp, dqp, flags & XFS_QMOPT_RESBLK_MASK,
				    nblks);
		xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_INOS, ninos);
	}

	if (XFS_IS_CORRUPT(mp, dqp->q_blk.reserved < dqp->q_blk.count) ||
	    XFS_IS_CORRUPT(mp, dqp->q_rtb.reserved < dqp->q_rtb.count) ||
	    XFS_IS_CORRUPT(mp, dqp->q_ino.reserved < dqp->q_ino.count))
		goto error_corrupt;

	xfs_dqunlock(dqp);
	return 0;

error_return:
	xfs_dqunlock(dqp);
	if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
		return -ENOSPC;
	return -EDQUOT;
error_corrupt:
	xfs_dqunlock(dqp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
	return -EFSCORRUPTED;
}

/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and
 * project quotas is important, because this follows an all-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *	   XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 */
int
xfs_trans_reserve_quota_bydquots(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	int		error;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

	if (udqp) {
		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
		if (error)
			return error;
	}

	if (gdqp) {
		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
		if (error)
			goto unwind_usr;
	}

	if (pdqp) {
		error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
		if (error)
			goto unwind_grp;
	}

	/*
	 * Didn't change anything critical, so no need to log.
	 */
	return 0;

unwind_grp:
	flags |= XFS_QMOPT_FORCE_RES;
	if (gdqp)
		xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
	flags |= XFS_QMOPT_FORCE_RES;
	if (udqp)
		xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
	return error;
}

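/*
 * Example (illustrative sketch): the unwind labels above implement the
 * all-or-nothing behavior.  A caller reserving 16 blocks against all
 * three dquot types either gets all three reservations or none:
 *
 *	error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, pdqp,
 *			16, 0, XFS_QMOPT_RES_REGBLKS);
 *	if (error)
 *		return error;	// nothing was left reserved
 *
 * Note that the unwind path sets XFS_QMOPT_FORCE_RES so that backing
 * out a reservation can never itself fail a quota check.
 */
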
/*
 * Lock the dquots and change the reservations if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode passed in must hold the ILOCK.
 */
int
xfs_trans_reserve_quota_nblks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int64_t			dblocks,
	int64_t			rblocks,
	bool			force)
{
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		qflags = 0;
	int			error;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	if (force)
		qflags |= XFS_QMOPT_FORCE_RES;

	/* Reserve data device quota against the inode's dquots. */
	error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
			ip->i_gdquot, ip->i_pdquot, dblocks, 0,
			XFS_QMOPT_RES_REGBLKS | qflags);
	if (error)
		return error;

	/* Do the same but for realtime blocks. */
	error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
			ip->i_gdquot, ip->i_pdquot, rblocks, 0,
			XFS_QMOPT_RES_RTBLKS | qflags);
	if (error) {
		xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot, -dblocks, 0,
				XFS_QMOPT_RES_REGBLKS);
		return error;
	}

	return 0;
}

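/*
 * Example (illustrative sketch, hypothetical sizes): a write path that
 * needs 8 data device blocks and no realtime blocks would call:
 *
 *	error = xfs_trans_reserve_quota_nblks(tp, ip, 8, 0, false);
 *
 * Passing force == true sets XFS_QMOPT_FORCE_RES, which skips limit
 * enforcement entirely; the reservation is recorded regardless.
 */
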
/* Change the quota reservations for an inode creation activity. */
int
xfs_trans_reserve_quota_icreate(
	struct xfs_trans	*tp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	int64_t			dblocks)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	return xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, pdqp,
			dblocks, 1, XFS_QMOPT_RES_REGBLKS);
}

STATIC void
xfs_trans_alloc_dqinfo(
	xfs_trans_t	*tp)
{
	tp->t_dqinfo = kmem_cache_zalloc(xfs_dqtrx_cache,
					 GFP_KERNEL | __GFP_NOFAIL);
}

void
xfs_trans_free_dqinfo(
	xfs_trans_t	*tp)
{
	if (!tp->t_dqinfo)
		return;
	kmem_cache_free(xfs_dqtrx_cache, tp->t_dqinfo);
	tp->t_dqinfo = NULL;
}