Lines Matching defs:dqp

41 STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
55 int (*execute)(struct xfs_dquot *dqp, void *data),
84 struct xfs_dquot *dqp = batch[i];
86 next_index = dqp->q_id + 1;
123 struct xfs_dquot *dqp,
126 struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
129 xfs_dqlock(dqp);
130 if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
133 dqp->q_flags |= XFS_DQFLAG_FREEING;
135 xfs_dqflock(dqp);
142 if (XFS_DQ_IS_DIRTY(dqp)) {
149 error = xfs_qm_dqflush(dqp, &bp);
154 dqp->q_flags &= ~XFS_DQFLAG_FREEING;
157 xfs_dqflock(dqp);
160 ASSERT(atomic_read(&dqp->q_pincount) == 0);
161 ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
162 !test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
164 xfs_dqfunlock(dqp);
165 xfs_dqunlock(dqp);
167 radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
174 ASSERT(!list_empty(&dqp->q_lru));
175 list_lru_del_obj(&qi->qi_lru, &dqp->q_lru);
176 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
178 xfs_qm_dqdestroy(dqp);
182 xfs_dqunlock(dqp);
255 struct xfs_dquot *dqp;
266 dqp = *IO_idqpp;
267 if (dqp) {
268 trace_xfs_dqattach_found(dqp);
278 error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
282 trace_xfs_dqattach_get(dqp);
288 *IO_idqpp = dqp;
289 xfs_dqunlock(dqp);
419 struct xfs_dquot *dqp = container_of(item,
423 if (!xfs_dqlock_nowait(dqp))
431 if (dqp->q_flags & XFS_DQFLAG_FREEING)
438 if (dqp->q_nrefs) {
439 xfs_dqunlock(dqp);
440 XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
442 trace_xfs_dqreclaim_want(dqp);
443 list_lru_isolate(lru, &dqp->q_lru);
444 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
453 if (!xfs_dqflock_nowait(dqp))
456 if (XFS_DQ_IS_DIRTY(dqp)) {
460 trace_xfs_dqreclaim_dirty(dqp);
465 error = xfs_qm_dqflush(dqp, &bp);
473 xfs_dqfunlock(dqp);
478 dqp->q_flags |= XFS_DQFLAG_FREEING;
479 xfs_dqunlock(dqp);
481 ASSERT(dqp->q_nrefs == 0);
482 list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
483 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
484 trace_xfs_dqreclaim_done(dqp);
485 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
489 xfs_dqunlock(dqp);
491 trace_xfs_dqreclaim_busy(dqp);
492 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
496 trace_xfs_dqreclaim_busy(dqp);
497 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
498 xfs_dqunlock(dqp);
527 struct xfs_dquot *dqp;
529 dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
530 list_del_init(&dqp->q_lru);
531 xfs_qm_dqfree_one(dqp);
553 struct xfs_dquot *dqp;
557 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
561 defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
567 defq->blk.hard = dqp->q_blk.hardlimit;
568 defq->blk.soft = dqp->q_blk.softlimit;
569 defq->ino.hard = dqp->q_ino.hardlimit;
570 defq->ino.soft = dqp->q_ino.softlimit;
571 defq->rtb.hard = dqp->q_rtb.hardlimit;
572 defq->rtb.soft = dqp->q_rtb.softlimit;
573 xfs_qm_dqdestroy(dqp);
584 struct xfs_dquot *dqp;
600 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
609 if (dqp->q_blk.timer)
610 defq->blk.time = dqp->q_blk.timer;
611 if (dqp->q_ino.timer)
612 defq->ino.time = dqp->q_ino.timer;
613 if (dqp->q_rtb.timer)
614 defq->rtb.time = dqp->q_rtb.timer;
616 xfs_qm_dqdestroy(dqp);
1091 struct xfs_dquot *dqp;
1096 error = xfs_qm_dqget(mp, id, type, true, &dqp);
1106 trace_xfs_dqadjust(dqp);
1112 dqp->q_ino.count++;
1113 dqp->q_ino.reserved++;
1115 dqp->q_blk.count += nblks;
1116 dqp->q_blk.reserved += nblks;
1119 dqp->q_rtb.count += rtblks;
1120 dqp->q_rtb.reserved += rtblks;
1128 if (dqp->q_id) {
1129 xfs_qm_adjust_dqlimits(dqp);
1130 xfs_qm_adjust_dqtimers(dqp);
1133 dqp->q_flags |= XFS_DQFLAG_DIRTY;
1134 xfs_qm_dqput(dqp);
1242 struct xfs_dquot *dqp,
1245 struct xfs_mount *mp = dqp->q_mount;
1250 xfs_dqlock(dqp);
1251 if (dqp->q_flags & XFS_DQFLAG_FREEING)
1253 if (!XFS_DQ_IS_DIRTY(dqp))
1264 if (!xfs_dqflock_nowait(dqp)) {
1266 error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1285 error = xfs_qm_dqflush(dqp, &bp);
1292 xfs_dqunlock(dqp);
1649 struct xfs_dquot *dqp)
1651 struct xfs_mount *mp = dqp->q_mount;
1655 radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1660 xfs_qm_dqdestroy(dqp);
1944 struct xfs_dquot *dqp;
1948 dqp = xfs_inode_dquot(ip, type);
1949 if (!dqp || !xfs_dquot_is_enforced(dqp))
1952 if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
1953 xfs_dquot_res_over_limits(&dqp->q_rtb))
1957 if (!dqp->q_prealloc_hi_wmark)
1960 if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
1963 if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
1966 freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
1967 if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
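
The final group of matches (source lines 1944-1967) traces the dquot preallocation throttle check: skip a dquot that is missing or not enforced, never throttle below the low watermark, always throttle at or above the high watermark, and in between compare the remaining headroom against the 5% low-space threshold. Below is a minimal, self-contained userspace sketch of that decision, assuming simplified stand-in names (demo_dquot, demo_need_throttle and its fields); it is an illustration of the logic visible in the matched lines, not the kernel's xfs_dquot definition or its helpers.

/*
 * Sketch of the watermark test seen around source lines 1957-1967:
 * writes against a dquot get throttled once its reserved block count
 * climbs between a low and a high preallocation watermark.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_dquot {
	uint64_t blk_reserved;       /* blocks reserved so far (cf. q_blk.reserved) */
	uint64_t prealloc_lo_wmark;  /* below this: never throttle */
	uint64_t prealloc_hi_wmark;  /* at or above this: always throttle */
	uint64_t low_space_5pct;     /* 5%% headroom threshold (cf. q_low_space[XFS_QLOWSP_5_PCNT]) */
};

/* Return true if speculative preallocation should be throttled. */
static bool demo_need_throttle(const struct demo_dquot *dq)
{
	uint64_t freesp;

	if (!dq->prealloc_hi_wmark)	/* no high watermark set: nothing to throttle */
		return false;
	if (dq->blk_reserved < dq->prealloc_lo_wmark)
		return false;		/* still under the low watermark */
	if (dq->blk_reserved >= dq->prealloc_hi_wmark)
		return true;		/* at or over the high watermark */

	/* Between the watermarks: throttle once headroom drops below the 5%% threshold. */
	freesp = dq->prealloc_hi_wmark - dq->blk_reserved;
	return freesp < dq->low_space_5pct;
}

int main(void)
{
	struct demo_dquot dq = {
		.blk_reserved = 960,
		.prealloc_lo_wmark = 500,
		.prealloc_hi_wmark = 1000,
		.low_space_5pct = 50,
	};

	printf("throttle: %s\n", demo_need_throttle(&dq) ? "yes" : "no");
	return 0;
}

With the example numbers above the program prints "throttle: yes", since the 40 blocks of remaining headroom fall under the 50-block (5%) threshold.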