1/* $NetBSD: ufs_quota2.c,v 1.46 2023/02/22 21:49:45 riastradh Exp $ */
2/*-
3  * Copyright (c) 2010 Manuel Bouyer
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
16  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
17  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
19  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27
28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD: ufs_quota2.c,v 1.46 2023/02/22 21:49:45 riastradh Exp $");
30
31#include <sys/buf.h>
32#include <sys/param.h>
33#include <sys/kernel.h>
34#include <sys/systm.h>
35#include <sys/namei.h>
36#include <sys/file.h>
37#include <sys/proc.h>
38#include <sys/vnode.h>
39#include <sys/mount.h>
40#include <sys/kauth.h>
41#include <sys/wapbl.h>
42#include <sys/quota.h>
43#include <sys/quotactl.h>
44
45#include <ufs/ufs/quota2.h>
46#include <ufs/ufs/inode.h>
47#include <ufs/ufs/ufsmount.h>
48#include <ufs/ufs/ufs_bswap.h>
49#include <ufs/ufs/ufs_extern.h>
50#include <ufs/ufs/ufs_quota.h>
51#include <ufs/ufs/ufs_wapbl.h>
52
53/*
54 * LOCKING:
55 * Data in the entries are protected by the associated struct dquot's
56 * dq_interlock (this means we can't read or change a quota entry without
57 * grabbing a dquot for it).
58 * The header and lists (including pointers in the data entries, and q2e_uid)
59 * are protected by the global dqlock.
 * The locking order is dq_interlock -> dqlock.
61 */
62
63static int quota2_bwrite(struct mount *, struct buf *);
64static int getinoquota2(struct inode *, bool, bool, struct buf **,
65    struct quota2_entry **);
66static int getq2h(struct ufsmount *, int, struct buf **,
67    struct quota2_header **, int);
68static int getq2e(struct ufsmount *, int, daddr_t, int, struct buf **,
69    struct quota2_entry **, int);
70static int quota2_walk_list(struct ufsmount *, struct buf *, int,
71    uint64_t *, int, void *,
72    int (*func)(struct ufsmount *, uint64_t *, struct quota2_entry *,
73      uint64_t, void *));
74
75static const char *limnames[] = INITQLNAMES;
76
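/*
 * Copy the limits (hard, soft, grace) from an FS-independent quotaval
 * into one objtype slot of a quota2 entry; usage and expiry time are
 * left untouched.
 */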
77static void
78quota2_dict_update_q2e_limits(int objtype, const struct quotaval *val,
79    struct quota2_entry *q2e)
80{
81	/* make sure we can index q2e_val[] by the fs-independent objtype */
82	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
83	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
84
85	q2e->q2e_val[objtype].q2v_hardlimit = val->qv_hardlimit;
86	q2e->q2e_val[objtype].q2v_softlimit = val->qv_softlimit;
87	q2e->q2e_val[objtype].q2v_grace = val->qv_grace;
88}
89
90/*
91 * Convert internal representation to FS-independent representation.
92 * (Note that while the two types are currently identical, the
93 * internal representation is an on-disk struct and the FS-independent
94 * representation is not, and they might diverge in the future.)
95 */
96static void
97q2val_to_quotaval(struct quota2_val *q2v, struct quotaval *qv)
98{
99	qv->qv_softlimit = q2v->q2v_softlimit;
100	qv->qv_hardlimit = q2v->q2v_hardlimit;
101	qv->qv_usage = q2v->q2v_cur;
102	qv->qv_expiretime = q2v->q2v_time;
103	qv->qv_grace = q2v->q2v_grace;
104}
105
106/*
107 * Convert a quota2entry and default-flag to the FS-independent
108 * representation.
109 */
110static void
111q2e_to_quotaval(struct quota2_entry *q2e, int def,
112    id_t *id, int objtype, struct quotaval *ret)
113{
114	if (def) {
115		*id = QUOTA_DEFAULTID;
116	} else {
117		*id = q2e->q2e_uid;
118	}
119
120	KASSERT(objtype >= 0 && objtype < N_QL);
121	q2val_to_quotaval(&q2e->q2e_val[objtype], ret);
122}
123
124
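/*
 * Write back a quota file buffer: synchronously if the filesystem is
 * mounted MNT_SYNCHRONOUS, otherwise as a delayed write.
 */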
125static int
126quota2_bwrite(struct mount *mp, struct buf *bp)
127{
128	if (mp->mnt_flag & MNT_SYNCHRONOUS)
129		return bwrite(bp);
130	else {
131		bdwrite(bp);
132		return 0;
133	}
134}
135
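/*
 * Read the header block of the quota file for the given quota type.
 * Must be called with dqlock held; on success the caller owns *bpp
 * and is responsible for releasing or writing it.
 */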
136static int
137getq2h(struct ufsmount *ump, int type,
138    struct buf **bpp, struct quota2_header **q2hp, int flags)
139{
140	const int needswap = UFS_MPNEEDSWAP(ump);
141	int error;
142	struct buf *bp;
143	struct quota2_header *q2h;
144
145	KASSERT(mutex_owned(&dqlock));
146	error = bread(ump->um_quotas[type], 0, ump->umq2_bsize,
147	    flags, &bp);
148	if (error)
149		return error;
150	if (bp->b_resid != 0)
		panic("%s: %s quota file truncated", __func__, quotatypes[type]);
152
153	q2h = (void *)bp->b_data;
154	if (ufs_rw32(q2h->q2h_magic_number, needswap) != Q2_HEAD_MAGIC ||
155	    q2h->q2h_type != type)
		panic("%s: corrupted %s quota header", __func__, quotatypes[type]);
157	*bpp = bp;
158	*q2hp = q2h;
159	return 0;
160}
161
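/*
 * Read the quota file block containing the entry at (lblkno, blkoffset)
 * and return a pointer to the entry inside the buffer.
 */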
162static int
163getq2e(struct ufsmount *ump, int type, daddr_t lblkno, int blkoffset,
164    struct buf **bpp, struct quota2_entry **q2ep, int flags)
165{
166	int error;
167	struct buf *bp;
168
169	if (blkoffset & (sizeof(uint64_t) - 1)) {
		panic("%s: %s quota file corrupted",
		    __func__, quotatypes[type]);
172	}
173	error = bread(ump->um_quotas[type], lblkno, ump->umq2_bsize,
174	    flags, &bp);
175	if (error)
176		return error;
177	if (bp->b_resid != 0) {
		panic("%s: %s quota file corrupted",
		    __func__, quotatypes[type]);
180	}
181	*q2ep = (void *)((char *)bp->b_data + blkoffset);
182	*bpp = bp;
183	return 0;
184}
185
186/* walk a quota entry list, calling the callback for each entry */
187#define Q2WL_ABORT 0x10000000
188
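/*
 * The callback may update *offp (e.g. to unlink the current entry), in
 * which case the walk continues from the new value; returning Q2WL_ABORT
 * stops the walk early without reporting an error.  Buffers are written
 * back when B_MODIFY is set in flags, otherwise released.  Called with
 * dqlock held.
 */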
189static int
190quota2_walk_list(struct ufsmount *ump, struct buf *hbp, int type,
191    uint64_t *offp, int flags, void *a,
192    int (*func)(struct ufsmount *, uint64_t *, struct quota2_entry *, uint64_t,
193	void *))
194{
195	const int needswap = UFS_MPNEEDSWAP(ump);
196	daddr_t off = ufs_rw64(*offp, needswap);
197	struct buf *bp, *obp = hbp;
198	int ret = 0, ret2 = 0;
199	struct quota2_entry *q2e;
200	daddr_t lblkno, blkoff, olblkno = 0;
201
202	KASSERT(mutex_owned(&dqlock));
203
204	while (off != 0) {
205		lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
206		blkoff = (off & ump->umq2_bmask);
207		if (lblkno == 0) {
208			/* in the header block */
209			bp = hbp;
210		} else if (lblkno == olblkno) {
211			/* still in the same buf */
212			bp = obp;
213		} else {
214			ret = bread(ump->um_quotas[type], lblkno,
215			    ump->umq2_bsize, flags, &bp);
216			if (ret)
217				return ret;
218			if (bp->b_resid != 0) {
219				panic("%s: %s quota file corrupted",
220				    __func__, quotatypes[type]);
221			}
222		}
223		q2e = (void *)((char *)(bp->b_data) + blkoff);
224		ret = (*func)(ump, offp, q2e, off, a);
225		if (off != ufs_rw64(*offp, needswap)) {
226			/* callback changed parent's pointer, redo */
227			off = ufs_rw64(*offp, needswap);
228			if (bp != hbp && bp != obp)
229				ret2 = bwrite(bp);
230		} else {
			/* parent is now current */
232			if (obp != bp && obp != hbp) {
233				if (flags & B_MODIFY)
234					ret2 = bwrite(obp);
235				else
236					brelse(obp, 0);
237			}
238			obp = bp;
239			olblkno = lblkno;
240			offp = &(q2e->q2e_next);
241			off = ufs_rw64(*offp, needswap);
242		}
243		if (ret)
244			break;
245		if (ret2) {
246			ret = ret2;
247			break;
248		}
249	}
250	if (obp != hbp) {
251		if (flags & B_MODIFY)
252			ret2 = bwrite(obp);
253		else
254			brelse(obp, 0);
255	}
256	if (ret & Q2WL_ABORT)
257		return 0;
258	if (ret == 0)
259		return ret2;
260	return ret;
261}
262
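/*
 * Close the quota file vnodes at unmount time.
 */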
263int
264quota2_umount(struct mount *mp, int flags)
265{
266	int i, error;
267	struct ufsmount *ump = VFSTOUFS(mp);
268
269	if ((ump->um_flags & UFS_QUOTA2) == 0)
270		return 0;
271
272	for (i = 0; i < MAXQUOTAS; i++) {
273		if (ump->um_quotas[i] != NULLVP) {
274			error = vn_close(ump->um_quotas[i], FREAD|FWRITE,
275			    ump->um_cred[i]);
276			if (error) {
277				printf("quota2_umount failed: close(%p) %d\n",
278				    ump->um_quotas[i], error);
279				return error;
280			}
281		}
282		ump->um_quotas[i] = NULLVP;
283	}
284	return 0;
285}
286
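/*
 * Allocate an on-disk quota entry for (type, uid): take the first entry
 * off the header's free list, extending the quota file by one block (and
 * adding that block's entries to the free list) if it is empty.  The new
 * entry is initialized from the default entry, linked at the head of its
 * hash chain, and its location is recorded in dq.  Called with both
 * dq_interlock and dqlock held.
 */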
287static int
288quota2_q2ealloc(struct ufsmount *ump, int type, uid_t uid, struct dquot *dq)
289{
290	int error, error2;
291	struct buf *hbp, *bp;
292	struct quota2_header *q2h;
293	struct quota2_entry *q2e;
294	daddr_t offset;
295	u_long hash_mask;
296	const int needswap = UFS_MPNEEDSWAP(ump);
297
298	KASSERT(mutex_owned(&dq->dq_interlock));
299	KASSERT(mutex_owned(&dqlock));
300	error = getq2h(ump, type, &hbp, &q2h, B_MODIFY);
301	if (error)
302		return error;
303	offset = ufs_rw64(q2h->q2h_free, needswap);
304	if (offset == 0) {
305		struct vnode *vp = ump->um_quotas[type];
306		struct inode *ip = VTOI(vp);
307		uint64_t size = ip->i_size;
308		/* need to allocate a new disk block */
309		error = UFS_BALLOC(vp, size, ump->umq2_bsize,
310		    ump->um_cred[type], B_CLRBUF | B_SYNC, &bp);
311		if (error) {
312			brelse(hbp, 0);
313			return error;
314		}
315		KASSERT((ip->i_size % ump->umq2_bsize) == 0);
316		ip->i_size += ump->umq2_bsize;
317		DIP_ASSIGN(ip, size, ip->i_size);
318		ip->i_flag |= IN_CHANGE | IN_UPDATE;
319		uvm_vnp_setsize(vp, ip->i_size);
320		quota2_addfreeq2e(q2h, bp->b_data, size, ump->umq2_bsize,
321		    needswap);
322		error = bwrite(bp);
323		error2 = UFS_UPDATE(vp, NULL, NULL, UPDATE_WAIT);
324		if (error || error2) {
325			brelse(hbp, 0);
326			if (error)
327				return error;
328			return error2;
329		}
330		offset = ufs_rw64(q2h->q2h_free, needswap);
331		KASSERT(offset != 0);
332	}
333	dq->dq2_lblkno = (offset >> ump->um_mountp->mnt_fs_bshift);
334	dq->dq2_blkoff = (offset & ump->umq2_bmask);
335	if (dq->dq2_lblkno == 0) {
336		bp = hbp;
337		q2e = (void *)((char *)bp->b_data + dq->dq2_blkoff);
338	} else {
339		error = getq2e(ump, type, dq->dq2_lblkno,
340		    dq->dq2_blkoff, &bp, &q2e, B_MODIFY);
341		if (error) {
342			brelse(hbp, 0);
343			return error;
344		}
345	}
346	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
347	/* remove from free list */
348	q2h->q2h_free = q2e->q2e_next;
349
350	memcpy(q2e, &q2h->q2h_defentry, sizeof(*q2e));
351	q2e->q2e_uid = ufs_rw32(uid, needswap);
352	/* insert in hash list */
353	q2e->q2e_next = q2h->q2h_entries[uid & hash_mask];
354	q2h->q2h_entries[uid & hash_mask] = ufs_rw64(offset, needswap);
355	if (hbp != bp) {
356		bwrite(hbp);
357	}
358	bwrite(bp);
359	return 0;
360}
361
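/*
 * Get (and, if 'alloc' is true, allocate on disk) the quota entries for
 * all of an inode's dquots.  On success each active dquot's dq_interlock
 * is held and bpp[]/q2ep[] point at the buffer and entry for that quota
 * type (NULL for types not in use); the caller releases them.
 */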
362static int
363getinoquota2(struct inode *ip, bool alloc, bool modify, struct buf **bpp,
364    struct quota2_entry **q2ep)
365{
366	int error;
367	int i;
368	struct dquot *dq;
369	struct ufsmount *ump = ip->i_ump;
370	u_int32_t ino_ids[MAXQUOTAS];
371
372	error = getinoquota(ip);
373	if (error)
374		return error;
375
376	if (alloc) {
377		UFS_WAPBL_JLOCK_ASSERT(ump->um_mountp);
378	}
	ino_ids[USRQUOTA] = ip->i_uid;
	ino_ids[GRPQUOTA] = ip->i_gid;
381	/* first get the interlock for all dquot */
382	for (i = 0; i < MAXQUOTAS; i++) {
383		dq = ip->i_dquot[i];
384		if (dq == NODQUOT)
385			continue;
386		mutex_enter(&dq->dq_interlock);
387	}
388	/* now get the corresponding quota entry */
389	for (i = 0; i < MAXQUOTAS; i++) {
390		bpp[i] = NULL;
391		q2ep[i] = NULL;
392		dq = ip->i_dquot[i];
393		if (dq == NODQUOT)
394			continue;
395		if (__predict_false(ump->um_quotas[i] == NULL)) {
396			/*
397			 * quotas have been turned off. This can happen
398			 * at umount time.
399			 */
400			mutex_exit(&dq->dq_interlock);
401			dqrele(NULLVP, dq);
402			ip->i_dquot[i] = NULL;
403			continue;
404		}
405
406		if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
407			if (!alloc) {
408				continue;
409			}
			/* need to allocate a new on-disk quota entry */
411			mutex_enter(&dqlock);
412			error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
413			mutex_exit(&dqlock);
414			if (error)
415				return error;
416		}
417		KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
418		error = getq2e(ump, i, dq->dq2_lblkno,
419		    dq->dq2_blkoff, &bpp[i], &q2ep[i],
420		    modify ? B_MODIFY : 0);
421		if (error)
422			return error;
423	}
424	return 0;
425}
426
427__inline static int __unused
428quota2_check_limit(struct quota2_val *q2v, uint64_t change, time_t now)
429{
430	return quota_check_limit(q2v->q2v_cur, change, q2v->q2v_softlimit,
431	    q2v->q2v_hardlimit, q2v->q2v_time, now);
432}
433
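/*
 * Common code for chkdq2() and chkiq2(): apply a block or inode usage
 * change to the inode's quotas.  Negative changes are always applied
 * (usage is clamped at zero); positive changes are checked against the
 * soft and hard limits and refused with EDQUOT unless the change is
 * FORCEd or the caller holds the NOLIMIT privilege.
 */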
434static int
435quota2_check(struct inode *ip, int vtype, int64_t change, kauth_cred_t cred,
436    int flags)
437{
438	int error;
439	struct buf *bp[MAXQUOTAS];
440	struct quota2_entry *q2e[MAXQUOTAS];
441	struct quota2_val *q2vp;
442	struct dquot *dq;
443	uint64_t ncurblks;
444	struct ufsmount *ump = ip->i_ump;
445	struct mount *mp = ump->um_mountp;
446	const int needswap = UFS_MPNEEDSWAP(ump);
447	int i;
448
449	if ((error = getinoquota2(ip, change > 0, change != 0, bp, q2e)) != 0)
450		return error;
451	if (change == 0) {
452		for (i = 0; i < MAXQUOTAS; i++) {
453			dq = ip->i_dquot[i];
454			if (dq == NODQUOT)
455				continue;
456			if (bp[i])
457				brelse(bp[i], 0);
458			mutex_exit(&dq->dq_interlock);
459		}
460		return 0;
461	}
462	if (change < 0) {
463		for (i = 0; i < MAXQUOTAS; i++) {
464			dq = ip->i_dquot[i];
465			if (dq == NODQUOT)
466				continue;
467			if (q2e[i] == NULL) {
468				mutex_exit(&dq->dq_interlock);
469				continue;
470			}
471			q2vp = &q2e[i]->q2e_val[vtype];
472			ncurblks = ufs_rw64(q2vp->q2v_cur, needswap);
473			if (ncurblks < -change)
474				ncurblks = 0;
475			else
476				ncurblks += change;
477			q2vp->q2v_cur = ufs_rw64(ncurblks, needswap);
478			quota2_bwrite(mp, bp[i]);
479			mutex_exit(&dq->dq_interlock);
480		}
481		return 0;
482	}
483	/* see if the allocation is allowed */
484	for (i = 0; i < MAXQUOTAS; i++) {
485		struct quota2_val q2v;
486		int ql_stat;
487		dq = ip->i_dquot[i];
488		if (dq == NODQUOT)
489			continue;
490		KASSERT(q2e[i] != NULL);
491		quota2_ufs_rwq2v(&q2e[i]->q2e_val[vtype], &q2v, needswap);
492		ql_stat = quota2_check_limit(&q2v, change, time_second);
493
494		if ((flags & FORCE) == 0 &&
495		    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
496			KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
497			KAUTH_ARG(i), KAUTH_ARG(vtype), NULL) != 0) {
498			/* enforce this limit */
499			switch(QL_STATUS(ql_stat)) {
500			case QL_S_DENY_HARD:
501				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
502					uprintf("\n%s: write failed, %s %s "
503					    "limit reached\n",
504					    mp->mnt_stat.f_mntonname,
505					    quotatypes[i], limnames[vtype]);
506					dq->dq_flags |= DQ_WARN(vtype);
507				}
508				error = EDQUOT;
509				break;
510			case QL_S_DENY_GRACE:
511				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
512					uprintf("\n%s: write failed, %s %s "
513					    "limit reached\n",
514					    mp->mnt_stat.f_mntonname,
515					    quotatypes[i], limnames[vtype]);
516					dq->dq_flags |= DQ_WARN(vtype);
517				}
518				error = EDQUOT;
519				break;
520			case QL_S_ALLOW_SOFT:
521				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
522					uprintf("\n%s: warning, %s %s "
523					    "quota exceeded\n",
524					    mp->mnt_stat.f_mntonname,
525					    quotatypes[i], limnames[vtype]);
526					dq->dq_flags |= DQ_WARN(vtype);
527				}
528				break;
529			}
530		}
		/*
		 * Always do this; we don't know yet whether the allocation
		 * will succeed in the end.  If we don't do the allocation,
		 * q2v_time will be ignored anyway.
		 */
536		if (ql_stat & QL_F_CROSS) {
537			q2v.q2v_time = time_second + q2v.q2v_grace;
538			quota2_ufs_rwq2v(&q2v, &q2e[i]->q2e_val[vtype],
539			    needswap);
540		}
541	}
542
543	/* now do the allocation if allowed */
544	for (i = 0; i < MAXQUOTAS; i++) {
545		dq = ip->i_dquot[i];
546		if (dq == NODQUOT)
547			continue;
548		KASSERT(q2e[i] != NULL);
549		if (error == 0) {
550			q2vp = &q2e[i]->q2e_val[vtype];
551			ncurblks = ufs_rw64(q2vp->q2v_cur, needswap);
552			q2vp->q2v_cur = ufs_rw64(ncurblks + change, needswap);
553			quota2_bwrite(mp, bp[i]);
554		} else
555			brelse(bp[i], 0);
556		mutex_exit(&dq->dq_interlock);
557	}
558	return error;
559}
560
561int
562chkdq2(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
563{
564	return quota2_check(ip, QL_BLOCK, change, cred, flags);
565}
566
567int
568chkiq2(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
569{
570	return quota2_check(ip, QL_FILE, change, cred, flags);
571}
572
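/*
 * Set the limits for a (idtype, id, objtype) key.  QUOTA_DEFAULTID
 * updates the default entry in the quota file header; other ids get an
 * on-disk entry allocated if they don't have one yet, and their grace
 * expiry is reset when the update pushes current usage over a new soft
 * limit.
 */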
573int
574quota2_handle_cmd_put(struct ufsmount *ump, const struct quotakey *key,
575    const struct quotaval *val)
576{
577	int error;
578	struct dquot *dq;
579	struct quota2_header *q2h;
580	struct quota2_entry q2e, *q2ep;
581	struct buf *bp;
582	const int needswap = UFS_MPNEEDSWAP(ump);
583
584	/* make sure we can index by the fs-independent idtype */
585	CTASSERT(QUOTA_IDTYPE_USER == USRQUOTA);
586	CTASSERT(QUOTA_IDTYPE_GROUP == GRPQUOTA);
587
588	if (ump->um_quotas[key->qk_idtype] == NULLVP)
589		return ENODEV;
590	error = UFS_WAPBL_BEGIN(ump->um_mountp);
591	if (error)
592		return error;
593
594	if (key->qk_id == QUOTA_DEFAULTID) {
595		mutex_enter(&dqlock);
596		error = getq2h(ump, key->qk_idtype, &bp, &q2h, B_MODIFY);
597		if (error) {
598			mutex_exit(&dqlock);
599			goto out_wapbl;
600		}
601		quota2_ufs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
602		quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
603		quota2_ufs_rwq2e(&q2e, &q2h->q2h_defentry, needswap);
604		mutex_exit(&dqlock);
605		quota2_bwrite(ump->um_mountp, bp);
606		goto out_wapbl;
607	}
608
609	error = dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq);
610	if (error)
611		goto out_wapbl;
612
613	mutex_enter(&dq->dq_interlock);
614	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		/* need to allocate a new on-disk quota entry */
616		mutex_enter(&dqlock);
617		error = quota2_q2ealloc(ump, key->qk_idtype, key->qk_id, dq);
618		mutex_exit(&dqlock);
619		if (error)
620			goto out_il;
621	}
622	KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
623	error = getq2e(ump, key->qk_idtype, dq->dq2_lblkno,
624	    dq->dq2_blkoff, &bp, &q2ep, B_MODIFY);
625	if (error)
626		goto out_il;
627
628	quota2_ufs_rwq2e(q2ep, &q2e, needswap);
629	/*
630	 * Reset time limit if previously had no soft limit or were
631	 * under it, but now have a soft limit and are over it.
632	 */
633	if (val->qv_softlimit &&
634	    q2e.q2e_val[key->qk_objtype].q2v_cur >= val->qv_softlimit &&
635	    (q2e.q2e_val[key->qk_objtype].q2v_softlimit == 0 ||
636		(q2e.q2e_val[key->qk_objtype].q2v_cur <
637		    q2e.q2e_val[key->qk_objtype].q2v_softlimit))) {
638		q2e.q2e_val[key->qk_objtype].q2v_time =
639		    time_second + val->qv_grace;
640	}
641	quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
642	quota2_ufs_rwq2e(&q2e, q2ep, needswap);
643	quota2_bwrite(ump->um_mountp, bp);
644
645out_il:
646	mutex_exit(&dq->dq_interlock);
647	dqrele(NULLVP, dq);
648out_wapbl:
649	UFS_WAPBL_END(ump->um_mountp);
650	return error;
651}
652
653struct dq2clear_callback {
654	uid_t id;
655	struct dquot *dq;
656	struct quota2_header *q2h;
657};
658
659static int
660dq2clear_callback(struct ufsmount *ump, uint64_t *offp,
661    struct quota2_entry *q2e,
662    uint64_t off, void *v)
663{
664	struct dq2clear_callback *c = v;
665	const int needswap = UFS_MPNEEDSWAP(ump);
666	uint64_t myoff;
667
668	if (ufs_rw32(q2e->q2e_uid, needswap) == c->id) {
669		KASSERT(mutex_owned(&c->dq->dq_interlock));
670		c->dq->dq2_lblkno = 0;
671		c->dq->dq2_blkoff = 0;
672		myoff = *offp;
673		/* remove from hash list */
674		*offp = q2e->q2e_next;
675		/* add to free list */
676		q2e->q2e_next = c->q2h->q2h_free;
677		c->q2h->q2h_free = myoff;
678		return Q2WL_ABORT;
679	}
680	return 0;
681}
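
/*
 * Delete the stored quota information for a key: reset the requested
 * objtype to the default limits and, if the entry then carries no
 * information at all (no usage, default limits for all objtypes), unlink
 * it from its hash chain and return it to the free list.
 */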
682int
683quota2_handle_cmd_del(struct ufsmount *ump, const struct quotakey *qk)
684{
685	int idtype;
686	id_t id;
687	int objtype;
688	int error, i, canfree;
689	struct dquot *dq;
690	struct quota2_header *q2h;
691	struct quota2_entry q2e, *q2ep;
692	struct buf *hbp, *bp;
693	u_long hash_mask;
694	struct dq2clear_callback c;
695
696	idtype = qk->qk_idtype;
697	id = qk->qk_id;
698	objtype = qk->qk_objtype;
699
700	if (ump->um_quotas[idtype] == NULLVP)
701		return ENODEV;
702	if (id == QUOTA_DEFAULTID)
703		return EOPNOTSUPP;
704
705	/* get the default entry before locking the entry's buffer */
706	mutex_enter(&dqlock);
707	error = getq2h(ump, idtype, &hbp, &q2h, 0);
708	if (error) {
709		mutex_exit(&dqlock);
710		return error;
711	}
712	/* we'll copy to another disk entry, so no need to swap */
713	memcpy(&q2e, &q2h->q2h_defentry, sizeof(q2e));
714	mutex_exit(&dqlock);
715	brelse(hbp, 0);
716
717	error = dqget(NULLVP, id, ump, idtype, &dq);
718	if (error)
719		return error;
720
721	mutex_enter(&dq->dq_interlock);
722	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
723		/* already clear, nothing to do */
724		error = ENOENT;
725		goto out_il;
726	}
727	error = UFS_WAPBL_BEGIN(ump->um_mountp);
728	if (error)
729		goto out_dq;
730
731	error = getq2e(ump, idtype, dq->dq2_lblkno, dq->dq2_blkoff,
732	    &bp, &q2ep, B_MODIFY);
733	if (error)
734		goto out_wapbl;
735
736	/* make sure we can index by the objtype passed in */
737	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
738	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
739
740	/* clear the requested objtype by copying from the default entry */
741	q2ep->q2e_val[objtype].q2v_softlimit =
742		q2e.q2e_val[objtype].q2v_softlimit;
743	q2ep->q2e_val[objtype].q2v_hardlimit =
744		q2e.q2e_val[objtype].q2v_hardlimit;
745	q2ep->q2e_val[objtype].q2v_grace =
746		q2e.q2e_val[objtype].q2v_grace;
747	q2ep->q2e_val[objtype].q2v_time = 0;
748
749	/* if this entry now contains no information, we can free it */
750	canfree = 1;
751	for (i = 0; i < N_QL; i++) {
752		if (q2ep->q2e_val[i].q2v_cur != 0 ||
753		    (q2ep->q2e_val[i].q2v_softlimit !=
754		     q2e.q2e_val[i].q2v_softlimit) ||
755		    (q2ep->q2e_val[i].q2v_hardlimit !=
756		     q2e.q2e_val[i].q2v_hardlimit) ||
757		    (q2ep->q2e_val[i].q2v_grace !=
758		     q2e.q2e_val[i].q2v_grace)) {
759			canfree = 0;
760			break;
761		}
762		/* note: do not need to check q2v_time */
763	}
764
765	if (canfree == 0) {
766		quota2_bwrite(ump->um_mountp, bp);
767		goto out_wapbl;
768	}
769	/* we can free it. release bp so we can walk the list */
770	brelse(bp, 0);
771	mutex_enter(&dqlock);
772	error = getq2h(ump, idtype, &hbp, &q2h, 0);
773	if (error)
774		goto out_dqlock;
775
776	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
777	c.dq = dq;
778	c.id = id;
779	c.q2h = q2h;
780	error = quota2_walk_list(ump, hbp, idtype,
781	    &q2h->q2h_entries[id & hash_mask], B_MODIFY, &c,
782	    dq2clear_callback);
783
784	bwrite(hbp);
785
786out_dqlock:
787	mutex_exit(&dqlock);
788out_wapbl:
789	UFS_WAPBL_END(ump->um_mountp);
790out_il:
791	mutex_exit(&dq->dq_interlock);
792out_dq:
793	dqrele(NULLVP, dq);
794	return error;
795}
796
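/*
 * Fetch the on-disk quota2 entry for a key into *ret (in host byte
 * order).  Returns ENOENT if the id has no on-disk entry.
 */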
797static int
798quota2_fetch_q2e(struct ufsmount *ump, const struct quotakey *qk,
799    struct quota2_entry *ret)
800{
801	struct dquot *dq;
802	int error;
803	struct quota2_entry *q2ep;
804	struct buf *bp;
805	const int needswap = UFS_MPNEEDSWAP(ump);
806
807	error = dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
808	if (error)
809		return error;
810
811	mutex_enter(&dq->dq_interlock);
812	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
813		mutex_exit(&dq->dq_interlock);
814		dqrele(NULLVP, dq);
815		return ENOENT;
816	}
817	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
818	    &bp, &q2ep, 0);
819	if (error) {
820		mutex_exit(&dq->dq_interlock);
821		dqrele(NULLVP, dq);
822		return error;
823	}
824	quota2_ufs_rwq2e(q2ep, ret, needswap);
825	brelse(bp, 0);
826	mutex_exit(&dq->dq_interlock);
827	dqrele(NULLVP, dq);
828
829	return 0;
830}
831
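/*
 * Like quota2_fetch_q2e(), but convert the entry's values for the
 * requested objtype directly to a struct quotaval.
 */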
832static int
833quota2_fetch_quotaval(struct ufsmount *ump, const struct quotakey *qk,
834    struct quotaval *ret)
835{
836	struct dquot *dq;
837	int error;
838	struct quota2_entry *q2ep, q2e;
839	struct buf  *bp;
840	const int needswap = UFS_MPNEEDSWAP(ump);
841	id_t id2;
842
843	error = dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
844	if (error)
845		return error;
846
847	mutex_enter(&dq->dq_interlock);
848	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
849		mutex_exit(&dq->dq_interlock);
850		dqrele(NULLVP, dq);
851		return ENOENT;
852	}
853	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
854	    &bp, &q2ep, 0);
855	if (error) {
856		mutex_exit(&dq->dq_interlock);
857		dqrele(NULLVP, dq);
858		return error;
859	}
860	quota2_ufs_rwq2e(q2ep, &q2e, needswap);
861	brelse(bp, 0);
862	mutex_exit(&dq->dq_interlock);
863	dqrele(NULLVP, dq);
864
865	q2e_to_quotaval(&q2e, 0, &id2, qk->qk_objtype, ret);
866	KASSERT(id2 == qk->qk_id);
867	return 0;
868}
869
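/*
 * Fetch one quotaval for a key.  The values for QUOTA_DEFAULTID come
 * from the default entry in the quota file header; everything else goes
 * through quota2_fetch_quotaval().
 */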
870int
871quota2_handle_cmd_get(struct ufsmount *ump, const struct quotakey *qk,
872    struct quotaval *qv)
873{
874	int error;
875	struct quota2_header *q2h;
876	struct quota2_entry q2e;
877	struct buf *bp;
878	const int needswap = UFS_MPNEEDSWAP(ump);
879	id_t id2;
880
881	/*
882	 * Make sure the FS-independent codes match the internal ones,
883	 * so we can use the passed-in objtype without having to
884	 * convert it explicitly to QL_BLOCK/QL_FILE.
885	 */
886	CTASSERT(QL_BLOCK == QUOTA_OBJTYPE_BLOCKS);
887	CTASSERT(QL_FILE == QUOTA_OBJTYPE_FILES);
888	CTASSERT(N_QL == 2);
889
890	if (qk->qk_objtype < 0 || qk->qk_objtype >= N_QL) {
891		return EINVAL;
892	}
893
894	if (ump->um_quotas[qk->qk_idtype] == NULLVP)
895		return ENODEV;
896	if (qk->qk_id == QUOTA_DEFAULTID) {
897		mutex_enter(&dqlock);
898		error = getq2h(ump, qk->qk_idtype, &bp, &q2h, 0);
899		if (error) {
900			mutex_exit(&dqlock);
901			return error;
902		}
903		quota2_ufs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
904		mutex_exit(&dqlock);
905		brelse(bp, 0);
906		q2e_to_quotaval(&q2e, qk->qk_id == QUOTA_DEFAULTID, &id2,
907		    qk->qk_objtype, qv);
908		(void)id2;
909	} else
910		error = quota2_fetch_quotaval(ump, qk, qv);
911
912	return error;
913}
914
/*
 * Cursor structure we use.
 *
 * This will get stored in userland between calls so we must not assume
 * it isn't arbitrarily corrupted.
 */
921struct ufsq2_cursor {
922	uint32_t q2c_magic;	/* magic number */
923	int q2c_hashsize;	/* size of hash table at last go */
924
925	int q2c_users_done;	/* true if we've returned all user data */
926	int q2c_groups_done;	/* true if we've returned all group data */
927	int q2c_defaults_done;	/* true if we've returned the default values */
928	int q2c_hashpos;	/* slot to start at in hash table */
929	int q2c_uidpos;		/* number of ids we've handled */
930	int q2c_blocks_done;	/* true if we've returned the blocks value */
931};
932
933/*
934 * State of a single cursorget call, or at least the part of it that
935 * needs to be passed around.
936 */
937struct q2cursor_state {
938	/* data return pointers */
939	struct quotakey *keys;
940	struct quotaval *vals;
941
942	/* key/value counters */
943	unsigned maxkeyvals;
944	unsigned numkeys;	/* number of keys assigned */
945
946	/* ID to key/value conversion state */
947	int skipfirst;		/* if true skip first key/value */
948	int skiplast;		/* if true skip last key/value */
949
950	/* ID counters */
951	unsigned maxids;	/* maximum number of IDs to handle */
952	unsigned numids;	/* number of IDs handled */
953};
954
955/*
956 * Additional structure for getids callback.
957 */
958struct q2cursor_getids {
959	struct q2cursor_state *state;
960	int idtype;
961	unsigned skip;		/* number of ids to skip over */
962	unsigned new_skip;	/* number of ids to skip over next time */
963	unsigned skipped;	/* number skipped so far */
	int stopped;		/* true if we stopped quota2_walk_list early */
965};
966
967/*
968 * Cursor-related functions
969 */
970
971/* magic number */
972#define Q2C_MAGIC (0xbeebe111)
973
974/* extract cursor from caller form */
975#define Q2CURSOR(qkc) ((struct ufsq2_cursor *)&qkc->u.qkc_space[0])
976
/*
 * Check that a cursor we're handed is plausibly valid.  If someone
 * munges it and it still passes these checks, they'll get partial or
 * odd results back but won't break anything.
 */
982static int
983q2cursor_check(struct ufsq2_cursor *cursor)
984{
985	if (cursor->q2c_magic != Q2C_MAGIC) {
986		return EINVAL;
987	}
988	if (cursor->q2c_hashsize < 0) {
989		return EINVAL;
990	}
991
992	if (cursor->q2c_users_done != 0 && cursor->q2c_users_done != 1) {
993		return EINVAL;
994	}
995	if (cursor->q2c_groups_done != 0 && cursor->q2c_groups_done != 1) {
996		return EINVAL;
997	}
998	if (cursor->q2c_defaults_done != 0 && cursor->q2c_defaults_done != 1) {
999		return EINVAL;
1000	}
1001	if (cursor->q2c_hashpos < 0 || cursor->q2c_uidpos < 0) {
1002		return EINVAL;
1003	}
1004	if (cursor->q2c_blocks_done != 0 && cursor->q2c_blocks_done != 1) {
1005		return EINVAL;
1006	}
1007	return 0;
1008}
1009
1010/*
1011 * Set up the q2cursor state.
1012 */
1013static void
1014q2cursor_initstate(struct q2cursor_state *state, struct quotakey *keys,
1015    struct quotaval *vals, unsigned maxkeyvals, int blocks_done)
1016{
1017	state->keys = keys;
1018	state->vals = vals;
1019
1020	state->maxkeyvals = maxkeyvals;
1021	state->numkeys = 0;
1022
1023	/*
1024	 * For each ID there are two quotavals to return. If the
1025	 * maximum number of entries to return is odd, we might want
1026	 * to skip the first quotaval of the first ID, or the last
1027	 * quotaval of the last ID, but not both. So the number of IDs
1028	 * we want is (up to) half the number of return slots we have,
1029	 * rounded up.
1030	 */
1031
1032	state->maxids = (state->maxkeyvals + 1) / 2;
1033	state->numids = 0;
1034	if (state->maxkeyvals % 2) {
1035		if (blocks_done) {
1036			state->skipfirst = 1;
1037			state->skiplast = 0;
1038		} else {
1039			state->skipfirst = 0;
1040			state->skiplast = 1;
1041		}
1042	} else {
1043		state->skipfirst = 0;
1044		state->skiplast = 0;
1045	}
1046}
1047
1048/*
1049 * Choose which idtype we're going to work on. If doing a full
1050 * iteration, we do users first, then groups, but either might be
 * disabled or marked to skip via cursorskipidtype(), so don't make
1052 * silly assumptions.
1053 */
1054static int
1055q2cursor_pickidtype(struct ufsq2_cursor *cursor, int *idtype_ret)
1056{
1057	if (cursor->q2c_users_done == 0) {
1058		*idtype_ret = QUOTA_IDTYPE_USER;
1059	} else if (cursor->q2c_groups_done == 0) {
1060		*idtype_ret = QUOTA_IDTYPE_GROUP;
1061	} else {
1062		return EAGAIN;
1063	}
1064	return 0;
1065}
1066
1067/*
1068 * Add an ID to the current state. Sets up either one or two keys to
1069 * refer to it, depending on whether it's first/last and the setting
1070 * of skipfirst. (skiplast does not need to be explicitly tested)
1071 */
1072static void
1073q2cursor_addid(struct q2cursor_state *state, int idtype, id_t id)
1074{
1075	KASSERT(state->numids < state->maxids);
1076	KASSERT(state->numkeys < state->maxkeyvals);
1077
1078	if (!state->skipfirst || state->numkeys > 0) {
1079		state->keys[state->numkeys].qk_idtype = idtype;
1080		state->keys[state->numkeys].qk_id = id;
1081		state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_BLOCKS;
1082		state->numkeys++;
1083	}
1084	if (state->numkeys < state->maxkeyvals) {
1085		state->keys[state->numkeys].qk_idtype = idtype;
1086		state->keys[state->numkeys].qk_id = id;
1087		state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_FILES;
1088		state->numkeys++;
1089	} else {
1090		KASSERT(state->skiplast);
1091	}
1092	state->numids++;
1093}
1094
1095/*
1096 * Callback function for getting IDs. Update counting and call addid.
1097 */
1098static int
1099q2cursor_getids_callback(struct ufsmount *ump, uint64_t *offp,
1100    struct quota2_entry *q2ep, uint64_t off, void *v)
1101{
1102	struct q2cursor_getids *gi = v;
1103	id_t id;
1104	const int needswap = UFS_MPNEEDSWAP(ump);
1105
1106	if (gi->skipped < gi->skip) {
1107		gi->skipped++;
1108		return 0;
1109	}
1110	id = ufs_rw32(q2ep->q2e_uid, needswap);
1111	q2cursor_addid(gi->state, gi->idtype, id);
1112	gi->new_skip++;
1113	if (gi->state->numids >= gi->state->maxids) {
1114		/* got enough ids, stop now */
1115		gi->stopped = 1;
1116		return Q2WL_ABORT;
1117	}
1118	return 0;
1119}
1120
1121/*
1122 * Fill in a batch of quotakeys by scanning one or more hash chains.
1123 */
1124static int
1125q2cursor_getkeys(struct ufsmount *ump, int idtype, struct ufsq2_cursor *cursor,
1126    struct q2cursor_state *state,
1127    int *hashsize_ret, struct quota2_entry *default_q2e_ret)
1128{
1129	const int needswap = UFS_MPNEEDSWAP(ump);
1130	struct buf *hbp;
1131	struct quota2_header *q2h;
1132	int quota2_hash_size;
1133	struct q2cursor_getids gi;
1134	uint64_t offset;
1135	int error;
1136
1137	/*
1138	 * Read the header block.
1139	 */
1140
1141	mutex_enter(&dqlock);
1142	error = getq2h(ump, idtype, &hbp, &q2h, 0);
1143	if (error) {
1144		mutex_exit(&dqlock);
1145		return error;
1146	}
1147
1148	/* if the table size has changed, make the caller start over */
1149	quota2_hash_size = ufs_rw16(q2h->q2h_hash_size, needswap);
1150	if (cursor->q2c_hashsize == 0) {
1151		cursor->q2c_hashsize = quota2_hash_size;
1152	} else if (cursor->q2c_hashsize != quota2_hash_size) {
1153		error = EDEADLK;
1154		goto scanfail;
1155	}
1156
1157	/* grab the entry with the default values out of the header */
1158	quota2_ufs_rwq2e(&q2h->q2h_defentry, default_q2e_ret, needswap);
1159
1160	/* If we haven't done the defaults yet, that goes first. */
1161	if (cursor->q2c_defaults_done == 0) {
1162		q2cursor_addid(state, idtype, QUOTA_DEFAULTID);
1163		/* if we read both halves, mark it done */
1164		if (state->numids < state->maxids || !state->skiplast) {
1165			cursor->q2c_defaults_done = 1;
1166		}
1167	}
1168
1169	gi.state = state;
1170	gi.idtype = idtype;
1171
1172	while (state->numids < state->maxids) {
1173		if (cursor->q2c_hashpos >= quota2_hash_size) {
1174			/* nothing more left */
1175			break;
1176		}
1177
1178		/* scan this hash chain */
1179		gi.skip = cursor->q2c_uidpos;
1180		gi.new_skip = gi.skip;
1181		gi.skipped = 0;
1182		gi.stopped = 0;
1183		offset = q2h->q2h_entries[cursor->q2c_hashpos];
1184
1185		error = quota2_walk_list(ump, hbp, idtype, &offset, 0, &gi,
1186		    q2cursor_getids_callback);
1187		KASSERT(error != Q2WL_ABORT);
1188		if (error) {
1189			break;
1190		}
1191		if (gi.stopped) {
1192			/* callback stopped before reading whole chain */
1193			cursor->q2c_uidpos = gi.new_skip;
1194			/* if we didn't get both halves, back up */
1195			if (state->numids == state->maxids && state->skiplast){
1196				KASSERT(cursor->q2c_uidpos > 0);
1197				cursor->q2c_uidpos--;
1198			}
1199		} else {
1200			/* read whole chain */
1201			/* if we got both halves of the last id, advance */
1202			if (state->numids < state->maxids || !state->skiplast){
1203				cursor->q2c_uidpos = 0;
1204				cursor->q2c_hashpos++;
1205			}
1206		}
1207	}
1208
1209scanfail:
1210	mutex_exit(&dqlock);
1211	brelse(hbp, 0);
1212	if (error)
1213		return error;
1214
1215	*hashsize_ret = quota2_hash_size;
1216	return 0;
1217}
1218
1219/*
1220 * Fetch the quotavals for the quotakeys.
1221 */
1222static int
1223q2cursor_getvals(struct ufsmount *ump, struct q2cursor_state *state,
1224    const struct quota2_entry *default_q2e)
1225{
1226	int hasid;
1227	id_t loadedid, id;
1228	unsigned pos;
1229	struct quota2_entry q2e;
1230	int objtype;
1231	int error;
1232
1233	hasid = 0;
1234	loadedid = 0;
1235	for (pos = 0; pos < state->numkeys; pos++) {
1236		id = state->keys[pos].qk_id;
1237		if (!hasid || id != loadedid) {
1238			hasid = 1;
1239			loadedid = id;
1240			if (id == QUOTA_DEFAULTID) {
1241				q2e = *default_q2e;
1242			} else {
1243				error = quota2_fetch_q2e(ump,
1244				    &state->keys[pos],
1245				    &q2e);
1246				if (error == ENOENT) {
1247					/* something changed - start over */
1248					error = EDEADLK;
1249				}
1250				if (error) {
1251					return error;
1252				}
			}
1254		}
1255
1256
1257		objtype = state->keys[pos].qk_objtype;
1258		KASSERT(objtype >= 0 && objtype < N_QL);
1259		q2val_to_quotaval(&q2e.q2e_val[objtype], &state->vals[pos]);
1260	}
1261
1262	return 0;
1263}
1264
1265/*
1266 * Handle cursorget.
1267 *
1268 * We can't just read keys and values directly, because we can't walk
 * the list with dqlock and grab dq_interlock to read the entries at
1270 * the same time. So we're going to do two passes: one to figure out
1271 * which IDs we want and fill in the keys, and then a second to use
1272 * the keys to fetch the values.
1273 */
1274int
1275quota2_handle_cmd_cursorget(struct ufsmount *ump, struct quotakcursor *qkc,
1276    struct quotakey *keys, struct quotaval *vals, unsigned maxreturn,
1277    unsigned *ret)
1278{
1279	int error;
1280	struct ufsq2_cursor *cursor;
1281	struct ufsq2_cursor newcursor;
1282	struct q2cursor_state state;
1283	struct quota2_entry default_q2e;
1284	int idtype;
1285	int quota2_hash_size = 0; /* XXX: sh3 gcc 4.8 -Wuninitialized */
1286
1287	/*
1288	 * Convert and validate the cursor.
1289	 */
1290	cursor = Q2CURSOR(qkc);
1291	error = q2cursor_check(cursor);
1292	if (error) {
1293		return error;
1294	}
1295
1296	/*
1297	 * Make sure our on-disk codes match the values of the
1298	 * FS-independent ones. This avoids the need for explicit
1299	 * conversion (which would be a NOP anyway and thus easily
1300	 * left out or called in the wrong places...)
1301	 */
1302	CTASSERT(QUOTA_IDTYPE_USER == USRQUOTA);
1303	CTASSERT(QUOTA_IDTYPE_GROUP == GRPQUOTA);
1304	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
1305	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
1306
1307	/*
1308	 * If some of the idtypes aren't configured/enabled, arrange
1309	 * to skip over them.
1310	 */
1311	if (cursor->q2c_users_done == 0 &&
1312	    ump->um_quotas[USRQUOTA] == NULLVP) {
1313		cursor->q2c_users_done = 1;
1314	}
1315	if (cursor->q2c_groups_done == 0 &&
1316	    ump->um_quotas[GRPQUOTA] == NULLVP) {
1317		cursor->q2c_groups_done = 1;
1318	}
1319
1320	/* Loop over, potentially, both idtypes */
1321	while (1) {
1322
1323		/* Choose id type */
1324		error = q2cursor_pickidtype(cursor, &idtype);
1325		if (error == EAGAIN) {
1326			/* nothing more to do, return 0 */
1327			*ret = 0;
1328			return 0;
1329		}
1330		KASSERT(ump->um_quotas[idtype] != NULLVP);
1331
1332		/*
1333		 * Initialize the per-call iteration state. Copy the
1334		 * cursor state so we can update it in place but back
1335		 * out on error.
1336		 */
1337		q2cursor_initstate(&state, keys, vals, maxreturn,
1338		    cursor->q2c_blocks_done);
1339		newcursor = *cursor;
1340
1341		/* Assign keys */
1342		error = q2cursor_getkeys(ump, idtype, &newcursor, &state,
1343		    &quota2_hash_size, &default_q2e);
1344		if (error) {
1345			return error;
1346		}
1347
1348		/* Now fill in the values. */
1349		error = q2cursor_getvals(ump, &state, &default_q2e);
1350		if (error) {
1351			return error;
1352		}
1353
1354		/*
1355		 * Now that we aren't going to fail and lose what we
1356		 * did so far, we can update the cursor state.
1357		 */
1358
1359		if (newcursor.q2c_hashpos >= quota2_hash_size) {
1360			if (idtype == QUOTA_IDTYPE_USER)
1361				cursor->q2c_users_done = 1;
1362			else
1363				cursor->q2c_groups_done = 1;
1364
1365			/* start over on another id type */
1366			cursor->q2c_hashsize = 0;
1367			cursor->q2c_defaults_done = 0;
1368			cursor->q2c_hashpos = 0;
1369			cursor->q2c_uidpos = 0;
1370			cursor->q2c_blocks_done = 0;
1371		} else {
1372			*cursor = newcursor;
1373			cursor->q2c_blocks_done = state.skiplast;
1374		}
1375
1376		/*
1377		 * If we have something to return, return it.
1378		 * Otherwise, continue to the other idtype, if any,
1379		 * and only return zero at end of iteration.
1380		 */
1381		if (state.numkeys > 0) {
1382			break;
1383		}
1384	}
1385
1386	*ret = state.numkeys;
1387	return 0;
1388}
1389
1390int
1391quota2_handle_cmd_cursoropen(struct ufsmount *ump, struct quotakcursor *qkc)
1392{
1393	struct ufsq2_cursor *cursor;
1394
1395	CTASSERT(sizeof(*cursor) <= sizeof(qkc->u.qkc_space));
1396	cursor = Q2CURSOR(qkc);
1397
1398	cursor->q2c_magic = Q2C_MAGIC;
1399	cursor->q2c_hashsize = 0;
1400
1401	cursor->q2c_users_done = 0;
1402	cursor->q2c_groups_done = 0;
1403	cursor->q2c_defaults_done = 0;
1404	cursor->q2c_hashpos = 0;
1405	cursor->q2c_uidpos = 0;
1406	cursor->q2c_blocks_done = 0;
1407	return 0;
1408}
1409
1410int
1411quota2_handle_cmd_cursorclose(struct ufsmount *ump, struct quotakcursor *qkc)
1412{
1413	struct ufsq2_cursor *cursor;
1414	int error;
1415
1416	cursor = Q2CURSOR(qkc);
1417	error = q2cursor_check(cursor);
1418	if (error) {
1419		return error;
1420	}
1421
1422	/* nothing to do */
1423
1424	return 0;
1425}
1426
1427int
1428quota2_handle_cmd_cursorskipidtype(struct ufsmount *ump,
1429    struct quotakcursor *qkc, int idtype)
1430{
1431	struct ufsq2_cursor *cursor;
1432	int error;
1433
1434	cursor = Q2CURSOR(qkc);
1435	error = q2cursor_check(cursor);
1436	if (error) {
1437		return error;
1438	}
1439
1440	switch (idtype) {
1441	case QUOTA_IDTYPE_USER:
1442		cursor->q2c_users_done = 1;
1443		break;
1444	case QUOTA_IDTYPE_GROUP:
1445		cursor->q2c_groups_done = 1;
1446		break;
1447	default:
1448		return EINVAL;
1449	}
1450
1451	return 0;
1452}
1453
1454int
1455quota2_handle_cmd_cursoratend(struct ufsmount *ump, struct quotakcursor *qkc,
1456    int *ret)
1457{
1458	struct ufsq2_cursor *cursor;
1459	int error;
1460
1461	cursor = Q2CURSOR(qkc);
1462	error = q2cursor_check(cursor);
1463	if (error) {
1464		return error;
1465	}
1466
1467	*ret = (cursor->q2c_users_done && cursor->q2c_groups_done);
1468	return 0;
1469}
1470
1471int
1472quota2_handle_cmd_cursorrewind(struct ufsmount *ump, struct quotakcursor *qkc)
1473{
1474	struct ufsq2_cursor *cursor;
1475	int error;
1476
1477	cursor = Q2CURSOR(qkc);
1478	error = q2cursor_check(cursor);
1479	if (error) {
1480		return error;
1481	}
1482
1483	cursor->q2c_hashsize = 0;
1484
1485	cursor->q2c_users_done = 0;
1486	cursor->q2c_groups_done = 0;
1487	cursor->q2c_defaults_done = 0;
1488	cursor->q2c_hashpos = 0;
1489	cursor->q2c_uidpos = 0;
1490	cursor->q2c_blocks_done = 0;
1491
1492	return 0;
1493}
1494
1495int
1496q2sync(struct mount *mp)
1497{
1498	return 0;
1499}
1500
1501struct dq2get_callback {
1502	uid_t id;
1503	struct dquot *dq;
1504};
1505
1506static int
1507dq2get_callback(struct ufsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
1508    uint64_t off, void *v)
1509{
1510	struct dq2get_callback *c = v;
1511	daddr_t lblkno;
1512	int blkoff;
1513	const int needswap = UFS_MPNEEDSWAP(ump);
1514
1515	if (ufs_rw32(q2e->q2e_uid, needswap) == c->id) {
1516		KASSERT(mutex_owned(&c->dq->dq_interlock));
1517		lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
1518		blkoff = (off & ump->umq2_bmask);
1519		c->dq->dq2_lblkno = lblkno;
1520		c->dq->dq2_blkoff = blkoff;
1521		return Q2WL_ABORT;
1522	}
1523	return 0;
1524}
1525
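/*
 * Locate the on-disk quota entry for 'id' by walking its hash chain and
 * record its location in dq (dq2_lblkno/dq2_blkoff).  Finding no entry
 * is not an error: the location stays zero and an entry is allocated
 * lazily when it is first needed.  Called with dq_interlock held.
 */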
1526int
1527dq2get(struct vnode *dqvp, u_long id, struct ufsmount *ump, int type,
1528    struct dquot *dq)
1529{
1530	struct buf *bp;
1531	struct quota2_header *q2h;
1532	int error;
1533	daddr_t offset;
1534	u_long hash_mask;
1535	struct dq2get_callback c = {
1536		.id = id,
1537		.dq = dq
1538	};
1539
1540	KASSERT(mutex_owned(&dq->dq_interlock));
1541	mutex_enter(&dqlock);
1542	error = getq2h(ump, type, &bp, &q2h, 0);
1543	if (error)
1544		goto out_mutex;
1545	/* look for our entry */
1546	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
1547	offset = q2h->q2h_entries[id & hash_mask];
1548	error = quota2_walk_list(ump, bp, type, &offset, 0, (void *)&c,
1549	    dq2get_callback);
1550	brelse(bp, 0);
1551out_mutex:
1552	mutex_exit(&dqlock);
1553	return error;
1554}
1555
1556int
1557dq2sync(struct vnode *vp, struct dquot *dq)
1558{
1559	return 0;
1560}
1561