1/* $NetBSD$ */
2/*-
3  * Copyright (c) 2010 Manuel Bouyer
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
16  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
17  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
19  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27
28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD$");
30
31#include <sys/buf.h>
32#include <sys/param.h>
33#include <sys/kernel.h>
34#include <sys/systm.h>
35#include <sys/namei.h>
36#include <sys/file.h>
37#include <sys/proc.h>
38#include <sys/vnode.h>
39#include <sys/mount.h>
40#include <sys/fstrans.h>
41#include <sys/kauth.h>
42#include <sys/wapbl.h>
43#include <sys/quota.h>
44#include <sys/quotactl.h>
45
46#include <ufs/ufs/quota2.h>
47#include <ufs/ufs/inode.h>
48#include <ufs/ufs/ufsmount.h>
49#include <ufs/ufs/ufs_bswap.h>
50#include <ufs/ufs/ufs_extern.h>
51#include <ufs/ufs/ufs_quota.h>
52#include <ufs/ufs/ufs_wapbl.h>
53
/*
 * LOCKING:
 * Data in the entries are protected by the associated struct dquot's
 * dq_interlock (this means we can't read or change a quota entry without
 * grabbing a dquot for it).
 * The header and lists (including pointers in the data entries, and q2e_uid)
 * are protected by the global dqlock.
 * The locking order is dq_interlock -> dqlock.
 */
63
64static int quota2_bwrite(struct mount *, struct buf *);
65static int getinoquota2(struct inode *, bool, bool, struct buf **,
66    struct quota2_entry **);
67static int getq2h(struct ufsmount *, int, struct buf **,
68    struct quota2_header **, int);
69static int getq2e(struct ufsmount *, int, daddr_t, int, struct buf **,
70    struct quota2_entry **, int);
71static int quota2_walk_list(struct ufsmount *, struct buf *, int,
72    uint64_t *, int, void *,
73    int (*func)(struct ufsmount *, uint64_t *, struct quota2_entry *,
74      uint64_t, void *));
75
76static const char *limnames[] = INITQLNAMES;
77
78static void
79quota2_dict_update_q2e_limits(int objtype, const struct quotaval *val,
80    struct quota2_entry *q2e)
81{
82	/* make sure we can index q2e_val[] by the fs-independent objtype */
83	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
84	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
85
86	q2e->q2e_val[objtype].q2v_hardlimit = val->qv_hardlimit;
87	q2e->q2e_val[objtype].q2v_softlimit = val->qv_softlimit;
88	q2e->q2e_val[objtype].q2v_grace = val->qv_grace;
89}
90
91/*
92 * Convert internal representation to FS-independent representation.
93 * (Note that while the two types are currently identical, the
94 * internal representation is an on-disk struct and the FS-independent
95 * representation is not, and they might diverge in the future.)
96 */
97static void
98q2val_to_quotaval(struct quota2_val *q2v, struct quotaval *qv)
99{
100	qv->qv_softlimit = q2v->q2v_softlimit;
101	qv->qv_hardlimit = q2v->q2v_hardlimit;
102	qv->qv_usage = q2v->q2v_cur;
103	qv->qv_expiretime = q2v->q2v_time;
104	qv->qv_grace = q2v->q2v_grace;
105}
106
/*
 * Convert a quota2_entry and default-flag to the FS-independent
 * representation.
 */
111static void
112q2e_to_quotaval(struct quota2_entry *q2e, int def,
113	       id_t *id, int objtype, struct quotaval *ret)
114{
115	if (def) {
116		*id = QUOTA_DEFAULTID;
117	} else {
118		*id = q2e->q2e_uid;
119	}
120
121	KASSERT(objtype >= 0 && objtype < N_QL);
122	q2val_to_quotaval(&q2e->q2e_val[objtype], ret);
123}
124
125
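/*
 * Write back a quota buffer: synchronously if the file system is
 * mounted MNT_SYNCHRONOUS, otherwise as a delayed write.
 */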
126static int
127quota2_bwrite(struct mount *mp, struct buf *bp)
128{
129	if (mp->mnt_flag & MNT_SYNCHRONOUS)
130		return bwrite(bp);
131	else {
132		bdwrite(bp);
133		return 0;
134	}
135}
136
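/*
 * Read the quota2 header block for the given quota type.  The caller
 * must hold dqlock.
 */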
137static int
138getq2h(struct ufsmount *ump, int type,
139    struct buf **bpp, struct quota2_header **q2hp, int flags)
140{
141#ifdef FFS_EI
142	const int needswap = UFS_MPNEEDSWAP(ump);
143#endif
144	int error;
145	struct buf *bp;
146	struct quota2_header *q2h;
147
148	KASSERT(mutex_owned(&dqlock));
149	error = bread(ump->um_quotas[type], 0, ump->umq2_bsize,
150	    ump->um_cred[type], flags, &bp);
151	if (error)
152		return error;
153	if (bp->b_resid != 0)
154		panic("dq2get: %s quota file truncated", quotatypes[type]);
155
156	q2h = (void *)bp->b_data;
157	if (ufs_rw32(q2h->q2h_magic_number, needswap) != Q2_HEAD_MAGIC ||
158	    q2h->q2h_type != type)
159		panic("dq2get: corrupted %s quota header", quotatypes[type]);
160	*bpp = bp;
161	*q2hp = q2h;
162	return 0;
163}
164
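/*
 * Read the block containing the quota entry at (lblkno, blkoffset) and
 * return a pointer to the entry inside the buffer.
 */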
165static int
166getq2e(struct ufsmount *ump, int type, daddr_t lblkno, int blkoffset,
167    struct buf **bpp, struct quota2_entry **q2ep, int flags)
168{
169	int error;
170	struct buf *bp;
171
172	if (blkoffset & (sizeof(uint64_t) - 1)) {
173		panic("dq2get: %s quota file corrupted",
174		    quotatypes[type]);
175	}
176	error = bread(ump->um_quotas[type], lblkno, ump->umq2_bsize,
177	    ump->um_cred[type], flags, &bp);
178	if (error)
179		return error;
180	if (bp->b_resid != 0) {
181		panic("dq2get: %s quota file corrupted",
182		    quotatypes[type]);
183	}
184	*q2ep = (void *)((char *)bp->b_data + blkoffset);
185	*bpp = bp;
186	return 0;
187}
188
189/* walk a quota entry list, calling the callback for each entry */
190#define Q2WL_ABORT 0x10000000
191
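/*
 * The caller must hold dqlock and pass the header buffer in hbp.  The
 * callback may update *offp to unlink the current entry, and may return
 * Q2WL_ABORT to stop the walk early, in which case 0 is returned.  With
 * B_MODIFY set in flags, visited buffers are written back; otherwise
 * they are released.
 */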
192static int
193quota2_walk_list(struct ufsmount *ump, struct buf *hbp, int type,
194    uint64_t *offp, int flags, void *a,
195    int (*func)(struct ufsmount *, uint64_t *, struct quota2_entry *, uint64_t, void *))
196{
197#ifdef FFS_EI
198	const int needswap = UFS_MPNEEDSWAP(ump);
199#endif
200	daddr_t off = ufs_rw64(*offp, needswap);
201	struct buf *bp, *obp = hbp;
202	int ret = 0, ret2 = 0;
203	struct quota2_entry *q2e;
204	daddr_t lblkno, blkoff, olblkno = 0;
205
	KASSERT(mutex_owned(&dqlock));
207
208	while (off != 0) {
209		lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
210		blkoff = (off & ump->umq2_bmask);
211		if (lblkno == 0) {
212			/* in the header block */
213			bp = hbp;
214		} else if (lblkno == olblkno) {
215			/* still in the same buf */
216			bp = obp;
217		} else {
218			ret = bread(ump->um_quotas[type], lblkno,
219			    ump->umq2_bsize,
220			    ump->um_cred[type], flags, &bp);
221			if (ret)
222				return ret;
223			if (bp->b_resid != 0) {
224				panic("quota2_walk_list: %s quota file corrupted",
225				    quotatypes[type]);
226			}
227		}
228		q2e = (void *)((char *)(bp->b_data) + blkoff);
229		ret = (*func)(ump, offp, q2e, off, a);
230		if (off != ufs_rw64(*offp, needswap)) {
231			/* callback changed parent's pointer, redo */
232			off = ufs_rw64(*offp, needswap);
233			if (bp != hbp && bp != obp)
234				ret2 = bwrite(bp);
235		} else {
			/* parent is now current */
237			if (obp != bp && obp != hbp) {
238				if (flags & B_MODIFY)
239					ret2 = bwrite(obp);
240				else
241					brelse(obp, 0);
242			}
243			obp = bp;
244			olblkno = lblkno;
245			offp = &(q2e->q2e_next);
246			off = ufs_rw64(*offp, needswap);
247		}
248		if (ret)
249			break;
250		if (ret2) {
251			ret = ret2;
252			break;
253		}
254	}
255	if (obp != hbp) {
256		if (flags & B_MODIFY)
257			ret2 = bwrite(obp);
258		else
259			brelse(obp, 0);
260	}
261	if (ret & Q2WL_ABORT)
262		return 0;
263	if (ret == 0)
264		return ret2;
265	return ret;
266}
267
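/*
 * Close the quota files at unmount time.
 */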
268int
269quota2_umount(struct mount *mp, int flags)
270{
271	int i, error;
272	struct ufsmount *ump = VFSTOUFS(mp);
273
274	if ((ump->um_flags & UFS_QUOTA2) == 0)
275		return 0;
276
277	for (i = 0; i < MAXQUOTAS; i++) {
278		if (ump->um_quotas[i] != NULLVP) {
279			error = vn_close(ump->um_quotas[i], FREAD|FWRITE,
280			    ump->um_cred[i]);
281			if (error) {
282				printf("quota2_umount failed: close(%p) %d\n",
283				    ump->um_quotas[i], error);
284				return error;
285			}
286		}
287		ump->um_quotas[i] = NULLVP;
288	}
289	return 0;
290}
291
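/*
 * Allocate an on-disk quota entry for the given id: take the first entry
 * off the free list, extending the quota file by one block if the free
 * list is empty, and link the entry into its hash chain.  The caller must
 * hold both dq_interlock and dqlock.
 */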
292static int
293quota2_q2ealloc(struct ufsmount *ump, int type, uid_t uid, struct dquot *dq)
294{
295	int error, error2;
296	struct buf *hbp, *bp;
297	struct quota2_header *q2h;
298	struct quota2_entry *q2e;
299	daddr_t offset;
300	u_long hash_mask;
301	const int needswap = UFS_MPNEEDSWAP(ump);
302
303	KASSERT(mutex_owned(&dq->dq_interlock));
304	KASSERT(mutex_owned(&dqlock));
305	error = getq2h(ump, type, &hbp, &q2h, B_MODIFY);
306	if (error)
307		return error;
308	offset = ufs_rw64(q2h->q2h_free, needswap);
309	if (offset == 0) {
310		struct vnode *vp = ump->um_quotas[type];
311		struct inode *ip = VTOI(vp);
312		uint64_t size = ip->i_size;
		/* need to allocate a new disk block */
314		error = UFS_BALLOC(vp, size, ump->umq2_bsize,
315		    ump->um_cred[type], B_CLRBUF | B_SYNC, &bp);
316		if (error) {
317			brelse(hbp, 0);
318			return error;
319		}
320		KASSERT((ip->i_size % ump->umq2_bsize) == 0);
321		ip->i_size += ump->umq2_bsize;
322		DIP_ASSIGN(ip, size, ip->i_size);
323		ip->i_flag |= IN_CHANGE | IN_UPDATE;
324		uvm_vnp_setsize(vp, ip->i_size);
325		quota2_addfreeq2e(q2h, bp->b_data, size, ump->umq2_bsize,
326		    needswap);
327		error = bwrite(bp);
328		error2 = UFS_UPDATE(vp, NULL, NULL, UPDATE_WAIT);
329		if (error || error2) {
330			brelse(hbp, 0);
331			if (error)
332				return error;
333			return error2;
334		}
335		offset = ufs_rw64(q2h->q2h_free, needswap);
336		KASSERT(offset != 0);
337	}
338	dq->dq2_lblkno = (offset >> ump->um_mountp->mnt_fs_bshift);
339	dq->dq2_blkoff = (offset & ump->umq2_bmask);
340	if (dq->dq2_lblkno == 0) {
341		bp = hbp;
342		q2e = (void *)((char *)bp->b_data + dq->dq2_blkoff);
343	} else {
344		error = getq2e(ump, type, dq->dq2_lblkno,
345		    dq->dq2_blkoff, &bp, &q2e, B_MODIFY);
346		if (error) {
347			brelse(hbp, 0);
348			return error;
349		}
350	}
351	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
352	/* remove from free list */
353	q2h->q2h_free = q2e->q2e_next;
354
355	memcpy(q2e, &q2h->q2h_defentry, sizeof(*q2e));
356	q2e->q2e_uid = ufs_rw32(uid, needswap);
357	/* insert in hash list */
358	q2e->q2e_next = q2h->q2h_entries[uid & hash_mask];
359	q2h->q2h_entries[uid & hash_mask] = ufs_rw64(offset, needswap);
360	if (hbp != bp) {
361		bwrite(hbp);
362	}
363	bwrite(bp);
364	return 0;
365}
366
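/*
 * Get the on-disk quota entries for all of ip's quota types, acquiring
 * each dquot's dq_interlock on the way.  If "alloc" is true, entries that
 * do not yet exist on disk are allocated; if "modify" is true the buffers
 * are obtained with B_MODIFY.
 */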
367static int
368getinoquota2(struct inode *ip, bool alloc, bool modify, struct buf **bpp,
369    struct quota2_entry **q2ep)
370{
371	int error;
372	int i;
373	struct dquot *dq;
374	struct ufsmount *ump = ip->i_ump;
375	u_int32_t ino_ids[MAXQUOTAS];
376
377	error = getinoquota(ip);
378	if (error)
379		return error;
380
381	if (alloc) {
382		UFS_WAPBL_JLOCK_ASSERT(ump->um_mountp);
383	}
	ino_ids[USRQUOTA] = ip->i_uid;
	ino_ids[GRPQUOTA] = ip->i_gid;
386	/* first get the interlock for all dquot */
387	for (i = 0; i < MAXQUOTAS; i++) {
388		dq = ip->i_dquot[i];
389		if (dq == NODQUOT)
390			continue;
391		mutex_enter(&dq->dq_interlock);
392	}
393	/* now get the corresponding quota entry */
394	for (i = 0; i < MAXQUOTAS; i++) {
395		bpp[i] = NULL;
396		q2ep[i] = NULL;
397		dq = ip->i_dquot[i];
398		if (dq == NODQUOT)
399			continue;
400		if (__predict_false(ump->um_quotas[i] == NULL)) {
401			/*
402			 * quotas have been turned off. This can happen
403			 * at umount time.
404			 */
405			mutex_exit(&dq->dq_interlock);
406			dqrele(NULLVP, dq);
407			ip->i_dquot[i] = NULL;
408			continue;
409		}
410
411		if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
412			if (!alloc) {
413				continue;
414			}
			/* need to allocate a new on-disk quota entry */
416			mutex_enter(&dqlock);
417			error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
418			mutex_exit(&dqlock);
419			if (error)
420				return error;
421		}
422		KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
423		error = getq2e(ump, i, dq->dq2_lblkno,
424		    dq->dq2_blkoff, &bpp[i], &q2ep[i],
425		    modify ? B_MODIFY : 0);
426		if (error)
427			return error;
428	}
429	return 0;
430}
431
432__inline static int __unused
433quota2_check_limit(struct quota2_val *q2v, uint64_t change, time_t now)
434{
435	return quota_check_limit(q2v->q2v_cur, change, q2v->q2v_softlimit,
436	    q2v->q2v_hardlimit, q2v->q2v_time, now);
437}
438
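/*
 * Common code for chkdq2() and chkiq2(): adjust the in-use count for
 * the given object type by "change", enforcing the limits when the
 * change is an allocation (unless FORCE is set or the caller has the
 * NOLIMIT privilege).
 */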
439static int
440quota2_check(struct inode *ip, int vtype, int64_t change, kauth_cred_t cred,
441    int flags)
442{
443	int error;
444	struct buf *bp[MAXQUOTAS];
445	struct quota2_entry *q2e[MAXQUOTAS];
446	struct quota2_val *q2vp;
447	struct dquot *dq;
448	uint64_t ncurblks;
449	struct ufsmount *ump = ip->i_ump;
450	struct mount *mp = ump->um_mountp;
451	const int needswap = UFS_MPNEEDSWAP(ump);
452	int i;
453
454	if ((error = getinoquota2(ip, change > 0, change != 0, bp, q2e)) != 0)
455		return error;
456	if (change == 0) {
457		for (i = 0; i < MAXQUOTAS; i++) {
458			dq = ip->i_dquot[i];
459			if (dq == NODQUOT)
460				continue;
461			if (bp[i])
462				brelse(bp[i], 0);
463			mutex_exit(&dq->dq_interlock);
464		}
465		return 0;
466	}
467	if (change < 0) {
468		for (i = 0; i < MAXQUOTAS; i++) {
469			dq = ip->i_dquot[i];
470			if (dq == NODQUOT)
471				continue;
472			if (q2e[i] == NULL) {
473				mutex_exit(&dq->dq_interlock);
474				continue;
475			}
476			q2vp = &q2e[i]->q2e_val[vtype];
477			ncurblks = ufs_rw64(q2vp->q2v_cur, needswap);
478			if (ncurblks < -change)
479				ncurblks = 0;
480			else
481				ncurblks += change;
482			q2vp->q2v_cur = ufs_rw64(ncurblks, needswap);
483			quota2_bwrite(mp, bp[i]);
484			mutex_exit(&dq->dq_interlock);
485		}
486		return 0;
487	}
488	/* see if the allocation is allowed */
489	for (i = 0; i < MAXQUOTAS; i++) {
490		struct quota2_val q2v;
491		int ql_stat;
492		dq = ip->i_dquot[i];
493		if (dq == NODQUOT)
494			continue;
495		KASSERT(q2e[i] != NULL);
496		quota2_ufs_rwq2v(&q2e[i]->q2e_val[vtype], &q2v, needswap);
497		ql_stat = quota2_check_limit(&q2v, change, time_second);
498
499		if ((flags & FORCE) == 0 &&
500		    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
501		    KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
502		    KAUTH_ARG(i), KAUTH_ARG(vtype), NULL) != 0) {
503			/* enforce this limit */
504			switch(QL_STATUS(ql_stat)) {
505			case QL_S_DENY_HARD:
506				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
507					uprintf("\n%s: write failed, %s %s "
508					    "limit reached\n",
509					    mp->mnt_stat.f_mntonname,
510					    quotatypes[i], limnames[vtype]);
511					dq->dq_flags |= DQ_WARN(vtype);
512				}
513				error = EDQUOT;
514				break;
515			case QL_S_DENY_GRACE:
516				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
517					uprintf("\n%s: write failed, %s %s "
518					    "limit reached\n",
519					    mp->mnt_stat.f_mntonname,
520					    quotatypes[i], limnames[vtype]);
521					dq->dq_flags |= DQ_WARN(vtype);
522				}
523				error = EDQUOT;
524				break;
525			case QL_S_ALLOW_SOFT:
526				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
527					uprintf("\n%s: warning, %s %s "
528					    "quota exceeded\n",
529					    mp->mnt_stat.f_mntonname,
530					    quotatypes[i], limnames[vtype]);
531					dq->dq_flags |= DQ_WARN(vtype);
532				}
533				break;
534			}
535		}
		/*
		 * Always do this; we don't know whether the allocation will
		 * succeed in the end.  If we don't do the allocation,
		 * q2v_time will be ignored anyway.
		 */
541		if (ql_stat & QL_F_CROSS) {
542			q2v.q2v_time = time_second + q2v.q2v_grace;
543			quota2_ufs_rwq2v(&q2v, &q2e[i]->q2e_val[vtype],
544			    needswap);
545		}
546	}
547
548	/* now do the allocation if allowed */
549	for (i = 0; i < MAXQUOTAS; i++) {
550		dq = ip->i_dquot[i];
551		if (dq == NODQUOT)
552			continue;
553		KASSERT(q2e[i] != NULL);
554		if (error == 0) {
555			q2vp = &q2e[i]->q2e_val[vtype];
556			ncurblks = ufs_rw64(q2vp->q2v_cur, needswap);
557			q2vp->q2v_cur = ufs_rw64(ncurblks + change, needswap);
558			quota2_bwrite(mp, bp[i]);
559		} else
560			brelse(bp[i], 0);
561		mutex_exit(&dq->dq_interlock);
562	}
563	return error;
564}
565
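/*
 * Check and update the block quotas for ip; wrapper around quota2_check().
 */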
566int
567chkdq2(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
568{
569	return quota2_check(ip, QL_BLOCK, change, cred, flags);
570}
571
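/*
 * Check and update the file (inode) quotas for ip; wrapper around
 * quota2_check().
 */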
572int
573chkiq2(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
574{
575	return quota2_check(ip, QL_FILE, change, cred, flags);
576}
577
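/*
 * Set the limits for the given key, allocating an on-disk entry if the
 * id does not have one yet.  The id QUOTA_DEFAULTID updates the default
 * entry stored in the quota file header instead.
 */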
578int
579quota2_handle_cmd_put(struct ufsmount *ump, const struct quotakey *key,
580    const struct quotaval *val)
581{
582	int error;
583	struct dquot *dq;
584	struct quota2_header *q2h;
585	struct quota2_entry q2e, *q2ep;
586	struct buf *bp;
587	const int needswap = UFS_MPNEEDSWAP(ump);
588
589	/* make sure we can index by the fs-independent idtype */
590	CTASSERT(QUOTA_IDTYPE_USER == USRQUOTA);
591	CTASSERT(QUOTA_IDTYPE_GROUP == GRPQUOTA);
592
593	if (ump->um_quotas[key->qk_idtype] == NULLVP)
594		return ENODEV;
595	error = UFS_WAPBL_BEGIN(ump->um_mountp);
596	if (error)
597		return error;
598
599	if (key->qk_id == QUOTA_DEFAULTID) {
600		mutex_enter(&dqlock);
601		error = getq2h(ump, key->qk_idtype, &bp, &q2h, B_MODIFY);
602		if (error) {
603			mutex_exit(&dqlock);
604			goto out_wapbl;
605		}
606		quota2_ufs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
607		quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
608		quota2_ufs_rwq2e(&q2e, &q2h->q2h_defentry, needswap);
609		mutex_exit(&dqlock);
610		quota2_bwrite(ump->um_mountp, bp);
611		goto out_wapbl;
612	}
613
614	error = dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq);
615	if (error)
616		goto out_wapbl;
617
618	mutex_enter(&dq->dq_interlock);
619	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		/* need to allocate a new on-disk quota entry */
621		mutex_enter(&dqlock);
622		error = quota2_q2ealloc(ump, key->qk_idtype, key->qk_id, dq);
623		mutex_exit(&dqlock);
624		if (error)
625			goto out_il;
626	}
627	KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
628	error = getq2e(ump, key->qk_idtype, dq->dq2_lblkno,
629	    dq->dq2_blkoff, &bp, &q2ep, B_MODIFY);
630	if (error)
631		goto out_il;
632
633	quota2_ufs_rwq2e(q2ep, &q2e, needswap);
634	quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
635	quota2_ufs_rwq2e(&q2e, q2ep, needswap);
636	quota2_bwrite(ump->um_mountp, bp);
637
638out_il:
639	mutex_exit(&dq->dq_interlock);
640	dqrele(NULLVP, dq);
641out_wapbl:
642	UFS_WAPBL_END(ump->um_mountp);
643	return error;
644}
645
646struct dq2clear_callback {
647	uid_t id;
648	struct dquot *dq;
649	struct quota2_header *q2h;
650};
651
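/*
 * quota2_walk_list() callback for quota2_handle_cmd_delete(): when the
 * entry for the target id is found, unlink it from its hash chain, put
 * it on the free list and stop the walk.
 */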
652static int
653dq2clear_callback(struct ufsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
654    uint64_t off, void *v)
655{
656	struct dq2clear_callback *c = v;
657#ifdef FFS_EI
658	const int needswap = UFS_MPNEEDSWAP(ump);
659#endif
660	uint64_t myoff;
661
662	if (ufs_rw32(q2e->q2e_uid, needswap) == c->id) {
663		KASSERT(mutex_owned(&c->dq->dq_interlock));
664		c->dq->dq2_lblkno = 0;
665		c->dq->dq2_blkoff = 0;
666		myoff = *offp;
667		/* remove from hash list */
668		*offp = q2e->q2e_next;
669		/* add to free list */
670		q2e->q2e_next = c->q2h->q2h_free;
671		c->q2h->q2h_free = myoff;
672		return Q2WL_ABORT;
673	}
674	return 0;
675}
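
/*
 * Delete the quota information for the given key: reset the limits for
 * the requested object type to the defaults and, if the entry then holds
 * no information at all, return it to the free list.
 */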
676int
677quota2_handle_cmd_delete(struct ufsmount *ump, const struct quotakey *qk)
678{
679	int idtype;
680	id_t id;
681	int objtype;
682	int error, i, canfree;
683	struct dquot *dq;
684	struct quota2_header *q2h;
685	struct quota2_entry q2e, *q2ep;
686	struct buf *hbp, *bp;
687	u_long hash_mask;
688	struct dq2clear_callback c;
689
690	idtype = qk->qk_idtype;
691	id = qk->qk_id;
692	objtype = qk->qk_objtype;
693
694	if (ump->um_quotas[idtype] == NULLVP)
695		return ENODEV;
696	if (id == QUOTA_DEFAULTID)
697		return EOPNOTSUPP;
698
699	/* get the default entry before locking the entry's buffer */
700	mutex_enter(&dqlock);
701	error = getq2h(ump, idtype, &hbp, &q2h, 0);
702	if (error) {
703		mutex_exit(&dqlock);
704		return error;
705	}
706	/* we'll copy to another disk entry, so no need to swap */
707	memcpy(&q2e, &q2h->q2h_defentry, sizeof(q2e));
708	mutex_exit(&dqlock);
709	brelse(hbp, 0);
710
711	error = dqget(NULLVP, id, ump, idtype, &dq);
712	if (error)
713		return error;
714
715	mutex_enter(&dq->dq_interlock);
716	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
717		/* already clear, nothing to do */
718		error = ENOENT;
719		goto out_il;
720	}
721	error = UFS_WAPBL_BEGIN(ump->um_mountp);
722	if (error)
723		goto out_dq;
724
725	error = getq2e(ump, idtype, dq->dq2_lblkno, dq->dq2_blkoff,
726	    &bp, &q2ep, B_MODIFY);
727	if (error)
728		goto out_wapbl;
729
730	/* make sure we can index by the objtype passed in */
731	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
732	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
733
734	/* clear the requested objtype by copying from the default entry */
735	q2ep->q2e_val[objtype].q2v_softlimit =
736		q2e.q2e_val[objtype].q2v_softlimit;
737	q2ep->q2e_val[objtype].q2v_hardlimit =
738		q2e.q2e_val[objtype].q2v_hardlimit;
739	q2ep->q2e_val[objtype].q2v_grace =
740		q2e.q2e_val[objtype].q2v_grace;
741	q2ep->q2e_val[objtype].q2v_time = 0;
742
743	/* if this entry now contains no information, we can free it */
744	canfree = 1;
745	for (i = 0; i < N_QL; i++) {
746		if (q2ep->q2e_val[i].q2v_cur != 0 ||
747		    (q2ep->q2e_val[i].q2v_softlimit !=
748		     q2e.q2e_val[i].q2v_softlimit) ||
749		    (q2ep->q2e_val[i].q2v_hardlimit !=
750		     q2e.q2e_val[i].q2v_hardlimit) ||
751		    (q2ep->q2e_val[i].q2v_grace !=
752		     q2e.q2e_val[i].q2v_grace)) {
753			canfree = 0;
754			break;
755		}
756		/* note: do not need to check q2v_time */
757	}
758
759	if (canfree == 0) {
760		quota2_bwrite(ump->um_mountp, bp);
761		goto out_wapbl;
762	}
763	/* we can free it. release bp so we can walk the list */
764	brelse(bp, 0);
765	mutex_enter(&dqlock);
766	error = getq2h(ump, idtype, &hbp, &q2h, 0);
767	if (error)
768		goto out_dqlock;
769
770	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
771	c.dq = dq;
772	c.id = id;
773	c.q2h = q2h;
774	error = quota2_walk_list(ump, hbp, idtype,
775	    &q2h->q2h_entries[id & hash_mask], B_MODIFY, &c,
776	    dq2clear_callback);
777
778	bwrite(hbp);
779
780out_dqlock:
781	mutex_exit(&dqlock);
782out_wapbl:
783	UFS_WAPBL_END(ump->um_mountp);
784out_il:
785	mutex_exit(&dq->dq_interlock);
786out_dq:
787	dqrele(NULLVP, dq);
788	return error;
789}
790
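/*
 * Fetch the on-disk entry for a key and return it in *ret in host byte
 * order.  Returns ENOENT if the id has no on-disk entry.
 */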
791static int
792quota2_fetch_q2e(struct ufsmount *ump, const struct quotakey *qk,
793    struct quota2_entry *ret)
794{
795	struct dquot *dq;
796	int error;
797	struct quota2_entry *q2ep;
798	struct buf *bp;
799	const int needswap = UFS_MPNEEDSWAP(ump);
800
801	error = dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
802	if (error)
803		return error;
804
805	mutex_enter(&dq->dq_interlock);
806	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
807		mutex_exit(&dq->dq_interlock);
808		dqrele(NULLVP, dq);
809		return ENOENT;
810	}
811	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
812	    &bp, &q2ep, 0);
813	if (error) {
814		mutex_exit(&dq->dq_interlock);
815		dqrele(NULLVP, dq);
816		return error;
817	}
818	quota2_ufs_rwq2e(q2ep, ret, needswap);
819	brelse(bp, 0);
820	mutex_exit(&dq->dq_interlock);
821	dqrele(NULLVP, dq);
822
823	return 0;
824}
825
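/*
 * Like quota2_fetch_q2e(), but convert the result to the FS-independent
 * struct quotaval for the requested object type.
 */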
826static int
827quota2_fetch_quotaval(struct ufsmount *ump, const struct quotakey *qk,
828    struct quotaval *ret)
829{
830	struct dquot *dq;
831	int error;
832	struct quota2_entry *q2ep, q2e;
833	struct buf  *bp;
834	const int needswap = UFS_MPNEEDSWAP(ump);
835	id_t id2;
836
837	error = dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
838	if (error)
839		return error;
840
841	mutex_enter(&dq->dq_interlock);
842	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
843		mutex_exit(&dq->dq_interlock);
844		dqrele(NULLVP, dq);
845		return ENOENT;
846	}
847	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
848	    &bp, &q2ep, 0);
849	if (error) {
850		mutex_exit(&dq->dq_interlock);
851		dqrele(NULLVP, dq);
852		return error;
853	}
854	quota2_ufs_rwq2e(q2ep, &q2e, needswap);
855	brelse(bp, 0);
856	mutex_exit(&dq->dq_interlock);
857	dqrele(NULLVP, dq);
858
859	q2e_to_quotaval(&q2e, 0, &id2, qk->qk_objtype, ret);
860	KASSERT(id2 == qk->qk_id);
861	return 0;
862}
863
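/*
 * Get the current values for a single key; the id QUOTA_DEFAULTID reads
 * the default entry from the quota file header.
 */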
864int
865quota2_handle_cmd_get(struct ufsmount *ump, const struct quotakey *qk,
866    struct quotaval *qv)
867{
868	int error;
869	struct quota2_header *q2h;
870	struct quota2_entry q2e;
871	struct buf *bp;
872	const int needswap = UFS_MPNEEDSWAP(ump);
873	id_t id2;
874
875	/*
876	 * Make sure the FS-independent codes match the internal ones,
877	 * so we can use the passed-in objtype without having to
878	 * convert it explicitly to QL_BLOCK/QL_FILE.
879	 */
880	CTASSERT(QL_BLOCK == QUOTA_OBJTYPE_BLOCKS);
881	CTASSERT(QL_FILE == QUOTA_OBJTYPE_FILES);
882	CTASSERT(N_QL == 2);
883
884	if (qk->qk_objtype < 0 || qk->qk_objtype >= N_QL) {
885		return EINVAL;
886	}
887
888	if (ump->um_quotas[qk->qk_idtype] == NULLVP)
889		return ENODEV;
890	if (qk->qk_id == QUOTA_DEFAULTID) {
891		mutex_enter(&dqlock);
892		error = getq2h(ump, qk->qk_idtype, &bp, &q2h, 0);
893		if (error) {
894			mutex_exit(&dqlock);
895			return error;
896		}
897		quota2_ufs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
898		mutex_exit(&dqlock);
899		brelse(bp, 0);
900		q2e_to_quotaval(&q2e, qk->qk_id == QUOTA_DEFAULTID, &id2,
901				qk->qk_objtype, qv);
902		(void)id2;
903	} else
904		error = quota2_fetch_quotaval(ump, qk, qv);
905
906	return error;
907}
908
/*
 * Cursor structure we use.
 *
 * This will get stored in userland between calls, so we must not assume
 * it hasn't been arbitrarily corrupted.
 */
915struct ufsq2_cursor {
916	uint32_t q2c_magic;	/* magic number */
917	int q2c_hashsize;	/* size of hash table at last go */
918
919	int q2c_users_done;	/* true if we've returned all user data */
920	int q2c_groups_done;	/* true if we've returned all group data */
921	int q2c_defaults_done;	/* true if we've returned the default values */
922	int q2c_hashpos;	/* slot to start at in hash table */
923	int q2c_uidpos;		/* number of ids we've handled */
924	int q2c_blocks_done;	/* true if we've returned the blocks value */
925};
926
927/*
928 * State of a single cursorget call, or at least the part of it that
929 * needs to be passed around.
930 */
931struct q2cursor_state {
932	/* data return pointers */
933	struct quotakey *keys;
934	struct quotaval *vals;
935
936	/* key/value counters */
937	unsigned maxkeyvals;
938	unsigned numkeys;	/* number of keys assigned */
939
940	/* ID to key/value conversion state */
941	int skipfirst;		/* if true skip first key/value */
942	int skiplast;		/* if true skip last key/value */
943
944	/* ID counters */
945	unsigned maxids;	/* maximum number of IDs to handle */
946	unsigned numids;	/* number of IDs handled */
947};
948
949/*
950 * Additional structure for getids callback.
951 */
952struct q2cursor_getids {
953	struct q2cursor_state *state;
954	int idtype;
955	unsigned skip;		/* number of ids to skip over */
956	unsigned new_skip;	/* number of ids to skip over next time */
957	unsigned skipped;	/* number skipped so far */
958	int stopped;		/* true if we stopped quota_walk_list early */
959};
960
961/*
962 * Cursor-related functions
963 */
964
965/* magic number */
966#define Q2C_MAGIC (0xbeebe111)
967
968/* extract cursor from caller form */
969#define Q2CURSOR(qkc) ((struct ufsq2_cursor *)&qkc->u.qkc_space[0])
970
/*
 * Check that a cursor we're handed is at least superficially valid.  If
 * someone munges it and it still passes these checks, they'll get
 * partial or odd results back but won't break anything.
 */
976static int
977q2cursor_check(struct ufsq2_cursor *cursor)
978{
979	if (cursor->q2c_magic != Q2C_MAGIC) {
980		return EINVAL;
981	}
982	if (cursor->q2c_hashsize < 0) {
983		return EINVAL;
984	}
985
986	if (cursor->q2c_users_done != 0 && cursor->q2c_users_done != 1) {
987		return EINVAL;
988	}
989	if (cursor->q2c_groups_done != 0 && cursor->q2c_groups_done != 1) {
990		return EINVAL;
991	}
992	if (cursor->q2c_defaults_done != 0 && cursor->q2c_defaults_done != 1) {
993		return EINVAL;
994	}
995	if (cursor->q2c_hashpos < 0 || cursor->q2c_uidpos < 0) {
996		return EINVAL;
997	}
998	if (cursor->q2c_blocks_done != 0 && cursor->q2c_blocks_done != 1) {
999		return EINVAL;
1000	}
1001	return 0;
1002}
1003
1004/*
1005 * Set up the q2cursor state.
1006 */
1007static void
1008q2cursor_initstate(struct q2cursor_state *state, struct quotakey *keys,
1009    struct quotaval *vals, unsigned maxkeyvals, int blocks_done)
1010{
1011	state->keys = keys;
1012	state->vals = vals;
1013
1014	state->maxkeyvals = maxkeyvals;
1015	state->numkeys = 0;
1016
1017	/*
1018	 * For each ID there are two quotavals to return. If the
1019	 * maximum number of entries to return is odd, we might want
1020	 * to skip the first quotaval of the first ID, or the last
1021	 * quotaval of the last ID, but not both. So the number of IDs
1022	 * we want is (up to) half the number of return slots we have,
1023	 * rounded up.
1024	 */
1025
1026	state->maxids = (state->maxkeyvals + 1) / 2;
1027	state->numids = 0;
1028	if (state->maxkeyvals % 2) {
1029		if (blocks_done) {
1030			state->skipfirst = 1;
1031			state->skiplast = 0;
1032		} else {
1033			state->skipfirst = 0;
1034			state->skiplast = 1;
1035		}
1036	} else {
1037		state->skipfirst = 0;
1038		state->skiplast = 0;
1039	}
1040}
1041
1042/*
1043 * Choose which idtype we're going to work on. If doing a full
1044 * iteration, we do users first, then groups, but either might be
1045 * disabled or marked to skip via cursorsetidtype(), so don't make
1046 * silly assumptions.
1047 */
1048static int
1049q2cursor_pickidtype(struct ufsq2_cursor *cursor, int *idtype_ret)
1050{
1051	if (cursor->q2c_users_done == 0) {
1052		*idtype_ret = QUOTA_IDTYPE_USER;
1053	} else if (cursor->q2c_groups_done == 0) {
1054		*idtype_ret = QUOTA_IDTYPE_GROUP;
1055	} else {
1056		return EAGAIN;
1057	}
1058	return 0;
1059}
1060
1061/*
1062 * Add an ID to the current state. Sets up either one or two keys to
1063 * refer to it, depending on whether it's first/last and the setting
1064 * of skipfirst. (skiplast does not need to be explicitly tested)
1065 */
1066static void
1067q2cursor_addid(struct q2cursor_state *state, int idtype, id_t id)
1068{
1069	KASSERT(state->numids < state->maxids);
1070	KASSERT(state->numkeys < state->maxkeyvals);
1071
1072	if (!state->skipfirst || state->numkeys > 0) {
1073		state->keys[state->numkeys].qk_idtype = idtype;
1074		state->keys[state->numkeys].qk_id = id;
1075		state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_BLOCKS;
1076		state->numkeys++;
1077	}
1078	if (state->numkeys < state->maxkeyvals) {
1079		state->keys[state->numkeys].qk_idtype = idtype;
1080		state->keys[state->numkeys].qk_id = id;
1081		state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_FILES;
1082		state->numkeys++;
1083	} else {
1084		KASSERT(state->skiplast);
1085	}
1086	state->numids++;
1087}
1088
1089/*
1090 * Callback function for getting IDs. Update counting and call addid.
1091 */
1092static int
1093q2cursor_getids_callback(struct ufsmount *ump, uint64_t *offp,
1094    struct quota2_entry *q2ep, uint64_t off, void *v)
1095{
1096	struct q2cursor_getids *gi = v;
1097	id_t id;
1098#ifdef FFS_EI
1099	const int needswap = UFS_MPNEEDSWAP(ump);
1100#endif
1101
1102	if (gi->skipped < gi->skip) {
1103		gi->skipped++;
1104		return 0;
1105	}
1106	id = ufs_rw32(q2ep->q2e_uid, needswap);
1107	q2cursor_addid(gi->state, gi->idtype, id);
1108	gi->new_skip++;
1109	if (gi->state->numids >= gi->state->maxids) {
1110		/* got enough ids, stop now */
1111		gi->stopped = 1;
1112		return Q2WL_ABORT;
1113	}
1114	return 0;
1115}
1116
1117/*
1118 * Fill in a batch of quotakeys by scanning one or more hash chains.
1119 */
1120static int
1121q2cursor_getkeys(struct ufsmount *ump, int idtype, struct ufsq2_cursor *cursor,
1122    struct q2cursor_state *state,
1123    int *hashsize_ret, struct quota2_entry *default_q2e_ret)
1124{
1125	const int needswap = UFS_MPNEEDSWAP(ump);
1126	struct buf *hbp;
1127	struct quota2_header *q2h;
1128	int quota2_hash_size;
1129	struct q2cursor_getids gi;
1130	uint64_t offset;
1131	int error;
1132
1133	/*
1134	 * Read the header block.
1135	 */
1136
1137	mutex_enter(&dqlock);
1138	error = getq2h(ump, idtype, &hbp, &q2h, 0);
1139	if (error) {
1140		mutex_exit(&dqlock);
1141		return error;
1142	}
1143
1144	/* if the table size has changed, make the caller start over */
1145	quota2_hash_size = ufs_rw16(q2h->q2h_hash_size, needswap);
1146	if (cursor->q2c_hashsize == 0) {
1147		cursor->q2c_hashsize = quota2_hash_size;
1148	} else if (cursor->q2c_hashsize != quota2_hash_size) {
1149		error = EDEADLK;
1150		goto scanfail;
1151	}
1152
1153	/* grab the entry with the default values out of the header */
1154	quota2_ufs_rwq2e(&q2h->q2h_defentry, default_q2e_ret, needswap);
1155
1156	/* If we haven't done the defaults yet, that goes first. */
1157	if (cursor->q2c_defaults_done == 0) {
1158		q2cursor_addid(state, idtype, QUOTA_DEFAULTID);
1159		/* if we read both halves, mark it done */
1160		if (state->numids < state->maxids || !state->skiplast) {
1161			cursor->q2c_defaults_done = 1;
1162		}
1163	}
1164
1165	gi.state = state;
1166	gi.idtype = idtype;
1167
1168	while (state->numids < state->maxids) {
1169		if (cursor->q2c_hashpos >= quota2_hash_size) {
1170			/* nothing more left */
1171			break;
1172		}
1173
1174		/* scan this hash chain */
1175		gi.skip = cursor->q2c_uidpos;
1176		gi.new_skip = gi.skip;
1177		gi.skipped = 0;
1178		gi.stopped = 0;
1179		offset = q2h->q2h_entries[cursor->q2c_hashpos];
1180
1181		error = quota2_walk_list(ump, hbp, idtype, &offset, 0, &gi,
1182		    q2cursor_getids_callback);
1183		KASSERT(error != Q2WL_ABORT);
1184		if (error) {
1185			break;
1186		}
1187		if (gi.stopped) {
1188			/* callback stopped before reading whole chain */
1189			cursor->q2c_uidpos = gi.new_skip;
1190			/* if we didn't get both halves, back up */
1191			if (state->numids == state->maxids && state->skiplast){
1192				KASSERT(cursor->q2c_uidpos > 0);
1193				cursor->q2c_uidpos--;
1194			}
1195		} else {
1196			/* read whole chain */
1197			/* if we got both halves of the last id, advance */
1198			if (state->numids < state->maxids || !state->skiplast){
1199				cursor->q2c_uidpos = 0;
1200				cursor->q2c_hashpos++;
1201			}
1202		}
1203	}
1204
1205scanfail:
1206	mutex_exit(&dqlock);
1207	brelse(hbp, 0);
1208	if (error)
1209		return error;
1210
1211	*hashsize_ret = quota2_hash_size;
1212	return 0;
1213}
1214
1215/*
1216 * Fetch the quotavals for the quotakeys.
1217 */
1218static int
1219q2cursor_getvals(struct ufsmount *ump, struct q2cursor_state *state,
1220    const struct quota2_entry *default_q2e)
1221{
1222	int hasid;
1223	id_t loadedid, id;
1224	unsigned pos;
1225	struct quota2_entry q2e;
1226	int objtype;
1227	int error;
1228
1229	hasid = 0;
1230	loadedid = 0;
1231	for (pos = 0; pos < state->numkeys; pos++) {
1232		id = state->keys[pos].qk_id;
1233		if (!hasid || id != loadedid) {
1234			hasid = 1;
1235			loadedid = id;
1236			if (id == QUOTA_DEFAULTID) {
1237				q2e = *default_q2e;
1238			} else {
1239				error = quota2_fetch_q2e(ump,
1240							 &state->keys[pos],
1241							 &q2e);
1242				if (error == ENOENT) {
1243					/* something changed - start over */
1244					error = EDEADLK;
1245				}
1246				if (error) {
1247					return error;
1248				}
			}
		}

1252
1253		objtype = state->keys[pos].qk_objtype;
1254		KASSERT(objtype >= 0 && objtype < N_QL);
1255		q2val_to_quotaval(&q2e.q2e_val[objtype], &state->vals[pos]);
1256	}
1257
1258	return 0;
1259}
1260
/*
 * Handle cursorget.
 *
 * We can't just read keys and values directly, because we can't walk
 * the list with dqlock held and grab dq_interlock to read the entries at
 * the same time.  So we do two passes: one to figure out which IDs we
 * want and fill in the keys, and then a second to use the keys to fetch
 * the values.
 */
1270int
1271quota2_handle_cmd_cursorget(struct ufsmount *ump, struct quotakcursor *qkc,
1272    struct quotakey *keys, struct quotaval *vals, unsigned maxreturn,
1273    unsigned *ret)
1274{
1275	int error;
1276	struct ufsq2_cursor *cursor;
1277	struct ufsq2_cursor newcursor;
1278	struct q2cursor_state state;
1279	struct quota2_entry default_q2e;
1280	int idtype;
1281	int quota2_hash_size;
1282
1283	/*
1284	 * Convert and validate the cursor.
1285	 */
1286	cursor = Q2CURSOR(qkc);
1287	error = q2cursor_check(cursor);
1288	if (error) {
1289		return error;
1290	}
1291
1292	/*
1293	 * Make sure our on-disk codes match the values of the
1294	 * FS-independent ones. This avoids the need for explicit
1295	 * conversion (which would be a NOP anyway and thus easily
1296	 * left out or called in the wrong places...)
1297	 */
1298	CTASSERT(QUOTA_IDTYPE_USER == USRQUOTA);
1299	CTASSERT(QUOTA_IDTYPE_GROUP == GRPQUOTA);
1300	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
1301	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
1302
1303	/*
1304	 * If some of the idtypes aren't configured/enabled, arrange
1305	 * to skip over them.
1306	 */
1307	if (cursor->q2c_users_done == 0 &&
1308	    ump->um_quotas[USRQUOTA] == NULLVP) {
1309		cursor->q2c_users_done = 1;
1310	}
1311	if (cursor->q2c_groups_done == 0 &&
1312	    ump->um_quotas[GRPQUOTA] == NULLVP) {
1313		cursor->q2c_groups_done = 1;
1314	}
1315
1316	/* Loop over, potentially, both idtypes */
1317	while (1) {
1318
1319		/* Choose id type */
1320		error = q2cursor_pickidtype(cursor, &idtype);
1321		if (error == EAGAIN) {
1322			/* nothing more to do, return 0 */
1323			*ret = 0;
1324			return 0;
1325		}
1326		KASSERT(ump->um_quotas[idtype] != NULLVP);
1327
1328		/*
1329		 * Initialize the per-call iteration state. Copy the
1330		 * cursor state so we can update it in place but back
1331		 * out on error.
1332		 */
1333		q2cursor_initstate(&state, keys, vals, maxreturn,
1334				   cursor->q2c_blocks_done);
1335		newcursor = *cursor;
1336
1337		/* Assign keys */
1338		error = q2cursor_getkeys(ump, idtype, &newcursor, &state,
1339					 &quota2_hash_size, &default_q2e);
1340		if (error) {
1341			return error;
1342		}
1343
1344		/* Now fill in the values. */
1345		error = q2cursor_getvals(ump, &state, &default_q2e);
1346		if (error) {
1347			return error;
1348		}
1349
1350		/*
1351		 * Now that we aren't going to fail and lose what we
1352		 * did so far, we can update the cursor state.
1353		 */
1354
1355		if (newcursor.q2c_hashpos >= quota2_hash_size) {
1356			if (idtype == QUOTA_IDTYPE_USER)
1357				cursor->q2c_users_done = 1;
1358			else
1359				cursor->q2c_groups_done = 1;
1360
1361			/* start over on another id type */
1362			cursor->q2c_hashsize = 0;
1363			cursor->q2c_defaults_done = 0;
1364			cursor->q2c_hashpos = 0;
1365			cursor->q2c_uidpos = 0;
1366			cursor->q2c_blocks_done = 0;
1367		} else {
1368			*cursor = newcursor;
1369			cursor->q2c_blocks_done = state.skiplast;
1370		}
1371
1372		/*
1373		 * If we have something to return, return it.
1374		 * Otherwise, continue to the other idtype, if any,
1375		 * and only return zero at end of iteration.
1376		 */
1377		if (state.numkeys > 0) {
1378			break;
1379		}
1380	}
1381
1382	*ret = state.numkeys;
1383	return 0;
1384}
1385
1386int
1387quota2_handle_cmd_cursoropen(struct ufsmount *ump, struct quotakcursor *qkc)
1388{
1389	struct ufsq2_cursor *cursor;
1390
1391	CTASSERT(sizeof(*cursor) <= sizeof(qkc->u.qkc_space));
1392	cursor = Q2CURSOR(qkc);
1393
1394	cursor->q2c_magic = Q2C_MAGIC;
1395	cursor->q2c_hashsize = 0;
1396
1397	cursor->q2c_users_done = 0;
1398	cursor->q2c_groups_done = 0;
1399	cursor->q2c_defaults_done = 0;
1400	cursor->q2c_hashpos = 0;
1401	cursor->q2c_uidpos = 0;
1402	cursor->q2c_blocks_done = 0;
1403	return 0;
1404}
1405
1406int
1407quota2_handle_cmd_cursorclose(struct ufsmount *ump, struct quotakcursor *qkc)
1408{
1409	struct ufsq2_cursor *cursor;
1410	int error;
1411
1412	cursor = Q2CURSOR(qkc);
1413	error = q2cursor_check(cursor);
1414	if (error) {
1415		return error;
1416	}
1417
1418	/* nothing to do */
1419
1420	return 0;
1421}
1422
1423int
1424quota2_handle_cmd_cursorskipidtype(struct ufsmount *ump,
1425    struct quotakcursor *qkc, int idtype)
1426{
1427	struct ufsq2_cursor *cursor;
1428	int error;
1429
1430	cursor = Q2CURSOR(qkc);
1431	error = q2cursor_check(cursor);
1432	if (error) {
1433		return error;
1434	}
1435
1436	switch (idtype) {
1437	    case QUOTA_IDTYPE_USER:
1438		cursor->q2c_users_done = 1;
1439		break;
1440	    case QUOTA_IDTYPE_GROUP:
1441		cursor->q2c_groups_done = 1;
1442		break;
1443	    default:
1444		return EINVAL;
1445	}
1446
1447	return 0;
1448}
1449
1450int
1451quota2_handle_cmd_cursoratend(struct ufsmount *ump, struct quotakcursor *qkc,
1452    int *ret)
1453{
1454	struct ufsq2_cursor *cursor;
1455	int error;
1456
1457	cursor = Q2CURSOR(qkc);
1458	error = q2cursor_check(cursor);
1459	if (error) {
1460		return error;
1461	}
1462
1463	*ret = (cursor->q2c_users_done && cursor->q2c_groups_done);
1464	return 0;
1465}
1466
1467int
1468quota2_handle_cmd_cursorrewind(struct ufsmount *ump, struct quotakcursor *qkc)
1469{
1470	struct ufsq2_cursor *cursor;
1471	int error;
1472
1473	cursor = Q2CURSOR(qkc);
1474	error = q2cursor_check(cursor);
1475	if (error) {
1476		return error;
1477	}
1478
1479	cursor->q2c_hashsize = 0;
1480
1481	cursor->q2c_users_done = 0;
1482	cursor->q2c_groups_done = 0;
1483	cursor->q2c_defaults_done = 0;
1484	cursor->q2c_hashpos = 0;
1485	cursor->q2c_uidpos = 0;
1486	cursor->q2c_blocks_done = 0;
1487
1488	return 0;
1489}
1490
1491int
1492q2sync(struct mount *mp)
1493{
1494	return 0;
1495}
1496
1497struct dq2get_callback {
1498	uid_t id;
1499	struct dquot *dq;
1500};
1501
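/*
 * quota2_walk_list() callback for dq2get(): when the entry for the target
 * id is found, record its location in the dquot and stop the walk.
 */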
1502static int
1503dq2get_callback(struct ufsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
1504    uint64_t off, void *v)
1505{
1506	struct dq2get_callback *c = v;
1507	daddr_t lblkno;
1508	int blkoff;
1509#ifdef FFS_EI
1510	const int needswap = UFS_MPNEEDSWAP(ump);
1511#endif
1512
1513	if (ufs_rw32(q2e->q2e_uid, needswap) == c->id) {
1514		KASSERT(mutex_owned(&c->dq->dq_interlock));
1515		lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
1516		blkoff = (off & ump->umq2_bmask);
1517		c->dq->dq2_lblkno = lblkno;
1518		c->dq->dq2_blkoff = blkoff;
1519		return Q2WL_ABORT;
1520	}
1521	return 0;
1522}
1523
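/*
 * Locate the on-disk quota entry for (type, id) by walking the hash chain
 * from the header, recording its location in the dquot.  The caller must
 * hold dq_interlock.
 */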
1524int
1525dq2get(struct vnode *dqvp, u_long id, struct ufsmount *ump, int type,
1526    struct dquot *dq)
1527{
1528	struct buf *bp;
1529	struct quota2_header *q2h;
1530	int error;
1531	daddr_t offset;
1532	u_long hash_mask;
1533	struct dq2get_callback c = {
1534		.id = id,
1535		.dq = dq
1536	};
1537
1538	KASSERT(mutex_owned(&dq->dq_interlock));
1539	mutex_enter(&dqlock);
1540	error = getq2h(ump, type, &bp, &q2h, 0);
1541	if (error)
1542		goto out_mutex;
1543	/* look for our entry */
1544	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
1545	offset = q2h->q2h_entries[id & hash_mask];
1546	error = quota2_walk_list(ump, bp, type, &offset, 0, (void *)&c,
1547	    dq2get_callback);
1548	brelse(bp, 0);
1549out_mutex:
1550	mutex_exit(&dqlock);
1551	return error;
1552}
1553
1554int
1555dq2sync(struct vnode *vp, struct dquot *dq)
1556{
1557	return 0;
1558}
1559