1#-
2# SPDX-License-Identifier: BSD-2-Clause
3#
4# Copyright (c) 2022-2023 The FreeBSD Foundation
5#
6# This software was developed by Mark Johnston under sponsorship from
7# the FreeBSD Foundation.
8#
9# Redistribution and use in source and binary forms, with or without
10# modification, are permitted provided that the following conditions are
11# met:
12# 1. Redistributions of source code must retain the above copyright
13#    notice, this list of conditions and the following disclaimer.
14# 2. Redistributions in binary form must reproduce the above copyright
15#    notice, this list of conditions and the following disclaimer in
16#    the documentation and/or other materials provided with the distribution.
17#
18# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28# SUCH DAMAGE.
29#
30
31MAKEFS="makefs -t zfs -o nowarn=true"
32ZFS_POOL_NAME="makefstest$$"
33TEST_ZFS_POOL_NAME="$TMPDIR/poolname"
34
35. "$(dirname "$0")/makefs_tests_common.sh"
36
#
# Shared cleanup routine: destroy the imported pool (if any) and detach
# the md(4) device backing the test image (if any).  Both state files may
# be missing if the test failed before import_image() ran, so their
# absence must not produce noise or stray commands.
#
common_cleanup()
{
	local pool md

	# Try to force a TXG, this can help catch bugs by triggering a panic.
	sync

	# Guard against a missing state file and an empty pool name so we
	# never run "zpool destroy" with a bogus argument.
	pool=$(cat $TEST_ZFS_POOL_NAME 2>/dev/null)
	if [ -n "$pool" ] && zpool list "$pool" >/dev/null 2>&1; then
		zpool destroy "$pool"
	fi

	md=$(cat $TEST_MD_DEVICE_FILE 2>/dev/null)
	if [ -n "$md" ] && [ -c /dev/"$md" ]; then
		mdconfig -d -u "$md"
	fi
}
54
#
# Attach the test image to an md(4) device, sanity-check the pool with
# zdb(8), then import it rooted at $TEST_MOUNT_DIR.  The md device name
# (saved by atf_check -o save:) and the pool name are recorded in files
# for common_cleanup().
#
import_image()
{
	atf_check -e empty -o save:$TEST_MD_DEVICE_FILE -s exit:0 \
	    mdconfig -a -f $TEST_IMAGE
	# zdb runs before the import: it walks the on-disk metadata produced
	# by makefs and fails if it is inconsistent.
	atf_check -o ignore -e empty -s exit:0 \
	    zdb -e -p /dev/$(cat $TEST_MD_DEVICE_FILE) -mmm -ddddd $ZFS_POOL_NAME
	atf_check zpool import -R $TEST_MOUNT_DIR $ZFS_POOL_NAME
	echo "$ZFS_POOL_NAME" > $TEST_ZFS_POOL_NAME
}
64
#
# Test autoexpansion of the vdev.
#
# The pool is initially 10GB, so we get 10GB minus one metaslab's worth of
# usable space for data.  Then the pool is expanded to 50GB, and the amount of
# usable space is 50GB minus one metaslab.
#
atf_test_case autoexpand cleanup
autoexpand_body()
{
	local mssize poolsize poolsize1 newpoolsize

	create_test_inputs

	mssize=$((128 * 1024 * 1024))
	poolsize=$((10 * 1024 * 1024 * 1024))
	atf_check $MAKEFS -s $poolsize -o mssize=$mssize -o rootpath=/ \
	    -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	# Grow the backing file; the pool can expand into the extra space
	# once the vdev is onlined with -e below.
	newpoolsize=$((50 * 1024 * 1024 * 1024))
	truncate -s $newpoolsize $TEST_IMAGE

	import_image

	check_image_contents

	# Before expansion: reported size plus one metaslab equals the
	# original image size.
	poolsize1=$(zpool list -Hp -o size $ZFS_POOL_NAME)
	atf_check [ $((poolsize1 + $mssize)) -eq $poolsize ]

	atf_check zpool online -e $ZFS_POOL_NAME /dev/$(cat $TEST_MD_DEVICE_FILE)

	check_image_contents

	# After expansion: the same relation holds against the new size.
	poolsize1=$(zpool list -Hp -o size $ZFS_POOL_NAME)
	atf_check [ $((poolsize1 + $mssize)) -eq $newpoolsize ]
}
autoexpand_cleanup()
{
	common_cleanup
}
106
#
# Test with some default layout defined by the common code.
#
atf_test_case basic cleanup
basic_body()
{
	# Populate the staging tree with the standard inputs, build a 10GB
	# image from it, import the result, and compare the mounted tree
	# against the staging tree.
	create_test_inputs

	atf_check $MAKEFS -s 10g \
	    -o rootpath=/ \
	    -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR
	import_image
	check_image_contents
}
basic_cleanup()
{
	common_cleanup
}
126
#
# Verify that a child dataset generated by makefs can be destroyed once
# the pool is imported.
#
atf_test_case dataset_removal cleanup
dataset_removal_body()
{
	create_test_dirs

	# Stage the directory that will back the child dataset.
	cd $TEST_INPUTS_DIR
	mkdir dir
	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	atf_check zfs destroy ${ZFS_POOL_NAME}/dir
}
dataset_removal_cleanup()
{
	common_cleanup
}
150
#
# Make sure that we can create and remove an empty directory.
#
atf_test_case empty_dir cleanup
empty_dir_body()
{
	create_test_dirs

	# Stage a single empty directory.
	cd $TEST_INPUTS_DIR
	mkdir dir
	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# rmdir succeeds only if the directory imaged as empty and its
	# entry can be unlinked, exercising makefs' directory encoding.
	atf_check rmdir ${TEST_MOUNT_DIR}/dir
}
empty_dir_cleanup()
{
	common_cleanup
}
176
#
# An image built from an empty staging tree should import and compare clean.
#
atf_test_case empty_fs cleanup
empty_fs_body()
{
	# Nothing is staged beyond the (empty) input directory itself.
	create_test_dirs

	atf_check $MAKEFS -s 10g \
	    -o rootpath=/ \
	    -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR
	import_image
	check_image_contents
}
empty_fs_cleanup()
{
	common_cleanup
}
193
atf_test_case file_extend cleanup
file_extend_body()
{
	local i start

	create_test_dirs

	# Create a file slightly longer than the maximum block size.
	# (132 1KB blocks; assumes a 128KB maximum data block size --
	# consistent with the 128KB figure cited elsewhere in this file.)
	start=132
	dd if=/dev/random of=${TEST_INPUTS_DIR}/foo bs=1k count=$start
	md5 -q ${TEST_INPUTS_DIR}/foo > foo.md5

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Append one block at a time past the original EOF, re-verifying
	# after each append that the original prefix is intact.
	i=0
	while [ $i -lt 1000 ]; do
		dd if=/dev/random of=${TEST_MOUNT_DIR}/foo bs=1k count=1 \
		    seek=$(($i + $start)) conv=notrunc
		# Make sure that the first $start blocks are unmodified.
		dd if=${TEST_MOUNT_DIR}/foo bs=1k count=$start of=foo.copy
		atf_check -o file:foo.md5 md5 -q foo.copy
		i=$(($i + 1))
	done
}
file_extend_cleanup()
{
	common_cleanup
}
227
atf_test_case file_sizes cleanup
file_sizes_body()
{
	local i

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# For each power of two up to 1MB, create files of exactly that
	# size, one byte smaller, and one byte larger, to probe block-size
	# boundary handling.
	i=1
	while [ $i -lt $((1 << 20)) ]; do
		truncate -s $i ${i}.1
		truncate -s $(($i - 1)) ${i}.2
		truncate -s $(($i + 1)) ${i}.3
		i=$(($i << 1))
	done

	cd -

	# XXXMJ this creates sparse files, make sure makefs doesn't
	#       preserve the sparseness.
	# XXXMJ need to test with larger files (at least 128MB for L2 indirs)
	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
file_sizes_cleanup()
{
	common_cleanup
}
260
atf_test_case hard_links cleanup
hard_links_body()
{
	local f

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# Two link sets, one rooted at the top level and one in a
	# subdirectory, with links crossing directories in both directions.
	mkdir dir
	echo "hello" > 1
	ln 1 2
	ln 1 dir/1

	echo "goodbye" > dir/a
	ln dir/a dir/b
	ln dir/a a

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Record the inode number and link count of one member, then verify
	# every member of the set shares both and has identical contents.
	stat -f '%i' ${TEST_MOUNT_DIR}/1 > ./ino
	stat -f '%l' ${TEST_MOUNT_DIR}/1 > ./nlink
	for f in 1 2 dir/1; do
		atf_check -o file:./nlink -e empty -s exit:0 \
		    stat -f '%l' ${TEST_MOUNT_DIR}/${f}
		atf_check -o file:./ino -e empty -s exit:0 \
		    stat -f '%i' ${TEST_MOUNT_DIR}/${f}
		atf_check cmp -s ${TEST_INPUTS_DIR}/1 ${TEST_MOUNT_DIR}/${f}
	done

	# Same checks for the second link set.
	stat -f '%i' ${TEST_MOUNT_DIR}/dir/a > ./ino
	stat -f '%l' ${TEST_MOUNT_DIR}/dir/a > ./nlink
	for f in dir/a dir/b a; do
		atf_check -o file:./nlink -e empty -s exit:0 \
		    stat -f '%l' ${TEST_MOUNT_DIR}/${f}
		atf_check -o file:./ino -e empty -s exit:0 \
		    stat -f '%i' ${TEST_MOUNT_DIR}/${f}
		atf_check cmp -s ${TEST_INPUTS_DIR}/dir/a ${TEST_MOUNT_DIR}/${f}
	done
}
hard_links_cleanup()
{
	common_cleanup
}
311
# Allocate enough dnodes from an object set that the meta dnode needs to use
# indirect blocks.
atf_test_case indirect_dnode_array cleanup
indirect_dnode_array_body()
{
	local count i

	# How many dnodes do we need to allocate?  Well, the data block size
	# for meta dnodes is always 16KB, so with a dnode size of 512B we get
	# 32 dnodes per direct block.  The maximum indirect block size is 128KB
	# and that can fit 1024 block pointers, so we need at least 32 * 1024
	# files to force the use of two levels of indirection.
	#
	# Unfortunately that number of files makes the test run quite slowly,
	# so we settle for a single indirect block for now...
	count=$(jot -r 1 32 1024)

	create_test_dirs
	cd $TEST_INPUTS_DIR
	# The files are empty: only the number of allocated dnodes matters.
	for i in $(seq 1 $count); do
		touch $i
	done
	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
indirect_dnode_array_cleanup()
{
	common_cleanup
}
347
#
# Create some files with long names, so as to test fat ZAP handling.
#
atf_test_case long_file_name cleanup
long_file_name_body()
{
	local dir i

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# micro ZAP keys can be at most 50 bytes.
	# jot -s '' $i 1 1 yields a string of $i "1" characters, so the name
	# lengths 1..60 straddle that cutoff and force a fat ZAP.
	for i in $(seq 1 60); do
		touch $(jot -s '' $i 1 1)
	done
	# Repeat inside a subdirectory whose own name is 61 bytes long.
	dir=$(jot -s '' 61 1 1)
	mkdir $dir
	for i in $(seq 1 60); do
		touch ${dir}/$(jot -s '' $i 1 1)
	done

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Add a directory entry in the hope that OpenZFS might catch a bug
	# in makefs' fat ZAP encoding.
	touch ${TEST_MOUNT_DIR}/foo
}
long_file_name_cleanup()
{
	common_cleanup
}
386
#
# Exercise handling of multiple datasets.
#
atf_test_case multi_dataset_1 cleanup
multi_dataset_1_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	# One staged directory per child dataset, each with a file.
	mkdir dir1
	echo a > dir1/a
	mkdir dir2
	echo b > dir2/b

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1 -o fs=${ZFS_POOL_NAME}/dir2 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Make sure that we have three datasets with the expected mount points.
	atf_check -o inline:${ZFS_POOL_NAME}\\n -e empty -s exit:0 \
	    zfs list -H -o name ${ZFS_POOL_NAME}
	atf_check -o inline:${TEST_MOUNT_DIR}\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}

	atf_check -o inline:${ZFS_POOL_NAME}/dir1\\n -e empty -s exit:0 \
	    zfs list -H -o name ${ZFS_POOL_NAME}/dir1
	atf_check -o inline:${TEST_MOUNT_DIR}/dir1\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	atf_check -o inline:${ZFS_POOL_NAME}/dir2\\n -e empty -s exit:0 \
	    zfs list -H -o name ${ZFS_POOL_NAME}/dir2
	atf_check -o inline:${TEST_MOUNT_DIR}/dir2\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir2
}
multi_dataset_1_cleanup()
{
	common_cleanup
}
431
#
# Create a pool with two datasets, where the root dataset is mounted below
# the child dataset.
#
atf_test_case multi_dataset_2 cleanup
multi_dataset_2_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a
	mkdir dir2
	echo b > dir2/b

	cd -

	# Mountpoints are deliberately inverted: the child dataset mounts at
	# the root and the root dataset mounts underneath it at /dir1.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1\;mountpoint=/ \
	    -o fs=${ZFS_POOL_NAME}\;mountpoint=/dir1 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
multi_dataset_2_cleanup()
{
	common_cleanup
}
462
#
# Create a dataset with a non-existent mount point.
#
atf_test_case multi_dataset_3 cleanup
multi_dataset_3_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a

	cd -

	# dir2 is requested as a dataset even though no dir2 exists in the
	# staging tree.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1 \
	    -o fs=${ZFS_POOL_NAME}/dir2 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	atf_check -o inline:${TEST_MOUNT_DIR}/dir2\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir2

	# Mounting dir2 should have created a directory called dir2.  Go
	# back and create it in the staging tree before comparing.
	atf_check mkdir ${TEST_INPUTS_DIR}/dir2

	check_image_contents
}
multi_dataset_3_cleanup()
{
	common_cleanup
}
497
#
# Create an unmounted dataset.
#
atf_test_case multi_dataset_4 cleanup
multi_dataset_4_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a

	cd -

	# dir1 is created with canmount=noauto and mountpoint=none, so it
	# must not be mounted automatically on import.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1\;canmount=noauto\;mountpoint=none \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	atf_check -o inline:none\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	check_image_contents

	# Now mount the dataset explicitly and verify its mountpoint.
	atf_check zfs set mountpoint=/dir1 ${ZFS_POOL_NAME}/dir1
	atf_check zfs mount ${ZFS_POOL_NAME}/dir1
	atf_check -o inline:${TEST_MOUNT_DIR}/dir1\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	# dir1/a should be part of the root dataset, not dir1.  The mounted
	# (empty) dir1 dataset shadows the root dataset's dir1 directory, so
	# this stat must fail.  The original check was missing the "/" after
	# ${TEST_MOUNT_DIR}, so it named a non-existent path and passed
	# vacuously.
	atf_check -s not-exit:0 -e not-empty stat ${TEST_MOUNT_DIR}/dir1/a
}
multi_dataset_4_cleanup()
{
	common_cleanup
}
535
#
# Validate handling of multiple staging directories.
#
atf_test_case multi_staging_1 cleanup
multi_staging_1_body()
{
	local tmpdir

	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > a
	echo a > dir1/a
	echo z > z

	cd -

	# Second staging tree in a scratch directory.
	tmpdir=$(mktemp -d)
	cd $tmpdir

	mkdir dir2 dir2/dir3
	echo b > dir2/b
	echo c > dir2/dir3/c
	# Symlink with a target that does not exist; the link text itself is
	# what gets imaged.  NOTE(review): "dir2/dir3c" may be a typo for
	# "dir2/dir3/c" -- confirm intent.
	ln -s dir2/dir3c s

	cd -

	# Both staging trees are merged into the image's root.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE ${TEST_INPUTS_DIR} $tmpdir

	import_image

	check_image_contents -d $tmpdir
}
multi_staging_1_cleanup()
{
	common_cleanup
}
575
atf_test_case multi_staging_2 cleanup
multi_staging_2_body()
{
	local tmpdir

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# Both staging trees contain a directory with the same name ("dir");
	# makefs must merge their contents.
	mkdir dir
	echo a > dir/foo
	echo b > dir/bar

	cd -

	tmpdir=$(mktemp -d)
	cd $tmpdir

	mkdir dir
	echo c > dir/baz

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE ${TEST_INPUTS_DIR} $tmpdir

	import_image

	# check_image_contents can't easily handle merged directories, so
	# just check that the merged directory contains the files we expect.
	atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/foo
	atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/bar
	atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/baz

	if [ "$(ls ${TEST_MOUNT_DIR}/dir | wc -l)" -ne 3 ]; then
		atf_fail "Expected 3 files in ${TEST_MOUNT_DIR}/dir"
	fi
}
multi_staging_2_cleanup()
{
	common_cleanup
}
617
#
# Rudimentary test to verify that two ZFS images created using the same
# parameters and input hierarchy are byte-identical.  In particular, makefs(1)
# does not preserve file access times.
#
atf_test_case reproducible cleanup
reproducible_body()
{
	create_test_inputs

	# Build the same image twice from the same inputs ...
	atf_check $MAKEFS -s 512m -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    ${TEST_IMAGE}.1 $TEST_INPUTS_DIR

	atf_check $MAKEFS -s 512m -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    ${TEST_IMAGE}.2 $TEST_INPUTS_DIR

	# ... and require the results to be byte-identical.
	# XXX-MJ cmp(1) is really slow
	atf_check cmp ${TEST_IMAGE}.1 ${TEST_IMAGE}.2
}
reproducible_cleanup()
{
	# Nothing to clean up: this test never imports a pool or attaches an
	# md(4) device.  The no-op is required because an empty compound
	# command is a syntax error in POSIX-style shells.
	:
}
640
#
# Verify that we can take a snapshot of a generated dataset.
#
atf_test_case snapshot cleanup
snapshot_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir
	echo "hello" > dir/hello
	echo "goodbye" > goodbye

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	# Snapshot the root dataset of the generated pool.
	atf_check zfs snapshot ${ZFS_POOL_NAME}@1
}
snapshot_cleanup()
{
	common_cleanup
}
667
#
# Check handling of symbolic links.
#
atf_test_case soft_links cleanup
soft_links_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir
	# Self-referential link: a -> a.
	ln -s a a
	# NOTE(review): this ln fails ("a" already exists) and its failure is
	# not checked, so the self-link above is what ends up being imaged --
	# confirm that is intended.
	ln -s dir/../a a
	# Dangling link into the subdirectory.
	ln -s dir/b b
	# NOTE(review): "dir" is a directory, so this redirection fails
	# ("Is a directory") and "c" below stays dangling; possibly "dir/c"
	# was intended -- confirm.
	echo 'c' > dir
	ln -s dir/c c
	# XXX-MJ overflows bonus buffer ln -s $(jot -s '' 320 1 1) 1

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
soft_links_cleanup()
{
	common_cleanup
}
698
#
# Verify that we can set properties on the root dataset.
#
atf_test_case root_props cleanup
root_props_body()
{
	create_test_inputs

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}\;atime=off\;setuid=off \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Both properties must report the requested value, and their source
	# must be "local" since makefs set them on the dataset itself.
	atf_check -o inline:off\\n -e empty -s exit:0 \
	    zfs get -H -o value atime $ZFS_POOL_NAME
	atf_check -o inline:local\\n -e empty -s exit:0 \
	    zfs get -H -o source atime $ZFS_POOL_NAME
	atf_check -o inline:off\\n -e empty -s exit:0 \
	    zfs get -H -o value setuid $ZFS_POOL_NAME
	atf_check -o inline:local\\n -e empty -s exit:0 \
	    zfs get -H -o source setuid $ZFS_POOL_NAME
}
root_props_cleanup()
{
	common_cleanup
}
728
#
# Verify that usedds and usedchild props are set properly.
#
atf_test_case used_space_props cleanup
used_space_props_body()
{
	local used usedds usedchild
	local rootmb childmb totalmb fudge

	create_test_dirs
	cd $TEST_INPUTS_DIR
	mkdir dir

	# One file in the root dataset and one in the child dataset; all of
	# the accounting checks below are phrased relative to these sizes.
	rootmb=17
	childmb=39
	totalmb=$(($rootmb + $childmb))
	# Per-dataset metadata allowance used by the range checks.
	fudge=$((2 * 1024 * 1024))

	atf_check -e ignore dd if=/dev/random of=foo bs=1M count=$rootmb
	atf_check -e ignore dd if=/dev/random of=dir/bar bs=1M count=$childmb

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	# Make sure that each dataset's space usage is no more than 2MB larger
	# than their files.  This number is magic and might need to change
	# someday.
	usedds=$(zfs list -o usedds -Hp ${ZFS_POOL_NAME})
	atf_check test $usedds -gt $(($rootmb * 1024 * 1024)) -a \
	    $usedds -le $(($rootmb * 1024 * 1024 + $fudge))
	usedds=$(zfs list -o usedds -Hp ${ZFS_POOL_NAME}/dir)
	atf_check test $usedds -gt $(($childmb * 1024 * 1024)) -a \
	    $usedds -le $(($childmb * 1024 * 1024 + $fudge))

	# Make sure that the usedchild property value makes sense: the parent's
	# value corresponds to the size of the child, and the child has no
	# children.
	usedchild=$(zfs list -o usedchild -Hp ${ZFS_POOL_NAME})
	atf_check test $usedchild -gt $(($childmb * 1024 * 1024)) -a \
	    $usedchild -le $(($childmb * 1024 * 1024 + $fudge))
	atf_check -o inline:'0\n' \
	    zfs list -Hp -o usedchild ${ZFS_POOL_NAME}/dir

	# Make sure that the used property value makes sense: the parent's
	# value is the sum of the two sizes, and the child's value is the
	# same as its usedds value, which has already been checked.
	used=$(zfs list -o used -Hp ${ZFS_POOL_NAME})
	atf_check test $used -gt $(($totalmb * 1024 * 1024)) -a \
	    $used -le $(($totalmb * 1024 * 1024 + 2 * $fudge))
	used=$(zfs list -o used -Hp ${ZFS_POOL_NAME}/dir)
	atf_check -o inline:$used'\n' \
	    zfs list -Hp -o usedds ${ZFS_POOL_NAME}/dir

	# Both datasets do not have snapshots.
	atf_check -o inline:'0\n' zfs list -Hp -o usedsnap ${ZFS_POOL_NAME}
	atf_check -o inline:'0\n' zfs list -Hp -o usedsnap ${ZFS_POOL_NAME}/dir
}
used_space_props_cleanup()
{
	common_cleanup
}
796
# Verify that file permissions are set properly.  Make sure that non-executable
# files can't be executed.
atf_test_case perms cleanup
perms_body()
{
	local mode

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# Create one trivial script per permission combination 0000-0777
	# (seq 0 511 decimal), each named after its own octal mode.
	for mode in $(seq 0 511); do
		mode=$(printf "%04o\n" $mode)
		echo 'echo a' > $mode
		atf_check chmod $mode $mode
	done

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# NOTE(review): these execute the staged copies under
	# ${TEST_INPUTS_DIR}, not the files in the mounted image -- confirm
	# that is intended.
	for mode in $(seq 0 511); do
		mode=$(printf "%04o\n" $mode)
		# $mode carries a leading zero, so the shell arithmetic
		# below treats it as octal, matching the 0111/0001 masks.
		# Files with no execute bit at all must be refused even for
		# the invoking user.
		if [ $(($mode & 0111)) -eq 0 ]; then
			atf_check -s not-exit:0 -e match:"Permission denied" \
			    ${TEST_INPUTS_DIR}/$mode
		fi
		# Files without the "other" execute bit must be refused for
		# the unprivileged "tests" user.
		if [ $(($mode & 0001)) -eq 0 ]; then
			atf_check -s not-exit:0 -e match:"Permission denied" \
			    su -m tests -c ${TEST_INPUTS_DIR}/$mode
		fi
	done

}
perms_cleanup()
{
	common_cleanup
}
839
atf_init_test_cases()
{
	local tc

	# Register each test case; names match the *_body functions above,
	# in the same order as the original explicit list.
	for tc in \
	    autoexpand \
	    basic \
	    dataset_removal \
	    empty_dir \
	    empty_fs \
	    file_extend \
	    file_sizes \
	    hard_links \
	    indirect_dnode_array \
	    long_file_name \
	    multi_dataset_1 \
	    multi_dataset_2 \
	    multi_dataset_3 \
	    multi_dataset_4 \
	    multi_staging_1 \
	    multi_staging_2 \
	    reproducible \
	    snapshot \
	    soft_links \
	    root_props \
	    used_space_props \
	    perms; do
		atf_add_test_case $tc
	done

	# XXXMJ tests:
	# - test with different ashifts (at least, 9 and 12), different image sizes
	# - create datasets in imported pool
}
869