libzfs_core.c revision 331393
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 RackTop Systems.
 */

/*
 * LibZFS_Core (lzc) is intended to replace most functionality in libzfs.
 * It has the following characteristics:
 *
 *  - Thread Safe.  libzfs_core is accessible concurrently from multiple
 *  threads.  This is accomplished primarily by avoiding global data
 *  (e.g. caching).  Since it's thread-safe, there is no reason for a
 *  process to have multiple libzfs "instances".  Therefore, we store
 *  our few pieces of data (e.g. the file descriptor) in global
 *  variables.  The fd is reference-counted so that the libzfs_core
 *  library can be "initialized" multiple times (e.g. by different
 *  consumers within the same process).
 *
 *  - Committed Interface.  The libzfs_core interface will be committed,
 *  therefore consumers can compile against it and be confident that
 *  their code will continue to work on future releases of this code.
 *  Currently, the interface is Evolving (not Committed), but we intend
 *  to commit to it once it is more complete and we determine that it
 *  meets the needs of all consumers.
 *
 *  - Programmatic Error Handling.  libzfs_core communicates errors with
 *  defined error numbers, and doesn't print anything to stdout/stderr.
 *
 *  - Thin Layer.  libzfs_core is a thin layer, marshaling arguments
 *  to/from the kernel ioctls.  There is generally a 1:1 correspondence
 *  between libzfs_core functions and ioctls to /dev/zfs.
 *
 *  - Clear Atomicity.  Because libzfs_core functions are generally 1:1
 *  with kernel ioctls, and kernel ioctls are generally atomic, each
 *  libzfs_core function is atomic.  For example, creating multiple
 *  snapshots with a single call to lzc_snapshot() is atomic -- it
 *  can't fail with only some of the requested snapshots created, even
 *  in the event of power loss or system crash.
 *
 *  - Continued libzfs Support.  Some higher-level operations (e.g.
 *  support for "zfs send -R") are too complicated to fit the scope of
 *  libzfs_core.  This functionality will continue to live in libzfs.
 *  Where appropriate, libzfs will use the underlying atomic operations
 *  of libzfs_core.  For example, libzfs may implement "zfs send -R |
 *  zfs receive" by using individual "send one snapshot", rename,
 *  destroy, and "receive one snapshot" operations in libzfs_core.
 *  /sbin/zfs and /sbin/zpool will link with both libzfs and
 *  libzfs_core.  Other consumers should aim to use only libzfs_core,
 *  since that will be the supported, stable interface going forward.
 */

#define	_IN_LIBZFS_CORE_

#include <libzfs_core.h>
#include <ctype.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sys/nvpair.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/zfs_ioctl.h>
#include "libzfs_core_compat.h"
#include "libzfs_compat.h"

#ifdef __FreeBSD__
extern int zfs_ioctl_version;
#endif

static int g_fd = -1;
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static int g_refcount;

int
libzfs_core_init(void)
{
	(void) pthread_mutex_lock(&g_lock);
	if (g_refcount == 0) {
		g_fd = open("/dev/zfs", O_RDWR);
		if (g_fd < 0) {
			(void) pthread_mutex_unlock(&g_lock);
			return (errno);
		}
	}
	g_refcount++;
	(void) pthread_mutex_unlock(&g_lock);

	return (0);
}

void
libzfs_core_fini(void)
{
	(void) pthread_mutex_lock(&g_lock);
	ASSERT3S(g_refcount, >, 0);

	if (g_refcount > 0)
		g_refcount--;

	if (g_refcount == 0 && g_fd != -1) {
		(void) close(g_fd);
		g_fd = -1;
	}
	(void) pthread_mutex_unlock(&g_lock);
}
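
/*
 * Usage sketch (illustrative, not part of the library): a consumer
 * brackets its lzc_* calls with init/fini.  The dataset name below is
 * hypothetical.
 *
 *	int err = libzfs_core_init();
 *	if (err != 0)
 *		return (err);
 *	if (lzc_exists("pool/fs"))
 *		(void) printf("pool/fs exists\n");
 *	libzfs_core_fini();
 */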

static int
lzc_ioctl(zfs_ioc_t ioc, const char *name,
    nvlist_t *source, nvlist_t **resultp)
{
	zfs_cmd_t zc = { 0 };
	int error = 0;
	char *packed;
#ifdef __FreeBSD__
	nvlist_t *oldsource;
#endif
	size_t size;

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));

#ifdef __FreeBSD__
	if (zfs_ioctl_version == ZFS_IOCVER_UNDEF)
		zfs_ioctl_version = get_zfs_ioctl_version();

	if (zfs_ioctl_version < ZFS_IOCVER_LZC) {
		oldsource = source;
		error = lzc_compat_pre(&zc, &ioc, &source);
		if (error)
			return (error);
	}
#endif

	packed = fnvlist_pack(source, &size);
	zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
	zc.zc_nvlist_src_size = size;

	if (resultp != NULL) {
		*resultp = NULL;
		if (ioc == ZFS_IOC_CHANNEL_PROGRAM) {
			zc.zc_nvlist_dst_size = fnvlist_lookup_uint64(source,
			    ZCP_ARG_MEMLIMIT);
		} else {
			zc.zc_nvlist_dst_size = MAX(size * 2, 128 * 1024);
		}
		zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
		    malloc(zc.zc_nvlist_dst_size);
#ifdef illumos
		if (zc.zc_nvlist_dst == NULL) {
#else
		if (zc.zc_nvlist_dst == 0) {
#endif
			error = ENOMEM;
			goto out;
		}
	}

	while (ioctl(g_fd, ioc, &zc) != 0) {
		/*
		 * If the ioctl fails with ENOMEM, retry it after
		 * increasing the size of the destination nvlist.
		 *
		 * Channel programs that exit with ENOMEM ran over the
		 * Lua memory sandbox limit; they should not be retried.
		 */
		if (errno == ENOMEM && resultp != NULL &&
		    ioc != ZFS_IOC_CHANNEL_PROGRAM) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			zc.zc_nvlist_dst_size *= 2;
			zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
			    malloc(zc.zc_nvlist_dst_size);
#ifdef illumos
			if (zc.zc_nvlist_dst == NULL) {
#else
			if (zc.zc_nvlist_dst == 0) {
#endif
				error = ENOMEM;
				goto out;
			}
		} else {
			error = errno;
			break;
		}
	}

#ifdef __FreeBSD__
	if (zfs_ioctl_version < ZFS_IOCVER_LZC)
		lzc_compat_post(&zc, ioc);
#endif
	if (zc.zc_nvlist_dst_filled) {
		*resultp = fnvlist_unpack((void *)(uintptr_t)zc.zc_nvlist_dst,
		    zc.zc_nvlist_dst_size);
	}
#ifdef __FreeBSD__
	if (zfs_ioctl_version < ZFS_IOCVER_LZC)
		lzc_compat_outnvl(&zc, ioc, resultp);
#endif
out:
#ifdef __FreeBSD__
	if (zfs_ioctl_version < ZFS_IOCVER_LZC) {
		if (source != oldsource)
			nvlist_free(source);
		source = oldsource;
	}
#endif
	fnvlist_pack_free(packed, size);
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (error);
}

int
lzc_create(const char *fsname, enum lzc_dataset_type type, nvlist_t *props)
{
	int error;
	nvlist_t *args = fnvlist_alloc();
	fnvlist_add_int32(args, "type", (dmu_objset_type_t)type);
	if (props != NULL)
		fnvlist_add_nvlist(args, "props", props);
	error = lzc_ioctl(ZFS_IOC_CREATE, fsname, args, NULL);
	nvlist_free(args);
	return (error);
}
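
/*
 * Usage sketch (illustrative; the dataset name and property are
 * hypothetical): create a new filesystem with one property.
 * LZC_DATSET_TYPE_ZFS is the filesystem type constant declared in
 * libzfs_core.h.
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	fnvlist_add_uint64(props, "recordsize", 128 * 1024);
 *	int err = lzc_create("pool/newfs", LZC_DATSET_TYPE_ZFS, props);
 *	nvlist_free(props);
 */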

int
lzc_clone(const char *fsname, const char *origin,
    nvlist_t *props)
{
	int error;
	nvlist_t *args = fnvlist_alloc();
	fnvlist_add_string(args, "origin", origin);
	if (props != NULL)
		fnvlist_add_nvlist(args, "props", props);
	error = lzc_ioctl(ZFS_IOC_CLONE, fsname, args, NULL);
	nvlist_free(args);
	return (error);
}

int
lzc_promote(const char *fsname, char *snapnamebuf, int snapnamelen)
{
	/*
	 * The promote ioctl is still legacy, so we need to construct our
	 * own zfs_cmd_t rather than using lzc_ioctl().
	 */
	zfs_cmd_t zc = { 0 };

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	(void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name));
	if (ioctl(g_fd, ZFS_IOC_PROMOTE, &zc) != 0) {
		int error = errno;
		if (error == EEXIST && snapnamebuf != NULL)
			(void) strlcpy(snapnamebuf, zc.zc_string, snapnamelen);
		return (error);
	}
	return (0);
}

/*
 * Creates snapshots.
 *
 * The keys in the snaps nvlist are the snapshots to be created.
 * They must all be in the same pool.
 *
 * The props nvlist contains the properties to set.  Currently only user
 * properties are supported.  { user:prop_name -> string value }
 *
 * The returned results nvlist will have an entry for each snapshot that
 * failed.  The value will be the (int32) error code.
 *
 * The return value will be 0 if all snapshots were created, otherwise it will
 * be the errno of an (unspecified) snapshot that failed.
 */
int
lzc_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t **errlist)
{
	nvpair_t *elem;
	nvlist_t *args;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	*errlist = NULL;

	/* determine the pool name */
	elem = nvlist_next_nvpair(snaps, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	args = fnvlist_alloc();
	fnvlist_add_nvlist(args, "snaps", snaps);
	if (props != NULL)
		fnvlist_add_nvlist(args, "props", props);

	error = lzc_ioctl(ZFS_IOC_SNAPSHOT, pool, args, errlist);
	nvlist_free(args);

	return (error);
}
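
/*
 * Usage sketch (illustrative; names are hypothetical): atomically
 * create two snapshots in the same pool and inspect per-snapshot
 * errors on failure.  Each snapshot is a boolean key in the nvlist.
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	nvpair_t *p;
 *	fnvlist_add_boolean(snaps, "pool/fs@today");
 *	fnvlist_add_boolean(snaps, "pool/vol@today");
 *	int err = lzc_snapshot(snaps, NULL, &errlist);
 *	if (err != 0 && errlist != NULL) {
 *		for (p = nvlist_next_nvpair(errlist, NULL); p != NULL;
 *		    p = nvlist_next_nvpair(errlist, p))
 *			(void) fprintf(stderr, "%s: %d\n", nvpair_name(p),
 *			    fnvpair_value_int32(p));
 *	}
 *	nvlist_free(snaps);
 *	nvlist_free(errlist);
 */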

/*
 * Destroys snapshots.
 *
 * The keys in the snaps nvlist are the snapshots to be destroyed.
 * They must all be in the same pool.
 *
 * Snapshots that do not exist will be silently ignored.
 *
 * If 'defer' is not set, and a snapshot has user holds or clones, the
 * destroy operation will fail and none of the snapshots will be
 * destroyed.
 *
 * If 'defer' is set, and a snapshot has user holds or clones, it will be
 * marked for deferred destruction, and will be destroyed when the last hold
 * or clone is removed/destroyed.
 *
 * The return value will be 0 if all snapshots were destroyed (or marked for
 * later destruction if 'defer' is set) or didn't exist to begin with.
 *
 * Otherwise the return value will be the errno of an (unspecified) snapshot
 * that failed, no snapshots will be destroyed, and the errlist will have an
 * entry for each snapshot that failed.  The value in the errlist will be
 * the (int32) error code.
 */
int
lzc_destroy_snaps(nvlist_t *snaps, boolean_t defer, nvlist_t **errlist)
{
	nvpair_t *elem;
	nvlist_t *args;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	/* determine the pool name */
	elem = nvlist_next_nvpair(snaps, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	args = fnvlist_alloc();
	fnvlist_add_nvlist(args, "snaps", snaps);
	if (defer)
		fnvlist_add_boolean(args, "defer");

	error = lzc_ioctl(ZFS_IOC_DESTROY_SNAPS, pool, args, errlist);
	nvlist_free(args);

	return (error);
}
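
/*
 * Usage sketch (illustrative; names are hypothetical): defer-destroy a
 * snapshot that may still have holds or clones.
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	fnvlist_add_boolean(snaps, "pool/fs@old");
 *	int err = lzc_destroy_snaps(snaps, B_TRUE, &errlist);
 *	nvlist_free(snaps);
 *	nvlist_free(errlist);
 */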

int
lzc_snaprange_space(const char *firstsnap, const char *lastsnap,
    uint64_t *usedp)
{
	nvlist_t *args;
	nvlist_t *result;
	int err;
	char fs[ZFS_MAX_DATASET_NAME_LEN];
	char *atp;

	/* determine the fs name */
	(void) strlcpy(fs, firstsnap, sizeof (fs));
	atp = strchr(fs, '@');
	if (atp == NULL)
		return (EINVAL);
	*atp = '\0';

	args = fnvlist_alloc();
	fnvlist_add_string(args, "firstsnap", firstsnap);

	err = lzc_ioctl(ZFS_IOC_SPACE_SNAPS, lastsnap, args, &result);
	nvlist_free(args);
	if (err == 0)
		*usedp = fnvlist_lookup_uint64(result, "used");
	fnvlist_free(result);

	return (err);
}

boolean_t
lzc_exists(const char *dataset)
{
	/*
	 * The objset_stats ioctl is still legacy, so we need to construct our
	 * own zfs_cmd_t rather than using lzc_ioctl().
	 */
	zfs_cmd_t zc = { 0 };

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
	return (ioctl(g_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0);
}
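
/*
 * Usage sketch (illustrative; the name is hypothetical): lzc_exists()
 * works for filesystems, volumes, and snapshots alike.
 *
 *	if (!lzc_exists("pool/fs@snap"))
 *		(void) fprintf(stderr, "snapshot is gone\n");
 */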

/*
 * Create "user holds" on snapshots.  If there is a hold on a snapshot,
 * the snapshot cannot be destroyed.  (However, it can be marked for deletion
 * by lzc_destroy_snaps(defer=B_TRUE).)
 *
 * The keys in the nvlist are snapshot names.
 * The snapshots must all be in the same pool.
 * The value is the name of the hold (string type).
 *
 * If cleanup_fd is not -1, it must be the result of open("/dev/zfs", O_EXCL).
 * In this case, when the cleanup_fd is closed (including on process
 * termination), the holds will be released.  If the system is shut down
 * uncleanly, the holds will be released when the pool is next opened
 * or imported.
 *
 * Holds for snapshots which don't exist will be skipped and have an entry
 * added to errlist, but will not cause an overall failure.
 *
 * The return value will be 0 if all holds, for snapshots that existed,
 * were successfully created.
 *
 * Otherwise the return value will be the errno of an (unspecified) hold that
 * failed and no holds will be created.
 *
 * In all cases the errlist will have an entry for each hold that failed
 * (name = snapshot), with its value being the error code (int32).
 */
int
lzc_hold(nvlist_t *holds, int cleanup_fd, nvlist_t **errlist)
{
	char pool[ZFS_MAX_DATASET_NAME_LEN];
	nvlist_t *args;
	nvpair_t *elem;
	int error;

	/* determine the pool name */
	elem = nvlist_next_nvpair(holds, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	args = fnvlist_alloc();
	fnvlist_add_nvlist(args, "holds", holds);
	if (cleanup_fd != -1)
		fnvlist_add_int32(args, "cleanup_fd", cleanup_fd);

	error = lzc_ioctl(ZFS_IOC_HOLD, pool, args, errlist);
	nvlist_free(args);
	return (error);
}
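
/*
 * Usage sketch (illustrative; names are hypothetical): place a hold
 * that the kernel drops automatically when cleanup_fd is closed.  The
 * O_RDWR | O_EXCL open mode shown here is an assumption based on how
 * libzfs opens its cleanup fd.
 *
 *	int cleanup_fd = open("/dev/zfs", O_RDWR | O_EXCL);
 *	nvlist_t *holds = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	fnvlist_add_string(holds, "pool/fs@snap", "backup-job");
 *	int err = lzc_hold(holds, cleanup_fd, &errlist);
 *	nvlist_free(holds);
 *	nvlist_free(errlist);
 *	(void) close(cleanup_fd);
 */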

/*
 * Release "user holds" on snapshots.  If the snapshot has been marked for
 * deferred destroy (by lzc_destroy_snaps(defer=B_TRUE)), it does not have
 * any clones, and all the user holds are removed, then the snapshot will be
 * destroyed.
 *
 * The keys in the nvlist are snapshot names.
 * The snapshots must all be in the same pool.
 * The value is an nvlist whose keys are the holds to remove.
 *
 * Holds which failed to release because they didn't exist will have an entry
 * added to errlist, but will not cause an overall failure.
 *
 * The return value will be 0 if the nvlist of holds was empty or if all
 * holds that existed were successfully removed.
 *
 * Otherwise the return value will be the errno of an (unspecified) hold that
 * failed to release and no holds will be released.
 *
 * In all cases the errlist will have an entry for each hold that failed
 * to release.
 */
int
lzc_release(nvlist_t *holds, nvlist_t **errlist)
{
	char pool[ZFS_MAX_DATASET_NAME_LEN];
	nvpair_t *elem;

	/* determine the pool name */
	elem = nvlist_next_nvpair(holds, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	return (lzc_ioctl(ZFS_IOC_RELEASE, pool, holds, errlist));
}
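
/*
 * Usage sketch (illustrative; names are hypothetical): release one
 * hold.  Note the nested nvlist: snapshot -> { hold names }.
 *
 *	nvlist_t *holds = fnvlist_alloc();
 *	nvlist_t *tags = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	fnvlist_add_boolean(tags, "backup-job");
 *	fnvlist_add_nvlist(holds, "pool/fs@snap", tags);
 *	int err = lzc_release(holds, &errlist);
 *	nvlist_free(tags);
 *	nvlist_free(holds);
 *	nvlist_free(errlist);
 */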

/*
 * Retrieve list of user holds on the specified snapshot.
 *
 * On success, *holdsp will be set to an nvlist which the caller must free.
 * The keys are the names of the holds, and the value is the creation time
 * of the hold (uint64) in seconds since the epoch.
 */
int
lzc_get_holds(const char *snapname, nvlist_t **holdsp)
{
	int error;
	nvlist_t *innvl = fnvlist_alloc();
	error = lzc_ioctl(ZFS_IOC_GET_HOLDS, snapname, innvl, holdsp);
	fnvlist_free(innvl);
	return (error);
}
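
/*
 * Usage sketch (illustrative; the name is hypothetical): list the
 * holds on a snapshot with their creation times.
 *
 *	nvlist_t *holds = NULL;
 *	nvpair_t *p;
 *	if (lzc_get_holds("pool/fs@snap", &holds) == 0) {
 *		for (p = nvlist_next_nvpair(holds, NULL); p != NULL;
 *		    p = nvlist_next_nvpair(holds, p))
 *			(void) printf("%s\t%llu\n", nvpair_name(p),
 *			    (u_longlong_t)fnvpair_value_uint64(p));
 *		nvlist_free(holds);
 *	}
 */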

/*
 * Generate a zfs send stream for the specified snapshot and write it to
 * the specified file descriptor.
 *
 * "snapname" is the full name of the snapshot to send (e.g. "pool/fs@snap")
 *
 * If "from" is NULL, a full (non-incremental) stream will be sent.
 * If "from" is non-NULL, it must be the full name of a snapshot or
 * bookmark to send an incremental from (e.g. "pool/fs@earlier_snap" or
 * "pool/fs#earlier_bmark").  If non-NULL, the specified snapshot or
 * bookmark must represent an earlier point in the history of "snapname".
 * It can be an earlier snapshot in the same filesystem or zvol as "snapname",
 * or it can be the origin of "snapname"'s filesystem, or an earlier
 * snapshot in the origin, etc.
 *
 * "fd" is the file descriptor to write the send stream to.
 *
 * If "flags" contains LZC_SEND_FLAG_LARGE_BLOCK, the stream is permitted
 * to contain DRR_WRITE records with drr_length > 128K, and DRR_OBJECT
 * records with drr_blksz > 128K.
 *
 * If "flags" contains LZC_SEND_FLAG_EMBED_DATA, the stream is permitted
 * to contain DRR_WRITE_EMBEDDED records with drr_etype==BP_EMBEDDED_TYPE_DATA,
 * which the receiving system must support (as indicated by support
 * for the "embedded_data" feature).
 */
int
lzc_send(const char *snapname, const char *from, int fd,
    enum lzc_send_flags flags)
{
	return (lzc_send_resume(snapname, from, fd, flags, 0, 0));
}

int
lzc_send_resume(const char *snapname, const char *from, int fd,
    enum lzc_send_flags flags, uint64_t resumeobj, uint64_t resumeoff)
{
	nvlist_t *args;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_int32(args, "fd", fd);
	if (from != NULL)
		fnvlist_add_string(args, "fromsnap", from);
	if (flags & LZC_SEND_FLAG_LARGE_BLOCK)
		fnvlist_add_boolean(args, "largeblockok");
	if (flags & LZC_SEND_FLAG_EMBED_DATA)
		fnvlist_add_boolean(args, "embedok");
	if (flags & LZC_SEND_FLAG_COMPRESS)
		fnvlist_add_boolean(args, "compressok");
	if (resumeobj != 0 || resumeoff != 0) {
		fnvlist_add_uint64(args, "resume_object", resumeobj);
		fnvlist_add_uint64(args, "resume_offset", resumeoff);
	}
	err = lzc_ioctl(ZFS_IOC_SEND_NEW, snapname, args, NULL);
	nvlist_free(args);
	return (err);
}
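
/*
 * Usage sketch (illustrative; names are hypothetical): write an
 * incremental, embedded-data-capable stream to stdout.
 *
 *	int err = lzc_send("pool/fs@today", "pool/fs@yesterday",
 *	    STDOUT_FILENO, LZC_SEND_FLAG_EMBED_DATA);
 */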

/*
 * "from" can be NULL, a snapshot, or a bookmark.
 *
 * If from is NULL, a full (non-incremental) stream will be estimated.  This
 * is calculated very efficiently.
 *
 * If from is a snapshot, lzc_send_space uses the deadlists attached to
 * each snapshot to efficiently estimate the stream size.
 *
 * If from is a bookmark, the indirect blocks in the destination snapshot
 * are traversed, looking for blocks with a birth time since the creation TXG of
 * the snapshot this bookmark was created from.  This will result in
 * significantly more I/O and be less efficient than a send space estimation on
 * an equivalent snapshot.
 */
int
lzc_send_space(const char *snapname, const char *from,
    enum lzc_send_flags flags, uint64_t *spacep)
{
	nvlist_t *args;
	nvlist_t *result;
	int err;

	args = fnvlist_alloc();
	if (from != NULL)
		fnvlist_add_string(args, "from", from);
	if (flags & LZC_SEND_FLAG_LARGE_BLOCK)
		fnvlist_add_boolean(args, "largeblockok");
	if (flags & LZC_SEND_FLAG_EMBED_DATA)
		fnvlist_add_boolean(args, "embedok");
	if (flags & LZC_SEND_FLAG_COMPRESS)
		fnvlist_add_boolean(args, "compressok");
	err = lzc_ioctl(ZFS_IOC_SEND_SPACE, snapname, args, &result);
	nvlist_free(args);
	if (err == 0)
		*spacep = fnvlist_lookup_uint64(result, "space");
	nvlist_free(result);
	return (err);
}
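
/*
 * Usage sketch (illustrative; names are hypothetical): estimate the
 * size of an incremental stream before actually sending it.
 *
 *	uint64_t space = 0;
 *	int err = lzc_send_space("pool/fs@today", "pool/fs@yesterday",
 *	    0, &space);
 *	if (err == 0)
 *		(void) printf("estimated stream size: %llu\n",
 *		    (u_longlong_t)space);
 */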

static int
recv_read(int fd, void *buf, int ilen)
{
	char *cp = buf;
	int rv;
	int len = ilen;

	do {
		rv = read(fd, cp, len);
		cp += rv;
		len -= rv;
	} while (rv > 0);

	if (rv < 0 || len != 0)
		return (EIO);

	return (0);
}

static int
recv_impl(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, boolean_t resumable, int fd,
    const dmu_replay_record_t *begin_record)
{
	/*
	 * The receive ioctl is still legacy, so we need to construct our own
	 * zfs_cmd_t rather than using lzc_ioctl().
	 */
	zfs_cmd_t zc = { 0 };
	char *atp;
	char *packed = NULL;
	size_t size;
	int error;

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	/* zc_name is name of containing filesystem */
	(void) strlcpy(zc.zc_name, snapname, sizeof (zc.zc_name));
	atp = strchr(zc.zc_name, '@');
	if (atp == NULL)
		return (EINVAL);
	*atp = '\0';

	/* if the fs does not exist, try its parent. */
	if (!lzc_exists(zc.zc_name)) {
		char *slashp = strrchr(zc.zc_name, '/');
		if (slashp == NULL)
			return (ENOENT);
		*slashp = '\0';
	}

	/* zc_value is full name of the snapshot to create */
	(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));

	if (props != NULL) {
		/* zc_nvlist_src is props to set */
		packed = fnvlist_pack(props, &size);
		zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
		zc.zc_nvlist_src_size = size;
	}

	/* zc_string is name of clone origin (if DRR_FLAG_CLONE) */
	if (origin != NULL)
		(void) strlcpy(zc.zc_string, origin, sizeof (zc.zc_string));

	/* zc_begin_record is non-byteswapped BEGIN record */
	if (begin_record == NULL) {
		error = recv_read(fd, &zc.zc_begin_record,
		    sizeof (zc.zc_begin_record));
		if (error != 0)
			goto out;
	} else {
		zc.zc_begin_record = *begin_record;
	}

	/* zc_cookie is fd to read from */
	zc.zc_cookie = fd;

	/* zc_guid is force flag */
	zc.zc_guid = force;

	zc.zc_resumable = resumable;

	/* zc_cleanup_fd is unused */
	zc.zc_cleanup_fd = -1;

	error = ioctl(g_fd, ZFS_IOC_RECV, &zc);
	if (error != 0)
		error = errno;

out:
	if (packed != NULL)
		fnvlist_pack_free(packed, size);
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (error);
}

/*
 * The simplest receive case: receive from the specified fd, creating the
 * specified snapshot.  Apply the specified properties as "received" properties
 * (which can be overridden by locally-set properties).  If the stream is a
 * clone, its origin snapshot must be specified by 'origin'.  The 'force'
 * flag will cause the target filesystem to be rolled back or destroyed if
 * necessary to receive.
 *
 * Return 0 on success or an errno on failure.
 *
 * Note: this interface does not work on dedup'd streams
 * (those with DMU_BACKUP_FEATURE_DEDUP).
 */
int
lzc_receive(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, int fd)
{
	return (recv_impl(snapname, props, origin, force, B_FALSE, fd, NULL));
}
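
/*
 * Usage sketch (illustrative; names are hypothetical): receive a
 * non-clone stream from stdin into a new snapshot, forcing a rollback
 * of the target filesystem if needed.
 *
 *	int err = lzc_receive("pool/restored@snap", NULL, NULL,
 *	    B_TRUE, STDIN_FILENO);
 */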

/*
 * Like lzc_receive, but if the receive fails due to premature stream
 * termination, the intermediate state will be preserved on disk.  In this
 * case, ECKSUM will be returned.  The receive may subsequently be resumed
 * with a resuming send stream generated by lzc_send_resume().
 */
int
lzc_receive_resumable(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, int fd)
{
	return (recv_impl(snapname, props, origin, force, B_TRUE, fd, NULL));
}

/*
 * Like lzc_receive, but allows the caller to read the begin record and then to
 * pass it in.  That could be useful if the caller wants to derive, for example,
 * the snapname or the origin parameters based on the information contained in
 * the begin record.
 * The begin record must be in its original form as read from the stream,
 * in other words, it should not be byteswapped.
 *
 * The 'resumable' parameter allows the caller to obtain the same behavior as
 * with lzc_receive_resumable.
 */
int
lzc_receive_with_header(const char *snapname, nvlist_t *props,
    const char *origin, boolean_t force, boolean_t resumable, int fd,
    const dmu_replay_record_t *begin_record)
{
	if (begin_record == NULL)
		return (EINVAL);
	return (recv_impl(snapname, props, origin, force, resumable, fd,
	    begin_record));
}

/*
 * Roll back this filesystem or volume to its most recent snapshot.
 * If snapnamebuf is not NULL, it will be filled in with the name
 * of the most recent snapshot.
 * Note that the latest snapshot may change if a new one is concurrently
 * created or the current one is destroyed.  lzc_rollback_to can be used
 * to roll back to a specific latest snapshot.
 *
 * Return 0 on success or an errno on failure.
 */
int
lzc_rollback(const char *fsname, char *snapnamebuf, int snapnamelen)
{
	nvlist_t *args;
	nvlist_t *result;
	int err;

	args = fnvlist_alloc();
	err = lzc_ioctl(ZFS_IOC_ROLLBACK, fsname, args, &result);
	nvlist_free(args);
	if (err == 0 && snapnamebuf != NULL) {
		const char *snapname = fnvlist_lookup_string(result, "target");
		(void) strlcpy(snapnamebuf, snapname, snapnamelen);
	}
	nvlist_free(result);

	return (err);
}
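
/*
 * Usage sketch (illustrative; the name is hypothetical): roll back and
 * report which snapshot we landed on.
 *
 *	char snap[ZFS_MAX_DATASET_NAME_LEN];
 *	if (lzc_rollback("pool/fs", snap, sizeof (snap)) == 0)
 *		(void) printf("rolled back to %s\n", snap);
 */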

/*
 * Roll back this filesystem or volume to the specified snapshot,
 * if possible.
 *
 * Return 0 on success or an errno on failure.
 */
int
lzc_rollback_to(const char *fsname, const char *snapname)
{
	nvlist_t *args;
	nvlist_t *result;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_string(args, "target", snapname);
	err = lzc_ioctl(ZFS_IOC_ROLLBACK, fsname, args, &result);
	nvlist_free(args);
	nvlist_free(result);
	return (err);
}

/*
 * Creates bookmarks.
 *
 * The bookmarks nvlist maps from name of the bookmark (e.g. "pool/fs#bmark") to
 * the name of the snapshot (e.g. "pool/fs@snap").  All the bookmarks and
 * snapshots must be in the same pool.
 *
 * The returned results nvlist will have an entry for each bookmark that failed.
 * The value will be the (int32) error code.
 *
 * The return value will be 0 if all bookmarks were created, otherwise it will
 * be the errno of an (undetermined) bookmark that failed.
 */
int
lzc_bookmark(nvlist_t *bookmarks, nvlist_t **errlist)
{
	nvpair_t *elem;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	/* determine the pool name */
	elem = nvlist_next_nvpair(bookmarks, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/#")] = '\0';

	error = lzc_ioctl(ZFS_IOC_BOOKMARK, pool, bookmarks, errlist);

	return (error);
}
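
/*
 * Usage sketch (illustrative; names are hypothetical): bookmark a
 * snapshot so it can later serve as an incremental send source even
 * after the snapshot itself is destroyed.
 *
 *	nvlist_t *bmarks = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	fnvlist_add_string(bmarks, "pool/fs#mark", "pool/fs@snap");
 *	int err = lzc_bookmark(bmarks, &errlist);
 *	nvlist_free(bmarks);
 *	nvlist_free(errlist);
 */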

/*
 * Retrieve bookmarks.
 *
 * Retrieve the list of bookmarks for the given file system.  The props
 * parameter is an nvlist of property names (with no values) that will be
 * returned for each bookmark.
 *
 * The following are valid properties on bookmarks, all of which are numbers
 * (represented as uint64 in the nvlist):
 *
 * "guid" - globally unique identifier of the snapshot it refers to
 * "createtxg" - txg when the snapshot it refers to was created
 * "creation" - timestamp when the snapshot it refers to was created
 *
 * The format of the returned nvlist is as follows:
 * <short name of bookmark> -> {
 *     <name of property> -> {
 *         "value" -> uint64
 *     }
 * }
 */
int
lzc_get_bookmarks(const char *fsname, nvlist_t *props, nvlist_t **bmarks)
{
	return (lzc_ioctl(ZFS_IOC_GET_BOOKMARKS, fsname, props, bmarks));
}
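
/*
 * Usage sketch (illustrative; names are hypothetical): request the
 * "createtxg" property for every bookmark on a filesystem and print
 * the bookmarks' short names.
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	nvlist_t *bmarks = NULL;
 *	nvpair_t *p;
 *	fnvlist_add_boolean(props, "createtxg");
 *	if (lzc_get_bookmarks("pool/fs", props, &bmarks) == 0) {
 *		for (p = nvlist_next_nvpair(bmarks, NULL); p != NULL;
 *		    p = nvlist_next_nvpair(bmarks, p))
 *			(void) printf("#%s\n", nvpair_name(p));
 *		nvlist_free(bmarks);
 *	}
 *	nvlist_free(props);
 */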

/*
 * Destroys bookmarks.
 *
 * The keys in the bmarks nvlist are the bookmarks to be destroyed.
 * They must all be in the same pool.  Bookmarks are specified as
 * <fs>#<bmark>.
 *
 * Bookmarks that do not exist will be silently ignored.
 *
 * The return value will be 0 if all bookmarks that existed were destroyed.
 *
 * Otherwise the return value will be the errno of an (undetermined) bookmark
 * that failed, no bookmarks will be destroyed, and the errlist will have an
 * entry for each bookmark that failed.  The value in the errlist will be
 * the (int32) error code.
 */
int
lzc_destroy_bookmarks(nvlist_t *bmarks, nvlist_t **errlist)
{
	nvpair_t *elem;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	/* determine the pool name */
	elem = nvlist_next_nvpair(bmarks, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/#")] = '\0';

	error = lzc_ioctl(ZFS_IOC_DESTROY_BOOKMARKS, pool, bmarks, errlist);

	return (error);
}

static int
lzc_channel_program_impl(const char *pool, const char *program, boolean_t sync,
    uint64_t instrlimit, uint64_t memlimit, nvlist_t *argnvl, nvlist_t **outnvl)
{
	int error;
	nvlist_t *args;

	args = fnvlist_alloc();
	fnvlist_add_string(args, ZCP_ARG_PROGRAM, program);
	fnvlist_add_nvlist(args, ZCP_ARG_ARGLIST, argnvl);
	fnvlist_add_boolean_value(args, ZCP_ARG_SYNC, sync);
	fnvlist_add_uint64(args, ZCP_ARG_INSTRLIMIT, instrlimit);
	fnvlist_add_uint64(args, ZCP_ARG_MEMLIMIT, memlimit);
	error = lzc_ioctl(ZFS_IOC_CHANNEL_PROGRAM, pool, args, outnvl);
	fnvlist_free(args);

	return (error);
}

/*
 * Executes a channel program.
 *
 * If this function returns 0 the channel program was successfully loaded and
 * ran without failing.  Note that individual commands the channel program ran
 * may have failed and the channel program is responsible for reporting such
 * errors through outnvl if they are important.
 *
 * This method may also return:
 *
 * EINVAL   The program contains syntax errors, or an invalid memory or time
 *          limit was given.  No part of the channel program was executed.
 *          If caused by syntax errors, 'outnvl' contains information about the
 *          errors.
 *
 * EDOM     The program was executed, but encountered a runtime error, such as
 *          calling a function with incorrect arguments, invoking the error()
 *          function directly, failing an assert() command, etc.  Some portion
 *          of the channel program may have executed and committed changes.
 *          Information about the failure can be found in 'outnvl'.
 *
 * ENOMEM   The program fully executed, but the output buffer was not large
 *          enough to store the returned value.  No output is returned through
 *          'outnvl'.
 *
 * ENOSPC   The program was terminated because it exceeded its memory usage
 *          limit.  Some portion of the channel program may have executed and
 *          committed changes to disk.  No output is returned through 'outnvl'.
 *
 * ETIMEDOUT The program was terminated because it exceeded its Lua instruction
 *           limit.  Some portion of the channel program may have executed and
 *           committed changes to disk.  No output is returned through 'outnvl'.
 */
int
lzc_channel_program(const char *pool, const char *program, uint64_t instrlimit,
    uint64_t memlimit, nvlist_t *argnvl, nvlist_t **outnvl)
{
	return (lzc_channel_program_impl(pool, program, B_TRUE, instrlimit,
	    memlimit, argnvl, outnvl));
}
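
/*
 * Usage sketch (illustrative; the pool name, limits, and Lua argument
 * handling are assumptions for the example): run a trivial program
 * that returns one of its arguments.
 *
 *	nvlist_t *argnvl = fnvlist_alloc();
 *	nvlist_t *outnvl = NULL;
 *	fnvlist_add_string(argnvl, "greeting", "hello");
 *	int err = lzc_channel_program("pool",
 *	    "args = ...\nreturn args['greeting']",
 *	    10 * 1000 * 1000, 10 * 1024 * 1024, argnvl, &outnvl);
 *	nvlist_free(argnvl);
 *	nvlist_free(outnvl);
 */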

/*
 * Executes a read-only channel program.
 *
 * A read-only channel program works programmatically the same way as a
 * normal channel program executed with lzc_channel_program().  The only
 * difference is it runs exclusively in open-context and therefore can
 * return faster.  The downside is that the program cannot change on-disk
 * state by calling functions from the zfs.sync submodule.
 *
 * The return values of this function (and their meaning) are exactly the
 * same as the ones described in lzc_channel_program().
 */
int
lzc_channel_program_nosync(const char *pool, const char *program,
    uint64_t timeout, uint64_t memlimit, nvlist_t *argnvl, nvlist_t **outnvl)
{
	return (lzc_channel_program_impl(pool, program, B_FALSE, timeout,
	    memlimit, argnvl, outnvl));
}