/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2012 DEY Storage Systems, Inc. All rights reserved.
 * Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
 * Copyright (c) 2013 Martin Matuska. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 * Copyright 2017-2018 RackTop Systems.
 * Copyright (c) 2019 Datto Inc.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
 * Copyright (c) 2021 Matt Fiddaman
 */

#include <ctype.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <stddef.h>
#include <zone.h>
#include <fcntl.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <pwd.h>
#include <grp.h>
#ifdef HAVE_IDMAP
#include <idmap.h>
#include <aclutils.h>
#include <directory.h>
#endif /* HAVE_IDMAP */

#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/dsl_crypt.h>
#include <libzfs.h>
#include <libzutil.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_deleg.h"

static int userquota_propname_decode(const char *propname, boolean_t zoned,
    zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp);

/*
 * Given a single type (not a mask of types), return the type in a human
 * readable form.
 */
const char *
zfs_type_to_name(zfs_type_t type)
{
	switch (type) {
	case ZFS_TYPE_FILESYSTEM:
		return (dgettext(TEXT_DOMAIN, "filesystem"));
	case ZFS_TYPE_SNAPSHOT:
		return (dgettext(TEXT_DOMAIN, "snapshot"));
	case ZFS_TYPE_VOLUME:
		return (dgettext(TEXT_DOMAIN, "volume"));
	case ZFS_TYPE_POOL:
		return (dgettext(TEXT_DOMAIN, "pool"));
	case ZFS_TYPE_BOOKMARK:
		return (dgettext(TEXT_DOMAIN, "bookmark"));
	default:
		assert(!"unhandled zfs_type_t");
	}

	return (NULL);
}
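
/*
 * Usage sketch (editorial addition, hypothetical caller): the returned
 * string is typically embedded in user-facing messages, e.g.:
 *
 *	(void) fprintf(stderr, gettext("cannot open '%s': not a %s\n"),
 *	    path, zfs_type_to_name(ZFS_TYPE_FILESYSTEM));
 */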

/*
 * Validate a ZFS path.  This is used even before trying to open the dataset,
 * to provide a more meaningful error message.  We call zfs_error_aux() to
 * explain exactly why the name was not valid.
 */
int
zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type,
    boolean_t modifying)
{
	namecheck_err_t why;
	char what;

	if (!(type & ZFS_TYPE_SNAPSHOT) && strchr(path, '@') != NULL) {
		if (hdl != NULL)
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "snapshot delimiter '@' is not expected here"));
		return (0);
	}

	if (type == ZFS_TYPE_SNAPSHOT && strchr(path, '@') == NULL) {
		if (hdl != NULL)
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "missing '@' delimiter in snapshot name"));
		return (0);
	}

	if (!(type & ZFS_TYPE_BOOKMARK) && strchr(path, '#') != NULL) {
		if (hdl != NULL)
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "bookmark delimiter '#' is not expected here"));
		return (0);
	}

	if (type == ZFS_TYPE_BOOKMARK && strchr(path, '#') == NULL) {
		if (hdl != NULL)
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "missing '#' delimiter in bookmark name"));
		return (0);
	}

	if (modifying && strchr(path, '%') != NULL) {
		if (hdl != NULL)
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid character %c in name"), '%');
		return (0);
	}

	if (entity_namecheck(path, &why, &what) != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is too long"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component or misplaced '@'"
				    " or '#' delimiter in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in name"), what);
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool doesn't begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "reserved disk name"));
				break;

			case NAME_ERR_SELF_REF:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "self reference, '.' is found in name"));
				break;

			case NAME_ERR_PARENT_REF:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "parent reference, '..' is found in name"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}

		return (0);
	}

	return (-1);
}

int
zfs_name_valid(const char *name, zfs_type_t type)
{
	if (type == ZFS_TYPE_POOL)
		return (zpool_name_valid(NULL, B_FALSE, name));
	return (zfs_validate_name(NULL, name, type, B_FALSE));
}
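
/*
 * Usage sketch (editorial addition, hypothetical caller and names):
 * zfs_name_valid() lets front-end code reject a name before issuing any
 * ioctls, e.g.:
 *
 *	if (!zfs_name_valid("tank/home@monday", ZFS_TYPE_SNAPSHOT))
 *		return (usage(B_FALSE));
 */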

/*
 * This function takes the raw DSL properties, and filters out the user-defined
 * properties into a separate nvlist.
 */
static nvlist_t *
process_user_props(zfs_handle_t *zhp, nvlist_t *props)
{
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	nvpair_t *elem;
	nvlist_t *nvl;

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		if (!zfs_prop_user(nvpair_name(elem)))
			continue;

		nvlist_t *propval = fnvpair_value_nvlist(elem);
		if (nvlist_add_nvlist(nvl, nvpair_name(elem), propval) != 0) {
			nvlist_free(nvl);
			(void) no_memory(hdl);
			return (NULL);
		}
	}

	return (nvl);
}
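
/*
 * Editorial note (not in the original source): zfs_prop_user() matches
 * user-defined property names, which by convention contain a colon,
 * e.g. "com.example:backup"; native properties such as "compression"
 * do not match and are not copied into the returned nvlist.
 */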

static zpool_handle_t *
zpool_add_handle(zfs_handle_t *zhp, const char *pool_name)
{
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	zpool_handle_t *zph;

	if ((zph = zpool_open_canfail(hdl, pool_name)) != NULL) {
		if (hdl->libzfs_pool_handles != NULL)
			zph->zpool_next = hdl->libzfs_pool_handles;
		hdl->libzfs_pool_handles = zph;
	}
	return (zph);
}

static zpool_handle_t *
zpool_find_handle(zfs_handle_t *zhp, const char *pool_name, int len)
{
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	zpool_handle_t *zph = hdl->libzfs_pool_handles;

	while ((zph != NULL) &&
	    (strncmp(pool_name, zpool_get_name(zph), len) != 0))
		zph = zph->zpool_next;
	return (zph);
}

/*
 * Returns a handle to the pool that contains the provided dataset.
 * If a handle to that pool already exists then that handle is returned.
 * Otherwise, a new handle is created and added to the list of handles.
 */
static zpool_handle_t *
zpool_handle(zfs_handle_t *zhp)
{
	char *pool_name;
	int len;
	zpool_handle_t *zph;

	len = strcspn(zhp->zfs_name, "/@#") + 1;
	pool_name = zfs_alloc(zhp->zfs_hdl, len);
	(void) strlcpy(pool_name, zhp->zfs_name, len);

	zph = zpool_find_handle(zhp, pool_name, len);
	if (zph == NULL)
		zph = zpool_add_handle(zhp, pool_name);

	free(pool_name);
	return (zph);
}

void
zpool_free_handles(libzfs_handle_t *hdl)
{
	zpool_handle_t *next, *zph = hdl->libzfs_pool_handles;

	while (zph != NULL) {
		next = zph->zpool_next;
		zpool_close(zph);
		zph = next;
	}
	hdl->libzfs_pool_handles = NULL;
}

/*
 * Utility function to gather stats (objset and zpl) for the given object.
 */
static int
get_stats_ioctl(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
	libzfs_handle_t *hdl = zhp->zfs_hdl;

	(void) strlcpy(zc->zc_name, zhp->zfs_name, sizeof (zc->zc_name));

	while (zfs_ioctl(hdl, ZFS_IOC_OBJSET_STATS, zc) != 0) {
		if (errno == ENOMEM)
			zcmd_expand_dst_nvlist(hdl, zc);
		else
			return (-1);
	}
	return (0);
}

/*
 * Utility function to get the received properties of the given object.
 */
static int
get_recvd_props_ioctl(zfs_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	nvlist_t *recvdprops;
	zfs_cmd_t zc = {"\0"};
	int err;

	zcmd_alloc_dst_nvlist(hdl, &zc, 0);

	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));

	while (zfs_ioctl(hdl, ZFS_IOC_OBJSET_RECVD_PROPS, &zc) != 0) {
		if (errno == ENOMEM)
			zcmd_expand_dst_nvlist(hdl, &zc);
		else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	err = zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &recvdprops);
	zcmd_free_nvlists(&zc);
	if (err != 0)
		return (-1);

	nvlist_free(zhp->zfs_recvd_props);
	zhp->zfs_recvd_props = recvdprops;

	return (0);
}

static int
put_stats_zhdl(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
	nvlist_t *allprops, *userprops;

	zhp->zfs_dmustats = zc->zc_objset_stats; /* structure assignment */

	if (zcmd_read_dst_nvlist(zhp->zfs_hdl, zc, &allprops) != 0) {
		return (-1);
	}

	/*
	 * XXX Why do we store the user props separately, in addition to
	 * storing them in zfs_props?
	 */
	if ((userprops = process_user_props(zhp, allprops)) == NULL) {
		nvlist_free(allprops);
		return (-1);
	}

	nvlist_free(zhp->zfs_props);
	nvlist_free(zhp->zfs_user_props);

	zhp->zfs_props = allprops;
	zhp->zfs_user_props = userprops;

	return (0);
}

static int
get_stats(zfs_handle_t *zhp)
{
	int rc = 0;
	zfs_cmd_t zc = {"\0"};

	zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0);

	if (get_stats_ioctl(zhp, &zc) != 0)
		rc = -1;
	else if (put_stats_zhdl(zhp, &zc) != 0)
		rc = -1;
	zcmd_free_nvlists(&zc);
	return (rc);
}

/*
 * Refresh the properties currently stored in the handle.
 */
void
zfs_refresh_properties(zfs_handle_t *zhp)
{
	(void) get_stats(zhp);
}
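
/*
 * Usage sketch (editorial addition, assumed caller): a long-lived handle
 * can be re-synchronized with the kernel before reading a property that
 * may have changed since the handle was opened, e.g.:
 *
 *	zfs_refresh_properties(zhp);
 *	used = zfs_prop_get_int(zhp, ZFS_PROP_USED);
 */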

/*
 * Makes a handle from the given dataset name.  Used by zfs_open() and
 * zfs_iter_* to create child handles on the fly.
 */
static int
make_dataset_handle_common(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
	if (put_stats_zhdl(zhp, zc) != 0)
		return (-1);

	/*
	 * We've managed to open the dataset and gather statistics.  Determine
	 * the high-level type.
	 */
	if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL) {
		zhp->zfs_head_type = ZFS_TYPE_VOLUME;
	} else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS) {
		zhp->zfs_head_type = ZFS_TYPE_FILESYSTEM;
	} else if (zhp->zfs_dmustats.dds_type == DMU_OST_OTHER) {
		errno = EINVAL;
		return (-1);
	} else if (zhp->zfs_dmustats.dds_inconsistent) {
		errno = EBUSY;
		return (-1);
	} else {
		abort();
	}

	if (zhp->zfs_dmustats.dds_is_snapshot)
		zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
	else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
		zhp->zfs_type = ZFS_TYPE_VOLUME;
	else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
		zhp->zfs_type = ZFS_TYPE_FILESYSTEM;
	else
		abort();	/* we should never see any other types */

	if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL)
		return (-1);

	return (0);
}

zfs_handle_t *
make_dataset_handle(libzfs_handle_t *hdl, const char *path)
{
	zfs_cmd_t zc = {"\0"};

	zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));

	if (zhp == NULL)
		return (NULL);

	zhp->zfs_hdl = hdl;
	(void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));
	zcmd_alloc_dst_nvlist(hdl, &zc, 0);

	if (get_stats_ioctl(zhp, &zc) == -1) {
		zcmd_free_nvlists(&zc);
		free(zhp);
		return (NULL);
	}
	if (make_dataset_handle_common(zhp, &zc) == -1) {
		free(zhp);
		zhp = NULL;
	}
	zcmd_free_nvlists(&zc);
	return (zhp);
}

zfs_handle_t *
make_dataset_handle_zc(libzfs_handle_t *hdl, zfs_cmd_t *zc)
{
	zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));

	if (zhp == NULL)
		return (NULL);

	zhp->zfs_hdl = hdl;
	(void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name));
	if (make_dataset_handle_common(zhp, zc) == -1) {
		free(zhp);
		return (NULL);
	}
	return (zhp);
}

zfs_handle_t *
make_dataset_simple_handle_zc(zfs_handle_t *pzhp, zfs_cmd_t *zc)
{
	zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));

	if (zhp == NULL)
		return (NULL);

	zhp->zfs_hdl = pzhp->zfs_hdl;
	(void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name));
	zhp->zfs_head_type = pzhp->zfs_type;
	zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
	zhp->zpool_hdl = zpool_handle(zhp);

	if (zc->zc_objset_stats.dds_creation_txg != 0) {
		/* structure assignment */
		zhp->zfs_dmustats = zc->zc_objset_stats;
	} else {
		if (get_stats_ioctl(zhp, zc) == -1) {
			zcmd_free_nvlists(zc);
			free(zhp);
			return (NULL);
		}
		if (make_dataset_handle_common(zhp, zc) == -1) {
			zcmd_free_nvlists(zc);
			free(zhp);
			return (NULL);
		}
	}

	if (zhp->zfs_dmustats.dds_is_snapshot ||
	    strchr(zc->zc_name, '@') != NULL)
		zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
	else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
		zhp->zfs_type = ZFS_TYPE_VOLUME;
	else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
		zhp->zfs_type = ZFS_TYPE_FILESYSTEM;

	return (zhp);
}

zfs_handle_t *
zfs_handle_dup(zfs_handle_t *zhp_orig)
{
	zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));

	if (zhp == NULL)
		return (NULL);

	zhp->zfs_hdl = zhp_orig->zfs_hdl;
	zhp->zpool_hdl = zhp_orig->zpool_hdl;
	(void) strlcpy(zhp->zfs_name, zhp_orig->zfs_name,
	    sizeof (zhp->zfs_name));
	zhp->zfs_type = zhp_orig->zfs_type;
	zhp->zfs_head_type = zhp_orig->zfs_head_type;
	zhp->zfs_dmustats = zhp_orig->zfs_dmustats;
	if (zhp_orig->zfs_props != NULL) {
		if (nvlist_dup(zhp_orig->zfs_props, &zhp->zfs_props, 0) != 0) {
			(void) no_memory(zhp->zfs_hdl);
			zfs_close(zhp);
			return (NULL);
		}
	}
	if (zhp_orig->zfs_user_props != NULL) {
		if (nvlist_dup(zhp_orig->zfs_user_props,
		    &zhp->zfs_user_props, 0) != 0) {
			(void) no_memory(zhp->zfs_hdl);
			zfs_close(zhp);
			return (NULL);
		}
	}
	if (zhp_orig->zfs_recvd_props != NULL) {
		if (nvlist_dup(zhp_orig->zfs_recvd_props,
		    &zhp->zfs_recvd_props, 0)) {
			(void) no_memory(zhp->zfs_hdl);
			zfs_close(zhp);
			return (NULL);
		}
	}
	zhp->zfs_mntcheck = zhp_orig->zfs_mntcheck;
	if (zhp_orig->zfs_mntopts != NULL) {
		zhp->zfs_mntopts = zfs_strdup(zhp_orig->zfs_hdl,
		    zhp_orig->zfs_mntopts);
	}
	zhp->zfs_props_table = zhp_orig->zfs_props_table;
	return (zhp);
}

boolean_t
zfs_bookmark_exists(const char *path)
{
	nvlist_t *bmarks;
	nvlist_t *props;
	char fsname[ZFS_MAX_DATASET_NAME_LEN];
	char *bmark_name;
	char *pound;
	int err;
	boolean_t rv;

	(void) strlcpy(fsname, path, sizeof (fsname));
	pound = strchr(fsname, '#');
	if (pound == NULL)
		return (B_FALSE);

	*pound = '\0';
	bmark_name = pound + 1;
	props = fnvlist_alloc();
	err = lzc_get_bookmarks(fsname, props, &bmarks);
	nvlist_free(props);
	if (err != 0) {
		nvlist_free(bmarks);
		return (B_FALSE);
	}

	rv = nvlist_exists(bmarks, bmark_name);
	nvlist_free(bmarks);
	return (rv);
}
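
/*
 * Usage sketch (editorial addition, hypothetical names): the
 * "pool/fs#mark" form can be probed before attempting a bookmark-based
 * operation, e.g.:
 *
 *	if (zfs_bookmark_exists("tank/home#before-upgrade"))
 *		... use the bookmark as an incremental send source ...
 */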

zfs_handle_t *
make_bookmark_handle(zfs_handle_t *parent, const char *path,
    nvlist_t *bmark_props)
{
	zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));

	if (zhp == NULL)
		return (NULL);

	/* Fill in the name. */
	zhp->zfs_hdl = parent->zfs_hdl;
	(void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));

	/* Set the property lists. */
	if (nvlist_dup(bmark_props, &zhp->zfs_props, 0) != 0) {
		free(zhp);
		return (NULL);
	}

	/* Set the types. */
	zhp->zfs_head_type = parent->zfs_head_type;
	zhp->zfs_type = ZFS_TYPE_BOOKMARK;

	if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL) {
		nvlist_free(zhp->zfs_props);
		free(zhp);
		return (NULL);
	}

	return (zhp);
}

struct zfs_open_bookmarks_cb_data {
	const char *path;
	zfs_handle_t *zhp;
};

static int
zfs_open_bookmarks_cb(zfs_handle_t *zhp, void *data)
{
	struct zfs_open_bookmarks_cb_data *dp = data;

	/*
	 * Is it the one we are looking for?
	 */
	if (strcmp(dp->path, zfs_get_name(zhp)) == 0) {
		/*
		 * We found it.  Save it and let the caller know we are done.
		 */
		dp->zhp = zhp;
		return (EEXIST);
	}

	/*
	 * Not found.  Close the handle and ask for another one.
	 */
	zfs_close(zhp);
	return (0);
}

/*
 * Opens the given snapshot, bookmark, filesystem, or volume.  The 'types'
 * argument is a mask of acceptable types.  The function will print an
 * appropriate error message and return NULL if it can't be opened.
 */
zfs_handle_t *
zfs_open(libzfs_handle_t *hdl, const char *path, int types)
{
	zfs_handle_t *zhp;
	char errbuf[ERRBUFLEN];
	char *bookp;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot open '%s'"), path);

	/*
	 * Validate the name before we even try to open it.
	 */
	if (!zfs_validate_name(hdl, path, types, B_FALSE)) {
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		errno = EINVAL;
		return (NULL);
	}

	/*
	 * Bookmarks need to be handled separately.
	 */
	bookp = strchr(path, '#');
	if (bookp == NULL) {
		/*
		 * Try to get stats for the dataset, which will tell us if it
		 * exists.
		 */
		errno = 0;
		if ((zhp = make_dataset_handle(hdl, path)) == NULL) {
			(void) zfs_standard_error(hdl, errno, errbuf);
			return (NULL);
		}
	} else {
		char dsname[ZFS_MAX_DATASET_NAME_LEN];
		zfs_handle_t *pzhp;
		struct zfs_open_bookmarks_cb_data cb_data = {path, NULL};

		/*
		 * We need to cut out '#' and everything after '#'
		 * to get the parent dataset name only.
		 */
		assert(bookp - path < sizeof (dsname));
		(void) strlcpy(dsname, path,
		    MIN(sizeof (dsname), bookp - path + 1));

		/*
		 * Create handle for the parent dataset.
		 */
		errno = 0;
		if ((pzhp = make_dataset_handle(hdl, dsname)) == NULL) {
			(void) zfs_standard_error(hdl, errno, errbuf);
			return (NULL);
		}

		/*
		 * Iterate bookmarks to find the right one.
		 */
		errno = 0;
		if ((zfs_iter_bookmarks_v2(pzhp, 0, zfs_open_bookmarks_cb,
		    &cb_data) == 0) && (cb_data.zhp == NULL)) {
			(void) zfs_error(hdl, EZFS_NOENT, errbuf);
			zfs_close(pzhp);
			errno = ENOENT;
			return (NULL);
		}
		if (cb_data.zhp == NULL) {
			(void) zfs_standard_error(hdl, errno, errbuf);
			zfs_close(pzhp);
			return (NULL);
		}
		zhp = cb_data.zhp;

		/*
		 * Cleanup.
		 */
		zfs_close(pzhp);
	}

	if (!(types & zhp->zfs_type)) {
		(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
		zfs_close(zhp);
		errno = EINVAL;
		return (NULL);
	}

	return (zhp);
}
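
/*
 * Usage sketch (editorial addition, hypothetical caller): open a dataset
 * of any acceptable type, operate on it, and release the handle:
 *
 *	zfs_handle_t *zhp = zfs_open(hdl, "tank/home",
 *	    ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
 *	if (zhp == NULL)
 *		return (1);
 *	...
 *	zfs_close(zhp);
 */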

/*
 * Release a ZFS handle.  Nothing to do but free the associated memory.
 */
void
zfs_close(zfs_handle_t *zhp)
{
	if (zhp->zfs_mntopts)
		free(zhp->zfs_mntopts);
	nvlist_free(zhp->zfs_props);
	nvlist_free(zhp->zfs_user_props);
	nvlist_free(zhp->zfs_recvd_props);
	free(zhp);
}

typedef struct mnttab_node {
	struct mnttab mtn_mt;
	avl_node_t mtn_node;
} mnttab_node_t;

static int
libzfs_mnttab_cache_compare(const void *arg1, const void *arg2)
{
	const mnttab_node_t *mtn1 = (const mnttab_node_t *)arg1;
	const mnttab_node_t *mtn2 = (const mnttab_node_t *)arg2;
	int rv;

	rv = strcmp(mtn1->mtn_mt.mnt_special, mtn2->mtn_mt.mnt_special);

	return (TREE_ISIGN(rv));
}

void
libzfs_mnttab_init(libzfs_handle_t *hdl)
{
	pthread_mutex_init(&hdl->libzfs_mnttab_cache_lock, NULL);
	assert(avl_numnodes(&hdl->libzfs_mnttab_cache) == 0);
	avl_create(&hdl->libzfs_mnttab_cache, libzfs_mnttab_cache_compare,
	    sizeof (mnttab_node_t), offsetof(mnttab_node_t, mtn_node));
}

static int
libzfs_mnttab_update(libzfs_handle_t *hdl)
{
	FILE *mnttab;
	struct mnttab entry;

	if ((mnttab = fopen(MNTTAB, "re")) == NULL)
		return (ENOENT);

	while (getmntent(mnttab, &entry) == 0) {
		mnttab_node_t *mtn;
		avl_index_t where;

		if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
			continue;

		mtn = zfs_alloc(hdl, sizeof (mnttab_node_t));
		mtn->mtn_mt.mnt_special = zfs_strdup(hdl, entry.mnt_special);
		mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, entry.mnt_mountp);
		mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, entry.mnt_fstype);
		mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, entry.mnt_mntopts);

		/* Exclude duplicate mounts */
		if (avl_find(&hdl->libzfs_mnttab_cache, mtn, &where) != NULL) {
			free(mtn->mtn_mt.mnt_special);
			free(mtn->mtn_mt.mnt_mountp);
			free(mtn->mtn_mt.mnt_fstype);
			free(mtn->mtn_mt.mnt_mntopts);
			free(mtn);
			continue;
		}

		avl_add(&hdl->libzfs_mnttab_cache, mtn);
	}

	(void) fclose(mnttab);
	return (0);
}

void
libzfs_mnttab_fini(libzfs_handle_t *hdl)
{
	void *cookie = NULL;
	mnttab_node_t *mtn;

	while ((mtn = avl_destroy_nodes(&hdl->libzfs_mnttab_cache, &cookie))
	    != NULL) {
		free(mtn->mtn_mt.mnt_special);
		free(mtn->mtn_mt.mnt_mountp);
		free(mtn->mtn_mt.mnt_fstype);
		free(mtn->mtn_mt.mnt_mntopts);
		free(mtn);
	}
	avl_destroy(&hdl->libzfs_mnttab_cache);
	(void) pthread_mutex_destroy(&hdl->libzfs_mnttab_cache_lock);
}

void
libzfs_mnttab_cache(libzfs_handle_t *hdl, boolean_t enable)
{
	hdl->libzfs_mnttab_enable = enable;
}
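
/*
 * Usage sketch (editorial addition, assumed caller): batch operations
 * such as "zfs mount -a" can enable the cache so that repeated
 * libzfs_mnttab_find() calls avoid rescanning MNTTAB, then disable it
 * when done:
 *
 *	libzfs_mnttab_cache(hdl, B_TRUE);
 *	... mount many datasets ...
 *	libzfs_mnttab_cache(hdl, B_FALSE);
 */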

int
libzfs_mnttab_find(libzfs_handle_t *hdl, const char *fsname,
    struct mnttab *entry)
{
	FILE *mnttab;
	mnttab_node_t find;
	mnttab_node_t *mtn;
	int ret = ENOENT;

	if (!hdl->libzfs_mnttab_enable) {
		struct mnttab srch = { 0 };

		if (avl_numnodes(&hdl->libzfs_mnttab_cache))
			libzfs_mnttab_fini(hdl);

		if ((mnttab = fopen(MNTTAB, "re")) == NULL)
			return (ENOENT);

		srch.mnt_special = (char *)fsname;
		srch.mnt_fstype = (char *)MNTTYPE_ZFS;
		ret = getmntany(mnttab, entry, &srch) ? ENOENT : 0;
		(void) fclose(mnttab);
		return (ret);
	}

	pthread_mutex_lock(&hdl->libzfs_mnttab_cache_lock);
	if (avl_numnodes(&hdl->libzfs_mnttab_cache) == 0) {
		int error;

		if ((error = libzfs_mnttab_update(hdl)) != 0) {
			pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
			return (error);
		}
	}

	find.mtn_mt.mnt_special = (char *)fsname;
	mtn = avl_find(&hdl->libzfs_mnttab_cache, &find, NULL);
	if (mtn) {
		*entry = mtn->mtn_mt;
		ret = 0;
	}
	pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
	return (ret);
}
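
/*
 * Usage sketch (editorial addition, assumed caller): check whether a
 * dataset is currently mounted and where:
 *
 *	struct mnttab entry;
 *	if (libzfs_mnttab_find(hdl, zfs_get_name(zhp), &entry) == 0)
 *		(void) printf("mounted at %s\n", entry.mnt_mountp);
 */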

void
libzfs_mnttab_add(libzfs_handle_t *hdl, const char *special,
    const char *mountp, const char *mntopts)
{
	mnttab_node_t *mtn;

	pthread_mutex_lock(&hdl->libzfs_mnttab_cache_lock);
	if (avl_numnodes(&hdl->libzfs_mnttab_cache) != 0) {
		mtn = zfs_alloc(hdl, sizeof (mnttab_node_t));
		mtn->mtn_mt.mnt_special = zfs_strdup(hdl, special);
		mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, mountp);
		mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, MNTTYPE_ZFS);
		mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, mntopts);
		/*
		 * Another thread may have already added this entry
		 * via libzfs_mnttab_update. If so we should skip it.
		 */
		if (avl_find(&hdl->libzfs_mnttab_cache, mtn, NULL) != NULL) {
			free(mtn->mtn_mt.mnt_special);
			free(mtn->mtn_mt.mnt_mountp);
			free(mtn->mtn_mt.mnt_fstype);
			free(mtn->mtn_mt.mnt_mntopts);
			free(mtn);
		} else {
			avl_add(&hdl->libzfs_mnttab_cache, mtn);
		}
	}
	pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
}

void
libzfs_mnttab_remove(libzfs_handle_t *hdl, const char *fsname)
{
	mnttab_node_t find;
	mnttab_node_t *ret;

	pthread_mutex_lock(&hdl->libzfs_mnttab_cache_lock);
	find.mtn_mt.mnt_special = (char *)fsname;
	if ((ret = avl_find(&hdl->libzfs_mnttab_cache, (void *)&find, NULL))
	    != NULL) {
		avl_remove(&hdl->libzfs_mnttab_cache, ret);
		free(ret->mtn_mt.mnt_special);
		free(ret->mtn_mt.mnt_mountp);
		free(ret->mtn_mt.mnt_fstype);
		free(ret->mtn_mt.mnt_mntopts);
		free(ret);
	}
	pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
}

int
zfs_spa_version(zfs_handle_t *zhp, int *spa_version)
{
	zpool_handle_t *zpool_handle = zhp->zpool_hdl;

	if (zpool_handle == NULL)
		return (-1);

	*spa_version = zpool_get_prop_int(zpool_handle,
	    ZPOOL_PROP_VERSION, NULL);
	return (0);
}

/*
 * The choice of reservation property depends on the SPA version.
 */
static int
zfs_which_resv_prop(zfs_handle_t *zhp, zfs_prop_t *resv_prop)
{
	int spa_version;

	if (zfs_spa_version(zhp, &spa_version) < 0)
		return (-1);

	if (spa_version >= SPA_VERSION_REFRESERVATION)
		*resv_prop = ZFS_PROP_REFRESERVATION;
	else
		*resv_prop = ZFS_PROP_RESERVATION;

	return (0);
}
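
/*
 * Usage sketch (editorial addition, assumed internal caller): code in
 * this library that adjusts a volume's reservation first asks which
 * property applies on this pool version, e.g.:
 *
 *	zfs_prop_t resv_prop;
 *	uint64_t old_resv;
 *	if (zfs_which_resv_prop(zhp, &resv_prop) == 0)
 *		old_resv = zfs_prop_get_int(zhp, resv_prop);
 */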
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Given an nvlist of properties to set, validates that they are correct, and
|
|
|
|
* parses any numeric properties (index, boolean, etc) if they are specified as
|
|
|
|
* strings.
|
|
|
|
*/
|
2008-12-03 20:09:06 +00:00
|
|
|
nvlist_t *
|
|
|
|
zfs_valid_proplist(libzfs_handle_t *hdl, zfs_type_t type, nvlist_t *nvl,
|
2016-01-13 23:05:59 +00:00
|
|
|
uint64_t zoned, zfs_handle_t *zhp, zpool_handle_t *zpool_hdl,
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
boolean_t key_params_ok, const char *errbuf)
|
2008-11-20 20:01:55 +00:00
|
|
|
{
|
|
|
|
nvpair_t *elem;
|
|
|
|
uint64_t intval;
|
2023-03-11 18:39:24 +00:00
|
|
|
const char *strval;
|
2008-11-20 20:01:55 +00:00
|
|
|
zfs_prop_t prop;
|
|
|
|
nvlist_t *ret;
|
|
|
|
int chosen_normal = -1;
|
|
|
|
int chosen_utf = -1;
|
2023-05-12 16:12:28 +00:00
|
|
|
int set_maxbs = 0;
|
2008-11-20 20:01:55 +00:00
|
|
|
|
|
|
|
if (nvlist_alloc(&ret, NV_UNIQUE_NAME, 0) != 0) {
|
|
|
|
(void) no_memory(hdl);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2009-07-02 22:44:48 +00:00
|
|
|
/*
|
|
|
|
* Make sure this property is valid and applies to this type.
|
|
|
|
*/
|
|
|
|
|
2008-11-20 20:01:55 +00:00
|
|
|
elem = NULL;
|
|
|
|
while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) {
|
|
|
|
const char *propname = nvpair_name(elem);
|
|
|
|
|
2009-07-02 22:44:48 +00:00
|
|
|
prop = zfs_name_to_prop(propname);
|
2022-06-14 18:27:53 +00:00
|
|
|
if (prop == ZPROP_USERPROP && zfs_prop_user(propname)) {
|
2008-11-20 20:01:55 +00:00
|
|
|
/*
|
2009-07-02 22:44:48 +00:00
|
|
|
* This is a user property: make sure it's a
|
2008-11-20 20:01:55 +00:00
|
|
|
* string, and that it's less than ZAP_MAXNAMELEN.
|
|
|
|
*/
|
|
|
|
if (nvpair_type(elem) != DATA_TYPE_STRING) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"'%s' must be a string"), propname);
|
|
|
|
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"property name '%s' is too long"),
|
|
|
|
propname);
|
|
|
|
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
(void) nvpair_value_string(elem, &strval);
|
|
|
|
if (nvlist_add_string(ret, propname, strval) != 0) {
|
|
|
|
(void) no_memory(hdl);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2009-07-02 22:44:48 +00:00
|
|
|
/*
|
|
|
|
* Currently, only user properties can be modified on
|
|
|
|
* snapshots.
|
|
|
|
*/
|
2008-12-03 20:09:06 +00:00
|
|
|
if (type == ZFS_TYPE_SNAPSHOT) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"this property can not be modified for snapshots"));
|
|
|
|
(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2022-06-14 18:27:53 +00:00
|
|
|
if (prop == ZPROP_USERPROP && zfs_prop_userquota(propname)) {
|
2009-07-02 22:44:48 +00:00
|
|
|
zfs_userquota_prop_t uqtype;
|
2018-04-04 17:16:47 +00:00
|
|
|
char *newpropname = NULL;
|
2009-07-02 22:44:48 +00:00
|
|
|
char domain[128];
|
|
|
|
uint64_t rid;
|
|
|
|
uint64_t valary[3];
|
2018-04-04 17:16:47 +00:00
|
|
|
int rc;
|
2009-07-02 22:44:48 +00:00
|
|
|
|
|
|
|
if (userquota_propname_decode(propname, zoned,
|
|
|
|
&uqtype, domain, sizeof (domain), &rid) != 0) {
|
|
|
|
zfs_error_aux(hdl,
|
|
|
|
dgettext(TEXT_DOMAIN,
|
|
|
|
"'%s' has an invalid user/group name"),
|
|
|
|
propname);
|
|
|
|
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (uqtype != ZFS_PROP_USERQUOTA &&
|
2016-10-04 18:46:10 +00:00
|
|
|
uqtype != ZFS_PROP_GROUPQUOTA &&
|
|
|
|
uqtype != ZFS_PROP_USEROBJQUOTA &&
|
2018-02-13 22:54:54 +00:00
|
|
|
uqtype != ZFS_PROP_GROUPOBJQUOTA &&
|
|
|
|
uqtype != ZFS_PROP_PROJECTQUOTA &&
|
|
|
|
uqtype != ZFS_PROP_PROJECTOBJQUOTA) {
|
2009-07-02 22:44:48 +00:00
|
|
|
zfs_error_aux(hdl,
|
|
|
|
dgettext(TEXT_DOMAIN, "'%s' is readonly"),
|
|
|
|
propname);
|
|
|
|
(void) zfs_error(hdl, EZFS_PROPREADONLY,
|
|
|
|
errbuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nvpair_type(elem) == DATA_TYPE_STRING) {
|
|
|
|
(void) nvpair_value_string(elem, &strval);
|
|
|
|
if (strcmp(strval, "none") == 0) {
|
|
|
|
intval = 0;
|
|
|
|
} else if (zfs_nicestrtonum(hdl,
|
|
|
|
strval, &intval) != 0) {
|
|
|
|
(void) zfs_error(hdl,
|
|
|
|
EZFS_BADPROP, errbuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
} else if (nvpair_type(elem) ==
|
|
|
|
DATA_TYPE_UINT64) {
|
|
|
|
(void) nvpair_value_uint64(elem, &intval);
|
|
|
|
if (intval == 0) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"use 'none' to disable "
|
2018-02-13 22:54:54 +00:00
|
|
|
"{user|group|project}quota"));
|
2009-07-02 22:44:48 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"'%s' must be a number"), propname);
|
|
|
|
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2010-05-28 20:45:14 +00:00
|
|
|
/*
|
|
|
|
* Encode the prop name as
|
|
|
|
* userquota@<hex-rid>-domain, to make it easy
|
|
|
|
* for the kernel to decode.
|
|
|
|
*/
|
2018-04-04 17:16:47 +00:00
|
|
|
rc = asprintf(&newpropname, "%s%llx-%s",
|
|
|
|
zfs_userquota_prop_prefixes[uqtype],
|
2010-05-28 20:45:14 +00:00
|
|
|
(longlong_t)rid, domain);
|
2018-04-04 17:16:47 +00:00
|
|
|
if (rc == -1 || newpropname == NULL) {
|
|
|
|
(void) no_memory(hdl);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2009-07-02 22:44:48 +00:00
|
|
|
valary[0] = uqtype;
|
|
|
|
valary[1] = rid;
|
|
|
|
valary[2] = intval;
|
|
|
|
if (nvlist_add_uint64_array(ret, newpropname,
|
|
|
|
valary, 3) != 0) {
|
2018-04-04 17:16:47 +00:00
|
|
|
free(newpropname);
|
2009-07-02 22:44:48 +00:00
|
|
|
(void) no_memory(hdl);
|
|
|
|
goto error;
|
|
|
|
}
|
2018-04-04 17:16:47 +00:00
|
|
|
free(newpropname);
|
2009-07-02 22:44:48 +00:00
|
|
|
continue;
|
2022-06-14 18:27:53 +00:00
|
|
|
} else if (prop == ZPROP_USERPROP &&
|
|
|
|
zfs_prop_written(propname)) {
|
2011-11-17 18:14:36 +00:00
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"'%s' is readonly"),
|
|
|
|
propname);
|
|
|
|
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
|
|
|
|
goto error;
|
2009-07-02 22:44:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (prop == ZPROP_INVAL) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"invalid property '%s'"), propname);
|
|
|
|
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2014-04-21 18:22:08 +00:00
|
|
|
if (!zfs_prop_valid_for_type(prop, type, B_FALSE)) {
|
2008-11-20 20:01:55 +00:00
|
|
|
zfs_error_aux(hdl,
|
|
|
|
dgettext(TEXT_DOMAIN, "'%s' does not "
|
|
|
|
"apply to datasets of this type"), propname);
|
|
|
|
(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
		if (zfs_prop_readonly(prop) &&
		    !(zfs_prop_setonce(prop) && zhp == NULL) &&
		    !(zfs_prop_encryption_key_param(prop) && key_params_ok)) {
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "'%s' is readonly"),
			    propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, type, ret,
		    &strval, &intval, errbuf) != 0)
			goto error;
		/*
		 * Perform some additional checks for specific properties.
		 */
		switch (prop) {
		case ZFS_PROP_VERSION:
		{
			int version;

			if (zhp == NULL)
				break;
			version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
			if (intval < version) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "Can not downgrade; already at version %u"),
				    version);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}

		case ZFS_PROP_VOLBLOCKSIZE:
		case ZFS_PROP_RECORDSIZE:
		{
			int maxbs = SPA_MAXBLOCKSIZE;
			char buf[64];

			if (zpool_hdl != NULL) {
				maxbs = zpool_get_prop_int(zpool_hdl,
				    ZPOOL_PROP_MAXBLOCKSIZE, NULL);
			}
			/*
			 * The value must be a power of two between
			 * SPA_MINBLOCKSIZE and maxbs.
			 */
			if (intval < SPA_MINBLOCKSIZE ||
			    intval > maxbs || !ISP2(intval)) {
				zfs_nicebytes(maxbs, buf, sizeof (buf));
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be power of 2 from 512B "
				    "to %s"), propname, buf);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			/* save the ZFS_PROP_RECORDSIZE during create op */
			if (zpool_hdl == NULL && prop == ZFS_PROP_RECORDSIZE) {
				set_maxbs = intval;
			}
			break;
		}

		case ZFS_PROP_SPECIAL_SMALL_BLOCKS:
		{
			int maxbs =
			    set_maxbs == 0 ? SPA_OLD_MAXBLOCKSIZE : set_maxbs;
			char buf[64];

			if (zpool_hdl != NULL) {
				char state[64] = "";

				maxbs = zpool_get_prop_int(zpool_hdl,
				    ZPOOL_PROP_MAXBLOCKSIZE, NULL);

				/*
				 * Issue a warning but do not fail so that
				 * tests for settable properties succeed.
				 */
				if (zpool_prop_get_feature(zpool_hdl,
				    "feature@allocation_classes", state,
				    sizeof (state)) != 0 ||
				    strcmp(state, ZFS_FEATURE_ACTIVE) != 0) {
					(void) fprintf(stderr, gettext(
					    "%s: property requires a special "
					    "device in the pool\n"), propname);
				}
			}
			if (intval != 0 &&
			    (intval < SPA_MINBLOCKSIZE ||
			    intval > maxbs || !ISP2(intval))) {
				zfs_nicebytes(maxbs, buf, sizeof (buf));
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid '%s=%llu' property: must be zero "
				    "or a power of 2 from 512B to %s"),
				    propname, (unsigned long long)intval, buf);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}

		case ZFS_PROP_MLSLABEL:
		{
#ifdef HAVE_MLSLABEL
			/*
			 * Verify the mlslabel string and convert to
			 * internal hex label string.
			 */

			m_label_t *new_sl;
			char *hex = NULL;	/* internal label string */

			/* Default value is already OK. */
			if (strcasecmp(strval, ZFS_MLSLABEL_DEFAULT) == 0)
				break;

			/* Verify the label can be converted to binary form */
			if (((new_sl = m_label_alloc(MAC_LABEL)) == NULL) ||
			    (str_to_label(strval, &new_sl, MAC_LABEL,
			    L_NO_CORRECTION, NULL) == -1)) {
				goto badlabel;
			}

			/* Now translate to hex internal label string */
			if (label_to_str(new_sl, &hex, M_INTERNAL,
			    DEF_NAMES) != 0) {
				if (hex)
					free(hex);
				goto badlabel;
			}
			m_label_free(new_sl);

			/* If string is already in internal form, we're done. */
			if (strcmp(strval, hex) == 0) {
				free(hex);
				break;
			}

			/* Replace the label string with the internal form. */
			(void) nvlist_remove(ret, zfs_prop_to_name(prop),
			    DATA_TYPE_STRING);
			fnvlist_add_string(ret, zfs_prop_to_name(prop), hex);
			free(hex);

			break;

badlabel:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid mlslabel '%s'"), strval);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			m_label_free(new_sl);	/* OK if null */
			goto error;
#else
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "mlslabels are unsupported"));
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
#endif /* HAVE_MLSLABEL */
		}

		case ZFS_PROP_MOUNTPOINT:
		{
			namecheck_err_t why;

			if (strcmp(strval, ZFS_MOUNTPOINT_NONE) == 0 ||
			    strcmp(strval, ZFS_MOUNTPOINT_LEGACY) == 0)
				break;

			if (mountpoint_namecheck(strval, &why)) {
				switch (why) {
				case NAME_ERR_LEADING_SLASH:
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "'%s' must be an absolute path, "
					    "'none', or 'legacy'"), propname);
					break;
				case NAME_ERR_TOOLONG:
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "component of '%s' is too long"),
					    propname);
					break;

				default:
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "(%d) not defined"),
					    why);
					break;
				}
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			zfs_fallthrough;
		}

		case ZFS_PROP_SHARESMB:
		case ZFS_PROP_SHARENFS:
			/*
			 * For the mountpoint and sharenfs or sharesmb
			 * properties, check if it can be set in a
			 * global/non-global zone based on
			 * the zoned property value:
			 *
			 *		global zone	    non-global zone
			 * --------------------------------------------------
			 * zoned=on	mountpoint (no)	    mountpoint (yes)
			 *		sharenfs (no)	    sharenfs (no)
			 *		sharesmb (no)	    sharesmb (no)
			 *
			 * zoned=off	mountpoint (yes)	N/A
			 *		sharenfs (yes)
			 *		sharesmb (yes)
			 */
			if (zoned) {
				if (getzoneid() == GLOBAL_ZONEID) {
					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
					    "'%s' cannot be set on "
					    "dataset in a non-global zone"),
					    propname);
					(void) zfs_error(hdl, EZFS_ZONED,
					    errbuf);
					goto error;
				} else if (prop == ZFS_PROP_SHARENFS ||
				    prop == ZFS_PROP_SHARESMB) {
					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
					    "'%s' cannot be set in "
					    "a non-global zone"), propname);
					(void) zfs_error(hdl, EZFS_ZONED,
					    errbuf);
					goto error;
				}
			} else if (getzoneid() != GLOBAL_ZONEID) {
				/*
				 * If zoned property is 'off', this must be in
				 * a global zone. If not, something is wrong.
				 */
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' cannot be set while dataset "
				    "'zoned' property is set"), propname);
				(void) zfs_error(hdl, EZFS_ZONED, errbuf);
				goto error;
			}

			/*
			 * At this point, it is legitimate to set the
			 * property. Now we want to make sure that the
			 * property value is valid if it is sharenfs.
			 */
			if ((prop == ZFS_PROP_SHARENFS ||
			    prop == ZFS_PROP_SHARESMB) &&
			    strcmp(strval, "on") != 0 &&
			    strcmp(strval, "off") != 0) {
				enum sa_protocol proto;

				if (prop == ZFS_PROP_SHARESMB)
					proto = SA_PROTOCOL_SMB;
				else
					proto = SA_PROTOCOL_NFS;

				if (sa_validate_shareopts(strval, proto) !=
				    SA_OK) {
					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
					    "'%s' cannot be set to invalid "
					    "options"), propname);
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}

			break;

		case ZFS_PROP_KEYLOCATION:
			if (!zfs_prop_valid_keylocation(strval, B_FALSE)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid keylocation"));
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (zhp != NULL) {
				uint64_t crypt =
				    zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION);

				if (crypt == ZIO_CRYPT_OFF &&
				    strcmp(strval, "none") != 0) {
					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
					    "keylocation must be 'none' "
					    "for unencrypted datasets"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				} else if (crypt != ZIO_CRYPT_OFF &&
				    strcmp(strval, "none") == 0) {
					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
					    "keylocation must not be 'none' "
					    "for encrypted datasets"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			break;

		case ZFS_PROP_PBKDF2_ITERS:
			if (intval < MIN_PBKDF2_ITERATIONS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "minimum pbkdf2 iterations is %u"),
				    MIN_PBKDF2_ITERATIONS);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZFS_PROP_UTF8ONLY:
			chosen_utf = (int)intval;
			break;

		case ZFS_PROP_NORMALIZE:
			chosen_normal = (int)intval;
			break;

		default:
			break;
		}

		/*
		 * For changes to existing volumes, we have some additional
		 * checks to enforce.
		 */
		if (type == ZFS_TYPE_VOLUME && zhp != NULL) {
			uint64_t blocksize = zfs_prop_get_int(zhp,
			    ZFS_PROP_VOLBLOCKSIZE);
			char buf[64];

			switch (prop) {
			case ZFS_PROP_VOLSIZE:
				if (intval % blocksize != 0) {
					zfs_nicebytes(blocksize, buf,
					    sizeof (buf));
					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
					    "'%s' must be a multiple of "
					    "volume block size (%s)"),
					    propname, buf);
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}

				if (intval == 0) {
					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
					    "'%s' cannot be zero"),
					    propname);
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
				break;

			default:
				break;
			}
		}

		/* check encryption properties */
		if (zhp != NULL) {
			int64_t crypt = zfs_prop_get_int(zhp,
			    ZFS_PROP_ENCRYPTION);

			switch (prop) {
			case ZFS_PROP_COPIES:
				if (crypt != ZIO_CRYPT_OFF && intval > 2) {
					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
					    "encrypted datasets cannot have "
					    "3 copies"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
				break;
			default:
				break;
			}
		}
	}

	/*
	 * If normalization was chosen, but no UTF8 choice was made,
	 * enforce rejection of non-UTF8 names.
	 *
	 * If normalization was chosen, but rejecting non-UTF8 names
	 * was explicitly not chosen, it is an error.
	 *
	 * If utf8only was turned off, but the parent has normalization,
	 * turn off normalization.
	 */
	if (chosen_normal > 0 && chosen_utf < 0) {
		if (nvlist_add_uint64(ret,
		    zfs_prop_to_name(ZFS_PROP_UTF8ONLY), 1) != 0) {
			(void) no_memory(hdl);
			goto error;
		}
	} else if (chosen_normal > 0 && chosen_utf == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "'%s' must be set 'on' if normalization chosen"),
		    zfs_prop_to_name(ZFS_PROP_UTF8ONLY));
		(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
		goto error;
	} else if (chosen_normal < 0 && chosen_utf == 0) {
		if (nvlist_add_uint64(ret,
		    zfs_prop_to_name(ZFS_PROP_NORMALIZE), 0) != 0) {
			(void) no_memory(hdl);
			goto error;
		}
	}
	return (ret);

error:
	nvlist_free(ret);
	return (NULL);
}

static int
zfs_add_synthetic_resv(zfs_handle_t *zhp, nvlist_t *nvl)
{
	uint64_t old_volsize;
	uint64_t new_volsize;
	uint64_t old_reservation;
	uint64_t new_reservation;
	zfs_prop_t resv_prop;
	nvlist_t *props;
	zpool_handle_t *zph = zpool_handle(zhp);

	/*
	 * If this is an existing volume, and someone is setting the volsize,
	 * make sure that it matches the reservation, or add it if necessary.
	 */
	old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
	if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
		return (-1);
	old_reservation = zfs_prop_get_int(zhp, resv_prop);

	props = fnvlist_alloc();
	fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
	    zfs_prop_get_int(zhp, ZFS_PROP_VOLBLOCKSIZE));

	if ((zvol_volsize_to_reservation(zph, old_volsize, props) !=
	    old_reservation) || nvlist_exists(nvl,
	    zfs_prop_to_name(resv_prop))) {
		fnvlist_free(props);
		return (0);
	}
	if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
	    &new_volsize) != 0) {
		fnvlist_free(props);
		return (-1);
	}
	new_reservation = zvol_volsize_to_reservation(zph, new_volsize, props);
	fnvlist_free(props);

	if (nvlist_add_uint64(nvl, zfs_prop_to_name(resv_prop),
	    new_reservation) != 0) {
		(void) no_memory(zhp->zfs_hdl);
		return (-1);
	}
	return (1);
}
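
/*
 * Reading of the return contract above (editorial summary): 1 means a
 * synthetic reservation was appended to 'nvl', 0 means no change was
 * needed (the reservation had already diverged from volsize, or the
 * caller is setting it explicitly), and -1 means a lookup or allocation
 * failed.
 */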

/*
 * Helper for 'zfs {set|clone} refreservation=auto'. Must be called after
 * zfs_valid_proplist(), as it is what sets the UINT64_MAX sentinel value.
 * Return codes must match zfs_add_synthetic_resv().
 */
static int
zfs_fix_auto_resv(zfs_handle_t *zhp, nvlist_t *nvl)
{
	uint64_t volsize;
	uint64_t resvsize;
	zfs_prop_t prop;
	nvlist_t *props;

	if (!ZFS_IS_VOLUME(zhp)) {
		return (0);
	}

	if (zfs_which_resv_prop(zhp, &prop) != 0) {
		return (-1);
	}

	if (prop != ZFS_PROP_REFRESERVATION) {
		return (0);
	}

	if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(prop), &resvsize) != 0) {
		/* No value being set, so it can't be "auto" */
		return (0);
	}
	if (resvsize != UINT64_MAX) {
		/* Being set to a value other than "auto" */
		return (0);
	}

	props = fnvlist_alloc();

	fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
	    zfs_prop_get_int(zhp, ZFS_PROP_VOLBLOCKSIZE));

	if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
	    &volsize) != 0) {
		volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
	}

	resvsize = zvol_volsize_to_reservation(zpool_handle(zhp), volsize,
	    props);
	fnvlist_free(props);

	(void) nvlist_remove_all(nvl, zfs_prop_to_name(prop));
	if (nvlist_add_uint64(nvl, zfs_prop_to_name(prop), resvsize) != 0) {
		(void) no_memory(zhp->zfs_hdl);
		return (-1);
	}
	return (1);
}
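
/*
 * Usage sketch (assumed caller, not upstream code): once
 * zfs_valid_proplist() has turned "refreservation=auto" into the
 * UINT64_MAX sentinel, the fixup runs as:
 *
 *	fnvlist_add_uint64(nvl,
 *	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), UINT64_MAX);
 *	if (zfs_fix_auto_resv(zhp, nvl) == -1)
 *		goto error;
 *
 * leaving 'nvl' holding the reservation computed for the volume size.
 */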

static boolean_t
zfs_is_namespace_prop(zfs_prop_t prop)
{
	switch (prop) {
	case ZFS_PROP_ATIME:
	case ZFS_PROP_RELATIME:
	case ZFS_PROP_DEVICES:
	case ZFS_PROP_EXEC:
	case ZFS_PROP_SETUID:
	case ZFS_PROP_READONLY:
	case ZFS_PROP_XATTR:
	case ZFS_PROP_NBMAND:
		return (B_TRUE);

	default:
		return (B_FALSE);
	}
}
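
/*
 * Editorial note: these are exactly the properties surfaced as mount
 * options in get_numeric_property() below, which is why the set and
 * inherit paths remount a mounted dataset after changing one of them.
 */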

/*
 * Given a property name and value, set the property for the given dataset.
 */
int
zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
{
	int ret = -1;
	char errbuf[ERRBUFLEN];
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	nvlist_t *nvl = NULL;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zfs_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_string(nvl, propname, propval) != 0) {
		(void) no_memory(hdl);
		goto error;
	}

	ret = zfs_prop_set_list(zhp, nvl);

error:
	nvlist_free(nvl);
	return (ret);
}
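
/*
 * Minimal consumer sketch (dataset name and error handling are
 * hypothetical, not part of this file):
 *
 *	zfs_handle_t *zhp = zfs_open(hdl, "tank/fs", ZFS_TYPE_FILESYSTEM);
 *	if (zhp != NULL) {
 *		if (zfs_prop_set(zhp, "atime", "off") != 0)
 *			(void) fprintf(stderr, "%s\n",
 *			    libzfs_error_description(hdl));
 *		zfs_close(zhp);
 *	}
 */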

/*
 * Given an nvlist of property names and values, set the properties for the
 * given dataset.
 */
int
zfs_prop_set_list(zfs_handle_t *zhp, nvlist_t *props)
{
	return (zfs_prop_set_list_flags(zhp, props, 0));
}
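
/*
 * Sketch of a multi-property caller (property values hypothetical): the
 * whole nvlist is validated together and submitted in a single
 * ZFS_IOC_SET_PROP ioctl by zfs_prop_set_list_flags() below.
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	fnvlist_add_string(props, "compression", "on");
 *	fnvlist_add_string(props, "quota", "10G");
 *	int err = zfs_prop_set_list(zhp, props);
 *	fnvlist_free(props);
 */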

/*
 * Given an nvlist of property names, values and flags, set the properties
 * for the given dataset. If ZFS_SET_NOMOUNT is set, it allows updating the
 * mountpoint, sharenfs and sharesmb properties without (un/re)mounting
 * and (un/re)sharing the dataset.
 */
int
zfs_prop_set_list_flags(zfs_handle_t *zhp, nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	prop_changelist_t **cls = NULL;
	int cl_idx;
	char errbuf[ERRBUFLEN];
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	nvlist_t *nvl;
	int nvl_len = 0;
	int added_resv = 0;
	zfs_prop_t prop;
	boolean_t nsprop = B_FALSE;
	nvpair_t *elem;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zfs_name);

	if ((nvl = zfs_valid_proplist(hdl, zhp->zfs_type, props,
	    zfs_prop_get_int(zhp, ZFS_PROP_ZONED), zhp, zhp->zpool_hdl,
	    B_FALSE, errbuf)) == NULL)
		goto error;

	/*
	 * We have to check for any extra properties which need to be added
	 * before computing the length of the nvlist.
	 */
	for (elem = nvlist_next_nvpair(nvl, NULL);
	    elem != NULL;
	    elem = nvlist_next_nvpair(nvl, elem)) {
		if (zfs_name_to_prop(nvpair_name(elem)) == ZFS_PROP_VOLSIZE &&
		    (added_resv = zfs_add_synthetic_resv(zhp, nvl)) == -1) {
			goto error;
		}
	}

	if (added_resv != 1 &&
	    (added_resv = zfs_fix_auto_resv(zhp, nvl)) == -1) {
		goto error;
	}

	/*
	 * Check how many properties we're setting and allocate an array to
	 * store changelist pointers for postfix().
	 */
	for (elem = nvlist_next_nvpair(nvl, NULL);
	    elem != NULL;
	    elem = nvlist_next_nvpair(nvl, elem))
		nvl_len++;
	if ((cls = calloc(nvl_len, sizeof (prop_changelist_t *))) == NULL)
		goto error;

	cl_idx = 0;
	for (elem = nvlist_next_nvpair(nvl, NULL);
	    elem != NULL;
	    elem = nvlist_next_nvpair(nvl, elem)) {

		prop = zfs_name_to_prop(nvpair_name(elem));
		nsprop |= zfs_is_namespace_prop(prop);

		assert(cl_idx < nvl_len);
		/*
		 * We don't want to unmount & remount the dataset when changing
		 * its canmount property to 'on' or 'noauto'. We only use
		 * the changelist logic to unmount when setting canmount=off.
		 */
		if (prop != ZFS_PROP_CANMOUNT ||
		    (fnvpair_value_uint64(elem) == ZFS_CANMOUNT_OFF &&
		    zfs_is_mounted(zhp, NULL))) {
			cls[cl_idx] = changelist_gather(zhp, prop,
			    ((flags & ZFS_SET_NOMOUNT) ?
			    CL_GATHER_DONT_UNMOUNT : 0), 0);
			if (cls[cl_idx] == NULL)
				goto error;
		}

		if (prop == ZFS_PROP_MOUNTPOINT &&
		    changelist_haszonedchild(cls[cl_idx])) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "child dataset with inherited mountpoint is used "
			    "in a non-global zone"));
			ret = zfs_error(hdl, EZFS_ZONED, errbuf);
			goto error;
		}

		if (cls[cl_idx] != NULL &&
		    (ret = changelist_prefix(cls[cl_idx])) != 0)
			goto error;

		cl_idx++;
	}
	assert(cl_idx == nvl_len);

	/*
	 * Execute the corresponding ioctl() to set this list of properties.
	 */
	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));

	zcmd_write_src_nvlist(hdl, &zc, nvl);
	zcmd_alloc_dst_nvlist(hdl, &zc, 0);

	ret = zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);

	if (ret != 0) {
		if (zc.zc_nvlist_dst_filled == B_FALSE) {
			(void) zfs_standard_error(hdl, errno, errbuf);
			goto error;
		}

		/* Get the list of unset properties back and report them. */
		nvlist_t *errorprops = NULL;
		if (zcmd_read_dst_nvlist(hdl, &zc, &errorprops) != 0)
			goto error;
		for (nvpair_t *elem = nvlist_next_nvpair(errorprops, NULL);
		    elem != NULL;
		    elem = nvlist_next_nvpair(errorprops, elem)) {
			prop = zfs_name_to_prop(nvpair_name(elem));
			zfs_setprop_error(hdl, prop, errno, errbuf);
		}
		nvlist_free(errorprops);

		if (added_resv && errno == ENOSPC) {
			/* clean up the volsize property we tried to set */
			uint64_t old_volsize = zfs_prop_get_int(zhp,
			    ZFS_PROP_VOLSIZE);
			nvlist_free(nvl);
			nvl = NULL;
			zcmd_free_nvlists(&zc);

			if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
				goto error;
			if (nvlist_add_uint64(nvl,
			    zfs_prop_to_name(ZFS_PROP_VOLSIZE),
			    old_volsize) != 0)
				goto error;
			zcmd_write_src_nvlist(hdl, &zc, nvl);
			(void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
		}
	} else {
		for (cl_idx = 0; cl_idx < nvl_len; cl_idx++) {
			if (cls[cl_idx] != NULL) {
				int clp_err = changelist_postfix(cls[cl_idx]);
				if (clp_err != 0)
					ret = clp_err;
			}
		}

		if (ret == 0) {
			/*
			 * Refresh the statistics so the new property
			 * value is reflected.
			 */
			(void) get_stats(zhp);

			/*
			 * Remount the filesystem to propagate the change
			 * if one of the options handled by the generic
			 * Linux namespace layer has been modified.
			 */
			if (nsprop && zfs_is_mounted(zhp, NULL))
				ret = zfs_mount(zhp, MNTOPT_REMOUNT, 0);
		}
	}

error:
	nvlist_free(nvl);
	zcmd_free_nvlists(&zc);
	if (cls != NULL) {
		for (cl_idx = 0; cl_idx < nvl_len; cl_idx++) {
			if (cls[cl_idx] != NULL)
				changelist_free(cls[cl_idx]);
		}
		free(cls);
	}
	return (ret);
}
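
/*
 * Hedged usage note: ZFS_SET_NOMOUNT makes changelist_gather() use
 * CL_GATHER_DONT_UNMOUNT, so a mountpoint change can be recorded
 * without cycling the mount ("/new/path" is illustrative only):
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	fnvlist_add_string(props, "mountpoint", "/new/path");
 *	int err = zfs_prop_set_list_flags(zhp, props, ZFS_SET_NOMOUNT);
 *	fnvlist_free(props);
 */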

/*
 * Given a property, inherit the value from the parent dataset, or if received
 * is TRUE, revert to the received value, if any.
 */
int
zfs_prop_inherit(zfs_handle_t *zhp, const char *propname, boolean_t received)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	prop_changelist_t *cl;
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	char errbuf[ERRBUFLEN];
	zfs_prop_t prop;

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot inherit %s for '%s'"), propname, zhp->zfs_name);

	zc.zc_cookie = received;
	if ((prop = zfs_name_to_prop(propname)) == ZPROP_USERPROP) {
		/*
		 * For user properties, the amount of work we have to do is very
		 * small, so just do it here.
		 */
		if (!zfs_prop_user(propname)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property"));
			return (zfs_error(hdl, EZFS_BADPROP, errbuf));
		}

		(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
		(void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));

		if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc) != 0)
			return (zfs_standard_error(hdl, errno, errbuf));

		(void) get_stats(zhp);
		return (0);
	}

	/*
	 * Verify that this property is inheritable.
	 */
	if (zfs_prop_readonly(prop))
		return (zfs_error(hdl, EZFS_PROPREADONLY, errbuf));

	if (!zfs_prop_inheritable(prop) && !received)
		return (zfs_error(hdl, EZFS_PROPNONINHERIT, errbuf));

	/*
	 * Check to see if the value applies to this type
	 */
	if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE))
		return (zfs_error(hdl, EZFS_PROPTYPE, errbuf));

	/*
	 * Normalize the name, to get rid of shorthand abbreviations.
	 */
	propname = zfs_prop_to_name(prop);
	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));

	if (prop == ZFS_PROP_MOUNTPOINT && getzoneid() == GLOBAL_ZONEID &&
	    zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset is used in a non-global zone"));
		return (zfs_error(hdl, EZFS_ZONED, errbuf));
	}

	/*
	 * Determine datasets which will be affected by this change, if any.
	 */
	if ((cl = changelist_gather(zhp, prop, 0, 0)) == NULL)
		return (-1);

	if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "child dataset with inherited mountpoint is used "
		    "in a non-global zone"));
		ret = zfs_error(hdl, EZFS_ZONED, errbuf);
		goto error;
	}

	if ((ret = changelist_prefix(cl)) != 0)
		goto error;

	if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc) != 0) {
		changelist_free(cl);
		return (zfs_standard_error(hdl, errno, errbuf));
	} else {
		if ((ret = changelist_postfix(cl)) != 0)
			goto error;

		/*
		 * Refresh the statistics so the new property is reflected.
		 */
		(void) get_stats(zhp);

		/*
		 * Remount the filesystem to propagate the change
		 * if one of the options handled by the generic
		 * Linux namespace layer has been modified.
		 */
		if (zfs_is_namespace_prop(prop) &&
		    zfs_is_mounted(zhp, NULL))
			ret = zfs_mount(zhp, MNTOPT_REMOUNT, 0);
	}

error:
	changelist_free(cl);
	return (ret);
}
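
/*
 * Caller sketch (assumed usage): clearing a local value so the dataset
 * follows its parent again, as `zfs inherit compression` does:
 *
 *	if (zfs_prop_inherit(zhp, "compression", B_FALSE) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(zhp->zfs_hdl));
 *
 * Passing B_TRUE for 'received' instead reverts to the received value,
 * matching `zfs inherit -S`.
 */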

/*
 * True DSL properties are stored in an nvlist. The following two functions
 * extract them appropriately.
 */
uint64_t
getprop_uint64(zfs_handle_t *zhp, zfs_prop_t prop, const char **source)
{
	nvlist_t *nv;
	uint64_t value;

	*source = NULL;
	if (nvlist_lookup_nvlist(zhp->zfs_props,
	    zfs_prop_to_name(prop), &nv) == 0) {
		value = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
		(void) nvlist_lookup_string(nv, ZPROP_SOURCE, source);
	} else {
		verify(!zhp->zfs_props_table ||
		    zhp->zfs_props_table[prop] == B_TRUE);
		value = zfs_prop_default_numeric(prop);
		*source = "";
	}

	return (value);
}

static const char *
getprop_string(zfs_handle_t *zhp, zfs_prop_t prop, const char **source)
{
	nvlist_t *nv;
	const char *value;

	*source = NULL;
	if (nvlist_lookup_nvlist(zhp->zfs_props,
	    zfs_prop_to_name(prop), &nv) == 0) {
		value = fnvlist_lookup_string(nv, ZPROP_VALUE);
		(void) nvlist_lookup_string(nv, ZPROP_SOURCE, source);
	} else {
		verify(!zhp->zfs_props_table ||
		    zhp->zfs_props_table[prop] == B_TRUE);
		value = zfs_prop_default_string(prop);
		*source = "";
	}

	return (value);
}
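
/*
 * Editorial note on the shared source convention: both getters leave
 * *source as the inheriting dataset's name when the nvlist carries one,
 * as NULL when the value is local (get_numeric_property() below then
 * substitutes the dataset's own name), and as "" when the default value
 * was used.
 */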

static boolean_t
zfs_is_recvd_props_mode(zfs_handle_t *zhp)
{
	return (zhp->zfs_props != NULL &&
	    zhp->zfs_props == zhp->zfs_recvd_props);
}

static void
zfs_set_recvd_props_mode(zfs_handle_t *zhp, uintptr_t *cookie)
{
	*cookie = (uintptr_t)zhp->zfs_props;
	zhp->zfs_props = zhp->zfs_recvd_props;
}

static void
zfs_unset_recvd_props_mode(zfs_handle_t *zhp, uintptr_t *cookie)
{
	zhp->zfs_props = (nvlist_t *)*cookie;
	*cookie = 0;
}
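
/*
 * Sketch of the intended save/swap/restore pattern (assumed caller):
 *
 *	uintptr_t cookie = 0;
 *	zfs_set_recvd_props_mode(zhp, &cookie);
 *	(property lookups now read zhp->zfs_recvd_props)
 *	zfs_unset_recvd_props_mode(zhp, &cookie);
 */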

/*
 * Internal function for getting a numeric property. Both zfs_prop_get() and
 * zfs_prop_get_int() are built using this interface.
 *
 * Certain properties can be overridden using 'mount -o'. In this case, scan
 * the contents of the /proc/self/mounts entry, searching for the
 * appropriate options. If they differ from the on-disk values, report the
 * current values and mark the source "temporary".
 */
static int
get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zprop_source_t *src,
    const char **source, uint64_t *val)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zplprops = NULL;
	struct mnttab mnt;
	const char *mntopt_on = NULL;
	const char *mntopt_off = NULL;
	boolean_t received = zfs_is_recvd_props_mode(zhp);

	*source = NULL;

	/*
	 * If the property is being fetched for a snapshot, check whether
	 * the property is valid for the snapshot's head dataset type.
	 */
	if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT &&
	    !zfs_prop_valid_for_type(prop, zhp->zfs_head_type, B_TRUE)) {
		*val = zfs_prop_default_numeric(prop);
		return (-1);
	}

	switch (prop) {
	case ZFS_PROP_ATIME:
		mntopt_on = MNTOPT_ATIME;
		mntopt_off = MNTOPT_NOATIME;
		break;

	case ZFS_PROP_RELATIME:
		mntopt_on = MNTOPT_RELATIME;
		mntopt_off = MNTOPT_NORELATIME;
		break;

	case ZFS_PROP_DEVICES:
		mntopt_on = MNTOPT_DEVICES;
		mntopt_off = MNTOPT_NODEVICES;
		break;

	case ZFS_PROP_EXEC:
		mntopt_on = MNTOPT_EXEC;
		mntopt_off = MNTOPT_NOEXEC;
		break;

	case ZFS_PROP_READONLY:
		mntopt_on = MNTOPT_RO;
		mntopt_off = MNTOPT_RW;
		break;

	case ZFS_PROP_SETUID:
		mntopt_on = MNTOPT_SETUID;
		mntopt_off = MNTOPT_NOSETUID;
		break;

	case ZFS_PROP_XATTR:
		mntopt_on = MNTOPT_XATTR;
		mntopt_off = MNTOPT_NOXATTR;
		break;

	case ZFS_PROP_NBMAND:
		mntopt_on = MNTOPT_NBMAND;
		mntopt_off = MNTOPT_NONBMAND;
		break;

	default:
		break;
	}

	/*
	 * Because looking up the mount options is potentially expensive
	 * (iterating over all of /proc/self/mounts), we defer its
	 * calculation until we're looking up a property which requires
	 * its presence.
	 */
	if (!zhp->zfs_mntcheck &&
	    (mntopt_on != NULL || prop == ZFS_PROP_MOUNTED)) {
		libzfs_handle_t *hdl = zhp->zfs_hdl;
		struct mnttab entry;

		if (libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0)
			zhp->zfs_mntopts = zfs_strdup(hdl,
			    entry.mnt_mntopts);

		zhp->zfs_mntcheck = B_TRUE;
	}

	if (zhp->zfs_mntopts == NULL)
		mnt.mnt_mntopts = (char *)"";
	else
		mnt.mnt_mntopts = zhp->zfs_mntopts;

	switch (prop) {
	case ZFS_PROP_ATIME:
	case ZFS_PROP_RELATIME:
	case ZFS_PROP_DEVICES:
	case ZFS_PROP_EXEC:
	case ZFS_PROP_READONLY:
	case ZFS_PROP_SETUID:
#ifndef __FreeBSD__
	case ZFS_PROP_XATTR:
#endif
	case ZFS_PROP_NBMAND:
		*val = getprop_uint64(zhp, prop, source);

		if (received)
			break;

		if (hasmntopt(&mnt, mntopt_on) && !*val) {
			*val = B_TRUE;
			if (src)
				*src = ZPROP_SRC_TEMPORARY;
		} else if (hasmntopt(&mnt, mntopt_off) && *val) {
			*val = B_FALSE;
			if (src)
				*src = ZPROP_SRC_TEMPORARY;
		}
		break;

	case ZFS_PROP_CANMOUNT:
	case ZFS_PROP_VOLSIZE:
	case ZFS_PROP_QUOTA:
	case ZFS_PROP_REFQUOTA:
	case ZFS_PROP_RESERVATION:
	case ZFS_PROP_REFRESERVATION:
	case ZFS_PROP_FILESYSTEM_LIMIT:
	case ZFS_PROP_SNAPSHOT_LIMIT:
	case ZFS_PROP_FILESYSTEM_COUNT:
	case ZFS_PROP_SNAPSHOT_COUNT:
		*val = getprop_uint64(zhp, prop, source);

		if (*source == NULL) {
			/* not default, must be local */
			*source = zhp->zfs_name;
		}
		break;

	case ZFS_PROP_MOUNTED:
		*val = (zhp->zfs_mntopts != NULL);
		break;

	case ZFS_PROP_NUMCLONES:
		*val = zhp->zfs_dmustats.dds_num_clones;
		break;

	case ZFS_PROP_VERSION:
	case ZFS_PROP_NORMALIZE:
	case ZFS_PROP_UTF8ONLY:
	case ZFS_PROP_CASE:
		zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0);

		(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
		if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_ZPLPROPS, &zc)) {
			zcmd_free_nvlists(&zc);
			if (prop == ZFS_PROP_VERSION &&
			    zhp->zfs_type == ZFS_TYPE_VOLUME)
				*val = zfs_prop_default_numeric(prop);
			return (-1);
		}
		if (zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &zplprops) != 0 ||
		    nvlist_lookup_uint64(zplprops, zfs_prop_to_name(prop),
		    val) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
		nvlist_free(zplprops);
		zcmd_free_nvlists(&zc);
		break;

	case ZFS_PROP_INCONSISTENT:
		*val = zhp->zfs_dmustats.dds_inconsistent;
		break;

	case ZFS_PROP_REDACTED:
		*val = zhp->zfs_dmustats.dds_redacted;
		break;

	case ZFS_PROP_GUID:
		if (zhp->zfs_dmustats.dds_guid != 0)
			*val = zhp->zfs_dmustats.dds_guid;
		else
			*val = getprop_uint64(zhp, prop, source);
		break;

	case ZFS_PROP_CREATETXG:
		/*
		 * We can directly read createtxg property from zfs
		 * handle for Filesystem, Snapshot and ZVOL types.
		 */
		if (((zhp->zfs_type == ZFS_TYPE_FILESYSTEM) ||
		    (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) ||
		    (zhp->zfs_type == ZFS_TYPE_VOLUME)) &&
		    (zhp->zfs_dmustats.dds_creation_txg != 0)) {
			*val = zhp->zfs_dmustats.dds_creation_txg;
			break;
		} else {
			*val = getprop_uint64(zhp, prop, source);
		}
		zfs_fallthrough;
	default:
		switch (zfs_prop_get_type(prop)) {
		case PROP_TYPE_NUMBER:
		case PROP_TYPE_INDEX:
			*val = getprop_uint64(zhp, prop, source);
			/*
			 * If we tried to use a default value for a
			 * readonly property, it means that it was not
			 * present.  Note this only applies to "truly"
			 * readonly properties, not set-once properties
			 * like volblocksize.
			 */
			if (zfs_prop_readonly(prop) &&
			    !zfs_prop_setonce(prop) &&
			    *source != NULL && (*source)[0] == '\0') {
				*source = NULL;
				return (-1);
			}
			break;

		case PROP_TYPE_STRING:
		default:
			zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
			    "cannot get non-numeric property"));
			return (zfs_error(zhp->zfs_hdl, EZFS_BADPROP,
			    dgettext(TEXT_DOMAIN, "internal error")));
		}
	}

	return (0);
}
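
/*
 * Illustrative sketch (not part of libzfs): how the temporary mount-option
 * override above surfaces to a consumer.  The dataset name "tank/fs" is a
 * hypothetical example; the calls themselves are public libzfs API.
 */
#if 0
static void
example_effective_readonly(libzfs_handle_t *hdl)
{
	zfs_handle_t *zhp = zfs_open(hdl, "tank/fs", ZFS_TYPE_FILESYSTEM);

	if (zhp != NULL) {
		uint64_t ro;
		zprop_source_t src = ZPROP_SRC_NONE;
		char statbuf[ZFS_MAXPROPLEN];

		/*
		 * If "tank/fs" is mounted with "-o ro", get_numeric_property()
		 * flips the stored value and reports ZPROP_SRC_TEMPORARY.
		 */
		if (zfs_prop_get_numeric(zhp, ZFS_PROP_READONLY, &ro, &src,
		    statbuf, sizeof (statbuf)) == 0 &&
		    src == ZPROP_SRC_TEMPORARY)
			(void) printf("readonly=%llu (temporary)\n",
			    (u_longlong_t)ro);
		zfs_close(zhp);
	}
}
#endif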

/*
 * Calculate the source type, given the raw source string.
 */
static void
get_source(zfs_handle_t *zhp, zprop_source_t *srctype, const char *source,
    char *statbuf, size_t statlen)
{
	if (statbuf == NULL ||
	    srctype == NULL || *srctype == ZPROP_SRC_TEMPORARY) {
		return;
	}

	if (source == NULL) {
		*srctype = ZPROP_SRC_NONE;
	} else if (source[0] == '\0') {
		*srctype = ZPROP_SRC_DEFAULT;
	} else if (strstr(source, ZPROP_SOURCE_VAL_RECVD) != NULL) {
		*srctype = ZPROP_SRC_RECEIVED;
	} else {
		if (strcmp(source, zhp->zfs_name) == 0) {
			*srctype = ZPROP_SRC_LOCAL;
		} else {
			(void) strlcpy(statbuf, source, statlen);
			*srctype = ZPROP_SRC_INHERITED;
		}
	}
}
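
/*
 * Summary of the mapping implemented above (a restatement, not new
 * behavior): a NULL source means ZPROP_SRC_NONE, an empty string means
 * ZPROP_SRC_DEFAULT, a string containing ZPROP_SOURCE_VAL_RECVD means
 * ZPROP_SRC_RECEIVED, the dataset's own name means ZPROP_SRC_LOCAL, and
 * any other dataset name means ZPROP_SRC_INHERITED (with the ancestor's
 * name copied into statbuf).
 */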

int
zfs_prop_get_recvd(zfs_handle_t *zhp, const char *propname, char *propbuf,
    size_t proplen, boolean_t literal)
{
	zfs_prop_t prop;
	int err = 0;

	if (zhp->zfs_recvd_props == NULL)
		if (get_recvd_props_ioctl(zhp) != 0)
			return (-1);

	prop = zfs_name_to_prop(propname);

	if (prop != ZPROP_USERPROP) {
		uintptr_t cookie;
		if (!nvlist_exists(zhp->zfs_recvd_props, propname))
			return (-1);
		zfs_set_recvd_props_mode(zhp, &cookie);
		err = zfs_prop_get(zhp, prop, propbuf, proplen,
		    NULL, NULL, 0, literal);
		zfs_unset_recvd_props_mode(zhp, &cookie);
	} else {
		nvlist_t *propval;
		const char *recvdval;
		if (nvlist_lookup_nvlist(zhp->zfs_recvd_props,
		    propname, &propval) != 0)
			return (-1);
		recvdval = fnvlist_lookup_string(propval, ZPROP_VALUE);
		(void) strlcpy(propbuf, recvdval, proplen);
	}

	return (err == 0 ? 0 : -1);
}
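
/*
 * Illustrative sketch (not part of libzfs): reading the received value of a
 * property, as the "received" column of `zfs get` does.  The handle is
 * assumed to already be open; the property name is an example.
 */
#if 0
static void
example_received_quota(zfs_handle_t *zhp)
{
	char recvd[ZFS_MAXPROPLEN];

	/* Returns -1 if no received value exists for the property. */
	if (zfs_prop_get_recvd(zhp, "quota", recvd, sizeof (recvd),
	    B_TRUE) == 0)
		(void) printf("received quota: %s\n", recvd);
}
#endif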

static int
get_clones_string(zfs_handle_t *zhp, char *propbuf, size_t proplen)
{
	nvlist_t *value;
	nvpair_t *pair;

	value = zfs_get_clones_nvl(zhp);
	if (value == NULL || nvlist_empty(value))
		return (-1);

	propbuf[0] = '\0';
	for (pair = nvlist_next_nvpair(value, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(value, pair)) {
		if (propbuf[0] != '\0')
			(void) strlcat(propbuf, ",", proplen);
		(void) strlcat(propbuf, nvpair_name(pair), proplen);
	}

	return (0);
}

struct get_clones_arg {
	uint64_t numclones;
	nvlist_t *value;
	const char *origin;
	char buf[ZFS_MAX_DATASET_NAME_LEN];
};

static int
get_clones_cb(zfs_handle_t *zhp, void *arg)
{
	struct get_clones_arg *gca = arg;

	if (gca->numclones == 0) {
		zfs_close(zhp);
		return (0);
	}

	if (zfs_prop_get(zhp, ZFS_PROP_ORIGIN, gca->buf, sizeof (gca->buf),
	    NULL, NULL, 0, B_TRUE) != 0)
		goto out;
	if (strcmp(gca->buf, gca->origin) == 0) {
		fnvlist_add_boolean(gca->value, zfs_get_name(zhp));
		gca->numclones--;
	}

out:
	(void) zfs_iter_children_v2(zhp, 0, get_clones_cb, gca);
	zfs_close(zhp);
	return (0);
}

nvlist_t *
zfs_get_clones_nvl(zfs_handle_t *zhp)
{
	nvlist_t *nv, *value;

	if (nvlist_lookup_nvlist(zhp->zfs_props,
	    zfs_prop_to_name(ZFS_PROP_CLONES), &nv) != 0) {
		struct get_clones_arg gca;

		/*
		 * if this is a snapshot, then the kernel wasn't able
		 * to get the clones.  Do it by slowly iterating.
		 */
		if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT)
			return (NULL);
		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, 0) != 0)
			return (NULL);
		if (nvlist_alloc(&value, NV_UNIQUE_NAME, 0) != 0) {
			nvlist_free(nv);
			return (NULL);
		}

		gca.numclones = zfs_prop_get_int(zhp, ZFS_PROP_NUMCLONES);
		gca.value = value;
		gca.origin = zhp->zfs_name;

		if (gca.numclones != 0) {
			zfs_handle_t *root;
			char pool[ZFS_MAX_DATASET_NAME_LEN];
			char *cp = pool;

			/* get the pool name */
			(void) strlcpy(pool, zhp->zfs_name, sizeof (pool));
			(void) strsep(&cp, "/@");
			root = zfs_open(zhp->zfs_hdl, pool,
			    ZFS_TYPE_FILESYSTEM);
			if (root == NULL) {
				nvlist_free(nv);
				nvlist_free(value);
				return (NULL);
			}

			(void) get_clones_cb(root, &gca);
		}

		if (gca.numclones != 0 ||
		    nvlist_add_nvlist(nv, ZPROP_VALUE, value) != 0 ||
		    nvlist_add_nvlist(zhp->zfs_props,
		    zfs_prop_to_name(ZFS_PROP_CLONES), nv) != 0) {
			nvlist_free(nv);
			nvlist_free(value);
			return (NULL);
		}
		nvlist_free(nv);
		nvlist_free(value);
		nv = fnvlist_lookup_nvlist(zhp->zfs_props,
		    zfs_prop_to_name(ZFS_PROP_CLONES));
	}

	return (fnvlist_lookup_nvlist(nv, ZPROP_VALUE));
}
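
/*
 * Illustrative sketch (not part of libzfs): walking the clones list returned
 * above.  Each nvpair name in the returned nvlist is the name of one clone.
 */
#if 0
static void
example_print_clones(zfs_handle_t *snap)
{
	nvlist_t *clones = zfs_get_clones_nvl(snap);

	if (clones == NULL)
		return;
	for (nvpair_t *pair = nvlist_next_nvpair(clones, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(clones, pair))
		(void) printf("clone: %s\n", nvpair_name(pair));
}
#endif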

static int
get_rsnaps_string(zfs_handle_t *zhp, char *propbuf, size_t proplen)
{
	nvlist_t *value;
	uint64_t *snaps;
	uint_t nsnaps;

	if (nvlist_lookup_nvlist(zhp->zfs_props,
	    zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS), &value) != 0)
		return (-1);
	if (nvlist_lookup_uint64_array(value, ZPROP_VALUE, &snaps,
	    &nsnaps) != 0)
		return (-1);
	if (nsnaps == 0) {
		/* There are no redaction snapshots; pass a special value back */
		(void) snprintf(propbuf, proplen, "none");
		return (0);
	}
	propbuf[0] = '\0';
	for (int i = 0; i < nsnaps; i++) {
		char buf[128];
		if (propbuf[0] != '\0')
			(void) strlcat(propbuf, ",", proplen);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)snaps[i]);
		(void) strlcat(propbuf, buf, proplen);
	}

	return (0);
}
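
/*
 * The string built above is either "none" or a comma-separated list of
 * numeric snapshot identifiers, e.g. "5231,9873" (example values only).
 */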

/*
 * Accepts a property and value and checks that the value
 * matches the one found by the channel program. If they are
 * not equal, print both of them.
 */
static void
zcp_check(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t intval,
    const char *strval)
{
	if (!zhp->zfs_hdl->libzfs_prop_debug)
		return;
	int error;
	char *poolname = zhp->zpool_hdl->zpool_name;
	const char *prop_name = zfs_prop_to_name(prop);
	const char *program =
	    "args = ...\n"
	    "ds = args['dataset']\n"
	    "prop = args['property']\n"
	    "value, setpoint = zfs.get_prop(ds, prop)\n"
	    "return {value=value, setpoint=setpoint}\n";
	nvlist_t *outnvl;
	nvlist_t *retnvl;
	nvlist_t *argnvl = fnvlist_alloc();

	fnvlist_add_string(argnvl, "dataset", zhp->zfs_name);
	fnvlist_add_string(argnvl, "property", zfs_prop_to_name(prop));

	error = lzc_channel_program_nosync(poolname, program,
	    10 * 1000 * 1000, 10 * 1024 * 1024, argnvl, &outnvl);

	if (error == 0) {
		retnvl = fnvlist_lookup_nvlist(outnvl, "return");
		if (zfs_prop_get_type(prop) == PROP_TYPE_NUMBER) {
			int64_t ans;
			error = nvlist_lookup_int64(retnvl, "value", &ans);
			if (error != 0) {
				(void) fprintf(stderr, "%s: zcp check error: "
				    "%u\n", prop_name, error);
				return;
			}
			if (ans != intval) {
				(void) fprintf(stderr, "%s: zfs found %llu, "
				    "but zcp found %llu\n", prop_name,
				    (u_longlong_t)intval, (u_longlong_t)ans);
			}
		} else {
			const char *str_ans;
			error = nvlist_lookup_string(retnvl, "value", &str_ans);
			if (error != 0) {
				(void) fprintf(stderr, "%s: zcp check error: "
				    "%u\n", prop_name, error);
				return;
			}
			if (strcmp(strval, str_ans) != 0) {
				(void) fprintf(stderr,
				    "%s: zfs found '%s', but zcp found '%s'\n",
				    prop_name, strval, str_ans);
			}
		}
	} else {
		(void) fprintf(stderr, "%s: zcp check failed, channel program "
		    "error: %u\n", prop_name, error);
	}
	nvlist_free(argnvl);
	nvlist_free(outnvl);
}
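
/*
 * zcp_check() above is a debugging aid: it re-reads the property through a
 * ZFS channel program and warns when the two answers differ.  It is gated
 * on zhp->zfs_hdl->libzfs_prop_debug, which (an assumption, not verified
 * here) is set during libzfs_init() when the ZFS_PROP_DEBUG environment
 * variable is present.
 */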

/*
 * Retrieve a property from the given object.  If 'literal' is specified, then
 * numbers are left as exact values.  Otherwise, numbers are converted to a
 * human-readable form.
 *
 * Returns 0 on success, or -1 on error.
 */
int
zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
    zprop_source_t *src, char *statbuf, size_t statlen, boolean_t literal)
{
	const char *source = NULL;
	uint64_t val;
	const char *str;
	const char *strval;
	boolean_t received = zfs_is_recvd_props_mode(zhp);

	/*
	 * Check to see if this property applies to our object
	 */
	if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE))
		return (-1);

	if (received && zfs_prop_readonly(prop))
		return (-1);

	if (src)
		*src = ZPROP_SRC_NONE;

	switch (prop) {
	case ZFS_PROP_CREATION:
		/*
		 * 'creation' is a time_t stored in the statistics.  We convert
		 * this into a string unless 'literal' is specified.
		 */
		{
			val = getprop_uint64(zhp, prop, &source);
			time_t time = (time_t)val;
			struct tm t;

			if (literal ||
			    localtime_r(&time, &t) == NULL ||
			    strftime(propbuf, proplen, "%a %b %e %k:%M %Y",
			    &t) == 0)
				(void) snprintf(propbuf, proplen, "%llu",
				    (u_longlong_t)val);
		}
		zcp_check(zhp, prop, val, NULL);
		break;

	case ZFS_PROP_MOUNTPOINT:
		/*
		 * Getting the precise mountpoint can be tricky.
		 *
		 *  - for 'none' or 'legacy', return those values.
		 *  - for inherited mountpoints, we want to take everything
		 *    after our ancestor and append it to the inherited value.
		 *
		 * If the pool has an alternate root, we want to prepend that
		 * root to any values we return.
		 */

		str = getprop_string(zhp, prop, &source);

		if (str[0] == '/') {
			char buf[MAXPATHLEN];
			char *root = buf;
			const char *relpath;

			/*
			 * If we inherit the mountpoint, even from a dataset
			 * with a received value, the source will be the path of
			 * the dataset we inherit from. If source is
			 * ZPROP_SOURCE_VAL_RECVD, the received value is not
			 * inherited.
			 */
			if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) {
				relpath = "";
			} else {
				relpath = zhp->zfs_name + strlen(source);
				if (relpath[0] == '/')
					relpath++;
			}

			if ((zpool_get_prop(zhp->zpool_hdl,
			    ZPOOL_PROP_ALTROOT, buf, MAXPATHLEN, NULL,
			    B_FALSE)) || (strcmp(root, "-") == 0))
				root[0] = '\0';
			/*
			 * Special case an alternate root of '/'. This will
			 * avoid having multiple leading slashes in the
			 * mountpoint path.
			 */
			if (strcmp(root, "/") == 0)
				root++;

			/*
			 * If the mountpoint is '/' then skip over this
			 * if we are obtaining either an alternate root or
			 * an inherited mountpoint.
			 */
			if (str[1] == '\0' && (root[0] != '\0' ||
			    relpath[0] != '\0'))
				str++;

			if (relpath[0] == '\0')
				(void) snprintf(propbuf, proplen, "%s%s",
				    root, str);
			else
				(void) snprintf(propbuf, proplen, "%s%s%s%s",
				    root, str, relpath[0] == '@' ? "" : "/",
				    relpath);
		} else {
			/* 'legacy' or 'none' */
			(void) strlcpy(propbuf, str, proplen);
		}
		zcp_check(zhp, prop, 0, propbuf);
		break;

	case ZFS_PROP_ORIGIN:
		if (*zhp->zfs_dmustats.dds_origin != '\0') {
			str = (char *)&zhp->zfs_dmustats.dds_origin;
		} else {
			str = getprop_string(zhp, prop, &source);
		}
		if (str == NULL || *str == '\0')
			str = zfs_prop_default_string(prop);
		if (str == NULL)
			return (-1);
		(void) strlcpy(propbuf, str, proplen);
		zcp_check(zhp, prop, 0, str);
		break;

	case ZFS_PROP_REDACT_SNAPS:
		if (get_rsnaps_string(zhp, propbuf, proplen) != 0)
			return (-1);
		break;

	case ZFS_PROP_CLONES:
		if (get_clones_string(zhp, propbuf, proplen) != 0)
			return (-1);
		break;

	case ZFS_PROP_QUOTA:
	case ZFS_PROP_REFQUOTA:
	case ZFS_PROP_RESERVATION:
	case ZFS_PROP_REFRESERVATION:

		if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
			return (-1);
		/*
		 * If quota or reservation is 0, we translate this into 'none'
		 * (unless literal is set), and indicate that it's the default
		 * value.  Otherwise, we print the number nicely and indicate
		 * that it's set locally.
		 */
		if (val == 0) {
			if (literal)
				(void) strlcpy(propbuf, "0", proplen);
			else
				(void) strlcpy(propbuf, "none", proplen);
		} else {
			if (literal)
				(void) snprintf(propbuf, proplen, "%llu",
				    (u_longlong_t)val);
			else
				zfs_nicebytes(val, propbuf, proplen);
		}
		zcp_check(zhp, prop, val, NULL);
		break;

	case ZFS_PROP_FILESYSTEM_LIMIT:
	case ZFS_PROP_SNAPSHOT_LIMIT:
	case ZFS_PROP_FILESYSTEM_COUNT:
	case ZFS_PROP_SNAPSHOT_COUNT:

		if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
			return (-1);

		/*
		 * If limit is UINT64_MAX, we translate this into 'none', and
		 * indicate that it's the default value.  Otherwise, we print
		 * the number nicely and indicate that it's set locally.
		 */
		if (val == UINT64_MAX) {
			(void) strlcpy(propbuf, "none", proplen);
		} else if (literal) {
			(void) snprintf(propbuf, proplen, "%llu",
			    (u_longlong_t)val);
		} else {
			zfs_nicenum(val, propbuf, proplen);
		}

		zcp_check(zhp, prop, val, NULL);
		break;

	case ZFS_PROP_REFRATIO:
	case ZFS_PROP_COMPRESSRATIO:
		if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
			return (-1);
		if (literal)
			(void) snprintf(propbuf, proplen, "%llu.%02llu",
			    (u_longlong_t)(val / 100),
			    (u_longlong_t)(val % 100));
		else
			(void) snprintf(propbuf, proplen, "%llu.%02llux",
			    (u_longlong_t)(val / 100),
			    (u_longlong_t)(val % 100));
		zcp_check(zhp, prop, val, NULL);
		break;

	case ZFS_PROP_TYPE:
		switch (zhp->zfs_type) {
		case ZFS_TYPE_FILESYSTEM:
			str = "filesystem";
			break;
		case ZFS_TYPE_VOLUME:
			str = "volume";
			break;
		case ZFS_TYPE_SNAPSHOT:
			str = "snapshot";
			break;
		case ZFS_TYPE_BOOKMARK:
			str = "bookmark";
			break;
		default:
			abort();
		}
		(void) snprintf(propbuf, proplen, "%s", str);
		zcp_check(zhp, prop, 0, propbuf);
		break;

	case ZFS_PROP_MOUNTED:
		/*
		 * The 'mounted' property is a pseudo-property that describes
		 * whether the filesystem is currently mounted.  Even though
		 * it's a boolean value, the typical values of "on" and "off"
		 * don't make sense, so we translate to "yes" and "no".
		 */
		if (get_numeric_property(zhp, ZFS_PROP_MOUNTED,
		    src, &source, &val) != 0)
			return (-1);
		if (val)
			(void) strlcpy(propbuf, "yes", proplen);
		else
			(void) strlcpy(propbuf, "no", proplen);
		break;

	case ZFS_PROP_NAME:
		/*
		 * The 'name' property is a pseudo-property derived from the
		 * dataset name.  It is presented as a real property to
		 * simplify consumers.
		 */
		(void) strlcpy(propbuf, zhp->zfs_name, proplen);
		zcp_check(zhp, prop, 0, propbuf);
		break;

	case ZFS_PROP_MLSLABEL:
		{
#ifdef HAVE_MLSLABEL
			m_label_t *new_sl = NULL;
			char *ascii = NULL;	/* human readable label */

			(void) strlcpy(propbuf,
			    getprop_string(zhp, prop, &source), proplen);

			if (literal || (strcasecmp(propbuf,
			    ZFS_MLSLABEL_DEFAULT) == 0))
				break;

			/*
			 * Try to translate the internal hex string to
			 * human-readable output.  If there are any
			 * problems just use the hex string.
			 */

			if (str_to_label(propbuf, &new_sl, MAC_LABEL,
			    L_NO_CORRECTION, NULL) == -1) {
				m_label_free(new_sl);
				break;
			}

			if (label_to_str(new_sl, &ascii, M_LABEL,
			    DEF_NAMES) != 0) {
				if (ascii)
					free(ascii);
				m_label_free(new_sl);
				break;
			}
			m_label_free(new_sl);

			(void) strlcpy(propbuf, ascii, proplen);
			free(ascii);
#else
			(void) strlcpy(propbuf,
			    getprop_string(zhp, prop, &source), proplen);
#endif /* HAVE_MLSLABEL */
		}
		break;

	case ZFS_PROP_GUID:
	case ZFS_PROP_KEY_GUID:
	case ZFS_PROP_IVSET_GUID:
	case ZFS_PROP_CREATETXG:
	case ZFS_PROP_OBJSETID:
	case ZFS_PROP_PBKDF2_ITERS:
		/*
		 * These properties are stored as numbers, but they are
		 * identifiers or counters.
		 * We don't want them to be pretty printed, because pretty
		 * printing truncates their values making them useless.
		 */
		if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
			return (-1);
		(void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)val);
		zcp_check(zhp, prop, val, NULL);
		break;

	case ZFS_PROP_REFERENCED:
	case ZFS_PROP_AVAILABLE:
	case ZFS_PROP_USED:
	case ZFS_PROP_USEDSNAP:
	case ZFS_PROP_USEDDS:
	case ZFS_PROP_USEDREFRESERV:
	case ZFS_PROP_USEDCHILD:
		if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
			return (-1);
		if (literal) {
			(void) snprintf(propbuf, proplen, "%llu",
			    (u_longlong_t)val);
		} else {
			zfs_nicebytes(val, propbuf, proplen);
		}
		zcp_check(zhp, prop, val, NULL);
		break;

	case ZFS_PROP_SNAPSHOTS_CHANGED:
		{
			if ((get_numeric_property(zhp, prop, src, &source,
			    &val) != 0) || val == 0) {
				return (-1);
			}

			time_t time = (time_t)val;
			struct tm t;

			if (literal ||
			    localtime_r(&time, &t) == NULL ||
			    strftime(propbuf, proplen, "%a %b %e %k:%M:%S %Y",
			    &t) == 0)
				(void) snprintf(propbuf, proplen, "%llu",
				    (u_longlong_t)val);
		}
		zcp_check(zhp, prop, val, NULL);
		break;

	default:
		switch (zfs_prop_get_type(prop)) {
		case PROP_TYPE_NUMBER:
			if (get_numeric_property(zhp, prop, src,
			    &source, &val) != 0) {
				return (-1);
			}

			if (literal) {
				(void) snprintf(propbuf, proplen, "%llu",
				    (u_longlong_t)val);
			} else {
				zfs_nicenum(val, propbuf, proplen);
			}
			zcp_check(zhp, prop, val, NULL);
			break;

		case PROP_TYPE_STRING:
			str = getprop_string(zhp, prop, &source);
			if (str == NULL)
				return (-1);

			(void) strlcpy(propbuf, str, proplen);
			zcp_check(zhp, prop, 0, str);
			break;

		case PROP_TYPE_INDEX:
			if (get_numeric_property(zhp, prop, src,
			    &source, &val) != 0)
				return (-1);
			if (zfs_prop_index_to_string(prop, val, &strval) != 0)
				return (-1);

			(void) strlcpy(propbuf, strval, proplen);
			zcp_check(zhp, prop, 0, strval);
			break;

		default:
			abort();
		}
	}

	get_source(zhp, src, source, statbuf, statlen);

	return (0);
}
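
/*
 * Illustrative sketch (not part of libzfs): a typical zfs_prop_get() call,
 * mirroring roughly what `zfs get compression <dataset>` would print.  The
 * handle is assumed to be open; the property choice is an example.
 */
#if 0
static void
example_get_compression(zfs_handle_t *zhp)
{
	char value[ZFS_MAXPROPLEN];
	char setpoint[ZFS_MAXPROPLEN];
	zprop_source_t src = ZPROP_SRC_NONE;

	if (zfs_prop_get(zhp, ZFS_PROP_COMPRESSION, value, sizeof (value),
	    &src, setpoint, sizeof (setpoint), B_FALSE) == 0)
		(void) printf("compression=%s (source=%d)\n", value, (int)src);
}
#endif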

/*
 * Utility function to get the given numeric property.  Does no validation that
 * the given property is the appropriate type; should only be used with
 * hard-coded property types.
 */
uint64_t
zfs_prop_get_int(zfs_handle_t *zhp, zfs_prop_t prop)
{
	const char *source;
	uint64_t val = 0;

	(void) get_numeric_property(zhp, prop, NULL, &source, &val);

	return (val);
}

static int
zfs_prop_set_int(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t val)
{
	char buf[64];

	(void) snprintf(buf, sizeof (buf), "%llu", (longlong_t)val);
	return (zfs_prop_set(zhp, zfs_prop_to_name(prop), buf));
}

/*
 * Similar to zfs_prop_get(), but returns the value as an integer.
 */
int
zfs_prop_get_numeric(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t *value,
    zprop_source_t *src, char *statbuf, size_t statlen)
{
	const char *source;

	/*
	 * Check to see if this property applies to our object
	 */
	if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE)) {
		return (zfs_error_fmt(zhp->zfs_hdl, EZFS_PROPTYPE,
		    dgettext(TEXT_DOMAIN, "cannot get property '%s'"),
		    zfs_prop_to_name(prop)));
	}

	if (src)
		*src = ZPROP_SRC_NONE;

	if (get_numeric_property(zhp, prop, src, &source, value) != 0)
		return (-1);

	get_source(zhp, src, source, statbuf, statlen);

	return (0);
}
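
/*
 * Per the commit history for this file, zfs_prop_get_int() passes src=NULL
 * through to get_numeric_property(); after the revert of e4288a8397bb1f,
 * that path once again consults mount options, so e.g.
 * zfs_prop_get_int(zhp, ZFS_PROP_MOUNTED) reflects the live mount state.
 * Use zfs_prop_get_numeric() instead when the source is also needed.
 */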

#ifdef HAVE_IDMAP
static int
idmap_id_to_numeric_domain_rid(uid_t id, boolean_t isuser,
    char **domainp, idmap_rid_t *ridp)
{
	idmap_get_handle_t *get_hdl = NULL;
	idmap_stat status;
	int err = EINVAL;

	if (idmap_get_create(&get_hdl) != IDMAP_SUCCESS)
		goto out;

	if (isuser) {
		err = idmap_get_sidbyuid(get_hdl, id,
		    IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status);
	} else {
		err = idmap_get_sidbygid(get_hdl, id,
		    IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status);
	}
	if (err == IDMAP_SUCCESS &&
	    idmap_get_mappings(get_hdl) == IDMAP_SUCCESS &&
	    status == IDMAP_SUCCESS)
		err = 0;
	else
		err = EINVAL;
out:
	if (get_hdl)
		idmap_get_destroy(get_hdl);
	return (err);
}
#endif /* HAVE_IDMAP */

/*
 * Convert the propname into the parameters needed by the kernel.
 * Eg: userquota@ahrens -> ZFS_PROP_USERQUOTA, "", 126829
 * Eg: userused@matt@domain -> ZFS_PROP_USERUSED, "S-1-123-456", 789
 * Eg: groupquota@staff -> ZFS_PROP_GROUPQUOTA, "", 1234
 * Eg: groupused@staff -> ZFS_PROP_GROUPUSED, "", 1234
 * Eg: projectquota@123 -> ZFS_PROP_PROJECTQUOTA, "", 123
 * Eg: projectused@789 -> ZFS_PROP_PROJECTUSED, "", 789
 */
static int
userquota_propname_decode(const char *propname, boolean_t zoned,
    zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp)
{
	zfs_userquota_prop_t type;
	char *cp;
	boolean_t isuser;
	boolean_t isgroup;
	boolean_t isproject;
	struct passwd *pw;
	struct group *gr;

	domain[0] = '\0';

	/* Figure out the property type ({user|group|project}{quota|space}) */
	for (type = 0; type < ZFS_NUM_USERQUOTA_PROPS; type++) {
		if (strncmp(propname, zfs_userquota_prop_prefixes[type],
		    strlen(zfs_userquota_prop_prefixes[type])) == 0)
			break;
	}
	if (type == ZFS_NUM_USERQUOTA_PROPS)
		return (EINVAL);
	*typep = type;

	isuser = (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_USERUSED ||
	    type == ZFS_PROP_USEROBJQUOTA ||
	    type == ZFS_PROP_USEROBJUSED);
	isgroup = (type == ZFS_PROP_GROUPQUOTA || type == ZFS_PROP_GROUPUSED ||
	    type == ZFS_PROP_GROUPOBJQUOTA ||
	    type == ZFS_PROP_GROUPOBJUSED);
	isproject = (type == ZFS_PROP_PROJECTQUOTA ||
	    type == ZFS_PROP_PROJECTUSED || type == ZFS_PROP_PROJECTOBJQUOTA ||
	    type == ZFS_PROP_PROJECTOBJUSED);

	cp = strchr(propname, '@') + 1;

	if (isuser && (pw = getpwnam(cp)) != NULL) {
		if (zoned && getzoneid() == GLOBAL_ZONEID)
			return (ENOENT);
		*ridp = pw->pw_uid;
	} else if (isgroup && (gr = getgrnam(cp)) != NULL) {
		if (zoned && getzoneid() == GLOBAL_ZONEID)
			return (ENOENT);
		*ridp = gr->gr_gid;
	} else if (!isproject && strchr(cp, '@')) {
#ifdef HAVE_IDMAP
		/*
		 * It's a SID name (eg "user@domain") that needs to be
		 * turned into S-1-domainID-RID.
		 */
		directory_error_t e;
		char *numericsid = NULL;
		char *end;

		if (zoned && getzoneid() == GLOBAL_ZONEID)
			return (ENOENT);
		if (isuser) {
			e = directory_sid_from_user_name(NULL,
			    cp, &numericsid);
		} else {
			e = directory_sid_from_group_name(NULL,
			    cp, &numericsid);
		}
		if (e != NULL) {
			directory_error_free(e);
			return (ENOENT);
		}
		if (numericsid == NULL)
			return (ENOENT);
		cp = numericsid;
		(void) strlcpy(domain, cp, domainlen);
		cp = strrchr(domain, '-');
		*cp = '\0';
		cp++;

		errno = 0;
		*ridp = strtoull(cp, &end, 10);
		free(numericsid);

		if (errno != 0 || *end != '\0')
			return (EINVAL);
#else
		(void) domainlen;
		return (ENOSYS);
#endif /* HAVE_IDMAP */
	} else {
		/* It's a user/group/project ID (eg "12345"). */
		uid_t id;
		char *end;
		id = strtoul(cp, &end, 10);
		if (*end != '\0')
			return (EINVAL);
		if (id > MAXUID && !isproject) {
#ifdef HAVE_IDMAP
			/* It's an ephemeral ID. */
			idmap_rid_t rid;
			char *mapdomain;

			if (idmap_id_to_numeric_domain_rid(id, isuser,
			    &mapdomain, &rid) != 0)
				return (ENOENT);
			(void) strlcpy(domain, mapdomain, domainlen);
			*ridp = rid;
#else
			return (ENOSYS);
#endif /* HAVE_IDMAP */
		} else {
			*ridp = id;
		}
	}

	return (0);
}
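
/*
 * Worked examples of the decoding above (names and IDs hypothetical):
 * "userquota@alice" with a local passwd entry uid=1000 yields
 * (ZFS_PROP_USERQUOTA, domain="", rid=1000); a SID name such as
 * "userused@joe@mydomain" resolves through idmap to (ZFS_PROP_USERUSED,
 * domain="S-1-...", rid); and a bare numeric name such as
 * "projectused@42" yields (ZFS_PROP_PROJECTUSED, domain="", rid=42).
 */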

static int
zfs_prop_get_userquota_common(zfs_handle_t *zhp, const char *propname,
    uint64_t *propvalue, zfs_userquota_prop_t *typep)
{
	int err;
	zfs_cmd_t zc = {"\0"};

	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));

	err = userquota_propname_decode(propname,
	    zfs_prop_get_int(zhp, ZFS_PROP_ZONED),
	    typep, zc.zc_value, sizeof (zc.zc_value), &zc.zc_guid);
	zc.zc_objset_type = *typep;
	if (err)
		return (err);

	err = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_USERSPACE_ONE, &zc);
	if (err)
		return (err);

	*propvalue = zc.zc_cookie;
	return (0);
}

int
zfs_prop_get_userquota_int(zfs_handle_t *zhp, const char *propname,
    uint64_t *propvalue)
{
	zfs_userquota_prop_t type;

	return (zfs_prop_get_userquota_common(zhp, propname, propvalue,
	    &type));
}
int
zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname,
    char *propbuf, int proplen, boolean_t literal)
{
	int err;
	uint64_t propvalue;
	zfs_userquota_prop_t type;

	err = zfs_prop_get_userquota_common(zhp, propname, &propvalue,
	    &type);

	if (err)
		return (err);

	if (literal) {
		(void) snprintf(propbuf, proplen, "%llu",
		    (u_longlong_t)propvalue);
	} else if (propvalue == 0 &&
	    (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_GROUPQUOTA ||
	    type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA ||
	    type == ZFS_PROP_PROJECTQUOTA ||
	    type == ZFS_PROP_PROJECTOBJQUOTA)) {
		(void) strlcpy(propbuf, "none", proplen);
	} else if (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_GROUPQUOTA ||
	    type == ZFS_PROP_USERUSED || type == ZFS_PROP_GROUPUSED ||
	    type == ZFS_PROP_PROJECTUSED || type == ZFS_PROP_PROJECTQUOTA) {
		zfs_nicebytes(propvalue, propbuf, proplen);
	} else {
		zfs_nicenum(propvalue, propbuf, proplen);
	}
	return (0);
}

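/*
 * Usage sketch (illustrative only, not part of the library): a caller that
 * wants the human-readable quota for a user might do the following.  The
 * handle "g_zfs" and the names "tank/home" and "alice" are hypothetical.
 *
 *	zfs_handle_t *zhp = zfs_open(g_zfs, "tank/home", ZFS_TYPE_FILESYSTEM);
 *	char buf[ZFS_MAXPROPLEN];
 *
 *	if (zhp != NULL) {
 *		if (zfs_prop_get_userquota(zhp, "userquota@alice",
 *		    buf, sizeof (buf), B_FALSE) == 0)
 *			(void) printf("userquota@alice = %s\n", buf);
 *		zfs_close(zhp);
 *	}
 */
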
/*
 * propname must start with "written@" or "written#".
 */
int
zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname,
    uint64_t *propvalue)
{
	int err;
	zfs_cmd_t zc = {"\0"};
	const char *snapname;

	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));

	assert(zfs_prop_written(propname));
	snapname = propname + strlen("written@");
	if (strchr(snapname, '@') != NULL || strchr(snapname, '#') != NULL) {
		/* full snapshot or bookmark name specified */
		(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));
	} else {
		/* snapname is the short name, append it to zhp's fsname */
		char *cp;

		(void) strlcpy(zc.zc_value, zhp->zfs_name,
		    sizeof (zc.zc_value));
		cp = strchr(zc.zc_value, '@');
		if (cp != NULL)
			*cp = '\0';
		(void) strlcat(zc.zc_value, snapname - 1, sizeof (zc.zc_value));
	}

	err = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SPACE_WRITTEN, &zc);
	if (err)
		return (err);

	*propvalue = zc.zc_cookie;
	return (0);
}

int
zfs_prop_get_written(zfs_handle_t *zhp, const char *propname,
    char *propbuf, int proplen, boolean_t literal)
{
	int err;
	uint64_t propvalue;

	err = zfs_prop_get_written_int(zhp, propname, &propvalue);
	if (err)
		return (err);

	if (literal) {
		(void) snprintf(propbuf, proplen, "%llu",
		    (u_longlong_t)propvalue);
	} else {
		zfs_nicebytes(propvalue, propbuf, proplen);
	}

	return (0);
}

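/*
 * Usage sketch (illustrative only): "written@<snap>" accepts either a short
 * snapshot name, resolved against this dataset, or a full snapshot/bookmark
 * name.  The snapshot name "monday" is hypothetical.
 *
 *	uint64_t written;
 *
 *	if (zfs_prop_get_written_int(zhp, "written@monday", &written) == 0)
 *		(void) printf("%llu bytes written since @monday\n",
 *		    (u_longlong_t)written);
 */
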
/*
 * Returns the name of the given zfs handle.
 */
const char *
zfs_get_name(const zfs_handle_t *zhp)
{
	return (zhp->zfs_name);
}

/*
 * Returns the name of the parent pool for the given zfs handle.
 */
const char *
zfs_get_pool_name(const zfs_handle_t *zhp)
{
	return (zhp->zpool_hdl->zpool_name);
}

/*
 * Returns the type of the given zfs handle.
 */
zfs_type_t
zfs_get_type(const zfs_handle_t *zhp)
{
	return (zhp->zfs_type);
}

/*
 * Returns the type of the given zfs handle,
 * or, if a snapshot, the type of the snapshotted dataset.
 */
zfs_type_t
zfs_get_underlying_type(const zfs_handle_t *zhp)
{
	return (zhp->zfs_head_type);
}

/*
 * Is one dataset name a child dataset of another?
 *
 * Needs to handle these cases:
 * Dataset 1	"a/foo"		"a/foo"		"a/foo"		"a/foo"
 * Dataset 2	"a/fo"		"a/foobar"	"a/bar/baz"	"a/foo/bar"
 * Descendant?	No.		No.		No.		Yes.
 */
static boolean_t
is_descendant(const char *ds1, const char *ds2)
{
	size_t d1len = strlen(ds1);

	/* ds2 can't be a descendant if it's smaller */
	if (strlen(ds2) < d1len)
		return (B_FALSE);

	/* otherwise, compare strings and verify that there's a '/' char */
	return (ds2[d1len] == '/' && (strncmp(ds1, ds2, d1len) == 0));
}

/*
 * Given a complete name, return just the portion that refers to the parent.
 * Will return -1 if there is no parent (path is just the name of the
 * pool).
 */
static int
parent_name(const char *path, char *buf, size_t buflen)
{
	char *slashp;

	(void) strlcpy(buf, path, buflen);

	if ((slashp = strrchr(buf, '/')) == NULL)
		return (-1);
	*slashp = '\0';

	return (0);
}

int
zfs_parent_name(zfs_handle_t *zhp, char *buf, size_t buflen)
{
	return (parent_name(zfs_get_name(zhp), buf, buflen));
}

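/*
 * Example (illustrative only): for a handle opened on "tank/home/alice",
 * zfs_parent_name() stores "tank/home" in buf and returns 0; for a handle
 * on the pool root "tank" it returns -1, since there is no parent.
 *
 *	char buf[ZFS_MAX_DATASET_NAME_LEN];
 *
 *	if (zfs_parent_name(zhp, buf, sizeof (buf)) == 0)
 *		(void) printf("parent: %s\n", buf);
 */
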
/*
 * If accept_ancestor is false, then check to make sure that the given path has
 * a parent, and that it exists.  If accept_ancestor is true, then find the
 * closest existing ancestor for the given path.  Return in prefixlen the
 * length of the already-existing prefix of the given path.  We also fetch the
 * 'zoned' property, which is used to validate property settings when creating
 * new datasets.
 */
static int
check_parents(libzfs_handle_t *hdl, const char *path, uint64_t *zoned,
    boolean_t accept_ancestor, int *prefixlen)
{
	zfs_cmd_t zc = {"\0"};
	char parent[ZFS_MAX_DATASET_NAME_LEN];
	char *slash;
	zfs_handle_t *zhp;
	char errbuf[ERRBUFLEN];
	uint64_t is_zoned;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot create '%s'"), path);

	/* get parent, and check to see if this is just a pool */
	if (parent_name(path, parent, sizeof (parent)) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "missing dataset name"));
		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
	}

	/* check to see if the pool exists */
	if ((slash = strchr(parent, '/')) == NULL)
		slash = parent + strlen(parent);
	(void) strlcpy(zc.zc_name, parent,
	    MIN(sizeof (zc.zc_name), slash - parent + 1));
	if (zfs_ioctl(hdl, ZFS_IOC_OBJSET_STATS, &zc) != 0 &&
	    errno == ENOENT) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "no such pool '%s'"), zc.zc_name);
		return (zfs_error(hdl, EZFS_NOENT, errbuf));
	}

	/* check to see if the parent dataset exists */
	while ((zhp = make_dataset_handle(hdl, parent)) == NULL) {
		if (errno == ENOENT && accept_ancestor) {
			/*
			 * Go deeper to find an ancestor, give up on top level.
			 */
			if (parent_name(parent, parent, sizeof (parent)) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "no such pool '%s'"), zc.zc_name);
				return (zfs_error(hdl, EZFS_NOENT, errbuf));
			}
		} else if (errno == ENOENT) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "parent does not exist"));
			return (zfs_error(hdl, EZFS_NOENT, errbuf));
		} else
			return (zfs_standard_error(hdl, errno, errbuf));
	}

	is_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
	if (zoned != NULL)
		*zoned = is_zoned;

	/* we are in a non-global zone, but parent is in the global zone */
	if (getzoneid() != GLOBAL_ZONEID && !is_zoned) {
		(void) zfs_standard_error(hdl, EPERM, errbuf);
		zfs_close(zhp);
		return (-1);
	}

	/* make sure parent is a filesystem */
	if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "parent is not a filesystem"));
		(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
		zfs_close(zhp);
		return (-1);
	}

	zfs_close(zhp);
	if (prefixlen != NULL)
		*prefixlen = strlen(parent);
	return (0);
}

/*
 * Finds whether the dataset of the given type(s) exists.
 */
boolean_t
zfs_dataset_exists(libzfs_handle_t *hdl, const char *path, zfs_type_t types)
{
	zfs_handle_t *zhp;

	if (!zfs_validate_name(hdl, path, types, B_FALSE))
		return (B_FALSE);

	/*
	 * Try to get stats for the dataset, which will tell us if it exists.
	 */
	if ((zhp = make_dataset_handle(hdl, path)) != NULL) {
		int ds_type = zhp->zfs_type;

		zfs_close(zhp);
		if (types & ds_type)
			return (B_TRUE);
	}
	return (B_FALSE);
}

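/*
 * Usage sketch (illustrative only): check for an existing filesystem or
 * volume before attempting to create one.  "g_zfs" and "tank/vm" are
 * hypothetical.
 *
 *	if (zfs_dataset_exists(g_zfs, "tank/vm",
 *	    ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME))
 *		(void) fprintf(stderr, "tank/vm already exists\n");
 */
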
/*
 * Given a path to 'target', create all the ancestors between
 * the prefixlen portion of the path, and the target itself.
 * Fail if the initial prefixlen-ancestor does not already exist.
 */
int
create_parents(libzfs_handle_t *hdl, char *target, int prefixlen)
{
	zfs_handle_t *h;
	char *cp;
	const char *opname;

	/* make sure prefix exists */
	cp = target + prefixlen;
	if (*cp != '/') {
		assert(strchr(cp, '/') == NULL);
		h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
	} else {
		*cp = '\0';
		h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
		*cp = '/';
	}
	if (h == NULL)
		return (-1);
	zfs_close(h);

	/*
	 * Attempt to create, mount, and share any ancestor filesystems,
	 * up to the prefixlen-long one.
	 */
	for (cp = target + prefixlen + 1;
	    (cp = strchr(cp, '/')) != NULL; *cp = '/', cp++) {

		*cp = '\0';

		h = make_dataset_handle(hdl, target);
		if (h) {
			/* it already exists, nothing to do here */
			zfs_close(h);
			continue;
		}

		if (zfs_create(hdl, target, ZFS_TYPE_FILESYSTEM,
		    NULL) != 0) {
			opname = dgettext(TEXT_DOMAIN, "create");
			goto ancestorerr;
		}

		h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
		if (h == NULL) {
			opname = dgettext(TEXT_DOMAIN, "open");
			goto ancestorerr;
		}

		if (zfs_mount(h, NULL, 0) != 0) {
			opname = dgettext(TEXT_DOMAIN, "mount");
			goto ancestorerr;
		}

		if (zfs_share(h, NULL) != 0) {
			opname = dgettext(TEXT_DOMAIN, "share");
			goto ancestorerr;
		}

		zfs_close(h);
	}
	zfs_commit_shares(NULL);

	return (0);

ancestorerr:
	zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
	    "failed to %s ancestor '%s'"), opname, target);
	return (-1);
}

/*
 * Creates non-existing ancestors of the given path.
 */
int
zfs_create_ancestors(libzfs_handle_t *hdl, const char *path)
{
	int prefix;
	char *path_copy;
	char errbuf[ERRBUFLEN];
	int rc = 0;

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), path);

	/*
	 * Check that we are not passing the nesting limit
	 * before we start creating any ancestors.
	 */
	if (dataset_nestcheck(path) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "maximum name nesting depth exceeded"));
		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
	}

	if (check_parents(hdl, path, NULL, B_TRUE, &prefix) != 0)
		return (-1);

	if ((path_copy = strdup(path)) != NULL) {
		rc = create_parents(hdl, path_copy, prefix);
		free(path_copy);
	}
	if (path_copy == NULL || rc != 0)
		return (-1);

	return (0);
}

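/*
 * Usage sketch (illustrative only): given the hypothetical path "tank/a/b/c",
 * zfs_create_ancestors() creates "tank/a" and "tank/a/b" as needed (but not
 * "tank/a/b/c" itself), mounting and sharing each filesystem it creates.
 *
 *	if (zfs_create_ancestors(g_zfs, "tank/a/b/c") != 0)
 *		(void) fprintf(stderr, "ancestor creation failed\n");
 */
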
/*
 * Create a new filesystem or volume.
 */
int
zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type,
    nvlist_t *props)
{
	int ret;
	uint64_t size = 0;
	uint64_t blocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
	uint64_t zoned;
	enum lzc_dataset_type ost;
	zpool_handle_t *zpool_handle;
	uint8_t *wkeydata = NULL;
	uint_t wkeylen = 0;
	char errbuf[ERRBUFLEN];
	char parent[ZFS_MAX_DATASET_NAME_LEN];

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), path);

	/* validate the path, taking care to note the extended error message */
	if (!zfs_validate_name(hdl, path, type, B_TRUE))
		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));

	if (dataset_nestcheck(path) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "maximum name nesting depth exceeded"));
		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
	}

	/* validate parents exist */
	if (check_parents(hdl, path, &zoned, B_FALSE, NULL) != 0)
		return (-1);

	/*
	 * The failure modes when creating a dataset of a different type over
	 * one that already exists are a little strange.  In particular, if
	 * you try to create a dataset on top of an existing dataset, the
	 * ioctl() will return ENOENT, not EEXIST.  To prevent this from
	 * happening, we first try to see if the dataset exists.
	 */
	if (zfs_dataset_exists(hdl, path, ZFS_TYPE_DATASET)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset already exists"));
		return (zfs_error(hdl, EZFS_EXISTS, errbuf));
	}

	if (type == ZFS_TYPE_VOLUME)
		ost = LZC_DATSET_TYPE_ZVOL;
	else
		ost = LZC_DATSET_TYPE_ZFS;

	/* open zpool handle for prop validation */
	char pool_path[ZFS_MAX_DATASET_NAME_LEN];
	(void) strlcpy(pool_path, path, sizeof (pool_path));

	/* truncate pool_path at first slash */
	char *p = strchr(pool_path, '/');
	if (p != NULL)
		*p = '\0';

	if ((zpool_handle = zpool_open(hdl, pool_path)) == NULL)
		return (-1);

	if (props && (props = zfs_valid_proplist(hdl, type, props,
	    zoned, NULL, zpool_handle, B_TRUE, errbuf)) == 0) {
		zpool_close(zpool_handle);
		return (-1);
	}
	zpool_close(zpool_handle);

	if (type == ZFS_TYPE_VOLUME) {
		/*
		 * If we are creating a volume, the size and block size must
		 * satisfy a few constraints.  First, the blocksize must be a
		 * valid block size between SPA_{MIN,MAX}BLOCKSIZE.  Second,
		 * the volsize must be a multiple of the block size, and
		 * cannot be zero.
		 */
		if (props == NULL || nvlist_lookup_uint64(props,
		    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &size) != 0) {
			nvlist_free(props);
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "missing volume size"));
			return (zfs_error(hdl, EZFS_BADPROP, errbuf));
		}

		if ((ret = nvlist_lookup_uint64(props,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
		    &blocksize)) != 0) {
			if (ret == ENOENT) {
				blocksize = zfs_prop_default_numeric(
				    ZFS_PROP_VOLBLOCKSIZE);
			} else {
				nvlist_free(props);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "missing volume block size"));
				return (zfs_error(hdl, EZFS_BADPROP, errbuf));
			}
		}

		if (size == 0) {
			nvlist_free(props);
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "volume size cannot be zero"));
			return (zfs_error(hdl, EZFS_BADPROP, errbuf));
		}

		if (size % blocksize != 0) {
			nvlist_free(props);
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "volume size must be a multiple of volume block "
			    "size"));
			return (zfs_error(hdl, EZFS_BADPROP, errbuf));
		}
	}

	(void) parent_name(path, parent, sizeof (parent));
	if (zfs_crypto_create(hdl, parent, props, NULL, B_TRUE,
	    &wkeydata, &wkeylen) != 0) {
		nvlist_free(props);
		return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
	}

	/* create the dataset */
	ret = lzc_create(path, ost, props, wkeydata, wkeylen);
	nvlist_free(props);
	if (wkeydata != NULL)
		free(wkeydata);

	/* check for failure */
	if (ret != 0) {
		switch (errno) {
		case ENOENT:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "no such parent '%s'"), parent);
			return (zfs_error(hdl, EZFS_NOENT, errbuf));

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to set this "
			    "property or value"));
			return (zfs_error(hdl, EZFS_BADVERSION, errbuf));

		case EACCES:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "encryption root's key is not loaded "
			    "or provided"));
			return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));

		case ERANGE:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property value(s) specified"));
			return (zfs_error(hdl, EZFS_BADPROP, errbuf));
#ifdef _ILP32
		case EOVERFLOW:
			/*
			 * This platform can't address a volume this big.
			 */
			if (type == ZFS_TYPE_VOLUME)
				return (zfs_error(hdl, EZFS_VOLTOOBIG,
				    errbuf));
			zfs_fallthrough;
#endif
		default:
			return (zfs_standard_error(hdl, errno, errbuf));
		}
	}

	return (0);
}

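/*
 * Usage sketch (illustrative only): create a 1 GiB volume.  For volumes,
 * ZFS_PROP_VOLSIZE is required and must be a multiple of the block size.
 * "g_zfs" and "tank/vol1" are hypothetical.
 *
 *	nvlist_t *props = fnvlist_alloc();
 *
 *	fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
 *	    1ULL << 30);
 *	if (zfs_create(g_zfs, "tank/vol1", ZFS_TYPE_VOLUME, props) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(g_zfs));
 *	fnvlist_free(props);
 */
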
/*
 * Destroys the given dataset.  The caller must make sure that the filesystem
 * isn't mounted, and that there are no active dependents.  If the filesystem
 * does not exist this function does nothing.
 */
int
zfs_destroy(zfs_handle_t *zhp, boolean_t defer)
{
	int error;

	if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT && defer)
		return (EINVAL);

	if (zhp->zfs_type == ZFS_TYPE_BOOKMARK) {
		nvlist_t *nv = fnvlist_alloc();
		fnvlist_add_boolean(nv, zhp->zfs_name);
		error = lzc_destroy_bookmarks(nv, NULL);
		fnvlist_free(nv);
		if (error != 0) {
			return (zfs_standard_error_fmt(zhp->zfs_hdl, error,
			    dgettext(TEXT_DOMAIN, "cannot destroy '%s'"),
			    zhp->zfs_name));
		}
		return (0);
	}

	if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
		nvlist_t *nv = fnvlist_alloc();
		fnvlist_add_boolean(nv, zhp->zfs_name);
		error = lzc_destroy_snaps(nv, defer, NULL);
		fnvlist_free(nv);
	} else {
		error = lzc_destroy(zhp->zfs_name);
	}

	if (error != 0 && error != ENOENT) {
		return (zfs_standard_error_fmt(zhp->zfs_hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot destroy '%s'"),
		    zhp->zfs_name));
	}

	remove_mountpoint(zhp);

	return (0);
}

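/*
 * Usage sketch (illustrative only): destroy a snapshot, marking it for
 * deferred destruction if it is currently in use (e.g. held or cloned).
 * The snapshot name "tank/fs@old" is hypothetical.
 *
 *	zfs_handle_t *snap = zfs_open(g_zfs, "tank/fs@old",
 *	    ZFS_TYPE_SNAPSHOT);
 *
 *	if (snap != NULL) {
 *		(void) zfs_destroy(snap, B_TRUE);
 *		zfs_close(snap);
 *	}
 */
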
struct destroydata {
	nvlist_t *nvl;
	const char *snapname;
};

static int
zfs_check_snap_cb(zfs_handle_t *zhp, void *arg)
{
	struct destroydata *dd = arg;
	char name[ZFS_MAX_DATASET_NAME_LEN];
	int rv = 0;

	if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name,
	    dd->snapname) >= sizeof (name))
		return (EINVAL);

	if (lzc_exists(name))
		fnvlist_add_boolean(dd->nvl, name);

	rv = zfs_iter_filesystems_v2(zhp, 0, zfs_check_snap_cb, dd);
	zfs_close(zhp);
	return (rv);
}

/*
 * Destroys all snapshots with the given name in zhp & descendants.
 */
int
zfs_destroy_snaps(zfs_handle_t *zhp, char *snapname, boolean_t defer)
{
	int ret;
	struct destroydata dd = { 0 };

	dd.snapname = snapname;
	dd.nvl = fnvlist_alloc();
	(void) zfs_check_snap_cb(zfs_handle_dup(zhp), &dd);

	if (nvlist_empty(dd.nvl)) {
		ret = zfs_standard_error_fmt(zhp->zfs_hdl, ENOENT,
		    dgettext(TEXT_DOMAIN, "cannot destroy '%s@%s'"),
		    zhp->zfs_name, snapname);
	} else {
		ret = zfs_destroy_snaps_nvl(zhp->zfs_hdl, dd.nvl, defer);
	}
	fnvlist_free(dd.nvl);
	return (ret);
}

/*
 * Destroys all the snapshots named in the nvlist.
 */
int
zfs_destroy_snaps_nvl(libzfs_handle_t *hdl, nvlist_t *snaps, boolean_t defer)
{
	nvlist_t *errlist = NULL;
	nvpair_t *pair;

	int ret = zfs_destroy_snaps_nvl_os(hdl, snaps);
	if (ret != 0)
		return (ret);

	ret = lzc_destroy_snaps(snaps, defer, &errlist);

	if (ret == 0) {
		nvlist_free(errlist);
		return (0);
	}

	if (nvlist_empty(errlist)) {
		char errbuf[ERRBUFLEN];
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot destroy snapshots"));

		ret = zfs_standard_error(hdl, ret, errbuf);
	}
	for (pair = nvlist_next_nvpair(errlist, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(errlist, pair)) {
		char errbuf[ERRBUFLEN];
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot destroy snapshot %s"),
		    nvpair_name(pair));

		switch (fnvpair_value_int32(pair)) {
		case EEXIST:
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "snapshot is cloned"));
			ret = zfs_error(hdl, EZFS_EXISTS, errbuf);
			break;
		default:
			ret = zfs_standard_error(hdl, errno, errbuf);
			break;
		}
	}

	nvlist_free(errlist);
	return (ret);
}

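/*
 * Usage sketch (illustrative only): destroy a specific set of snapshots in
 * one call.  Each nvlist key is a full snapshot name; the names below are
 * hypothetical.
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *
 *	fnvlist_add_boolean(snaps, "tank/fs@a");
 *	fnvlist_add_boolean(snaps, "tank/fs@b");
 *	(void) zfs_destroy_snaps_nvl(g_zfs, snaps, B_FALSE);
 *	fnvlist_free(snaps);
 */
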
/*
 * Clones the given dataset.  The target must be of the same type as the
 * source.
 */
int
zfs_clone(zfs_handle_t *zhp, const char *target, nvlist_t *props)
{
	char parent[ZFS_MAX_DATASET_NAME_LEN];
	int ret;
	char errbuf[ERRBUFLEN];
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	uint64_t zoned;

	assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), target);

	/* validate the target/clone name */
	if (!zfs_validate_name(hdl, target, ZFS_TYPE_FILESYSTEM, B_TRUE))
		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));

	/* validate parents exist */
	if (check_parents(hdl, target, &zoned, B_FALSE, NULL) != 0)
		return (-1);

	(void) parent_name(target, parent, sizeof (parent));

	/* do the clone */

	if (props) {
		zfs_type_t type = ZFS_TYPE_FILESYSTEM;

		if (ZFS_IS_VOLUME(zhp))
			type = ZFS_TYPE_VOLUME;
		if ((props = zfs_valid_proplist(hdl, type, props, zoned,
		    zhp, zhp->zpool_hdl, B_TRUE, errbuf)) == NULL)
			return (-1);
		if (zfs_fix_auto_resv(zhp, props) == -1) {
			nvlist_free(props);
			return (-1);
		}
	}

if (zfs_crypto_clone_check(hdl, zhp, parent, props) != 0) {
|
|
|
|
nvlist_free(props);
|
|
|
|
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
|
|
|
|
}
|
|
|
|
|
2013-08-28 11:45:09 +00:00
|
|
|
ret = lzc_clone(target, zhp->zfs_name, props);
|
|
|
|
nvlist_free(props);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
|
|
|
if (ret != 0) {
|
|
|
|
switch (errno) {
|
|
|
|
|
|
|
|
case ENOENT:
|
|
|
|
/*
|
|
|
|
* The parent doesn't exist. We should have caught this
|
|
|
|
* above, but there may a race condition that has since
|
|
|
|
* destroyed the parent.
|
|
|
|
*
|
|
|
|
* At this point, we don't know whether it's the source
|
|
|
|
* that doesn't exist anymore, or whether the target
|
|
|
|
* dataset doesn't exist.
|
|
|
|
*/
|
|
|
|
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"no such parent '%s'"), parent);
|
|
|
|
return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
|
|
|
|
|
|
|
|
case EXDEV:
|
|
|
|
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"source and target pools differ"));
|
|
|
|
return (zfs_error(zhp->zfs_hdl, EZFS_CROSSTARGET,
|
|
|
|
errbuf));
|
|
|
|
|
|
|
|
default:
|
|
|
|
return (zfs_standard_error(zhp->zfs_hdl, errno,
|
|
|
|
errbuf));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
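/*
 * Example (an illustrative sketch, not part of the library): cloning a
 * snapshot with the function above. The pool and dataset names are
 * hypothetical, and the block is compiled out so it cannot affect the
 * library itself.
 */
#if 0
static int
example_clone(libzfs_handle_t *g_zfs)
{
	zfs_handle_t *snap;
	int err;

	/* Open the origin snapshot and clone it to a new filesystem. */
	snap = zfs_open(g_zfs, "tank/fs@snap", ZFS_TYPE_SNAPSHOT);
	if (snap == NULL)
		return (-1);
	err = zfs_clone(snap, "tank/fs-clone", NULL);
	zfs_close(snap);
	return (err);
}
#endif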
/*
 * Promotes the given clone fs to be the clone parent.
 */
int
zfs_promote(zfs_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	char snapname[ZFS_MAX_DATASET_NAME_LEN];
	int ret;
	char errbuf[ERRBUFLEN];

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot promote '%s'"), zhp->zfs_name);

	if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "snapshots can not be promoted"));
		return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
	}

	if (zhp->zfs_dmustats.dds_origin[0] == '\0') {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "not a cloned filesystem"));
		return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
	}

	if (!zfs_validate_name(hdl, zhp->zfs_name, zhp->zfs_type, B_TRUE))
		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));

	ret = lzc_promote(zhp->zfs_name, snapname, sizeof (snapname));

	if (ret != 0) {
		switch (ret) {
		case EACCES:
			/*
			 * Promoting an encrypted dataset outside its
			 * encryption root.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot promote dataset outside its "
			    "encryption root"));
			return (zfs_error(hdl, EZFS_EXISTS, errbuf));

		case EEXIST:
			/* There is a conflicting snapshot name. */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "conflicting snapshot '%s' from parent '%s'"),
			    snapname, zhp->zfs_dmustats.dds_origin);
			return (zfs_error(hdl, EZFS_EXISTS, errbuf));

		default:
			return (zfs_standard_error(hdl, ret, errbuf));
		}
	}
	return (ret);
}
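/*
 * Example (an illustrative sketch): promoting a clone so that it becomes
 * the origin and its former parent becomes the clone. The dataset name is
 * hypothetical; the block is compiled out.
 */
#if 0
static int
example_promote(libzfs_handle_t *g_zfs)
{
	zfs_handle_t *clone;
	int err;

	clone = zfs_open(g_zfs, "tank/fs-clone", ZFS_TYPE_FILESYSTEM);
	if (clone == NULL)
		return (-1);
	err = zfs_promote(clone);
	zfs_close(clone);
	return (err);
}
#endif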
typedef struct snapdata {
	nvlist_t *sd_nvl;
	const char *sd_snapname;
} snapdata_t;

static int
zfs_snapshot_cb(zfs_handle_t *zhp, void *arg)
{
	snapdata_t *sd = arg;
	char name[ZFS_MAX_DATASET_NAME_LEN];
	int rv = 0;

	if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) == 0) {
		if (snprintf(name, sizeof (name), "%s@%s", zfs_get_name(zhp),
		    sd->sd_snapname) >= sizeof (name))
			return (EINVAL);

		fnvlist_add_boolean(sd->sd_nvl, name);

		rv = zfs_iter_filesystems_v2(zhp, 0, zfs_snapshot_cb, sd);
	}
	zfs_close(zhp);

	return (rv);
}
/*
 * Creates snapshots.  The keys in the snaps nvlist are the snapshots to be
 * created.
 */
int
zfs_snapshot_nvl(libzfs_handle_t *hdl, nvlist_t *snaps, nvlist_t *props)
{
	int ret;
	char errbuf[ERRBUFLEN];
	nvpair_t *elem;
	nvlist_t *errors;
	zpool_handle_t *zpool_hdl;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot create snapshots "));

	elem = NULL;
	while ((elem = nvlist_next_nvpair(snaps, elem)) != NULL) {
		const char *snapname = nvpair_name(elem);

		/* validate the target name */
		if (!zfs_validate_name(hdl, snapname, ZFS_TYPE_SNAPSHOT,
		    B_TRUE)) {
			(void) snprintf(errbuf, sizeof (errbuf),
			    dgettext(TEXT_DOMAIN,
			    "cannot create snapshot '%s'"), snapname);
			return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
		}
	}

	/*
	 * get pool handle for prop validation. assumes all snaps are in the
	 * same pool, as does lzc_snapshot (below).
	 */
	elem = nvlist_next_nvpair(snaps, NULL);
	if (elem == NULL)
		return (-1);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';
	zpool_hdl = zpool_open(hdl, pool);
	if (zpool_hdl == NULL)
		return (-1);

	if (props != NULL &&
	    (props = zfs_valid_proplist(hdl, ZFS_TYPE_SNAPSHOT,
	    props, B_FALSE, NULL, zpool_hdl, B_FALSE, errbuf)) == NULL) {
		zpool_close(zpool_hdl);
		return (-1);
	}
	zpool_close(zpool_hdl);

	ret = lzc_snapshot(snaps, props, &errors);

	if (ret != 0) {
		boolean_t printed = B_FALSE;
		for (elem = nvlist_next_nvpair(errors, NULL);
		    elem != NULL;
		    elem = nvlist_next_nvpair(errors, elem)) {
			(void) snprintf(errbuf, sizeof (errbuf),
			    dgettext(TEXT_DOMAIN,
			    "cannot create snapshot '%s'"), nvpair_name(elem));
			(void) zfs_standard_error(hdl,
			    fnvpair_value_int32(elem), errbuf);
			printed = B_TRUE;
		}
		if (!printed) {
			switch (ret) {
			case EXDEV:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple snapshots of same "
				    "fs not allowed"));
				(void) zfs_error(hdl, EZFS_EXISTS, errbuf);
				break;
			default:
				(void) zfs_standard_error(hdl, ret, errbuf);
			}
		}
	}

	nvlist_free(props);
	nvlist_free(errors);
	return (ret);
}
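/*
 * Example (an illustrative sketch): creating two snapshots atomically with
 * zfs_snapshot_nvl(). The names are hypothetical; both snapshots must live
 * in the same pool, as the comment above notes. Compiled out.
 */
#if 0
static int
example_snapshot_nvl(libzfs_handle_t *g_zfs)
{
	nvlist_t *snaps = fnvlist_alloc();
	int err;

	/* The keys of the nvlist are the full snapshot names to create. */
	fnvlist_add_boolean(snaps, "tank/fs@backup");
	fnvlist_add_boolean(snaps, "tank/vol@backup");
	err = zfs_snapshot_nvl(g_zfs, snaps, NULL);
	fnvlist_free(snaps);
	return (err);
}
#endif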
int
zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive,
    nvlist_t *props)
{
	int ret;
	snapdata_t sd = { 0 };
	char fsname[ZFS_MAX_DATASET_NAME_LEN];
	char *cp;
	zfs_handle_t *zhp;
	char errbuf[ERRBUFLEN];

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot snapshot %s"), path);

	if (!zfs_validate_name(hdl, path, ZFS_TYPE_SNAPSHOT, B_TRUE))
		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));

	(void) strlcpy(fsname, path, sizeof (fsname));
	cp = strchr(fsname, '@');
	*cp = '\0';
	sd.sd_snapname = cp + 1;

	if ((zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM |
	    ZFS_TYPE_VOLUME)) == NULL) {
		return (-1);
	}

	sd.sd_nvl = fnvlist_alloc();
	if (recursive) {
		(void) zfs_snapshot_cb(zfs_handle_dup(zhp), &sd);
	} else {
		fnvlist_add_boolean(sd.sd_nvl, path);
	}

	ret = zfs_snapshot_nvl(hdl, sd.sd_nvl, props);
	fnvlist_free(sd.sd_nvl);
	zfs_close(zhp);
	return (ret);
}
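/*
 * Example (an illustrative sketch): taking a recursive snapshot of a
 * dataset tree through the convenience wrapper above. Names are
 * hypothetical; compiled out.
 */
#if 0
static int
example_recursive_snapshot(libzfs_handle_t *g_zfs)
{
	/* Snapshot "tank/fs" and every filesystem beneath it as "@nightly". */
	return (zfs_snapshot(g_zfs, "tank/fs@nightly", B_TRUE, NULL));
}
#endif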
/*
 * Destroy any more recent snapshots.  We invoke this callback on any
 * dependents of the snapshot first.  If the 'cb_dependent' member is
 * non-zero, then this is a dependent and we should just destroy it without
 * checking the transaction group.
 */
typedef struct rollback_data {
	const char *cb_target;		/* the snapshot */
	uint64_t cb_create;		/* creation time reference */
	boolean_t cb_error;
	boolean_t cb_force;
} rollback_data_t;

static int
rollback_destroy_dependent(zfs_handle_t *zhp, void *data)
{
	rollback_data_t *cbp = data;
	prop_changelist_t *clp;

	/* We must destroy this clone; first unmount it */
	clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
	    cbp->cb_force ? MS_FORCE: 0);
	if (clp == NULL || changelist_prefix(clp) != 0) {
		cbp->cb_error = B_TRUE;
		zfs_close(zhp);
		return (0);
	}
	if (zfs_destroy(zhp, B_FALSE) != 0)
		cbp->cb_error = B_TRUE;
	else
		changelist_remove(clp, zhp->zfs_name);
	(void) changelist_postfix(clp);
	changelist_free(clp);

	zfs_close(zhp);
	return (0);
}

static int
rollback_destroy(zfs_handle_t *zhp, void *data)
{
	rollback_data_t *cbp = data;

	if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) {
		cbp->cb_error |= zfs_iter_dependents_v2(zhp, 0, B_FALSE,
		    rollback_destroy_dependent, cbp);

		cbp->cb_error |= zfs_destroy(zhp, B_FALSE);
	}

	zfs_close(zhp);
	return (0);
}

/*
 * Given a dataset, rollback to a specific snapshot, discarding any
 * data changes since then and making it the active dataset.
 *
 * Any snapshots and bookmarks more recent than the target are
 * destroyed, along with their dependents (i.e. clones).
 */
int
zfs_rollback(zfs_handle_t *zhp, zfs_handle_t *snap, boolean_t force)
{
	rollback_data_t cb = { 0 };
	int err;
	boolean_t restore_resv = 0;
	uint64_t old_volsize = 0, new_volsize;
	zfs_prop_t resv_prop = { 0 };
	uint64_t min_txg = 0;

	assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM ||
	    zhp->zfs_type == ZFS_TYPE_VOLUME);

	/*
	 * Destroy all recent snapshots and their dependents.
	 */
	cb.cb_force = force;
	cb.cb_target = snap->zfs_name;
	cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG);

	if (cb.cb_create > 0)
		min_txg = cb.cb_create;

	(void) zfs_iter_snapshots_v2(zhp, 0, rollback_destroy, &cb,
	    min_txg, 0);

	(void) zfs_iter_bookmarks_v2(zhp, 0, rollback_destroy, &cb);

	if (cb.cb_error)
		return (-1);

	/*
	 * Now that we have verified that the snapshot is the latest,
	 * rollback to the given snapshot.
	 */

	if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
		if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
			return (-1);
		old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
		restore_resv =
		    (old_volsize == zfs_prop_get_int(zhp, resv_prop));
	}

	/*
	 * Pass both the filesystem and the wanted snapshot names;
	 * we would get an error back if the snapshot is destroyed or
	 * a new snapshot is created before this request is processed.
	 */
	err = lzc_rollback_to(zhp->zfs_name, snap->zfs_name);
	if (err != 0) {
		char errbuf[ERRBUFLEN];

		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot rollback '%s'"),
		    zhp->zfs_name);
		switch (err) {
		case EEXIST:
			zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
			    "there is a snapshot or bookmark more recent "
			    "than '%s'"), snap->zfs_name);
			(void) zfs_error(zhp->zfs_hdl, EZFS_EXISTS, errbuf);
			break;
		case ESRCH:
			zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
			    "'%s' is not found among snapshots of '%s'"),
			    snap->zfs_name, zhp->zfs_name);
			(void) zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf);
			break;
		case EINVAL:
			(void) zfs_error(zhp->zfs_hdl, EZFS_BADTYPE, errbuf);
			break;
		default:
			(void) zfs_standard_error(zhp->zfs_hdl, err, errbuf);
		}
		return (err);
	}

	/*
	 * For volumes, if the pre-rollback volsize matched the pre-
	 * rollback reservation and the volsize has changed then set
	 * the reservation property to the post-rollback volsize.
	 * Make a new handle since the rollback closed the dataset.
	 */
	if ((zhp->zfs_type == ZFS_TYPE_VOLUME) &&
	    (zhp = make_dataset_handle(zhp->zfs_hdl, zhp->zfs_name))) {
		if (restore_resv) {
			new_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
			if (old_volsize != new_volsize)
				err = zfs_prop_set_int(zhp, resv_prop,
				    new_volsize);
		}
		zfs_close(zhp);
	}
	return (err);
}
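/*
 * Example (an illustrative sketch): rolling a filesystem back to a named
 * snapshot. Both handles must be opened first; names are hypothetical and
 * the block is compiled out.
 */
#if 0
static int
example_rollback(libzfs_handle_t *g_zfs)
{
	zfs_handle_t *fs, *snap;
	int err = -1;

	fs = zfs_open(g_zfs, "tank/fs", ZFS_TYPE_FILESYSTEM);
	snap = zfs_open(g_zfs, "tank/fs@nightly", ZFS_TYPE_SNAPSHOT);
	if (fs != NULL && snap != NULL)
		err = zfs_rollback(fs, snap, B_FALSE);
	if (snap != NULL)
		zfs_close(snap);
	if (fs != NULL)
		zfs_close(fs);
	return (err);
}
#endif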
/*
 * Renames the given dataset.
 */
int
zfs_rename(zfs_handle_t *zhp, const char *target, renameflags_t flags)
{
	int ret = 0;
	zfs_cmd_t zc = {"\0"};
	char *delim;
	prop_changelist_t *cl = NULL;
	char parent[ZFS_MAX_DATASET_NAME_LEN];
	char property[ZFS_MAXPROPLEN];
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	char errbuf[ERRBUFLEN];

	/* if we have the same exact name, just return success */
	if (strcmp(zhp->zfs_name, target) == 0)
		return (0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot rename to '%s'"), target);

	/* make sure source name is valid */
	if (!zfs_validate_name(hdl, zhp->zfs_name, zhp->zfs_type, B_TRUE))
		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));

	/*
	 * Make sure the target name is valid
	 */
	if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
		if ((strchr(target, '@') == NULL) ||
		    *target == '@') {
			/*
			 * Snapshot target name is abbreviated,
			 * reconstruct full dataset name
			 */
			(void) strlcpy(parent, zhp->zfs_name,
			    sizeof (parent));
			delim = strchr(parent, '@');
			if (strchr(target, '@') == NULL)
				*(++delim) = '\0';
			else
				*delim = '\0';
			(void) strlcat(parent, target, sizeof (parent));
			target = parent;
		} else {
			/*
			 * Make sure we're renaming within the same dataset.
			 */
			delim = strchr(target, '@');
			if (strncmp(zhp->zfs_name, target, delim - target)
			    != 0 || zhp->zfs_name[delim - target] != '@') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "snapshots must be part of same "
				    "dataset"));
				return (zfs_error(hdl, EZFS_CROSSTARGET,
				    errbuf));
			}
		}

		if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
			return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
	} else {
		if (flags.recursive) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "recursive rename must be a snapshot"));
			return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
		}

		if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
			return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));

		/* validate parents */
		if (check_parents(hdl, target, NULL, B_FALSE, NULL) != 0)
			return (-1);

		/* make sure we're in the same pool */
		verify((delim = strchr(target, '/')) != NULL);
		if (strncmp(zhp->zfs_name, target, delim - target) != 0 ||
		    zhp->zfs_name[delim - target] != '/') {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "datasets must be within same pool"));
			return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
		}

		/* new name cannot be a child of the current dataset name */
		if (is_descendant(zhp->zfs_name, target)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "New dataset name cannot be a descendant of "
			    "current dataset name"));
			return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
		}
	}

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot rename '%s'"), zhp->zfs_name);

	if (getzoneid() == GLOBAL_ZONEID &&
	    zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset is used in a non-global zone"));
		return (zfs_error(hdl, EZFS_ZONED, errbuf));
	}

	/*
	 * Avoid unmounting file systems with mountpoint property set to
	 * 'legacy' or 'none' even if -u option is not given.
	 */
	if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM &&
	    !flags.recursive && !flags.nounmount &&
	    zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, property,
	    sizeof (property), NULL, NULL, 0, B_FALSE) == 0 &&
	    (strcmp(property, "legacy") == 0 ||
	    strcmp(property, "none") == 0)) {
		flags.nounmount = B_TRUE;
	}
	if (flags.recursive) {
		char *parentname = zfs_strdup(zhp->zfs_hdl, zhp->zfs_name);
		delim = strchr(parentname, '@');
		*delim = '\0';
		zfs_handle_t *zhrp = zfs_open(zhp->zfs_hdl, parentname,
		    ZFS_TYPE_DATASET);
		free(parentname);
		if (zhrp == NULL) {
			ret = -1;
			goto error;
		}
		zfs_close(zhrp);
	} else if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT) {
		if ((cl = changelist_gather(zhp, ZFS_PROP_NAME,
		    flags.nounmount ? CL_GATHER_DONT_UNMOUNT :
		    CL_GATHER_ITER_MOUNTED,
		    flags.forceunmount ? MS_FORCE : 0)) == NULL)
			return (-1);

		if (changelist_haszonedchild(cl)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "child dataset with inherited mountpoint is used "
			    "in a non-global zone"));
			(void) zfs_error(hdl, EZFS_ZONED, errbuf);
			ret = -1;
			goto error;
		}

		if ((ret = changelist_prefix(cl)) != 0)
			goto error;
	}

	if (ZFS_IS_VOLUME(zhp))
		zc.zc_objset_type = DMU_OST_ZVOL;
	else
		zc.zc_objset_type = DMU_OST_ZFS;

	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, target, sizeof (zc.zc_value));

	zc.zc_cookie = !!flags.recursive;
	zc.zc_cookie |= (!!flags.nounmount) << 1;

	if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_RENAME, &zc)) != 0) {
		/*
		 * if it was recursive, the one that actually failed will
		 * be in zc.zc_name
		 */
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot rename '%s'"), zc.zc_name);

		if (flags.recursive && errno == EEXIST) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "a child dataset already has a snapshot "
			    "with the new name"));
			(void) zfs_error(hdl, EZFS_EXISTS, errbuf);
		} else if (errno == EACCES) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot move encrypted child outside of "
			    "its encryption root"));
			(void) zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
		} else {
			(void) zfs_standard_error(zhp->zfs_hdl, errno, errbuf);
		}

		/*
		 * On failure, we still want to remount any filesystems that
		 * were previously mounted, so we don't alter the system state.
		 */
		if (cl != NULL)
			(void) changelist_postfix(cl);
	} else {
		if (cl != NULL) {
			changelist_rename(cl, zfs_get_name(zhp), target);
			ret = changelist_postfix(cl);
		}
	}

error:
	if (cl != NULL) {
		changelist_free(cl);
	}
	return (ret);
}
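/*
 * Example (an illustrative sketch): renaming a filesystem with default
 * rename flags. Names are hypothetical; the block is compiled out.
 */
#if 0
static int
example_rename(libzfs_handle_t *g_zfs)
{
	zfs_handle_t *fs;
	renameflags_t flags = { 0 };	/* no recursion, normal unmount */
	int err;

	fs = zfs_open(g_zfs, "tank/old-name", ZFS_TYPE_FILESYSTEM);
	if (fs == NULL)
		return (-1);
	err = zfs_rename(fs, "tank/new-name", flags);
	zfs_close(fs);
	return (err);
}
#endif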
nvlist_t *
zfs_get_all_props(zfs_handle_t *zhp)
{
	return (zhp->zfs_props);
}

nvlist_t *
zfs_get_recvd_props(zfs_handle_t *zhp)
{
	if (zhp->zfs_recvd_props == NULL)
		if (get_recvd_props_ioctl(zhp) != 0)
			return (NULL);
	return (zhp->zfs_recvd_props);
}

nvlist_t *
zfs_get_user_props(zfs_handle_t *zhp)
{
	return (zhp->zfs_user_props);
}
/*
 * This function is used by 'zfs list' to determine the exact set of columns to
 * display, and their maximum widths.  This does two main things:
 *
 *      - If this is a list of all properties, then expand the list to include
 *        all native properties, and set a flag so that for each dataset we
 *        look for new unique user properties and add them to the list.
 *
 *      - For non fixed-width properties, keep track of the maximum width seen
 *        so that we can size the column appropriately. If the user has
 *        requested received property values, we also need to compute the
 *        width of the RECEIVED column.
 */
int
zfs_expand_proplist(zfs_handle_t *zhp, zprop_list_t **plp, boolean_t received,
    boolean_t literal)
{
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	zprop_list_t *entry;
	zprop_list_t **last, **start;
	nvlist_t *userprops, *propval;
	nvpair_t *elem;
	const char *strval;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_DATASET) != 0)
		return (-1);

	userprops = zfs_get_user_props(zhp);

	entry = *plp;
	if (entry->pl_all && nvlist_next_nvpair(userprops, NULL) != NULL) {
		/*
		 * Go through and add any user properties as necessary.  We
		 * start by incrementing our list pointer to the first
		 * non-native property.
		 */
		start = plp;
		while (*start != NULL) {
			if ((*start)->pl_prop == ZPROP_USERPROP)
				break;
			start = &(*start)->pl_next;
		}

		elem = NULL;
		while ((elem = nvlist_next_nvpair(userprops, elem)) != NULL) {
			/*
			 * See if we've already found this property in our
			 * list.
			 */
			for (last = start; *last != NULL;
			    last = &(*last)->pl_next) {
				if (strcmp((*last)->pl_user_prop,
				    nvpair_name(elem)) == 0)
					break;
			}

			if (*last == NULL) {
				entry = zfs_alloc(hdl, sizeof (zprop_list_t));
				entry->pl_user_prop =
				    zfs_strdup(hdl, nvpair_name(elem));
				entry->pl_prop = ZPROP_USERPROP;
				entry->pl_width = strlen(nvpair_name(elem));
				entry->pl_all = B_TRUE;
				*last = entry;
			}
		}
	}

	/*
	 * Now go through and check the width of any non-fixed columns
	 */
	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed && !literal)
			continue;

		if (entry->pl_prop != ZPROP_USERPROP) {
			if (zfs_prop_get(zhp, entry->pl_prop,
			    buf, sizeof (buf), NULL, NULL, 0, literal) == 0) {
				if (strlen(buf) > entry->pl_width)
					entry->pl_width = strlen(buf);
			}
			if (received && zfs_prop_get_recvd(zhp,
			    zfs_prop_to_name(entry->pl_prop),
			    buf, sizeof (buf), literal) == 0)
				if (strlen(buf) > entry->pl_recvd_width)
					entry->pl_recvd_width = strlen(buf);
		} else {
			if (nvlist_lookup_nvlist(userprops,
			    entry->pl_user_prop, &propval) == 0) {
				strval = fnvlist_lookup_string(propval,
				    ZPROP_VALUE);
				if (strlen(strval) > entry->pl_width)
					entry->pl_width = strlen(strval);
			}
			if (received && zfs_prop_get_recvd(zhp,
			    entry->pl_user_prop,
			    buf, sizeof (buf), literal) == 0)
				if (strlen(buf) > entry->pl_recvd_width)
					entry->pl_recvd_width = strlen(buf);
		}
	}

	return (0);
}
void
zfs_prune_proplist(zfs_handle_t *zhp, uint8_t *props)
{
	nvpair_t *curr;
	nvpair_t *next;

	/*
	 * Keep a reference to the props-table against which we prune the
	 * properties.
	 */
	zhp->zfs_props_table = props;

	curr = nvlist_next_nvpair(zhp->zfs_props, NULL);

	while (curr) {
		zfs_prop_t zfs_prop = zfs_name_to_prop(nvpair_name(curr));
		next = nvlist_next_nvpair(zhp->zfs_props, curr);

		/*
		 * User properties will result in ZPROP_USERPROP (an alias
		 * for ZPROP_INVAL), and since we only know how to prune
		 * standard ZFS properties, we always leave these in the
		 * list.  This can also happen if we encounter an unknown
		 * DSL property (when running older software, for example).
		 */
		if (zfs_prop != ZPROP_USERPROP && props[zfs_prop] == B_FALSE)
			(void) nvlist_remove(zhp->zfs_props,
			    nvpair_name(curr), nvpair_type(curr));
		curr = next;
	}
}
static int
zfs_smb_acl_mgmt(libzfs_handle_t *hdl, char *dataset, char *path,
    zfs_smb_acl_op_t cmd, char *resource1, char *resource2)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *nvlist = NULL;
	int error;

	(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
	zc.zc_cookie = (uint64_t)cmd;

	if (cmd == ZFS_SMB_ACL_RENAME) {
		if (nvlist_alloc(&nvlist, NV_UNIQUE_NAME, 0) != 0) {
			(void) no_memory(hdl);
			return (0);
		}
	}

	switch (cmd) {
	case ZFS_SMB_ACL_ADD:
	case ZFS_SMB_ACL_REMOVE:
		(void) strlcpy(zc.zc_string, resource1, sizeof (zc.zc_string));
		break;
	case ZFS_SMB_ACL_RENAME:
		if (nvlist_add_string(nvlist, ZFS_SMB_ACL_SRC,
		    resource1) != 0) {
			(void) no_memory(hdl);
			return (-1);
		}
		if (nvlist_add_string(nvlist, ZFS_SMB_ACL_TARGET,
		    resource2) != 0) {
			(void) no_memory(hdl);
			return (-1);
		}
		zcmd_write_src_nvlist(hdl, &zc, nvlist);
		break;
	case ZFS_SMB_ACL_PURGE:
		break;
	default:
		return (-1);
	}
	error = ioctl(hdl->libzfs_fd, ZFS_IOC_SMB_ACL, &zc);
	nvlist_free(nvlist);
	return (error);
}

int
zfs_smb_acl_add(libzfs_handle_t *hdl, char *dataset,
    char *path, char *resource)
{
	return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_ADD,
	    resource, NULL));
}

int
zfs_smb_acl_remove(libzfs_handle_t *hdl, char *dataset,
    char *path, char *resource)
{
	return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_REMOVE,
	    resource, NULL));
}

int
zfs_smb_acl_purge(libzfs_handle_t *hdl, char *dataset, char *path)
{
	return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_PURGE,
	    NULL, NULL));
}

int
zfs_smb_acl_rename(libzfs_handle_t *hdl, char *dataset, char *path,
    char *oldname, char *newname)
{
	return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_RENAME,
	    oldname, newname));
}
int
zfs_userspace(zfs_handle_t *zhp, zfs_userquota_prop_t type,
    zfs_userspace_cb_t func, void *arg)
{
	zfs_cmd_t zc = {"\0"};
	zfs_useracct_t buf[100];
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	int ret;

	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));

	zc.zc_objset_type = type;
	zc.zc_nvlist_dst = (uintptr_t)buf;

	for (;;) {
		zfs_useracct_t *zua = buf;

		zc.zc_nvlist_dst_size = sizeof (buf);
		if (zfs_ioctl(hdl, ZFS_IOC_USERSPACE_MANY, &zc) != 0) {
			if ((errno == ENOTSUP &&
			    (type == ZFS_PROP_USEROBJUSED ||
			    type == ZFS_PROP_GROUPOBJUSED ||
			    type == ZFS_PROP_USEROBJQUOTA ||
			    type == ZFS_PROP_GROUPOBJQUOTA ||
			    type == ZFS_PROP_PROJECTOBJUSED ||
			    type == ZFS_PROP_PROJECTOBJQUOTA ||
			    type == ZFS_PROP_PROJECTUSED ||
			    type == ZFS_PROP_PROJECTQUOTA)))
				break;

			return (zfs_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get used/quota for %s"), zc.zc_name));
		}
		if (zc.zc_nvlist_dst_size == 0)
			break;

		while (zc.zc_nvlist_dst_size > 0) {
			if ((ret = func(arg, zua->zu_domain, zua->zu_rid,
			    zua->zu_space)) != 0)
				return (ret);
			zua++;
			zc.zc_nvlist_dst_size -= sizeof (zfs_useracct_t);
		}
	}

	return (0);
}
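/*
 * Example (an illustrative sketch): printing per-user space consumption via
 * zfs_userspace(). The callback matches the zfs_userspace_cb_t shape used
 * above; the dataset handle is assumed to be open already. Compiled out.
 */
#if 0
static int
example_userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space)
{
	(void) arg;
	(void) printf("%s/%u\t%llu\n", domain, (unsigned)rid,
	    (unsigned long long)space);
	return (0);
}

static int
example_userspace(zfs_handle_t *zhp)
{
	return (zfs_userspace(zhp, ZFS_PROP_USERUSED,
	    example_userspace_cb, NULL));
}
#endif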
struct holdarg {
	nvlist_t *nvl;
	const char *snapname;
	const char *tag;
	boolean_t recursive;
	int error;
};

static int
zfs_hold_one(zfs_handle_t *zhp, void *arg)
{
	struct holdarg *ha = arg;
	char name[ZFS_MAX_DATASET_NAME_LEN];
	int rv = 0;

	if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name,
	    ha->snapname) >= sizeof (name))
		return (EINVAL);

	if (lzc_exists(name))
		fnvlist_add_string(ha->nvl, name, ha->tag);

	if (ha->recursive)
		rv = zfs_iter_filesystems_v2(zhp, 0, zfs_hold_one, ha);
	zfs_close(zhp);
	return (rv);
}

int
zfs_hold(zfs_handle_t *zhp, const char *snapname, const char *tag,
    boolean_t recursive, int cleanup_fd)
{
	int ret;
	struct holdarg ha;

	ha.nvl = fnvlist_alloc();
	ha.snapname = snapname;
	ha.tag = tag;
	ha.recursive = recursive;
	(void) zfs_hold_one(zfs_handle_dup(zhp), &ha);

	if (nvlist_empty(ha.nvl)) {
		char errbuf[ERRBUFLEN];

		fnvlist_free(ha.nvl);
		ret = ENOENT;
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN,
		    "cannot hold snapshot '%s@%s'"),
		    zhp->zfs_name, snapname);
		(void) zfs_standard_error(zhp->zfs_hdl, ret, errbuf);
		return (ret);
	}

	ret = zfs_hold_nvl(zhp, cleanup_fd, ha.nvl);
	fnvlist_free(ha.nvl);

	return (ret);
}

int
zfs_hold_nvl(zfs_handle_t *zhp, int cleanup_fd, nvlist_t *holds)
{
	int ret;
	nvlist_t *errors;
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	char errbuf[ERRBUFLEN];
	nvpair_t *elem;

	errors = NULL;
	ret = lzc_hold(holds, cleanup_fd, &errors);

	if (ret == 0) {
		/* There may be errors even in the success case. */
		fnvlist_free(errors);
		return (0);
	}

	if (nvlist_empty(errors)) {
		/* no hold-specific errors */
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot hold"));
		switch (ret) {
		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded"));
			(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
			break;
		case EINVAL:
			(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
			break;
		default:
			(void) zfs_standard_error(hdl, ret, errbuf);
		}
	}

	for (elem = nvlist_next_nvpair(errors, NULL);
	    elem != NULL;
	    elem = nvlist_next_nvpair(errors, elem)) {
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN,
		    "cannot hold snapshot '%s'"), nvpair_name(elem));
		switch (fnvpair_value_int32(elem)) {
		case E2BIG:
			/*
			 * Temporary tags wind up having the ds object id
			 * prepended.  So even if we passed the length check
			 * above, it's still possible for the tag to wind
			 * up being slightly too long.
			 */
			(void) zfs_error(hdl, EZFS_TAGTOOLONG, errbuf);
			break;
		case EINVAL:
			(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
			break;
		case EEXIST:
			(void) zfs_error(hdl, EZFS_REFTAG_HOLD, errbuf);
			break;
		default:
			(void) zfs_standard_error(hdl,
			    fnvpair_value_int32(elem), errbuf);
		}
	}

	fnvlist_free(errors);
	return (ret);
}
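/*
 * Example (an illustrative sketch): placing a user hold on a snapshot so
 * that it cannot be destroyed until the hold is released. A cleanup_fd of
 * -1 means the hold is not tied to a cleanup file descriptor. Names and
 * tag are hypothetical; compiled out.
 */
#if 0
static int
example_hold(libzfs_handle_t *g_zfs)
{
	zfs_handle_t *fs;
	int err;

	fs = zfs_open(g_zfs, "tank/fs", ZFS_TYPE_FILESYSTEM);
	if (fs == NULL)
		return (-1);
	err = zfs_hold(fs, "nightly", "keepme", B_FALSE, -1);
	zfs_close(fs);
	return (err);
}
#endif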
static int
zfs_release_one(zfs_handle_t *zhp, void *arg)
{
	struct holdarg *ha = arg;
	char name[ZFS_MAX_DATASET_NAME_LEN];
	int rv = 0;
	nvlist_t *existing_holds;

	if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name,
	    ha->snapname) >= sizeof (name)) {
		ha->error = EINVAL;
		rv = EINVAL;
	}

	if (lzc_get_holds(name, &existing_holds) != 0) {
		ha->error = ENOENT;
	} else if (!nvlist_exists(existing_holds, ha->tag)) {
		ha->error = ESRCH;
	} else {
		nvlist_t *torelease = fnvlist_alloc();
		fnvlist_add_boolean(torelease, ha->tag);
		fnvlist_add_nvlist(ha->nvl, name, torelease);
		fnvlist_free(torelease);
	}

	if (ha->recursive)
		rv = zfs_iter_filesystems_v2(zhp, 0, zfs_release_one, ha);
	zfs_close(zhp);
	return (rv);
}

int
zfs_release(zfs_handle_t *zhp, const char *snapname, const char *tag,
    boolean_t recursive)
{
	int ret;
	struct holdarg ha;
	nvlist_t *errors = NULL;
	nvpair_t *elem;
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	char errbuf[ERRBUFLEN];

	ha.nvl = fnvlist_alloc();
	ha.snapname = snapname;
	ha.tag = tag;
	ha.recursive = recursive;
	ha.error = 0;
	(void) zfs_release_one(zfs_handle_dup(zhp), &ha);

	if (nvlist_empty(ha.nvl)) {
		fnvlist_free(ha.nvl);
		ret = ha.error;
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN,
		    "cannot release hold from snapshot '%s@%s'"),
		    zhp->zfs_name, snapname);
		if (ret == ESRCH) {
			(void) zfs_error(hdl, EZFS_REFTAG_RELE, errbuf);
		} else {
			(void) zfs_standard_error(hdl, ret, errbuf);
		}
		return (ret);
	}

	ret = lzc_release(ha.nvl, &errors);
	fnvlist_free(ha.nvl);

	if (ret == 0) {
		/* There may be errors even in the success case. */
		fnvlist_free(errors);
		return (0);
	}

	if (nvlist_empty(errors)) {
		/* no hold-specific errors */
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot release"));
		switch (errno) {
		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded"));
			(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
			break;
		default:
			(void) zfs_standard_error(hdl, errno, errbuf);
		}
	}

	for (elem = nvlist_next_nvpair(errors, NULL);
	    elem != NULL;
	    elem = nvlist_next_nvpair(errors, elem)) {
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN,
		    "cannot release hold from snapshot '%s'"),
		    nvpair_name(elem));
		switch (fnvpair_value_int32(elem)) {
		case ESRCH:
			(void) zfs_error(hdl, EZFS_REFTAG_RELE, errbuf);
			break;
		case EINVAL:
			(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
			break;
		default:
			(void) zfs_standard_error(hdl,
			    fnvpair_value_int32(elem), errbuf);
		}
	}

	fnvlist_free(errors);
	return (ret);
}
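/*
 * Example (an illustrative sketch): releasing the user hold placed in the
 * previous example. Names and tag are hypothetical; compiled out.
 */
#if 0
static int
example_release(libzfs_handle_t *g_zfs)
{
	zfs_handle_t *fs;
	int err;

	fs = zfs_open(g_zfs, "tank/fs", ZFS_TYPE_FILESYSTEM);
	if (fs == NULL)
		return (-1);
	err = zfs_release(fs, "nightly", "keepme", B_FALSE);
	zfs_close(fs);
	return (err);
}
#endif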
int
zfs_get_fsacl(zfs_handle_t *zhp, nvlist_t **nvl)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	int nvsz = 2048;
	void *nvbuf;
	int err = 0;
	char errbuf[ERRBUFLEN];

	assert(zhp->zfs_type == ZFS_TYPE_VOLUME ||
	    zhp->zfs_type == ZFS_TYPE_FILESYSTEM);

tryagain:

	nvbuf = malloc(nvsz);
	if (nvbuf == NULL) {
		err = (zfs_error(hdl, EZFS_NOMEM, strerror(errno)));
		goto out;
	}

	zc.zc_nvlist_dst_size = nvsz;
	zc.zc_nvlist_dst = (uintptr_t)nvbuf;

	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_GET_FSACL, &zc) != 0) {
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot get permissions on '%s'"),
		    zc.zc_name);
		switch (errno) {
		case ENOMEM:
			free(nvbuf);
			nvsz = zc.zc_nvlist_dst_size;
			goto tryagain;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded"));
			err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
			break;
		case EINVAL:
			err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
			break;
		case ENOENT:
			err = zfs_error(hdl, EZFS_NOENT, errbuf);
			break;
		default:
			err = zfs_standard_error(hdl, errno, errbuf);
			break;
		}
	} else {
		/* success */
		int rc = nvlist_unpack(nvbuf, zc.zc_nvlist_dst_size, nvl, 0);
		if (rc) {
			err = zfs_standard_error_fmt(hdl, rc, dgettext(
			    TEXT_DOMAIN, "cannot get permissions on '%s'"),
			    zc.zc_name);
		}
	}

	free(nvbuf);
out:
	return (err);
}

int
zfs_set_fsacl(zfs_handle_t *zhp, boolean_t un, nvlist_t *nvl)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	char *nvbuf;
	char errbuf[ERRBUFLEN];
	size_t nvsz;
	int err;

	assert(zhp->zfs_type == ZFS_TYPE_VOLUME ||
	    zhp->zfs_type == ZFS_TYPE_FILESYSTEM);

	err = nvlist_size(nvl, &nvsz, NV_ENCODE_NATIVE);
	assert(err == 0);

	nvbuf = malloc(nvsz);

	err = nvlist_pack(nvl, &nvbuf, &nvsz, NV_ENCODE_NATIVE, 0);
	assert(err == 0);

	zc.zc_nvlist_src_size = nvsz;
	zc.zc_nvlist_src = (uintptr_t)nvbuf;
	zc.zc_perm_action = un;

	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_SET_FSACL, &zc) != 0) {
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot set permissions on '%s'"),
		    zc.zc_name);
		switch (errno) {
		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded"));
			err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
			break;
		case EINVAL:
			err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
			break;
		case ENOENT:
			err = zfs_error(hdl, EZFS_NOENT, errbuf);
			break;
		default:
			err = zfs_standard_error(hdl, errno, errbuf);
			break;
		}
	}

	free(nvbuf);

	return (err);
}
int
zfs_get_holds(zfs_handle_t *zhp, nvlist_t **nvl)
{
	int err;
	char errbuf[ERRBUFLEN];

	err = lzc_get_holds(zhp->zfs_name, nvl);

	if (err != 0) {
		libzfs_handle_t *hdl = zhp->zfs_hdl;

		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot get holds for '%s'"),
		    zhp->zfs_name);
		switch (err) {
		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded"));
			err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
			break;
		case EINVAL:
			err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
			break;
		case ENOENT:
			err = zfs_error(hdl, EZFS_NOENT, errbuf);
			break;
		default:
			err = zfs_standard_error(hdl, errno, errbuf);
			break;
		}
	}

	return (err);
}

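/*
 * Illustrative usage sketch (not part of libzfs itself): list the user holds
 * on a snapshot. Each pair in the returned nvlist maps a hold tag to a
 * uint64 creation timestamp (as consumed by "zfs holds"); the snapshot name
 * and the ZFS_EXAMPLE guard are hypothetical.
 */
#ifdef ZFS_EXAMPLE
static int
example_list_holds(libzfs_handle_t *g_zfs)
{
	zfs_handle_t *zhp;
	nvlist_t *holds = NULL;
	nvpair_t *nvp = NULL;
	int err;

	zhp = zfs_open(g_zfs, "tank/home@snap1", ZFS_TYPE_SNAPSHOT);
	if (zhp == NULL)
		return (-1);

	if ((err = zfs_get_holds(zhp, &holds)) == 0) {
		while ((nvp = nvlist_next_nvpair(holds, nvp)) != NULL) {
			uint64_t when = 0;
			(void) nvpair_value_uint64(nvp, &when);
			(void) printf("%s\t%llu\n", nvpair_name(nvp),
			    (u_longlong_t)when);
		}
		nvlist_free(holds);
	}
	zfs_close(zhp);
	return (err);
}
#endif	/* ZFS_EXAMPLE */
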
/*
 * The theory of raidz space accounting
 *
 * The "referenced" property of RAIDZ vdevs is scaled such that a 128KB block
 * will "reference" 128KB, even though it allocates more than that, to store the
 * parity information (and perhaps skip sectors). This concept of the
 * "referenced" (and other DMU space accounting) being lower than the allocated
 * space by a constant factor is called "raidz deflation."
 *
 * As mentioned above, the constant factor for raidz deflation assumes a 128KB
 * block size. However, zvols typically have a much smaller block size (default
 * 8KB). These smaller blocks may require proportionally much more parity
 * information (and perhaps skip sectors). In this case, the change to the
 * "referenced" property may be much more than the logical block size.
 *
 * Suppose a raidz vdev has 5 disks with ashift=12. A 128k block may be written
 * as follows.
 *
 * +-------+-------+-------+-------+-------+
 * | disk1 | disk2 | disk3 | disk4 | disk5 |
 * +-------+-------+-------+-------+-------+
 * |  P0   |  D0   |  D8   |  D16  |  D24  |
 * |  P1   |  D1   |  D9   |  D17  |  D25  |
 * |  P2   |  D2   |  D10  |  D18  |  D26  |
 * |  P3   |  D3   |  D11  |  D19  |  D27  |
 * |  P4   |  D4   |  D12  |  D20  |  D28  |
 * |  P5   |  D5   |  D13  |  D21  |  D29  |
 * |  P6   |  D6   |  D14  |  D22  |  D30  |
 * |  P7   |  D7   |  D15  |  D23  |  D31  |
 * +-------+-------+-------+-------+-------+
 *
 * Above, notice that 160k was allocated: 8 x 4k parity sectors + 32 x 4k data
 * sectors. The dataset's referenced will increase by 128k and the pool's
 * allocated and free properties will be adjusted by 160k.
 *
 * A 4k block written to the same raidz vdev will require two 4k sectors. The
 * blank cells represent unallocated space.
 *
 * +-------+-------+-------+-------+-------+
 * | disk1 | disk2 | disk3 | disk4 | disk5 |
 * +-------+-------+-------+-------+-------+
 * |  P0   |  D0   |       |       |       |
 * +-------+-------+-------+-------+-------+
 *
 * Above, notice that the 4k block required one sector for parity and another
 * for data. vdev_raidz_asize() will return 8k and as such the pool's allocated
 * and free properties will be adjusted by 8k. The dataset will not be charged
 * 8k. Rather, it will be charged a value that is scaled according to the
 * overhead of the 128k block on the same vdev. This 8k allocation will be
 * charged 8k * 128k / 160k. 128k is from SPA_OLD_MAXBLOCKSIZE and 160k is as
 * calculated in the 128k block example above.
 *
 * Every raidz allocation is sized to be a multiple of nparity+1 sectors. That
 * is, every raidz1 allocation will be a multiple of 2 sectors, raidz2
 * allocations are a multiple of 3 sectors, and raidz3 allocations are a
 * multiple of 4 sectors. When a block does not fill the required number of
 * sectors, skip blocks (sectors) are used.
 *
 * An 8k block being written to a raidz vdev may be written as follows:
 *
 * +-------+-------+-------+-------+-------+
 * | disk1 | disk2 | disk3 | disk4 | disk5 |
 * +-------+-------+-------+-------+-------+
 * |  P0   |  D0   |  D1   |  S0   |       |
 * +-------+-------+-------+-------+-------+
 *
 * In order to maintain the nparity+1 allocation size, a skip block (S0) was
 * added. For this 8k block, the pool's allocated and free properties are
 * adjusted by 16k and the dataset's referenced is increased by 16k * 128k /
 * 160k. Again, 128k is from SPA_OLD_MAXBLOCKSIZE and 160k is as calculated in
 * the 128k block example above.
 *
 * The situation is slightly different for dRAID since the minimum allocation
 * size is the full group width. The same 8K block above would be written as
 * follows in a dRAID group:
 *
 * +-------+-------+-------+-------+-------+
 * | disk1 | disk2 | disk3 | disk4 | disk5 |
 * +-------+-------+-------+-------+-------+
 * |  P0   |  D0   |  D1   |  S0   |  S1   |
 * +-------+-------+-------+-------+-------+
 *
 * Compression may lead to a variety of block sizes being written for the same
 * volume or file. There is no clear way to reserve just the amount of space
 * that will be required, so the worst case (no compression) is assumed.
 * Note that metadata blocks will typically be compressed, so the reservation
 * size returned by zvol_volsize_to_reservation() will generally be slightly
 * larger than the maximum that the volume can reference.
 */

/*
 * Derived from function of same name in module/zfs/vdev_raidz.c. Returns the
 * amount of space (in bytes) that will be allocated for the specified block
 * size. Note that the "referenced" space accounted will be less than this, but
 * not necessarily equal to "blksize", due to RAIDZ deflation.
 */
static uint64_t
vdev_raidz_asize(uint64_t ndisks, uint64_t nparity, uint64_t ashift,
    uint64_t blksize)
{
	uint64_t asize, ndata;

	ASSERT3U(ndisks, >, nparity);
	ndata = ndisks - nparity;
	asize = ((blksize - 1) >> ashift) + 1;
	asize += nparity * ((asize + ndata - 1) / ndata);
	asize = roundup(asize, nparity + 1) << ashift;

	return (asize);
}

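/*
 * Illustrative sketch (not part of libzfs itself): reproduces the numbers
 * from the theory comment above for a 5-disk raidz1 vdev with ashift=12
 * (4k sectors). The ZFS_EXAMPLE guard is hypothetical.
 */
#ifdef ZFS_EXAMPLE
static void
example_raidz_asize(void)
{
	/* 128k block: 32 data + 8 parity sectors = 160k allocated */
	assert(vdev_raidz_asize(5, 1, 12, 128 * 1024) == 160 * 1024);

	/* 4k block: 1 data + 1 parity sector = 8k allocated */
	assert(vdev_raidz_asize(5, 1, 12, 4 * 1024) == 8 * 1024);

	/* 8k block: 2 data + 1 parity + 1 skip sector = 16k allocated */
	assert(vdev_raidz_asize(5, 1, 12, 8 * 1024) == 16 * 1024);
}
#endif	/* ZFS_EXAMPLE */
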
/*
 * Derived from function of same name in module/zfs/vdev_draid.c. Returns the
 * amount of space (in bytes) that will be allocated for the specified block
 * size.
 */
static uint64_t
vdev_draid_asize(uint64_t ndisks, uint64_t nparity, uint64_t ashift,
    uint64_t blksize)
{
	ASSERT3U(ndisks, >, nparity);
	uint64_t ndata = ndisks - nparity;
	uint64_t rows = ((blksize - 1) / (ndata << ashift)) + 1;
	uint64_t asize = (rows * ndisks) << ashift;

	return (asize);
}

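/*
 * Illustrative sketch (not part of libzfs itself): for the dRAID layout in
 * the theory comment (group width 5, single parity, ashift=12), an 8k block
 * consumes a full row: P0 + D0 + D1 + two skip sectors = 20k allocated.
 * The ZFS_EXAMPLE guard is hypothetical.
 */
#ifdef ZFS_EXAMPLE
static void
example_draid_asize(void)
{
	assert(vdev_draid_asize(5, 1, 12, 8 * 1024) == 20 * 1024);
}
#endif	/* ZFS_EXAMPLE */
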
/*
 * Determine how much space will be allocated if it lands on the most space-
 * inefficient top-level vdev. Returns the size in bytes required to store one
 * copy of the volume data. See theory comment above.
 */
static uint64_t
volsize_from_vdevs(zpool_handle_t *zhp, uint64_t nblocks, uint64_t blksize)
{
	nvlist_t *config, *tree, **vdevs;
	uint_t nvdevs;
	uint64_t ret = 0;

	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) != 0 ||
	    nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
	    &vdevs, &nvdevs) != 0) {
		return (nblocks * blksize);
	}

	for (int v = 0; v < nvdevs; v++) {
		const char *type;
		uint64_t nparity, ashift, asize, tsize;
		uint64_t volsize;

		if (nvlist_lookup_string(vdevs[v], ZPOOL_CONFIG_TYPE,
		    &type) != 0)
			continue;

		if (strcmp(type, VDEV_TYPE_RAIDZ) != 0 &&
		    strcmp(type, VDEV_TYPE_DRAID) != 0)
			continue;

		if (nvlist_lookup_uint64(vdevs[v],
		    ZPOOL_CONFIG_NPARITY, &nparity) != 0)
			continue;

		if (nvlist_lookup_uint64(vdevs[v],
		    ZPOOL_CONFIG_ASHIFT, &ashift) != 0)
			continue;

		if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
			nvlist_t **disks;
			uint_t ndisks;

			if (nvlist_lookup_nvlist_array(vdevs[v],
			    ZPOOL_CONFIG_CHILDREN, &disks, &ndisks) != 0)
				continue;

			/* allocation size for the "typical" 128k block */
			tsize = vdev_raidz_asize(ndisks, nparity, ashift,
			    SPA_OLD_MAXBLOCKSIZE);

			/* allocation size for the blksize block */
			asize = vdev_raidz_asize(ndisks, nparity, ashift,
			    blksize);
		} else {
			uint64_t ndata;

			if (nvlist_lookup_uint64(vdevs[v],
			    ZPOOL_CONFIG_DRAID_NDATA, &ndata) != 0)
				continue;

			/* allocation size for the "typical" 128k block */
			tsize = vdev_draid_asize(ndata + nparity, nparity,
			    ashift, SPA_OLD_MAXBLOCKSIZE);

			/* allocation size for the blksize block */
			asize = vdev_draid_asize(ndata + nparity, nparity,
			    ashift, blksize);
		}

		/*
		 * Scale this size down as a ratio of 128k / tsize.
		 * See theory statement above.
		 */
		volsize = nblocks * asize * SPA_OLD_MAXBLOCKSIZE / tsize;
		if (volsize > ret) {
			ret = volsize;
		}
	}

	if (ret == 0) {
		ret = nblocks * blksize;
	}

	return (ret);
}

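/*
 * Illustrative sketch (not part of libzfs itself): on the 5-disk raidz1
 * vdev from the theory comment, an 8k volume block allocates 16k, so each
 * block is charged 16k * 128k / 160k (~12.8k) toward the volume size.
 * The ZFS_EXAMPLE guard is hypothetical.
 */
#ifdef ZFS_EXAMPLE
static void
example_raidz_charge(void)
{
	uint64_t tsize = vdev_raidz_asize(5, 1, 12, SPA_OLD_MAXBLOCKSIZE);
	uint64_t asize = vdev_raidz_asize(5, 1, 12, 8 * 1024);

	/* 16384 * 131072 / 163840 == 13107 (~12.8k) */
	assert(asize * SPA_OLD_MAXBLOCKSIZE / tsize == 13107);
}
#endif	/* ZFS_EXAMPLE */
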
/*
 * Convert the zvol's volume size to an appropriate reservation. See theory
 * comment above.
 *
 * Note: If this routine is updated, it is necessary to update the ZFS test
 * suite's shell version in reservation.shlib.
 */
uint64_t
zvol_volsize_to_reservation(zpool_handle_t *zph, uint64_t volsize,
    nvlist_t *props)
{
	uint64_t numdb;
	uint64_t nblocks, volblocksize;
	int ncopies;
	const char *strval;

	if (nvlist_lookup_string(props,
	    zfs_prop_to_name(ZFS_PROP_COPIES), &strval) == 0)
		ncopies = atoi(strval);
	else
		ncopies = 1;
	if (nvlist_lookup_uint64(props,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
	    &volblocksize) != 0)
		volblocksize = ZVOL_DEFAULT_BLOCKSIZE;

	nblocks = volsize / volblocksize;
	/*
	 * Metadata defaults to using 128k blocks, not volblocksize blocks. For
	 * this reason, only the data blocks are scaled based on vdev config.
	 */
	volsize = volsize_from_vdevs(zph, nblocks, volblocksize);

	/* start with metadnode L0-L6 */
	numdb = 7;
	/* calculate number of indirects */
	while (nblocks > 1) {
		nblocks += DNODES_PER_LEVEL - 1;
		nblocks /= DNODES_PER_LEVEL;
		numdb += nblocks;
	}
	numdb *= MIN(SPA_DVAS_PER_BP, ncopies + 1);
	volsize *= ncopies;
	/*
	 * this is exactly DN_MAX_INDBLKSHIFT when metadata isn't
	 * compressed, but in practice they compress down to about
	 * 1100 bytes
	 */
	numdb *= 1ULL << DN_MAX_INDBLKSHIFT;
	volsize += numdb;
	return (volsize);
}

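/*
 * Illustrative usage sketch (not part of libzfs itself): compute the
 * reservation for a 10GiB zvol with the default 8k volblocksize, the way
 * a non-sparse volume is sized at creation. The pool name and the
 * ZFS_EXAMPLE guard are hypothetical.
 */
#ifdef ZFS_EXAMPLE
static uint64_t
example_zvol_reservation(libzfs_handle_t *g_zfs)
{
	zpool_handle_t *zph;
	nvlist_t *props = NULL;
	uint64_t resv = 0;

	if ((zph = zpool_open(g_zfs, "tank")) == NULL)
		return (0);

	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0) {
		(void) nvlist_add_uint64(props,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8192);
		resv = zvol_volsize_to_reservation(zph,
		    10ULL * 1024 * 1024 * 1024, props);
		nvlist_free(props);
	}
	zpool_close(zph);
	return (resv);
}
#endif	/* ZFS_EXAMPLE */
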
/*
 * Wait for the given activity and return the status of the wait (whether or
 * not any waiting was done) in the 'waited' parameter. Non-existent fses are
 * reported via the 'missing' parameter, rather than by printing an error
 * message. This is convenient when this function is called in a loop over a
 * long period of time (as it is, for example, by zfs's wait cmd). In that
 * scenario, a fs being exported or destroyed should be considered a normal
 * event, so we don't want to print an error when we find that the fs doesn't
 * exist.
 */
int
zfs_wait_status(zfs_handle_t *zhp, zfs_wait_activity_t activity,
    boolean_t *missing, boolean_t *waited)
{
	int error = lzc_wait_fs(zhp->zfs_name, activity, waited);
	*missing = (error == ENOENT);
	if (*missing)
		return (0);

	if (error != 0) {
		(void) zfs_standard_error_fmt(zhp->zfs_hdl, error,
		    dgettext(TEXT_DOMAIN, "error waiting in fs '%s'"),
		    zhp->zfs_name);
	}

	return (error);
}

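/*
 * Illustrative usage sketch (not part of libzfs itself): block until a
 * dataset's delete queue drains, tolerating the dataset disappearing
 * mid-wait. ZFS_WAIT_DELETEQ is the activity used by "zfs wait -t deleteq";
 * the ZFS_EXAMPLE guard is hypothetical.
 */
#ifdef ZFS_EXAMPLE
static int
example_wait_deleteq(zfs_handle_t *zhp)
{
	boolean_t missing = B_FALSE;
	boolean_t waited = B_FALSE;
	int err;

	do {
		err = zfs_wait_status(zhp, ZFS_WAIT_DELETEQ,
		    &missing, &waited);
	} while (err == 0 && !missing && waited);

	return (err);
}
#endif	/* ZFS_EXAMPLE */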