zfs/module/zcommon/zfs_comutil.c

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */
/*
 * This file is intended for functions that ought to be common between user
 * land (libzfs) and the kernel. When many common routines need to be shared
 * then a separate file should be created.
 */
#if !defined(_KERNEL)
#include <string.h>
#endif
#include <sys/types.h>
#include <sys/fs/zfs.h>
#include <sys/nvpair.h>
#include "zfs_comutil.h"
#include <sys/zfs_ratelimit.h>

/*
 * Are there allocatable vdevs?
 */
boolean_t
zfs_allocatable_devs(nvlist_t *nv)
{
	uint64_t is_log;
	uint_t c;
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		return (B_FALSE);
	}

	for (c = 0; c < children; c++) {
		is_log = 0;
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		if (!is_log)
			return (B_TRUE);
	}
	return (B_FALSE);
}
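
/*
 * Illustrative sketch (not part of the original file): callers typically
 * pull the root vdev tree out of a pool config nvlist and use
 * zfs_allocatable_devs() to reject configs that contain only log devices.
 * The helper name below is hypothetical.
 */
static boolean_t
example_config_has_allocatable_devs(nvlist_t *config)
{
	nvlist_t *nvroot;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0)
		return (B_FALSE);

	return (zfs_allocatable_devs(nvroot));
}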

/*
 * Are there special vdevs?
 */
boolean_t
zfs_special_devs(nvlist_t *nv, char *type)
{
	char *bias;
	uint_t c;
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		return (B_FALSE);
	}

	for (c = 0; c < children; c++) {
		if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS,
		    &bias) == 0) {
			if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0 ||
			    strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0) {
				if (type != NULL && strcmp(bias, type) == 0) {
					return (B_TRUE);
				} else if (type == NULL) {
					return (B_TRUE);
				}
			}
		}
	}
	return (B_FALSE);
}
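
/*
 * Illustrative sketch (not part of the original file): zfs_special_devs()
 * matches any special- or dedup-biased vdev when type is NULL, or only the
 * named allocation bias otherwise.  The wrapper below is hypothetical.
 */
static boolean_t
example_config_has_dedup_vdev(nvlist_t *nvroot)
{
	/* Bail out early if there is no special or dedup vdev at all. */
	if (!zfs_special_devs(nvroot, NULL))
		return (B_FALSE);

	/* Now ask specifically for a dedup-biased vdev. */
	return (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP));
}
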
void
zpool_get_load_policy(nvlist_t *nvl, zpool_load_policy_t *zlpp)
{
	nvlist_t *policy;
	nvpair_t *elem;
	char *nm;

	/* Defaults */
	zlpp->zlp_rewind = ZPOOL_NO_REWIND;
	zlpp->zlp_maxmeta = 0;
	zlpp->zlp_maxdata = UINT64_MAX;
	zlpp->zlp_txg = UINT64_MAX;

	if (nvl == NULL)
		return;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) {
		nm = nvpair_name(elem);
		if (strcmp(nm, ZPOOL_LOAD_POLICY) == 0) {
			if (nvpair_value_nvlist(elem, &policy) == 0)
				zpool_get_load_policy(policy, zlpp);
			return;
		} else if (strcmp(nm, ZPOOL_LOAD_REWIND_POLICY) == 0) {
			if (nvpair_value_uint32(elem, &zlpp->zlp_rewind) == 0)
				if (zlpp->zlp_rewind & ~ZPOOL_REWIND_POLICIES)
					zlpp->zlp_rewind = ZPOOL_NO_REWIND;
		} else if (strcmp(nm, ZPOOL_LOAD_REQUEST_TXG) == 0) {
			(void) nvpair_value_uint64(elem, &zlpp->zlp_txg);
		} else if (strcmp(nm, ZPOOL_LOAD_META_THRESH) == 0) {
			(void) nvpair_value_uint64(elem, &zlpp->zlp_maxmeta);
		} else if (strcmp(nm, ZPOOL_LOAD_DATA_THRESH) == 0) {
			(void) nvpair_value_uint64(elem, &zlpp->zlp_maxdata);
		}
	}

	if (zlpp->zlp_rewind == 0)
		zlpp->zlp_rewind = ZPOOL_NO_REWIND;
}
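
/*
 * Illustrative sketch (not part of the original file): a rewind request is
 * passed as a nested ZPOOL_LOAD_POLICY nvlist, and zpool_get_load_policy()
 * fills in defaults for any member that is absent.  The helper name is
 * hypothetical.
 */
static void
example_request_rewind_to_txg(nvlist_t *props, uint64_t txg,
    zpool_load_policy_t *zlpp)
{
	nvlist_t *policy;

	if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0)
		return;

	if (nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
	    ZPOOL_DO_REWIND) == 0 &&
	    nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) == 0)
		(void) nvlist_add_nvlist(props, ZPOOL_LOAD_POLICY, policy);
	nvlist_free(policy);

	/* Members left unset keep their defaults (e.g. zlp_maxdata). */
	zpool_get_load_policy(props, zlpp);
}
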
typedef struct zfs_version_spa_map {
	int	version_zpl;
	int	version_spa;
} zfs_version_spa_map_t;

/*
 * Keep this table in monotonically increasing version number order.
 */
static zfs_version_spa_map_t zfs_version_table[] = {
	{ZPL_VERSION_INITIAL, SPA_VERSION_INITIAL},
	{ZPL_VERSION_DIRENT_TYPE, SPA_VERSION_INITIAL},
	{ZPL_VERSION_FUID, SPA_VERSION_FUID},
	{ZPL_VERSION_USERSPACE, SPA_VERSION_USERSPACE},
	{ZPL_VERSION_SA, SPA_VERSION_SA},
	{0, 0}
};

/*
 * Return the max zpl version for a corresponding spa version.
 * -1 is returned if no mapping exists.
 */
int
zfs_zpl_version_map(int spa_version)
{
	int i;
	int version = -1;

	for (i = 0; zfs_version_table[i].version_spa; i++) {
		if (spa_version >= zfs_version_table[i].version_spa)
			version = zfs_version_table[i].version_zpl;
	}

	return (version);
}

/*
 * Return the min spa version for a corresponding zpl version.
 * -1 is returned if no mapping exists.
 */
int
zfs_spa_version_map(int zpl_version)
{
	int i;
	int version = -1;

	for (i = 0; zfs_version_table[i].version_zpl; i++) {
		if (zfs_version_table[i].version_zpl >= zpl_version)
			return (zfs_version_table[i].version_spa);
	}

	return (version);
}
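
/*
 * Illustrative sketch (not part of the original file): the two maps are
 * inverses over zfs_version_table.  For example, a ZPL_VERSION_SA file
 * system needs a pool of at least SPA_VERSION_SA, and such a pool can host
 * file systems up to ZPL_VERSION_SA.  The helper name is hypothetical.
 */
static boolean_t
example_pool_supports_zpl_version(int spa_version, int zpl_version)
{
	int min_spa = zfs_spa_version_map(zpl_version);
	int max_zpl = zfs_zpl_version_map(spa_version);

	return (min_spa != -1 && max_zpl != -1 &&
	    spa_version >= min_spa && zpl_version <= max_zpl);
}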

/*
 * This is the table of legacy internal event names; it should not be modified.
 * The internal events are now stored in the history log as strings.
 */
const char *zfs_history_event_names[ZFS_NUM_LEGACY_HISTORY_EVENTS] = {
"invalid event",
"pool create",
"vdev add",
"pool remove",
"pool destroy",
"pool export",
"pool import",
"vdev attach",
"vdev replace",
"vdev detach",
"vdev online",
"vdev offline",
"vdev upgrade",
"pool clear",
"pool scrub",
"pool property set",
"create",
"clone",
"destroy",
"destroy_begin_sync",
"inherit",
"property set",
"quota set",
"permission update",
"permission remove",
"permission who remove",
"promote",
"receive",
"rename",
"reservation set",
"replay_inc_sync",
"replay_full_sync",
"rollback",
"snapshot",
"filesystem version upgrade",
"refquota set",
"refreservation set",
"pool scrub done",
"user hold",
"user release",
"pool split",
};
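
/*
 * Illustrative sketch (not part of the original file): code decoding old
 * pool history records can map a legacy numeric event id to its name,
 * falling back to entry 0 ("invalid event") for out-of-range values.
 * The helper name is hypothetical.
 */
static const char *
example_legacy_event_name(uint64_t event)
{
	if (event >= (uint64_t)ZFS_NUM_LEGACY_HISTORY_EVENTS)
		event = 0;
	return (zfs_history_event_names[event]);
}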
boolean_t
zfs_dataset_name_hidden(const char *name)
{
	/*
	 * Skip over datasets that are not visible in this zone,
	 * internal datasets (which have a $ in their name), and
	 * temporary datasets (which have a % in their name).
	 */
	if (strchr(name, '$') != NULL)
		return (B_TRUE);
	if (strchr(name, '%') != NULL)
		return (B_TRUE);

	if (!INGLOBALZONE(curproc) && !zone_dataset_visible(name, NULL))
		return (B_TRUE);

	return (B_FALSE);
}
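
/*
 * Illustrative sketch (not part of the original file): dataset iteration
 * code consults zfs_dataset_name_hidden() to skip internal ("$"),
 * temporary ("%") and zone-invisible datasets.  The helper name is
 * hypothetical.
 */
static boolean_t
example_should_list_dataset(const char *name)
{
	return (!zfs_dataset_name_hidden(name));
}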
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_allocatable_devs);
EXPORT_SYMBOL(zfs_special_devs);
EXPORT_SYMBOL(zpool_get_load_policy);
EXPORT_SYMBOL(zfs_zpl_version_map);
EXPORT_SYMBOL(zfs_spa_version_map);
EXPORT_SYMBOL(zfs_history_event_names);
EXPORT_SYMBOL(zfs_dataset_name_hidden);
#endif