Merge pull request #245 from truenas/NAS-130542-2

NAS-130542 / None / Sync truenas/zfs-2.3-release with upstream master
Alexander Motin 2024-08-12 06:18:46 -04:00 committed by GitHub
commit 072c9ab66b
GPG Key ID: B5690EEEBB952194
171 changed files with 7167 additions and 2254 deletions


@ -37,7 +37,7 @@ jobs:
- uses: actions/upload-artifact@v4
if: failure()
with:
name: Zpool-logs-${{ inputs.os }}
name: Zloop-logs-${{ inputs.os }}
path: |
/var/tmp/zloop/*/
!/var/tmp/zloop/*/vdev/
@ -46,7 +46,7 @@ jobs:
- uses: actions/upload-artifact@v4
if: failure()
with:
name: Zpool-files-${{ inputs.os }}
name: Zloop-files-${{ inputs.os }}
path: |
/var/tmp/zloop/*/vdev/
retention-days: 14


@ -77,7 +77,10 @@ Yanping Gao <yanping.gao@xtaotech.com>
Youzhong Yang <youzhong@gmail.com>
# Signed-off-by: overriding Author:
Ryan <errornointernet@envs.net> <error.nointernet@gmail.com>
Qiuhao Chen <chenqiuhao1997@gmail.com> <haohao0924@126.com>
Yuxin Wang <yuxinwang9999@gmail.com> <Bi11gates9999@gmail.com>
Zhenlei Huang <zlei@FreeBSD.org> <zlei.huang@gmail.com>
# Commits from strange places, long ago
Brian Behlendorf <behlendorf1@llnl.gov> <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>
@ -95,6 +98,7 @@ Alek Pinchuk <apinchuk@axcient.com> <alek-p@users.noreply.github.com>
Alexander Lobakin <alobakin@pm.me> <solbjorn@users.noreply.github.com>
Alexey Smirnoff <fling@member.fsf.org> <fling-@users.noreply.github.com>
Allen Holl <allen.m.holl@gmail.com> <65494904+allen-4@users.noreply.github.com>
Alphan Yılmaz <alphanyilmaz@gmail.com> <a1ea321@users.noreply.github.com>
Ameer Hamza <ahamza@ixsystems.com> <106930537+ixhamza@users.noreply.github.com>
Andrew J. Hesford <ajh@sideband.org> <48421688+ahesford@users.noreply.github.com>
Andrew Sun <me@andrewsun.com> <as-com@users.noreply.github.com>
@ -102,6 +106,7 @@ Aron Xu <happyaron.xu@gmail.com> <happyaron@users.noreply.github.com>
Arun KV <arun.kv@datacore.com> <65647132+arun-kv@users.noreply.github.com>
Ben Wolsieffer <benwolsieffer@gmail.com> <lopsided98@users.noreply.github.com>
bernie1995 <bernie.pikes@gmail.com> <42413912+bernie1995@users.noreply.github.com>
Bojan Novković <bnovkov@FreeBSD.org> <72801811+bnovkov@users.noreply.github.com>
Boris Protopopov <boris.protopopov@actifio.com> <bprotopopov@users.noreply.github.com>
Brad Forschinger <github@bnjf.id.au> <bnjf@users.noreply.github.com>
Brandon Thetford <brandon@dodecatec.com> <dodexahedron@users.noreply.github.com>
@ -193,6 +198,7 @@ Stefan Lendl <s.lendl@proxmox.com> <1321542+stfl@users.noreply.github.com>
Thomas Bertschinger <bertschinger@lanl.gov> <101425190+bertschinger@users.noreply.github.com>
Thomas Geppert <geppi@digitx.de> <geppi@users.noreply.github.com>
Tim Crawford <tcrawford@datto.com> <crawfxrd@users.noreply.github.com>
Todd Seidelmann <18294602+seidelma@users.noreply.github.com>
Tom Matthews <tom@axiom-partners.com> <tomtastic@users.noreply.github.com>
Tony Perkins <tperkins@datto.com> <62951051+tony-zfs@users.noreply.github.com>
Torsten Wörtwein <twoertwein@gmail.com> <twoertwein@users.noreply.github.com>

AUTHORS (13 changed lines)

@ -46,6 +46,7 @@ CONTRIBUTORS:
Alex Zhuravlev <alexey.zhuravlev@intel.com>
Allan Jude <allanjude@freebsd.org>
Allen Holl <allen.m.holl@gmail.com>
Alphan Yılmaz <alphanyilmaz@gmail.com>
alteriks <alteriks@gmail.com>
Alyssa Ross <hi@alyssa.is>
Ameer Hamza <ahamza@ixsystems.com>
@ -99,6 +100,7 @@ CONTRIBUTORS:
bernie1995 <bernie.pikes@gmail.com>
Bill McGonigle <bill-github.com-public1@bfccomputing.com>
Bill Pijewski <wdp@joyent.com>
Bojan Novković <bnovkov@FreeBSD.org>
Boris Protopopov <boris.protopopov@nexenta.com>
Brad Forschinger <github@bnjf.id.au>
Brad Lewis <brad.lewis@delphix.com>
@ -168,6 +170,7 @@ CONTRIBUTORS:
Daniel Hoffman <dj.hoffman@delphix.com>
Daniel Kobras <d.kobras@science-computing.de>
Daniel Kolesa <daniel@octaforge.org>
Daniel Perry <dtperry@amazon.com>
Daniel Reichelt <hacking@nachtgeist.net>
Daniel Stevenson <bot@dstev.net>
Daniel Verite <daniel@verite.pro>
@ -187,6 +190,7 @@ CONTRIBUTORS:
Dennis R. Friedrichsen <dennis.r.friedrichsen@gmail.com>
Denys Rtveliashvili <denys@rtveliashvili.name>
Derek Dai <daiderek@gmail.com>
Derek Schrock <dereks@lifeofadishwasher.com>
Dex Wood <slash2314@gmail.com>
DHE <git@dehacked.net>
Didier Roche <didrocks@ubuntu.com>
@ -245,6 +249,7 @@ CONTRIBUTORS:
Gionatan Danti <g.danti@assyoma.it>
Giuseppe Di Natale <guss80@gmail.com>
Glenn Washburn <development@efficientek.com>
glibg10b <glibg10b@users.noreply.github.com>
gofaster <felix.gofaster@gmail.com>
Gordan Bobic <gordan@redsleeve.org>
Gordon Bergling <gbergling@googlemail.com>
@ -410,6 +415,7 @@ CONTRIBUTORS:
Mart Frauenlob <allkind@fastest.cc>
Martin Matuska <mm@FreeBSD.org>
Martin Rüegg <martin.rueegg@metaworx.ch>
Martin Wagner <martin.wagner.dev@gmail.com>
Massimo Maggi <me@massimo-maggi.eu>
Mateusz Guzik <mjguzik@gmail.com>
Mateusz Piotrowski <0mp@FreeBSD.org>
@ -488,6 +494,7 @@ CONTRIBUTORS:
Peng <peng.hse@xtaotech.com>
Peter Ashford <ashford@accs.com>
Peter Dave Hello <hsu@peterdavehello.org>
Peter Doherty <peterd@acranox.org>
Peter Levine <plevine457@gmail.com>
Peter Wirdemo <peter.wirdemo@gmail.com>
Petros Koutoupis <petros@petroskoutoupis.com>
@ -501,6 +508,7 @@ CONTRIBUTORS:
Prasad Joshi <prasadjoshi124@gmail.com>
privb0x23 <privb0x23@users.noreply.github.com>
P.SCH <p88@yahoo.com>
Qiuhao Chen <chenqiuhao1997@gmail.com>
Quartz <yyhran@163.com>
Quentin Zdanis <zdanisq@gmail.com>
Rafael Kitover <rkitover@gmail.com>
@ -532,6 +540,7 @@ CONTRIBUTORS:
Roman Strashkin <roman.strashkin@nexenta.com>
Ross Williams <ross@ross-williams.net>
Ruben Kerkhof <ruben@rubenkerkhof.com>
Ryan <errornointernet@envs.net>
Ryan Hirasaki <ryanhirasaki@gmail.com>
Ryan Lahfa <masterancpp@gmail.com>
Ryan Libby <rlibby@FreeBSD.org>
@ -556,6 +565,7 @@ CONTRIBUTORS:
Sen Haerens <sen@senhaerens.be>
Serapheim Dimitropoulos <serapheim@delphix.com>
Seth Forshee <seth.forshee@canonical.com>
Seth Troisi <sethtroisi@google.com>
Shaan Nobee <sniper111@gmail.com>
Shampavman <sham.pavman@nexenta.com>
Shaun Tancheff <shaun@aeonazure.com>
@ -602,6 +612,7 @@ CONTRIBUTORS:
Tim Schumacher <timschumi@gmx.de>
Tino Reichardt <milky-zfs@mcmilk.de>
Tobin Harding <me@tobin.cc>
Todd Seidelmann <seidelma@users.noreply.github.com>
Tom Caputi <tcaputi@datto.com>
Tom Matthews <tom@axiom-partners.com>
Tomohiro Kusumi <kusumi.tomohiro@gmail.com>
@ -653,6 +664,8 @@ CONTRIBUTORS:
Zachary Bedell <zac@thebedells.org>
Zach Dykstra <dykstra.zachary@gmail.com>
zgock <zgock@nuc.base.zgock-lab.net>
Zhao Yongming <zym@apache.org>
Zhenlei Huang <zlei@FreeBSD.org>
Zhu Chuang <chuang@melty.land>
Érico Nogueira <erico.erc@gmail.com>
Đoàn Trần Công Danh <congdanhqx@gmail.com>

META (2 changed lines)

@ -6,5 +6,5 @@ Release: 1
Release-Tags: relext
License: CDDL
Author: OpenZFS
Linux-Maximum: 6.8
Linux-Maximum: 6.9
Linux-Minimum: 3.10


@ -32,4 +32,4 @@ For more details see the NOTICE, LICENSE and COPYRIGHT files; `UCRL-CODE-235197`
# Supported Kernels
* The `META` file contains the officially recognized supported Linux kernel versions.
* Supported FreeBSD versions are any supported branches and releases starting from 12.4-RELEASE.
* Supported FreeBSD versions are any supported branches and releases starting from 13.0-RELEASE.


@ -260,33 +260,34 @@ def draw_graph(kstats_dict):
arc_stats = isolate_section('arcstats', kstats_dict)
GRAPH_INDENT = ' '*4
GRAPH_WIDTH = 60
GRAPH_WIDTH = 70
arc_max = int(arc_stats['c_max'])
arc_size = f_bytes(arc_stats['size'])
arc_perc = f_perc(arc_stats['size'], arc_stats['c_max'])
mfu_size = f_bytes(arc_stats['mfu_size'])
mru_size = f_bytes(arc_stats['mru_size'])
meta_size = f_bytes(arc_stats['arc_meta_used'])
dnode_limit = f_bytes(arc_stats['arc_dnode_limit'])
arc_perc = f_perc(arc_stats['size'], arc_max)
data_size = f_bytes(arc_stats['data_size'])
meta_size = f_bytes(arc_stats['metadata_size'])
dnode_size = f_bytes(arc_stats['dnode_size'])
info_form = ('ARC: {0} ({1}) MFU: {2} MRU: {3} META: {4} '
'DNODE {5} ({6})')
info_line = info_form.format(arc_size, arc_perc, mfu_size, mru_size,
meta_size, dnode_size, dnode_limit)
info_form = ('ARC: {0} ({1}) Data: {2} Meta: {3} Dnode: {4}')
info_line = info_form.format(arc_size, arc_perc, data_size, meta_size,
dnode_size)
info_spc = ' '*int((GRAPH_WIDTH-len(info_line))/2)
info_line = GRAPH_INDENT+info_spc+info_line
graph_line = GRAPH_INDENT+'+'+('-'*(GRAPH_WIDTH-2))+'+'
mfu_perc = float(int(arc_stats['mfu_size'])/int(arc_stats['c_max']))
mru_perc = float(int(arc_stats['mru_size'])/int(arc_stats['c_max']))
arc_perc = float(int(arc_stats['size'])/int(arc_stats['c_max']))
arc_perc = float(int(arc_stats['size'])/arc_max)
data_perc = float(int(arc_stats['data_size'])/arc_max)
meta_perc = float(int(arc_stats['metadata_size'])/arc_max)
dnode_perc = float(int(arc_stats['dnode_size'])/arc_max)
total_ticks = float(arc_perc)*GRAPH_WIDTH
mfu_ticks = mfu_perc*GRAPH_WIDTH
mru_ticks = mru_perc*GRAPH_WIDTH
other_ticks = total_ticks-(mfu_ticks+mru_ticks)
data_ticks = data_perc*GRAPH_WIDTH
meta_ticks = meta_perc*GRAPH_WIDTH
dnode_ticks = dnode_perc*GRAPH_WIDTH
other_ticks = total_ticks-(data_ticks+meta_ticks+dnode_ticks)
core_form = 'F'*int(mfu_ticks)+'R'*int(mru_ticks)+'O'*int(other_ticks)
core_form = 'D'*int(data_ticks)+'M'*int(meta_ticks)+'N'*int(dnode_ticks)+\
'O'*int(other_ticks)
core_spc = ' '*(GRAPH_WIDTH-(2+len(core_form)))
core_line = GRAPH_INDENT+'|'+core_form+core_spc+'|'
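For readers following the arc_summary change above: the graph line is now split into data (D), metadata (M), dnode (N) and other (O) segments instead of MFU/MRU/other. A minimal standalone sketch of the new bar computation, using invented arcstat values (the real script reads them from the arcstats kstat):

GRAPH_INDENT = ' '*4
GRAPH_WIDTH = 70

arc_stats = {                      # hypothetical numbers, for illustration only
    'c_max': 8 << 30, 'size': 6 << 30, 'data_size': 4 << 30,
    'metadata_size': 1 << 30, 'dnode_size': 256 << 20,
}

arc_max = int(arc_stats['c_max'])
total_ticks = int(arc_stats['size'])/arc_max * GRAPH_WIDTH
data_ticks = int(arc_stats['data_size'])/arc_max * GRAPH_WIDTH
meta_ticks = int(arc_stats['metadata_size'])/arc_max * GRAPH_WIDTH
dnode_ticks = int(arc_stats['dnode_size'])/arc_max * GRAPH_WIDTH
other_ticks = total_ticks - (data_ticks + meta_ticks + dnode_ticks)

core_form = ('D'*int(data_ticks) + 'M'*int(meta_ticks) +
             'N'*int(dnode_ticks) + 'O'*int(other_ticks))
core_spc = ' '*(GRAPH_WIDTH - (2 + len(core_form)))
print(GRAPH_INDENT + '|' + core_form + core_spc + '|')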
@ -536,56 +537,87 @@ def section_arc(kstats_dict):
arc_stats = isolate_section('arcstats', kstats_dict)
throttle = arc_stats['memory_throttle_count']
if throttle == '0':
health = 'HEALTHY'
else:
health = 'THROTTLED'
prt_1('ARC status:', health)
prt_i1('Memory throttle count:', throttle)
print()
memory_all = arc_stats['memory_all_bytes']
memory_free = arc_stats['memory_free_bytes']
memory_avail = arc_stats['memory_available_bytes']
arc_size = arc_stats['size']
arc_target_size = arc_stats['c']
arc_max = arc_stats['c_max']
arc_min = arc_stats['c_min']
dnode_limit = arc_stats['arc_dnode_limit']
print('ARC status:')
prt_i1('Total memory size:', f_bytes(memory_all))
prt_i2('Min target size:', f_perc(arc_min, memory_all), f_bytes(arc_min))
prt_i2('Max target size:', f_perc(arc_max, memory_all), f_bytes(arc_max))
prt_i2('Target size (adaptive):',
f_perc(arc_size, arc_max), f_bytes(arc_target_size))
prt_i2('Current size:', f_perc(arc_size, arc_max), f_bytes(arc_size))
prt_i1('Free memory size:', f_bytes(memory_free))
prt_i1('Available memory size:', f_bytes(memory_avail))
print()
compressed_size = arc_stats['compressed_size']
overhead_size = arc_stats['overhead_size']
bonus_size = arc_stats['bonus_size']
dnode_size = arc_stats['dnode_size']
dbuf_size = arc_stats['dbuf_size']
hdr_size = arc_stats['hdr_size']
l2_hdr_size = arc_stats['l2_hdr_size']
abd_chunk_waste_size = arc_stats['abd_chunk_waste_size']
prt_1('ARC structural breakdown (current size):', f_bytes(arc_size))
prt_i2('Compressed size:',
f_perc(compressed_size, arc_size), f_bytes(compressed_size))
prt_i2('Overhead size:',
f_perc(overhead_size, arc_size), f_bytes(overhead_size))
prt_i2('Bonus size:',
f_perc(bonus_size, arc_size), f_bytes(bonus_size))
prt_i2('Dnode size:',
f_perc(dnode_size, arc_size), f_bytes(dnode_size))
prt_i2('Dbuf size:',
f_perc(dbuf_size, arc_size), f_bytes(dbuf_size))
prt_i2('Header size:',
f_perc(hdr_size, arc_size), f_bytes(hdr_size))
prt_i2('L2 header size:',
f_perc(l2_hdr_size, arc_size), f_bytes(l2_hdr_size))
prt_i2('ABD chunk waste size:',
f_perc(abd_chunk_waste_size, arc_size), f_bytes(abd_chunk_waste_size))
print()
meta = arc_stats['meta']
pd = arc_stats['pd']
pm = arc_stats['pm']
data_size = arc_stats['data_size']
metadata_size = arc_stats['metadata_size']
anon_data = arc_stats['anon_data']
anon_metadata = arc_stats['anon_metadata']
mfu_data = arc_stats['mfu_data']
mfu_metadata = arc_stats['mfu_metadata']
mfu_edata = arc_stats['mfu_evictable_data']
mfu_emetadata = arc_stats['mfu_evictable_metadata']
mru_data = arc_stats['mru_data']
mru_metadata = arc_stats['mru_metadata']
mru_edata = arc_stats['mru_evictable_data']
mru_emetadata = arc_stats['mru_evictable_metadata']
mfug_data = arc_stats['mfu_ghost_data']
mfug_metadata = arc_stats['mfu_ghost_metadata']
mrug_data = arc_stats['mru_ghost_data']
mrug_metadata = arc_stats['mru_ghost_metadata']
unc_data = arc_stats['uncached_data']
unc_metadata = arc_stats['uncached_metadata']
bonus_size = arc_stats['bonus_size']
dnode_limit = arc_stats['arc_dnode_limit']
dnode_size = arc_stats['dnode_size']
dbuf_size = arc_stats['dbuf_size']
hdr_size = arc_stats['hdr_size']
l2_hdr_size = arc_stats['l2_hdr_size']
abd_chunk_waste_size = arc_stats['abd_chunk_waste_size']
target_size_ratio = '{0}:1'.format(int(arc_max) // int(arc_min))
prt_2('ARC size (current):',
f_perc(arc_size, arc_max), f_bytes(arc_size))
prt_i2('Target size (adaptive):',
f_perc(arc_target_size, arc_max), f_bytes(arc_target_size))
prt_i2('Min size (hard limit):',
f_perc(arc_min, arc_max), f_bytes(arc_min))
prt_i2('Max size (high water):',
target_size_ratio, f_bytes(arc_max))
caches_size = int(anon_data)+int(anon_metadata)+\
int(mfu_data)+int(mfu_metadata)+int(mru_data)+int(mru_metadata)+\
int(unc_data)+int(unc_metadata)
prt_1('ARC types breakdown (compressed + overhead):', f_bytes(caches_size))
prt_i2('Data size:',
f_perc(data_size, caches_size), f_bytes(data_size))
prt_i2('Metadata size:',
f_perc(metadata_size, caches_size), f_bytes(metadata_size))
print()
prt_1('ARC states breakdown (compressed + overhead):', f_bytes(caches_size))
prt_i2('Anonymous data size:',
f_perc(anon_data, caches_size), f_bytes(anon_data))
prt_i2('Anonymous metadata size:',
@ -596,43 +628,37 @@ def section_arc(kstats_dict):
f_bytes(v / 65536 * caches_size / 65536))
prt_i2('MFU data size:',
f_perc(mfu_data, caches_size), f_bytes(mfu_data))
prt_i2('MFU evictable data size:',
f_perc(mfu_edata, caches_size), f_bytes(mfu_edata))
prt_i1('MFU ghost data size:', f_bytes(mfug_data))
v = (s-int(pm))*int(meta)/s
prt_i2('MFU metadata target:', f_perc(v, s),
f_bytes(v / 65536 * caches_size / 65536))
prt_i2('MFU metadata size:',
f_perc(mfu_metadata, caches_size), f_bytes(mfu_metadata))
prt_i2('MFU evictable metadata size:',
f_perc(mfu_emetadata, caches_size), f_bytes(mfu_emetadata))
prt_i1('MFU ghost metadata size:', f_bytes(mfug_metadata))
v = int(pd)*(s-int(meta))/s
prt_i2('MRU data target:', f_perc(v, s),
f_bytes(v / 65536 * caches_size / 65536))
prt_i2('MRU data size:',
f_perc(mru_data, caches_size), f_bytes(mru_data))
prt_i2('MRU evictable data size:',
f_perc(mru_edata, caches_size), f_bytes(mru_edata))
prt_i1('MRU ghost data size:', f_bytes(mrug_data))
v = int(pm)*int(meta)/s
prt_i2('MRU metadata target:', f_perc(v, s),
f_bytes(v / 65536 * caches_size / 65536))
prt_i2('MRU metadata size:',
f_perc(mru_metadata, caches_size), f_bytes(mru_metadata))
prt_i2('MRU evictable metadata size:',
f_perc(mru_emetadata, caches_size), f_bytes(mru_emetadata))
prt_i1('MRU ghost metadata size:', f_bytes(mrug_metadata))
prt_i2('Uncached data size:',
f_perc(unc_data, caches_size), f_bytes(unc_data))
prt_i2('Uncached metadata size:',
f_perc(unc_metadata, caches_size), f_bytes(unc_metadata))
prt_i2('Bonus size:',
f_perc(bonus_size, arc_size), f_bytes(bonus_size))
prt_i2('Dnode cache target:',
f_perc(dnode_limit, arc_max), f_bytes(dnode_limit))
prt_i2('Dnode cache size:',
f_perc(dnode_size, dnode_limit), f_bytes(dnode_size))
prt_i2('Dbuf size:',
f_perc(dbuf_size, arc_size), f_bytes(dbuf_size))
prt_i2('Header size:',
f_perc(hdr_size, arc_size), f_bytes(hdr_size))
prt_i2('L2 header size:',
f_perc(l2_hdr_size, arc_size), f_bytes(l2_hdr_size))
prt_i2('ABD chunk waste size:',
f_perc(abd_chunk_waste_size, arc_size), f_bytes(abd_chunk_waste_size))
print()
print('ARC hash breakdown:')
@ -647,6 +673,9 @@ def section_arc(kstats_dict):
print()
print('ARC misc:')
prt_i1('Memory throttles:', arc_stats['memory_throttle_count'])
prt_i1('Memory direct reclaims:', arc_stats['memory_direct_count'])
prt_i1('Memory indirect reclaims:', arc_stats['memory_indirect_count'])
prt_i1('Deleted:', f_hits(arc_stats['deleted']))
prt_i1('Mutex misses:', f_hits(arc_stats['mutex_miss']))
prt_i1('Eviction skips:', f_hits(arc_stats['evict_skip']))
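Note that the reworked 'ARC types breakdown' and 'ARC states breakdown' sections above use a new denominator, caches_size, which is the sum of the per-state data and metadata kstats rather than the ARC size. A small sketch of that calculation with invented values and a simplified percentage helper (the real f_perc lives elsewhere in the script):

arc_stats = {                      # hypothetical kstat values
    'anon_data': 10 << 20, 'anon_metadata': 1 << 20,
    'mfu_data': 3 << 30, 'mfu_metadata': 512 << 20,
    'mru_data': 1 << 30, 'mru_metadata': 256 << 20,
    'uncached_data': 0, 'uncached_metadata': 0,
    'data_size': 4 << 30, 'metadata_size': 768 << 20,
}

caches_size = sum(int(arc_stats[k]) for k in (
    'anon_data', 'anon_metadata', 'mfu_data', 'mfu_metadata',
    'mru_data', 'mru_metadata', 'uncached_data', 'uncached_metadata'))

def f_perc(part, whole):           # simplified stand-in for the script's helper
    return '%.1f %%' % (100 * int(part) / int(whole))

print('Data size:    ', f_perc(arc_stats['data_size'], caches_size))
print('Metadata size:', f_perc(arc_stats['metadata_size'], caches_size))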


@ -48,6 +48,7 @@
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/zap_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
@ -1126,16 +1127,33 @@ dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
boolean_t key64 =
!!(zap_getflags(zc.zc_zap) & ZAP_FLAG_UINT64_KEY);
if (key64)
(void) printf("\t\t0x%010lx = ",
*(uint64_t *)attr.za_name);
else
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
prop = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
(void) zap_lookup(os, object, attr.za_name,
attr.za_integer_length, attr.za_num_integers, prop);
if (attr.za_integer_length == 1) {
if (key64)
(void) zap_lookup_uint64(os, object,
(const uint64_t *)attr.za_name, 1,
attr.za_integer_length, attr.za_num_integers,
prop);
else
(void) zap_lookup(os, object, attr.za_name,
attr.za_integer_length, attr.za_num_integers,
prop);
if (attr.za_integer_length == 1 && !key64) {
if (strcmp(attr.za_name,
DSL_CRYPTO_KEY_MASTER_KEY) == 0 ||
strcmp(attr.za_name,
@ -1154,6 +1172,10 @@ dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
} else {
for (i = 0; i < attr.za_num_integers; i++) {
switch (attr.za_integer_length) {
case 1:
(void) printf("%u ",
((uint8_t *)prop)[i]);
break;
case 2:
(void) printf("%u ",
((uint16_t *)prop)[i]);
@ -1963,8 +1985,8 @@ dump_ddt(ddt_t *ddt, ddt_type_t type, ddt_class_t class)
(void) printf("%s: %llu entries, size %llu on disk, %llu in core\n",
name,
(u_longlong_t)count,
(u_longlong_t)(dspace / count),
(u_longlong_t)(mspace / count));
(u_longlong_t)dspace,
(u_longlong_t)mspace);
if (dump_opt['D'] < 3)
return;
@ -2082,8 +2104,13 @@ dump_brt(spa_t *spa)
for (zap_cursor_init(&zc, brt->brt_mos, brtvd->bv_mos_entries);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t offset = *(uint64_t *)za.za_name;
uint64_t refcnt = za.za_first_integer;
uint64_t refcnt;
VERIFY0(zap_lookup_uint64(brt->brt_mos,
brtvd->bv_mos_entries,
(const uint64_t *)za.za_name, 1,
za.za_integer_length, za.za_num_integers, &refcnt));
uint64_t offset = *(const uint64_t *)za.za_name;
snprintf(dva, sizeof (dva), "%" PRIu64 ":%llx", vdevid,
(u_longlong_t)offset);
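In both dump_zap() and dump_brt() above, a uint64-keyed ZAP entry carries its key as raw bytes in za_name, and the code reinterprets those bytes as a native-endian integer before printing. The same reinterpretation in Python, with a made-up offset value:

import sys

key_bytes = (0x1234 << 9).to_bytes(8, sys.byteorder)   # what za_name would hold
offset = int.from_bytes(key_bytes, sys.byteorder)      # *(const uint64_t *)za_name
print('0x%010x' % offset)                              # same form as the "0x%010lx" above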
@ -8336,7 +8363,7 @@ zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
(void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr);
#ifdef _LITTLE_ENDIAN
#ifdef _ZFS_LITTLE_ENDIAN
/* correct the endianness */
do_bswap = !do_bswap;
#endif
@ -8905,6 +8932,19 @@ zdb_numeric(char *str)
return (B_TRUE);
}
static int
dummy_get_file_info(dmu_object_type_t bonustype, const void *data,
zfs_file_info_t *zoi)
{
(void) data, (void) zoi;
if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
return (ENOENT);
(void) fprintf(stderr, "dummy_get_file_info: not implemented");
abort();
}
int
main(int argc, char **argv)
{
@ -9186,7 +9226,7 @@ main(int argc, char **argv)
char *pname = strdup(target);
const char *value;
nvlist_t *pnvl = NULL;
nvlist_t *vnvl;
nvlist_t *vnvl = NULL;
if (strpbrk(pname, "/@") != NULL)
*strpbrk(pname, "/@") = '\0';
@ -9220,6 +9260,7 @@ main(int argc, char **argv)
libzfs_core_fini();
}
dmu_objset_register_type(DMU_OST_ZFS, dummy_get_file_info);
kernel_init(SPA_MODE_READ);
kernel_init_done = B_TRUE;


@ -134,6 +134,10 @@ static int zfs_do_unzone(int argc, char **argv);
static int zfs_do_help(int argc, char **argv);
enum zfs_options {
ZFS_OPTION_JSON_NUMS_AS_INT = 1024
};
/*
* Enable a reasonable set of defaults for libumem debugging on DEBUG builds.
*/
@ -272,6 +276,8 @@ static zfs_command_t command_table[] = {
#define NCOMMAND (sizeof (command_table) / sizeof (command_table[0]))
#define MAX_CMD_LEN 256
zfs_command_t *current_command;
static const char *
@ -292,7 +298,7 @@ get_usage(zfs_help_t idx)
"<filesystem|volume>@<snap>[%<snap>][,...]\n"
"\tdestroy <filesystem|volume>#<bookmark>\n"));
case HELP_GET:
return (gettext("\tget [-rHp] [-d max] "
return (gettext("\tget [-rHp] [-j [--json-int]] [-d max] "
"[-o \"all\" | field[,...]]\n"
"\t [-t type[,...]] [-s source[,...]]\n"
"\t <\"all\" | property[,...]> "
@ -304,11 +310,12 @@ get_usage(zfs_help_t idx)
return (gettext("\tupgrade [-v]\n"
"\tupgrade [-r] [-V version] <-a | filesystem ...>\n"));
case HELP_LIST:
return (gettext("\tlist [-Hp] [-r|-d max] [-o property[,...]] "
"[-s property]...\n\t [-S property]... [-t type[,...]] "
return (gettext("\tlist [-Hp] [-j [--json-int]] [-r|-d max] "
"[-o property[,...]] [-s property]...\n\t "
"[-S property]... [-t type[,...]] "
"[filesystem|volume|snapshot] ...\n"));
case HELP_MOUNT:
return (gettext("\tmount\n"
return (gettext("\tmount [-j]\n"
"\tmount [-flvO] [-o opts] <-a|-R filesystem|"
"filesystem>\n"));
case HELP_PROMOTE:
@ -420,7 +427,7 @@ get_usage(zfs_help_t idx)
"\t <filesystem|volume>\n"
"\tchange-key -i [-l] <filesystem|volume>\n"));
case HELP_VERSION:
return (gettext("\tversion\n"));
return (gettext("\tversion [-j]\n"));
case HELP_REDACT:
return (gettext("\tredact <snapshot> <bookmark> "
"<redaction_snapshot> ...\n"));
@ -1885,7 +1892,89 @@ is_recvd_column(zprop_get_cbdata_t *cbp)
}
/*
* zfs get [-rHp] [-o all | field[,field]...] [-s source[,source]...]
* Generates an nvlist with output version for every command based on params.
* Purpose of this is to add a version of JSON output, considering the schema
* format might be updated for each command in future.
*
* Schema:
*
* "output_version": {
* "command": string,
* "vers_major": integer,
* "vers_minor": integer,
* }
*/
static nvlist_t *
zfs_json_schema(int maj_v, int min_v)
{
nvlist_t *sch = NULL;
nvlist_t *ov = NULL;
char cmd[MAX_CMD_LEN];
snprintf(cmd, MAX_CMD_LEN, "zfs %s", current_command->name);
sch = fnvlist_alloc();
ov = fnvlist_alloc();
fnvlist_add_string(ov, "command", cmd);
fnvlist_add_uint32(ov, "vers_major", maj_v);
fnvlist_add_uint32(ov, "vers_minor", min_v);
fnvlist_add_nvlist(sch, "output_version", ov);
fnvlist_free(ov);
return (sch);
}
static void
fill_dataset_info(nvlist_t *list, zfs_handle_t *zhp, boolean_t as_int)
{
char createtxg[ZFS_MAXPROPLEN];
zfs_type_t type = zfs_get_type(zhp);
nvlist_add_string(list, "name", zfs_get_name(zhp));
switch (type) {
case ZFS_TYPE_FILESYSTEM:
fnvlist_add_string(list, "type", "FILESYSTEM");
break;
case ZFS_TYPE_VOLUME:
fnvlist_add_string(list, "type", "VOLUME");
break;
case ZFS_TYPE_SNAPSHOT:
fnvlist_add_string(list, "type", "SNAPSHOT");
break;
case ZFS_TYPE_POOL:
fnvlist_add_string(list, "type", "POOL");
break;
case ZFS_TYPE_BOOKMARK:
fnvlist_add_string(list, "type", "BOOKMARK");
break;
default:
fnvlist_add_string(list, "type", "UNKNOWN");
break;
}
if (type != ZFS_TYPE_POOL)
fnvlist_add_string(list, "pool", zfs_get_pool_name(zhp));
if (as_int) {
fnvlist_add_uint64(list, "createtxg", zfs_prop_get_int(zhp,
ZFS_PROP_CREATETXG));
} else {
if (zfs_prop_get(zhp, ZFS_PROP_CREATETXG, createtxg,
sizeof (createtxg), NULL, NULL, 0, B_TRUE) == 0)
fnvlist_add_string(list, "createtxg", createtxg);
}
if (type == ZFS_TYPE_SNAPSHOT) {
char *ds, *snap;
ds = snap = strdup(zfs_get_name(zhp));
ds = strsep(&snap, "@");
fnvlist_add_string(list, "dataset", ds);
fnvlist_add_string(list, "snapshot_name", snap);
free(ds);
}
}
/*
* zfs get [-rHp] [-j [--json-int]] [-o all | field[,field]...]
* [-s source[,source]...]
* < all | property[,property]... > < fs | snap | vol > ...
*
* -r recurse over any child datasets
@ -1898,6 +1987,8 @@ is_recvd_column(zprop_get_cbdata_t *cbp)
* "local,default,inherited,received,temporary,none". Default is
* all six.
* -p Display values in parsable (literal) format.
* -j Display output in JSON format.
* --json-int Display numbers as integers instead of strings.
*
* Prints properties for the given datasets. The user can control which
* columns to display as well as which property types to allow.
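As a consumer-side illustration of the -j/--json-int flags documented above, here is a hedged sketch. It assumes a zfs binary built with these changes is on PATH and that a dataset exists; the top-level 'output_version' and 'datasets' keys and the per-dataset 'type', 'pool', 'createtxg' and 'properties' fields follow zfs_json_schema() and fill_dataset_info() above, while the sub-keys inside each property entry are not shown in this diff and may differ:

import json
import subprocess
import sys

dataset = sys.argv[1] if len(sys.argv) > 1 else 'rpool'   # hypothetical dataset name
out = subprocess.run(['zfs', 'get', '-j', '--json-int', 'all', dataset],
                     capture_output=True, text=True, check=True).stdout
obj = json.loads(out)

print(obj['output_version'])   # e.g. {'command': 'zfs get', 'vers_major': 0, 'vers_minor': 1}
for name, info in obj['datasets'].items():
    print(name, info['type'], info['pool'], info['createtxg'])
    # each properties entry mirrors the tabular value/source columns;
    # dump one to inspect the exact layout produced by this build
    print(json.dumps(info['properties'].get('used'), indent=2))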
@ -1917,9 +2008,21 @@ get_callback(zfs_handle_t *zhp, void *data)
nvlist_t *user_props = zfs_get_user_props(zhp);
zprop_list_t *pl = cbp->cb_proplist;
nvlist_t *propval;
nvlist_t *item, *d, *props;
item = d = props = NULL;
const char *strval;
const char *sourceval;
boolean_t received = is_recvd_column(cbp);
int err = 0;
if (cbp->cb_json) {
d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "datasets");
if (d == NULL) {
fprintf(stderr, "datasets obj not found.\n");
exit(1);
}
props = fnvlist_alloc();
}
for (; pl != NULL; pl = pl->pl_next) {
char *recvdval = NULL;
@ -1954,9 +2057,9 @@ get_callback(zfs_handle_t *zhp, void *data)
cbp->cb_literal) == 0))
recvdval = rbuf;
zprop_print_one_property(zfs_get_name(zhp), cbp,
err = zprop_collect_property(zfs_get_name(zhp), cbp,
zfs_prop_to_name(pl->pl_prop),
buf, sourcetype, source, recvdval);
buf, sourcetype, source, recvdval, props);
} else if (zfs_prop_userquota(pl->pl_user_prop)) {
sourcetype = ZPROP_SRC_LOCAL;
@ -1966,8 +2069,9 @@ get_callback(zfs_handle_t *zhp, void *data)
(void) strlcpy(buf, "-", sizeof (buf));
}
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, buf, sourcetype, source, NULL);
err = zprop_collect_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, buf, sourcetype, source, NULL,
props);
} else if (zfs_prop_written(pl->pl_user_prop)) {
sourcetype = ZPROP_SRC_LOCAL;
@ -1977,8 +2081,9 @@ get_callback(zfs_handle_t *zhp, void *data)
(void) strlcpy(buf, "-", sizeof (buf));
}
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, buf, sourcetype, source, NULL);
err = zprop_collect_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, buf, sourcetype, source, NULL,
props);
} else {
if (nvlist_lookup_nvlist(user_props,
pl->pl_user_prop, &propval) != 0) {
@ -2010,9 +2115,24 @@ get_callback(zfs_handle_t *zhp, void *data)
cbp->cb_literal) == 0))
recvdval = rbuf;
zprop_print_one_property(zfs_get_name(zhp), cbp,
err = zprop_collect_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, strval, sourcetype,
source, recvdval);
source, recvdval, props);
}
if (err != 0)
return (err);
}
if (cbp->cb_json) {
if (!nvlist_empty(props)) {
item = fnvlist_alloc();
fill_dataset_info(item, zhp, cbp->cb_json_as_int);
fnvlist_add_nvlist(item, "properties", props);
fnvlist_add_nvlist(d, zfs_get_name(zhp), item);
fnvlist_free(props);
fnvlist_free(item);
} else {
fnvlist_free(props);
}
}
@ -2029,6 +2149,7 @@ zfs_do_get(int argc, char **argv)
int ret = 0;
int limit = 0;
zprop_list_t fake_name = { 0 };
nvlist_t *data;
/*
* Set up default columns and sources.
@ -2040,8 +2161,14 @@ zfs_do_get(int argc, char **argv)
cb.cb_columns[3] = GET_COL_SOURCE;
cb.cb_type = ZFS_TYPE_DATASET;
struct option long_options[] = {
{"json-int", no_argument, NULL, ZFS_OPTION_JSON_NUMS_AS_INT},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt(argc, argv, ":d:o:s:rt:Hp")) != -1) {
while ((c = getopt_long(argc, argv, ":d:o:s:jrt:Hp", long_options,
NULL)) != -1) {
switch (c) {
case 'p':
cb.cb_literal = B_TRUE;
@ -2055,6 +2182,17 @@ zfs_do_get(int argc, char **argv)
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 'j':
cb.cb_json = B_TRUE;
cb.cb_jsobj = zfs_json_schema(0, 1);
data = fnvlist_alloc();
fnvlist_add_nvlist(cb.cb_jsobj, "datasets", data);
fnvlist_free(data);
break;
case ZFS_OPTION_JSON_NUMS_AS_INT:
cb.cb_json_as_int = B_TRUE;
cb.cb_literal = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
@ -2178,7 +2316,6 @@ found2:;
found3:;
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
@ -2195,6 +2332,12 @@ found3:;
usage(B_FALSE);
}
if (!cb.cb_json && cb.cb_json_as_int) {
(void) fprintf(stderr, gettext("'--json-int' only works with"
" '-j' option\n"));
usage(B_FALSE);
}
fields = argv[0];
/*
@ -2235,6 +2378,11 @@ found3:;
ret = zfs_for_each(argc, argv, flags, types, NULL,
&cb.cb_proplist, limit, get_callback, &cb);
if (ret == 0 && cb.cb_json)
zcmd_print_json(cb.cb_jsobj);
else if (ret != 0 && cb.cb_json)
nvlist_free(cb.cb_jsobj);
if (cb.cb_proplist == &fake_name)
zprop_free_list(fake_name.pl_next);
else
@ -3442,6 +3590,9 @@ typedef struct list_cbdata {
boolean_t cb_literal;
boolean_t cb_scripted;
zprop_list_t *cb_proplist;
boolean_t cb_json;
nvlist_t *cb_jsobj;
boolean_t cb_json_as_int;
} list_cbdata_t;
/*
@ -3512,10 +3663,11 @@ zfs_list_avail_color(zfs_handle_t *zhp)
/*
* Given a dataset and a list of fields, print out all the properties according
* to the described layout.
* to the described layout, or return an nvlist containing all the fields, later
* to be printed out as a JSON object.
*/
static void
print_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)
collect_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
boolean_t first = B_TRUE;
@ -3524,9 +3676,23 @@ print_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)
nvlist_t *propval;
const char *propstr;
boolean_t right_justify;
nvlist_t *item, *d, *props;
item = d = props = NULL;
zprop_source_t sourcetype = ZPROP_SRC_NONE;
char source[ZFS_MAX_DATASET_NAME_LEN];
if (cb->cb_json) {
d = fnvlist_lookup_nvlist(cb->cb_jsobj, "datasets");
if (d == NULL) {
fprintf(stderr, "datasets obj not found.\n");
exit(1);
}
item = fnvlist_alloc();
props = fnvlist_alloc();
fill_dataset_info(item, zhp, cb->cb_json_as_int);
}
for (; pl != NULL; pl = pl->pl_next) {
if (!first) {
if (!cb->cb_json && !first) {
if (cb->cb_scripted)
(void) putchar('\t');
else
@ -3542,69 +3708,112 @@ print_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)
right_justify = zfs_prop_align_right(pl->pl_prop);
} else if (pl->pl_prop != ZPROP_USERPROP) {
if (zfs_prop_get(zhp, pl->pl_prop, property,
sizeof (property), NULL, NULL, 0,
cb->cb_literal) != 0)
sizeof (property), &sourcetype, source,
sizeof (source), cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = zfs_prop_align_right(pl->pl_prop);
} else if (zfs_prop_userquota(pl->pl_user_prop)) {
sourcetype = ZPROP_SRC_LOCAL;
if (zfs_prop_get_userquota(zhp, pl->pl_user_prop,
property, sizeof (property), cb->cb_literal) != 0)
property, sizeof (property), cb->cb_literal) != 0) {
sourcetype = ZPROP_SRC_NONE;
propstr = "-";
else
} else {
propstr = property;
}
right_justify = B_TRUE;
} else if (zfs_prop_written(pl->pl_user_prop)) {
sourcetype = ZPROP_SRC_LOCAL;
if (zfs_prop_get_written(zhp, pl->pl_user_prop,
property, sizeof (property), cb->cb_literal) != 0)
property, sizeof (property), cb->cb_literal) != 0) {
sourcetype = ZPROP_SRC_NONE;
propstr = "-";
else
} else {
propstr = property;
}
right_justify = B_TRUE;
} else {
if (nvlist_lookup_nvlist(userprops,
pl->pl_user_prop, &propval) != 0)
pl->pl_user_prop, &propval) != 0) {
propstr = "-";
else
} else {
propstr = fnvlist_lookup_string(propval,
ZPROP_VALUE);
strlcpy(source,
fnvlist_lookup_string(propval,
ZPROP_SOURCE), ZFS_MAX_DATASET_NAME_LEN);
if (strcmp(source,
zfs_get_name(zhp)) == 0) {
sourcetype = ZPROP_SRC_LOCAL;
} else if (strcmp(source,
ZPROP_SOURCE_VAL_RECVD) == 0) {
sourcetype = ZPROP_SRC_RECEIVED;
} else {
sourcetype = ZPROP_SRC_INHERITED;
}
}
right_justify = B_FALSE;
}
/*
* zfs_list_avail_color() needs ZFS_PROP_AVAILABLE + USED
* - so we need another for() search for the USED part
* - when no colors wanted, we can skip the whole thing
*/
if (use_color() && pl->pl_prop == ZFS_PROP_AVAILABLE) {
zprop_list_t *pl2 = cb->cb_proplist;
for (; pl2 != NULL; pl2 = pl2->pl_next) {
if (pl2->pl_prop == ZFS_PROP_USED) {
color_start(zfs_list_avail_color(zhp));
/* found it, no need for more loops */
break;
if (cb->cb_json) {
if (pl->pl_prop == ZFS_PROP_NAME)
continue;
if (zprop_nvlist_one_property(
zfs_prop_to_name(pl->pl_prop), propstr,
sourcetype, source, NULL, props,
cb->cb_json_as_int) != 0)
nomem();
} else {
/*
* zfs_list_avail_color() needs
* ZFS_PROP_AVAILABLE + USED, so we need another
* for() search for the USED part; when no colors are
* wanted, we can skip the whole thing
*/
if (use_color() && pl->pl_prop == ZFS_PROP_AVAILABLE) {
zprop_list_t *pl2 = cb->cb_proplist;
for (; pl2 != NULL; pl2 = pl2->pl_next) {
if (pl2->pl_prop == ZFS_PROP_USED) {
color_start(
zfs_list_avail_color(zhp));
/*
* found it, no need for more
* loops
*/
break;
}
}
}
/*
* If this is being called in scripted mode, or if
* this is the last column and it is left-justified,
* don't include a width format specifier.
*/
if (cb->cb_scripted || (pl->pl_next == NULL &&
!right_justify))
(void) fputs(propstr, stdout);
else if (right_justify) {
(void) printf("%*s", (int)pl->pl_width,
propstr);
} else {
(void) printf("%-*s", (int)pl->pl_width,
propstr);
}
if (pl->pl_prop == ZFS_PROP_AVAILABLE)
color_end();
}
/*
* If this is being called in scripted mode, or if this is the
* last column and it is left-justified, don't include a width
* format specifier.
*/
if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
(void) fputs(propstr, stdout);
else if (right_justify)
(void) printf("%*s", (int)pl->pl_width, propstr);
else
(void) printf("%-*s", (int)pl->pl_width, propstr);
if (pl->pl_prop == ZFS_PROP_AVAILABLE)
color_end();
}
(void) putchar('\n');
if (cb->cb_json) {
fnvlist_add_nvlist(item, "properties", props);
fnvlist_add_nvlist(d, zfs_get_name(zhp), item);
fnvlist_free(props);
fnvlist_free(item);
} else
(void) putchar('\n');
}
/*
@ -3616,12 +3825,12 @@ list_callback(zfs_handle_t *zhp, void *data)
list_cbdata_t *cbp = data;
if (cbp->cb_first) {
if (!cbp->cb_scripted)
if (!cbp->cb_scripted && !cbp->cb_json)
print_header(cbp);
cbp->cb_first = B_FALSE;
}
print_dataset(zhp, cbp);
collect_dataset(zhp, cbp);
return (0);
}
@ -3640,9 +3849,16 @@ zfs_do_list(int argc, char **argv)
int ret = 0;
zfs_sort_column_t *sortcol = NULL;
int flags = ZFS_ITER_PROP_LISTSNAPS | ZFS_ITER_ARGS_CAN_BE_PATHS;
nvlist_t *data = NULL;
struct option long_options[] = {
{"json-int", no_argument, NULL, ZFS_OPTION_JSON_NUMS_AS_INT},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt(argc, argv, "HS:d:o:prs:t:")) != -1) {
while ((c = getopt_long(argc, argv, "jHS:d:o:prs:t:", long_options,
NULL)) != -1) {
switch (c) {
case 'o':
fields = optarg;
@ -3657,6 +3873,17 @@ zfs_do_list(int argc, char **argv)
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'j':
cb.cb_json = B_TRUE;
cb.cb_jsobj = zfs_json_schema(0, 1);
data = fnvlist_alloc();
fnvlist_add_nvlist(cb.cb_jsobj, "datasets", data);
fnvlist_free(data);
break;
case ZFS_OPTION_JSON_NUMS_AS_INT:
cb.cb_json_as_int = B_TRUE;
cb.cb_literal = B_TRUE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
@ -3730,6 +3957,12 @@ found3:;
argc -= optind;
argv += optind;
if (!cb.cb_json && cb.cb_json_as_int) {
(void) fprintf(stderr, gettext("'--json-int' only works with"
" '-j' option\n"));
usage(B_FALSE);
}
/*
* If "-o space" and no types were specified, don't display snapshots.
*/
@ -3769,6 +4002,11 @@ found3:;
ret = zfs_for_each(argc, argv, flags, types, sortcol, &cb.cb_proplist,
limit, list_callback, &cb);
if (ret == 0 && cb.cb_json)
zcmd_print_json(cb.cb_jsobj);
else if (ret != 0 && cb.cb_json)
nvlist_free(cb.cb_jsobj);
zprop_free_list(cb.cb_proplist);
zfs_free_sort_columns(sortcol);
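The same applies to zfs list -j; a short hedged sketch (same assumptions as the zfs get example earlier, type names per fill_dataset_info()):

import json
import subprocess

out = subprocess.run(['zfs', 'list', '-j', '-t', 'all'], capture_output=True,
                     text=True, check=True).stdout
datasets = json.loads(out)['datasets']
snapshots = [n for n, d in datasets.items() if d['type'] == 'SNAPSHOT']
print(len(datasets), 'datasets listed,', len(snapshots), 'of them snapshots')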
@ -7189,14 +7427,17 @@ share_mount(int op, int argc, char **argv)
int do_all = 0;
int recursive = 0;
boolean_t verbose = B_FALSE;
boolean_t json = B_FALSE;
int c, ret = 0;
char *options = NULL;
int flags = 0;
nvlist_t *jsobj, *data, *item;
const uint_t mount_nthr = 512;
uint_t nthr;
jsobj = data = item = NULL;
/* check options */
while ((c = getopt(argc, argv, op == OP_MOUNT ? ":aRlvo:Of" : "al"))
while ((c = getopt(argc, argv, op == OP_MOUNT ? ":ajRlvo:Of" : "al"))
!= -1) {
switch (c) {
case 'a':
@ -7211,6 +7452,11 @@ share_mount(int op, int argc, char **argv)
case 'l':
flags |= MS_CRYPT;
break;
case 'j':
json = B_TRUE;
jsobj = zfs_json_schema(0, 1);
data = fnvlist_alloc();
break;
case 'o':
if (*optarg == '\0') {
(void) fprintf(stderr, gettext("empty mount "
@ -7245,6 +7491,11 @@ share_mount(int op, int argc, char **argv)
argc -= optind;
argv += optind;
if (json && argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
/* check number of arguments */
if (do_all || recursive) {
enum sa_protocol protocol = SA_NO_PROTOCOL;
@ -7348,12 +7599,30 @@ share_mount(int op, int argc, char **argv)
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0 ||
strchr(entry.mnt_special, '@') != NULL)
continue;
(void) printf("%-30s %s\n", entry.mnt_special,
entry.mnt_mountp);
if (json) {
item = fnvlist_alloc();
fnvlist_add_string(item, "filesystem",
entry.mnt_special);
fnvlist_add_string(item, "mountpoint",
entry.mnt_mountp);
fnvlist_add_nvlist(data, entry.mnt_special,
item);
fnvlist_free(item);
} else {
(void) printf("%-30s %s\n", entry.mnt_special,
entry.mnt_mountp);
}
}
(void) fclose(mnttab);
if (json) {
fnvlist_add_nvlist(jsobj, "datasets", data);
if (nvlist_empty(data))
fnvlist_free(jsobj);
else
zcmd_print_json(jsobj);
fnvlist_free(data);
}
} else {
zfs_handle_t *zhp;
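For the new zfs mount -j path above, the per-entry keys 'filesystem' and 'mountpoint' come from the share_mount() changes, and nothing is printed when no ZFS filesystems are mounted. A hedged consumer sketch under the same binary assumption:

import json
import subprocess

out = subprocess.run(['zfs', 'mount', '-j'], capture_output=True,
                     text=True, check=True).stdout
if out.strip():                    # empty output when nothing is mounted
    for name, ent in json.loads(out)['datasets'].items():
        print(ent['filesystem'], 'mounted at', ent['mountpoint'])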
@ -8811,8 +9080,39 @@ found:;
static int
zfs_do_version(int argc, char **argv)
{
(void) argc, (void) argv;
return (zfs_version_print() != 0);
int c;
nvlist_t *jsobj = NULL, *zfs_ver = NULL;
boolean_t json = B_FALSE;
while ((c = getopt(argc, argv, "j")) != -1) {
switch (c) {
case 'j':
json = B_TRUE;
jsobj = zfs_json_schema(0, 1);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
if (argc != 0) {
(void) fprintf(stderr, "too many arguments\n");
usage(B_FALSE);
}
if (json) {
zfs_ver = zfs_version_nvlist();
if (zfs_ver) {
fnvlist_add_nvlist(jsobj, "zfs_version", zfs_ver);
zcmd_print_json(jsobj);
fnvlist_free(zfs_ver);
return (0);
} else
return (-1);
} else
return (zfs_version_print() != 0);
}
/* Display documentation */
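And for zfs version -j: the diff adds the top-level 'zfs_version' object, but its inner keys come from zfs_version_nvlist(), which is not shown here, so this hedged sketch simply dumps them:

import json
import subprocess

out = subprocess.run(['zfs', 'version', '-j'], capture_output=True,
                     text=True, check=True).stdout
obj = json.loads(out)
print(obj['output_version']['command'])          # "zfs version"
print(json.dumps(obj['zfs_version'], indent=2))  # keys as produced by zfs_version_nvlist()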


@ -43,6 +43,9 @@ cols = {
"obj": [12, -1, "objset"],
"cc": [5, 1000, "zil_commit_count"],
"cwc": [5, 1000, "zil_commit_writer_count"],
"cec": [5, 1000, "zil_commit_error_count"],
"csc": [5, 1000, "zil_commit_stall_count"],
"cSc": [5, 1000, "zil_commit_suspend_count"],
"ic": [5, 1000, "zil_itx_count"],
"iic": [5, 1000, "zil_itx_indirect_count"],
"iib": [5, 1024, "zil_itx_indirect_bytes"],

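The three new zilstat columns above map to the kstat names zil_commit_error_count, zil_commit_stall_count and zil_commit_suspend_count. A hedged sketch that reads them straight from the Linux per-objset kstat files (path and file layout assumed from how zilstat normally collects them; the counters simply show '-' on a module without this change):

import glob

NEW = ('zil_commit_error_count', 'zil_commit_stall_count',
       'zil_commit_suspend_count')

for path in glob.glob('/proc/spl/kstat/zfs/*/objset-*'):
    stats = {}
    with open(path) as f:
        for line in f:
            parts = line.split()
            if len(parts) == 3 and parts[0].startswith('zil_'):
                stats[parts[0]] = parts[2]
    if stats:
        print(path, {k: stats.get(k, '-') for k in NEW})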
File diff suppressed because it is too large.


@ -56,15 +56,6 @@ typedef struct redup_table {
int numhashbits;
} redup_table_t;
int
highbit64(uint64_t i)
{
if (i == 0)
return (0);
return (NBBY * sizeof (uint64_t) - __builtin_clzll(i));
}
void *
safe_calloc(size_t n)
{
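The highbit64() helper removed above (presumably because zstream already gets an equivalent definition from libspl) returns the index of the highest set bit plus one, i.e. the bit length of the value; in Python:

def highbit64(i: int) -> int:
    # 0 -> 0, otherwise 64 - __builtin_clzll(i) for a 64-bit value
    return i.bit_length()

assert highbit64(0) == 0
assert highbit64(1) == 1
assert highbit64(1 << 63) == 64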


@ -26,6 +26,7 @@
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2023, Klara, Inc.
*/
/*
@ -444,6 +445,7 @@ ztest_func_t ztest_blake3;
ztest_func_t ztest_fletcher;
ztest_func_t ztest_fletcher_incr;
ztest_func_t ztest_verify_dnode_bt;
ztest_func_t ztest_pool_prefetch_ddt;
static uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */
static uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */
@ -499,6 +501,7 @@ static ztest_info_t ztest_info[] = {
ZTI_INIT(ztest_fletcher, 1, &zopt_rarely),
ZTI_INIT(ztest_fletcher_incr, 1, &zopt_rarely),
ZTI_INIT(ztest_verify_dnode_bt, 1, &zopt_sometimes),
ZTI_INIT(ztest_pool_prefetch_ddt, 1, &zopt_rarely),
};
#define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))
@ -6993,6 +6996,21 @@ ztest_fletcher_incr(ztest_ds_t *zd, uint64_t id)
}
}
void
ztest_pool_prefetch_ddt(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa;
(void) pthread_rwlock_rdlock(&ztest_name_lock);
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
ddt_prefetch_all(spa);
spa_close(spa, FTAG);
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
static int
ztest_set_global_vars(void)
{
@ -8495,17 +8513,24 @@ print_time(hrtime_t t, char *timebuf)
}
static nvlist_t *
make_random_props(void)
make_random_pool_props(void)
{
nvlist_t *props;
props = fnvlist_alloc();
if (ztest_random(2) == 0)
return (props);
/* Twenty percent of the time enable ZPOOL_PROP_DEDUP_TABLE_QUOTA */
if (ztest_random(5) == 0) {
fnvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_DEDUP_TABLE_QUOTA),
2 * 1024 * 1024);
}
fnvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE), 1);
/* Fifty percent of the time enable ZPOOL_PROP_AUTOREPLACE */
if (ztest_random(2) == 0) {
fnvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE), 1);
}
return (props);
}
@ -8537,7 +8562,7 @@ ztest_init(ztest_shared_t *zs)
zs->zs_mirrors = ztest_opts.zo_mirrors;
nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
NULL, ztest_opts.zo_raid_children, zs->zs_mirrors, 1);
props = make_random_props();
props = make_random_pool_props();
/*
* We don't expect the pool to suspend unless maxfaults == 0,


@ -72,7 +72,7 @@ AM_LDFLAGS += $(ASAN_LDFLAGS)
AM_LDFLAGS += $(UBSAN_LDFLAGS)
if BUILD_FREEBSD
AM_LDFLAGS += -fstack-protector-strong -shared
AM_LDFLAGS += -fstack-protector-strong
AM_LDFLAGS += -Wl,-x -Wl,--fatal-warnings -Wl,--warn-shared-textrel
AM_LDFLAGS += -lm
endif


@ -332,7 +332,7 @@ AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_MAX_HW_SECTORS], [
ZFS_LINUX_TEST_RESULT([blk_queue_max_hw_sectors], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([blk_queue_max_hw_sectors])
AC_MSG_RESULT(no)
])
])
@ -355,7 +355,7 @@ AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_MAX_SEGMENTS], [
ZFS_LINUX_TEST_RESULT([blk_queue_max_segments], [
AC_MSG_RESULT(yes)
], [
ZFS_LINUX_TEST_ERROR([blk_queue_max_segments])
AC_MSG_RESULT(no)
])
])


@ -534,6 +534,30 @@ AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_WHOLE], [
])
])
dnl #
dnl # 5.16 API change
dnl # Added bdev_nr_bytes() helper.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_NR_BYTES], [
ZFS_LINUX_TEST_SRC([bdev_nr_bytes], [
#include <linux/blkdev.h>
],[
struct block_device *bdev = NULL;
loff_t nr_bytes __attribute__ ((unused)) = 0;
nr_bytes = bdev_nr_bytes(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_NR_BYTES], [
AC_MSG_CHECKING([whether bdev_nr_bytes() is available])
ZFS_LINUX_TEST_RESULT([bdev_nr_bytes], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BDEV_NR_BYTES, 1, [bdev_nr_bytes() is available])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 5.20 API change,
dnl # Removed bdevname(), snprintf(.., %pg) should be used.
@ -747,6 +771,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV], [
ZFS_AC_KERNEL_SRC_BLKDEV_CHECK_DISK_CHANGE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_CHECK_MEDIA_CHANGE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_WHOLE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_NR_BYTES
ZFS_AC_KERNEL_SRC_BLKDEV_BDEVNAME
ZFS_AC_KERNEL_SRC_BLKDEV_ISSUE_DISCARD
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_KOBJ
@ -767,6 +792,7 @@ AC_DEFUN([ZFS_AC_KERNEL_BLKDEV], [
ZFS_AC_KERNEL_BLKDEV_CHECK_DISK_CHANGE
ZFS_AC_KERNEL_BLKDEV_BDEV_CHECK_MEDIA_CHANGE
ZFS_AC_KERNEL_BLKDEV_BDEV_WHOLE
ZFS_AC_KERNEL_BLKDEV_BDEV_NR_BYTES
ZFS_AC_KERNEL_BLKDEV_BDEVNAME
ZFS_AC_KERNEL_BLKDEV_GET_ERESTARTSYS
ZFS_AC_KERNEL_BLKDEV_ISSUE_DISCARD


@ -0,0 +1,23 @@
dnl #
dnl # 5.11 API change
dnl # kmap_atomic() was deprecated in favor of kmap_local_page()
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_KMAP_LOCAL_PAGE], [
ZFS_LINUX_TEST_SRC([kmap_local_page], [
#include <linux/highmem.h>
],[
struct page page;
kmap_local_page(&page);
])
])
AC_DEFUN([ZFS_AC_KERNEL_KMAP_LOCAL_PAGE], [
AC_MSG_CHECKING([whether kmap_local_page exists])
ZFS_LINUX_TEST_RESULT([kmap_local_page], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_KMAP_LOCAL_PAGE, 1,
[kernel has kmap_local_page])
],[
AC_MSG_RESULT(no)
])
])


@ -125,6 +125,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
ZFS_AC_KERNEL_SRC_VFS_DEDUPE_FILE_RANGE
ZFS_AC_KERNEL_SRC_VFS_FILE_OPERATIONS_EXTEND
ZFS_AC_KERNEL_SRC_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_SRC_KMAP_LOCAL_PAGE
ZFS_AC_KERNEL_SRC_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN
ZFS_AC_KERNEL_SRC_GENERIC_IO_ACCT
@ -276,6 +277,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
ZFS_AC_KERNEL_VFS_DEDUPE_FILE_RANGE
ZFS_AC_KERNEL_VFS_FILE_OPERATIONS_EXTEND
ZFS_AC_KERNEL_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_KMAP_LOCAL_PAGE
ZFS_AC_KERNEL_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_MAKE_REQUEST_FN
ZFS_AC_KERNEL_GENERIC_IO_ACCT


@ -516,6 +516,8 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
VENDOR=alpine ;
elif test -f /etc/arch-release ; then
VENDOR=arch ;
elif test -f /etc/artix-release ; then
VENDOR=artix ;
elif test -f /etc/fedora-release ; then
VENDOR=fedora ;
elif test -f /bin/freebsd-version ; then
@ -551,7 +553,7 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
AC_MSG_CHECKING([default package type])
case "$VENDOR" in
alpine|arch|gentoo|lunar|slackware)
alpine|arch|artix|gentoo|lunar|slackware)
DEFAULT_PACKAGE=tgz ;;
debian|ubuntu)
DEFAULT_PACKAGE=deb ;;
@ -576,6 +578,8 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
case "$VENDOR" in
alpine|gentoo) DEFAULT_INIT_SHELL=/sbin/openrc-run
IS_SYSV_RC=false ;;
artix) DEFAULT_INIT_SHELL=/usr/bin/openrc-run
IS_SYSV_RC=false ;;
*) DEFAULT_INIT_SHELL=/bin/sh
IS_SYSV_RC=true ;;
esac
@ -594,7 +598,7 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
AC_MSG_CHECKING([default init config directory])
case "$VENDOR" in
alpine|gentoo)
alpine|artix|gentoo)
initconfdir=/etc/conf.d
;;
fedora|openeuler|redhat|sles|toss)
@ -623,7 +627,7 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
AC_MSG_CHECKING([default bash completion directory])
case "$VENDOR" in
alpine|debian|gentoo|ubuntu)
alpine|artix|debian|gentoo|ubuntu)
bashcompletiondir=/usr/share/bash-completion/completions
;;
freebsd)


@ -1 +1,2 @@
/zfs
/zpool


@ -1,5 +1,10 @@
nodist_bashcompletion_DATA = %D%/zfs
SUBSTFILES += $(nodist_bashcompletion_DATA)
nodist_bashcompletion_DATA = %D%/zfs %D%/zpool
COMPLETION_FILES = %D%/zfs
SUBSTFILES += $(COMPLETION_FILES)
CLEANFILES += %D%/zpool
SHELLCHECKSCRIPTS += $(nodist_bashcompletion_DATA)
$(call SHELLCHECK_OPTS,$(nodist_bashcompletion_DATA)): SHELLCHECK_SHELL = bash
SHELLCHECKSCRIPTS += $(COMPLETION_FILES)
$(call SHELLCHECK_OPTS,$(COMPLETION_FILES)): SHELLCHECK_SHELL = bash
%D%/zpool: %D%/zfs
$(LN_S) -f zfs $@


@ -111,6 +111,7 @@ usr/share/man/man8/zpool-labelclear.8
usr/share/man/man8/zpool-list.8
usr/share/man/man8/zpool-offline.8
usr/share/man/man8/zpool-online.8
usr/share/man/man8/zpool-prefetch.8
usr/share/man/man8/zpool-reguid.8
usr/share/man/man8/zpool-remove.8
usr/share/man/man8/zpool-reopen.8


@ -51,8 +51,8 @@ extern "C" {
/*
* Miscellaneous ZFS constants
*/
#define ZFS_MAXPROPLEN MAXPATHLEN
#define ZPOOL_MAXPROPLEN MAXPATHLEN
#define ZFS_MAXPROPLEN ZAP_MAXVALUELEN
#define ZPOOL_MAXPROPLEN ZAP_MAXVALUELEN
/*
* libzfs errors
@ -327,6 +327,8 @@ _LIBZFS_H int zpool_vdev_clear(zpool_handle_t *, uint64_t);
_LIBZFS_H nvlist_t *zpool_find_vdev(zpool_handle_t *, const char *, boolean_t *,
boolean_t *, boolean_t *);
_LIBZFS_H nvlist_t *zpool_find_parent_vdev(zpool_handle_t *, const char *,
boolean_t *, boolean_t *, boolean_t *);
_LIBZFS_H nvlist_t *zpool_find_vdev_by_physpath(zpool_handle_t *, const char *,
boolean_t *, boolean_t *, boolean_t *);
_LIBZFS_H int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *,
@ -458,6 +460,7 @@ _LIBZFS_H nvlist_t *zpool_get_config(zpool_handle_t *, nvlist_t **);
_LIBZFS_H nvlist_t *zpool_get_features(zpool_handle_t *);
_LIBZFS_H int zpool_refresh_stats(zpool_handle_t *, boolean_t *);
_LIBZFS_H int zpool_get_errlog(zpool_handle_t *, nvlist_t **);
_LIBZFS_H void zpool_add_propname(zpool_handle_t *, const char *);
/*
* Import and export functions
@ -468,7 +471,8 @@ _LIBZFS_H int zpool_import(libzfs_handle_t *, nvlist_t *, const char *,
char *altroot);
_LIBZFS_H int zpool_import_props(libzfs_handle_t *, nvlist_t *, const char *,
nvlist_t *, int);
_LIBZFS_H void zpool_print_unsup_feat(nvlist_t *config);
_LIBZFS_H void zpool_collect_unsup_feat(nvlist_t *config, char *buf,
size_t size);
/*
* Miscellaneous pool functions
@ -499,11 +503,13 @@ _LIBZFS_H void zpool_obj_to_path(zpool_handle_t *, uint64_t, uint64_t, char *,
size_t);
_LIBZFS_H int zfs_ioctl(libzfs_handle_t *, int, struct zfs_cmd *);
_LIBZFS_H void zpool_explain_recover(libzfs_handle_t *, const char *, int,
nvlist_t *);
nvlist_t *, char *, size_t);
_LIBZFS_H int zpool_checkpoint(zpool_handle_t *);
_LIBZFS_H int zpool_discard_checkpoint(zpool_handle_t *);
_LIBZFS_H boolean_t zpool_is_draid_spare(const char *);
_LIBZFS_H int zpool_prefetch(zpool_handle_t *, zpool_prefetch_type_t);
/*
* Basic handle manipulations. These functions do not create or destroy the
* underlying datasets, only the references to them.
@ -628,6 +634,8 @@ _LIBZFS_H int zprop_get_list(libzfs_handle_t *, char *, zprop_list_t **,
zfs_type_t);
_LIBZFS_H void zprop_free_list(zprop_list_t *);
_LIBZFS_H void zcmd_print_json(nvlist_t *);
#define ZFS_GET_NCOLS 5
typedef enum {
@ -655,9 +663,13 @@ typedef struct zprop_get_cbdata {
boolean_t cb_scripted;
boolean_t cb_literal;
boolean_t cb_first;
boolean_t cb_json;
zprop_list_t *cb_proplist;
zfs_type_t cb_type;
vdev_cbdata_t cb_vdevs;
nvlist_t *cb_jsobj;
boolean_t cb_json_as_int;
boolean_t cb_json_pool_key_guid;
} zprop_get_cbdata_t;
#define ZFS_SET_NOMOUNT 1
@ -671,6 +683,13 @@ _LIBZFS_H void zprop_print_one_property(const char *, zprop_get_cbdata_t *,
const char *, const char *, zprop_source_t, const char *,
const char *);
_LIBZFS_H int zprop_nvlist_one_property(const char *, const char *,
zprop_source_t, const char *, const char *, nvlist_t *, boolean_t);
_LIBZFS_H int zprop_collect_property(const char *, zprop_get_cbdata_t *,
const char *, const char *, zprop_source_t, const char *,
const char *, nvlist_t *);
/*
* Iterator functions.
*/
@ -976,6 +995,7 @@ _LIBZFS_H boolean_t libzfs_envvar_is_set(const char *);
_LIBZFS_H const char *zfs_version_userland(void);
_LIBZFS_H char *zfs_version_kernel(void);
_LIBZFS_H int zfs_version_print(void);
_LIBZFS_H nvlist_t *zfs_version_nvlist(void);
/*
* Given a device or file, determine if it is part of a pool.


@ -148,6 +148,9 @@ _LIBZFS_CORE_H int lzc_pool_checkpoint_discard(const char *);
_LIBZFS_CORE_H int lzc_wait(const char *, zpool_wait_activity_t, boolean_t *);
_LIBZFS_CORE_H int lzc_wait_tag(const char *, zpool_wait_activity_t, uint64_t,
boolean_t *);
_LIBZFS_CORE_H int lzc_pool_prefetch(const char *, zpool_prefetch_type_t);
_LIBZFS_CORE_H int lzc_wait_fs(const char *, zfs_wait_activity_t, boolean_t *);
_LIBZFS_CORE_H int lzc_set_bootenv(const char *, const nvlist_t *);


@ -28,48 +28,6 @@
#ifndef _SYS_CCOMPAT_H
#define _SYS_CCOMPAT_H
#if __FreeBSD_version < 1300051
#define vm_page_valid(m) (m)->valid = VM_PAGE_BITS_ALL
#define vm_page_do_sunbusy(m)
#define vm_page_none_valid(m) ((m)->valid == 0)
#else
#define vm_page_do_sunbusy(m) vm_page_sunbusy(m)
#endif
#if __FreeBSD_version < 1300074
#define VOP_UNLOCK1(x) VOP_UNLOCK(x, 0)
#else
#define VOP_UNLOCK1(x) VOP_UNLOCK(x)
#endif
#if __FreeBSD_version < 1300064
#define VN_IS_DOOMED(vp) ((vp)->v_iflag & VI_DOOMED)
#endif
#if __FreeBSD_version < 1300068
#define VFS_VOP_VECTOR_REGISTER(x)
#endif
#if __FreeBSD_version >= 1300076
#define getnewvnode_reserve_() getnewvnode_reserve()
#else
#define getnewvnode_reserve_() getnewvnode_reserve(1)
#endif
#if __FreeBSD_version < 1300102
#define ASSERT_VOP_IN_SEQC(zp)
#define MNTK_FPLOOKUP 0
#define vn_seqc_write_begin(vp)
#define vn_seqc_write_end(vp)
#ifndef VFS_SMR_DECLARE
#define VFS_SMR_DECLARE
#endif
#ifndef VFS_SMR_ZONE_SET
#define VFS_SMR_ZONE_SET(zone)
#endif
#endif
struct hlist_node {
struct hlist_node *next, **pprev;
};


@ -49,6 +49,7 @@ MALLOC_DECLARE(M_SOLARIS);
#define KM_NOSLEEP M_NOWAIT
#define KM_NORMALPRI 0
#define KMC_NODEBUG UMA_ZONE_NODUMP
#define KMC_RECLAIMABLE 0x0
typedef struct vmem vmem_t;


@ -70,10 +70,4 @@ int secpolicy_xvattr(vnode_t *vp, xvattr_t *xvap, uid_t owner, cred_t *cr,
vtype_t vtype);
int secpolicy_smb(cred_t *cr);
#if __FreeBSD_version >= 1300005
#define spl_priv_check_cred(a, b) priv_check_cred((a), (b))
#else
#define spl_priv_check_cred(a, b) priv_check_cred((a), (b), 0)
#endif
#endif /* _OPENSOLARIS_SYS_POLICY_H_ */


@ -88,9 +88,6 @@ do_thread_create(caddr_t stk, size_t stksize, void (*proc)(void *), void *arg,
thread_lock(td);
sched_prio(td, pri);
sched_add(td, SRQ_BORING);
#if __FreeBSD_version < 1300068
thread_unlock(td);
#endif
}
return (td);
}


@ -30,9 +30,7 @@
#define _OPENSOLARIS_SYS_RANDOM_H_
#include_next <sys/random.h>
#if __FreeBSD_version >= 1300108
#include <sys/prng.h>
#endif
static inline int
random_get_bytes(uint8_t *p, size_t s)
@ -51,7 +49,7 @@ random_get_pseudo_bytes(uint8_t *p, size_t s)
static inline uint32_t
random_in_range(uint32_t range)
{
#if defined(_KERNEL) && __FreeBSD_version >= 1300108
#if defined(_KERNEL)
return (prng32_bounded(range));
#else
uint32_t r;


@ -31,13 +31,14 @@
#include_next <sys/sdt.h>
#ifdef KDTRACE_HOOKS
/* CSTYLED */
/* BEGIN CSTYLED */
SDT_PROBE_DECLARE(sdt, , , set__error);
#define SET_ERROR(err) \
((sdt_sdt___set__error->id ? \
(*sdt_probe_func)(sdt_sdt___set__error->id, \
(uintptr_t)err, 0, 0, 0, 0) : 0), err)
#define SET_ERROR(err) ({ \
SDT_PROBE1(sdt, , , set__error, (uintptr_t)err); \
err; \
})
/* END CSTYLED */
#else
#define SET_ERROR(err) (err)
#endif


@ -41,22 +41,6 @@ void zfs_vmobject_assert_wlocked(vm_object_t object);
void zfs_vmobject_wlock(vm_object_t object);
void zfs_vmobject_wunlock(vm_object_t object);
#if __FreeBSD_version >= 1300081
#define zfs_vmobject_assert_wlocked_12(x)
#define zfs_vmobject_wlock_12(x)
#define zfs_vmobject_wunlock_12(x)
#else
#define zfs_vmobject_assert_wlocked_12(x) \
zfs_vmobject_assert_wlocked((x))
#define zfs_vmobject_wlock_12(x) \
zfs_vmobject_wlock(x)
#define zfs_vmobject_wunlock_12(x) \
zfs_vmobject_wunlock(x)
#define vm_page_grab_unlocked(obj, idx, flags) \
vm_page_grab((obj), (idx), (flags))
#define vm_page_grab_valid_unlocked(m, obj, idx, flags) \
vm_page_grab_valid((m), (obj), (idx), (flags))
#endif
static inline caddr_t
zfs_map_page(vm_page_t pp, struct sf_buf **sfp)
{


@ -95,17 +95,13 @@ vn_is_readonly(vnode_t *vp)
static __inline void
vn_flush_cached_data(vnode_t *vp, boolean_t sync)
{
#if __FreeBSD_version > 1300054
if (vm_object_mightbedirty(vp->v_object)) {
#else
if (vp->v_object->flags & OBJ_MIGHTBEDIRTY) {
#endif
int flags = sync ? OBJPC_SYNC : 0;
vn_lock(vp, LK_SHARED | LK_RETRY);
zfs_vmobject_wlock(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, flags);
zfs_vmobject_wunlock(vp->v_object);
VOP_UNLOCK1(vp);
VOP_UNLOCK(vp);
}
}
#endif


@ -27,40 +27,21 @@
#ifndef _SYS_FS_ZFS_VFSOPS_H
#define _SYS_FS_ZFS_VFSOPS_H
#if __FreeBSD_version >= 1300125
#define TEARDOWN_RMS
#endif
#if __FreeBSD_version >= 1300109
#define TEARDOWN_INACTIVE_RMS
#endif
#include <sys/dataset_kstats.h>
#include <sys/list.h>
#include <sys/vfs.h>
#include <sys/zil.h>
#include <sys/sa.h>
#include <sys/rrwlock.h>
#ifdef TEARDOWN_INACTIVE_RMS
#include <sys/rmlock.h>
#endif
#include <sys/zfs_ioctl.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef TEARDOWN_RMS
typedef struct rmslock zfs_teardown_lock_t;
#else
#define zfs_teardown_lock_t rrmlock_t
#endif
#ifdef TEARDOWN_INACTIVE_RMS
typedef struct rmslock zfs_teardown_inactive_lock_t;
#else
#define zfs_teardown_inactive_lock_t krwlock_t
#endif
typedef struct zfsvfs zfsvfs_t;
struct znode;
@ -120,7 +101,6 @@ struct zfsvfs {
struct task z_unlinked_drain_task;
};
#ifdef TEARDOWN_RMS
#define ZFS_TEARDOWN_INIT(zfsvfs) \
rms_init(&(zfsvfs)->z_teardown_lock, "zfs teardown")
@ -150,39 +130,7 @@ struct zfsvfs {
#define ZFS_TEARDOWN_HELD(zfsvfs) \
rms_owned_any(&(zfsvfs)->z_teardown_lock)
#else
#define ZFS_TEARDOWN_INIT(zfsvfs) \
rrm_init(&(zfsvfs)->z_teardown_lock, B_FALSE)
#define ZFS_TEARDOWN_DESTROY(zfsvfs) \
rrm_destroy(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_ENTER_READ(zfsvfs, tag) \
rrm_enter_read(&(zfsvfs)->z_teardown_lock, tag);
#define ZFS_TEARDOWN_EXIT_READ(zfsvfs, tag) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, tag) \
rrm_enter(&(zfsvfs)->z_teardown_lock, RW_WRITER, tag)
#define ZFS_TEARDOWN_EXIT_WRITE(zfsvfs) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_EXIT(zfsvfs, tag) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_READ_HELD(zfsvfs) \
RRM_READ_HELD(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_WRITE_HELD(zfsvfs) \
RRM_WRITE_HELD(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_HELD(zfsvfs) \
RRM_LOCK_HELD(&(zfsvfs)->z_teardown_lock)
#endif
#ifdef TEARDOWN_INACTIVE_RMS
#define ZFS_TEARDOWN_INACTIVE_INIT(zfsvfs) \
rms_init(&(zfsvfs)->z_teardown_inactive_lock, "zfs teardown inactive")
@ -206,31 +154,6 @@ struct zfsvfs {
#define ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs) \
rms_wowned(&(zfsvfs)->z_teardown_inactive_lock)
#else
#define ZFS_TEARDOWN_INACTIVE_INIT(zfsvfs) \
rw_init(&(zfsvfs)->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL)
#define ZFS_TEARDOWN_INACTIVE_DESTROY(zfsvfs) \
rw_destroy(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_TRY_ENTER_READ(zfsvfs) \
rw_tryenter(&(zfsvfs)->z_teardown_inactive_lock, RW_READER)
#define ZFS_TEARDOWN_INACTIVE_ENTER_READ(zfsvfs) \
rw_enter(&(zfsvfs)->z_teardown_inactive_lock, RW_READER)
#define ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs) \
rw_exit(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_ENTER_WRITE(zfsvfs) \
rw_enter(&(zfsvfs)->z_teardown_inactive_lock, RW_WRITER)
#define ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs) \
rw_exit(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs) \
RW_WRITE_HELD(&(zfsvfs)->z_teardown_inactive_lock)
#endif
#define ZSB_XATTR 0x0001 /* Enable user xattrs */
/*

View File

@ -29,9 +29,15 @@
#include <linux/highmem.h>
#include <linux/uaccess.h>
#ifdef HAVE_KMAP_LOCAL_PAGE
/* 5.11 API change */
#define zfs_kmap_local(page) kmap_local_page(page)
#define zfs_kunmap_local(addr) kunmap_local(addr)
#else
/* 2.6.37 API change */
#define zfs_kmap_atomic(page) kmap_atomic(page)
#define zfs_kunmap_atomic(addr) kunmap_atomic(addr)
#define zfs_kmap_local(page) kmap_atomic(page)
#define zfs_kunmap_local(addr) kunmap_atomic(addr)
#endif
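A minimal usage sketch (editor's illustration, not part of this diff), assuming the kernel headers already pulled in by this file; the helper name is hypothetical:

static inline void
example_copy_from_page(struct page *pp, void *dst, size_t off, size_t len)
{
	/* Establish a short-lived mapping; works with either kernel API. */
	void *src = zfs_kmap_local(pp);

	memcpy(dst, (char *)src + off, len);
	/* Drop the mapping before doing anything that might sleep. */
	zfs_kunmap_local(src);
}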
/* 5.0 API change - no more 'type' argument for access_ok() */
#ifdef HAVE_ACCESS_OK_TYPE

View File

@ -45,6 +45,7 @@ typedef enum kmc_bit {
KMC_BIT_TOTAL = 18, /* Proc handler helper bit */
KMC_BIT_ALLOC = 19, /* Proc handler helper bit */
KMC_BIT_MAX = 20, /* Proc handler helper bit */
KMC_BIT_RECLAIMABLE = 21, /* Can be freed by shrinker */
} kmc_bit_t;
/* kmem move callback return values */
@ -66,9 +67,7 @@ typedef enum kmem_cbrc {
#define KMC_TOTAL (1 << KMC_BIT_TOTAL)
#define KMC_ALLOC (1 << KMC_BIT_ALLOC)
#define KMC_MAX (1 << KMC_BIT_MAX)
#define KMC_REAP_CHUNK INT_MAX
#define KMC_DEFAULT_SEEKS 1
#define KMC_RECLAIMABLE (1 << KMC_BIT_RECLAIMABLE)
extern struct list_head spl_kmem_cache_list;
extern struct rw_semaphore spl_kmem_cache_sem;
@ -192,22 +191,25 @@ extern void spl_kmem_reap(void);
extern uint64_t spl_kmem_cache_inuse(kmem_cache_t *cache);
extern uint64_t spl_kmem_cache_entry_size(kmem_cache_t *cache);
#ifndef SPL_KMEM_CACHE_IMPLEMENTING
/*
* Macros for the kmem_cache_* API expected by ZFS and SPL clients. We don't
* define them inside spl-kmem-cache.c, as that uses the kernel's incompatible
* kmem_cache_* facilities to implement ours.
*/
/* Avoid conflicts with kernel names that might be implemented as macros. */
#undef kmem_cache_alloc
#define kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl) \
spl_kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl)
#define kmem_cache_set_move(skc, move) spl_kmem_cache_set_move(skc, move)
#define kmem_cache_destroy(skc) spl_kmem_cache_destroy(skc)
/*
* This is necessary to be compatible with other kernel modules
* or in-tree filesystem that may define kmem_cache_alloc,
* like bcachefs does it now.
*/
#ifdef kmem_cache_alloc
#undef kmem_cache_alloc
#endif
#define kmem_cache_alloc(skc, flags) spl_kmem_cache_alloc(skc, flags)
#define kmem_cache_free(skc, obj) spl_kmem_cache_free(skc, obj)
#define kmem_cache_reap_now(skc) spl_kmem_cache_reap_now(skc)
#define kmem_reap() spl_kmem_reap()
#endif
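A hedged sketch of how a consumer would use the new KMC_RECLAIMABLE flag (editor's illustration, not part of this diff; the cache name and object size are hypothetical):

static kmem_cache_t *example_cache;

static void
example_cache_init(void)
{
	/*
	 * Objects in this cache can be freed by the shrinker, so mark the
	 * cache reclaimable; constructor/destructor hooks are omitted here.
	 */
	example_cache = kmem_cache_create("example_reclaimable_cache", 64, 0,
	    NULL, NULL, NULL, NULL, NULL, KMC_RECLAIMABLE);
}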
/*
* The following functions are only available for internal use.

View File

@ -80,7 +80,7 @@
snprintf(__get_str(msg), TRACE_DBUF_MSG_MAX, \
DBUF_TP_PRINTK_FMT, DBUF_TP_PRINTK_ARGS); \
} else { \
__assign_str(os_spa, "NULL") \
__assign_str(os_spa, "NULL"); \
__entry->ds_object = 0; \
__entry->db_object = 0; \
__entry->db_level = 0; \

View File

@ -137,6 +137,7 @@ void abd_copy_from_buf_off(abd_t *, const void *, size_t, size_t);
void abd_copy_to_buf_off(void *, abd_t *, size_t, size_t);
int abd_cmp(abd_t *, abd_t *);
int abd_cmp_buf_off(abd_t *, const void *, size_t, size_t);
int abd_cmp_zero_off(abd_t *, size_t, size_t);
void abd_zero_off(abd_t *, size_t, size_t);
void abd_verify(abd_t *);
@ -183,6 +184,12 @@ abd_zero(abd_t *abd, size_t size)
abd_zero_off(abd, 0, size);
}
static inline int
abd_cmp_zero(abd_t *abd, size_t size)
{
return (abd_cmp_zero_off(abd, 0, size));
}
/*
* ABD type check functions
*/

View File

@ -250,6 +250,16 @@ typedef struct arc_buf_info {
enum zio_compress abi_l2arc_compress;
} arc_buf_info_t;
/*
* Flags returned by arc_cached; describes which part of the arc
* the block is cached in.
*/
#define ARC_CACHED_EMBEDDED (1U << 0)
#define ARC_CACHED_IN_L1 (1U << 1)
#define ARC_CACHED_IN_MRU (1U << 2)
#define ARC_CACHED_IN_MFU (1U << 3)
#define ARC_CACHED_IN_L2 (1U << 4)
void arc_space_consume(uint64_t space, arc_space_type_t type);
void arc_space_return(uint64_t space, arc_space_type_t type);
boolean_t arc_is_metadata(arc_buf_t *buf);
@ -310,6 +320,7 @@ zio_t *arc_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
arc_prune_t *arc_add_prune_callback(arc_prune_func_t *func, void *priv);
void arc_remove_prune_callback(arc_prune_t *p);
void arc_freed(spa_t *spa, const blkptr_t *bp);
int arc_cached(spa_t *spa, const blkptr_t *bp);
void arc_flush(spa_t *spa, boolean_t retry);
void arc_tempreserve_clear(uint64_t reserve);
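A minimal sketch of interpreting the arc_cached() bitmask (editor's illustration, not part of this diff; the helper name is hypothetical):

static boolean_t
example_block_is_in_memory(spa_t *spa, const blkptr_t *bp)
{
	int flags = arc_cached(spa, bp);

	/* Embedded blocks need no read at all; IN_L1 means it is in RAM. */
	return ((flags & (ARC_CACHED_EMBEDDED | ARC_CACHED_IN_L1)) != 0);
}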

View File

@ -1058,10 +1058,10 @@ extern uint_t arc_lotsfree_percent;
extern uint64_t zfs_arc_min;
extern uint64_t zfs_arc_max;
extern void arc_reduce_target_size(int64_t to_free);
extern uint64_t arc_reduce_target_size(uint64_t to_free);
extern boolean_t arc_reclaim_needed(void);
extern void arc_kmem_reap_soon(void);
extern void arc_wait_for_eviction(uint64_t, boolean_t);
extern void arc_wait_for_eviction(uint64_t, boolean_t, boolean_t);
extern void arc_lowmem_init(void);
extern void arc_lowmem_fini(void);

View File

@ -34,6 +34,7 @@
*/
#include <sys/types.h>
#include <sys/sysmacros.h>
#ifdef __cplusplus
extern "C" {

View File

@ -214,9 +214,15 @@ typedef struct dmu_buf_impl {
struct objset *db_objset;
/*
* handle to safely access the dnode we belong to (NULL when evicted)
* Handle to safely access the dnode we belong to (NULL when evicted)
* if dnode_move() is used on the platform, or just dnode otherwise.
*/
#if !defined(__linux__) && !defined(__FreeBSD__)
#define USE_DNODE_HANDLE 1
struct dnode_handle *db_dnode_handle;
#else
struct dnode *db_dnode;
#endif
/*
* our parent buffer; if the dnode points to us directly,
@ -417,11 +423,19 @@ void dbuf_stats_destroy(void);
int dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift);
#ifdef USE_DNODE_HANDLE
#define DB_DNODE(_db) ((_db)->db_dnode_handle->dnh_dnode)
#define DB_DNODE_LOCK(_db) ((_db)->db_dnode_handle->dnh_zrlock)
#define DB_DNODE_ENTER(_db) (zrl_add(&DB_DNODE_LOCK(_db)))
#define DB_DNODE_EXIT(_db) (zrl_remove(&DB_DNODE_LOCK(_db)))
#define DB_DNODE_HELD(_db) (!zrl_is_zero(&DB_DNODE_LOCK(_db)))
#else
#define DB_DNODE(_db) ((_db)->db_dnode)
#define DB_DNODE_LOCK(_db)
#define DB_DNODE_ENTER(_db)
#define DB_DNODE_EXIT(_db)
#define DB_DNODE_HELD(_db) (B_TRUE)
#endif
void dbuf_init(void);
void dbuf_fini(void);
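A short sketch of the access pattern these macros support on every platform (editor's illustration, not part of this diff; the function name is hypothetical):

static uint64_t
example_db_object_number(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	uint64_t obj;

	DB_DNODE_ENTER(db);	/* compiles away where db_dnode is stored directly */
	dn = DB_DNODE(db);
	obj = dn->dn_object;
	DB_DNODE_EXIT(db);
	return (obj);
}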

View File

@ -151,7 +151,8 @@ enum ddt_phys_type {
*/
/* State flags for dde_flags */
#define DDE_FLAG_LOADED (1 << 0) /* entry ready for use */
#define DDE_FLAG_LOADED (1 << 0) /* entry ready for use */
#define DDE_FLAG_OVERQUOTA (1 << 1) /* entry unusable, no space */
typedef struct {
/* key must be first for ddt_key_compare */
@ -170,6 +171,7 @@ typedef struct {
uint8_t dde_flags; /* load state flags */
kcondvar_t dde_cv; /* signaled when load completes */
uint64_t dde_waiters; /* count of waiters on dde_cv */
avl_node_t dde_node; /* ddt_tree node */
} ddt_entry_t;
@ -228,11 +230,13 @@ extern void ddt_histogram_add(ddt_histogram_t *dst, const ddt_histogram_t *src);
extern void ddt_histogram_stat(ddt_stat_t *dds, const ddt_histogram_t *ddh);
extern boolean_t ddt_histogram_empty(const ddt_histogram_t *ddh);
extern void ddt_get_dedup_object_stats(spa_t *spa, ddt_object_t *ddo);
extern uint64_t ddt_get_ddt_dsize(spa_t *spa);
extern void ddt_get_dedup_histogram(spa_t *spa, ddt_histogram_t *ddh);
extern void ddt_get_dedup_stats(spa_t *spa, ddt_stat_t *dds_total);
extern uint64_t ddt_get_dedup_dspace(spa_t *spa);
extern uint64_t ddt_get_pool_dedup_ratio(spa_t *spa);
extern int ddt_get_pool_dedup_cached(spa_t *spa, uint64_t *psize);
extern ddt_t *ddt_select(spa_t *spa, const blkptr_t *bp);
extern void ddt_enter(ddt_t *ddt);
@ -240,8 +244,9 @@ extern void ddt_exit(ddt_t *ddt);
extern void ddt_init(void);
extern void ddt_fini(void);
extern ddt_entry_t *ddt_lookup(ddt_t *ddt, const blkptr_t *bp, boolean_t add);
extern void ddt_prefetch(spa_t *spa, const blkptr_t *bp);
extern void ddt_remove(ddt_t *ddt, ddt_entry_t *dde);
extern void ddt_prefetch(spa_t *spa, const blkptr_t *bp);
extern void ddt_prefetch_all(spa_t *spa);
extern boolean_t ddt_class_contains(spa_t *spa, ddt_class_t max_class,
const blkptr_t *bp);
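A hedged sketch of querying how much of the DDT is currently cached via the new interface (editor's illustration, not part of this diff; the wrapper name is hypothetical):

static uint64_t
example_ddt_cached_bytes(spa_t *spa)
{
	uint64_t psize = 0;

	/* On success, psize receives the amount of DDT data currently cached. */
	(void) ddt_get_pool_dedup_cached(spa, &psize);
	return (psize);
}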

View File

@ -47,6 +47,7 @@ typedef struct {
const ddt_key_t *ddk);
void (*ddt_op_prefetch)(objset_t *os, uint64_t object,
const ddt_key_t *ddk);
void (*ddt_op_prefetch_all)(objset_t *os, uint64_t object);
int (*ddt_op_update)(objset_t *os, uint64_t object,
const ddt_key_t *ddk, const ddt_phys_t *phys, size_t psize,
dmu_tx_t *tx);

View File

@ -505,6 +505,12 @@ void dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
void dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
dmu_tx_t *tx);
/*
* Get an estimated cache size for an object. Caller must expect races.
*/
int dmu_object_cached_size(objset_t *os, uint64_t object,
uint64_t *l1sz, uint64_t *l2sz);
void dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
int compressed_size, int byteorder, dmu_tx_t *tx);
@ -903,6 +909,8 @@ void dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
void dmu_prefetch_by_dnode(dnode_t *dn, int64_t level, uint64_t offset,
uint64_t len, enum zio_priority pri);
void dmu_prefetch_dnode(objset_t *os, uint64_t object, enum zio_priority pri);
int dmu_prefetch_wait(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size);
typedef struct dmu_object_info {
/* All sizes are in bytes unless otherwise indicated. */

View File

@ -380,6 +380,9 @@ struct dnode {
/* holds prefetch structure */
struct zfetch dn_zfetch;
/* Not in dn_phys, but should be. Set it after taking a hold. */
dmu_object_type_t dn_storage_type; /* type for storage class */
};
/*
@ -462,6 +465,8 @@ void dnode_evict_dbufs(dnode_t *dn);
void dnode_evict_bonus(dnode_t *dn);
void dnode_free_interior_slots(dnode_t *dn);
void dnode_set_storage_type(dnode_t *dn, dmu_object_type_t type);
#define DNODE_IS_DIRTY(_dn) \
((_dn)->dn_dirty_txg >= spa_syncing_txg((_dn)->dn_objset->os_spa))

View File

@ -258,6 +258,9 @@ typedef enum {
ZPOOL_PROP_BCLONEUSED,
ZPOOL_PROP_BCLONESAVED,
ZPOOL_PROP_BCLONERATIO,
ZPOOL_PROP_DEDUP_TABLE_SIZE,
ZPOOL_PROP_DEDUP_TABLE_QUOTA,
ZPOOL_PROP_DEDUPCACHED,
ZPOOL_NUM_PROPS
} zpool_prop_t;
@ -368,6 +371,9 @@ typedef enum {
VDEV_PROP_RAIDZ_EXPANDING,
VDEV_PROP_SLOW_IO_N,
VDEV_PROP_SLOW_IO_T,
VDEV_PROP_TRIM_SUPPORT,
VDEV_PROP_TRIM_ERRORS,
VDEV_PROP_SLOW_IOS,
VDEV_NUM_PROPS
} vdev_prop_t;
@ -1512,6 +1518,7 @@ typedef enum zfs_ioc {
ZFS_IOC_VDEV_GET_PROPS, /* 0x5a55 */
ZFS_IOC_VDEV_SET_PROPS, /* 0x5a56 */
ZFS_IOC_POOL_SCRUB, /* 0x5a57 */
ZFS_IOC_POOL_PREFETCH, /* 0x5a58 */
/*
* Per-platform (Optional) - 8/128 numbers reserved.
@ -1643,6 +1650,11 @@ typedef enum {
ZFS_WAIT_NUM_ACTIVITIES
} zfs_wait_activity_t;
typedef enum {
ZPOOL_PREFETCH_NONE = 0,
ZPOOL_PREFETCH_DDT
} zpool_prefetch_type_t;
/*
* Bookmark name values.
*/
@ -1681,6 +1693,17 @@ typedef enum {
*/
#define ZPOOL_HIDDEN_ARGS "hidden_args"
/*
* The following is used when invoking ZFS_IOC_POOL_GET_PROPS.
*/
#define ZPOOL_GET_PROPS_NAMES "get_props_names"
/*
* Opt-in property names used with ZPOOL_GET_PROPS_NAMES.
* For example, properties that are hidden or expensive to compute.
*/
#define ZPOOL_DEDUPCACHED_PROP_NAME "dedupcached"
/*
* The following are names used when invoking ZFS_IOC_POOL_INITIALIZE.
*/
@ -1720,6 +1743,11 @@ typedef enum {
#define ZFS_WAIT_ACTIVITY "wait_activity"
#define ZFS_WAIT_WAITED "wait_waited"
/*
* The following are names used when invoking ZFS_IOC_POOL_PREFETCH.
*/
#define ZPOOL_PREFETCH_TYPE "prefetch_type"
/*
* Flags for ZFS_IOC_VDEV_SET_STATE
*/

View File

@ -272,7 +272,6 @@ int sa_add_impl(sa_handle_t *, sa_attr_type_t,
uint32_t, sa_data_locator_t, void *, dmu_tx_t *);
void sa_register_update_callback_locked(objset_t *, sa_update_cb_t *);
int sa_size_locked(sa_handle_t *, sa_attr_type_t, int *);
void sa_default_locator(void **, uint32_t *, uint32_t, boolean_t, void *);
int sa_attr_size(sa_os_t *, sa_idx_tab_t *, sa_attr_type_t,

View File

@ -35,11 +35,10 @@
#ifndef _SYS_SPA_H
#define _SYS_SPA_H
#include <sys/avl.h>
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/kstat.h>
#include <sys/nvpair.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/fs/zfs.h>
#include <sys/spa_checksum.h>
@ -1050,8 +1049,8 @@ extern metaslab_class_t *spa_log_class(spa_t *spa);
extern metaslab_class_t *spa_embedded_log_class(spa_t *spa);
extern metaslab_class_t *spa_special_class(spa_t *spa);
extern metaslab_class_t *spa_dedup_class(spa_t *spa);
extern metaslab_class_t *spa_preferred_class(spa_t *spa, uint64_t size,
dmu_object_type_t objtype, uint_t level, uint_t special_smallblk);
extern metaslab_class_t *spa_preferred_class(spa_t *spa, const zio_t *zio);
extern boolean_t spa_special_has_ddt(spa_t *spa);
extern void spa_evicting_os_register(spa_t *, objset_t *os);
extern void spa_evicting_os_deregister(spa_t *, objset_t *os);
@ -1198,6 +1197,8 @@ extern void spa_boot_init(void);
/* properties */
extern int spa_prop_set(spa_t *spa, nvlist_t *nvp);
extern int spa_prop_get(spa_t *spa, nvlist_t **nvp);
extern int spa_prop_get_nvlist(spa_t *spa, char **props,
unsigned int n_props, nvlist_t **outnvl);
extern void spa_prop_clear_bootfs(spa_t *spa, uint64_t obj, dmu_tx_t *tx);
extern void spa_configfile_set(spa_t *, nvlist_t *, boolean_t);

View File

@ -146,6 +146,7 @@ struct spa_aux_vdev {
vdev_t **sav_vdevs; /* devices */
int sav_count; /* number devices */
boolean_t sav_sync; /* sync the device list */
boolean_t sav_label_sync; /* sync aux labels */
nvlist_t **sav_pending; /* pending device additions */
uint_t sav_npending; /* # pending devices */
};
@ -465,6 +466,9 @@ struct spa {
boolean_t spa_waiters_cancel; /* waiters should return */
char *spa_compatibility; /* compatibility file(s) */
uint64_t spa_dedup_table_quota; /* property DDT maximum size */
uint64_t spa_dedup_dsize; /* cached on-disk size of DDT */
uint64_t spa_dedup_class_full_txg; /* txg dedup class was full */
/*
* spa_refcount & spa_config_lock must be the last elements

View File

@ -225,6 +225,7 @@ int zap_lookup_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints, uint64_t integer_size, uint64_t num_integers, void *buf);
int zap_contains(objset_t *ds, uint64_t zapobj, const char *name);
int zap_prefetch(objset_t *os, uint64_t zapobj, const char *name);
int zap_prefetch_object(objset_t *os, uint64_t zapobj);
int zap_prefetch_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints);

View File

@ -413,6 +413,7 @@ void procfs_list_add(procfs_list_t *procfs_list, void *p);
#define KM_NORMALPRI 0 /* not needed with UMEM_DEFAULT */
#define KMC_NODEBUG UMC_NODEBUG
#define KMC_KVMEM 0x0
#define KMC_RECLAIMABLE 0x0
#define kmem_alloc(_s, _f) umem_alloc(_s, _f)
#define kmem_zalloc(_s, _f) umem_zalloc(_s, _f)
#define kmem_free(_b, _s) umem_free(_b, _s)

View File

@ -467,6 +467,21 @@ typedef struct zil_stats {
*/
kstat_named_t zil_commit_writer_count;
/*
* Number of times a ZIL commit failed and the ZIL was forced to fall
* back to txg_wait_synced(). The separate counts are for different
* reasons:
* - error: ZIL IO (write/flush) returned an error
* (see zil_commit_impl())
* - stall: LWB block allocation failed, ZIL chain abandoned
* (see zil_commit_writer_stall())
* - suspend: ZIL suspended
* (see zil_commit(), zil_get_commit_list())
*/
kstat_named_t zil_commit_error_count;
kstat_named_t zil_commit_stall_count;
kstat_named_t zil_commit_suspend_count;
/*
* Number of transactions (reads, writes, renames, etc.)
* that have been committed.
@ -510,6 +525,9 @@ typedef struct zil_stats {
typedef struct zil_sums {
wmsum_t zil_commit_count;
wmsum_t zil_commit_writer_count;
wmsum_t zil_commit_error_count;
wmsum_t zil_commit_stall_count;
wmsum_t zil_commit_suspend_count;
wmsum_t zil_itx_count;
wmsum_t zil_itx_indirect_count;
wmsum_t zil_itx_indirect_bytes;

View File

@ -356,6 +356,7 @@ typedef struct zio_prop {
uint8_t zp_iv[ZIO_DATA_IV_LEN];
uint8_t zp_mac[ZIO_DATA_MAC_LEN];
uint32_t zp_zpl_smallblk;
dmu_object_type_t zp_storage_type;
} zio_prop_t;
typedef struct zio_cksum_report zio_cksum_report_t;

View File

@ -18,22 +18,19 @@
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2024, Klara, Inc.
*/
#ifndef _SYS_ZVOL_IMPL_H
#define _SYS_ZVOL_IMPL_H
#include <sys/zfs_context.h>
#define ZVOL_RDONLY 0x1
/*
* Whether the zvol has been written to (as opposed to ZVOL_RDONLY, which
* specifies whether or not the zvol _can_ be written to)
*/
#define ZVOL_WRITTEN_TO 0x2
#define ZVOL_DUMPIFIED 0x4
#define ZVOL_EXCL 0x8
#define ZVOL_RDONLY (1<<0) /* zvol is readonly (writes rejected) */
#define ZVOL_WRITTEN_TO (1<<1) /* zvol has been written to (needs flush) */
#define ZVOL_EXCL (1<<2) /* zvol has O_EXCL client right now */
#define ZVOL_REMOVING (1<<3) /* zvol waiting to remove minor */
/*
* The in-core state of each volume.
@ -57,6 +54,7 @@ typedef struct zvol_state {
kmutex_t zv_state_lock; /* protects zvol_state_t */
atomic_t zv_suspend_ref; /* refcount for suspend */
krwlock_t zv_suspend_lock; /* suspend lock */
kcondvar_t zv_removing_cv; /* ready to remove minor */
struct zvol_state_os *zv_zso; /* private platform state */
boolean_t zv_threading; /* volthreading property */
} zvol_state_t;

View File

@ -44,8 +44,6 @@
#include "nfs.h"
#define _PATH_MOUNTDPID "/var/run/mountd.pid"
#define OPTSSIZE 1024
#define MAXLINESIZE (PATH_MAX + OPTSSIZE)
#define ZFS_EXPORTS_FILE "/etc/zfs/exports"
#define ZFS_EXPORTS_LOCK ZFS_EXPORTS_FILE".lock"
@ -69,17 +67,30 @@
* index, quiet
*/
static int
translate_opts(const char *shareopts, FILE *out)
translate_opts(char *oldopts, FILE *out)
{
static const char *const known_opts[] = { "ro", "maproot", "mapall",
"mask", "network", "sec", "alldirs", "public", "webnfs", "index",
"quiet" };
char oldopts[OPTSSIZE], newopts[OPTSSIZE];
char *o, *s = NULL;
char *newopts, *o, *s = NULL;
unsigned int i;
size_t len;
size_t len, newopts_len;
int ret;
strlcpy(oldopts, shareopts, sizeof (oldopts));
/*
* Calculate the length needed for the worst case of a single
* character option:
* - Add one to strlen(oldopts) so that the trailing nul is counted
* as a separator.
* - Multiply by 3/2 since the single character option plus separator
* is expanded to 3 characters.
* - Add one for the trailing nul. Needed for a single repetition of
* the single character option and certain other cases.
*/
newopts_len = (strlen(oldopts) + 1) * 3 / 2 + 1;
newopts = malloc(newopts_len);
if (newopts == NULL)
return (EOF);
newopts[0] = '\0';
s = oldopts;
while ((o = strsep(&s, "-, ")) != NULL) {
@ -89,14 +100,16 @@ translate_opts(const char *shareopts, FILE *out)
len = strlen(known_opts[i]);
if (strncmp(known_opts[i], o, len) == 0 &&
(o[len] == '\0' || o[len] == '=')) {
strlcat(newopts, "-", sizeof (newopts));
strlcat(newopts, "-", newopts_len);
break;
}
}
strlcat(newopts, o, sizeof (newopts));
strlcat(newopts, " ", sizeof (newopts));
strlcat(newopts, o, newopts_len);
strlcat(newopts, " ", newopts_len);
}
return (fputs(newopts, out));
ret = fputs(newopts, out);
free(newopts);
return (ret);
}
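A worked check of the sizing comment above (editor's illustration, not part of this diff; assumes <assert.h> and <string.h>):

static void
example_check_newopts_sizing(void)
{
	const char *worst = "a,a,a";			/* 5 bytes of input */
	size_t need = strlen("-a -a -a ") + 1;		/* 9 + 1 = 10 bytes */
	size_t alloc = (strlen(worst) + 1) * 3 / 2 + 1;	/* (5+1)*3/2+1 = 10 */

	assert(alloc >= need);
}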
static int
@ -106,20 +119,38 @@ nfs_enable_share_impl(sa_share_impl_t impl_share, FILE *tmpfile)
if (strcmp(shareopts, "on") == 0)
shareopts = "";
boolean_t need_free;
char *mp;
boolean_t need_free, fnd_semi;
char *mp, *lineopts, *exportopts, *s;
size_t whitelen;
int rc = nfs_escape_mountpoint(impl_share->sa_mountpoint, &mp,
&need_free);
if (rc != SA_OK)
return (rc);
if (fputs(mp, tmpfile) == EOF ||
fputc('\t', tmpfile) == EOF ||
translate_opts(shareopts, tmpfile) == EOF ||
fputc('\n', tmpfile) == EOF) {
fprintf(stderr, "failed to write to temporary file\n");
rc = SA_SYSTEM_ERR;
lineopts = strdup(shareopts);
if (lineopts == NULL)
return (SA_SYSTEM_ERR);
s = lineopts;
fnd_semi = B_FALSE;
while ((exportopts = strsep(&s, ";")) != NULL) {
if (s != NULL)
fnd_semi = B_TRUE;
/* When ';' separators are present, skip option sets that are only whitespace. */
if (fnd_semi) {
whitelen = strspn(exportopts, "\t ");
if (exportopts[whitelen] == '\0')
continue;
}
if (fputs(mp, tmpfile) == EOF ||
fputc('\t', tmpfile) == EOF ||
translate_opts(exportopts, tmpfile) == EOF ||
fputc('\n', tmpfile) == EOF) {
fprintf(stderr, "failed to write to temporary file\n");
rc = SA_SYSTEM_ERR;
break;
}
}
free(lineopts);
if (need_free)
free(mp);
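For context, an illustrative sketch (not part of this diff): with a hypothetical sharenfs value of "maproot=root,network=192.168.0.0/24;ro" on a dataset mounted at /tank/data, the loop above now emits one exports line per ';'-separated option set for the same mount point, roughly:

	/tank/data	-maproot=root -network=192.168.0.0/24
	/tank/data	-ro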

View File

@ -37,5 +37,9 @@
/* Print a timestamp in either Unix or standard format. */
void print_timestamp(uint_t);
/* Return timestamp in either Unix or standard format in provided buffer */
void get_timestamp(uint_t, char *, int);
/* convert time_t to standard format */
void format_timestamp(time_t, char *, int);
#endif /* _STATCOMMON_H */

View File

@ -62,3 +62,45 @@ print_timestamp(uint_t timestamp_fmt)
(void) printf("%s\n", dstr);
}
}
/*
* Return the timestamp as a decimal string representation of the time_t
* value (-T u was specified) or in date(1) format (-T d was specified).
*/
void
get_timestamp(uint_t timestamp_fmt, char *buf, int len)
{
time_t t = time(NULL);
static const char *fmt = NULL;
/* We only need to retrieve this once per invocation */
if (fmt == NULL)
fmt = nl_langinfo(_DATE_FMT);
if (timestamp_fmt == UDATE) {
(void) snprintf(buf, len, "%lld", (longlong_t)t);
} else if (timestamp_fmt == DDATE) {
struct tm tm;
strftime(buf, len, fmt, localtime_r(&t, &tm));
}
}
/*
* Format the provided timestamp into a human-readable string.
*/
void
format_timestamp(time_t t, char *buf, int len)
{
struct tm tm;
static const char *fmt = NULL;
if (t == 0) {
snprintf(buf, len, "-");
return;
}
/* We only need to retrieve this once per invocation */
if (fmt == NULL)
fmt = nl_langinfo(_DATE_FMT);
strftime(buf, len, fmt, localtime_r(&t, &tm));
}
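A brief usage sketch (editor's illustration, not part of this diff; assumes the includes already present in this file and the UDATE constant from statcommon.h; the function name is hypothetical):

void
example_emit_timestamps(void)
{
	char now[64], when[64];

	get_timestamp(UDATE, now, sizeof (now));		/* seconds since epoch */
	format_timestamp(time(NULL), when, sizeof (when));	/* date(1) style */
	(void) printf("%s %s\n", now, when);
}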

View File

@ -143,7 +143,9 @@
<elf-symbol name='avl_update_gt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update_lt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_walk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='format_timestamp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_system_hostid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_timestamp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getexecname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getextmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getmntany' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@ -1151,6 +1153,18 @@
<parameter type-id='3502e3ff' name='timestamp_fmt'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='get_timestamp' mangled-name='get_timestamp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_timestamp'>
<parameter type-id='3502e3ff' name='timestamp_fmt'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='95e97e5e' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='format_timestamp' mangled-name='format_timestamp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='format_timestamp'>
<parameter type-id='c9d12d66' name='t'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='95e97e5e' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libuutil/uu_alloc.c' language='LANG_C99'>
<type-decl name='char' size-in-bits='8' id='a84c031d'/>

View File

@ -179,10 +179,12 @@
<elf-symbol name='fletcher_4_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_native_varsize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='format_timestamp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fsleep' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_dataset_depth' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_system_hostid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getexecname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_timestamp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getextmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getmntany' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getprop_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@ -280,6 +282,7 @@
<elf-symbol name='vdev_prop_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_user' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zcmd_print_json' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_depends_on' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_is_supported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_is_valid_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@ -452,11 +455,13 @@
<elf-symbol name='zfs_userspace' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_valid_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_kernel' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_print' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_userland' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_wait_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_zpl_version_map' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_add_propname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_checkpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_clear_label' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@ -481,6 +486,7 @@
<elf-symbol name='zpool_feature_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_config' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_vdev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_parent_vdev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_vdev_by_physpath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_free_handles' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_all_vdev_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@ -520,9 +526,10 @@
<elf-symbol name='zpool_open' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_open_canfail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_pool_state_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prefetch' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prepare_and_label_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prepare_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_print_unsup_feat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_collect_unsup_feat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_align_right' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_column_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_default_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@ -575,12 +582,14 @@
<elf-symbol name='zpool_vdev_split' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_wait_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_collect_property' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_free_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_get_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_iter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_iter_common' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_nvlist_one_property' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_print_one_property' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_hidden' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@ -1282,6 +1291,18 @@
<parameter type-id='3502e3ff' name='timestamp_fmt'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='get_timestamp' mangled-name='get_timestamp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_timestamp'>
<parameter type-id='3502e3ff' name='timestamp_fmt'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='95e97e5e' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='format_timestamp' mangled-name='format_timestamp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='format_timestamp'>
<parameter type-id='c9d12d66' name='t'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='95e97e5e' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libtpool/thread_pool.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='49ef3ffd' size-in-bits='1024' id='a14403f5'>
@ -1642,6 +1663,9 @@
<class-decl name='uu_avl' is-struct='yes' visibility='default' is-declaration-only='yes' id='4af029d1'/>
<class-decl name='uu_avl_pool' is-struct='yes' visibility='default' is-declaration-only='yes' id='12a530a8'/>
<class-decl name='uu_avl_walk' is-struct='yes' visibility='default' is-declaration-only='yes' id='e70a39e3'/>
<array-type-def dimensions='1' type-id='80f4b756' size-in-bits='256' id='71dc54ac'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<type-decl name='int' size-in-bits='32' id='95e97e5e'/>
<type-decl name='long int' size-in-bits='64' id='bd54fe1a'/>
<type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
@ -2096,7 +2120,7 @@
<var-decl name='zfs_props_table' type-id='ae3e8ca6' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='zpool_handle' size-in-bits='2560' is-struct='yes' visibility='default' id='67002a8a'>
<class-decl name='zpool_handle' size-in-bits='2816' is-struct='yes' visibility='default' id='67002a8a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zpool_hdl' type-id='b0382bb3' visibility='default'/>
</data-member>
@ -2109,19 +2133,25 @@
<data-member access='public' layout-offset-in-bits='2176'>
<var-decl name='zpool_state' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2208'>
<var-decl name='zpool_n_propnames' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2240'>
<var-decl name='zpool_config_size' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2304'>
<var-decl name='zpool_config' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2368'>
<var-decl name='zpool_old_config' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2432'>
<var-decl name='zpool_props' type-id='5ce45b60' visibility='default'/>
<var-decl name='zpool_propnames' type-id='71dc54ac' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2496'>
<var-decl name='zpool_config_size' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2560'>
<var-decl name='zpool_config' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2624'>
<var-decl name='zpool_old_config' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2688'>
<var-decl name='zpool_props' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2752'>
<var-decl name='zpool_start_block' type-id='804dc465' visibility='default'/>
</data-member>
</class-decl>
@ -2921,7 +2951,10 @@
<enumerator name='ZPOOL_PROP_BCLONEUSED' value='33'/>
<enumerator name='ZPOOL_PROP_BCLONESAVED' value='34'/>
<enumerator name='ZPOOL_PROP_BCLONERATIO' value='35'/>
<enumerator name='ZPOOL_NUM_PROPS' value='36'/>
<enumerator name='ZPOOL_PROP_DEDUP_TABLE_SIZE' value='36'/>
<enumerator name='ZPOOL_PROP_DEDUP_TABLE_QUOTA' value='37'/>
<enumerator name='ZPOOL_PROP_DEDUPCACHED' value='38'/>
<enumerator name='ZPOOL_NUM_PROPS' value='39'/>
</enum-decl>
<typedef-decl name='zpool_prop_t' type-id='af1ba157' id='5d0c23fb'/>
<typedef-decl name='regoff_t' type-id='95e97e5e' id='54a2a2a8'/>
@ -5702,7 +5735,10 @@
<enumerator name='VDEV_PROP_RAIDZ_EXPANDING' value='46'/>
<enumerator name='VDEV_PROP_SLOW_IO_N' value='47'/>
<enumerator name='VDEV_PROP_SLOW_IO_T' value='48'/>
<enumerator name='VDEV_NUM_PROPS' value='49'/>
<enumerator name='VDEV_PROP_TRIM_SUPPORT' value='49'/>
<enumerator name='VDEV_PROP_TRIM_ERRORS' value='50'/>
<enumerator name='VDEV_PROP_SLOW_IOS' value='51'/>
<enumerator name='VDEV_NUM_PROPS' value='52'/>
</enum-decl>
<typedef-decl name='vdev_prop_t' type-id='1573bec8' id='5aa5c90c'/>
<class-decl name='zpool_load_policy' size-in-bits='256' is-struct='yes' visibility='default' id='2f65b36f'>
@ -5891,6 +5927,7 @@
<enumerator name='ZFS_IOC_VDEV_GET_PROPS' value='23125'/>
<enumerator name='ZFS_IOC_VDEV_SET_PROPS' value='23126'/>
<enumerator name='ZFS_IOC_POOL_SCRUB' value='23127'/>
<enumerator name='ZFS_IOC_POOL_PREFETCH' value='23128'/>
<enumerator name='ZFS_IOC_PLATFORM' value='23168'/>
<enumerator name='ZFS_IOC_EVENTS_NEXT' value='23169'/>
<enumerator name='ZFS_IOC_EVENTS_CLEAR' value='23170'/>
@ -5919,6 +5956,12 @@
<enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='9'/>
</enum-decl>
<typedef-decl name='zpool_wait_activity_t' type-id='849338e3' id='73446457'/>
<enum-decl name='zpool_prefetch_type_t' naming-typedef-id='e55ff6bc' id='0299ab50'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_PREFETCH_NONE' value='0'/>
<enumerator name='ZPOOL_PREFETCH_DDT' value='1'/>
</enum-decl>
<typedef-decl name='zpool_prefetch_type_t' type-id='0299ab50' id='e55ff6bc'/>
<enum-decl name='spa_feature' id='33ecb627'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='SPA_FEATURE_NONE' value='-1'/>
@ -5966,6 +6009,8 @@
<enumerator name='SPA_FEATURES' value='41'/>
</enum-decl>
<typedef-decl name='spa_feature_t' type-id='33ecb627' id='d6618c78'/>
<qualified-type-def type-id='80f4b756' const='yes' id='b99c00c9'/>
<pointer-type-def type-id='b99c00c9' size-in-bits='64' id='13956559'/>
<qualified-type-def type-id='22cce67b' const='yes' id='d2816df0'/>
<pointer-type-def type-id='d2816df0' size-in-bits='64' id='3bbfee2e'/>
<qualified-type-def type-id='b96825af' const='yes' id='2b61797f'/>
@ -6058,6 +6103,11 @@
<parameter type-id='37e3bd22'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_pool_prefetch' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='e55ff6bc'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_set_bootenv' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='22cce67b'/>
@ -6203,6 +6253,13 @@
<parameter type-id='9da381c4'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_add_string_array' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='13956559'/>
<parameter type-id='3502e3ff'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
@ -6391,6 +6448,11 @@
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_prefetch' mangled-name='zpool_prefetch' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prefetch'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='e55ff6bc' name='type'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_add' mangled-name='zpool_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_add'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='5ce45b60' name='nvroot'/>
@ -6413,6 +6475,8 @@
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='95e97e5e' name='reason'/>
<parameter type-id='5ce45b60' name='config'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='size'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_import' mangled-name='zpool_import' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import'>
@ -6422,8 +6486,10 @@
<parameter type-id='26a90f95' name='altroot'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_print_unsup_feat' mangled-name='zpool_print_unsup_feat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_print_unsup_feat'>
<function-decl name='zpool_collect_unsup_feat' mangled-name='zpool_collect_unsup_feat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_collect_unsup_feat'>
<parameter type-id='5ce45b60' name='config'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='size'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_import_props' mangled-name='zpool_import_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_props'>
@ -6475,6 +6541,14 @@
<parameter type-id='37e3bd22' name='log'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_find_parent_vdev' mangled-name='zpool_find_parent_vdev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_parent_vdev'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='37e3bd22' name='avail_spare'/>
<parameter type-id='37e3bd22' name='l2cache'/>
<parameter type-id='37e3bd22' name='log'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_vdev_path_to_guid' mangled-name='zpool_vdev_path_to_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_path_to_guid'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
@ -6585,6 +6659,11 @@
<parameter type-id='95e97e5e' name='name_flags'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='zpool_add_propname' mangled-name='zpool_add_propname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_add_propname'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_get_errlog' mangled-name='zpool_get_errlog' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_errlog'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='857bb57e' name='nverrlistp'/>
@ -8146,6 +8225,20 @@
<parameter type-id='2e45de5d' name='argtype'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='zcmd_print_json' mangled-name='zcmd_print_json' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zcmd_print_json'>
<parameter type-id='5ce45b60' name='nvl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_nvlist_one_property' mangled-name='zprop_nvlist_one_property' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_nvlist_one_property'>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='80f4b756' name='value'/>
<parameter type-id='a2256d42' name='sourcetype'/>
<parameter type-id='80f4b756' name='source'/>
<parameter type-id='80f4b756' name='recvd_value'/>
<parameter type-id='5ce45b60' name='nvl'/>
<parameter type-id='c19b74c3' name='as_int'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_print_one_property' mangled-name='zprop_print_one_property' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_print_one_property'>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='0d2a0670' name='cbp'/>
@ -8156,6 +8249,17 @@
<parameter type-id='80f4b756' name='recvd_value'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_collect_property' mangled-name='zprop_collect_property' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_collect_property'>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='0d2a0670' name='cbp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='80f4b756' name='value'/>
<parameter type-id='a2256d42' name='sourcetype'/>
<parameter type-id='80f4b756' name='source'/>
<parameter type-id='80f4b756' name='recvd_value'/>
<parameter type-id='5ce45b60' name='nvl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_get_list' mangled-name='zprop_get_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_get_list'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='props'/>
@ -8184,6 +8288,9 @@
<function-decl name='use_color' mangled-name='use_color' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='use_color'>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_version_nvlist' mangled-name='zfs_version_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_nvlist'>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='printf_color' mangled-name='printf_color' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='printf_color'>
<parameter type-id='80f4b756' name='color'/>
<parameter type-id='80f4b756' name='format'/>
@ -8684,7 +8791,6 @@
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzutil/zutil_device_path.c' language='LANG_C99'>
<pointer-type-def type-id='b99c00c9' size-in-bits='64' id='13956559'/>
<function-decl name='zpool_default_search_paths' mangled-name='zpool_default_search_paths' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_default_search_paths'>
<parameter type-id='78c01427'/>
<return type-id='13956559'/>
@ -9146,7 +9252,6 @@
<array-type-def dimensions='1' type-id='b99c00c9' size-in-bits='2624' id='5ce15418'>
<subrange length='41' type-id='7359adad' id='cb834f44'/>
</array-type-def>
<qualified-type-def type-id='80f4b756' const='yes' id='b99c00c9'/>
<pointer-type-def type-id='8f92235e' size-in-bits='64' id='90421557'/>
<function-decl name='nvpair_value_uint32' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='dace003f'/>

View File

@ -94,12 +94,15 @@ struct zfs_handle {
* snapshots of volumes.
*/
#define ZFS_IS_VOLUME(zhp) ((zhp)->zfs_head_type == ZFS_TYPE_VOLUME)
#define ZHP_MAX_PROPNAMES 4
struct zpool_handle {
libzfs_handle_t *zpool_hdl;
zpool_handle_t *zpool_next;
char zpool_name[ZFS_MAX_DATASET_NAME_LEN];
int zpool_state;
unsigned int zpool_n_propnames;
const char *zpool_propnames[ZHP_MAX_PROPNAMES];
size_t zpool_config_size;
nvlist_t *zpool_config;
nvlist_t *zpool_old_config;

View File

@ -79,6 +79,13 @@ zpool_get_all_props(zpool_handle_t *zhp)
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zhp->zpool_n_propnames > 0) {
nvlist_t *innvl = fnvlist_alloc();
fnvlist_add_string_array(innvl, ZPOOL_GET_PROPS_NAMES,
zhp->zpool_propnames, zhp->zpool_n_propnames);
zcmd_write_src_nvlist(hdl, &zc, innvl);
}
zcmd_alloc_dst_nvlist(hdl, &zc, 0);
while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
@ -318,6 +325,15 @@ zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
return (0);
}
/*
* ZPOOL_PROP_DEDUPCACHED can be fetched by name only using
* the ZPOOL_GET_PROPS_NAMES mechanism
*/
if (prop == ZPOOL_PROP_DEDUPCACHED) {
zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
(void) zpool_get_all_props(zhp);
}
if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
prop != ZPOOL_PROP_NAME)
return (-1);
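A hedged sketch of the opt-in flow from a libzfs consumer's side (editor's illustration, not part of this diff; the pool name and function name are hypothetical):

static void
example_print_dedupcached(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp = zpool_open(hdl, "tank");
	char value[32];

	if (zhp == NULL)
		return;
	/* Request the expensive property by name, then fetch it as usual. */
	zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
	if (zpool_get_prop(zhp, ZPOOL_PROP_DEDUPCACHED, value,
	    sizeof (value), NULL, B_TRUE) == 0)
		(void) printf("dedupcached: %s\n", value);
	zpool_close(zhp);
}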
@ -332,6 +348,24 @@ zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
intval = zpool_get_prop_int(zhp, prop, &src);
switch (prop) {
case ZPOOL_PROP_DEDUP_TABLE_QUOTA:
/*
* If dedup quota is 0, we translate this into 'none'
* (unless literal is set). And if it is UINT64_MAX
* we translate that as 'automatic' (limit to the size of
* the dedicated dedup vdev). Otherwise, fall through
* to the regular number formatting.
*/
if (intval == 0) {
(void) strlcpy(buf, literal ? "0" : "none",
len);
break;
} else if (intval == UINT64_MAX) {
(void) strlcpy(buf, "auto", len);
break;
}
zfs_fallthrough;
case ZPOOL_PROP_SIZE:
case ZPOOL_PROP_ALLOCATED:
case ZPOOL_PROP_FREE:
@ -342,6 +376,8 @@ zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
case ZPOOL_PROP_MAXDNODESIZE:
case ZPOOL_PROP_BCLONESAVED:
case ZPOOL_PROP_BCLONEUSED:
case ZPOOL_PROP_DEDUP_TABLE_SIZE:
case ZPOOL_PROP_DEDUPCACHED:
if (literal)
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
@ -493,9 +529,10 @@ zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
zpool_prop_t prop;
const char *strval;
uint64_t intval;
const char *slash, *check;
const char *check;
struct stat64 statbuf;
zpool_handle_t *zhp;
char *parent, *slash;
char report[1024];
if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
@ -749,30 +786,36 @@ zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
goto error;
}
slash = strrchr(strval, '/');
parent = strdup(strval);
if (parent == NULL) {
(void) zfs_error(hdl, EZFS_NOMEM, errbuf);
goto error;
}
slash = strrchr(parent, '/');
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is not a valid file"), strval);
"'%s' is not a valid file"), parent);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
free(parent);
goto error;
}
*(char *)slash = '\0';
*slash = '\0';
if (strval[0] != '\0' &&
(stat64(strval, &statbuf) != 0 ||
if (parent[0] != '\0' &&
(stat64(parent, &statbuf) != 0 ||
!S_ISDIR(statbuf.st_mode))) {
*(char *)slash = '/';
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is not a valid directory"),
strval);
parent);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
free(parent);
goto error;
}
free(parent);
*(char *)slash = '/';
break;
case ZPOOL_PROP_COMPATIBILITY:
@ -1719,6 +1762,28 @@ zpool_discard_checkpoint(zpool_handle_t *zhp)
return (0);
}
/*
* Prefetch the specified type of data for the given pool.
*/
int
zpool_prefetch(zpool_handle_t *zhp, zpool_prefetch_type_t type)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
char msg[1024];
int error;
error = lzc_pool_prefetch(zhp->zpool_name, type);
if (error != 0) {
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot prefetch %s in '%s'"),
type == ZPOOL_PREFETCH_DDT ? "ddt" : "", zhp->zpool_name);
(void) zpool_standard_error(hdl, error, msg);
return (-1);
}
return (0);
}
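A minimal caller-side sketch (editor's illustration, not part of this diff; the wrapper name is hypothetical):

static int
example_prefetch_ddt(zpool_handle_t *zhp)
{
	/* The only prefetch type currently wired up is the DDT. */
	return (zpool_prefetch(zhp, ZPOOL_PREFETCH_DDT));
}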
/*
* Add the given vdevs to the pool. The caller must have already performed the
* necessary verification to ensure that the vdev specification is well-formed.
@ -1932,23 +1997,18 @@ zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
nvlist_t *config)
nvlist_t *config, char *buf, size_t size)
{
nvlist_t *nv = NULL;
int64_t loss = -1;
uint64_t edata = UINT64_MAX;
uint64_t rewindto;
struct tm t;
char timestr[128];
char timestr[128], temp[1024];
if (!hdl->libzfs_printerr)
return;
if (reason >= 0)
(void) printf(dgettext(TEXT_DOMAIN, "action: "));
else
(void) printf(dgettext(TEXT_DOMAIN, "\t"));
/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
@ -1959,56 +2019,61 @@ zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
&edata);
(void) printf(dgettext(TEXT_DOMAIN,
(void) snprintf(buf, size, dgettext(TEXT_DOMAIN,
"Recovery is possible, but will result in some data loss.\n"));
if (localtime_r((time_t *)&rewindto, &t) != NULL &&
ctime_r((time_t *)&rewindto, timestr) != NULL) {
timestr[24] = 0;
(void) printf(dgettext(TEXT_DOMAIN,
(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
"\tReturning the pool to its state as of %s\n"
"\tshould correct the problem. "),
timestr);
"\tshould correct the problem. "), timestr);
(void) strlcat(buf, temp, size);
} else {
(void) printf(dgettext(TEXT_DOMAIN,
(void) strlcat(buf, dgettext(TEXT_DOMAIN,
"\tReverting the pool to an earlier state "
"should correct the problem.\n\t"));
"should correct the problem.\n\t"), size);
}
if (loss > 120) {
(void) printf(dgettext(TEXT_DOMAIN,
(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
"Approximately %lld minutes of data\n"
"\tmust be discarded, irreversibly. "),
((longlong_t)loss + 30) / 60);
(void) strlcat(buf, temp, size);
} else if (loss > 0) {
(void) printf(dgettext(TEXT_DOMAIN,
(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
"Approximately %lld seconds of data\n"
"\tmust be discarded, irreversibly. "),
(longlong_t)loss);
(void) strlcat(buf, temp, size);
}
if (edata != 0 && edata != UINT64_MAX) {
if (edata == 1) {
(void) printf(dgettext(TEXT_DOMAIN,
(void) strlcat(buf, dgettext(TEXT_DOMAIN,
"After rewind, at least\n"
"\tone persistent user-data error will remain. "));
"\tone persistent user-data error will remain. "),
size);
} else {
(void) printf(dgettext(TEXT_DOMAIN,
(void) strlcat(buf, dgettext(TEXT_DOMAIN,
"After rewind, several\n"
"\tpersistent user-data errors will remain. "));
"\tpersistent user-data errors will remain. "),
size);
}
}
(void) printf(dgettext(TEXT_DOMAIN,
(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
"Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
reason >= 0 ? "clear" : "import", name);
(void) strlcat(buf, temp, size);
(void) printf(dgettext(TEXT_DOMAIN,
(void) strlcat(buf, dgettext(TEXT_DOMAIN,
"A scrub of the pool\n"
"\tis strongly recommended after recovery.\n"));
"\tis strongly recommended after recovery.\n"), size);
return;
no_info:
(void) printf(dgettext(TEXT_DOMAIN,
"Destroy and re-create the pool from\n\ta backup source.\n"));
(void) strlcat(buf, dgettext(TEXT_DOMAIN,
"Destroy and re-create the pool from\n\ta backup source.\n"), size);
}
/*
@ -2077,9 +2142,10 @@ print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
}
void
zpool_print_unsup_feat(nvlist_t *config)
zpool_collect_unsup_feat(nvlist_t *config, char *buf, size_t size)
{
nvlist_t *nvinfo, *unsup_feat;
char temp[512];
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
unsup_feat = fnvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT);
@ -2087,10 +2153,14 @@ zpool_print_unsup_feat(nvlist_t *config)
for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
const char *desc = fnvpair_value_string(nvp);
if (strlen(desc) > 0)
(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
else
(void) printf("\t%s\n", nvpair_name(nvp));
if (strlen(desc) > 0) {
(void) snprintf(temp, 512, "\t%s (%s)\n",
nvpair_name(nvp), desc);
(void) strlcat(buf, temp, size);
} else {
(void) snprintf(temp, 512, "\t%s\n", nvpair_name(nvp));
(void) strlcat(buf, temp, size);
}
}
}
@ -2113,6 +2183,7 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
const char *origname;
int ret;
int error = 0;
char buf[2048];
char errbuf[ERRBUFLEN];
origname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
@ -2195,7 +2266,9 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
(void) printf(dgettext(TEXT_DOMAIN, "This "
"pool uses the following feature(s) not "
"supported by this system:\n"));
zpool_print_unsup_feat(nv);
memset(buf, 0, 2048);
zpool_collect_unsup_feat(nv, buf, 2048);
(void) printf("%s", buf);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_CAN_RDONLY)) {
(void) printf(dgettext(TEXT_DOMAIN,
@ -2294,8 +2367,11 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
break;
default:
(void) zpool_standard_error(hdl, error, desc);
memset(buf, 0, 2048);
zpool_explain_recover(hdl,
newname ? origname : thename, -error, nv);
newname ? origname : thename, -error, nv,
buf, 2048);
(void) printf("\t%s", buf);
break;
}
@ -2794,10 +2870,13 @@ zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
* the nvpair name to determine how we should look for the device.
* 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
* spare; but FALSE if its an INUSE spare.
*
* If 'return_parent' is set, then return the *parent* of the vdev you're
* searching for rather than the vdev itself.
*/
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
boolean_t *l2cache, boolean_t *log, boolean_t return_parent)
{
uint_t c, children;
nvlist_t **child;
@ -2805,6 +2884,8 @@ vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
uint64_t is_log;
const char *srchkey;
nvpair_t *pair = nvlist_next_nvpair(search, NULL);
const char *tmp = NULL;
boolean_t is_root;
/* Nothing to look for */
if (search == NULL || pair == NULL)
@ -2813,6 +2894,12 @@ vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
/* Obtain the key we will use to search */
srchkey = nvpair_name(pair);
nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &tmp);
if (tmp != NULL && strcmp(tmp, "root") == 0)
is_root = B_TRUE;
else
is_root = B_FALSE;
switch (nvpair_type(pair)) {
case DATA_TYPE_UINT64:
if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
@ -2943,7 +3030,7 @@ vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
avail_spare, l2cache, NULL, return_parent)) != NULL) {
/*
* The 'is_log' value is only set for the toplevel
* vdev, not the leaf vdevs. So we always lookup the
@ -2956,7 +3043,7 @@ vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
is_log) {
*log = B_TRUE;
}
return (ret);
return (ret && return_parent && !is_root ? nv : ret);
}
}
@ -2964,9 +3051,11 @@ vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
avail_spare, l2cache, NULL, return_parent))
!= NULL) {
*avail_spare = B_TRUE;
return (ret);
return (ret && return_parent &&
!is_root ? nv : ret);
}
}
}
@ -2975,9 +3064,11 @@ vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
avail_spare, l2cache, NULL, return_parent))
!= NULL) {
*l2cache = B_TRUE;
return (ret);
return (ret && return_parent &&
!is_root ? nv : ret);
}
}
}
@ -3012,7 +3103,8 @@ zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
*l2cache = B_FALSE;
if (log != NULL)
*log = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,
B_FALSE);
fnvlist_free(search);
return (ret);
@ -3040,11 +3132,12 @@ zpool_vdev_is_interior(const char *name)
}
/*
* Lookup the nvlist for a given vdev.
* Lookup the nvlist for a given vdev or vdev's parent (depending on
* if 'return_parent' is set).
*/
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
static nvlist_t *
__zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log, boolean_t return_parent)
{
char *end;
nvlist_t *nvroot, *search, *ret;
@ -3081,12 +3174,30 @@ zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
*l2cache = B_FALSE;
if (log != NULL)
*log = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,
return_parent);
fnvlist_free(search);
return (ret);
}
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
{
return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,
B_FALSE));
}
/* Given a vdev path, return its parent's nvlist */
nvlist_t *
zpool_find_parent_vdev(zpool_handle_t *zhp, const char *path,
boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,
B_TRUE));
}
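
A hedged usage sketch for the zpool_find_parent_vdev() entry point added above (the pool name "tank" and device name "sda1" are illustrative, and error handling is kept minimal):

#include <libzfs.h>
#include <stdio.h>

int
main(void)
{
	libzfs_handle_t *hdl = libzfs_init();
	zpool_handle_t *zhp;
	boolean_t spare, l2cache, log;
	nvlist_t *parent;

	if (hdl == NULL)
		return (1);
	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
		parent = zpool_find_parent_vdev(zhp, "sda1",
		    &spare, &l2cache, &log);
		if (parent != NULL)
			(void) printf("parent vdev type: %s\n",
			    fnvlist_lookup_string(parent, ZPOOL_CONFIG_TYPE));
		zpool_close(zhp);
	}
	libzfs_fini(hdl);
	return (0);
}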
/*
* Convert a vdev path to a GUID. Returns GUID or 0 on error.
*
@ -4385,6 +4496,14 @@ zbookmark_mem_compare(const void *a, const void *b)
return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}
void
zpool_add_propname(zpool_handle_t *zhp, const char *propname)
{
assert(zhp->zpool_n_propnames < ZHP_MAX_PROPNAMES);
zhp->zpool_propnames[zhp->zpool_n_propnames] = propname;
zhp->zpool_n_propnames++;
}
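
A sketch of how a consumer might use zpool_add_propname() to register an extra property name on an open pool handle before refreshing its configuration; the "dedupcached" literal and the refresh step are assumptions about intended use, not something this hunk itself shows:

#include <libzfs.h>

int
main(void)
{
	libzfs_handle_t *hdl = libzfs_init();
	zpool_handle_t *zhp;
	boolean_t missing;

	if (hdl == NULL)
		return (1);
	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
		/* "dedupcached" is an illustrative property name */
		zpool_add_propname(zhp, "dedupcached");
		(void) zpool_refresh_stats(zhp, &missing);
		zpool_close(zhp);
	}
	libzfs_fini(hdl);
	return (0);
}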
/*
* Retrieve the persistent error log, uniquify the members, and return to the
* caller.
@ -5228,6 +5347,8 @@ zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
case VDEV_PROP_WRITE_ERRORS:
case VDEV_PROP_CHECKSUM_ERRORS:
case VDEV_PROP_INITIALIZE_ERRORS:
case VDEV_PROP_TRIM_ERRORS:
case VDEV_PROP_SLOW_IOS:
case VDEV_PROP_OPS_NULL:
case VDEV_PROP_OPS_READ:
case VDEV_PROP_OPS_WRITE:
@ -5307,6 +5428,11 @@ zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
} else {
/* 'trim_support' only valid for leaf vdevs */
if (prop == VDEV_PROP_TRIM_SUPPORT) {
(void) strlcpy(buf, "-", len);
break;
}
src = ZPROP_SRC_DEFAULT;
intval = vdev_prop_default_numeric(prop);
/* Only use if provided by the RAIDZ VDEV above */

View File

@ -4952,7 +4952,7 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
if (flags->verbose) {
(void) printf("%s %s%s stream of %s into %s\n",
flags->dryrun ? "would receive" : "receiving",
flags->heal ? " corrective" : "",
flags->heal ? "corrective " : "",
drrb->drr_fromguid ? "incremental" : "full",
drrb->drr_toname, destsnap);
(void) fflush(stdout);

View File

@ -68,6 +68,7 @@
* as necessary.
*/
#define URI_REGEX "^\\([A-Za-z][A-Za-z0-9+.\\-]*\\):"
#define STR_NUMS "0123456789"
int
libzfs_errno(libzfs_handle_t *hdl)
@ -1267,6 +1268,14 @@ zcmd_read_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvlp)
* ================================================================
*/
void
zcmd_print_json(nvlist_t *nvl)
{
nvlist_print_json(stdout, nvl);
(void) putchar('\n');
nvlist_free(nvl);
}
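
zcmd_print_json() above wraps libnvpair's JSON printer and takes ownership of the nvlist. A minimal equivalent using the public libnvpair call directly (the nvlist contents are illustrative):

#include <libnvpair.h>
#include <stdio.h>

int
main(void)
{
	nvlist_t *nvl = fnvlist_alloc();

	fnvlist_add_string(nvl, "command", "zfs list");
	fnvlist_add_uint32(nvl, "vers_major", 0);
	nvlist_print_json(stdout, nvl);
	(void) putchar('\n');
	nvlist_free(nvl);
	return (0);
}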
static void
zprop_print_headers(zprop_get_cbdata_t *cbp, zfs_type_t type)
{
@ -1393,6 +1402,103 @@ zprop_print_headers(zprop_get_cbdata_t *cbp, zfs_type_t type)
(void) printf("\n");
}
/*
* Add property value and source to provided nvlist, according to
* settings in cb structure. Later to be printed in JSON format.
*/
int
zprop_nvlist_one_property(const char *propname,
const char *value, zprop_source_t sourcetype, const char *source,
const char *recvd_value, nvlist_t *nvl, boolean_t as_int)
{
int ret = 0;
nvlist_t *src_nv, *prop;
boolean_t all_numeric = strspn(value, STR_NUMS) == strlen(value);
src_nv = prop = NULL;
if ((nvlist_alloc(&prop, NV_UNIQUE_NAME, 0) != 0) ||
(nvlist_alloc(&src_nv, NV_UNIQUE_NAME, 0) != 0)) {
ret = -1;
goto err;
}
if (as_int && all_numeric) {
uint64_t val;
sscanf(value, "%lld", (u_longlong_t *)&val);
if (nvlist_add_uint64(prop, "value", val) != 0) {
ret = -1;
goto err;
}
} else {
if (nvlist_add_string(prop, "value", value) != 0) {
ret = -1;
goto err;
}
}
switch (sourcetype) {
case ZPROP_SRC_NONE:
if (nvlist_add_string(src_nv, "type", "NONE") != 0 ||
(nvlist_add_string(src_nv, "data", "-") != 0)) {
ret = -1;
goto err;
}
break;
case ZPROP_SRC_DEFAULT:
if (nvlist_add_string(src_nv, "type", "DEFAULT") != 0 ||
(nvlist_add_string(src_nv, "data", "-") != 0)) {
ret = -1;
goto err;
}
break;
case ZPROP_SRC_LOCAL:
if (nvlist_add_string(src_nv, "type", "LOCAL") != 0 ||
(nvlist_add_string(src_nv, "data", "-") != 0)) {
ret = -1;
goto err;
}
break;
case ZPROP_SRC_TEMPORARY:
if (nvlist_add_string(src_nv, "type", "TEMPORARY") != 0 ||
(nvlist_add_string(src_nv, "data", "-") != 0)) {
ret = -1;
goto err;
}
break;
case ZPROP_SRC_INHERITED:
if (nvlist_add_string(src_nv, "type", "INHERITED") != 0 ||
(nvlist_add_string(src_nv, "data", source) != 0)) {
ret = -1;
goto err;
}
break;
case ZPROP_SRC_RECEIVED:
if (nvlist_add_string(src_nv, "type", "RECEIVED") != 0 ||
(nvlist_add_string(src_nv, "data",
(recvd_value == NULL ? "-" : recvd_value)) != 0)) {
ret = -1;
goto err;
}
break;
default:
assert(!"unhandled zprop_source_t");
if (nvlist_add_string(src_nv, "type",
"unhandled zprop_source_t") != 0) {
ret = -1;
goto err;
}
}
if ((nvlist_add_nvlist(prop, "source", src_nv) != 0) ||
(nvlist_add_nvlist(nvl, propname, prop)) != 0) {
ret = -1;
goto err;
}
err:
nvlist_free(src_nv);
nvlist_free(prop);
return (ret);
}
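
A hedged sketch of feeding one property through zprop_nvlist_one_property() and printing the resulting JSON-ready nvlist; it assumes the function is exported to callers such as the zfs and zpool commands, and the property value shown is illustrative:

#include <libzfs.h>
#include <libnvpair.h>
#include <stdio.h>

int
main(void)
{
	nvlist_t *props = fnvlist_alloc();

	if (zprop_nvlist_one_property("mountpoint", "/tank",
	    ZPROP_SRC_LOCAL, NULL, NULL, props, B_FALSE) == 0) {
		nvlist_print_json(stdout, props);
		(void) putchar('\n');
	}
	nvlist_free(props);
	return (0);
}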
/*
* Display a single line of output, according to the settings in the callback
* structure.
@ -1484,6 +1590,26 @@ zprop_print_one_property(const char *name, zprop_get_cbdata_t *cbp,
(void) printf("\n");
}
int
zprop_collect_property(const char *name, zprop_get_cbdata_t *cbp,
const char *propname, const char *value, zprop_source_t sourcetype,
const char *source, const char *recvd_value, nvlist_t *nvl)
{
if (cbp->cb_json) {
if ((sourcetype & cbp->cb_sources) == 0)
return (0);
else {
return (zprop_nvlist_one_property(propname, value,
sourcetype, source, recvd_value, nvl,
cbp->cb_json_as_int));
}
} else {
zprop_print_one_property(name, cbp,
propname, value, sourcetype, source, recvd_value);
return (0);
}
}
/*
* Given a numeric suffix, convert the value into a number of bits that the
* resulting value must be shifted.
@ -1691,6 +1817,16 @@ zprop_parse_value(libzfs_handle_t *hdl, nvpair_t *elem, int prop,
"use 'none' to disable quota/refquota"));
goto error;
}
/*
* Pool dedup table quota; force use of 'none' instead of 0
*/
if ((type & ZFS_TYPE_POOL) && *ivalp == 0 &&
(!isnone && !isauto) &&
prop == ZPOOL_PROP_DEDUP_TABLE_QUOTA) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"use 'none' to disable ddt table quota"));
goto error;
}
/*
* Special handling for "*_limit=none". In this case it's not
@ -1732,6 +1868,10 @@ zprop_parse_value(libzfs_handle_t *hdl, nvpair_t *elem, int prop,
}
*ivalp = UINT64_MAX;
break;
case ZPOOL_PROP_DEDUP_TABLE_QUOTA:
ASSERT(type & ZFS_TYPE_POOL);
*ivalp = UINT64_MAX;
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'auto' is invalid value for '%s'"),
@ -1985,6 +2125,34 @@ zfs_version_print(void)
return (0);
}
/*
* Returns an nvlist with both zfs userland and kernel versions.
* Returns NULL on error.
*/
nvlist_t *
zfs_version_nvlist(void)
{
nvlist_t *nvl;
char kmod_ver[64];
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
return (NULL);
if (nvlist_add_string(nvl, "userland", ZFS_META_ALIAS) != 0)
goto err;
char *kver = zfs_version_kernel();
if (kver == NULL) {
fprintf(stderr, "zfs_version_kernel() failed: %s\n",
zfs_strerror(errno));
goto err;
}
(void) snprintf(kmod_ver, 64, "zfs-kmod-%s", kver);
if (nvlist_add_string(nvl, "kernel", kmod_ver) != 0)
goto err;
return (nvl);
err:
nvlist_free(nvl);
return (NULL);
}
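
A small usage sketch for the new zfs_version_nvlist() helper, printing the result as JSON (assumes the caller links against libzfs and libnvpair):

#include <libzfs.h>
#include <libnvpair.h>
#include <stdio.h>

int
main(void)
{
	nvlist_t *ver = zfs_version_nvlist();

	if (ver == NULL)
		return (1);
	nvlist_print_json(stdout, ver);
	(void) putchar('\n');
	nvlist_free(ver);
	return (0);
}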
/*
* Return 1 if the user requested ANSI color output, and our terminal supports
* it. Return 0 for no color.

View File

@ -126,7 +126,9 @@
<elf-symbol name='atomic_swap_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='format_timestamp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_system_hostid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_timestamp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getexecname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getextmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getmntany' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@ -176,6 +178,7 @@
<elf-symbol name='lzc_load_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_pool_checkpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_pool_checkpoint_discard' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_pool_prefetch' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_promote' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_receive' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_receive_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@ -1109,6 +1112,18 @@
<parameter type-id='3502e3ff' name='timestamp_fmt'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='get_timestamp' mangled-name='get_timestamp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_timestamp'>
<parameter type-id='3502e3ff' name='timestamp_fmt'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='95e97e5e' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='format_timestamp' mangled-name='format_timestamp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='format_timestamp'>
<parameter type-id='c9d12d66' name='t'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='95e97e5e' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs_core/libzfs_core.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='03085adc' size-in-bits='192' id='083f8d58'>
@ -1428,6 +1443,7 @@
<enumerator name='ZFS_IOC_VDEV_GET_PROPS' value='23125'/>
<enumerator name='ZFS_IOC_VDEV_SET_PROPS' value='23126'/>
<enumerator name='ZFS_IOC_POOL_SCRUB' value='23127'/>
<enumerator name='ZFS_IOC_POOL_PREFETCH' value='23128'/>
<enumerator name='ZFS_IOC_PLATFORM' value='23168'/>
<enumerator name='ZFS_IOC_EVENTS_NEXT' value='23169'/>
<enumerator name='ZFS_IOC_EVENTS_CLEAR' value='23170'/>
@ -1462,6 +1478,12 @@
<enumerator name='ZFS_WAIT_NUM_ACTIVITIES' value='1'/>
</enum-decl>
<typedef-decl name='zfs_wait_activity_t' type-id='527d5dc6' id='3024501a'/>
<enum-decl name='zpool_prefetch_type_t' naming-typedef-id='e55ff6bc' id='0299ab50'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_PREFETCH_NONE' value='0'/>
<enumerator name='ZPOOL_PREFETCH_DDT' value='1'/>
</enum-decl>
<typedef-decl name='zpool_prefetch_type_t' type-id='0299ab50' id='e55ff6bc'/>
<enum-decl name='data_type_t' naming-typedef-id='8d0687d2' id='aeeae136'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='DATA_TYPE_DONTCARE' value='-1'/>
@ -2892,6 +2914,11 @@
<parameter type-id='80f4b756' name='pool'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_pool_prefetch' mangled-name='lzc_pool_prefetch' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_pool_prefetch'>
<parameter type-id='80f4b756' name='pool'/>
<parameter type-id='e55ff6bc' name='type'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_channel_program_nosync' mangled-name='lzc_channel_program_nosync' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_channel_program_nosync'>
<parameter type-id='80f4b756' name='pool'/>
<parameter type-id='80f4b756' name='program'/>

View File

@ -1629,6 +1629,26 @@ lzc_pool_checkpoint_discard(const char *pool)
return (error);
}
/*
* Load the requested data type for the specified pool.
*/
int
lzc_pool_prefetch(const char *pool, zpool_prefetch_type_t type)
{
int error;
nvlist_t *result = NULL;
nvlist_t *args = fnvlist_alloc();
fnvlist_add_int32(args, ZPOOL_PREFETCH_TYPE, type);
error = lzc_ioctl(ZFS_IOC_POOL_PREFETCH, pool, args, &result);
fnvlist_free(args);
fnvlist_free(result);
return (error);
}
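
A minimal caller sketch for the new lzc_pool_prefetch() entry point (the pool name "tank" is illustrative; libzfs_core must be initialized first):

#include <libzfs_core.h>
#include <sys/fs/zfs.h>
#include <stdio.h>

int
main(void)
{
	int err;

	if (libzfs_core_init() != 0)
		return (1);
	err = lzc_pool_prefetch("tank", ZPOOL_PREFETCH_DDT);
	if (err != 0)
		(void) fprintf(stderr, "prefetch failed: %d\n", err);
	libzfs_core_fini();
	return (err != 0);
}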
/*
* Executes a read-only channel program.
*

View File

@ -83,6 +83,7 @@ dist_man_MANS = \
%D%/man8/zpool-list.8 \
%D%/man8/zpool-offline.8 \
%D%/man8/zpool-online.8 \
%D%/man8/zpool-prefetch.8 \
%D%/man8/zpool-reguid.8 \
%D%/man8/zpool-remove.8 \
%D%/man8/zpool-reopen.8 \

View File

@ -16,7 +16,7 @@
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.\"
.Dd February 14, 2024
.Dd June 27, 2024
.Dt ZFS 4
.Os
.
@ -831,6 +831,13 @@ even with a small average compressed block size of ~8 KiB.
The parameter can be set to 0 (zero) to disable the limit,
and only applies on Linux.
.
.It Sy zfs_arc_shrinker_seeks Ns = Ns Sy 2 Pq int
Relative cost of ARC eviction on Linux, i.e. the number of seeks needed to
restore an evicted page.
Bigger values make the ARC more precious and evictions smaller, compared to
other kernel subsystems.
A value of 4 means parity with the page cache.
.
.It Sy zfs_arc_sys_free Ns = Ns Sy 0 Ns B Pq u64
The target number of bytes the ARC should leave as free memory on the system.
If zero, equivalent to the bigger of
@ -2113,7 +2120,7 @@ The default of
.Sy 32
was determined to be a reasonable compromise.
.
.It Sy zfs_txg_history Ns = Ns Sy 0 Pq uint
.It Sy zfs_txg_history Ns = Ns Sy 100 Pq uint
Historical statistics for this many latest TXGs will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /TXGs .
.

View File

@ -154,7 +154,7 @@ defaults to the current kernel release.
.
.It Sy bootfs.rollback Ns Op Sy = Ns Ar snapshot-name
Execute
.Nm zfs Cm snapshot Fl Rf Ar boot-dataset Ns Sy @ Ns Ar snapshot-name
.Nm zfs Cm rollback Fl Rf Ar boot-dataset Ns Sy @ Ns Ar snapshot-name
before pivoting to the real root.
.Ar snapshot-name
defaults to the current kernel release.

View File

@ -102,8 +102,14 @@ Parent of this vdev
Comma separated list of children of this vdev
.It Sy numchildren
The number of children belonging to this vdev
.It Sy read_errors , write_errors , checksum_errors , initialize_errors
.It Sy read_errors , write_errors , checksum_errors , initialize_errors , trim_errors
The number of errors of each type encountered by this vdev
.It Sy slow_ios
The number of slow I/Os encountered by this vdev.
These represent I/O operations that didn't complete in
.Sy zio_slow_io_ms
milliseconds
.Pq Sy 30000 No by default .
.It Sy null_ops , read_ops , write_ops , free_ops , claim_ops , trim_ops
The number of I/O operations of each type performed by this vdev
.It Xo
@ -113,6 +119,8 @@ The number of I/O operations of each type performed by this vdev
The cumulative size of all operations of each type performed by this vdev
.It Sy removing
If this device is currently being removed from the pool
.It Sy trim_support
Indicates whether a leaf device supports trim operations.
.El
.Pp
The following native properties can be used to change the behavior of a vdev.

View File

@ -38,7 +38,7 @@
.\" Copyright (c) 2019, Kjeld Schouten-Lebbing
.\" Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
.\"
.Dd August 8, 2023
.Dd June 29, 2024
.Dt ZFSPROPS 7
.Os
.
@ -1634,7 +1634,7 @@ then only metadata are prefetched.
The default value is
.Sy all .
.Pp
Please note that the module parameter zfs_disable_prefetch=1 can
Please note that the module parameter zfs_prefetch_disable=1 can
be used to totally disable speculative prefetch, bypassing anything
this property does.
.It Sy setuid Ns = Ns Sy on Ns | Ns Sy off
@ -1722,6 +1722,18 @@ Please note that the options are comma-separated, unlike those found in
This is done to negate the need for quoting, as well as to make parsing
with scripts easier.
.Pp
For
.Fx ,
there may be multiple sets of options separated by semicolons.
Each set of options must apply to different hosts or networks, and each
set of options will create a separate line for
.Xr exports 5 .
Any semicolon-separated option set that consists entirely of whitespace
will be ignored.
This use of semicolons is only for
.Fx
at this time.
.Pp
See
.Xr exports 5
for the meaning of the default options.

View File

@ -28,7 +28,7 @@
.\" Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
.\" Copyright (c) 2023, Klara Inc.
.\"
.Dd January 2, 2024
.Dd July 29, 2024
.Dt ZPOOLPROPS 7
.Os
.
@ -73,6 +73,12 @@ The amount of storage used by cloned blocks.
Percentage of pool space used.
This property can also be referred to by its shortened column name,
.Sy cap .
.It Sy dedupcached
Total size of the deduplication table currently loaded into the ARC.
See
.Xr zpool-prefetch 8 .
.It Sy dedup_table_size
Total on-disk size of the deduplication table.
.It Sy expandsize
Amount of uninitialized space within the pool or device that can be used to
increase the total capacity of the pool.
@ -348,6 +354,27 @@ See
and
.Xr zpool-upgrade 8
for more information on the operation of compatibility feature sets.
.It Sy dedup_table_quota Ns = Ns Ar number Ns | Ns Sy none Ns | Ns Sy auto
This property sets a limit on the on-disk size of the pool's dedup table.
Entries will not be added to the dedup table once this size is reached;
if a dedup table already exists and is larger than this size, its entries
will not be removed as part of setting this property.
Existing entries will still have their reference counts updated.
.Pp
The actual size limit of the table may be above or below the quota,
depending on the actual on-disk size of the entries (which may be
approximated for purposes of calculating the quota).
That is, setting a quota size of 1M may result in the maximum size being
slightly below, or slightly above, that value.
Set to
.Sy 'none'
to disable.
In automatic mode, which is the default, the size of a dedicated dedup vdev
is used as the quota limit.
.Pp
The
.Sy dedup_table_quota
property works for both legacy and fast dedup tables.
.It Sy dedupditto Ns = Ns Ar number
This property is deprecated and no longer has any effect.
.It Sy delegation Ns = Ns Sy on Ns | Ns Sy off
@ -461,7 +488,7 @@ The expected convention is that the property name is divided into two portions
such as
.Ar module : Ns Ar property ,
but this namespace is not enforced by ZFS.
User property names can be at most 256 characters, and cannot begin with a dash
User property names can be at most 255 characters, and cannot begin with a dash
.Pq Qq Sy - .
.Pp
When making programmatic use of user properties, it is strongly suggested to use

View File

@ -314,7 +314,6 @@ fragmentation,
and free space histogram, as well as overall pool fragmentation and histogram.
.It Fl MM
"Special" vdevs are added to -M's normal output.
.It Fl O , -object-lookups Ns = Ns Ar dataset path
Also display information about the maximum contiguous free space and the
percentage of free space in each space map.
.It Fl MMM
@ -327,7 +326,7 @@ but force zdb to interpret the
in
.Op Ar poolname Ns Op / Ns Ar dataset Ns | Ns Ar objset-ID
as a numeric objset ID.
.It Fl O Ar dataset path
.It Fl O , -object-lookups Ns = Ns Ar dataset path
Look up the specified
.Ar path
inside of the

View File

@ -41,6 +41,7 @@
.Cm list
.Op Fl r Ns | Ns Fl d Ar depth
.Op Fl Hp
.Op Fl j Op Ar --json-int
.Oo Fl o Ar property Ns Oo , Ns Ar property Oc Ns Oc
.Oo Fl s Ar property Oc Ns
.Oo Fl S Ar property Oc Ns
@ -70,6 +71,11 @@ The following fields are displayed:
Used for scripting mode.
Do not print headers and separate fields by a single tab instead of arbitrary
white space.
.It Fl j Op Ar --json-int
Print the output in JSON format.
Specify
.Sy --json-int
to print the numbers in integer format instead of strings in JSON output.
.It Fl d Ar depth
Recursively display any children of the dataset, limiting the recursion to
.Ar depth .
@ -186,6 +192,161 @@ pool/home 315K 457G 21K /export/home
pool/home/anne 18K 457G 18K /export/home/anne
pool/home/bob 276K 457G 276K /export/home/bob
.Ed
.Ss Example 2 : No Listing ZFS filesystems and snapshots in JSON format
.Bd -literal -compact -offset Ds
.No # Nm zfs Cm list Fl j Fl t Ar filesystem,snapshot | Cm jq
{
"output_version": {
"command": "zfs list",
"vers_major": 0,
"vers_minor": 1
},
"datasets": {
"pool": {
"name": "pool",
"type": "FILESYSTEM",
"pool": "pool",
"properties": {
"used": {
"value": "290K",
"source": {
"type": "NONE",
"data": "-"
}
},
"available": {
"value": "30.5G",
"source": {
"type": "NONE",
"data": "-"
}
},
"referenced": {
"value": "24K",
"source": {
"type": "NONE",
"data": "-"
}
},
"mountpoint": {
"value": "/pool",
"source": {
"type": "DEFAULT",
"data": "-"
}
}
}
},
"pool/home": {
"name": "pool/home",
"type": "FILESYSTEM",
"pool": "pool",
"properties": {
"used": {
"value": "48K",
"source": {
"type": "NONE",
"data": "-"
}
},
"available": {
"value": "30.5G",
"source": {
"type": "NONE",
"data": "-"
}
},
"referenced": {
"value": "24K",
"source": {
"type": "NONE",
"data": "-"
}
},
"mountpoint": {
"value": "/mnt/home",
"source": {
"type": "LOCAL",
"data": "-"
}
}
}
},
"pool/home/bob": {
"name": "pool/home/bob",
"type": "FILESYSTEM",
"pool": "pool",
"properties": {
"used": {
"value": "24K",
"source": {
"type": "NONE",
"data": "-"
}
},
"available": {
"value": "30.5G",
"source": {
"type": "NONE",
"data": "-"
}
},
"referenced": {
"value": "24K",
"source": {
"type": "NONE",
"data": "-"
}
},
"mountpoint": {
"value": "/mnt/home/bob",
"source": {
"type": "INHERITED",
"data": "pool/home"
}
}
}
},
"pool/home/bob@v1": {
"name": "pool/home/bob@v1",
"type": "SNAPSHOT",
"pool": "pool",
"dataset": "pool/home/bob",
"snapshot_name": "v1",
"properties": {
"used": {
"value": "0B",
"source": {
"type": "NONE",
"data": "-"
}
},
"available": {
"value": "-",
"source": {
"type": "NONE",
"data": "-"
}
},
"referenced": {
"value": "24K",
"source": {
"type": "NONE",
"data": "-"
}
},
"mountpoint": {
"value": "-",
"source": {
"type": "NONE",
"data": "-"
}
}
}
}
}
}
.Ed
.
.Sh SEE ALSO
.Xr zfsprops 7 ,

View File

@ -39,6 +39,7 @@
.Sh SYNOPSIS
.Nm zfs
.Cm mount
.Op Fl j
.Nm zfs
.Cm mount
.Op Fl Oflv
@ -54,8 +55,13 @@
.It Xo
.Nm zfs
.Cm mount
.Op Fl j
.Xc
Displays all ZFS file systems currently mounted.
.Bl -tag -width "-j"
.It Fl j
Displays all mounted file systems in JSON format.
.El
.It Xo
.Nm zfs
.Cm mount

View File

@ -46,6 +46,7 @@
.Cm get
.Op Fl r Ns | Ns Fl d Ar depth
.Op Fl Hp
.Op Fl j Op Ar --json-int
.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns Oc
.Oo Fl s Ar source Ns Oo , Ns Ar source Oc Ns Oc
.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns Oc
@ -91,6 +92,7 @@ dataset.
.Cm get
.Op Fl r Ns | Ns Fl d Ar depth
.Op Fl Hp
.Op Fl j Op Ar --json-int
.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns Oc
.Oo Fl s Ar source Ns Oo , Ns Ar source Oc Ns Oc
.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns Oc
@ -128,6 +130,11 @@ The value
can be used to display all properties that apply to the given dataset's type
.Pq Sy filesystem , volume , snapshot , No or Sy bookmark .
.Bl -tag -width "-s source"
.It Fl j Op Ar --json-int
Display the output in JSON format.
Specify
.Sy --json-int
to display numbers in integer format instead of strings for JSON output.
.It Fl H
Display output in a form more easily parsed by scripts.
Any headers are omitted, and fields are explicitly separated by a single tab
@ -283,6 +290,50 @@ The following command gets a single property value:
on
.Ed
.Pp
The following command gets a single property value recursively in JSON format:
.Bd -literal -compact -offset Ds
.No # Nm zfs Cm get Fl j Fl r Sy mountpoint Ar pool/home | Nm jq
{
"output_version": {
"command": "zfs get",
"vers_major": 0,
"vers_minor": 1
},
"datasets": {
"pool/home": {
"name": "pool/home",
"type": "FILESYSTEM",
"pool": "pool",
"createtxg": "10",
"properties": {
"mountpoint": {
"value": "/pool/home",
"source": {
"type": "DEFAULT",
"data": "-"
}
}
}
},
"pool/home/bob": {
"name": "pool/home/bob",
"type": "FILESYSTEM",
"pool": "pool",
"createtxg": "1176",
"properties": {
"mountpoint": {
"value": "/pool/home/bob",
"source": {
"type": "DEFAULT",
"data": "-"
}
}
}
}
}
}
.Ed
.Pp
The following command lists all properties with local settings for
.Ar pool/home/bob :
.Bd -literal -compact -offset Ds

View File

@ -48,6 +48,7 @@
.Fl ?V
.Nm
.Cm version
.Op Fl j
.Nm
.Cm subcommand
.Op Ar arguments
@ -153,10 +154,14 @@ Displays a help message.
.It Xo
.Nm
.Cm version
.Op Fl j
.Xc
Displays the software version of the
.Nm
userland utility and the zfs kernel module.
Use the
.Fl j
option to output in JSON format.
.El
.
.Ss Dataset Management

View File

@ -37,6 +37,7 @@
.Nm zpool
.Cm get
.Op Fl Hp
.Op Fl j Op Ar --json-int, --json-pool-key-guid
.Op Fl o Ar field Ns Oo , Ns Ar field Oc Ns
.Sy all Ns | Ns Ar property Ns Oo , Ns Ar property Oc Ns
.Oo Ar pool Oc Ns
@ -44,6 +45,7 @@
.Nm zpool
.Cm get
.Op Fl Hp
.Op Fl j Op Ar --json-int
.Op Fl o Ar field Ns Oo , Ns Ar field Oc Ns
.Sy all Ns | Ns Ar property Ns Oo , Ns Ar property Oc Ns
.Ar pool
@ -67,6 +69,7 @@
.Nm zpool
.Cm get
.Op Fl Hp
.Op Fl j Op Ar --json-int, --json-pool-key-guid
.Op Fl o Ar field Ns Oo , Ns Ar field Oc Ns
.Sy all Ns | Ns Ar property Ns Oo , Ns Ar property Oc Ns
.Oo Ar pool Oc Ns
@ -95,6 +98,14 @@ See the
.Xr zpoolprops 7
manual page for more information on the available pool properties.
.Bl -tag -compact -offset Ds -width "-o field"
.It Fl j Op Ar --json-int, --json-pool-key-guid
Display the list of properties in JSON format.
Specify
.Sy --json-int
to display the numbers in integer format instead of strings in JSON output.
Specify
.Sy --json-pool-key-guid
to set pool GUID as key for pool objects instead of pool name.
.It Fl H
Scripted mode.
Do not display headers, and separate fields by a single tab instead of arbitrary
@ -108,6 +119,7 @@ Display numbers in parsable (exact) values.
.It Xo
.Nm zpool
.Cm get
.Op Fl j Op Ar --json-int
.Op Fl Hp
.Op Fl o Ar field Ns Oo , Ns Ar field Oc Ns
.Sy all Ns | Ns Ar property Ns Oo , Ns Ar property Oc Ns
@ -145,6 +157,11 @@ See the
.Xr vdevprops 7
manual page for more information on the available pool properties.
.Bl -tag -compact -offset Ds -width "-o field"
.It Fl j Op Ar --json-int
Display the list of properties in JSON format.
Specify
.Sy --json-int
to display the numbers in integer format instead of strings in JSON output.
.It Fl H
Scripted mode.
Do not display headers, and separate fields by a single tab instead of arbitrary

View File

@ -37,6 +37,7 @@
.Nm zpool
.Cm list
.Op Fl HgLpPv
.Op Fl j Op Ar --json-int, --json-pool-key-guid
.Op Fl o Ar property Ns Oo , Ns Ar property Oc Ns
.Op Fl T Sy u Ns | Ns Sy d
.Oo Ar pool Oc Ns
@ -58,6 +59,14 @@ is specified, the command exits after
.Ar count
reports are printed.
.Bl -tag -width Ds
.It Fl j Op Ar --json-int, --json-pool-key-guid
Display the list of pools in JSON format.
Specify
.Sy --json-int
to display the numbers in integer format instead of strings.
Specify
.Sy --json-pool-key-guid
to set pool GUID as key for pool objects instead of pool names.
.It Fl g
Display vdev GUIDs instead of the normal device names.
These GUIDs can be used in place of device names for the zpool
@ -139,6 +148,104 @@ data 23.9G 14.6G 9.30G - 48% 61% 1.00x ONLINE -
sda - - - - -
sdb - - - 10G -
sdc - - - - -
.Ed
.
.Ss Example 3 : No Displaying expanded space on a device
The following command lists all available pools on the system in JSON
format.
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm list Fl j | Nm jq
{
"output_version": {
"command": "zpool list",
"vers_major": 0,
"vers_minor": 1
},
"pools": {
"tank": {
"name": "tank",
"type": "POOL",
"state": "ONLINE",
"guid": "15220353080205405147",
"txg": "2671",
"spa_version": "5000",
"zpl_version": "5",
"properties": {
"size": {
"value": "111G",
"source": {
"type": "NONE",
"data": "-"
}
},
"allocated": {
"value": "30.8G",
"source": {
"type": "NONE",
"data": "-"
}
},
"free": {
"value": "80.2G",
"source": {
"type": "NONE",
"data": "-"
}
},
"checkpoint": {
"value": "-",
"source": {
"type": "NONE",
"data": "-"
}
},
"expandsize": {
"value": "-",
"source": {
"type": "NONE",
"data": "-"
}
},
"fragmentation": {
"value": "0%",
"source": {
"type": "NONE",
"data": "-"
}
},
"capacity": {
"value": "27%",
"source": {
"type": "NONE",
"data": "-"
}
},
"dedupratio": {
"value": "1.00x",
"source": {
"type": "NONE",
"data": "-"
}
},
"health": {
"value": "ONLINE",
"source": {
"type": "NONE",
"data": "-"
}
},
"altroot": {
"value": "-",
"source": {
"type": "DEFAULT",
"data": "-"
}
}
}
}
}
}
.Ed
.
.Sh SEE ALSO

man/man8/zpool-prefetch.8 Normal file
View File

@ -0,0 +1,46 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\"
.\" Copyright (c) 2023, Klara Inc.
.\"
.Dd February 14, 2024
.Dt ZPOOL-PREFETCH 8
.Os
.
.Sh NAME
.Nm zpool-prefetch
.Nd load specific types of data for the given pool
.Sh SYNOPSIS
.Nm zpool
.Cm prefetch
.Fl t Ar type
.Ar pool
.Sh DESCRIPTION
.Bl -tag -width Ds
.It Xo
.Nm zpool
.Cm prefetch
.Fl t Li ddt
.Ar pool
.Xc
Prefetch data of a specific type for the given pool; specifically the DDT,
which will improve write I/O performance when the DDT is resident in the ARC.
.El

View File

@ -26,7 +26,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd March 16, 2022
.Dd February 14, 2024
.Dt ZPOOL-STATUS 8
.Os
.
@ -41,6 +41,7 @@
.Op Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns
.Oo Ar pool Oc Ns
.Op Ar interval Op Ar count
.Op Fl j Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid
.
.Sh DESCRIPTION
Displays the detailed health status for the given pools.
@ -69,12 +70,25 @@ See the
option of
.Nm zpool Cm iostat
for complete details.
.It Fl j Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid
Display the status for ZFS pools in JSON format.
Specify
.Sy --json-int
to display numbers in integer format instead of strings.
Specify
.Sy --json-flat-vdevs
to display vdevs in flat hierarchy instead of nested vdev objects.
Specify
.Sy --json-pool-key-guid
to set pool GUID as key for pool objects instead of pool names.
.It Fl D
Display a histogram of deduplication statistics, showing the allocated
.Pq physically present on disk
and referenced
.Pq logically referenced in the pool
block counts and sizes by reference count.
If repeated (-DD), also shows statistics on how much of the DDT is resident
in the ARC.
.It Fl e
Only show unhealthy vdevs (not-ONLINE or with errors).
.It Fl g
@ -159,6 +173,175 @@ rpool 14.6G 54.9G 4 55 250K 2.69M
---------- ----- ----- ----- ----- ----- ----- ----
.Ed
.
.Ss Example 2 : No Display the status output in JSON format
.Nm zpool Cm status No can output in JSON format if
.Fl j
is specified.
.Fl c
can be used to run a script on each VDEV.
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl j Fl c Pa vendor , Ns Pa model , Ns Pa size | Nm jq
{
"output_version": {
"command": "zpool status",
"vers_major": 0,
"vers_minor": 1
},
"pools": {
"tank": {
"name": "tank",
"state": "ONLINE",
"guid": "3920273586464696295",
"txg": "16597",
"spa_version": "5000",
"zpl_version": "5",
"status": "OK",
"vdevs": {
"tank": {
"name": "tank",
"alloc_space": "62.6G",
"total_space": "15.0T",
"def_space": "11.3T",
"read_errors": "0",
"write_errors": "0",
"checksum_errors": "0",
"vdevs": {
"raidz1-0": {
"name": "raidz1-0",
"vdev_type": "raidz",
"guid": "763132626387621737",
"state": "HEALTHY",
"alloc_space": "62.5G",
"total_space": "10.9T",
"def_space": "7.26T",
"rep_dev_size": "10.9T",
"read_errors": "0",
"write_errors": "0",
"checksum_errors": "0",
"vdevs": {
"ca1eb824-c371-491d-ac13-37637e35c683": {
"name": "ca1eb824-c371-491d-ac13-37637e35c683",
"vdev_type": "disk",
"guid": "12841765308123764671",
"path": "/dev/disk/by-partuuid/ca1eb824-c371-491d-ac13-37637e35c683",
"state": "HEALTHY",
"rep_dev_size": "3.64T",
"phys_space": "3.64T",
"read_errors": "0",
"write_errors": "0",
"checksum_errors": "0",
"vendor": "ATA",
"model": "WDC WD40EFZX-68AWUN0",
"size": "3.6T"
},
"97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7": {
"name": "97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
"vdev_type": "disk",
"guid": "1527839927278881561",
"path": "/dev/disk/by-partuuid/97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
"state": "HEALTHY",
"rep_dev_size": "3.64T",
"phys_space": "3.64T",
"read_errors": "0",
"write_errors": "0",
"checksum_errors": "0",
"vendor": "ATA",
"model": "WDC WD40EFZX-68AWUN0",
"size": "3.6T"
},
"e9ddba5f-f948-4734-a472-cb8aa5f0ff65": {
"name": "e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
"vdev_type": "disk",
"guid": "6982750226085199860",
"path": "/dev/disk/by-partuuid/e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
"state": "HEALTHY",
"rep_dev_size": "3.64T",
"phys_space": "3.64T",
"read_errors": "0",
"write_errors": "0",
"checksum_errors": "0",
"vendor": "ATA",
"model": "WDC WD40EFZX-68AWUN0",
"size": "3.6T"
}
}
}
}
}
},
"dedup": {
"mirror-2": {
"name": "mirror-2",
"vdev_type": "mirror",
"guid": "2227766268377771003",
"state": "HEALTHY",
"alloc_space": "89.1M",
"total_space": "3.62T",
"def_space": "3.62T",
"rep_dev_size": "3.62T",
"read_errors": "0",
"write_errors": "0",
"checksum_errors": "0",
"vdevs": {
"db017360-d8e9-4163-961b-144ca75293a3": {
"name": "db017360-d8e9-4163-961b-144ca75293a3",
"vdev_type": "disk",
"guid": "17880913061695450307",
"path": "/dev/disk/by-partuuid/db017360-d8e9-4163-961b-144ca75293a3",
"state": "HEALTHY",
"rep_dev_size": "3.63T",
"phys_space": "3.64T",
"read_errors": "0",
"write_errors": "0",
"checksum_errors": "0",
"vendor": "ATA",
"model": "WDC WD40EFZX-68AWUN0",
"size": "3.6T"
},
"952c3baf-b08a-4a8c-b7fa-33a07af5fe6f": {
"name": "952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
"vdev_type": "disk",
"guid": "10276374011610020557",
"path": "/dev/disk/by-partuuid/952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
"state": "HEALTHY",
"rep_dev_size": "3.63T",
"phys_space": "3.64T",
"read_errors": "0",
"write_errors": "0",
"checksum_errors": "0",
"vendor": "ATA",
"model": "WDC WD40EFZX-68AWUN0",
"size": "3.6T"
}
}
}
},
"special": {
"25d418f8-92bd-4327-b59f-7ef5d5f50d81": {
"name": "25d418f8-92bd-4327-b59f-7ef5d5f50d81",
"vdev_type": "disk",
"guid": "3935742873387713123",
"path": "/dev/disk/by-partuuid/25d418f8-92bd-4327-b59f-7ef5d5f50d81",
"state": "HEALTHY",
"alloc_space": "37.4M",
"total_space": "444G",
"def_space": "444G",
"rep_dev_size": "444G",
"phys_space": "447G",
"read_errors": "0",
"write_errors": "0",
"checksum_errors": "0",
"vendor": "ATA",
"model": "Micron_5300_MTFDDAK480TDS",
"size": "447.1G"
}
},
"error_count": "0"
}
}
}
.Ed
.
.Sh SEE ALSO
.Xr zpool-events 8 ,
.Xr zpool-history 8 ,

View File

@ -26,7 +26,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd March 16, 2022
.Dd February 14, 2024
.Dt ZPOOL 8
.Os
.
@ -38,6 +38,7 @@
.Fl ?V
.Nm
.Cm version
.Op Fl j
.Nm
.Cm subcommand
.Op Ar arguments
@ -79,10 +80,14 @@ Displays a help message.
.It Xo
.Nm
.Cm version
.Op Fl j
.Xc
Displays the software version of the
.Nm
userland utility and the ZFS kernel module.
Use the
.Fl j
option to output in JSON format.
.El
.
.Ss Creation
@ -168,6 +173,8 @@ specified.
.
.Ss Maintenance
.Bl -tag -width Ds
.It Xr zpool-prefetch 8
Prefetches specific types of pool data.
.It Xr zpool-scrub 8
Begins a scrub or resumes a paused scrub.
.It Xr zpool-checkpoint 8
@ -598,6 +605,7 @@ don't wait.
.Xr zpool-list 8 ,
.Xr zpool-offline 8 ,
.Xr zpool-online 8 ,
.Xr zpool-prefetch 8 ,
.Xr zpool-reguid 8 ,
.Xr zpool-remove 8 ,
.Xr zpool-reopen 8 ,

View File

@ -491,6 +491,8 @@ zfs-$(CONFIG_PPC64) += $(addprefix zfs/,$(ZFS_OBJS_PPC_PPC64))
UBSAN_SANITIZE_zap_leaf.o := n
UBSAN_SANITIZE_zap_micro.o := n
UBSAN_SANITIZE_sa.o := n
UBSAN_SANITIZE_zfs/zap_micro.o := n
UBSAN_SANITIZE_zfs/sa.o := n
# Suppress incorrect warnings from versions of objtool which are not
# aware of x86 EVEX prefix instructions used for AVX512.

View File

@ -240,22 +240,14 @@ void
kmem_cache_reap_soon(kmem_cache_t *cache)
{
#ifndef KMEM_DEBUG
#if __FreeBSD_version >= 1300043
uma_zone_reclaim(cache->kc_zone, UMA_RECLAIM_DRAIN);
#else
zone_drain(cache->kc_zone);
#endif
#endif
}
void
kmem_reap(void)
{
#if __FreeBSD_version >= 1300043
uma_reclaim(UMA_RECLAIM_TRIM);
#else
uma_reclaim();
#endif
}
#else
void

View File

@ -41,42 +41,42 @@ int
secpolicy_nfs(cred_t *cr)
{
return (spl_priv_check_cred(cr, PRIV_NFS_DAEMON));
return (priv_check_cred(cr, PRIV_NFS_DAEMON));
}
int
secpolicy_zfs(cred_t *cr)
{
return (spl_priv_check_cred(cr, PRIV_VFS_MOUNT));
return (priv_check_cred(cr, PRIV_VFS_MOUNT));
}
int
secpolicy_zfs_proc(cred_t *cr, proc_t *proc)
{
return (spl_priv_check_cred(cr, PRIV_VFS_MOUNT));
return (priv_check_cred(cr, PRIV_VFS_MOUNT));
}
int
secpolicy_sys_config(cred_t *cr, int checkonly __unused)
{
return (spl_priv_check_cred(cr, PRIV_ZFS_POOL_CONFIG));
return (priv_check_cred(cr, PRIV_ZFS_POOL_CONFIG));
}
int
secpolicy_zinject(cred_t *cr)
{
return (spl_priv_check_cred(cr, PRIV_ZFS_INJECT));
return (priv_check_cred(cr, PRIV_ZFS_INJECT));
}
int
secpolicy_fs_unmount(cred_t *cr, struct mount *vfsp __unused)
{
return (spl_priv_check_cred(cr, PRIV_VFS_UNMOUNT));
return (priv_check_cred(cr, PRIV_VFS_UNMOUNT));
}
int
@ -104,7 +104,7 @@ secpolicy_basic_link(vnode_t *vp, cred_t *cr)
return (0);
if (secpolicy_fs_owner(vp->v_mount, cr) == 0)
return (0);
return (spl_priv_check_cred(cr, PRIV_VFS_LINK));
return (priv_check_cred(cr, PRIV_VFS_LINK));
}
int
@ -120,7 +120,7 @@ secpolicy_vnode_remove(vnode_t *vp, cred_t *cr)
if (secpolicy_fs_owner(vp->v_mount, cr) == 0)
return (0);
return (spl_priv_check_cred(cr, PRIV_VFS_ADMIN));
return (priv_check_cred(cr, PRIV_VFS_ADMIN));
}
int
@ -130,18 +130,18 @@ secpolicy_vnode_access(cred_t *cr, vnode_t *vp, uid_t owner, accmode_t accmode)
if (secpolicy_fs_owner(vp->v_mount, cr) == 0)
return (0);
if ((accmode & VREAD) && spl_priv_check_cred(cr, PRIV_VFS_READ) != 0)
if ((accmode & VREAD) && priv_check_cred(cr, PRIV_VFS_READ) != 0)
return (EACCES);
if ((accmode & VWRITE) &&
spl_priv_check_cred(cr, PRIV_VFS_WRITE) != 0) {
priv_check_cred(cr, PRIV_VFS_WRITE) != 0) {
return (EACCES);
}
if (accmode & VEXEC) {
if (vp->v_type == VDIR) {
if (spl_priv_check_cred(cr, PRIV_VFS_LOOKUP) != 0)
if (priv_check_cred(cr, PRIV_VFS_LOOKUP) != 0)
return (EACCES);
} else {
if (spl_priv_check_cred(cr, PRIV_VFS_EXEC) != 0)
if (priv_check_cred(cr, PRIV_VFS_EXEC) != 0)
return (EACCES);
}
}
@ -198,7 +198,7 @@ secpolicy_vnode_any_access(cred_t *cr, vnode_t *vp, uid_t owner)
continue;
break;
}
if (spl_priv_check_cred(cr, priv) == 0)
if (priv_check_cred(cr, priv) == 0)
return (0);
}
return (EPERM);
@ -212,7 +212,7 @@ secpolicy_vnode_setdac(vnode_t *vp, cred_t *cr, uid_t owner)
return (0);
if (secpolicy_fs_owner(vp->v_mount, cr) == 0)
return (0);
return (spl_priv_check_cred(cr, PRIV_VFS_ADMIN));
return (priv_check_cred(cr, PRIV_VFS_ADMIN));
}
int
@ -262,7 +262,7 @@ secpolicy_vnode_setattr(cred_t *cr, vnode_t *vp, struct vattr *vap,
((mask & AT_GID) && vap->va_gid != ovap->va_gid &&
!groupmember(vap->va_gid, cr))) {
if (secpolicy_fs_owner(vp->v_mount, cr) != 0) {
error = spl_priv_check_cred(cr, PRIV_VFS_CHOWN);
error = priv_check_cred(cr, PRIV_VFS_CHOWN);
if (error)
return (error);
}
@ -306,7 +306,7 @@ secpolicy_vnode_setids_setgids(vnode_t *vp, cred_t *cr, gid_t gid)
return (0);
if (secpolicy_fs_owner(vp->v_mount, cr) == 0)
return (0);
return (spl_priv_check_cred(cr, PRIV_VFS_SETGID));
return (priv_check_cred(cr, PRIV_VFS_SETGID));
}
int
@ -316,7 +316,7 @@ secpolicy_vnode_setid_retain(znode_t *zp, cred_t *cr,
if (secpolicy_fs_owner(ZTOV(zp)->v_mount, cr) == 0)
return (0);
return (spl_priv_check_cred(cr, PRIV_VFS_RETAINSUGID));
return (priv_check_cred(cr, PRIV_VFS_RETAINSUGID));
}
void
@ -327,7 +327,7 @@ secpolicy_setid_clear(struct vattr *vap, vnode_t *vp, cred_t *cr)
return;
if ((vap->va_mode & (S_ISUID | S_ISGID)) != 0) {
if (spl_priv_check_cred(cr, PRIV_VFS_RETAINSUGID)) {
if (priv_check_cred(cr, PRIV_VFS_RETAINSUGID)) {
vap->va_mask |= AT_MODE;
vap->va_mode &= ~(S_ISUID|S_ISGID);
}
@ -349,7 +349,7 @@ secpolicy_setid_setsticky_clear(vnode_t *vp, struct vattr *vap,
* is not a member of. Both of these are allowed in jail(8).
*/
if (vp->v_type != VDIR && (vap->va_mode & S_ISTXT)) {
if (spl_priv_check_cred(cr, PRIV_VFS_STICKYFILE))
if (priv_check_cred(cr, PRIV_VFS_STICKYFILE))
return (EFTYPE);
}
/*
@ -365,7 +365,7 @@ secpolicy_setid_setsticky_clear(vnode_t *vp, struct vattr *vap,
* Deny setting setuid if we are not the file owner.
*/
if ((vap->va_mode & S_ISUID) && ovap->va_uid != cr->cr_uid) {
error = spl_priv_check_cred(cr, PRIV_VFS_ADMIN);
error = priv_check_cred(cr, PRIV_VFS_ADMIN);
if (error)
return (error);
}
@ -376,7 +376,7 @@ int
secpolicy_fs_mount(cred_t *cr, vnode_t *mvp, struct mount *vfsp)
{
return (spl_priv_check_cred(cr, PRIV_VFS_MOUNT));
return (priv_check_cred(cr, PRIV_VFS_MOUNT));
}
int
@ -389,7 +389,7 @@ secpolicy_vnode_owner(vnode_t *vp, cred_t *cr, uid_t owner)
return (0);
/* XXX: vfs_suser()? */
return (spl_priv_check_cred(cr, PRIV_VFS_MOUNT_OWNER));
return (priv_check_cred(cr, PRIV_VFS_MOUNT_OWNER));
}
int
@ -398,14 +398,14 @@ secpolicy_vnode_chown(vnode_t *vp, cred_t *cr, uid_t owner)
if (secpolicy_fs_owner(vp->v_mount, cr) == 0)
return (0);
return (spl_priv_check_cred(cr, PRIV_VFS_CHOWN));
return (priv_check_cred(cr, PRIV_VFS_CHOWN));
}
void
secpolicy_fs_mount_clearopts(cred_t *cr, struct mount *vfsp)
{
if (spl_priv_check_cred(cr, PRIV_VFS_MOUNT_NONUSER) != 0) {
if (priv_check_cred(cr, PRIV_VFS_MOUNT_NONUSER) != 0) {
MNT_ILOCK(vfsp);
vfsp->vfs_flag |= VFS_NOSETUID | MNT_USER;
vfs_clearmntopt(vfsp, MNTOPT_SETUID);
@ -424,12 +424,12 @@ secpolicy_xvattr(vnode_t *vp, xvattr_t *xvap, uid_t owner, cred_t *cr,
if (secpolicy_fs_owner(vp->v_mount, cr) == 0)
return (0);
return (spl_priv_check_cred(cr, PRIV_VFS_SYSFLAGS));
return (priv_check_cred(cr, PRIV_VFS_SYSFLAGS));
}
int
secpolicy_smb(cred_t *cr)
{
return (spl_priv_check_cred(cr, PRIV_NETSMB));
return (priv_check_cred(cr, PRIV_NETSMB));
}

View File

@ -42,11 +42,6 @@
#include <vm/uma.h>
#if __FreeBSD_version < 1201522
#define taskqueue_start_threads_in_proc(tqp, count, pri, proc, name, ...) \
taskqueue_start_threads(tqp, count, pri, name, __VA_ARGS__)
#endif
static uint_t taskq_tsd;
static uma_zone_t taskq_zone;

View File

@ -45,6 +45,16 @@
#include <sys/vnode.h>
#include <sys/zfs_znode.h>
static void
zfs_freeuio(struct uio *uio)
{
#if __FreeBSD_version > 1500013
freeuio(uio);
#else
free(uio, M_IOV);
#endif
}
int
zfs_uiomove(void *cp, size_t n, zfs_uio_rw_t dir, zfs_uio_t *uio)
{
@ -77,7 +87,7 @@ zfs_uiocopy(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio, size_t *cbytes)
error = vn_io_fault_uiomove(p, n, uio_clone);
*cbytes = zfs_uio_resid(uio) - uio_clone->uio_resid;
if (uio_clone != &small_uio_clone)
free(uio_clone, M_IOV);
zfs_freeuio(uio_clone);
return (error);
}

View File

@ -158,7 +158,7 @@ mount_snapshot(kthread_t *td, vnode_t **vpp, const char *fstype, char *fspath,
return (error);
}
vn_seqc_write_begin(vp);
VOP_UNLOCK1(vp);
VOP_UNLOCK(vp);
/*
* Allocate and initialize the filesystem.
@ -249,10 +249,8 @@ mount_snapshot(kthread_t *td, vnode_t **vpp, const char *fstype, char *fspath,
if (VFS_ROOT(mp, LK_EXCLUSIVE, &mvp))
panic("mount: lost mount");
vn_seqc_write_end(vp);
VOP_UNLOCK1(vp);
#if __FreeBSD_version >= 1300048
VOP_UNLOCK(vp);
vfs_op_exit(mp);
#endif
vfs_unbusy(mp);
*vpp = mvp;
return (0);
@ -272,12 +270,8 @@ void
vn_rele_async(vnode_t *vp, taskq_t *taskq)
{
VERIFY3U(vp->v_usecount, >, 0);
if (refcount_release_if_not_last(&vp->v_usecount)) {
#if __FreeBSD_version < 1300045
vdrop(vp);
#endif
if (refcount_release_if_not_last(&vp->v_usecount))
return;
}
VERIFY3U(taskq_dispatch((taskq_t *)taskq,
(task_func_t *)vrele, vp, TQ_SLEEP), !=, 0);
}

View File

@ -63,7 +63,7 @@ zone_dataset_attach(struct ucred *cred, const char *dataset, int jailid)
struct prison *pr;
int dofree, error;
if ((error = spl_priv_check_cred(cred, PRIV_ZFS_JAIL)) != 0)
if ((error = priv_check_cred(cred, PRIV_ZFS_JAIL)) != 0)
return (error);
/* Allocate memory before we grab prison's mutex. */
@ -116,7 +116,7 @@ zone_dataset_detach(struct ucred *cred, const char *dataset, int jailid)
struct prison *pr;
int error;
if ((error = spl_priv_check_cred(cred, PRIV_ZFS_JAIL)) != 0)
if ((error = priv_check_cred(cred, PRIV_ZFS_JAIL)) != 0)
return (error);
sx_slock(&allprison_lock);

View File

@ -300,7 +300,7 @@ void
abd_init(void)
{
abd_chunk_cache = kmem_cache_create("abd_chunk", PAGE_SIZE, 0,
NULL, NULL, NULL, NULL, 0, KMC_NODEBUG);
NULL, NULL, NULL, NULL, 0, KMC_NODEBUG | KMC_RECLAIMABLE);
wmsum_init(&abd_sums.abdstat_struct_size, 0);
wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);

View File

@ -149,26 +149,29 @@ static eventhandler_tag arc_event_lowmem = NULL;
static void
arc_lowmem(void *arg __unused, int howto __unused)
{
int64_t free_memory, to_free;
int64_t can_free, free_memory, to_free;
arc_no_grow = B_TRUE;
arc_warm = B_TRUE;
arc_growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
free_memory = arc_available_memory();
int64_t can_free = arc_c - arc_c_min;
if (can_free <= 0)
return;
to_free = (can_free >> arc_shrink_shift) - MIN(free_memory, 0);
can_free = arc_c - arc_c_min;
to_free = (MAX(can_free, 0) >> arc_shrink_shift) - MIN(free_memory, 0);
DTRACE_PROBE2(arc__needfree, int64_t, free_memory, int64_t, to_free);
arc_reduce_target_size(to_free);
to_free = arc_reduce_target_size(to_free);
/*
* It is unsafe to block here in arbitrary threads, because we can come
* here from ARC itself and may hold ARC locks and thus risk a deadlock
* with ARC reclaim thread.
*/
if (curproc == pageproc)
arc_wait_for_eviction(to_free, B_FALSE);
if (curproc == pageproc) {
arc_wait_for_eviction(to_free, B_FALSE, B_FALSE);
ARCSTAT_BUMP(arcstat_memory_indirect_count);
} else {
ARCSTAT_BUMP(arcstat_memory_direct_count);
}
}
void

View File

@ -197,13 +197,6 @@ zfs_crypto_dispatch(freebsd_crypt_session_t *session, struct cryptop *crp)
crp->crp_etype = 0;
crp->crp_flags &= ~CRYPTO_F_DONE;
session->fs_done = false;
#if __FreeBSD_version < 1300087
/*
* Session ID changed, so we should record that,
* and try again
*/
session->fs_sid = crp->crp_session;
#endif
}
return (error);
}
@ -250,7 +243,6 @@ freebsd_crypt_uio_debug_log(boolean_t encrypt,
* happen every time the key changes (including when
* it's first loaded).
*/
#if __FreeBSD_version >= 1300087
int
freebsd_crypt_newsession(freebsd_crypt_session_t *sessp,
const struct zio_crypt_info *c_info, crypto_key_t *key)
@ -389,244 +381,3 @@ out:
}
return (error);
}
#else
int
freebsd_crypt_newsession(freebsd_crypt_session_t *sessp,
const struct zio_crypt_info *c_info, crypto_key_t *key)
{
struct cryptoini cria = {0}, crie = {0}, *crip;
struct enc_xform *xform;
struct auth_hash *xauth;
int error = 0;
crypto_session_t sid;
#ifdef FCRYPTO_DEBUG
printf("%s(%p, { %s, %d, %d, %s }, { %p, %u })\n",
__FUNCTION__, sessp,
c_info->ci_algname, c_info->ci_crypt_type,
(unsigned int)c_info->ci_keylen, c_info->ci_name,
key->ck_data, (unsigned int)key->ck_length);
printf("\tkey = { ");
for (int i = 0; i < key->ck_length / 8; i++) {
uint8_t *b = (uint8_t *)key->ck_data;
printf("%02x ", b[i]);
}
printf("}\n");
#endif
switch (c_info->ci_crypt_type) {
case ZC_TYPE_GCM:
xform = &enc_xform_aes_nist_gcm;
switch (key->ck_length/8) {
case AES_128_GMAC_KEY_LEN:
xauth = &auth_hash_nist_gmac_aes_128;
break;
case AES_192_GMAC_KEY_LEN:
xauth = &auth_hash_nist_gmac_aes_192;
break;
case AES_256_GMAC_KEY_LEN:
xauth = &auth_hash_nist_gmac_aes_256;
break;
default:
error = EINVAL;
goto bad;
}
break;
case ZC_TYPE_CCM:
xform = &enc_xform_ccm;
switch (key->ck_length/8) {
case AES_128_CBC_MAC_KEY_LEN:
xauth = &auth_hash_ccm_cbc_mac_128;
break;
case AES_192_CBC_MAC_KEY_LEN:
xauth = &auth_hash_ccm_cbc_mac_192;
break;
case AES_256_CBC_MAC_KEY_LEN:
xauth = &auth_hash_ccm_cbc_mac_256;
break;
default:
error = EINVAL;
goto bad;
break;
}
break;
default:
error = ENOTSUP;
goto bad;
}
#ifdef FCRYPTO_DEBUG
printf("%s(%d): Using crypt %s (key length %u [%u bytes]), "
"auth %s (key length %d)\n",
__FUNCTION__, __LINE__,
xform->name, (unsigned int)key->ck_length,
(unsigned int)key->ck_length/8,
xauth->name, xauth->keysize);
#endif
crie.cri_alg = xform->type;
crie.cri_key = key->ck_data;
crie.cri_klen = key->ck_length;
cria.cri_alg = xauth->type;
cria.cri_key = key->ck_data;
cria.cri_klen = key->ck_length;
cria.cri_next = &crie;
crie.cri_next = NULL;
crip = &cria;
// Everything else is zero-initialised
error = crypto_newsession(&sid, crip,
CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
if (error != 0) {
printf("%s(%d): crypto_newsession failed with %d\n",
__FUNCTION__, __LINE__, error);
goto bad;
}
sessp->fs_sid = sid;
mtx_init(&sessp->fs_lock, "FreeBSD Cryptographic Session Lock",
NULL, MTX_DEF);
crypt_sessions++;
bad:
return (error);
}
/*
* The meat of encryption/decryption.
* If sessp is NULL, then it will create a
* temporary cryptographic session, and release
* it when done.
*/
int
freebsd_crypt_uio(boolean_t encrypt,
freebsd_crypt_session_t *input_sessionp,
const struct zio_crypt_info *c_info,
zfs_uio_t *data_uio,
crypto_key_t *key,
uint8_t *ivbuf,
size_t datalen,
size_t auth_len)
{
struct cryptop *crp;
struct cryptodesc *enc_desc, *auth_desc;
struct enc_xform *xform;
struct auth_hash *xauth;
freebsd_crypt_session_t *session = NULL;
int error;
freebsd_crypt_uio_debug_log(encrypt, input_sessionp, c_info, data_uio,
key, ivbuf, datalen, auth_len);
switch (c_info->ci_crypt_type) {
case ZC_TYPE_GCM:
xform = &enc_xform_aes_nist_gcm;
switch (key->ck_length/8) {
case AES_128_GMAC_KEY_LEN:
xauth = &auth_hash_nist_gmac_aes_128;
break;
case AES_192_GMAC_KEY_LEN:
xauth = &auth_hash_nist_gmac_aes_192;
break;
case AES_256_GMAC_KEY_LEN:
xauth = &auth_hash_nist_gmac_aes_256;
break;
default:
error = EINVAL;
goto bad;
}
break;
case ZC_TYPE_CCM:
xform = &enc_xform_ccm;
switch (key->ck_length/8) {
case AES_128_CBC_MAC_KEY_LEN:
xauth = &auth_hash_ccm_cbc_mac_128;
break;
case AES_192_CBC_MAC_KEY_LEN:
xauth = &auth_hash_ccm_cbc_mac_192;
break;
case AES_256_CBC_MAC_KEY_LEN:
xauth = &auth_hash_ccm_cbc_mac_256;
break;
default:
error = EINVAL;
goto bad;
break;
}
break;
default:
error = ENOTSUP;
goto bad;
}
#ifdef FCRYPTO_DEBUG
printf("%s(%d): Using crypt %s (key length %u [%u bytes]), "
"auth %s (key length %d)\n",
__FUNCTION__, __LINE__,
xform->name, (unsigned int)key->ck_length,
(unsigned int)key->ck_length/8,
xauth->name, xauth->keysize);
#endif
if (input_sessionp == NULL) {
session = kmem_zalloc(sizeof (*session), KM_SLEEP);
error = freebsd_crypt_newsession(session, c_info, key);
if (error)
goto out;
} else
session = input_sessionp;
crp = crypto_getreq(2);
if (crp == NULL) {
error = ENOMEM;
goto bad;
}
auth_desc = crp->crp_desc;
enc_desc = auth_desc->crd_next;
crp->crp_session = session->fs_sid;
crp->crp_ilen = auth_len + datalen;
crp->crp_buf = (void*)GET_UIO_STRUCT(data_uio);
crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIFSYNC;
auth_desc->crd_skip = 0;
auth_desc->crd_len = auth_len;
auth_desc->crd_inject = auth_len + datalen;
auth_desc->crd_alg = xauth->type;
#ifdef FCRYPTO_DEBUG
printf("%s: auth: skip = %u, len = %u, inject = %u\n",
__FUNCTION__, auth_desc->crd_skip, auth_desc->crd_len,
auth_desc->crd_inject);
#endif
enc_desc->crd_skip = auth_len;
enc_desc->crd_len = datalen;
enc_desc->crd_inject = auth_len;
enc_desc->crd_alg = xform->type;
enc_desc->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
memcpy(enc_desc->crd_iv, ivbuf, ZIO_DATA_IV_LEN);
enc_desc->crd_next = NULL;
#ifdef FCRYPTO_DEBUG
printf("%s: enc: skip = %u, len = %u, inject = %u\n",
__FUNCTION__, enc_desc->crd_skip, enc_desc->crd_len,
enc_desc->crd_inject);
#endif
if (encrypt)
enc_desc->crd_flags |= CRD_F_ENCRYPT;
error = zfs_crypto_dispatch(session, crp);
crypto_freereq(crp);
out:
if (input_sessionp == NULL) {
freebsd_crypt_freesession(session);
kmem_free(session, sizeof (*session));
}
bad:
#ifdef FCRYPTO_DEBUG
if (error)
printf("%s: returning error %d\n", __FUNCTION__, error);
#endif
return (error);
}
#endif

View File

@ -60,20 +60,7 @@
#define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
#endif
#if __FreeBSD_version < 1300051
#define VM_ALLOC_BUSY_FLAGS VM_ALLOC_NOBUSY
#else
#define VM_ALLOC_BUSY_FLAGS VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY
#endif
#if __FreeBSD_version < 1300072
#define dmu_page_lock(m) vm_page_lock(m)
#define dmu_page_unlock(m) vm_page_unlock(m)
#else
#define dmu_page_lock(m)
#define dmu_page_unlock(m)
#endif
int
dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
@ -167,7 +154,6 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
#endif
vmobj = ma[0]->object;
zfs_vmobject_wlock_12(vmobj);
db = dbp[0];
for (i = 0; i < *rbehind; i++) {
@ -177,7 +163,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
break;
if (!vm_page_none_valid(m)) {
ASSERT3U(m->valid, ==, VM_PAGE_BITS_ALL);
vm_page_do_sunbusy(m);
vm_page_sunbusy(m);
break;
}
ASSERT3U(m->dirty, ==, 0);
@ -189,13 +175,11 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
memcpy(va, (char *)db->db_data + bufoff, PAGESIZE);
zfs_unmap_page(sf);
vm_page_valid(m);
dmu_page_lock(m);
if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
vm_page_activate(m);
else
vm_page_deactivate(m);
dmu_page_unlock(m);
vm_page_do_sunbusy(m);
vm_page_sunbusy(m);
}
*rbehind = i;
@ -296,7 +280,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
break;
if (!vm_page_none_valid(m)) {
ASSERT3U(m->valid, ==, VM_PAGE_BITS_ALL);
vm_page_do_sunbusy(m);
vm_page_sunbusy(m);
break;
}
ASSERT3U(m->dirty, ==, 0);
@ -314,16 +298,13 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
}
zfs_unmap_page(sf);
vm_page_valid(m);
dmu_page_lock(m);
if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
vm_page_activate(m);
else
vm_page_deactivate(m);
dmu_page_unlock(m);
vm_page_do_sunbusy(m);
vm_page_sunbusy(m);
}
*rahead = i;
zfs_vmobject_wunlock_12(vmobj);
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (0);

View File

@ -46,7 +46,6 @@ knlist_sx_xunlock(void *arg)
sx_xunlock((struct sx *)arg);
}
#if __FreeBSD_version >= 1300128
static void
knlist_sx_assert_lock(void *arg, int what)
{
@ -56,28 +55,10 @@ knlist_sx_assert_lock(void *arg, int what)
else
sx_assert((struct sx *)arg, SX_UNLOCKED);
}
#else
static void
knlist_sx_assert_locked(void *arg)
{
sx_assert((struct sx *)arg, SX_LOCKED);
}
static void
knlist_sx_assert_unlocked(void *arg)
{
sx_assert((struct sx *)arg, SX_UNLOCKED);
}
#endif
void
knlist_init_sx(struct knlist *knl, struct sx *lock)
{
#if __FreeBSD_version >= 1300128
knlist_init(knl, lock, knlist_sx_xlock, knlist_sx_xunlock,
knlist_sx_assert_lock);
#else
knlist_init(knl, lock, knlist_sx_xlock, knlist_sx_xunlock,
knlist_sx_assert_locked, knlist_sx_assert_unlocked);
#endif
}

View File

@ -345,11 +345,7 @@ FEATURE(zfs, "OpenZFS support");
DECLARE_MODULE(zfsctrl, zfs_mod, SI_SUB_CLOCKS, SI_ORDER_ANY);
MODULE_VERSION(zfsctrl, 1);
#if __FreeBSD_version > 1300092
MODULE_DEPEND(zfsctrl, xdr, 1, 1, 1);
#else
MODULE_DEPEND(zfsctrl, krpc, 1, 1, 1);
#endif
MODULE_DEPEND(zfsctrl, acl_nfs4, 1, 1, 1);
MODULE_DEPEND(zfsctrl, crypto, 1, 1, 1);
MODULE_DEPEND(zfsctrl, zlib, 1, 1, 1);

View File

@ -379,11 +379,7 @@ vdev_geom_io(struct g_consumer *cp, int *cmds, void **datas, off_t *offsets,
int i, n_bios, j;
size_t bios_size;
#if __FreeBSD_version > 1300130
maxio = maxphys - (maxphys % cp->provider->sectorsize);
#else
maxio = MAXPHYS - (MAXPHYS % cp->provider->sectorsize);
#endif
n_bios = 0;
/* How many bios are required for all commands? */

View File

@ -733,7 +733,7 @@ zfsctl_root_vptocnp(struct vop_vptocnp_args *ap)
if (error != 0)
return (SET_ERROR(error));
VOP_UNLOCK1(dvp);
VOP_UNLOCK(dvp);
*ap->a_vpp = dvp;
*ap->a_buflen -= sizeof (dotzfs_name);
memcpy(ap->a_buf + *ap->a_buflen, dotzfs_name, sizeof (dotzfs_name));
@ -814,12 +814,8 @@ zfsctl_common_getacl(struct vop_getacl_args *ap)
static struct vop_vector zfsctl_ops_root = {
.vop_default = &default_vnodeops,
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = VOP_EAGAIN,
#endif
.vop_open = zfsctl_common_open,
.vop_close = zfsctl_common_close,
.vop_ioctl = VOP_EINVAL,
@ -1146,12 +1142,8 @@ zfsctl_snapdir_getattr(struct vop_getattr_args *ap)
static struct vop_vector zfsctl_ops_snapdir = {
.vop_default = &default_vnodeops,
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = VOP_EAGAIN,
#endif
.vop_open = zfsctl_common_open,
.vop_close = zfsctl_common_close,
.vop_getattr = zfsctl_snapdir_getattr,
@ -1226,27 +1218,19 @@ zfsctl_snapshot_vptocnp(struct vop_vptocnp_args *ap)
* before we can lock the vnode again.
*/
locked = VOP_ISLOCKED(vp);
#if __FreeBSD_version >= 1300045
enum vgetstate vs = vget_prep(vp);
#else
vhold(vp);
#endif
vput(vp);
/* Look up .zfs/snapshot, our parent. */
error = zfsctl_snapdir_vnode(vp->v_mount, NULL, LK_SHARED, &dvp);
if (error == 0) {
VOP_UNLOCK1(dvp);
VOP_UNLOCK(dvp);
*ap->a_vpp = dvp;
*ap->a_buflen -= len;
memcpy(ap->a_buf + *ap->a_buflen, node->sn_name, len);
}
vfs_unbusy(mp);
#if __FreeBSD_version >= 1300045
vget_finish(vp, locked | LK_RETRY, vs);
#else
vget(vp, locked | LK_VNHELD | LK_RETRY, curthread);
#endif
return (error);
}
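
The vptocnp handler above cannot hold the snapshot vnode's lock while it looks up its .zfs/snapshot parent, so it records the lock mode, converts its reference with vget_prep(), drops everything with vput(), and reacquires the vnode afterwards with vget_finish(). Below is a minimal sketch of that FreeBSD 13+ pattern; relock_example() and do_unlocked_work() are hypothetical placeholders, the caller is assumed to hold vp locked and referenced, and the usual kernel vnode headers are assumed to be in scope.

static void
do_unlocked_work(void)
{
	/* Placeholder: e.g. look up and lock an unrelated vnode. */
}

/*
 * Sketch: temporarily give up a locked, referenced vnode, do work that
 * must not hold the vnode lock, then take the vnode back in its
 * original lock mode.
 */
static void
relock_example(struct vnode *vp)
{
	int locked;

	locked = VOP_ISLOCKED(vp);		/* remember the lock mode */
	enum vgetstate vs = vget_prep(vp);	/* keep a reference across vput() */
	vput(vp);				/* drop the lock and old reference */

	do_unlocked_work();

	/* Get the vnode back, restoring the saved lock mode. */
	vget_finish(vp, locked | LK_RETRY, vs);
}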
@ -1256,18 +1240,12 @@ zfsctl_snapshot_vptocnp(struct vop_vptocnp_args *ap)
*/
static struct vop_vector zfsctl_ops_snapshot = {
.vop_default = NULL, /* ensure very restricted access */
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = VOP_EAGAIN,
#endif
.vop_open = zfsctl_common_open,
.vop_close = zfsctl_common_close,
.vop_inactive = zfsctl_snapshot_inactive,
#if __FreeBSD_version >= 1300045
.vop_need_inactive = vop_stdneed_inactive,
#endif
.vop_need_inactive = vop_stdneed_inactive,
.vop_reclaim = zfsctl_snapshot_reclaim,
.vop_vptocnp = zfsctl_snapshot_vptocnp,
.vop_lock1 = vop_stdlock,

View File

@ -824,7 +824,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xvpp, cred_t *cr)
return (SET_ERROR(EDQUOT));
}
getnewvnode_reserve_();
getnewvnode_reserve();
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
@ -926,7 +926,7 @@ top:
goto top;
}
if (error == 0)
VOP_UNLOCK1(ZTOV(*xzpp));
VOP_UNLOCK(ZTOV(*xzpp));
return (error);
}

View File

@ -270,7 +270,7 @@ zfs_vop_fsync(vnode_t *vp)
goto drop;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_FSYNC(vp, MNT_WAIT, curthread);
VOP_UNLOCK1(vp);
VOP_UNLOCK(vp);
vn_finished_write(mp);
drop:
return (SET_ERROR(error));
@ -330,14 +330,6 @@ zfs_file_unlink(const char *fnamep)
zfs_uio_seg_t seg = UIO_SYSSPACE;
int rc;
#if __FreeBSD_version >= 1300018
rc = kern_funlinkat(curthread, AT_FDCWD, fnamep, FD_NONE, seg, 0, 0);
#elif __FreeBSD_version >= 1202504 || defined(AT_BENEATH)
rc = kern_unlinkat(curthread, AT_FDCWD, __DECONST(char *, fnamep),
seg, 0, 0);
#else
rc = kern_unlinkat(curthread, AT_FDCWD, __DECONST(char *, fnamep),
seg, 0);
#endif
return (SET_ERROR(rc));
}

View File

@ -36,10 +36,6 @@
#include <sys/zfs_ioctl_impl.h>
#if __FreeBSD_version < 1201517
#define vm_page_max_user_wired vm_page_max_wired
#endif
int
zfs_vfs_ref(zfsvfs_t **zfvp)
{

View File

@ -126,25 +126,16 @@ static int zfs_root(vfs_t *vfsp, int flags, vnode_t **vpp);
static int zfs_statfs(vfs_t *vfsp, struct statfs *statp);
static int zfs_vget(vfs_t *vfsp, ino_t ino, int flags, vnode_t **vpp);
static int zfs_sync(vfs_t *vfsp, int waitfor);
#if __FreeBSD_version >= 1300098
static int zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, uint64_t *extflagsp,
struct ucred **credanonp, int *numsecflavors, int *secflavors);
#else
static int zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, int *extflagsp,
struct ucred **credanonp, int *numsecflavors, int **secflavors);
#endif
static int zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, int flags, vnode_t **vpp);
static void zfs_freevfs(vfs_t *vfsp);
struct vfsops zfs_vfsops = {
.vfs_mount = zfs_mount,
.vfs_unmount = zfs_umount,
#if __FreeBSD_version >= 1300049
.vfs_root = vfs_cache_root,
.vfs_cachedroot = zfs_root,
#else
.vfs_root = zfs_root,
#endif
.vfs_cachedroot = zfs_root,
.vfs_statfs = zfs_statfs,
.vfs_vget = zfs_vget,
.vfs_sync = zfs_sync,
@ -1357,16 +1348,16 @@ zfs_mount(vfs_t *vfsp)
vn_lock(mvp, LK_SHARED | LK_RETRY);
if (VOP_GETATTR(mvp, &vattr, cr)) {
VOP_UNLOCK1(mvp);
VOP_UNLOCK(mvp);
goto out;
}
if (secpolicy_vnode_owner(mvp, cr, vattr.va_uid) != 0 &&
VOP_ACCESS(mvp, VWRITE, cr, td) != 0) {
VOP_UNLOCK1(mvp);
VOP_UNLOCK(mvp);
goto out;
}
VOP_UNLOCK1(mvp);
VOP_UNLOCK(mvp);
}
secpolicy_fs_mount_clearopts(cr, vfsp);
@ -1578,11 +1569,7 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
* 'z_parent' is self referential for non-snapshots.
*/
#ifdef FREEBSD_NAMECACHE
#if __FreeBSD_version >= 1300117
cache_purgevfs(zfsvfs->z_parent->z_vfs);
#else
cache_purgevfs(zfsvfs->z_parent->z_vfs, true);
#endif
#endif
}
@ -1649,9 +1636,18 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
zfs_unregister_callbacks(zfsvfs);
/*
* Evict cached data
* Evict cached data. We must write out any dirty data before
* disowning the dataset.
*/
if (!zfs_is_readonly(zfsvfs))
objset_t *os = zfsvfs->z_os;
boolean_t os_dirty = B_FALSE;
for (int t = 0; t < TXG_SIZE; t++) {
if (dmu_objset_is_dirty(os, t)) {
os_dirty = B_TRUE;
break;
}
}
if (!zfs_is_readonly(zfsvfs) && os_dirty)
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
dmu_objset_evict_dbufs(zfsvfs->z_os);
dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
@ -1775,13 +1771,8 @@ zfs_vget(vfs_t *vfsp, ino_t ino, int flags, vnode_t **vpp)
}
static int
#if __FreeBSD_version >= 1300098
zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, uint64_t *extflagsp,
struct ucred **credanonp, int *numsecflavors, int *secflavors)
#else
zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, int *extflagsp,
struct ucred **credanonp, int *numsecflavors, int **secflavors)
#endif
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
@ -2070,10 +2061,8 @@ zfs_vnodes_adjust_back(void)
#endif
}
#if __FreeBSD_version >= 1300139
static struct sx zfs_vnlru_lock;
static struct vnode *zfs_vnlru_marker;
#endif
static arc_prune_t *zfs_prune;
static void
@ -2081,13 +2070,9 @@ zfs_prune_task(uint64_t nr_to_scan, void *arg __unused)
{
if (nr_to_scan > INT_MAX)
nr_to_scan = INT_MAX;
#if __FreeBSD_version >= 1300139
sx_xlock(&zfs_vnlru_lock);
vnlru_free_vfsops(nr_to_scan, &zfs_vfsops, zfs_vnlru_marker);
sx_xunlock(&zfs_vnlru_lock);
#else
vnlru_free(nr_to_scan, &zfs_vfsops);
#endif
}
void
@ -2117,10 +2102,8 @@ zfs_init(void)
zfsvfs_taskq = taskq_create("zfsvfs", 1, minclsyspri, 0, 0, 0);
#if __FreeBSD_version >= 1300139
zfs_vnlru_marker = vnlru_alloc_marker();
sx_init(&zfs_vnlru_lock, "zfs vnlru lock");
#endif
zfs_prune = arc_add_prune_callback(zfs_prune_task, NULL);
}
@ -2128,10 +2111,8 @@ void
zfs_fini(void)
{
arc_remove_prune_callback(zfs_prune);
#if __FreeBSD_version >= 1300139
vnlru_free_marker(zfs_vnlru_marker);
sx_destroy(&zfs_vnlru_lock);
#endif
taskq_destroy(zfsvfs_taskq);
zfsctl_fini();

View File

@ -39,9 +39,7 @@
#include <sys/endian.h>
#include <sys/vm.h>
#include <sys/vnode.h>
#if __FreeBSD_version >= 1300102
#include <sys/smr.h>
#endif
#include <sys/dirent.h>
#include <sys/file.h>
#include <sys/stat.h>
@ -100,18 +98,6 @@
VFS_SMR_DECLARE;
#if __FreeBSD_version < 1300103
#define NDFREE_PNBUF(ndp) NDFREE((ndp), NDF_ONLY_PNBUF)
#endif
#if __FreeBSD_version >= 1300047
#define vm_page_wire_lock(pp)
#define vm_page_wire_unlock(pp)
#else
#define vm_page_wire_lock(pp) vm_page_lock(pp)
#define vm_page_wire_unlock(pp) vm_page_unlock(pp)
#endif
#ifdef DEBUG_VFS_LOCKS
#define VNCHECKREF(vp) \
VNASSERT((vp)->v_holdcnt > 0 && (vp)->v_usecount > 0, vp, \
@ -338,39 +324,6 @@ page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
nbytes = end - off;
obj = vp->v_object;
zfs_vmobject_assert_wlocked_12(obj);
#if __FreeBSD_version < 1300050
for (;;) {
if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
pp->valid) {
if (vm_page_xbusied(pp)) {
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
vm_page_reference(pp);
vm_page_lock(pp);
zfs_vmobject_wunlock(obj);
vm_page_busy_sleep(pp, "zfsmwb", true);
zfs_vmobject_wlock(obj);
continue;
}
vm_page_sbusy(pp);
} else if (pp != NULL) {
ASSERT(!pp->valid);
pp = NULL;
}
if (pp != NULL) {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_object_pip_add(obj, 1);
pmap_remove_write(pp);
if (nbytes != 0)
vm_page_clear_dirty(pp, off, nbytes);
}
break;
}
#else
vm_page_grab_valid_unlocked(&pp, obj, OFF_TO_IDX(start),
VM_ALLOC_NOCREAT | VM_ALLOC_SBUSY | VM_ALLOC_NORMAL |
VM_ALLOC_IGN_SBUSY);
@ -381,7 +334,6 @@ page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
if (nbytes != 0)
vm_page_clear_dirty(pp, off, nbytes);
}
#endif
return (pp);
}
@ -390,14 +342,9 @@ page_unbusy(vm_page_t pp)
{
vm_page_sunbusy(pp);
#if __FreeBSD_version >= 1300041
vm_object_pip_wakeup(pp->object);
#else
vm_object_pip_subtract(pp->object, 1);
#endif
}
#if __FreeBSD_version > 1300051
static vm_page_t
page_hold(vnode_t *vp, int64_t start)
{
@ -410,57 +357,11 @@ page_hold(vnode_t *vp, int64_t start)
VM_ALLOC_NOBUSY);
return (m);
}
#else
static vm_page_t
page_hold(vnode_t *vp, int64_t start)
{
vm_object_t obj;
vm_page_t pp;
obj = vp->v_object;
zfs_vmobject_assert_wlocked(obj);
for (;;) {
if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
pp->valid) {
if (vm_page_xbusied(pp)) {
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
vm_page_reference(pp);
vm_page_lock(pp);
zfs_vmobject_wunlock(obj);
vm_page_busy_sleep(pp, "zfsmwb", true);
zfs_vmobject_wlock(obj);
continue;
}
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_page_wire_lock(pp);
vm_page_hold(pp);
vm_page_wire_unlock(pp);
} else
pp = NULL;
break;
}
return (pp);
}
#endif
static void
page_unhold(vm_page_t pp)
{
vm_page_wire_lock(pp);
#if __FreeBSD_version >= 1300035
vm_page_unwire(pp, PQ_ACTIVE);
#else
vm_page_unhold(pp);
#endif
vm_page_wire_unlock(pp);
}
/*
@ -484,34 +385,22 @@ update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
ASSERT3P(obj, !=, NULL);
off = start & PAGEOFFSET;
zfs_vmobject_wlock_12(obj);
#if __FreeBSD_version >= 1300041
vm_object_pip_add(obj, 1);
#endif
for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp;
int nbytes = imin(PAGESIZE - off, len);
if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
(void) dmu_read(os, zp->z_id, start + off, nbytes,
va + off, DMU_READ_PREFETCH);
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
page_unbusy(pp);
}
len -= nbytes;
off = 0;
}
#if __FreeBSD_version >= 1300041
vm_object_pip_wakeup(obj);
#else
vm_object_pip_wakeupn(obj, 0);
#endif
zfs_vmobject_wunlock_12(obj);
}
/*
@ -542,26 +431,22 @@ mappedread_sf(znode_t *zp, int nbytes, zfs_uio_t *uio)
ASSERT3P(obj, !=, NULL);
ASSERT0(zfs_uio_offset(uio) & PAGEOFFSET);
zfs_vmobject_wlock_12(obj);
for (start = zfs_uio_offset(uio); len > 0; start += PAGESIZE) {
int bytes = MIN(PAGESIZE, len);
pp = vm_page_grab_unlocked(obj, OFF_TO_IDX(start),
VM_ALLOC_SBUSY | VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
if (vm_page_none_valid(pp)) {
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
error = dmu_read(os, zp->z_id, start, bytes, va,
DMU_READ_PREFETCH);
if (bytes != PAGESIZE && error == 0)
memset(va + bytes, 0, PAGESIZE - bytes);
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
#if __FreeBSD_version >= 1300081
if (error == 0) {
vm_page_valid(pp);
vm_page_activate(pp);
vm_page_do_sunbusy(pp);
vm_page_sunbusy(pp);
} else {
zfs_vmobject_wlock(obj);
if (!vm_page_wired(pp) && pp->valid == 0 &&
@ -571,29 +456,15 @@ mappedread_sf(znode_t *zp, int nbytes, zfs_uio_t *uio)
vm_page_sunbusy(pp);
zfs_vmobject_wunlock(obj);
}
#else
vm_page_do_sunbusy(pp);
vm_page_lock(pp);
if (error) {
if (pp->wire_count == 0 && pp->valid == 0 &&
!vm_page_busied(pp))
vm_page_free(pp);
} else {
pp->valid = VM_PAGE_BITS_ALL;
vm_page_activate(pp);
}
vm_page_unlock(pp);
#endif
} else {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_page_do_sunbusy(pp);
vm_page_sunbusy(pp);
}
if (error)
break;
zfs_uio_advance(uio, bytes);
len -= bytes;
}
zfs_vmobject_wunlock_12(obj);
return (error);
}
@ -623,7 +494,6 @@ mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
start = zfs_uio_offset(uio);
off = start & PAGEOFFSET;
zfs_vmobject_wlock_12(obj);
for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp;
uint64_t bytes = MIN(PAGESIZE - off, len);
@ -632,25 +502,20 @@ mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
struct sf_buf *sf;
caddr_t va;
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
error = vn_io_fault_uiomove(va + off, bytes,
GET_UIO_STRUCT(uio));
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
page_unhold(pp);
} else {
zfs_vmobject_wunlock_12(obj);
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, bytes);
zfs_vmobject_wlock_12(obj);
}
len -= bytes;
off = 0;
if (error)
break;
}
zfs_vmobject_wunlock_12(obj);
return (error);
}
@ -786,9 +651,7 @@ zfs_lookup(vnode_t *dvp, const char *nm, vnode_t **vpp,
znode_t *zdp = VTOZ(dvp);
znode_t *zp;
zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
#if __FreeBSD_version > 1300124
seqc_t dvp_seqc;
#endif
int error = 0;
/*
@ -814,9 +677,7 @@ zfs_lookup(vnode_t *dvp, const char *nm, vnode_t **vpp,
if ((error = zfs_enter_verify_zp(zfsvfs, zdp, FTAG)) != 0)
return (error);
#if __FreeBSD_version > 1300124
dvp_seqc = vn_seqc_read_notmodify(dvp);
#endif
*vpp = NULL;
@ -895,7 +756,7 @@ zfs_lookup(vnode_t *dvp, const char *nm, vnode_t **vpp,
zfs_exit(zfsvfs, FTAG);
ltype = VOP_ISLOCKED(dvp);
VOP_UNLOCK1(dvp);
VOP_UNLOCK(dvp);
error = zfsctl_root(zfsvfs->z_parent, LK_SHARED,
&zfsctl_vp);
if (error == 0) {
@ -996,7 +857,6 @@ zfs_lookup(vnode_t *dvp, const char *nm, vnode_t **vpp,
}
}
#if __FreeBSD_version > 1300124
if ((cnp->cn_flags & ISDOTDOT) != 0) {
/*
* FIXME: zfs_lookup_lock relocks vnodes and does nothing to
@ -1014,7 +874,6 @@ zfs_lookup(vnode_t *dvp, const char *nm, vnode_t **vpp,
cnp->cn_flags &= ~MAKEENTRY;
}
}
#endif
/* Insert name into cache (as non-existent) if appropriate. */
if (zfsvfs->z_use_namecache && !zfsvfs->z_replay &&
@ -1149,7 +1008,7 @@ zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl, int mode,
goto out;
}
getnewvnode_reserve_();
getnewvnode_reserve();
tx = dmu_tx_create(os);
@ -1183,7 +1042,7 @@ zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl, int mode,
* delete the newly created dnode.
*/
zfs_znode_delete(zp, tx);
VOP_UNLOCK1(ZTOV(zp));
VOP_UNLOCK(ZTOV(zp));
zrele(zp);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
@ -1512,7 +1371,7 @@ zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, znode_t **zpp,
/*
* Add a new entry to the directory.
*/
getnewvnode_reserve_();
getnewvnode_reserve();
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
@ -1547,7 +1406,7 @@ zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, znode_t **zpp,
error = zfs_link_create(dzp, dirname, zp, tx, ZNEW);
if (error != 0) {
zfs_znode_delete(zp, tx);
VOP_UNLOCK1(ZTOV(zp));
VOP_UNLOCK(ZTOV(zp));
zrele(zp);
goto out;
}
@ -1575,16 +1434,6 @@ out:
return (error);
}
#if __FreeBSD_version < 1300124
static void
cache_vop_rmdir(struct vnode *dvp, struct vnode *vp)
{
cache_purge(dvp);
cache_purge(vp);
}
#endif
/*
* Remove a directory subdir entry. If the current working
* directory is the same as the subdir to be removed, the
@ -2984,9 +2833,9 @@ zfs_rename_relock(struct vnode *sdvp, struct vnode **svpp,
znode_t *sdzp, *tdzp, *szp, *tzp;
int error;
VOP_UNLOCK1(tdvp);
VOP_UNLOCK(tdvp);
if (*tvpp != NULL && *tvpp != tdvp)
VOP_UNLOCK1(*tvpp);
VOP_UNLOCK(*tvpp);
relock:
error = vn_lock(sdvp, LK_EXCLUSIVE);
@ -2994,13 +2843,13 @@ relock:
goto out;
error = vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
VOP_UNLOCK1(sdvp);
VOP_UNLOCK(sdvp);
if (error != EBUSY)
goto out;
error = vn_lock(tdvp, LK_EXCLUSIVE);
if (error)
goto out;
VOP_UNLOCK1(tdvp);
VOP_UNLOCK(tdvp);
goto relock;
}
tdzp = VTOZ(tdvp);
@ -3008,8 +2857,8 @@ relock:
error = zfs_rename_relock_lookup(sdzp, scnp, &szp, tdzp, tcnp, &tzp);
if (error != 0) {
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
VOP_UNLOCK(sdvp);
VOP_UNLOCK(tdvp);
goto out;
}
svp = ZTOV(szp);
@ -3021,8 +2870,8 @@ relock:
nvp = svp;
error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
VOP_UNLOCK(sdvp);
VOP_UNLOCK(tdvp);
if (tvp != NULL)
vrele(tvp);
if (error != EBUSY) {
@ -3034,7 +2883,7 @@ relock:
vrele(nvp);
goto out;
}
VOP_UNLOCK1(nvp);
VOP_UNLOCK(nvp);
/*
* Concurrent rename race.
* XXX ?
@ -3058,9 +2907,9 @@ relock:
nvp = tvp;
error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
VOP_UNLOCK1(*svpp);
VOP_UNLOCK(sdvp);
VOP_UNLOCK(tdvp);
VOP_UNLOCK(*svpp);
if (error != EBUSY) {
vrele(nvp);
goto out;
@ -3137,19 +2986,6 @@ zfs_rename_check(znode_t *szp, znode_t *sdzp, znode_t *tdzp)
return (error);
}
#if __FreeBSD_version < 1300124
static void
cache_vop_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp,
struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp)
{
cache_purge(fvp);
if (tvp != NULL)
cache_purge(tvp);
cache_purge_negative(tdvp);
}
#endif
static int
zfs_do_rename_impl(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
@ -3205,13 +3041,13 @@ zfs_do_rename(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
}
error = zfs_do_rename_impl(sdvp, svpp, scnp, tdvp, tvpp, tcnp, cr);
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(*svpp);
VOP_UNLOCK(sdvp);
VOP_UNLOCK(*svpp);
out:
if (*tvpp != NULL)
VOP_UNLOCK1(*tvpp);
VOP_UNLOCK(*tvpp);
if (tdvp != *tvpp)
VOP_UNLOCK1(tdvp);
VOP_UNLOCK(tdvp);
return (error);
}
@ -3463,17 +3299,17 @@ zfs_rename(znode_t *sdzp, const char *sname, znode_t *tdzp, const char *tname,
tdvp = ZTOV(tdzp);
error = zfs_lookup_internal(sdzp, sname, &svp, &scn, DELETE);
if (sdzp->z_zfsvfs->z_replay == B_FALSE)
VOP_UNLOCK1(sdvp);
VOP_UNLOCK(sdvp);
if (error != 0)
goto fail;
VOP_UNLOCK1(svp);
VOP_UNLOCK(svp);
vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY);
error = zfs_lookup_internal(tdzp, tname, &tvp, &tcn, RENAME);
if (error == EJUSTRETURN)
tvp = NULL;
else if (error != 0) {
VOP_UNLOCK1(tdvp);
VOP_UNLOCK(tdvp);
goto fail;
}
@ -3564,7 +3400,7 @@ zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
return (SET_ERROR(EDQUOT));
}
getnewvnode_reserve_();
getnewvnode_reserve();
tx = dmu_tx_create(zfsvfs->z_os);
fuid_dirtied = zfsvfs->z_fuid_dirty;
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
@ -3611,7 +3447,7 @@ zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
error = zfs_link_create(dzp, name, zp, tx, ZNEW);
if (error != 0) {
zfs_znode_delete(zp, tx);
VOP_UNLOCK1(ZTOV(zp));
VOP_UNLOCK(ZTOV(zp));
zrele(zp);
} else {
zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
@ -4472,7 +4308,6 @@ zfs_freebsd_write(struct vop_write_args *ap)
ap->a_cred));
}
#if __FreeBSD_version >= 1300102
/*
* VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
* the comment above cache_fplookup for details.
@ -4497,9 +4332,7 @@ zfs_freebsd_fplookup_vexec(struct vop_fplookup_vexec_args *v)
return (EAGAIN);
return (0);
}
#endif
#if __FreeBSD_version >= 1300139
static int
zfs_freebsd_fplookup_symlink(struct vop_fplookup_symlink_args *v)
{
@ -4519,7 +4352,6 @@ zfs_freebsd_fplookup_symlink(struct vop_fplookup_symlink_args *v)
}
return (cache_symlink_resolve(v->a_fpl, target, strlen(target)));
}
#endif
#ifndef _SYS_SYSPROTO_H_
struct vop_access_args {
@ -4557,13 +4389,8 @@ zfs_freebsd_access(struct vop_access_args *ap)
if (error == 0) {
accmode = ap->a_accmode & ~(VREAD|VWRITE|VEXEC|VAPPEND);
if (accmode != 0) {
#if __FreeBSD_version >= 1300105
error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
zp->z_gid, accmode, ap->a_cred);
#else
error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
zp->z_gid, accmode, ap->a_cred, NULL);
#endif
}
}
@ -4898,7 +4725,7 @@ zfs_freebsd_setattr(struct vop_setattr_args *ap)
* otherwise, they behave like unprivileged processes.
*/
if (secpolicy_fs_owner(vp->v_mount, cred) == 0 ||
spl_priv_check_cred(cred, PRIV_VFS_SYSFLAGS) == 0) {
priv_check_cred(cred, PRIV_VFS_SYSFLAGS) == 0) {
if (zflags &
(ZFS_IMMUTABLE | ZFS_APPENDONLY | ZFS_NOUNLINK)) {
error = securelevel_gt(cred, 0);
@ -5017,10 +4844,8 @@ zfs_freebsd_symlink(struct vop_symlink_args *ap)
struct componentname *cnp = ap->a_cnp;
vattr_t *vap = ap->a_vap;
znode_t *zp = NULL;
#if __FreeBSD_version >= 1300139
char *symlink;
size_t symlink_len;
#endif
int rc;
#if __FreeBSD_version < 1400068
@ -5036,7 +4861,6 @@ zfs_freebsd_symlink(struct vop_symlink_args *ap)
if (rc == 0) {
*ap->a_vpp = ZTOV(zp);
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
#if __FreeBSD_version >= 1300139
MPASS(zp->z_cached_symlink == NULL);
symlink_len = strlen(ap->a_target);
symlink = cache_symlink_alloc(symlink_len + 1, M_WAITOK);
@ -5046,7 +4870,6 @@ zfs_freebsd_symlink(struct vop_symlink_args *ap)
atomic_store_rel_ptr((uintptr_t *)&zp->z_cached_symlink,
(uintptr_t)symlink);
}
#endif
}
return (rc);
}
@ -5064,15 +4887,12 @@ zfs_freebsd_readlink(struct vop_readlink_args *ap)
{
zfs_uio_t uio;
int error;
#if __FreeBSD_version >= 1300139
znode_t *zp = VTOZ(ap->a_vp);
char *symlink, *base;
size_t symlink_len;
bool trycache;
#endif
zfs_uio_init(&uio, ap->a_uio);
#if __FreeBSD_version >= 1300139
trycache = false;
if (zfs_uio_segflg(&uio) == UIO_SYSSPACE &&
zfs_uio_iovcnt(&uio) == 1) {
@ -5080,9 +4900,7 @@ zfs_freebsd_readlink(struct vop_readlink_args *ap)
symlink_len = zfs_uio_iovlen(&uio, 0);
trycache = true;
}
#endif
error = zfs_readlink(ap->a_vp, &uio, ap->a_cred, NULL);
#if __FreeBSD_version >= 1300139
if (atomic_load_ptr(&zp->z_cached_symlink) != NULL ||
error != 0 || !trycache) {
return (error);
@ -5097,7 +4915,6 @@ zfs_freebsd_readlink(struct vop_readlink_args *ap)
cache_symlink_free(symlink, symlink_len + 1);
}
}
#endif
return (error);
}
@ -5139,15 +4956,10 @@ zfs_freebsd_inactive(struct vop_inactive_args *ap)
{
vnode_t *vp = ap->a_vp;
#if __FreeBSD_version >= 1300123
zfs_inactive(vp, curthread->td_ucred, NULL);
#else
zfs_inactive(vp, ap->a_td->td_ucred, NULL);
#endif
return (0);
}
#if __FreeBSD_version >= 1300042
#ifndef _SYS_SYSPROTO_H_
struct vop_need_inactive_args {
struct vnode *a_vp;
@ -5173,7 +4985,6 @@ zfs_freebsd_need_inactive(struct vop_need_inactive_args *ap)
return (need);
}
#endif
#ifndef _SYS_SYSPROTO_H_
struct vop_reclaim_args {
@ -5191,10 +5002,6 @@ zfs_freebsd_reclaim(struct vop_reclaim_args *ap)
ASSERT3P(zp, !=, NULL);
#if __FreeBSD_version < 1300042
/* Destroy the vm object and flush associated pages. */
vnode_destroy_vobject(vp);
#endif
/*
* z_teardown_inactive_lock protects from a race with
* zfs_znode_dmu_fini in zfsvfs_teardown during
@ -5405,7 +5212,7 @@ zfs_getextattr_dir(struct vop_getextattr_args *ap, const char *attrname)
} else if (ap->a_uio != NULL)
error = VOP_READ(vp, ap->a_uio, IO_UNIT, ap->a_cred);
VOP_UNLOCK1(vp);
VOP_UNLOCK(vp);
vn_close(vp, flags, ap->a_cred, td);
return (error);
}
@ -5692,7 +5499,7 @@ zfs_setextattr_dir(struct vop_setextattr_args *ap, const char *attrname)
if (error == 0)
VOP_WRITE(vp, ap->a_uio, IO_UNIT, ap->a_cred);
VOP_UNLOCK1(vp);
VOP_UNLOCK(vp);
vn_close(vp, flags, ap->a_cred, td);
return (error);
}
@ -6175,26 +5982,13 @@ zfs_vptocnp(struct vop_vptocnp_args *ap)
zfs_exit(zfsvfs, FTAG);
covered_vp = vp->v_mount->mnt_vnodecovered;
#if __FreeBSD_version >= 1300045
enum vgetstate vs = vget_prep(covered_vp);
#else
vhold(covered_vp);
#endif
ltype = VOP_ISLOCKED(vp);
VOP_UNLOCK1(vp);
#if __FreeBSD_version >= 1300045
VOP_UNLOCK(vp);
error = vget_finish(covered_vp, LK_SHARED, vs);
#else
error = vget(covered_vp, LK_SHARED | LK_VNHELD, curthread);
#endif
if (error == 0) {
#if __FreeBSD_version >= 1300123
error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_buf,
ap->a_buflen);
#else
error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_cred,
ap->a_buf, ap->a_buflen);
#endif
vput(covered_vp);
}
vn_lock(vp, ltype | LK_RETRY);
@ -6252,7 +6046,6 @@ zfs_deallocate(struct vop_deallocate_args *ap)
}
#endif
#if __FreeBSD_version >= 1300039
#ifndef _SYS_SYSPROTO_H_
struct vop_copy_file_range_args {
struct vnode *a_invp;
@ -6279,7 +6072,6 @@ zfs_freebsd_copy_file_range(struct vop_copy_file_range_args *ap)
struct vnode *invp = ap->a_invp;
struct vnode *outvp = ap->a_outvp;
struct mount *mp;
struct uio io;
int error;
uint64_t len = *ap->a_lenp;
@ -6327,12 +6119,6 @@ zfs_freebsd_copy_file_range(struct vop_copy_file_range_args *ap)
goto out_locked;
#endif
io.uio_offset = *ap->a_outoffp;
io.uio_resid = *ap->a_lenp;
error = vn_rlimit_fsize(outvp, &io, ap->a_fsizetd);
if (error != 0)
goto out_locked;
error = zfs_clone_range(VTOZ(invp), ap->a_inoffp, VTOZ(outvp),
ap->a_outoffp, &len, ap->a_outcred);
if (error == EXDEV || error == EAGAIN || error == EINVAL ||
@ -6359,7 +6145,6 @@ bad_write_fallback:
ap->a_incred, ap->a_outcred, ap->a_fsizetd);
return (error);
}
#endif
struct vop_vector zfs_vnodeops;
struct vop_vector zfs_fifoops;
@ -6368,16 +6153,10 @@ struct vop_vector zfs_shareops;
struct vop_vector zfs_vnodeops = {
.vop_default = &default_vnodeops,
.vop_inactive = zfs_freebsd_inactive,
#if __FreeBSD_version >= 1300042
.vop_need_inactive = zfs_freebsd_need_inactive,
#endif
.vop_reclaim = zfs_freebsd_reclaim,
#if __FreeBSD_version >= 1300102
.vop_fplookup_vexec = zfs_freebsd_fplookup_vexec,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = zfs_freebsd_fplookup_symlink,
#endif
.vop_access = zfs_freebsd_access,
.vop_allocate = VOP_EINVAL,
#if __FreeBSD_version >= 1400032
@ -6416,29 +6195,21 @@ struct vop_vector zfs_vnodeops = {
.vop_getpages = zfs_freebsd_getpages,
.vop_putpages = zfs_freebsd_putpages,
.vop_vptocnp = zfs_vptocnp,
#if __FreeBSD_version >= 1300064
.vop_lock1 = vop_lock,
.vop_unlock = vop_unlock,
.vop_islocked = vop_islocked,
#endif
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
#if __FreeBSD_version >= 1300039
.vop_copy_file_range = zfs_freebsd_copy_file_range,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfs_vnodeops);
struct vop_vector zfs_fifoops = {
.vop_default = &fifo_specops,
.vop_fsync = zfs_freebsd_fsync,
#if __FreeBSD_version >= 1300102
.vop_fplookup_vexec = zfs_freebsd_fplookup_vexec,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_vexec = zfs_freebsd_fplookup_vexec,
.vop_fplookup_symlink = zfs_freebsd_fplookup_symlink,
#endif
.vop_access = zfs_freebsd_access,
.vop_getattr = zfs_freebsd_getattr,
.vop_inactive = zfs_freebsd_inactive,
@ -6462,12 +6233,8 @@ VFS_VOP_VECTOR_REGISTER(zfs_fifoops);
*/
struct vop_vector zfs_shareops = {
.vop_default = &default_vnodeops,
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = VOP_EAGAIN,
#endif
.vop_access = zfs_freebsd_access,
.vop_inactive = zfs_freebsd_inactive,
.vop_reclaim = zfs_freebsd_reclaim,

Some files were not shown because too many files have changed in this diff.