Merge commit 'refs/top-bases/linux-kernel-device' into linux-kernel-device
commit ac23e26b6c
@@ -734,13 +734,19 @@ dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
 static void
 dump_all_ddts(spa_t *spa)
 {
-	ddt_histogram_t ddh_total = { 0 };
-	ddt_stat_t dds_total = { 0 };
+	ddt_histogram_t ddh_total;
+	ddt_stat_t dds_total;
+	enum zio_checksum c;
+	enum ddt_type type;
+	enum ddt_class class;
 
-	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+	bzero(&ddh_total, sizeof (ddt_histogram_t));
+	bzero(&dds_total, sizeof (ddt_stat_t));
+
+	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
 		ddt_t *ddt = spa->spa_ddt[c];
-		for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
-			for (enum ddt_class class = 0; class < DDT_CLASSES;
+		for (type = 0; type < DDT_TYPES; type++) {
+			for (class = 0; class < DDT_CLASSES;
 			    class++) {
 				dump_ddt(ddt, type, class);
 			}

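The hunk above shows the change that recurs throughout this merge: loop variables declared inside C99-style for statements are hoisted to the top of the enclosing block, so the code also builds as C89/C90 (where a declaration in a for statement is rejected, e.g. gcc's "'for' loop initial declarations are only allowed in C99 mode"). A minimal standalone sketch of the same transformation; the names are invented for illustration and are not taken from the ZFS sources:

    #include <stdio.h>

    #define N_ITEMS 4

    /* C99-only form this replaces: for (int i = 0; i < N_ITEMS; i++) ... */
    static void
    print_items(const int *items)
    {
            int i;  /* declaration hoisted to the top of the block */

            for (i = 0; i < N_ITEMS; i++)
                    printf("%d\n", items[i]);
    }

    int
    main(void)
    {
            int items[N_ITEMS] = { 1, 2, 3, 4 };

            print_items(items);
            return (0);
    }
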
@@ -2176,7 +2176,7 @@ get_interval_count(int *argcp, char **argv, unsigned long *iv,
     unsigned long *cnt)
 {
 	unsigned long interval = 0, count = 0;
-	int argc = *argcp, errno;
+	int argc = *argcp;
 
 	/*
 	 * Determine if the last argument is an integer or a pool name
@@ -3216,7 +3216,7 @@ print_scan_status(pool_scan_stat_t *ps)
 	 */
 	if (ps->pss_state == DSS_FINISHED) {
 		uint64_t minutes_taken = (end - start) / 60;
-		char *fmt;
+		char *fmt = NULL;
 
 		if (ps->pss_func == POOL_SCAN_SCRUB) {
 			fmt = gettext("scrub repaired %s in %lluh%um with "

@@ -1995,6 +1995,8 @@ ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
 	case ZTEST_IO_SETATTR:
 		(void) ztest_setattr(zd, object);
 		break;
+	default:
+		break;
 	}
 
 	umem_free(data, blocksize);
@@ -5373,7 +5375,7 @@ print_time(hrtime_t t, char *timebuf)
 }
 
 static nvlist_t *
-make_random_props()
+make_random_props(void)
 {
 	nvlist_t *props;
 

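Several hunks in this merge, starting with the one above, add an explicit `default: break;` arm to switch statements over enum values. With -Wall/-Wswitch, gcc warns when a switch on an enum does not mention every enumerator; the empty default arm silences that warning without changing behaviour. A small sketch of the idea with invented names, not from this commit:

    #include <stdio.h>

    enum io_type { IO_READ, IO_WRITE, IO_SETATTR };

    static void
    handle_io(enum io_type t)
    {
            switch (t) {
            case IO_READ:
                    printf("read\n");
                    break;
            case IO_WRITE:
                    printf("write\n");
                    break;
            default:        /* quiets -Wswitch for enumerators not handled here */
                    break;
            }
    }

    int
    main(void)
    {
            handle_io(IO_WRITE);
            return (0);
    }
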
@@ -1235,7 +1235,7 @@ zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
 	if (flags.dedup) {
 		featureflags |= (DMU_BACKUP_FEATURE_DEDUP |
 		    DMU_BACKUP_FEATURE_DEDUPPROPS);
-		if (err = pipe(pipefd)) {
+		if ((err = pipe(pipefd))) {
 			zfs_error_aux(zhp->zfs_hdl, strerror(errno));
 			return (zfs_error(zhp->zfs_hdl, EZFS_PIPEFAILED,
 			    errbuf));
@@ -1243,7 +1243,7 @@ zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
 		dda.outputfd = outfd;
 		dda.inputfd = pipefd[1];
 		dda.dedup_hdl = zhp->zfs_hdl;
-		if (err = pthread_create(&tid, NULL, cksummer, &dda)) {
+		if ((err = pthread_create(&tid, NULL, cksummer, &dda))) {
 			(void) close(pipefd[0]);
 			(void) close(pipefd[1]);
 			zfs_error_aux(zhp->zfs_hdl, strerror(errno));

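The two hunks above (and many later ones) wrap an assignment used directly as a condition in an extra pair of parentheses. gcc's -Wparentheses treats `if (x = f())` as a probable typo for `==`; writing `if ((x = f()))` states that the assignment is intentional while keeping identical semantics. A minimal sketch with hypothetical names, not part of the diff:

    #include <stdio.h>

    static const char *
    next_word(void)
    {
            static const char *words[] = { "alpha", "beta", NULL };
            static int i;

            return (words[i] == NULL ? NULL : words[i++]);
    }

    int
    main(void)
    {
            const char *w;

            /* Double parentheses mark the assignment-in-condition as deliberate. */
            while ((w = next_word()))
                    printf("%s\n", w);
            return (0);
    }
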
@@ -1438,10 +1438,11 @@ arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
 static void
 arc_hdr_destroy(arc_buf_hdr_t *hdr)
 {
+	l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
+
 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
 	ASSERT3P(hdr->b_state, ==, arc_anon);
 	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
-	l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
 
 	if (l2hdr != NULL) {
 		boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);

@@ -58,7 +58,7 @@ bplist_iterate(bplist_t *bpl, bplist_itor_t *func, void *arg, dmu_tx_t *tx)
 	bplist_entry_t *bpe;
 
 	mutex_enter(&bpl->bpl_lock);
-	while (bpe = list_head(&bpl->bpl_list)) {
+	while ((bpe = list_head(&bpl->bpl_list))) {
 		list_remove(&bpl->bpl_list, bpe);
 		mutex_exit(&bpl->bpl_lock);
 		func(arg, &bpe->bpe_blk, tx);

@@ -108,9 +108,9 @@ dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
 {
 	dbuf_hash_table_t *h = &dbuf_hash_table;
 	objset_t *os = dn->dn_objset;
-	uint64_t obj = dn->dn_object;
-	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
-	uint64_t idx = hv & h->hash_table_mask;
+	uint64_t obj;
+	uint64_t hv;
+	uint64_t idx;
 	dmu_buf_impl_t *db;
 
+	obj = dn->dn_object;
@@ -2325,7 +2325,7 @@ dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
 
 #ifdef ZFS_DEBUG
 	if (db->db_blkid == DMU_SPILL_BLKID) {
-		dnode_t *dn = db->db_dnode;
+		ASSERTV(dnode_t *dn = db->db_dnode);
 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
 		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
 		    db->db_blkptr == &dn->dn_phys->dn_spill);
@@ -2404,7 +2404,7 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
 
 #ifdef ZFS_DEBUG
 	if (db->db_blkid == DMU_SPILL_BLKID) {
-		dnode_t *dn = db->db_dnode;
+		ASSERTV(dnode_t *dn = db->db_dnode);
 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
 		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
 		    db->db_blkptr == &dn->dn_phys->dn_spill);

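The dbuf_write_ready/dbuf_write_done hunks above wrap a variable that is only read by ASSERT() in ASSERTV(). In the SPL/ZFS debug headers, ASSERTV(x) keeps the declaration in debug builds and compiles it away otherwise, so non-debug builds do not warn about a variable that is set but never used. A simplified, hedged sketch of how such a macro pair can be defined and used in plain user-space C (the real definitions live in the SPL headers and differ in detail):

    #include <assert.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel ASSERT/ASSERTV macros. */
    #ifdef DEBUG
    #define ASSERT(x)   assert(x)
    #define ASSERTV(x)  x
    #else
    #define ASSERT(x)   ((void)0)
    #define ASSERTV(x)
    #endif

    struct buf { int owner; int len; };

    static void
    buf_check(const struct buf *b)
    {
            ASSERTV(int owner = b->owner);  /* exists only when DEBUG is defined */

            ASSERT(owner >= 0);
            printf("len=%d\n", b->len);
    }

    int
    main(void)
    {
            struct buf b = { 1, 42 };

            buf_check(&b);
            return (0);
    }
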
@@ -446,11 +446,15 @@ ddt_histogram_empty(const ddt_histogram_t *ddh)
 void
 ddt_get_dedup_object_stats(spa_t *spa, ddt_object_t *ddo_total)
 {
+	enum zio_checksum c;
+	enum ddt_type type;
+	enum ddt_class class;
+
 	/* Sum the statistics we cached in ddt_object_sync(). */
-	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
 		ddt_t *ddt = spa->spa_ddt[c];
-		for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
-			for (enum ddt_class class = 0; class < DDT_CLASSES;
+		for (type = 0; type < DDT_TYPES; type++) {
+			for (class = 0; class < DDT_CLASSES;
 			    class++) {
 				ddt_object_t *ddo =
 				    &ddt->ddt_object_stats[type][class];
@@ -474,10 +478,14 @@ ddt_get_dedup_object_stats(spa_t *spa, ddt_object_t *ddo_total)
 void
 ddt_get_dedup_histogram(spa_t *spa, ddt_histogram_t *ddh)
 {
-	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+	enum zio_checksum c;
+	enum ddt_type type;
+	enum ddt_class class;
+
+	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
 		ddt_t *ddt = spa->spa_ddt[c];
-		for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
-			for (enum ddt_class class = 0; class < DDT_CLASSES;
+		for (type = 0; type < DDT_TYPES; type++) {
+			for (class = 0; class < DDT_CLASSES;
 			    class++) {
 				ddt_histogram_add(ddh,
 				    &ddt->ddt_histogram_cache[type][class]);
@@ -650,9 +658,10 @@ ddt_alloc(const ddt_key_t *ddk)
 static void
 ddt_free(ddt_entry_t *dde)
 {
-	ASSERT(!dde->dde_loading);
+	int p;
+
+	ASSERT(!dde->dde_loading);
 
 	for (p = 0; p < DDT_PHYS_TYPES; p++)
 		ASSERT(dde->dde_lead_zio[p] == NULL);
 
@@ -741,6 +750,8 @@ ddt_prefetch(spa_t *spa, const blkptr_t *bp)
 {
 	ddt_t *ddt;
 	ddt_entry_t dde;
+	enum ddt_type type;
+	enum ddt_class class;
 
 	if (!BP_GET_DEDUP(bp))
 		return;
@@ -753,8 +764,8 @@ ddt_prefetch(spa_t *spa, const blkptr_t *bp)
 	ddt = ddt_select(spa, bp);
 	ddt_key_fill(&dde.dde_key, bp);
 
-	for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
-		for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
+	for (type = 0; type < DDT_TYPES; type++) {
+		for (class = 0; class < DDT_CLASSES; class++) {
 			ddt_object_prefetch(ddt, type, class, &dde);
 		}
 	}
@@ -812,15 +823,20 @@ ddt_table_free(ddt_t *ddt)
 void
 ddt_create(spa_t *spa)
 {
+	enum zio_checksum c;
+
 	spa->spa_dedup_checksum = ZIO_DEDUPCHECKSUM;
 
-	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++)
+	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++)
 		spa->spa_ddt[c] = ddt_table_alloc(spa, c);
 }
 
 int
 ddt_load(spa_t *spa)
 {
+	enum zio_checksum c;
+	enum ddt_type type;
+	enum ddt_class class;
 	int error;
 
 	ddt_create(spa);
 
@@ -832,10 +848,10 @@ ddt_load(spa_t *spa)
 	if (error)
 		return (error == ENOENT ? 0 : error);
 
-	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
 		ddt_t *ddt = spa->spa_ddt[c];
-		for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
-			for (enum ddt_class class = 0; class < DDT_CLASSES;
+		for (type = 0; type < DDT_TYPES; type++) {
+			for (class = 0; class < DDT_CLASSES;
 			    class++) {
 				error = ddt_object_load(ddt, type, class);
 				if (error != 0 && error != ENOENT)
@@ -856,7 +872,9 @@ ddt_load(spa_t *spa)
 void
 ddt_unload(spa_t *spa)
 {
-	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+	enum zio_checksum c;
+
+	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
 		if (spa->spa_ddt[c]) {
 			ddt_table_free(spa->spa_ddt[c]);
 			spa->spa_ddt[c] = NULL;
@@ -869,6 +887,8 @@ ddt_class_contains(spa_t *spa, enum ddt_class max_class, const blkptr_t *bp)
 {
 	ddt_t *ddt;
 	ddt_entry_t dde;
+	enum ddt_type type;
+	enum ddt_class class;
 
 	if (!BP_GET_DEDUP(bp))
 		return (B_FALSE);
@@ -880,8 +900,8 @@ ddt_class_contains(spa_t *spa, enum ddt_class max_class, const blkptr_t *bp)
 
 	ddt_key_fill(&dde.dde_key, bp);
 
-	for (enum ddt_type type = 0; type < DDT_TYPES; type++)
-		for (enum ddt_class class = 0; class <= max_class; class++)
+	for (type = 0; type < DDT_TYPES; type++)
+		for (class = 0; class <= max_class; class++)
 			if (ddt_object_lookup(ddt, type, class, &dde) == 0)
 				return (B_TRUE);
 
@@ -893,13 +913,15 @@ ddt_repair_start(ddt_t *ddt, const blkptr_t *bp)
 {
 	ddt_key_t ddk;
 	ddt_entry_t *dde;
+	enum ddt_type type;
+	enum ddt_class class;
 
 	ddt_key_fill(&ddk, bp);
 
 	dde = ddt_alloc(&ddk);
 
-	for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
-		for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
+	for (type = 0; type < DDT_TYPES; type++) {
+		for (class = 0; class < DDT_CLASSES; class++) {
 			/*
 			 * We can only do repair if there are multiple copies
 			 * of the block. For anything in the UNIQUE class,
@@ -1067,6 +1089,8 @@ ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg)
 	spa_t *spa = ddt->ddt_spa;
 	ddt_entry_t *dde;
 	void *cookie = NULL;
+	enum ddt_type type;
+	enum ddt_class class;
 
 	if (avl_numnodes(&ddt->ddt_tree) == 0)
 		return;
@@ -1086,8 +1110,8 @@ ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg)
 		ddt_free(dde);
 	}
 
-	for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
-		for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
+	for (type = 0; type < DDT_TYPES; type++) {
+		for (class = 0; class < DDT_CLASSES; class++) {
 			if (!ddt_object_exists(ddt, type, class))
 				continue;
 			ddt_object_sync(ddt, type, class, tx);
@@ -1106,12 +1130,13 @@ ddt_sync(spa_t *spa, uint64_t txg)
 	dmu_tx_t *tx;
 	zio_t *rio = zio_root(spa, NULL, NULL,
 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
+	enum zio_checksum c;
 
 	ASSERT(spa_syncing_txg(spa) == txg);
 
 	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
 
-	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
 		ddt_t *ddt = spa->spa_ddt[c];
 		if (ddt == NULL)
 			continue;

@@ -1146,8 +1146,8 @@ dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
 {
 	dmu_sync_arg_t *dsa = varg;
 	dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
-	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
 	blkptr_t *bp = zio->io_bp;
+	ASSERTV(dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode);
 
 	if (zio->io_error == 0) {
 		if (BP_IS_HOLE(bp)) {

@@ -961,7 +961,6 @@ dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
 	blkptr_t *bp = zio->io_bp;
 	objset_t *os = arg;
 	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
-	ASSERTV(blkptr_t *bp_orig = &zio->io_bp_orig);
 
 	ASSERT(bp == os->os_rootbp);
 	ASSERT(BP_GET_TYPE(bp) == DMU_OT_OBJSET);
@@ -1223,7 +1222,7 @@ dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
 	objset_t *os = dn->dn_objset;
 	void *data = NULL;
 	dmu_buf_impl_t *db = NULL;
-	uint64_t *user, *group;
+	uint64_t *user = NULL, *group = NULL;
 	int flags = dn->dn_id_flags;
 	int error;
 	boolean_t have_spill = B_FALSE;

@@ -996,6 +996,8 @@ backup_byteswap(dmu_replay_record_t *drr)
 		DO64(drr_end.drr_checksum.zc_word[3]);
 		DO64(drr_end.drr_toguid);
 		break;
+	default:
+		break;
 	}
 #undef DO64
 #undef DO32
@@ -1183,8 +1185,9 @@ restore_write_byref(struct restorearg *ra, objset_t *os,
 		ref_os = os;
 	}
 
-	if (err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
-	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH))
+	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
+	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
+	if (err)
 		return (err);
 
 	tx = dmu_tx_create(os);
@@ -1441,7 +1444,7 @@ out:
 	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
 		void *cookie = NULL;
 
-		while (gmep = avl_destroy_nodes(&ra.guid_to_ds_map, &cookie)) {
+		while ((gmep = avl_destroy_nodes(&ra.guid_to_ds_map, &cookie))) {
 			dsl_dataset_rele(gmep->gme_ds, &ra.guid_to_ds_map);
 			kmem_free(gmep, sizeof (guid_map_entry_t));
 		}

@@ -1218,7 +1218,7 @@ dmu_tx_do_callbacks(list_t *cb_list, int error)
 {
 	dmu_tx_callback_t *dcb;
 
-	while (dcb = list_head(cb_list)) {
+	while ((dcb = list_head(cb_list))) {
 		list_remove(cb_list, dcb);
 		dcb->dcb_func(dcb->dcb_data, error);
 		kmem_free(dcb, sizeof (dmu_tx_callback_t));

@@ -672,7 +672,7 @@ dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched)
 		ZFETCHSTAT_BUMP(zfetchstat_hits);
 	} else {
 		ZFETCHSTAT_BUMP(zfetchstat_misses);
-		if (fetched = dmu_zfetch_colinear(zf, &zst)) {
+		if ((fetched = dmu_zfetch_colinear(zf, &zst))) {
 			ZFETCHSTAT_BUMP(zfetchstat_colinear_hits);
 		} else {
 			ZFETCHSTAT_BUMP(zfetchstat_colinear_misses);

@@ -92,7 +92,7 @@ dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
 	int used, compressed, uncompressed;
 	int64_t delta;
 
-	used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
+	used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
 	compressed = BP_GET_PSIZE(bp);
 	uncompressed = BP_GET_UCSIZE(bp);
 
@@ -136,15 +136,17 @@ int
 dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
     boolean_t async)
 {
+	int used, compressed, uncompressed;
+
 	if (BP_IS_HOLE(bp))
 		return (0);
 
 	ASSERT(dmu_tx_is_syncing(tx));
 	ASSERT(bp->blk_birth <= tx->tx_txg);
 
-	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
-	int compressed = BP_GET_PSIZE(bp);
-	int uncompressed = BP_GET_UCSIZE(bp);
+	used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
+	compressed = BP_GET_PSIZE(bp);
+	uncompressed = BP_GET_UCSIZE(bp);
 
 	ASSERT(used > 0);
 	if (ds == NULL) {
@@ -1469,8 +1471,8 @@ static void
 remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
 {
 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
-	uint64_t count;
 	int err;
+	ASSERTV(uint64_t count);
 
 	ASSERT(ds->ds_phys->ds_num_children >= 2);
 	err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
@@ -1753,6 +1755,7 @@ dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
 
 	if (dsl_dataset_is_snapshot(ds_next)) {
 		dsl_dataset_t *ds_nextnext;
+		dsl_dataset_t *hds;
 
 		/*
 		 * Update next's unique to include blocks which
@@ -1775,7 +1778,6 @@ dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
 		ASSERT3P(ds_next->ds_prev, ==, NULL);
 
 		/* Collapse range in this head. */
-		dsl_dataset_t *hds;
 		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
 		    ds->ds_dir->dd_phys->dd_head_dataset_obj,
 		    FTAG, &hds));
@@ -2477,7 +2479,6 @@ struct promotearg {
 };
 
 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
-static boolean_t snaplist_unstable(list_t *l);
 
 static int
 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)

@@ -692,9 +692,10 @@ upgrade_dir_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
 void
 dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
 {
-	ASSERT(dmu_tx_is_syncing(tx));
 	uint64_t obj;
+
+	ASSERT(dmu_tx_is_syncing(tx));
 
 	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
 	VERIFY(0 == dsl_pool_open_special_dir(dp,
 	    FREE_DIR_NAME, &dp->dp_free_dir));

@@ -413,6 +413,8 @@ dsl_prop_check_prediction(dsl_dir_t *dd, dsl_prop_setarg_t *psa)
 		case ZFS_PROP_QUOTA:
 		case ZFS_PROP_RESERVATION:
 			return;
+		default:
+			break;
 		}
 	}
 

@@ -1072,6 +1072,7 @@ dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
 {
 	dsl_pool_t *dp = scn->scn_dp;
 	dsl_dataset_t *ds;
+	char *dsname;
 
 	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
 
@@ -1081,7 +1082,7 @@ dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
 	dsl_scan_visit_rootbp(scn, ds, &ds->ds_phys->ds_bp, tx);
 
-	char *dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP);
+	dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP);
 	dsl_dataset_name(ds, dsname);
 	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
 	    "pausing=%u",
@@ -1644,9 +1645,9 @@ dsl_scan_scrub_cb(dsl_pool_t *dp,
 	size_t size = BP_GET_PSIZE(bp);
 	spa_t *spa = dp->dp_spa;
 	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
-	boolean_t needs_io;
+	boolean_t needs_io = B_FALSE;
 	int zio_flags = ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
-	int zio_priority;
+	int zio_priority = 0;
 	int d;
 
 	if (phys_birth <= scn->scn_phys.scn_min_txg ||

@@ -528,8 +528,8 @@ int dmu_xuio_add(struct xuio *uio, struct arc_buf *abuf, offset_t off,
 int dmu_xuio_cnt(struct xuio *uio);
 struct arc_buf *dmu_xuio_arcbuf(struct xuio *uio, int i);
 void dmu_xuio_clear(struct xuio *uio, int i);
-void xuio_stat_wbuf_copied();
-void xuio_stat_wbuf_nocopy();
+void xuio_stat_wbuf_copied(void);
+void xuio_stat_wbuf_nocopy(void);
 
 extern int zfs_prefetch_disable;
 

@@ -148,8 +148,8 @@ int sa_replace_all_by_template(sa_handle_t *, sa_bulk_attr_t *,
 int sa_replace_all_by_template_locked(sa_handle_t *, sa_bulk_attr_t *,
     int, dmu_tx_t *);
 boolean_t sa_enabled(objset_t *);
-void sa_cache_init();
-void sa_cache_fini();
+void sa_cache_init(void);
+void sa_cache_fini(void);
 int sa_set_sa_object(objset_t *, uint64_t);
 int sa_hdrsize(void *);
 void sa_handle_lock(sa_handle_t *);

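The two header hunks above change declarations such as `void sa_cache_init();` to `void sa_cache_init(void);`. In C (unlike C++) an empty parameter list in a declaration means "unspecified arguments", not "no arguments", so only the `(void)` form gives the compiler a real prototype to check calls against; gcc's -Wstrict-prototypes flags the former. A small illustration with invented function names, not from this commit:

    #include <stdio.h>

    /* Old style: declares a function taking an unspecified argument list. */
    void cache_init_old();

    /* Prototype: declares a function taking exactly no arguments. */
    void cache_init_new(void);

    void
    cache_init_old()
    {
            puts("old-style init");
    }

    void
    cache_init_new(void)
    {
            puts("prototyped init");
    }

    int
    main(void)
    {
            /*
             * With the old-style declaration nothing stops a caller from
             * passing bogus arguments; the (void) prototype turns that
             * mistake into a compile-time error.
             */
            cache_init_old();
            cache_init_new();
            return (0);
    }
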
@@ -393,6 +393,8 @@ sa_attr_op(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
 			if (error)
 				return (error);
 			break;
+		default:
+			break;
 		}
 	}
 	return (error);
@@ -637,7 +639,7 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
 	int buf_space;
 	sa_attr_type_t *attrs, *attrs_start;
 	int i, lot_count;
-	int hdrsize, spillhdrsize;
+	int hdrsize, spillhdrsize = 0;
 	int used;
 	dmu_object_type_t bonustype;
 	sa_lot_t *lot;
@@ -1020,16 +1022,16 @@ sa_tear_down(objset_t *os)
 	    sizeof (sa_attr_table_t) * sa->sa_num_attrs);
 
 	cookie = NULL;
-	while (layout = avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie)) {
+	while ((layout = avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie))){
 		sa_idx_tab_t *tab;
-		while (tab = list_head(&layout->lot_idx_tab)) {
+		while ((tab = list_head(&layout->lot_idx_tab))) {
 			ASSERT(refcount_count(&tab->sa_refcount));
 			sa_idx_tab_rele(os, tab);
 		}
 	}
 
 	cookie = NULL;
-	while (layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie)) {
+	while ((layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie))){
 		kmem_free(layout->lot_attrs,
 		    sizeof (sa_attr_type_t) * layout->lot_attr_count);
 		kmem_free(layout, sizeof (sa_lot_t));
@@ -1121,9 +1123,9 @@ sa_byteswap(sa_handle_t *hdl, sa_buf_type_t buftype)
 {
 	sa_hdr_phys_t *sa_hdr_phys = SA_GET_HDR(hdl, buftype);
 	dmu_buf_impl_t *db;
-	sa_os_t *sa = hdl->sa_os->os_sa;
 	int num_lengths = 1;
 	int i;
+	ASSERTV(sa_os_t *sa = hdl->sa_os->os_sa);
 
 	ASSERT(MUTEX_HELD(&sa->sa_lock));
 	if (sa_hdr_phys->sa_magic == SA_MAGIC)
@@ -1224,7 +1226,7 @@ sa_idx_tab_rele(objset_t *os, void *arg)
 static void
 sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab)
 {
-	sa_os_t *sa = os->os_sa;
+	ASSERTV(sa_os_t *sa = os->os_sa);
 
 	ASSERT(MUTEX_HELD(&sa->sa_lock));
 	(void) refcount_add(&idx_tab->sa_refcount, NULL);
@@ -1260,10 +1262,10 @@ sa_handle_get_from_db(objset_t *os, dmu_buf_t *db, void *userp,
     sa_handle_type_t hdl_type, sa_handle_t **handlepp)
 {
 	int error = 0;
-	dmu_object_info_t doi;
 	sa_handle_t *handle;
 
 #ifdef ZFS_DEBUG
+	dmu_object_info_t doi;
+
 	dmu_object_info_from_db(db, &doi);
 	ASSERT(doi.doi_bonus_type == DMU_OT_SA ||
 	    doi.doi_bonus_type == DMU_OT_ZNODE);
@@ -1302,7 +1304,7 @@ sa_handle_get(objset_t *objset, uint64_t objid, void *userp,
 	dmu_buf_t *db;
 	int error;
 
-	if (error = dmu_bonus_hold(objset, objid, NULL, &db))
+	if ((error = dmu_bonus_hold(objset, objid, NULL, &db)))
 		return (error);
 
 	return (sa_handle_get_from_db(objset, db, userp, hdl_type,
@@ -1402,8 +1404,8 @@ sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, void *data)
 
 	/* Verify header size is consistent with layout information */
 	ASSERT(tb);
-	ASSERT(IS_SA_BONUSTYPE(bonustype) &&
-	    SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb) || !IS_SA_BONUSTYPE(bonustype) ||
+	ASSERT((IS_SA_BONUSTYPE(bonustype) &&
+	    SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb)) || !IS_SA_BONUSTYPE(bonustype) ||
 	    (IS_SA_BONUSTYPE(bonustype) && hdr->sa_layout_info == 0));
 
 	/*
@@ -1541,7 +1543,7 @@ sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
 	sa_bulk_attr_t *attr_desc;
 	void *old_data[2];
 	int bonus_attr_count = 0;
-	int bonus_data_size, spill_data_size;
+	int bonus_data_size = 0, spill_data_size = 0;
 	int spill_attr_count = 0;
 	int error;
 	uint16_t length;

@@ -1682,7 +1682,6 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
 	int orig_mode = spa->spa_mode;
 	int parse;
 	uint64_t obj;
-	int c;
 
 	/*
 	 * If this is an untrusted config, access the pool in read-only mode.
@@ -2712,6 +2711,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
 	nvlist_t **spares, **l2cache;
 	uint_t nspares, nl2cache;
 	uint64_t version, obj;
+	int c;
 
 	/*
 	 * If this pool already exists, return failure.
@@ -5382,7 +5382,7 @@ spa_sync(spa_t *spa, uint64_t txg)
 		ddt_sync(spa, txg);
 		dsl_scan_sync(dp, tx);
 
-		while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
+		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)))
 			vdev_sync(vd, txg);
 
 		if (pass == 1)

@@ -884,10 +884,9 @@ spa_vdev_config_enter(spa_t *spa)
 void
 spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
 {
-	ASSERT(MUTEX_HELD(&spa_namespace_lock));
-
 	int config_changed = B_FALSE;
 
+	ASSERT(MUTEX_HELD(&spa_namespace_lock));
 	ASSERT(txg > spa_last_synced_txg(spa));
 
 	spa->spa_pending_vdev = NULL;

@@ -1417,8 +1417,8 @@ vdev_validate(vdev_t *vd)
 void
 vdev_close(vdev_t *vd)
 {
-	ASSERTV(spa_t *spa = vd->vdev_spa);
 	vdev_t *pvd = vd->vdev_parent;
+	ASSERTV(spa_t *spa = vd->vdev_spa);
 
 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
 
@@ -1467,10 +1467,9 @@ vdev_hold(vdev_t *vd)
 void
 vdev_rele(vdev_t *vd)
 {
-	spa_t *spa = vd->vdev_spa;
 	int c;
 
-	ASSERT(spa_is_root(spa));
+	ASSERT(spa_is_root(vd->vdev_spa));
 	for (c = 0; c < vd->vdev_children; c++)
 		vdev_rele(vd->vdev_child[c]);
 
@@ -3075,10 +3074,10 @@ vdev_is_bootable(vdev_t *vd)
 void
 vdev_load_log_state(vdev_t *nvd, vdev_t *ovd)
 {
-	spa_t *spa = nvd->vdev_spa;
 	int c;
 
-	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
+	ASSERT3S(spa_config_held(nvd->vdev_spa, SCL_STATE_ALL, RW_WRITER), ==,
+	    SCL_STATE_ALL);
 	ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid);
 
 	for (c = 0; c < nvd->vdev_children; c++)

@@ -1077,12 +1077,13 @@ vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
 	zio = zio_root(spa, NULL, NULL, flags);
 
 	for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) {
-		uint64_t *good_writes = kmem_zalloc(sizeof (uint64_t),
-		    KM_SLEEP);
+		uint64_t *good_writes;
+		zio_t *vio;
 
 		ASSERT(!vd->vdev_ishole);
 
-		zio_t *vio = zio_null(zio, spa, NULL,
+		good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
+		vio = zio_null(zio, spa, NULL,
 		    (vd->vdev_islog || vd->vdev_aux != NULL) ?
 		    vdev_label_sync_ignore_done : vdev_label_sync_top_done,
 		    good_writes, flags);

@@ -1043,12 +1043,13 @@ zap_update(objset_t *os, uint64_t zapobj, const char *name,
 {
 	zap_t *zap;
 	mzap_ent_t *mze;
-	uint64_t oldval;
 	const uint64_t *intval = val;
 	zap_name_t *zn;
 	int err;
 
+#ifdef ZFS_DEBUG
+	uint64_t oldval;
 
 	/*
 	 * If there is an old value, it shouldn't change across the
 	 * lockdir (eg, due to bprewrite's xlation).

@@ -1534,8 +1534,8 @@ zfs_ioc_vdev_split(struct file *filp, zfs_cmd_t *zc)
 	if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
 		return (error);
 
-	if (error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
-	    zc->zc_iflags, &config)) {
+	if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
+	    zc->zc_iflags, &config))) {
 		spa_close(spa, FTAG);
 		return (error);
 	}
@@ -1651,13 +1651,13 @@ zfs_ioc_objset_stats(struct file *filp, zfs_cmd_t *zc)
  * local property values.
  */
 static int
-zfs_ioc_objset_recvd_props(zfs_cmd_t *zc)
+zfs_ioc_objset_recvd_props(struct file *filp, zfs_cmd_t *zc)
 {
 	objset_t *os = NULL;
 	int error;
 	nvlist_t *nv;
 
-	if (error = dmu_objset_hold(zc->zc_name, FTAG, &os))
+	if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os)))
 		return (error);
 
 	/*
@@ -2382,8 +2382,8 @@ zfs_ioc_pool_set_props(struct file *filp, zfs_cmd_t *zc)
 	int error;
 	nvpair_t *pair;
 
-	if (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
-	    zc->zc_iflags, &props))
+	if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
+	    zc->zc_iflags, &props)))
 		return (error);
 
 	/*
@@ -3112,8 +3112,8 @@ zfs_check_settable(struct file *filp, const char *dsname,
 
 	if (prop == ZPROP_INVAL) {
 		if (zfs_prop_user(propname)) {
-			if (err = zfs_secpolicy_write_perms(dsname,
-			    ZFS_DELEG_PERM_USERPROP, cr))
+			if ((err = zfs_secpolicy_write_perms(dsname,
+			    ZFS_DELEG_PERM_USERPROP, cr)))
 				return (err);
 			return (0);
 		}
@@ -3136,7 +3136,7 @@ zfs_check_settable(struct file *filp, const char *dsname,
 			return (EINVAL);
 		}
 
-		if (err = zfs_secpolicy_write_perms(dsname, perm, cr))
+		if ((err = zfs_secpolicy_write_perms(dsname, perm, cr)))
 			return (err);
 		return (0);
 	}
@@ -3220,6 +3220,8 @@ zfs_check_settable(struct file *filp, const char *dsname,
 			return (ENOTSUP);
 		}
 		break;
+	default:
+		break;
 	}
 
 	return (zfs_secpolicy_setprop(dsname, prop, pair, CRED()));

@@ -1894,8 +1894,8 @@ zfs_obj_to_pobj(objset_t *osp, uint64_t obj, uint64_t *pobjp, int *is_xattrdir,
 	dmu_object_info_from_db(db, &doi);
 	if ((doi.doi_bonus_type != DMU_OT_SA &&
 	    doi.doi_bonus_type != DMU_OT_ZNODE) ||
-	    doi.doi_bonus_type == DMU_OT_ZNODE &&
-	    doi.doi_bonus_size < sizeof (znode_phys_t)) {
+	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
+	    doi.doi_bonus_size < sizeof (znode_phys_t))) {
 		sa_buf_rele(db, FTAG);
 		return (EINVAL);
 	}

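The hunk above adds parentheses around the `&&` sub-expression of a mixed `&&`/`||` condition. Since `&&` already binds tighter than `||`, the meaning is unchanged, but gcc's -Wparentheses asks for the explicit grouping so both the reader and the compiler can tell it was intended. A sketch of the same pattern with made-up predicates, not part of the diff:

    #include <stdio.h>

    static int
    bonus_ok(int type, int size)
    {
            int is_sa = (type == 1);
            int is_znode = (type == 2);

            /*
             * Same truth table as "!is_sa && !is_znode || is_znode && size < 8",
             * but the explicit parentheses satisfy -Wparentheses.
             */
            if ((!is_sa && !is_znode) || (is_znode && size < 8))
                    return (0);
            return (1);
    }

    int
    main(void)
    {
            printf("%d\n", bonus_ok(2, 16));
            return (0);
    }
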
@@ -78,8 +78,6 @@ boolean_t zfs_nocacheflush = B_FALSE;
 
 static kmem_cache_t *zil_lwb_cache;
 
-static boolean_t zil_empty(zilog_t *zilog);
-
 #define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
     sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))
 
@@ -287,6 +285,8 @@ zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
 	char *lrbuf, *lrp;
 	int error = 0;
 
+	bzero(&next_blk, sizeof(blkptr_t));
+
 	/*
 	 * Old logs didn't record the maximum zh_claim_lr_seq.
 	 */
@@ -308,7 +308,7 @@ zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
 	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
 		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
 		int reclen;
-		char *end;
+		char *end = NULL;
 
 		if (blk_seq > claim_blk_seq)
 			break;

@@ -1949,12 +1949,12 @@ static void
 zio_ddt_ditto_write_done(zio_t *zio)
 {
 	int p = DDT_PHYS_DITTO;
-	zio_prop_t *zp = &zio->io_prop;
 	blkptr_t *bp = zio->io_bp;
 	ddt_t *ddt = ddt_select(zio->io_spa, bp);
 	ddt_entry_t *dde = zio->io_private;
 	ddt_phys_t *ddp = &dde->dde_phys[p];
 	ddt_key_t *ddk = &dde->dde_key;
+	ASSERTV(zio_prop_t *zp = &zio->io_prop);
 
 	ddt_enter(ddt);
 
@@ -2171,6 +2171,8 @@ zio_dva_claim(zio_t *zio)
 static void
 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
 {
+	int g;
+
 	ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
 	ASSERT(zio->io_bp_override == NULL);
 