Make use of ZFS_DEBUG consistent within kmod sources

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Signed-off-by: Matt Macy <mmacy@FreeBSD.org>
Closes #10623
This commit is contained in:
Matthew Macy 2020-07-25 20:07:44 -07:00 committed by GitHub
parent f5b189f937
commit 6d8da84106
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 39 additions and 34 deletions

View File

@@ -119,9 +119,11 @@ extern "C" {
#define __CONST __sun_attr__((__const__)) #define __CONST __sun_attr__((__const__))
#define __PURE __sun_attr__((__pure__)) #define __PURE __sun_attr__((__pure__))
#if (defined(ZFS_DEBUG) || !defined(NDEBUG)) && !defined(DEBUG) #ifdef INVARIANTS
#define DEBUG #define ZFS_DEBUG
#undef NDEBUG
#endif #endif
#define EXPORT_SYMBOL(x) #define EXPORT_SYMBOL(x)
#define MODULE_AUTHOR(s) #define MODULE_AUTHOR(s)
#define MODULE_DESCRIPTION(s) #define MODULE_DESCRIPTION(s)
@@ -134,6 +136,9 @@ extern "C" {
#ifdef ZFS_DEBUG #ifdef ZFS_DEBUG
#undef NDEBUG #undef NDEBUG
#endif #endif
#if !defined(ZFS_DEBUG) && !defined(NDEBUG)
#define NDEBUG
#endif
#ifndef EINTEGRITY #ifndef EINTEGRITY
#define EINTEGRITY 97 /* EINTEGRITY is new in 13 */ #define EINTEGRITY 97 /* EINTEGRITY is new in 13 */

View File

@@ -269,7 +269,7 @@ avl_find(avl_tree_t *tree, const void *value, avl_index_t *where)
diff = tree->avl_compar(value, AVL_NODE2DATA(node, off)); diff = tree->avl_compar(value, AVL_NODE2DATA(node, off));
ASSERT(-1 <= diff && diff <= 1); ASSERT(-1 <= diff && diff <= 1);
if (diff == 0) { if (diff == 0) {
#ifdef DEBUG #ifdef ZFS_DEBUG
if (where != NULL) if (where != NULL)
*where = 0; *where = 0;
#endif #endif
@@ -578,7 +578,7 @@ avl_insert_here(
{ {
avl_node_t *node; avl_node_t *node;
int child = direction; /* rely on AVL_BEFORE == 0, AVL_AFTER == 1 */ int child = direction; /* rely on AVL_BEFORE == 0, AVL_AFTER == 1 */
#ifdef DEBUG #ifdef ZFS_DEBUG
int diff; int diff;
#endif #endif
@@ -593,7 +593,7 @@ avl_insert_here(
*/ */
node = AVL_DATA2NODE(here, tree->avl_offset); node = AVL_DATA2NODE(here, tree->avl_offset);
#ifdef DEBUG #ifdef ZFS_DEBUG
diff = tree->avl_compar(new_data, here); diff = tree->avl_compar(new_data, here);
ASSERT(-1 <= diff && diff <= 1); ASSERT(-1 <= diff && diff <= 1);
ASSERT(diff != 0); ASSERT(diff != 0);
@@ -604,7 +604,7 @@ avl_insert_here(
node = node->avl_child[child]; node = node->avl_child[child];
child = 1 - child; child = 1 - child;
while (node->avl_child[child] != NULL) { while (node->avl_child[child] != NULL) {
#ifdef DEBUG #ifdef ZFS_DEBUG
diff = tree->avl_compar(new_data, diff = tree->avl_compar(new_data,
AVL_NODE2DATA(node, tree->avl_offset)); AVL_NODE2DATA(node, tree->avl_offset));
ASSERT(-1 <= diff && diff <= 1); ASSERT(-1 <= diff && diff <= 1);
@@ -613,7 +613,7 @@ avl_insert_here(
#endif #endif
node = node->avl_child[child]; node = node->avl_child[child];
} }
#ifdef DEBUG #ifdef ZFS_DEBUG
diff = tree->avl_compar(new_data, diff = tree->avl_compar(new_data,
AVL_NODE2DATA(node, tree->avl_offset)); AVL_NODE2DATA(node, tree->avl_offset));
ASSERT(-1 <= diff && diff <= 1); ASSERT(-1 <= diff && diff <= 1);

View File

@@ -126,7 +126,7 @@ extern "C" {
#if defined(_KERNEL) && !defined(_ASM) #if defined(_KERNEL) && !defined(_ASM)
#if defined(DEBUG) #if defined(ZFS_DEBUG)
#if STACK_ALIGN == 4 #if STACK_ALIGN == 4
#define ASSERT_STACK_ALIGNED() \ #define ASSERT_STACK_ALIGNED() \
{ \ { \

View File

@@ -154,7 +154,7 @@ callb_add_common(boolean_t (*func)(void *arg, int code),
cp->c_arg = arg; cp->c_arg = arg;
cp->c_class = (uchar_t)class; cp->c_class = (uchar_t)class;
cp->c_flag |= CALLB_TAKEN; cp->c_flag |= CALLB_TAKEN;
#ifdef DEBUG #ifdef ZFS_DEBUG
if (strlen(name) > CB_MAXNAME) if (strlen(name) > CB_MAXNAME)
cmn_err(CE_WARN, "callb_add: name of callback function '%s' " cmn_err(CE_WARN, "callb_add: name of callback function '%s' "
"too long -- truncated to %d chars", "too long -- truncated to %d chars",
@@ -217,7 +217,7 @@ callb_delete(callb_id_t id)
while (*pp != NULL && *pp != me) while (*pp != NULL && *pp != me)
pp = &(*pp)->c_next; pp = &(*pp)->c_next;
#ifdef DEBUG #ifdef ZFS_DEBUG
if (*pp != me) { if (*pp != me) {
cmn_err(CE_WARN, "callb delete bogus entry 0x%p", cmn_err(CE_WARN, "callb delete bogus entry 0x%p",
(void *)me); (void *)me);

View File

@@ -174,7 +174,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
if (err != 0) if (err != 0)
return (err); return (err);
#ifdef DEBUG #ifdef ZFS_DEBUG
IMPLY(last_size < PAGE_SIZE, *rahead == 0); IMPLY(last_size < PAGE_SIZE, *rahead == 0);
if (dbp[0]->db_offset != 0 || numbufs > 1) { if (dbp[0]->db_offset != 0 || numbufs > 1) {
for (i = 0; i < numbufs; i++) { for (i = 0; i < numbufs; i++) {
@@ -269,7 +269,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
} }
} }
#ifdef DEBUG #ifdef ZFS_DEBUG
/* /*
* Three possibilities: * Three possibilities:
* - last requested page ends at a buffer boundary and , thus, * - last requested page ends at a buffer boundary and , thus,

View File

@@ -839,7 +839,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xvpp, cred_t *cr)
if (fuid_dirtied) if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx); zfs_fuid_sync(zfsvfs, tx);
#ifdef DEBUG #ifdef ZFS_DEBUG
error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs), error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (parent)); &parent, sizeof (parent));
ASSERT(error == 0 && parent == zp->z_id); ASSERT(error == 0 && parent == zp->z_id);

View File

@@ -1271,7 +1271,7 @@ zfs_get_done(zgd_t *zgd, int error)
kmem_free(zgd, sizeof (zgd_t)); kmem_free(zgd, sizeof (zgd_t));
} }
#ifdef DEBUG #ifdef ZFS_DEBUG
static int zil_fault_io = 0; static int zil_fault_io = 0;
#endif #endif
@@ -1354,7 +1354,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
/* test for truncation needs to be done while range locked */ /* test for truncation needs to be done while range locked */
if (lr->lr_offset >= zp->z_size) if (lr->lr_offset >= zp->z_size)
error = SET_ERROR(ENOENT); error = SET_ERROR(ENOENT);
#ifdef DEBUG #ifdef ZFS_DEBUG
if (zil_fault_io) { if (zil_fault_io) {
error = SET_ERROR(EIO); error = SET_ERROR(EIO);
zil_fault_io = 0; zil_fault_io = 0;

View File

@@ -76,7 +76,7 @@ SYSCTL_INT(_debug_sizeof, OID_AUTO, znode, CTLFLAG_RD,
* Define ZNODE_STATS to turn on statistic gathering. By default, it is only * Define ZNODE_STATS to turn on statistic gathering. By default, it is only
* turned on when DEBUG is also defined. * turned on when DEBUG is also defined.
*/ */
#ifdef DEBUG #ifdef ZFS_DEBUG
#define ZNODE_STATS #define ZNODE_STATS
#endif /* DEBUG */ #endif /* DEBUG */

View File

@@ -1059,7 +1059,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xzpp, cred_t *cr)
int error; int error;
zfs_acl_ids_t acl_ids; zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied; boolean_t fuid_dirtied;
#ifdef DEBUG #ifdef ZFS_DEBUG
uint64_t parent; uint64_t parent;
#endif #endif
@@ -1095,7 +1095,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xzpp, cred_t *cr)
if (fuid_dirtied) if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx); zfs_fuid_sync(zfsvfs, tx);
#ifdef DEBUG #ifdef ZFS_DEBUG
error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs), error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (parent)); &parent, sizeof (parent));
ASSERT(error == 0 && parent == zp->z_id); ASSERT(error == 0 && parent == zp->z_id);

View File

@@ -265,7 +265,7 @@ zfsdev_detach(void)
misc_deregister(&zfs_misc); misc_deregister(&zfs_misc);
} }
#ifdef DEBUG #ifdef ZFS_DEBUG
#define ZFS_DEBUG_STR " (DEBUG mode)" #define ZFS_DEBUG_STR " (DEBUG mode)"
#else #else
#define ZFS_DEBUG_STR "" #define ZFS_DEBUG_STR ""

View File

@@ -1060,7 +1060,7 @@ zfs_get_done(zgd_t *zgd, int error)
kmem_free(zgd, sizeof (zgd_t)); kmem_free(zgd, sizeof (zgd_t));
} }
#ifdef DEBUG #ifdef ZFS_DEBUG
static int zil_fault_io = 0; static int zil_fault_io = 0;
#endif #endif
@@ -1142,7 +1142,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
/* test for truncation needs to be done while range locked */ /* test for truncation needs to be done while range locked */
if (lr->lr_offset >= zp->z_size) if (lr->lr_offset >= zp->z_size)
error = SET_ERROR(ENOENT); error = SET_ERROR(ENOENT);
#ifdef DEBUG #ifdef ZFS_DEBUG
if (zil_fault_io) { if (zil_fault_io) {
error = SET_ERROR(EIO); error = SET_ERROR(EIO);
zil_fault_io = 0; zil_fault_io = 0;

View File

@@ -1989,7 +1989,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
* objects may be dirtied in syncing context, but only if they * objects may be dirtied in syncing context, but only if they
* were already pre-dirtied in open context. * were already pre-dirtied in open context.
*/ */
#ifdef DEBUG #ifdef ZFS_DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL) { if (dn->dn_objset->os_dsl_dataset != NULL) {
rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
RW_READER, FTAG); RW_READER, FTAG);
@@ -2062,7 +2062,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
*/ */
os = dn->dn_objset; os = dn->dn_objset;
VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
#ifdef DEBUG #ifdef ZFS_DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL) if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||

View File

@@ -91,7 +91,7 @@ dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
if (child == NULL) if (child == NULL)
continue; continue;
#ifdef DEBUG #ifdef ZFS_DEBUG
DB_DNODE_ENTER(child); DB_DNODE_ENTER(child);
ASSERT3P(DB_DNODE(child), ==, dn); ASSERT3P(DB_DNODE(child), ==, dn);
DB_DNODE_EXIT(child); DB_DNODE_EXIT(child);
@@ -462,7 +462,7 @@ dnode_evict_dbufs(dnode_t *dn)
mutex_enter(&dn->dn_dbufs_mtx); mutex_enter(&dn->dn_dbufs_mtx);
for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) { for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {
#ifdef DEBUG #ifdef ZFS_DEBUG
DB_DNODE_ENTER(db); DB_DNODE_ENTER(db);
ASSERT3P(DB_DNODE(db), ==, dn); ASSERT3P(DB_DNODE(db), ==, dn);
DB_DNODE_EXIT(db); DB_DNODE_EXIT(db);

View File

@@ -1553,7 +1553,7 @@ dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
ASSERT(used > 0 || ASSERT(used > 0 ||
dsl_dir_phys(dd)->dd_used_breakdown[type] >= -used); dsl_dir_phys(dd)->dd_used_breakdown[type] >= -used);
dsl_dir_phys(dd)->dd_used_breakdown[type] += used; dsl_dir_phys(dd)->dd_used_breakdown[type] += used;
#ifdef DEBUG #ifdef ZFS_DEBUG
{ {
dd_used_t t; dd_used_t t;
uint64_t u = 0; uint64_t u = 0;

View File

@@ -4401,7 +4401,7 @@ metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
*/ */
if (zal->zal_size == metaslab_trace_max_entries) { if (zal->zal_size == metaslab_trace_max_entries) {
metaslab_alloc_trace_t *mat_next; metaslab_alloc_trace_t *mat_next;
#ifdef DEBUG #ifdef ZFS_DEBUG
panic("too many entries in allocation list"); panic("too many entries in allocation list");
#endif #endif
METASLABSTAT_BUMP(metaslabstat_trace_over_limit); METASLABSTAT_BUMP(metaslabstat_trace_over_limit);

View File

@@ -33,7 +33,7 @@ int zfs_multilist_num_sublists = 0;
* Given the object contained on the list, return a pointer to the * Given the object contained on the list, return a pointer to the
* object's multilist_node_t structure it contains. * object's multilist_node_t structure it contains.
*/ */
#ifdef DEBUG #ifdef ZFS_DEBUG
static multilist_node_t * static multilist_node_t *
multilist_d2l(multilist_t *ml, void *obj) multilist_d2l(multilist_t *ml, void *obj)
{ {

View File

@@ -164,7 +164,7 @@ static void
rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag) rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
{ {
mutex_enter(&rrl->rr_lock); mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL) #if !defined(ZFS_DEBUG) && defined(_KERNEL)
if (rrl->rr_writer == NULL && !rrl->rr_writer_wanted && if (rrl->rr_writer == NULL && !rrl->rr_writer_wanted &&
!rrl->rr_track_all) { !rrl->rr_track_all) {
rrl->rr_anon_rcount.rc_count++; rrl->rr_anon_rcount.rc_count++;
@@ -241,7 +241,7 @@ void
rrw_exit(rrwlock_t *rrl, void *tag) rrw_exit(rrwlock_t *rrl, void *tag)
{ {
mutex_enter(&rrl->rr_lock); mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL) #if !defined(ZFS_DEBUG) && defined(_KERNEL)
if (!rrl->rr_writer && rrl->rr_linked_rcount.rc_count == 0) { if (!rrl->rr_writer && rrl->rr_linked_rcount.rc_count == 0) {
rrl->rr_anon_rcount.rc_count--; rrl->rr_anon_rcount.rc_count--;
if (rrl->rr_anon_rcount.rc_count == 0) if (rrl->rr_anon_rcount.rc_count == 0)

View File

@@ -675,7 +675,7 @@ space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
space_map_write_intro_debug(sm, maptype, tx); space_map_write_intro_debug(sm, maptype, tx);
#ifdef DEBUG #ifdef ZFS_DEBUG
/* /*
* We do this right after we write the intro debug entry * We do this right after we write the intro debug entry
* because the estimate does not take it into account. * because the estimate does not take it into account.
@@ -736,7 +736,7 @@ space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
dmu_buf_rele(db, FTAG); dmu_buf_rele(db, FTAG);
#ifdef DEBUG #ifdef ZFS_DEBUG
/* /*
* We expect our estimation to be based on the worst case * We expect our estimation to be based on the worst case
* scenario [see comment in space_map_estimate_optimal_size()]. * scenario [see comment in space_map_estimate_optimal_size()].

View File

@@ -4806,7 +4806,7 @@ zfs_allow_log_destroy(void *arg)
kmem_strfree(poolname); kmem_strfree(poolname);
} }
#ifdef DEBUG #ifdef ZFS_DEBUG
static boolean_t zfs_ioc_recv_inject_err; static boolean_t zfs_ioc_recv_inject_err;
#endif #endif
@@ -5019,7 +5019,7 @@ zfs_ioc_recv_impl(char *tofs, char *tosnap, char *origin, nvlist_t *recvprops,
} }
*read_bytes = off - noff; *read_bytes = off - noff;
#ifdef DEBUG #ifdef ZFS_DEBUG
if (zfs_ioc_recv_inject_err) { if (zfs_ioc_recv_inject_err) {
zfs_ioc_recv_inject_err = B_FALSE; zfs_ioc_recv_inject_err = B_FALSE;
error = 1; error = 1;