diff --git a/include/os/freebsd/spl/sys/sysmacros.h b/include/os/freebsd/spl/sys/sysmacros.h
index 2d0164cb12..7e3ab89155 100644
--- a/include/os/freebsd/spl/sys/sysmacros.h
+++ b/include/os/freebsd/spl/sys/sysmacros.h
@@ -80,6 +80,7 @@ extern "C" {
 #define	kpreempt_disable()	critical_enter()
 #define	kpreempt_enable()	critical_exit()
 #define	CPU_SEQID	curcpu
+#define	CPU_SEQID_UNSTABLE	curcpu
 #define	is_system_labeled()	0
 /*
  * Convert a single byte to/from binary-coded decimal (BCD).
diff --git a/include/os/linux/spl/sys/sysmacros.h b/include/os/linux/spl/sys/sysmacros.h
index eb3494bc79..98d1ab1d7f 100644
--- a/include/os/linux/spl/sys/sysmacros.h
+++ b/include/os/linux/spl/sys/sysmacros.h
@@ -76,6 +76,7 @@
 #define	max_ncpus	num_possible_cpus()
 #define	boot_ncpus	num_online_cpus()
 #define	CPU_SEQID	smp_processor_id()
+#define	CPU_SEQID_UNSTABLE	raw_smp_processor_id()
 #define	is_system_labeled()	0

 #ifndef RLIM64_INFINITY
diff --git a/include/sys/zfs_context.h b/include/sys/zfs_context.h
index 9f637036ee..ee3216d676 100644
--- a/include/sys/zfs_context.h
+++ b/include/sys/zfs_context.h
@@ -626,6 +626,7 @@ extern void delay(clock_t ticks);
 #define	defclsyspri	0

 #define	CPU_SEQID	((uintptr_t)pthread_self() & (max_ncpus - 1))
+#define	CPU_SEQID_UNSTABLE	CPU_SEQID

 #define	kcred	NULL
 #define	CRED()	NULL
diff --git a/module/icp/core/kcf_sched.c b/module/icp/core/kcf_sched.c
index 40d50553d6..81fd15f8ea 100644
--- a/module/icp/core/kcf_sched.c
+++ b/module/icp/core/kcf_sched.c
@@ -1308,9 +1308,7 @@ kcf_reqid_insert(kcf_areq_node_t *areq)
 	kcf_areq_node_t *headp;
 	kcf_reqid_table_t *rt;

-	kpreempt_disable();
-	rt = kcf_reqid_table[CPU_SEQID & REQID_TABLE_MASK];
-	kpreempt_enable();
+	rt = kcf_reqid_table[CPU_SEQID_UNSTABLE & REQID_TABLE_MASK];

 	mutex_enter(&rt->rt_lock);

diff --git a/module/zfs/aggsum.c b/module/zfs/aggsum.c
index a2fec27744..e38f4a66cc 100644
--- a/module/zfs/aggsum.c
+++ b/module/zfs/aggsum.c
@@ -167,9 +167,7 @@ aggsum_add(aggsum_t *as, int64_t delta)
 	struct aggsum_bucket *asb;
 	int64_t borrow;

-	kpreempt_disable();
-	asb = &as->as_buckets[CPU_SEQID % as->as_numbuckets];
-	kpreempt_enable();
+	asb = &as->as_buckets[CPU_SEQID_UNSTABLE % as->as_numbuckets];

 	/* Try fast path if we already borrowed enough before. */
 	mutex_enter(&asb->asc_lock);
diff --git a/module/zfs/dmu_object.c b/module/zfs/dmu_object.c
index 453a2842ce..12cdbd68b1 100644
--- a/module/zfs/dmu_object.c
+++ b/module/zfs/dmu_object.c
@@ -58,10 +58,8 @@ dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
 	int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
 	int error;

-	kpreempt_disable();
-	cpuobj = &os->os_obj_next_percpu[CPU_SEQID %
+	cpuobj = &os->os_obj_next_percpu[CPU_SEQID_UNSTABLE %
 	    os->os_obj_next_percpu_len];
-	kpreempt_enable();

 	if (dn_slots == 0) {
 		dn_slots = DNODE_MIN_SLOTS;
diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index 65375b579d..420244abb6 100644
--- a/module/zfs/txg.c
+++ b/module/zfs/txg.c
@@ -305,9 +305,7 @@ txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
 	 * significance to the chosen tx_cpu. Because.. Why not use
 	 * the current cpu to index into the array?
 	 */
-	kpreempt_disable();
-	tc = &tx->tx_cpu[CPU_SEQID];
-	kpreempt_enable();
+	tc = &tx->tx_cpu[CPU_SEQID_UNSTABLE];

 	mutex_enter(&tc->tc_open_lock);
 	txg = tx->tx_open_txg;
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index 260e88b0be..55c2f1ea1e 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -2246,9 +2246,7 @@ zio_nowait(zio_t *zio)
 		 * will ensure they complete prior to unloading the pool.
 		 */
 		spa_t *spa = zio->io_spa;
-		kpreempt_disable();
-		pio = spa->spa_async_zio_root[CPU_SEQID];
-		kpreempt_enable();
+		pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];

 		zio_add_child(pio, zio);
 	}