Introduce CPU_SEQID_UNSTABLE
Current CPU_SEQID users don't care about the CPU ID possibly changing underneath them, but enclose it within kpreempt_disable/enable in order to fend off warnings from Linux's CONFIG_DEBUG_PREEMPT. There is no need to do so: the expected way to get the CPU ID while allowing for migration is raw_smp_processor_id. To make this future-proof, this patch keeps CPU_SEQID as is and introduces CPU_SEQID_UNSTABLE instead, to make it clear that consumers explicitly want this behavior.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Reviewed-by: Matt Macy <mmacy@FreeBSD.org>
Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Closes #11142
parent 8583540c6e
commit 09eb36ce3d
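For context, a minimal sketch of the two patterns, not taken from this commit: the bucket array, its size, and the function names (hypothetical_bucket_t, hypothetical_buckets, bucket_add_old/new) are hypothetical, while the SPL primitives (kmutex_t, mutex_enter/exit, kpreempt_disable/enable, CPU_SEQID, CPU_SEQID_UNSTABLE) are the ones this commit touches.

/*
 * Hypothetical illustration only.  The CPU ID is used purely as a locality
 * hint: if the thread migrates after the index is read, it merely contends
 * on another CPU's bucket, which stays correct because each bucket has its
 * own lock.
 */
typedef struct hypothetical_bucket {
	kmutex_t	hb_lock;
	uint64_t	hb_count;
} hypothetical_bucket_t;

static hypothetical_bucket_t hypothetical_buckets[128];

/* Old pattern: preemption disabled only to silence CONFIG_DEBUG_PREEMPT. */
static void
bucket_add_old(uint64_t delta)
{
	hypothetical_bucket_t *hb;

	kpreempt_disable();
	hb = &hypothetical_buckets[CPU_SEQID % 128];
	kpreempt_enable();

	mutex_enter(&hb->hb_lock);
	hb->hb_count += delta;
	mutex_exit(&hb->hb_lock);
}

/* New pattern: CPU_SEQID_UNSTABLE says a stale CPU ID is acceptable. */
static void
bucket_add_new(uint64_t delta)
{
	hypothetical_bucket_t *hb;

	hb = &hypothetical_buckets[CPU_SEQID_UNSTABLE % 128];

	mutex_enter(&hb->hb_lock);
	hb->hb_count += delta;
	mutex_exit(&hb->hb_lock);
}

On Linux, CPU_SEQID_UNSTABLE maps to raw_smp_processor_id(), so the second form avoids the preempt-debug warning without the pointless disable/enable pair; on FreeBSD and in userland the two macros are equivalent.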
@@ -80,6 +80,7 @@ extern "C" {
 #define	kpreempt_disable()	critical_enter()
 #define	kpreempt_enable()	critical_exit()
 #define	CPU_SEQID		curcpu
+#define	CPU_SEQID_UNSTABLE	curcpu
 #define	is_system_labeled()	0
 /*
  * Convert a single byte to/from binary-coded decimal (BCD).
@@ -76,6 +76,7 @@
 #define	max_ncpus	num_possible_cpus()
 #define	boot_ncpus	num_online_cpus()
 #define	CPU_SEQID	smp_processor_id()
+#define	CPU_SEQID_UNSTABLE	raw_smp_processor_id()
 #define	is_system_labeled()	0
 
 #ifndef RLIM64_INFINITY
@@ -626,6 +626,7 @@ extern void delay(clock_t ticks);
 #define	defclsyspri	0
 
 #define	CPU_SEQID	((uintptr_t)pthread_self() & (max_ncpus - 1))
+#define	CPU_SEQID_UNSTABLE	CPU_SEQID
 
 #define	kcred		NULL
 #define	CRED()		NULL
@@ -1308,9 +1308,7 @@ kcf_reqid_insert(kcf_areq_node_t *areq)
 	kcf_areq_node_t *headp;
 	kcf_reqid_table_t *rt;
 
-	kpreempt_disable();
-	rt = kcf_reqid_table[CPU_SEQID & REQID_TABLE_MASK];
-	kpreempt_enable();
+	rt = kcf_reqid_table[CPU_SEQID_UNSTABLE & REQID_TABLE_MASK];
 
 	mutex_enter(&rt->rt_lock);
 
@@ -167,9 +167,7 @@ aggsum_add(aggsum_t *as, int64_t delta)
 	struct aggsum_bucket *asb;
 	int64_t borrow;
 
-	kpreempt_disable();
-	asb = &as->as_buckets[CPU_SEQID % as->as_numbuckets];
-	kpreempt_enable();
+	asb = &as->as_buckets[CPU_SEQID_UNSTABLE % as->as_numbuckets];
 
 	/* Try fast path if we already borrowed enough before. */
 	mutex_enter(&asb->asc_lock);
@@ -58,10 +58,8 @@ dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
 	int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
 	int error;
 
-	kpreempt_disable();
-	cpuobj = &os->os_obj_next_percpu[CPU_SEQID %
+	cpuobj = &os->os_obj_next_percpu[CPU_SEQID_UNSTABLE %
 	    os->os_obj_next_percpu_len];
-	kpreempt_enable();
 
 	if (dn_slots == 0) {
 		dn_slots = DNODE_MIN_SLOTS;
@@ -305,9 +305,7 @@ txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
 	 * significance to the chosen tx_cpu. Because.. Why not use
 	 * the current cpu to index into the array?
 	 */
-	kpreempt_disable();
-	tc = &tx->tx_cpu[CPU_SEQID];
-	kpreempt_enable();
+	tc = &tx->tx_cpu[CPU_SEQID_UNSTABLE];
 
 	mutex_enter(&tc->tc_open_lock);
 	txg = tx->tx_open_txg;
@@ -2246,9 +2246,7 @@ zio_nowait(zio_t *zio)
 	 * will ensure they complete prior to unloading the pool.
 	 */
 	spa_t *spa = zio->io_spa;
-	kpreempt_disable();
-	pio = spa->spa_async_zio_root[CPU_SEQID];
-	kpreempt_enable();
+	pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
 
 	zio_add_child(pio, zio);
 }