OpenZFS 8199 - multi-threaded dmu_object_alloc()
dmu_object_alloc() is single-threaded, so when multiple threads are creating files in a single filesystem, they spend a lot of time waiting for the os_obj_lock. To improve performance of multi-threaded file creation, we must make dmu_object_alloc() typically not grab any filesystem-wide locks.

The solution is to have a "next object to allocate" for each CPU. Each of these "next object"s is in a different block of the dnode object, so that concurrent allocation holds dnodes in different dbufs. When a thread's "next object" reaches the end of a chunk of objects (by default 4 blocks worth -- 128 dnodes), it will be reset to the per-objset os_obj_next, which will be increased by a chunk of objects (128). Only when manipulating the os_obj_next will we need to grab the os_obj_lock.

This decreases lock contention dramatically, because each thread only needs to grab the os_obj_lock briefly, once per 128 allocations. This results in a 70% performance improvement to multi-threaded object creation (where each thread is creating objects in its own directory), from 67,000/sec to 115,000/sec, with 8 CPUs.

Work sponsored by Intel Corp.

Authored by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Ned Bass <bass6@llnl.gov>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Ported-by: Matthew Ahrens <mahrens@delphix.com>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
OpenZFS-issue: https://www.illumos.org/issues/8199
OpenZFS-commit: https://github.com/openzfs/openzfs/pull/374
Closes #4703
Closes #6117
commit dbeb879699 (parent 1b7c1e5ce9)
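The scheme described above boils down to a per-CPU cursor that is bumped locklessly and refilled from a shared cursor once per chunk. The following standalone sketch illustrates just that cursor logic under simplified assumptions (user-space C, a fixed slot count standing in for CPUs, and invented names such as chunk_alloc and slot_next); it is not the ZFS code itself, which additionally verifies that each candidate dnode is actually free before committing to it, so an occasional race on a cursor is harmless there.

/*
 * Minimal sketch of per-slot ID cursors refilled from a shared cursor.
 * Only the refill takes the global lock; the common case is lock-free.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NSLOTS  8       /* stands in for the number of CPUs */
#define CHUNK   128     /* IDs handed to a slot per refill (2^7) */

static pthread_mutex_t chunk_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t next_chunk;                     /* protected by chunk_lock */
static _Atomic uint64_t slot_next[NSLOTS];      /* per-slot cursors */

static uint64_t
chunk_alloc(unsigned slot)
{
    uint64_t id = atomic_load(&slot_next[slot]);

    if (id % CHUNK == 0) {
        /* Slot exhausted its chunk: take the global lock briefly. */
        pthread_mutex_lock(&chunk_lock);
        id = next_chunk;
        next_chunk += CHUNK;
        pthread_mutex_unlock(&chunk_lock);
    }
    /* Common case: bump the per-slot cursor with no global lock. */
    atomic_store(&slot_next[slot], id + 1);
    return (id);
}

int
main(void)
{
    for (int i = 0; i < 5; i++)
        printf("slot 0 -> %llu\n", (unsigned long long)chunk_alloc(0));
    printf("slot 1 -> %llu\n", (unsigned long long)chunk_alloc(1));
    return (0);
}

With CHUNK = 128, a thread touches chunk_lock only once per 128 allocations, which is where the reduced contention measured in the commit message comes from.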
@@ -120,7 +120,11 @@ struct objset {
 
     /* Protected by os_obj_lock */
     kmutex_t os_obj_lock;
-    uint64_t os_obj_next;
+    uint64_t os_obj_next_chunk;
+
+    /* Per-CPU next object to allocate, protected by atomic ops. */
+    uint64_t *os_obj_next_percpu;
+    int os_obj_next_percpu_len;
 
     /* Protected by os_lock */
     kmutex_t os_lock;
@@ -32,6 +32,15 @@
 #include <sys/zfeature.h>
 #include <sys/dsl_dataset.h>
 
+/*
+ * Each of the concurrent object allocators will grab
+ * 2^dmu_object_alloc_chunk_shift dnode slots at a time.  The default is to
+ * grab 128 slots, which is 4 blocks worth.  This was experimentally
+ * determined to be the lowest value that eliminates the measurable effect
+ * of lock contention from this code path.
+ */
+int dmu_object_alloc_chunk_shift = 7;
+
 uint64_t
 dmu_object_alloc(objset_t *os, dmu_object_type_t ot, int blocksize,
     dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
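For reference, the default shift of 7 works out as the comment says: with the standard 16K metadnode block size and 512-byte dnode slots there are 32 slots per block, so a 128-slot chunk spans 4 blocks. A small check of that arithmetic follows; the block and dnode shift constants are restated here only for illustration, and the shift itself is tunable at runtime through the dmu_object_alloc_chunk_shift module parameter exported at the end of this file.

#include <stdio.h>

int
main(void)
{
    int dmu_object_alloc_chunk_shift = 7;   /* default from the diff above */
    int dnode_block_shift = 14;             /* 16K metadnode blocks */
    int dnode_shift = 9;                    /* 512-byte dnode slots */
    int dnodes_per_block = 1 << (dnode_block_shift - dnode_shift);
    int chunk = 1 << dmu_object_alloc_chunk_shift;

    /* Prints: 128 slots per chunk, 32 slots per block, 4 blocks per chunk */
    printf("%d slots per chunk, %d slots per block, %d blocks per chunk\n",
        chunk, dnodes_per_block, chunk / dnodes_per_block);
    return (0);
}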
@@ -50,6 +59,9 @@ dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
     dnode_t *dn = NULL;
     int dn_slots = dnodesize >> DNODE_SHIFT;
     boolean_t restarted = B_FALSE;
+    uint64_t *cpuobj = &os->os_obj_next_percpu[CPU_SEQID %
+        os->os_obj_next_percpu_len];
+    int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
 
     if (dn_slots == 0) {
         dn_slots = DNODE_MIN_SLOTS;
@@ -58,54 +70,88 @@ dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
         ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);
     }
 
-    mutex_enter(&os->os_obj_lock);
+    /*
+     * The "chunk" of dnodes that is assigned to a CPU-specific
+     * allocator needs to be at least one block's worth, to avoid
+     * lock contention on the dbuf.  It can be at most one L1 block's
+     * worth, so that the "rescan after polishing off a L1's worth"
+     * logic below will be sure to kick in.
+     */
+    if (dnodes_per_chunk < DNODES_PER_BLOCK)
+        dnodes_per_chunk = DNODES_PER_BLOCK;
+    if (dnodes_per_chunk > L1_dnode_count)
+        dnodes_per_chunk = L1_dnode_count;
+
+    object = *cpuobj;
     for (;;) {
-        object = os->os_obj_next;
         /*
-         * Each time we polish off a L1 bp worth of dnodes (2^12
-         * objects), move to another L1 bp that's still
-         * reasonably sparse (at most 1/4 full).  Look from the
-         * beginning at most once per txg.  If we still can't
-         * allocate from that L1 block, search for an empty L0
-         * block, which will quickly skip to the end of the
-         * metadnode if the no nearby L0 blocks are empty.  This
-         * fallback avoids a pathology where full dnode blocks
-         * containing large dnodes appear sparse because they
-         * have a low blk_fill, leading to many failed
-         * allocation attempts.  In the long term a better
-         * mechanism to search for sparse metadnode regions,
-         * such as spacemaps, could be implemented.
-         *
-         * os_scan_dnodes is set during txg sync if enough objects
-         * have been freed since the previous rescan to justify
-         * backfilling again.
-         *
-         * Note that dmu_traverse depends on the behavior that we use
-         * multiple blocks of the dnode object before going back to
-         * reuse objects.  Any change to this algorithm should preserve
-         * that property or find another solution to the issues
-         * described in traverse_visitbp.
+         * If we finished a chunk of dnodes, get a new one from
+         * the global allocator.
         */
-        if (P2PHASE(object, L1_dnode_count) == 0) {
-            uint64_t offset;
-            uint64_t blkfill;
-            int minlvl;
-            int error;
-            if (os->os_rescan_dnodes) {
-                offset = 0;
-                os->os_rescan_dnodes = B_FALSE;
-            } else {
-                offset = object << DNODE_SHIFT;
+        if (P2PHASE(object, dnodes_per_chunk) == 0) {
+            mutex_enter(&os->os_obj_lock);
+            ASSERT0(P2PHASE(os->os_obj_next_chunk,
+                dnodes_per_chunk));
+            object = os->os_obj_next_chunk;
+
+            /*
+             * Each time we polish off a L1 bp worth of dnodes
+             * (2^12 objects), move to another L1 bp that's
+             * still reasonably sparse (at most 1/4 full).  Look
+             * from the beginning at most once per txg.  If we
+             * still can't allocate from that L1 block, search
+             * for an empty L0 block, which will quickly skip
+             * to the end of the metadnode if no nearby L0
+             * blocks are empty.  This fallback avoids a
+             * pathology where full dnode blocks containing
+             * large dnodes appear sparse because they have a
+             * low blk_fill, leading to many failed allocation
+             * attempts.  In the long term a better mechanism to
+             * search for sparse metadnode regions, such as
+             * spacemaps, could be implemented.
+             *
+             * os_scan_dnodes is set during txg sync if enough
+             * objects have been freed since the previous
+             * rescan to justify backfilling again.
+             *
+             * Note that dmu_traverse depends on the behavior
+             * that we use multiple blocks of the dnode object
+             * before going back to reuse objects.  Any change
+             * to this algorithm should preserve that property
+             * or find another solution to the issues described
+             * in traverse_visitbp.
+             */
+            if (P2PHASE(object, L1_dnode_count) == 0) {
+                uint64_t offset;
+                uint64_t blkfill;
+                int minlvl;
+                int error;
+                if (os->os_rescan_dnodes) {
+                    offset = 0;
+                    os->os_rescan_dnodes = B_FALSE;
+                } else {
+                    offset = object << DNODE_SHIFT;
+                }
+                blkfill = restarted ? 1 : DNODES_PER_BLOCK >> 2;
+                minlvl = restarted ? 1 : 2;
+                restarted = B_TRUE;
+                error = dnode_next_offset(DMU_META_DNODE(os),
+                    DNODE_FIND_HOLE, &offset, minlvl,
+                    blkfill, 0);
+                if (error == 0) {
+                    object = offset >> DNODE_SHIFT;
+                }
             }
-            blkfill = restarted ? 1 : DNODES_PER_BLOCK >> 2;
-            minlvl = restarted ? 1 : 2;
-            restarted = B_TRUE;
-            error = dnode_next_offset(DMU_META_DNODE(os),
-                DNODE_FIND_HOLE, &offset, minlvl, blkfill, 0);
-            if (error == 0)
-                object = offset >> DNODE_SHIFT;
+            /*
+             * Note: if "restarted", we may find a L0 that
+             * is not suitably aligned.
+             */
+            os->os_obj_next_chunk =
+                P2ALIGN(object, dnodes_per_chunk) +
+                dnodes_per_chunk;
+            (void) atomic_swap_64(cpuobj, object);
+            mutex_exit(&os->os_obj_lock);
         }
-        os->os_obj_next = object + dn_slots;
 
         /*
         * XXX We should check for an i/o error here and return
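The refill path above hands the caller the chunk starting at os_obj_next_chunk and then advances the shared cursor to the chunk after the one containing object, using the illumos P2PHASE/P2ALIGN helpers. A minimal standalone check of that arithmetic follows, with the macros re-derived from plain bit operations (valid only for power-of-two chunk sizes, which the clamping above guarantees):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Power-of-two helpers, spelled out; align must be a power of two. */
#define P2PHASE(x, align)   ((x) & ((align) - 1))   /* offset within chunk */
#define P2ALIGN(x, align)   ((x) & ~((align) - 1))  /* round down to chunk */

int
main(void)
{
    uint64_t dnodes_per_chunk = 128;
    uint64_t object = 300;      /* a cursor somewhere inside a chunk */

    /* A refill is triggered only on an exact chunk boundary. */
    assert(P2PHASE(object, dnodes_per_chunk) != 0);
    assert(P2PHASE((uint64_t)256, dnodes_per_chunk) == 0);

    /* After a refill, the shared cursor moves to the following chunk. */
    uint64_t next_chunk = P2ALIGN(object, dnodes_per_chunk) +
        dnodes_per_chunk;
    assert(next_chunk == 384);
    printf("next chunk starts at %llu\n", (unsigned long long)next_chunk);
    return (0);
}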
@@ -113,28 +159,38 @@ dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
         * dmu_tx_assign(), but there is currently no mechanism
         * to do so.
         */
-        (void) dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, dn_slots,
-            FTAG, &dn);
-        if (dn)
-            break;
-
-        if (dmu_object_next(os, &object, B_TRUE, 0) == 0)
-            os->os_obj_next = object;
-        else
+        (void) dnode_hold_impl(os, object, DNODE_MUST_BE_FREE,
+            dn_slots, FTAG, &dn);
+        if (dn != NULL) {
+            rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
             /*
-             * Skip to next known valid starting point for a dnode.
+             * Another thread could have allocated it; check
+             * again now that we have the struct lock.
             */
-            os->os_obj_next = P2ROUNDUP(object + 1,
-                DNODES_PER_BLOCK);
+            if (dn->dn_type == DMU_OT_NONE) {
+                dnode_allocate(dn, ot, blocksize, 0,
+                    bonustype, bonuslen, dn_slots, tx);
+                rw_exit(&dn->dn_struct_rwlock);
+                dmu_tx_add_new_object(tx, dn);
+                dnode_rele(dn, FTAG);
+
+                (void) atomic_swap_64(cpuobj,
+                    object + dn_slots);
+                return (object);
+            }
+            rw_exit(&dn->dn_struct_rwlock);
+            dnode_rele(dn, FTAG);
+        }
+
+        if (dmu_object_next(os, &object, B_TRUE, 0) != 0) {
+            /*
+             * Skip to next known valid starting point for a
+             * dnode.
+             */
+            object = P2ROUNDUP(object + 1, DNODES_PER_BLOCK);
+        }
+        (void) atomic_swap_64(cpuobj, object);
     }
-
-    dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, dn_slots, tx);
-    mutex_exit(&os->os_obj_lock);
-
-    dmu_tx_add_new_object(tx, dn);
-    dnode_rele(dn, FTAG);
-
-    return (object);
 }
 
 int
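Because the per-CPU cursors are only updated with atomic swaps, two threads can occasionally race toward the same object number; the code above therefore treats the cursor as a hint and only commits after re-checking dn_type under dn_struct_rwlock, moving on to the next candidate if it lost the race. Reduced to a generic claim-and-verify loop with invented names (claim_slot, struct slot), the pattern looks roughly like this:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NSLOTS  4

struct slot {
    pthread_mutex_t lock;   /* stands in for dn_struct_rwlock */
    bool allocated;         /* stands in for dn_type != DMU_OT_NONE */
};

static struct slot slots[NSLOTS];

/* Try the candidate first; on a lost race, fall through to the next slot. */
static int
claim_slot(int candidate)
{
    for (int tries = 0; tries < NSLOTS; tries++) {
        int i = (candidate + tries) % NSLOTS;
        struct slot *s = &slots[i];

        pthread_mutex_lock(&s->lock);
        if (!s->allocated) {
            /* Still free now that we hold the slot lock: commit. */
            s->allocated = true;
            pthread_mutex_unlock(&s->lock);
            return (i);
        }
        /* Another thread won the race; drop the lock and retry. */
        pthread_mutex_unlock(&s->lock);
    }
    return (-1);
}

int
main(void)
{
    for (int i = 0; i < NSLOTS; i++)
        pthread_mutex_init(&slots[i].lock, NULL);

    printf("claimed %d\n", claim_slot(2));  /* claims slot 2 */
    printf("claimed %d\n", claim_slot(2));  /* 2 is taken, claims 3 */
    return (0);
}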
@@ -341,4 +397,10 @@ EXPORT_SYMBOL(dmu_object_free);
 EXPORT_SYMBOL(dmu_object_next);
 EXPORT_SYMBOL(dmu_object_zapify);
 EXPORT_SYMBOL(dmu_object_free_zapified);
+
+/* BEGIN CSTYLED */
+module_param(dmu_object_alloc_chunk_shift, int, 0644);
+MODULE_PARM_DESC(dmu_object_alloc_chunk_shift,
+    "CPU-specific allocator grabs 2^N objects at once");
+/* END CSTYLED */
 #endif
@@ -547,6 +547,9 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
     mutex_init(&os->os_userused_lock, NULL, MUTEX_DEFAULT, NULL);
     mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
     mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
+    os->os_obj_next_percpu_len = boot_ncpus;
+    os->os_obj_next_percpu = kmem_zalloc(os->os_obj_next_percpu_len *
+        sizeof (os->os_obj_next_percpu[0]), KM_SLEEP);
 
     dnode_special_open(os, &os->os_phys->os_meta_dnode,
         DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
@@ -842,6 +845,9 @@ dmu_objset_evict_done(objset_t *os)
     rw_enter(&os_lock, RW_READER);
     rw_exit(&os_lock);
 
+    kmem_free(os->os_obj_next_percpu,
+        os->os_obj_next_percpu_len * sizeof (os->os_obj_next_percpu[0]));
+
     mutex_destroy(&os->os_lock);
     mutex_destroy(&os->os_userused_lock);
     mutex_destroy(&os->os_obj_lock);
@@ -1779,14 +1779,6 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
         DMU_OT_NONE, 0, tx);
     ASSERT(error == 0);
 
-    /*
-     * Give dmu_object_alloc() a hint about where to start
-     * allocating new objects.  Otherwise, since the metadnode's
-     * dnode_phys_t structure isn't initialized yet, dmu_object_next()
-     * would fail and we'd have to skip to the next dnode block.
-     */
-    os->os_obj_next = moid + 1;
-
     /*
     * Set starting attributes.
     */