ZAP: Some cleanups/micro-optimizations
- Remove custom zap_memset(), use regular memset().
- Use PANIC() instead of opaque cmn_err(CE_PANIC).
- Provide entry parameter to zap_leaf_rehash_entry().
- Reduce branching in zap_leaf_array_create() inner loop.
- Remove signedness where it should not be.

Should be no functional changes.

Reviewed-by: Brian Atkinson <batkinson@lanl.gov>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Alexander Motin <mav@FreeBSD.org>
Sponsored by: iXsystems, Inc.
Closes #15976
commit c28f94f32e
parent f1b368359b
--- a/include/sys/zap_leaf.h
+++ b/include/sys/zap_leaf.h
@@ -47,7 +47,7 @@ struct zap_stats;
  * entries - header space (2*chunksize)
  */
 #define	ZAP_LEAF_NUMCHUNKS_BS(bs) \
-	(((1<<(bs)) - 2*ZAP_LEAF_HASH_NUMENTRIES_BS(bs)) / \
+	(((1U << (bs)) - 2 * ZAP_LEAF_HASH_NUMENTRIES_BS(bs)) / \
 	ZAP_LEAF_CHUNKSIZE - 2)
 
 #define	ZAP_LEAF_NUMCHUNKS(l) (ZAP_LEAF_NUMCHUNKS_BS(((l)->l_bs)))
@@ -80,7 +80,7 @@ struct zap_stats;
  * chunks per entry (3).
  */
 #define	ZAP_LEAF_HASH_SHIFT_BS(bs) ((bs) - 5)
-#define	ZAP_LEAF_HASH_NUMENTRIES_BS(bs) (1 << ZAP_LEAF_HASH_SHIFT_BS(bs))
+#define	ZAP_LEAF_HASH_NUMENTRIES_BS(bs) (1U << ZAP_LEAF_HASH_SHIFT_BS(bs))
 #define	ZAP_LEAF_HASH_SHIFT(l) (ZAP_LEAF_HASH_SHIFT_BS(((l)->l_bs)))
 #define	ZAP_LEAF_HASH_NUMENTRIES(l) (ZAP_LEAF_HASH_NUMENTRIES_BS(((l)->l_bs)))
 
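A note on the two header hunks above: with the signed literal, both macros evaluated to int, which mixes poorly with the uint_t counters and fields introduced in the .c changes below; with 1U they are unsigned expressions. A stand-alone sketch of the type difference (the OLD_/NEW_ macro names are illustrative copies, not the real header):

#include <stdio.h>

/* Illustrative copies of the macro before and after the change. */
#define	OLD_HASH_NUMENTRIES(bs)	(1 << ((bs) - 5))
#define	NEW_HASH_NUMENTRIES(bs)	(1U << ((bs) - 5))

/* Report the type an expression evaluates to. */
#define	TYPE_NAME(x) _Generic((x), int: "int", unsigned int: "unsigned int", \
	default: "other")

int
main(void)
{
	/* With a signed 1 the macro is an int and mixes badly with uint_t. */
	printf("old: %s\n", TYPE_NAME(OLD_HASH_NUMENTRIES(17)));
	/* With 1U it is unsigned, matching the new uint_t loop counters. */
	printf("new: %s\n", TYPE_NAME(NEW_HASH_NUMENTRIES(17)));
	return (0);
}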
@@ -163,7 +163,7 @@ typedef struct zap_leaf {
 	dmu_buf_user_t l_dbu;
 	krwlock_t l_rwlock;
 	uint64_t l_blkid;		/* 1<<ZAP_BLOCK_SHIFT byte block off */
-	int l_bs;			/* block size shift */
+	uint_t l_bs;			/* block size shift */
 	dmu_buf_t *l_dbuf;
 } zap_leaf_t;
 
@@ -243,7 +243,7 @@ extern boolean_t zap_entry_normalization_conflict(zap_entry_handle_t *zeh,
  */
 
 extern void zap_leaf_init(zap_leaf_t *l, boolean_t sort);
-extern void zap_leaf_byteswap(zap_leaf_phys_t *buf, int len);
+extern void zap_leaf_byteswap(zap_leaf_phys_t *buf, size_t len);
 extern void zap_leaf_split(zap_leaf_t *l, zap_leaf_t *nl, boolean_t sort);
 extern void zap_leaf_stats(struct zap *zap, zap_leaf_t *l,
     struct zap_stats *zs);
--- a/module/zfs/zap_leaf.c
+++ b/module/zfs/zap_leaf.c
@@ -41,7 +41,8 @@
 #include <sys/zap_leaf.h>
 #include <sys/arc.h>
 
-static uint16_t *zap_leaf_rehash_entry(zap_leaf_t *l, uint16_t entry);
+static uint16_t *zap_leaf_rehash_entry(zap_leaf_t *l, struct zap_leaf_entry *le,
+    uint16_t entry);
 
 #define	CHAIN_END 0xffff /* end of the chunk chain */
 
@@ -52,16 +53,6 @@ static uint16_t *zap_leaf_rehash_entry(zap_leaf_t *l, uint16_t entry);
 
 #define	LEAF_HASH_ENTPTR(l, h) (&zap_leaf_phys(l)->l_hash[LEAF_HASH(l, h)])
 
-static void
-zap_memset(void *a, int c, size_t n)
-{
-	char *cp = a;
-	char *cpend = cp + n;
-
-	while (cp < cpend)
-		*cp++ = c;
-}
-
 static void
 stv(int len, void *addr, uint64_t value)
 {
@@ -79,7 +70,7 @@ stv(int len, void *addr, uint64_t value)
 		*(uint64_t *)addr = value;
 		return;
 	default:
-		cmn_err(CE_PANIC, "bad int len %d", len);
+		PANIC("bad int len %d", len);
 	}
 }
 
@@ -96,13 +87,13 @@ ldv(int len, const void *addr)
 	case 8:
 		return (*(uint64_t *)addr);
 	default:
-		cmn_err(CE_PANIC, "bad int len %d", len);
+		PANIC("bad int len %d", len);
 	}
 	return (0xFEEDFACEDEADBEEFULL);
 }
 
 void
-zap_leaf_byteswap(zap_leaf_phys_t *buf, int size)
+zap_leaf_byteswap(zap_leaf_phys_t *buf, size_t size)
 {
 	zap_leaf_t l;
 	dmu_buf_t l_dbuf;
@@ -119,10 +110,10 @@ zap_leaf_byteswap(zap_leaf_phys_t *buf, int size)
 	buf->l_hdr.lh_prefix_len = BSWAP_16(buf->l_hdr.lh_prefix_len);
 	buf->l_hdr.lh_freelist = BSWAP_16(buf->l_hdr.lh_freelist);
 
-	for (int i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(&l); i++)
+	for (uint_t i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(&l); i++)
 		buf->l_hash[i] = BSWAP_16(buf->l_hash[i]);
 
-	for (int i = 0; i < ZAP_LEAF_NUMCHUNKS(&l); i++) {
+	for (uint_t i = 0; i < ZAP_LEAF_NUMCHUNKS(&l); i++) {
 		zap_leaf_chunk_t *lc = &ZAP_LEAF_CHUNK(&l, i);
 		struct zap_leaf_entry *le;
 
@@ -160,11 +151,11 @@ void
 zap_leaf_init(zap_leaf_t *l, boolean_t sort)
 {
 	l->l_bs = highbit64(l->l_dbuf->db_size) - 1;
-	zap_memset(&zap_leaf_phys(l)->l_hdr, 0,
+	memset(&zap_leaf_phys(l)->l_hdr, 0,
 	    sizeof (struct zap_leaf_header));
-	zap_memset(zap_leaf_phys(l)->l_hash, CHAIN_END,
+	memset(zap_leaf_phys(l)->l_hash, CHAIN_END,
 	    2*ZAP_LEAF_HASH_NUMENTRIES(l));
-	for (int i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) {
+	for (uint_t i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) {
 		ZAP_LEAF_CHUNK(l, i).l_free.lf_type = ZAP_CHUNK_FREE;
 		ZAP_LEAF_CHUNK(l, i).l_free.lf_next = i+1;
 	}
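zap_leaf_init() above (and zap_leaf_split() further down) now fill the uint16_t hash table with plain memset(). memset() truncates its fill value to one byte, but every byte of CHAIN_END (0xffff) is 0xff, so the byte-wise fill still leaves CHAIN_END in each 16-bit slot, which is the same result the removed zap_memset() loop produced. A quick stand-alone check of that property:

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define	CHAIN_END 0xffff	/* end of the chunk chain */

int
main(void)
{
	uint16_t l_hash[8];

	/*
	 * memset() keeps only the low byte of its fill value (0xffff -> 0xff),
	 * but every byte of CHAIN_END is 0xff, so byte-filling the uint16_t
	 * array still yields CHAIN_END in every slot.
	 */
	memset(l_hash, CHAIN_END, sizeof (l_hash));
	for (size_t i = 0; i < sizeof (l_hash) / sizeof (l_hash[0]); i++)
		assert(l_hash[i] == CHAIN_END);
	return (0);
}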
@@ -185,7 +176,7 @@ zap_leaf_chunk_alloc(zap_leaf_t *l)
 {
 	ASSERT(zap_leaf_phys(l)->l_hdr.lh_nfree > 0);
 
-	int chunk = zap_leaf_phys(l)->l_hdr.lh_freelist;
+	uint_t chunk = zap_leaf_phys(l)->l_hdr.lh_freelist;
 	ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
 	ASSERT3U(ZAP_LEAF_CHUNK(l, chunk).l_free.lf_type, ==, ZAP_CHUNK_FREE);
 
@@ -223,28 +214,29 @@ zap_leaf_array_create(zap_leaf_t *l, const char *buf,
 {
 	uint16_t chunk_head;
 	uint16_t *chunkp = &chunk_head;
-	int byten = 0;
+	int byten = integer_size;
 	uint64_t value = 0;
 	int shift = (integer_size - 1) * 8;
 	int len = num_integers;
 
 	ASSERT3U(num_integers * integer_size, <=, ZAP_MAXVALUELEN);
 
+	if (len > 0)
+		value = ldv(integer_size, buf);
 	while (len > 0) {
 		uint16_t chunk = zap_leaf_chunk_alloc(l);
 		struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
 
 		la->la_type = ZAP_CHUNK_ARRAY;
 		for (int i = 0; i < ZAP_LEAF_ARRAY_BYTES; i++) {
-			if (byten == 0)
-				value = ldv(integer_size, buf);
 			la->la_array[i] = value >> shift;
 			value <<= 8;
-			if (++byten == integer_size) {
-				byten = 0;
-				buf += integer_size;
+			if (--byten == 0) {
 				if (--len == 0)
 					break;
+				byten = integer_size;
+				buf += integer_size;
+				value = ldv(integer_size, buf);
 			}
 		}
 
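The zap_leaf_array_create() hunk above is the only non-mechanical change: the first source integer is now loaded before the loop, byten counts down instead of up, and value is reloaded inside the same branch that already handled the end-of-integer bookkeeping, so the extra per-byte if (byten == 0) test disappears. Below is a self-contained sketch of the same packing scheme; pack(), the simplified big-endian ldv(), and the ARRAY_BYTES constant are stand-ins (the real ldv() reads native-endian integers and real chunks carry ZAP array headers), meant only to show that the restructured loop emits the expected byte stream:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define	ARRAY_BYTES	21	/* illustrative stand-in for ZAP_LEAF_ARRAY_BYTES */

/* Simplified ldv(): read one 'len'-byte integer (big-endian here for clarity). */
static uint64_t
ldv(int len, const unsigned char *addr)
{
	uint64_t v = 0;
	for (int i = 0; i < len; i++)
		v = (v << 8) | addr[i];
	return (v);
}

/*
 * Pack 'num_integers' integers of 'integer_size' bytes from 'buf' into
 * fixed-size chunks using the restructured loop: the first value is
 * pre-loaded, and the "consumed a whole integer" test doubles as the reload
 * point, so there is one branch per byte instead of two.
 */
static size_t
pack(const unsigned char *buf, int integer_size, int num_integers,
    unsigned char *out)
{
	int byten = integer_size;
	uint64_t value = 0;
	int shift = (integer_size - 1) * 8;
	int len = num_integers;
	size_t written = 0;

	if (len > 0)
		value = ldv(integer_size, buf);
	while (len > 0) {
		unsigned char *chunk = out + written;	/* next output chunk */
		for (int i = 0; i < ARRAY_BYTES; i++) {
			chunk[i] = value >> shift;
			value <<= 8;
			if (--byten == 0) {
				if (--len == 0)
					break;
				byten = integer_size;
				buf += integer_size;
				value = ldv(integer_size, buf);
			}
		}
		written += ARRAY_BYTES;
	}
	return (written);
}

int
main(void)
{
	unsigned char src[6 * 8], dst[4 * ARRAY_BYTES] = { 0 };

	for (size_t i = 0; i < sizeof (src); i++)
		src[i] = (unsigned char)i;

	/* Six 8-byte integers -> 48 payload bytes spread over 3 chunks. */
	size_t n = pack(src, 8, 6, dst);
	assert(n == 3 * ARRAY_BYTES);
	/* With a big-endian ldv() the payload bytes come out unchanged. */
	assert(memcmp(dst, src, sizeof (src)) == 0);
	(void) printf("packed %zu source bytes into %zu chunk bytes\n",
	    sizeof (src), n);
	return (0);
}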
@@ -264,7 +256,7 @@ zap_leaf_array_free(zap_leaf_t *l, uint16_t *chunkp)
 	*chunkp = CHAIN_END;
 
 	while (chunk != CHAIN_END) {
-		int nextchunk = ZAP_LEAF_CHUNK(l, chunk).l_array.la_next;
+		uint_t nextchunk = ZAP_LEAF_CHUNK(l, chunk).l_array.la_next;
 		ASSERT3U(ZAP_LEAF_CHUNK(l, chunk).l_array.la_type, ==,
 		    ZAP_CHUNK_ARRAY);
 		zap_leaf_chunk_free(l, chunk);
@@ -333,7 +325,7 @@ zap_leaf_array_read(zap_leaf_t *l, uint16_t chunk,
 
 static boolean_t
 zap_leaf_array_match(zap_leaf_t *l, zap_name_t *zn,
-    int chunk, int array_numints)
+    uint_t chunk, int array_numints)
 {
 	int bseen = 0;
 
@@ -562,7 +554,7 @@ zap_entry_create(zap_leaf_t *l, zap_name_t *zn, uint32_t cd,
 
 	uint64_t valuelen = integer_size * num_integers;
 
-	int numchunks = 1 + ZAP_LEAF_ARRAY_NCHUNKS(zn->zn_key_orig_numints *
+	uint_t numchunks = 1 + ZAP_LEAF_ARRAY_NCHUNKS(zn->zn_key_orig_numints *
 	    zn->zn_key_intlen) + ZAP_LEAF_ARRAY_NCHUNKS(valuelen);
 	if (numchunks > ZAP_LEAF_NUMCHUNKS(l))
 		return (SET_ERROR(E2BIG));
@@ -624,7 +616,7 @@ zap_entry_create(zap_leaf_t *l, zap_name_t *zn, uint32_t cd,
 
 	/* link it into the hash chain */
 	/* XXX if we did the search above, we could just use that */
-	uint16_t *chunkp = zap_leaf_rehash_entry(l, chunk);
+	uint16_t *chunkp = zap_leaf_rehash_entry(l, le, chunk);
 
 	zap_leaf_phys(l)->l_hdr.lh_nentries++;
 
@@ -687,9 +679,8 @@ zap_entry_normalization_conflict(zap_entry_handle_t *zeh, zap_name_t *zn,
  */
 
 static uint16_t *
-zap_leaf_rehash_entry(zap_leaf_t *l, uint16_t entry)
+zap_leaf_rehash_entry(zap_leaf_t *l, struct zap_leaf_entry *le, uint16_t entry)
 {
-	struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, entry);
 	struct zap_leaf_entry *le2;
 	uint16_t *chunkp;
 
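The callers touched above and below (zap_entry_create(), zap_leaf_transfer_entry(), zap_leaf_split()) already hold the struct zap_leaf_entry pointer for the chunk being rehashed, so passing it in lets the helper drop its own ZAP_LEAF_ENTRY(l, entry) lookup. A miniature of the pattern with made-up types standing in for the leaf and its chunk array (the real helper also walks the hash chain for sorted insertion, which is omitted here):

#include <stdint.h>

/* Made-up stand-ins for the leaf, its chunk array, and the hash table. */
struct entry { uint64_t hash; uint16_t next; };
struct leaf { struct entry chunks[64]; uint16_t hash_table[32]; };

#define	LEAF_ENTRY(l, idx)	(&(l)->chunks[(idx)])

/* Before: the helper re-derived the entry pointer from its index. */
static uint16_t *
rehash_old(struct leaf *l, uint16_t idx)
{
	struct entry *e = LEAF_ENTRY(l, idx);	/* repeated lookup */
	uint16_t *slot = &l->hash_table[e->hash % 32];

	e->next = *slot;
	*slot = idx;
	return (slot);
}

/* After: the caller passes the pointer it already computed. */
static uint16_t *
rehash_new(struct leaf *l, struct entry *e, uint16_t idx)
{
	uint16_t *slot = &l->hash_table[e->hash % 32];

	e->next = *slot;
	*slot = idx;
	return (slot);
}

int
main(void)
{
	static struct leaf l;	/* zero-initialized */

	for (int i = 0; i < 32; i++)
		l.hash_table[i] = 0xffff;	/* CHAIN_END-style sentinel */
	(void) rehash_old(&l, 3);
	(void) rehash_new(&l, LEAF_ENTRY(&l, 5), 5);
	return (0);
}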
@@ -722,7 +713,7 @@ zap_leaf_transfer_array(zap_leaf_t *l, uint16_t chunk, zap_leaf_t *nl)
 		    &ZAP_LEAF_CHUNK(nl, nchunk).l_array;
 		struct zap_leaf_array *la =
 		    &ZAP_LEAF_CHUNK(l, chunk).l_array;
-		int nextchunk = la->la_next;
+		uint_t nextchunk = la->la_next;
 
 		ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
 		ASSERT3U(nchunk, <, ZAP_LEAF_NUMCHUNKS(l));
@@ -739,7 +730,7 @@ zap_leaf_transfer_array(zap_leaf_t *l, uint16_t chunk, zap_leaf_t *nl)
 }
 
 static void
-zap_leaf_transfer_entry(zap_leaf_t *l, int entry, zap_leaf_t *nl)
+zap_leaf_transfer_entry(zap_leaf_t *l, uint_t entry, zap_leaf_t *nl)
 {
 	struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, entry);
 	ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);
@@ -748,7 +739,7 @@ zap_leaf_transfer_entry(zap_leaf_t *l, int entry, zap_leaf_t *nl)
 	struct zap_leaf_entry *nle = ZAP_LEAF_ENTRY(nl, chunk);
 	*nle = *le; /* structure assignment */
 
-	(void) zap_leaf_rehash_entry(nl, chunk);
+	(void) zap_leaf_rehash_entry(nl, nle, chunk);
 
 	nle->le_name_chunk = zap_leaf_transfer_array(l, le->le_name_chunk, nl);
 	nle->le_value_chunk =
@@ -766,7 +757,7 @@ zap_leaf_transfer_entry(zap_leaf_t *l, int entry, zap_leaf_t *nl)
 void
 zap_leaf_split(zap_leaf_t *l, zap_leaf_t *nl, boolean_t sort)
 {
-	int bit = 64 - 1 - zap_leaf_phys(l)->l_hdr.lh_prefix_len;
+	uint_t bit = 64 - 1 - zap_leaf_phys(l)->l_hdr.lh_prefix_len;
 
 	/* set new prefix and prefix_len */
 	zap_leaf_phys(l)->l_hdr.lh_prefix <<= 1;
@@ -777,7 +768,7 @@ zap_leaf_split(zap_leaf_t *l, zap_leaf_t *nl, boolean_t sort)
 	    zap_leaf_phys(l)->l_hdr.lh_prefix_len;
 
 	/* break existing hash chains */
-	zap_memset(zap_leaf_phys(l)->l_hash, CHAIN_END,
+	memset(zap_leaf_phys(l)->l_hash, CHAIN_END,
 	    2*ZAP_LEAF_HASH_NUMENTRIES(l));
 
 	if (sort)
@@ -792,7 +783,7 @@ zap_leaf_split(zap_leaf_t *l, zap_leaf_t *nl, boolean_t sort)
 	 * but this accesses memory more sequentially, and when we're
 	 * called, the block is usually pretty full.
 	 */
-	for (int i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) {
+	for (uint_t i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) {
 		struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, i);
 		if (le->le_type != ZAP_CHUNK_ENTRY)
 			continue;
@@ -800,14 +791,14 @@ zap_leaf_split(zap_leaf_t *l, zap_leaf_t *nl, boolean_t sort)
 		if (le->le_hash & (1ULL << bit))
 			zap_leaf_transfer_entry(l, i, nl);
 		else
-			(void) zap_leaf_rehash_entry(l, i);
+			(void) zap_leaf_rehash_entry(l, le, i);
 	}
 }
 
 void
 zap_leaf_stats(zap_t *zap, zap_leaf_t *l, zap_stats_t *zs)
 {
-	int n = zap_f_phys(zap)->zap_ptrtbl.zt_shift -
+	uint_t n = zap_f_phys(zap)->zap_ptrtbl.zt_shift -
 	    zap_leaf_phys(l)->l_hdr.lh_prefix_len;
 	n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
 	zs->zs_leafs_with_2n_pointers[n]++;
@@ -823,9 +814,9 @@ zap_leaf_stats(zap_t *zap, zap_leaf_t *l, zap_stats_t *zs)
 	n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
 	zs->zs_blocks_n_tenths_full[n]++;
 
-	for (int i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(l); i++) {
-		int nentries = 0;
-		int chunk = zap_leaf_phys(l)->l_hash[i];
+	for (uint_t i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(l); i++) {
+		uint_t nentries = 0;
+		uint_t chunk = zap_leaf_phys(l)->l_hash[i];
 
 		while (chunk != CHAIN_END) {
 			struct zap_leaf_entry *le =