Merge commit 'refs/top-bases/linux-kernel-module' into linux-kernel-module
commit 5de3a200c4
@@ -765,6 +765,31 @@ dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
  */
 kstat_t *xuio_ksp = NULL;
 
+typedef struct xuio_stats {
+	/* loaned yet not returned arc_buf */
+	kstat_named_t xuiostat_onloan_rbuf;
+	kstat_named_t xuiostat_onloan_wbuf;
+	/* whether a copy is made when loaning out a read buffer */
+	kstat_named_t xuiostat_rbuf_copied;
+	kstat_named_t xuiostat_rbuf_nocopy;
+	/* whether a copy is made when assigning a write buffer */
+	kstat_named_t xuiostat_wbuf_copied;
+	kstat_named_t xuiostat_wbuf_nocopy;
+} xuio_stats_t;
+
+static xuio_stats_t xuio_stats = {
+	{ "onloan_read_buf", KSTAT_DATA_UINT64 },
+	{ "onloan_write_buf", KSTAT_DATA_UINT64 },
+	{ "read_buf_copied", KSTAT_DATA_UINT64 },
+	{ "read_buf_nocopy", KSTAT_DATA_UINT64 },
+	{ "write_buf_copied", KSTAT_DATA_UINT64 },
+	{ "write_buf_nocopy", KSTAT_DATA_UINT64 }
+};
+
+#define XUIOSTAT_INCR(stat, val) \
+	atomic_add_64(&xuio_stats.stat.value.ui64, (val))
+#define XUIOSTAT_BUMP(stat) XUIOSTAT_INCR(stat, 1)
+
 int
 dmu_xuio_init(xuio_t *xuio, int nblk)
 {
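The hunk above (paired with the header hunk further down) moves the xuio_stats typedef, the static xuio_stats array, and the XUIOSTAT_* macros out of the shared header and into the .c file, presumably so the static data and macros are defined in exactly one translation unit. For context, a minimal sketch of how a named kstat such as xuio_ksp is typically created and installed with the Solaris-style kstat API; the function name below is illustrative and not part of this diff:

/* Illustrative only: registering the named-kstat array defined above. */
static void
xuio_stat_init_sketch(void)
{
	xuio_ksp = kstat_create("zfs", 0, "xuio_stats", "misc",
	    KSTAT_TYPE_NAMED,
	    sizeof (xuio_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (xuio_ksp != NULL) {
		xuio_ksp->ks_data = &xuio_stats;
		kstat_install(xuio_ksp);
	}
}

A counter is then bumped with, for example, XUIOSTAT_BUMP(xuiostat_onloan_rbuf); each kstat_named_t field pairs with the matching name ("onloan_read_buf" and so on) in the static initializer.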
@@ -40,6 +40,7 @@
 #include <sys/param.h>
 #include <sys/cred.h>
 #include <sys/time.h>
+#include <sys/uio.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -239,32 +239,6 @@ typedef struct dmu_xuio {
 	iovec_t *iovp;
 } dmu_xuio_t;
 
-typedef struct xuio_stats {
-	/* loaned yet not returned arc_buf */
-	kstat_named_t xuiostat_onloan_rbuf;
-	kstat_named_t xuiostat_onloan_wbuf;
-	/* whether a copy is made when loaning out a read buffer */
-	kstat_named_t xuiostat_rbuf_copied;
-	kstat_named_t xuiostat_rbuf_nocopy;
-	/* whether a copy is made when assigning a write buffer */
-	kstat_named_t xuiostat_wbuf_copied;
-	kstat_named_t xuiostat_wbuf_nocopy;
-} xuio_stats_t;
-
-static xuio_stats_t xuio_stats = {
-	{ "onloan_read_buf", KSTAT_DATA_UINT64 },
-	{ "onloan_write_buf", KSTAT_DATA_UINT64 },
-	{ "read_buf_copied", KSTAT_DATA_UINT64 },
-	{ "read_buf_nocopy", KSTAT_DATA_UINT64 },
-	{ "write_buf_copied", KSTAT_DATA_UINT64 },
-	{ "write_buf_nocopy", KSTAT_DATA_UINT64 }
-};
-
-#define XUIOSTAT_INCR(stat, val) \
-	atomic_add_64(&xuio_stats.stat.value.ui64, (val))
-#define XUIOSTAT_BUMP(stat) XUIOSTAT_INCR(stat, 1)
-
-
 #ifdef __cplusplus
 }
 #endif
@@ -37,7 +37,7 @@
  * compress to d_len or less.
  */
 
-#include <sys/types.h>
+#include <sys/zfs_context.h>
 
 #define MATCH_BITS 6
 #define MATCH_MIN 3
@@ -55,12 +55,15 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
 	int copymask = 1 << (NBBY - 1);
 	int mlen, offset, hash;
 	uint16_t *hp;
-	uint16_t lempel[LEMPEL_SIZE] = { 0 };
+	uint16_t *lempel;
 
+	lempel = kmem_zalloc(LEMPEL_SIZE * sizeof (uint16_t), KM_SLEEP);
 	while (src < (uchar_t *)s_start + s_len) {
 		if ((copymask <<= 1) == (1 << NBBY)) {
-			if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY)
+			if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY) {
+				kmem_free(lempel, LEMPEL_SIZE*sizeof(uint16_t));
 				return (s_len);
+			}
 			copymask = 1;
 			copymap = dst;
 			*dst++ = 0;
@@ -90,6 +93,8 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
 			*dst++ = *src++;
 		}
 	}
+
+	kmem_free(lempel, LEMPEL_SIZE * sizeof (uint16_t));
 	return (dst - (uchar_t *)d_start);
 }
 
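The two lzjb.c hunks above replace a roughly 2 KB on-stack hash table (LEMPEL_SIZE entries of uint16_t) with a kmem_zalloc()/kmem_free() pair, freeing the buffer on both the early-bail path and the normal return; kernel stacks under Linux are far smaller than the stacks the original userspace-oriented code could assume, so large local arrays are moved to the heap. A minimal sketch of the same pattern, assuming the kmem_* interfaces from sys/zfs_context.h; the function and its arithmetic are illustrative only:

/* Illustrative only: keep a large scratch buffer off the kernel stack. */
static size_t
scratch_histogram(const uint8_t *src, size_t len)
{
	uint16_t *table;
	size_t i, sum = 0;

	/* ~2 KB is too much for a kernel stack frame, so allocate it */
	table = kmem_zalloc(1024 * sizeof (uint16_t), KM_SLEEP);
	for (i = 0; i < len; i++)
		sum += table[src[i]]++;
	kmem_free(table, 1024 * sizeof (uint16_t));
	return (sum);
}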
@@ -30,6 +30,8 @@
 #include <sys/vdev_impl.h>
 #include <sys/zio.h>
 
+#define WITH_NDF_BLOCK_ALLOCATOR
+
 uint64_t metaslab_aliquot = 512ULL << 10;
 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
 
@@ -350,6 +352,9 @@ metaslab_segsize_compare(const void *x1, const void *x2)
 	return (0);
 }
 
+#if defined(WITH_FF_BLOCK_ALLOCATOR) || \
+    defined(WITH_DF_BLOCK_ALLOCATOR) || \
+    defined(WITH_CDF_BLOCK_ALLOCATOR)
 /*
  * This is a helper function that can be used by the allocator to find
  * a suitable block to allocate. This will search the specified AVL
@@ -389,6 +394,7 @@ metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
 	*cursor = 0;
 	return (metaslab_block_picker(t, cursor, size, align));
 }
+#endif /* WITH_FF/DF/CDF_BLOCK_ALLOCATOR */
 
 static void
 metaslab_pp_load(space_map_t *sm)
@@ -452,6 +458,7 @@ metaslab_pp_maxsize(space_map_t *sm)
 	return (ss->ss_end - ss->ss_start);
 }
 
+#if defined(WITH_FF_BLOCK_ALLOCATOR)
 /*
  * ==========================================================================
  * The first-fit block allocator
@@ -484,6 +491,10 @@ static space_map_ops_t metaslab_ff_ops = {
 	metaslab_ff_fragmented
 };
 
+space_map_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
+#endif /* WITH_FF_BLOCK_ALLOCATOR */
+
+#if defined(WITH_DF_BLOCK_ALLOCATOR)
 /*
  * ==========================================================================
  * Dynamic block allocator -
@@ -543,11 +554,15 @@ static space_map_ops_t metaslab_df_ops = {
 	metaslab_df_fragmented
 };
 
+space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
+#endif /* WITH_DF_BLOCK_ALLOCATOR */
+
 /*
  * ==========================================================================
  * Other experimental allocators
  * ==========================================================================
  */
+#if defined(WITH_CDF_BLOCK_ALLOCATOR)
 static uint64_t
 metaslab_cdf_alloc(space_map_t *sm, uint64_t size)
 {
@@ -607,6 +622,10 @@ static space_map_ops_t metaslab_cdf_ops = {
 	metaslab_cdf_fragmented
 };
 
+space_map_ops_t *zfs_metaslab_ops = &metaslab_cdf_ops;
+#endif /* WITH_CDF_BLOCK_ALLOCATOR */
+
+#if defined(WITH_NDF_BLOCK_ALLOCATOR)
 uint64_t metaslab_ndf_clump_shift = 4;
 
 static uint64_t
@@ -672,6 +691,7 @@ static space_map_ops_t metaslab_ndf_ops = {
 };
 
 space_map_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
+#endif /* WITH_NDF_BLOCK_ALLOCATOR */
 
 /*
  * ==========================================================================
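The metaslab.c hunks above wrap each block allocator (first-fit, dynamic-fit, cdf, ndf) in its own #if defined(WITH_*_BLOCK_ALLOCATOR) block, and each block now supplies its own definition of zfs_metaslab_ops. Defining exactly one WITH_* macro (here WITH_NDF_BLOCK_ALLOCATOR) selects an allocator at compile time while keeping a single definition of that symbol, and the shared metaslab_block_picker() helper is compiled only when an allocator that needs it is enabled. A stripped-down sketch of the pattern, with placeholder types and names that are not from this diff:

/* Illustrative only: compile-time selection between alternative ops tables. */
typedef struct example_ops {
	int (*op_alloc)(int);
} example_ops_t;

#define WITH_B_ALLOCATOR	/* define exactly one WITH_* macro */

#ifdef WITH_A_ALLOCATOR
static int a_alloc(int sz) { return (sz); }
static example_ops_t a_ops = { a_alloc };
example_ops_t *selected_ops = &a_ops;
#endif

#ifdef WITH_B_ALLOCATOR
static int b_alloc(int sz) { return (sz * 2); }
static example_ops_t b_ops = { b_alloc };
example_ops_t *selected_ops = &b_ops;
#endif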
@@ -1716,7 +1716,7 @@ vdev_raidz_combrec(zio_t *zio, int total_errors, int data_errors)
 	void *orig[VDEV_RAIDZ_MAXPARITY];
 	int tstore[VDEV_RAIDZ_MAXPARITY + 2];
 	int *tgts = &tstore[1];
-	int current, next, i, c, n;
+	int curr, next, i, c, n;
 	int code, ret = 0;
 
 	ASSERT(total_errors < rm->rm_firstdatacol);
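The vdev_raidz.c hunks rename the local variable current to curr throughout vdev_raidz_combrec(), most likely because current is a reserved identifier in Linux kernel code: it is a macro (roughly #define current get_current()) naming the running task, so a local variable with that name fails to compile once kernel headers are pulled in. A small sketch of the workaround, assuming nothing beyond plain C; the function below is illustrative and not part of this diff:

/*
 * Illustrative only: a local renamed to avoid colliding with the kernel's
 * "current" macro.  Finds the first non-zero entry in tgts[0..n-1].
 */
static int
first_valid_target(const int *tgts, int n)
{
	int curr;	/* was "current" in the userspace-only code */

	for (curr = 0; curr < n; curr++) {
		if (tgts[curr] != 0)
			return (curr);
	}
	return (-1);
}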
@@ -1764,12 +1764,12 @@ vdev_raidz_combrec(zio_t *zio, int total_errors, int data_errors)
 
 		orig[n - 1] = zio_buf_alloc(rm->rm_col[0].rc_size);
 
-		current = 0;
-		next = tgts[current];
+		curr = 0;
+		next = tgts[curr];
 
-		while (current != n) {
-			tgts[current] = next;
-			current = 0;
+		while (curr != n) {
+			tgts[curr] = next;
+			curr = 0;
 
 			/*
 			 * Save off the original data that we're going to
@@ -1817,34 +1817,34 @@ vdev_raidz_combrec(zio_t *zio, int total_errors, int data_errors)
 
 			do {
 				/*
-				 * Find the next valid column after the current
+				 * Find the next valid column after the curr
 				 * position..
 				 */
-				for (next = tgts[current] + 1;
+				for (next = tgts[curr] + 1;
 				    next < rm->rm_cols &&
 				    rm->rm_col[next].rc_error != 0; next++)
 					continue;
 
-				ASSERT(next <= tgts[current + 1]);
+				ASSERT(next <= tgts[curr + 1]);
 
 				/*
 				 * If that spot is available, we're done here.
 				 */
-				if (next != tgts[current + 1])
+				if (next != tgts[curr + 1])
 					break;
 
 				/*
 				 * Otherwise, find the next valid column after
 				 * the previous position.
 				 */
-				for (c = tgts[current - 1] + 1;
+				for (c = tgts[curr - 1] + 1;
 				    rm->rm_col[c].rc_error != 0; c++)
 					continue;
 
-				tgts[current] = c;
-				current++;
+				tgts[curr] = c;
+				curr++;
 
-			} while (current != n);
+			} while (curr != n);
 		}
 	}
 	n--;