Merge commit 'refs/top-bases/linux-kernel-mem' into linux-kernel-mem

Brian Behlendorf 2010-06-14 16:59:15 -07:00
commit 09abb3e8ae
6 changed files with 68 additions and 43 deletions

View File

@@ -765,6 +765,31 @@ dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
  */
 kstat_t *xuio_ksp = NULL;
 
+typedef struct xuio_stats {
+	/* loaned yet not returned arc_buf */
+	kstat_named_t xuiostat_onloan_rbuf;
+	kstat_named_t xuiostat_onloan_wbuf;
+	/* whether a copy is made when loaning out a read buffer */
+	kstat_named_t xuiostat_rbuf_copied;
+	kstat_named_t xuiostat_rbuf_nocopy;
+	/* whether a copy is made when assigning a write buffer */
+	kstat_named_t xuiostat_wbuf_copied;
+	kstat_named_t xuiostat_wbuf_nocopy;
+} xuio_stats_t;
+
+static xuio_stats_t xuio_stats = {
+	{ "onloan_read_buf",  KSTAT_DATA_UINT64 },
+	{ "onloan_write_buf", KSTAT_DATA_UINT64 },
+	{ "read_buf_copied",  KSTAT_DATA_UINT64 },
+	{ "read_buf_nocopy",  KSTAT_DATA_UINT64 },
+	{ "write_buf_copied", KSTAT_DATA_UINT64 },
+	{ "write_buf_nocopy", KSTAT_DATA_UINT64 }
+};
+
+#define	XUIOSTAT_INCR(stat, val)	\
+	atomic_add_64(&xuio_stats.stat.value.ui64, (val))
+#define	XUIOSTAT_BUMP(stat)	XUIOSTAT_INCR(stat, 1)
+
 int
 dmu_xuio_init(xuio_t *xuio, int nblk)
 {
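
Note on the hunk above: it adds the counter definitions next to the existing xuio_ksp kstat handle, but neither the kstat registration nor the XUIOSTAT_BUMP() call sites appear in this excerpt. The sketch below shows the conventional Solaris/SPL named-kstat pattern for publishing such a stat set; the function names and the "zfs"/"xuio_stats"/"misc" strings are illustrative assumptions, not lines from this commit.

/*
 * Illustrative sketch only (not part of this diff): publish the named
 * kstats declared above and bump one of them.  Function names are
 * hypothetical.
 */
static void
dmu_xuio_stat_init(void)
{
	xuio_ksp = kstat_create("zfs", 0, "xuio_stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (xuio_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (xuio_ksp != NULL) {
		xuio_ksp->ks_data = &xuio_stats;
		kstat_install(xuio_ksp);
	}
}

static void
dmu_xuio_stat_fini(void)
{
	if (xuio_ksp != NULL) {
		kstat_delete(xuio_ksp);
		xuio_ksp = NULL;
	}
}

/* At a call site, e.g. when an arc_buf is loaned out for a read: */
/* XUIOSTAT_BUMP(xuiostat_onloan_rbuf); */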

View File

@@ -40,6 +40,7 @@
 #include <sys/param.h>
 #include <sys/cred.h>
 #include <sys/time.h>
+#include <sys/uio.h>
 
 #ifdef	__cplusplus
 extern "C" {

View File

@@ -239,32 +239,6 @@ typedef struct dmu_xuio {
 	iovec_t *iovp;
 } dmu_xuio_t;
 
-typedef struct xuio_stats {
-	/* loaned yet not returned arc_buf */
-	kstat_named_t xuiostat_onloan_rbuf;
-	kstat_named_t xuiostat_onloan_wbuf;
-	/* whether a copy is made when loaning out a read buffer */
-	kstat_named_t xuiostat_rbuf_copied;
-	kstat_named_t xuiostat_rbuf_nocopy;
-	/* whether a copy is made when assigning a write buffer */
-	kstat_named_t xuiostat_wbuf_copied;
-	kstat_named_t xuiostat_wbuf_nocopy;
-} xuio_stats_t;
-
-static xuio_stats_t xuio_stats = {
-	{ "onloan_read_buf",  KSTAT_DATA_UINT64 },
-	{ "onloan_write_buf", KSTAT_DATA_UINT64 },
-	{ "read_buf_copied",  KSTAT_DATA_UINT64 },
-	{ "read_buf_nocopy",  KSTAT_DATA_UINT64 },
-	{ "write_buf_copied", KSTAT_DATA_UINT64 },
-	{ "write_buf_nocopy", KSTAT_DATA_UINT64 }
-};
-
-#define	XUIOSTAT_INCR(stat, val)	\
-	atomic_add_64(&xuio_stats.stat.value.ui64, (val))
-#define	XUIOSTAT_BUMP(stat)	XUIOSTAT_INCR(stat, 1)
-
 #ifdef	__cplusplus
 }
 #endif
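
The block removed here is the same xuio_stats/XUIOSTAT definition that the first hunk adds to dmu.c, so the commit as a whole moves the counters out of the header and into the single compilation unit that updates them. One plausible reason, not stated in the diff, is the usual pitfall of static definitions in headers, sketched below with made-up file and symbol names:

/*
 * Hypothetical illustration, not code from this commit: a "static"
 * object defined in a header is duplicated in every .c file that
 * includes it, so counts bumped in one file never show up in another.
 */

/* counters.h */
#include <stdint.h>
static uint64_t read_hits;	/* each includer gets its own read_hits */

/* a.c */
#include "counters.h"
void a_hit(void) { read_hits++; }	/* bumps a.c's private copy */

/* b.c */
#include "counters.h"
uint64_t b_read_hits(void) { return (read_hits); }	/* always sees 0 */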

View File

@@ -37,7 +37,7 @@
  * compress to d_len or less.
  */
 
-#include <sys/types.h>
+#include <sys/zfs_context.h>
 
 #define	MATCH_BITS	6
 #define	MATCH_MIN	3
@@ -55,12 +55,15 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
 	int copymask = 1 << (NBBY - 1);
 	int mlen, offset, hash;
 	uint16_t *hp;
-	uint16_t lempel[LEMPEL_SIZE] = { 0 };
+	uint16_t *lempel;
 
+	lempel = kmem_zalloc(LEMPEL_SIZE * sizeof (uint16_t), KM_SLEEP);
 	while (src < (uchar_t *)s_start + s_len) {
 		if ((copymask <<= 1) == (1 << NBBY)) {
-			if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY)
+			if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY) {
+				kmem_free(lempel, LEMPEL_SIZE*sizeof(uint16_t));
 				return (s_len);
+			}
 			copymask = 1;
 			copymap = dst;
 			*dst++ = 0;
@@ -90,6 +93,8 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
 			*dst++ = *src++;
 		}
 	}
+
+	kmem_free(lempel, LEMPEL_SIZE * sizeof (uint16_t));
 	return (dst - (uchar_t *)d_start);
 }
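
The lzjb_compress() change replaces the on-stack Lempel history table with a kmem_zalloc()/kmem_free() allocation and frees it on the early-return path as well as at the end; switching the include to <sys/zfs_context.h> is presumably what makes the kmem routines visible here. A plausible motivation, not stated in the diff, is keeping a table of LEMPEL_SIZE uint16_t entries off the limited kernel stack. A self-contained sketch of the same pattern, with made-up names:

/*
 * Illustrative sketch only, not code from this commit: the general
 * "move a large scratch buffer off the stack" pattern used above.
 * TABLE_ENTRIES and example_sum() are hypothetical.
 */
#include <sys/zfs_context.h>

#define	TABLE_ENTRIES	1024

static uint64_t
example_sum(void)
{
	uint16_t *table;
	uint64_t sum = 0;
	int i;

	/* was: uint16_t table[TABLE_ENTRIES] = { 0 };  (on the stack) */
	table = kmem_zalloc(TABLE_ENTRIES * sizeof (uint16_t), KM_SLEEP);

	for (i = 0; i < TABLE_ENTRIES; i++)
		sum += table[i];	/* all zeroes after kmem_zalloc() */

	/* every return path must release the buffer */
	kmem_free(table, TABLE_ENTRIES * sizeof (uint16_t));
	return (sum);
}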

View File

@@ -30,6 +30,8 @@
 #include <sys/vdev_impl.h>
 #include <sys/zio.h>
 
+#define	WITH_NDF_BLOCK_ALLOCATOR
+
 uint64_t metaslab_aliquot = 512ULL << 10;
 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
@@ -350,6 +352,9 @@ metaslab_segsize_compare(const void *x1, const void *x2)
 	return (0);
 }
 
+#if defined(WITH_FF_BLOCK_ALLOCATOR) || \
+    defined(WITH_DF_BLOCK_ALLOCATOR) || \
+    defined(WITH_CDF_BLOCK_ALLOCATOR)
 /*
  * This is a helper function that can be used by the allocator to find
  * a suitable block to allocate. This will search the specified AVL
@@ -389,6 +394,7 @@ metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
 	*cursor = 0;
 	return (metaslab_block_picker(t, cursor, size, align));
 }
+#endif /* WITH_FF/DF/CDF_BLOCK_ALLOCATOR */
 
 static void
 metaslab_pp_load(space_map_t *sm)
@@ -452,6 +458,7 @@ metaslab_pp_maxsize(space_map_t *sm)
 	return (ss->ss_end - ss->ss_start);
 }
 
+#if defined(WITH_FF_BLOCK_ALLOCATOR)
 /*
  * ==========================================================================
  * The first-fit block allocator
@@ -484,6 +491,10 @@ static space_map_ops_t metaslab_ff_ops = {
 	metaslab_ff_fragmented
 };
 
+space_map_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
+#endif /* WITH_FF_BLOCK_ALLOCATOR */
+
+#if defined(WITH_DF_BLOCK_ALLOCATOR)
 /*
  * ==========================================================================
  * Dynamic block allocator -
@@ -543,11 +554,15 @@ static space_map_ops_t metaslab_df_ops = {
 	metaslab_df_fragmented
 };
 
+space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
+#endif /* WITH_DF_BLOCK_ALLOCATOR */
+
 /*
  * ==========================================================================
  * Other experimental allocators
  * ==========================================================================
  */
+#if defined(WITH_CDF_BLOCK_ALLOCATOR)
 static uint64_t
 metaslab_cdf_alloc(space_map_t *sm, uint64_t size)
 {
@@ -607,6 +622,10 @@ static space_map_ops_t metaslab_cdf_ops = {
 	metaslab_cdf_fragmented
 };
 
+space_map_ops_t *zfs_metaslab_ops = &metaslab_cdf_ops;
+#endif /* WITH_CDF_BLOCK_ALLOCATOR */
+
+#if defined(WITH_NDF_BLOCK_ALLOCATOR)
 uint64_t metaslab_ndf_clump_shift = 4;
 
 static uint64_t
@@ -672,6 +691,7 @@ static space_map_ops_t metaslab_ndf_ops = {
 };
 
 space_map_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
+#endif /* WITH_NDF_BLOCK_ALLOCATOR */
 
 /*
  * ==========================================================================
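
Taken together, the metaslab.c hunks above wrap each allocator's ops vector, and its assignment to zfs_metaslab_ops, in a WITH_*_BLOCK_ALLOCATOR guard, then select the NDF allocator by defining WITH_NDF_BLOCK_ALLOCATOR at the top of the file, so exactly one definition of zfs_metaslab_ops is compiled in. A stripped-down sketch of the pattern; the types and functions below are stand-ins, not the real metaslab code:

/*
 * Simplified illustration of compile-time allocator selection.
 */
#include <stdint.h>

#define	WITH_NDF_BLOCK_ALLOCATOR	/* pick exactly one allocator */

typedef struct space_map_ops {
	uint64_t (*smop_alloc)(uint64_t size);
} space_map_ops_t;

#if defined(WITH_FF_BLOCK_ALLOCATOR)
static uint64_t ff_alloc(uint64_t size) { return (0); }
static space_map_ops_t metaslab_ff_ops = { ff_alloc };
space_map_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
#endif

#if defined(WITH_NDF_BLOCK_ALLOCATOR)
static uint64_t ndf_alloc(uint64_t size) { return (0); }
static space_map_ops_t metaslab_ndf_ops = { ndf_alloc };
space_map_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;	/* the one live definition */
#endif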

View File

@@ -1716,7 +1716,7 @@ vdev_raidz_combrec(zio_t *zio, int total_errors, int data_errors)
 	void *orig[VDEV_RAIDZ_MAXPARITY];
 	int tstore[VDEV_RAIDZ_MAXPARITY + 2];
 	int *tgts = &tstore[1];
-	int current, next, i, c, n;
+	int curr, next, i, c, n;
 	int code, ret = 0;
 
 	ASSERT(total_errors < rm->rm_firstdatacol);
@@ -1764,12 +1764,12 @@ vdev_raidz_combrec(zio_t *zio, int total_errors, int data_errors)
 		orig[n - 1] = zio_buf_alloc(rm->rm_col[0].rc_size);
 
-		current = 0;
-		next = tgts[current];
+		curr = 0;
+		next = tgts[curr];
 
-		while (current != n) {
-			tgts[current] = next;
-			current = 0;
+		while (curr != n) {
+			tgts[curr] = next;
+			curr = 0;
 
 			/*
 			 * Save off the original data that we're going to
@@ -1817,34 +1817,34 @@ vdev_raidz_combrec(zio_t *zio, int total_errors, int data_errors)
 			do {
 				/*
-				 * Find the next valid column after the current
+				 * Find the next valid column after the curr
 				 * position..
 				 */
-				for (next = tgts[current] + 1;
+				for (next = tgts[curr] + 1;
 				    next < rm->rm_cols &&
 				    rm->rm_col[next].rc_error != 0; next++)
 					continue;
 
-				ASSERT(next <= tgts[current + 1]);
+				ASSERT(next <= tgts[curr + 1]);
 
 				/*
 				 * If that spot is available, we're done here.
 				 */
-				if (next != tgts[current + 1])
+				if (next != tgts[curr + 1])
 					break;
 
 				/*
 				 * Otherwise, find the next valid column after
 				 * the previous position.
 				 */
-				for (c = tgts[current - 1] + 1;
+				for (c = tgts[curr - 1] + 1;
 				    rm->rm_col[c].rc_error != 0; c++)
 					continue;
 
-				tgts[current] = c;
-				current++;
-			} while (current != n);
+				tgts[curr] = c;
+				curr++;
+			} while (curr != n);
 		}
 	}
 	n--;
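
The only change in vdev_raidz_combrec() is the rename of the local variable current to curr, including the comment that mentions it. The commit does not say why, but a likely reason, given the linux-kernel-mem branch, is that current is a macro in Linux kernel code (the running task, from <asm/current.h>), so a local variable of that name cannot coexist with the kernel headers. A small user-space illustration with a stand-in macro and a made-up task struct:

/*
 * Illustration only: a macro named like a local variable breaks the
 * declaration.  get_current() and struct task are stand-ins for the
 * kernel's real definitions.
 */
#include <stdio.h>

struct task { int pid; };
static struct task *get_current(void) { static struct task t; return (&t); }
#define	current	get_current()

int
main(void)
{
	/* int current = 0;   -- would expand to "int get_current() = 0;" */
	int curr = 0;		/* the renamed local compiles fine */

	(void) printf("pid=%d curr=%d\n", current->pid, curr);
	return (0);
}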