vdev_mirror: kstat observables for preferred vdev

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Gvozden Neskovic <neskovic@gmail.com>
Closes #6461
commit 551905dd47 (parent d6c6590c5d)
Author:    Gvozden Neskovic <neskovic@gmail.com>
Date:      2017-08-04 12:23:10 +02:00
Committer: Brian Behlendorf

3 changed files, 82 insertions(+), 6 deletions(-)

include/sys/spa.h

@@ -1034,6 +1034,10 @@ extern void spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub);
 extern void vdev_cache_stat_init(void);
 extern void vdev_cache_stat_fini(void);
 
+/* vdev mirror */
+extern void vdev_mirror_stat_init(void);
+extern void vdev_mirror_stat_fini(void);
+
 /* Initialization and termination */
 extern void spa_init(int flags);
 extern void spa_fini(void);

module/zfs/spa_misc.c

@@ -1897,6 +1897,7 @@ spa_init(int mode)
 	dmu_init();
 	zil_init();
 	vdev_cache_stat_init();
+	vdev_mirror_stat_init();
 	vdev_raidz_math_init();
 	vdev_file_init();
 	zfs_prop_init();
@@ -1916,6 +1917,7 @@ spa_fini(void)
 	vdev_file_fini();
 	vdev_cache_stat_fini();
+	vdev_mirror_stat_fini();
 	vdev_raidz_math_fini();
 	zil_fini();
 	dmu_fini();

module/zfs/vdev_mirror.c

@@ -34,6 +34,65 @@
 #include <sys/abd.h>
 #include <sys/fs/zfs.h>
 
+/*
+ * Vdev mirror kstats
+ */
+static kstat_t *mirror_ksp = NULL;
+
+typedef struct mirror_stats {
+	kstat_named_t vdev_mirror_stat_rotating_linear;
+	kstat_named_t vdev_mirror_stat_rotating_offset;
+	kstat_named_t vdev_mirror_stat_rotating_seek;
+	kstat_named_t vdev_mirror_stat_non_rotating_linear;
+	kstat_named_t vdev_mirror_stat_non_rotating_seek;
+
+	kstat_named_t vdev_mirror_stat_preferred_found;
+	kstat_named_t vdev_mirror_stat_preferred_not_found;
+} mirror_stats_t;
+
+static mirror_stats_t mirror_stats = {
+	/* New I/O follows directly the last I/O */
+	{ "rotating_linear", KSTAT_DATA_UINT64 },
+	/* New I/O is within zfs_vdev_mirror_rotating_seek_offset of the last */
+	{ "rotating_offset", KSTAT_DATA_UINT64 },
+	/* New I/O requires random seek */
+	{ "rotating_seek", KSTAT_DATA_UINT64 },
+	/* New I/O follows directly the last I/O (nonrot) */
+	{ "non_rotating_linear", KSTAT_DATA_UINT64 },
+	/* New I/O requires random seek (nonrot) */
+	{ "non_rotating_seek", KSTAT_DATA_UINT64 },
+
+	/* Preferred child vdev found */
+	{ "preferred_found", KSTAT_DATA_UINT64 },
+	/* Preferred child vdev not found or equal load */
+	{ "preferred_not_found", KSTAT_DATA_UINT64 },
+};
+
+#define	MIRROR_STAT(stat)	(mirror_stats.stat.value.ui64)
+#define	MIRROR_INCR(stat, val)	atomic_add_64(&MIRROR_STAT(stat), val)
+#define	MIRROR_BUMP(stat)	MIRROR_INCR(stat, 1)
+
+void
+vdev_mirror_stat_init(void)
+{
+	mirror_ksp = kstat_create("zfs", 0, "vdev_mirror_stats",
+	    "misc", KSTAT_TYPE_NAMED,
+	    sizeof (mirror_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
+	if (mirror_ksp != NULL) {
+		mirror_ksp->ks_data = &mirror_stats;
+		kstat_install(mirror_ksp);
+	}
+}
+
+void
+vdev_mirror_stat_fini(void)
+{
+	if (mirror_ksp != NULL) {
+		kstat_delete(mirror_ksp);
+		mirror_ksp = NULL;
+	}
+}
+
 /*
  * Virtual device vector for mirroring.
  */
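Once kstat_install() runs, these counters are exported read-only by the kernel. On a Linux build, the SPL surfaces named kstats under procfs, so the registration above ("zfs", "vdev_mirror_stats") should make them readable at /proc/spl/kstat/zfs/vdev_mirror_stats. A trimmed, illustrative reading (the values are invented, not from the source):

	$ cat /proc/spl/kstat/zfs/vdev_mirror_stats
	name                       type data
	rotating_linear            4    14219
	rotating_offset            4    4307
	rotating_seek              4    1801
	non_rotating_linear        4    0
	non_rotating_seek          4    0
	preferred_found            4    9064
	preferred_not_found        4    8034

The last two counters track whether the load-based selection produced a single best child or had to randomize among equally loaded candidates, as the vdev_mirror_child_select() hunk at the end of this diff shows.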
@@ -140,8 +199,10 @@ vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
 
 	if (vd->vdev_nonrot) {
 		/* Non-rotating media. */
-		if (last_offset == zio_offset)
+		if (last_offset == zio_offset) {
+			MIRROR_BUMP(vdev_mirror_stat_non_rotating_linear);
 			return (load + zfs_vdev_mirror_non_rotating_inc);
+		}
 
 		/*
 		 * Apply a seek penalty even for non-rotating devices as
@@ -149,12 +210,15 @@ vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
 		 * the device, thus avoiding unnecessary per-command overhead
 		 * and boosting performance.
 		 */
+		MIRROR_BUMP(vdev_mirror_stat_non_rotating_seek);
 		return (load + zfs_vdev_mirror_non_rotating_seek_inc);
 	}
 
 	/* Rotating media I/O's which directly follow the last I/O. */
-	if (last_offset == zio_offset)
+	if (last_offset == zio_offset) {
+		MIRROR_BUMP(vdev_mirror_stat_rotating_linear);
 		return (load + zfs_vdev_mirror_rotating_inc);
+	}
 
 	/*
 	 * Apply half the seek increment to I/O's within seek offset
@@ -162,10 +226,13 @@ vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
 	 * of a seek increment.
 	 */
 	offset_diff = (int64_t)(last_offset - zio_offset);
-	if (ABS(offset_diff) < zfs_vdev_mirror_rotating_seek_offset)
+	if (ABS(offset_diff) < zfs_vdev_mirror_rotating_seek_offset) {
+		MIRROR_BUMP(vdev_mirror_stat_rotating_offset);
 		return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));
+	}
 
 	/* Apply the full seek increment to all other I/O's. */
+	MIRROR_BUMP(vdev_mirror_stat_rotating_seek);
 	return (load + zfs_vdev_mirror_rotating_seek_inc);
 }
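Taken together, the three vdev_mirror_load() hunks above classify every incoming I/O into exactly one of the five locality buckets before returning the adjusted load. A minimal standalone sketch of that decision tree follows; the function and enum names are hypothetical, the window parameter stands in for the zfs_vdev_mirror_rotating_seek_offset tunable, and the real code bumps a kstat and returns a weighted load rather than a bucket:

	/* Illustrative sketch, not part of the commit. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	typedef enum {
		STAT_NON_ROTATING_LINEAR,	/* nonrot: follows last I/O */
		STAT_NON_ROTATING_SEEK,		/* nonrot: everything else */
		STAT_ROTATING_LINEAR,		/* rotating: follows last I/O */
		STAT_ROTATING_OFFSET,		/* rotating: within seek window */
		STAT_ROTATING_SEEK		/* rotating: full random seek */
	} mirror_stat_t;

	static mirror_stat_t
	mirror_classify(bool nonrot, uint64_t last_offset, uint64_t zio_offset,
	    uint64_t rotating_seek_window)
	{
		if (nonrot) {
			return (last_offset == zio_offset ?
			    STAT_NON_ROTATING_LINEAR : STAT_NON_ROTATING_SEEK);
		}
		if (last_offset == zio_offset)
			return (STAT_ROTATING_LINEAR);
		/* Only half of the seek increment applies inside the window. */
		if ((uint64_t)llabs((long long)(last_offset - zio_offset)) <
		    rotating_seek_window)
			return (STAT_ROTATING_OFFSET);
		return (STAT_ROTATING_SEEK);
	}

	int
	main(void)
	{
		/* HDD read landing exactly where the previous one ended. */
		printf("%d\n", mirror_classify(false, 8192, 8192, 1ULL << 20));
		/* HDD read 64 KiB away: inside a 1 MiB window, half penalty. */
		printf("%d\n",
		    mirror_classify(false, 8192, 8192 + 65536, 1ULL << 20));
		return (0);
	}

Each bucket maps one-to-one onto a counter in the mirror_stats table, which is what makes the kstat a complete accounting of the load function's decisions.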
@@ -387,12 +454,15 @@ vdev_mirror_child_select(zio_t *zio)
 		mm->mm_preferred_cnt++;
 	}
 
-	if (mm->mm_preferred_cnt == 1)
+	if (mm->mm_preferred_cnt == 1) {
+		MIRROR_BUMP(vdev_mirror_stat_preferred_found);
 		return (mm->mm_preferred[0]);
-
-	if (mm->mm_preferred_cnt > 1)
+	}
+	if (mm->mm_preferred_cnt > 1) {
+		MIRROR_BUMP(vdev_mirror_stat_preferred_not_found);
 		return (vdev_mirror_preferred_child_randomize(zio));
+	}
 
 	/*
 	 * Every device is either missing or has this txg in its DTL.