Retire legacy test infrastructure

* Removed the zpios kmod, utility, headers, and man page.

* Removed unused scripts zpios-profile/*, zpios-test/*,
  zpool-config/*, smb.sh, zpios-sanity.sh, zpios-survey.sh,
  zpios.sh, and zpool-create.sh.

* Removed zfs-script-config.sh.in.  When building, 'make' generates
  a common.sh with in-tree path information from the common.sh.in
  template.  This file is sourced by the test scripts and used for
  in-tree testing; it is not included in the packages.  When building
  packages, 'make install' uses the same template to create a new
  common.sh which is appropriate for the packaging.  A usage sketch
  follows this list.

* Removed unused functions/variables from scripts/common.sh.in.
  Only minimal path information and configuration environment
  variables remain.

* Removed unused scripts from the scripts/ directory.

* Updated the remaining shell scripts in the scripts directory to
  cleanly pass shellcheck and added them to the set of checked
  scripts.  An example invocation follows this list.

* Renamed tests/test-runner/cmd/ to tests/test-runner/bin/ to
  match the install location name.

* Removed the last traces of the --enable-debug-dmu-tx configure
  option, which was retired some time ago.
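
As a usage sketch only (not code copied from the tree): a test script
consumes the generated common.sh the same way whether it runs in-tree
or from the installed package.  The lookup logic and error message
below are illustrative assumptions; the exported names (BIN_DIR,
SBIN_DIR, ZTS_DIR, SCRIPT_DIR, INTREE) come from scripts/Makefile.am.

  #!/bin/sh
  # Source the generated common.sh that sits next to the script.  For
  # in-tree runs 'make' writes it into the scripts directory; 'make
  # install' generates a copy under the package data directory which
  # the installed scripts can source the same way.
  basedir="$(dirname "$0")"
  if [ -f "${basedir}/common.sh" ]; then
      . "${basedir}/common.sh"
  else
      echo "Missing helper script ${basedir}/common.sh" >&2
      exit 1
  fi

  # Only minimal path variables remain, e.g. BIN_DIR, SBIN_DIR,
  # ZTS_DIR and SCRIPT_DIR, plus the in-tree EXTRA_ENVIRONMENT block
  # (INTREE="yes", CMD_DIR, ...) when generated from the source tree.
  echo "Using ZFS utilities from ${SBIN_DIR}"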

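The updated 'make shellcheck' target now discovers the checked
scripts with find(1) instead of a hand-maintained list; run by hand
from the top of the tree it is roughly the following (sketch
mirroring the Makefile hunk further below):

  shellcheck --exclude=SC1090 --format=gcc \
      $(find scripts/*.sh -type f) \
      $(find cmd/zed/zed.d/*.sh -type f) \
      $(find cmd/zpool/zpool.d/* -executable)
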
Reviewed-by: Giuseppe Di Natale <dinatale2@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #6509
Brian Behlendorf 2017-08-15 17:26:38 -07:00 committed by GitHub
parent 70322be8dc
commit c8f9061fc7
88 changed files with 605 additions and 8188 deletions

.gitignore
View File

@ -39,8 +39,6 @@ Makefile.in
/zfs_config.h.in
/zfs.release
/stamp-h1
/.script-config
/zfs-script-config.sh
/aclocal.m4
/autom4te.cache

View File

@ -10,10 +10,6 @@ and attributed properly. These exceptions include, but are not limited
to, the vdev_disk.c and zvol.c implementation which are licensed under
the CDDL.
The zpios test code is originally derived from the Lustre pios test code
which is licensed under the GPLv2. As such the heavily modified zpios
kernel test code also remains licensed under the GPLv2.
The latest stable and development versions of this port can be downloaded
from the official ZFS on Linux site located at:

View File

@ -29,9 +29,8 @@ distclean-local::
-o -name .pc -o -name .hg -o -name .git \) -prune -o \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
-o -name '.*.rej' -o -name '.script-config' -o -size 0 \
-o -name '*%' -o -name '.*.cmd' -o -name 'core' \
-o -name 'Makefile' -o -name 'Module.symvers' \
-o -name '.*.rej' -o -size 0 -o -name '*%' -o -name '.*.cmd' \
-o -name 'core' -o -name 'Makefile' -o -name 'Module.symvers' \
-o -name '*.order' -o -name '*.markers' \) \
-type f -print | xargs $(RM)
@ -52,11 +51,8 @@ cstyle:
shellcheck:
@if type shellcheck > /dev/null 2>&1; then \
shellcheck --exclude=SC1090 --format=gcc scripts/paxcheck.sh \
scripts/zloop.sh \
scripts/zfs-tests.sh \
scripts/zfs.sh \
scripts/commitcheck.sh \
shellcheck --exclude=SC1090 --format=gcc \
$$(find scripts/*.sh -type f) \
$$(find cmd/zed/zed.d/*.sh -type f) \
$$(find cmd/zpool/zpool.d/* -executable); \
fi

View File

@ -1,3 +1,3 @@
SUBDIRS = zfs zpool zdb zhack zinject zstreamdump ztest zpios
SUBDIRS = zfs zpool zdb zhack zinject zstreamdump ztest
SUBDIRS += mount_zfs fsck_zfs zvol_id vdev_id arcstat dbufstat zed
SUBDIRS += arc_summary raidz_test zgenhostid

View File

@ -1 +0,0 @@
/zpios

View File

@ -1,11 +0,0 @@
include $(top_srcdir)/config/Rules.am
DEFAULT_INCLUDES += \
-I$(top_srcdir)/include
sbin_PROGRAMS = zpios
zpios_SOURCES = \
zpios_main.c \
zpios_util.c \
zpios.h

View File

@ -1,127 +0,0 @@
/*
* ZPIOS is a heavily modified version of the original PIOS test code.
* It is designed to have the test code running in the Linux kernel
* against ZFS while still being flexibly controlled from user space.
*
* Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049
*
* Original PIOS Test Code
* Copyright (C) 2004 Cluster File Systems, Inc.
* Written by Peter Braam <braam@clusterfs.com>
* Atul Vidwansa <atul@clusterfs.com>
* Milind Dumbare <milind@clusterfs.com>
*
* This file is part of ZFS on Linux.
* For details, see <http://zfsonlinux.org/>.
*
* ZPIOS is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* ZPIOS is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with ZPIOS. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright (c) 2015, Intel Corporation.
*/
#ifndef _ZPIOS_H
#define _ZPIOS_H
#include <zpios-ctl.h>
#define VERSION_SIZE 64
/* Regular expressions */
#define REGEX_NUMBERS "^[0-9]+$"
#define REGEX_NUMBERS_COMMA "^([0-9]+,)*[0-9]+$"
#define REGEX_SIZE "^[0-9]+[kKmMgGtT]?$"
#define REGEX_SIZE_COMMA "^([0-9]+[kKmMgGtT]?,)*[0-9]+[kKmMgGtT]?$"
/* Flags for low, high, incr */
#define FLAG_SET 0x01
#define FLAG_LOW 0x02
#define FLAG_HIGH 0x04
#define FLAG_INCR 0x08
#define TRUE 1
#define FALSE 0
#define KB (1024)
#define MB (KB * 1024)
#define GB (MB * 1024)
#define TB (GB * 1024)
#define KMGT_SIZE 16
/*
* All offsets, sizes and counts can be passed to the application in
* multiple ways.
* 1. a value (stored in val[0], val_count will be 1)
* 2. a comma separated list of values (stored in val[], using val_count)
* 3. a range and block sizes, low, high, factor (val_count must be 0)
*/
typedef struct pios_range_repeat {
uint64_t val[32]; /* Comma sep array, or low, high, inc */
uint64_t val_count; /* Num of values */
uint64_t val_low;
uint64_t val_high;
uint64_t val_inc_perc;
uint64_t next_val; /* For multiple runs in get_next() */
} range_repeat_t;
typedef struct cmd_args {
range_repeat_t T; /* Thread count */
range_repeat_t N; /* Region count */
range_repeat_t O; /* Offset count */
range_repeat_t C; /* Chunksize */
range_repeat_t S; /* Regionsize */
range_repeat_t B; /* Blocksize */
const char *pool; /* Pool */
const char *name; /* Name */
uint32_t flags; /* Flags */
uint32_t block_size; /* ZFS block size */
uint32_t io_type; /* DMUIO only */
uint32_t verbose; /* Verbose */
uint32_t human_readable; /* Human readable output */
uint64_t regionnoise; /* Region noise */
uint64_t chunknoise; /* Chunk noise */
uint64_t thread_delay; /* Thread delay */
char pre[ZPIOS_PATH_SIZE]; /* Pre-exec hook */
char post[ZPIOS_PATH_SIZE]; /* Post-exec hook */
char log[ZPIOS_PATH_SIZE]; /* Requested log dir */
/* Control */
int current_id;
uint64_t current_T;
uint64_t current_N;
uint64_t current_C;
uint64_t current_S;
uint64_t current_O;
uint64_t current_B;
uint32_t rc;
} cmd_args_t;
int set_count(char *pattern1, char *pattern2, range_repeat_t *range,
char *optarg, uint32_t *flags, char *arg);
int set_lhi(char *pattern, range_repeat_t *range, char *optarg,
int flag, uint32_t *flag_thread, char *arg);
int set_noise(uint64_t *noise, char *optarg, char *arg);
int set_load_params(cmd_args_t *args, char *optarg);
int check_mutual_exclusive_command_lines(uint32_t flag, char *arg);
void print_stats_header(cmd_args_t *args);
void print_stats(cmd_args_t *args, zpios_cmd_t *cmd);
#endif /* _ZPIOS_H */

View File

@ -1,680 +0,0 @@
/*
* ZPIOS is a heavily modified version of the original PIOS test code.
* It is designed to have the test code running in the Linux kernel
* against ZFS while still being flexibly controlled from user space.
*
* Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049
*
* Original PIOS Test Code
* Copyright (C) 2004 Cluster File Systems, Inc.
* Written by Peter Braam <braam@clusterfs.com>
* Atul Vidwansa <atul@clusterfs.com>
* Milind Dumbare <milind@clusterfs.com>
*
* This file is part of ZFS on Linux.
* For details, see <http://zfsonlinux.org/>.
*
* ZPIOS is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* ZPIOS is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with ZPIOS. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright (c) 2015, Intel Corporation.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <getopt.h>
#include <assert.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "zpios.h"
static const char short_opt[] =
"t:l:h:e:n:i:j:k:o:m:q:r:c:a:b:g:s:A:B:C:"
"S:L:p:M:xP:R:G:I:N:T:VzOfHv?";
static const struct option long_opt[] = {
{"threadcount", required_argument, 0, 't' },
{"threadcount_low", required_argument, 0, 'l' },
{"threadcount_high", required_argument, 0, 'h' },
{"threadcount_incr", required_argument, 0, 'e' },
{"regioncount", required_argument, 0, 'n' },
{"regioncount_low", required_argument, 0, 'i' },
{"regioncount_high", required_argument, 0, 'j' },
{"regioncount_incr", required_argument, 0, 'k' },
{"offset", required_argument, 0, 'o' },
{"offset_low", required_argument, 0, 'm' },
{"offset_high", required_argument, 0, 'q' },
{"offset_incr", required_argument, 0, 'r' },
{"chunksize", required_argument, 0, 'c' },
{"chunksize_low", required_argument, 0, 'a' },
{"chunksize_high", required_argument, 0, 'b' },
{"chunksize_incr", required_argument, 0, 'g' },
{"regionsize", required_argument, 0, 's' },
{"regionsize_low", required_argument, 0, 'A' },
{"regionsize_high", required_argument, 0, 'B' },
{"regionsize_incr", required_argument, 0, 'C' },
{"blocksize", required_argument, 0, 'S' },
{"load", required_argument, 0, 'L' },
{"pool", required_argument, 0, 'p' },
{"name", required_argument, 0, 'M' },
{"cleanup", no_argument, 0, 'x' },
{"prerun", required_argument, 0, 'P' },
{"postrun", required_argument, 0, 'R' },
{"log", required_argument, 0, 'G' },
{"regionnoise", required_argument, 0, 'I' },
{"chunknoise", required_argument, 0, 'N' },
{"threaddelay", required_argument, 0, 'T' },
{"verify", no_argument, 0, 'V' },
{"zerocopy", no_argument, 0, 'z' },
{"nowait", no_argument, 0, 'O' },
{"noprefetch", no_argument, 0, 'f' },
{"human-readable", no_argument, 0, 'H' },
{"verbose", no_argument, 0, 'v' },
{"help", no_argument, 0, '?' },
{ 0, 0, 0, 0 },
};
static int zpiosctl_fd; /* Control file descriptor */
static char zpios_version[VERSION_SIZE]; /* Kernel version string */
static char *zpios_buffer = NULL; /* Scratch space area */
static int zpios_buffer_size = 0; /* Scratch space size */
static int
usage(void)
{
fprintf(stderr, "Usage: zpios\n");
fprintf(stderr,
" --threadcount -t =values\n"
" --threadcount_low -l =value\n"
" --threadcount_high -h =value\n"
" --threadcount_incr -e =value\n"
" --regioncount -n =values\n"
" --regioncount_low -i =value\n"
" --regioncount_high -j =value\n"
" --regioncount_incr -k =value\n"
" --offset -o =values\n"
" --offset_low -m =value\n"
" --offset_high -q =value\n"
" --offset_incr -r =value\n"
" --chunksize -c =values\n"
" --chunksize_low -a =value\n"
" --chunksize_high -b =value\n"
" --chunksize_incr -g =value\n"
" --regionsize -s =values\n"
" --regionsize_low -A =value\n"
" --regionsize_high -B =value\n"
" --regionsize_incr -C =value\n"
" --blocksize -S =values\n"
" --load -L =dmuio|ssf|fpp\n"
" --pool -p =pool name\n"
" --name -M =test name\n"
" --cleanup -x\n"
" --prerun -P =pre-command\n"
" --postrun -R =post-command\n"
" --log -G =log directory\n"
" --regionnoise -I =shift\n"
" --chunknoise -N =bytes\n"
" --threaddelay -T =jiffies\n"
" --verify -V\n"
" --zerocopy -z\n"
" --nowait -O\n"
" --noprefetch -f\n"
" --human-readable -H\n"
" --verbose -v =increase verbosity\n"
" --help -? =this help\n\n");
return (0);
}
static void args_fini(cmd_args_t *args)
{
assert(args != NULL);
free(args);
}
/* block size is 128K to 16M, power of 2 */
#define MIN_BLKSIZE (128ULL << 10)
#define MAX_BLKSIZE (16ULL << 20)
#define POW_OF_TWO(x) (((x) & ((x) - 1)) == 0)
static cmd_args_t *
args_init(int argc, char **argv)
{
cmd_args_t *args;
uint32_t fl_th = 0;
uint32_t fl_rc = 0;
uint32_t fl_of = 0;
uint32_t fl_rs = 0;
uint32_t fl_cs = 0;
uint32_t fl_bs = 0;
int c, rc, i;
if (argc == 1) {
usage();
return ((cmd_args_t *)NULL);
}
/* Configure and populate the args structures */
args = malloc(sizeof (*args));
if (args == NULL)
return (NULL);
memset(args, 0, sizeof (*args));
/* provide a default block size of 128K */
args->B.next_val = 0;
args->B.val[0] = MIN_BLKSIZE;
args->B.val_count = 1;
while ((c = getopt_long(argc, argv, short_opt, long_opt, NULL)) != -1) {
rc = 0;
switch (c) {
case 't': /* --thread count */
rc = set_count(REGEX_NUMBERS, REGEX_NUMBERS_COMMA,
&args->T, optarg, &fl_th, "threadcount");
break;
case 'l': /* --threadcount_low */
rc = set_lhi(REGEX_NUMBERS, &args->T, optarg,
FLAG_LOW, &fl_th, "threadcount_low");
break;
case 'h': /* --threadcount_high */
rc = set_lhi(REGEX_NUMBERS, &args->T, optarg,
FLAG_HIGH, &fl_th, "threadcount_high");
break;
case 'e': /* --threadcount_inc */
rc = set_lhi(REGEX_NUMBERS, &args->T, optarg,
FLAG_INCR, &fl_th, "threadcount_incr");
break;
case 'n': /* --regioncount */
rc = set_count(REGEX_NUMBERS, REGEX_NUMBERS_COMMA,
&args->N, optarg, &fl_rc, "regioncount");
break;
case 'i': /* --regioncount_low */
rc = set_lhi(REGEX_NUMBERS, &args->N, optarg,
FLAG_LOW, &fl_rc, "regioncount_low");
break;
case 'j': /* --regioncount_high */
rc = set_lhi(REGEX_NUMBERS, &args->N, optarg,
FLAG_HIGH, &fl_rc, "regioncount_high");
break;
case 'k': /* --regioncount_inc */
rc = set_lhi(REGEX_NUMBERS, &args->N, optarg,
FLAG_INCR, &fl_rc, "regioncount_incr");
break;
case 'o': /* --offset */
rc = set_count(REGEX_SIZE, REGEX_SIZE_COMMA,
&args->O, optarg, &fl_of, "offset");
break;
case 'm': /* --offset_low */
rc = set_lhi(REGEX_SIZE, &args->O, optarg,
FLAG_LOW, &fl_of, "offset_low");
break;
case 'q': /* --offset_high */
rc = set_lhi(REGEX_SIZE, &args->O, optarg,
FLAG_HIGH, &fl_of, "offset_high");
break;
case 'r': /* --offset_inc */
rc = set_lhi(REGEX_NUMBERS, &args->O, optarg,
FLAG_INCR, &fl_of, "offset_incr");
break;
case 'c': /* --chunksize */
rc = set_count(REGEX_SIZE, REGEX_SIZE_COMMA,
&args->C, optarg, &fl_cs, "chunksize");
break;
case 'a': /* --chunksize_low */
rc = set_lhi(REGEX_SIZE, &args->C, optarg,
FLAG_LOW, &fl_cs, "chunksize_low");
break;
case 'b': /* --chunksize_high */
rc = set_lhi(REGEX_SIZE, &args->C, optarg,
FLAG_HIGH, &fl_cs, "chunksize_high");
break;
case 'g': /* --chunksize_inc */
rc = set_lhi(REGEX_NUMBERS, &args->C, optarg,
FLAG_INCR, &fl_cs, "chunksize_incr");
break;
case 's': /* --regionsize */
rc = set_count(REGEX_SIZE, REGEX_SIZE_COMMA,
&args->S, optarg, &fl_rs, "regionsize");
break;
case 'A': /* --regionsize_low */
rc = set_lhi(REGEX_SIZE, &args->S, optarg,
FLAG_LOW, &fl_rs, "regionsize_low");
break;
case 'B': /* --regionsize_high */
rc = set_lhi(REGEX_SIZE, &args->S, optarg,
FLAG_HIGH, &fl_rs, "regionsize_high");
break;
case 'C': /* --regionsize_inc */
rc = set_lhi(REGEX_NUMBERS, &args->S, optarg,
FLAG_INCR, &fl_rs, "regionsize_incr");
break;
case 'S': /* --blocksize */
rc = set_count(REGEX_SIZE, REGEX_SIZE_COMMA,
&args->B, optarg, &fl_bs, "blocksize");
break;
case 'L': /* --load */
rc = set_load_params(args, optarg);
break;
case 'p': /* --pool */
args->pool = optarg;
break;
case 'M':
args->name = optarg;
break;
case 'x': /* --cleanup */
args->flags |= DMU_REMOVE;
break;
case 'P': /* --prerun */
strncpy(args->pre, optarg, ZPIOS_PATH_SIZE - 1);
break;
case 'R': /* --postrun */
strncpy(args->post, optarg, ZPIOS_PATH_SIZE - 1);
break;
case 'G': /* --log */
strncpy(args->log, optarg, ZPIOS_PATH_SIZE - 1);
break;
case 'I': /* --regionnoise */
rc = set_noise(&args->regionnoise, optarg,
"regionnoise");
break;
case 'N': /* --chunknoise */
rc = set_noise(&args->chunknoise, optarg, "chunknoise");
break;
case 'T': /* --threaddelay */
rc = set_noise(&args->thread_delay, optarg,
"threaddelay");
break;
case 'V': /* --verify */
args->flags |= DMU_VERIFY;
break;
case 'z': /* --zerocopy */
args->flags |= (DMU_WRITE_ZC | DMU_READ_ZC);
break;
case 'O': /* --nowait */
args->flags |= DMU_WRITE_NOWAIT;
break;
case 'f': /* --noprefetch */
args->flags |= DMU_READ_NOPF;
break;
case 'H': /* --human-readable */
args->human_readable = 1;
break;
case 'v': /* --verbose */
args->verbose++;
break;
case '?':
rc = 1;
break;
default:
fprintf(stderr, "Unknown option '%s'\n",
argv[optind - 1]);
rc = EINVAL;
break;
}
if (rc) {
usage();
args_fini(args);
return (NULL);
}
}
check_mutual_exclusive_command_lines(fl_th, "threadcount");
check_mutual_exclusive_command_lines(fl_rc, "regioncount");
check_mutual_exclusive_command_lines(fl_of, "offset");
check_mutual_exclusive_command_lines(fl_rs, "regionsize");
check_mutual_exclusive_command_lines(fl_cs, "chunksize");
if (args->pool == NULL) {
fprintf(stderr, "Error: Pool not specified\n");
usage();
args_fini(args);
return (NULL);
}
if ((args->flags & (DMU_WRITE_ZC | DMU_READ_ZC)) &&
(args->flags & DMU_VERIFY)) {
fprintf(stderr, "Error, --zerocopy incompatible --verify, "
"used for performance analysis only\n");
usage();
args_fini(args);
return (NULL);
}
/* validate block size(s) */
for (i = 0; i < args->B.val_count; i++) {
int bs = args->B.val[i];
if (bs < MIN_BLKSIZE || bs > MAX_BLKSIZE || !POW_OF_TWO(bs)) {
fprintf(stderr, "Error: invalid block size %d\n", bs);
args_fini(args);
return (NULL);
}
}
return (args);
}
static int
dev_clear(void)
{
zpios_cfg_t cfg;
int rc;
memset(&cfg, 0, sizeof (cfg));
cfg.cfg_magic = ZPIOS_CFG_MAGIC;
cfg.cfg_cmd = ZPIOS_CFG_BUFFER_CLEAR;
cfg.cfg_arg1 = 0;
rc = ioctl(zpiosctl_fd, ZPIOS_CFG, &cfg);
if (rc)
fprintf(stderr, "Ioctl() error %lu / %d: %d\n",
(unsigned long) ZPIOS_CFG, cfg.cfg_cmd, errno);
(void) lseek(zpiosctl_fd, 0, SEEK_SET);
return (rc);
}
/* Passing a size of zero simply results in querying the current size */
static int
dev_size(int size)
{
zpios_cfg_t cfg;
int rc;
memset(&cfg, 0, sizeof (cfg));
cfg.cfg_magic = ZPIOS_CFG_MAGIC;
cfg.cfg_cmd = ZPIOS_CFG_BUFFER_SIZE;
cfg.cfg_arg1 = size;
rc = ioctl(zpiosctl_fd, ZPIOS_CFG, &cfg);
if (rc) {
fprintf(stderr, "Ioctl() error %lu / %d: %d\n",
(unsigned long) ZPIOS_CFG, cfg.cfg_cmd, errno);
return (rc);
}
return (cfg.cfg_rc1);
}
static void
dev_fini(void)
{
if (zpios_buffer)
free(zpios_buffer);
if (zpiosctl_fd != -1) {
if (close(zpiosctl_fd) == -1) {
fprintf(stderr, "Unable to close %s: %d\n",
ZPIOS_DEV, errno);
}
}
}
static int
dev_init(void)
{
int rc;
zpiosctl_fd = open(ZPIOS_DEV, O_RDONLY);
if (zpiosctl_fd == -1) {
fprintf(stderr, "Unable to open %s: %d\n"
"Is the zpios module loaded?\n", ZPIOS_DEV, errno);
rc = errno;
goto error;
}
if ((rc = dev_clear()))
goto error;
if ((rc = dev_size(0)) < 0)
goto error;
zpios_buffer_size = rc;
zpios_buffer = (char *)malloc(zpios_buffer_size);
if (zpios_buffer == NULL) {
rc = ENOMEM;
goto error;
}
memset(zpios_buffer, 0, zpios_buffer_size);
return (0);
error:
if (zpiosctl_fd != -1) {
if (close(zpiosctl_fd) == -1) {
fprintf(stderr, "Unable to close %s: %d\n",
ZPIOS_DEV, errno);
}
}
return (rc);
}
static int
get_next(uint64_t *val, range_repeat_t *range)
{
/* if low, incr, high is given */
if (range->val_count == 0) {
*val = (range->val_low) +
(range->val_low * range->next_val / 100);
if (*val > range->val_high)
return (0); /* No more values, limit exceeded */
if (!range->next_val)
range->next_val = range->val_inc_perc;
else
range->next_val = range->next_val + range->val_inc_perc;
return (1); /* more values to come */
/* if only one val is given */
} else if (range->val_count == 1) {
if (range->next_val)
return (0); /* No more values, we only have one */
*val = range->val[0];
range->next_val = 1;
return (1); /* more values to come */
/* if comma separated values are given */
} else if (range->val_count > 1) {
if (range->next_val > range->val_count - 1)
return (0); /* No more values, limit exceeded */
*val = range->val[range->next_val];
range->next_val++;
return (1); /* more values to come */
}
return (0);
}
static int
run_one(cmd_args_t *args, uint32_t id, uint32_t T, uint32_t N,
uint64_t C, uint64_t S, uint64_t O, uint64_t B)
{
zpios_cmd_t *cmd;
int rc, rc2, cmd_size;
dev_clear();
cmd_size = sizeof (zpios_cmd_t) +
((T + N + 1) * sizeof (zpios_stats_t));
cmd = (zpios_cmd_t *)malloc(cmd_size);
if (cmd == NULL)
return (ENOMEM);
memset(cmd, 0, cmd_size);
cmd->cmd_magic = ZPIOS_CMD_MAGIC;
strncpy(cmd->cmd_pool, args->pool, ZPIOS_NAME_SIZE - 1);
strncpy(cmd->cmd_pre, args->pre, ZPIOS_PATH_SIZE - 1);
strncpy(cmd->cmd_post, args->post, ZPIOS_PATH_SIZE - 1);
strncpy(cmd->cmd_log, args->log, ZPIOS_PATH_SIZE - 1);
cmd->cmd_id = id;
cmd->cmd_chunk_size = C;
cmd->cmd_thread_count = T;
cmd->cmd_region_count = N;
cmd->cmd_region_size = S;
cmd->cmd_offset = O;
cmd->cmd_block_size = B;
cmd->cmd_region_noise = args->regionnoise;
cmd->cmd_chunk_noise = args->chunknoise;
cmd->cmd_thread_delay = args->thread_delay;
cmd->cmd_flags = args->flags;
cmd->cmd_data_size = (T + N + 1) * sizeof (zpios_stats_t);
rc = ioctl(zpiosctl_fd, ZPIOS_CMD, cmd);
if (rc)
args->rc = errno;
print_stats(args, cmd);
if (args->verbose) {
rc2 = read(zpiosctl_fd, zpios_buffer, zpios_buffer_size);
zpios_buffer[zpios_buffer_size - 1] = '\0';
if (rc2 < 0) {
fprintf(stdout, "Error reading results: %d\n", rc2);
} else if ((rc2 > 0) && (strlen(zpios_buffer) > 0)) {
fprintf(stdout, "\n%s\n", zpios_buffer);
fflush(stdout);
}
}
free(cmd);
return (rc);
}
static int
run_offsets(cmd_args_t *args)
{
int rc = 0;
while (rc == 0 && get_next(&args->current_O, &args->O)) {
rc = run_one(args, args->current_id,
args->current_T, args->current_N, args->current_C,
args->current_S, args->current_O, args->current_B);
args->current_id++;
}
args->O.next_val = 0;
return (rc);
}
static int
run_region_counts(cmd_args_t *args)
{
int rc = 0;
while (rc == 0 && get_next((uint64_t *)&args->current_N, &args->N))
rc = run_offsets(args);
args->N.next_val = 0;
return (rc);
}
static int
run_region_sizes(cmd_args_t *args)
{
int rc = 0;
while (rc == 0 && get_next(&args->current_S, &args->S)) {
if (args->current_S < args->current_C) {
fprintf(stderr, "Error: in any run chunksize must "
"be strictly smaller than regionsize.\n");
return (EINVAL);
}
rc = run_region_counts(args);
}
args->S.next_val = 0;
return (rc);
}
static int
run_chunk_sizes(cmd_args_t *args)
{
int rc = 0;
while (rc == 0 && get_next(&args->current_C, &args->C)) {
rc = run_region_sizes(args);
}
args->C.next_val = 0;
return (rc);
}
static int
run_block_sizes(cmd_args_t *args)
{
int rc = 0;
while (rc == 0 && get_next(&args->current_B, &args->B)) {
rc = run_chunk_sizes(args);
}
args->B.next_val = 0;
return (rc);
}
static int
run_thread_counts(cmd_args_t *args)
{
int rc = 0;
while (rc == 0 && get_next((uint64_t *)&args->current_T, &args->T))
rc = run_block_sizes(args);
return (rc);
}
int
main(int argc, char **argv)
{
cmd_args_t *args;
int rc = 0;
/* Argument init and parsing */
if ((args = args_init(argc, argv)) == NULL) {
rc = -1;
goto out;
}
/* Device specific init */
if ((rc = dev_init()))
goto out;
/* Generic kernel version string */
if (args->verbose)
fprintf(stdout, "%s", zpios_version);
print_stats_header(args);
rc = run_thread_counts(args);
out:
if (args != NULL)
args_fini(args);
dev_fini();
return (rc);
}

View File

@ -1,476 +0,0 @@
/*
* ZPIOS is a heavily modified version of the original PIOS test code.
* It is designed to have the test code running in the Linux kernel
* against ZFS while still being flexibly controlled from user space.
*
* Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049
*
* Original PIOS Test Code
* Copyright (C) 2004 Cluster File Systems, Inc.
* Written by Peter Braam <braam@clusterfs.com>
* Atul Vidwansa <atul@clusterfs.com>
* Milind Dumbare <milind@clusterfs.com>
*
* This file is part of ZFS on Linux.
* For details, see <http://zfsonlinux.org/>.
*
* ZPIOS is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* ZPIOS is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with ZPIOS. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright (c) 2015, Intel Corporation.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <regex.h>
#include "zpios.h"
/* extracts an unsigned int (64) and K,M,G,T from the string */
/* and returns a 64 bit value converted to the proper units */
static int
kmgt_to_uint64(const char *str, uint64_t *val)
{
char *endptr;
int rc = 0;
*val = strtoll(str, &endptr, 0);
if ((str == endptr) && (*val == 0))
return (EINVAL);
switch (endptr[0]) {
case 'k': case 'K':
*val = (*val) << 10;
break;
case 'm': case 'M':
*val = (*val) << 20;
break;
case 'g': case 'G':
*val = (*val) << 30;
break;
case 't': case 'T':
*val = (*val) << 40;
break;
case '\0':
break;
default:
rc = EINVAL;
}
return (rc);
}
static char *
uint64_to_kmgt(char *str, uint64_t val)
{
char postfix[] = "kmgt";
int i = -1;
while ((val >= KB) && (i < 4)) {
val = (val >> 10);
i++;
}
if (i >= 4)
(void) snprintf(str, KMGT_SIZE-1, "inf");
else
(void) snprintf(str, KMGT_SIZE-1, "%lu%c", (unsigned long)val,
(i == -1) ? '\0' : postfix[i]);
return (str);
}
static char *
kmgt_per_sec(char *str, uint64_t v, double t)
{
char postfix[] = "kmgt";
double val = ((double)v) / t;
int i = -1;
while ((val >= (double)KB) && (i < 4)) {
val /= (double)KB;
i++;
}
if (i >= 4)
(void) snprintf(str, KMGT_SIZE-1, "inf");
else
(void) snprintf(str, KMGT_SIZE-1, "%.2f%c", val,
(i == -1) ? '\0' : postfix[i]);
return (str);
}
static char *
print_flags(char *str, uint32_t flags)
{
str[0] = (flags & DMU_WRITE) ? 'w' : '-';
str[1] = (flags & DMU_READ) ? 'r' : '-';
str[2] = (flags & DMU_VERIFY) ? 'v' : '-';
str[3] = (flags & DMU_REMOVE) ? 'c' : '-';
str[4] = (flags & DMU_FPP) ? 'p' : 's';
str[5] = (flags & (DMU_WRITE_ZC | DMU_READ_ZC)) ? 'z' : '-';
str[6] = (flags & DMU_WRITE_NOWAIT) ? 'O' : '-';
str[7] = '\0';
return (str);
}
static int
regex_match(const char *string, char *pattern)
{
regex_t re = { 0 };
int rc;
rc = regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB | REG_ICASE);
if (rc) {
fprintf(stderr, "Error: Couldn't do regcomp, %d\n", rc);
return (rc);
}
rc = regexec(&re, string, (size_t)0, NULL, 0);
regfree(&re);
return (rc);
}
/* fills the pios_range_repeat structure of comma separated values */
static int
split_string(const char *optarg, char *pattern, range_repeat_t *range)
{
const char comma[] = ",";
char *cp, *token[32];
int rc, i = 0;
if ((rc = regex_match(optarg, pattern)))
return (rc);
cp = strdup(optarg);
if (cp == NULL)
return (ENOMEM);
do {
/*
* STRTOK(3) Each subsequent call, with a null pointer as the
* value of the * first argument, starts searching from the
* saved pointer and behaves as described above.
*/
if (i == 0) {
token[i] = strtok(cp, comma);
} else {
token[i] = strtok(NULL, comma);
}
} while ((token[i++] != NULL) && (i < 32));
range->val_count = i - 1;
for (i = 0; i < range->val_count; i++)
kmgt_to_uint64(token[i], &range->val[i]);
free(cp);
return (0);
}
int
set_count(char *pattern1, char *pattern2, range_repeat_t *range,
char *optarg, uint32_t *flags, char *arg)
{
uint64_t count = range->val_count;
if (flags)
*flags |= FLAG_SET;
range->next_val = 0;
if (regex_match(optarg, pattern1) == 0) {
kmgt_to_uint64(optarg, &range->val[0]);
range->val_count = 1;
} else if (split_string(optarg, pattern2, range) < 0) {
fprintf(stderr, "Error: Incorrect pattern for %s, '%s'\n",
arg, optarg);
return (EINVAL);
} else if (count == range->val_count) {
fprintf(stderr, "Error: input ignored for %s, '%s'\n",
arg, optarg);
}
return (0);
}
/*
* Validates the value with regular expression and sets low, high, incr
* according to value at which flag will be set. Sets the flag after.
*/
int
set_lhi(char *pattern, range_repeat_t *range, char *optarg,
int flag, uint32_t *flag_thread, char *arg)
{
int rc;
if ((rc = regex_match(optarg, pattern))) {
fprintf(stderr, "Error: Wrong pattern in %s, '%s'\n",
arg, optarg);
return (rc);
}
switch (flag) {
case FLAG_LOW:
kmgt_to_uint64(optarg, &range->val_low);
break;
case FLAG_HIGH:
kmgt_to_uint64(optarg, &range->val_high);
break;
case FLAG_INCR:
kmgt_to_uint64(optarg, &range->val_inc_perc);
break;
default:
assert(0);
}
*flag_thread |= flag;
return (0);
}
int
set_noise(uint64_t *noise, char *optarg, char *arg)
{
if (regex_match(optarg, REGEX_NUMBERS) == 0) {
kmgt_to_uint64(optarg, noise);
} else {
fprintf(stderr, "Error: Incorrect pattern for %s\n", arg);
return (EINVAL);
}
return (0);
}
int
set_load_params(cmd_args_t *args, char *optarg)
{
char *param, *search, *searchdup, comma[] = ",";
int rc = 0;
search = strdup(optarg);
if (search == NULL)
return (ENOMEM);
searchdup = search;
while ((param = strtok(search, comma)) != NULL) {
search = NULL;
if (strcmp("fpp", param) == 0) {
args->flags |= DMU_FPP; /* File Per Process/Thread */
} else if (strcmp("ssf", param) == 0) {
args->flags &= ~DMU_FPP; /* Single Shared File */
} else if (strcmp("dmuio", param) == 0) {
args->io_type |= DMU_IO;
args->flags |= (DMU_WRITE | DMU_READ);
} else {
fprintf(stderr, "Invalid load: %s\n", param);
rc = EINVAL;
}
}
free(searchdup);
return (rc);
}
/*
* Checks the low, high, increment values against the single value for
* mutual exclusion, for e.g threadcount is mutually exclusive to
* threadcount_low, ..._high, ..._incr
*/
int
check_mutual_exclusive_command_lines(uint32_t flag, char *arg)
{
if ((flag & FLAG_SET) && (flag & (FLAG_LOW | FLAG_HIGH | FLAG_INCR))) {
fprintf(stderr, "Error: --%s can not be given with --%s_low, "
"--%s_high or --%s_incr.\n", arg, arg, arg, arg);
return (0);
}
if ((flag & (FLAG_LOW | FLAG_HIGH | FLAG_INCR)) && !(flag & FLAG_SET)) {
if (flag != (FLAG_LOW | FLAG_HIGH | FLAG_INCR)) {
fprintf(stderr, "Error: One or more values missing "
"from --%s_low, --%s_high, --%s_incr.\n",
arg, arg, arg);
return (0);
}
}
return (1);
}
void
print_stats_header(cmd_args_t *args)
{
if (args->verbose) {
printf(
"status name id\tth-cnt\trg-cnt\trg-sz\t"
"ch-sz\toffset\trg-no\tch-no\tth-dly\tflags\tblksz\ttime\t"
"cr-time\trm-time\twr-time\trd-time\twr-data\twr-ch\t"
"wr-bw\trd-data\trd-ch\trd-bw\n");
printf(
"-------------------------------------------------"
"-------------------------------------------------"
"-------------------------------------------------"
"--------------------------------------------------\n");
} else {
printf(
"status name id\t"
"wr-data\twr-ch\twr-bw\t"
"rd-data\trd-ch\trd-bw\n");
printf(
"-----------------------------------------"
"--------------------------------------\n");
}
}
static void
print_stats_human_readable(cmd_args_t *args, zpios_cmd_t *cmd)
{
zpios_stats_t *summary_stats;
double t_time, wr_time, rd_time, cr_time, rm_time;
char str[KMGT_SIZE];
if (args->rc)
printf("FAIL: %3d ", args->rc);
else
printf("PASS: ");
printf("%-12s", args->name ? args->name : ZPIOS_NAME);
printf("%2u\t", cmd->cmd_id);
if (args->verbose) {
printf("%u\t", cmd->cmd_thread_count);
printf("%u\t", cmd->cmd_region_count);
printf("%s\t", uint64_to_kmgt(str, cmd->cmd_region_size));
printf("%s\t", uint64_to_kmgt(str, cmd->cmd_chunk_size));
printf("%s\t", uint64_to_kmgt(str, cmd->cmd_offset));
printf("%s\t", uint64_to_kmgt(str, cmd->cmd_region_noise));
printf("%s\t", uint64_to_kmgt(str, cmd->cmd_chunk_noise));
printf("%s\t", uint64_to_kmgt(str, cmd->cmd_thread_delay));
printf("%s\t", print_flags(str, cmd->cmd_flags));
printf("%s\t", uint64_to_kmgt(str, cmd->cmd_block_size));
}
if (args->rc) {
printf("\n");
return;
}
summary_stats = (zpios_stats_t *)cmd->cmd_data_str;
t_time = zpios_timespec_to_double(summary_stats->total_time.delta);
wr_time = zpios_timespec_to_double(summary_stats->wr_time.delta);
rd_time = zpios_timespec_to_double(summary_stats->rd_time.delta);
cr_time = zpios_timespec_to_double(summary_stats->cr_time.delta);
rm_time = zpios_timespec_to_double(summary_stats->rm_time.delta);
if (args->verbose) {
printf("%.2f\t", t_time);
printf("%.3f\t", cr_time);
printf("%.3f\t", rm_time);
printf("%.2f\t", wr_time);
printf("%.2f\t", rd_time);
}
printf("%s\t", uint64_to_kmgt(str, summary_stats->wr_data));
printf("%s\t", uint64_to_kmgt(str, summary_stats->wr_chunks));
printf("%s\t", kmgt_per_sec(str, summary_stats->wr_data, wr_time));
printf("%s\t", uint64_to_kmgt(str, summary_stats->rd_data));
printf("%s\t", uint64_to_kmgt(str, summary_stats->rd_chunks));
printf("%s\n", kmgt_per_sec(str, summary_stats->rd_data, rd_time));
fflush(stdout);
}
static void
print_stats_table(cmd_args_t *args, zpios_cmd_t *cmd)
{
zpios_stats_t *summary_stats;
double wr_time, rd_time;
if (args->rc)
printf("FAIL: %3d ", args->rc);
else
printf("PASS: ");
printf("%-12s", args->name ? args->name : ZPIOS_NAME);
printf("%2u\t", cmd->cmd_id);
if (args->verbose) {
printf("%u\t", cmd->cmd_thread_count);
printf("%u\t", cmd->cmd_region_count);
printf("%llu\t", (long long unsigned)cmd->cmd_region_size);
printf("%llu\t", (long long unsigned)cmd->cmd_chunk_size);
printf("%llu\t", (long long unsigned)cmd->cmd_offset);
printf("%u\t", cmd->cmd_region_noise);
printf("%u\t", cmd->cmd_chunk_noise);
printf("%u\t", cmd->cmd_thread_delay);
printf("0x%x\t", cmd->cmd_flags);
printf("%u\t", cmd->cmd_block_size);
}
if (args->rc) {
printf("\n");
return;
}
summary_stats = (zpios_stats_t *)cmd->cmd_data_str;
wr_time = zpios_timespec_to_double(summary_stats->wr_time.delta);
rd_time = zpios_timespec_to_double(summary_stats->rd_time.delta);
if (args->verbose) {
printf("%ld.%02ld\t",
(long)summary_stats->total_time.delta.ts_sec,
(long)summary_stats->total_time.delta.ts_nsec);
printf("%ld.%02ld\t",
(long)summary_stats->cr_time.delta.ts_sec,
(long)summary_stats->cr_time.delta.ts_nsec);
printf("%ld.%02ld\t",
(long)summary_stats->rm_time.delta.ts_sec,
(long)summary_stats->rm_time.delta.ts_nsec);
printf("%ld.%02ld\t",
(long)summary_stats->wr_time.delta.ts_sec,
(long)summary_stats->wr_time.delta.ts_nsec);
printf("%ld.%02ld\t",
(long)summary_stats->rd_time.delta.ts_sec,
(long)summary_stats->rd_time.delta.ts_nsec);
}
printf("%lld\t", (long long unsigned)summary_stats->wr_data);
printf("%lld\t", (long long unsigned)summary_stats->wr_chunks);
printf("%.4f\t", (double)summary_stats->wr_data / wr_time);
printf("%lld\t", (long long unsigned)summary_stats->rd_data);
printf("%lld\t", (long long unsigned)summary_stats->rd_chunks);
printf("%.4f\n", (double)summary_stats->rd_data / rd_time);
fflush(stdout);
}
void
print_stats(cmd_args_t *args, zpios_cmd_t *cmd)
{
if (args->human_readable)
print_stats_human_readable(args, cmd);
else
print_stats_table(args, cmd);
}

View File

@ -104,7 +104,6 @@ AC_CONFIG_FILES([
cmd/zpool/Makefile
cmd/zstreamdump/Makefile
cmd/ztest/Makefile
cmd/zpios/Makefile
cmd/mount_zfs/Makefile
cmd/fsck_zfs/Makefile
cmd/zvol_id/Makefile
@ -127,7 +126,6 @@ AC_CONFIG_FILES([
module/unicode/Makefile
module/zcommon/Makefile
module/zfs/Makefile
module/zpios/Makefile
module/icp/Makefile
include/Makefile
include/linux/Makefile
@ -138,13 +136,9 @@ AC_CONFIG_FILES([
include/sys/crypto/Makefile
include/sys/sysevent/Makefile
scripts/Makefile
scripts/zpios-profile/Makefile
scripts/zpios-test/Makefile
scripts/zpool-config/Makefile
scripts/common.sh
tests/Makefile
tests/test-runner/Makefile
tests/test-runner/cmd/Makefile
tests/test-runner/bin/Makefile
tests/test-runner/include/Makefile
tests/test-runner/man/Makefile
tests/runfiles/Makefile
@ -310,7 +304,6 @@ AC_CONFIG_FILES([
rpm/generic/zfs.spec
rpm/generic/zfs-kmod.spec
rpm/generic/zfs-dkms.spec
zfs-script-config.sh
zfs.release
])

View File

@ -6,11 +6,7 @@ COMMON_H = \
$(top_srcdir)/include/zfs_deleg.h \
$(top_srcdir)/include/zfs_fletcher.h \
$(top_srcdir)/include/zfs_namecheck.h \
$(top_srcdir)/include/zfs_prop.h \
$(top_srcdir)/include/zpios-ctl.h
KERNEL_H = \
$(top_srcdir)/include/zpios-internal.h
$(top_srcdir)/include/zfs_prop.h
USER_H = \
$(top_srcdir)/include/libnvpair.h \
@ -22,7 +18,7 @@ USER_H = \
$(top_srcdir)/include/libzfs_impl.h \
$(top_srcdir)/include/thread_pool.h
EXTRA_DIST = $(COMMON_H) $(KERNEL_H) $(USER_H)
EXTRA_DIST = $(COMMON_H) $(USER_H)
if CONFIG_USER
libzfsdir = $(includedir)/libzfs
@ -31,5 +27,5 @@ endif
if CONFIG_KERNEL
kerneldir = @prefix@/src/zfs-$(VERSION)/include
kernel_HEADERS = $(COMMON_H) $(KERNEL_H)
kernel_HEADERS = $(COMMON_H)
endif

View File

@ -1,206 +0,0 @@
/*
* ZPIOS is a heavily modified version of the original PIOS test code.
* It is designed to have the test code running in the Linux kernel
* against ZFS while still being flexibly controlled from user space.
*
* Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049
*
* Original PIOS Test Code
* Copyright (C) 2004 Cluster File Systems, Inc.
* Written by Peter Braam <braam@clusterfs.com>
* Atul Vidwansa <atul@clusterfs.com>
* Milind Dumbare <milind@clusterfs.com>
*
* This file is part of ZFS on Linux.
* For details, see <http://zfsonlinux.org/>.
*
* ZPIOS is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* ZPIOS is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with ZPIOS. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright (c) 2015, Intel Corporation.
*/
#ifndef _ZPIOS_CTL_H
#define _ZPIOS_CTL_H
/*
* Contains shared definitions which both the userspace
* and kernelspace portions of zpios must agree on.
*/
#ifndef _KERNEL
#include <stdint.h>
#endif
#define ZPIOS_NAME "zpios"
#define ZPIOS_DEV "/dev/zpios"
#define DMU_IO 0x01
#define DMU_WRITE 0x0001
#define DMU_READ 0x0002
#define DMU_VERIFY 0x0004
#define DMU_REMOVE 0x0008
#define DMU_FPP 0x0010
#define DMU_WRITE_ZC 0x0020 /* Incompatible w/DMU_VERIFY */
#define DMU_READ_ZC 0x0040 /* Incompatible w/DMU_VERIFY */
#define DMU_WRITE_NOWAIT 0x0080
#define DMU_READ_NOPF 0x0100
#define ZPIOS_NAME_SIZE 16
#define ZPIOS_PATH_SIZE 128
#define PHASE_PRE_RUN "pre-run"
#define PHASE_PRE_CREATE "pre-create"
#define PHASE_PRE_WRITE "pre-write"
#define PHASE_PRE_READ "pre-read"
#define PHASE_PRE_REMOVE "pre-remove"
#define PHASE_POST_RUN "post-run"
#define PHASE_POST_CREATE "post-create"
#define PHASE_POST_WRITE "post-write"
#define PHASE_POST_READ "post-read"
#define PHASE_POST_REMOVE "post-remove"
#define ZPIOS_CFG_MAGIC 0x87237190U
typedef struct zpios_cfg {
uint32_t cfg_magic; /* Unique magic */
int32_t cfg_cmd; /* Config command */
int32_t cfg_arg1; /* Config command arg 1 */
int32_t cfg_rc1; /* Config response 1 */
} zpios_cfg_t;
typedef struct zpios_timespec {
uint32_t ts_sec;
uint32_t ts_nsec;
} zpios_timespec_t;
typedef struct zpios_time {
zpios_timespec_t start;
zpios_timespec_t stop;
zpios_timespec_t delta;
} zpios_time_t;
typedef struct zpios_stats {
zpios_time_t total_time;
zpios_time_t cr_time;
zpios_time_t rm_time;
zpios_time_t wr_time;
zpios_time_t rd_time;
uint64_t wr_data;
uint64_t wr_chunks;
uint64_t rd_data;
uint64_t rd_chunks;
} zpios_stats_t;
#define ZPIOS_CMD_MAGIC 0x49715385U
typedef struct zpios_cmd {
uint32_t cmd_magic; /* Unique magic */
uint32_t cmd_id; /* Run ID */
char cmd_pool[ZPIOS_NAME_SIZE]; /* Pool name */
uint64_t cmd_chunk_size; /* Chunk size */
uint32_t cmd_thread_count; /* Thread count */
uint32_t cmd_region_count; /* Region count */
uint64_t cmd_region_size; /* Region size */
uint64_t cmd_offset; /* Region offset */
uint32_t cmd_region_noise; /* Region noise */
uint32_t cmd_chunk_noise; /* Chunk noise */
uint32_t cmd_thread_delay; /* Thread delay */
uint32_t cmd_flags; /* Test flags */
uint32_t cmd_block_size; /* ZFS block size */
char cmd_pre[ZPIOS_PATH_SIZE]; /* Pre-exec hook */
char cmd_post[ZPIOS_PATH_SIZE]; /* Post-exec hook */
char cmd_log[ZPIOS_PATH_SIZE]; /* Requested log dir */
uint64_t cmd_data_size; /* Opaque data size */
char cmd_data_str[0]; /* Opaque data region */
} zpios_cmd_t;
/* Valid ioctls */
#define ZPIOS_CFG _IOWR('f', 101, zpios_cfg_t)
#define ZPIOS_CMD _IOWR('f', 102, zpios_cmd_t)
/* Valid configuration commands */
#define ZPIOS_CFG_BUFFER_CLEAR 0x001 /* Clear text buffer */
#define ZPIOS_CFG_BUFFER_SIZE 0x002 /* Resize text buffer */
#ifndef NSEC_PER_SEC
#define NSEC_PER_SEC 1000000000L
#endif
static inline
void
zpios_timespec_normalize(zpios_timespec_t *ts, uint32_t sec, uint32_t nsec)
{
while (nsec >= NSEC_PER_SEC) {
nsec -= NSEC_PER_SEC;
sec++;
}
while (((int32_t)nsec) < 0) {
nsec += NSEC_PER_SEC;
sec--;
}
ts->ts_sec = sec;
ts->ts_nsec = nsec;
}
static inline
zpios_timespec_t
zpios_timespec_add(zpios_timespec_t lhs, zpios_timespec_t rhs)
{
zpios_timespec_t ts_delta;
zpios_timespec_normalize(&ts_delta, lhs.ts_sec + rhs.ts_sec,
lhs.ts_nsec + rhs.ts_nsec);
return (ts_delta);
}
static inline
zpios_timespec_t
zpios_timespec_sub(zpios_timespec_t lhs, zpios_timespec_t rhs)
{
zpios_timespec_t ts_delta;
zpios_timespec_normalize(&ts_delta, lhs.ts_sec - rhs.ts_sec,
lhs.ts_nsec - rhs.ts_nsec);
return (ts_delta);
}
#ifdef _KERNEL
static inline
zpios_timespec_t
zpios_timespec_now(void)
{
zpios_timespec_t zts_now;
struct timespec ts_now;
ts_now = current_kernel_time();
zts_now.ts_sec = ts_now.tv_sec;
zts_now.ts_nsec = ts_now.tv_nsec;
return (zts_now);
}
#else
static inline
double
zpios_timespec_to_double(zpios_timespec_t ts)
{
return
((double)(ts.ts_sec) +
((double)(ts.ts_nsec) / (double)(NSEC_PER_SEC)));
}
#endif /* _KERNEL */
#endif /* _ZPIOS_CTL_H */

View File

@ -1,115 +0,0 @@
/*
* ZPIOS is a heavily modified version of the original PIOS test code.
* It is designed to have the test code running in the Linux kernel
* against ZFS while still being flexibly controlled from user space.
*
* Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049
*
* Original PIOS Test Code
* Copyright (C) 2004 Cluster File Systems, Inc.
* Written by Peter Braam <braam@clusterfs.com>
* Atul Vidwansa <atul@clusterfs.com>
* Milind Dumbare <milind@clusterfs.com>
*
* This file is part of ZFS on Linux.
* For details, see <http://zfsonlinux.org/>.
*
* ZPIOS is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* ZPIOS is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with ZPIOS. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright (c) 2015, Intel Corporation.
*/
#ifndef _ZPIOS_INTERNAL_H
#define _ZPIOS_INTERNAL_H
#include "zpios-ctl.h"
#define OBJ_SIZE 64
struct run_args;
typedef struct dmu_obj {
objset_t *os;
uint64_t obj;
} dmu_obj_t;
/* thread doing the IO data */
typedef struct thread_data {
struct run_args *run_args;
int thread_no;
int rc;
zpios_stats_t stats;
kmutex_t lock;
} thread_data_t;
/* region for IO data */
typedef struct zpios_region {
__u64 wr_offset;
__u64 rd_offset;
__u64 init_offset;
__u64 max_offset;
dmu_obj_t obj;
zpios_stats_t stats;
kmutex_t lock;
} zpios_region_t;
/* arguments for one run */
typedef struct run_args {
/* Config args */
int id;
char pool[ZPIOS_NAME_SIZE];
__u64 chunk_size;
__u32 thread_count;
__u32 region_count;
__u64 region_size;
__u64 offset;
__u32 region_noise;
__u32 chunk_noise;
__u32 thread_delay;
__u32 flags;
__u32 block_size;
char pre[ZPIOS_PATH_SIZE];
char post[ZPIOS_PATH_SIZE];
char log[ZPIOS_PATH_SIZE];
/* Control data */
objset_t *os;
wait_queue_head_t waitq;
volatile uint64_t threads_done;
kmutex_t lock_work;
kmutex_t lock_ctl;
__u32 region_next;
/* Results data */
struct file *file;
zpios_stats_t stats;
thread_data_t **threads;
zpios_region_t regions[0]; /* Must be last element */
} run_args_t;
#define ZPIOS_INFO_BUFFER_SIZE 65536
#define ZPIOS_INFO_BUFFER_REDZONE 1024
typedef struct zpios_info {
spinlock_t info_lock;
int info_size;
char *info_buffer;
char *info_head; /* Internal kernel use only */
} zpios_info_t;
#endif /* _ZPIOS_INTERNAL_H */

View File

@ -1,4 +1,4 @@
dist_man_MANS = zhack.1 zpios.1 ztest.1 raidz_test.1
dist_man_MANS = zhack.1 ztest.1 raidz_test.1
EXTRA_DIST = cstyle.1
install-data-local:

View File

@ -95,6 +95,5 @@ This man page was written by Darik Horn <dajhorn@vanadac.com>.
.SH SEE ALSO
.BR splat (1),
.BR zfs (8),
.BR zpios (1),
.BR zpool-features (5),
.BR ztest (1)

View File

@ -1,241 +0,0 @@
'\" t
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\"
.\" Copyright 2013 Darik Horn <dajhorn@vanadac.com>. All rights reserved.
.\"
.\" Copyright (c) 2015, Intel Corporation.
.\"
.TH zpios 1 "2013 FEB 28" "ZFS on Linux" "User Commands"
.SH NAME
zpios \- Directly test the DMU.
.SH SYNOPSIS
.LP
.BI "zpios [options] <\-p " pool ">"
.SH DESCRIPTION
This utility runs in-kernel DMU performance and stress tests that do
not depend on the ZFS Posix Layer ("ZPL").
.SH OPTIONS
.HP
.BI "\-t" " regex" ", \-\-threadcount" " regex"
.IP
Start this many threads for each test series, specified as a comma
delimited regular expression. (eg: "-t 1,2,3")
.IP
This option is mutually exclusive with the \fBthreadcount_*\fR
options.
.HP
.BI "\-l" " regex_low" ", \-\-threadcount_low" " regex_low"
.HP
.BI "\-h" " regex_high" ", \-\-threadcount_high" " regex_high"
.HP
.BI "\-e" " regex_incr" ", \-\-threadcount_incr" " regex_incr"
.IP
Start \fIregex_low\fR threads for the first test,
add \fIregex_incr\fR threads for each subsequent test,
and start \fIregex_high\fR threads for the last test.
.IP
These three options must be specified together and are mutually
exclusive with the \fBthreadcount\fR option.
.HP
.BI "\-n" " regex" ", \-\-regioncount" " regex"
.IP
Create this many regions for each test series, specified as a comma
delimited regular expression. (eg: "-n 512,4096,65536")
.IP
This option is mutually exclusive with the \fBregioncount_*\fR
options.
.HP
.BI "\-i" " regex_low" ", \-\-regioncount_low" " regex_low"
.HP
.BI "\-j" " regex_high" ", \-\-regioncount_high" " regex_high"
.HP
.BI "\-k" " regex_incr" ", \-\-regioncount_incr" " regex_incr"
.IP
Create \fIregex_low\fR regions for the first test,
add \fIregex_incr\fR regions for each subsequent test, and
create \fIregex_high\fR regions for the last test.
.IP
These three options must be specified together and are mutually
exclusive with the \fBregioncount\fR option.
.HP
.BI "\-o" " size" ", \-\-offset" " size"
.IP
Create regions at \fIsize\fR offset for each test series, specified as
a comma delimited regular expression with an optional unit suffix.
(eg: "-o 4M" means four megabytes.)
.IP
This option is mutually exclusive with the \fBoffset_*\fR options.
.HP
.BI "\-m" " size_low" ", \-\-offset_low" " size_low"
.HP
.BI "\-q" " size_high" ", \-\-offset_high" " size_high"
.HP
.BI "\-r" " size_incr" ", \-\-offset_incr" " size_incr"
.IP
Create a region at \fIsize_low\fR offset for the first test, add
\fIsize_incr\fR to the offset for each subsequent test, and create
a region at \fIsize_high\fR offset for the last test.
.IP
These three options must be specified together and are mutually
exclusive with the \fBoffset\fR option.
.HP
.BI "\-c" " size" ", \-\-chunksize" " size"
.IP
Use \fIsize\fR chunks for each test, specified as a comma delimited
regular expression with an optional unit suffix. (eg: "-c 1M" means
one megabyte.) The chunk size must be at least the region size.
.IP
This option is mutually exclusive with the \fBchunksize_*\fB options.
.HP
.BI "\-a" " size_low" ", \-\-chunksize_low" " size_low"
.HP
.BI "\-b" " size_high" ", \-\-chunksize_high" " size_high"
.HP
.BI "\-g" " size_incr" ", \-\-chunksize_incr" " size_incr"
.IP
Use a \fIsize_low\fR chunk size for the first test, add \fIsize_incr\fR
to the chunk size for each subsequent test, and use a \fIsize_high\fR
chunk size for the last test.
.IP
These three options must be specified together and are mutually
exclusive with the \fBchunksize\fR option.
.HP
.BI "\-s" " size" ", \-\-regionsize" " size"
.IP
Use \fIsize\fR regions for each test, specified as a comma delimited
regular expression with an optional unit suffix. (eg: "-s 1M" means
one megabyte.)
.IP
This option is mutually exclusive with the \fBregionsize_*\fB options.
.HP
.BI "\-A" " size_low" ", \-\-regionsize_low" " size_low"
.HP
.BI "\-B" " size_high" ", \-\-regionsize_high" " size_high"
.HP
.BI "\-C" " size_incr" ", \-\-regionsize_incr" " size_incr"
.IP
Use a \fIsize_low\fR region size for the first test, add \fIsize_incr\fR
to the region size for each subsequent test, and use a \fIsize_high\fR
region size for the last test.
.IP
These three options must be specified together and are mutually
exclusive with the \fBregionsize\fR option.
.HP
.BI "\-S" " size | sizes" ", \-\-blocksize" " size | sizes"
.IP
Use \fIsize\fR ZFS blocks for each test, specified as a comma delimited
regular expression with an optional unit suffix. (eg: "-S 1M" means
one megabyte.) The supported range is powers of two from 128K through 16M.
A range of blocks can be tested as follows: "-S 128K,256K,512K,1M".
.IP
.HP
.BI "\-L" " dmu_flags" ", \-\-load" " dmu_flags"
.IP
Specify \fIdmuio\fR for regular DMU_IO, \fIssf\fR for single shared
file access, or \fIfpp\fR for per thread access. Use commas to delimit
multiple flags. (eg: "-L dmuio,ssf")
.HP
.BI "\-p" " name" ", \-\-pool" " name"
.IP
The pool name, which is mandatory.
.HP
.BI "\-M" " test" ", \-\-name" " test"
.IP
An arbitrary string that appears in the program output.
.HP
.BI "-x, \-\-cleanup"
.IP
Enable the DMU_REMOVE flag.
.HP
.BI "\-P" " command" ", \-\-prerun" " command"
.IP
Invoke \fIcommand\fR from the kernel before running the test. Shell
expansion is not performed and the environment is set to
HOME=/; TERM=linux; PATH=/sbin:/usr/sbin:/bin:/usr/bin.
.HP
.BI "\-R" " command" ", \-\-postrun" " command"
.IP
Invoke \fIcommand\fR from the kernel after running the test. Shell
expansion is not performed and the environment is set to
HOME=/; TERM=linux; PATH=/sbin:/usr/sbin:/bin:/usr/bin.
.HP
.BI "\-G" " directory" ", \-\-log" " directory"
.IP
Put logging output in this directory.
.HP
.BI "\-I" " size" ", \-\-regionnoise" " size"
.IP
Randomly vary the \fBregionsize\fR parameter for each test
modulo \fIsize\fR bytes.
.HP
.BI "\-N" " size" ", \-\-chunknoise" " size"
.IP
Randomly vary the \fBchunksize\fR parameter for each test
modulo \fIsize\fR bytes.
.HP
.BI "\-T" " time" ", \-\-threaddelay" " time"
.IP
Randomly vary the execution time for each test
modulo \fItime\fR kernel jiffies.
.HP
.BI "\-V" "" ", \-\-verify" ""
.IP
Enable the DMU_VERIFY flag for trivial data verification.
.HP
.BI "\-z" "" ", \-\-zerocopy" ""
.IP
Enable the DMU_READ_ZC and DMU_WRITE_ZC flags, which are
currently unimplemented for Linux.
.IP
.HP
.BI "\-O" "" ", \-\-nowait" ""
.IP
Enable the DMU_WRITE_NOWAIT flag.
.HP
.BI "\-f" "" ", \-\-noprefetch" ""
.IP
Enable the DMU_READ_NOPF flag.
.HP
.BI "\-H" "" ", \-\-human\-readable" ""
.IP
Print PASS and FAIL results explicitly and put unit suffixes on large
numbers.
.HP
.BI "\-v" "" ", \-\-verbose" ""
.IP
Increase output verbosity.
.HP
.BI "\-?" " " ", \-\-help" " "
.IP
Print the usage message.
.SH "AUTHORS"
The original zpios implementation was created by Cluster File Systems
Inc and adapted to ZFS on Linux by Brian Behlendorf
<behlendorf1@llnl.gov>.
This man page was written by Darik Horn <dajhorn@vanadac.com>.
.SH "SEE ALSO"
.BR zpool (8),
.BR zfs (8)

View File

@ -3,7 +3,6 @@ subdir-m += nvpair
subdir-m += unicode
subdir-m += zcommon
subdir-m += zfs
subdir-m += zpios
subdir-m += icp
INSTALL_MOD_DIR ?= extra

View File

@ -152,5 +152,7 @@ icp_init(void)
#if defined(_KERNEL) && defined(HAVE_SPL)
module_exit(icp_fini);
module_init(icp_init);
MODULE_AUTHOR(ZFS_META_AUTHOR);
MODULE_LICENSE(ZFS_META_LICENSE);
MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
#endif

View File

@ -1,10 +0,0 @@
src = @abs_top_srcdir@/module/zpios
obj = @abs_builddir@
MODULE := zpios
EXTRA_CFLAGS = $(ZFS_MODULE_CFLAGS) @KERNELCPPFLAGS@
obj-$(CONFIG_ZFS) := $(MODULE).o
$(MODULE)-objs += pios.o

File diff suppressed because it is too large

View File

@ -37,7 +37,6 @@
#define buildforkernels akmod
%bcond_with debug
%bcond_with debug_dmu_tx
Name: %{module}-kmod
@ -117,12 +116,6 @@ bash %{SOURCE10} --target %{_target_cpu} %{?repo:--repo %{?repo}} --kmodname %{
%define debug --disable-debug
%endif
%if %{with debug_dmu_tx}
%define debug_dmu_tx --enable-debug-dmu-tx
%else
%define debug_dmu_tx --disable-debug-dmu-tx
%endif
#
# Allow the overriding of spl locations
#
@ -163,8 +156,7 @@ for kernel_version in %{?kernel_versions}; do
--with-linux-obj=%{kobj} \
--with-spl="%{spldir}" \
--with-spl-obj="%{splobj}" \
%{debug} \
%{debug_dmu_tx}
%{debug}
make %{?_smp_mflags}
cd ..
done

View File

@ -1,5 +1,4 @@
%bcond_with debug
%bcond_with debug_dmu_tx
Name: @PACKAGE@-kmod
Version: @VERSION@
@ -59,12 +58,6 @@ fi
%define debug --disable-debug
%endif
%if %{with debug_dmu_tx}
%define debug_dmu_tx --enable-debug-dmu-tx
%else
%define debug_dmu_tx --disable-debug-dmu-tx
%endif
%setup -n %{kmod_name}-%{version}
%build
%configure \
@ -73,8 +66,7 @@ fi
--with-linux-obj=%{kobj} \
--with-spl="%{splsrc}" \
--with-spl-obj="%{splobj}" \
%{debug} \
%{debug_dmu_tx}
%{debug}
make %{?_smp_mflags}
%install

View File

@ -1,17 +1,67 @@
SUBDIRS = zpool-config zpios-test zpios-profile
EXTRA_DIST = dkms.mkconf dkms.postbuild kmodtool zfs2zol-patch.sed cstyle.pl
pkgdatadir = $(datadir)/@PACKAGE@
dist_pkgdata_SCRIPTS = \
$(top_builddir)/scripts/common.sh \
$(top_srcdir)/scripts/zimport.sh \
$(top_srcdir)/scripts/zfs.sh \
$(top_srcdir)/scripts/zfs-tests.sh \
$(top_srcdir)/scripts/zloop.sh \
$(top_srcdir)/scripts/zpool-create.sh \
$(top_srcdir)/scripts/zpios.sh \
$(top_srcdir)/scripts/zpios-sanity.sh \
$(top_srcdir)/scripts/zpios-survey.sh \
$(top_srcdir)/scripts/smb.sh \
$(top_srcdir)/scripts/zfs-helpers.sh
EXTRA_DIST = \
common.sh.in \
commitcheck.sh \
dkms.mkconf \
dkms.postbuild \
kmodtool \
paxcheck.sh \
zfs2zol-patch.sed \
cstyle.pl
define EXTRA_ENVIRONMENT
# Only required for in-tree use
export INTREE="yes"
export GDB="/usr/bin/libtool --mode=execute gdb"
export LDMOD=/sbin/insmod
export CMD_DIR=@abs_top_builddir@/cmd
export UDEV_RULE_DIR=@abs_top_builddir@/udev/rules.d
export ZEDLET_ETC_DIR=$$CMD_DIR/zed/zed.d
export ZEDLET_LIBEXEC_DIR=$$CMD_DIR/zed/zed.d
export ZPOOL_SCRIPT_DIR=$$CMD_DIR/zpool/zpool.d
export ZPOOL_SCRIPTS_PATH=$$CMD_DIR/zpool/zpool.d
export INSTALL_UDEV_DIR=@udevdir@
export INSTALL_UDEV_RULE_DIR=@udevruledir@
export INSTALL_MOUNT_HELPER_DIR=@mounthelperdir@
export INSTALL_SYSCONF_DIR=@sysconfdir@
export KMOD_SPL=@SPL_OBJ@/module/spl/spl.ko
export KMOD_SPLAT=@SPL_OBJ@/module/splat/splat.ko
export KMOD_ZAVL=@abs_top_builddir@/module/avl/zavl.ko
export KMOD_ZNVPAIR=@abs_top_builddir@/module/nvpair/znvpair.ko
export KMOD_ZUNICODE=@abs_top_builddir@/module/unicode/zunicode.ko
export KMOD_ZCOMMON=@abs_top_builddir@/module/zcommon/zcommon.ko
export KMOD_ICP=@abs_top_builddir@/module/icp/icp.ko
export KMOD_ZFS=@abs_top_builddir@/module/zfs/zfs.ko
endef
export EXTRA_ENVIRONMENT
all-local:
-$(SED) -e '\|^export BIN_DIR=|s|$$|@abs_top_builddir@/bin|' \
-e '\|^export SBIN_DIR=|s|$$|@abs_top_builddir@/bin|' \
-e '\|^export ZTS_DIR=|s|$$|@abs_top_srcdir@/tests|' \
-e '\|^export SCRIPT_DIR=|s|$$|@abs_top_srcdir@/scripts|' \
common.sh.in >common.sh
-echo "$$EXTRA_ENVIRONMENT" >>common.sh
clean-local:
-$(RM) common.sh
install-data-hook:
-$(SED) -e '\|^export BIN_DIR=|s|$$|@bindir@|' \
-e '\|^export SBIN_DIR=|s|$$|@sbindir@|' \
-e '\|^export ZTS_DIR=|s|$$|@datadir@/@PACKAGE@|' \
-e '\|^export SCRIPT_DIR=|s|$$|@datadir@/@PACKAGE@|' \
common.sh.in >$(DESTDIR)$(datadir)/@PACKAGE@/common.sh
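
For in-tree use, the all-local rule above fills in the empty export lines from
common.sh.in with build-tree paths; assuming a checkout at /home/user/zfs (a
hypothetical path), the generated common.sh begins roughly as:

    export BIN_DIR=/home/user/zfs/bin
    export SBIN_DIR=/home/user/zfs/bin
    export ZTS_DIR=/home/user/zfs/tests
    export SCRIPT_DIR=/home/user/zfs/scripts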


@ -1,768 +1,20 @@
#!/bin/bash
#
# Common support functions for testing scripts. If a script-config
# file is available it will be sourced so in-tree kernel modules and
# utilities will be used. If no script-config can be found then the
# installed kernel modules and utilities will be used.
basedir="$(dirname $0)"
SCRIPT_CONFIG=zfs-script-config.sh
if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
. "${basedir}/../${SCRIPT_CONFIG}"
else
KERNEL_MODULES=(zlib_deflate zlib_inflate)
MODULES=(spl splat zavl znvpair zunicode zcommon icp zfs)
fi
PROG="<define PROG>"
CLEANUP=
VERBOSE=
VERBOSE_FLAG=
FORCE=
FORCE_FLAG=
ERROR=
RAID0S=()
RAID10S=()
RAIDZS=()
RAIDZ2S=()
TESTS_RUN=${TESTS_RUN:-'*'}
TESTS_SKIP=${TESTS_SKIP:-}
prefix=@prefix@
exec_prefix=@exec_prefix@
pkgdatadir=@datarootdir@/@PACKAGE@
bindir=@bindir@
sbindir=@sbindir@
udevdir=@udevdir@
udevruledir=@udevruledir@
mounthelperdir=@mounthelperdir@
sysconfdir=@sysconfdir@
localstatedir=@localstatedir@
ETCDIR=${ETCDIR:-/etc}
DEVDIR=${DEVDIR:-/dev/disk/by-vdev}
ZPOOLDIR=${ZPOOLDIR:-${pkgdatadir}/zpool-config}
ZPIOSDIR=${ZPIOSDIR:-${pkgdatadir}/zpios-test}
ZPIOSPROFILEDIR=${ZPIOSPROFILEDIR:-${pkgdatadir}/zpios-profile}
TESTSDIR=${TESTSDIR:-${pkgdatadir}/zfs-tests}
RUNFILEDIR=${RUNFILEDIR:-${pkgdatadir}/runfiles}
ZDB=${ZDB:-${sbindir}/zdb}
ZFS=${ZFS:-${sbindir}/zfs}
ZINJECT=${ZINJECT:-${sbindir}/zinject}
ZHACK=${ZHACK:-${sbindir}/zhack}
ZPOOL=${ZPOOL:-${sbindir}/zpool}
ZTEST=${ZTEST:-${sbindir}/ztest}
ZPIOS=${ZPIOS:-${sbindir}/zpios}
COMMON_SH=${COMMON_SH:-${pkgdatadir}/common.sh}
ZFS_SH=${ZFS_SH:-${pkgdatadir}/zfs.sh}
ZPOOL_CREATE_SH=${ZPOOL_CREATE_SH:-${pkgdatadir}/zpool-create.sh}
ZPIOS_SH=${ZPIOS_SH:-${pkgdatadir}/zpios.sh}
ZPIOS_SURVEY_SH=${ZPIOS_SURVEY_SH:-${pkgdatadir}/zpios-survey.sh}
TEST_RUNNER=${TEST_RUNNER:-${pkgdatadir}/test-runner/bin/test-runner.py}
STF_TOOLS=${STF_TOOLS:-${pkgdatadir}/test-runner}
STF_SUITE=${STF_SUITE:-${pkgdatadir}/zfs-tests}
LDMOD=${LDMOD:-/sbin/modprobe}
LSMOD=${LSMOD:-/sbin/lsmod}
RMMOD=${RMMOD:-/sbin/rmmod}
INFOMOD=${INFOMOD:-/sbin/modinfo}
LOSETUP=${LOSETUP:-/sbin/losetup}
MDADM=${MDADM:-/sbin/mdadm}
DMSETUP=${DMSETUP:-/sbin/dmsetup}
PARTED=${PARTED:-/sbin/parted}
BLOCKDEV=${BLOCKDEV:-/sbin/blockdev}
LSSCSI=${LSSCSI:-/usr/bin/lsscsi}
SCSIRESCAN=${SCSIRESCAN:-/usr/bin/scsi-rescan}
SYSCTL=${SYSCTL:-/sbin/sysctl}
UDEVADM=${UDEVADM:-/sbin/udevadm}
AWK=${AWK:-/usr/bin/awk}
GDB=${GDB:-/usr/bin/gdb}
ZED_PIDFILE=${ZED_PIDFILE:-${localstatedir}/run/zed.pid}
COLOR_BLACK="\033[0;30m"
COLOR_DK_GRAY="\033[1;30m"
COLOR_BLUE="\033[0;34m"
COLOR_LT_BLUE="\033[1;34m"
COLOR_GREEN="\033[0;32m"
COLOR_LT_GREEN="\033[1;32m"
COLOR_CYAN="\033[0;36m"
COLOR_LT_CYAN="\033[1;36m"
COLOR_RED="\033[0;31m"
COLOR_LT_RED="\033[1;31m"
COLOR_PURPLE="\033[0;35m"
COLOR_LT_PURPLE="\033[1;35m"
COLOR_BROWN="\033[0;33m"
COLOR_YELLOW="\033[1;33m"
COLOR_LT_GRAY="\033[0;37m"
COLOR_WHITE="\033[1;37m"
COLOR_RESET="\033[0m"
die() {
echo -e "${PROG}: $1" >&2
exit 1
}
msg() {
if [ ${VERBOSE} ]; then
echo "$@"
fi
}
pass() {
echo -e "${COLOR_GREEN}Pass${COLOR_RESET}"
}
fail() {
echo -e "${COLOR_RED}Fail${COLOR_RESET} ($1)"
exit $1
}
skip() {
echo -e "${COLOR_BROWN}Skip${COLOR_RESET}"
}
populate() {
local ROOT=$1
local MAX_DIR_SIZE=$2
local MAX_FILE_SIZE=$3
mkdir -p $ROOT/{a,b,c,d,e,f,g}/{h,i}
DIRS=`find $ROOT`
for DIR in $DIRS; do
COUNT=$(($RANDOM % $MAX_DIR_SIZE))
for i in `seq $COUNT`; do
FILE=`mktemp -p ${DIR}`
SIZE=$(($RANDOM % $MAX_FILE_SIZE))
dd if=/dev/urandom of=$FILE bs=1k count=$SIZE &>/dev/null
done
done
return 0
}
init() {
# Create a random directory tree of files and sub-directories to
# act as a copy source for the various regression tests.
SRC_DIR=`mktemp -d -p /var/tmp/ zfs.src.XXXXXXXX`
trap "rm -Rf $SRC_DIR" INT TERM EXIT
populate $SRC_DIR 10 100
}
check_modules() {
local LOADED_MODULES=()
local MISSING_MODULES=()
for MOD in ${MODULES[*]}; do
local NAME=`basename $MOD .ko`
if ${LSMOD} | egrep -q "^${NAME}"; then
LOADED_MODULES=(${NAME} ${LOADED_MODULES[*]})
fi
if [ ${INFOMOD} ${MOD} 2>/dev/null ]; then
MISSING_MODULES=("\t${MOD}\n" ${MISSING_MODULES[*]})
fi
done
if [ ${#LOADED_MODULES[*]} -gt 0 ]; then
ERROR="Unload these modules with '${PROG} -u':\n"
ERROR="${ERROR}${LOADED_MODULES[*]}"
return 1
fi
if [ ${#MISSING_MODULES[*]} -gt 0 ]; then
ERROR="The following modules can not be found,"
ERROR="${ERROR} ensure your source trees are built:\n"
ERROR="${ERROR}${MISSING_MODULES[*]}"
return 1
fi
return 0
}
load_module() {
local NAME=`basename $1 .ko`
if [ ${VERBOSE} ]; then
echo "Loading ${NAME} ($@)"
fi
${LDMOD} $* &>/dev/null
if [ $? -ne 0 ]; then
echo "Failed to load ${NAME} ($@)"
return 1
fi
return 0
}
load_modules() {
mkdir -p /etc/zfs
for MOD in ${KERNEL_MODULES[*]}; do
load_module ${MOD} >/dev/null
done
for MOD in ${MODULES[*]}; do
local NAME=`basename ${MOD} .ko`
local VALUE=
for OPT in "$@"; do
OPT_NAME=`echo ${OPT} | cut -f1 -d'='`
if [ ${NAME} = "${OPT_NAME}" ]; then
VALUE=`echo ${OPT} | cut -f2- -d'='`
fi
done
load_module ${MOD} ${VALUE} || return 1
done
if [ ${VERBOSE} ]; then
echo "Successfully loaded ZFS module stack"
fi
return 0
}
unload_module() {
local NAME=`basename $1 .ko`
if [ ${VERBOSE} ]; then
echo "Unloading ${NAME} ($@)"
fi
${RMMOD} ${NAME} || ERROR="Failed to unload ${NAME}" return 1
return 0
}
unload_modules() {
local MODULES_REVERSE=( $(echo ${MODULES[@]} |
${AWK} '{for (i=NF;i>=1;i--) printf $i" "} END{print ""}') )
for MOD in ${MODULES_REVERSE[*]}; do
local NAME=`basename ${MOD} .ko`
local USE_COUNT=`${LSMOD} |
egrep "^${NAME} "| ${AWK} '{print $3}'`
if [ "${USE_COUNT}" = 0 ] ; then
unload_module ${MOD} || return 1
fi
done
if [ ${VERBOSE} ]; then
echo "Successfully unloaded ZFS module stack"
fi
return 0
}
#
# Check that the mdadm utilities are installed.
#
check_loop_utils() {
test -f ${LOSETUP} || die "${LOSETUP} utility must be installed"
}
#
# Find and return an unused loop device. A new /dev/loopN node will be
# created if required. The kernel loop driver will automatically register
# the minor as long as it's less than /sys/module/loop/parameters/max_loop.
#
unused_loop_device() {
local DEVICE=$(${LOSETUP} -f)
local MAX_LOOP_PATH="/sys/module/loop/parameters/max_loop"
local MAX_LOOP;
# An existing /dev/loopN device was available.
if [ -n "${DEVICE}" ]; then
echo "${DEVICE}"
return 0
fi
# Create a new /dev/loopN provided we are not at MAX_LOOP.
if [ -f "${MAX_LOOP_PATH}" ]; then
MAX_LOOP=`cat /sys/module/loop/parameters/max_loop`
if [ ${MAX_LOOP} -eq 0 ]; then
MAX_LOOP=255
fi
for (( i=0; i<=${MAX_LOOP}; i++ )); do
DEVICE="/dev/loop$i"
if [ -b "${DEVICE}" ]; then
continue
else
mknod -m660 "${DEVICE}" b 7 $i
chown root.disk "${DEVICE}"
chmod 666 "${DEVICE}"
echo "${DEVICE}"
return 0
fi
done
fi
die "Error: Unable to create new loopback device"
}
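#
# Illustrative usage only (not part of the original helper): back the
# returned loop device with a sparse file before handing it to a test, e.g.
#
#   FILE=/var/tmp/zpool-vdev0          # hypothetical backing file
#   truncate -s 1G "${FILE}"
#   DEVICE=$(unused_loop_device)
#   ${LOSETUP} "${DEVICE}" "${FILE}"
#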
#
# This can be slightly dangerous because the loop devices we are
# cleaning up may not be ours. However, if the devices are currently
# in use we will not be able to remove them, and we only remove
# devices which include 'zpool' or 'deleted' in the name. So any
# damage we might do should be limited to other zfs related testing.
#
cleanup_loop_devices() {
local TMP_FILE=`mktemp`
${LOSETUP} -a | tr -d '()' >${TMP_FILE}
${AWK} -F":" -v losetup="$LOSETUP" \
'/zpool/ || /deleted/ { system("losetup -d "$1) }' ${TMP_FILE}
${AWK} -F" " '/zpool/ || /deleted/ { system("rm -f "$3) }' ${TMP_FILE}
rm -f ${TMP_FILE}
}
#
# Destroy the passed loopback devices, this is used when you know
# the names of the loopback devices.
#
destroy_loop_devices() {
local LODEVICES="$1"
msg "Destroying ${LODEVICES}"
${LOSETUP} -d ${LODEVICES} || \
die "Error $? destroying ${FILE} -> ${DEVICE} loopback"
rm -f ${FILES}
return 0
}
#
# Create a device label taking care to briefly wait if udev needs to settle.
#
label() {
local DEVICE=$1
local LABEL=$2
wait_udev ${DEVICE} 30 || return 1
${PARTED} ${DEVICE} --script -- mklabel ${LABEL} || return 2
return 0
}
#
# Create a primary partition on a block device.
#
partition() {
local DEVICE=$1
local TYPE=$2
local START=$3
local END=$4
${PARTED} --align optimal ${DEVICE} --script -- \
mkpart ${TYPE} ${START} ${END} || return 1
udev_trigger
return 0
}
#
# Create a filesystem on the block device
#
format() {
local DEVICE=$1
local FSTYPE=$2
# Force 4K blocksize, else mkfs.ext2 tries to use 8K, which
# won't mount
/sbin/mkfs.${FSTYPE} -b 4096 -F -q ${DEVICE} >/dev/null || return 1
return 0
}
#
# Check that the mdadm utilities are installed.
#
check_md_utils() {
test -f ${MDADM} || die "${MDADM} utility must be installed"
test -f ${PARTED} || die "${PARTED} utility must be installed"
}
check_md_partitionable() {
local LOFILE=`mktemp -p /tmp zpool-lo.XXXXXXXX`
local LODEVICE=`unused_loop_device`
local MDDEVICE=`unused_md_device`
local RESULT=1
check_md_utils
rm -f ${LOFILE}
dd if=/dev/zero of=${LOFILE} bs=1M count=0 seek=16 \
&>/dev/null || return ${RESULT}
msg "Creating ${LODEVICE} using ${LOFILE}"
${LOSETUP} ${LODEVICE} ${LOFILE}
if [ $? -ne 0 ]; then
rm -f ${LOFILE}
return ${RESULT}
fi
msg "Creating ${MDDEVICE} using ${LODEVICE}"
${MDADM} --build ${MDDEVICE} --level=faulty \
--raid-devices=1 ${LODEVICE} &>/dev/null
if [ $? -ne 0 ]; then
destroy_loop_devices ${LODEVICE}
rm -f ${LOFILE}
return ${RESULT}
fi
wait_udev ${MDDEVICE} 30
${BLOCKDEV} --rereadpt ${MDDEVICE} 2>/dev/null
RESULT=$?
destroy_md_devices ${MDDEVICE}
destroy_loop_devices ${LODEVICE}
rm -f ${LOFILE}
return ${RESULT}
}
#
# Find and return an unused md device.
#
unused_md_device() {
for (( i=0; i<32; i++ )); do
MDDEVICE=md${i}
# Skip active devices in /proc/mdstat.
grep -q "${MDDEVICE} " /proc/mdstat && continue
# Device doesn't exist, use it.
if [ ! -e /dev/${MDDEVICE} ]; then
echo /dev/${MDDEVICE}
return
fi
# Device exists but may not be in use.
if [ -b /dev/${MDDEVICE} ]; then
${MDADM} --detail /dev/${MDDEVICE} &>/dev/null
if [ $? -eq 1 ]; then
echo /dev/${MDDEVICE}
return
fi
fi
done
die "Error: Unable to find unused md device"
}
#
# This can be slightly dangerous because it is possible the md devices
# we are cleaning up may not be ours. However, if the devices are
# currently in use we will not be able to remove them, and even if
# we remove devices which were not ours we do not zero the super block
# so you should be able to reconstruct them.
#
cleanup_md_devices() {
destroy_md_devices "`ls /dev/md* 2>/dev/null | grep -v p`"
udev_trigger
}
#
# Destroy the passed md devices, this is used when you know
# the names of the md devices.
#
destroy_md_devices() {
local MDDEVICES="$1"
msg "Destroying ${MDDEVICES}"
for MDDEVICE in ${MDDEVICES}; do
${MDADM} --stop ${MDDEVICE} &>/dev/null
${MDADM} --remove ${MDDEVICE} &>/dev/null
${MDADM} --detail ${MDDEVICE} &>/dev/null
done
return 0
}
#
# Check that the scsi utilities are installed.
#
check_sd_utils() {
${INFOMOD} scsi_debug &>/dev/null || die "scsi_debug module required"
test -f ${LSSCSI} || die "${LSSCSI} utility must be installed"
}
#
# Rescan the scsi bus for scsi_debug devices. It is preferable to use the
# scsi-rescan tool if it is installed, but if it's not we can fall back to
# removing and re-adding the device manually. This rescan will only affect
# the first scsi_debug device if scsi-rescan is missing.
#
scsi_rescan() {
local AWK_SCRIPT="/scsi_debug/ { print \$1; exit }"
if [ -f ${SCSIRESCAN} ]; then
${SCSIRESCAN} --forcerescan --remove &>/dev/null
else
local SCSIID=`${LSSCSI} | ${AWK} "${AWK_SCRIPT}" | tr -d '[]'`
local SCSIHOST=`echo ${SCSIID} | cut -f1 -d':'`
echo 1 >"/sys/class/scsi_device/${SCSIID}/device/delete"
udev_trigger
echo "- - -" >/sys/class/scsi_host/host${SCSIHOST}/scan
udev_trigger
fi
}
#
# Trigger udev and wait for it to settle.
#
udev_trigger() {
if [ -f ${UDEVADM} ]; then
${UDEVADM} trigger --action=change --subsystem-match=block
${UDEVADM} settle
else
/sbin/udevtrigger
/sbin/udevsettle
fi
}
#
# The following udev helper functions assume that the provided
# udev rules file will create a /dev/disk/by-vdev/<CHANNEL><RANK>
# disk mapping. In this mapping each CHANNEL is represented by
# the letters a-z, and the RANK is represented by the numbers
# 1-n. A CHANNEL should identify a group of RANKS which are all
# attached to a single controller; each RANK represents a disk.
# This provides a simple mechanism to locate a specific drive
# given a known hardware configuration.
#
udev_setup() {
local SRC_PATH=$1
# When running in tree manually construct symlinks in tree to
# the proper devices. Symlinks are installed for all entries
# in the config file regardless of if that device actually
# exists. When installed as a package udev can be relied on for
# this and it will only create links for devices which exist.
if [ ${INTREE} ]; then
PWD=`pwd`
mkdir -p ${DEVDIR}/
cd ${DEVDIR}/
${AWK} '!/^#/ && /./ { system( \
"ln -f -s /dev/disk/by-path/"$2" "$1";" \
"ln -f -s /dev/disk/by-path/"$2"-part1 "$1"p1;" \
"ln -f -s /dev/disk/by-path/"$2"-part9 "$1"p9;" \
) }' $SRC_PATH
cd ${PWD}
else
DST_FILE=`basename ${SRC_PATH} | cut -f1-2 -d'.'`
DST_PATH=/etc/zfs/${DST_FILE}
if [ -e ${DST_PATH} ]; then
die "Error: Config ${DST_PATH} already exists"
fi
cp ${SRC_PATH} ${DST_PATH}
udev_trigger
fi
return 0
}
udev_cleanup() {
local SRC_PATH=$1
if [ ${INTREE} ]; then
PWD=`pwd`
cd ${DEVDIR}/
${AWK} '!/^#/ && /./ { system( \
"rm -f "$1" "$1"p1 "$1"p9") }' $SRC_PATH
cd ${PWD}
fi
return 0
}
udev_cr2d() {
local CHANNEL=`echo "obase=16; $1+96" | bc`
local RANK=$2
printf "\x${CHANNEL}${RANK}"
}
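#
# Worked example of the CHANNEL/RANK mapping described above: channel 1
# corresponds to the letter 'a' (1+96 = 0x61), so `udev_cr2d 1 2` prints
# "a2" and the matching by-vdev link would be ${DEVDIR}/a2.
#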
udev_raid0_setup() {
local RANKS=$1
local CHANNELS=$2
local IDX=0
RAID0S=()
for RANK in `seq 1 ${RANKS}`; do
for CHANNEL in `seq 1 ${CHANNELS}`; do
DISK=`udev_cr2d ${CHANNEL} ${RANK}`
RAID0S[${IDX}]="${DEVDIR}/${DISK}"
let IDX=IDX+1
done
done
return 0
}
udev_raid10_setup() {
local RANKS=$1
local CHANNELS=$2
local IDX=0
RAID10S=()
for RANK in `seq 1 ${RANKS}`; do
for CHANNEL1 in `seq 1 2 ${CHANNELS}`; do
let CHANNEL2=CHANNEL1+1
DISK1=`udev_cr2d ${CHANNEL1} ${RANK}`
DISK2=`udev_cr2d ${CHANNEL2} ${RANK}`
GROUP="${DEVDIR}/${DISK1} ${DEVDIR}/${DISK2}"
RAID10S[${IDX}]="mirror ${GROUP}"
let IDX=IDX+1
done
done
return 0
}
udev_raidz_setup() {
local RANKS=$1
local CHANNELS=$2
RAIDZS=()
for RANK in `seq 1 ${RANKS}`; do
RAIDZ=("raidz")
for CHANNEL in `seq 1 ${CHANNELS}`; do
DISK=`udev_cr2d ${CHANNEL} ${RANK}`
RAIDZ[${CHANNEL}]="${DEVDIR}/${DISK}"
done
RAIDZS[${RANK}]="${RAIDZ[*]}"
done
return 0
}
udev_raidz2_setup() {
local RANKS=$1
local CHANNELS=$2
RAIDZ2S=()
for RANK in `seq 1 ${RANKS}`; do
RAIDZ2=("raidz2")
for CHANNEL in `seq 1 ${CHANNELS}`; do
DISK=`udev_cr2d ${CHANNEL} ${RANK}`
RAIDZ2[${CHANNEL}]="${DEVDIR}/${DISK}"
done
RAIDZ2S[${RANK}]="${RAIDZ2[*]}"
done
return 0
}
run_one_test() {
local TEST_NUM=$1
local TEST_NAME=$2
printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"
test_${TEST_NUM}
}
skip_one_test() {
local TEST_NUM=$1
local TEST_NAME=$2
printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"
skip
}
run_test() {
local TEST_NUM=$1
local TEST_NAME=$2
for i in ${TESTS_SKIP[@]}; do
if [[ $i == ${TEST_NUM} ]] ; then
skip_one_test ${TEST_NUM} "${TEST_NAME}"
return 0
fi
done
if [ "${TESTS_RUN[0]}" = "*" ]; then
run_one_test ${TEST_NUM} "${TEST_NAME}"
else
for i in ${TESTS_RUN[@]}; do
if [[ $i == ${TEST_NUM} ]] ; then
run_one_test ${TEST_NUM} "${TEST_NAME}"
return 0
fi
done
skip_one_test ${TEST_NUM} "${TEST_NAME}"
fi
}
wait_udev() {
local DEVICE=$1
local DELAY=$2
local COUNT=0
udev_trigger
while [ ! -e ${DEVICE} ]; do
if [ ${COUNT} -gt ${DELAY} ]; then
return 1
fi
let COUNT=${COUNT}+1
sleep 1
done
return 0
}
stack_clear() {
local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
local STACK_TRACER_ENABLED=/proc/sys/kernel/stack_tracer_enabled
if [ -e $STACK_MAX_SIZE ]; then
echo 1 >$STACK_TRACER_ENABLED
echo 0 >$STACK_MAX_SIZE
fi
}
stack_check() {
local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
local STACK_TRACE=/sys/kernel/debug/tracing/stack_trace
local STACK_LIMIT=7000
if [ -e $STACK_MAX_SIZE ]; then
STACK_SIZE=`cat $STACK_MAX_SIZE`
if [ $STACK_SIZE -ge $STACK_LIMIT ]; then
echo
echo "Warning: max stack size $STACK_SIZE bytes"
cat $STACK_TRACE
fi
fi
}
kill_zed() {
if [ -f $ZED_PIDFILE ]; then
kill $(cat $ZED_PIDFILE)
fi
}
#!/bin/sh
# Directories
export BIN_DIR=
export SBIN_DIR=
export ZTS_DIR=
export SCRIPT_DIR=
# General commands
export ZDB=${ZDB:-$SBIN_DIR/zdb}
export ZFS=${ZFS:-$SBIN_DIR/zfs}
export ZPOOL=${ZPOOL:-$SBIN_DIR/zpool}
export ZTEST=${ZTEST:-$SBIN_DIR/ztest}
export ZFS_SH=${ZFS_SH:-$SCRIPT_DIR/zfs.sh}
# Test Suite
export RUNFILE_DIR=${RUNFILE_DIR:-$ZTS_DIR/runfiles}
export TEST_RUNNER=${TEST_RUNNER:-$ZTS_DIR/test-runner/bin/test-runner.py}
export STF_TOOLS=${STF_TOOLS:-$ZTS_DIR/test-runner}
export STF_SUITE=${STF_SUITE:-$ZTS_DIR/zfs-tests}
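
The test scripts pick these defaults up by sourcing the generated file next to
them, using the same pattern as the scripts below:

    BASE_DIR=$(dirname "$0")
    SCRIPT_COMMON=common.sh
    if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then
        . "${BASE_DIR}/${SCRIPT_COMMON}"
    else
        echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
    fi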


@ -40,10 +40,6 @@ PRE_BUILD="configure
then
echo --enable-debug
fi
if [[ \${ZFS_DKMS_ENABLE_DMU_TX,,} == @(y|yes) ]]
then
echo --enable-debug-dmu-tx
fi
}
)
"
@ -86,10 +82,7 @@ DEST_MODULE_LOCATION[3]="/extra/zcommon/zcommon"
BUILT_MODULE_NAME[4]="zfs"
BUILT_MODULE_LOCATION[4]="module/zfs/"
DEST_MODULE_LOCATION[4]="/extra/zfs/zfs"
BUILT_MODULE_NAME[5]="zpios"
BUILT_MODULE_LOCATION[5]="module/zpios/"
DEST_MODULE_LOCATION[5]="/extra/zpios/zpios"
BUILT_MODULE_NAME[6]="icp"
BUILT_MODULE_LOCATION[6]="module/icp/"
DEST_MODULE_LOCATION[6]="/extra/icp/icp"
BUILT_MODULE_NAME[5]="icp"
BUILT_MODULE_LOCATION[5]="module/icp/"
DEST_MODULE_LOCATION[5]="/extra/icp/icp"
EOF


@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
PROG=$0
@ -19,6 +19,6 @@ if [ -z "${arch}" -o -z "${kver}" -o -z "${pkgname}" -o \
exit 1
fi
cp ${tree}/${pkgname}/${pkgver}/build/zfs_config.h \
${tree}/${pkgname}/${pkgver}/build/module/Module.symvers \
${tree}/${pkgname}/${pkgver}/${kver}/${arch}/
cp "${tree}/${pkgname}/${pkgver}/build/zfs_config.h" \
"${tree}/${pkgname}/${pkgver}/build/module/Module.symvers" \
"${tree}/${pkgname}/${pkgver}/${kver}/${arch}/"


@ -1,214 +0,0 @@
#!/bin/bash
BASETANK="share"
DATE=`date "+%Y%m%d"`
TEST_SMBFS=0
TEST_DESTROY=0
if [ -z "$1" ]; then
echo "Usage: `basename $0` [unpack]<[smbfs][snapshot][all]>"
exit 1
fi
set_onoff() {
type="$1"
dataset="$2"
toggle="$3"
current=`zfs get -H $type -o value $dataset`
if [ "$current" != "$toggle" ]; then
run "zfs set $type=$toggle $dataset"
fi
}
check_exists() {
dataset="$1"
extra=""
[ -n "$2" ] && extra="$2"
zfs get all "$dataset" > /dev/null 2>&1
if [ $? != 0 ]; then
run "zfs create $extra $dataset"
fi
}
check_shares() {
if [ "$TEST_SMBFS" == "1" ]; then
echo "Shares:"
echo "=> usershare list:"
net usershare list
echo
echo "=> /etc/dfs/sharetab:"
cat /etc/dfs/sharetab
echo
fi
sleep 2
}
test_header() {
echo "TEST: $*"
echo "======================================"
}
run() {
cmd="$*"
echo "CMD: $cmd"
$cmd
}
# ---------
# Needs more work...
if echo "$*" | grep -qi "unpack"; then
zfs unmount -a
zfs unshare -a
run "zfs destroy -r $BASETANK/tests"
sh /etc/init.d/zfs stop
# for tid in `grep ^tid /proc/net/iet/volume | sed "s@.*:\([0-9].*\) name.*@\1@"`
# do
# ietadm --op delete --tid $tid
# done
set -e
rmmod `lsmod | grep ^z | grep -v zlib_deflate | sed 's@ .*@@'` spl zlib_deflate
pushd / > /dev/null
[ -f "tmp/zfs.tgz" ] && tar xzf tmp/zfs.tgz && rm tmp/zfs.tgz
[ -f "tmp/spl.tgz" ] && tar xzf tmp/spl.tgz && rm tmp/spl.tgz
popd > /dev/null
depmod -a
sh /etc/init.d/zfs start
set +e
fi
# ---------
if echo "$*" | egrep -qi "smbfs|all"; then
check_exists $BASETANK/tests
TEST_SMBFS=1
test_header "Exists || Create"
str=
for volnr in 1 2 3; do
check_exists $BASETANK/tests/smbfs$volnr
str="$str $BASETANK/tests/smbfs$volnr"
done
run "zfs get sharesmb $str"
# Set sharesmb=on
test_header "Enable SMB share"
for volnr in 1 2 3; do
set_onoff sharesmb "$BASETANK/tests/smbfs$volnr" on
check_shares
done
# Share all
test_header "Share all (individually)"
for volnr in 1 2 3; do
run "zfs share $BASETANK/tests/smbfs$volnr"
check_shares
done
# Unshare all
test_header "Unshare all (individually)"
for volnr in 1 2 3; do
run "zfs unshare $BASETANK/tests/smbfs$volnr"
check_shares
done
# Change mountpoint - first unshare and then share individual
test_header "Change mount point (unshare ; share)"
mkdir -p /tests
set_onoff sharesmb "$str" off
for volnr in 3 1 2; do
run "zfs set mountpoint=/tests/smbfs$volnr $BASETANK/tests/smbfs$volnr"
echo "CMD: mount | grep ^$BASETANK/tests/smbfs$volnr"
mount | grep ^$BASETANK/tests/smbfs$volnr
echo
run "zfs mount $BASETANK/tests/smbfs$volnr"
echo "CMD: mount | grep ^$BASETANK/tests/smbfs$volnr"
mount | grep ^$BASETANK/tests/smbfs$volnr
echo
set_onoff sharesmb "$BASETANK/tests/smbfs$volnr" on
check_shares
run "zfs share $BASETANK/tests/smbfs$volnr"
check_shares
echo "-------------------"
done
# Change mountpoint - remounting
test_header "Change mount point (remounting)"
for volnr in 3 1 2; do
run "zfs set mountpoint=/$BASETANK/tests/smbfs$volnr $BASETANK/tests/smbfs$volnr"
echo "CMD: mount | grep ^$BASETANK/tests/smbfs$volnr"
mount | grep ^$BASETANK/tests/smbfs$volnr
echo
# => Doesn't seem to remount (!?)
run "zfs mount $BASETANK/tests/smbfs$volnr"
echo "CMD: mount | grep ^$BASETANK/tests/smbfs$volnr"
mount | grep ^$BASETANK/tests/smbfs$volnr
echo
# => Doesn't seem to reshare (!?)
check_shares
run "zfs share $BASETANK/tests/smbfs$volnr"
check_shares
echo "-------------------"
done
fi
# ---------
if echo "$*" | egrep -qi "smbfs|all"; then
test_header "Unshare + Share all"
run "zfs share -a" ; check_shares
run "zfs unshare -a" ; check_shares
fi
# ---------
if echo "$*" | grep -qi "snapshot|all"; then
test_header "Snapshots"
echo ; echo "-------------------"
check_exists $BASETANK/tests/destroy
check_exists $BASETANK/tests/destroy/destroy1
run "zfs destroy -r $BASETANK/tests/destroy"
echo ; echo "-------------------"
check_exists $BASETANK/tests/destroy
run "zfs snapshot $BASETANK/tests/destroy@$DATE"
run "zfs destroy -r $BASETANK/tests/destroy"
echo ; echo "-------------------"
check_exists $BASETANK/tests/destroy
run "zfs snapshot $BASETANK/tests/destroy@$DATE"
run "zfs destroy -r $BASETANK/tests/destroy@$DATE"
run "zfs destroy -r $BASETANK/tests/destroy"
fi
if echo "$*" | egrep -qi "smbfs|snapshot|all"; then
test_header "Cleanup (Share all + Destroy all)"
run "zfs share -a"
check_shares
run "zfs destroy -r $BASETANK/tests"
check_shares
run "zfs list"
fi


@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
#
# This script is designed to facilitate in-tree development and testing
# by installing symlinks on your system which refer to in-tree helper
@ -17,20 +17,30 @@
# --sysconfdir=DIR install zfs configuration files [PREFIX/etc]
#
basedir="$(dirname $0)"
BASE_DIR=$(dirname "$0")
SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
. "${basedir}/${SCRIPT_COMMON}"
if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then
. "${BASE_DIR}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
PROG=zfs-helpers.sh
DRYRUN=
INSTALL=
REMOVE=
VERBOSE=
DRYRUN="no"
INSTALL="no"
REMOVE="no"
VERBOSE="no"
fail() {
echo "${PROG}: $1" >&2
exit 1
}
msg() {
if [ "$VERBOSE" = "yes" ]; then
echo "$@"
fi
}
usage() {
cat << EOF
@ -60,16 +70,16 @@ while getopts 'hdirv' OPTION; do
exit 1
;;
d)
DRYRUN=1
DRYRUN="yes"
;;
i)
INSTALL=1
INSTALL="yes"
;;
r)
REMOVE=1
REMOVE="yes"
;;
v)
VERBOSE=1
VERBOSE="yes"
;;
?)
usage
@ -78,27 +88,29 @@ while getopts 'hdirv' OPTION; do
esac
done
if [ "${INSTALL}" -a "${REMOVE}" ]; then
usage
die "Specify -i or -r but not both"
if [ "$INSTALL" = "yes" -a "$REMOVE" = "yes" ]; then
fail "Specify -i or -r but not both"
fi
if [ ! "${INSTALL}" -a ! "${REMOVE}" ]; then
usage
die "Either -i or -r must be specified"
if [ "$INSTALL" = "no" -a "$REMOVE" = "no" ]; then
fail "Either -i or -r must be specified"
fi
if [ $(id -u) != 0 ]; then
die "Must run as root"
if [ "$(id -u)" != "0" ]; then
fail "Must run as root"
fi
if [ "$VERBOSE" ]; then
if [ "$INTREE" != "yes" ]; then
fail "Must be run in-tree"
fi
if [ "$VERBOSE" = "yes" ]; then
echo "--- Configuration ---"
echo "udevdir: $udevdir"
echo "udevruledir: $udevruledir"
echo "mounthelperdir: $mounthelperdir"
echo "sysconfdir: $sysconfdir"
echo "DRYRUN: $DRYRUN"
echo "udevdir: $INSTALL_UDEV_DIR"
echo "udevruledir: $INSTALL_UDEV_RULE_DIR"
echo "mounthelperdir: $INSTALL_MOUNT_HELPER_DIR"
echo "sysconfdir: $INSTALL_SYSCONF_DIR"
echo "dryrun: $DRYRUN"
echo
fi
@ -106,18 +118,19 @@ install() {
local src=$1
local dst=$2
if [ -h $dst ]; then
if [ -h "$dst" ]; then
echo "Symlink exists: $dst"
elif [ -e $dst ]; then
elif [ -e "$dst" ]; then
echo "File exists: $dst"
elif [ ! -e $src ]; then
elif [ ! -e "$src" ]; then
echo "Source missing: $src"
else
msg "ln -s $src $dst"
if [ ! "$DRYRUN" ]; then
mkdir -p $(dirname $dst) &>/dev/null
ln -s $src $dst
if [ "$DRYRUN" = "no" ]; then
DIR=$(dirname "$dst")
mkdir -p "$DIR" >/dev/null 2>&1
ln -s "$src" "$dst"
fi
fi
}
@ -125,31 +138,42 @@ install() {
remove() {
local dst=$1
if [ -h $dst ]; then
if [ -h "$dst" ]; then
msg "rm $dst"
rm $dst
rmdir $(dirname $dst) &>/dev/null
rm "$dst"
DIR=$(dirname "$dst")
rmdir "$DIR" >/dev/null 2>&1
elif [ -e "$dst" ]; then
echo "Expected symlink: $dst"
fi
}
if [ ${INSTALL} ]; then
install $CMDDIR/mount_zfs/mount.zfs $mounthelperdir/mount.zfs
install $CMDDIR/fsck_zfs/fsck.zfs $mounthelperdir/fsck.zfs
install $CMDDIR/zvol_id/zvol_id $udevdir/zvol_id
install $CMDDIR/vdev_id/vdev_id $udevdir/vdev_id
install $UDEVRULEDIR/60-zvol.rules $udevruledir/60-zvol.rules
install $UDEVRULEDIR/69-vdev.rules $udevruledir/69-vdev.rules
install $UDEVRULEDIR/90-zfs.rules $udevruledir/90-zfs.rules
install $CMDDIR/zpool/zpool.d $sysconfdir/zfs/zpool.d
if [ "${INSTALL}" = "yes" ]; then
install "$CMD_DIR/mount_zfs/mount.zfs" \
"$INSTALL_MOUNT_HELPER_DIR/mount.zfs"
install "$CMD_DIR/fsck_zfs/fsck.zfs" \
"$INSTALL_MOUNT_HELPER_DIR/fsck.zfs"
install "$CMD_DIR/zvol_id/zvol_id" \
"$INSTALL_UDEV_DIR/zvol_id"
install "$CMD_DIR/vdev_id/vdev_id" \
"$INSTALL_UDEV_DIR/vdev_id"
install "$UDEV_RULE_DIR/60-zvol.rules" \
"$INSTALL_UDEV_RULE_DIR/60-zvol.rules"
install "$UDEV_RULE_DIR/69-vdev.rules" \
"$INSTALL_UDEV_RULE_DIR/69-vdev.rules"
install "$UDEV_RULE_DIR/90-zfs.rules" \
"$INSTALL_UDEV_RULE_DIR/90-zfs.rules"
install "$CMD_DIR/zpool/zpool.d" \
"$INSTALL_SYSCONF_DIR/zfs/zpool.d"
else
remove $mounthelperdir/mount.zfs
remove $mounthelperdir/fsck.zfs
remove $udevdir/zvol_id
remove $udevdir/vdev_id
remove $udevruledir/60-zvol.rules
remove $udevruledir/69-vdev.rules
remove $udevruledir/90-zfs.rules
remove $sysconfdir/zfs/zpool.d
remove "$INSTALL_MOUNT_HELPER_DIR/mount.zfs"
remove "$INSTALL_MOUNT_HELPER_DIR/fsck.zfs"
remove "$INSTALL_UDEV_DIR/zvol_id"
remove "$INSTALL_UDEV_DIR/vdev_id"
remove "$INSTALL_UDEV_RULE_DIR/60-zvol.rules"
remove "$INSTALL_UDEV_RULE_DIR/69-vdev.rules"
remove "$INSTALL_UDEV_RULE_DIR/90-zfs.rules"
remove "$INSTALL_SYSCONF_DIR/zfs/zpool.d"
fi
exit 0
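
A typical in-tree round trip with this helper, run as root from the top of the
build tree (paths assumed), looks like:

    sudo ./scripts/zfs-helpers.sh -i -v    # install symlinks to in-tree helpers
    sudo ./scripts/zfs-helpers.sh -r -v    # remove the symlinks again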


@ -20,21 +20,21 @@
#
# CDDL HEADER END
#
basedir=$(dirname "$0")
BASE_DIR=$(dirname "$0")
SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
. "${basedir}/${SCRIPT_COMMON}"
if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then
. "${BASE_DIR}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
PROG=zfs-tests.sh
VERBOSE=
VERBOSE="no"
QUIET=
CLEANUP=1
CLEANUPALL=0
LOOPBACK=1
CLEANUP="yes"
CLEANUPALL="no"
LOOPBACK="yes"
FILESIZE="4G"
RUNFILE=${RUNFILE:-"linux.run"}
FILEDIR=${FILEDIR:-/var/tmp}
@ -45,6 +45,26 @@ ZFS_DBGMSG="$STF_SUITE/callbacks/zfs_dbgmsg.ksh"
ZFS_DMESG="$STF_SUITE/callbacks/zfs_dmesg.ksh"
ZFS_MMP="$STF_SUITE/callbacks/zfs_mmp.ksh"
TESTFAIL_CALLBACKS=${TESTFAIL_CALLBACKS:-"$ZFS_DBGMSG:$ZFS_DMESG:$ZFS_MMP"}
LOSETUP=${LOSETUP:-/sbin/losetup}
DMSETUP=${DMSETUP:-/sbin/dmsetup}
#
# Log an informational message when additional verbosity is enabled.
#
msg() {
if [ "$VERBOSE" = "yes" ]; then
echo "$@"
fi
}
#
# Log a failure message, cleanup, and return an error.
#
fail() {
echo -e "$PROG: $1" >&2
cleanup
exit 1
}
#
# Attempt to remove loopback devices and files which were created earlier
@ -52,11 +72,11 @@ TESTFAIL_CALLBACKS=${TESTFAIL_CALLBACKS:-"$ZFS_DBGMSG:$ZFS_DMESG:$ZFS_MMP"}
# to the script to suppress cleanup for debugging purposes.
#
cleanup() {
if [ $CLEANUP -eq 0 ]; then
if [ "$CLEANUP" = "no" ]; then
return 0
fi
if [ $LOOPBACK -eq 1 ]; then
if [ "$LOOPBACK" = "yes" ]; then
for TEST_LOOPBACK in ${LOOPBACKS}; do
LOOP_DEV=$(basename "$TEST_LOOPBACK")
DM_DEV=$(sudo "${DMSETUP}" ls 2>/dev/null | \
@ -78,8 +98,7 @@ cleanup() {
rm -f "${TEST_FILE}" &>/dev/null
done
# Preserve in-tree symlinks to aid debugging.
if [ -z "${INTREE}" ] && [ -d "$STF_PATH" ]; then
if [ "$STF_PATH_REMOVE" = "yes" ] && [ -d "$STF_PATH" ]; then
rm -Rf "$STF_PATH"
fi
}
@ -121,21 +140,12 @@ cleanup_all() {
done
}
#
# Log a failure message, cleanup, and return an error.
#
fail() {
echo -e "${PROG}: $1" >&2
cleanup
exit 1
}
#
# Takes a name as the only argument and looks for the following variations
# on that name. If one is found it is returned.
#
# $RUNFILEDIR/<name>
# $RUNFILEDIR/<name>.run
# $RUNFILE_DIR/<name>
# $RUNFILE_DIR/<name>.run
# <name>
# <name>.run
#
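# For example (illustrative): with the default RUNFILE_DIR a call such as
# `find_runfile linux` resolves to "$RUNFILE_DIR/linux.run", while a path
# to an existing runfile outside that directory is returned as given.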
@ -143,10 +153,10 @@ find_runfile() {
local NAME=$1
local RESULT=""
if [ -f "$RUNFILEDIR/$NAME" ]; then
RESULT="$RUNFILEDIR/$NAME"
elif [ -f "$RUNFILEDIR/$NAME.run" ]; then
RESULT="$RUNFILEDIR/$NAME.run"
if [ -f "$RUNFILE_DIR/$NAME" ]; then
RESULT="$RUNFILE_DIR/$NAME"
elif [ -f "$RUNFILE_DIR/$NAME.run" ]; then
RESULT="$RUNFILE_DIR/$NAME.run"
elif [ -f "$NAME" ]; then
RESULT="$NAME"
elif [ -f "$NAME.run" ]; then
@ -188,40 +198,44 @@ create_links() {
constrain_path() {
. "$STF_SUITE/include/commands.cfg"
if [ -n "${INTREE}" ]; then
STF_PATH="$BUILDDIR/bin"
if [ "$INTREE" = "yes" ]; then
# Constrained path set to ./zfs/bin/
STF_PATH="$BIN_DIR"
STF_PATH_REMOVE="no"
STF_MISSING_BIN=""
if [ ! -d "$STF_PATH" ]; then
mkdir "$STF_PATH"
fi
else
SYSTEMDIR=${SYSTEMDIR:-/var/tmp/constrained_path.XXXX}
STF_PATH=$(/bin/mktemp -d "$SYSTEMDIR")
fi
STF_MISSING_BIN=""
chmod 755 "$STF_PATH" || fail "Couldn't chmod $STF_PATH"
fi
# Standard system utilities
create_links "/bin /usr/bin /sbin /usr/sbin" "$SYSTEM_FILES"
if [ -z "${INTREE}" ]; then
# Special case links for standard zfs utilities
create_links "/bin /usr/bin /sbin /usr/sbin" "$ZFS_FILES"
# Special case links for zfs test suite utilties
create_links "$TESTSDIR/bin" "$ZFSTEST_FILES"
else
# Special case links for standard zfs utilities
DIRS="$(find "$CMDDIR" -type d \( ! -name .deps -a \
DIRS="$(find "$CMD_DIR" -type d \( ! -name .deps -a \
! -name .libs \) -print | tr '\n' ' ')"
create_links "$DIRS" "$ZFS_FILES"
# Special case links for zfs test suite utilties
DIRS="$(find "$TESTSDIR" -type d \( ! -name .deps -a \
# Special case links for zfs test suite utilities
DIRS="$(find "$STF_SUITE" -type d \( ! -name .deps -a \
! -name .libs \) -print | tr '\n' ' ')"
create_links "$DIRS" "$ZFSTEST_FILES"
else
# Constrained path set to /var/tmp/constrained_path.*
SYSTEMDIR=${SYSTEMDIR:-/var/tmp/constrained_path.XXXX}
STF_PATH=$(/bin/mktemp -d "$SYSTEMDIR")
STF_PATH_REMOVE="yes"
STF_MISSING_BIN=""
chmod 755 "$STF_PATH" || fail "Couldn't chmod $STF_PATH"
# Special case links for standard zfs utilities
create_links "/bin /usr/bin /sbin /usr/sbin" "$ZFS_FILES"
# Special case links for zfs test suite utilities
create_links "$STF_SUITE/bin" "$ZFSTEST_FILES"
fi
# Standard system utilities
create_links "/bin /usr/bin /sbin /usr/sbin" "$SYSTEM_FILES"
# Exceptions
ln -fs "$STF_PATH/awk" "$STF_PATH/nawk"
ln -fs /sbin/fsck.ext2 "$STF_PATH/fsck"
@ -250,6 +264,7 @@ OPTIONS:
-x Remove all testpools, dm, lo, and files (unsafe)
-k Disable cleanup after test failure
-f Use files only, disables block device tests
-c Only create and populate constrained path
-d DIR Use DIR for files and loopback devices
-s SIZE Use vdevs of SIZE (default: 4G)
-r RUNFILE Run tests in RUNFILE (default: linux.run)
@ -270,7 +285,7 @@ $0 -x
EOF
}
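# Illustrative invocations, assuming an in-tree checkout:
#
#   ./scripts/zfs-tests.sh -v              # verbose run of the default runfile
#   ./scripts/zfs-tests.sh -c              # only create the constrained PATH
#   ./scripts/zfs-tests.sh -k -t <test>    # run one test, keep state on failure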
while getopts 'hvqxkfd:s:r:?t:u:' OPTION; do
while getopts 'hvqxkfcd:s:r:?t:u:' OPTION; do
case $OPTION in
h)
usage
@ -278,19 +293,23 @@ while getopts 'hvqxkfd:s:r:?t:u:' OPTION; do
;;
v)
# shellcheck disable=SC2034
VERBOSE=1
VERBOSE="yes"
;;
q)
QUIET="-q"
;;
x)
CLEANUPALL=1
CLEANUPALL="yes"
;;
k)
CLEANUP=0
CLEANUP="no"
;;
f)
LOOPBACK=0
LOOPBACK="no"
;;
c)
constrain_path
exit
;;
d)
FILEDIR="$OPTARG"
@ -323,7 +342,7 @@ FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 $FILEDIR/file-vdev2"}
LOOPBACKS=${LOOPBACKS:-""}
if [ ${#SINGLETEST[@]} -ne 0 ]; then
RUNFILEDIR="/var/tmp"
RUNFILE_DIR="/var/tmp"
RUNFILE="zfs-tests.$$.run"
SINGLEQUIET="False"
@ -331,7 +350,7 @@ if [ ${#SINGLETEST[@]} -ne 0 ]; then
SINGLEQUIET="True"
fi
cat >$RUNFILEDIR/$RUNFILE << EOF
cat >$RUNFILE_DIR/$RUNFILE << EOF
[DEFAULT]
pre =
quiet = $SINGLEQUIET
@ -357,7 +376,7 @@ EOF
CLEANUPSCRIPT="cleanup"
fi
cat >>$RUNFILEDIR/$RUNFILE << EOF
cat >>$RUNFILE_DIR/$RUNFILE << EOF
[$SINGLETESTDIR]
tests = ['$SINGLETESTFILE']
@ -394,7 +413,7 @@ if [ "$(sudo whoami)" != "root" ]; then
fi
#
# Constain the available binaries to a known set.
# Constrain the available binaries to a known set.
#
constrain_path
@ -406,14 +425,14 @@ constrain_path
"Missing $STF_SUITE/include/default.cfg file."
#
# Verify the ZFS module stack if loaded.
# Verify the ZFS module stack is loaded.
#
sudo "${ZFS_SH}" &>/dev/null
#
# Attempt to cleanup all previous state for a new test run.
#
if [ $CLEANUPALL -ne 0 ]; then
if [ "$CLEANUPALL" = "yes" ]; then
cleanup_all
fi
@ -457,9 +476,10 @@ if [ -z "${DISKS}" ]; then
#
# If requested setup loopback devices backed by the sparse files.
#
if [ $LOOPBACK -eq 1 ]; then
if [ "$LOOPBACK" = "yes" ]; then
DISKS=""
check_loop_utils
test -x "$LOSETUP" || fail "$LOSETUP utility must be installed"
for TEST_FILE in ${FILES}; do
TEST_LOOPBACK=$(sudo "${LOSETUP}" -f)
@ -472,7 +492,7 @@ if [ -z "${DISKS}" ]; then
fi
fi
NUM_DISKS=$(echo "${DISKS}" | $AWK '{print NF}')
NUM_DISKS=$(echo "${DISKS}" | awk '{print NF}')
[ "$NUM_DISKS" -lt 3 ] && fail "Not enough disks ($NUM_DISKS/3 minimum)"
#


@ -1,19 +1,34 @@
#!/bin/bash
#!/bin/sh
#
# A simple script to load/unload the ZFS module stack.
#
# A simple script to simply the loading/unloading the ZFS module stack.
basedir=$(dirname "$0")
BASE_DIR=$(dirname "$0")
SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
. "${basedir}/${SCRIPT_COMMON}"
if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then
. "${BASE_DIR}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
# shellcheck disable=SC2034
PROG=zfs.sh
UNLOAD=
VERBOSE="no"
UNLOAD="no"
ZED_PIDFILE=${ZED_PIDFILE:-/var/run/zed.pid}
LDMOD=${LDMOD:-/sbin/modprobe}
KMOD_ZLIB_DEFLATE=${KMOD_ZLIB_DEFLATE:-zlib_deflate}
KMOD_ZLIB_INFLATE=${KMOD_ZLIB_INFLATE:-zlib_inflate}
KMOD_SPL=${KMOD_SPL:-spl}
KMOD_SPLAT=${KMOD_SPLAT:-splat}
KMOD_ZAVL=${KMOD_ZAVL:-zavl}
KMOD_ZNVPAIR=${KMOD_ZNVPAIR:-znvpair}
KMOD_ZUNICODE=${KMOD_ZUNICODE:-zunicode}
KMOD_ZCOMMON=${KMOD_ZCOMMON:-zcommon}
KMOD_ICP=${KMOD_ICP:-icp}
KMOD_ZFS=${KMOD_ZFS:-zfs}
usage() {
cat << EOF
@ -27,13 +42,6 @@ OPTIONS:
-h Show this message
-v Verbose
-u Unload modules
MODULE-OPTIONS:
Must be of the from module="options", for example:
$0 zfs="zfs_prefetch_disable=1"
$0 zfs="zfs_prefetch_disable=1 zfs_mdcomp_disable=1"
EOF
}
@ -44,11 +52,10 @@ while getopts 'hvu' OPTION; do
exit 1
;;
v)
# shellcheck disable=SC2034
VERBOSE=1
VERBOSE="yes"
;;
u)
UNLOAD=1
UNLOAD="yes"
;;
?)
usage
@ -57,20 +64,158 @@ while getopts 'hvu' OPTION; do
esac
done
kill_zed() {
if [ -f "$ZED_PIDFILE" ]; then
PID=$(cat "$ZED_PIDFILE")
kill "$PID"
fi
}
check_modules() {
local LOADED_MODULES=""
local MISSING_MODULES=""
for KMOD in $KMOD_SPL $KMOD_SPLAT $KMOD_ZAVL $KMOD_ZNVPAIR \
$KMOD_ZUNICODE $KMOD_ZCOMMON $KMOD_ICP $KMOD_ZFS; do
NAME=$(basename "$KMOD" .ko)
if lsmod | egrep -q "^${NAME}"; then
LOADED_MODULES="$LOADED_MODULES\t$NAME\n"
fi
if ! modinfo "$KMOD" >/dev/null 2>&1; then
MISSING_MODULES="$MISSING_MODULES\t${KMOD}\n"
fi
done
if [ -n "$LOADED_MODULES" ]; then
printf "Unload the kernel modules by running '%s -u':\n" "$PROG"
printf "%b" "$LOADED_MODULES"
exit 1
fi
if [ -n "$MISSING_MODULES" ]; then
printf "The following kernel modules can not be found:\n"
printf "%b" "$MISSING_MODULES"
exit 1
fi
return 0
}
load_module() {
local KMOD=$1
FILE=$(modinfo "$KMOD" | awk '/^filename:/ {print $2}')
VERSION=$(modinfo "$KMOD" | awk '/^version:/ {print $2}')
if [ "$VERBOSE" = "yes" ]; then
echo "Loading: $FILE ($VERSION)"
fi
$LDMOD "$KMOD" >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Failed to load $KMOD"
return 1
fi
return 0
}
load_modules() {
mkdir -p /etc/zfs
modprobe "$KMOD_ZLIB_DEFLATE" >/dev/null
modprobe "$KMOD_ZLIB_INFLATE" >/dev/null
for KMOD in $KMOD_SPL $KMOD_SPLAT $KMOD_ZAVL $KMOD_ZNVPAIR \
$KMOD_ZUNICODE $KMOD_ZCOMMON $KMOD_ICP $KMOD_ZFS; do
load_module "$KMOD" || return 1
done
if [ "$VERBOSE" = "yes" ]; then
echo "Successfully loaded ZFS module stack"
fi
return 0
}
unload_module() {
local KMOD=$1
NAME=$(basename "$KMOD" .ko)
FILE=$(modinfo "$KMOD" | awk '/^filename:/ {print $2}')
VERSION=$(modinfo "$KMOD" | awk '/^version:/ {print $2}')
if [ "$VERBOSE" = "yes" ]; then
echo "Unloading: $KMOD ($VERSION)"
fi
rmmod "$NAME" || echo "Failed to unload $NAME"
return 0
}
unload_modules() {
for KMOD in $KMOD_ZFS $KMOD_ICP $KMOD_ZCOMMON $KMOD_ZUNICODE \
$KMOD_ZNVPAIR $KMOD_ZAVL $KMOD_SPLAT $KMOD_SPL; do
NAME=$(basename "$KMOD" .ko)
USE_COUNT=$(lsmod | egrep "^${NAME} " | awk '{print $3}')
if [ "$USE_COUNT" = "0" ] ; then
unload_module "$KMOD" || return 1
fi
done
if [ "$VERBOSE" = "yes" ]; then
echo "Successfully unloaded ZFS module stack"
fi
return 0
}
stack_clear() {
local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
local STACK_TRACER_ENABLED=/proc/sys/kernel/stack_tracer_enabled
if [ -e "$STACK_MAX_SIZE" ]; then
echo 1 >"$STACK_TRACER_ENABLED"
echo 0 >"$STACK_MAX_SIZE"
fi
}
stack_check() {
local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
local STACK_TRACE=/sys/kernel/debug/tracing/stack_trace
local STACK_LIMIT=7600
if [ -e "$STACK_MAX_SIZE" ]; then
STACK_SIZE=$(cat "$STACK_MAX_SIZE")
if [ "$STACK_SIZE" -ge "$STACK_LIMIT" ]; then
echo
echo "Warning: max stack size $STACK_SIZE bytes"
cat "$STACK_TRACE"
fi
fi
}
if [ "$(id -u)" != 0 ]; then
die "Must run as root"
echo "Must run as root"
exit 1
fi
if [ ${UNLOAD} ]; then
if [ "$UNLOAD" = "yes" ]; then
kill_zed
umount -t zfs -a
stack_check
unload_modules
else
stack_clear
check_modules || die "${ERROR}"
load_modules "$@" || die "Failed to load modules"
wait_udev /dev/zfs 30 || die "'/dev/zfs' was not created"
check_modules
load_modules "$@"
udevadm trigger
udevadm settle
fi
exit 0
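
Typical usage of the rewritten script, run as root with in-tree paths assumed:

    sudo ./scripts/zfs.sh       # load the full module stack
    sudo ./scripts/zfs.sh -v    # same, printing each module as it is loaded
    sudo ./scripts/zfs.sh -u    # stop zed, unmount zfs, and unload the stack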


@ -53,23 +53,21 @@
# master Pass Pass Pass
# installed Pass Pass Pass
#
basedir="$(dirname $0)"
BASE_DIR=$(dirname "$0")
SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
. "${basedir}/${SCRIPT_COMMON}"
if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then
. "${BASE_DIR}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
PROG=zimport.sh
SRC_TAGS="zfs-0.6.1 zfs-0.6.2 master"
SRC_TAGS="zfs-0.6.5.11 master"
POOL_TAGS="all master"
TEST_DIR=`mktemp -u -d -p /var/tmp zimport.XXXXXXXX`
KEEP=0
VERBOSE=0
COLOR=1
TEST_DIR=$(mktemp -u -d -p /var/tmp zimport.XXXXXXXX)
KEEP="no"
VERBOSE="no"
COLOR="yes"
REPO="https://github.com/zfsonlinux"
IMAGES_DIR="$SCRIPTDIR/zfs-images/"
IMAGES_TAR="https://github.com/zfsonlinux/zfs-images/tarball/master"
@ -80,6 +78,11 @@ CONFIG_OPTIONS=${CONFIG_OPTIONS:-""}
MAKE_LOG="make.log"
MAKE_OPTIONS=${MAKE_OPTIONS:-"-s -j$(nproc)"}
COLOR_GREEN="\033[0;32m"
COLOR_RED="\033[0;31m"
COLOR_BROWN="\033[0;33m"
COLOR_RESET="\033[0m"
usage() {
cat << EOF
USAGE:
@ -109,13 +112,13 @@ while getopts 'hvckr:s:i:p:f:?' OPTION; do
exit 1
;;
v)
VERBOSE=1
VERBOSE="yes"
;;
c)
COLOR=0
COLOR="no"
;;
k)
KEEP=1
KEEP="yes"
;;
r)
REPO="$OPTARG"
@ -139,15 +142,51 @@ while getopts 'hvckr:s:i:p:f:?' OPTION; do
esac
done
# Initialize the test suite
init
check_modules || die "ZFS modules must be unloaded"
#
# Verify the ZFS module stack is not loaded
#
if lsmod | grep zfs >/dev/null; then
echo "ZFS modules must be unloaded"
exit 1
fi
#
# Create a random directory tree of files and sub-directories to
# act as a copy source for the various regression tests.
#
populate() {
local ROOT=$1
local MAX_DIR_SIZE=$2
local MAX_FILE_SIZE=$3
# shellcheck disable=SC2086
mkdir -p $ROOT/{a,b,c,d,e,f,g}/{h,i}
DIRS=$(find "$ROOT")
for DIR in $DIRS; do
COUNT=$((RANDOM % MAX_DIR_SIZE))
# shellcheck disable=SC2034
for i in $(seq $COUNT); do
FILE=$(mktemp -p "$DIR")
SIZE=$((RANDOM % MAX_FILE_SIZE))
dd if=/dev/urandom of="$FILE" bs=1k \
count="$SIZE" &>/dev/null
done
done
return 0
}
SRC_DIR=$(mktemp -d -p /var/tmp/ zfs.src.XXXXXXXX)
trap 'rm -Rf "$SRC_DIR"' INT TERM EXIT
populate "$SRC_DIR" 10 100
SRC_DIR="$TEST_DIR/src"
SRC_DIR_SPL="$SRC_DIR/spl"
SRC_DIR_ZFS="$SRC_DIR/zfs"
if [ $COLOR -eq 0 ]; then
if [ "$COLOR" = "no" ]; then
COLOR_GREEN=""
COLOR_BROWN=""
COLOR_RED=""
@ -166,6 +205,11 @@ fail_nonewline() {
echo -n -e "${COLOR_RED}Fail${COLOR_RESET}\t\t"
}
fail() {
echo -e "${COLOR_RED}Fail${COLOR_RESET} ($1)"
exit "$1"
}
#
# Set several helper variables which are derived from a source tag.
#
@ -179,24 +223,22 @@ fail_nonewline() {
src_set_vars() {
local TAG=$1
SPL_TAG=`echo $TAG | sed -e 's/zfs/spl/'`
SPL_DIR=$SRC_DIR_SPL/$SPL_TAG
SPL_URL=$REPO/spl/tarball/$SPL_TAG
SPL_TAG="${TAG//zfs/spl}"
SPL_DIR="$SRC_DIR_SPL/$SPL_TAG"
SPL_URL="$REPO/spl/tarball/$SPL_TAG"
ZFS_TAG=$TAG
ZFS_DIR=$SRC_DIR_ZFS/$ZFS_TAG
ZFS_URL=$REPO/zfs/tarball/$ZFS_TAG
ZFS_TAG="$TAG"
ZFS_DIR="$SRC_DIR_ZFS/$ZFS_TAG"
ZFS_URL="$REPO/zfs/tarball/$ZFS_TAG"
if [ "$TAG" = "installed" ]; then
ZPOOL_CMD=`which zpool`
ZFS_CMD=`which zfs`
ZPOOL_CMD=$(which zpool)
ZFS_CMD=$(which zfs)
ZFS_SH="/usr/share/zfs/zfs.sh"
ZPOOL_CREATE="/usr/share/zfs/zpool-create.sh"
else
ZPOOL_CMD="./cmd/zpool/zpool"
ZFS_CMD="./cmd/zfs/zfs"
ZFS_SH="./scripts/zfs.sh"
ZPOOL_CREATE="./scripts/zpool-create.sh"
fi
}
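# Worked example (values are illustrative): for TAG=zfs-0.6.5.11 the
# assignments above yield SPL_TAG=spl-0.6.5.11,
# SPL_DIR=$SRC_DIR_SPL/spl-0.6.5.11 and ZFS_DIR=$SRC_DIR_ZFS/zfs-0.6.5.11,
# while TAG=installed switches to the zpool/zfs binaries and zfs.sh
# already installed on the system.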
@ -220,8 +262,7 @@ pool_set_vars() {
POOL_DIR=$TEST_DIR/pools/$POOL_TAG
POOL_DIR_PRISTINE=$POOL_DIR/pristine
POOL_DIR_COPY=$POOL_DIR/copy
POOL_DIR_SRC=`echo -n "$SRC_DIR_ZFS/"; \
echo "$POOL_TAG" | sed -e 's/zol/zfs/'`
POOL_DIR_SRC="$SRC_DIR_ZFS/${POOL_TAG//zol/zfs}"
}
#
@ -230,46 +271,51 @@ pool_set_vars() {
# extended as needed to create more realistic pools.
#
pool_create() {
pool_set_vars $1
src_set_vars $1
pool_set_vars "$1"
src_set_vars "$1"
if [ "$POOL_TAG" != "installed" ]; then
cd $POOL_DIR_SRC
cd "$POOL_DIR_SRC"
fi
$ZFS_SH zfs="spa_config_path=$POOL_DIR_PRISTINE" || fail 1
# Create a file vdev RAIDZ pool.
FILEDIR="$POOL_DIR_PRISTINE" $ZPOOL_CREATE \
-c file-raidz -p $POOL_TAG -v -x >/dev/null || fail 2
truncate -s 1G \
"$POOL_DIR_PRISTINE/vdev1" "$POOL_DIR_PRISTINE/vdev2" \
"$POOL_DIR_PRISTINE/vdev3" "$POOL_DIR_PRISTINE/vdev4"
$ZPOOL_CMD create "$POOL_TAG" raidz \
"$POOL_DIR_PRISTINE/vdev1" "$POOL_DIR_PRISTINE/vdev2" \
"$POOL_DIR_PRISTINE/vdev3" "$POOL_DIR_PRISTINE/vdev4"
# Create a pool/fs filesystem with some random contents.
$ZFS_CMD create $POOL_TAG/fs || fail 3
populate /$POOL_TAG/fs/ 10 100
$ZFS_CMD create "$POOL_TAG/fs" || fail 3
populate "/$POOL_TAG/fs/" 10 100
# Snapshot that filesystem, clone it, remove the files/dirs,
# replace them with new files/dirs.
$ZFS_CMD snap $POOL_TAG/fs@snap || fail 4
$ZFS_CMD clone $POOL_TAG/fs@snap $POOL_TAG/clone || fail 5
$ZFS_CMD snap "$POOL_TAG/fs@snap" || fail 4
$ZFS_CMD clone "$POOL_TAG/fs@snap" "$POOL_TAG/clone" || fail 5
# shellcheck disable=SC2086
rm -Rf /$POOL_TAG/clone/* || fail 6
populate /$POOL_TAG/clone/ 10 100
populate "/$POOL_TAG/clone/" 10 100
# Scrub the pool, delay slightly, then export it. It is now
# somewhat interesting for testing purposes.
$ZPOOL_CMD scrub $POOL_TAG || fail 7
$ZPOOL_CMD scrub "$POOL_TAG" || fail 7
sleep 10
$ZPOOL_CMD export $POOL_TAG || fail 8
$ZPOOL_CMD export "$POOL_TAG" || fail 8
$ZFS_SH -u || fail 9
}
# If the zfs-images directory doesn't exist fetch a copy from Github then
# cache it in the $TEST_DIR and update $IMAGES_DIR.
if [ ! -d $IMAGES_DIR ]; then
if [ ! -d "$IMAGES_DIR" ]; then
IMAGES_DIR="$TEST_DIR/zfs-images"
mkdir -p $IMAGES_DIR
curl -sL $IMAGES_TAR | \
tar -xz -C $IMAGES_DIR --strip-components=1 || fail 10
mkdir -p "$IMAGES_DIR"
curl -sL "$IMAGES_TAR" | \
tar -xz -C "$IMAGES_DIR" --strip-components=1 || fail 10
fi
# Given the available images in the zfs-images directory substitute the
@ -277,8 +323,9 @@ fi
for TAG in $POOL_TAGS; do
if [ "$TAG" = "all" ]; then
ALL_TAGS=`ls $IMAGES_DIR | grep "tar.bz2" | \
sed 's/.tar.bz2//' | tr '\n' ' '`
# shellcheck disable=SC2010
ALL_TAGS=$(ls "$IMAGES_DIR" | grep "tar.bz2" | \
sed 's/.tar.bz2//' | tr '\n' ' ')
NEW_TAGS="$NEW_TAGS $ALL_TAGS"
else
NEW_TAGS="$NEW_TAGS $TAG"
@ -286,41 +333,41 @@ for TAG in $POOL_TAGS; do
done
POOL_TAGS="$NEW_TAGS"
if [ $VERBOSE -ne 0 ]; then
if [ "$VERBOSE" = "yes" ]; then
echo "---------------------------- Options ----------------------------"
echo "VERBOSE=$VERBOSE"
echo "KEEP=$KEEP"
echo "REPO=$REPO"
echo "SRC_TAGS="$SRC_TAGS""
echo "POOL_TAGS="$POOL_TAGS""
echo "SRC_TAGS=$SRC_TAGS"
echo "POOL_TAGS=$POOL_TAGS"
echo "PATH=$TEST_DIR"
echo
fi
if [ ! -d $TEST_DIR ]; then
mkdir -p $TEST_DIR
if [ ! -d "$TEST_DIR" ]; then
mkdir -p "$TEST_DIR"
fi
if [ ! -d $SRC_DIR ]; then
mkdir -p $SRC_DIR
if [ ! -d "$SRC_DIR" ]; then
mkdir -p "$SRC_DIR"
fi
# Print a header for all tags which are being tested.
echo "--------------------- ZFS on Linux Source Versions --------------"
printf "%-16s" " "
for TAG in $SRC_TAGS; do
src_set_vars $TAG
src_set_vars "$TAG"
if [ "$TAG" = "installed" ]; then
ZFS_VERSION=`modinfo zfs | awk '/version:/ { print $2; exit }'`
ZFS_VERSION=$(modinfo zfs | awk '/version:/ { print $2; exit }')
if [ -n "$ZFS_VERSION" ]; then
printf "%-16s" $ZFS_VERSION
printf "%-16s" "$ZFS_VERSION"
else
echo "ZFS is not installed\n"
echo -e "ZFS is not installed\n"
fail
fi
else
printf "%-16s" $TAG
printf "%-16s" "$TAG"
fi
done
echo -e "\n-----------------------------------------------------------------"
@ -331,29 +378,29 @@ echo -e "\n-----------------------------------------------------------------"
#
printf "%-16s" "Clone SPL"
for TAG in $SRC_TAGS; do
src_set_vars $TAG
src_set_vars "$TAG"
if [ -d $SPL_DIR ]; then
if [ -d "$SPL_DIR" ]; then
skip_nonewline
elif [ "$SPL_TAG" = "installed" ]; then
skip_nonewline
else
cd $SRC_DIR
cd "$SRC_DIR"
if [ ! -d $SRC_DIR_SPL ]; then
mkdir -p $SRC_DIR_SPL
if [ ! -d "$SRC_DIR_SPL" ]; then
mkdir -p "$SRC_DIR_SPL"
fi
git archive --format=tar --prefix=$SPL_TAG/ $SPL_TAG \
-o $SRC_DIR_SPL/$SPL_TAG.tar &>/dev/nul || \
rm $SRC_DIR_SPL/$SPL_TAG.tar
if [ -s $SRC_DIR_SPL/$SPL_TAG.tar ]; then
tar -xf $SRC_DIR_SPL/$SPL_TAG.tar -C $SRC_DIR_SPL
rm $SRC_DIR_SPL/$SPL_TAG.tar
git archive --format=tar --prefix="$SPL_TAG/ $SPL_TAG" \
-o "$SRC_DIR_SPL/$SPL_TAG.tar" &>/dev/nul || \
rm "$SRC_DIR_SPL/$SPL_TAG.tar"
if [ -s "$SRC_DIR_SPL/$SPL_TAG.tar" ]; then
tar -xf "$SRC_DIR_SPL/$SPL_TAG.tar" -C "$SRC_DIR_SPL"
rm "$SRC_DIR_SPL/$SPL_TAG.tar"
echo -n -e "${COLOR_GREEN}Local${COLOR_RESET}\t\t"
else
mkdir -p $SPL_DIR || fail 1
curl -sL $SPL_URL | tar -xz -C $SPL_DIR \
mkdir -p "$SPL_DIR" || fail 1
curl -sL "$SPL_URL" | tar -xz -C "$SPL_DIR" \
--strip-components=1 || fail 2
echo -n -e "${COLOR_GREEN}Remote${COLOR_RESET}\t\t"
fi
@ -367,29 +414,29 @@ printf "\n"
#
printf "%-16s" "Clone ZFS"
for TAG in $SRC_TAGS; do
src_set_vars $TAG
src_set_vars "$TAG"
if [ -d $ZFS_DIR ]; then
if [ -d "$ZFS_DIR" ]; then
skip_nonewline
elif [ "$ZFS_TAG" = "installed" ]; then
skip_nonewline
else
cd $SRC_DIR
cd "$SRC_DIR"
if [ ! -d $SRC_DIR_ZFS ]; then
mkdir -p $SRC_DIR_ZFS
if [ ! -d "$SRC_DIR_ZFS" ]; then
mkdir -p "$SRC_DIR_ZFS"
fi
git archive --format=tar --prefix=$ZFS_TAG/ $ZFS_TAG \
-o $SRC_DIR_ZFS/$ZFS_TAG.tar &>/dev/nul || \
rm $SRC_DIR_ZFS/$ZFS_TAG.tar
if [ -s $SRC_DIR_ZFS/$ZFS_TAG.tar ]; then
tar -xf $SRC_DIR_ZFS/$ZFS_TAG.tar -C $SRC_DIR_ZFS
rm $SRC_DIR_ZFS/$ZFS_TAG.tar
git archive --format=tar --prefix="$ZFS_TAG/ $ZFS_TAG" \
-o "$SRC_DIR_ZFS/$ZFS_TAG.tar" &>/dev/nul || \
rm "$SRC_DIR_ZFS/$ZFS_TAG.tar"
if [ -s "$SRC_DIR_ZFS/$ZFS_TAG.tar" ]; then
tar -xf "$SRC_DIR_ZFS/$ZFS_TAG.tar" -C "$SRC_DIR_ZFS"
rm "$SRC_DIR_ZFS/$ZFS_TAG.tar"
echo -n -e "${COLOR_GREEN}Local${COLOR_RESET}\t\t"
else
mkdir -p $ZFS_DIR || fail 1
curl -sL $ZFS_URL | tar -xz -C $ZFS_DIR \
mkdir -p "$ZFS_DIR" || fail 1
curl -sL "$ZFS_URL" | tar -xz -C "$ZFS_DIR" \
--strip-components=1 || fail 2
echo -n -e "${COLOR_GREEN}Remote${COLOR_RESET}\t\t"
fi
@ -400,18 +447,20 @@ printf "\n"
# Build the listed tags
printf "%-16s" "Build SPL"
for TAG in $SRC_TAGS; do
src_set_vars $TAG
src_set_vars "$TAG"
if [ -f $SPL_DIR/module/spl/spl.ko ]; then
if [ -f "$SPL_DIR/module/spl/spl.ko" ]; then
skip_nonewline
elif [ "$SPL_TAG" = "installed" ]; then
skip_nonewline
else
cd $SPL_DIR
cd "$SPL_DIR"
make distclean &>/dev/null
./autogen.sh >>$CONFIG_LOG 2>&1 || fail 1
./configure $CONFIG_OPTIONS >>$CONFIG_LOG 2>&1 || fail 2
make ${MAKE_OPTIONS} >>$MAKE_LOG 2>&1 || fail 3
./autogen.sh >>"$CONFIG_LOG" 2>&1 || fail 1
# shellcheck disable=SC2086
./configure $CONFIG_OPTIONS >>"$CONFIG_LOG" 2>&1 || fail 2
# shellcheck disable=SC2086
make $MAKE_OPTIONS >>"$MAKE_LOG" 2>&1 || fail 3
pass_nonewline
fi
done
@ -420,19 +469,21 @@ printf "\n"
# Build the listed tags
printf "%-16s" "Build ZFS"
for TAG in $SRC_TAGS; do
src_set_vars $TAG
src_set_vars "$TAG"
if [ -f $ZFS_DIR/module/zfs/zfs.ko ]; then
if [ -f "$ZFS_DIR/module/zfs/zfs.ko" ]; then
skip_nonewline
elif [ "$ZFS_TAG" = "installed" ]; then
skip_nonewline
else
cd $ZFS_DIR
cd "$ZFS_DIR"
make distclean &>/dev/null
./autogen.sh >>$CONFIG_LOG 2>&1 || fail 1
./configure --with-spl=$SPL_DIR $CONFIG_OPTIONS \
>>$CONFIG_LOG 2>&1 || fail 2
make ${MAKE_OPTIONS} >>$MAKE_LOG 2>&1 || fail 3
./autogen.sh >>"$CONFIG_LOG" 2>&1 || fail 1
# shellcheck disable=SC2086
./configure --with-spl="$SPL_DIR" $CONFIG_OPTIONS \
>>"$CONFIG_LOG" 2>&1 || fail 2
# shellcheck disable=SC2086
make $MAKE_OPTIONS >>"$MAKE_LOG" 2>&1 || fail 3
pass_nonewline
fi
done
@ -442,23 +493,23 @@ echo "-----------------------------------------------------------------"
# Either create a new pool using 'zpool create', or alternately restore an
# existing pool from another ZFS implementation for compatibility testing.
for TAG in $POOL_TAGS; do
pool_set_vars $TAG
pool_set_vars "$TAG"
SKIP=0
printf "%-16s" $POOL_TAG
rm -Rf $POOL_DIR
mkdir -p $POOL_DIR_PRISTINE
printf "%-16s" "$POOL_TAG"
rm -Rf "$POOL_DIR"
mkdir -p "$POOL_DIR_PRISTINE"
# Use the existing compressed image if available.
if [ -f $POOL_BZIP ]; then
tar -xjf $POOL_BZIP -C $POOL_DIR_PRISTINE \
if [ -f "$POOL_BZIP" ]; then
tar -xjf "$POOL_BZIP" -C "$POOL_DIR_PRISTINE" \
--strip-components=1 || fail 1
# Use the installed version to create the pool.
elif [ "$TAG" = "installed" ]; then
pool_create $TAG
pool_create "$TAG"
# A source build is available to create the pool.
elif [ -d $POOL_DIR_SRC ]; then
pool_create $TAG
elif [ -d "$POOL_DIR_SRC" ]; then
pool_create "$TAG"
else
SKIP=1
fi
@ -471,35 +522,36 @@ for TAG in $POOL_TAGS; do
continue
fi
src_set_vars $TAG
src_set_vars "$TAG"
if [ "$TAG" != "installed" ]; then
cd $ZFS_DIR
cd "$ZFS_DIR"
fi
$ZFS_SH zfs="spa_config_path=$POOL_DIR_COPY"
cp -a --sparse=always $POOL_DIR_PRISTINE \
$POOL_DIR_COPY || fail 2
POOL_NAME=`$ZPOOL_CMD import -d $POOL_DIR_COPY | \
awk '/pool:/ { print $2; exit 0 }'`
cp -a --sparse=always "$POOL_DIR_PRISTINE" \
"$POOL_DIR_COPY" || fail 2
POOL_NAME=$($ZPOOL_CMD import -d "$POOL_DIR_COPY" | \
awk '/pool:/ { print $2; exit 0 }')
$ZPOOL_CMD import -N -d $POOL_DIR_COPY $POOL_NAME &>/dev/null
$ZPOOL_CMD import -N -d "$POOL_DIR_COPY" \
"$POOL_NAME" &>/dev/null
if [ $? -ne 0 ]; then
fail_nonewline
ERROR=1
else
$ZPOOL_CMD export $POOL_NAME || fail 3
$ZPOOL_CMD export "$POOL_NAME" || fail 3
pass_nonewline
fi
rm -Rf $POOL_DIR_COPY
rm -Rf "$POOL_DIR_COPY"
$ZFS_SH -u || fail 4
done
printf "\n"
done
if [ ! $KEEP ]; then
rm -Rf $TEST_DIR
if [ "$KEEP" = "no" ]; then
rm -Rf "$TEST_DIR"
fi
exit $ERROR


@ -20,11 +20,10 @@
# Copyright (C) 2016 Lawrence Livermore National Security, LLC.
#
basedir=$(dirname "$0")
BASE_DIR=$(dirname "$0")
SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
. "${basedir}/${SCRIPT_COMMON}"
if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then
. "${BASE_DIR}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi


@ -1,7 +0,0 @@
pkgdataprofiledir = $(pkgdatadir)/zpios-profile
dist_pkgdataprofile_SCRIPTS = \
$(top_srcdir)/scripts/zpios-profile/zpios-profile-disk.sh \
$(top_srcdir)/scripts/zpios-profile/zpios-profile-pids.sh \
$(top_srcdir)/scripts/zpios-profile/zpios-profile-post.sh \
$(top_srcdir)/scripts/zpios-profile/zpios-profile-pre.sh \
$(top_srcdir)/scripts/zpios-profile/zpios-profile.sh


@ -1,129 +0,0 @@
#!/bin/bash
#
# /proc/diskinfo <after skipping major/minor>
# Field 1 -- device name
# Field 2 -- # of reads issued
# Field 3 -- # of reads merged
# Field 4 -- # of sectors read
# Field 5 -- # of milliseconds spent reading
# Field 6 -- # of writes completed
# Field 7 -- # of writes merged
# Field 8 -- # of sectors written
# Field 9 -- # of milliseconds spent writing
# Field 10 -- # of I/Os currently in progress
# Field 11 -- # of milliseconds spent doing I/Os
# Field 12 -- weighted # of milliseconds spent doing I/Os
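#
# Illustrative cross-check (not part of the original script): in raw
# /proc/diskstats output the same counters follow the major, minor and
# device name columns, so "Field 4" above (sectors read) is column 6:
#
#   awk '$3 ~ /^sd/ {print $3, $6}' /proc/diskstats
#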
PROG=zpios-profile-disk.sh
RUN_PIDS=${0}
RUN_LOG_DIR=${1}
RUN_ID=${2}
create_table() {
local FIELD=$1
local ROW_M=()
local ROW_N=()
local HEADER=1
local STEP=1
for DISK_FILE in `ls -r --sort=time --time=ctime ${RUN_LOG_DIR}/${RUN_ID}/disk-[0-9]*`; do
ROW_M=( ${ROW_N[@]} )
ROW_N=( `cat ${DISK_FILE} | grep sd | cut -c11- | cut -f${FIELD} -d' ' | tr "\n" "\t"` )
if [ $HEADER -eq 1 ]; then
echo -n "step, "
cat ${DISK_FILE} | grep sd | cut -c11- | cut -f1 -d' ' | tr "\n" ", "
echo "total"
HEADER=0
fi
if [ ${#ROW_M[@]} -eq 0 ]; then
continue
fi
if [ ${#ROW_M[@]} -ne ${#ROW_N[@]} ]; then
echo "Badly formatted profile data in ${DISK_FILE}"
break
fi
TOTAL=0
echo -n "${STEP}, "
for (( i=0; i<${#ROW_N[@]}; i++ )); do
DELTA=`echo "${ROW_N[${i}]}-${ROW_M[${i}]}" | bc`
let TOTAL=${TOTAL}+${DELTA}
echo -n "${DELTA}, "
done
echo "${TOTAL}, "
let STEP=${STEP}+1
done
}
create_table_mbs() {
local FIELD=$1
local TIME=$2
local ROW_M=()
local ROW_N=()
local HEADER=1
local STEP=1
for DISK_FILE in `ls -r --sort=time --time=ctime ${RUN_LOG_DIR}/${RUN_ID}/disk-[0-9]*`; do
ROW_M=( ${ROW_N[@]} )
ROW_N=( `cat ${DISK_FILE} | grep sd | cut -c11- | cut -f${FIELD} -d' ' | tr "\n" "\t"` )
if [ $HEADER -eq 1 ]; then
echo -n "step, "
cat ${DISK_FILE} | grep sd | cut -c11- | cut -f1 -d' ' | tr "\n" ", "
echo "total"
HEADER=0
fi
if [ ${#ROW_M[@]} -eq 0 ]; then
continue
fi
if [ ${#ROW_M[@]} -ne ${#ROW_N[@]} ]; then
echo "Badly formatted profile data in ${DISK_FILE}"
break
fi
TOTAL=0
echo -n "${STEP}, "
for (( i=0; i<${#ROW_N[@]}; i++ )); do
DELTA=`echo "${ROW_N[${i}]}-${ROW_M[${i}]}" | bc`
MBS=`echo "scale=2; ((${DELTA}*512)/${TIME})/(1024*1024)" | bc`
TOTAL=`echo "scale=2; ${TOTAL}+${MBS}" | bc`
echo -n "${MBS}, "
done
echo "${TOTAL}, "
let STEP=${STEP}+1
done
}
echo
echo "Reads issued per device"
create_table 2
echo
echo "Reads merged per device"
create_table 3
echo
echo "Sectors read per device"
create_table 4
echo "MB/s per device"
create_table_mbs 4 3
echo
echo "Writes issued per device"
create_table 6
echo
echo "Writes merged per device"
create_table 7
echo
echo "Sectors written per device"
create_table 8
echo "MB/s per device"
create_table_mbs 8 3
exit 0


@ -1,131 +0,0 @@
#!/bin/bash
PROG=zpios-profile-pids.sh
RUN_PIDS=${0}
RUN_LOG_DIR=${1}
RUN_ID=${2}
ROW_M=()
ROW_N=()
ROW_N_SCHED=()
ROW_N_WAIT=()
HEADER=1
STEP=1
for PID_FILE in `ls -r --sort=time --time=ctime ${RUN_LOG_DIR}/${RUN_ID}/pids-[0-9]*`; do
ROW_M=( ${ROW_N[@]} )
ROW_N=( 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 )
ROW_N_SCHED=( `cat ${PID_FILE} | cut -f15 -d' ' | tr "\n" "\t"` )
ROW_N_WAIT=( `cat ${PID_FILE} | cut -f17 -d' ' | tr "\n" "\t"` )
ROW_N_NAMES=( `cat ${PID_FILE} | cut -f2 -d' ' | cut -f2 -d'(' |
cut -f1 -d')' | cut -f1 -d'/' | tr "\n" "\t"` )
for (( i=0; i<${#ROW_N_SCHED[@]}; i++ )); do
SUM=`echo "${ROW_N_WAIT[${i}]}+${ROW_N_SCHED[${i}]}" | bc`
case ${ROW_N_NAMES[${i}]} in
zio_taskq) IDX=0;;
zio_req_nul) IDX=1;;
zio_irq_nul) IDX=2;;
zio_req_rd) IDX=3;;
zio_irq_rd) IDX=4;;
zio_req_wr) IDX=5;;
zio_irq_wr) IDX=6;;
zio_req_fr) IDX=7;;
zio_irq_fr) IDX=8;;
zio_req_cm) IDX=9;;
zio_irq_cm) IDX=10;;
zio_req_ctl) IDX=11;;
zio_irq_ctl) IDX=12;;
txg_quiesce) IDX=13;;
txg_sync) IDX=14;;
txg_timelimit) IDX=15;;
arc_reclaim) IDX=16;;
l2arc_feed) IDX=17;;
zpios_io) IDX=18;;
*) continue;;
esac
let ROW_N[${IDX}]=${ROW_N[${IDX}]}+${SUM}
done
if [ $HEADER -eq 1 ]; then
echo "step, zio_taskq, zio_req_nul, zio_irq_nul, " \
"zio_req_rd, zio_irq_rd, zio_req_wr, zio_irq_wr, " \
"zio_req_fr, zio_irq_fr, zio_req_cm, zio_irq_cm, " \
"zio_req_ctl, zio_irq_ctl, txg_quiesce, txg_sync, " \
"txg_timelimit, arc_reclaim, l2arc_feed, zpios_io, " \
"idle"
HEADER=0
fi
if [ ${#ROW_M[@]} -eq 0 ]; then
continue
fi
if [ ${#ROW_M[@]} -ne ${#ROW_N[@]} ]; then
echo "Badly formatted profile data in ${PID_FILE}"
break
fi
# Original values are in jiffies and we expect HZ to be 1000
# on most 2.6 systems thus we divide by 10 to get a percentage.
IDLE=1000
echo -n "${STEP}, "
for (( i=0; i<${#ROW_N[@]}; i++ )); do
DELTA=`echo "${ROW_N[${i}]}-${ROW_M[${i}]}" | bc`
DELTA_PERCENT=`echo "scale=1; ${DELTA}/10" | bc`
let IDLE=${IDLE}-${DELTA}
echo -n "${DELTA_PERCENT}, "
done
IDLE_PERCENT=`echo "scale=1; ${IDLE}/10" | bc`
echo "${IDLE_PERCENT}"
let STEP=${STEP}+1
done
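# Worked example of the jiffies conversion above, illustrative and not
# part of the original script: with HZ=1000, a thread that accumulated
# 250 jiffies of run plus wait time over a one second sample used 25.0%
# of a CPU.
example_jiffies_to_percent() {
    echo "scale=1; 250/10" | bc
}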
exit
echo
echo "Percent of total system time per pid"
for PID_FILE in `ls -r --sort=time --time=ctime ${RUN_LOG_DIR}/${RUN_ID}/pids-[0-9]*`; do
ROW_M=( ${ROW_N[@]} )
ROW_N_SCHED=( `cat ${PID_FILE} | cut -f15 -d' ' | tr "\n" "\t"` )
ROW_N_WAIT=( `cat ${PID_FILE} | cut -f17 -d' ' | tr "\n" "\t"` )
for (( i=0; i<${#ROW_N_SCHED[@]}; i++ )); do
ROW_N[${i}]=`echo "${ROW_N_WAIT[${i}]}+${ROW_N_SCHED[${i}]}" | bc`
done
if [ $HEADER -eq 1 ]; then
echo -n "step, "
cat ${PID_FILE} | cut -f2 -d' ' | tr "\n" ", "
echo
HEADER=0
fi
if [ ${#ROW_M[@]} -eq 0 ]; then
continue
fi
if [ ${#ROW_M[@]} -ne ${#ROW_N[@]} ]; then
echo "Badly formatted profile data in ${PID_FILE}"
break
fi
# Original values are in jiffies and we expect HZ to be 1000
# on most 2.6 systems thus we divide by 10 to get a percentage.
echo -n "${STEP}, "
for (( i=0; i<${#ROW_N[@]}; i++ )); do
DELTA=`echo "scale=1; (${ROW_N[${i}]}-${ROW_M[${i}]})/10" | bc`
echo -n "${DELTA}, "
done
echo
let STEP=${STEP}+1
done
exit 0

View File

@ -1,129 +0,0 @@
#!/bin/bash
PROG=zpios-profile-post.sh
RUN_POST=${0}
RUN_PHASE=${1}
RUN_DIR=${2}
RUN_ID=${3}
RUN_POOL=${4}
RUN_CHUNK_SIZE=${5}
RUN_REGION_SIZE=${6}
RUN_THRD_COUNT=${7}
RUN_REGION_COUNT=${8}
RUN_OFFSET=${9}
RUN_REGION_NOISE=${10}
RUN_CHUNK_NOISE=${11}
RUN_THRD_DELAY=${12}
RUN_FLAGS=${13}
RUN_RESULT=${14}
# Summarize system time per process
zpios_profile_post_pids() {
${PROFILE_PIDS} ${PROFILE_RUN_CR_PIDS_LOG} >${PROFILE_RUN_CR_PIDS_CSV}
${PROFILE_PIDS} ${PROFILE_RUN_WR_PIDS_LOG} >${PROFILE_RUN_WR_PIDS_CSV}
${PROFILE_PIDS} ${PROFILE_RUN_RD_PIDS_LOG} >${PROFILE_RUN_RD_PIDS_CSV}
${PROFILE_PIDS} ${PROFILE_RUN_RM_PIDS_LOG} >${PROFILE_RUN_RM_PIDS_CSV}
}
# Summarize per device performance
zpios_profile_post_disk() {
${PROFILE_DISK} ${PROFILE_RUN_CR_DISK_LOG} >${PROFILE_RUN_CR_DISK_CSV}
${PROFILE_DISK} ${PROFILE_RUN_WR_DISK_LOG} >${PROFILE_RUN_WR_DISK_CSV}
${PROFILE_DISK} ${PROFILE_RUN_RD_DISK_LOG} >${PROFILE_RUN_RD_DISK_CSV}
${PROFILE_DISK} ${PROFILE_RUN_RM_DISK_LOG} >${PROFILE_RUN_RM_DISK_CSV}
}
# Stop a user defined profiling script which is gathering additional data
zpios_profile_post_stop() {
local PROFILE_PID=$1
kill -s SIGHUP `cat ${PROFILE_PID}`
# Sleep waiting for profile script to exit
while [ -f ${PROFILE_PID} ]; do
sleep 0.01
done
}
zpios_profile_post_proc_stop() {
local PROC_DIR=$1
if [ -f ${PROFILE_ARC_PROC} ]; then
cat ${PROFILE_ARC_PROC} >${PROC_DIR}/arcstats.txt
fi
if [ -f ${PROFILE_VDEV_CACHE_PROC} ]; then
cat ${PROFILE_VDEV_CACHE_PROC} >${PROC_DIR}/vdev_cache_stats.txt
fi
}
zpios_profile_post_oprofile_stop() {
local OPROFILE_LOG=$1
local OPROFILE_ARGS="-a -g -l -p ${OPROFILE_KERNEL_DIR},${OPROFILE_SPL_DIR},${OPROFILE_ZFS_DIR}"
/usr/bin/opcontrol --stop >>${OPROFILE_LOG} 2>&1
/usr/bin/opcontrol --dump >>${OPROFILE_LOG} 2>&1
/usr/bin/opreport ${OPROFILE_ARGS} >${OPROFILE_LOG} 2>&1
/usr/bin/oparchive
}
zpios_profile_post_create() {
zpios_profile_post_oprofile_stop ${PROFILE_RUN_CR_OPROFILE_LOG}
zpios_profile_post_proc_stop ${PROFILE_RUN_CR_DIR}
zpios_profile_post_stop ${PROFILE_RUN_CR_PID}
}
zpios_profile_post_write() {
zpios_profile_post_oprofile_stop ${PROFILE_RUN_WR_OPROFILE_LOG}
zpios_profile_post_proc_stop ${PROFILE_RUN_WR_DIR}
zpios_profile_post_stop ${PROFILE_RUN_WR_PID}
}
zpios_profile_post_read() {
zpios_profile_post_oprofile_stop ${PROFILE_RUN_RD_OPROFILE_LOG}
zpios_profile_post_proc_stop ${PROFILE_RUN_RD_DIR}
zpios_profile_post_stop ${PROFILE_RUN_RD_PID}
}
zpios_profile_post_remove() {
zpios_profile_post_oprofile_stop ${PROFILE_RUN_RM_OPROFILE_LOG}
zpios_profile_post_proc_stop ${PROFILE_RUN_RM_DIR}
zpios_profile_post_stop ${PROFILE_RUN_RM_PID}
}
# Source global zpios test configuration
if [ -f ${RUN_DIR}/zpios-config.sh ]; then
. ${RUN_DIR}/zpios-config.sh
fi
# Source global per-run test configuration
if [ -f ${RUN_DIR}/${RUN_ID}/zpios-config-run.sh ]; then
. ${RUN_DIR}/${RUN_ID}/zpios-config-run.sh
fi
case "${RUN_PHASE}" in
post-run)
zpios_profile_post_pids
zpios_profile_post_disk
;;
post-create)
zpios_profile_post_create
;;
post-write)
zpios_profile_post_write
;;
post-read)
zpios_profile_post_read
;;
post-remove)
zpios_profile_post_remove
;;
*)
echo "Usage: ${PROG} {post-run|post-create|post-write|post-read|post-remove}"
exit 1
esac
exit 0

View File

@ -1,184 +0,0 @@
#!/bin/bash
PROG=zpios-profile-pre.sh
PROFILE_RDY=0
trap "PROFILE_RDY=1" SIGHUP
RUN_PRE=${0}
RUN_PHASE=${1}
RUN_DIR=${2}
RUN_ID=${3}
RUN_POOL=${4}
RUN_CHUNK_SIZE=${5}
RUN_REGION_SIZE=${6}
RUN_THRD_COUNT=${7}
RUN_REGION_COUNT=${8}
RUN_OFFSET=${9}
RUN_REGION_NOISE=${10}
RUN_CHUNK_NOISE=${11}
RUN_THRD_DELAY=${12}
RUN_FLAGS=${13}
RUN_RESULT=${14}
zpios_profile_pre_run_cfg() {
cat > ${RUN_DIR}/${RUN_ID}/zpios-config-run.sh << EOF
#
# Zpios Profiling Configuration for Run ${RUN_ID}
#
PROFILE_RUN_DIR=${RUN_DIR}/${RUN_ID}
PROFILE_RUN_CR_DIR=${RUN_DIR}/${RUN_ID}/create
PROFILE_RUN_CR_PID=${RUN_DIR}/${RUN_ID}/create/profile.pid
PROFILE_RUN_CR_OPROFILE_LOG=${RUN_DIR}/${RUN_ID}/create/oprofile.txt
PROFILE_RUN_CR_PIDS_LOG=${RUN_DIR}/${RUN_ID}/create/pids.txt
PROFILE_RUN_CR_PIDS_CSV=${RUN_DIR}/${RUN_ID}/create/pids.csv
PROFILE_RUN_CR_DISK_LOG=${RUN_DIR}/${RUN_ID}/create/disk.txt
PROFILE_RUN_CR_DISK_CSV=${RUN_DIR}/${RUN_ID}/create/disk.csv
PROFILE_RUN_WR_DIR=${RUN_DIR}/${RUN_ID}/write
PROFILE_RUN_WR_PID=${RUN_DIR}/${RUN_ID}/write/profile.pid
PROFILE_RUN_WR_OPROFILE_LOG=${RUN_DIR}/${RUN_ID}/write/oprofile.txt
PROFILE_RUN_WR_PIDS_LOG=${RUN_DIR}/${RUN_ID}/write/pids.txt
PROFILE_RUN_WR_PIDS_CSV=${RUN_DIR}/${RUN_ID}/write/pids.csv
PROFILE_RUN_WR_DISK_LOG=${RUN_DIR}/${RUN_ID}/write/disk.txt
PROFILE_RUN_WR_DISK_CSV=${RUN_DIR}/${RUN_ID}/write/disk.csv
PROFILE_RUN_RD_DIR=${RUN_DIR}/${RUN_ID}/read
PROFILE_RUN_RD_PID=${RUN_DIR}/${RUN_ID}/read/profile.pid
PROFILE_RUN_RD_OPROFILE_LOG=${RUN_DIR}/${RUN_ID}/read/oprofile.txt
PROFILE_RUN_RD_PIDS_LOG=${RUN_DIR}/${RUN_ID}/read/pids.txt
PROFILE_RUN_RD_PIDS_CSV=${RUN_DIR}/${RUN_ID}/read/pids.csv
PROFILE_RUN_RD_DISK_LOG=${RUN_DIR}/${RUN_ID}/read/disk.txt
PROFILE_RUN_RD_DISK_CSV=${RUN_DIR}/${RUN_ID}/read/disk.csv
PROFILE_RUN_RM_DIR=${RUN_DIR}/${RUN_ID}/remove
PROFILE_RUN_RM_PID=${RUN_DIR}/${RUN_ID}/remove/profile.pid
PROFILE_RUN_RM_OPROFILE_LOG=${RUN_DIR}/${RUN_ID}/remove/oprofile.txt
PROFILE_RUN_RM_PIDS_LOG=${RUN_DIR}/${RUN_ID}/remove/pids.txt
PROFILE_RUN_RM_PIDS_CSV=${RUN_DIR}/${RUN_ID}/remove/pids.csv
PROFILE_RUN_RM_DISK_LOG=${RUN_DIR}/${RUN_ID}/remove/disk.txt
PROFILE_RUN_RM_DISK_CSV=${RUN_DIR}/${RUN_ID}/remove/disk.csv
# PROFILE_PIDS_LOG=${RUN_DIR}/${RUN_ID}/pids-summary.csv
# PROFILE_DISK_LOG=${RUN_DIR}/${RUN_ID}/disk-summary.csv
EOF
}
zpios_profile_pre_run_args() {
cat > ${RUN_DIR}/${RUN_ID}/zpios-args.txt << EOF
#
# Zpios Arguments for Run ${RUN_ID}
#
DIR=${RUN_DIR}
ID=${RUN_ID}
POOL=${RUN_POOL}
CHUNK_SIZE=${RUN_CHUNK_SIZE}
REGION_SIZE=${RUN_REGION_SIZE}
THRD_COUNT=${RUN_THRD_COUNT}
REGION_COUNT=${RUN_REGION_COUNT}
OFFSET=${RUN_OFFSET}
REGION_NOISE=${RUN_REGION_NOISE}
CHUNK_NOISE=${RUN_CHUNK_NOISE}
THRD_DELAY=${RUN_THRD_DELAY}
FLAGS=${RUN_FLAGS}
RESULT=${RUN_RESULT}
EOF
}
# Spawn a user defined profiling script to gather additional data
zpios_profile_pre_start() {
local PROFILE_PID=$1
${PROFILE_USER} ${RUN_PHASE} ${RUN_DIR} ${RUN_ID} &
echo "$!" >${PROFILE_PID}
# Sleep waiting for the profile script to be ready; it will
# signal us via SIGHUP when it is ready to start profiling.
while [ ${PROFILE_RDY} -eq 0 ]; do
sleep 0.01
done
}
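# Minimal sketch of the SIGHUP handshake used above; illustrative only
# and not part of the original script.  The backgrounded job stands in
# for ${PROFILE_USER} and signals its parent once it is ready, while the
# parent spins until the trap flips the flag.
example_handshake() {
    local READY=0
    trap 'READY=1' SIGHUP
    ( sleep 1; kill -s SIGHUP $$ ) &
    while [ ${READY} -eq 0 ]; do
        sleep 0.01
    done
    echo "profiler ready"
}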
zpios_profile_post_proc_start() {
if [ -f ${PROFILE_ARC_PROC} ]; then
echo 0 >${PROFILE_ARC_PROC}
fi
if [ -f ${PROFILE_VDEV_CACHE_PROC} ]; then
echo 0 >${PROFILE_VDEV_CACHE_PROC}
fi
}
zpios_profile_pre_oprofile_start() {
local OPROFILE_LOG=$1
/usr/bin/opcontrol --reset >>${OPROFILE_LOG} 2>&1
/usr/bin/opcontrol --start >>${OPROFILE_LOG} 2>&1
}
zpios_profile_pre_create() {
mkdir ${PROFILE_RUN_CR_DIR}
zpios_profile_pre_start ${PROFILE_RUN_CR_PID}
zpios_profile_post_proc_start
zpios_profile_pre_oprofile_start ${PROFILE_RUN_CR_OPROFILE_LOG}
}
zpios_profile_pre_write() {
mkdir ${PROFILE_RUN_WR_DIR}
zpios_profile_pre_start ${PROFILE_RUN_WR_PID}
zpios_profile_post_proc_start
zpios_profile_pre_oprofile_start ${PROFILE_RUN_WR_OPROFILE_LOG}
}
zpios_profile_pre_read() {
mkdir ${PROFILE_RUN_RD_DIR}
zpios_profile_pre_start ${PROFILE_RUN_RD_PID}
zpios_profile_post_proc_start
zpios_profile_pre_oprofile_start ${PROFILE_RUN_RD_OPROFILE_LOG}
}
zpios_profile_pre_remove() {
mkdir ${PROFILE_RUN_RM_DIR}
zpios_profile_pre_start ${PROFILE_RUN_RM_PID}
zpios_profile_post_proc_start
zpios_profile_pre_oprofile_start ${PROFILE_RUN_RM_OPROFILE_LOG}
}
# Source global zpios test configuration
if [ -f ${RUN_DIR}/zpios-config.sh ]; then
. ${RUN_DIR}/zpios-config.sh
fi
# Source global per-run test configuration
if [ -f ${RUN_DIR}/${RUN_ID}/zpios-config-run.sh ]; then
. ${RUN_DIR}/${RUN_ID}/zpios-config-run.sh
fi
case "${RUN_PHASE}" in
pre-run)
mkdir -p ${RUN_DIR}/${RUN_ID}/
zpios_profile_pre_run_cfg
zpios_profile_pre_run_args
;;
pre-create)
zpios_profile_pre_create
;;
pre-write)
zpios_profile_pre_write
;;
pre-read)
zpios_profile_pre_read
;;
pre-remove)
zpios_profile_pre_remove
;;
*)
echo "Usage: ${PROG} {pre-run|pre-create|pre-write|pre-read|pre-remove}"
exit 1
esac
exit 0

View File

@ -1,226 +0,0 @@
#!/bin/bash
PROG=zpios-profile.sh
trap "RUN_DONE=1" SIGHUP
RUN_PHASE=${1}
RUN_LOG_DIR=${2}
RUN_ID=${3}
RUN_DONE=0
POLL_INTERVAL=2.99
# Log these pids; the exact pid numbers will vary from system to system,
# so I harvest the pids for all of the following types of processes from /proc/<pid>/
#
# zio_taskq/#
# spa_zio_issue/#
# spa_zio_intr/#
# txg_quiesce_thr
# txg_sync_thread
# txg_timelimit_t
# arc_reclaim_thr
# l2arc_feed_thre
# zpios_io/#
ZIO_TASKQ_PIDS=()
ZIO_REQ_NUL_PIDS=()
ZIO_IRQ_NUL_PIDS=()
ZIO_REQ_RD_PIDS=()
ZIO_IRQ_RD_PIDS=()
ZIO_REQ_WR_PIDS=()
ZIO_IRQ_WR_PIDS=()
ZIO_REQ_FR_PIDS=()
ZIO_IRQ_FR_PIDS=()
ZIO_REQ_CM_PIDS=()
ZIO_IRQ_CM_PIDS=()
ZIO_REQ_CTL_PIDS=()
ZIO_IRQ_CTL_PIDS=()
TXG_QUIESCE_PIDS=()
TXG_SYNC_PIDS=()
TXG_TIMELIMIT_PIDS=()
ARC_RECLAIM_PIDS=()
L2ARC_FEED_PIDS=()
ZPIOS_IO_PIDS=()
show_pids() {
echo "* zio_taskq: { ${ZIO_TASKQ_PIDS[@]} } = ${#ZIO_TASKQ_PIDS[@]}"
echo "* zio_req_nul: { ${ZIO_REQ_NUL_PIDS[@]} } = ${#ZIO_REQ_NUL_PIDS[@]}"
echo "* zio_irq_nul: { ${ZIO_IRQ_NUL_PIDS[@]} } = ${#ZIO_IRQ_NUL_PIDS[@]}"
echo "* zio_req_rd: { ${ZIO_REQ_RD_PIDS[@]} } = ${#ZIO_REQ_RD_PIDS[@]}"
echo "* zio_irq_rd: { ${ZIO_IRQ_RD_PIDS[@]} } = ${#ZIO_IRQ_RD_PIDS[@]}"
echo "* zio_req_wr: { ${ZIO_REQ_WR_PIDS[@]} } = ${#ZIO_REQ_WR_PIDS[@]}"
echo "* zio_irq_wr: { ${ZIO_IRQ_WR_PIDS[@]} } = ${#ZIO_IRQ_WR_PIDS[@]}"
echo "* zio_req_fr: { ${ZIO_REQ_FR_PIDS[@]} } = ${#ZIO_REQ_FR_PIDS[@]}"
echo "* zio_irq_fr: { ${ZIO_IRQ_FR_PIDS[@]} } = ${#ZIO_IRQ_FR_PIDS[@]}"
echo "* zio_req_cm: { ${ZIO_REQ_CM_PIDS[@]} } = ${#ZIO_REQ_CM_PIDS[@]}"
echo "* zio_irq_cm: { ${ZIO_IRQ_CM_PIDS[@]} } = ${#ZIO_IRQ_CM_PIDS[@]}"
echo "* zio_req_ctl: { ${ZIO_REQ_CTL_PIDS[@]} } = ${#ZIO_REQ_CTL_PIDS[@]}"
echo "* zio_irq_ctl: { ${ZIO_IRQ_CTL_PIDS[@]} } = ${#ZIO_IRQ_CTL_PIDS[@]}"
echo "* txg_quiesce: { ${TXG_QUIESCE_PIDS[@]} } = ${#TXG_QUIESCE_PIDS[@]}"
echo "* txg_sync: { ${TXG_SYNC_PIDS[@]} } = ${#TXG_SYNC_PIDS[@]}"
echo "* txg_timelimit: { ${TXG_TIMELIMIT_PIDS[@]} } = ${#TXG_TIMELIMIT_PIDS[@]}"
echo "* arc_reclaim: { ${ARC_RECLAIM_PIDS[@]} } = ${#ARC_RECLAIM_PIDS[@]}"
echo "* l2arc_feed: { ${L2ARC_FEED_PIDS[@]} } = ${#L2ARC_FEED_PIDS[@]}"
echo "* zpios_io: { ${ZPIOS_IO_PIDS[@]} } = ${#ZPIOS_IO_PIDS[@]}"
}
check_pid() {
local PID=$1
local NAME=$2
local TYPE=$3
local PIDS=( "$4" )
local NAME_STRING=`echo ${NAME} | cut -f1 -d'/'`
local NAME_NUMBER=`echo ${NAME} | cut -f2 -d'/'`
if [ "${NAME_STRING}" == "${TYPE}" ]; then
if [ -n "${NAME_NUMBER}" ]; then
PIDS[${NAME_NUMBER}]=${PID}
else
PIDS[${#PIDS[@]}]=${PID}
fi
fi
echo "${PIDS[@]}"
}
# NOTE: This whole process is crazy slow but it will do for now
acquire_pids() {
echo "--- Aquiring ZFS pids ---"
for PID in `ls /proc/ | grep [0-9] | sort -n -u`; do
if [ ! -e /proc/${PID}/status ]; then
continue
fi
NAME=`cat /proc/${PID}/status | head -n1 | cut -f2`
ZIO_TASKQ_PIDS=( `check_pid ${PID} ${NAME} "zio_taskq" \
"$(echo "${ZIO_TASKQ_PIDS[@]}")"` )
ZIO_REQ_NUL_PIDS=( `check_pid ${PID} ${NAME} "zio_req_nul" \
"$(echo "${ZIO_REQ_NUL_PIDS[@]}")"` )
ZIO_IRQ_NUL_PIDS=( `check_pid ${PID} ${NAME} "zio_irq_nul" \
"$(echo "${ZIO_IRQ_NUL_PIDS[@]}")"` )
ZIO_REQ_RD_PIDS=( `check_pid ${PID} ${NAME} "zio_req_rd" \
"$(echo "${ZIO_REQ_RD_PIDS[@]}")"` )
ZIO_IRQ_RD_PIDS=( `check_pid ${PID} ${NAME} "zio_irq_rd" \
"$(echo "${ZIO_IRQ_RD_PIDS[@]}")"` )
ZIO_REQ_WR_PIDS=( `check_pid ${PID} ${NAME} "zio_req_wr" \
"$(echo "${ZIO_REQ_WR_PIDS[@]}")"` )
ZIO_IRQ_WR_PIDS=( `check_pid ${PID} ${NAME} "zio_irq_wr" \
"$(echo "${ZIO_IRQ_WR_PIDS[@]}")"` )
ZIO_REQ_FR_PIDS=( `check_pid ${PID} ${NAME} "zio_req_fr" \
"$(echo "${ZIO_REQ_FR_PIDS[@]}")"` )
ZIO_IRQ_FR_PIDS=( `check_pid ${PID} ${NAME} "zio_irq_fr" \
"$(echo "${ZIO_IRQ_FR_PIDS[@]}")"` )
ZIO_REQ_CM_PIDS=( `check_pid ${PID} ${NAME} "zio_req_cm" \
"$(echo "${ZIO_REQ_CM_PIDS[@]}")"` )
ZIO_IRQ_CM_PIDS=( `check_pid ${PID} ${NAME} "zio_irq_cm" \
"$(echo "${ZIO_IRQ_CM_PIDS[@]}")"` )
ZIO_REQ_CTL_PIDS=( `check_pid ${PID} ${NAME} "zio_req_ctl" \
"$(echo "${ZIO_REQ_CTL_PIDS[@]}")"` )
ZIO_IRQ_CTL_PIDS=( `check_pid ${PID} ${NAME} "zio_irq_ctl" \
"$(echo "${ZIO_IRQ_CTL_PIDS[@]}")"` )
TXG_QUIESCE_PIDS=( `check_pid ${PID} ${NAME} "txg_quiesce" \
"$(echo "${TXG_QUIESCE_PIDS[@]}")"` )
TXG_SYNC_PIDS=( `check_pid ${PID} ${NAME} "txg_sync" \
"$(echo "${TXG_SYNC_PIDS[@]}")"` )
TXG_TIMELIMIT_PIDS=( `check_pid ${PID} ${NAME} "txg_timelimit" \
"$(echo "${TXG_TIMELIMIT_PIDS[@]}")"` )
ARC_RECLAIM_PIDS=( `check_pid ${PID} ${NAME} "arc_reclaim" \
"$(echo "${ARC_RECLAIM_PIDS[@]}")"` )
L2ARC_FEED_PIDS=( `check_pid ${PID} ${NAME} "l2arc_feed" \
"$(echo "${L2ARC_FEED_PIDS[@]}")"` )
done
# Wait for zpios_io threads to start
kill -s SIGHUP ${PPID}
echo "* Waiting for zpios_io threads to start"
while [ ${RUN_DONE} -eq 0 ]; do
ZPIOS_IO_PIDS=( `ps ax | grep zpios_io | grep -v grep | \
sed 's/^ *//g' | cut -f1 -d' '` )
if [ ${#ZPIOS_IO_PIDS[@]} -gt 0 ]; then
break;
fi
sleep 0.1
done
echo "`show_pids`" >${RUN_LOG_DIR}/${RUN_ID}/pids.txt
}
log_pids() {
echo "--- Logging ZFS profile to ${RUN_LOG_DIR}/${RUN_ID}/ ---"
ALL_PIDS=( ${ZIO_TASKQ_PIDS[@]} \
${ZIO_REQ_NUL_PIDS[@]} \
${ZIO_IRQ_NUL_PIDS[@]} \
${ZIO_REQ_RD_PIDS[@]} \
${ZIO_IRQ_RD_PIDS[@]} \
${ZIO_REQ_WR_PIDS[@]} \
${ZIO_IRQ_WR_PIDS[@]} \
${ZIO_REQ_FR_PIDS[@]} \
${ZIO_IRQ_FR_PIDS[@]} \
${ZIO_REQ_CM_PIDS[@]} \
${ZIO_IRQ_CM_PIDS[@]} \
${ZIO_REQ_CTL_PIDS[@]} \
${ZIO_IRQ_CTL_PIDS[@]} \
${TXG_QUIESCE_PIDS[@]} \
${TXG_SYNC_PIDS[@]} \
${TXG_TIMELIMIT_PIDS[@]} \
${ARC_RECLAIM_PIDS[@]} \
${L2ARC_FEED_PIDS[@]} \
${ZPIOS_IO_PIDS[@]} )
while [ ${RUN_DONE} -eq 0 ]; do
NOW=`date +%s.%N`
LOG_PIDS="${RUN_LOG_DIR}/${RUN_ID}/pids-${NOW}"
LOG_DISK="${RUN_LOG_DIR}/${RUN_ID}/disk-${NOW}"
for PID in "${ALL_PIDS[@]}"; do
if [ -z ${PID} ]; then
continue;
fi
if [ -e /proc/${PID}/stat ]; then
cat /proc/${PID}/stat | head -n1 >>${LOG_PIDS}
else
echo "<${PID} exited>" >>${LOG_PIDS}
fi
done
cat /proc/diskstats >${LOG_DISK}
NOW2=`date +%s.%N`
DELTA=`echo "${POLL_INTERVAL}-(${NOW2}-${NOW})" | bc`
sleep ${DELTA}
done
}
acquire_pids
log_pids
# rm ${PROFILE_PID}
exit 0

View File

@ -1,171 +0,0 @@
#!/bin/bash
#
# ZFS/ZPOOL configuration test script.
basedir="$(dirname $0)"
SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
. "${basedir}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
PROG=zpios-sanity.sh
HEADER=
FAILS=0
usage() {
cat << EOF
USAGE:
$0 [hvxfc]
DESCRIPTION:
ZPIOS sanity tests
OPTIONS:
-h Show this message
-v Verbose
-x Destructive hd/sd/md/dm/ram tests
-f Don't prompt due to -x
-c Cleanup lo+file devices at start
EOF
}
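# Illustrative invocation, not part of the original script; the flags
# are those documented above:
#
#   ./zpios-sanity.sh -v -c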
while getopts 'hvxfc?' OPTION; do
case $OPTION in
h)
usage
exit 1
;;
v)
VERBOSE=1
;;
x)
DANGEROUS=1
;;
f)
FORCE=1
;;
c)
CLEANUP=1
;;
?)
usage
exit
;;
esac
done
if [ $(id -u) != 0 ]; then
die "Must run as root"
fi
# Initialize the test suite
init
# Perform pre-cleanup if requested
if [ ${CLEANUP} ]; then
${ZFS_SH} -u
cleanup_md_devices
cleanup_loop_devices
rm -f /tmp/zpool.cache.*
fi
zpios_test() {
CONFIG=$1
TEST=$2
LOG=`mktemp`
${ZPIOS_SH} -f -c ${CONFIG} -t ${TEST} &>${LOG}
if [ $? -ne 0 ]; then
FAILS=1
if [ ${VERBOSE} ]; then
printf "FAIL: %-13s\n" ${CONFIG}
cat ${LOG}
else
if [ ! ${HEADER} ]; then
head -2 ${LOG}
HEADER=1
fi
printf "FAIL: %-13s" ${CONFIG}
tail -1 ${LOG}
fi
else
if [ ${VERBOSE} ]; then
cat ${LOG}
else
if [ ! ${HEADER} ]; then
head -2 ${LOG}
HEADER=1
fi
tail -1 ${LOG}
fi
fi
rm -f ${LOG}
}
if [ ${DANGEROUS} ] && [ ! ${FORCE} ]; then
cat << EOF
The -x option was passed which will result in UNRECOVERABLE DATA LOSS
on the following block devices:
/dev/sd[abcd]
/dev/hda
/dev/ram0
/dev/md0
/dev/dm-0
To continue please confirm by entering YES:
EOF
read CONFIRM
if [ ${CONFIRM} != "YES" ] && [ ${CONFIRM} != "yes" ]; then
exit 0;
fi
fi
#
# These configurations are all safe and pose no risk to any data on
# the system which runs them. They will confine all their IO to a
# file in /tmp or a loopback device configured to use a file in /tmp.
#
SAFE_CONFIGS=( \
file-raid0 file-raid10 file-raidz file-raidz2 \
lo-raid0 lo-raid10 lo-raidz lo-raidz2 \
)
#
# These configurations are downright dangerous. They will attempt
# to use various real block devices on your system which may contain
# data you care about. You are STRONGLY advised not to run these unless
# you are certain there is no data on the system you care about.
#
DANGEROUS_CONFIGS=( \
hda-raid0 \
sda-raid0 \
ram0-raid0 \
md0-raid10 md0-raid5 \
dm0-raid0 \
)
TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || die "Unable to load modules"
for CONFIG in ${SAFE_CONFIGS[*]}; do
zpios_test $CONFIG tiny
done
if [ ${DANGEROUS} ]; then
for CONFIG in ${DANGEROUS_CONFIGS[*]}; do
zpios_test $CONFIG tiny
done
fi
${ZFS_SH} -u
exit $FAILS

View File

@ -1,187 +0,0 @@
#!/bin/bash
#
# Wrapper script for easily running a survey of zpios based tests
#
basedir="$(dirname $0)"
SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
. "${basedir}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
PROG=zpios-survey.sh
usage() {
cat << EOF
USAGE:
$0 [hvp] <-c config> <-t test>
DESCRIPTION:
Helper script for easy zpios survey benchmarking.
OPTIONS:
-h Show this message
-v Verbose
-p Enable profiling
-c Zpool configuration
-t Zpios test
-l Zpios survey log
EOF
}
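# Illustrative invocation, not part of the original script; the config
# and test names are examples taken from scripts/zpool-config and
# scripts/zpios-test:
#
#   ./zpios-survey.sh -v -c lo-raid0 -t tiny -l /tmp/zpios-survey.log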
print_header() {
tee -a ${ZPIOS_SURVEY_LOG} << EOF
================================================================
Test: $1
EOF
}
# Baseline performance for an out of the box config with no manual tuning.
# Ideally, we want everything to be automatically tuned for your system and
# for this to perform reasonably well.
zpios_survey_base() {
TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+baseline"
print_header ${TEST_NAME}
${ZFS_SH} ${VERBOSE_FLAG} | \
tee -a ${ZPIOS_SURVEY_LOG}
${ZPIOS_SH} ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} | \
tee -a ${ZPIOS_SURVEY_LOG}
${ZFS_SH} -u ${VERBOSE_FLAG} | \
tee -a ${ZPIOS_SURVEY_LOG}
}
# Disable ZFS's prefetching. For reasons still not clear to me the
# current prefetching policy is quite bad for a random workload.
# Allowing the algorithm to detect a random workload and then do
# nothing may be the way to address this issue.
zpios_survey_prefetch() {
TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+prefetch"
print_header ${TEST_NAME}
${ZFS_SH} ${VERBOSE_FLAG} | \
tee -a ${ZPIOS_SURVEY_LOG}
${ZPIOS_SH} ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} \
-o "--noprefetch" | \
tee -a ${ZPIOS_SURVEY_LOG}
${ZFS_SH} -u ${VERBOSE_FLAG} | \
tee -a ${ZPIOS_SURVEY_LOG}
}
# Simulating a zerocopy IO path should improve performance by freeing up
# lots of CPU which is otherwise wasted moving data between buffers.
zpios_survey_zerocopy() {
TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+zerocopy"
print_header ${TEST_NAME}
${ZFS_SH} ${VERBOSE_FLAG} | \
tee -a ${ZPIOS_SURVEY_LOG}
${ZPIOS_SH} ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} \
-o "--zerocopy" | \
tee -a ${ZPIOS_SURVEY_LOG}
${ZFS_SH} -u ${VERBOSE_FLAG} | \
tee -a ${ZPIOS_SURVEY_LOG}
}
# Disabling checksumming should show some (albeit small) improvement
# simply due to freeing up a modest amount of CPU.
zpios_survey_checksum() {
TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+checksum"
print_header ${TEST_NAME}
${ZFS_SH} ${VERBOSE_FLAG} | \
tee -a ${ZPIOS_SURVEY_LOG}
${ZPIOS_SH} ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} \
-s "set checksum=off" | \
tee -a ${ZPIOS_SURVEY_LOG}
${ZFS_SH} -u ${VERBOSE_FLAG} | \
tee -a ${ZPIOS_SURVEY_LOG}
}
# Increasing the pending IO depth also seems to improve things, likely
# at the expense of latency. This should be explored more because I'm
# seeing a much bigger impact there than I would have expected. There
# may be some low hanging fruit to be found here.
zpios_survey_pending() {
TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+pending"
print_header ${TEST_NAME}
${ZFS_SH} ${VERBOSE_FLAG} \
zfs="zfs_vdev_max_pending=1024" | \
tee -a ${ZPIOS_SURVEY_LOG}
${ZPIOS_SH} ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} | \
tee -a ${ZPIOS_SURVEY_LOG}
${ZFS_SH} -u ${VERBOSE_FLAG} | \
tee -a ${ZPIOS_SURVEY_LOG}
}
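# Note, illustrative only and assuming the loaded zfs module still
# exposes this tunable: the same setting can be applied to an already
# loaded module at runtime through sysfs.
#
#   echo 1024 > /sys/module/zfs/parameters/zfs_vdev_max_pending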
# Apply all possible tunings concurrently to get a best case number
zpios_survey_all() {
TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+all"
print_header ${TEST_NAME}
${ZFS_SH} ${VERBOSE_FLAG} \
zfs="zfs_vdev_max_pending=1024" | \
tee -a ${ZPIOS_SURVEY_LOG}
${ZPIOS_SH} ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} \
-o "--noprefetch --zerocopy" \
-s "set checksum=off" | \
tee -a ${ZPIOS_SURVEY_LOG}
${ZFS_SH} -u ${VERBOSE_FLAG} | \
tee -a ${ZPIOS_SURVEY_LOG}
}
PROFILE=
ZPOOL_NAME=zpios-survey
ZPOOL_CONFIG=zpool-config.sh
ZPIOS_TEST=zpios-test.sh
ZPIOS_SURVEY_LOG=/dev/null
while getopts 'hvpc:t:l:' OPTION; do
case $OPTION in
h)
usage
exit 1
;;
v)
VERBOSE=1
VERBOSE_FLAG="-v"
;;
p)
PROFILE=1
PROFILE_FLAG="-p"
;;
c)
ZPOOL_CONFIG=${OPTARG}
;;
t)
ZPIOS_TEST=${OPTARG}
;;
l)
ZPIOS_SURVEY_LOG=${OPTARG}
;;
?)
usage
exit
;;
esac
done
if [ $(id -u) != 0 ]; then
die "Must run as root"
fi
zpios_survey_base
zpios_survey_prefetch
zpios_survey_zerocopy
zpios_survey_checksum
zpios_survey_pending
zpios_survey_all
exit 0

View File

@ -1,65 +0,0 @@
#!/bin/bash
#
# Usage: zpios
# --threadcount -t =values
# --threadcount_low -l =value
# --threadcount_high -h =value
# --threadcount_incr -e =value
# --regioncount -n =values
# --regioncount_low -i =value
# --regioncount_high -j =value
# --regioncount_incr -k =value
# --offset -o =values
# --offset_low -m =value
# --offset_high -q =value
# --offset_incr -r =value
# --chunksize -c =values
# --chunksize_low -a =value
# --chunksize_high -b =value
# --chunksize_incr -g =value
# --regionsize -s =values
# --regionsize_low -A =value
# --regionsize_high -B =value
# --regionsize_incr -C =value
# --load -L =dmuio|ssf|fpp
# --pool -p =pool name
# --name -M =test name
# --cleanup -x
# --prerun -P =pre-command
# --postrun -R =post-command
# --log -G =log directory
# --regionnoise -I =shift
# --chunknoise -N =bytes
# --threaddelay -T =jiffies
# --verify -V
# --zerocopy -z
# --nowait -O
# --human-readable -H
# --verbose -v =increase verbosity
# --help -? =this help
ZPIOS_CMD="${ZPIOS} \
--load=dmuio \
--pool=${ZPOOL_NAME} \
--name=${ZPOOL_CONFIG} \
--threadcount=16 \
--regioncount=8192 \
--regionsize=4M \
--chunksize=1M \
--offset=4M \
--cleanup \
--human-readable \
${ZPIOS_OPTIONS}"
zpios_start() {
if [ ${VERBOSE} ]; then
ZPIOS_CMD="${ZPIOS_CMD} --verbose"
echo ${ZPIOS_CMD}
fi
${ZPIOS_CMD} || exit 1
}
zpios_stop() {
[ ${VERBOSE} ] && echo
}

View File

@ -1,66 +0,0 @@
#!/bin/bash
#
# Usage: zpios
# --threadcount -t =values
# --threadcount_low -l =value
# --threadcount_high -h =value
# --threadcount_incr -e =value
# --regioncount -n =values
# --regioncount_low -i =value
# --regioncount_high -j =value
# --regioncount_incr -k =value
# --offset -o =values
# --offset_low -m =value
# --offset_high -q =value
# --offset_incr -r =value
# --chunksize -c =values
# --chunksize_low -a =value
# --chunksize_high -b =value
# --chunksize_incr -g =value
# --regionsize -s =values
# --regionsize_low -A =value
# --regionsize_high -B =value
# --regionsize_incr -C =value
# --load -L =dmuio|ssf|fpp
# --pool -p =pool name
# --name -M =test name
# --cleanup -x
# --prerun -P =pre-command
# --postrun -R =post-command
# --log -G =log directory
# --regionnoise -I =shift
# --chunknoise -N =bytes
# --threaddelay -T =jiffies
# --verify -V
# --zerocopy -z
# --nowait -O
# --human-readable -H
# --verbose -v =increase verbosity
# --help -? =this help
ZPIOS_CMD="${ZPIOS} \
--load=dmuio \
--pool=${ZPOOL_NAME} \
--name=${ZPOOL_CONFIG} \
--threadcount=1 \
--regioncount=16 \
--regionsize=4M \
--chunksize=1M \
--offset=4M \
--cleanup \
--human-readable \
${ZPIOS_OPTIONS}"
zpios_start() {
if [ ${VERBOSE} ]; then
ZPIOS_CMD="${ZPIOS_CMD} --verbose"
echo ${ZPIOS_CMD}
fi
${ZPIOS_CMD} || exit 1
}
zpios_stop() {
[ ${VERBOSE} ] && echo
}

View File

@ -1,65 +0,0 @@
#!/bin/bash
#
# Usage: zpios
# --threadcount -t =values
# --threadcount_low -l =value
# --threadcount_high -h =value
# --threadcount_incr -e =value
# --regioncount -n =values
# --regioncount_low -i =value
# --regioncount_high -j =value
# --regioncount_incr -k =value
# --offset -o =values
# --offset_low -m =value
# --offset_high -q =value
# --offset_incr -r =value
# --chunksize -c =values
# --chunksize_low -a =value
# --chunksize_high -b =value
# --chunksize_incr -g =value
# --regionsize -s =values
# --regionsize_low -A =value
# --regionsize_high -B =value
# --regionsize_incr -C =value
# --load -L =dmuio|ssf|fpp
# --pool -p =pool name
# --name -M =test name
# --cleanup -x
# --prerun -P =pre-command
# --postrun -R =post-command
# --log -G =log directory
# --regionnoise -I =shift
# --chunknoise -N =bytes
# --threaddelay -T =jiffies
# --verify -V
# --zerocopy -z
# --nowait -O
# --human-readable -H
# --verbose -v =increase verbosity
# --help -? =this help
ZPIOS_CMD="${ZPIOS} \
--load=dmuio \
--pool=${ZPOOL_NAME} \
--name=${ZPOOL_CONFIG} \
--threadcount=1,2,4,8,16,32,64,128,256 \
--regioncount=65536 \
--regionsize=4M \
--chunksize=1M \
--offset=4M \
--cleanup \
--human-readable \
${ZPIOS_OPTIONS}"
zpios_start() {
if [ ${VERBOSE} ]; then
ZPIOS_CMD="${ZPIOS_CMD} --verbose"
echo ${ZPIOS_CMD}
fi
${ZPIOS_CMD} || exit 1
}
zpios_stop() {
[ ${VERBOSE} ] && echo
}

View File

@ -1,65 +0,0 @@
#!/bin/bash
#
# Usage: zpios
# --threadcount -t =values
# --threadcount_low -l =value
# --threadcount_high -h =value
# --threadcount_incr -e =value
# --regioncount -n =values
# --regioncount_low -i =value
# --regioncount_high -j =value
# --regioncount_incr -k =value
# --offset -o =values
# --offset_low -m =value
# --offset_high -q =value
# --offset_incr -r =value
# --chunksize -c =values
# --chunksize_low -a =value
# --chunksize_high -b =value
# --chunksize_incr -g =value
# --regionsize -s =values
# --regionsize_low -A =value
# --regionsize_high -B =value
# --regionsize_incr -C =value
# --load -L =dmuio|ssf|fpp
# --pool -p =pool name
# --name -M =test name
# --cleanup -x
# --prerun -P =pre-command
# --postrun -R =post-command
# --log -G =log directory
# --regionnoise -I =shift
# --chunknoise -N =bytes
# --threaddelay -T =jiffies
# --verify -V
# --zerocopy -z
# --nowait -O
# --human-readable -H
# --verbose -v =increase verbosity
# --help -? =this help
ZPIOS_CMD="${ZPIOS} \
--load=dmuio \
--pool=${ZPOOL_NAME} \
--name=${ZPOOL_CONFIG} \
--threadcount=256 \
--regioncount=65536 \
--regionsize=4M \
--chunksize=1M \
--offset=4M \
--cleanup \
--human-readable \
${ZPIOS_OPTIONS}"
zpios_start() {
if [ ${VERBOSE} ]; then
ZPIOS_CMD="${ZPIOS_CMD} --verbose"
echo ${ZPIOS_CMD}
fi
${ZPIOS_CMD} || exit 1
}
zpios_stop() {
[ ${VERBOSE} ] && echo
}

View File

@ -1,65 +0,0 @@
#!/bin/bash
#
# Usage: zpios
# --threadcount -t =values
# --threadcount_low -l =value
# --threadcount_high -h =value
# --threadcount_incr -e =value
# --regioncount -n =values
# --regioncount_low -i =value
# --regioncount_high -j =value
# --regioncount_incr -k =value
# --offset -o =values
# --offset_low -m =value
# --offset_high -q =value
# --offset_incr -r =value
# --chunksize -c =values
# --chunksize_low -a =value
# --chunksize_high -b =value
# --chunksize_incr -g =value
# --regionsize -s =values
# --regionsize_low -A =value
# --regionsize_high -B =value
# --regionsize_incr -C =value
# --load -L =dmuio|ssf|fpp
# --pool -p =pool name
# --name -M =test name
# --cleanup -x
# --prerun -P =pre-command
# --postrun -R =post-command
# --log -G =log directory
# --regionnoise -I =shift
# --chunknoise -N =bytes
# --threaddelay -T =jiffies
# --verify -V
# --zerocopy -z
# --nowait -O
# --human-readable -H
# --verbose -v =increase verbosity
# --help -? =this help
ZPIOS_CMD="${ZPIOS} \
--load=dmuio \
--pool=${ZPOOL_NAME} \
--name=${ZPOOL_CONFIG} \
--threadcount=4 \
--regioncount=1024 \
--regionsize=4M \
--chunksize=1M \
--offset=4M \
--cleanup \
--human-readable \
${ZPIOS_OPTIONS}"
zpios_start() {
if [ ${VERBOSE} ]; then
ZPIOS_CMD="${ZPIOS_CMD} --verbose"
echo ${ZPIOS_CMD}
fi
${ZPIOS_CMD} || exit 1
}
zpios_stop() {
[ ${VERBOSE} ] && echo
}

View File

@ -1,13 +0,0 @@
pkgdatatestdir = $(pkgdatadir)/zpios-test
dist_pkgdatatest_SCRIPTS = \
$(top_srcdir)/scripts/zpios-test/16th-8192rc-4rs-1cs-4off.sh \
$(top_srcdir)/scripts/zpios-test/1th-16rc-4rs-1cs-4off.sh \
$(top_srcdir)/scripts/zpios-test/1x256th-65536rc-4rs-1cs-4off.sh \
$(top_srcdir)/scripts/zpios-test/256th-65536rc-4rs-1cs-4off.sh \
$(top_srcdir)/scripts/zpios-test/4th-1024rc-4rs-1cs-4off.sh \
$(top_srcdir)/scripts/zpios-test/large.sh \
$(top_srcdir)/scripts/zpios-test/large-thread-survey.sh \
$(top_srcdir)/scripts/zpios-test/medium.sh \
$(top_srcdir)/scripts/zpios-test/small.sh \
$(top_srcdir)/scripts/zpios-test/tiny.sh \
$(top_srcdir)/scripts/zpios-test/lustre.sh

View File

@ -1 +0,0 @@
1x256th-65536rc-4rs-1cs-4off.sh

View File

@ -1 +0,0 @@
256th-65536rc-4rs-1cs-4off.sh

View File

@ -1,66 +0,0 @@
#!/bin/bash
#
# Usage: zpios
# --threadcount -t =values
# --threadcount_low -l =value
# --threadcount_high -h =value
# --threadcount_incr -e =value
# --regioncount -n =values
# --regioncount_low -i =value
# --regioncount_high -j =value
# --regioncount_incr -k =value
# --offset -o =values
# --offset_low -m =value
# --offset_high -q =value
# --offset_incr -r =value
# --chunksize -c =values
# --chunksize_low -a =value
# --chunksize_high -b =value
# --chunksize_incr -g =value
# --regionsize -s =values
# --regionsize_low -A =value
# --regionsize_high -B =value
# --regionsize_incr -C =value
# --load -L =dmuio|ssf|fpp
# --pool -p =pool name
# --name -M =test name
# --cleanup -x
# --prerun -P =pre-command
# --postrun -R =post-command
# --log -G =log directory
# --regionnoise -I =shift
# --chunknoise -N =bytes
# --threaddelay -T =jiffies
# --verify -V
# --zerocopy -z
# --nowait -O
# --human-readable -H
# --verbose -v =increase verbosity
# --help -? =this help
ZPIOS_CMD="${ZPIOS} \
--load=dmuio,fpp \
--pool=${ZPOOL_NAME} \
--name=${ZPOOL_DESC} \
--threadcount=128 \
--regioncount=4096 \
--regionsize=16M \
--chunksize=1M \
--offset=0M \
--threaddelay=0 \
--cleanup \
--human-readable \
${ZPIOS_OPTIONS}"
zpios_start() {
if [ ${VERBOSE} ]; then
ZPIOS_CMD="${ZPIOS_CMD} --verbose"
echo ${ZPIOS_CMD}
fi
${ZPIOS_CMD} || exit 1
}
zpios_stop() {
[ ${VERBOSE} ] && echo
}

View File

@ -1 +0,0 @@
16th-8192rc-4rs-1cs-4off.sh

View File

@ -1 +0,0 @@
4th-1024rc-4rs-1cs-4off.sh

View File

@ -1 +0,0 @@
1th-16rc-4rs-1cs-4off.sh

View File

@ -1,281 +0,0 @@
#!/bin/bash
#
# Wrapper script for easily running zpios based tests
#
basedir="$(dirname $0)"
SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
. "${basedir}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
PROG=zpios.sh
DATE=`date +%Y%m%d-%H%M%S`
if [ "${ZPIOS_MODULES}" ]; then
MODULES=(${ZPIOS_MODULES[*]})
else
MODULES=(zpios)
fi
usage() {
cat << EOF
USAGE:
$0 [hvp] <-c config> <-t test>
DESCRIPTION:
Helper script for easy zpios benchmarking.
OPTIONS:
-h Show this message
-v Verbose
-f Force everything
-p Enable profiling
-c Zpool configuration
-t Zpios test
-o Additional zpios options
-l Additional zpool options
-s Additional zfs options
EOF
}
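# Illustrative invocation, not part of the original script; the config
# and test names are examples from scripts/zpool-config and
# scripts/zpios-test:
#
#   ./zpios.sh -v -c lo-raidz -t medium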
unload_die() {
unload_modules
while [ -c /dev/zpios ]; do
sleep 1
done
exit 1
}
print_header() {
echo --------------------- ZPIOS RESULTS ----------------------------
echo -n "Date: "; date
echo -n "Kernel: "; uname -r
dmesg | grep "Loaded Solaris Porting Layer" | tail -n1
dmesg | grep "Loaded ZFS Filesystem" | tail -n1
echo
}
print_spl_info() {
echo --------------------- SPL Tunings ------------------------------
${SYSCTL} -A | grep spl
if [ -d /sys/module/spl/parameters ]; then
grep [0-9] /sys/module/spl/parameters/*
else
grep [0-9] /sys/module/spl/*
fi
echo
}
print_zfs_info() {
echo --------------------- ZFS Tunings ------------------------------
${SYSCTL} -A | grep zfs
if [ -d /sys/module/zfs/parameters ]; then
grep [0-9] /sys/module/zfs/parameters/*
else
grep [0-9] /sys/module/zfs/*
fi
echo
}
print_stats() {
echo ---------------------- Statistics -------------------------------
${SYSCTL} -A | grep spl | grep stack_max
if [ -d /proc/spl/kstat/ ]; then
if [ -f /proc/spl/kstat/zfs/arcstats ]; then
echo "* ARC"
cat /proc/spl/kstat/zfs/arcstats
echo
fi
if [ -f /proc/spl/kstat/zfs/vdev_cache_stats ]; then
echo "* VDEV Cache"
cat /proc/spl/kstat/zfs/vdev_cache_stats
echo
fi
fi
if [ -f /proc/spl/kmem/slab ]; then
echo "* SPL SLAB"
cat /proc/spl/kmem/slab
echo
fi
echo
}
check_test() {
if [ ! -f ${ZPIOS_TEST} ]; then
local NAME=`basename ${ZPIOS_TEST} .sh`
ERROR="Unknown test '${NAME}', available tests are:\n"
for TST in `ls ${ZPIOSDIR}/ | grep ".sh"`; do
local NAME=`basename ${TST} .sh`
ERROR="${ERROR}${NAME}\n"
done
return 1
fi
return 0
}
zpios_profile_config() {
cat > ${PROFILE_DIR}/zpios-config.sh << EOF
#
# Zpios Profiling Configuration
#
PROFILE_DIR=/tmp/zpios/${ZPOOL_CONFIG}+${ZPIOS_TEST_ARG}+${DATE}
PROFILE_PRE=${ZPIOSPROFILEDIR}/zpios-profile-pre.sh
PROFILE_POST=${ZPIOSPROFILEDIR}/zpios-profile-post.sh
PROFILE_USER=${ZPIOSPROFILEDIR}/zpios-profile.sh
PROFILE_PIDS=${ZPIOSPROFILEDIR}/zpios-profile-pids.sh
PROFILE_DISK=${ZPIOSPROFILEDIR}/zpios-profile-disk.sh
PROFILE_ARC_PROC=/proc/spl/kstat/zfs/arcstats
PROFILE_VDEV_CACHE_PROC=/proc/spl/kstat/zfs/vdev_cache_stats
OPROFILE_KERNEL="/boot/vmlinux-`uname -r`"
OPROFILE_KERNEL_DIR="/lib/modules/`uname -r`/kernel/"
OPROFILE_SPL_DIR=${SPLBUILD}/module/
OPROFILE_ZFS_DIR=${MODDIR}
EOF
}
zpios_profile_start() {
PROFILE_DIR=/tmp/zpios/${ZPOOL_CONFIG}+${ZPIOS_TEST_ARG}+${DATE}
mkdir -p ${PROFILE_DIR}
zpios_profile_config
. ${PROFILE_DIR}/zpios-config.sh
ZPIOS_OPTIONS="${ZPIOS_OPTIONS} --log=${PROFILE_DIR}"
ZPIOS_OPTIONS="${ZPIOS_OPTIONS} --prerun=${PROFILE_PRE}"
ZPIOS_OPTIONS="${ZPIOS_OPTIONS} --postrun=${PROFILE_POST}"
/usr/bin/opcontrol --init
/usr/bin/opcontrol --setup --vmlinux=${OPROFILE_KERNEL}
}
zpios_profile_stop() {
/usr/bin/opcontrol --shutdown
/usr/bin/opcontrol --deinit
}
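# For reference, an illustrative summary of the complete legacy OProfile
# sequence driven by this script together with the pre/post hooks; it is
# not executed from here:
#
#   opcontrol --init
#   opcontrol --setup --vmlinux=/boot/vmlinux-$(uname -r)
#   opcontrol --reset && opcontrol --start        # per test phase (pre hook)
#   opcontrol --stop && opcontrol --dump          # per test phase (post hook)
#   opreport -a -g -l -p <kernel>,<spl>,<zfs>     # per test phase (post hook)
#   opcontrol --shutdown && opcontrol --deinit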
PROFILE=
ZPOOL_CONFIG=zpool-config.sh
ZPIOS_TEST=zpios-test.sh
ZPOOL_NAME=zpios
ZPIOS_OPTIONS=
ZPOOL_OPTIONS=""
ZFS_OPTIONS=""
while getopts 'hvfpc:t:o:l:s:' OPTION; do
case $OPTION in
h)
usage
exit 1
;;
v)
VERBOSE=1
VERBOSE_FLAG="-v"
;;
f)
FORCE=1
FORCE_FLAG="-f"
;;
p)
PROFILE=1
;;
c)
ZPOOL_CONFIG=${OPTARG}
;;
t)
ZPIOS_TEST_ARG=${OPTARG}
ZPIOS_TEST=${ZPIOSDIR}/${OPTARG}.sh
;;
o)
ZPIOS_OPTIONS=${OPTARG}
;;
l) # Passed through to zpool-create.sh
ZPOOL_OPTIONS=${OPTARG}
;;
s) # Passed through to zpool-create.sh
ZFS_OPTIONS=${OPTARG}
;;
?)
usage
exit
;;
esac
done
if [ $(id -u) != 0 ]; then
die "Must run as root"
fi
# Validate and source your test config
check_test || die "${ERROR}"
. ${ZPIOS_TEST}
# Pull in the zpios test module if not loaded. If this fails, it is
# likely because the full module stack was not yet loaded with zfs.sh
if check_modules; then
if ! load_modules; then
die "Run 'zfs.sh' to ensure the full module stack is loaded"
fi
fi
# Wait for device creation
while [ ! -c /dev/zpios ]; do
sleep 1
done
if [ ${VERBOSE} ]; then
print_header
print_spl_info
print_zfs_info
fi
# Create the zpool configuration
${ZPOOL_CREATE_SH} ${VERBOSE_FLAG} ${FORCE_FLAG} \
-p ${ZPOOL_NAME} -c ${ZPOOL_CONFIG} \
-l "${ZPOOL_OPTIONS}" -s "${ZFS_OPTIONS}" || unload_die
if [ ${PROFILE} ]; then
zpios_profile_start
fi
zpios_start
zpios_stop
if [ ${PROFILE} ]; then
zpios_profile_stop
fi
if [ ${VERBOSE} ]; then
print_stats
fi
# Destroy the zpool configuration
${ZPOOL_CREATE_SH} ${VERBOSE_FLAG} ${FORCE_FLAG} \
-p ${ZPOOL_NAME} -c ${ZPOOL_CONFIG} -d || unload_die
# Unload the test module stack and wait for device removal
unload_modules
while [ -c /dev/zpios ]; do
sleep 1
done
exit 0

View File

@ -1,30 +0,0 @@
pkgdataconfigdir = $(pkgdatadir)/zpool-config
dist_pkgdataconfig_SCRIPTS = \
$(top_srcdir)/scripts/zpool-config/dm0-raid0.sh \
$(top_srcdir)/scripts/zpool-config/file-raid0.sh \
$(top_srcdir)/scripts/zpool-config/file-raid10.sh \
$(top_srcdir)/scripts/zpool-config/file-raidz2.sh \
$(top_srcdir)/scripts/zpool-config/file-raidz.sh \
$(top_srcdir)/scripts/zpool-config/hda-raid0.sh \
$(top_srcdir)/scripts/zpool-config/lo-raid0.sh \
$(top_srcdir)/scripts/zpool-config/lo-raid10.sh \
$(top_srcdir)/scripts/zpool-config/lo-raidz2.sh \
$(top_srcdir)/scripts/zpool-config/lo-raidz.sh \
$(top_srcdir)/scripts/zpool-config/lo-faulty-raid0.sh \
$(top_srcdir)/scripts/zpool-config/lo-faulty-raid10.sh \
$(top_srcdir)/scripts/zpool-config/lo-faulty-raidz.sh \
$(top_srcdir)/scripts/zpool-config/lo-faulty-raidz2.sh \
$(top_srcdir)/scripts/zpool-config/lo-faulty-raidz3.sh \
$(top_srcdir)/scripts/zpool-config/md0-raid10.sh \
$(top_srcdir)/scripts/zpool-config/md0-raid5.sh \
$(top_srcdir)/scripts/zpool-config/ram0-raid0.sh \
$(top_srcdir)/scripts/zpool-config/scsi_debug-noraid.sh \
$(top_srcdir)/scripts/zpool-config/scsi_debug-raid0.sh \
$(top_srcdir)/scripts/zpool-config/scsi_debug-raid10.sh \
$(top_srcdir)/scripts/zpool-config/scsi_debug-raidz.sh \
$(top_srcdir)/scripts/zpool-config/scsi_debug-raidz2.sh \
$(top_srcdir)/scripts/zpool-config/scsi_debug-raidz3.sh \
$(top_srcdir)/scripts/zpool-config/sda-raid0.sh \
$(top_srcdir)/scripts/zpool-config/zpool-raid0.sh \
$(top_srcdir)/scripts/zpool-config/zpool-raid10.sh \
$(top_srcdir)/scripts/zpool-config/zpool-raidz.sh

View File

@ -1,60 +0,0 @@
#!/bin/bash
#
# Four disk Raid-0 DM in a single Raid-0 Configuration
#
PVCREATE=${PVCREATE:-/sbin/pvcreate}
PVREMOVE=${PVREMOVE:-/sbin/pvremove}
PVDEVICES=${PVDEVICES:-"/dev/sd[abcd]"}
VGCREATE=${VGCREATE:-/sbin/vgcreate}
VGREMOVE=${VGREMOVE:-/sbin/vgremove}
VGNAME=${VGNAME:-"vg_tank"}
LVCREATE=${LVCREATE:-/sbin/lvcreate}
LVREMOVE=${LVREMOVE:-/sbin/lvremove}
LVNAME=${LVNAME:-"lv_tank"}
LVSTRIPES=${LVSTRIPES:-4}
LVSIZE=${LVSIZE:-32G}
DEVICES="/dev/${VGNAME}/${LVNAME}"
zpool_dm_destroy() {
msg ${LVREMOVE} -f ${VGNAME}/${LVNAME}
${LVREMOVE} -f ${VGNAME}/${LVNAME} >/dev/null
msg ${VGREMOVE} -f ${VGNAME}
${VGREMOVE} -f ${VGNAME} >/dev/null
msg ${PVREMOVE} ${PVDEVICES}
${PVREMOVE} ${PVDEVICES} >/dev/null
}
zpool_create() {
# Remove EFI labels which cause pvcreate failure
for DEVICE in ${PVDEVICES}; do
dd if=/dev/urandom of=${DEVICE} bs=1k count=32 &>/dev/null
done
msg ${PVCREATE} -f ${PVDEVICES}
${PVCREATE} -f ${PVDEVICES} >/dev/null || exit 1
msg ${VGCREATE} ${VGNAME} ${PVDEVICES}
${VGCREATE} ${VGNAME} ${PVDEVICES} >/dev/null || exit 2
msg ${LVCREATE} --size=${LVSIZE} --stripes=${LVSTRIPES} \
--name=${LVNAME} ${VGNAME}
${LVCREATE} --size=${LVSIZE} --stripes=${LVSTRIPES} \
--name=${LVNAME} ${VGNAME} >/dev/null || exit 3
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${DEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} \
${DEVICES} || (zpool_dm_destroy && exit 4)
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
zpool_dm_destroy
}

View File

@ -1,30 +0,0 @@
#!/bin/bash
#
# 4 File Raid-0 Configuration
#
FILEDIR=${FILEDIR:-/var/tmp}
FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 \
$FILEDIR/file-vdev2 $FILEDIR/file-vdev3"}
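# Note added for clarity: the dd invocations below use "bs=1024k count=0
# seek=256", which truncates each backing file to 256 MiB without writing
# any data, so the vdev files start out sparse.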
zpool_create() {
for FILE in ${FILES}; do
msg "Creating ${FILE}"
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
&>/dev/null || die "Error $? creating ${FILE}"
done
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${FILES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${FILES} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
for FILE in ${FILES}; do
msg "Removing ${FILE}"
rm -f ${FILE} || exit 1
done
}

View File

@ -1,33 +0,0 @@
#!/bin/bash
#
# 4 File Raid-10 Configuration
#
FILEDIR=${FILEDIR:-/var/tmp}
FILES_M1=${FILES_M1:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1"}
FILES_M2=${FILES_M2:-"$FILEDIR/file-vdev2 $FILEDIR/file-vdev3"}
FILES="${FILES_M1} ${FILES_M2}"
zpool_create() {
for FILE in ${FILES}; do
msg "Creating ${FILE}"
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
&>/dev/null || die "Error $? creating ${FILE}"
done
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} \
mirror ${FILES_M1} mirror ${FILES_M2}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} \
mirror ${FILES_M1} mirror ${FILES_M2} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
for FILE in ${FILES}; do
msg "Removing ${FILE}"
rm -f ${FILE} || exit 1
done
}

View File

@ -1,30 +0,0 @@
#!/bin/bash
#
# 4 File Raid-Z Configuration
#
FILEDIR=${FILEDIR:-/var/tmp}
FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 \
$FILEDIR/file-vdev2 $FILEDIR/file-vdev3"}
zpool_create() {
for FILE in ${FILES}; do
msg "Creating ${FILE}"
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
&>/dev/null || die "Error $? creating ${FILE}"
done
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz ${FILES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz ${FILES} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
for FILE in ${FILES}; do
msg "Removing ${FILE}"
rm -f ${FILE} || exit 1
done
}

View File

@ -1,30 +0,0 @@
#!/bin/bash
#
# 4 File Raid-Z2 Configuration
#
FILEDIR=${FILEDIR:-/var/tmp}
FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 \
$FILEDIR/file-vdev2 $FILEDIR/file-vdev3"}
zpool_create() {
for FILE in ${FILES}; do
msg "Creating ${FILE}"
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
&>/dev/null || die "Error $? creating ${FILE}"
done
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz2 ${FILES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz2 ${FILES} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
for FILE in ${FILES}; do
msg "Removing ${FILE}"
rm -f ${FILE} || exit 1
done
}

View File

@ -1,16 +0,0 @@
#!/bin/bash
#
# Single disk /dev/hda Raid-0 Configuration
#
DEVICES="/dev/hda"
zpool_create() {
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${DEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${DEVICES} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME} || exit 1
}

View File

@ -1,77 +0,0 @@
#!/bin/bash
#
# 4 loopback devices using the md faulty level for easy
# fault injection on top of which is layered raid0 (striped).
#
# zpool-vdev0 zpool-vdev1 zpool-vdev2 zpool-vdev3
# loop0 loop1 loop2 loop3
# md0 (faulty) md1 (faulty) md2 (faulty) md3 (faulty)
# <--------------------- raid0 zpool --------------------->
#
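# Note added for clarity: the md "faulty" personality is used here so that
# read/write errors can later be injected into a vdev via mdadm without
# touching the loopback file underneath it.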
FILEDIR=${FILEDIR:-/var/tmp}
FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 \
$FILEDIR/file-vdev2 $FILEDIR/file-vdev3"}
LODEVICES=""
MDDEVICES=""
zpool_create() {
check_loop_utils
check_md_utils
check_md_partitionable || die "Error non-partitionable md devices"
for FILE in ${FILES}; do
LODEVICE=`unused_loop_device`
MDDEVICE=`unused_md_device`
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1M count=0 seek=256 \
&>/dev/null || die "Error $? creating ${FILE}"
# Setup the loopback device on the file.
msg "Creating ${LODEVICE} using ${FILE}"
${LOSETUP} ${LODEVICE} ${FILE} || \
die "Error $? creating ${LODEVICE} using ${FILE}"
LODEVICES="${LODEVICES} ${LODEVICE}"
# Setup the md device on the loopback device.
msg "Creating ${MDDEVICE} using ${LODEVICE}"
${MDADM} --build ${MDDEVICE} --level=faulty \
--raid-devices=1 ${LODEVICE} &>/dev/null || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && \
die "Error $? creating ${MDDEVICE} using ${LODEVICE}")
wait_udev ${MDDEVICE} 30 || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && \
die "Error udev never created ${MDDEVICE}")
# Check if the md device support partitions
${BLOCKDEV} --rereadpt ${MDDEVICE} 2>/dev/null || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && \
die "Error ${MDDEVICE} does not support partitions")
# Create a GPT/EFI partition table for ZFS to use.
${PARTED} --script ${MDDEVICE} mklabel gpt
MDDEVICES="${MDDEVICES} ${MDDEVICE}"
done
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${MDDEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${MDDEVICES} || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && exit 1)
echo "$LODEVICES" >/tmp/zpool-lo.txt
echo "$MDDEVICES" >/tmp/zpool-md.txt
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
destroy_md_devices "`cat /tmp/zpool-md.txt`"
destroy_loop_devices "`cat /tmp/zpool-lo.txt`"
rm -f /tmp/zpool-md.txt /tmp/zpool-lo.txt
}

View File

@ -1,84 +0,0 @@
#!/bin/bash
#
# 4 loopback devices using the md faulty level for easy
# fault injection on top of which is layered raid10 (mirrored).
#
# zpool-vdev0 zpool-vdev1 zpool-vdev2 zpool-vdev3
# loop0 loop1 loop2 loop3
# md0 (faulty) md1 (faulty) md2 (faulty) md3 (faulty)
# <--------------------- raid10 zpool --------------------->
#
FILEDIR=${FILEDIR:-/var/tmp}
FILES_M1=${FILES_M1:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1"}
FILES_M2=${FILES_M2:-"$FILEDIR/file-vdev2 $FILEDIR/file-vdev3"}
FILES="${FILES_M1} ${FILES_M2}"
LODEVICES=""
MDDEVICES=""
MDDEVICES_M1=""
MDDEVICES_M2=""
zpool_create() {
local COUNT=0
check_loop_utils
check_md_utils
check_md_partitionable || die "Error non-partitionable md devices"
for FILE in ${FILES}; do
LODEVICE=`unused_loop_device`
MDDEVICE=`unused_md_device`
let COUNT=${COUNT}+1
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1M count=0 seek=256 \
&>/dev/null || die "Error $? creating ${FILE}"
# Setup the loopback device on the file.
msg "Creating ${LODEVICE} using ${FILE}"
${LOSETUP} ${LODEVICE} ${FILE} || \
die "Error $? creating ${LODEVICE} using ${FILE}"
LODEVICES="${LODEVICES} ${LODEVICE}"
# Setup the md device on the loopback device.
msg "Creating ${MDDEVICE} using ${LODEVICE}"
${MDADM} --build ${MDDEVICE} --level=faulty \
--raid-devices=1 ${LODEVICE} &>/dev/null || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && \
die "Error $? creating ${MDDEVICE} using ${LODEVICE}")
wait_udev ${MDDEVICE} 30 || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && \
die "Error udev never created ${MDDEVICE}")
# Create empty GPT/EFI partition table.
${PARTED} --script ${MDDEVICE} mklabel gpt
MDDEVICES="${MDDEVICES} ${MDDEVICE}"
if [ $((COUNT % 2)) -eq 0 ]; then
MDDEVICES_M2="${MDDEVICES_M2} ${MDDEVICE}"
else
MDDEVICES_M1="${MDDEVICES_M1} ${MDDEVICE}"
fi
done
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} \
mirror ${MDDEVICES_M1} mirror ${MDDEVICES_M2}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} \
mirror ${MDDEVICES_M1} mirror ${MDDEVICES_M2} || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && exit 1)
echo "$LODEVICES" >/tmp/zpool-lo.txt
echo "$MDDEVICES" >/tmp/zpool-md.txt
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
destroy_md_devices "`cat /tmp/zpool-md.txt`"
destroy_loop_devices "`cat /tmp/zpool-lo.txt`"
rm -f /tmp/zpool-md.txt /tmp/zpool-lo.txt
}

View File

@ -1,71 +0,0 @@
#!/bin/bash
#
# 4 loopback devices using the md faulty level for easy
# fault injection on top of which is layered raidz.
#
# zpool-vdev0 zpool-vdev1 zpool-vdev2 zpool-vdev3
# loop0 loop1 loop2 loop3
# md0 (faulty) md1 (faulty) md2 (faulty) md3 (faulty)
# <--------------------- raidz zpool --------------------->
#
FILEDIR=${FILEDIR:-/var/tmp}
FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 \
$FILEDIR/file-vdev2 $FILEDIR/file-vdev3"}
LODEVICES=""
MDDEVICES=""
zpool_create() {
check_loop_utils
check_md_utils
check_md_partitionable || die "Error non-partitionable md devices"
for FILE in ${FILES}; do
LODEVICE=`unused_loop_device`
MDDEVICE=`unused_md_device`
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1M count=0 seek=256 \
&>/dev/null || die "Error $? creating ${FILE}"
# Setup the loopback device on the file.
msg "Creating ${LODEVICE} using ${FILE}"
${LOSETUP} ${LODEVICE} ${FILE} || \
die "Error $? creating ${LODEVICE} using ${FILE}"
LODEVICES="${LODEVICES} ${LODEVICE}"
# Setup the md device on the loopback device.
msg "Creating ${MDDEVICE} using ${LODEVICE}"
${MDADM} --build ${MDDEVICE} --level=faulty \
--raid-devices=1 ${LODEVICE} &>/dev/null || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && \
die "Error $? creating ${MDDEVICE} using ${LODEVICE}")
wait_udev ${MDDEVICE} 30 || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && \
die "Error udev never created ${MDDEVICE}")
# Create empty GPT/EFI partition table.
${PARTED} --script ${MDDEVICE} mklabel gpt
MDDEVICES="${MDDEVICES} ${MDDEVICE}"
done
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz ${MDDEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz ${MDDEVICES} || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && exit 1)
echo "$LODEVICES" >/tmp/zpool-lo.txt
echo "$MDDEVICES" >/tmp/zpool-md.txt
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
destroy_md_devices "`cat /tmp/zpool-md.txt`"
destroy_loop_devices "`cat /tmp/zpool-lo.txt`"
rm -f /tmp/zpool-md.txt /tmp/zpool-lo.txt
}

View File

@ -1,71 +0,0 @@
#!/bin/bash
#
# 4 loopback devices using the md faulty level for easy
# fault injection on top of which is layered raidz2.
#
# zpool-vdev0 zpool-vdev1 zpool-vdev2 zpool-vdev3
# loop0 loop1 loop2 loop3
# md0 (faulty) md1 (faulty) md2 (faulty) md3 (faulty)
# <--------------------- raidz2 zpool -------------------->
#
FILEDIR=${FILEDIR:-/var/tmp}
FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 \
$FILEDIR/file-vdev2 $FILEDIR/file-vdev3"}
LODEVICES=""
MDDEVICES=""
zpool_create() {
check_loop_utils
check_md_utils
check_md_partitionable || die "Error non-partitionable md devices"
for FILE in ${FILES}; do
LODEVICE=`unused_loop_device`
MDDEVICE=`unused_md_device`
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1M count=0 seek=256 \
&>/dev/null || die "Error $? creating ${FILE}"
# Setup the loopback device on the file.
msg "Creating ${LODEVICE} using ${FILE}"
${LOSETUP} ${LODEVICE} ${FILE} || \
die "Error $? creating ${LODEVICE} using ${FILE}"
LODEVICES="${LODEVICES} ${LODEVICE}"
# Setup the md device on the loopback device.
msg "Creating ${MDDEVICE} using ${LODEVICE}"
${MDADM} --build ${MDDEVICE} --level=faulty \
--raid-devices=1 ${LODEVICE} &>/dev/null || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && \
die "Error $? creating ${MDDEVICE} using ${LODEVICE}")
wait_udev ${MDDEVICE} 30 || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && \
die "Error udev never created ${MDDEVICE}")
# Create empty GPT/EFI partition table.
${PARTED} --script ${MDDEVICE} mklabel gpt
MDDEVICES="${MDDEVICES} ${MDDEVICE}"
done
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz2 ${MDDEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz2 ${MDDEVICES} || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && exit 1)
echo "$LODEVICES" >/tmp/zpool-lo.txt
echo "$MDDEVICES" >/tmp/zpool-md.txt
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
destroy_md_devices "`cat /tmp/zpool-md.txt`"
destroy_loop_devices "`cat /tmp/zpool-lo.txt`"
rm -f /tmp/zpool-md.txt /tmp/zpool-lo.txt
}

View File

@ -1,72 +0,0 @@
#!/bin/bash
#
# 4 loopback devices using the md faulty level for easy
# fault injection on top of which is layered raidz3.
#
# zpool-vdev0 zpool-vdev1 zpool-vdev2 zpool-vdev3
# loop0 loop1 loop2 loop3
# md0 (faulty) md1 (faulty) md2 (faulty) md3 (faulty)
# <--------------------- raidz3 zpool -------------------->
#
FILES="/tmp/zpool-vdev0 \
/tmp/zpool-vdev1 \
/tmp/zpool-vdev2 \
/tmp/zpool-vdev3"
LODEVICES=""
MDDEVICES=""
zpool_create() {
check_loop_utils
check_md_utils
check_md_partitionable || die "Error non-partitionable md devices"
for FILE in ${FILES}; do
LODEVICE=`unused_loop_device`
MDDEVICE=`unused_md_device`
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1M count=0 seek=256 \
&>/dev/null || die "Error $? creating ${FILE}"
# Setup the loopback device on the file.
msg "Creating ${LODEVICE} using ${FILE}"
${LOSETUP} ${LODEVICE} ${FILE} || \
die "Error $? creating ${LODEVICE} using ${FILE}"
LODEVICES="${LODEVICES} ${LODEVICE}"
# Setup the md device on the loopback device.
msg "Creating ${MDDEVICE} using ${LODEVICE}"
${MDADM} --build ${MDDEVICE} --level=faulty \
--raid-devices=1 ${LODEVICE} &>/dev/null || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && \
die "Error $? creating ${MDDEVICE} using ${LODEVICE}")
wait_udev ${MDDEVICE} 30 || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && \
die "Error udev never created ${MDDEVICE}")
# Create empty GPT/EFI partition table.
${PARTED} --script ${MDDEVICE} mklabel gpt
MDDEVICES="${MDDEVICES} ${MDDEVICE}"
done
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz3 ${MDDEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz3 ${MDDEVICES} || \
(destroy_md_devices "${MDDEVICES}" && \
destroy_loop_devices "${LODEVICES}" && exit 1)
echo "$LODEVICES" >/tmp/zpool-lo.txt
echo "$MDDEVICES" >/tmp/zpool-md.txt
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
destroy_md_devices "`cat /tmp/zpool-md.txt`"
destroy_loop_devices "`cat /tmp/zpool-lo.txt`"
rm -f /tmp/zpool-md.txt /tmp/zpool-lo.txt
}

View File

@ -1,43 +0,0 @@
#!/bin/bash
#
# 4 Device Loopback Raid-0 Configuration
#
FILEDIR=${FILEDIR:-/var/tmp}
FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 \
$FILEDIR/file-vdev2 $FILEDIR/file-vdev3"}
DEVICES=""
zpool_create() {
check_loop_utils
for FILE in ${FILES}; do
DEVICE=`unused_loop_device`
msg "Creating ${FILE} using loopback device ${DEVICE}"
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
&>/dev/null || die "Error $? creating ${FILE}"
${LOSETUP} ${DEVICE} ${FILE} ||
die "Error $? creating ${FILE} -> ${DEVICE} loopback"
DEVICES="${DEVICES} ${DEVICE}"
done
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${DEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${DEVICES} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
# Delay to ensure device is closed before removing loop device
sleep 1
for FILE in ${FILES}; do
DEVICE=`${LOSETUP} -a | grep ${FILE} | head -n1|cut -f1 -d:`
msg "Removing ${FILE} using loopback device ${DEVICE}"
${LOSETUP} -d ${DEVICE} ||
die "Error $? destroying ${FILE} -> ${DEVICE} loopback"
rm -f ${FILE} || exit 1
done
}

View File

@ -1,58 +0,0 @@
#!/bin/bash
#
# 4 Device Loopback Raid-10 (Mirrored) Configuration
#
FILEDIR=${FILEDIR:-/var/tmp}
FILES_M1=${FILES_M1:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1"}
FILES_M2=${FILES_M2:-"$FILEDIR/file-vdev2 $FILEDIR/file-vdev3"}
FILES="${FILES_M1} ${FILES_M2}"
DEVICES_M1=""
DEVICES_M2=""
zpool_create() {
check_loop_utils
for FILE in ${FILES_M1}; do
DEVICE=`unused_loop_device`
msg "Creating ${FILE} using loopback device ${DEVICE}"
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
&>/dev/null || die "Error $? creating ${FILE}"
${LOSETUP} ${DEVICE} ${FILE} ||
die "Error $? creating ${FILE} -> ${DEVICE} loopback"
DEVICES_M1="${DEVICES_M1} ${DEVICE}"
done
for FILE in ${FILES_M2}; do
DEVICE=`unused_loop_device`
msg "Creating ${FILE} using loopback device ${DEVICE}"
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
&>/dev/null || die "Error $? creating ${FILE}"
${LOSETUP} ${DEVICE} ${FILE} ||
die "Error $? creating ${FILE} -> ${DEVICE} loopback"
DEVICES_M2="${DEVICES_M2} ${DEVICE}"
done
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} \
mirror ${DEVICES_M1} mirror ${DEVICES_M2}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} \
mirror ${DEVICES_M1} mirror ${DEVICES_M2}
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
# Delay to ensure device is closed before removing loop device
sleep 1
for FILE in ${FILES}; do
DEVICE=`${LOSETUP} -a | grep ${FILE} | head -n1|cut -f1 -d:`
msg "Removing ${FILE} using loopback device ${DEVICE}"
${LOSETUP} -d ${DEVICE} ||
die "Error $? destroying ${FILE} -> ${DEVICE} loopback"
rm -f ${FILE} || exit 1
done
}

View File

@ -1,42 +0,0 @@
#!/bin/bash
#
# 4 Device Loopback Raid-Z Configuration
#
FILEDIR=${FILEDIR:-/var/tmp}
FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 \
$FILEDIR/file-vdev2 $FILEDIR/file-vdev3"}
DEVICES=""
zpool_create() {
check_loop_utils
for FILE in ${FILES}; do
DEVICE=`unused_loop_device`
msg "Creating ${FILE} using loopback device ${DEVICE}"
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
&>/dev/null || die "Error $? creating ${FILE}"
${LOSETUP} ${DEVICE} ${FILE} ||
die "Error $? creating ${FILE} -> ${DEVICE} loopback"
DEVICES="${DEVICES} ${DEVICE}"
done
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz ${DEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz ${DEVICES} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
# Delay to ensure device is closed before removing loop device
sleep 1
for FILE in ${FILES}; do
DEVICE=`${LOSETUP} -a | grep ${FILE} | head -n1|cut -f1 -d:`
msg "Removing ${FILE} using loopback device ${DEVICE}"
${LOSETUP} -d ${DEVICE} ||
die "Error $? destroying ${FILE} -> ${DEVICE} loopback"
rm -f ${FILE} || exit 1
done
}

View File

@ -1,43 +0,0 @@
#!/bin/bash
#
# 4 Device Loopback Raid-Z2 Configuration
#
FILEDIR=${FILEDIR:-/var/tmp}
FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 \
$FILEDIR/file-vdev2 $FILEDIR/file-vdev3"}
DEVICES=""
zpool_create() {
check_loop_utils
for FILE in ${FILES}; do
DEVICE=`unused_loop_device`
msg "Creating ${FILE} using loopback device ${DEVICE}"
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
&>/dev/null || die "Error $? creating ${FILE}"
${LOSETUP} ${DEVICE} ${FILE} ||
die "Error $? creating ${FILE} -> ${DEVICE} loopback"
DEVICES="${DEVICES} ${DEVICE}"
done
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz2 ${DEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz2 ${DEVICES} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
# Delay to ensure device is closed before removing loop device
sleep 1
for FILE in ${FILES}; do
DEVICE=`${LOSETUP} -a | grep ${FILE} | head -n1|cut -f1 -d:`
msg "Removing ${FILE} using loopback device ${DEVICE}"
${LOSETUP} -d ${DEVICE} ||
die "Error $? destroying ${FILE} -> ${DEVICE} loopback"
rm -f ${FILE} || exit 1
done
}

View File

@ -1,38 +0,0 @@
#!/bin/bash
#
# Four disk Raid-10 in a single Raid-0 Configuration
#
MDADM=${MDADM:-/sbin/mdadm}
MDDEVICES=${MDDEVICES:-"/dev/sd[abcd]"}
MDCOUNT=${MDCOUNT:-4}
MDRAID=${MDRAID:-10}
DEVICES="/dev/md0"
zpool_md_destroy() {
msg ${MDADM} --manage --stop ${DEVICES}
${MDADM} --manage --stop ${DEVICES} &>/dev/null
msg ${MDADM} --zero-superblock ${MDDEVICES}
${MDADM} --zero-superblock ${MDDEVICES} >/dev/null
}
zpool_create() {
msg ${MDADM} --create ${DEVICES} --level=${MDRAID} \
--raid-devices=${MDCOUNT} ${MDDEVICES}
${MDADM} --create ${DEVICES} --level=${MDRAID} \
--raid-devices=${MDCOUNT} ${MDDEVICES} \
&>/dev/null || (zpool_md_destroy && exit 1)
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${DEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} \
${DEVICES} || (zpool_md_destroy && exit 2)
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
zpool_md_destroy
}
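
Before handing the md array to zpool create it can be useful to confirm it assembled as intended. This is an editorial sketch, not part of the original script, reusing the MDADM and DEVICES variables defined above:

# Print the md array summary and the kernel's view of its state.
${MDADM} --detail ${DEVICES} | head -n 20
grep -A 2 "^md0" /proc/mdstat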

View File

@ -1,38 +0,0 @@
#!/bin/bash
#
# Four disk Raid-5 in a single Raid-0 Configuration
#
MDADM=${MDADM:-/sbin/mdadm}
MDDEVICES=${MDDEVICES:-"/dev/sd[abcd]"}
MDCOUNT=${MDCOUNT:-4}
MDRAID=${MDRAID:-5}
DEVICES="/dev/md0"
zpool_md_destroy() {
msg ${MDADM} --manage --stop ${DEVICES}
${MDADM} --manage --stop ${DEVICES} &>/dev/null
msg ${MDADM} --zero-superblock ${MDDEVICES}
${MDADM} --zero-superblock ${MDDEVICES} >/dev/null
}
zpool_create() {
msg ${MDADM} --create ${DEVICES} --level=${MDRAID} \
--raid-devices=${MDCOUNT} ${MDDEVICES}
${MDADM} --create ${DEVICES} --level=${MDRAID} \
--raid-devices=${MDCOUNT} ${MDDEVICES} \
&>/dev/null || (zpool_md_destroy && exit 1)
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${DEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} \
${DEVICES} || (zpool_md_destroy && exit 2)
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
zpool_md_destroy
}

View File

@ -1,16 +0,0 @@
#!/bin/bash
#
# Single ram disk /dev/ram0 Raid-0 Configuration
#
DEVICES="/dev/ram0"
zpool_create() {
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${DEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${DEVICES} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME} || exit 1
}

View File

@ -1,46 +0,0 @@
#!/bin/bash
#
# 1 scsi_debug device on top of which no raid is layered.
#
SDSIZE=${SDSIZE:-128}
SDHOSTS=${SDHOSTS:-1}
SDTGTS=${SDTGTS:-1}
SDLUNS=${SDLUNS:-1}
LDMOD=/sbin/modprobe
zpool_create() {
check_sd_utils
test `${LSMOD} | grep -c scsi_debug` -gt 0 && \
(echo 0 >/sys/module/scsi_debug/parameters/every_nth && \
${RMMOD} scsi_debug || exit 1)
udev_trigger
msg "${LDMOD} scsi_debug dev_size_mb=${SDSIZE} " \
"add_host=${SDHOSTS} num_tgts=${SDTGTS} " \
"max_luns=${SDLUNS}"
${LDMOD} scsi_debug \
dev_size_mb=${SDSIZE} \
add_host=${SDHOSTS} \
num_tgts=${SDTGTS} \
max_luns=${SDLUNS} || \
die "Error $? creating scsi_debug devices"
udev_trigger
SDDEVICE=`${LSSCSI}|${AWK} '/scsi_debug/ { print $6; exit }'`
msg "${PARTED} -s ${SDDEVICE} mklabel gpt"
${PARTED} -s ${SDDEVICE} mklabel gpt || \
(${RMMOD} scsi_debug && die "Error $? creating gpt label")
msg "${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${SDDEVICE}"
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${SDDEVICE} || \
(${RMMOD} scsi_debug && exit 1)
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
msg "${RMMOD} scsi_debug"
${RMMOD} scsi_debug || die "Error $? removing scsi_debug devices"
}
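
The scripts in this family already read and write /sys/module/scsi_debug/parameters/, so the same interface can serve as a post-load sanity check. An editorial sketch, not part of the original script:

# Confirm scsi_debug picked up the requested device size (in MiB).
if [ -r /sys/module/scsi_debug/parameters/dev_size_mb ]; then
    echo "scsi_debug dev_size_mb=$(cat /sys/module/scsi_debug/parameters/dev_size_mb)"
fi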

View File

@ -1,78 +0,0 @@
#!/bin/bash
#
# 1 scsi_debug device for fault injection and 3 loopback devices
# on top of which is layered raid0 (striped).
#
SDSIZE=${SDSIZE:-256}
SDHOSTS=${SDHOSTS:-1}
SDTGTS=${SDTGTS:-1}
SDLUNS=${SDLUNS:-1}
LDMOD=/sbin/modprobe
FILEDIR=${FILEDIR:-/var/tmp}
FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 $FILEDIR/file-vdev2"}
DEVICES=""
zpool_create() {
check_loop_utils
check_sd_utils
test `${LSMOD} | grep -c scsi_debug` -gt 0 && \
(echo 0 >/sys/module/scsi_debug/parameters/every_nth && \
${RMMOD} scsi_debug || exit 1)
udev_trigger
msg "${LDMOD} scsi_debug dev_size_mb=${SDSIZE} " \
"add_host=${SDHOSTS} num_tgts=${SDTGTS} " \
"max_luns=${SDLUNS}"
${LDMOD} scsi_debug \
dev_size_mb=${SDSIZE} \
add_host=${SDHOSTS} \
num_tgts=${SDTGTS} \
max_luns=${SDLUNS} || \
die "Error $? creating scsi_debug devices"
udev_trigger
SDDEVICE=`${LSSCSI} | ${AWK} '/scsi_debug/ { print $6; exit }'`
msg "${PARTED} -s ${SDDEVICE} mklabel gpt"
${PARTED} -s ${SDDEVICE} mklabel gpt || \
(${RMMOD} scsi_debug && die "Error $? creating gpt label")
for FILE in ${FILES}; do
LODEVICE=`unused_loop_device`
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
&>/dev/null || (${RMMOD} scsi_debug && \
die "Error $? creating ${FILE}")
# Setup the loopback device on the file.
msg "Creating ${LODEVICE} using ${FILE}"
${LOSETUP} ${LODEVICE} ${FILE} || (${RMMOD} scsi_debug && \
die "Error $? creating ${LODEVICE} using ${FILE}")
DEVICES="${DEVICES} ${LODEVICE}"
done
DEVICES="${DEVICES} ${SDDEVICE}"
msg "${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${DEVICES}"
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${DEVICES} || \
(${RMMOD} scsi_debug && exit 1)
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
for FILE in ${FILES}; do
LODEVICE=`${LOSETUP} -a | grep ${FILE} | head -n1|cut -f1 -d:`
msg "Removing ${LODEVICE} using ${FILE}"
${LOSETUP} -d ${LODEVICE} ||
die "Error $? destroying ${LODEVICE} using ${FILE}"
rm -f ${FILE} || exit 1
done
msg "${RMMOD} scsi_debug"
${RMMOD} scsi_debug || die "Error $? removing scsi_debug devices"
}

View File

@ -1,92 +0,0 @@
#!/bin/bash
#
# 1 scsi_debug device for fault injection and 3 loopback devices
# on top of which is layered raid10 (mirrored).
#
SDSIZE=${SDSIZE:-256}
SDHOSTS=${SDHOSTS:-1}
SDTGTS=${SDTGTS:-1}
SDLUNS=${SDLUNS:-1}
LDMOD=/sbin/modprobe
FILEDIR=${FILEDIR:-/var/tmp}
FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 $FILEDIR/file-vdev2"}
DEVICES_M1=""
DEVICES_M2=""
zpool_create() {
local COUNT=0
check_loop_utils
check_sd_utils
test `${LSMOD} | grep -c scsi_debug` -gt 0 && \
(echo 0 >/sys/module/scsi_debug/parameters/every_nth && \
${RMMOD} scsi_debug || exit 1)
udev_trigger
msg "${LDMOD} scsi_debug dev_size_mb=${SDSIZE} " \
"add_host=${SDHOSTS} num_tgts=${SDTGTS} " \
"max_luns=${SDLUNS}"
${LDMOD} scsi_debug \
dev_size_mb=${SDSIZE} \
add_host=${SDHOSTS} \
num_tgts=${SDTGTS} \
max_luns=${SDLUNS} || \
die "Error $? creating scsi_debug devices"
udev_trigger
SDDEVICE=`${LSSCSI}|${AWK} '/scsi_debug/ { print $6; exit }'`
msg "${PARTED} -s ${SDDEVICE} mklabel gpt"
${PARTED} -s ${SDDEVICE} mklabel gpt || \
(${RMMOD} scsi_debug && die "Error $? creating gpt label")
for FILE in ${FILES}; do
LODEVICE=`unused_loop_device`
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
&>/dev/null || (${RMMOD} scsi_debug && \
die "Error $? creating ${FILE}")
# Setup the loopback device on the file.
msg "Creating ${LODEVICE} using ${FILE}"
${LOSETUP} ${LODEVICE} ${FILE} || (${RMMOD} scsi_debug && \
die "Error $? creating ${LODEVICE} using ${FILE}")
DEVICES="${DEVICES} ${LODEVICE}"
done
DEVICES="${DEVICES} ${SDDEVICE}"
for DEVICE in ${DEVICES}; do
let COUNT=${COUNT}+1
if [ $((COUNT % 2)) -eq 0 ]; then
DEVICES_M2="${DEVICES_M2} ${DEVICE}"
else
DEVICES_M1="${DEVICES_M1} ${DEVICE}"
fi
done
msg "${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} " \
"mirror ${DEVICES_M1} mirror ${DEVICES_M2}"
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} \
mirror ${DEVICES_M1} mirror ${DEVICES_M2} || \
(${RMMOD} scsi_debug && exit 1)
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
for FILE in ${FILES}; do
LODEVICE=`${LOSETUP} -a | grep ${FILE} | head -n1|cut -f1 -d:`
msg "Removing ${LODEVICE} using ${FILE}"
${LOSETUP} -d ${LODEVICE} ||
die "Error $? destroying ${LODEVICE} using ${FILE}"
rm -f ${FILE} || exit 1
done
msg "${RMMOD} scsi_debug"
${RMMOD} scsi_debug || die "Error $? removing scsi_debug devices"
}

View File

@ -1,78 +0,0 @@
#!/bin/bash
#
# 1 scsi_debug device for fault injection and 3 loopback devices
# on top of which is layered raidz.
#
SDSIZE=${SDSIZE:-256}
SDHOSTS=${SDHOSTS:-1}
SDTGTS=${SDTGTS:-1}
SDLUNS=${SDLUNS:-1}
LDMOD=/sbin/modprobe
FILEDIR=${FILEDIR:-/var/tmp}
FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 $FILEDIR/file-vdev2"}
DEVICES=""
zpool_create() {
check_loop_utils
check_sd_utils
test `${LSMOD} | grep -c scsi_debug` -gt 0 && \
(echo 0 >/sys/module/scsi_debug/parameters/every_nth && \
${RMMOD} scsi_debug || exit 1)
udev_trigger
msg "${LDMOD} scsi_debug dev_size_mb=${SDSIZE} " \
"add_host=${SDHOSTS} num_tgts=${SDTGTS} " \
"max_luns=${SDLUNS}"
${LDMOD} scsi_debug \
dev_size_mb=${SDSIZE} \
add_host=${SDHOSTS} \
num_tgts=${SDTGTS} \
max_luns=${SDLUNS} || \
die "Error $? creating scsi_debug devices"
udev_trigger
SDDEVICE=`${LSSCSI} | ${AWK} '/scsi_debug/ { print $6; exit }'`
msg "${PARTED} -s ${SDDEVICE} mklabel gpt"
${PARTED} -s ${SDDEVICE} mklabel gpt || \
(${RMMOD} scsi_debug && die "Error $? creating gpt label")
for FILE in ${FILES}; do
LODEVICE=`unused_loop_device`
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
&>/dev/null || (${RMMOD} scsi_debug && \
die "Error $? creating ${FILE}")
# Setup the loopback device on the file.
msg "Creating ${LODEVICE} using ${FILE}"
${LOSETUP} ${LODEVICE} ${FILE} || (${RMMOD} scsi_debug && \
die "Error $? creating ${LODEVICE} using ${FILE}")
DEVICES="${DEVICES} ${LODEVICE}"
done
DEVICES="${DEVICES} ${SDDEVICE}"
msg "${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz ${DEVICES}"
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz ${DEVICES} || \
(${RMMOD} scsi_debug && exit 1)
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
for FILE in ${FILES}; do
LODEVICE=`${LOSETUP} -a | grep ${FILE} | head -n1|cut -f1 -d:`
msg "Removing ${LODEVICE} using ${FILE}"
${LOSETUP} -d ${LODEVICE} ||
die "Error $? destroying ${LODEVICE} using ${FILE}"
rm -f ${FILE} || exit 1
done
msg "${RMMOD} scsi_debug"
${RMMOD} scsi_debug || die "Error $? removing scsi_debug devices"
}

View File

@ -1,78 +0,0 @@
#!/bin/bash
#
# 1 scsi_debug device for fault injection and 3 loopback devices
# on top of which is layered raidz2.
#
SDSIZE=${SDSIZE:-256}
SDHOSTS=${SDHOSTS:-1}
SDTGTS=${SDTGTS:-1}
SDLUNS=${SDLUNS:-1}
LDMOD=/sbin/modprobe
FILEDIR=${FILEDIR:-/var/tmp}
FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 $FILEDIR/file-vdev2"}
DEVICES=""
zpool_create() {
check_loop_utils
check_sd_utils
test `${LSMOD} | grep -c scsi_debug` -gt 0 && \
(echo 0 >/sys/module/scsi_debug/parameters/every_nth && \
${RMMOD} scsi_debug || exit 1)
udev_trigger
msg "${LDMOD} scsi_debug dev_size_mb=${SDSIZE} " \
"add_host=${SDHOSTS} num_tgts=${SDTGTS} " \
"max_luns=${SDLUNS}"
${LDMOD} scsi_debug \
dev_size_mb=${SDSIZE} \
add_host=${SDHOSTS} \
num_tgts=${SDTGTS} \
max_luns=${SDLUNS} || \
die "Error $? creating scsi_debug devices"
udev_trigger
SDDEVICE=`${LSSCSI} | ${AWK} '/scsi_debug/ { print $6; exit }'`
msg "${PARTED} -s ${SDDEVICE} mklabel gpt"
${PARTED} -s ${SDDEVICE} mklabel gpt || \
(${RMMOD} scsi_debug && die "Error $? creating gpt label")
for FILE in ${FILES}; do
LODEVICE=`unused_loop_device`
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
&>/dev/null || (${RMMOD} scsi_debug && \
die "Error $? creating ${FILE}")
# Setup the loopback device on the file.
msg "Creating ${LODEVICE} using ${FILE}"
${LOSETUP} ${LODEVICE} ${FILE} || (${RMMOD} scsi_debug && \
die "Error $? creating ${LODEVICE} using ${FILE}")
DEVICES="${DEVICES} ${LODEVICE}"
done
DEVICES="${DEVICES} ${SDDEVICE}"
msg "${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz2 ${DEVICES}"
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz2 ${DEVICES} || \
(${RMMOD} scsi_debug && exit 1)
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
for FILE in ${FILES}; do
LODEVICE=`${LOSETUP} -a | grep ${FILE} | head -n1|cut -f1 -d:`
msg "Removing ${LODEVICE} using ${FILE}"
${LOSETUP} -d ${LODEVICE} ||
die "Error $? destroying ${LODEVICE} using ${FILE}"
rm -f ${FILE} || exit 1
done
msg "${RMMOD} scsi_debug"
${RMMOD} scsi_debug || die "Error $? removing scsi_debug devices"
}

View File

@ -1,79 +0,0 @@
#!/bin/bash
#
# 1 scsi_debug device for fault injection and 3 loopback devices
# on top of which is layered raidz3.
#
SDSIZE=${SDSIZE:-256}
SDHOSTS=${SDHOSTS:-1}
SDTGTS=${SDTGTS:-1}
SDLUNS=${SDLUNS:-1}
LDMOD=/sbin/modprobe
FILES="/tmp/zpool-vdev0 \
/tmp/zpool-vdev1 \
/tmp/zpool-vdev2"
DEVICES=""
zpool_create() {
check_loop_utils
check_sd_utils
test `${LSMOD} | grep -c scsi_debug` -gt 0 && \
(echo 0 >/sys/module/scsi_debug/parameters/every_nth && \
${RMMOD} scsi_debug || exit 1)
udev_trigger
msg "${LDMOD} scsi_debug dev_size_mb=${SDSIZE} " \
"add_host=${SDHOSTS} num_tgts=${SDTGTS} " \
"max_luns=${SDLUNS}"
${LDMOD} scsi_debug \
dev_size_mb=${SDSIZE} \
add_host=${SDHOSTS} \
num_tgts=${SDTGTS} \
max_luns=${SDLUNS} || \
die "Error $? creating scsi_debug devices"
udev_trigger
SDDEVICE=`${LSSCSI} | ${AWK} '/scsi_debug/ { print $6; exit }'`
msg "${PARTED} -s ${SDDEVICE} mklabel gpt"
${PARTED} -s ${SDDEVICE} mklabel gpt || \
(${RMMOD} scsi_debug && die "Error $? creating gpt label")
for FILE in ${FILES}; do
LODEVICE=`unused_loop_device`
rm -f ${FILE} || exit 1
dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
&>/dev/null || (${RMMOD} scsi_debug && \
die "Error $? creating ${FILE}")
# Setup the loopback device on the file.
msg "Creating ${LODEVICE} using ${FILE}"
${LOSETUP} ${LODEVICE} ${FILE} || (${RMMOD} scsi_debug && \
die "Error $? creating ${LODEVICE} using ${FILE}")
DEVICES="${DEVICES} ${LODEVICE}"
done
DEVICES="${DEVICES} ${SDDEVICE}"
msg "${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz3 ${DEVICES}"
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} raidz3 ${DEVICES} || \
(${RMMOD} scsi_debug && exit 1)
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
for FILE in ${FILES}; do
LODEVICE=`${LOSETUP} -a | grep ${FILE} | head -n1|cut -f1 -d:`
msg "Removing ${LODEVICE} using ${FILE}"
${LOSETUP} -d ${LODEVICE} ||
die "Error $? destroying ${LODEVICE} using ${FILE}"
rm -f ${FILE} || exit 1
done
msg "${RMMOD} scsi_debug"
${RMMOD} scsi_debug || die "Error $? removing scsi_debug devices"
}

View File

@ -1,16 +0,0 @@
#!/bin/bash
#
# Single disk /dev/sda Raid-0 Configuration
#
DEVICES="/dev/sda"
zpool_create() {
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${DEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${DEVICES} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME} || exit 1
}

View File

@ -1,80 +0,0 @@
#!/bin/bash
#
# Zpool Raid-0 Configuration
#
# This script is used to test with the /dev/disk/by-vdev/[A-Z][1-n] devices.
# It assumes that you have already populated /dev/disk/by-vdev/ by creating
# an /etc/zfs/vdev_id.conf file based on your system design.
#
# You can then use either the zpool-create.sh or the zpios.sh test script to
# test various Raid-0 configurations by adjusting the following tunables.
# For example, if you wanted to create and test a single 4-disk Raid-0
# configuration using disks [A-D]1 with dedicated ZIL and L2ARC devices,
# you could run the following.
#
# ZIL="log A2" L2ARC="cache B2" RANKS=1 CHANNELS=4 \
# zpool-create.sh -c zpool-raid0
#
# zpool status tank
# pool: tank
# state: ONLINE
# scan: none requested
# config:
#
# NAME STATE READ WRITE CKSUM
# tank ONLINE 0 0 0
# A1 ONLINE 0 0 0
# B1 ONLINE 0 0 0
# C1 ONLINE 0 0 0
# D1 ONLINE 0 0 0
# logs
# A2 ONLINE 0 0 0
# cache
# B2 ONLINE 0 0 0
#
# errors: No known data errors
#
# Number of interior vdevs to create using the following rank ids.
RANKS=${RANKS:-1}
RANK_LIST=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 )
# Number of devices per vdev using the following channel ids.
CHANNELS=${CHANNELS:-8}
CHANNEL_LIST=( A B C D E F G H I J K L M N O P Q R S T U V W X Y Z )
# Create a ZIL vdev as follows.
ZIL=${ZIL:-}
# Create an L2ARC vdev as follows.
L2ARC=${L2ARC:-}
raid0_setup() {
local RANKS=$1
local CHANNELS=$2
RAID0S=()
for (( i=0, k=0; i<${RANKS}; i++ )); do
RANK=${RANK_LIST[$i]}
for (( j=0; j<${CHANNELS}; j++, k++ )); do
RAID0S[${k}]="${CHANNEL_LIST[$j]}${RANK}"
done
done
return 0
}
zpool_create() {
raid0_setup ${RANKS} ${CHANNELS}
ZPOOL_DEVICES="${RAID0S[*]} ${ZIL} ${L2ARC}"
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${ZPOOL_DEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${ZPOOL_DEVICES} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
}

View File

@ -1,85 +0,0 @@
#!/bin/bash
#
# Zpool Raid-10 Configuration
#
# This script is used to test with the /dev/disk/by-vdev/[A-Z][1-n] devices.
# It assumes that you have already populated /dev/disk/by-vdev/ by creating
# an /etc/zfs/vdev_id.conf file based on your system design.
#
# You can then use either the zpool-create.sh or the zpios.sh test script to
# test various Raid-10 configurations by adjusting the following tunables.
# For example, if you wanted to create and test a single 4-disk Raid-10
# configuration using disks [A-D]1 with dedicated ZIL and L2ARC devices,
# you could run the following.
#
# ZIL="log A2" L2ARC="cache B2" RANKS=1 CHANNELS=4 \
# zpool-create.sh -c zpool-raid10
#
# zpool status tank
# pool: tank
# state: ONLINE
# scan: none requested
# config:
#
# NAME STATE READ WRITE CKSUM
# tank ONLINE 0 0 0
# mirror-0 ONLINE 0 0 0
# A1 ONLINE 0 0 0
# B1 ONLINE 0 0 0
# mirror-1 ONLINE 0 0 0
# C1 ONLINE 0 0 0
# D1 ONLINE 0 0 0
# logs
# A2 ONLINE 0 0 0
# cache
# B2 ONLINE 0 0 0
#
# errors: No known data errors
#
# Number of interior vdevs to create using the following rank ids.
RANKS=${RANKS:-1}
RANK_LIST=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 )
# Number of devices per vdev using the following channel ids.
CHANNELS=${CHANNELS:-8}
CHANNEL_LIST=( A B C D E F G H I J K L M N O P Q R S T U V W X Y Z )
# Create a ZIL vdev as follows.
ZIL=${ZIL:-}
# Create an L2ARC vdev as follows.
L2ARC=${L2ARC:-}
raid10_setup() {
local RANKS=$1
local CHANNELS=$2
local IDX=0
RAID10S=()
for (( i=0, l=0 ; i<${RANKS}; i++ )); do
RANK=${RANK_LIST[$i]}
for (( j=0, k=1; j<${CHANNELS}; j+=2,k+=2,l++ )); do
DISK1="${CHANNEL_LIST[$j]}${RANK}"
DISK2="${CHANNEL_LIST[$k]}${RANK}"
RAID10S[$l]="mirror ${DISK1} ${DISK2}"
done
done
return 0
}
zpool_create() {
raid10_setup ${RANKS} ${CHANNELS}
ZPOOL_DEVICES="${RAID10S[*]} ${ZIL} ${L2ARC}"
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${ZPOOL_DEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${ZPOOL_DEVICES} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
}
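
For reference, raid10_setup walks the channel list two devices at a time, emitting one mirror per pair. A standalone, illustrative expansion of that loop for RANKS=1 CHANNELS=4 (values filled in by hand, not part of the original script):

# Expands to one mirror vdev per pair of channels in rank 1.
CHANNEL_LIST=( A B C D )
RANK=1
RAID10S=()
for (( j=0, k=1, l=0; j<4; j+=2, k+=2, l++ )); do
    RAID10S[$l]="mirror ${CHANNEL_LIST[$j]}${RANK} ${CHANNEL_LIST[$k]}${RANK}"
done
echo "${RAID10S[*]}"    # prints: mirror A1 B1 mirror C1 D1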

View File

@ -1,87 +0,0 @@
#!/bin/bash
#
# Zpool Raid-Z Configuration
#
# This script is used to test with the /dev/disk/by-vdev/[A-Z][1-n] devices.
# It assumes that you have already populated /dev/disk/by-vdev/ by creating
# an /etc/zfs/vdev_id.conf file based on your system design.
#
# You can then use either the zpool-create.sh or the zpios.sh test script to
# test various Raid-Z configurations by adjusting the following tunables.
# For example, if you wanted to create and test a single 4-disk Raid-Z2
# configuration using disks [A-D]1 with dedicated ZIL and L2ARC devices,
# you could run the following.
#
# ZIL="log A2" L2ARC="cache B2" RANKS=1 CHANNELS=4 LEVEL=2 \
# zpool-create.sh -c zpool-raidz
#
# zpool status tank
# pool: tank
# state: ONLINE
# scan: none requested
# config:
#
# NAME STATE READ WRITE CKSUM
# tank ONLINE 0 0 0
# raidz2-0 ONLINE 0 0 0
# A1 ONLINE 0 0 0
# B1 ONLINE 0 0 0
# C1 ONLINE 0 0 0
# D1 ONLINE 0 0 0
# logs
# A2 ONLINE 0 0 0
# cache
# B2 ONLINE 0 0 0
#
# errors: No known data errors
#
# Number of interior vdevs to create using the following rank ids.
RANKS=${RANKS:-1}
RANK_LIST=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 )
# Number of devices per vdev using the following channel ids.
CHANNELS=${CHANNELS:-8}
CHANNEL_LIST=( A B C D E F G H I J K L M N O P Q R S T U V W X Y Z )
# Raid-Z Level: 1, 2, or 3.
LEVEL=${LEVEL:-2}
# Create a ZIL vdev as follows.
ZIL=${ZIL:-}
# Create an L2ARC vdev as follows.
L2ARC=${L2ARC:-}
raidz_setup() {
local RANKS=$1
local CHANNELS=$2
RAIDZS=()
for (( i=0; i<${RANKS}; i++ )); do
RANK=${RANK_LIST[$i]}
RAIDZ=("raidz${LEVEL}")
for (( j=0, k=1; j<${CHANNELS}; j++, k++ )); do
RAIDZ[$k]="${CHANNEL_LIST[$j]}${RANK}"
done
RAIDZS[$i]="${RAIDZ[*]}"
done
return 0
}
zpool_create() {
raidz_setup ${RANKS} ${CHANNELS}
ZPOOL_DEVICES="${RAIDZS[*]} ${ZIL} ${L2ARC}"
msg ${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${ZPOOL_DEVICES}
${ZPOOL} create ${ZPOOL_FLAGS} ${ZPOOL_NAME} ${ZPOOL_DEVICES} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
}

View File

@ -1,139 +0,0 @@
#!/bin/bash
basedir="$(dirname $0)"
SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
. "${basedir}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
PROG=zpool-create.sh
usage() {
cat << EOF
USAGE:
$0 [-hvfxd] [-c config] [-p name] [-l zpool-options] [-s zfs-options]
DESCRIPTION:
Create one of several predefined zpool configurations.
OPTIONS:
-h Show this message
-v Verbose
-f Force everything
-x Disable all zpool features
-c Configuration for zpool
-p Name for zpool
-d Destroy zpool (default create)
-l Additional zpool options
-s Additional zfs options
EOF
}
check_config() {
if [ ! -f ${ZPOOL_CONFIG} ]; then
local NAME=`basename ${ZPOOL_CONFIG} .sh`
ERROR="Unknown config '${NAME}', available configs are:\n"
for CFG in `ls ${ZPOOLDIR}/ | grep ".sh"`; do
local NAME=`basename ${CFG} .sh`
ERROR="${ERROR}${NAME}\n"
done
return 1
fi
return 0
}
ZPOOL_CONFIG=unknown
ZPOOL_NAME=tank
ZPOOL_DESTROY=
ZPOOL_FLAGS=${ZPOOL_FLAGS:-""}
ZPOOL_OPTIONS=""
ZFS_OPTIONS=""
while getopts 'hvfxc:p:dl:s:' OPTION; do
case $OPTION in
h)
usage
exit 1
;;
v)
VERBOSE=1
VERBOSE_FLAG="-v"
;;
f)
FORCE=1
ZPOOL_FLAGS="$ZPOOL_FLAGS -f"
;;
x)
NO_FEATURES=1
ZPOOL_FLAGS="$ZPOOL_FLAGS -d"
;;
c)
ZPOOL_CONFIG=${ZPOOLDIR}/${OPTARG}.sh
;;
p)
ZPOOL_NAME=${OPTARG}
;;
d)
ZPOOL_DESTROY=1
;;
l)
ZPOOL_OPTIONS=${OPTARG}
;;
s)
ZFS_OPTIONS=${OPTARG}
;;
?)
usage
exit 1
;;
esac
done
if [ $(id -u) != 0 ]; then
die "Must run as root"
fi
check_config || die "${ERROR}"
. ${ZPOOL_CONFIG}
if [ ${ZPOOL_DESTROY} ]; then
zpool_destroy
else
zpool_create
if [ "${ZPOOL_OPTIONS}" ]; then
if [ ${VERBOSE} ]; then
echo
echo "${ZPOOL} ${ZPOOL_OPTIONS} ${ZPOOL_NAME}"
fi
${ZPOOL} ${ZPOOL_OPTIONS} ${ZPOOL_NAME} || exit 1
fi
if [ "${ZFS_OPTIONS}" ]; then
if [ ${VERBOSE} ]; then
echo
echo "${ZFS} ${ZFS_OPTIONS} ${ZPOOL_NAME}"
fi
${ZFS} ${ZFS_OPTIONS} ${ZPOOL_NAME} || exit 1
fi
if [ ${VERBOSE} ]; then
echo
echo "zpool list"
${ZPOOL} list || exit 1
echo
echo "zpool status ${ZPOOL_NAME}"
${ZPOOL} status ${ZPOOL_NAME} || exit 1
fi
fi
exit 0
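
Typical invocations of this script, shown for illustration only (the zpool-raid0 config name and the default pool name tank are taken from the scripts above; the script itself checks that it is run as root):

# Create a pool from a predefined config, then tear it down again.
./zpool-create.sh -c zpool-raid0 -p tank
./zpool-create.sh -c zpool-raid0 -p tank -d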

View File

@ -1 +1 @@
SUBDIRS = cmd include man
SUBDIRS = bin include man

View File

@ -135,7 +135,6 @@ export ZFS_FILES='zdb
zinject
zpool
ztest
zpios
raidz_test
arc_summary.py
arcstat.py

View File

@ -1,106 +0,0 @@
export KERNELSRC=@LINUX@
export KERNELBUILD=@LINUX_OBJ@
export KERNELSRCVER=@LINUX_VERSION@
export KERNELMOD=/lib/modules/${KERNELSRCVER}/kernel
export SPLSRC=@SPL@
export SPLBUILD=@SPL_OBJ@
export SPLSRCVER=@SPL_VERSION@
export SRCDIR=@abs_top_srcdir@
export BUILDDIR=@abs_top_builddir@
export LIBDIR=${BUILDDIR}/lib
export CMDDIR=${BUILDDIR}/cmd
export MODDIR=${BUILDDIR}/module
export SCRIPTDIR=${BUILDDIR}/scripts
export ZPOOLDIR=${BUILDDIR}/scripts/zpool-config
export ZPIOSDIR=${BUILDDIR}/scripts/zpios-test
export ZPIOSPROFILEDIR=${BUILDDIR}/scripts/zpios-profile
export ETCDIR=${SRCDIR}/etc
export TESTSDIR=${SRCDIR}/tests
export RUNFILEDIR=${TESTSDIR}/runfiles
export UDEVRULEDIR=${BUILDDIR}/udev/rules.d
export ZEDLET_ETC_DIR=${SRCDIR}/cmd/zed/zed.d
export ZEDLET_LIBEXEC_DIR=${SRCDIR}/cmd/zed/zed.d
export ZPOOL_SCRIPT_DIR=${SRCDIR}/cmd/zpool/zpool.d
export ZPOOL_SCRIPTS_PATH=${SRCDIR}/cmd/zpool/zpool.d
export ZDB=${CMDDIR}/zdb/zdb
export ZFS=${CMDDIR}/zfs/zfs
export ZHACK=${CMDDIR}/zhack/zhack
export ZINJECT=${CMDDIR}/zinject/zinject
export ZPOOL=${CMDDIR}/zpool/zpool
export ZTEST=${CMDDIR}/ztest/ztest
export ZPIOS=${CMDDIR}/zpios/zpios
export RAIDZ_TEST=${CMDDIR}/raidz_test/raidz_test
export ARC_SUMMARY=${CMDDIR}/arc_summary/arc_summary.py
export ARCSTAT=${CMDDIR}/arcstat/arcstat.py
export DBUFSTAT=${CMDDIR}/dbufstat/dbufstat.py
export ZED=${CMDDIR}/zed/zed
export COMMON_SH=${SCRIPTDIR}/common.sh
export ZFS_SH=${SCRIPTDIR}/zfs.sh
export ZPOOL_CREATE_SH=${SCRIPTDIR}/zpool-create.sh
export ZPIOS_SH=${SCRIPTDIR}/zpios.sh
export ZPIOS_SURVEY_SH=${SCRIPTDIR}/zpios-survey.sh
# Test Suite Specific Commands
export NOINUSE_CHECK=1
export TEST_RUNNER=${TESTSDIR}/test-runner/cmd/test-runner.py
export STF_TOOLS=${TESTSDIR}/test-runner
export STF_SUITE=${TESTSDIR}/zfs-tests
export CHG_USR_EXEC=${TESTSDIR}/zfs-tests/cmd/chg_usr_exec/chg_usr_exec
export DEVNAME2DEVID=${TESTSDIR}/zfs-tests/cmd/devname2devid/devname2devid
export DIR_RD_UPDATE=${TESTSDIR}/zfs-tests/cmd/dir_rd_update/dir_rd_update
export FILE_CHECK=${TESTSDIR}/zfs-tests/cmd/file_check/file_check
export FILE_TRUNC=${TESTSDIR}/zfs-tests/cmd/file_trunc/file_trunc
export FILE_WRITE=${TESTSDIR}/zfs-tests/cmd/file_write/file_write
export LARGEST_FILE=${TESTSDIR}/zfs-tests/cmd/largest_file/largest_file
export MKBUSY=${TESTSDIR}/zfs-tests/cmd/mkbusy/mkbusy
export MKFILE=${TESTSDIR}/zfs-tests/cmd/mkfile/mkfile
export MKFILES=${TESTSDIR}/zfs-tests/cmd/mkfiles/mkfiles
export MKTREE=${TESTSDIR}/zfs-tests/cmd/mktree/mktree
export MMAP_EXEC=${TESTSDIR}/zfs-tests/cmd/mmap_exec/mmap_exec
export MMAPWRITE=${TESTSDIR}/zfs-tests/cmd/mmapwrite/mmapwrite
export RANDFREE_FILE=${TESTSDIR}/zfs-tests/cmd/randfree_file/randfree_file
export READMMAP=${TESTSDIR}/zfs-tests/cmd/readmmap/readmmap
export RENAME_DIR=${TESTSDIR}/zfs-tests/cmd/rename_dir/rename_dir
export RM_LNKCNT_ZERO_FILE=${TESTSDIR}/zfs-tests/cmd/rm_lnkcnt_zero_file/rm_lnkcnt_zero_file
export THREADSAPPEND=${TESTSDIR}/zfs-tests/cmd/threadsappend/threadsappend
export XATTRTEST=${TESTSDIR}/zfs-tests/cmd/xattrtest/xattrtest
export INTREE=1
export LDMOD=/sbin/insmod
export GDB="/usr/bin/libtool --mode=execute gdb"
export ZED_PIDFILE=@runstatedir@/zed.pid
export KERNEL_MODULES=( \
${KERNELMOD}/lib/zlib_deflate/zlib_deflate.ko \
${KERNELMOD}/lib/zlib_inflate/zlib_inflate.ko \
)
export SPL_MODULES=( \
${SPLBUILD}/module/spl/spl.ko \
${SPLBUILD}/module/splat/splat.ko \
)
export ZFS_MODULES=( \
${MODDIR}/avl/zavl.ko \
${MODDIR}/nvpair/znvpair.ko \
${MODDIR}/unicode/zunicode.ko \
${MODDIR}/zcommon/zcommon.ko \
${MODDIR}/icp/icp.ko \
${MODDIR}/zfs/zfs.ko \
)
export ZPIOS_MODULES=( \
${MODDIR}/zpios/zpios.ko \
)
export MODULES=( \
${SPL_MODULES[*]} \
${ZFS_MODULES[*]} \
)