Merge commit 'refs/top-bases/linux-configure-branch' into linux-configure-branch

Brian Behlendorf 2009-11-20 10:32:06 -08:00
commit 8bacf4f03d
13 changed files with 235 additions and 6 deletions

View File

@@ -7,7 +7,8 @@ AC_DEFUN([ZFS_AC_KERNEL_BIO_RW_SYNCIO], [
ZFS_LINUX_TRY_COMPILE([
#include <linux/bio.h>
],[
int flags = BIO_RW_SYNCIO;
int flags;
flags = BIO_RW_SYNCIO;
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BIO_RW_SYNCIO, 1,

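The compile check above only proves that BIO_RW_SYNCIO exists; module code still has to choose between the old and new flag names at build time. A minimal sketch of how the resulting HAVE_BIO_RW_SYNCIO define might be consumed (the zfs_config.h include and the helper name are illustrative assumptions, not part of this change):

#include <linux/bio.h>
#include "zfs_config.h"			/* generated by configure; name assumed */

/* Hypothetical helper: select the per-kernel synchronous-I/O bio flag. */
static inline int
example_bio_sync_flag(void)
{
#ifdef HAVE_BIO_RW_SYNCIO
	return (1 << BIO_RW_SYNCIO);	/* 2.6.29 and newer */
#else
	return (1 << BIO_RW_SYNC);	/* older kernels */
#endif
}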
View File

@@ -1,6 +1,8 @@
dnl #
dnl # 2.6.18 API change
dnl #
dnl # 2.6.31 API change
dnl # In 2.6.29 kernels blk_end_request() was a GPL-only symbol, this was
dnl # changed in 2.6.31 so it may be used by non-GPL modules.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_BLK_END_REQUEST], [
AC_MSG_CHECKING([whether blk_end_request() is available])
ZFS_LINUX_TRY_COMPILE([
@@ -15,4 +17,21 @@ AC_DEFUN([ZFS_AC_KERNEL_BLK_END_REQUEST], [
],[
AC_MSG_RESULT(no)
])
AC_MSG_CHECKING([whether blk_end_request() is GPL-only])
ZFS_LINUX_TRY_COMPILE([
#include <linux/module.h>
#include <linux/blkdev.h>
MODULE_LICENSE("CDDL");
],[
struct request *req = NULL;
(void) blk_end_request(req, 0, 0);
],[
AC_MSG_RESULT(no)
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_END_REQUEST_GPL_ONLY, 1,
[blk_end_request() is GPL-only])
])
])
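When the second test defines HAVE_BLK_END_REQUEST_GPL_ONLY, a CDDL-licensed module cannot call blk_end_request() on that kernel. A hedged sketch of one possible fallback, not necessarily the shim ZFS ships: complete the request through the long-standing non-GPL end_request() helper, which expects the queue lock to be held.

#include <linux/blkdev.h>
#include "zfs_config.h"			/* generated by configure; name assumed */

/* Hypothetical completion helper built on the configure results. */
static void
example_end_request(struct request *req, int error, unsigned int nr_bytes)
{
#if defined(HAVE_BLK_END_REQUEST) && !defined(HAVE_BLK_END_REQUEST_GPL_ONLY)
	(void) blk_end_request(req, error, nr_bytes);
#else
	struct request_queue *q = req->q;
	unsigned long flags;

	/*
	 * 2.6.29/2.6.30 fallback sketch: end_request() uses a "1 == success"
	 * convention and completes req->hard_cur_sectors, so set that field
	 * first (simplified; partial completions are not handled here).
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	req->hard_cur_sectors = nr_bytes >> 9;
	end_request(req, (error == 0) ? 1 : error);
	spin_unlock_irqrestore(q->queue_lock, flags);
#endif
}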

View File

@@ -1,5 +1,8 @@
dnl #
dnl # 2.6.31 API change
dnl # 2.6.29 API change
dnl # In the 2.6.29 kernel blk_rq_bytes() was available as a GPL-only symbol.
dnl # So we need to check the symbol license as well. As of 2.6.31 the
dnl # blk_rq_bytes() helper was changed to a static inline which we can use.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_BLK_RQ_BYTES], [
AC_MSG_CHECKING([whether blk_rq_bytes() is available])
@@ -15,4 +18,21 @@ AC_DEFUN([ZFS_AC_KERNEL_BLK_RQ_BYTES], [
],[
AC_MSG_RESULT(no)
])
AC_MSG_CHECKING([whether blk_rq_bytes() is GPL-only])
ZFS_LINUX_TRY_COMPILE([
#include <linux/module.h>
#include <linux/blkdev.h>
MODULE_LICENSE("CDDL");
],[
struct request *req = NULL;
(void) blk_rq_bytes(req);
],[
AC_MSG_RESULT(no)
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_RQ_BYTES_GPL_ONLY, 1,
[blk_rq_bytes() is GPL-only])
])
])
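Similarly, when blk_rq_bytes() is either missing or GPL-only, the byte count has to come straight from struct request. A sketch assuming the pre-2.6.31 field layout (illustrative only, not necessarily the wrapper ZFS uses):

#include <linux/blkdev.h>
#include "zfs_config.h"			/* generated by configure; name assumed */

/* Hypothetical accessor: total request size in bytes. */
static inline unsigned int
example_rq_bytes(struct request *req)
{
#if defined(HAVE_BLK_RQ_BYTES) && !defined(HAVE_BLK_RQ_BYTES_GPL_ONLY)
	return (blk_rq_bytes(req));
#else
	return (req->hard_nr_sectors << 9);	/* 512-byte sectors, pre-2.6.31 field */
#endif
}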

View File

@@ -0,0 +1,20 @@
dnl #
dnl # 2.6.x API change
dnl #
AC_DEFUN([ZFS_AC_KERNEL_RQ_FOR_EACH_SEGMENT], [
AC_MSG_CHECKING([whether rq_for_each_segment() is available])
ZFS_LINUX_TRY_COMPILE([
#include <linux/blkdev.h>
],[
struct bio_vec *bv;
struct req_iterator iter;
struct request *req = NULL;
rq_for_each_segment(bv, req, iter) { }
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_RQ_FOR_EACH_SEGMENT, 1,
[rq_for_each_segment() is available])
],[
AC_MSG_RESULT(no)
])
])
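For context, rq_for_each_segment() walks every bio_vec mapped by a request. A small illustrative use of the macro once HAVE_RQ_FOR_EACH_SEGMENT is defined (the byte-counting helper is hypothetical):

#include <linux/blkdev.h>
#include "zfs_config.h"			/* generated by configure; name assumed */

#ifdef HAVE_RQ_FOR_EACH_SEGMENT
/* Hypothetical helper: total the data bytes described by a request. */
static unsigned int
example_rq_count_bytes(struct request *req)
{
	struct bio_vec *bv;
	struct req_iterator iter;
	unsigned int bytes = 0;

	rq_for_each_segment(bv, req, iter)
		bytes += bv->bv_len;

	return (bytes);
}
#endif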

View File

@@ -19,6 +19,7 @@ AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
ZFS_AC_KERNEL_BLK_RQ_SECTORS
ZFS_AC_KERNEL_GET_DISK_RO
ZFS_AC_KERNEL_RQ_IS_SYNC
ZFS_AC_KERNEL_RQ_FOR_EACH_SEGMENT
dnl # Kernel build make options
dnl # KERNELMAKE_PARAMS="V=1" # Enable verbose module build
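Each ZFS_AC_KERNEL_* macro registered here contributes a HAVE_* symbol to the generated configuration header, which is what the module sources test at compile time. A hypothetical excerpt of that header on a 2.6.31 kernel (values and header name are illustrative only):

/* zfs_config.h (hypothetical excerpt) */
#define HAVE_BIO_RW_SYNCIO 1
#define HAVE_BLK_END_REQUEST 1
/* #undef HAVE_BLK_END_REQUEST_GPL_ONLY */
#define HAVE_BLK_RQ_BYTES 1
/* #undef HAVE_BLK_RQ_BYTES_GPL_ONLY */
#define HAVE_RQ_FOR_EACH_SEGMENT 1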

View File

@@ -1492,7 +1492,6 @@ EXPORT_SYMBOL(spa_get_space);
EXPORT_SYMBOL(spa_get_dspace);
EXPORT_SYMBOL(spa_get_asize);
EXPORT_SYMBOL(spa_max_replication);
EXPORT_SYMBOL(spa_busy);
EXPORT_SYMBOL(spa_get_failmode);
EXPORT_SYMBOL(spa_suspended);

View File

@@ -460,7 +460,7 @@ static void
zfs_range_unlock_reader(znode_t *zp, rl_t *remove)
{
avl_tree_t *tree = &zp->z_range_avl;
rl_t *rl, *next;
rl_t *rl, *next = NULL;
uint64_t len;
/*

View File

@@ -0,0 +1,41 @@
#
# /etc/udev/rules.d/99-zpool.rules
#
ENV{DEVTYPE}=="disk", IMPORT{program}="path_id %p"
# Full devices (*:pci*port:*:id-lun)
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:8-lun0", SYMLINK+="disk/zpool/a1"
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:9-lun0", SYMLINK+="disk/zpool/a2"
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:10-lun0", SYMLINK+="disk/zpool/a3"
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:11-lun0", SYMLINK+="disk/zpool/a4"
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:12-lun0", SYMLINK+="disk/zpool/a5"
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:13-lun0", SYMLINK+="disk/zpool/a6"
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:14-lun0", SYMLINK+="disk/zpool/a7"
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:15-lun0", SYMLINK+="disk/zpool/a8"
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:16-lun0", SYMLINK+="disk/zpool/b1"
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:17-lun0", SYMLINK+="disk/zpool/b2"
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:18-lun0", SYMLINK+="disk/zpool/b3"
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:19-lun0", SYMLINK+="disk/zpool/b4"
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:20-lun0", SYMLINK+="disk/zpool/b5"
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:21-lun0", SYMLINK+="disk/zpool/b6"
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:22-lun0", SYMLINK+="disk/zpool/b7"
ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="*:07:00.0*0:*:23-lun0", SYMLINK+="disk/zpool/b8"
# Partitions (*:pci*port:*:id-lun)
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:8-lun0", SYMLINK+="disk/zpool/a1-part%n"
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:9-lun0", SYMLINK+="disk/zpool/a2-part%n"
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:10-lun0", SYMLINK+="disk/zpool/a3-part%n"
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:11-lun0", SYMLINK+="disk/zpool/a4-part%n"
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:12-lun0", SYMLINK+="disk/zpool/a5-part%n"
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:13-lun0", SYMLINK+="disk/zpool/a6-part%n"
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:14-lun0", SYMLINK+="disk/zpool/a7-part%n"
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:15-lun0", SYMLINK+="disk/zpool/a8-part%n"
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:16-lun0", SYMLINK+="disk/zpool/b1-part%n"
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:17-lun0", SYMLINK+="disk/zpool/b2-part%n"
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:18-lun0", SYMLINK+="disk/zpool/b3-part%n"
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:19-lun0", SYMLINK+="disk/zpool/b4-part%n"
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:20-lun0", SYMLINK+="disk/zpool/b5-part%n"
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:21-lun0", SYMLINK+="disk/zpool/b6-part%n"
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:22-lun0", SYMLINK+="disk/zpool/b7-part%n"
ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="*:07:00.0*0:*:23-lun0", SYMLINK+="disk/zpool/b8-part%n"

View File

@@ -115,4 +115,52 @@ zconfig_test2() {
}
zconfig_test2
# ZVOL sanity check
zconfig_test3() {
POOL_NAME=tank
ZVOL_NAME=fish
FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
SRC_DIR=/bin/
TMP_FILE1=`mktemp`
TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
echo -n "test 3 - ZVOL sanity: "
# Create a pool and volume.
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
${ZFS} create -V 400M ${FULL_NAME} || fail 3
# Partition the volume, for a 400M volume there will be
# 812 cylinders, 16 heads, and 63 sectors per track.
/sbin/sfdisk -q /dev/${FULL_NAME} << EOF &>${TMP_FILE1} || fail 4
,812
;
;
;
EOF
# Format the partition with ext3.
/sbin/mkfs.ext3 /dev/${FULL_NAME}1 &>${TMP_FILE1} || fail 5
# Mount the ext3 filesystem and copy some data to it.
mkdir -p /tmp/${ZVOL_NAME} || fail 6
mount /dev/${FULL_NAME}1 /tmp/${ZVOL_NAME} || fail 7
cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME} || fail 8
# Verify the copied files match the original files.
diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}${SRC_DIR} || fail 9
# Remove the files, umount, destroy the volume and pool.
rm -Rf /tmp/${ZVOL_NAME}${SRC_DIR}* || fail 10
umount /tmp/${ZVOL_NAME} || fail 11
${ZFS} destroy ${FULL_NAME} || fail 12
${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 13
rm -f ${TMP_FILE1} || fail 14
${ZFS_SH} -u || fail 15
pass
}
zconfig_test3
exit 0

View File

@@ -0,0 +1,20 @@
#!/bin/bash
#
# Flash (White Box) Raid-0 Configuration (1x16)
#
RANKS=8
CHANNELS=2
zpool_create() {
udev_setup ${UDEVDIR}/99-zpool.rules.promise
udev_raid0_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID0S[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID0S[*]} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
}

View File

@@ -0,0 +1,20 @@
#!/bin/bash
#
# Flash (White Box) Raid-10 Configuration (8x2(1+1))
#
RANKS=8
CHANNELS=2
zpool_create() {
udev_setup ${UDEVDIR}/99-zpool.rules.promise
udev_raid10_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID10S[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID10S[*]} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
}

View File

@@ -0,0 +1,20 @@
#!/bin/bash
#
# Flash (White Box) Raid-Z Configuration (2x8(7+1))
#
RANKS=8
CHANNELS=2
zpool_create() {
udev_setup ${UDEVDIR}/99-zpool.rules.promise
udev_raidz_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZS[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZS[*]} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
}

View File

@@ -0,0 +1,20 @@
#!/bin/bash
#
# Flash (White Box) Raid-Z2 Configuration (2x8(6+2))
#
RANKS=8
CHANNELS=2
zpool_create() {
udev_setup ${UDEVDIR}/99-zpool.rules.promise
udev_raidz2_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZ2S[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZ2S[*]} || exit 1
}
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
}