Merge commit 'refs/top-bases/linux-zfs-branch' into linux-zfs-branch
commit 3baeaf3343

ChangeLog | 44
@@ -1,3 +1,47 @@
+2010-05-21  Brian Behlendorf <behlendorf1@llnl.gov>
+
+        * : Tag zfs-0.4.9 - Use 'git log --no-merges' for full change log.
+
+        * : Build system improvements:
+            - Added support for the 'make -s' silent build option.
+            - Allow zfs_config.h to be included by dependent packages.
+            - Minor spec file updates.
+            - Minor build system message updates.
+
+        * : Topic branch cleanup.  Several old branches were removed and
+          numerous hunks which were accidentaly commited to incorrect topic
+          branches in the past were relocated to the correct topic branch.
+
+        * *.c, *.h, *.sh, AUTHORS, COPYING, COPYRIGHT, DISCLAIMER, README:
+          Added standard header to source files which includes the copyright,
+          license, and author information.  Additionally, updated the listed
+          top level files to the latest versions.
+
+        * cmd/zpool/zpool_vdev.c: Check all partitions with check_file()
+          even when no libblkid is found.  This oversight would result in
+          ZFS not detecting existing filesystems on partitions.
+
+        * module/zfs/fm.c, module/zfs/zfs_fm, libzfs/libzfs_pool.c:
+          Added zevents which are similar to Solaris FMA support.  The
+          existing FMA and sysevent call points in ZFS were unified in
+          to a single event type and used to create a user space visible
+          event notification system under Linux.  The new 'zpool events'
+          command can be used to show all recent ZFS events.
+
+        * module/zfs/spa.c, module/zfs/zil.c: Suppress large memory
+          allocation warnings for two particular kmem_alloc()'s.  For
+          now we can live with them as is but long term a way should be
+          found to perform small allocations or use the vmem based slab.
+
+        * module/zfs/zvol.c: Recreate volume and snapshot /dev links during
+          module load.  Links in /dev for volumes/snapshots were only being
+          created at volume/snapshot creation time.  Those links are now also
+          created dynamically at module load time based on the spa config.
+
+        * module/zfs/zvol.c, module/zfs/include/sys/blkdev.h: The handler
+          zvol_request() should use the unlocked version of blk_end_request()
+          to avoid a deadlock in the ZVOL.
+
 2010-03-11  Brian Behlendorf <behlendorf1@llnl.gov>
 
         * : Tag zfs-0.4.8 - Use 'git log --no-merges' for full change log.

META | 2
@@ -1,7 +1,7 @@
 Meta: 1
 Name: zfs
 Branch: 1.0
-Version: 0.4.8
+Version: 0.4.9
 Release: 1
 Release-Tags: relext
 License: CDDL

README | 115
@@ -1,40 +1,95 @@
-============================ ZFS KERNEL BUILD ============================
+============================ ZFS QUICK START ============================
 
-1) Build the SPL (Solaris Porting Layer) module which is designed to
-   provide many Solaris APIs in the Linux kernel which are needed
-   by ZFS.  To build the SPL:
-
-        tar -xzf spl-x.y.z.tgz
-        cd spl-x.y.z
-        ./configure --with-linux=<kernel src>
-        make
-        make check <as root>
-
-2) Build ZFS, this port is based on build specified by the ZFS.RELEASE
-   file.  You will need to have both the kernel and SPL source available.
-   To build ZFS for use as a Linux kernel module.
-
-        tar -xzf zfs-x.y.z.tgz
-        cd zfs-x.y.z
-        ./configure --with-linux=<kernel src> \
-                    --with-spl=<spl src>
-        make
-        make check <as root>
-
-============================ ZPIOS TEST SUITE ============================
-
-3) Provided is an in-kernel test application called zpios which can be
-   used to simulate a parallel IO load.  It may be used as a stress
-   or performance test for your configuration.  To simplify testing
-   scripts provided in the scripts/ directory which provide a few
-   pre-built zpool configurations and zpios test cases.  By default
-   'make check' as root will run a simple test against several small
-   loopback devices created in /tmp/.
-
-        cd scripts
-        ./zfs.sh                               # Load the ZFS/SPL modules
-        ./zpios.sh -c lo-raid0.sh -t tiny -v   # Tiny zpios loopback test
-        ./zfs.sh -u                            # Unload the ZFS/SPL modules
+1) Build the SPL (Solaris Porting Layer) and install it.  This package
+   provides several Solaris APIs used by ZFS and is a required dependency.
+   Before building ZFS build this package and install the resulting rpms.
+
+   > tar -xzf spl-x.y.z.tgz
+   > cd spl-x.y.z
+   > ./configure --with-linux=<kernel src>
+   > make
+   > make rpm
+
+   > sudo rpm -Uvh *.<arch>.rpm
+   Preparing...                ########################################### [100%]
+      1:spl                    ########################################### [ 33%]
+      2:spl-modules-devel      ########################################### [ 67%]
+      3:spl-modules            ########################################### [100%]
+
+2) Build ZFS and install it.  This package provides the native port of
+   ZFS for Linux including all kernel modules and command line utilities.
+   Note it is important that you have installed spl-module-devel package
+   from step 1) before attempting to build ZFS.
+
+   > tar -xzf zfs-x.y.z.tgz
+   > cd zfs-x.y.z
+   > ./configure --with-linux=<kernel src>
+   > make
+   > make rpm
+
+   > sudo rpm -Uvh *.<arch>.rpm
+   Preparing...                ########################################### [100%]
+      1:zfs                    ########################################### [ 20%]
+      2:zfs-test               ########################################### [ 40%]
+      3:zfs-modules-devel      ########################################### [ 60%]
+      4:zfs-modules            ########################################### [ 80%]
+
+3) Enjoy ZFS on Linux!  Currently only the ZVOL is fully functional
+   but work on the ZPL is underway.  Why is just having the ZVOL still
+   useful you ask?  Well here's an example of what you can do today using
+   just the ZVOL:
+
+   > # Create the 'tank' zpool containing a raidz vdev spread over 4 devices.
+   > zpool create tank raidz /dev/sdb /dev/sdc /dev/sdd /dev/sde
+   > zpool list
+   NAME   SIZE   USED  AVAIL    CAP  HEALTH  ALTROOT
+   tank  1.81T   132K  1.81T     0%  ONLINE  -
+
+   > # Create a 100G block device named 'fish' in the 'tank' zpool.
+   > zfs create -V 100G tank/fish
+   > zfs list
+   NAME        USED  AVAIL  REFER  MOUNTPOINT
+   tank        100G  1.24T  26.9K  /tank
+   tank/fish   100G  1.33T  23.9K  -
+
+   > # Partition 'tank/fish' as if it were a normal block device.
+   > sfdisk /dev/tank/fish << EOF
+   0,
+   EOF
+   > sfdisk -l /dev/tank/fish
+
+   Disk /dev/tank/fish: 208050 cylinders, 16 heads, 63 sectors/track
+   Units = cylinders of 516096 bytes, blocks of 1024 bytes, counting from 0
+
+      Device Boot Start     End   #cyls    #blocks   Id  System
+   /dev/tank/fish1          0+ 208049  208050- 104857199+  83  Linux
+   /dev/tank/fish2          0      -       0          0    0  Empty
+   /dev/tank/fish3          0      -       0          0    0  Empty
+   /dev/tank/fish4          0      -       0          0    0  Empty
+
+   > # Format the new /dev/tank/fish1 partition with ext2 and mount it.
+   > mkfs.ext2 -q /dev/tank/fish1
+   > mkdir -p /mnt/tank/fish1
+   > mount /dev/tank/fish1 /mnt/tank/fish1
+   > ls /mnt/tank/fish1
+   lost+found
+
+   > # Take a snapshot of the pristine ext2 filesystem and mount it read-only.
+   > zfs snapshot tank/fish@pristine
+   > mkdir /mnt/tank/fish@pristine1
+   > mount /dev/tank/fish@pristine1 /mnt/tank/fish@pristine1
+   > ls /mnt/tank/fish\@pristine1
+   lost+found
+
+   > # Changes made to tank/fish1 do not appear in tank/fish@pristine1
+   > touch /mnt/tank/fish1/foo
+   > ls /mnt/tank/fish1/
+   foo  lost+found
+   > ls /mnt/tank/fish\@pristine1
+   lost+found
 
 Enjoy,
 Brian Behlendorf <behlendorf1@llnl.gov>

TODO | 51 (file deleted)
@@ -1,51 +0,0 @@
-SUMMARY OF MAJOR KNOWN PROBLEMS IN v0.4.6 (Development Release)
-
-* Fault Management (FM) and sysevent support / analog.
-  bugzilla 14866, 15645
-
-  This is probably the biggest remaining chunk of work.  Linux has no
-  direct equivalent of the Solaris Fault Management Architecture (FMA)
-  and we need one.  All fault information is currently ignored and no
-  disk errors are even logged.  We need to settle on a design for this
-  but minimally it needs to log the events to syslog.
-
-* Implement the ZVOL.
-  bugzilla xxxxx
-
-  This should be pretty staight forward now that the DMU is fully
-  implemented and solid.  It just needs to be done.
-
-* Implement the ZPL.
-  bugzilla xxxxx
-
-  Getting basic ZPL support should be pretty straight forward.  Moving
-  beyond that to fully integrate with the VFS for things like mmap and
-  file locking will be trickier.
-
-* Integrate the ZFS-FUSE port in to this code base.
-  bugzilla xxxxx
-
-  Merging the zfs-fuse code base in with this project would be nice from a
-  code maintence standpoint.  This code base is quite a bit newer than
-  zfs-fuse and it already provides a libzpool library for zfs-fuse to link
-  against.  This should be a pretty straight forward addition.
-
-* Emulate kthreads with pthreads in userspace.
-  bugzilla xxxxx
-
-  There is a patch available for this but each time I've integrated it
-  I've observed SIGSEGVs in ztest.  Once this patch is in place ztest
-  can be used to use the kthread API which brings us one step closer
-  to being able to run it in the kernel as an additional sanity check.
-
-* DMU Performance
-  bugzilla 13566
-
-  While performance is currently not bad it is not where it needs to be
-  for production use.  The latest test results which can be found in the
-  docs directly show that on hardware which is capable of 8GB/s we only
-  see a few GB/s when running through the DMU.  To address this we need
-  to finish getting the code working with the kernel lock profiler and
-  look for some hot locks.  Additionally, it would be interesting to run
-  the same tests on Solaris (once we have a ZVOL/ZPL) and compare the
-  performance.  It's not at all clear to me Solaris currently does better.

@@ -3790,7 +3790,7 @@ zpool_do_events_short(nvlist_t *nvl)
 
         verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
         memset(str, ' ', 32);
-        (void) ctime_r(&tv[0], ctime_str);
+        (void) ctime_r((const time_t *)&tv[0], ctime_str);
         (void) strncpy(str, ctime_str+4, 6);             /* 'Jun 30' */
         (void) strncpy(str+7, ctime_str+20, 4);          /* '1993' */
         (void) strncpy(str+12, ctime_str+11, 8);         /* '21:49:08' */
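The one-line change above adds an explicit cast because the zevent timestamp is carried as an int64_t array while ctime_r() expects a pointer to time_t, which can be a narrower type on 32-bit targets. The following is a minimal userspace sketch of the same timestamp formatting, not taken from the ZFS sources: the timestamp value is hypothetical and it copies the value into a local time_t instead of casting the pointer, which is one width-safe way to do the conversion.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    int
    main(void)
    {
        int64_t tv = 1274400000;        /* hypothetical event timestamp (~2010-05-21 UTC) */
        char ctime_str[26];             /* ctime_r() requires at least 26 bytes */
        char str[33];
        time_t t;

        t = (time_t)tv;                 /* width-safe copy rather than a pointer cast */
        (void) ctime_r(&t, ctime_str);  /* e.g. "Fri May 21 00:00:00 2010\n" in UTC */

        memset(str, ' ', 32);
        str[32] = '\0';
        (void) strncpy(str, ctime_str + 4, 6);          /* month and day */
        (void) strncpy(str + 7, ctime_str + 20, 4);     /* year */
        (void) strncpy(str + 12, ctime_str + 11, 8);    /* time of day */
        (void) printf("%s\n", str);

        return (0);
    }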

@@ -61,9 +61,8 @@ blk_requeue_request(request_queue_t *q, struct request *req)
 
 #ifndef HAVE_BLK_END_REQUEST
 static inline bool
-blk_end_request(struct request *req, int error, unsigned int nr_bytes)
+__blk_end_request(struct request *req, int error, unsigned int nr_bytes)
 {
-        struct request_queue *q = req->q;
         LIST_HEAD(list);
 
         /*
@@ -79,14 +78,23 @@ blk_end_request(struct request *req, int error, unsigned int nr_bytes)
          * entire request partial requests are not supported.
          */
         req->hard_cur_sectors = nr_bytes >> 9;
 
-        spin_lock_irq(q->queue_lock);
         end_request(req, ((error == 0) ? 1 : error));
-        spin_unlock_irq(q->queue_lock);
 
         return 0;
 }
+
+static inline bool
+blk_end_request(struct request *req, int error, unsigned int nr_bytes)
+{
+        struct request_queue *q = req->q;
+        bool rc;
+
+        spin_lock_irq(q->queue_lock);
+        rc = __blk_end_request(req, error, nr_bytes);
+        spin_unlock_irq(q->queue_lock);
+
+        return rc;
+}
 #else
 # ifdef HAVE_BLK_END_REQUEST_GPL_ONLY
 /*
@@ -94,25 +102,34 @@ blk_end_request(struct request *req, int error, unsigned int nr_bytes)
  * GPL-only version of the helper.  As of 2.6.31 the helper is available
  * to non-GPL modules and is not explicitly exported GPL-only.
  */
-# define blk_end_request ___blk_end_request
-static inline bool
-___blk_end_request(struct request *req, int error, unsigned int nr_bytes)
-{
-        struct request_queue *q = req->q;
-
+# define __blk_end_request __blk_end_request_x
+# define blk_end_request blk_end_request_x
+
+static inline bool
+__blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
+{
         /*
          * The old API required the driver to end each segment and not
          * the entire request.  In our case we always need to end the
          * entire request partial requests are not supported.
          */
         req->hard_cur_sectors = nr_bytes >> 9;
 
-        spin_lock_irq(q->queue_lock);
         end_request(req, ((error == 0) ? 1 : error));
-        spin_unlock_irq(q->queue_lock);
 
         return 0;
 }
+
+static inline bool
+blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
+{
+        struct request_queue *q = req->q;
+        bool rc;
+
+        spin_lock_irq(q->queue_lock);
+        __blk_end_request_x(req, error, nr_bytes);
+        spin_unlock_irq(q->queue_lock);
+
+        return rc;
+}
 # endif /* HAVE_BLK_END_REQUEST_GPL_ONLY */
 #endif /* HAVE_BLK_END_REQUEST */

@@ -626,14 +626,14 @@ zvol_request(struct request_queue *q)
                             req->rq_disk->disk_name,
                             (long long unsigned)blk_rq_pos(req),
                             (long unsigned)blk_rq_sectors(req));
-                        blk_end_request(req, -EIO, size);
+                        __blk_end_request(req, -EIO, size);
                         continue;
                 }
 
                 if (!blk_fs_request(req)) {
                         printk(KERN_INFO "%s: non-fs cmd\n",
                                req->rq_disk->disk_name);
-                        blk_end_request(req, -EIO, size);
+                        __blk_end_request(req, -EIO, size);
                         continue;
                 }
 
@@ -642,8 +642,9 @@ zvol_request(struct request_queue *q)
                         zvol_dispatch(zvol_read, req);
                         break;
                 case WRITE:
-                        if (unlikely(get_disk_ro(zv->zv_disk))) {
-                                blk_end_request(req, -EROFS, size);
+                        if (unlikely(get_disk_ro(zv->zv_disk)) ||
+                            unlikely(zv->zv_mode & DS_MODE_READONLY)) {
+                                __blk_end_request(req, -EROFS, size);
                                 break;
                         }
 
@@ -652,7 +653,7 @@ zvol_request(struct request_queue *q)
                 default:
                         printk(KERN_INFO "%s: unknown cmd: %d\n",
                                req->rq_disk->disk_name, (int)rq_data_dir(req));
-                        blk_end_request(req, -EIO, size);
+                        __blk_end_request(req, -EIO, size);
                         break;
                 }
         }
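Per the ChangeLog entry above, zvol_request() is invoked by the block layer with q->queue_lock already held, so ending a request through the locked blk_end_request() wrapper would try to take the same lock a second time and deadlock; the hunks above therefore switch to the unlocked __blk_end_request() helper. Below is a minimal userspace sketch of that locked/unlocked split, using a pthread mutex in place of the kernel spinlock; all names in it are illustrative and not taken from the ZFS or kernel sources.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Unlocked helper: the caller must already hold queue_lock. */
    static void
    __end_request(int error)
    {
        (void) printf("request completed, error=%d\n", error);
    }

    /* Locked wrapper: takes queue_lock itself, for callers that do not hold it. */
    static void
    end_request_locked(int error)
    {
        (void) pthread_mutex_lock(&queue_lock);
        __end_request(error);
        (void) pthread_mutex_unlock(&queue_lock);
    }

    /* Analogue of zvol_request(): entered with queue_lock already held, so it
     * must use the unlocked helper.  Calling end_request_locked() here would
     * try to re-acquire queue_lock and hang, since neither a default pthread
     * mutex nor a kernel spinlock is recursive. */
    static void
    request_handler(void)
    {
        __end_request(0);
    }

    int
    main(void)
    {
        (void) pthread_mutex_lock(&queue_lock);   /* the "block layer" holds the lock */
        request_handler();
        (void) pthread_mutex_unlock(&queue_lock);

        end_request_locked(-5);                   /* outside the lock, use the wrapper */
        return (0);
    }

Compile the sketch with 'cc -pthread' if you want to run it; the point is only that the unlocked variant is the one safe to call from inside the lock.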

@@ -94,7 +94,11 @@
 %endif
 %define kpkg kernel
 %define kdevpkg kernel-devel
-%define kverpkg %{kver}
+%if %{defined el6}
+%define kverpkg %(echo %{kver} | %{__sed} -e 's/.%{_target_cpu}//g')
+%else
+%define kverpkg %{kver}
+%endif
 %define koppkg =
 %if %{undefined kdir}
 %define kdir %{_usrsrc}/kernels/%{kver}-%{_target_cpu}