From 0b1e6fcc3e7ec110dd282a002e28d98bcd8e08ad Mon Sep 17 00:00:00 2001
From: Brian Atkinson
Date: Wed, 20 Jan 2021 22:27:30 -0700
Subject: [PATCH] Extending FreeBSD UIO Struct

In FreeBSD, uio_t was just a typedef for struct uio. In order to extend
this structure without modifying the definition of struct uio itself,
struct uio has been embedded inside a new zfs_uio_t struct. Also renamed
all of the uio_* interfaces to zfs_uio_* to make it clear this is a ZFS
interface.

Reviewed-by: Ryan Moeller
Reviewed-by: Jorgen Lundman
Reviewed-by: Brian Behlendorf
Signed-off-by: Brian Atkinson
Closes #11438
---
 include/os/freebsd/spl/sys/uio.h | 79 +++++++++++++--------
 include/os/freebsd/zfs/sys/freebsd_crypto.h | 2 +-
 include/os/freebsd/zfs/sys/zfs_znode_impl.h | 4 +-
 include/os/linux/spl/sys/uio.h | 60 +++++++++-------
 include/os/linux/zfs/sys/zfs_vnops_os.h | 2 +-
 include/os/linux/zfs/sys/zfs_znode_impl.h | 4 +-
 include/sys/crypto/common.h | 2 +-
 include/sys/dmu.h | 12 ++--
 include/sys/sa.h | 2 +-
 include/sys/uio_impl.h | 8 +--
 include/sys/zfs_sa.h | 2 +-
 include/sys/zfs_vnops.h | 8 +--
 lib/libspl/include/sys/uio.h | 44 ++++++------
 module/icp/algs/modes/modes.c | 12 ++--
 module/icp/core/kcf_prov_lib.c | 32 ++++-----
 module/icp/io/sha1_mod.c | 48 ++++++-------
 module/icp/io/sha2_mod.c | 48 ++++++-------
 module/icp/io/skein_mod.c | 40 +++++------
 module/os/freebsd/spl/spl_uio.c | 38 ++++++----
 module/os/freebsd/zfs/crypto_os.c | 26 +++----
 module/os/freebsd/zfs/zfs_ctldir.c | 40 ++++++-----
 module/os/freebsd/zfs/zfs_file_os.c | 2 +-
 module/os/freebsd/zfs/zfs_vnops_os.c | 79 ++++++++++++---------
 module/os/freebsd/zfs/zio_crypt.c | 77 +++++++++++---------
 module/os/freebsd/zfs/zvol_os.c | 44 +++++++-----
 module/os/linux/zfs/zfs_uio.c | 43 +++++------
 module/os/linux/zfs/zfs_vnops_os.c | 14 ++--
 module/os/linux/zfs/zio_crypt.c | 30 ++++----
 module/os/linux/zfs/zpl_file.c | 18 ++---
 module/os/linux/zfs/zpl_inode.c | 4 +-
 module/os/linux/zfs/zpl_xattr.c | 6 +-
 module/os/linux/zfs/zvol_os.c | 8 +--
 module/zfs/dmu.c | 52 ++++++--------
 module/zfs/sa.c | 6 +-
 module/zfs/zfs_sa.c | 11 +--
 module/zfs/zfs_vnops.c | 67 ++++++++---------
 tests/zfs-tests/cmd/mmapwrite/mmapwrite.c | 4 +-
 37 files changed, 521 insertions(+), 457 deletions(-)

diff --git a/include/os/freebsd/spl/sys/uio.h b/include/os/freebsd/spl/sys/uio.h
index 11b2189cda..f1d30195f0 100644
--- a/include/os/freebsd/spl/sys/uio.h
+++ b/include/os/freebsd/spl/sys/uio.h
@@ -35,55 +35,72 @@
 #include
 #include
-
-
-#define uio_loffset uio_offset
-
-typedef struct uio uio_t;
 typedef struct iovec iovec_t;
-typedef enum uio_seg uio_seg_t;
+typedef enum uio_seg zfs_uio_seg_t;
+typedef enum uio_rw zfs_uio_rw_t;
+
+typedef struct zfs_uio {
+	struct uio *uio;
+} zfs_uio_t;
+
+#define GET_UIO_STRUCT(u) (u)->uio
+#define zfs_uio_segflg(u) GET_UIO_STRUCT(u)->uio_segflg
+#define zfs_uio_offset(u) GET_UIO_STRUCT(u)->uio_offset
+#define zfs_uio_resid(u) GET_UIO_STRUCT(u)->uio_resid
+#define zfs_uio_iovcnt(u) GET_UIO_STRUCT(u)->uio_iovcnt
+#define zfs_uio_iovlen(u, idx) GET_UIO_STRUCT(u)->uio_iov[(idx)].iov_len
+#define zfs_uio_iovbase(u, idx) GET_UIO_STRUCT(u)->uio_iov[(idx)].iov_base
+#define zfs_uio_td(u) GET_UIO_STRUCT(u)->uio_td
+#define zfs_uio_rw(u) GET_UIO_STRUCT(u)->uio_rw
+#define zfs_uio_fault_disable(u, set)
+#define zfs_uio_prefaultpages(size, u) (0)
+
+
+static __inline void
+zfs_uio_init(zfs_uio_t *uio, struct uio *uio_s)
+{
+	GET_UIO_STRUCT(uio) = uio_s;
+}
+
+static __inline void
+zfs_uio_setoffset(zfs_uio_t *uio, offset_t off)
+{
+	zfs_uio_offset(uio) = off;
+} static __inline int -zfs_uiomove(void *cp, size_t n, enum uio_rw dir, uio_t *uio) +zfs_uiomove(void *cp, size_t n, zfs_uio_rw_t dir, zfs_uio_t *uio) { - - ASSERT(uio->uio_rw == dir); - return (uiomove(cp, (int)n, uio)); + ASSERT(zfs_uio_rw(uio) == dir); + return (uiomove(cp, (int)n, GET_UIO_STRUCT(uio))); } -#define uiomove(cp, n, dir, uio) zfs_uiomove((cp), (n), (dir), (uio)) -int uiocopy(void *p, size_t n, enum uio_rw rw, struct uio *uio, size_t *cbytes); -void uioskip(uio_t *uiop, size_t n); - -#define uio_segflg(uio) (uio)->uio_segflg -#define uio_offset(uio) (uio)->uio_loffset -#define uio_resid(uio) (uio)->uio_resid -#define uio_iovcnt(uio) (uio)->uio_iovcnt -#define uio_iovlen(uio, idx) (uio)->uio_iov[(idx)].iov_len -#define uio_iovbase(uio, idx) (uio)->uio_iov[(idx)].iov_base -#define uio_fault_disable(uio, set) -#define uio_prefaultpages(size, uio) (0) +int zfs_uiocopy(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio, + size_t *cbytes); +void zfs_uioskip(zfs_uio_t *uiop, size_t n); +int zfs_uio_fault_move(void *p, size_t n, zfs_uio_rw_t dir, zfs_uio_t *uio); static inline void -uio_iov_at_index(uio_t *uio, uint_t idx, void **base, uint64_t *len) +zfs_uio_iov_at_index(zfs_uio_t *uio, uint_t idx, void **base, uint64_t *len) { - *base = uio_iovbase(uio, idx); - *len = uio_iovlen(uio, idx); + *base = zfs_uio_iovbase(uio, idx); + *len = zfs_uio_iovlen(uio, idx); } static inline void -uio_advance(uio_t *uio, size_t size) +zfs_uio_advance(zfs_uio_t *uio, size_t size) { - uio->uio_resid -= size; - uio->uio_loffset += size; + zfs_uio_resid(uio) -= size; + zfs_uio_offset(uio) += size; } static inline offset_t -uio_index_at_offset(uio_t *uio, offset_t off, uint_t *vec_idx) +zfs_uio_index_at_offset(zfs_uio_t *uio, offset_t off, uint_t *vec_idx) { *vec_idx = 0; - while (*vec_idx < uio_iovcnt(uio) && off >= uio_iovlen(uio, *vec_idx)) { - off -= uio_iovlen(uio, *vec_idx); + while (*vec_idx < zfs_uio_iovcnt(uio) && + off >= zfs_uio_iovlen(uio, *vec_idx)) { + off -= zfs_uio_iovlen(uio, *vec_idx); (*vec_idx)++; } diff --git a/include/os/freebsd/zfs/sys/freebsd_crypto.h b/include/os/freebsd/zfs/sys/freebsd_crypto.h index 08e058d6af..e240f5b0dd 100644 --- a/include/os/freebsd/zfs/sys/freebsd_crypto.h +++ b/include/os/freebsd/zfs/sys/freebsd_crypto.h @@ -92,7 +92,7 @@ int freebsd_crypt_newsession(freebsd_crypt_session_t *sessp, void freebsd_crypt_freesession(freebsd_crypt_session_t *sessp); int freebsd_crypt_uio(boolean_t, freebsd_crypt_session_t *, - struct zio_crypt_info *, uio_t *, crypto_key_t *, uint8_t *, + struct zio_crypt_info *, zfs_uio_t *, crypto_key_t *, uint8_t *, size_t, size_t); #endif /* _ZFS_FREEBSD_CRYPTO_H */ diff --git a/include/os/freebsd/zfs/sys/zfs_znode_impl.h b/include/os/freebsd/zfs/sys/zfs_znode_impl.h index ac2625d9a8..a802d1d73c 100644 --- a/include/os/freebsd/zfs/sys/zfs_znode_impl.h +++ b/include/os/freebsd/zfs/sys/zfs_znode_impl.h @@ -40,6 +40,7 @@ #include #include #include +#include #ifdef __cplusplus extern "C" { @@ -117,7 +118,8 @@ extern minor_t zfsdev_minor_alloc(void); #define Z_ISDIR(type) ((type) == VDIR) #define zn_has_cached_data(zp) vn_has_cached_data(ZTOV(zp)) -#define zn_rlimit_fsize(zp, uio, td) vn_rlimit_fsize(ZTOV(zp), (uio), (td)) +#define zn_rlimit_fsize(zp, uio) \ + vn_rlimit_fsize(ZTOV(zp), GET_UIO_STRUCT(uio), zfs_uio_td(uio)) /* Called on entry to each ZFS vnode and vfs operation */ #define ZFS_ENTER(zfsvfs) \ diff --git a/include/os/linux/spl/sys/uio.h b/include/os/linux/spl/sys/uio.h index 6e850c5fe7..cafb6805f6 100644 --- 
a/include/os/linux/spl/sys/uio.h +++ b/include/os/linux/spl/sys/uio.h @@ -36,21 +36,21 @@ typedef struct iovec iovec_t; -typedef enum uio_rw { +typedef enum zfs_uio_rw { UIO_READ = 0, UIO_WRITE = 1, -} uio_rw_t; +} zfs_uio_rw_t; -typedef enum uio_seg { +typedef enum zfs_uio_seg { UIO_USERSPACE = 0, UIO_SYSSPACE = 1, UIO_BVEC = 2, #if defined(HAVE_VFS_IOV_ITER) UIO_ITER = 3, #endif -} uio_seg_t; +} zfs_uio_seg_t; -typedef struct uio { +typedef struct zfs_uio { union { const struct iovec *uio_iov; const struct bio_vec *uio_bvec; @@ -60,42 +60,51 @@ typedef struct uio { }; int uio_iovcnt; offset_t uio_loffset; - uio_seg_t uio_segflg; + zfs_uio_seg_t uio_segflg; boolean_t uio_fault_disable; uint16_t uio_fmode; uint16_t uio_extflg; ssize_t uio_resid; size_t uio_skip; -} uio_t; +} zfs_uio_t; -#define uio_segflg(uio) (uio)->uio_segflg -#define uio_offset(uio) (uio)->uio_loffset -#define uio_resid(uio) (uio)->uio_resid -#define uio_iovcnt(uio) (uio)->uio_iovcnt -#define uio_iovlen(uio, idx) (uio)->uio_iov[(idx)].iov_len -#define uio_iovbase(uio, idx) (uio)->uio_iov[(idx)].iov_base -#define uio_fault_disable(uio, set) (uio)->uio_fault_disable = set +#define zfs_uio_segflg(u) (u)->uio_segflg +#define zfs_uio_offset(u) (u)->uio_loffset +#define zfs_uio_resid(u) (u)->uio_resid +#define zfs_uio_iovcnt(u) (u)->uio_iovcnt +#define zfs_uio_iovlen(u, idx) (u)->uio_iov[(idx)].iov_len +#define zfs_uio_iovbase(u, idx) (u)->uio_iov[(idx)].iov_base +#define zfs_uio_fault_disable(u, set) (u)->uio_fault_disable = set +#define zfs_uio_rlimit_fsize(z, u) (0) +#define zfs_uio_fault_move(p, n, rw, u) zfs_uiomove((p), (n), (rw), (u)) static inline void -uio_iov_at_index(uio_t *uio, uint_t idx, void **base, uint64_t *len) +zfs_uio_setoffset(zfs_uio_t *uio, offset_t off) { - *base = uio_iovbase(uio, idx); - *len = uio_iovlen(uio, idx); + uio->uio_loffset = off; } static inline void -uio_advance(uio_t *uio, size_t size) +zfs_uio_iov_at_index(zfs_uio_t *uio, uint_t idx, void **base, uint64_t *len) +{ + *base = zfs_uio_iovbase(uio, idx); + *len = zfs_uio_iovlen(uio, idx); +} + +static inline void +zfs_uio_advance(zfs_uio_t *uio, size_t size) { uio->uio_resid -= size; uio->uio_loffset += size; } static inline offset_t -uio_index_at_offset(uio_t *uio, offset_t off, uint_t *vec_idx) +zfs_uio_index_at_offset(zfs_uio_t *uio, offset_t off, uint_t *vec_idx) { *vec_idx = 0; - while (*vec_idx < uio_iovcnt(uio) && off >= uio_iovlen(uio, *vec_idx)) { - off -= uio_iovlen(uio, *vec_idx); + while (*vec_idx < zfs_uio_iovcnt(uio) && + off >= zfs_uio_iovlen(uio, *vec_idx)) { + off -= zfs_uio_iovlen(uio, *vec_idx); (*vec_idx)++; } @@ -116,8 +125,9 @@ iov_iter_init_compat(struct iov_iter *iter, unsigned int dir, } static inline void -uio_iovec_init(uio_t *uio, const struct iovec *iov, unsigned long nr_segs, - offset_t offset, uio_seg_t seg, ssize_t resid, size_t skip) +zfs_uio_iovec_init(zfs_uio_t *uio, const struct iovec *iov, + unsigned long nr_segs, offset_t offset, zfs_uio_seg_t seg, ssize_t resid, + size_t skip) { ASSERT(seg == UIO_USERSPACE || seg == UIO_SYSSPACE); @@ -133,7 +143,7 @@ uio_iovec_init(uio_t *uio, const struct iovec *iov, unsigned long nr_segs, } static inline void -uio_bvec_init(uio_t *uio, struct bio *bio) +zfs_uio_bvec_init(zfs_uio_t *uio, struct bio *bio) { uio->uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)]; uio->uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio); @@ -148,7 +158,7 @@ uio_bvec_init(uio_t *uio, struct bio *bio) #if defined(HAVE_VFS_IOV_ITER) static inline void -uio_iov_iter_init(uio_t *uio, struct iov_iter 
*iter, offset_t offset, +zfs_uio_iov_iter_init(zfs_uio_t *uio, struct iov_iter *iter, offset_t offset, ssize_t resid, size_t skip) { uio->uio_iter = iter; diff --git a/include/os/linux/zfs/sys/zfs_vnops_os.h b/include/os/linux/zfs/sys/zfs_vnops_os.h index d3b80753d1..47f91e4a6c 100644 --- a/include/os/linux/zfs/sys/zfs_vnops_os.h +++ b/include/os/linux/zfs/sys/zfs_vnops_os.h @@ -61,7 +61,7 @@ extern int zfs_rename(znode_t *sdzp, char *snm, znode_t *tdzp, char *tnm, cred_t *cr, int flags); extern int zfs_symlink(znode_t *dzp, char *name, vattr_t *vap, char *link, znode_t **zpp, cred_t *cr, int flags); -extern int zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr); +extern int zfs_readlink(struct inode *ip, zfs_uio_t *uio, cred_t *cr); extern int zfs_link(znode_t *tdzp, znode_t *szp, char *name, cred_t *cr, int flags); extern void zfs_inactive(struct inode *ip); diff --git a/include/os/linux/zfs/sys/zfs_znode_impl.h b/include/os/linux/zfs/sys/zfs_znode_impl.h index e95aef6c88..0a8fe7c78e 100644 --- a/include/os/linux/zfs/sys/zfs_znode_impl.h +++ b/include/os/linux/zfs/sys/zfs_znode_impl.h @@ -70,8 +70,8 @@ extern "C" { #define Z_ISDEV(type) (S_ISCHR(type) || S_ISBLK(type) || S_ISFIFO(type)) #define Z_ISDIR(type) S_ISDIR(type) -#define zn_has_cached_data(zp) ((zp)->z_is_mapped) -#define zn_rlimit_fsize(zp, uio, td) (0) +#define zn_has_cached_data(zp) ((zp)->z_is_mapped) +#define zn_rlimit_fsize(zp, uio) (0) /* * zhold() wraps igrab() on Linux, and igrab() may fail when the diff --git a/include/sys/crypto/common.h b/include/sys/crypto/common.h index a4f9d9848c..9a239225cd 100644 --- a/include/sys/crypto/common.h +++ b/include/sys/crypto/common.h @@ -244,7 +244,7 @@ typedef struct crypto_data { iovec_t cdu_raw; /* Pointer and length */ /* uio scatter-gather format */ - uio_t *cdu_uio; + zfs_uio_t *cdu_uio; } cdu; /* Crypto Data Union */ } crypto_data_t; diff --git a/include/sys/dmu.h b/include/sys/dmu.h index 0c50d0409b..ad96b729f5 100644 --- a/include/sys/dmu.h +++ b/include/sys/dmu.h @@ -847,14 +847,14 @@ void dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, dmu_tx_t *tx); #ifdef _KERNEL -int dmu_read_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size); -int dmu_read_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size); -int dmu_read_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size); -int dmu_write_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size, +int dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size); +int dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size); +int dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size); +int dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx); -int dmu_write_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size, +int dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx); -int dmu_write_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size, +int dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx); #endif struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size); diff --git a/include/sys/sa.h b/include/sys/sa.h index 432e0bc415..98eb8f9cd7 100644 --- a/include/sys/sa.h +++ b/include/sys/sa.h @@ -158,7 +158,7 @@ void sa_handle_lock(sa_handle_t *); void sa_handle_unlock(sa_handle_t *); #ifdef _KERNEL -int sa_lookup_uio(sa_handle_t *, sa_attr_type_t, uio_t *); +int 
sa_lookup_uio(sa_handle_t *, sa_attr_type_t, zfs_uio_t *); int sa_add_projid(sa_handle_t *, dmu_tx_t *, uint64_t); #endif diff --git a/include/sys/uio_impl.h b/include/sys/uio_impl.h index cfef0b95db..be70cea548 100644 --- a/include/sys/uio_impl.h +++ b/include/sys/uio_impl.h @@ -41,9 +41,9 @@ #include -extern int uiomove(void *, size_t, enum uio_rw, uio_t *); -extern int uio_prefaultpages(ssize_t, uio_t *); -extern int uiocopy(void *, size_t, enum uio_rw, uio_t *, size_t *); -extern void uioskip(uio_t *, size_t); +extern int zfs_uiomove(void *, size_t, zfs_uio_rw_t, zfs_uio_t *); +extern int zfs_uio_prefaultpages(ssize_t, zfs_uio_t *); +extern int zfs_uiocopy(void *, size_t, zfs_uio_rw_t, zfs_uio_t *, size_t *); +extern void zfs_uioskip(zfs_uio_t *, size_t); #endif /* _SYS_UIO_IMPL_H */ diff --git a/include/sys/zfs_sa.h b/include/sys/zfs_sa.h index 4e6d28638e..1ca7ced331 100644 --- a/include/sys/zfs_sa.h +++ b/include/sys/zfs_sa.h @@ -134,7 +134,7 @@ typedef struct znode_phys { #define DXATTR_MAX_ENTRY_SIZE (32768) #define DXATTR_MAX_SA_SIZE (SPA_OLD_MAXBLOCKSIZE >> 1) -int zfs_sa_readlink(struct znode *, uio_t *); +int zfs_sa_readlink(struct znode *, zfs_uio_t *); void zfs_sa_symlink(struct znode *, char *link, int len, dmu_tx_t *); void zfs_sa_get_scanstamp(struct znode *, xvattr_t *); void zfs_sa_set_scanstamp(struct znode *, xvattr_t *, dmu_tx_t *); diff --git a/include/sys/zfs_vnops.h b/include/sys/zfs_vnops.h index 6bf077b4bf..18259f0dc9 100644 --- a/include/sys/zfs_vnops.h +++ b/include/sys/zfs_vnops.h @@ -27,16 +27,16 @@ #include extern int zfs_fsync(znode_t *, int, cred_t *); -extern int zfs_read(znode_t *, uio_t *, int, cred_t *); -extern int zfs_write(znode_t *, uio_t *, int, cred_t *); +extern int zfs_read(znode_t *, zfs_uio_t *, int, cred_t *); +extern int zfs_write(znode_t *, zfs_uio_t *, int, cred_t *); extern int zfs_holey(znode_t *, ulong_t, loff_t *); extern int zfs_access(znode_t *, int, int, cred_t *); extern int zfs_getsecattr(znode_t *, vsecattr_t *, int, cred_t *); extern int zfs_setsecattr(znode_t *, vsecattr_t *, int, cred_t *); -extern int mappedread(znode_t *, int, uio_t *); -extern int mappedread_sf(znode_t *, int, uio_t *); +extern int mappedread(znode_t *, int, zfs_uio_t *); +extern int mappedread_sf(znode_t *, int, zfs_uio_t *); extern void update_pages(znode_t *, int64_t, int, objset_t *); /* diff --git a/lib/libspl/include/sys/uio.h b/lib/libspl/include/sys/uio.h index 1d56b5b18b..81ade54b54 100644 --- a/lib/libspl/include/sys/uio.h +++ b/lib/libspl/include/sys/uio.h @@ -51,58 +51,58 @@ typedef struct iovec iovec_t; #if defined(__linux__) || defined(__APPLE__) -typedef enum uio_rw { +typedef enum zfs_uio_rw { UIO_READ = 0, UIO_WRITE = 1, -} uio_rw_t; +} zfs_uio_rw_t; -typedef enum uio_seg { +typedef enum zfs_uio_seg { UIO_USERSPACE = 0, UIO_SYSSPACE = 1, -} uio_seg_t; +} zfs_uio_seg_t; #elif defined(__FreeBSD__) -typedef enum uio_seg uio_seg_t; +typedef enum uio_seg zfs_uio_seg_t; #endif -typedef struct uio { +typedef struct zfs_uio { struct iovec *uio_iov; /* pointer to array of iovecs */ int uio_iovcnt; /* number of iovecs */ offset_t uio_loffset; /* file offset */ - uio_seg_t uio_segflg; /* address space (kernel or user) */ + zfs_uio_seg_t uio_segflg; /* address space (kernel or user) */ uint16_t uio_fmode; /* file mode flags */ uint16_t uio_extflg; /* extended flags */ ssize_t uio_resid; /* residual count */ -} uio_t; +} zfs_uio_t; -#define uio_segflg(uio) (uio)->uio_segflg -#define uio_offset(uio) (uio)->uio_loffset -#define uio_resid(uio) 
(uio)->uio_resid -#define uio_iovcnt(uio) (uio)->uio_iovcnt -#define uio_iovlen(uio, idx) (uio)->uio_iov[(idx)].iov_len -#define uio_iovbase(uio, idx) (uio)->uio_iov[(idx)].iov_base +#define zfs_uio_segflg(uio) (uio)->uio_segflg +#define zfs_uio_offset(uio) (uio)->uio_loffset +#define zfs_uio_resid(uio) (uio)->uio_resid +#define zfs_uio_iovcnt(uio) (uio)->uio_iovcnt +#define zfs_uio_iovlen(uio, idx) (uio)->uio_iov[(idx)].iov_len +#define zfs_uio_iovbase(uio, idx) (uio)->uio_iov[(idx)].iov_base static inline void -uio_iov_at_index(uio_t *uio, uint_t idx, void **base, uint64_t *len) +zfs_uio_iov_at_index(zfs_uio_t *uio, uint_t idx, void **base, uint64_t *len) { - *base = uio_iovbase(uio, idx); - *len = uio_iovlen(uio, idx); + *base = zfs_uio_iovbase(uio, idx); + *len = zfs_uio_iovlen(uio, idx); } static inline void -uio_advance(uio_t *uio, size_t size) +zfs_uio_advance(zfs_uio_t *uio, size_t size) { uio->uio_resid -= size; uio->uio_loffset += size; } static inline offset_t -uio_index_at_offset(uio_t *uio, offset_t off, uint_t *vec_idx) +zfs_uio_index_at_offset(zfs_uio_t *uio, offset_t off, uint_t *vec_idx) { *vec_idx = 0; - while (*vec_idx < (uint_t)uio_iovcnt(uio) && - off >= (offset_t)uio_iovlen(uio, *vec_idx)) { - off -= uio_iovlen(uio, *vec_idx); + while (*vec_idx < (uint_t)zfs_uio_iovcnt(uio) && + off >= (offset_t)zfs_uio_iovlen(uio, *vec_idx)) { + off -= zfs_uio_iovlen(uio, *vec_idx); (*vec_idx)++; } diff --git a/module/icp/algs/modes/modes.c b/module/icp/algs/modes/modes.c index faae9722bd..59743c7d68 100644 --- a/module/icp/algs/modes/modes.c +++ b/module/icp/algs/modes/modes.c @@ -43,11 +43,11 @@ crypto_init_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset) break; case CRYPTO_DATA_UIO: { - uio_t *uiop = out->cd_uio; + zfs_uio_t *uiop = out->cd_uio; uint_t vec_idx; offset = out->cd_offset; - offset = uio_index_at_offset(uiop, offset, &vec_idx); + offset = zfs_uio_index_at_offset(uiop, offset, &vec_idx); *current_offset = offset; *iov_or_mp = (void *)(uintptr_t)vec_idx; @@ -85,7 +85,7 @@ crypto_get_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset, } case CRYPTO_DATA_UIO: { - uio_t *uio = out->cd_uio; + zfs_uio_t *uio = out->cd_uio; offset_t offset; uint_t vec_idx; uint8_t *p; @@ -94,7 +94,7 @@ crypto_get_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset, offset = *current_offset; vec_idx = (uintptr_t)(*iov_or_mp); - uio_iov_at_index(uio, vec_idx, &iov_base, &iov_len); + zfs_uio_iov_at_index(uio, vec_idx, &iov_base, &iov_len); p = (uint8_t *)iov_base + offset; *out_data_1 = p; @@ -106,10 +106,10 @@ crypto_get_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset, } else { /* one block spans two iovecs */ *out_data_1_len = iov_len - offset; - if (vec_idx == uio_iovcnt(uio)) + if (vec_idx == zfs_uio_iovcnt(uio)) return; vec_idx++; - uio_iov_at_index(uio, vec_idx, &iov_base, &iov_len); + zfs_uio_iov_at_index(uio, vec_idx, &iov_base, &iov_len); *out_data_2 = (uint8_t *)iov_base; *current_offset = amt - *out_data_1_len; } diff --git a/module/icp/core/kcf_prov_lib.c b/module/icp/core/kcf_prov_lib.c index 905ef66573..1b115d9762 100644 --- a/module/icp/core/kcf_prov_lib.c +++ b/module/icp/core/kcf_prov_lib.c @@ -40,7 +40,7 @@ int crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd, void *digest_ctx, void (*update)(void)) { - uio_t *uiop = data->cd_uio; + zfs_uio_t *uiop = data->cd_uio; off_t offset = data->cd_offset; size_t length = len; uint_t vec_idx; @@ -48,7 +48,7 @@ crypto_uio_data(crypto_data_t 
*data, uchar_t *buf, int len, cmd_type_t cmd, uchar_t *datap; ASSERT(data->cd_format == CRYPTO_DATA_UIO); - if (uio_segflg(uiop) != UIO_SYSSPACE) { + if (zfs_uio_segflg(uiop) != UIO_SYSSPACE) { return (CRYPTO_ARGUMENTS_BAD); } @@ -56,9 +56,9 @@ crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd, * Jump to the first iovec containing data to be * processed. */ - offset = uio_index_at_offset(uiop, offset, &vec_idx); + offset = zfs_uio_index_at_offset(uiop, offset, &vec_idx); - if (vec_idx == uio_iovcnt(uiop) && length > 0) { + if (vec_idx == zfs_uio_iovcnt(uiop) && length > 0) { /* * The caller specified an offset that is larger than * the total size of the buffers it provided. @@ -66,11 +66,11 @@ crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd, return (CRYPTO_DATA_LEN_RANGE); } - while (vec_idx < uio_iovcnt(uiop) && length > 0) { - cur_len = MIN(uio_iovlen(uiop, vec_idx) - + while (vec_idx < zfs_uio_iovcnt(uiop) && length > 0) { + cur_len = MIN(zfs_uio_iovlen(uiop, vec_idx) - offset, length); - datap = (uchar_t *)(uio_iovbase(uiop, vec_idx) + offset); + datap = (uchar_t *)(zfs_uio_iovbase(uiop, vec_idx) + offset); switch (cmd) { case COPY_FROM_DATA: bcopy(datap, buf, cur_len); @@ -97,7 +97,7 @@ crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd, offset = 0; } - if (vec_idx == uio_iovcnt(uiop) && length > 0) { + if (vec_idx == zfs_uio_iovcnt(uiop) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed. @@ -166,7 +166,7 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output, void (*copy_block)(uint8_t *, uint64_t *)) { common_ctx_t *common_ctx = ctx; - uio_t *uiop = input->cd_uio; + zfs_uio_t *uiop = input->cd_uio; off_t offset = input->cd_offset; size_t length = input->cd_length; uint_t vec_idx; @@ -178,7 +178,7 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output, &common_ctx->cc_iv[0]); } - if (uio_segflg(input->cd_uio) != UIO_SYSSPACE) { + if (zfs_uio_segflg(input->cd_uio) != UIO_SYSSPACE) { return (CRYPTO_ARGUMENTS_BAD); } @@ -186,8 +186,8 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output, * Jump to the first iovec containing data to be * processed. */ - offset = uio_index_at_offset(uiop, offset, &vec_idx); - if (vec_idx == uio_iovcnt(uiop) && length > 0) { + offset = zfs_uio_index_at_offset(uiop, offset, &vec_idx); + if (vec_idx == zfs_uio_iovcnt(uiop) && length > 0) { /* * The caller specified an offset that is larger than the * total size of the buffers it provided. @@ -198,11 +198,11 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output, /* * Now process the iovecs. */ - while (vec_idx < uio_iovcnt(uiop) && length > 0) { - cur_len = MIN(uio_iovlen(uiop, vec_idx) - + while (vec_idx < zfs_uio_iovcnt(uiop) && length > 0) { + cur_len = MIN(zfs_uio_iovlen(uiop, vec_idx) - offset, length); - int rv = (cipher)(ctx, uio_iovbase(uiop, vec_idx) + offset, + int rv = (cipher)(ctx, zfs_uio_iovbase(uiop, vec_idx) + offset, cur_len, output); if (rv != CRYPTO_SUCCESS) { @@ -213,7 +213,7 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output, offset = 0; } - if (vec_idx == uio_iovcnt(uiop) && length > 0) { + if (vec_idx == zfs_uio_iovcnt(uiop) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. 
diff --git a/module/icp/io/sha1_mod.c b/module/icp/io/sha1_mod.c index ffae143cde..6dcee6b2ec 100644 --- a/module/icp/io/sha1_mod.c +++ b/module/icp/io/sha1_mod.c @@ -271,15 +271,15 @@ sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data) size_t cur_len; /* we support only kernel buffer */ - if (uio_segflg(data->cd_uio) != UIO_SYSSPACE) + if (zfs_uio_segflg(data->cd_uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing data to be * digested. */ - offset = uio_index_at_offset(data->cd_uio, offset, &vec_idx); - if (vec_idx == uio_iovcnt(data->cd_uio)) { + offset = zfs_uio_index_at_offset(data->cd_uio, offset, &vec_idx); + if (vec_idx == zfs_uio_iovcnt(data->cd_uio)) { /* * The caller specified an offset that is larger than the * total size of the buffers it provided. @@ -290,12 +290,12 @@ sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data) /* * Now do the digesting on the iovecs. */ - while (vec_idx < uio_iovcnt(data->cd_uio) && length > 0) { - cur_len = MIN(uio_iovlen(data->cd_uio, vec_idx) - + while (vec_idx < zfs_uio_iovcnt(data->cd_uio) && length > 0) { + cur_len = MIN(zfs_uio_iovlen(data->cd_uio, vec_idx) - offset, length); SHA1Update(sha1_ctx, - (uint8_t *)uio_iovbase(data->cd_uio, vec_idx) + offset, + (uint8_t *)zfs_uio_iovbase(data->cd_uio, vec_idx) + offset, cur_len); length -= cur_len; @@ -303,7 +303,7 @@ sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data) offset = 0; } - if (vec_idx == uio_iovcnt(data->cd_uio) && length > 0) { + if (vec_idx == zfs_uio_iovcnt(data->cd_uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. @@ -330,15 +330,15 @@ sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest, uint_t vec_idx = 0; /* we support only kernel buffer */ - if (uio_segflg(digest->cd_uio) != UIO_SYSSPACE) + if (zfs_uio_segflg(digest->cd_uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing ptr to the digest to * be returned. */ - offset = uio_index_at_offset(digest->cd_uio, offset, &vec_idx); - if (vec_idx == uio_iovcnt(digest->cd_uio)) { + offset = zfs_uio_index_at_offset(digest->cd_uio, offset, &vec_idx); + if (vec_idx == zfs_uio_iovcnt(digest->cd_uio)) { /* * The caller specified an offset that is * larger than the total size of the buffers @@ -348,7 +348,7 @@ sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest, } if (offset + digest_len <= - uio_iovlen(digest->cd_uio, vec_idx)) { + zfs_uio_iovlen(digest->cd_uio, vec_idx)) { /* * The computed SHA1 digest will fit in the current * iovec. @@ -360,11 +360,11 @@ sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest, * the user only what was requested. 
*/ SHA1Final(digest_scratch, sha1_ctx); - bcopy(digest_scratch, (uchar_t *)uio_iovbase(digest-> - cd_uio, vec_idx) + offset, + bcopy(digest_scratch, (uchar_t *) + zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset, digest_len); } else { - SHA1Final((uchar_t *)uio_iovbase(digest-> + SHA1Final((uchar_t *)zfs_uio_iovbase(digest-> cd_uio, vec_idx) + offset, sha1_ctx); } @@ -382,11 +382,11 @@ sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest, SHA1Final(digest_tmp, sha1_ctx); - while (vec_idx < uio_iovcnt(digest->cd_uio) && length > 0) { - cur_len = MIN(uio_iovlen(digest->cd_uio, vec_idx) - + while (vec_idx < zfs_uio_iovcnt(digest->cd_uio) && length > 0) { + cur_len = MIN(zfs_uio_iovlen(digest->cd_uio, vec_idx) - offset, length); bcopy(digest_tmp + scratch_offset, - uio_iovbase(digest->cd_uio, vec_idx) + offset, + zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset, cur_len); length -= cur_len; @@ -395,7 +395,7 @@ sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest, offset = 0; } - if (vec_idx == uio_iovcnt(digest->cd_uio) && length > 0) { + if (vec_idx == zfs_uio_iovcnt(digest->cd_uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. @@ -1096,12 +1096,12 @@ sha1_mac_verify_atomic(crypto_provider_handle_t provider, size_t cur_len; /* we support only kernel buffer */ - if (uio_segflg(mac->cd_uio) != UIO_SYSSPACE) + if (zfs_uio_segflg(mac->cd_uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* jump to the first iovec containing the expected digest */ - offset = uio_index_at_offset(mac->cd_uio, offset, &vec_idx); - if (vec_idx == uio_iovcnt(mac->cd_uio)) { + offset = zfs_uio_index_at_offset(mac->cd_uio, offset, &vec_idx); + if (vec_idx == zfs_uio_iovcnt(mac->cd_uio)) { /* * The caller specified an offset that is * larger than the total size of the buffers @@ -1112,12 +1112,12 @@ sha1_mac_verify_atomic(crypto_provider_handle_t provider, } /* do the comparison of computed digest vs specified one */ - while (vec_idx < uio_iovcnt(mac->cd_uio) && length > 0) { - cur_len = MIN(uio_iovlen(mac->cd_uio, vec_idx) - + while (vec_idx < zfs_uio_iovcnt(mac->cd_uio) && length > 0) { + cur_len = MIN(zfs_uio_iovlen(mac->cd_uio, vec_idx) - offset, length); if (bcmp(digest + scratch_offset, - uio_iovbase(mac->cd_uio, vec_idx) + offset, + zfs_uio_iovbase(mac->cd_uio, vec_idx) + offset, cur_len) != 0) { ret = CRYPTO_INVALID_MAC; break; diff --git a/module/icp/io/sha2_mod.c b/module/icp/io/sha2_mod.c index a4a5c6041d..d690cd0bcb 100644 --- a/module/icp/io/sha2_mod.c +++ b/module/icp/io/sha2_mod.c @@ -296,15 +296,15 @@ sha2_digest_update_uio(SHA2_CTX *sha2_ctx, crypto_data_t *data) size_t cur_len; /* we support only kernel buffer */ - if (uio_segflg(data->cd_uio) != UIO_SYSSPACE) + if (zfs_uio_segflg(data->cd_uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing data to be * digested. */ - offset = uio_index_at_offset(data->cd_uio, offset, &vec_idx); - if (vec_idx == uio_iovcnt(data->cd_uio)) { + offset = zfs_uio_index_at_offset(data->cd_uio, offset, &vec_idx); + if (vec_idx == zfs_uio_iovcnt(data->cd_uio)) { /* * The caller specified an offset that is larger than the * total size of the buffers it provided. @@ -315,18 +315,18 @@ sha2_digest_update_uio(SHA2_CTX *sha2_ctx, crypto_data_t *data) /* * Now do the digesting on the iovecs. 
*/ - while (vec_idx < uio_iovcnt(data->cd_uio) && length > 0) { - cur_len = MIN(uio_iovlen(data->cd_uio, vec_idx) - + while (vec_idx < zfs_uio_iovcnt(data->cd_uio) && length > 0) { + cur_len = MIN(zfs_uio_iovlen(data->cd_uio, vec_idx) - offset, length); - SHA2Update(sha2_ctx, (uint8_t *)uio_iovbase(data->cd_uio, + SHA2Update(sha2_ctx, (uint8_t *)zfs_uio_iovbase(data->cd_uio, vec_idx) + offset, cur_len); length -= cur_len; vec_idx++; offset = 0; } - if (vec_idx == uio_iovcnt(data->cd_uio) && length > 0) { + if (vec_idx == zfs_uio_iovcnt(data->cd_uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. @@ -353,15 +353,15 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest, uint_t vec_idx = 0; /* we support only kernel buffer */ - if (uio_segflg(digest->cd_uio) != UIO_SYSSPACE) + if (zfs_uio_segflg(digest->cd_uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing ptr to the digest to * be returned. */ - offset = uio_index_at_offset(digest->cd_uio, offset, &vec_idx); - if (vec_idx == uio_iovcnt(digest->cd_uio)) { + offset = zfs_uio_index_at_offset(digest->cd_uio, offset, &vec_idx); + if (vec_idx == zfs_uio_iovcnt(digest->cd_uio)) { /* * The caller specified an offset that is * larger than the total size of the buffers @@ -371,7 +371,7 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest, } if (offset + digest_len <= - uio_iovlen(digest->cd_uio, vec_idx)) { + zfs_uio_iovlen(digest->cd_uio, vec_idx)) { /* * The computed SHA2 digest will fit in the current * iovec. @@ -387,11 +387,11 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest, */ SHA2Final(digest_scratch, sha2_ctx); - bcopy(digest_scratch, (uchar_t *)uio_iovbase(digest-> - cd_uio, vec_idx) + offset, + bcopy(digest_scratch, (uchar_t *) + zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset, digest_len); } else { - SHA2Final((uchar_t *)uio_iovbase(digest-> + SHA2Final((uchar_t *)zfs_uio_iovbase(digest-> cd_uio, vec_idx) + offset, sha2_ctx); @@ -410,12 +410,12 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest, SHA2Final(digest_tmp, sha2_ctx); - while (vec_idx < uio_iovcnt(digest->cd_uio) && length > 0) { + while (vec_idx < zfs_uio_iovcnt(digest->cd_uio) && length > 0) { cur_len = - MIN(uio_iovlen(digest->cd_uio, vec_idx) - + MIN(zfs_uio_iovlen(digest->cd_uio, vec_idx) - offset, length); bcopy(digest_tmp + scratch_offset, - uio_iovbase(digest->cd_uio, vec_idx) + offset, + zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset, cur_len); length -= cur_len; @@ -424,7 +424,7 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest, offset = 0; } - if (vec_idx == uio_iovcnt(digest->cd_uio) && length > 0) { + if (vec_idx == zfs_uio_iovcnt(digest->cd_uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. 
@@ -1251,12 +1251,12 @@ sha2_mac_verify_atomic(crypto_provider_handle_t provider, size_t cur_len; /* we support only kernel buffer */ - if (uio_segflg(mac->cd_uio) != UIO_SYSSPACE) + if (zfs_uio_segflg(mac->cd_uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* jump to the first iovec containing the expected digest */ - offset = uio_index_at_offset(mac->cd_uio, offset, &vec_idx); - if (vec_idx == uio_iovcnt(mac->cd_uio)) { + offset = zfs_uio_index_at_offset(mac->cd_uio, offset, &vec_idx); + if (vec_idx == zfs_uio_iovcnt(mac->cd_uio)) { /* * The caller specified an offset that is * larger than the total size of the buffers @@ -1267,12 +1267,12 @@ sha2_mac_verify_atomic(crypto_provider_handle_t provider, } /* do the comparison of computed digest vs specified one */ - while (vec_idx < uio_iovcnt(mac->cd_uio) && length > 0) { - cur_len = MIN(uio_iovlen(mac->cd_uio, vec_idx) - + while (vec_idx < zfs_uio_iovcnt(mac->cd_uio) && length > 0) { + cur_len = MIN(zfs_uio_iovlen(mac->cd_uio, vec_idx) - offset, length); if (bcmp(digest + scratch_offset, - uio_iovbase(mac->cd_uio, vec_idx) + offset, + zfs_uio_iovbase(mac->cd_uio, vec_idx) + offset, cur_len) != 0) { ret = CRYPTO_INVALID_MAC; break; diff --git a/module/icp/io/skein_mod.c b/module/icp/io/skein_mod.c index 18026807fd..5ee36af12b 100644 --- a/module/icp/io/skein_mod.c +++ b/module/icp/io/skein_mod.c @@ -272,18 +272,18 @@ skein_digest_update_uio(skein_ctx_t *ctx, const crypto_data_t *data) size_t length = data->cd_length; uint_t vec_idx = 0; size_t cur_len; - uio_t *uio = data->cd_uio; + zfs_uio_t *uio = data->cd_uio; /* we support only kernel buffer */ - if (uio_segflg(uio) != UIO_SYSSPACE) + if (zfs_uio_segflg(uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing data to be * digested. */ - offset = uio_index_at_offset(uio, offset, &vec_idx); - if (vec_idx == uio_iovcnt(uio)) { + offset = zfs_uio_index_at_offset(uio, offset, &vec_idx); + if (vec_idx == zfs_uio_iovcnt(uio)) { /* * The caller specified an offset that is larger than the * total size of the buffers it provided. @@ -294,16 +294,16 @@ skein_digest_update_uio(skein_ctx_t *ctx, const crypto_data_t *data) /* * Now do the digesting on the iovecs. */ - while (vec_idx < uio_iovcnt(uio) && length > 0) { - cur_len = MIN(uio_iovlen(uio, vec_idx) - offset, length); - SKEIN_OP(ctx, Update, (uint8_t *)uio_iovbase(uio, vec_idx) + while (vec_idx < zfs_uio_iovcnt(uio) && length > 0) { + cur_len = MIN(zfs_uio_iovlen(uio, vec_idx) - offset, length); + SKEIN_OP(ctx, Update, (uint8_t *)zfs_uio_iovbase(uio, vec_idx) + offset, cur_len); length -= cur_len; vec_idx++; offset = 0; } - if (vec_idx == uio_iovcnt(uio) && length > 0) { + if (vec_idx == zfs_uio_iovcnt(uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. @@ -322,19 +322,19 @@ static int skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest, crypto_req_handle_t req) { - off_t offset = digest->cd_offset; - uint_t vec_idx = 0; - uio_t *uio = digest->cd_uio; + off_t offset = digest->cd_offset; + uint_t vec_idx = 0; + zfs_uio_t *uio = digest->cd_uio; /* we support only kernel buffer */ - if (uio_segflg(uio) != UIO_SYSSPACE) + if (zfs_uio_segflg(uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing ptr to the digest to be returned. 
*/ - offset = uio_index_at_offset(uio, offset, &vec_idx); - if (vec_idx == uio_iovcnt(uio)) { + offset = zfs_uio_index_at_offset(uio, offset, &vec_idx); + if (vec_idx == zfs_uio_iovcnt(uio)) { /* * The caller specified an offset that is larger than the * total size of the buffers it provided. @@ -342,10 +342,10 @@ skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest, return (CRYPTO_DATA_LEN_RANGE); } if (offset + CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen) <= - uio_iovlen(uio, vec_idx)) { + zfs_uio_iovlen(uio, vec_idx)) { /* The computed digest will fit in the current iovec. */ SKEIN_OP(ctx, Final, - (uchar_t *)uio_iovbase(uio, vec_idx) + offset); + (uchar_t *)zfs_uio_iovbase(uio, vec_idx) + offset); } else { uint8_t *digest_tmp; off_t scratch_offset = 0; @@ -357,11 +357,11 @@ skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest, if (digest_tmp == NULL) return (CRYPTO_HOST_MEMORY); SKEIN_OP(ctx, Final, digest_tmp); - while (vec_idx < uio_iovcnt(uio) && length > 0) { - cur_len = MIN(uio_iovlen(uio, vec_idx) - offset, + while (vec_idx < zfs_uio_iovcnt(uio) && length > 0) { + cur_len = MIN(zfs_uio_iovlen(uio, vec_idx) - offset, length); bcopy(digest_tmp + scratch_offset, - uio_iovbase(uio, vec_idx) + offset, cur_len); + zfs_uio_iovbase(uio, vec_idx) + offset, cur_len); length -= cur_len; vec_idx++; @@ -370,7 +370,7 @@ skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest, } kmem_free(digest_tmp, CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen)); - if (vec_idx == uio_iovcnt(uio) && length > 0) { + if (vec_idx == zfs_uio_iovcnt(uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. diff --git a/module/os/freebsd/spl/spl_uio.c b/module/os/freebsd/spl/spl_uio.c index c6b6103947..f5f3524f7b 100644 --- a/module/os/freebsd/spl/spl_uio.c +++ b/module/os/freebsd/spl/spl_uio.c @@ -43,31 +43,32 @@ #include #include #include +#include /* - * same as uiomove() but doesn't modify uio structure. + * same as zfs_uiomove() but doesn't modify uio structure. * return in cbytes how many bytes were copied. */ int -uiocopy(void *p, size_t n, enum uio_rw rw, struct uio *uio, size_t *cbytes) +zfs_uiocopy(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio, size_t *cbytes) { struct iovec small_iovec[1]; struct uio small_uio_clone; struct uio *uio_clone; int error; - ASSERT3U(uio->uio_rw, ==, rw); - if (uio->uio_iovcnt == 1) { - small_uio_clone = *uio; - small_iovec[0] = *uio->uio_iov; + ASSERT3U(zfs_uio_rw(uio), ==, rw); + if (zfs_uio_iovcnt(uio) == 1) { + small_uio_clone = *(GET_UIO_STRUCT(uio)); + small_iovec[0] = *(GET_UIO_STRUCT(uio)->uio_iov); small_uio_clone.uio_iov = small_iovec; uio_clone = &small_uio_clone; } else { - uio_clone = cloneuio(uio); + uio_clone = cloneuio(GET_UIO_STRUCT(uio)); } error = vn_io_fault_uiomove(p, n, uio_clone); - *cbytes = uio->uio_resid - uio_clone->uio_resid; + *cbytes = zfs_uio_resid(uio) - uio_clone->uio_resid; if (uio_clone != &small_uio_clone) free(uio_clone, M_IOV); return (error); @@ -77,16 +78,23 @@ uiocopy(void *p, size_t n, enum uio_rw rw, struct uio *uio, size_t *cbytes) * Drop the next n chars out of *uiop. */ void -uioskip(uio_t *uio, size_t n) +zfs_uioskip(zfs_uio_t *uio, size_t n) { - enum uio_seg segflg; + zfs_uio_seg_t segflg; /* For the full compatibility with illumos. 
*/ - if (n > uio->uio_resid) + if (n > zfs_uio_resid(uio)) return; - segflg = uio->uio_segflg; - uio->uio_segflg = UIO_NOCOPY; - uiomove(NULL, n, uio->uio_rw, uio); - uio->uio_segflg = segflg; + segflg = zfs_uio_segflg(uio); + zfs_uio_segflg(uio) = UIO_NOCOPY; + zfs_uiomove(NULL, n, zfs_uio_rw(uio), uio); + zfs_uio_segflg(uio) = segflg; +} + +int +zfs_uio_fault_move(void *p, size_t n, zfs_uio_rw_t dir, zfs_uio_t *uio) +{ + ASSERT(zfs_uio_rw(uio) == dir); + return (vn_io_fault_uiomove(p, n, GET_UIO_STRUCT(uio))); } diff --git a/module/os/freebsd/zfs/crypto_os.c b/module/os/freebsd/zfs/crypto_os.c index a0223a64f8..6a67dbc9f6 100644 --- a/module/os/freebsd/zfs/crypto_os.c +++ b/module/os/freebsd/zfs/crypto_os.c @@ -199,7 +199,7 @@ static void freebsd_crypt_uio_debug_log(boolean_t encrypt, freebsd_crypt_session_t *input_sessionp, struct zio_crypt_info *c_info, - uio_t *data_uio, + zfs_uio_t *data_uio, crypto_key_t *key, uint8_t *ivbuf, size_t datalen, @@ -224,13 +224,13 @@ freebsd_crypt_uio_debug_log(boolean_t encrypt, printf("%02x ", b[i]); } printf("}\n"); - for (int i = 0; i < data_uio->uio_iovcnt; i++) { + for (int i = 0; i < zfs_uio_iovcnt(data_uio); i++) { printf("\tiovec #%d: <%p, %u>\n", i, - data_uio->uio_iov[i].iov_base, - (unsigned int)data_uio->uio_iov[i].iov_len); - total += data_uio->uio_iov[i].iov_len; + zfs_uio_iovbase(data_uio, i), + (unsigned int)zfs_uio_iovlen(data_uio, i)); + total += zfs_uio_iovlen(data_uio, i); } - data_uio->uio_resid = total; + zfs_uio_resid(data_uio) = total; #endif } /* @@ -323,7 +323,7 @@ int freebsd_crypt_uio(boolean_t encrypt, freebsd_crypt_session_t *input_sessionp, struct zio_crypt_info *c_info, - uio_t *data_uio, + zfs_uio_t *data_uio, crypto_key_t *key, uint8_t *ivbuf, size_t datalen, @@ -336,9 +336,9 @@ freebsd_crypt_uio(boolean_t encrypt, freebsd_crypt_uio_debug_log(encrypt, input_sessionp, c_info, data_uio, key, ivbuf, datalen, auth_len); - for (int i = 0; i < data_uio->uio_iovcnt; i++) - total += data_uio->uio_iov[i].iov_len; - data_uio->uio_resid = total; + for (int i = 0; i < zfs_uio_iovcnt(data_uio); i++) + total += zfs_uio_iovlen(data_uio, i); + zfs_uio_resid(data_uio) = total; if (input_sessionp == NULL) { session = kmem_zalloc(sizeof (*session), KM_SLEEP); error = freebsd_crypt_newsession(session, c_info, key); @@ -356,7 +356,7 @@ freebsd_crypt_uio(boolean_t encrypt, CRYPTO_OP_VERIFY_DIGEST; } crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_IV_SEPARATE; - crypto_use_uio(crp, data_uio); + crypto_use_uio(crp, GET_UIO_STRUCT(data_uio)); crp->crp_aad_start = 0; crp->crp_aad_length = auth_len; @@ -493,7 +493,7 @@ int freebsd_crypt_uio(boolean_t encrypt, freebsd_crypt_session_t *input_sessionp, struct zio_crypt_info *c_info, - uio_t *data_uio, + zfs_uio_t *data_uio, crypto_key_t *key, uint8_t *ivbuf, size_t datalen, @@ -577,7 +577,7 @@ freebsd_crypt_uio(boolean_t encrypt, crp->crp_session = session->fs_sid; crp->crp_ilen = auth_len + datalen; - crp->crp_buf = (void*)data_uio; + crp->crp_buf = (void*)GET_UIO_STRUCT(data_uio); crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIFSYNC; auth_desc->crd_skip = 0; diff --git a/module/os/freebsd/zfs/zfs_ctldir.c b/module/os/freebsd/zfs/zfs_ctldir.c index 587c648a02..3ab4502bbc 100644 --- a/module/os/freebsd/zfs/zfs_ctldir.c +++ b/module/os/freebsd/zfs/zfs_ctldir.c @@ -251,7 +251,7 @@ sfs_reclaim_vnode(vnode_t *vp) static int sfs_readdir_common(uint64_t parent_id, uint64_t id, struct vop_readdir_args *ap, - uio_t *uio, off_t *offp) + zfs_uio_t *uio, off_t *offp) { struct dirent entry; int error; @@ -260,26 
+260,26 @@ sfs_readdir_common(uint64_t parent_id, uint64_t id, struct vop_readdir_args *ap, if (ap->a_ncookies != NULL) *ap->a_ncookies = 0; - if (uio->uio_resid < sizeof (entry)) + if (zfs_uio_resid(uio) < sizeof (entry)) return (SET_ERROR(EINVAL)); - if (uio->uio_offset < 0) + if (zfs_uio_offset(uio) < 0) return (SET_ERROR(EINVAL)); - if (uio->uio_offset == 0) { + if (zfs_uio_offset(uio) == 0) { entry.d_fileno = id; entry.d_type = DT_DIR; entry.d_name[0] = '.'; entry.d_name[1] = '\0'; entry.d_namlen = 1; entry.d_reclen = sizeof (entry); - error = vfs_read_dirent(ap, &entry, uio->uio_offset); + error = vfs_read_dirent(ap, &entry, zfs_uio_offset(uio)); if (error != 0) return (SET_ERROR(error)); } - if (uio->uio_offset < sizeof (entry)) + if (zfs_uio_offset(uio) < sizeof (entry)) return (SET_ERROR(EINVAL)); - if (uio->uio_offset == sizeof (entry)) { + if (zfs_uio_offset(uio) == sizeof (entry)) { entry.d_fileno = parent_id; entry.d_type = DT_DIR; entry.d_name[0] = '.'; @@ -287,7 +287,7 @@ sfs_readdir_common(uint64_t parent_id, uint64_t id, struct vop_readdir_args *ap, entry.d_name[2] = '\0'; entry.d_namlen = 2; entry.d_reclen = sizeof (entry); - error = vfs_read_dirent(ap, &entry, uio->uio_offset); + error = vfs_read_dirent(ap, &entry, zfs_uio_offset(uio)); if (error != 0) return (SET_ERROR(error)); } @@ -666,21 +666,23 @@ zfsctl_root_readdir(struct vop_readdir_args *ap) vnode_t *vp = ap->a_vp; zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data; zfsctl_root_t *node = vp->v_data; - uio_t *uio = ap->a_uio; + zfs_uio_t uio; int *eofp = ap->a_eofflag; off_t dots_offset; int error; + zfs_uio_init(&uio, ap->a_uio); + ASSERT(vp->v_type == VDIR); - error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, uio, + error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, &uio, &dots_offset); if (error != 0) { if (error == ENAMETOOLONG) /* ran out of destination space */ error = 0; return (error); } - if (uio->uio_offset != dots_offset) + if (zfs_uio_offset(&uio) != dots_offset) return (SET_ERROR(EINVAL)); CTASSERT(sizeof (node->snapdir->sn_name) <= sizeof (entry.d_name)); @@ -689,7 +691,7 @@ zfsctl_root_readdir(struct vop_readdir_args *ap) strcpy(entry.d_name, node->snapdir->sn_name); entry.d_namlen = strlen(entry.d_name); entry.d_reclen = sizeof (entry); - error = vfs_read_dirent(ap, &entry, uio->uio_offset); + error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio)); if (error != 0) { if (error == ENAMETOOLONG) error = 0; @@ -1030,15 +1032,17 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap) struct dirent entry; vnode_t *vp = ap->a_vp; zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data; - uio_t *uio = ap->a_uio; + zfs_uio_t uio; int *eofp = ap->a_eofflag; off_t dots_offset; int error; + zfs_uio_init(&uio, ap->a_uio); + ASSERT(vp->v_type == VDIR); - error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap, uio, - &dots_offset); + error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap, + &uio, &dots_offset); if (error != 0) { if (error == ENAMETOOLONG) /* ran out of destination space */ error = 0; @@ -1050,7 +1054,7 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap) uint64_t cookie; uint64_t id; - cookie = uio->uio_offset - dots_offset; + cookie = zfs_uio_offset(&uio) - dots_offset; dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG); error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof (snapname), @@ -1071,14 +1075,14 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap) strcpy(entry.d_name, snapname); entry.d_namlen = strlen(entry.d_name); entry.d_reclen = sizeof 
(entry); - error = vfs_read_dirent(ap, &entry, uio->uio_offset); + error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio)); if (error != 0) { if (error == ENAMETOOLONG) error = 0; ZFS_EXIT(zfsvfs); return (SET_ERROR(error)); } - uio->uio_offset = cookie + dots_offset; + zfs_uio_setoffset(&uio, cookie + dots_offset); } /* NOTREACHED */ } diff --git a/module/os/freebsd/zfs/zfs_file_os.c b/module/os/freebsd/zfs/zfs_file_os.c index 78c4f7c6ba..fd86a75416 100644 --- a/module/os/freebsd/zfs/zfs_file_os.c +++ b/module/os/freebsd/zfs/zfs_file_os.c @@ -287,7 +287,7 @@ zfs_file_private(zfs_file_t *fp) int zfs_file_unlink(const char *fnamep) { - enum uio_seg seg = UIO_SYSSPACE; + zfs_uio_seg_t seg = UIO_SYSSPACE; int rc; #if __FreeBSD_version >= 1300018 diff --git a/module/os/freebsd/zfs/zfs_vnops_os.c b/module/os/freebsd/zfs/zfs_vnops_os.c index 633898e051..efecf8e830 100644 --- a/module/os/freebsd/zfs/zfs_vnops_os.c +++ b/module/os/freebsd/zfs/zfs_vnops_os.c @@ -518,7 +518,7 @@ update_pages(znode_t *zp, int64_t start, int len, objset_t *os) * in one single dmu_read() call. */ int -mappedread_sf(znode_t *zp, int nbytes, uio_t *uio) +mappedread_sf(znode_t *zp, int nbytes, zfs_uio_t *uio) { vnode_t *vp = ZTOV(zp); objset_t *os = zp->z_zfsvfs->z_os; @@ -530,14 +530,14 @@ mappedread_sf(znode_t *zp, int nbytes, uio_t *uio) int len = nbytes; int error = 0; - ASSERT(uio->uio_segflg == UIO_NOCOPY); + ASSERT(zfs_uio_segflg(uio) == UIO_NOCOPY); ASSERT(vp->v_mount != NULL); obj = vp->v_object; ASSERT(obj != NULL); - ASSERT((uio->uio_loffset & PAGEOFFSET) == 0); + ASSERT((zfs_uio_offset(uio) & PAGEOFFSET) == 0); zfs_vmobject_wlock_12(obj); - for (start = uio->uio_loffset; len > 0; start += PAGESIZE) { + for (start = zfs_uio_offset(uio); len > 0; start += PAGESIZE) { int bytes = MIN(PAGESIZE, len); pp = vm_page_grab_unlocked(obj, OFF_TO_IDX(start), @@ -584,8 +584,7 @@ mappedread_sf(znode_t *zp, int nbytes, uio_t *uio) } if (error) break; - uio->uio_resid -= bytes; - uio->uio_offset += bytes; + zfs_uio_advance(uio, bytes); len -= bytes; } zfs_vmobject_wunlock_12(obj); @@ -603,7 +602,7 @@ mappedread_sf(znode_t *zp, int nbytes, uio_t *uio) * the file is memory mapped. */ int -mappedread(znode_t *zp, int nbytes, uio_t *uio) +mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio) { vnode_t *vp = ZTOV(zp); vm_object_t obj; @@ -616,7 +615,7 @@ mappedread(znode_t *zp, int nbytes, uio_t *uio) obj = vp->v_object; ASSERT(obj != NULL); - start = uio->uio_loffset; + start = zfs_uio_offset(uio); off = start & PAGEOFFSET; zfs_vmobject_wlock_12(obj); for (start &= PAGEMASK; len > 0; start += PAGESIZE) { @@ -629,7 +628,8 @@ mappedread(znode_t *zp, int nbytes, uio_t *uio) zfs_vmobject_wunlock_12(obj); va = zfs_map_page(pp, &sf); - error = vn_io_fault_uiomove(va + off, bytes, uio); + error = vn_io_fault_uiomove(va + off, bytes, + GET_UIO_STRUCT(uio)); zfs_unmap_page(sf); zfs_vmobject_wlock_12(obj); page_unhold(pp); @@ -1678,7 +1678,7 @@ zfs_rmdir(znode_t *dzp, const char *name, znode_t *cwd, cred_t *cr, int flags) */ /* ARGSUSED */ static int -zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, +zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp, int *ncookies, ulong_t **cookies) { znode_t *zp = VTOZ(vp); @@ -1723,7 +1723,7 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, /* * Check for valid iov_len. 
*/ - if (uio->uio_iov->iov_len <= 0) { + if (GET_UIO_STRUCT(uio)->uio_iov->iov_len <= 0) { ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } @@ -1738,7 +1738,7 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, error = 0; os = zfsvfs->z_os; - offset = uio->uio_loffset; + offset = zfs_uio_offset(uio); prefetch = zp->z_zn_prefetch; /* @@ -1759,9 +1759,9 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, /* * Get space to change directory entries into fs independent format. */ - iovp = uio->uio_iov; + iovp = GET_UIO_STRUCT(uio)->uio_iov; bytes_wanted = iovp->iov_len; - if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) { + if (zfs_uio_segflg(uio) != UIO_SYSSPACE || zfs_uio_iovcnt(uio) != 1) { bufsize = bytes_wanted; outbuf = kmem_alloc(bufsize, KM_SLEEP); odp = (struct dirent64 *)outbuf; @@ -1776,7 +1776,7 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, /* * Minimum entry size is dirent size and 1 byte for a file name. */ - ncooks = uio->uio_resid / (sizeof (struct dirent) - + ncooks = zfs_uio_resid(uio) / (sizeof (struct dirent) - sizeof (((struct dirent *)NULL)->d_name) + 1); cooks = malloc(ncooks * sizeof (ulong_t), M_TEMP, M_WAITOK); *cookies = cooks; @@ -1956,20 +1956,21 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, if (ncookies != NULL) *ncookies -= ncooks; - if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) { + if (zfs_uio_segflg(uio) == UIO_SYSSPACE && zfs_uio_iovcnt(uio) == 1) { iovp->iov_base += outcount; iovp->iov_len -= outcount; - uio->uio_resid -= outcount; - } else if ((error = uiomove(outbuf, (long)outcount, UIO_READ, uio))) { + zfs_uio_resid(uio) -= outcount; + } else if ((error = + zfs_uiomove(outbuf, (long)outcount, UIO_READ, uio))) { /* * Reset the pointer. */ - offset = uio->uio_loffset; + offset = zfs_uio_offset(uio); } update: zap_cursor_fini(&zc); - if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) + if (zfs_uio_segflg(uio) != UIO_SYSSPACE || zfs_uio_iovcnt(uio) != 1) kmem_free(outbuf, bufsize); if (error == ENOENT) @@ -1977,7 +1978,7 @@ update: ZFS_ACCESSTIME_STAMP(zfsvfs, zp); - uio->uio_loffset = offset; + zfs_uio_setoffset(uio, offset); ZFS_EXIT(zfsvfs); if (error != 0 && cookies != NULL) { free(*cookies, M_TEMP); @@ -3660,7 +3661,7 @@ zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap, */ /* ARGSUSED */ static int -zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct) +zfs_readlink(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, caller_context_t *ct) { znode_t *zp = VTOZ(vp); zfsvfs_t *zfsvfs = zp->z_zfsvfs; @@ -4443,8 +4444,9 @@ struct vop_read_args { static int zfs_freebsd_read(struct vop_read_args *ap) { - - return (zfs_read(VTOZ(ap->a_vp), ap->a_uio, ioflags(ap->a_ioflag), + zfs_uio_t uio; + zfs_uio_init(&uio, ap->a_uio); + return (zfs_read(VTOZ(ap->a_vp), &uio, ioflags(ap->a_ioflag), ap->a_cred)); } @@ -4460,8 +4462,9 @@ struct vop_write_args { static int zfs_freebsd_write(struct vop_write_args *ap) { - - return (zfs_write(VTOZ(ap->a_vp), ap->a_uio, ioflags(ap->a_ioflag), + zfs_uio_t uio; + zfs_uio_init(&uio, ap->a_uio); + return (zfs_write(VTOZ(ap->a_vp), &uio, ioflags(ap->a_ioflag), ap->a_cred)); } @@ -4713,8 +4716,9 @@ struct vop_readdir_args { static int zfs_freebsd_readdir(struct vop_readdir_args *ap) { - - return (zfs_readdir(ap->a_vp, ap->a_uio, ap->a_cred, ap->a_eofflag, + zfs_uio_t uio; + zfs_uio_init(&uio, ap->a_uio); + return (zfs_readdir(ap->a_vp, &uio, ap->a_cred, ap->a_eofflag, ap->a_ncookies, ap->a_cookies)); } @@ -5004,8 +5008,9 @@ struct 
vop_readlink_args { static int zfs_freebsd_readlink(struct vop_readlink_args *ap) { - - return (zfs_readlink(ap->a_vp, ap->a_uio, ap->a_cred, NULL)); + zfs_uio_t uio; + zfs_uio_init(&uio, ap->a_uio); + return (zfs_readlink(ap->a_vp, &uio, ap->a_cred, NULL)); } #ifndef _SYS_SYSPROTO_H_ @@ -5473,11 +5478,14 @@ zfs_listextattr(struct vop_listextattr_args *ap) uint8_t dirbuf[sizeof (struct dirent)]; struct dirent *dp; struct iovec aiov; - struct uio auio, *uio = ap->a_uio; + struct uio auio; size_t *sizep = ap->a_size; size_t plen; vnode_t *xvp = NULL, *vp; int done, error, eof, pos; + zfs_uio_t uio; + + zfs_uio_init(&uio, ap->a_uio); /* * If the xattr property is off, refuse the request. @@ -5559,15 +5567,16 @@ zfs_listextattr(struct vop_listextattr_args *ap) nlen = dp->d_namlen - plen; if (sizep != NULL) *sizep += 1 + nlen; - else if (uio != NULL) { + else if (GET_UIO_STRUCT(&uio) != NULL) { /* * Format of extattr name entry is one byte for * length and the rest for name. */ - error = uiomove(&nlen, 1, uio->uio_rw, uio); + error = zfs_uiomove(&nlen, 1, zfs_uio_rw(&uio), + &uio); if (error == 0) { - error = uiomove(dp->d_name + plen, nlen, - uio->uio_rw, uio); + error = zfs_uiomove(dp->d_name + plen, + nlen, zfs_uio_rw(&uio), &uio); } if (error != 0) break; diff --git a/module/os/freebsd/zfs/zio_crypt.c b/module/os/freebsd/zfs/zio_crypt.c index fb88bc325d..f4e69b8ec7 100644 --- a/module/os/freebsd/zfs/zio_crypt.c +++ b/module/os/freebsd/zfs/zio_crypt.c @@ -404,7 +404,7 @@ int failed_decrypt_size; static int zio_do_crypt_uio_opencrypto(boolean_t encrypt, freebsd_crypt_session_t *sess, uint64_t crypt, crypto_key_t *key, uint8_t *ivbuf, uint_t datalen, - uio_t *uio, uint_t auth_len) + zfs_uio_t *uio, uint_t auth_len) { zio_crypt_info_t *ci; int ret; @@ -439,7 +439,8 @@ zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv, * input and output. Also, the AAD (for AES-GMC at least) * needs to logically go in front. */ - uio_t cuio; + zfs_uio_t cuio; + struct uio cuio_s; iovec_t iovecs[4]; uint64_t crypt = key->zk_crypt; uint_t enc_len, keydata_len, aad_len; @@ -447,6 +448,8 @@ zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv, ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS); ASSERT3U(cwkey->ck_format, ==, CRYPTO_KEY_RAW); + zfs_uio_init(&cuio, &cuio_s); + keydata_len = zio_crypt_table[crypt].ci_keylen; /* generate iv for wrapping the master and hmac key */ @@ -489,9 +492,9 @@ zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv, iovecs[0].iov_len = aad_len; enc_len = zio_crypt_table[crypt].ci_keylen + SHA512_HMAC_KEYLEN; - cuio.uio_iov = iovecs; - cuio.uio_iovcnt = 4; - cuio.uio_segflg = UIO_SYSSPACE; + GET_UIO_STRUCT(&cuio)->uio_iov = iovecs; + zfs_uio_iovcnt(&cuio) = 4; + zfs_uio_segflg(&cuio) = UIO_SYSSPACE; /* encrypt the keys and store the resulting ciphertext and mac */ ret = zio_do_crypt_uio_opencrypto(B_TRUE, NULL, crypt, cwkey, @@ -517,7 +520,8 @@ zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version, * input and output. Also, the AAD (for AES-GMC at least) * needs to logically go in front. 
*/ - uio_t cuio; + zfs_uio_t cuio; + struct uio cuio_s; iovec_t iovecs[4]; void *src, *dst; uint_t enc_len, keydata_len, aad_len; @@ -528,6 +532,8 @@ zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version, keydata_len = zio_crypt_table[crypt].ci_keylen; rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL); + zfs_uio_init(&cuio, &cuio_s); + /* * Since we only support one buffer, we need to copy * the encrypted buffer (source) to the plain buffer @@ -565,9 +571,9 @@ zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version, iovecs[0].iov_base = aad; iovecs[0].iov_len = aad_len; - cuio.uio_iov = iovecs; - cuio.uio_iovcnt = 4; - cuio.uio_segflg = UIO_SYSSPACE; + GET_UIO_STRUCT(&cuio)->uio_iov = iovecs; + zfs_uio_iovcnt(&cuio) = 4; + zfs_uio_segflg(&cuio) = UIO_SYSSPACE; /* decrypt the keys and store the result in the output buffers */ ret = zio_do_crypt_uio_opencrypto(B_FALSE, NULL, crypt, cwkey, @@ -1137,10 +1143,11 @@ error: } static void -zio_crypt_destroy_uio(uio_t *uio) +zio_crypt_destroy_uio(zfs_uio_t *uio) { - if (uio->uio_iov) - kmem_free(uio->uio_iov, uio->uio_iovcnt * sizeof (iovec_t)); + if (GET_UIO_STRUCT(uio)->uio_iov) + kmem_free(GET_UIO_STRUCT(uio)->uio_iov, + zfs_uio_iovcnt(uio) * sizeof (iovec_t)); } /* @@ -1234,14 +1241,14 @@ zio_crypt_do_indirect_mac_checksum_abd(boolean_t generate, abd_t *abd, * accommodate some of the drivers, the authbuf needs to be logically before * the data. This means that we need to copy the source to the destination, * and set up an extra iovec_t at the beginning to handle the authbuf. - * It also means we'll only return one uio_t. + * It also means we'll only return one zfs_uio_t. */ /* ARGSUSED */ static int zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf, - uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, uio_t *puio, - uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len, + uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, zfs_uio_t *puio, + zfs_uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len, boolean_t *no_crypt) { uint8_t *aadbuf = zio_buf_alloc(datalen); @@ -1385,8 +1392,8 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf, *enc_len = total_len; *authbuf = aadbuf; *auth_len = aad_len; - out_uio->uio_iov = dst_iovecs; - out_uio->uio_iovcnt = nr_iovecs; + GET_UIO_STRUCT(out_uio)->uio_iov = dst_iovecs; + zfs_uio_iovcnt(out_uio) = nr_iovecs; return (0); } @@ -1397,7 +1404,7 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf, static int zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version, uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, - uio_t *puio, uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf, + zfs_uio_t *puio, zfs_uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len, boolean_t *no_crypt) { uint8_t *aadbuf = zio_buf_alloc(datalen); @@ -1534,8 +1541,8 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version, *enc_len = total_len; *authbuf = aadbuf; *auth_len = aad_len; - out_uio->uio_iov = dst_iovecs; - out_uio->uio_iovcnt = nr_iovecs; + GET_UIO_STRUCT(out_uio)->uio_iov = dst_iovecs; + zfs_uio_iovcnt(out_uio) = nr_iovecs; return (0); } @@ -1543,7 +1550,7 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version, /* ARGSUSED */ static int zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf, - uint8_t *cipherbuf, uint_t datalen, uio_t *puio, uio_t *out_uio, + uint8_t *cipherbuf, uint_t datalen, zfs_uio_t *puio, zfs_uio_t *out_uio, uint_t 
*enc_len) { int ret; @@ -1571,8 +1578,8 @@ zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf, cipher_iovecs[0].iov_len = datalen; *enc_len = datalen; - out_uio->uio_iov = cipher_iovecs; - out_uio->uio_iovcnt = nr_cipher; + GET_UIO_STRUCT(out_uio)->uio_iov = cipher_iovecs; + zfs_uio_iovcnt(out_uio) = nr_cipher; return (0); @@ -1583,8 +1590,8 @@ error: kmem_free(cipher_iovecs, nr_cipher * sizeof (iovec_t)); *enc_len = 0; - out_uio->uio_iov = NULL; - out_uio->uio_iovcnt = 0; + GET_UIO_STRUCT(out_uio)->uio_iov = NULL; + zfs_uio_iovcnt(out_uio) = 0; return (ret); } @@ -1600,8 +1607,8 @@ error: static int zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot, uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, - uint8_t *mac, uio_t *puio, uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, - uint_t *auth_len, boolean_t *no_crypt) + uint8_t *mac, zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len, + uint8_t **authbuf, uint_t *auth_len, boolean_t *no_crypt) { int ret; iovec_t *mac_iov; @@ -1633,9 +1640,11 @@ zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot, goto error; /* populate the uios */ - cuio->uio_segflg = UIO_SYSSPACE; + zfs_uio_segflg(cuio) = UIO_SYSSPACE; - mac_iov = ((iovec_t *)&cuio->uio_iov[cuio->uio_iovcnt - 1]); + mac_iov = + ((iovec_t *)&(GET_UIO_STRUCT(cuio)-> + uio_iov[zfs_uio_iovcnt(cuio) - 1])); mac_iov->iov_base = (void *)mac; mac_iov->iov_len = ZIO_DATA_MAC_LEN; @@ -1662,14 +1671,18 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key, uint64_t crypt = key->zk_crypt; uint_t keydata_len = zio_crypt_table[crypt].ci_keylen; uint_t enc_len, auth_len; - uio_t puio, cuio; + zfs_uio_t puio, cuio; + struct uio puio_s, cuio_s; uint8_t enc_keydata[MASTER_KEY_MAX_LEN]; crypto_key_t tmp_ckey, *ckey = NULL; freebsd_crypt_session_t *tmpl = NULL; uint8_t *authbuf = NULL; - bzero(&puio, sizeof (uio_t)); - bzero(&cuio, sizeof (uio_t)); + + zfs_uio_init(&puio, &puio_s); + zfs_uio_init(&cuio, &cuio_s); + bzero(GET_UIO_STRUCT(&puio), sizeof (struct uio)); + bzero(GET_UIO_STRUCT(&cuio), sizeof (struct uio)); #ifdef FCRYPTO_DEBUG printf("%s(%s, %p, %p, %d, %p, %p, %u, %s, %p, %p, %p)\n", diff --git a/module/os/freebsd/zfs/zvol_os.c b/module/os/freebsd/zfs/zvol_os.c index cca15a7684..a9991b9344 100644 --- a/module/os/freebsd/zfs/zvol_os.c +++ b/module/os/freebsd/zfs/zvol_os.c @@ -746,12 +746,15 @@ out: */ static int -zvol_cdev_read(struct cdev *dev, struct uio *uio, int ioflag) +zvol_cdev_read(struct cdev *dev, struct uio *uio_s, int ioflag) { zvol_state_t *zv; uint64_t volsize; zfs_locked_range_t *lr; int error = 0; + zfs_uio_t uio; + + zfs_uio_init(&uio, uio_s); zv = dev->si_drv2; @@ -760,20 +763,20 @@ zvol_cdev_read(struct cdev *dev, struct uio *uio, int ioflag) * uio_loffset == volsize isn't an error as * its required for EOF processing. 
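The zvol character-device handlers being converted here show the boundary convention on FreeBSD: the struct uio handed in by the kernel is wrapped exactly once, and every bounds check and DMU call after that goes through the zfs_uio_*() accessors. A condensed sketch of that shape, with the range locking, DMU_MAX_ACCESS clamping and error mapping left out (the handler name is hypothetical):

/* Illustrative only: the wrap-once pattern of zvol_cdev_read(). */
static int
example_cdev_read(struct cdev *dev, struct uio *uio_s, int ioflag)
{
        zvol_state_t *zv = dev->si_drv2;
        zfs_uio_t uio;
        int error = 0;

        zfs_uio_init(&uio, uio_s);

        while (error == 0 && zfs_uio_resid(&uio) > 0 &&
            zfs_uio_offset(&uio) < zv->zv_volsize) {
                uint64_t bytes = MIN(zfs_uio_resid(&uio),
                    zv->zv_volsize - zfs_uio_offset(&uio));
                error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
        }
        return (error);
}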
*/ - if (uio->uio_resid > 0 && - (uio->uio_loffset < 0 || uio->uio_loffset > volsize)) + if (zfs_uio_resid(&uio) > 0 && + (zfs_uio_offset(&uio) < 0 || zfs_uio_offset(&uio) > volsize)) return (SET_ERROR(EIO)); - lr = zfs_rangelock_enter(&zv->zv_rangelock, uio->uio_loffset, - uio->uio_resid, RL_READER); - while (uio->uio_resid > 0 && uio->uio_loffset < volsize) { - uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1); + lr = zfs_rangelock_enter(&zv->zv_rangelock, zfs_uio_offset(&uio), + zfs_uio_resid(&uio), RL_READER); + while (zfs_uio_resid(&uio) > 0 && zfs_uio_offset(&uio) < volsize) { + uint64_t bytes = MIN(zfs_uio_resid(&uio), DMU_MAX_ACCESS >> 1); /* don't read past the end */ - if (bytes > volsize - uio->uio_loffset) - bytes = volsize - uio->uio_loffset; + if (bytes > volsize - zfs_uio_offset(&uio)) + bytes = volsize - zfs_uio_offset(&uio); - error = dmu_read_uio_dnode(zv->zv_dn, uio, bytes); + error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes); if (error) { /* convert checksum errors into IO errors */ if (error == ECKSUM) @@ -787,20 +790,23 @@ zvol_cdev_read(struct cdev *dev, struct uio *uio, int ioflag) } static int -zvol_cdev_write(struct cdev *dev, struct uio *uio, int ioflag) +zvol_cdev_write(struct cdev *dev, struct uio *uio_s, int ioflag) { zvol_state_t *zv; uint64_t volsize; zfs_locked_range_t *lr; int error = 0; boolean_t sync; + zfs_uio_t uio; zv = dev->si_drv2; volsize = zv->zv_volsize; - if (uio->uio_resid > 0 && - (uio->uio_loffset < 0 || uio->uio_loffset > volsize)) + zfs_uio_init(&uio, uio_s); + + if (zfs_uio_resid(&uio) > 0 && + (zfs_uio_offset(&uio) < 0 || zfs_uio_offset(&uio) > volsize)) return (SET_ERROR(EIO)); sync = (ioflag & IO_SYNC) || @@ -809,11 +815,11 @@ zvol_cdev_write(struct cdev *dev, struct uio *uio, int ioflag) rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER); zvol_ensure_zilog(zv); - lr = zfs_rangelock_enter(&zv->zv_rangelock, uio->uio_loffset, - uio->uio_resid, RL_WRITER); - while (uio->uio_resid > 0 && uio->uio_loffset < volsize) { - uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1); - uint64_t off = uio->uio_loffset; + lr = zfs_rangelock_enter(&zv->zv_rangelock, zfs_uio_offset(&uio), + zfs_uio_resid(&uio), RL_WRITER); + while (zfs_uio_resid(&uio) > 0 && zfs_uio_offset(&uio) < volsize) { + uint64_t bytes = MIN(zfs_uio_resid(&uio), DMU_MAX_ACCESS >> 1); + uint64_t off = zfs_uio_offset(&uio); dmu_tx_t *tx = dmu_tx_create(zv->zv_objset); if (bytes > volsize - off) /* don't write past the end */ @@ -825,7 +831,7 @@ zvol_cdev_write(struct cdev *dev, struct uio *uio, int ioflag) dmu_tx_abort(tx); break; } - error = dmu_write_uio_dnode(zv->zv_dn, uio, bytes, tx); + error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx); if (error == 0) zvol_log_write(zv, tx, off, bytes, sync); dmu_tx_commit(tx); diff --git a/module/os/linux/zfs/zfs_uio.c b/module/os/linux/zfs/zfs_uio.c index 7d2267f0a9..a06e04b18b 100644 --- a/module/os/linux/zfs/zfs_uio.c +++ b/module/os/linux/zfs/zfs_uio.c @@ -55,7 +55,7 @@ * a non-zero errno on failure. 
*/ static int -uiomove_iov(void *p, size_t n, enum uio_rw rw, struct uio *uio) +zfs_uiomove_iov(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio) { const struct iovec *iov = uio->uio_iov; size_t skip = uio->uio_skip; @@ -126,7 +126,7 @@ uiomove_iov(void *p, size_t n, enum uio_rw rw, struct uio *uio) } static int -uiomove_bvec(void *p, size_t n, enum uio_rw rw, struct uio *uio) +zfs_uiomove_bvec(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio) { const struct bio_vec *bv = uio->uio_bvec; size_t skip = uio->uio_skip; @@ -160,7 +160,7 @@ uiomove_bvec(void *p, size_t n, enum uio_rw rw, struct uio *uio) #if defined(HAVE_VFS_IOV_ITER) static int -uiomove_iter(void *p, size_t n, enum uio_rw rw, struct uio *uio, +zfs_uiomove_iter(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio, boolean_t revert) { size_t cnt = MIN(n, uio->uio_resid); @@ -182,7 +182,7 @@ uiomove_iter(void *p, size_t n, enum uio_rw rw, struct uio *uio, return (EFAULT); /* - * Revert advancing the uio_iter. This is set by uiocopy() + * Revert advancing the uio_iter. This is set by zfs_uiocopy() * to avoid consuming the uio and its iov_iter structure. */ if (revert) @@ -196,18 +196,18 @@ uiomove_iter(void *p, size_t n, enum uio_rw rw, struct uio *uio, #endif int -uiomove(void *p, size_t n, enum uio_rw rw, struct uio *uio) +zfs_uiomove(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio) { if (uio->uio_segflg == UIO_BVEC) - return (uiomove_bvec(p, n, rw, uio)); + return (zfs_uiomove_bvec(p, n, rw, uio)); #if defined(HAVE_VFS_IOV_ITER) else if (uio->uio_segflg == UIO_ITER) - return (uiomove_iter(p, n, rw, uio, B_FALSE)); + return (zfs_uiomove_iter(p, n, rw, uio, B_FALSE)); #endif else - return (uiomove_iov(p, n, rw, uio)); + return (zfs_uiomove_iov(p, n, rw, uio)); } -EXPORT_SYMBOL(uiomove); +EXPORT_SYMBOL(zfs_uiomove); /* * Fault in the pages of the first n bytes specified by the uio structure. @@ -216,7 +216,7 @@ EXPORT_SYMBOL(uiomove); * the pages resident. */ int -uio_prefaultpages(ssize_t n, struct uio *uio) +zfs_uio_prefaultpages(ssize_t n, zfs_uio_t *uio) { if (uio->uio_segflg == UIO_SYSSPACE || uio->uio_segflg == UIO_BVEC) { /* There's never a need to fault in kernel pages */ @@ -263,40 +263,40 @@ uio_prefaultpages(ssize_t n, struct uio *uio) return (0); } -EXPORT_SYMBOL(uio_prefaultpages); +EXPORT_SYMBOL(zfs_uio_prefaultpages); /* - * The same as uiomove() but doesn't modify uio structure. + * The same as zfs_uiomove() but doesn't modify uio structure. * return in cbytes how many bytes were copied. */ int -uiocopy(void *p, size_t n, enum uio_rw rw, struct uio *uio, size_t *cbytes) +zfs_uiocopy(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio, size_t *cbytes) { - struct uio uio_copy; + zfs_uio_t uio_copy; int ret; - bcopy(uio, &uio_copy, sizeof (struct uio)); + bcopy(uio, &uio_copy, sizeof (zfs_uio_t)); if (uio->uio_segflg == UIO_BVEC) - ret = uiomove_bvec(p, n, rw, &uio_copy); + ret = zfs_uiomove_bvec(p, n, rw, &uio_copy); #if defined(HAVE_VFS_IOV_ITER) else if (uio->uio_segflg == UIO_ITER) - ret = uiomove_iter(p, n, rw, &uio_copy, B_TRUE); + ret = zfs_uiomove_iter(p, n, rw, &uio_copy, B_TRUE); #endif else - ret = uiomove_iov(p, n, rw, &uio_copy); + ret = zfs_uiomove_iov(p, n, rw, &uio_copy); *cbytes = uio->uio_resid - uio_copy.uio_resid; return (ret); } -EXPORT_SYMBOL(uiocopy); +EXPORT_SYMBOL(zfs_uiocopy); /* * Drop the next n chars out of *uio. 
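Under the new names the Linux copy routines keep their old division of labor: zfs_uiomove() consumes the uio as it copies, zfs_uiocopy() copies without consuming it, and zfs_uioskip() advances without copying. That combination is what zfs_write() relies on when it stages a full block into a borrowed ARC buffer before committing to the transaction. A small sketch of the peek-then-consume pattern (buf, blksz and the caller's uio are hypothetical):

/* Illustrative only: stage a block, then consume the input on success. */
size_t cbytes;
int error;

error = zfs_uiocopy(buf, blksz, UIO_WRITE, uio, &cbytes);   /* peek */
if (error == 0) {
        /* ... hand buf to the DMU ... */
        zfs_uioskip(uio, blksz);                             /* consume */
}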
*/ void -uioskip(uio_t *uio, size_t n) +zfs_uioskip(zfs_uio_t *uio, size_t n) { if (n > uio->uio_resid) return; @@ -325,5 +325,6 @@ uioskip(uio_t *uio, size_t n) uio->uio_loffset += n; uio->uio_resid -= n; } -EXPORT_SYMBOL(uioskip); +EXPORT_SYMBOL(zfs_uioskip); + #endif /* _KERNEL */ diff --git a/module/os/linux/zfs/zfs_vnops_os.c b/module/os/linux/zfs/zfs_vnops_os.c index 466fac3f35..fc9018381e 100644 --- a/module/os/linux/zfs/zfs_vnops_os.c +++ b/module/os/linux/zfs/zfs_vnops_os.c @@ -301,7 +301,7 @@ update_pages(znode_t *zp, int64_t start, int len, objset_t *os) * the file is memory mapped. */ int -mappedread(znode_t *zp, int nbytes, uio_t *uio) +mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio) { struct inode *ip = ZTOI(zp); struct address_space *mp = ip->i_mapping; @@ -323,7 +323,7 @@ mappedread(znode_t *zp, int nbytes, uio_t *uio) unlock_page(pp); pb = kmap(pp); - error = uiomove(pb + off, bytes, UIO_READ, uio); + error = zfs_uiomove(pb + off, bytes, UIO_READ, uio); kunmap(pp); if (mapping_writably_mapped(mp)) @@ -375,8 +375,8 @@ zfs_write_simple(znode_t *zp, const void *data, size_t len, iov.iov_base = (void *)data; iov.iov_len = len; - uio_t uio; - uio_iovec_init(&uio, &iov, 1, pos, UIO_SYSSPACE, len, 0); + zfs_uio_t uio; + zfs_uio_iovec_init(&uio, &iov, 1, pos, UIO_SYSSPACE, len, 0); cookie = spl_fstrans_mark(); error = zfs_write(zp, &uio, 0, kcred); @@ -384,8 +384,8 @@ zfs_write_simple(znode_t *zp, const void *data, size_t len, if (error == 0) { if (residp != NULL) - *residp = uio_resid(&uio); - else if (uio_resid(&uio) != 0) + *residp = zfs_uio_resid(&uio); + else if (zfs_uio_resid(&uio) != 0) error = SET_ERROR(EIO); } @@ -3208,7 +3208,7 @@ top: */ /* ARGSUSED */ int -zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr) +zfs_readlink(struct inode *ip, zfs_uio_t *uio, cred_t *cr) { znode_t *zp = ITOZ(ip); zfsvfs_t *zfsvfs = ITOZSB(ip); diff --git a/module/os/linux/zfs/zio_crypt.c b/module/os/linux/zfs/zio_crypt.c index c0aa7dc9d6..92bf7b6a3b 100644 --- a/module/os/linux/zfs/zio_crypt.c +++ b/module/os/linux/zfs/zio_crypt.c @@ -376,7 +376,7 @@ error: static int zio_do_crypt_uio(boolean_t encrypt, uint64_t crypt, crypto_key_t *key, crypto_ctx_template_t tmpl, uint8_t *ivbuf, uint_t datalen, - uio_t *puio, uio_t *cuio, uint8_t *authbuf, uint_t auth_len) + zfs_uio_t *puio, zfs_uio_t *cuio, uint8_t *authbuf, uint_t auth_len) { int ret; crypto_data_t plaindata, cipherdata; @@ -479,7 +479,7 @@ zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv, uint8_t *mac, uint8_t *keydata_out, uint8_t *hmac_keydata_out) { int ret; - uio_t puio, cuio; + zfs_uio_t puio, cuio; uint64_t aad[3]; iovec_t plain_iovecs[2], cipher_iovecs[3]; uint64_t crypt = key->zk_crypt; @@ -495,7 +495,7 @@ zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv, if (ret != 0) goto error; - /* initialize uio_ts */ + /* initialize zfs_uio_ts */ plain_iovecs[0].iov_base = key->zk_master_keydata; plain_iovecs[0].iov_len = keydata_len; plain_iovecs[1].iov_base = key->zk_hmac_keydata; @@ -550,7 +550,7 @@ zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version, uint8_t *mac, zio_crypt_key_t *key) { crypto_mechanism_t mech; - uio_t puio, cuio; + zfs_uio_t puio, cuio; uint64_t aad[3]; iovec_t plain_iovecs[2], cipher_iovecs[3]; uint_t enc_len, keydata_len, aad_len; @@ -563,7 +563,7 @@ zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version, keydata_len = zio_crypt_table[crypt].ci_keylen; - /* initialize uio_ts */ + /* initialize zfs_uio_ts */ 
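Not every caller receives a uio from the VFS; several in-kernel paths in this patch build one themselves. On Linux they do it with zfs_uio_iovec_init() over a single iovec, as zfs_write_simple() just above and the zpl_* callers further on demonstrate. A minimal sketch of that construction for a read into a caller-supplied buffer (buf, len, pos, zp and cr are hypothetical):

/* Illustrative only: wrap one kernel buffer in a zfs_uio_t (Linux). */
struct iovec iov;
zfs_uio_t uio;
ssize_t nread = 0;
int error;

iov.iov_base = buf;
iov.iov_len = len;
zfs_uio_iovec_init(&uio, &iov, 1, pos, UIO_SYSSPACE, len, 0);

error = zfs_read(zp, &uio, 0, cr);
if (error == 0)
        nread = len - zfs_uio_resid(&uio);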
plain_iovecs[0].iov_base = key->zk_master_keydata; plain_iovecs[0].iov_len = keydata_len; plain_iovecs[1].iov_base = key->zk_hmac_keydata; @@ -1289,7 +1289,7 @@ error: } static void -zio_crypt_destroy_uio(uio_t *uio) +zio_crypt_destroy_uio(zfs_uio_t *uio) { if (uio->uio_iov) kmem_free(uio->uio_iov, uio->uio_iovcnt * sizeof (iovec_t)); @@ -1379,8 +1379,8 @@ zio_crypt_do_indirect_mac_checksum_abd(boolean_t generate, abd_t *abd, */ static int zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf, - uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, uio_t *puio, - uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len, + uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, zfs_uio_t *puio, + zfs_uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len, boolean_t *no_crypt) { int ret; @@ -1575,7 +1575,7 @@ error: static int zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version, uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, - uio_t *puio, uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, + zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len, boolean_t *no_crypt) { int ret; @@ -1758,7 +1758,7 @@ error: static int zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf, - uint8_t *cipherbuf, uint_t datalen, uio_t *puio, uio_t *cuio, + uint8_t *cipherbuf, uint_t datalen, zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len) { int ret; @@ -1818,8 +1818,8 @@ error: static int zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot, uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, - uint8_t *mac, uio_t *puio, uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, - uint_t *auth_len, boolean_t *no_crypt) + uint8_t *mac, zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len, + uint8_t **authbuf, uint_t *auth_len, boolean_t *no_crypt) { int ret; iovec_t *mac_iov; @@ -1878,7 +1878,7 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key, uint64_t crypt = key->zk_crypt; uint_t keydata_len = zio_crypt_table[crypt].ci_keylen; uint_t enc_len, auth_len; - uio_t puio, cuio; + zfs_uio_t puio, cuio; uint8_t enc_keydata[MASTER_KEY_MAX_LEN]; crypto_key_t tmp_ckey, *ckey = NULL; crypto_ctx_template_t tmpl; @@ -1944,8 +1944,8 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key, /* If the hardware implementation fails fall back to software */ } - bzero(&puio, sizeof (uio_t)); - bzero(&cuio, sizeof (uio_t)); + bzero(&puio, sizeof (zfs_uio_t)); + bzero(&cuio, sizeof (zfs_uio_t)); /* create uios for encryption */ ret = zio_crypt_init_uios(encrypt, key->zk_version, ot, plainbuf, diff --git a/module/os/linux/zfs/zpl_file.c b/module/os/linux/zfs/zpl_file.c index cd54c40ac0..59174d64d9 100644 --- a/module/os/linux/zfs/zpl_file.c +++ b/module/os/linux/zfs/zpl_file.c @@ -245,13 +245,13 @@ zpl_file_accessed(struct file *filp) * Otherwise, for older kernels extract the iovec and pass it instead. */ static void -zpl_uio_init(uio_t *uio, struct kiocb *kiocb, struct iov_iter *to, +zpl_uio_init(zfs_uio_t *uio, struct kiocb *kiocb, struct iov_iter *to, loff_t pos, ssize_t count, size_t skip) { #if defined(HAVE_VFS_IOV_ITER) - uio_iov_iter_init(uio, to, pos, count, skip); + zfs_uio_iov_iter_init(uio, to, pos, count, skip); #else - uio_iovec_init(uio, to->iov, to->nr_segs, pos, + zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos, to->type & ITER_KVEC ? 
UIO_SYSSPACE : UIO_USERSPACE, count, skip); #endif @@ -264,7 +264,7 @@ zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to) fstrans_cookie_t cookie; struct file *filp = kiocb->ki_filp; ssize_t count = iov_iter_count(to); - uio_t uio; + zfs_uio_t uio; zpl_uio_init(&uio, kiocb, to, kiocb->ki_pos, count, 0); @@ -320,7 +320,7 @@ zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from) fstrans_cookie_t cookie; struct file *filp = kiocb->ki_filp; struct inode *ip = filp->f_mapping->host; - uio_t uio; + zfs_uio_t uio; size_t count = 0; ssize_t ret; @@ -364,8 +364,8 @@ zpl_aio_read(struct kiocb *kiocb, const struct iovec *iov, if (ret) return (ret); - uio_t uio; - uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE, + zfs_uio_t uio; + zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE, count, 0); crhold(cr); @@ -407,8 +407,8 @@ zpl_aio_write(struct kiocb *kiocb, const struct iovec *iov, if (ret) return (ret); - uio_t uio; - uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE, + zfs_uio_t uio; + zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE, count, 0); crhold(cr); diff --git a/module/os/linux/zfs/zpl_inode.c b/module/os/linux/zfs/zpl_inode.c index bd1f605166..ee08942d15 100644 --- a/module/os/linux/zfs/zpl_inode.c +++ b/module/os/linux/zfs/zpl_inode.c @@ -545,8 +545,8 @@ zpl_get_link_common(struct dentry *dentry, struct inode *ip, char **link) iov.iov_len = MAXPATHLEN; iov.iov_base = kmem_zalloc(MAXPATHLEN, KM_SLEEP); - uio_t uio; - uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, MAXPATHLEN - 1, 0); + zfs_uio_t uio; + zfs_uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, MAXPATHLEN - 1, 0); cookie = spl_fstrans_mark(); error = -zfs_readlink(ip, &uio, cr); diff --git a/module/os/linux/zfs/zpl_xattr.c b/module/os/linux/zfs/zpl_xattr.c index 0a730b7cda..928058ef67 100644 --- a/module/os/linux/zfs/zpl_xattr.c +++ b/module/os/linux/zfs/zpl_xattr.c @@ -306,15 +306,15 @@ zpl_xattr_get_dir(struct inode *ip, const char *name, void *value, iov.iov_base = (void *)value; iov.iov_len = size; - uio_t uio; - uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, size, 0); + zfs_uio_t uio; + zfs_uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, size, 0); cookie = spl_fstrans_mark(); error = -zfs_read(ITOZ(xip), &uio, 0, cr); spl_fstrans_unmark(cookie); if (error == 0) - error = size - uio_resid(&uio); + error = size - zfs_uio_resid(&uio); out: if (xzp) zrele(xzp); diff --git a/module/os/linux/zfs/zvol_os.c b/module/os/linux/zfs/zvol_os.c index 51f4f932d7..24bb79f52f 100644 --- a/module/os/linux/zfs/zvol_os.c +++ b/module/os/linux/zfs/zvol_os.c @@ -85,9 +85,9 @@ zvol_write(void *arg) zv_request_t *zvr = arg; struct bio *bio = zvr->bio; int error = 0; - uio_t uio; + zfs_uio_t uio; - uio_bvec_init(&uio, bio); + zfs_uio_bvec_init(&uio, bio); zvol_state_t *zv = zvr->zv; ASSERT3P(zv, !=, NULL); @@ -247,9 +247,9 @@ zvol_read(void *arg) zv_request_t *zvr = arg; struct bio *bio = zvr->bio; int error = 0; - uio_t uio; + zfs_uio_t uio; - uio_bvec_init(&uio, bio); + zfs_uio_bvec_init(&uio, bio); zvol_state_t *zv = zvr->zv; ASSERT3P(zv, !=, NULL); diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c index c54762526f..c1493fb965 100644 --- a/module/zfs/dmu.c +++ b/module/zfs/dmu.c @@ -1171,7 +1171,7 @@ dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, #ifdef _KERNEL int -dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size) +dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size) { dmu_buf_t **dbp; int numbufs, i, err; @@ -1180,7 +1180,7 @@ 
dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size) * NB: we could do this block-at-a-time, but it's nice * to be reading in parallel. */ - err = dmu_buf_hold_array_by_dnode(dn, uio_offset(uio), size, + err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size, TRUE, FTAG, &numbufs, &dbp, 0); if (err) return (err); @@ -1192,16 +1192,12 @@ dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size) ASSERT(size > 0); - bufoff = uio_offset(uio) - db->db_offset; + bufoff = zfs_uio_offset(uio) - db->db_offset; tocpy = MIN(db->db_size - bufoff, size); -#ifdef __FreeBSD__ - err = vn_io_fault_uiomove((char *)db->db_data + bufoff, - tocpy, uio); -#else - err = uiomove((char *)db->db_data + bufoff, tocpy, - UIO_READ, uio); -#endif + err = zfs_uio_fault_move((char *)db->db_data + bufoff, tocpy, + UIO_READ, uio); + if (err) break; @@ -1215,14 +1211,14 @@ dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size) /* * Read 'size' bytes into the uio buffer. * From object zdb->db_object. - * Starting at offset uio->uio_loffset. + * Starting at zfs_uio_offset(uio). * * If the caller already has a dbuf in the target object * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(), * because we don't have to find the dnode_t for the object. */ int -dmu_read_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size) +dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size) { dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb; dnode_t *dn; @@ -1242,10 +1238,10 @@ dmu_read_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size) /* * Read 'size' bytes into the uio buffer. * From the specified object - * Starting at offset uio->uio_loffset. + * Starting at offset zfs_uio_offset(uio). */ int -dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size) +dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size) { dnode_t *dn; int err; @@ -1265,14 +1261,14 @@ dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size) } int -dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx) +dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx) { dmu_buf_t **dbp; int numbufs; int err = 0; int i; - err = dmu_buf_hold_array_by_dnode(dn, uio_offset(uio), size, + err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size, FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH); if (err) return (err); @@ -1284,7 +1280,7 @@ dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx) ASSERT(size > 0); - bufoff = uio_offset(uio) - db->db_offset; + bufoff = zfs_uio_offset(uio) - db->db_offset; tocpy = MIN(db->db_size - bufoff, size); ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); @@ -1295,18 +1291,14 @@ dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx) dmu_buf_will_dirty(db, tx); /* - * XXX uiomove could block forever (eg.nfs-backed + * XXX zfs_uiomove could block forever (eg.nfs-backed * pages). There needs to be a uiolockdown() function - * to lock the pages in memory, so that uiomove won't + * to lock the pages in memory, so that zfs_uiomove won't * block. 
*/ -#ifdef __FreeBSD__ - err = vn_io_fault_uiomove((char *)db->db_data + bufoff, - tocpy, uio); -#else - err = uiomove((char *)db->db_data + bufoff, tocpy, - UIO_WRITE, uio); -#endif + err = zfs_uio_fault_move((char *)db->db_data + bufoff, + tocpy, UIO_WRITE, uio); + if (tocpy == db->db_size) dmu_buf_fill_done(db, tx); @@ -1323,14 +1315,14 @@ dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx) /* * Write 'size' bytes from the uio buffer. * To object zdb->db_object. - * Starting at offset uio->uio_loffset. + * Starting at offset zfs_uio_offset(uio). * * If the caller already has a dbuf in the target object * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(), * because we don't have to find the dnode_t for the object. */ int -dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size, +dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx) { dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb; @@ -1351,10 +1343,10 @@ dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size, /* * Write 'size' bytes from the uio buffer. * To the specified object. - * Starting at offset uio->uio_loffset. + * Starting at offset zfs_uio_offset(uio). */ int -dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size, +dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx) { dnode_t *dn; diff --git a/module/zfs/sa.c b/module/zfs/sa.c index 83a10e7b45..5af0aaa7d0 100644 --- a/module/zfs/sa.c +++ b/module/zfs/sa.c @@ -1502,7 +1502,7 @@ sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen) #ifdef _KERNEL int -sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, uio_t *uio) +sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, zfs_uio_t *uio) { int error; sa_bulk_attr_t bulk; @@ -1515,8 +1515,8 @@ sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, uio_t *uio) mutex_enter(&hdl->sa_lock); if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) == 0) { - error = uiomove((void *)bulk.sa_addr, MIN(bulk.sa_size, - uio_resid(uio)), UIO_READ, uio); + error = zfs_uiomove((void *)bulk.sa_addr, MIN(bulk.sa_size, + zfs_uio_resid(uio)), UIO_READ, uio); } mutex_exit(&hdl->sa_lock); return (error); diff --git a/module/zfs/zfs_sa.c b/module/zfs/zfs_sa.c index cbb773ffbd..67be131da6 100644 --- a/module/zfs/zfs_sa.c +++ b/module/zfs/zfs_sa.c @@ -71,7 +71,7 @@ sa_attr_reg_t zfs_attr_table[ZPL_END+1] = { #ifdef _KERNEL int -zfs_sa_readlink(znode_t *zp, uio_t *uio) +zfs_sa_readlink(znode_t *zp, zfs_uio_t *uio) { dmu_buf_t *db = sa_get_db(zp->z_sa_hdl); size_t bufsz; @@ -79,15 +79,16 @@ zfs_sa_readlink(znode_t *zp, uio_t *uio) bufsz = zp->z_size; if (bufsz + ZFS_OLD_ZNODE_PHYS_SIZE <= db->db_size) { - error = uiomove((caddr_t)db->db_data + + error = zfs_uiomove((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE, - MIN((size_t)bufsz, uio_resid(uio)), UIO_READ, uio); + MIN((size_t)bufsz, zfs_uio_resid(uio)), UIO_READ, uio); } else { dmu_buf_t *dbp; if ((error = dmu_buf_hold(ZTOZSB(zp)->z_os, zp->z_id, 0, FTAG, &dbp, DMU_READ_NO_PREFETCH)) == 0) { - error = uiomove(dbp->db_data, - MIN((size_t)bufsz, uio_resid(uio)), UIO_READ, uio); + error = zfs_uiomove(dbp->db_data, + MIN((size_t)bufsz, zfs_uio_resid(uio)), UIO_READ, + uio); dmu_buf_rele(dbp, FTAG); } } diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c index 36563e8e2a..68b77fb753 100644 --- a/module/zfs/zfs_vnops.c +++ b/module/zfs/zfs_vnops.c @@ -187,7 +187,7 @@ static unsigned long zfs_vnops_read_chunk_size = 1024 * 1024; /* Tunable */ */ /* 
ARGSUSED */ int -zfs_read(struct znode *zp, uio_t *uio, int ioflag, cred_t *cr) +zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr) { int error = 0; boolean_t frsync = B_FALSE; @@ -210,7 +210,7 @@ zfs_read(struct znode *zp, uio_t *uio, int ioflag, cred_t *cr) /* * Validate file offset */ - if (uio->uio_loffset < (offset_t)0) { + if (zfs_uio_offset(uio) < (offset_t)0) { ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); } @@ -218,7 +218,7 @@ zfs_read(struct znode *zp, uio_t *uio, int ioflag, cred_t *cr) /* * Fasttrack empty reads */ - if (uio->uio_resid == 0) { + if (zfs_uio_resid(uio) == 0) { ZFS_EXIT(zfsvfs); return (0); } @@ -242,26 +242,26 @@ zfs_read(struct znode *zp, uio_t *uio, int ioflag, cred_t *cr) * Lock the range against changes. */ zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock, - uio->uio_loffset, uio->uio_resid, RL_READER); + zfs_uio_offset(uio), zfs_uio_resid(uio), RL_READER); /* * If we are reading past end-of-file we can skip * to the end; but we might still need to set atime. */ - if (uio->uio_loffset >= zp->z_size) { + if (zfs_uio_offset(uio) >= zp->z_size) { error = 0; goto out; } - ASSERT(uio->uio_loffset < zp->z_size); - ssize_t n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset); + ASSERT(zfs_uio_offset(uio) < zp->z_size); + ssize_t n = MIN(zfs_uio_resid(uio), zp->z_size - zfs_uio_offset(uio)); ssize_t start_resid = n; while (n > 0) { ssize_t nbytes = MIN(n, zfs_vnops_read_chunk_size - - P2PHASE(uio->uio_loffset, zfs_vnops_read_chunk_size)); + P2PHASE(zfs_uio_offset(uio), zfs_vnops_read_chunk_size)); #ifdef UIO_NOCOPY - if (uio->uio_segflg == UIO_NOCOPY) + if (zfs_uio_segflg(uio) == UIO_NOCOPY) error = mappedread_sf(zp, nbytes, uio); else #endif @@ -314,10 +314,10 @@ out: /* ARGSUSED */ int -zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) +zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr) { int error = 0; - ssize_t start_resid = uio->uio_resid; + ssize_t start_resid = zfs_uio_resid(uio); /* * Fasttrack empty write @@ -356,7 +356,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) */ if ((zp->z_pflags & ZFS_IMMUTABLE) || ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) && - (uio->uio_loffset < zp->z_size))) { + (zfs_uio_offset(uio) < zp->z_size))) { ZFS_EXIT(zfsvfs); return (SET_ERROR(EPERM)); } @@ -364,7 +364,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) /* * Validate file offset */ - offset_t woff = ioflag & O_APPEND ? zp->z_size : uio->uio_loffset; + offset_t woff = ioflag & O_APPEND ? zp->z_size : zfs_uio_offset(uio); if (woff < 0) { ZFS_EXIT(zfsvfs); return (SET_ERROR(EINVAL)); @@ -377,7 +377,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) * don't hold up txg. * Skip this if uio contains loaned arc_buf. 
*/ - if (uio_prefaultpages(MIN(n, max_blksz), uio)) { + if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) { ZFS_EXIT(zfsvfs); return (SET_ERROR(EFAULT)); } @@ -401,7 +401,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) */ woff = zp->z_size; } - uio->uio_loffset = woff; + zfs_uio_setoffset(uio, woff); } else { /* * Note that if the file block size will change as a result of @@ -411,7 +411,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER); } - if (zn_rlimit_fsize(zp, uio, uio->uio_td)) { + if (zn_rlimit_fsize(zp, uio)) { zfs_rangelock_exit(lr); ZFS_EXIT(zfsvfs); return (SET_ERROR(EFBIG)); @@ -441,7 +441,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) * and allows us to do more fine-grained space accounting. */ while (n > 0) { - woff = uio->uio_loffset; + woff = zfs_uio_offset(uio); if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) || zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) || @@ -469,7 +469,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) max_blksz); ASSERT(abuf != NULL); ASSERT(arc_buf_size(abuf) == max_blksz); - if ((error = uiocopy(abuf->b_data, max_blksz, + if ((error = zfs_uiocopy(abuf->b_data, max_blksz, UIO_WRITE, uio, &cbytes))) { dmu_return_arcbuf(abuf); break; @@ -530,11 +530,11 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) ssize_t tx_bytes; if (abuf == NULL) { - tx_bytes = uio->uio_resid; - uio_fault_disable(uio, B_TRUE); + tx_bytes = zfs_uio_resid(uio); + zfs_uio_fault_disable(uio, B_TRUE); error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl), uio, nbytes, tx); - uio_fault_disable(uio, B_FALSE); + zfs_uio_fault_disable(uio, B_FALSE); #ifdef __linux__ if (error == EFAULT) { dmu_tx_commit(tx); @@ -542,12 +542,13 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) * Account for partial writes before * continuing the loop. * Update needs to occur before the next - * uio_prefaultpages, or prefaultpages may + * zfs_uio_prefaultpages, or prefaultpages may * error, and we may break the loop early. */ - if (tx_bytes != uio->uio_resid) - n -= tx_bytes - uio->uio_resid; - if (uio_prefaultpages(MIN(n, max_blksz), uio)) { + if (tx_bytes != zfs_uio_resid(uio)) + n -= tx_bytes - zfs_uio_resid(uio); + if (zfs_uio_prefaultpages(MIN(n, max_blksz), + uio)) { break; } continue; @@ -557,7 +558,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) dmu_tx_commit(tx); break; } - tx_bytes -= uio->uio_resid; + tx_bytes -= zfs_uio_resid(uio); } else { /* Implied by abuf != NULL: */ ASSERT3S(n, >=, max_blksz); @@ -582,8 +583,8 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) dmu_tx_commit(tx); break; } - ASSERT3S(nbytes, <=, uio->uio_resid); - uioskip(uio, nbytes); + ASSERT3S(nbytes, <=, zfs_uio_resid(uio)); + zfs_uioskip(uio, nbytes); tx_bytes = nbytes; } if (tx_bytes && zn_has_cached_data(zp) && @@ -634,9 +635,9 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) * Update the file size (zp_size) if it has changed; * account for possible concurrent updates. 
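The Linux-only EFAULT handling in the write loop is easier to follow without the diff markers: user pages are prefaulted before the transaction is opened, page faults are disabled while the copy runs under the open transaction, and a faulted copy commits the transaction, accounts for partial progress, re-prefaults, and retries rather than failing outright. A condensed sketch of that control flow; n, nbytes, max_blksz, tx, zp and uio come from the surrounding loop:

/* Illustrative only: the fault-handling shape inside zfs_write(). */
ssize_t tx_bytes = zfs_uio_resid(uio);

zfs_uio_fault_disable(uio, B_TRUE);
error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl), uio, nbytes, tx);
zfs_uio_fault_disable(uio, B_FALSE);

if (error == EFAULT) {
        dmu_tx_commit(tx);
        /* account for partial progress before the next prefault */
        if (tx_bytes != zfs_uio_resid(uio))
                n -= tx_bytes - zfs_uio_resid(uio);
        if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio) == 0)
                error = 0;      /* retry this chunk */
}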
*/ - while ((end_size = zp->z_size) < uio->uio_loffset) { + while ((end_size = zp->z_size) < zfs_uio_offset(uio)) { (void) atomic_cas_64(&zp->z_size, end_size, - uio->uio_loffset); + zfs_uio_offset(uio)); ASSERT(error == 0); } /* @@ -659,7 +660,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) n -= nbytes; if (n > 0) { - if (uio_prefaultpages(MIN(n, max_blksz), uio)) { + if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) { error = SET_ERROR(EFAULT); break; } @@ -674,7 +675,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) * uio data is inaccessible return an error. Otherwise, it's * at least a partial write, so it's successful. */ - if (zfsvfs->z_replay || uio->uio_resid == start_resid || + if (zfsvfs->z_replay || zfs_uio_resid(uio) == start_resid || error == EFAULT) { ZFS_EXIT(zfsvfs); return (error); @@ -684,7 +685,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) zil_commit(zilog, zp->z_id); - const int64_t nwritten = start_resid - uio->uio_resid; + const int64_t nwritten = start_resid - zfs_uio_resid(uio); dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten); task_io_account_write(nwritten); diff --git a/tests/zfs-tests/cmd/mmapwrite/mmapwrite.c b/tests/zfs-tests/cmd/mmapwrite/mmapwrite.c index 458d6d8e40..152f5ba90e 100644 --- a/tests/zfs-tests/cmd/mmapwrite/mmapwrite.c +++ b/tests/zfs-tests/cmd/mmapwrite/mmapwrite.c @@ -42,8 +42,8 @@ * 2. In the same process, context #2, mmap page fault (which means the mm_sem * is hold) occurred, zfs_dirty_inode open a txg failed, and wait previous * txg "n" completed. - * 3. context #1 call uiomove to write, however page fault is occurred in - * uiomove, which means it needs mm_sem, but mm_sem is hold by + * 3. context #1 call zfs_uiomove to write, however page fault is occurred in + * zfs_uiomove, which means it needs mm_sem, but mm_sem is hold by * context #2, so it stuck and can't complete, then txg "n" will not * complete. *
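Taken together, common code that hands data back to a caller now needs nothing platform specific: it asks the zfs_uio_t how much room remains and moves bytes through zfs_uiomove(), exactly as sa_lookup_uio() and zfs_sa_readlink() do after this change. A closing sketch of that minimal consumer (the helper name is hypothetical):

/* Illustrative only: copy a kernel buffer out through a zfs_uio_t. */
static int
example_copyout(void *data, size_t len, zfs_uio_t *uio)
{
        return (zfs_uiomove(data, MIN(len, (size_t)zfs_uio_resid(uio)),
            UIO_READ, uio));
}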