Extending FreeBSD UIO Struct

In FreeBSD, struct uio was previously just typedef'd to uio_t. In order to
extend this struct without modifying the definition of struct uio itself,
struct uio is now referenced from inside a new zfs_uio_t struct.

All of the uio_* interfaces have also been renamed to zfs_uio_* to make it
clear that they are ZFS interfaces.

Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Brian Atkinson <batkinson@lanl.gov>
Closes #11438
This commit is contained in:
Brian Atkinson 2021-01-20 22:27:30 -07:00 committed by Tony Hutter
parent cd4f9572d0
commit 0b1e6fcc3e
37 changed files with 521 additions and 457 deletions

View File

@ -35,55 +35,72 @@
#include <sys/_uio.h> #include <sys/_uio.h>
#include <sys/debug.h> #include <sys/debug.h>
#define uio_loffset uio_offset
typedef struct uio uio_t;
typedef struct iovec iovec_t; typedef struct iovec iovec_t;
typedef enum uio_seg uio_seg_t; typedef enum uio_seg zfs_uio_seg_t;
typedef enum uio_rw zfs_uio_rw_t;
typedef struct zfs_uio {
struct uio *uio;
} zfs_uio_t;
#define GET_UIO_STRUCT(u) (u)->uio
#define zfs_uio_segflg(u) GET_UIO_STRUCT(u)->uio_segflg
#define zfs_uio_offset(u) GET_UIO_STRUCT(u)->uio_offset
#define zfs_uio_resid(u) GET_UIO_STRUCT(u)->uio_resid
#define zfs_uio_iovcnt(u) GET_UIO_STRUCT(u)->uio_iovcnt
#define zfs_uio_iovlen(u, idx) GET_UIO_STRUCT(u)->uio_iov[(idx)].iov_len
#define zfs_uio_iovbase(u, idx) GET_UIO_STRUCT(u)->uio_iov[(idx)].iov_base
#define zfs_uio_td(u) GET_UIO_STRUCT(u)->uio_td
#define zfs_uio_rw(u) GET_UIO_STRUCT(u)->uio_rw
#define zfs_uio_fault_disable(u, set)
#define zfs_uio_prefaultpages(size, u) (0)
static __inline void
zfs_uio_init(zfs_uio_t *uio, struct uio *uio_s)
{
GET_UIO_STRUCT(uio) = uio_s;
}
static __inline void
zfs_uio_setoffset(zfs_uio_t *uio, offset_t off)
{
zfs_uio_offset(uio) = off;
}
static __inline int static __inline int
zfs_uiomove(void *cp, size_t n, enum uio_rw dir, uio_t *uio) zfs_uiomove(void *cp, size_t n, zfs_uio_rw_t dir, zfs_uio_t *uio)
{ {
ASSERT(zfs_uio_rw(uio) == dir);
ASSERT(uio->uio_rw == dir); return (uiomove(cp, (int)n, GET_UIO_STRUCT(uio)));
return (uiomove(cp, (int)n, uio));
} }
#define uiomove(cp, n, dir, uio) zfs_uiomove((cp), (n), (dir), (uio))
int uiocopy(void *p, size_t n, enum uio_rw rw, struct uio *uio, size_t *cbytes); int zfs_uiocopy(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio,
void uioskip(uio_t *uiop, size_t n); size_t *cbytes);
void zfs_uioskip(zfs_uio_t *uiop, size_t n);
#define uio_segflg(uio) (uio)->uio_segflg int zfs_uio_fault_move(void *p, size_t n, zfs_uio_rw_t dir, zfs_uio_t *uio);
#define uio_offset(uio) (uio)->uio_loffset
#define uio_resid(uio) (uio)->uio_resid
#define uio_iovcnt(uio) (uio)->uio_iovcnt
#define uio_iovlen(uio, idx) (uio)->uio_iov[(idx)].iov_len
#define uio_iovbase(uio, idx) (uio)->uio_iov[(idx)].iov_base
#define uio_fault_disable(uio, set)
#define uio_prefaultpages(size, uio) (0)
static inline void static inline void
uio_iov_at_index(uio_t *uio, uint_t idx, void **base, uint64_t *len) zfs_uio_iov_at_index(zfs_uio_t *uio, uint_t idx, void **base, uint64_t *len)
{ {
*base = uio_iovbase(uio, idx); *base = zfs_uio_iovbase(uio, idx);
*len = uio_iovlen(uio, idx); *len = zfs_uio_iovlen(uio, idx);
} }
static inline void static inline void
uio_advance(uio_t *uio, size_t size) zfs_uio_advance(zfs_uio_t *uio, size_t size)
{ {
uio->uio_resid -= size; zfs_uio_resid(uio) -= size;
uio->uio_loffset += size; zfs_uio_offset(uio) += size;
} }
static inline offset_t static inline offset_t
uio_index_at_offset(uio_t *uio, offset_t off, uint_t *vec_idx) zfs_uio_index_at_offset(zfs_uio_t *uio, offset_t off, uint_t *vec_idx)
{ {
*vec_idx = 0; *vec_idx = 0;
while (*vec_idx < uio_iovcnt(uio) && off >= uio_iovlen(uio, *vec_idx)) { while (*vec_idx < zfs_uio_iovcnt(uio) &&
off -= uio_iovlen(uio, *vec_idx); off >= zfs_uio_iovlen(uio, *vec_idx)) {
off -= zfs_uio_iovlen(uio, *vec_idx);
(*vec_idx)++; (*vec_idx)++;
} }

View File

@ -92,7 +92,7 @@ int freebsd_crypt_newsession(freebsd_crypt_session_t *sessp,
void freebsd_crypt_freesession(freebsd_crypt_session_t *sessp); void freebsd_crypt_freesession(freebsd_crypt_session_t *sessp);
int freebsd_crypt_uio(boolean_t, freebsd_crypt_session_t *, int freebsd_crypt_uio(boolean_t, freebsd_crypt_session_t *,
struct zio_crypt_info *, uio_t *, crypto_key_t *, uint8_t *, struct zio_crypt_info *, zfs_uio_t *, crypto_key_t *, uint8_t *,
size_t, size_t); size_t, size_t);
#endif /* _ZFS_FREEBSD_CRYPTO_H */ #endif /* _ZFS_FREEBSD_CRYPTO_H */

View File

@ -40,6 +40,7 @@
#include <sys/zil.h> #include <sys/zil.h>
#include <sys/zfs_project.h> #include <sys/zfs_project.h>
#include <vm/vm_object.h> #include <vm/vm_object.h>
#include <sys/uio.h>
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
@ -117,7 +118,8 @@ extern minor_t zfsdev_minor_alloc(void);
#define Z_ISDIR(type) ((type) == VDIR) #define Z_ISDIR(type) ((type) == VDIR)
#define zn_has_cached_data(zp) vn_has_cached_data(ZTOV(zp)) #define zn_has_cached_data(zp) vn_has_cached_data(ZTOV(zp))
#define zn_rlimit_fsize(zp, uio, td) vn_rlimit_fsize(ZTOV(zp), (uio), (td)) #define zn_rlimit_fsize(zp, uio) \
vn_rlimit_fsize(ZTOV(zp), GET_UIO_STRUCT(uio), zfs_uio_td(uio))
/* Called on entry to each ZFS vnode and vfs operation */ /* Called on entry to each ZFS vnode and vfs operation */
#define ZFS_ENTER(zfsvfs) \ #define ZFS_ENTER(zfsvfs) \

View File

@ -36,21 +36,21 @@
typedef struct iovec iovec_t; typedef struct iovec iovec_t;
typedef enum uio_rw { typedef enum zfs_uio_rw {
UIO_READ = 0, UIO_READ = 0,
UIO_WRITE = 1, UIO_WRITE = 1,
} uio_rw_t; } zfs_uio_rw_t;
typedef enum uio_seg { typedef enum zfs_uio_seg {
UIO_USERSPACE = 0, UIO_USERSPACE = 0,
UIO_SYSSPACE = 1, UIO_SYSSPACE = 1,
UIO_BVEC = 2, UIO_BVEC = 2,
#if defined(HAVE_VFS_IOV_ITER) #if defined(HAVE_VFS_IOV_ITER)
UIO_ITER = 3, UIO_ITER = 3,
#endif #endif
} uio_seg_t; } zfs_uio_seg_t;
typedef struct uio { typedef struct zfs_uio {
union { union {
const struct iovec *uio_iov; const struct iovec *uio_iov;
const struct bio_vec *uio_bvec; const struct bio_vec *uio_bvec;
@ -60,42 +60,51 @@ typedef struct uio {
}; };
int uio_iovcnt; int uio_iovcnt;
offset_t uio_loffset; offset_t uio_loffset;
uio_seg_t uio_segflg; zfs_uio_seg_t uio_segflg;
boolean_t uio_fault_disable; boolean_t uio_fault_disable;
uint16_t uio_fmode; uint16_t uio_fmode;
uint16_t uio_extflg; uint16_t uio_extflg;
ssize_t uio_resid; ssize_t uio_resid;
size_t uio_skip; size_t uio_skip;
} uio_t; } zfs_uio_t;
#define uio_segflg(uio) (uio)->uio_segflg #define zfs_uio_segflg(u) (u)->uio_segflg
#define uio_offset(uio) (uio)->uio_loffset #define zfs_uio_offset(u) (u)->uio_loffset
#define uio_resid(uio) (uio)->uio_resid #define zfs_uio_resid(u) (u)->uio_resid
#define uio_iovcnt(uio) (uio)->uio_iovcnt #define zfs_uio_iovcnt(u) (u)->uio_iovcnt
#define uio_iovlen(uio, idx) (uio)->uio_iov[(idx)].iov_len #define zfs_uio_iovlen(u, idx) (u)->uio_iov[(idx)].iov_len
#define uio_iovbase(uio, idx) (uio)->uio_iov[(idx)].iov_base #define zfs_uio_iovbase(u, idx) (u)->uio_iov[(idx)].iov_base
#define uio_fault_disable(uio, set) (uio)->uio_fault_disable = set #define zfs_uio_fault_disable(u, set) (u)->uio_fault_disable = set
#define zfs_uio_rlimit_fsize(z, u) (0)
#define zfs_uio_fault_move(p, n, rw, u) zfs_uiomove((p), (n), (rw), (u))
static inline void static inline void
uio_iov_at_index(uio_t *uio, uint_t idx, void **base, uint64_t *len) zfs_uio_setoffset(zfs_uio_t *uio, offset_t off)
{ {
*base = uio_iovbase(uio, idx); uio->uio_loffset = off;
*len = uio_iovlen(uio, idx);
} }
static inline void static inline void
uio_advance(uio_t *uio, size_t size) zfs_uio_iov_at_index(zfs_uio_t *uio, uint_t idx, void **base, uint64_t *len)
{
*base = zfs_uio_iovbase(uio, idx);
*len = zfs_uio_iovlen(uio, idx);
}
static inline void
zfs_uio_advance(zfs_uio_t *uio, size_t size)
{ {
uio->uio_resid -= size; uio->uio_resid -= size;
uio->uio_loffset += size; uio->uio_loffset += size;
} }
static inline offset_t static inline offset_t
uio_index_at_offset(uio_t *uio, offset_t off, uint_t *vec_idx) zfs_uio_index_at_offset(zfs_uio_t *uio, offset_t off, uint_t *vec_idx)
{ {
*vec_idx = 0; *vec_idx = 0;
while (*vec_idx < uio_iovcnt(uio) && off >= uio_iovlen(uio, *vec_idx)) { while (*vec_idx < zfs_uio_iovcnt(uio) &&
off -= uio_iovlen(uio, *vec_idx); off >= zfs_uio_iovlen(uio, *vec_idx)) {
off -= zfs_uio_iovlen(uio, *vec_idx);
(*vec_idx)++; (*vec_idx)++;
} }
@ -116,8 +125,9 @@ iov_iter_init_compat(struct iov_iter *iter, unsigned int dir,
} }
static inline void static inline void
uio_iovec_init(uio_t *uio, const struct iovec *iov, unsigned long nr_segs, zfs_uio_iovec_init(zfs_uio_t *uio, const struct iovec *iov,
offset_t offset, uio_seg_t seg, ssize_t resid, size_t skip) unsigned long nr_segs, offset_t offset, zfs_uio_seg_t seg, ssize_t resid,
size_t skip)
{ {
ASSERT(seg == UIO_USERSPACE || seg == UIO_SYSSPACE); ASSERT(seg == UIO_USERSPACE || seg == UIO_SYSSPACE);
@ -133,7 +143,7 @@ uio_iovec_init(uio_t *uio, const struct iovec *iov, unsigned long nr_segs,
} }
static inline void static inline void
uio_bvec_init(uio_t *uio, struct bio *bio) zfs_uio_bvec_init(zfs_uio_t *uio, struct bio *bio)
{ {
uio->uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)]; uio->uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
uio->uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio); uio->uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
@ -148,7 +158,7 @@ uio_bvec_init(uio_t *uio, struct bio *bio)
#if defined(HAVE_VFS_IOV_ITER) #if defined(HAVE_VFS_IOV_ITER)
static inline void static inline void
uio_iov_iter_init(uio_t *uio, struct iov_iter *iter, offset_t offset, zfs_uio_iov_iter_init(zfs_uio_t *uio, struct iov_iter *iter, offset_t offset,
ssize_t resid, size_t skip) ssize_t resid, size_t skip)
{ {
uio->uio_iter = iter; uio->uio_iter = iter;

View File

@ -61,7 +61,7 @@ extern int zfs_rename(znode_t *sdzp, char *snm, znode_t *tdzp,
char *tnm, cred_t *cr, int flags); char *tnm, cred_t *cr, int flags);
extern int zfs_symlink(znode_t *dzp, char *name, vattr_t *vap, extern int zfs_symlink(znode_t *dzp, char *name, vattr_t *vap,
char *link, znode_t **zpp, cred_t *cr, int flags); char *link, znode_t **zpp, cred_t *cr, int flags);
extern int zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr); extern int zfs_readlink(struct inode *ip, zfs_uio_t *uio, cred_t *cr);
extern int zfs_link(znode_t *tdzp, znode_t *szp, extern int zfs_link(znode_t *tdzp, znode_t *szp,
char *name, cred_t *cr, int flags); char *name, cred_t *cr, int flags);
extern void zfs_inactive(struct inode *ip); extern void zfs_inactive(struct inode *ip);

View File

@ -70,8 +70,8 @@ extern "C" {
#define Z_ISDEV(type) (S_ISCHR(type) || S_ISBLK(type) || S_ISFIFO(type)) #define Z_ISDEV(type) (S_ISCHR(type) || S_ISBLK(type) || S_ISFIFO(type))
#define Z_ISDIR(type) S_ISDIR(type) #define Z_ISDIR(type) S_ISDIR(type)
#define zn_has_cached_data(zp) ((zp)->z_is_mapped) #define zn_has_cached_data(zp) ((zp)->z_is_mapped)
#define zn_rlimit_fsize(zp, uio, td) (0) #define zn_rlimit_fsize(zp, uio) (0)
/* /*
* zhold() wraps igrab() on Linux, and igrab() may fail when the * zhold() wraps igrab() on Linux, and igrab() may fail when the

View File

@ -244,7 +244,7 @@ typedef struct crypto_data {
iovec_t cdu_raw; /* Pointer and length */ iovec_t cdu_raw; /* Pointer and length */
/* uio scatter-gather format */ /* uio scatter-gather format */
uio_t *cdu_uio; zfs_uio_t *cdu_uio;
} cdu; /* Crypto Data Union */ } cdu; /* Crypto Data Union */
} crypto_data_t; } crypto_data_t;

View File

@ -847,14 +847,14 @@ void dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx); dmu_tx_t *tx);
#ifdef _KERNEL #ifdef _KERNEL
int dmu_read_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size); int dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size);
int dmu_read_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size); int dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size);
int dmu_read_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size); int dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size);
int dmu_write_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size, int dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx); dmu_tx_t *tx);
int dmu_write_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size, int dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx); dmu_tx_t *tx);
int dmu_write_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size, int dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx); dmu_tx_t *tx);
#endif #endif
struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size); struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size);

View File

@ -158,7 +158,7 @@ void sa_handle_lock(sa_handle_t *);
void sa_handle_unlock(sa_handle_t *); void sa_handle_unlock(sa_handle_t *);
#ifdef _KERNEL #ifdef _KERNEL
int sa_lookup_uio(sa_handle_t *, sa_attr_type_t, uio_t *); int sa_lookup_uio(sa_handle_t *, sa_attr_type_t, zfs_uio_t *);
int sa_add_projid(sa_handle_t *, dmu_tx_t *, uint64_t); int sa_add_projid(sa_handle_t *, dmu_tx_t *, uint64_t);
#endif #endif

View File

@ -41,9 +41,9 @@
#include <sys/uio.h> #include <sys/uio.h>
extern int uiomove(void *, size_t, enum uio_rw, uio_t *); extern int zfs_uiomove(void *, size_t, zfs_uio_rw_t, zfs_uio_t *);
extern int uio_prefaultpages(ssize_t, uio_t *); extern int zfs_uio_prefaultpages(ssize_t, zfs_uio_t *);
extern int uiocopy(void *, size_t, enum uio_rw, uio_t *, size_t *); extern int zfs_uiocopy(void *, size_t, zfs_uio_rw_t, zfs_uio_t *, size_t *);
extern void uioskip(uio_t *, size_t); extern void zfs_uioskip(zfs_uio_t *, size_t);
#endif /* _SYS_UIO_IMPL_H */ #endif /* _SYS_UIO_IMPL_H */

View File

@ -134,7 +134,7 @@ typedef struct znode_phys {
#define DXATTR_MAX_ENTRY_SIZE (32768) #define DXATTR_MAX_ENTRY_SIZE (32768)
#define DXATTR_MAX_SA_SIZE (SPA_OLD_MAXBLOCKSIZE >> 1) #define DXATTR_MAX_SA_SIZE (SPA_OLD_MAXBLOCKSIZE >> 1)
int zfs_sa_readlink(struct znode *, uio_t *); int zfs_sa_readlink(struct znode *, zfs_uio_t *);
void zfs_sa_symlink(struct znode *, char *link, int len, dmu_tx_t *); void zfs_sa_symlink(struct znode *, char *link, int len, dmu_tx_t *);
void zfs_sa_get_scanstamp(struct znode *, xvattr_t *); void zfs_sa_get_scanstamp(struct znode *, xvattr_t *);
void zfs_sa_set_scanstamp(struct znode *, xvattr_t *, dmu_tx_t *); void zfs_sa_set_scanstamp(struct znode *, xvattr_t *, dmu_tx_t *);

View File

@ -27,16 +27,16 @@
#include <sys/zfs_vnops_os.h> #include <sys/zfs_vnops_os.h>
extern int zfs_fsync(znode_t *, int, cred_t *); extern int zfs_fsync(znode_t *, int, cred_t *);
extern int zfs_read(znode_t *, uio_t *, int, cred_t *); extern int zfs_read(znode_t *, zfs_uio_t *, int, cred_t *);
extern int zfs_write(znode_t *, uio_t *, int, cred_t *); extern int zfs_write(znode_t *, zfs_uio_t *, int, cred_t *);
extern int zfs_holey(znode_t *, ulong_t, loff_t *); extern int zfs_holey(znode_t *, ulong_t, loff_t *);
extern int zfs_access(znode_t *, int, int, cred_t *); extern int zfs_access(znode_t *, int, int, cred_t *);
extern int zfs_getsecattr(znode_t *, vsecattr_t *, int, cred_t *); extern int zfs_getsecattr(znode_t *, vsecattr_t *, int, cred_t *);
extern int zfs_setsecattr(znode_t *, vsecattr_t *, int, cred_t *); extern int zfs_setsecattr(znode_t *, vsecattr_t *, int, cred_t *);
extern int mappedread(znode_t *, int, uio_t *); extern int mappedread(znode_t *, int, zfs_uio_t *);
extern int mappedread_sf(znode_t *, int, uio_t *); extern int mappedread_sf(znode_t *, int, zfs_uio_t *);
extern void update_pages(znode_t *, int64_t, int, objset_t *); extern void update_pages(znode_t *, int64_t, int, objset_t *);
/* /*

View File

@ -51,58 +51,58 @@
typedef struct iovec iovec_t; typedef struct iovec iovec_t;
#if defined(__linux__) || defined(__APPLE__) #if defined(__linux__) || defined(__APPLE__)
typedef enum uio_rw { typedef enum zfs_uio_rw {
UIO_READ = 0, UIO_READ = 0,
UIO_WRITE = 1, UIO_WRITE = 1,
} uio_rw_t; } zfs_uio_rw_t;
typedef enum uio_seg { typedef enum zfs_uio_seg {
UIO_USERSPACE = 0, UIO_USERSPACE = 0,
UIO_SYSSPACE = 1, UIO_SYSSPACE = 1,
} uio_seg_t; } zfs_uio_seg_t;
#elif defined(__FreeBSD__) #elif defined(__FreeBSD__)
typedef enum uio_seg uio_seg_t; typedef enum uio_seg zfs_uio_seg_t;
#endif #endif
typedef struct uio { typedef struct zfs_uio {
struct iovec *uio_iov; /* pointer to array of iovecs */ struct iovec *uio_iov; /* pointer to array of iovecs */
int uio_iovcnt; /* number of iovecs */ int uio_iovcnt; /* number of iovecs */
offset_t uio_loffset; /* file offset */ offset_t uio_loffset; /* file offset */
uio_seg_t uio_segflg; /* address space (kernel or user) */ zfs_uio_seg_t uio_segflg; /* address space (kernel or user) */
uint16_t uio_fmode; /* file mode flags */ uint16_t uio_fmode; /* file mode flags */
uint16_t uio_extflg; /* extended flags */ uint16_t uio_extflg; /* extended flags */
ssize_t uio_resid; /* residual count */ ssize_t uio_resid; /* residual count */
} uio_t; } zfs_uio_t;
#define uio_segflg(uio) (uio)->uio_segflg #define zfs_uio_segflg(uio) (uio)->uio_segflg
#define uio_offset(uio) (uio)->uio_loffset #define zfs_uio_offset(uio) (uio)->uio_loffset
#define uio_resid(uio) (uio)->uio_resid #define zfs_uio_resid(uio) (uio)->uio_resid
#define uio_iovcnt(uio) (uio)->uio_iovcnt #define zfs_uio_iovcnt(uio) (uio)->uio_iovcnt
#define uio_iovlen(uio, idx) (uio)->uio_iov[(idx)].iov_len #define zfs_uio_iovlen(uio, idx) (uio)->uio_iov[(idx)].iov_len
#define uio_iovbase(uio, idx) (uio)->uio_iov[(idx)].iov_base #define zfs_uio_iovbase(uio, idx) (uio)->uio_iov[(idx)].iov_base
static inline void static inline void
uio_iov_at_index(uio_t *uio, uint_t idx, void **base, uint64_t *len) zfs_uio_iov_at_index(zfs_uio_t *uio, uint_t idx, void **base, uint64_t *len)
{ {
*base = uio_iovbase(uio, idx); *base = zfs_uio_iovbase(uio, idx);
*len = uio_iovlen(uio, idx); *len = zfs_uio_iovlen(uio, idx);
} }
static inline void static inline void
uio_advance(uio_t *uio, size_t size) zfs_uio_advance(zfs_uio_t *uio, size_t size)
{ {
uio->uio_resid -= size; uio->uio_resid -= size;
uio->uio_loffset += size; uio->uio_loffset += size;
} }
static inline offset_t static inline offset_t
uio_index_at_offset(uio_t *uio, offset_t off, uint_t *vec_idx) zfs_uio_index_at_offset(zfs_uio_t *uio, offset_t off, uint_t *vec_idx)
{ {
*vec_idx = 0; *vec_idx = 0;
while (*vec_idx < (uint_t)uio_iovcnt(uio) && while (*vec_idx < (uint_t)zfs_uio_iovcnt(uio) &&
off >= (offset_t)uio_iovlen(uio, *vec_idx)) { off >= (offset_t)zfs_uio_iovlen(uio, *vec_idx)) {
off -= uio_iovlen(uio, *vec_idx); off -= zfs_uio_iovlen(uio, *vec_idx);
(*vec_idx)++; (*vec_idx)++;
} }

View File

@ -43,11 +43,11 @@ crypto_init_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset)
break; break;
case CRYPTO_DATA_UIO: { case CRYPTO_DATA_UIO: {
uio_t *uiop = out->cd_uio; zfs_uio_t *uiop = out->cd_uio;
uint_t vec_idx; uint_t vec_idx;
offset = out->cd_offset; offset = out->cd_offset;
offset = uio_index_at_offset(uiop, offset, &vec_idx); offset = zfs_uio_index_at_offset(uiop, offset, &vec_idx);
*current_offset = offset; *current_offset = offset;
*iov_or_mp = (void *)(uintptr_t)vec_idx; *iov_or_mp = (void *)(uintptr_t)vec_idx;
@ -85,7 +85,7 @@ crypto_get_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset,
} }
case CRYPTO_DATA_UIO: { case CRYPTO_DATA_UIO: {
uio_t *uio = out->cd_uio; zfs_uio_t *uio = out->cd_uio;
offset_t offset; offset_t offset;
uint_t vec_idx; uint_t vec_idx;
uint8_t *p; uint8_t *p;
@ -94,7 +94,7 @@ crypto_get_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset,
offset = *current_offset; offset = *current_offset;
vec_idx = (uintptr_t)(*iov_or_mp); vec_idx = (uintptr_t)(*iov_or_mp);
uio_iov_at_index(uio, vec_idx, &iov_base, &iov_len); zfs_uio_iov_at_index(uio, vec_idx, &iov_base, &iov_len);
p = (uint8_t *)iov_base + offset; p = (uint8_t *)iov_base + offset;
*out_data_1 = p; *out_data_1 = p;
@ -106,10 +106,10 @@ crypto_get_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset,
} else { } else {
/* one block spans two iovecs */ /* one block spans two iovecs */
*out_data_1_len = iov_len - offset; *out_data_1_len = iov_len - offset;
if (vec_idx == uio_iovcnt(uio)) if (vec_idx == zfs_uio_iovcnt(uio))
return; return;
vec_idx++; vec_idx++;
uio_iov_at_index(uio, vec_idx, &iov_base, &iov_len); zfs_uio_iov_at_index(uio, vec_idx, &iov_base, &iov_len);
*out_data_2 = (uint8_t *)iov_base; *out_data_2 = (uint8_t *)iov_base;
*current_offset = amt - *out_data_1_len; *current_offset = amt - *out_data_1_len;
} }

View File

@ -40,7 +40,7 @@ int
crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd, crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd,
void *digest_ctx, void (*update)(void)) void *digest_ctx, void (*update)(void))
{ {
uio_t *uiop = data->cd_uio; zfs_uio_t *uiop = data->cd_uio;
off_t offset = data->cd_offset; off_t offset = data->cd_offset;
size_t length = len; size_t length = len;
uint_t vec_idx; uint_t vec_idx;
@ -48,7 +48,7 @@ crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd,
uchar_t *datap; uchar_t *datap;
ASSERT(data->cd_format == CRYPTO_DATA_UIO); ASSERT(data->cd_format == CRYPTO_DATA_UIO);
if (uio_segflg(uiop) != UIO_SYSSPACE) { if (zfs_uio_segflg(uiop) != UIO_SYSSPACE) {
return (CRYPTO_ARGUMENTS_BAD); return (CRYPTO_ARGUMENTS_BAD);
} }
@ -56,9 +56,9 @@ crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd,
* Jump to the first iovec containing data to be * Jump to the first iovec containing data to be
* processed. * processed.
*/ */
offset = uio_index_at_offset(uiop, offset, &vec_idx); offset = zfs_uio_index_at_offset(uiop, offset, &vec_idx);
if (vec_idx == uio_iovcnt(uiop) && length > 0) { if (vec_idx == zfs_uio_iovcnt(uiop) && length > 0) {
/* /*
* The caller specified an offset that is larger than * The caller specified an offset that is larger than
* the total size of the buffers it provided. * the total size of the buffers it provided.
@ -66,11 +66,11 @@ crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd,
return (CRYPTO_DATA_LEN_RANGE); return (CRYPTO_DATA_LEN_RANGE);
} }
while (vec_idx < uio_iovcnt(uiop) && length > 0) { while (vec_idx < zfs_uio_iovcnt(uiop) && length > 0) {
cur_len = MIN(uio_iovlen(uiop, vec_idx) - cur_len = MIN(zfs_uio_iovlen(uiop, vec_idx) -
offset, length); offset, length);
datap = (uchar_t *)(uio_iovbase(uiop, vec_idx) + offset); datap = (uchar_t *)(zfs_uio_iovbase(uiop, vec_idx) + offset);
switch (cmd) { switch (cmd) {
case COPY_FROM_DATA: case COPY_FROM_DATA:
bcopy(datap, buf, cur_len); bcopy(datap, buf, cur_len);
@ -97,7 +97,7 @@ crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd,
offset = 0; offset = 0;
} }
if (vec_idx == uio_iovcnt(uiop) && length > 0) { if (vec_idx == zfs_uio_iovcnt(uiop) && length > 0) {
/* /*
* The end of the specified iovec's was reached but * The end of the specified iovec's was reached but
* the length requested could not be processed. * the length requested could not be processed.
@ -166,7 +166,7 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output,
void (*copy_block)(uint8_t *, uint64_t *)) void (*copy_block)(uint8_t *, uint64_t *))
{ {
common_ctx_t *common_ctx = ctx; common_ctx_t *common_ctx = ctx;
uio_t *uiop = input->cd_uio; zfs_uio_t *uiop = input->cd_uio;
off_t offset = input->cd_offset; off_t offset = input->cd_offset;
size_t length = input->cd_length; size_t length = input->cd_length;
uint_t vec_idx; uint_t vec_idx;
@ -178,7 +178,7 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output,
&common_ctx->cc_iv[0]); &common_ctx->cc_iv[0]);
} }
if (uio_segflg(input->cd_uio) != UIO_SYSSPACE) { if (zfs_uio_segflg(input->cd_uio) != UIO_SYSSPACE) {
return (CRYPTO_ARGUMENTS_BAD); return (CRYPTO_ARGUMENTS_BAD);
} }
@ -186,8 +186,8 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output,
* Jump to the first iovec containing data to be * Jump to the first iovec containing data to be
* processed. * processed.
*/ */
offset = uio_index_at_offset(uiop, offset, &vec_idx); offset = zfs_uio_index_at_offset(uiop, offset, &vec_idx);
if (vec_idx == uio_iovcnt(uiop) && length > 0) { if (vec_idx == zfs_uio_iovcnt(uiop) && length > 0) {
/* /*
* The caller specified an offset that is larger than the * The caller specified an offset that is larger than the
* total size of the buffers it provided. * total size of the buffers it provided.
@ -198,11 +198,11 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output,
/* /*
* Now process the iovecs. * Now process the iovecs.
*/ */
while (vec_idx < uio_iovcnt(uiop) && length > 0) { while (vec_idx < zfs_uio_iovcnt(uiop) && length > 0) {
cur_len = MIN(uio_iovlen(uiop, vec_idx) - cur_len = MIN(zfs_uio_iovlen(uiop, vec_idx) -
offset, length); offset, length);
int rv = (cipher)(ctx, uio_iovbase(uiop, vec_idx) + offset, int rv = (cipher)(ctx, zfs_uio_iovbase(uiop, vec_idx) + offset,
cur_len, output); cur_len, output);
if (rv != CRYPTO_SUCCESS) { if (rv != CRYPTO_SUCCESS) {
@ -213,7 +213,7 @@ crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output,
offset = 0; offset = 0;
} }
if (vec_idx == uio_iovcnt(uiop) && length > 0) { if (vec_idx == zfs_uio_iovcnt(uiop) && length > 0) {
/* /*
* The end of the specified iovec's was reached but * The end of the specified iovec's was reached but
* the length requested could not be processed, i.e. * the length requested could not be processed, i.e.

View File

@ -271,15 +271,15 @@ sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
size_t cur_len; size_t cur_len;
/* we support only kernel buffer */ /* we support only kernel buffer */
if (uio_segflg(data->cd_uio) != UIO_SYSSPACE) if (zfs_uio_segflg(data->cd_uio) != UIO_SYSSPACE)
return (CRYPTO_ARGUMENTS_BAD); return (CRYPTO_ARGUMENTS_BAD);
/* /*
* Jump to the first iovec containing data to be * Jump to the first iovec containing data to be
* digested. * digested.
*/ */
offset = uio_index_at_offset(data->cd_uio, offset, &vec_idx); offset = zfs_uio_index_at_offset(data->cd_uio, offset, &vec_idx);
if (vec_idx == uio_iovcnt(data->cd_uio)) { if (vec_idx == zfs_uio_iovcnt(data->cd_uio)) {
/* /*
* The caller specified an offset that is larger than the * The caller specified an offset that is larger than the
* total size of the buffers it provided. * total size of the buffers it provided.
@ -290,12 +290,12 @@ sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
/* /*
* Now do the digesting on the iovecs. * Now do the digesting on the iovecs.
*/ */
while (vec_idx < uio_iovcnt(data->cd_uio) && length > 0) { while (vec_idx < zfs_uio_iovcnt(data->cd_uio) && length > 0) {
cur_len = MIN(uio_iovlen(data->cd_uio, vec_idx) - cur_len = MIN(zfs_uio_iovlen(data->cd_uio, vec_idx) -
offset, length); offset, length);
SHA1Update(sha1_ctx, SHA1Update(sha1_ctx,
(uint8_t *)uio_iovbase(data->cd_uio, vec_idx) + offset, (uint8_t *)zfs_uio_iovbase(data->cd_uio, vec_idx) + offset,
cur_len); cur_len);
length -= cur_len; length -= cur_len;
@ -303,7 +303,7 @@ sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
offset = 0; offset = 0;
} }
if (vec_idx == uio_iovcnt(data->cd_uio) && length > 0) { if (vec_idx == zfs_uio_iovcnt(data->cd_uio) && length > 0) {
/* /*
* The end of the specified iovec's was reached but * The end of the specified iovec's was reached but
* the length requested could not be processed, i.e. * the length requested could not be processed, i.e.
@ -330,15 +330,15 @@ sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
uint_t vec_idx = 0; uint_t vec_idx = 0;
/* we support only kernel buffer */ /* we support only kernel buffer */
if (uio_segflg(digest->cd_uio) != UIO_SYSSPACE) if (zfs_uio_segflg(digest->cd_uio) != UIO_SYSSPACE)
return (CRYPTO_ARGUMENTS_BAD); return (CRYPTO_ARGUMENTS_BAD);
/* /*
* Jump to the first iovec containing ptr to the digest to * Jump to the first iovec containing ptr to the digest to
* be returned. * be returned.
*/ */
offset = uio_index_at_offset(digest->cd_uio, offset, &vec_idx); offset = zfs_uio_index_at_offset(digest->cd_uio, offset, &vec_idx);
if (vec_idx == uio_iovcnt(digest->cd_uio)) { if (vec_idx == zfs_uio_iovcnt(digest->cd_uio)) {
/* /*
* The caller specified an offset that is * The caller specified an offset that is
* larger than the total size of the buffers * larger than the total size of the buffers
@ -348,7 +348,7 @@ sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
} }
if (offset + digest_len <= if (offset + digest_len <=
uio_iovlen(digest->cd_uio, vec_idx)) { zfs_uio_iovlen(digest->cd_uio, vec_idx)) {
/* /*
* The computed SHA1 digest will fit in the current * The computed SHA1 digest will fit in the current
* iovec. * iovec.
@ -360,11 +360,11 @@ sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
* the user only what was requested. * the user only what was requested.
*/ */
SHA1Final(digest_scratch, sha1_ctx); SHA1Final(digest_scratch, sha1_ctx);
bcopy(digest_scratch, (uchar_t *)uio_iovbase(digest-> bcopy(digest_scratch, (uchar_t *)
cd_uio, vec_idx) + offset, zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset,
digest_len); digest_len);
} else { } else {
SHA1Final((uchar_t *)uio_iovbase(digest-> SHA1Final((uchar_t *)zfs_uio_iovbase(digest->
cd_uio, vec_idx) + offset, cd_uio, vec_idx) + offset,
sha1_ctx); sha1_ctx);
} }
@ -382,11 +382,11 @@ sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
SHA1Final(digest_tmp, sha1_ctx); SHA1Final(digest_tmp, sha1_ctx);
while (vec_idx < uio_iovcnt(digest->cd_uio) && length > 0) { while (vec_idx < zfs_uio_iovcnt(digest->cd_uio) && length > 0) {
cur_len = MIN(uio_iovlen(digest->cd_uio, vec_idx) - cur_len = MIN(zfs_uio_iovlen(digest->cd_uio, vec_idx) -
offset, length); offset, length);
bcopy(digest_tmp + scratch_offset, bcopy(digest_tmp + scratch_offset,
uio_iovbase(digest->cd_uio, vec_idx) + offset, zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset,
cur_len); cur_len);
length -= cur_len; length -= cur_len;
@ -395,7 +395,7 @@ sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
offset = 0; offset = 0;
} }
if (vec_idx == uio_iovcnt(digest->cd_uio) && length > 0) { if (vec_idx == zfs_uio_iovcnt(digest->cd_uio) && length > 0) {
/* /*
* The end of the specified iovec's was reached but * The end of the specified iovec's was reached but
* the length requested could not be processed, i.e. * the length requested could not be processed, i.e.
@ -1096,12 +1096,12 @@ sha1_mac_verify_atomic(crypto_provider_handle_t provider,
size_t cur_len; size_t cur_len;
/* we support only kernel buffer */ /* we support only kernel buffer */
if (uio_segflg(mac->cd_uio) != UIO_SYSSPACE) if (zfs_uio_segflg(mac->cd_uio) != UIO_SYSSPACE)
return (CRYPTO_ARGUMENTS_BAD); return (CRYPTO_ARGUMENTS_BAD);
/* jump to the first iovec containing the expected digest */ /* jump to the first iovec containing the expected digest */
offset = uio_index_at_offset(mac->cd_uio, offset, &vec_idx); offset = zfs_uio_index_at_offset(mac->cd_uio, offset, &vec_idx);
if (vec_idx == uio_iovcnt(mac->cd_uio)) { if (vec_idx == zfs_uio_iovcnt(mac->cd_uio)) {
/* /*
* The caller specified an offset that is * The caller specified an offset that is
* larger than the total size of the buffers * larger than the total size of the buffers
@ -1112,12 +1112,12 @@ sha1_mac_verify_atomic(crypto_provider_handle_t provider,
} }
/* do the comparison of computed digest vs specified one */ /* do the comparison of computed digest vs specified one */
while (vec_idx < uio_iovcnt(mac->cd_uio) && length > 0) { while (vec_idx < zfs_uio_iovcnt(mac->cd_uio) && length > 0) {
cur_len = MIN(uio_iovlen(mac->cd_uio, vec_idx) - cur_len = MIN(zfs_uio_iovlen(mac->cd_uio, vec_idx) -
offset, length); offset, length);
if (bcmp(digest + scratch_offset, if (bcmp(digest + scratch_offset,
uio_iovbase(mac->cd_uio, vec_idx) + offset, zfs_uio_iovbase(mac->cd_uio, vec_idx) + offset,
cur_len) != 0) { cur_len) != 0) {
ret = CRYPTO_INVALID_MAC; ret = CRYPTO_INVALID_MAC;
break; break;

View File

@ -296,15 +296,15 @@ sha2_digest_update_uio(SHA2_CTX *sha2_ctx, crypto_data_t *data)
size_t cur_len; size_t cur_len;
/* we support only kernel buffer */ /* we support only kernel buffer */
if (uio_segflg(data->cd_uio) != UIO_SYSSPACE) if (zfs_uio_segflg(data->cd_uio) != UIO_SYSSPACE)
return (CRYPTO_ARGUMENTS_BAD); return (CRYPTO_ARGUMENTS_BAD);
/* /*
* Jump to the first iovec containing data to be * Jump to the first iovec containing data to be
* digested. * digested.
*/ */
offset = uio_index_at_offset(data->cd_uio, offset, &vec_idx); offset = zfs_uio_index_at_offset(data->cd_uio, offset, &vec_idx);
if (vec_idx == uio_iovcnt(data->cd_uio)) { if (vec_idx == zfs_uio_iovcnt(data->cd_uio)) {
/* /*
* The caller specified an offset that is larger than the * The caller specified an offset that is larger than the
* total size of the buffers it provided. * total size of the buffers it provided.
@ -315,18 +315,18 @@ sha2_digest_update_uio(SHA2_CTX *sha2_ctx, crypto_data_t *data)
/* /*
* Now do the digesting on the iovecs. * Now do the digesting on the iovecs.
*/ */
while (vec_idx < uio_iovcnt(data->cd_uio) && length > 0) { while (vec_idx < zfs_uio_iovcnt(data->cd_uio) && length > 0) {
cur_len = MIN(uio_iovlen(data->cd_uio, vec_idx) - cur_len = MIN(zfs_uio_iovlen(data->cd_uio, vec_idx) -
offset, length); offset, length);
SHA2Update(sha2_ctx, (uint8_t *)uio_iovbase(data->cd_uio, SHA2Update(sha2_ctx, (uint8_t *)zfs_uio_iovbase(data->cd_uio,
vec_idx) + offset, cur_len); vec_idx) + offset, cur_len);
length -= cur_len; length -= cur_len;
vec_idx++; vec_idx++;
offset = 0; offset = 0;
} }
if (vec_idx == uio_iovcnt(data->cd_uio) && length > 0) { if (vec_idx == zfs_uio_iovcnt(data->cd_uio) && length > 0) {
/* /*
* The end of the specified iovec's was reached but * The end of the specified iovec's was reached but
* the length requested could not be processed, i.e. * the length requested could not be processed, i.e.
@ -353,15 +353,15 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
uint_t vec_idx = 0; uint_t vec_idx = 0;
/* we support only kernel buffer */ /* we support only kernel buffer */
if (uio_segflg(digest->cd_uio) != UIO_SYSSPACE) if (zfs_uio_segflg(digest->cd_uio) != UIO_SYSSPACE)
return (CRYPTO_ARGUMENTS_BAD); return (CRYPTO_ARGUMENTS_BAD);
/* /*
* Jump to the first iovec containing ptr to the digest to * Jump to the first iovec containing ptr to the digest to
* be returned. * be returned.
*/ */
offset = uio_index_at_offset(digest->cd_uio, offset, &vec_idx); offset = zfs_uio_index_at_offset(digest->cd_uio, offset, &vec_idx);
if (vec_idx == uio_iovcnt(digest->cd_uio)) { if (vec_idx == zfs_uio_iovcnt(digest->cd_uio)) {
/* /*
* The caller specified an offset that is * The caller specified an offset that is
* larger than the total size of the buffers * larger than the total size of the buffers
@ -371,7 +371,7 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
} }
if (offset + digest_len <= if (offset + digest_len <=
uio_iovlen(digest->cd_uio, vec_idx)) { zfs_uio_iovlen(digest->cd_uio, vec_idx)) {
/* /*
* The computed SHA2 digest will fit in the current * The computed SHA2 digest will fit in the current
* iovec. * iovec.
@ -387,11 +387,11 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
*/ */
SHA2Final(digest_scratch, sha2_ctx); SHA2Final(digest_scratch, sha2_ctx);
bcopy(digest_scratch, (uchar_t *)uio_iovbase(digest-> bcopy(digest_scratch, (uchar_t *)
cd_uio, vec_idx) + offset, zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset,
digest_len); digest_len);
} else { } else {
SHA2Final((uchar_t *)uio_iovbase(digest-> SHA2Final((uchar_t *)zfs_uio_iovbase(digest->
cd_uio, vec_idx) + offset, cd_uio, vec_idx) + offset,
sha2_ctx); sha2_ctx);
@ -410,12 +410,12 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
SHA2Final(digest_tmp, sha2_ctx); SHA2Final(digest_tmp, sha2_ctx);
while (vec_idx < uio_iovcnt(digest->cd_uio) && length > 0) { while (vec_idx < zfs_uio_iovcnt(digest->cd_uio) && length > 0) {
cur_len = cur_len =
MIN(uio_iovlen(digest->cd_uio, vec_idx) - MIN(zfs_uio_iovlen(digest->cd_uio, vec_idx) -
offset, length); offset, length);
bcopy(digest_tmp + scratch_offset, bcopy(digest_tmp + scratch_offset,
uio_iovbase(digest->cd_uio, vec_idx) + offset, zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset,
cur_len); cur_len);
length -= cur_len; length -= cur_len;
@ -424,7 +424,7 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
offset = 0; offset = 0;
} }
if (vec_idx == uio_iovcnt(digest->cd_uio) && length > 0) { if (vec_idx == zfs_uio_iovcnt(digest->cd_uio) && length > 0) {
/* /*
* The end of the specified iovec's was reached but * The end of the specified iovec's was reached but
* the length requested could not be processed, i.e. * the length requested could not be processed, i.e.
@ -1251,12 +1251,12 @@ sha2_mac_verify_atomic(crypto_provider_handle_t provider,
size_t cur_len; size_t cur_len;
/* we support only kernel buffer */ /* we support only kernel buffer */
if (uio_segflg(mac->cd_uio) != UIO_SYSSPACE) if (zfs_uio_segflg(mac->cd_uio) != UIO_SYSSPACE)
return (CRYPTO_ARGUMENTS_BAD); return (CRYPTO_ARGUMENTS_BAD);
/* jump to the first iovec containing the expected digest */ /* jump to the first iovec containing the expected digest */
offset = uio_index_at_offset(mac->cd_uio, offset, &vec_idx); offset = zfs_uio_index_at_offset(mac->cd_uio, offset, &vec_idx);
if (vec_idx == uio_iovcnt(mac->cd_uio)) { if (vec_idx == zfs_uio_iovcnt(mac->cd_uio)) {
/* /*
* The caller specified an offset that is * The caller specified an offset that is
* larger than the total size of the buffers * larger than the total size of the buffers
@ -1267,12 +1267,12 @@ sha2_mac_verify_atomic(crypto_provider_handle_t provider,
} }
/* do the comparison of computed digest vs specified one */ /* do the comparison of computed digest vs specified one */
while (vec_idx < uio_iovcnt(mac->cd_uio) && length > 0) { while (vec_idx < zfs_uio_iovcnt(mac->cd_uio) && length > 0) {
cur_len = MIN(uio_iovlen(mac->cd_uio, vec_idx) - cur_len = MIN(zfs_uio_iovlen(mac->cd_uio, vec_idx) -
offset, length); offset, length);
if (bcmp(digest + scratch_offset, if (bcmp(digest + scratch_offset,
uio_iovbase(mac->cd_uio, vec_idx) + offset, zfs_uio_iovbase(mac->cd_uio, vec_idx) + offset,
cur_len) != 0) { cur_len) != 0) {
ret = CRYPTO_INVALID_MAC; ret = CRYPTO_INVALID_MAC;
break; break;

View File

@ -272,18 +272,18 @@ skein_digest_update_uio(skein_ctx_t *ctx, const crypto_data_t *data)
size_t length = data->cd_length; size_t length = data->cd_length;
uint_t vec_idx = 0; uint_t vec_idx = 0;
size_t cur_len; size_t cur_len;
uio_t *uio = data->cd_uio; zfs_uio_t *uio = data->cd_uio;
/* we support only kernel buffer */ /* we support only kernel buffer */
if (uio_segflg(uio) != UIO_SYSSPACE) if (zfs_uio_segflg(uio) != UIO_SYSSPACE)
return (CRYPTO_ARGUMENTS_BAD); return (CRYPTO_ARGUMENTS_BAD);
/* /*
* Jump to the first iovec containing data to be * Jump to the first iovec containing data to be
* digested. * digested.
*/ */
offset = uio_index_at_offset(uio, offset, &vec_idx); offset = zfs_uio_index_at_offset(uio, offset, &vec_idx);
if (vec_idx == uio_iovcnt(uio)) { if (vec_idx == zfs_uio_iovcnt(uio)) {
/* /*
* The caller specified an offset that is larger than the * The caller specified an offset that is larger than the
* total size of the buffers it provided. * total size of the buffers it provided.
@ -294,16 +294,16 @@ skein_digest_update_uio(skein_ctx_t *ctx, const crypto_data_t *data)
/* /*
* Now do the digesting on the iovecs. * Now do the digesting on the iovecs.
*/ */
while (vec_idx < uio_iovcnt(uio) && length > 0) { while (vec_idx < zfs_uio_iovcnt(uio) && length > 0) {
cur_len = MIN(uio_iovlen(uio, vec_idx) - offset, length); cur_len = MIN(zfs_uio_iovlen(uio, vec_idx) - offset, length);
SKEIN_OP(ctx, Update, (uint8_t *)uio_iovbase(uio, vec_idx) SKEIN_OP(ctx, Update, (uint8_t *)zfs_uio_iovbase(uio, vec_idx)
+ offset, cur_len); + offset, cur_len);
length -= cur_len; length -= cur_len;
vec_idx++; vec_idx++;
offset = 0; offset = 0;
} }
if (vec_idx == uio_iovcnt(uio) && length > 0) { if (vec_idx == zfs_uio_iovcnt(uio) && length > 0) {
/* /*
* The end of the specified iovec's was reached but * The end of the specified iovec's was reached but
* the length requested could not be processed, i.e. * the length requested could not be processed, i.e.
@ -322,19 +322,19 @@ static int
skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest, skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest,
crypto_req_handle_t req) crypto_req_handle_t req)
{ {
off_t offset = digest->cd_offset; off_t offset = digest->cd_offset;
uint_t vec_idx = 0; uint_t vec_idx = 0;
uio_t *uio = digest->cd_uio; zfs_uio_t *uio = digest->cd_uio;
/* we support only kernel buffer */ /* we support only kernel buffer */
if (uio_segflg(uio) != UIO_SYSSPACE) if (zfs_uio_segflg(uio) != UIO_SYSSPACE)
return (CRYPTO_ARGUMENTS_BAD); return (CRYPTO_ARGUMENTS_BAD);
/* /*
* Jump to the first iovec containing ptr to the digest to be returned. * Jump to the first iovec containing ptr to the digest to be returned.
*/ */
offset = uio_index_at_offset(uio, offset, &vec_idx); offset = zfs_uio_index_at_offset(uio, offset, &vec_idx);
if (vec_idx == uio_iovcnt(uio)) { if (vec_idx == zfs_uio_iovcnt(uio)) {
/* /*
* The caller specified an offset that is larger than the * The caller specified an offset that is larger than the
* total size of the buffers it provided. * total size of the buffers it provided.
@ -342,10 +342,10 @@ skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest,
return (CRYPTO_DATA_LEN_RANGE); return (CRYPTO_DATA_LEN_RANGE);
} }
if (offset + CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen) <= if (offset + CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen) <=
uio_iovlen(uio, vec_idx)) { zfs_uio_iovlen(uio, vec_idx)) {
/* The computed digest will fit in the current iovec. */ /* The computed digest will fit in the current iovec. */
SKEIN_OP(ctx, Final, SKEIN_OP(ctx, Final,
(uchar_t *)uio_iovbase(uio, vec_idx) + offset); (uchar_t *)zfs_uio_iovbase(uio, vec_idx) + offset);
} else { } else {
uint8_t *digest_tmp; uint8_t *digest_tmp;
off_t scratch_offset = 0; off_t scratch_offset = 0;
@ -357,11 +357,11 @@ skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest,
if (digest_tmp == NULL) if (digest_tmp == NULL)
return (CRYPTO_HOST_MEMORY); return (CRYPTO_HOST_MEMORY);
SKEIN_OP(ctx, Final, digest_tmp); SKEIN_OP(ctx, Final, digest_tmp);
while (vec_idx < uio_iovcnt(uio) && length > 0) { while (vec_idx < zfs_uio_iovcnt(uio) && length > 0) {
cur_len = MIN(uio_iovlen(uio, vec_idx) - offset, cur_len = MIN(zfs_uio_iovlen(uio, vec_idx) - offset,
length); length);
bcopy(digest_tmp + scratch_offset, bcopy(digest_tmp + scratch_offset,
uio_iovbase(uio, vec_idx) + offset, cur_len); zfs_uio_iovbase(uio, vec_idx) + offset, cur_len);
length -= cur_len; length -= cur_len;
vec_idx++; vec_idx++;
@ -370,7 +370,7 @@ skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest,
} }
kmem_free(digest_tmp, CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen)); kmem_free(digest_tmp, CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen));
if (vec_idx == uio_iovcnt(uio) && length > 0) { if (vec_idx == zfs_uio_iovcnt(uio) && length > 0) {
/* /*
* The end of the specified iovec's was reached but * The end of the specified iovec's was reached but
* the length requested could not be processed, i.e. * the length requested could not be processed, i.e.

View File

@ -43,31 +43,32 @@
#include <sys/param.h> #include <sys/param.h>
#include <sys/uio.h> #include <sys/uio.h>
#include <sys/vnode.h> #include <sys/vnode.h>
#include <sys/zfs_znode.h>
/* /*
* same as uiomove() but doesn't modify uio structure. * same as zfs_uiomove() but doesn't modify uio structure.
* return in cbytes how many bytes were copied. * return in cbytes how many bytes were copied.
*/ */
int int
uiocopy(void *p, size_t n, enum uio_rw rw, struct uio *uio, size_t *cbytes) zfs_uiocopy(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio, size_t *cbytes)
{ {
struct iovec small_iovec[1]; struct iovec small_iovec[1];
struct uio small_uio_clone; struct uio small_uio_clone;
struct uio *uio_clone; struct uio *uio_clone;
int error; int error;
ASSERT3U(uio->uio_rw, ==, rw); ASSERT3U(zfs_uio_rw(uio), ==, rw);
if (uio->uio_iovcnt == 1) { if (zfs_uio_iovcnt(uio) == 1) {
small_uio_clone = *uio; small_uio_clone = *(GET_UIO_STRUCT(uio));
small_iovec[0] = *uio->uio_iov; small_iovec[0] = *(GET_UIO_STRUCT(uio)->uio_iov);
small_uio_clone.uio_iov = small_iovec; small_uio_clone.uio_iov = small_iovec;
uio_clone = &small_uio_clone; uio_clone = &small_uio_clone;
} else { } else {
uio_clone = cloneuio(uio); uio_clone = cloneuio(GET_UIO_STRUCT(uio));
} }
error = vn_io_fault_uiomove(p, n, uio_clone); error = vn_io_fault_uiomove(p, n, uio_clone);
*cbytes = uio->uio_resid - uio_clone->uio_resid; *cbytes = zfs_uio_resid(uio) - uio_clone->uio_resid;
if (uio_clone != &small_uio_clone) if (uio_clone != &small_uio_clone)
free(uio_clone, M_IOV); free(uio_clone, M_IOV);
return (error); return (error);
@ -77,16 +78,23 @@ uiocopy(void *p, size_t n, enum uio_rw rw, struct uio *uio, size_t *cbytes)
* Drop the next n chars out of *uiop. * Drop the next n chars out of *uiop.
*/ */
void void
uioskip(uio_t *uio, size_t n) zfs_uioskip(zfs_uio_t *uio, size_t n)
{ {
enum uio_seg segflg; zfs_uio_seg_t segflg;
/* For the full compatibility with illumos. */ /* For the full compatibility with illumos. */
if (n > uio->uio_resid) if (n > zfs_uio_resid(uio))
return; return;
segflg = uio->uio_segflg; segflg = zfs_uio_segflg(uio);
uio->uio_segflg = UIO_NOCOPY; zfs_uio_segflg(uio) = UIO_NOCOPY;
uiomove(NULL, n, uio->uio_rw, uio); zfs_uiomove(NULL, n, zfs_uio_rw(uio), uio);
uio->uio_segflg = segflg; zfs_uio_segflg(uio) = segflg;
}
int
zfs_uio_fault_move(void *p, size_t n, zfs_uio_rw_t dir, zfs_uio_t *uio)
{
ASSERT(zfs_uio_rw(uio) == dir);
return (vn_io_fault_uiomove(p, n, GET_UIO_STRUCT(uio)));
} }

View File

@ -199,7 +199,7 @@ static void
freebsd_crypt_uio_debug_log(boolean_t encrypt, freebsd_crypt_uio_debug_log(boolean_t encrypt,
freebsd_crypt_session_t *input_sessionp, freebsd_crypt_session_t *input_sessionp,
struct zio_crypt_info *c_info, struct zio_crypt_info *c_info,
uio_t *data_uio, zfs_uio_t *data_uio,
crypto_key_t *key, crypto_key_t *key,
uint8_t *ivbuf, uint8_t *ivbuf,
size_t datalen, size_t datalen,
@ -224,13 +224,13 @@ freebsd_crypt_uio_debug_log(boolean_t encrypt,
printf("%02x ", b[i]); printf("%02x ", b[i]);
} }
printf("}\n"); printf("}\n");
for (int i = 0; i < data_uio->uio_iovcnt; i++) { for (int i = 0; i < zfs_uio_iovcnt(data_uio); i++) {
printf("\tiovec #%d: <%p, %u>\n", i, printf("\tiovec #%d: <%p, %u>\n", i,
data_uio->uio_iov[i].iov_base, zfs_uio_iovbase(data_uio, i),
(unsigned int)data_uio->uio_iov[i].iov_len); (unsigned int)zfs_uio_iovlen(data_uio, i));
total += data_uio->uio_iov[i].iov_len; total += zfs_uio_iovlen(data_uio, i);
} }
data_uio->uio_resid = total; zfs_uio_resid(data_uio) = total;
#endif #endif
} }
/* /*
@ -323,7 +323,7 @@ int
freebsd_crypt_uio(boolean_t encrypt, freebsd_crypt_uio(boolean_t encrypt,
freebsd_crypt_session_t *input_sessionp, freebsd_crypt_session_t *input_sessionp,
struct zio_crypt_info *c_info, struct zio_crypt_info *c_info,
uio_t *data_uio, zfs_uio_t *data_uio,
crypto_key_t *key, crypto_key_t *key,
uint8_t *ivbuf, uint8_t *ivbuf,
size_t datalen, size_t datalen,
@ -336,9 +336,9 @@ freebsd_crypt_uio(boolean_t encrypt,
freebsd_crypt_uio_debug_log(encrypt, input_sessionp, c_info, data_uio, freebsd_crypt_uio_debug_log(encrypt, input_sessionp, c_info, data_uio,
key, ivbuf, datalen, auth_len); key, ivbuf, datalen, auth_len);
for (int i = 0; i < data_uio->uio_iovcnt; i++) for (int i = 0; i < zfs_uio_iovcnt(data_uio); i++)
total += data_uio->uio_iov[i].iov_len; total += zfs_uio_iovlen(data_uio, i);
data_uio->uio_resid = total; zfs_uio_resid(data_uio) = total;
if (input_sessionp == NULL) { if (input_sessionp == NULL) {
session = kmem_zalloc(sizeof (*session), KM_SLEEP); session = kmem_zalloc(sizeof (*session), KM_SLEEP);
error = freebsd_crypt_newsession(session, c_info, key); error = freebsd_crypt_newsession(session, c_info, key);
@ -356,7 +356,7 @@ freebsd_crypt_uio(boolean_t encrypt,
CRYPTO_OP_VERIFY_DIGEST; CRYPTO_OP_VERIFY_DIGEST;
} }
crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_IV_SEPARATE; crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_IV_SEPARATE;
crypto_use_uio(crp, data_uio); crypto_use_uio(crp, GET_UIO_STRUCT(data_uio));
crp->crp_aad_start = 0; crp->crp_aad_start = 0;
crp->crp_aad_length = auth_len; crp->crp_aad_length = auth_len;
@ -493,7 +493,7 @@ int
freebsd_crypt_uio(boolean_t encrypt, freebsd_crypt_uio(boolean_t encrypt,
freebsd_crypt_session_t *input_sessionp, freebsd_crypt_session_t *input_sessionp,
struct zio_crypt_info *c_info, struct zio_crypt_info *c_info,
uio_t *data_uio, zfs_uio_t *data_uio,
crypto_key_t *key, crypto_key_t *key,
uint8_t *ivbuf, uint8_t *ivbuf,
size_t datalen, size_t datalen,
@ -577,7 +577,7 @@ freebsd_crypt_uio(boolean_t encrypt,
crp->crp_session = session->fs_sid; crp->crp_session = session->fs_sid;
crp->crp_ilen = auth_len + datalen; crp->crp_ilen = auth_len + datalen;
crp->crp_buf = (void*)data_uio; crp->crp_buf = (void*)GET_UIO_STRUCT(data_uio);
crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIFSYNC; crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIFSYNC;
auth_desc->crd_skip = 0; auth_desc->crd_skip = 0;

View File

@ -251,7 +251,7 @@ sfs_reclaim_vnode(vnode_t *vp)
static int static int
sfs_readdir_common(uint64_t parent_id, uint64_t id, struct vop_readdir_args *ap, sfs_readdir_common(uint64_t parent_id, uint64_t id, struct vop_readdir_args *ap,
uio_t *uio, off_t *offp) zfs_uio_t *uio, off_t *offp)
{ {
struct dirent entry; struct dirent entry;
int error; int error;
@ -260,26 +260,26 @@ sfs_readdir_common(uint64_t parent_id, uint64_t id, struct vop_readdir_args *ap,
if (ap->a_ncookies != NULL) if (ap->a_ncookies != NULL)
*ap->a_ncookies = 0; *ap->a_ncookies = 0;
if (uio->uio_resid < sizeof (entry)) if (zfs_uio_resid(uio) < sizeof (entry))
return (SET_ERROR(EINVAL)); return (SET_ERROR(EINVAL));
if (uio->uio_offset < 0) if (zfs_uio_offset(uio) < 0)
return (SET_ERROR(EINVAL)); return (SET_ERROR(EINVAL));
if (uio->uio_offset == 0) { if (zfs_uio_offset(uio) == 0) {
entry.d_fileno = id; entry.d_fileno = id;
entry.d_type = DT_DIR; entry.d_type = DT_DIR;
entry.d_name[0] = '.'; entry.d_name[0] = '.';
entry.d_name[1] = '\0'; entry.d_name[1] = '\0';
entry.d_namlen = 1; entry.d_namlen = 1;
entry.d_reclen = sizeof (entry); entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, uio->uio_offset); error = vfs_read_dirent(ap, &entry, zfs_uio_offset(uio));
if (error != 0) if (error != 0)
return (SET_ERROR(error)); return (SET_ERROR(error));
} }
if (uio->uio_offset < sizeof (entry)) if (zfs_uio_offset(uio) < sizeof (entry))
return (SET_ERROR(EINVAL)); return (SET_ERROR(EINVAL));
if (uio->uio_offset == sizeof (entry)) { if (zfs_uio_offset(uio) == sizeof (entry)) {
entry.d_fileno = parent_id; entry.d_fileno = parent_id;
entry.d_type = DT_DIR; entry.d_type = DT_DIR;
entry.d_name[0] = '.'; entry.d_name[0] = '.';
@ -287,7 +287,7 @@ sfs_readdir_common(uint64_t parent_id, uint64_t id, struct vop_readdir_args *ap,
entry.d_name[2] = '\0'; entry.d_name[2] = '\0';
entry.d_namlen = 2; entry.d_namlen = 2;
entry.d_reclen = sizeof (entry); entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, uio->uio_offset); error = vfs_read_dirent(ap, &entry, zfs_uio_offset(uio));
if (error != 0) if (error != 0)
return (SET_ERROR(error)); return (SET_ERROR(error));
} }
@ -666,21 +666,23 @@ zfsctl_root_readdir(struct vop_readdir_args *ap)
vnode_t *vp = ap->a_vp; vnode_t *vp = ap->a_vp;
zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data; zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
zfsctl_root_t *node = vp->v_data; zfsctl_root_t *node = vp->v_data;
uio_t *uio = ap->a_uio; zfs_uio_t uio;
int *eofp = ap->a_eofflag; int *eofp = ap->a_eofflag;
off_t dots_offset; off_t dots_offset;
int error; int error;
zfs_uio_init(&uio, ap->a_uio);
ASSERT(vp->v_type == VDIR); ASSERT(vp->v_type == VDIR);
error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, uio, error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, &uio,
&dots_offset); &dots_offset);
if (error != 0) { if (error != 0) {
if (error == ENAMETOOLONG) /* ran out of destination space */ if (error == ENAMETOOLONG) /* ran out of destination space */
error = 0; error = 0;
return (error); return (error);
} }
if (uio->uio_offset != dots_offset) if (zfs_uio_offset(&uio) != dots_offset)
return (SET_ERROR(EINVAL)); return (SET_ERROR(EINVAL));
CTASSERT(sizeof (node->snapdir->sn_name) <= sizeof (entry.d_name)); CTASSERT(sizeof (node->snapdir->sn_name) <= sizeof (entry.d_name));
@ -689,7 +691,7 @@ zfsctl_root_readdir(struct vop_readdir_args *ap)
strcpy(entry.d_name, node->snapdir->sn_name); strcpy(entry.d_name, node->snapdir->sn_name);
entry.d_namlen = strlen(entry.d_name); entry.d_namlen = strlen(entry.d_name);
entry.d_reclen = sizeof (entry); entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, uio->uio_offset); error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
if (error != 0) { if (error != 0) {
if (error == ENAMETOOLONG) if (error == ENAMETOOLONG)
error = 0; error = 0;
@ -1030,15 +1032,17 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
struct dirent entry; struct dirent entry;
vnode_t *vp = ap->a_vp; vnode_t *vp = ap->a_vp;
zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data; zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
uio_t *uio = ap->a_uio; zfs_uio_t uio;
int *eofp = ap->a_eofflag; int *eofp = ap->a_eofflag;
off_t dots_offset; off_t dots_offset;
int error; int error;
zfs_uio_init(&uio, ap->a_uio);
ASSERT(vp->v_type == VDIR); ASSERT(vp->v_type == VDIR);
error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap, uio, error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap,
&dots_offset); &uio, &dots_offset);
if (error != 0) { if (error != 0) {
if (error == ENAMETOOLONG) /* ran out of destination space */ if (error == ENAMETOOLONG) /* ran out of destination space */
error = 0; error = 0;
@ -1050,7 +1054,7 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
uint64_t cookie; uint64_t cookie;
uint64_t id; uint64_t id;
cookie = uio->uio_offset - dots_offset; cookie = zfs_uio_offset(&uio) - dots_offset;
dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG); dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof (snapname), error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof (snapname),
@ -1071,14 +1075,14 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
strcpy(entry.d_name, snapname); strcpy(entry.d_name, snapname);
entry.d_namlen = strlen(entry.d_name); entry.d_namlen = strlen(entry.d_name);
entry.d_reclen = sizeof (entry); entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, uio->uio_offset); error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
if (error != 0) { if (error != 0) {
if (error == ENAMETOOLONG) if (error == ENAMETOOLONG)
error = 0; error = 0;
ZFS_EXIT(zfsvfs); ZFS_EXIT(zfsvfs);
return (SET_ERROR(error)); return (SET_ERROR(error));
} }
uio->uio_offset = cookie + dots_offset; zfs_uio_setoffset(&uio, cookie + dots_offset);
} }
/* NOTREACHED */ /* NOTREACHED */
} }

View File

@ -287,7 +287,7 @@ zfs_file_private(zfs_file_t *fp)
int int
zfs_file_unlink(const char *fnamep) zfs_file_unlink(const char *fnamep)
{ {
enum uio_seg seg = UIO_SYSSPACE; zfs_uio_seg_t seg = UIO_SYSSPACE;
int rc; int rc;
#if __FreeBSD_version >= 1300018 #if __FreeBSD_version >= 1300018

View File

@ -518,7 +518,7 @@ update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
* in one single dmu_read() call. * in one single dmu_read() call.
*/ */
int int
mappedread_sf(znode_t *zp, int nbytes, uio_t *uio) mappedread_sf(znode_t *zp, int nbytes, zfs_uio_t *uio)
{ {
vnode_t *vp = ZTOV(zp); vnode_t *vp = ZTOV(zp);
objset_t *os = zp->z_zfsvfs->z_os; objset_t *os = zp->z_zfsvfs->z_os;
@ -530,14 +530,14 @@ mappedread_sf(znode_t *zp, int nbytes, uio_t *uio)
int len = nbytes; int len = nbytes;
int error = 0; int error = 0;
ASSERT(uio->uio_segflg == UIO_NOCOPY); ASSERT(zfs_uio_segflg(uio) == UIO_NOCOPY);
ASSERT(vp->v_mount != NULL); ASSERT(vp->v_mount != NULL);
obj = vp->v_object; obj = vp->v_object;
ASSERT(obj != NULL); ASSERT(obj != NULL);
ASSERT((uio->uio_loffset & PAGEOFFSET) == 0); ASSERT((zfs_uio_offset(uio) & PAGEOFFSET) == 0);
zfs_vmobject_wlock_12(obj); zfs_vmobject_wlock_12(obj);
for (start = uio->uio_loffset; len > 0; start += PAGESIZE) { for (start = zfs_uio_offset(uio); len > 0; start += PAGESIZE) {
int bytes = MIN(PAGESIZE, len); int bytes = MIN(PAGESIZE, len);
pp = vm_page_grab_unlocked(obj, OFF_TO_IDX(start), pp = vm_page_grab_unlocked(obj, OFF_TO_IDX(start),
@ -584,8 +584,7 @@ mappedread_sf(znode_t *zp, int nbytes, uio_t *uio)
} }
if (error) if (error)
break; break;
uio->uio_resid -= bytes; zfs_uio_advance(uio, bytes);
uio->uio_offset += bytes;
len -= bytes; len -= bytes;
} }
zfs_vmobject_wunlock_12(obj); zfs_vmobject_wunlock_12(obj);
@ -603,7 +602,7 @@ mappedread_sf(znode_t *zp, int nbytes, uio_t *uio)
* the file is memory mapped. * the file is memory mapped.
*/ */
int int
mappedread(znode_t *zp, int nbytes, uio_t *uio) mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
{ {
vnode_t *vp = ZTOV(zp); vnode_t *vp = ZTOV(zp);
vm_object_t obj; vm_object_t obj;
@ -616,7 +615,7 @@ mappedread(znode_t *zp, int nbytes, uio_t *uio)
obj = vp->v_object; obj = vp->v_object;
ASSERT(obj != NULL); ASSERT(obj != NULL);
start = uio->uio_loffset; start = zfs_uio_offset(uio);
off = start & PAGEOFFSET; off = start & PAGEOFFSET;
zfs_vmobject_wlock_12(obj); zfs_vmobject_wlock_12(obj);
for (start &= PAGEMASK; len > 0; start += PAGESIZE) { for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
@ -629,7 +628,8 @@ mappedread(znode_t *zp, int nbytes, uio_t *uio)
zfs_vmobject_wunlock_12(obj); zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf); va = zfs_map_page(pp, &sf);
error = vn_io_fault_uiomove(va + off, bytes, uio); error = vn_io_fault_uiomove(va + off, bytes,
GET_UIO_STRUCT(uio));
zfs_unmap_page(sf); zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj); zfs_vmobject_wlock_12(obj);
page_unhold(pp); page_unhold(pp);
@ -1678,7 +1678,7 @@ zfs_rmdir(znode_t *dzp, const char *name, znode_t *cwd, cred_t *cr, int flags)
*/ */
/* ARGSUSED */ /* ARGSUSED */
static int static int
zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
int *ncookies, ulong_t **cookies) int *ncookies, ulong_t **cookies)
{ {
znode_t *zp = VTOZ(vp); znode_t *zp = VTOZ(vp);
@ -1723,7 +1723,7 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
/* /*
* Check for valid iov_len. * Check for valid iov_len.
*/ */
if (uio->uio_iov->iov_len <= 0) { if (GET_UIO_STRUCT(uio)->uio_iov->iov_len <= 0) {
ZFS_EXIT(zfsvfs); ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL)); return (SET_ERROR(EINVAL));
} }
@ -1738,7 +1738,7 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
error = 0; error = 0;
os = zfsvfs->z_os; os = zfsvfs->z_os;
offset = uio->uio_loffset; offset = zfs_uio_offset(uio);
prefetch = zp->z_zn_prefetch; prefetch = zp->z_zn_prefetch;
/* /*
@ -1759,9 +1759,9 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
/* /*
* Get space to change directory entries into fs independent format. * Get space to change directory entries into fs independent format.
*/ */
iovp = uio->uio_iov; iovp = GET_UIO_STRUCT(uio)->uio_iov;
bytes_wanted = iovp->iov_len; bytes_wanted = iovp->iov_len;
if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) { if (zfs_uio_segflg(uio) != UIO_SYSSPACE || zfs_uio_iovcnt(uio) != 1) {
bufsize = bytes_wanted; bufsize = bytes_wanted;
outbuf = kmem_alloc(bufsize, KM_SLEEP); outbuf = kmem_alloc(bufsize, KM_SLEEP);
odp = (struct dirent64 *)outbuf; odp = (struct dirent64 *)outbuf;
@ -1776,7 +1776,7 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
/* /*
* Minimum entry size is dirent size and 1 byte for a file name. * Minimum entry size is dirent size and 1 byte for a file name.
*/ */
ncooks = uio->uio_resid / (sizeof (struct dirent) - ncooks = zfs_uio_resid(uio) / (sizeof (struct dirent) -
sizeof (((struct dirent *)NULL)->d_name) + 1); sizeof (((struct dirent *)NULL)->d_name) + 1);
cooks = malloc(ncooks * sizeof (ulong_t), M_TEMP, M_WAITOK); cooks = malloc(ncooks * sizeof (ulong_t), M_TEMP, M_WAITOK);
*cookies = cooks; *cookies = cooks;
@ -1956,20 +1956,21 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
if (ncookies != NULL) if (ncookies != NULL)
*ncookies -= ncooks; *ncookies -= ncooks;
if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) { if (zfs_uio_segflg(uio) == UIO_SYSSPACE && zfs_uio_iovcnt(uio) == 1) {
iovp->iov_base += outcount; iovp->iov_base += outcount;
iovp->iov_len -= outcount; iovp->iov_len -= outcount;
uio->uio_resid -= outcount; zfs_uio_resid(uio) -= outcount;
} else if ((error = uiomove(outbuf, (long)outcount, UIO_READ, uio))) { } else if ((error =
zfs_uiomove(outbuf, (long)outcount, UIO_READ, uio))) {
/* /*
* Reset the pointer. * Reset the pointer.
*/ */
offset = uio->uio_loffset; offset = zfs_uio_offset(uio);
} }
update: update:
zap_cursor_fini(&zc); zap_cursor_fini(&zc);
if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) if (zfs_uio_segflg(uio) != UIO_SYSSPACE || zfs_uio_iovcnt(uio) != 1)
kmem_free(outbuf, bufsize); kmem_free(outbuf, bufsize);
if (error == ENOENT) if (error == ENOENT)
@ -1977,7 +1978,7 @@ update:
ZFS_ACCESSTIME_STAMP(zfsvfs, zp); ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
uio->uio_loffset = offset; zfs_uio_setoffset(uio, offset);
ZFS_EXIT(zfsvfs); ZFS_EXIT(zfsvfs);
if (error != 0 && cookies != NULL) { if (error != 0 && cookies != NULL) {
free(*cookies, M_TEMP); free(*cookies, M_TEMP);
@ -3660,7 +3661,7 @@ zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
*/ */
/* ARGSUSED */ /* ARGSUSED */
static int static int
zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct) zfs_readlink(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, caller_context_t *ct)
{ {
znode_t *zp = VTOZ(vp); znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs; zfsvfs_t *zfsvfs = zp->z_zfsvfs;
@ -4443,8 +4444,9 @@ struct vop_read_args {
static int static int
zfs_freebsd_read(struct vop_read_args *ap) zfs_freebsd_read(struct vop_read_args *ap)
{ {
zfs_uio_t uio;
return (zfs_read(VTOZ(ap->a_vp), ap->a_uio, ioflags(ap->a_ioflag), zfs_uio_init(&uio, ap->a_uio);
return (zfs_read(VTOZ(ap->a_vp), &uio, ioflags(ap->a_ioflag),
ap->a_cred)); ap->a_cred));
} }
@ -4460,8 +4462,9 @@ struct vop_write_args {
static int static int
zfs_freebsd_write(struct vop_write_args *ap) zfs_freebsd_write(struct vop_write_args *ap)
{ {
zfs_uio_t uio;
return (zfs_write(VTOZ(ap->a_vp), ap->a_uio, ioflags(ap->a_ioflag), zfs_uio_init(&uio, ap->a_uio);
return (zfs_write(VTOZ(ap->a_vp), &uio, ioflags(ap->a_ioflag),
ap->a_cred)); ap->a_cred));
} }
@ -4713,8 +4716,9 @@ struct vop_readdir_args {
static int static int
zfs_freebsd_readdir(struct vop_readdir_args *ap) zfs_freebsd_readdir(struct vop_readdir_args *ap)
{ {
zfs_uio_t uio;
return (zfs_readdir(ap->a_vp, ap->a_uio, ap->a_cred, ap->a_eofflag, zfs_uio_init(&uio, ap->a_uio);
return (zfs_readdir(ap->a_vp, &uio, ap->a_cred, ap->a_eofflag,
ap->a_ncookies, ap->a_cookies)); ap->a_ncookies, ap->a_cookies));
} }
@ -5004,8 +5008,9 @@ struct vop_readlink_args {
static int static int
zfs_freebsd_readlink(struct vop_readlink_args *ap) zfs_freebsd_readlink(struct vop_readlink_args *ap)
{ {
zfs_uio_t uio;
return (zfs_readlink(ap->a_vp, ap->a_uio, ap->a_cred, NULL)); zfs_uio_init(&uio, ap->a_uio);
return (zfs_readlink(ap->a_vp, &uio, ap->a_cred, NULL));
} }
#ifndef _SYS_SYSPROTO_H_ #ifndef _SYS_SYSPROTO_H_
@ -5473,11 +5478,14 @@ zfs_listextattr(struct vop_listextattr_args *ap)
uint8_t dirbuf[sizeof (struct dirent)]; uint8_t dirbuf[sizeof (struct dirent)];
struct dirent *dp; struct dirent *dp;
struct iovec aiov; struct iovec aiov;
struct uio auio, *uio = ap->a_uio; struct uio auio;
size_t *sizep = ap->a_size; size_t *sizep = ap->a_size;
size_t plen; size_t plen;
vnode_t *xvp = NULL, *vp; vnode_t *xvp = NULL, *vp;
int done, error, eof, pos; int done, error, eof, pos;
zfs_uio_t uio;
zfs_uio_init(&uio, ap->a_uio);
/* /*
* If the xattr property is off, refuse the request. * If the xattr property is off, refuse the request.
@ -5559,15 +5567,16 @@ zfs_listextattr(struct vop_listextattr_args *ap)
nlen = dp->d_namlen - plen; nlen = dp->d_namlen - plen;
if (sizep != NULL) if (sizep != NULL)
*sizep += 1 + nlen; *sizep += 1 + nlen;
else if (uio != NULL) { else if (GET_UIO_STRUCT(&uio) != NULL) {
/* /*
* Format of extattr name entry is one byte for * Format of extattr name entry is one byte for
* length and the rest for name. * length and the rest for name.
*/ */
error = uiomove(&nlen, 1, uio->uio_rw, uio); error = zfs_uiomove(&nlen, 1, zfs_uio_rw(&uio),
&uio);
if (error == 0) { if (error == 0) {
error = uiomove(dp->d_name + plen, nlen, error = zfs_uiomove(dp->d_name + plen,
uio->uio_rw, uio); nlen, zfs_uio_rw(&uio), &uio);
} }
if (error != 0) if (error != 0)
break; break;

View File

@ -404,7 +404,7 @@ int failed_decrypt_size;
static int static int
zio_do_crypt_uio_opencrypto(boolean_t encrypt, freebsd_crypt_session_t *sess, zio_do_crypt_uio_opencrypto(boolean_t encrypt, freebsd_crypt_session_t *sess,
uint64_t crypt, crypto_key_t *key, uint8_t *ivbuf, uint_t datalen, uint64_t crypt, crypto_key_t *key, uint8_t *ivbuf, uint_t datalen,
uio_t *uio, uint_t auth_len) zfs_uio_t *uio, uint_t auth_len)
{ {
zio_crypt_info_t *ci; zio_crypt_info_t *ci;
int ret; int ret;
@ -439,7 +439,8 @@ zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv,
* input and output. Also, the AAD (for AES-GMC at least) * input and output. Also, the AAD (for AES-GMC at least)
* needs to logically go in front. * needs to logically go in front.
*/ */
uio_t cuio; zfs_uio_t cuio;
struct uio cuio_s;
iovec_t iovecs[4]; iovec_t iovecs[4];
uint64_t crypt = key->zk_crypt; uint64_t crypt = key->zk_crypt;
uint_t enc_len, keydata_len, aad_len; uint_t enc_len, keydata_len, aad_len;
@ -447,6 +448,8 @@ zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv,
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS); ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
ASSERT3U(cwkey->ck_format, ==, CRYPTO_KEY_RAW); ASSERT3U(cwkey->ck_format, ==, CRYPTO_KEY_RAW);
zfs_uio_init(&cuio, &cuio_s);
keydata_len = zio_crypt_table[crypt].ci_keylen; keydata_len = zio_crypt_table[crypt].ci_keylen;
/* generate iv for wrapping the master and hmac key */ /* generate iv for wrapping the master and hmac key */
@ -489,9 +492,9 @@ zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv,
iovecs[0].iov_len = aad_len; iovecs[0].iov_len = aad_len;
enc_len = zio_crypt_table[crypt].ci_keylen + SHA512_HMAC_KEYLEN; enc_len = zio_crypt_table[crypt].ci_keylen + SHA512_HMAC_KEYLEN;
cuio.uio_iov = iovecs; GET_UIO_STRUCT(&cuio)->uio_iov = iovecs;
cuio.uio_iovcnt = 4; zfs_uio_iovcnt(&cuio) = 4;
cuio.uio_segflg = UIO_SYSSPACE; zfs_uio_segflg(&cuio) = UIO_SYSSPACE;
/* encrypt the keys and store the resulting ciphertext and mac */ /* encrypt the keys and store the resulting ciphertext and mac */
ret = zio_do_crypt_uio_opencrypto(B_TRUE, NULL, crypt, cwkey, ret = zio_do_crypt_uio_opencrypto(B_TRUE, NULL, crypt, cwkey,
@ -517,7 +520,8 @@ zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version,
* input and output. Also, the AAD (for AES-GMC at least) * input and output. Also, the AAD (for AES-GMC at least)
* needs to logically go in front. * needs to logically go in front.
*/ */
uio_t cuio; zfs_uio_t cuio;
struct uio cuio_s;
iovec_t iovecs[4]; iovec_t iovecs[4];
void *src, *dst; void *src, *dst;
uint_t enc_len, keydata_len, aad_len; uint_t enc_len, keydata_len, aad_len;
@ -528,6 +532,8 @@ zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version,
keydata_len = zio_crypt_table[crypt].ci_keylen; keydata_len = zio_crypt_table[crypt].ci_keylen;
rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL); rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);
zfs_uio_init(&cuio, &cuio_s);
/* /*
* Since we only support one buffer, we need to copy * Since we only support one buffer, we need to copy
* the encrypted buffer (source) to the plain buffer * the encrypted buffer (source) to the plain buffer
@ -565,9 +571,9 @@ zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version,
iovecs[0].iov_base = aad; iovecs[0].iov_base = aad;
iovecs[0].iov_len = aad_len; iovecs[0].iov_len = aad_len;
cuio.uio_iov = iovecs; GET_UIO_STRUCT(&cuio)->uio_iov = iovecs;
cuio.uio_iovcnt = 4; zfs_uio_iovcnt(&cuio) = 4;
cuio.uio_segflg = UIO_SYSSPACE; zfs_uio_segflg(&cuio) = UIO_SYSSPACE;
/* decrypt the keys and store the result in the output buffers */ /* decrypt the keys and store the result in the output buffers */
ret = zio_do_crypt_uio_opencrypto(B_FALSE, NULL, crypt, cwkey, ret = zio_do_crypt_uio_opencrypto(B_FALSE, NULL, crypt, cwkey,
@ -1137,10 +1143,11 @@ error:
} }
static void static void
zio_crypt_destroy_uio(uio_t *uio) zio_crypt_destroy_uio(zfs_uio_t *uio)
{ {
if (uio->uio_iov) if (GET_UIO_STRUCT(uio)->uio_iov)
kmem_free(uio->uio_iov, uio->uio_iovcnt * sizeof (iovec_t)); kmem_free(GET_UIO_STRUCT(uio)->uio_iov,
zfs_uio_iovcnt(uio) * sizeof (iovec_t));
} }
/* /*
@ -1234,14 +1241,14 @@ zio_crypt_do_indirect_mac_checksum_abd(boolean_t generate, abd_t *abd,
* accommodate some of the drivers, the authbuf needs to be logically before * accommodate some of the drivers, the authbuf needs to be logically before
* the data. This means that we need to copy the source to the destination, * the data. This means that we need to copy the source to the destination,
* and set up an extra iovec_t at the beginning to handle the authbuf. * and set up an extra iovec_t at the beginning to handle the authbuf.
* It also means we'll only return one uio_t. * It also means we'll only return one zfs_uio_t.
*/ */
/* ARGSUSED */ /* ARGSUSED */
static int static int
zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf, zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, uio_t *puio, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, zfs_uio_t *puio,
uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len, zfs_uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len,
boolean_t *no_crypt) boolean_t *no_crypt)
{ {
uint8_t *aadbuf = zio_buf_alloc(datalen); uint8_t *aadbuf = zio_buf_alloc(datalen);
@ -1385,8 +1392,8 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
*enc_len = total_len; *enc_len = total_len;
*authbuf = aadbuf; *authbuf = aadbuf;
*auth_len = aad_len; *auth_len = aad_len;
out_uio->uio_iov = dst_iovecs; GET_UIO_STRUCT(out_uio)->uio_iov = dst_iovecs;
out_uio->uio_iovcnt = nr_iovecs; zfs_uio_iovcnt(out_uio) = nr_iovecs;
return (0); return (0);
} }
@ -1397,7 +1404,7 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
static int static int
zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version, zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
uio_t *puio, uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf, zfs_uio_t *puio, zfs_uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf,
uint_t *auth_len, boolean_t *no_crypt) uint_t *auth_len, boolean_t *no_crypt)
{ {
uint8_t *aadbuf = zio_buf_alloc(datalen); uint8_t *aadbuf = zio_buf_alloc(datalen);
@ -1534,8 +1541,8 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
*enc_len = total_len; *enc_len = total_len;
*authbuf = aadbuf; *authbuf = aadbuf;
*auth_len = aad_len; *auth_len = aad_len;
out_uio->uio_iov = dst_iovecs; GET_UIO_STRUCT(out_uio)->uio_iov = dst_iovecs;
out_uio->uio_iovcnt = nr_iovecs; zfs_uio_iovcnt(out_uio) = nr_iovecs;
return (0); return (0);
} }
@ -1543,7 +1550,7 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
/* ARGSUSED */ /* ARGSUSED */
static int static int
zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf, zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, uio_t *puio, uio_t *out_uio, uint8_t *cipherbuf, uint_t datalen, zfs_uio_t *puio, zfs_uio_t *out_uio,
uint_t *enc_len) uint_t *enc_len)
{ {
int ret; int ret;
@ -1571,8 +1578,8 @@ zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf,
cipher_iovecs[0].iov_len = datalen; cipher_iovecs[0].iov_len = datalen;
*enc_len = datalen; *enc_len = datalen;
out_uio->uio_iov = cipher_iovecs; GET_UIO_STRUCT(out_uio)->uio_iov = cipher_iovecs;
out_uio->uio_iovcnt = nr_cipher; zfs_uio_iovcnt(out_uio) = nr_cipher;
return (0); return (0);
@ -1583,8 +1590,8 @@ error:
kmem_free(cipher_iovecs, nr_cipher * sizeof (iovec_t)); kmem_free(cipher_iovecs, nr_cipher * sizeof (iovec_t));
*enc_len = 0; *enc_len = 0;
out_uio->uio_iov = NULL; GET_UIO_STRUCT(out_uio)->uio_iov = NULL;
out_uio->uio_iovcnt = 0; zfs_uio_iovcnt(out_uio) = 0;
return (ret); return (ret);
} }
@ -1600,8 +1607,8 @@ error:
static int static int
zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot, zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot,
uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
uint8_t *mac, uio_t *puio, uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, uint8_t *mac, zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len,
uint_t *auth_len, boolean_t *no_crypt) uint8_t **authbuf, uint_t *auth_len, boolean_t *no_crypt)
{ {
int ret; int ret;
iovec_t *mac_iov; iovec_t *mac_iov;
@ -1633,9 +1640,11 @@ zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot,
goto error; goto error;
/* populate the uios */ /* populate the uios */
cuio->uio_segflg = UIO_SYSSPACE; zfs_uio_segflg(cuio) = UIO_SYSSPACE;
mac_iov = ((iovec_t *)&cuio->uio_iov[cuio->uio_iovcnt - 1]); mac_iov =
((iovec_t *)&(GET_UIO_STRUCT(cuio)->
uio_iov[zfs_uio_iovcnt(cuio) - 1]));
mac_iov->iov_base = (void *)mac; mac_iov->iov_base = (void *)mac;
mac_iov->iov_len = ZIO_DATA_MAC_LEN; mac_iov->iov_len = ZIO_DATA_MAC_LEN;
@ -1662,14 +1671,18 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
uint64_t crypt = key->zk_crypt; uint64_t crypt = key->zk_crypt;
uint_t keydata_len = zio_crypt_table[crypt].ci_keylen; uint_t keydata_len = zio_crypt_table[crypt].ci_keylen;
uint_t enc_len, auth_len; uint_t enc_len, auth_len;
uio_t puio, cuio; zfs_uio_t puio, cuio;
struct uio puio_s, cuio_s;
uint8_t enc_keydata[MASTER_KEY_MAX_LEN]; uint8_t enc_keydata[MASTER_KEY_MAX_LEN];
crypto_key_t tmp_ckey, *ckey = NULL; crypto_key_t tmp_ckey, *ckey = NULL;
freebsd_crypt_session_t *tmpl = NULL; freebsd_crypt_session_t *tmpl = NULL;
uint8_t *authbuf = NULL; uint8_t *authbuf = NULL;
bzero(&puio, sizeof (uio_t));
bzero(&cuio, sizeof (uio_t)); zfs_uio_init(&puio, &puio_s);
zfs_uio_init(&cuio, &cuio_s);
bzero(GET_UIO_STRUCT(&puio), sizeof (struct uio));
bzero(GET_UIO_STRUCT(&cuio), sizeof (struct uio));
#ifdef FCRYPTO_DEBUG #ifdef FCRYPTO_DEBUG
printf("%s(%s, %p, %p, %d, %p, %p, %u, %s, %p, %p, %p)\n", printf("%s(%s, %p, %p, %d, %p, %p, %u, %s, %p, %p, %p)\n",

View File

@ -746,12 +746,15 @@ out:
*/ */
static int static int
zvol_cdev_read(struct cdev *dev, struct uio *uio, int ioflag) zvol_cdev_read(struct cdev *dev, struct uio *uio_s, int ioflag)
{ {
zvol_state_t *zv; zvol_state_t *zv;
uint64_t volsize; uint64_t volsize;
zfs_locked_range_t *lr; zfs_locked_range_t *lr;
int error = 0; int error = 0;
zfs_uio_t uio;
zfs_uio_init(&uio, uio_s);
zv = dev->si_drv2; zv = dev->si_drv2;
@ -760,20 +763,20 @@ zvol_cdev_read(struct cdev *dev, struct uio *uio, int ioflag)
* uio_loffset == volsize isn't an error as * uio_loffset == volsize isn't an error as
* its required for EOF processing. * its required for EOF processing.
*/ */
if (uio->uio_resid > 0 && if (zfs_uio_resid(&uio) > 0 &&
(uio->uio_loffset < 0 || uio->uio_loffset > volsize)) (zfs_uio_offset(&uio) < 0 || zfs_uio_offset(&uio) > volsize))
return (SET_ERROR(EIO)); return (SET_ERROR(EIO));
lr = zfs_rangelock_enter(&zv->zv_rangelock, uio->uio_loffset, lr = zfs_rangelock_enter(&zv->zv_rangelock, zfs_uio_offset(&uio),
uio->uio_resid, RL_READER); zfs_uio_resid(&uio), RL_READER);
while (uio->uio_resid > 0 && uio->uio_loffset < volsize) { while (zfs_uio_resid(&uio) > 0 && zfs_uio_offset(&uio) < volsize) {
uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1); uint64_t bytes = MIN(zfs_uio_resid(&uio), DMU_MAX_ACCESS >> 1);
/* don't read past the end */ /* don't read past the end */
if (bytes > volsize - uio->uio_loffset) if (bytes > volsize - zfs_uio_offset(&uio))
bytes = volsize - uio->uio_loffset; bytes = volsize - zfs_uio_offset(&uio);
error = dmu_read_uio_dnode(zv->zv_dn, uio, bytes); error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
if (error) { if (error) {
/* convert checksum errors into IO errors */ /* convert checksum errors into IO errors */
if (error == ECKSUM) if (error == ECKSUM)
@ -787,20 +790,23 @@ zvol_cdev_read(struct cdev *dev, struct uio *uio, int ioflag)
} }
static int static int
zvol_cdev_write(struct cdev *dev, struct uio *uio, int ioflag) zvol_cdev_write(struct cdev *dev, struct uio *uio_s, int ioflag)
{ {
zvol_state_t *zv; zvol_state_t *zv;
uint64_t volsize; uint64_t volsize;
zfs_locked_range_t *lr; zfs_locked_range_t *lr;
int error = 0; int error = 0;
boolean_t sync; boolean_t sync;
zfs_uio_t uio;
zv = dev->si_drv2; zv = dev->si_drv2;
volsize = zv->zv_volsize; volsize = zv->zv_volsize;
if (uio->uio_resid > 0 && zfs_uio_init(&uio, uio_s);
(uio->uio_loffset < 0 || uio->uio_loffset > volsize))
if (zfs_uio_resid(&uio) > 0 &&
(zfs_uio_offset(&uio) < 0 || zfs_uio_offset(&uio) > volsize))
return (SET_ERROR(EIO)); return (SET_ERROR(EIO));
sync = (ioflag & IO_SYNC) || sync = (ioflag & IO_SYNC) ||
@ -809,11 +815,11 @@ zvol_cdev_write(struct cdev *dev, struct uio *uio, int ioflag)
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER); rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
zvol_ensure_zilog(zv); zvol_ensure_zilog(zv);
lr = zfs_rangelock_enter(&zv->zv_rangelock, uio->uio_loffset, lr = zfs_rangelock_enter(&zv->zv_rangelock, zfs_uio_offset(&uio),
uio->uio_resid, RL_WRITER); zfs_uio_resid(&uio), RL_WRITER);
while (uio->uio_resid > 0 && uio->uio_loffset < volsize) { while (zfs_uio_resid(&uio) > 0 && zfs_uio_offset(&uio) < volsize) {
uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1); uint64_t bytes = MIN(zfs_uio_resid(&uio), DMU_MAX_ACCESS >> 1);
uint64_t off = uio->uio_loffset; uint64_t off = zfs_uio_offset(&uio);
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset); dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
if (bytes > volsize - off) /* don't write past the end */ if (bytes > volsize - off) /* don't write past the end */
@ -825,7 +831,7 @@ zvol_cdev_write(struct cdev *dev, struct uio *uio, int ioflag)
dmu_tx_abort(tx); dmu_tx_abort(tx);
break; break;
} }
error = dmu_write_uio_dnode(zv->zv_dn, uio, bytes, tx); error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
if (error == 0) if (error == 0)
zvol_log_write(zv, tx, off, bytes, sync); zvol_log_write(zv, tx, off, bytes, sync);
dmu_tx_commit(tx); dmu_tx_commit(tx);

View File

@ -55,7 +55,7 @@
* a non-zero errno on failure. * a non-zero errno on failure.
*/ */
static int static int
uiomove_iov(void *p, size_t n, enum uio_rw rw, struct uio *uio) zfs_uiomove_iov(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
{ {
const struct iovec *iov = uio->uio_iov; const struct iovec *iov = uio->uio_iov;
size_t skip = uio->uio_skip; size_t skip = uio->uio_skip;
@ -126,7 +126,7 @@ uiomove_iov(void *p, size_t n, enum uio_rw rw, struct uio *uio)
} }
static int static int
uiomove_bvec(void *p, size_t n, enum uio_rw rw, struct uio *uio) zfs_uiomove_bvec(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
{ {
const struct bio_vec *bv = uio->uio_bvec; const struct bio_vec *bv = uio->uio_bvec;
size_t skip = uio->uio_skip; size_t skip = uio->uio_skip;
@ -160,7 +160,7 @@ uiomove_bvec(void *p, size_t n, enum uio_rw rw, struct uio *uio)
#if defined(HAVE_VFS_IOV_ITER) #if defined(HAVE_VFS_IOV_ITER)
static int static int
uiomove_iter(void *p, size_t n, enum uio_rw rw, struct uio *uio, zfs_uiomove_iter(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio,
boolean_t revert) boolean_t revert)
{ {
size_t cnt = MIN(n, uio->uio_resid); size_t cnt = MIN(n, uio->uio_resid);
@ -182,7 +182,7 @@ uiomove_iter(void *p, size_t n, enum uio_rw rw, struct uio *uio,
return (EFAULT); return (EFAULT);
/* /*
* Revert advancing the uio_iter. This is set by uiocopy() * Revert advancing the uio_iter. This is set by zfs_uiocopy()
* to avoid consuming the uio and its iov_iter structure. * to avoid consuming the uio and its iov_iter structure.
*/ */
if (revert) if (revert)
@ -196,18 +196,18 @@ uiomove_iter(void *p, size_t n, enum uio_rw rw, struct uio *uio,
#endif #endif
int int
uiomove(void *p, size_t n, enum uio_rw rw, struct uio *uio) zfs_uiomove(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
{ {
if (uio->uio_segflg == UIO_BVEC) if (uio->uio_segflg == UIO_BVEC)
return (uiomove_bvec(p, n, rw, uio)); return (zfs_uiomove_bvec(p, n, rw, uio));
#if defined(HAVE_VFS_IOV_ITER) #if defined(HAVE_VFS_IOV_ITER)
else if (uio->uio_segflg == UIO_ITER) else if (uio->uio_segflg == UIO_ITER)
return (uiomove_iter(p, n, rw, uio, B_FALSE)); return (zfs_uiomove_iter(p, n, rw, uio, B_FALSE));
#endif #endif
else else
return (uiomove_iov(p, n, rw, uio)); return (zfs_uiomove_iov(p, n, rw, uio));
} }
EXPORT_SYMBOL(uiomove); EXPORT_SYMBOL(zfs_uiomove);
/* /*
* Fault in the pages of the first n bytes specified by the uio structure. * Fault in the pages of the first n bytes specified by the uio structure.
@ -216,7 +216,7 @@ EXPORT_SYMBOL(uiomove);
* the pages resident. * the pages resident.
*/ */
int int
uio_prefaultpages(ssize_t n, struct uio *uio) zfs_uio_prefaultpages(ssize_t n, zfs_uio_t *uio)
{ {
if (uio->uio_segflg == UIO_SYSSPACE || uio->uio_segflg == UIO_BVEC) { if (uio->uio_segflg == UIO_SYSSPACE || uio->uio_segflg == UIO_BVEC) {
/* There's never a need to fault in kernel pages */ /* There's never a need to fault in kernel pages */
@ -263,40 +263,40 @@ uio_prefaultpages(ssize_t n, struct uio *uio)
return (0); return (0);
} }
EXPORT_SYMBOL(uio_prefaultpages); EXPORT_SYMBOL(zfs_uio_prefaultpages);
/* /*
* The same as uiomove() but doesn't modify uio structure. * The same as zfs_uiomove() but doesn't modify uio structure.
* return in cbytes how many bytes were copied. * return in cbytes how many bytes were copied.
*/ */
int int
uiocopy(void *p, size_t n, enum uio_rw rw, struct uio *uio, size_t *cbytes) zfs_uiocopy(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio, size_t *cbytes)
{ {
struct uio uio_copy; zfs_uio_t uio_copy;
int ret; int ret;
bcopy(uio, &uio_copy, sizeof (struct uio)); bcopy(uio, &uio_copy, sizeof (zfs_uio_t));
if (uio->uio_segflg == UIO_BVEC) if (uio->uio_segflg == UIO_BVEC)
ret = uiomove_bvec(p, n, rw, &uio_copy); ret = zfs_uiomove_bvec(p, n, rw, &uio_copy);
#if defined(HAVE_VFS_IOV_ITER) #if defined(HAVE_VFS_IOV_ITER)
else if (uio->uio_segflg == UIO_ITER) else if (uio->uio_segflg == UIO_ITER)
ret = uiomove_iter(p, n, rw, &uio_copy, B_TRUE); ret = zfs_uiomove_iter(p, n, rw, &uio_copy, B_TRUE);
#endif #endif
else else
ret = uiomove_iov(p, n, rw, &uio_copy); ret = zfs_uiomove_iov(p, n, rw, &uio_copy);
*cbytes = uio->uio_resid - uio_copy.uio_resid; *cbytes = uio->uio_resid - uio_copy.uio_resid;
return (ret); return (ret);
} }
EXPORT_SYMBOL(uiocopy); EXPORT_SYMBOL(zfs_uiocopy);
/* /*
* Drop the next n chars out of *uio. * Drop the next n chars out of *uio.
*/ */
void void
uioskip(uio_t *uio, size_t n) zfs_uioskip(zfs_uio_t *uio, size_t n)
{ {
if (n > uio->uio_resid) if (n > uio->uio_resid)
return; return;
@ -325,5 +325,6 @@ uioskip(uio_t *uio, size_t n)
uio->uio_loffset += n; uio->uio_loffset += n;
uio->uio_resid -= n; uio->uio_resid -= n;
} }
EXPORT_SYMBOL(uioskip); EXPORT_SYMBOL(zfs_uioskip);
#endif /* _KERNEL */ #endif /* _KERNEL */

View File

@ -301,7 +301,7 @@ update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
* the file is memory mapped. * the file is memory mapped.
*/ */
int int
mappedread(znode_t *zp, int nbytes, uio_t *uio) mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
{ {
struct inode *ip = ZTOI(zp); struct inode *ip = ZTOI(zp);
struct address_space *mp = ip->i_mapping; struct address_space *mp = ip->i_mapping;
@ -323,7 +323,7 @@ mappedread(znode_t *zp, int nbytes, uio_t *uio)
unlock_page(pp); unlock_page(pp);
pb = kmap(pp); pb = kmap(pp);
error = uiomove(pb + off, bytes, UIO_READ, uio); error = zfs_uiomove(pb + off, bytes, UIO_READ, uio);
kunmap(pp); kunmap(pp);
if (mapping_writably_mapped(mp)) if (mapping_writably_mapped(mp))
@ -375,8 +375,8 @@ zfs_write_simple(znode_t *zp, const void *data, size_t len,
iov.iov_base = (void *)data; iov.iov_base = (void *)data;
iov.iov_len = len; iov.iov_len = len;
uio_t uio; zfs_uio_t uio;
uio_iovec_init(&uio, &iov, 1, pos, UIO_SYSSPACE, len, 0); zfs_uio_iovec_init(&uio, &iov, 1, pos, UIO_SYSSPACE, len, 0);
cookie = spl_fstrans_mark(); cookie = spl_fstrans_mark();
error = zfs_write(zp, &uio, 0, kcred); error = zfs_write(zp, &uio, 0, kcred);
@ -384,8 +384,8 @@ zfs_write_simple(znode_t *zp, const void *data, size_t len,
if (error == 0) { if (error == 0) {
if (residp != NULL) if (residp != NULL)
*residp = uio_resid(&uio); *residp = zfs_uio_resid(&uio);
else if (uio_resid(&uio) != 0) else if (zfs_uio_resid(&uio) != 0)
error = SET_ERROR(EIO); error = SET_ERROR(EIO);
} }
@ -3208,7 +3208,7 @@ top:
*/ */
/* ARGSUSED */ /* ARGSUSED */
int int
zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr) zfs_readlink(struct inode *ip, zfs_uio_t *uio, cred_t *cr)
{ {
znode_t *zp = ITOZ(ip); znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip); zfsvfs_t *zfsvfs = ITOZSB(ip);

View File

@ -376,7 +376,7 @@ error:
static int static int
zio_do_crypt_uio(boolean_t encrypt, uint64_t crypt, crypto_key_t *key, zio_do_crypt_uio(boolean_t encrypt, uint64_t crypt, crypto_key_t *key,
crypto_ctx_template_t tmpl, uint8_t *ivbuf, uint_t datalen, crypto_ctx_template_t tmpl, uint8_t *ivbuf, uint_t datalen,
uio_t *puio, uio_t *cuio, uint8_t *authbuf, uint_t auth_len) zfs_uio_t *puio, zfs_uio_t *cuio, uint8_t *authbuf, uint_t auth_len)
{ {
int ret; int ret;
crypto_data_t plaindata, cipherdata; crypto_data_t plaindata, cipherdata;
@ -479,7 +479,7 @@ zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv,
uint8_t *mac, uint8_t *keydata_out, uint8_t *hmac_keydata_out) uint8_t *mac, uint8_t *keydata_out, uint8_t *hmac_keydata_out)
{ {
int ret; int ret;
uio_t puio, cuio; zfs_uio_t puio, cuio;
uint64_t aad[3]; uint64_t aad[3];
iovec_t plain_iovecs[2], cipher_iovecs[3]; iovec_t plain_iovecs[2], cipher_iovecs[3];
uint64_t crypt = key->zk_crypt; uint64_t crypt = key->zk_crypt;
@ -495,7 +495,7 @@ zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv,
if (ret != 0) if (ret != 0)
goto error; goto error;
/* initialize uio_ts */ /* initialize zfs_uio_ts */
plain_iovecs[0].iov_base = key->zk_master_keydata; plain_iovecs[0].iov_base = key->zk_master_keydata;
plain_iovecs[0].iov_len = keydata_len; plain_iovecs[0].iov_len = keydata_len;
plain_iovecs[1].iov_base = key->zk_hmac_keydata; plain_iovecs[1].iov_base = key->zk_hmac_keydata;
@ -550,7 +550,7 @@ zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version,
uint8_t *mac, zio_crypt_key_t *key) uint8_t *mac, zio_crypt_key_t *key)
{ {
crypto_mechanism_t mech; crypto_mechanism_t mech;
uio_t puio, cuio; zfs_uio_t puio, cuio;
uint64_t aad[3]; uint64_t aad[3];
iovec_t plain_iovecs[2], cipher_iovecs[3]; iovec_t plain_iovecs[2], cipher_iovecs[3];
uint_t enc_len, keydata_len, aad_len; uint_t enc_len, keydata_len, aad_len;
@ -563,7 +563,7 @@ zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version,
keydata_len = zio_crypt_table[crypt].ci_keylen; keydata_len = zio_crypt_table[crypt].ci_keylen;
/* initialize uio_ts */ /* initialize zfs_uio_ts */
plain_iovecs[0].iov_base = key->zk_master_keydata; plain_iovecs[0].iov_base = key->zk_master_keydata;
plain_iovecs[0].iov_len = keydata_len; plain_iovecs[0].iov_len = keydata_len;
plain_iovecs[1].iov_base = key->zk_hmac_keydata; plain_iovecs[1].iov_base = key->zk_hmac_keydata;
@ -1289,7 +1289,7 @@ error:
} }
static void static void
zio_crypt_destroy_uio(uio_t *uio) zio_crypt_destroy_uio(zfs_uio_t *uio)
{ {
if (uio->uio_iov) if (uio->uio_iov)
kmem_free(uio->uio_iov, uio->uio_iovcnt * sizeof (iovec_t)); kmem_free(uio->uio_iov, uio->uio_iovcnt * sizeof (iovec_t));
@ -1379,8 +1379,8 @@ zio_crypt_do_indirect_mac_checksum_abd(boolean_t generate, abd_t *abd,
*/ */
static int static int
zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf, zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, uio_t *puio, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, zfs_uio_t *puio,
uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len, zfs_uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len,
boolean_t *no_crypt) boolean_t *no_crypt)
{ {
int ret; int ret;
@ -1575,7 +1575,7 @@ error:
static int static int
zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version, zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
uio_t *puio, uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len, uint8_t **authbuf,
uint_t *auth_len, boolean_t *no_crypt) uint_t *auth_len, boolean_t *no_crypt)
{ {
int ret; int ret;
@ -1758,7 +1758,7 @@ error:
static int static int
zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf, zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, uio_t *puio, uio_t *cuio, uint8_t *cipherbuf, uint_t datalen, zfs_uio_t *puio, zfs_uio_t *cuio,
uint_t *enc_len) uint_t *enc_len)
{ {
int ret; int ret;
@ -1818,8 +1818,8 @@ error:
static int static int
zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot, zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot,
uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
uint8_t *mac, uio_t *puio, uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, uint8_t *mac, zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len,
uint_t *auth_len, boolean_t *no_crypt) uint8_t **authbuf, uint_t *auth_len, boolean_t *no_crypt)
{ {
int ret; int ret;
iovec_t *mac_iov; iovec_t *mac_iov;
@ -1878,7 +1878,7 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
uint64_t crypt = key->zk_crypt; uint64_t crypt = key->zk_crypt;
uint_t keydata_len = zio_crypt_table[crypt].ci_keylen; uint_t keydata_len = zio_crypt_table[crypt].ci_keylen;
uint_t enc_len, auth_len; uint_t enc_len, auth_len;
uio_t puio, cuio; zfs_uio_t puio, cuio;
uint8_t enc_keydata[MASTER_KEY_MAX_LEN]; uint8_t enc_keydata[MASTER_KEY_MAX_LEN];
crypto_key_t tmp_ckey, *ckey = NULL; crypto_key_t tmp_ckey, *ckey = NULL;
crypto_ctx_template_t tmpl; crypto_ctx_template_t tmpl;
@ -1944,8 +1944,8 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
/* If the hardware implementation fails fall back to software */ /* If the hardware implementation fails fall back to software */
} }
bzero(&puio, sizeof (uio_t)); bzero(&puio, sizeof (zfs_uio_t));
bzero(&cuio, sizeof (uio_t)); bzero(&cuio, sizeof (zfs_uio_t));
/* create uios for encryption */ /* create uios for encryption */
ret = zio_crypt_init_uios(encrypt, key->zk_version, ot, plainbuf, ret = zio_crypt_init_uios(encrypt, key->zk_version, ot, plainbuf,

View File

@ -245,13 +245,13 @@ zpl_file_accessed(struct file *filp)
* Otherwise, for older kernels extract the iovec and pass it instead. * Otherwise, for older kernels extract the iovec and pass it instead.
*/ */
static void static void
zpl_uio_init(uio_t *uio, struct kiocb *kiocb, struct iov_iter *to, zpl_uio_init(zfs_uio_t *uio, struct kiocb *kiocb, struct iov_iter *to,
loff_t pos, ssize_t count, size_t skip) loff_t pos, ssize_t count, size_t skip)
{ {
#if defined(HAVE_VFS_IOV_ITER) #if defined(HAVE_VFS_IOV_ITER)
uio_iov_iter_init(uio, to, pos, count, skip); zfs_uio_iov_iter_init(uio, to, pos, count, skip);
#else #else
uio_iovec_init(uio, to->iov, to->nr_segs, pos, zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
to->type & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE, to->type & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
count, skip); count, skip);
#endif #endif
@ -264,7 +264,7 @@ zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
fstrans_cookie_t cookie; fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp; struct file *filp = kiocb->ki_filp;
ssize_t count = iov_iter_count(to); ssize_t count = iov_iter_count(to);
uio_t uio; zfs_uio_t uio;
zpl_uio_init(&uio, kiocb, to, kiocb->ki_pos, count, 0); zpl_uio_init(&uio, kiocb, to, kiocb->ki_pos, count, 0);
@ -320,7 +320,7 @@ zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
fstrans_cookie_t cookie; fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp; struct file *filp = kiocb->ki_filp;
struct inode *ip = filp->f_mapping->host; struct inode *ip = filp->f_mapping->host;
uio_t uio; zfs_uio_t uio;
size_t count = 0; size_t count = 0;
ssize_t ret; ssize_t ret;
@ -364,8 +364,8 @@ zpl_aio_read(struct kiocb *kiocb, const struct iovec *iov,
if (ret) if (ret)
return (ret); return (ret);
uio_t uio; zfs_uio_t uio;
uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE, zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
count, 0); count, 0);
crhold(cr); crhold(cr);
@ -407,8 +407,8 @@ zpl_aio_write(struct kiocb *kiocb, const struct iovec *iov,
if (ret) if (ret)
return (ret); return (ret);
uio_t uio; zfs_uio_t uio;
uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE, zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
count, 0); count, 0);
crhold(cr); crhold(cr);

View File

@ -545,8 +545,8 @@ zpl_get_link_common(struct dentry *dentry, struct inode *ip, char **link)
iov.iov_len = MAXPATHLEN; iov.iov_len = MAXPATHLEN;
iov.iov_base = kmem_zalloc(MAXPATHLEN, KM_SLEEP); iov.iov_base = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
uio_t uio; zfs_uio_t uio;
uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, MAXPATHLEN - 1, 0); zfs_uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, MAXPATHLEN - 1, 0);
cookie = spl_fstrans_mark(); cookie = spl_fstrans_mark();
error = -zfs_readlink(ip, &uio, cr); error = -zfs_readlink(ip, &uio, cr);

View File

@ -306,15 +306,15 @@ zpl_xattr_get_dir(struct inode *ip, const char *name, void *value,
iov.iov_base = (void *)value; iov.iov_base = (void *)value;
iov.iov_len = size; iov.iov_len = size;
uio_t uio; zfs_uio_t uio;
uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, size, 0); zfs_uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, size, 0);
cookie = spl_fstrans_mark(); cookie = spl_fstrans_mark();
error = -zfs_read(ITOZ(xip), &uio, 0, cr); error = -zfs_read(ITOZ(xip), &uio, 0, cr);
spl_fstrans_unmark(cookie); spl_fstrans_unmark(cookie);
if (error == 0) if (error == 0)
error = size - uio_resid(&uio); error = size - zfs_uio_resid(&uio);
out: out:
if (xzp) if (xzp)
zrele(xzp); zrele(xzp);

View File

@ -85,9 +85,9 @@ zvol_write(void *arg)
zv_request_t *zvr = arg; zv_request_t *zvr = arg;
struct bio *bio = zvr->bio; struct bio *bio = zvr->bio;
int error = 0; int error = 0;
uio_t uio; zfs_uio_t uio;
uio_bvec_init(&uio, bio); zfs_uio_bvec_init(&uio, bio);
zvol_state_t *zv = zvr->zv; zvol_state_t *zv = zvr->zv;
ASSERT3P(zv, !=, NULL); ASSERT3P(zv, !=, NULL);
@ -247,9 +247,9 @@ zvol_read(void *arg)
zv_request_t *zvr = arg; zv_request_t *zvr = arg;
struct bio *bio = zvr->bio; struct bio *bio = zvr->bio;
int error = 0; int error = 0;
uio_t uio; zfs_uio_t uio;
uio_bvec_init(&uio, bio); zfs_uio_bvec_init(&uio, bio);
zvol_state_t *zv = zvr->zv; zvol_state_t *zv = zvr->zv;
ASSERT3P(zv, !=, NULL); ASSERT3P(zv, !=, NULL);

View File

@ -1171,7 +1171,7 @@ dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
#ifdef _KERNEL #ifdef _KERNEL
int int
dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size) dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size)
{ {
dmu_buf_t **dbp; dmu_buf_t **dbp;
int numbufs, i, err; int numbufs, i, err;
@ -1180,7 +1180,7 @@ dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size)
* NB: we could do this block-at-a-time, but it's nice * NB: we could do this block-at-a-time, but it's nice
* to be reading in parallel. * to be reading in parallel.
*/ */
err = dmu_buf_hold_array_by_dnode(dn, uio_offset(uio), size, err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
TRUE, FTAG, &numbufs, &dbp, 0); TRUE, FTAG, &numbufs, &dbp, 0);
if (err) if (err)
return (err); return (err);
@ -1192,16 +1192,12 @@ dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size)
ASSERT(size > 0); ASSERT(size > 0);
bufoff = uio_offset(uio) - db->db_offset; bufoff = zfs_uio_offset(uio) - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size); tocpy = MIN(db->db_size - bufoff, size);
#ifdef __FreeBSD__ err = zfs_uio_fault_move((char *)db->db_data + bufoff, tocpy,
err = vn_io_fault_uiomove((char *)db->db_data + bufoff, UIO_READ, uio);
tocpy, uio);
#else
err = uiomove((char *)db->db_data + bufoff, tocpy,
UIO_READ, uio);
#endif
if (err) if (err)
break; break;
@ -1215,14 +1211,14 @@ dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size)
/* /*
* Read 'size' bytes into the uio buffer. * Read 'size' bytes into the uio buffer.
* From object zdb->db_object. * From object zdb->db_object.
* Starting at offset uio->uio_loffset. * Starting at zfs_uio_offset(uio).
* *
* If the caller already has a dbuf in the target object * If the caller already has a dbuf in the target object
* (e.g. its bonus buffer), this routine is faster than dmu_read_uio(), * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
* because we don't have to find the dnode_t for the object. * because we don't have to find the dnode_t for the object.
*/ */
int int
dmu_read_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size) dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size)
{ {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb; dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
dnode_t *dn; dnode_t *dn;
@ -1242,10 +1238,10 @@ dmu_read_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size)
/* /*
* Read 'size' bytes into the uio buffer. * Read 'size' bytes into the uio buffer.
* From the specified object * From the specified object
* Starting at offset uio->uio_loffset. * Starting at offset zfs_uio_offset(uio).
*/ */
int int
dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size) dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size)
{ {
dnode_t *dn; dnode_t *dn;
int err; int err;
@ -1265,14 +1261,14 @@ dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
} }
int int
dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx) dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx)
{ {
dmu_buf_t **dbp; dmu_buf_t **dbp;
int numbufs; int numbufs;
int err = 0; int err = 0;
int i; int i;
err = dmu_buf_hold_array_by_dnode(dn, uio_offset(uio), size, err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH); FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
if (err) if (err)
return (err); return (err);
@ -1284,7 +1280,7 @@ dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
ASSERT(size > 0); ASSERT(size > 0);
bufoff = uio_offset(uio) - db->db_offset; bufoff = zfs_uio_offset(uio) - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size); tocpy = MIN(db->db_size - bufoff, size);
ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
@ -1295,18 +1291,14 @@ dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
dmu_buf_will_dirty(db, tx); dmu_buf_will_dirty(db, tx);
/* /*
* XXX uiomove could block forever (eg.nfs-backed * XXX zfs_uiomove could block forever (eg.nfs-backed
* pages). There needs to be a uiolockdown() function * pages). There needs to be a uiolockdown() function
* to lock the pages in memory, so that uiomove won't * to lock the pages in memory, so that zfs_uiomove won't
* block. * block.
*/ */
#ifdef __FreeBSD__ err = zfs_uio_fault_move((char *)db->db_data + bufoff,
err = vn_io_fault_uiomove((char *)db->db_data + bufoff, tocpy, UIO_WRITE, uio);
tocpy, uio);
#else
err = uiomove((char *)db->db_data + bufoff, tocpy,
UIO_WRITE, uio);
#endif
if (tocpy == db->db_size) if (tocpy == db->db_size)
dmu_buf_fill_done(db, tx); dmu_buf_fill_done(db, tx);
@ -1323,14 +1315,14 @@ dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
/* /*
* Write 'size' bytes from the uio buffer. * Write 'size' bytes from the uio buffer.
* To object zdb->db_object. * To object zdb->db_object.
* Starting at offset uio->uio_loffset. * Starting at offset zfs_uio_offset(uio).
* *
* If the caller already has a dbuf in the target object * If the caller already has a dbuf in the target object
* (e.g. its bonus buffer), this routine is faster than dmu_write_uio(), * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
* because we don't have to find the dnode_t for the object. * because we don't have to find the dnode_t for the object.
*/ */
int int
dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size, dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx) dmu_tx_t *tx)
{ {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb; dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
@ -1351,10 +1343,10 @@ dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size,
/* /*
* Write 'size' bytes from the uio buffer. * Write 'size' bytes from the uio buffer.
* To the specified object. * To the specified object.
* Starting at offset uio->uio_loffset. * Starting at offset zfs_uio_offset(uio).
*/ */
int int
dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size, dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx) dmu_tx_t *tx)
{ {
dnode_t *dn; dnode_t *dn;

View File

@ -1502,7 +1502,7 @@ sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen)
#ifdef _KERNEL #ifdef _KERNEL
int int
sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, uio_t *uio) sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, zfs_uio_t *uio)
{ {
int error; int error;
sa_bulk_attr_t bulk; sa_bulk_attr_t bulk;
@ -1515,8 +1515,8 @@ sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, uio_t *uio)
mutex_enter(&hdl->sa_lock); mutex_enter(&hdl->sa_lock);
if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) == 0) { if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) == 0) {
error = uiomove((void *)bulk.sa_addr, MIN(bulk.sa_size, error = zfs_uiomove((void *)bulk.sa_addr, MIN(bulk.sa_size,
uio_resid(uio)), UIO_READ, uio); zfs_uio_resid(uio)), UIO_READ, uio);
} }
mutex_exit(&hdl->sa_lock); mutex_exit(&hdl->sa_lock);
return (error); return (error);

View File

@ -71,7 +71,7 @@ sa_attr_reg_t zfs_attr_table[ZPL_END+1] = {
#ifdef _KERNEL #ifdef _KERNEL
int int
zfs_sa_readlink(znode_t *zp, uio_t *uio) zfs_sa_readlink(znode_t *zp, zfs_uio_t *uio)
{ {
dmu_buf_t *db = sa_get_db(zp->z_sa_hdl); dmu_buf_t *db = sa_get_db(zp->z_sa_hdl);
size_t bufsz; size_t bufsz;
@ -79,15 +79,16 @@ zfs_sa_readlink(znode_t *zp, uio_t *uio)
bufsz = zp->z_size; bufsz = zp->z_size;
if (bufsz + ZFS_OLD_ZNODE_PHYS_SIZE <= db->db_size) { if (bufsz + ZFS_OLD_ZNODE_PHYS_SIZE <= db->db_size) {
error = uiomove((caddr_t)db->db_data + error = zfs_uiomove((caddr_t)db->db_data +
ZFS_OLD_ZNODE_PHYS_SIZE, ZFS_OLD_ZNODE_PHYS_SIZE,
MIN((size_t)bufsz, uio_resid(uio)), UIO_READ, uio); MIN((size_t)bufsz, zfs_uio_resid(uio)), UIO_READ, uio);
} else { } else {
dmu_buf_t *dbp; dmu_buf_t *dbp;
if ((error = dmu_buf_hold(ZTOZSB(zp)->z_os, zp->z_id, if ((error = dmu_buf_hold(ZTOZSB(zp)->z_os, zp->z_id,
0, FTAG, &dbp, DMU_READ_NO_PREFETCH)) == 0) { 0, FTAG, &dbp, DMU_READ_NO_PREFETCH)) == 0) {
error = uiomove(dbp->db_data, error = zfs_uiomove(dbp->db_data,
MIN((size_t)bufsz, uio_resid(uio)), UIO_READ, uio); MIN((size_t)bufsz, zfs_uio_resid(uio)), UIO_READ,
uio);
dmu_buf_rele(dbp, FTAG); dmu_buf_rele(dbp, FTAG);
} }
} }

View File

@ -187,7 +187,7 @@ static unsigned long zfs_vnops_read_chunk_size = 1024 * 1024; /* Tunable */
*/ */
/* ARGSUSED */ /* ARGSUSED */
int int
zfs_read(struct znode *zp, uio_t *uio, int ioflag, cred_t *cr) zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{ {
int error = 0; int error = 0;
boolean_t frsync = B_FALSE; boolean_t frsync = B_FALSE;
@ -210,7 +210,7 @@ zfs_read(struct znode *zp, uio_t *uio, int ioflag, cred_t *cr)
/* /*
* Validate file offset * Validate file offset
*/ */
if (uio->uio_loffset < (offset_t)0) { if (zfs_uio_offset(uio) < (offset_t)0) {
ZFS_EXIT(zfsvfs); ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL)); return (SET_ERROR(EINVAL));
} }
@ -218,7 +218,7 @@ zfs_read(struct znode *zp, uio_t *uio, int ioflag, cred_t *cr)
/* /*
* Fasttrack empty reads * Fasttrack empty reads
*/ */
if (uio->uio_resid == 0) { if (zfs_uio_resid(uio) == 0) {
ZFS_EXIT(zfsvfs); ZFS_EXIT(zfsvfs);
return (0); return (0);
} }
@ -242,26 +242,26 @@ zfs_read(struct znode *zp, uio_t *uio, int ioflag, cred_t *cr)
* Lock the range against changes. * Lock the range against changes.
*/ */
zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock, zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
uio->uio_loffset, uio->uio_resid, RL_READER); zfs_uio_offset(uio), zfs_uio_resid(uio), RL_READER);
/* /*
* If we are reading past end-of-file we can skip * If we are reading past end-of-file we can skip
* to the end; but we might still need to set atime. * to the end; but we might still need to set atime.
*/ */
if (uio->uio_loffset >= zp->z_size) { if (zfs_uio_offset(uio) >= zp->z_size) {
error = 0; error = 0;
goto out; goto out;
} }
ASSERT(uio->uio_loffset < zp->z_size); ASSERT(zfs_uio_offset(uio) < zp->z_size);
ssize_t n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset); ssize_t n = MIN(zfs_uio_resid(uio), zp->z_size - zfs_uio_offset(uio));
ssize_t start_resid = n; ssize_t start_resid = n;
while (n > 0) { while (n > 0) {
ssize_t nbytes = MIN(n, zfs_vnops_read_chunk_size - ssize_t nbytes = MIN(n, zfs_vnops_read_chunk_size -
P2PHASE(uio->uio_loffset, zfs_vnops_read_chunk_size)); P2PHASE(zfs_uio_offset(uio), zfs_vnops_read_chunk_size));
#ifdef UIO_NOCOPY #ifdef UIO_NOCOPY
if (uio->uio_segflg == UIO_NOCOPY) if (zfs_uio_segflg(uio) == UIO_NOCOPY)
error = mappedread_sf(zp, nbytes, uio); error = mappedread_sf(zp, nbytes, uio);
else else
#endif #endif
@ -314,10 +314,10 @@ out:
/* ARGSUSED */ /* ARGSUSED */
int int
zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{ {
int error = 0; int error = 0;
ssize_t start_resid = uio->uio_resid; ssize_t start_resid = zfs_uio_resid(uio);
/* /*
* Fasttrack empty write * Fasttrack empty write
@ -356,7 +356,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
*/ */
if ((zp->z_pflags & ZFS_IMMUTABLE) || if ((zp->z_pflags & ZFS_IMMUTABLE) ||
((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) && ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
(uio->uio_loffset < zp->z_size))) { (zfs_uio_offset(uio) < zp->z_size))) {
ZFS_EXIT(zfsvfs); ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM)); return (SET_ERROR(EPERM));
} }
@ -364,7 +364,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
/* /*
* Validate file offset * Validate file offset
*/ */
offset_t woff = ioflag & O_APPEND ? zp->z_size : uio->uio_loffset; offset_t woff = ioflag & O_APPEND ? zp->z_size : zfs_uio_offset(uio);
if (woff < 0) { if (woff < 0) {
ZFS_EXIT(zfsvfs); ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL)); return (SET_ERROR(EINVAL));
@ -377,7 +377,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
* don't hold up txg. * don't hold up txg.
* Skip this if uio contains loaned arc_buf. * Skip this if uio contains loaned arc_buf.
*/ */
if (uio_prefaultpages(MIN(n, max_blksz), uio)) { if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) {
ZFS_EXIT(zfsvfs); ZFS_EXIT(zfsvfs);
return (SET_ERROR(EFAULT)); return (SET_ERROR(EFAULT));
} }
@ -401,7 +401,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
*/ */
woff = zp->z_size; woff = zp->z_size;
} }
uio->uio_loffset = woff; zfs_uio_setoffset(uio, woff);
} else { } else {
/* /*
* Note that if the file block size will change as a result of * Note that if the file block size will change as a result of
@ -411,7 +411,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER); lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
} }
if (zn_rlimit_fsize(zp, uio, uio->uio_td)) { if (zn_rlimit_fsize(zp, uio)) {
zfs_rangelock_exit(lr); zfs_rangelock_exit(lr);
ZFS_EXIT(zfsvfs); ZFS_EXIT(zfsvfs);
return (SET_ERROR(EFBIG)); return (SET_ERROR(EFBIG));
@ -441,7 +441,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
* and allows us to do more fine-grained space accounting. * and allows us to do more fine-grained space accounting.
*/ */
while (n > 0) { while (n > 0) {
woff = uio->uio_loffset; woff = zfs_uio_offset(uio);
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) || if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) || zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
@ -469,7 +469,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
max_blksz); max_blksz);
ASSERT(abuf != NULL); ASSERT(abuf != NULL);
ASSERT(arc_buf_size(abuf) == max_blksz); ASSERT(arc_buf_size(abuf) == max_blksz);
if ((error = uiocopy(abuf->b_data, max_blksz, if ((error = zfs_uiocopy(abuf->b_data, max_blksz,
UIO_WRITE, uio, &cbytes))) { UIO_WRITE, uio, &cbytes))) {
dmu_return_arcbuf(abuf); dmu_return_arcbuf(abuf);
break; break;
@ -530,11 +530,11 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
ssize_t tx_bytes; ssize_t tx_bytes;
if (abuf == NULL) { if (abuf == NULL) {
tx_bytes = uio->uio_resid; tx_bytes = zfs_uio_resid(uio);
uio_fault_disable(uio, B_TRUE); zfs_uio_fault_disable(uio, B_TRUE);
error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl), error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, nbytes, tx); uio, nbytes, tx);
uio_fault_disable(uio, B_FALSE); zfs_uio_fault_disable(uio, B_FALSE);
#ifdef __linux__ #ifdef __linux__
if (error == EFAULT) { if (error == EFAULT) {
dmu_tx_commit(tx); dmu_tx_commit(tx);
@ -542,12 +542,13 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
* Account for partial writes before * Account for partial writes before
* continuing the loop. * continuing the loop.
* Update needs to occur before the next * Update needs to occur before the next
* uio_prefaultpages, or prefaultpages may * zfs_uio_prefaultpages, or prefaultpages may
* error, and we may break the loop early. * error, and we may break the loop early.
*/ */
if (tx_bytes != uio->uio_resid) if (tx_bytes != zfs_uio_resid(uio))
n -= tx_bytes - uio->uio_resid; n -= tx_bytes - zfs_uio_resid(uio);
if (uio_prefaultpages(MIN(n, max_blksz), uio)) { if (zfs_uio_prefaultpages(MIN(n, max_blksz),
uio)) {
break; break;
} }
continue; continue;
@ -557,7 +558,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
dmu_tx_commit(tx); dmu_tx_commit(tx);
break; break;
} }
tx_bytes -= uio->uio_resid; tx_bytes -= zfs_uio_resid(uio);
} else { } else {
/* Implied by abuf != NULL: */ /* Implied by abuf != NULL: */
ASSERT3S(n, >=, max_blksz); ASSERT3S(n, >=, max_blksz);
@ -582,8 +583,8 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
dmu_tx_commit(tx); dmu_tx_commit(tx);
break; break;
} }
ASSERT3S(nbytes, <=, uio->uio_resid); ASSERT3S(nbytes, <=, zfs_uio_resid(uio));
uioskip(uio, nbytes); zfs_uioskip(uio, nbytes);
tx_bytes = nbytes; tx_bytes = nbytes;
} }
if (tx_bytes && zn_has_cached_data(zp) && if (tx_bytes && zn_has_cached_data(zp) &&
@ -634,9 +635,9 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
* Update the file size (zp_size) if it has changed; * Update the file size (zp_size) if it has changed;
* account for possible concurrent updates. * account for possible concurrent updates.
*/ */
while ((end_size = zp->z_size) < uio->uio_loffset) { while ((end_size = zp->z_size) < zfs_uio_offset(uio)) {
(void) atomic_cas_64(&zp->z_size, end_size, (void) atomic_cas_64(&zp->z_size, end_size,
uio->uio_loffset); zfs_uio_offset(uio));
ASSERT(error == 0); ASSERT(error == 0);
} }
/* /*
@ -659,7 +660,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
n -= nbytes; n -= nbytes;
if (n > 0) { if (n > 0) {
if (uio_prefaultpages(MIN(n, max_blksz), uio)) { if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) {
error = SET_ERROR(EFAULT); error = SET_ERROR(EFAULT);
break; break;
} }
@ -674,7 +675,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
* uio data is inaccessible return an error. Otherwise, it's * uio data is inaccessible return an error. Otherwise, it's
* at least a partial write, so it's successful. * at least a partial write, so it's successful.
*/ */
if (zfsvfs->z_replay || uio->uio_resid == start_resid || if (zfsvfs->z_replay || zfs_uio_resid(uio) == start_resid ||
error == EFAULT) { error == EFAULT) {
ZFS_EXIT(zfsvfs); ZFS_EXIT(zfsvfs);
return (error); return (error);
@ -684,7 +685,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, zp->z_id); zil_commit(zilog, zp->z_id);
const int64_t nwritten = start_resid - uio->uio_resid; const int64_t nwritten = start_resid - zfs_uio_resid(uio);
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten); dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
task_io_account_write(nwritten); task_io_account_write(nwritten);

View File

@ -42,8 +42,8 @@
* 2. In the same process, context #2, mmap page fault (which means the mm_sem * 2. In the same process, context #2, mmap page fault (which means the mm_sem
* is hold) occurred, zfs_dirty_inode open a txg failed, and wait previous * is hold) occurred, zfs_dirty_inode open a txg failed, and wait previous
* txg "n" completed. * txg "n" completed.
* 3. context #1 call uiomove to write, however page fault is occurred in * 3. context #1 call zfs_uiomove to write, however page fault is occurred in
* uiomove, which means it needs mm_sem, but mm_sem is hold by * zfs_uiomove, which means it needs mm_sem, but mm_sem is hold by
* context #2, so it stuck and can't complete, then txg "n" will not * context #2, so it stuck and can't complete, then txg "n" will not
* complete. * complete.
* *