Bugfix/fix uio partial copies
In zfs_write(), the loop continues to the next iteration without accounting for partial copies occurring in uiomove_iov() when copy_from_user/__copy_from_user_inatomic return a non-zero status. This results in "zfs: accessing past end of object..." in the kernel log, and the write failing.

Account for partial copies and update the uio struct before returning EFAULT, and leave a comment explaining why this is done.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: ilbsmart <wgqimut@gmail.com>
Signed-off-by: Fabio Scaccabarozzi <fsvm88@gmail.com>
Closes #8673
Closes #10148
commit c9e3efdb3a
parent 0929c4de39
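Background for the fix, as a user-space sketch that is not part of the commit: copy_from_user() and __copy_from_user_inatomic() return the number of bytes that could not be copied, so a non-zero return can still mean that part of the buffer made it across. bounded_copy() below is a hypothetical stand-in for a copy that faults partway through the user buffer.

#include <assert.h>
#include <stddef.h>
#include <string.h>

/*
 * Hypothetical stand-in: copies at most 'copyable' bytes and, like
 * copy_from_user(), returns the number of bytes NOT copied.
 */
static size_t
bounded_copy(char *dst, const char *src, size_t cnt, size_t copyable)
{
	size_t done = cnt < copyable ? cnt : copyable;

	memcpy(dst, src, done);
	return (cnt - done);
}

int
main(void)
{
	char src[8192] = { 0 }, dst[8192];
	size_t b_left = bounded_copy(dst, src, sizeof (dst), 4096);

	/* Non-zero return, yet 4096 bytes really were copied. */
	assert(b_left == 4096);
	assert(sizeof (dst) - b_left == 4096);
	return (0);
}

Ignoring the bytes that did copy is exactly the bug described above: the uio was left stale, and the caller's bookkeeping drifted past the end of the object.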
@@ -829,6 +829,15 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 			uio->uio_fault_disable = B_FALSE;
 			if (error == EFAULT) {
 				dmu_tx_commit(tx);
+				/*
+				 * Account for partial writes before
+				 * continuing the loop.
+				 * Update needs to occur before the next
+				 * uio_prefaultpages, or prefaultpages may
+				 * error, and we may break the loop early.
+				 */
+				if (tx_bytes != uio->uio_resid)
+					n -= tx_bytes - uio->uio_resid;
 				if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
 					break;
 				}
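Worked example of the new accounting (illustrative only, with made-up numbers): tx_bytes was captured from uio->uio_resid before the attempted transfer, so tx_bytes - uio->uio_resid is what the partial copy already consumed, and n must shrink by the same amount before the next uio_prefaultpages() call.

#include <assert.h>
#include <stddef.h>

int
main(void)
{
	size_t n = 131072;		/* bytes still to write in this call */
	size_t tx_bytes = 131072;	/* uio_resid before the transfer */
	size_t uio_resid = 122880;	/* uio_resid after a partial copy */

	/* 8192 bytes were consumed even though EFAULT was returned. */
	if (tx_bytes != uio_resid)
		n -= tx_bytes - uio_resid;
	assert(n == 122880);

	/*
	 * Without the adjustment, n would stay at 131072 and the loop
	 * would attempt to write 8192 bytes past the end of the
	 * requested range.
	 */
	return (0);
}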
@@ -80,22 +80,31 @@ uiomove_iov(void *p, size_t n, enum uio_rw rw, struct uio *uio)
 				if (copy_to_user(iov->iov_base+skip, p, cnt))
 					return (EFAULT);
 			} else {
+				unsigned long b_left = 0;
 				if (uio->uio_fault_disable) {
 					if (!zfs_access_ok(VERIFY_READ,
 					    (iov->iov_base + skip), cnt)) {
 						return (EFAULT);
 					}
 					pagefault_disable();
-					if (__copy_from_user_inatomic(p,
-					    (iov->iov_base + skip), cnt)) {
-						pagefault_enable();
-						return (EFAULT);
-					}
+					b_left =
+					    __copy_from_user_inatomic(p,
+					    (iov->iov_base + skip), cnt);
 					pagefault_enable();
 				} else {
-					if (copy_from_user(p,
-					    (iov->iov_base + skip), cnt))
-						return (EFAULT);
+					b_left =
+					    copy_from_user(p,
+					    (iov->iov_base + skip), cnt);
+				}
+				if (b_left > 0) {
+					unsigned long c_bytes =
+					    cnt - b_left;
+					uio->uio_skip += c_bytes;
+					ASSERT3U(uio->uio_skip, <,
+					    iov->iov_len);
+					uio->uio_resid -= c_bytes;
+					uio->uio_loffset += c_bytes;
+					return (EFAULT);
 				}
 			}
 			break;
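The uiomove_iov() side in a nutshell, as an illustrative user-space sketch; struct toy_uio and account_partial() are hypothetical stand-ins, not the real uio_t API. When b_left is non-zero, cnt - b_left bytes were still transferred, so the skip, resid, and loffset fields advance by that amount before EFAULT is returned.

#include <assert.h>
#include <stddef.h>

#define	TOY_EFAULT	14

struct toy_uio {
	size_t	skip;		/* offset into the current iovec */
	size_t	resid;		/* bytes remaining in the request */
	long long loffset;	/* file offset */
};

static int
account_partial(struct toy_uio *uio, size_t cnt, size_t b_left)
{
	if (b_left > 0) {
		size_t c_bytes = cnt - b_left;	/* bytes actually copied */

		uio->skip += c_bytes;
		uio->resid -= c_bytes;
		uio->loffset += c_bytes;
		return (TOY_EFAULT);
	}
	return (0);
}

int
main(void)
{
	struct toy_uio uio = { .skip = 0, .resid = 16384, .loffset = 4096 };

	/* An 8192-byte copy that faulted with 3000 bytes left uncopied. */
	assert(account_partial(&uio, 8192, 3000) == TOY_EFAULT);
	assert(uio.skip == 5192);
	assert(uio.resid == 16384 - 5192);
	assert(uio.loffset == 4096 + 5192);
	return (0);
}

Leaving the uio in this consistent state is what lets the zfs_write() hunk above detect the consumed bytes via tx_bytes != uio->uio_resid instead of re-issuing them.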