Linux 4.6 compat: PAGE_CACHE_SIZE removal

As described in torvalds/linux@4a2d057e, the
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were originally introduced
to make it possible to add bigger chunks to the page cache.  This
never panned out, and the macros have therefore been removed from the
kernel.

ZFS has been updated to use the PAGE_{SIZE,SHIFT,MASK,ALIGN} macros,
and calls to page_cache_release() have been replaced with put_page().
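
Since each removed macro was a straight alias for its PAGE_*
counterpart, the conversion is purely mechanical.  For illustration
only, a hypothetical compat shim (not something this commit adds)
would reduce to one-line aliases:

    /*
     * Hypothetical compat shim -- NOT part of this commit, which
     * instead converts every call site directly.  Shown only to make
     * the 1:1 mapping explicit.
     */
    #ifndef PAGE_CACHE_SIZE
    #define	PAGE_CACHE_SHIFT	PAGE_SHIFT
    #define	PAGE_CACHE_SIZE		PAGE_SIZE
    #define	PAGE_CACHE_MASK		PAGE_MASK
    #define	PAGE_CACHE_ALIGN(addr)	PAGE_ALIGN(addr)
    #define	page_cache_release(page)	put_page(page)
    #endif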

There was no need to introduce a configure check for this because
these interfaces have existed for a very long time.
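
The page arithmetic at the converted call sites is unchanged; only
the macro names differ.  As a minimal userspace sketch of that
arithmetic, assuming 4 KiB pages (the kernel derives PAGE_SIZE and
PAGE_MASK from PAGE_SHIFT in the same way):

    #include <stdio.h>
    #include <stdint.h>

    /* Userspace stand-ins for the kernel macros, assuming 4 KiB pages. */
    #define	PAGE_SHIFT	12
    #define	PAGE_SIZE	((uint64_t)1 << PAGE_SHIFT)
    #define	PAGE_MASK	(~(PAGE_SIZE - 1))

    int
    main(void)
    {
    	uint64_t start = 0x12345;		/* arbitrary file offset */
    	uint64_t off = start & (PAGE_SIZE - 1);	/* offset within page */
    	uint64_t base = start & PAGE_MASK;	/* page-aligned start */
    	uint64_t index = base >> PAGE_SHIFT;	/* page cache index */

    	/* Prints: off=837 base=0x12000 index=18 */
    	printf("off=%llu base=0x%llx index=%llu\n",
    	    (unsigned long long)off, (unsigned long long)base,
    	    (unsigned long long)index);
    	return (0);
    }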

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Chunwei Chen <tuxoko@gmail.com>
Closes #4489
Author: Brian Behlendorf (2016-04-05 12:39:37 -07:00), committed by Ned Bass
parent 60a4ea3f94
commit d746e2ea0e
2 changed files with 23 additions and 24 deletions

module/zfs/zfs_vnops.c

@@ -332,11 +332,11 @@ update_pages(struct inode *ip, int64_t start, int len,
 	int64_t	off;
 	void *pb;
 
-	off = start & (PAGE_CACHE_SIZE-1);
-	for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
-		nbytes = MIN(PAGE_CACHE_SIZE - off, len);
+	off = start & (PAGE_SIZE-1);
+	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
+		nbytes = MIN(PAGE_SIZE - off, len);
 
-		pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
+		pp = find_lock_page(mp, start >> PAGE_SHIFT);
 		if (pp) {
 			if (mapping_writably_mapped(mp))
 				flush_dcache_page(pp);
@@ -353,7 +353,7 @@ update_pages(struct inode *ip, int64_t start, int len,
 			SetPageUptodate(pp);
 			ClearPageError(pp);
 			unlock_page(pp);
-			page_cache_release(pp);
+			put_page(pp);
 		}
 
 		len -= nbytes;
@@ -384,11 +384,11 @@ mappedread(struct inode *ip, int nbytes, uio_t *uio)
 	void *pb;
 
 	start = uio->uio_loffset;
-	off = start & (PAGE_CACHE_SIZE-1);
-	for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
-		bytes = MIN(PAGE_CACHE_SIZE - off, len);
+	off = start & (PAGE_SIZE-1);
+	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
+		bytes = MIN(PAGE_SIZE - off, len);
 
-		pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
+		pp = find_lock_page(mp, start >> PAGE_SHIFT);
 		if (pp) {
 			ASSERT(PageUptodate(pp));
 
@@ -401,7 +401,7 @@ mappedread(struct inode *ip, int nbytes, uio_t *uio)
 
 			mark_page_accessed(pp);
 			unlock_page(pp);
-			page_cache_release(pp);
+			put_page(pp);
 		} else {
 			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
 			    uio, bytes);
@@ -3894,8 +3894,8 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 
 	pgoff = page_offset(pp);	/* Page byte-offset in file */
 	offset = i_size_read(ip);	/* File length in bytes */
-	pglen = MIN(PAGE_CACHE_SIZE,	/* Page length in bytes */
-	    P2ROUNDUP(offset, PAGE_CACHE_SIZE)-pgoff);
+	pglen = MIN(PAGE_SIZE,		/* Page length in bytes */
+	    P2ROUNDUP(offset, PAGE_SIZE)-pgoff);
 
 	/* Page is beyond end of file */
 	if (pgoff >= offset) {
@@ -4006,7 +4006,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	}
 
 	va = kmap(pp);
-	ASSERT3U(pglen, <=, PAGE_CACHE_SIZE);
+	ASSERT3U(pglen, <=, PAGE_SIZE);
 	dmu_write(zsb->z_os, zp->z_id, pgoff, pglen, va, tx);
 	kunmap(pp);
 
@@ -4181,7 +4181,7 @@ zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
 	int err;
 
 	os = zsb->z_os;
-	io_len = nr_pages << PAGE_CACHE_SHIFT;
+	io_len = nr_pages << PAGE_SHIFT;
 	i_size = i_size_read(ip);
 	io_off = page_offset(pl[0]);
 

module/zfs/zfs_znode.c

@@ -1510,13 +1510,12 @@ zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
 	int64_t	off;
 	void *pb;
 
-	ASSERT((start & PAGE_CACHE_MASK) ==
-	    ((start + len - 1) & PAGE_CACHE_MASK));
+	ASSERT((start & PAGE_MASK) == ((start + len - 1) & PAGE_MASK));
 
-	off = start & (PAGE_CACHE_SIZE - 1);
-	start &= PAGE_CACHE_MASK;
+	off = start & (PAGE_SIZE - 1);
+	start &= PAGE_MASK;
 
-	pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
+	pp = find_lock_page(mp, start >> PAGE_SHIFT);
 	if (pp) {
 		if (mapping_writably_mapped(mp))
 			flush_dcache_page(pp);
@@ -1532,7 +1531,7 @@ zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
 		SetPageUptodate(pp);
 		ClearPageError(pp);
 		unlock_page(pp);
-		page_cache_release(pp);
+		put_page(pp);
 	}
 }
 
@@ -1579,14 +1578,14 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
 		loff_t first_page_offset, last_page_offset;
 
 		/* first possible full page in hole */
-		first_page = (off + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		first_page = (off + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		/* last page of hole */
-		last_page = (off + len) >> PAGE_CACHE_SHIFT;
+		last_page = (off + len) >> PAGE_SHIFT;
 
 		/* offset of first_page */
-		first_page_offset = first_page << PAGE_CACHE_SHIFT;
+		first_page_offset = first_page << PAGE_SHIFT;
 		/* offset of last_page */
-		last_page_offset = last_page << PAGE_CACHE_SHIFT;
+		last_page_offset = last_page << PAGE_SHIFT;
 
 		/* truncate whole pages */
 		if (last_page_offset > first_page_offset) {
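
To make the zfs_free_range() arithmetic above concrete, consider a
hole with off=1000 and len=10000 (bytes [1000, 11000)) and 4 KiB
pages; the values below assume PAGE_SHIFT is 12 and are worked by
hand, not taken from the commit:

    /*
     * first_page        = (1000 + 4096 - 1) >> 12 = 1
     * last_page         = (1000 + 10000) >> 12    = 2
     * first_page_offset = 1 << 12                 = 4096
     * last_page_offset  = 2 << 12                 = 8192
     *
     * Since 8192 > 4096, only the fully-covered page range [4096, 8192)
     * is truncated here; the partial pages at each end of the hole are
     * zeroed separately (see zfs_zero_partial_page() above).
     */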