
iomap: Change calling convention for zeroing

Pass the full length to iomap_zero() and dax_iomap_zero(), and have
them return how many bytes they actually handled.  This is preparatory
work for handling THP, although it looks like DAX could actually take
advantage of it if there's a larger contiguous area.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Authored by Matthew Wilcox (Oracle) on 2020-09-21 08:58:42 -07:00; committed by Darrick J. Wong
parent e25ba8cbfd
commit 81ee8e52a7
3 changed files with 22 additions and 27 deletions
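
Viewed from the caller, the convention flips from "tell the helper exactly how much to zero" to "offer the helper the whole remaining range and let it report progress": the old helpers took a page-relative offset plus a byte count and returned zero or a negative errno, the new ones take the full remaining length and return how many bytes they zeroed (s64, negative on error). A minimal sketch of the resulting loop shape, with a hypothetical zero_fn() standing in for dax_iomap_zero()/iomap_zero():

/*
 * Sketch only: zero_fn() is a stand-in for dax_iomap_zero() or iomap_zero()
 * after this patch.  It may zero less than @length (currently at most up to
 * the next page boundary) and returns the bytes it handled, or a negative
 * errno.
 */
static loff_t zero_range_sketch(loff_t pos, loff_t length, bool *did_zero)
{
	loff_t written = 0;

	do {
		s64 bytes = zero_fn(pos, length);

		if (bytes < 0)
			return bytes;		/* propagate the error */
		pos += bytes;			/* advance by what was handled */
		length -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (length > 0);

	return written;
}

Because the callee decides how much it can handle per call, a later implementation can zero a whole THP, or a larger contiguous DAX extent, in a single iteration without the caller changing.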

fs/dax.c

@@ -1037,18 +1037,18 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
 	return ret;
 }
 
-int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
-		   struct iomap *iomap)
+s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
 {
 	sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
 	pgoff_t pgoff;
 	long rc, id;
 	void *kaddr;
 	bool page_aligned = false;
-
+	unsigned offset = offset_in_page(pos);
+	unsigned size = min_t(u64, PAGE_SIZE - offset, length);
 
 	if (IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
-	    IS_ALIGNED(size, PAGE_SIZE))
+	    (size == PAGE_SIZE))
		page_aligned = true;
 
 	rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
@@ -1058,8 +1058,7 @@ int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
 
 	id = dax_read_lock();
 	if (page_aligned)
-		rc = dax_zero_page_range(iomap->dax_dev, pgoff,
-				size >> PAGE_SHIFT);
+		rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
 	else
 		rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
 	if (rc < 0) {
@@ -1072,7 +1071,7 @@ int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
 		dax_flush(iomap->dax_dev, kaddr + offset, size);
 	}
 	dax_read_unlock(id);
-	return 0;
+	return size;
 }
 
 static loff_t
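
On the DAX side the helper now computes its own chunk, so a single call never crosses a page boundary but still reports partial progress to the caller. A rough worked trace of that clamp, assuming 4KiB pages and a page-aligned sector (the numbers are illustrative, not from the patch):

	/* first call: pos = 0x1200, length = 0x3000 */
	unsigned offset = offset_in_page(pos);				/* 0x200 */
	unsigned size = min_t(u64, PAGE_SIZE - offset, length);	/* 0xe00 */
	/*
	 * 0xe00 is less than a full page, so the unaligned path runs
	 * dax_direct_access() + memset() + dax_flush() and returns 0xe00.
	 * The caller advances to pos = 0x2000, length = 0x2200; the second
	 * call is page aligned, so dax_zero_page_range(..., 1) zeroes one
	 * full page and returns PAGE_SIZE.
	 */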

fs/iomap/buffered-io.c

@@ -898,11 +898,13 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
 }
 EXPORT_SYMBOL_GPL(iomap_file_unshare);
 
-static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
-		unsigned bytes, struct iomap *iomap, struct iomap *srcmap)
+static s64 iomap_zero(struct inode *inode, loff_t pos, u64 length,
+		struct iomap *iomap, struct iomap *srcmap)
 {
 	struct page *page;
 	int status;
+	unsigned offset = offset_in_page(pos);
+	unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
 
 	status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
 	if (status)
@@ -914,38 +916,33 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
 	return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
 }
 
-static loff_t
-iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
-		void *data, struct iomap *iomap, struct iomap *srcmap)
+static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
+		loff_t length, void *data, struct iomap *iomap,
+		struct iomap *srcmap)
 {
 	bool *did_zero = data;
 	loff_t written = 0;
-	int status;
 
 	/* already zeroed? we're done. */
 	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
-		return count;
+		return length;
 
 	do {
-		unsigned offset, bytes;
-
-		offset = offset_in_page(pos);
-		bytes = min_t(loff_t, PAGE_SIZE - offset, count);
+		s64 bytes;
 
 		if (IS_DAX(inode))
-			status = dax_iomap_zero(pos, offset, bytes, iomap);
+			bytes = dax_iomap_zero(pos, length, iomap);
 		else
-			status = iomap_zero(inode, pos, offset, bytes, iomap,
-					srcmap);
-		if (status < 0)
-			return status;
+			bytes = iomap_zero(inode, pos, length, iomap, srcmap);
+		if (bytes < 0)
+			return bytes;
 
 		pos += bytes;
-		count -= bytes;
+		length -= bytes;
 		written += bytes;
 		if (did_zero)
 			*did_zero = true;
-	} while (count > 0);
+	} while (length > 0);
 
 	return written;
 }
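
For context, the actor above is not called directly; iomap_zero_range() drives it through iomap_apply() until the whole range is done. Roughly, as a simplified sketch of the surrounding caller (abbreviated rather than quoted from the tree):

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		/* map the range, then hand each mapping to the actor */
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}

The actor's return value (the bytes written for this mapping) propagates back through iomap_apply(), so the new byte-count convention composes with the existing apply loop unchanged.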

include/linux/dax.h

@@ -214,8 +214,7 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 		pgoff_t index);
-int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
-		struct iomap *iomap);
+s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap);
 static inline bool dax_mapping(struct address_space *mapping)
 {
 	return mapping->host && IS_DAX(mapping->host);