Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
block: convert to bio_first_bvec_all & bio_first_page_all
This patch converts to bio_first_bvec_all() & bio_first_page_all() for
retrieving the first bvec/page, and prepares for supporting multipage bvec.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 86292abc5a
commit 263663cd3c
8 changed files with 13 additions and 13 deletions
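For reference, the two helpers this commit converts to live in include/linux/bio.h and were added earlier in the same series; the sketch below shows roughly what they amount to (bodies reconstructed from that series, so details may differ from the tree):

/* Sketch only: approximate form of the helpers used by this commit. */
static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	/* Callers must not pass a cloned bio: a clone shares its
	 * parent's bvec table rather than owning one of its own. */
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	/* The page of the first bvec, i.e. what callers used to spell
	 * as bio->bi_io_vec[0].bv_page. */
	return bio_first_bvec_all(bio)->bv_page;
}

Funneling every "first bvec/page" access through these accessors is what allows the bvec table layout to change for multipage bvecs later without touching the callers again.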
drivers/block/drbd/drbd_bitmap.c
@@ -953,7 +953,7 @@ static void drbd_bm_endio(struct bio *bio)
 	struct drbd_bm_aio_ctx *ctx = bio->bi_private;
 	struct drbd_device *device = ctx->device;
 	struct drbd_bitmap *b = device->bitmap;
-	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
+	unsigned int idx = bm_page_to_idx(bio_first_page_all(bio));
 
 	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
 	    !bm_test_page_unchanged(b->bm_pages[idx]))
drivers/block/zram/zram_drv.c
@@ -430,7 +430,7 @@ static void put_entry_bdev(struct zram *zram, unsigned long entry)
 
 static void zram_page_end_io(struct bio *bio)
 {
-	struct page *page = bio->bi_io_vec[0].bv_page;
+	struct page *page = bio_first_page_all(bio);
 
 	page_endio(page, op_is_write(bio_op(bio)),
 			blk_status_to_errno(bio->bi_status));
drivers/md/bcache/super.c
@@ -211,7 +211,7 @@ static void write_bdev_super_endio(struct bio *bio)
 
 static void __write_super(struct cache_sb *sb, struct bio *bio)
 {
-	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
+	struct cache_sb *out = page_address(bio_first_page_all(bio));
 	unsigned i;
 
 	bio->bi_iter.bi_sector = SB_SECTOR;
@@ -1166,7 +1166,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 	dc->bdev->bd_holder = dc;
 
 	bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
-	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
+	bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
 	get_page(sb_page);
 
 	if (cached_dev_init(dc, sb->block_size << 9))
@@ -1810,7 +1810,7 @@ void bch_cache_release(struct kobject *kobj)
 		free_fifo(&ca->free[i]);
 
 	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
-		put_page(ca->sb_bio.bi_io_vec[0].bv_page);
+		put_page(bio_first_page_all(&ca->sb_bio));
 
 	if (!IS_ERR_OR_NULL(ca->bdev))
 		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
@@ -1864,7 +1864,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 	ca->bdev->bd_holder = ca;
 
 	bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
-	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+	bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
 	get_page(sb_page);
 
 	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
fs/btrfs/compression.c
@@ -563,7 +563,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	/* we need the actual starting offset of this extent in the file */
 	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree,
-				   page_offset(bio->bi_io_vec->bv_page),
+				   page_offset(bio_first_page_all(bio)),
 				   PAGE_SIZE);
 	read_unlock(&em_tree->lock);
 	if (!em)
fs/btrfs/inode.c
@@ -8074,7 +8074,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
 	ASSERT(bio->bi_vcnt == 1);
 	io_tree = &BTRFS_I(inode)->io_tree;
 	failure_tree = &BTRFS_I(inode)->io_failure_tree;
-	ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
+	ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(inode));
 
 	done->uptodate = 1;
 	ASSERT(!bio_flagged(bio, BIO_CLONED));
@@ -8164,7 +8164,7 @@ static void btrfs_retry_endio(struct bio *bio)
 	uptodate = 1;
 
 	ASSERT(bio->bi_vcnt == 1);
-	ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
+	ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(done->inode));
 
 	io_tree = &BTRFS_I(inode)->io_tree;
 	failure_tree = &BTRFS_I(inode)->io_failure_tree;
fs/f2fs/data.c
@@ -56,7 +56,7 @@ static void f2fs_read_end_io(struct bio *bio)
 	int i;
 
 #ifdef CONFIG_F2FS_FAULT_INJECTION
-	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
+	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)), FAULT_IO)) {
 		f2fs_show_injection_info(FAULT_IO);
 		bio->bi_status = BLK_STS_IOERR;
 	}
kernel/power/swap.c
@@ -240,7 +240,7 @@ static void hib_init_batch(struct hib_bio_batch *hb)
 static void hib_end_io(struct bio *bio)
 {
 	struct hib_bio_batch *hb = bio->bi_private;
-	struct page *page = bio->bi_io_vec[0].bv_page;
+	struct page *page = bio_first_page_all(bio);
 
 	if (bio->bi_status) {
 		pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
mm/page_io.c
@@ -50,7 +50,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
 
 void end_swap_bio_write(struct bio *bio)
 {
-	struct page *page = bio->bi_io_vec[0].bv_page;
+	struct page *page = bio_first_page_all(bio);
 
 	if (bio->bi_status) {
 		SetPageError(page);
@@ -122,7 +122,7 @@ static void swap_slot_free_notify(struct page *page)
 
 static void end_swap_bio_read(struct bio *bio)
 {
-	struct page *page = bio->bi_io_vec[0].bv_page;
+	struct page *page = bio_first_page_all(bio);
 	struct task_struct *waiter = bio->bi_private;
 
 	if (bio->bi_status) {