Lines matching refs: fio

413 static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)  in __bio_alloc()  argument
415 struct f2fs_sb_info *sbi = fio->sbi; in __bio_alloc()
420 f2fs_target_device(sbi, fio->new_blkaddr, bio); in __bio_alloc()
421 if (is_read_io(fio->op)) { in __bio_alloc()
428 fio->type, fio->temp); in __bio_alloc()
430 if (fio->io_wbc) in __bio_alloc()
431 wbc_init_bio(fio->io_wbc, bio); in __bio_alloc()
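
Read together, the __bio_alloc() matches show the shape of bio allocation: the bio is sized for npages, pointed at the device that backs fio->new_blkaddr via f2fs_target_device(), given write-side completion context keyed on fio->type and fio->temp when the op is not a read, and tied to the writeback control with wbc_init_bio() when fio->io_wbc is set. A minimal stand-alone model of that flow follows; bio_model, fio_model and target_device_for() are simplified stand-ins, not the kernel types or helpers.

#include <stdbool.h>
#include <stdlib.h>

/* Minimal stand-ins for the kernel objects referenced in the listing. */
struct wbc { int dummy; };
struct bio_model {
        int dev_index;          /* chosen by the f2fs_target_device() stand-in */
        int max_pages;
        struct wbc *wbc;        /* set by the wbc_init_bio() stand-in */
        bool write_completion;  /* per-type/per-temp end_io only for writes */
};
struct fio_model {
        unsigned long long new_blkaddr;
        bool is_read;           /* is_read_io(fio->op) collapsed to a flag */
        struct wbc *io_wbc;
};

static int target_device_for(unsigned long long blkaddr)
{
        (void)blkaddr;
        return 0;               /* single-device case; multi-device f2fs picks by range */
}

static struct bio_model *bio_alloc_model(const struct fio_model *fio, int npages)
{
        struct bio_model *bio = calloc(1, sizeof(*bio));

        if (!bio)
                return NULL;
        bio->max_pages = npages;
        bio->dev_index = target_device_for(fio->new_blkaddr);
        if (!fio->is_read)
                bio->write_completion = true;
        if (fio->io_wbc)
                bio->wbc = fio->io_wbc;
        return bio;
}
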
438 const struct f2fs_io_info *fio, in f2fs_set_bio_crypt_ctx() argument
445 if (!fio || !fio->encrypted_page) in f2fs_set_bio_crypt_ctx()
453 const struct f2fs_io_info *fio) in f2fs_crypt_mergeable_bio() argument
459 if (fio && fio->encrypted_page) in f2fs_crypt_mergeable_bio()
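
The two fscrypt helpers gate on fio->encrypted_page: a page that was already encrypted into a software bounce page must not get an inline-crypt context, while ordinary page-cache pages do, and the merge test mirrors the same split. A hedged model follows; the return paths that the listing does not show (such as the bio-has-crypt-context check) are reconstructed and passed in as parameters rather than asserted.

#include <stdbool.h>

struct fio_crypt_model {
        void *encrypted_page;   /* non-NULL means a software bounce page is used */
};

/* Model of f2fs_set_bio_crypt_ctx(): only I/O that was NOT pre-encrypted into
 * a bounce page gets an inline-crypt context attached to the bio. */
static bool should_set_inline_crypt_ctx(const struct fio_crypt_model *fio)
{
        return !fio || !fio->encrypted_page;
}

/* Model of f2fs_crypt_mergeable_bio(): a pre-encrypted bounce page only merges
 * into a bio without a crypt context; otherwise fscrypt decides. */
static bool crypt_mergeable(const struct fio_crypt_model *fio,
                            bool bio_has_crypt_ctx, bool fscrypt_says_ok)
{
        if (fio && fio->encrypted_page)
                return !bio_has_crypt_ctx;
        return fscrypt_says_ok;  /* fscrypt_mergeable_bio() stand-in */
}
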
524 static void __attach_io_flag(struct f2fs_io_info *fio) in __attach_io_flag() argument
526 struct f2fs_sb_info *sbi = fio->sbi; in __attach_io_flag()
530 if (fio->type == DATA) in __attach_io_flag()
532 else if (fio->type == NODE) in __attach_io_flag()
546 if ((1 << fio->temp) & meta_flag) in __attach_io_flag()
547 fio->op_flags |= REQ_META; in __attach_io_flag()
548 if ((1 << fio->temp) & fua_flag) in __attach_io_flag()
549 fio->op_flags |= REQ_FUA; in __attach_io_flag()
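
Pieced together, __attach_io_flag() selects per-type bitmaps depending on whether the I/O is DATA or NODE and then, if the current temperature's bit is set in those masks, ORs REQ_META and/or REQ_FUA into fio->op_flags. A small model of that decision, with placeholder bit values and a simplified struct instead of the kernel definitions:

/* Placeholder bit values and simplified types, not the kernel definitions. */
#define REQ_META (1u << 0)
#define REQ_FUA  (1u << 1)

enum page_type { DATA, NODE };
enum temp_type { HOT, WARM, COLD };

struct io_flag_fio {
        enum page_type type;
        enum temp_type temp;
        unsigned int op_flags;
};

/* Model of __attach_io_flag(): pick the per-type bitmaps, then mark this
 * temperature's requests as META and/or FUA. */
static void attach_io_flag(struct io_flag_fio *fio,
                           unsigned int data_meta, unsigned int data_fua,
                           unsigned int node_meta, unsigned int node_fua)
{
        unsigned int meta_flag = 0, fua_flag = 0;

        if (fio->type == DATA) {
                meta_flag = data_meta;
                fua_flag = data_fua;
        } else if (fio->type == NODE) {
                meta_flag = node_meta;
                fua_flag = node_fua;
        }
        if ((1u << fio->temp) & meta_flag)
                fio->op_flags |= REQ_META;
        if ((1u << fio->temp) & fua_flag)
                fio->op_flags |= REQ_FUA;
}
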
554 struct f2fs_io_info *fio = &io->fio; in __submit_merged_bio() local
559 __attach_io_flag(fio); in __submit_merged_bio()
560 bio_set_op_attrs(io->bio, fio->op, fio->op_flags); in __submit_merged_bio()
562 if (is_read_io(fio->op)) in __submit_merged_bio()
563 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio); in __submit_merged_bio()
565 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio); in __submit_merged_bio()
567 __submit_bio(io->sbi, io->bio, fio->type); in __submit_merged_bio()
618 io->fio.type = META_FLUSH; in __f2fs_submit_merged_write()
619 io->fio.op = REQ_OP_WRITE; in __f2fs_submit_merged_write()
620 io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC; in __f2fs_submit_merged_write()
622 io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA; in __f2fs_submit_merged_write()
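
__submit_merged_bio() takes the queued io->fio, attaches the I/O flags, applies fio->op / fio->op_flags to the bio with bio_set_op_attrs(), traces it as a read or a write, and submits it. The __f2fs_submit_merged_write() matches show the metadata-flush case: the queued fio is rewritten to META_FLUSH with REQ_OP_WRITE and REQ_META | REQ_PRIO | REQ_SYNC, and REQ_PREFLUSH | REQ_FUA are added conditionally. The guarding condition is not in the listing, so it is modelled below as a barriers_enabled flag; the flag bits are placeholders.

#include <stdbool.h>

#define REQ_META     (1u << 0)  /* placeholder bits, not the block layer's */
#define REQ_PRIO     (1u << 1)
#define REQ_SYNC     (1u << 2)
#define REQ_PREFLUSH (1u << 3)
#define REQ_FUA      (1u << 4)

enum page_type { DATA, NODE, META, META_FLUSH };
enum req_op_model { REQ_OP_READ, REQ_OP_WRITE };

struct queued_fio {
        enum page_type type;
        enum req_op_model op;
        unsigned int op_flags;
};

/* Model of the META_FLUSH setup done in __f2fs_submit_merged_write() before
 * the merged bio is handed to __submit_merged_bio(). */
static void prepare_meta_flush(struct queued_fio *fio, bool barriers_enabled)
{
        fio->type = META_FLUSH;
        fio->op = REQ_OP_WRITE;
        fio->op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
        if (barriers_enabled)
                fio->op_flags |= REQ_PREFLUSH | REQ_FUA;
}
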
676 int f2fs_submit_page_bio(struct f2fs_io_info *fio) in f2fs_submit_page_bio() argument
679 struct page *page = fio->encrypted_page ? in f2fs_submit_page_bio()
680 fio->encrypted_page : fio->page; in f2fs_submit_page_bio()
682 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, in f2fs_submit_page_bio()
683 fio->is_por ? META_POR : (__is_meta_io(fio) ? in f2fs_submit_page_bio()
687 trace_f2fs_submit_page_bio(page, fio); in f2fs_submit_page_bio()
690 bio = __bio_alloc(fio, 1); in f2fs_submit_page_bio()
692 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, in f2fs_submit_page_bio()
693 fio->page->index, fio, GFP_NOIO); in f2fs_submit_page_bio()
700 if (fio->io_wbc && !is_read_io(fio->op)) in f2fs_submit_page_bio()
701 wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE); in f2fs_submit_page_bio()
703 __attach_io_flag(fio); in f2fs_submit_page_bio()
704 bio_set_op_attrs(bio, fio->op, fio->op_flags); in f2fs_submit_page_bio()
706 inc_page_count(fio->sbi, is_read_io(fio->op) ? in f2fs_submit_page_bio()
707 __read_io_type(page): WB_DATA_TYPE(fio->page)); in f2fs_submit_page_bio()
709 __submit_bio(fio->sbi, bio, fio->type); in f2fs_submit_page_bio()
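
f2fs_submit_page_bio() submits a single page: it picks the bounce page when one exists, validates fio->new_blkaddr against a class that depends on fio->is_por and __is_meta_io(), allocates a one-page bio, sets the crypt context, accounts the page to the cgroup writeback owner for writes, attaches the flags, bumps the in-flight page count and submits. A sketch of the two small selections visible in the listing; the enum names mirror the fragments, everything else is a stand-in.

#include <stdbool.h>

/* Model of the address-class selection at the top of f2fs_submit_page_bio():
 * post-recovery pages are validated as META_POR, metadata I/O as META_GENERIC,
 * ordinary data as DATA_GENERIC. */
enum blkaddr_class { DATA_GENERIC, META_GENERIC, META_POR };

static enum blkaddr_class blkaddr_class_for(bool is_por, bool is_meta_io)
{
        if (is_por)
                return META_POR;
        return is_meta_io ? META_GENERIC : DATA_GENERIC;
}

/* The submitted page is the encrypted bounce page when one exists,
 * otherwise the page-cache page itself. */
static void *pick_submit_page(void *encrypted_page, void *page)
{
        return encrypted_page ? encrypted_page : page;
}
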
725 struct f2fs_io_info *fio) in io_type_is_mergeable() argument
727 if (io->fio.op != fio->op) in io_type_is_mergeable()
729 return io->fio.op_flags == fio->op_flags; in io_type_is_mergeable()
734 struct f2fs_io_info *fio, in io_is_mergeable() argument
738 if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) { in io_is_mergeable()
750 return io_type_is_mergeable(io, fio); in io_is_mergeable()
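
io_type_is_mergeable() is the core merge test: the in-flight bio and the incoming fio must agree on both op and op_flags. io_is_mergeable() additionally applies an alignment constraint for DATA and NODE I/O when F2FS_IO_ALIGNED() is set (that branch's body is not part of the matches) before falling back to the type check. A direct model of the core test, with a simplified key struct in place of the kernel types:

#include <stdbool.h>

struct merge_key {
        int op;
        unsigned int op_flags;
};

/* Model of io_type_is_mergeable(): a page may join the in-flight bio only
 * when both the request operation and its flags match exactly. */
static bool io_type_is_mergeable_model(const struct merge_key *queued,
                                       const struct merge_key *incoming)
{
        if (queued->op != incoming->op)
                return false;
        return queued->op_flags == incoming->op_flags;
}
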
777 static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio, in add_ipu_page() argument
780 struct f2fs_sb_info *sbi = fio->sbi; in add_ipu_page()
798 *fio->last_block, in add_ipu_page()
799 fio->new_blkaddr)); in add_ipu_page()
801 fio->page->mapping->host, in add_ipu_page()
802 fio->page->index, fio) && in add_ipu_page()
881 int f2fs_merge_page_bio(struct f2fs_io_info *fio) in f2fs_merge_page_bio() argument
883 struct bio *bio = *fio->bio; in f2fs_merge_page_bio()
884 struct page *page = fio->encrypted_page ? in f2fs_merge_page_bio()
885 fio->encrypted_page : fio->page; in f2fs_merge_page_bio()
887 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, in f2fs_merge_page_bio()
888 __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC)) in f2fs_merge_page_bio()
891 trace_f2fs_submit_page_bio(page, fio); in f2fs_merge_page_bio()
893 if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block, in f2fs_merge_page_bio()
894 fio->new_blkaddr)) in f2fs_merge_page_bio()
895 f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL); in f2fs_merge_page_bio()
898 bio = __bio_alloc(fio, BIO_MAX_PAGES); in f2fs_merge_page_bio()
899 __attach_io_flag(fio); in f2fs_merge_page_bio()
900 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, in f2fs_merge_page_bio()
901 fio->page->index, fio, GFP_NOIO); in f2fs_merge_page_bio()
902 bio_set_op_attrs(bio, fio->op, fio->op_flags); in f2fs_merge_page_bio()
904 add_bio_entry(fio->sbi, bio, page, fio->temp); in f2fs_merge_page_bio()
906 if (add_ipu_page(fio, &bio, page)) in f2fs_merge_page_bio()
910 if (fio->io_wbc) in f2fs_merge_page_bio()
911 wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE); in f2fs_merge_page_bio()
913 inc_page_count(fio->sbi, WB_DATA_TYPE(page)); in f2fs_merge_page_bio()
915 *fio->last_block = fio->new_blkaddr; in f2fs_merge_page_bio()
916 *fio->bio = bio; in f2fs_merge_page_bio()
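
f2fs_merge_page_bio() drives in-place-update writes through a caller-owned bio: if the existing bio cannot take the page because fio->new_blkaddr does not follow *fio->last_block (page_is_mergeable(), plus device and alignment checks not shown), the pending IPU bio is flushed via f2fs_submit_merged_ipu_write() and a fresh one is allocated with __bio_alloc(fio, BIO_MAX_PAGES); either way the cursor at *fio->last_block and *fio->bio is advanced. A simplified model of that bookkeeping; blocks_contiguous() is a stand-in for the full page_is_mergeable() test.

#include <stdbool.h>
#include <stddef.h>

/* Model of the bookkeeping at the tail of f2fs_merge_page_bio(): the caller
 * keeps its own bio and last-block cursor, and the helper advances both. */
struct ipu_cursor {
        void *bio;                       /* caller-owned in-flight bio */
        unsigned long long last_block;
};

static bool blocks_contiguous(unsigned long long last, unsigned long long next)
{
        return last + 1 == next;         /* device/alignment checks omitted */
}

static void merge_page_model(struct ipu_cursor *cur, void *fresh_bio,
                             unsigned long long new_blkaddr)
{
        if (cur->bio && !blocks_contiguous(cur->last_block, new_blkaddr))
                cur->bio = NULL;         /* f2fs_submit_merged_ipu_write() stand-in */
        if (!cur->bio)
                cur->bio = fresh_bio;    /* __bio_alloc(fio, BIO_MAX_PAGES) stand-in */
        cur->last_block = new_blkaddr;
}
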
921 void f2fs_submit_page_write(struct f2fs_io_info *fio) in f2fs_submit_page_write() argument
923 struct f2fs_sb_info *sbi = fio->sbi; in f2fs_submit_page_write()
924 enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); in f2fs_submit_page_write()
925 struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp; in f2fs_submit_page_write()
928 f2fs_bug_on(sbi, is_read_io(fio->op)); in f2fs_submit_page_write()
932 if (fio->in_list) { in f2fs_submit_page_write()
938 fio = list_first_entry(&io->io_list, in f2fs_submit_page_write()
940 list_del(&fio->list); in f2fs_submit_page_write()
944 verify_fio_blkaddr(fio); in f2fs_submit_page_write()
946 if (fio->encrypted_page) in f2fs_submit_page_write()
947 bio_page = fio->encrypted_page; in f2fs_submit_page_write()
948 else if (fio->compressed_page) in f2fs_submit_page_write()
949 bio_page = fio->compressed_page; in f2fs_submit_page_write()
951 bio_page = fio->page; in f2fs_submit_page_write()
954 fio->submitted = true; in f2fs_submit_page_write()
959 (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio, in f2fs_submit_page_write()
960 fio->new_blkaddr) || in f2fs_submit_page_write()
961 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host, in f2fs_submit_page_write()
962 bio_page->index, fio))) in f2fs_submit_page_write()
967 (fio->type == DATA || fio->type == NODE) && in f2fs_submit_page_write()
968 fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) { in f2fs_submit_page_write()
970 fio->retry = true; in f2fs_submit_page_write()
973 io->bio = __bio_alloc(fio, BIO_MAX_PAGES); in f2fs_submit_page_write()
974 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host, in f2fs_submit_page_write()
975 bio_page->index, fio, GFP_NOIO); in f2fs_submit_page_write()
976 io->fio = *fio; in f2fs_submit_page_write()
984 if (fio->io_wbc) in f2fs_submit_page_write()
985 wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE); in f2fs_submit_page_write()
987 io->last_block_in_bio = fio->new_blkaddr; in f2fs_submit_page_write()
989 trace_f2fs_submit_page_write(fio->page, fio); in f2fs_submit_page_write()
991 if (fio->in_list) in f2fs_submit_page_write()
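
f2fs_submit_page_write() is the merged write path proper: it refuses reads, optionally pulls the next fio from io->io_list, verifies the block address, chooses which page actually goes into the bio, and either merges into io->bio or flushes it and starts a new one, with an extra retry when I/O alignment would be violated per the F2FS_IO_SIZE_MASK() check. The page-selection priority visible in the matches is modelled below; the struct and function names are stand-ins.

/* Model of the page selection in f2fs_submit_page_write(): the encrypted
 * bounce page wins over the compressed cluster page, which wins over the
 * plain page-cache page. */
struct write_fio_model {
        void *encrypted_page;
        void *compressed_page;
        void *page;
};

static void *pick_bio_page(const struct write_fio_model *fio)
{
        if (fio->encrypted_page)
                return fio->encrypted_page;
        if (fio->compressed_page)
                return fio->compressed_page;
        return fio->page;
}
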
2469 int f2fs_encrypt_one_page(struct f2fs_io_info *fio) in f2fs_encrypt_one_page() argument
2471 struct inode *inode = fio->page->mapping->host; in f2fs_encrypt_one_page()
2478 page = fio->compressed_page ? fio->compressed_page : fio->page; in f2fs_encrypt_one_page()
2481 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr); in f2fs_encrypt_one_page()
2487 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page, in f2fs_encrypt_one_page()
2489 if (IS_ERR(fio->encrypted_page)) { in f2fs_encrypt_one_page()
2491 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) { in f2fs_encrypt_one_page()
2492 f2fs_flush_merged_writes(fio->sbi); in f2fs_encrypt_one_page()
2497 return PTR_ERR(fio->encrypted_page); in f2fs_encrypt_one_page()
2500 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr); in f2fs_encrypt_one_page()
2504 page_address(fio->encrypted_page), PAGE_SIZE); in f2fs_encrypt_one_page()
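
f2fs_encrypt_one_page() encrypts the page (or its compressed cluster page) into a bounce page with fscrypt_encrypt_pagecache_blocks() after waiting for writeback on fio->old_blkaddr, and on success refreshes any copy of the old block cached in META_MAPPING with the newly encrypted bytes. The -ENOMEM branch flushes the merged writes and retries; the backoff the kernel inserts between attempts is not part of the matches and is only noted in a comment. A trimmed model of that error handling, with function pointers standing in for the fscrypt and f2fs helpers:

#include <errno.h>

/* Model of the error handling around fscrypt_encrypt_pagecache_blocks() in
 * f2fs_encrypt_one_page(): -ENOMEM flushes the merged writes and retries;
 * any other result is returned unchanged. */
static int encrypt_one_page_model(int (*encrypt)(void *page, void **bounce),
                                  void (*flush_merged_writes)(void),
                                  void *page, void **bounce_out)
{
        for (;;) {
                int err = encrypt(page, bounce_out);

                if (err != -ENOMEM)
                        return err;      /* 0 on success, or a hard error */
                flush_merged_writes();   /* f2fs_flush_merged_writes() stand-in */
                /* the unmatched kernel lines add a short backoff here */
        }
}
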
2511 struct f2fs_io_info *fio) in check_inplace_update_policy() argument
2534 fio && fio->op == REQ_OP_WRITE && in check_inplace_update_policy()
2535 !(fio->op_flags & REQ_SYNC) && in check_inplace_update_policy()
2544 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) && in check_inplace_update_policy()
2545 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr))) in check_inplace_update_policy()
2551 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio) in f2fs_should_update_inplace() argument
2564 return check_inplace_update_policy(inode, fio); in f2fs_should_update_inplace()
2567 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio) in f2fs_should_update_outplace() argument
2574 if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK)) in f2fs_should_update_outplace()
2592 if (fio) { in f2fs_should_update_outplace()
2593 if (page_private_gcing(fio->page)) in f2fs_should_update_outplace()
2595 if (page_private_dummy(fio->page)) in f2fs_should_update_outplace()
2598 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr))) in f2fs_should_update_outplace()
2604 static inline bool need_inplace_update(struct f2fs_io_info *fio) in need_inplace_update() argument
2606 struct inode *inode = fio->page->mapping->host; in need_inplace_update()
2608 if (f2fs_should_update_outplace(inode, fio)) in need_inplace_update()
2611 return f2fs_should_update_inplace(inode, fio); in need_inplace_update()
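
The in-place-update policy helpers split as follows: check_inplace_update_policy() looks at, among other things, whether the write is asynchronous (REQ_OP_WRITE without REQ_SYNC) and, with checkpointing disabled, whether fio->old_blkaddr is checkpointed; f2fs_should_update_outplace() appears to force out-of-place writes for SBI_NEED_FSCK, pages tagged page_private_gcing() or page_private_dummy(), and checkpointed blocks while SBI_CP_DISABLED is set; need_inplace_update() gives the out-of-place check priority. A model of that priority and of the out-of-place disjunction; the parameter names are descriptive stand-ins, since the exact return values of the kernel helpers are not all visible in the listing.

#include <stdbool.h>

/* Grounded in the f2fs_should_update_outplace() matches: a pending FSCK,
 * pages tagged for GC, dummy pages, and (with checkpointing disabled)
 * still-checkpointed old blocks all appear to force an out-of-place write. */
static bool must_write_outplace(bool need_fsck, bool page_gcing, bool page_dummy,
                                bool cp_disabled_and_checkpointed)
{
        return need_fsck || page_gcing || page_dummy ||
               cp_disabled_and_checkpointed;
}

/* Model of need_inplace_update(): the out-of-place conditions win; only when
 * none applies is the in-place-update policy consulted. */
static bool need_inplace_update_model(bool must_go_outplace, bool ipu_policy_ok)
{
        if (must_go_outplace)
                return false;
        return ipu_policy_ok;
}
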
2614 int f2fs_do_write_data_page(struct f2fs_io_info *fio) in f2fs_do_write_data_page() argument
2616 struct page *page = fio->page; in f2fs_do_write_data_page()
2625 if (need_inplace_update(fio) && in f2fs_do_write_data_page()
2627 fio->old_blkaddr = ei.blk + page->index - ei.fofs; in f2fs_do_write_data_page()
2629 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, in f2fs_do_write_data_page()
2634 fio->need_lock = LOCK_DONE; in f2fs_do_write_data_page()
2639 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi)) in f2fs_do_write_data_page()
2646 fio->old_blkaddr = dn.data_blkaddr; in f2fs_do_write_data_page()
2649 if (fio->old_blkaddr == NULL_ADDR) { in f2fs_do_write_data_page()
2655 if (__is_valid_data_blkaddr(fio->old_blkaddr) && in f2fs_do_write_data_page()
2656 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, in f2fs_do_write_data_page()
2666 (__is_valid_data_blkaddr(fio->old_blkaddr) && in f2fs_do_write_data_page()
2667 need_inplace_update(fio))) { in f2fs_do_write_data_page()
2668 err = f2fs_encrypt_one_page(fio); in f2fs_do_write_data_page()
2675 if (fio->need_lock == LOCK_REQ) in f2fs_do_write_data_page()
2676 f2fs_unlock_op(fio->sbi); in f2fs_do_write_data_page()
2677 err = f2fs_inplace_write_data(fio); in f2fs_do_write_data_page()
2680 fscrypt_finalize_bounce_page(&fio->encrypted_page); in f2fs_do_write_data_page()
2686 trace_f2fs_do_write_data_page(fio->page, IPU); in f2fs_do_write_data_page()
2690 if (fio->need_lock == LOCK_RETRY) { in f2fs_do_write_data_page()
2691 if (!f2fs_trylock_op(fio->sbi)) { in f2fs_do_write_data_page()
2695 fio->need_lock = LOCK_REQ; in f2fs_do_write_data_page()
2698 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false); in f2fs_do_write_data_page()
2702 fio->version = ni.version; in f2fs_do_write_data_page()
2704 err = f2fs_encrypt_one_page(fio); in f2fs_do_write_data_page()
2711 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR) in f2fs_do_write_data_page()
2712 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false); in f2fs_do_write_data_page()
2715 f2fs_outplace_write_data(&dn, fio); in f2fs_do_write_data_page()
2723 if (fio->need_lock == LOCK_REQ) in f2fs_do_write_data_page()
2724 f2fs_unlock_op(fio->sbi); in f2fs_do_write_data_page()
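
f2fs_do_write_data_page() decides between in-place (IPU) and out-of-place (OPU) writes: a valid fio->old_blkaddr plus a positive need_inplace_update() takes the f2fs_inplace_write_data() path, otherwise the page goes through f2fs_outplace_write_data() with a freshly looked-up node version. The matches also show the lock protocol: LOCK_REQ takes f2fs_trylock_op() up front, LOCK_DONE skips it on the extent-cache fast path, and LOCK_RETRY upgrades itself to LOCK_REQ when the trylock fails. A minimal model of the path selection:

#include <stdbool.h>

/* Model of the top-level decision in f2fs_do_write_data_page(): a valid old
 * block address plus a positive in-place-update policy rewrites the page
 * where it sits (IPU); everything else allocates a new block (OPU). */
enum write_path { WRITE_IPU, WRITE_OPU };

static enum write_path choose_write_path(bool old_blkaddr_valid, bool want_ipu)
{
        if (old_blkaddr_valid && want_ipu)
                return WRITE_IPU;
        return WRITE_OPU;
}
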
2745 struct f2fs_io_info fio = { in f2fs_write_single_data_page() local
2814 fio.need_lock = LOCK_DONE; in f2fs_write_single_data_page()
2815 err = f2fs_do_write_data_page(&fio); in f2fs_write_single_data_page()
2838 err = f2fs_do_write_data_page(&fio); in f2fs_write_single_data_page()
2840 fio.need_lock = LOCK_REQ; in f2fs_write_single_data_page()
2841 err = f2fs_do_write_data_page(&fio); in f2fs_write_single_data_page()
2883 *submitted = fio.submitted ? 1 : 0; in f2fs_write_single_data_page()
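
f2fs_write_single_data_page() builds the fio on the stack, first attempts the write with the lighter lock mode, and only when f2fs_do_write_data_page() asks to be retried does it switch fio.need_lock to LOCK_REQ and try again, finally reporting fio.submitted to its caller. The retry trigger itself is not among the matched lines, so it is passed in as a parameter in the sketch below:

#include <stdbool.h>

enum lock_mode { LOCK_REQ, LOCK_RETRY, LOCK_DONE };

struct single_write {
        enum lock_mode need_lock;
        bool submitted;
};

/* Model of the retry in f2fs_write_single_data_page(): a first attempt with
 * the caller's lock mode, one escalation to LOCK_REQ on a retryable error,
 * then fio.submitted is propagated back. */
static int write_single_page_model(struct single_write *fio,
                                   int (*do_write)(struct single_write *fio),
                                   int retryable_err, int *submitted)
{
        int err = do_write(fio);

        if (err == retryable_err) {
                fio->need_lock = LOCK_REQ;
                err = do_write(fio);
        }
        *submitted = fio->submitted ? 1 : 0;
        return err;
}
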