Lines Matching refs:bio

66 unsigned int sz = sizeof(struct bio) + extra_size;  in bio_find_or_create_slab()
235 void bio_uninit(struct bio *bio) in bio_uninit() argument
238 if (bio->bi_blkg) { in bio_uninit()
239 blkg_put(bio->bi_blkg); in bio_uninit()
240 bio->bi_blkg = NULL; in bio_uninit()
243 if (bio_integrity(bio)) in bio_uninit()
244 bio_integrity_free(bio); in bio_uninit()
246 bio_crypt_free_ctx(bio); in bio_uninit()
250 static void bio_free(struct bio *bio) in bio_free() argument
252 struct bio_set *bs = bio->bi_pool; in bio_free()
255 bio_uninit(bio); in bio_free()
258 bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio)); in bio_free()
263 p = bio; in bio_free()
269 kfree(bio); in bio_free()
278 void bio_init(struct bio *bio, struct bio_vec *table, in bio_init() argument
281 memset(bio, 0, sizeof(*bio)); in bio_init()
282 atomic_set(&bio->__bi_remaining, 1); in bio_init()
283 atomic_set(&bio->__bi_cnt, 1); in bio_init()
285 bio->bi_io_vec = table; in bio_init()
286 bio->bi_max_vecs = max_vecs; in bio_init()
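
Note: because bio_init() takes a caller-provided bio_vec table, a bio can live entirely on the stack. A minimal sketch of a synchronous single-page read, assuming the caller already holds references on the block_device and page (read_page_sync, bdev, page, and sector are hypothetical names):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int read_page_sync(struct block_device *bdev,
                              struct page *page, sector_t sector)
    {
        struct bio bio;
        struct bio_vec bvec;
        int ret;

        bio_init(&bio, &bvec, 1);       /* on-stack bio, one segment */
        bio_set_dev(&bio, bdev);
        bio.bi_iter.bi_sector = sector;
        bio.bi_opf = REQ_OP_READ;
        bio_add_page(&bio, page, PAGE_SIZE, 0);

        ret = submit_bio_wait(&bio);
        bio_uninit(&bio);       /* drop blkg/integrity/crypt state */
        return ret;             /* no bio_put(): nothing was allocated */
    }
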
300 void bio_reset(struct bio *bio) in bio_reset() argument
302 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS); in bio_reset()
304 bio_uninit(bio); in bio_reset()
306 memset(bio, 0, BIO_RESET_BYTES); in bio_reset()
307 bio->bi_flags = flags; in bio_reset()
308 atomic_set(&bio->__bi_remaining, 1); in bio_reset()
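
bio_reset() zeroes everything below BIO_RESET_BYTES but keeps the flags above BIO_RESET_BITS (including the bvec-pool index) and the bvec table itself, so a long-lived bio can be recycled instead of reallocated; bi_end_io and bi_private are cleared and must be re-set. A sketch of that reuse, with struct my_ctx, my_endio, and resubmit as hypothetical driver code:

    struct my_ctx {                     /* hypothetical driver state */
        struct bio *bio;
        struct block_device *bdev;
        struct page *page;
        int error;
    };

    static void my_endio(struct bio *bio)
    {
        struct my_ctx *ctx = bio->bi_private;

        ctx->error = blk_status_to_errno(bio->bi_status);
    }

    static void resubmit(struct my_ctx *ctx, sector_t sector)
    {
        struct bio *bio = ctx->bio;

        bio_reset(bio);                 /* back to a pristine state */
        bio_set_dev(bio, ctx->bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_opf = REQ_OP_WRITE;
        bio->bi_end_io = my_endio;      /* cleared by bio_reset() */
        bio->bi_private = ctx;
        bio_add_page(bio, ctx->page, PAGE_SIZE, 0);
        submit_bio(bio);
    }
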
312 static struct bio *__bio_chain_endio(struct bio *bio) in __bio_chain_endio() argument
314 struct bio *parent = bio->bi_private; in __bio_chain_endio()
316 if (bio->bi_status && !parent->bi_status) in __bio_chain_endio()
317 parent->bi_status = bio->bi_status; in __bio_chain_endio()
318 bio_put(bio); in __bio_chain_endio()
322 static void bio_chain_endio(struct bio *bio) in bio_chain_endio() argument
324 bio_endio(__bio_chain_endio(bio)); in bio_chain_endio()
338 void bio_chain(struct bio *bio, struct bio *parent) in bio_chain() argument
340 BUG_ON(bio->bi_private || bio->bi_end_io); in bio_chain()
342 bio->bi_private = parent; in bio_chain()
343 bio->bi_end_io = bio_chain_endio; in bio_chain()
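
bio_chain() makes parent wait for bio: completing the child only drops one count from the parent's __bi_remaining, and the parent's bi_end_io runs once every chained child and the parent itself have finished. This is how helpers that emit a stream of bios (discard, write-zeroes) keep a single completion, as in the blk_next_bio()-style loop sketched below (prev, new, nr_vecs, and more_work are hypothetical):

    while (more_work) {
        struct bio *new = bio_alloc(GFP_KERNEL, nr_vecs);

        if (prev) {
            bio_chain(prev, new);   /* "new" becomes prev's parent */
            submit_bio(prev);       /* its endio only drops a count */
        }
        /* fill "new" with the next chunk of the operation here */
        prev = new;
    }
    submit_bio(prev);               /* the last bio runs the real bi_end_io */
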
351 struct bio *bio; in bio_alloc_rescue() local
355 bio = bio_list_pop(&bs->rescue_list); in bio_alloc_rescue()
358 if (!bio) in bio_alloc_rescue()
361 submit_bio_noacct(bio); in bio_alloc_rescue()
368 struct bio *bio; in punt_bios_to_rescuer() local
386 while ((bio = bio_list_pop(&current->bio_list[0]))) in punt_bios_to_rescuer()
387 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
391 while ((bio = bio_list_pop(&current->bio_list[1]))) in punt_bios_to_rescuer()
392 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
437 struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs, in bio_alloc_bioset()
444 struct bio *bio; in bio_alloc_bioset() local
451 p = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask); in bio_alloc_bioset()
500 bio = p + front_pad; in bio_alloc_bioset()
501 bio_init(bio, NULL, 0); in bio_alloc_bioset()
516 bio->bi_flags |= idx << BVEC_POOL_OFFSET; in bio_alloc_bioset()
518 bvl = bio->bi_inline_vecs; in bio_alloc_bioset()
521 bio->bi_pool = bs; in bio_alloc_bioset()
522 bio->bi_max_vecs = nr_iovecs; in bio_alloc_bioset()
523 bio->bi_io_vec = bvl; in bio_alloc_bioset()
524 return bio; in bio_alloc_bioset()
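
bio_alloc_bioset() either kmallocs a standalone bio (bs == NULL) or carves one out of the bio_set's mempools, reserving front_pad bytes of caller-private memory directly in front of the struct bio. That enables the dm-style embedding idiom, sketched here under the assumption of a hypothetical struct my_io (the bio must be the last member):

    struct my_io {                      /* hypothetical per-I/O state */
        void *cookie;
        struct bio bio;                 /* must be the last member */
    };

    static struct bio_set my_bio_set;

    static int my_driver_init(void)
    {
        /* 64-deep mempool, bvec pools included, my_io embedded in front */
        return bioset_init(&my_bio_set, 64, offsetof(struct my_io, bio),
                           BIOSET_NEED_BVECS);
    }

    static struct my_io *my_io_alloc(unsigned int nr_vecs)
    {
        /* GFP_NOIO against a bioset retries from the mempool, not fails */
        struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bio_set);

        return container_of(bio, struct my_io, bio);
    }
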
532 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) in zero_fill_bio_iter() argument
538 __bio_for_each_segment(bv, bio, iter, start) { in zero_fill_bio_iter()
557 void bio_truncate(struct bio *bio, unsigned new_size) in bio_truncate() argument
564 if (new_size >= bio->bi_iter.bi_size) in bio_truncate()
567 if (bio_op(bio) != REQ_OP_READ) in bio_truncate()
570 bio_for_each_segment(bv, bio, iter) { in bio_truncate()
594 bio->bi_iter.bi_size = new_size; in bio_truncate()
609 void guard_bio_eod(struct bio *bio) in guard_bio_eod() argument
615 part = __disk_get_part(bio->bi_disk, bio->bi_partno); in guard_bio_eod()
619 maxsector = get_capacity(bio->bi_disk); in guard_bio_eod()
630 if (unlikely(bio->bi_iter.bi_sector >= maxsector)) in guard_bio_eod()
633 maxsector -= bio->bi_iter.bi_sector; in guard_bio_eod()
634 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) in guard_bio_eod()
637 bio_truncate(bio, maxsector << 9); in guard_bio_eod()
648 void bio_put(struct bio *bio) in bio_put() argument
650 if (!bio_flagged(bio, BIO_REFFED)) in bio_put()
651 bio_free(bio); in bio_put()
653 BIO_BUG_ON(!atomic_read(&bio->__bi_cnt)); in bio_put()
658 if (atomic_dec_and_test(&bio->__bi_cnt)) in bio_put()
659 bio_free(bio); in bio_put()
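
bio_put() frees immediately unless BIO_REFFED is set, i.e. unless bio_get() was used to take an extra reference; the submitter's initial reference is implicit. The pattern from the bio_get() documentation, for touching a bio after submission:

    bio_get(bio);               /* keep the bio alive past its endio */
    submit_bio(bio);
    if (bio->bi_status)         /* safe: we still hold a reference */
        pr_err("bio failed early\n");
    bio_put(bio);               /* drop our extra reference */
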
675 void __bio_clone_fast(struct bio *bio, struct bio *bio_src) in __bio_clone_fast() argument
677 BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio)); in __bio_clone_fast()
683 bio->bi_disk = bio_src->bi_disk; in __bio_clone_fast()
684 bio->bi_partno = bio_src->bi_partno; in __bio_clone_fast()
685 bio_set_flag(bio, BIO_CLONED); in __bio_clone_fast()
687 bio_set_flag(bio, BIO_THROTTLED); in __bio_clone_fast()
688 bio->bi_opf = bio_src->bi_opf; in __bio_clone_fast()
689 bio->bi_ioprio = bio_src->bi_ioprio; in __bio_clone_fast()
690 bio->bi_write_hint = bio_src->bi_write_hint; in __bio_clone_fast()
691 bio->bi_iter = bio_src->bi_iter; in __bio_clone_fast()
692 bio->bi_io_vec = bio_src->bi_io_vec; in __bio_clone_fast()
694 bio_clone_blkg_association(bio, bio_src); in __bio_clone_fast()
695 blkcg_bio_issue_init(bio); in __bio_clone_fast()
707 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) in bio_clone_fast() argument
709 struct bio *b; in bio_clone_fast()
715 __bio_clone_fast(b, bio); in bio_clone_fast()
717 if (bio_crypt_clone(b, bio, gfp_mask) < 0) in bio_clone_fast()
720 if (bio_integrity(bio) && in bio_clone_fast()
721 bio_integrity_clone(b, bio, gfp_mask) < 0) in bio_clone_fast()
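
bio_clone_fast() allocates from bs and __bio_clone_fast() copies the iterator, target device, and flags while sharing bi_io_vec with the source, so the clone is marked BIO_CLONED and must not grow. The stacked-driver remap pattern looks roughly like this (my_bio_set, my_lower_bdev, and the endio bookkeeping are hypothetical, and real callers usually avoid overwriting an already-set bi_status):

    static void my_clone_endio(struct bio *clone)
    {
        struct bio *orig = clone->bi_private;

        orig->bi_status = clone->bi_status;
        bio_put(clone);
        bio_endio(orig);                    /* complete the original */
    }

    static void my_remap_and_submit(struct bio *bio)
    {
        struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &my_bio_set);

        clone->bi_private = bio;
        clone->bi_end_io = my_clone_endio;
        bio_set_dev(clone, my_lower_bdev);  /* redirect to lower device */
        submit_bio_noacct(clone);
    }
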
732 const char *bio_devname(struct bio *bio, char *buf) in bio_devname() argument
734 return disk_name(bio->bi_disk, bio->bi_partno, buf); in bio_devname()
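
bio_devname() formats the disk and partition name into a caller-supplied buffer, which must be at least BDEVNAME_SIZE bytes:

    char b[BDEVNAME_SIZE];

    pr_err("I/O error on %s, sector %llu\n", bio_devname(bio, b),
           (unsigned long long)bio->bi_iter.bi_sector);
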
762 static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio, in bio_try_merge_hw_seg() argument
766 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; in bio_try_merge_hw_seg()
775 return __bio_try_merge_page(bio, page, len, offset, same_page); in bio_try_merge_hw_seg()
791 int bio_add_hw_page(struct request_queue *q, struct bio *bio, in bio_add_hw_page() argument
797 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) in bio_add_hw_page()
800 if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors) in bio_add_hw_page()
803 if (bio->bi_vcnt > 0) { in bio_add_hw_page()
804 if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page)) in bio_add_hw_page()
811 bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; in bio_add_hw_page()
816 if (bio_full(bio, len)) in bio_add_hw_page()
819 if (bio->bi_vcnt >= queue_max_segments(q)) in bio_add_hw_page()
822 bvec = &bio->bi_io_vec[bio->bi_vcnt]; in bio_add_hw_page()
826 bio->bi_vcnt++; in bio_add_hw_page()
827 bio->bi_iter.bi_size += len; in bio_add_hw_page()
846 int bio_add_pc_page(struct request_queue *q, struct bio *bio, in bio_add_pc_page() argument
850 return bio_add_hw_page(q, bio, page, len, offset, in bio_add_pc_page()
871 bool __bio_try_merge_page(struct bio *bio, struct page *page, in __bio_try_merge_page() argument
874 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) in __bio_try_merge_page()
877 if (bio->bi_vcnt > 0) { in __bio_try_merge_page()
878 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; in __bio_try_merge_page()
881 if (bio->bi_iter.bi_size > UINT_MAX - len) { in __bio_try_merge_page()
886 bio->bi_iter.bi_size += len; in __bio_try_merge_page()
904 void __bio_add_page(struct bio *bio, struct page *page, in __bio_add_page() argument
907 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt]; in __bio_add_page()
909 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); in __bio_add_page()
910 WARN_ON_ONCE(bio_full(bio, len)); in __bio_add_page()
916 bio->bi_iter.bi_size += len; in __bio_add_page()
917 bio->bi_vcnt++; in __bio_add_page()
919 if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page))) in __bio_add_page()
920 bio_set_flag(bio, BIO_WORKINGSET); in __bio_add_page()
934 int bio_add_page(struct bio *bio, struct page *page, in bio_add_page() argument
939 if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) { in bio_add_page()
940 if (bio_full(bio, len)) in bio_add_page()
942 __bio_add_page(bio, page, len, offset); in bio_add_page()
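
bio_add_page() first tries to extend the last bvec (__bio_try_merge_page) and only then consumes a fresh slot (__bio_add_page), returning 0 once bi_max_vecs slots are exhausted. A sketch of filling a bio from a page array (add_pages is a hypothetical helper):

    static unsigned int add_pages(struct bio *bio, struct page **pages,
                                  unsigned int nr_pages)
    {
        unsigned int i;

        for (i = 0; i < nr_pages; i++) {
            /* returns the byte count added; 0 means the bio is full
             * and no merge with the last segment was possible */
            if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
                break;
        }
        return i;       /* caller submits and continues with a new bio */
    }
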
948 void bio_release_pages(struct bio *bio, bool mark_dirty) in bio_release_pages() argument
953 if (bio_flagged(bio, BIO_NO_PAGE_REF)) in bio_release_pages()
956 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_release_pages()
964 static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter) in __bio_iov_bvec_add_pages() argument
974 size = bio_add_page(bio, bv->bv_page, len, in __bio_iov_bvec_add_pages()
1002 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) in __bio_iov_iter_get_pages() argument
1004 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_iter_get_pages()
1005 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_iter_get_pages()
1006 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; in __bio_iov_iter_get_pages()
1030 if (__bio_try_merge_page(bio, page, len, offset, &same_page)) { in __bio_iov_iter_get_pages()
1034 if (WARN_ON_ONCE(bio_full(bio, len))) { in __bio_iov_iter_get_pages()
1038 __bio_add_page(bio, page, len, offset); in __bio_iov_iter_get_pages()
1047 static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter) in __bio_iov_append_get_pages() argument
1049 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_append_get_pages()
1050 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_append_get_pages()
1051 struct request_queue *q = bio->bi_disk->queue; in __bio_iov_append_get_pages()
1053 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; in __bio_iov_append_get_pages()
1077 if (bio_add_hw_page(q, bio, page, len, offset, in __bio_iov_append_get_pages()
1112 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) in bio_iov_iter_get_pages() argument
1117 if (WARN_ON_ONCE(bio->bi_vcnt)) in bio_iov_iter_get_pages()
1121 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { in bio_iov_iter_get_pages()
1124 ret = __bio_iov_append_get_pages(bio, iter); in bio_iov_iter_get_pages()
1127 ret = __bio_iov_bvec_add_pages(bio, iter); in bio_iov_iter_get_pages()
1129 ret = __bio_iov_iter_get_pages(bio, iter); in bio_iov_iter_get_pages()
1131 } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0)); in bio_iov_iter_get_pages()
1134 bio_set_flag(bio, BIO_NO_PAGE_REF); in bio_iov_iter_get_pages()
1135 return bio->bi_vcnt ? 0 : ret; in bio_iov_iter_get_pages()
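
bio_iov_iter_get_pages() fills the bio from an iov_iter until the iterator drains or the bio fills up; for ITER_BVEC iterators the pages are reused without taking extra references, which is what the BIO_NO_PAGE_REF flag records for bio_release_pages(). A direct-I/O style call site, sketched assuming bi_opf and the target device were set up beforehand:

    ret = bio_iov_iter_get_pages(bio, iter);
    if (unlikely(ret)) {
        bio->bi_status = BLK_STS_IOERR;
        bio_endio(bio);
        return ret;
    }
    submit_bio(bio);    /* completion drops the page refs via
                         * bio_release_pages() */
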
1139 static void submit_bio_wait_endio(struct bio *bio) in submit_bio_wait_endio() argument
1141 complete(bio->bi_private); in submit_bio_wait_endio()
1155 int submit_bio_wait(struct bio *bio) in submit_bio_wait() argument
1157 DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map); in submit_bio_wait()
1160 bio->bi_private = &done; in submit_bio_wait()
1161 bio->bi_end_io = submit_bio_wait_endio; in submit_bio_wait()
1162 bio->bi_opf |= REQ_SYNC; in submit_bio_wait()
1163 submit_bio(bio); in submit_bio_wait()
1174 return blk_status_to_errno(bio->bi_status); in submit_bio_wait()
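
submit_bio_wait() points bi_end_io at submit_bio_wait_endio(), which completes an on-stack completion, forces REQ_SYNC, and sleeps until the I/O finishes; the caller keeps ownership and must still bio_put() an allocated bio. A minimal synchronous one-sector read (sync_read_sector, bdev, page, and sector are hypothetical):

    static int sync_read_sector(struct block_device *bdev,
                                struct page *page, sector_t sector)
    {
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);
        int ret;

        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_opf = REQ_OP_READ;
        bio_add_page(bio, page, 512, 0);

        ret = submit_bio_wait(bio);     /* sleeps until the bio completes */
        bio_put(bio);
        return ret;                     /* negative errno from bi_status */
    }
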
1189 void bio_advance(struct bio *bio, unsigned bytes) in bio_advance() argument
1191 if (bio_integrity(bio)) in bio_advance()
1192 bio_integrity_advance(bio, bytes); in bio_advance()
1194 bio_crypt_advance(bio, bytes); in bio_advance()
1195 bio_advance_iter(bio, &bio->bi_iter, bytes); in bio_advance()
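
bio_advance() moves bi_iter (and the integrity and crypt iterators) forward by a byte count, which is how completions are processed piecewise; the request layer's bio completion does essentially this:

    /* complete "bytes" of the bio; finish it once nothing remains */
    bio_advance(bio, bytes);
    if (!bio->bi_iter.bi_size)
        bio_endio(bio);
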
1199 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, in bio_copy_data_iter()
1200 struct bio *src, struct bvec_iter *src_iter) in bio_copy_data_iter()
1238 void bio_copy_data(struct bio *dst, struct bio *src) in bio_copy_data()
1257 void bio_list_copy_data(struct bio *dst, struct bio *src) in bio_list_copy_data()
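
bio_copy_data() copies payload between two bios' current iterators, stopping at the shorter of the two; callers such as raid1's write-behind use it to shuttle data between a private bio and the original. E.g. finishing a bounce-style read (orig and bounce are hypothetical):

    bio_copy_data(orig, bounce);    /* dst, src */
    bio_free_pages(bounce);         /* free the private bounce pages */
    bio_put(bounce);
    bio_endio(orig);
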
1284 void bio_free_pages(struct bio *bio) in bio_free_pages() argument
1289 bio_for_each_segment_all(bvec, bio, iter_all) in bio_free_pages()
1323 void bio_set_pages_dirty(struct bio *bio) in bio_set_pages_dirty() argument
1328 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_set_pages_dirty()
1349 static struct bio *bio_dirty_list;
1356 struct bio *bio, *next; in bio_dirty_fn() local
1363 while ((bio = next) != NULL) { in bio_dirty_fn()
1364 next = bio->bi_private; in bio_dirty_fn()
1366 bio_release_pages(bio, true); in bio_dirty_fn()
1367 bio_put(bio); in bio_dirty_fn()
1371 void bio_check_pages_dirty(struct bio *bio) in bio_check_pages_dirty() argument
1377 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_check_pages_dirty()
1382 bio_release_pages(bio, false); in bio_check_pages_dirty()
1383 bio_put(bio); in bio_check_pages_dirty()
1387 bio->bi_private = bio_dirty_list; in bio_check_pages_dirty()
1388 bio_dirty_list = bio; in bio_check_pages_dirty()
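
For reads into user-mapped pages, the convention is bio_set_pages_dirty() before submission and bio_check_pages_dirty() from the endio; the latter releases the pages and puts the bio itself, deferring to the bio_dirty_fn workqueue for any page the VM cleaned while the I/O was in flight (the endio may run in interrupt context, where redirtying is not allowed). Sketched with hypothetical names:

    static void dio_read_endio(struct bio *bio)
    {
        /* releases the pinned pages and puts the bio, possibly
         * via the deferred bio_dirty_list path */
        bio_check_pages_dirty(bio);
    }

    static void submit_dio_read(struct bio *bio)
    {
        bio_set_pages_dirty(bio);       /* process context, before submit */
        bio->bi_end_io = dio_read_endio;
        submit_bio(bio);
    }
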
1393 static inline bool bio_remaining_done(struct bio *bio) in bio_remaining_done() argument
1399 if (!bio_flagged(bio, BIO_CHAIN)) in bio_remaining_done()
1402 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); in bio_remaining_done()
1404 if (atomic_dec_and_test(&bio->__bi_remaining)) { in bio_remaining_done()
1405 bio_clear_flag(bio, BIO_CHAIN); in bio_remaining_done()
1426 void bio_endio(struct bio *bio) in bio_endio() argument
1429 if (!bio_remaining_done(bio)) in bio_endio()
1431 if (!bio_integrity_endio(bio)) in bio_endio()
1434 if (bio->bi_disk) in bio_endio()
1435 rq_qos_done_bio(bio->bi_disk->queue, bio); in bio_endio()
1445 if (bio->bi_end_io == bio_chain_endio) { in bio_endio()
1446 bio = __bio_chain_endio(bio); in bio_endio()
1450 if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) { in bio_endio()
1451 trace_block_bio_complete(bio->bi_disk->queue, bio); in bio_endio()
1452 bio_clear_flag(bio, BIO_TRACE_COMPLETION); in bio_endio()
1455 blk_throtl_bio_endio(bio); in bio_endio()
1457 bio_uninit(bio); in bio_endio()
1458 if (bio->bi_end_io) in bio_endio()
1459 bio->bi_end_io(bio); in bio_endio()
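
bio_endio() unwinds bio_chain_endio completions iteratively rather than recursively, so arbitrarily long chains cannot overflow the stack; a driver's job is only to set bi_status and call it once per bio. A minimal completion hook (struct my_cmd and my_hw_complete are hypothetical):

    struct my_cmd { struct bio *bio; int error; };  /* hypothetical */

    static void my_hw_complete(struct my_cmd *cmd)
    {
        struct bio *bio = cmd->bio;

        bio->bi_status = cmd->error ? BLK_STS_IOERR : BLK_STS_OK;
        bio_endio(bio);     /* runs bi_end_io once the chain count drains */
    }
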
1477 struct bio *bio_split(struct bio *bio, int sectors, in bio_split() argument
1480 struct bio *split; in bio_split()
1483 BUG_ON(sectors >= bio_sectors(bio)); in bio_split()
1486 if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND)) in bio_split()
1489 split = bio_clone_fast(bio, gfp, bs); in bio_split()
1498 bio_advance(bio, split->bi_iter.bi_size); in bio_split()
1500 if (bio_flagged(bio, BIO_TRACE_COMPLETION)) in bio_split()
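
bio_split() clones the front `sectors` of the bio, trims the clone to that length, and advances the original past it; callers then chain the two pieces so completion still fans in to one endio, as in the queue-split pattern (max_sectors and my_bio_set are hypothetical):

    if (bio_sectors(bio) > max_sectors) {
        struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
                                      &my_bio_set);

        bio_chain(split, bio);      /* remainder waits for the front */
        submit_bio_noacct(bio);     /* requeue the remainder */
        bio = split;                /* carry on with the front piece */
    }
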
1513 void bio_trim(struct bio *bio, int offset, int size) in bio_trim() argument
1520 if (offset == 0 && size == bio->bi_iter.bi_size) in bio_trim()
1523 bio_advance(bio, offset << 9); in bio_trim()
1524 bio->bi_iter.bi_size = size; in bio_trim()
1526 if (bio_integrity(bio)) in bio_trim()
1527 bio_integrity_trim(bio); in bio_trim()
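
bio_trim() takes its offset and size in 512-byte sectors, not bytes (size is shifted left by 9 internally before being assigned to bi_size); a typical caller restricts a fresh clone to a sub-range of the original:

    struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &my_bio_set);

    /* both arguments are in 512-byte sectors; off/len hypothetical */
    bio_trim(clone, off, len);
    submit_bio_noacct(clone);
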