Lines Matching refs:bio
20 struct request *prev_rq, struct bio *prev, struct bio *next) in bio_will_gap()
33 bio_get_first_bvec(prev_rq->bio, &pb); in bio_will_gap()
55 static inline bool req_gap_back_merge(struct request *req, struct bio *bio) in req_gap_back_merge() argument
57 return bio_will_gap(req->q, req, req->biotail, bio); in req_gap_back_merge()
60 static inline bool req_gap_front_merge(struct request *req, struct bio *bio) in req_gap_front_merge() argument
62 return bio_will_gap(req->q, NULL, bio, req->bio); in req_gap_front_merge()
65 static struct bio *blk_bio_discard_split(struct request_queue *q, in blk_bio_discard_split()
66 struct bio *bio, in blk_bio_discard_split() argument
89 if (bio_sectors(bio) <= max_discard_sectors) in blk_bio_discard_split()
100 tmp = bio->bi_iter.bi_sector + split_sectors - alignment; in blk_bio_discard_split()
106 return bio_split(bio, split_sectors, GFP_NOIO, bs); in blk_bio_discard_split()
109 static struct bio *blk_bio_write_zeroes_split(struct request_queue *q, in blk_bio_write_zeroes_split()
110 struct bio *bio, struct bio_set *bs, unsigned *nsegs) in blk_bio_write_zeroes_split() argument
117 if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors) in blk_bio_write_zeroes_split()
120 return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs); in blk_bio_write_zeroes_split()
123 static struct bio *blk_bio_write_same_split(struct request_queue *q, in blk_bio_write_same_split()
124 struct bio *bio, in blk_bio_write_same_split() argument
133 if (bio_sectors(bio) <= q->limits.max_write_same_sectors) in blk_bio_write_same_split()
136 return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs); in blk_bio_write_same_split()
148 struct bio *bio) in get_max_io_size() argument
150 unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0); in get_max_io_size()
154 unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1); in get_max_io_size()
248 static struct bio *blk_bio_segment_split(struct request_queue *q, in blk_bio_segment_split()
249 struct bio *bio, in blk_bio_segment_split() argument
256 const unsigned max_sectors = get_max_io_size(q, bio); in blk_bio_segment_split()
259 bio_for_each_bvec(bv, bio, iter) { in blk_bio_segment_split()
285 return bio_split(bio, sectors, GFP_NOIO, bs); in blk_bio_segment_split()
301 void __blk_queue_split(struct bio **bio, unsigned int *nr_segs) in __blk_queue_split() argument
303 struct request_queue *q = (*bio)->bi_disk->queue; in __blk_queue_split()
304 struct bio *split = NULL; in __blk_queue_split()
306 switch (bio_op(*bio)) { in __blk_queue_split()
309 split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs); in __blk_queue_split()
312 split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, in __blk_queue_split()
316 split = blk_bio_write_same_split(q, *bio, &q->bio_split, in __blk_queue_split()
329 (*bio)->bi_vcnt == 1 && in __blk_queue_split()
330 ((*bio)->bi_io_vec[0].bv_len + in __blk_queue_split()
331 (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) { in __blk_queue_split()
335 split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs); in __blk_queue_split()
343 bio_chain(split, *bio); in __blk_queue_split()
344 trace_block_split(q, split, (*bio)->bi_iter.bi_sector); in __blk_queue_split()
345 submit_bio_noacct(*bio); in __blk_queue_split()
346 *bio = split; in __blk_queue_split()
348 blk_throtl_charge_bio_split(*bio); in __blk_queue_split()
362 void blk_queue_split(struct bio **bio) in blk_queue_split() argument
366 __blk_queue_split(bio, &nr_segs); in blk_queue_split()
377 if (!rq->bio) in blk_recalc_rq_segments()
380 switch (bio_op(rq->bio)) { in blk_recalc_rq_segments()
384 struct bio *bio = rq->bio; in blk_recalc_rq_segments() local
386 for_each_bio(bio) in blk_recalc_rq_segments()
485 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, in __blk_bios_map_sg() argument
494 for_each_bio(bio) { in __blk_bios_map_sg()
495 bio_for_each_bvec(bvec, bio, iter) { in __blk_bios_map_sg()
512 if (likely(bio->bi_iter.bi_size)) { in __blk_bios_map_sg()
532 else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME) in __blk_rq_map_sg()
533 nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg); in __blk_rq_map_sg()
534 else if (rq->bio) in __blk_rq_map_sg()
535 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg); in __blk_rq_map_sg()
557 static inline int ll_new_hw_segment(struct request *req, struct bio *bio, in ll_new_hw_segment() argument
560 if (!blk_cgroup_mergeable(req, bio)) in ll_new_hw_segment()
563 if (blk_integrity_merge_bio(req->q, req, bio) == false) in ll_new_hw_segment()
585 int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs) in ll_back_merge_fn() argument
587 if (req_gap_back_merge(req, bio)) in ll_back_merge_fn()
590 integrity_req_gap_back_merge(req, bio)) in ll_back_merge_fn()
592 if (!bio_crypt_ctx_back_mergeable(req, bio)) in ll_back_merge_fn()
594 if (blk_rq_sectors(req) + bio_sectors(bio) > in ll_back_merge_fn()
600 return ll_new_hw_segment(req, bio, nr_segs); in ll_back_merge_fn()
603 static int ll_front_merge_fn(struct request *req, struct bio *bio, in ll_front_merge_fn() argument
606 if (req_gap_front_merge(req, bio)) in ll_front_merge_fn()
609 integrity_req_gap_front_merge(req, bio)) in ll_front_merge_fn()
611 if (!bio_crypt_ctx_front_mergeable(req, bio)) in ll_front_merge_fn()
613 if (blk_rq_sectors(req) + bio_sectors(bio) > in ll_front_merge_fn()
614 blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) { in ll_front_merge_fn()
619 return ll_new_hw_segment(req, bio, nr_segs); in ll_front_merge_fn()
629 if (blk_rq_sectors(req) + bio_sectors(next->bio) > in req_attempt_discard_merge()
645 if (req_gap_back_merge(req, next->bio)) in ll_merge_requests_fn()
659 if (!blk_cgroup_mergeable(req, next->bio)) in ll_merge_requests_fn()
685 struct bio *bio; in blk_rq_set_mixed_merge() local
695 for (bio = rq->bio; bio; bio = bio->bi_next) { in blk_rq_set_mixed_merge()
696 WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) && in blk_rq_set_mixed_merge()
697 (bio->bi_opf & REQ_FAILFAST_MASK) != ff); in blk_rq_set_mixed_merge()
698 bio->bi_opf |= ff; in blk_rq_set_mixed_merge()
743 !blk_write_same_mergeable(req->bio, next->bio)) in attempt_merge()
798 req->biotail->bi_next = next->bio; in attempt_merge()
817 next->bio = NULL; in attempt_merge()
857 bool blk_rq_merge_ok(struct request *rq, struct bio *bio) in blk_rq_merge_ok() argument
859 if (!rq_mergeable(rq) || !bio_mergeable(bio)) in blk_rq_merge_ok()
862 if (req_op(rq) != bio_op(bio)) in blk_rq_merge_ok()
866 if (bio_data_dir(bio) != rq_data_dir(rq)) in blk_rq_merge_ok()
870 if (rq->rq_disk != bio->bi_disk) in blk_rq_merge_ok()
874 if (!blk_cgroup_mergeable(rq, bio)) in blk_rq_merge_ok()
878 if (blk_integrity_merge_bio(rq->q, rq, bio) == false) in blk_rq_merge_ok()
882 if (!bio_crypt_rq_ctx_compatible(rq, bio)) in blk_rq_merge_ok()
887 !blk_write_same_mergeable(rq->bio, bio)) in blk_rq_merge_ok()
894 if (rq->write_hint != bio->bi_write_hint) in blk_rq_merge_ok()
897 if (rq->ioprio != bio_prio(bio)) in blk_rq_merge_ok()
903 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio) in blk_try_merge() argument
907 else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) in blk_try_merge()
909 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) in blk_try_merge()
931 struct bio *bio, unsigned int nr_segs) in bio_attempt_back_merge() argument
933 const int ff = bio->bi_opf & REQ_FAILFAST_MASK; in bio_attempt_back_merge()
935 if (!ll_back_merge_fn(req, bio, nr_segs)) in bio_attempt_back_merge()
938 trace_block_bio_backmerge(req->q, req, bio); in bio_attempt_back_merge()
939 rq_qos_merge(req->q, req, bio); in bio_attempt_back_merge()
944 req->biotail->bi_next = bio; in bio_attempt_back_merge()
945 req->biotail = bio; in bio_attempt_back_merge()
946 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_back_merge()
948 bio_crypt_free_ctx(bio); in bio_attempt_back_merge()
955 struct bio *bio, unsigned int nr_segs) in bio_attempt_front_merge() argument
957 const int ff = bio->bi_opf & REQ_FAILFAST_MASK; in bio_attempt_front_merge()
959 if (!ll_front_merge_fn(req, bio, nr_segs)) in bio_attempt_front_merge()
962 trace_block_bio_frontmerge(req->q, req, bio); in bio_attempt_front_merge()
963 rq_qos_merge(req->q, req, bio); in bio_attempt_front_merge()
968 bio->bi_next = req->bio; in bio_attempt_front_merge()
969 req->bio = bio; in bio_attempt_front_merge()
971 req->__sector = bio->bi_iter.bi_sector; in bio_attempt_front_merge()
972 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_front_merge()
974 bio_crypt_do_front_merge(req, bio); in bio_attempt_front_merge()
981 struct request *req, struct bio *bio) in bio_attempt_discard_merge() argument
987 if (blk_rq_sectors(req) + bio_sectors(bio) > in bio_attempt_discard_merge()
991 rq_qos_merge(q, req, bio); in bio_attempt_discard_merge()
993 req->biotail->bi_next = bio; in bio_attempt_discard_merge()
994 req->biotail = bio; in bio_attempt_discard_merge()
995 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_discard_merge()
1007 struct bio *bio, in blk_attempt_bio_merge() argument
1011 if (!blk_rq_merge_ok(rq, bio)) in blk_attempt_bio_merge()
1014 switch (blk_try_merge(rq, bio)) { in blk_attempt_bio_merge()
1016 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) in blk_attempt_bio_merge()
1017 return bio_attempt_back_merge(rq, bio, nr_segs); in blk_attempt_bio_merge()
1020 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) in blk_attempt_bio_merge()
1021 return bio_attempt_front_merge(rq, bio, nr_segs); in blk_attempt_bio_merge()
1024 return bio_attempt_discard_merge(q, rq, bio); in blk_attempt_bio_merge()
1054 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, in blk_attempt_plug_merge() argument
1061 plug = blk_mq_plug(q, bio); in blk_attempt_plug_merge()
1080 if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == in blk_attempt_plug_merge()
1093 struct bio *bio, unsigned int nr_segs) in blk_bio_list_merge() argument
1102 switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) { in blk_bio_list_merge()
1117 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, in blk_mq_sched_try_merge() argument
1122 switch (elv_merge(q, &rq, bio)) { in blk_mq_sched_try_merge()
1124 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
1126 if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK) in blk_mq_sched_try_merge()
1133 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
1135 if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK) in blk_mq_sched_try_merge()
1142 return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK; in blk_mq_sched_try_merge()
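
Note: the references above all sit on the block-layer split/merge path (bio splitting in __blk_queue_split()/blk_queue_split(), merge eligibility checks in blk_rq_merge_ok(), blk_try_merge() and the bio_attempt_*_merge() helpers). As a hedged illustration of how the split entry point listed above is typically consumed, the sketch below shows a bio-based driver calling blk_queue_split() at the top of its ->submit_bio handler, matching the single-argument void blk_queue_split(struct bio **bio) form shown at line 362 of the listing. This is a sketch under stated assumptions, not kernel code: the driver type my_dev and the helper my_dev_handle_bio() are hypothetical names introduced only for the example.

    /*
     * Sketch (hypothetical driver names): split the incoming bio against
     * the queue limits before the driver looks at it.
     */
    #include <linux/blkdev.h>
    #include <linux/blk_types.h>

    struct my_dev;                                          /* hypothetical */
    void my_dev_handle_bio(struct my_dev *dev, struct bio *bio); /* hypothetical */

    static blk_qc_t my_submit_bio(struct bio *bio)
    {
            struct my_dev *dev = bio->bi_disk->private_data;

            /*
             * Split the bio so it obeys the queue limits; any remainder is
             * chained and re-submitted via submit_bio_noacct() inside
             * __blk_queue_split() (see line 345 above), so only a
             * conforming bio reaches the driver here.
             */
            blk_queue_split(&bio);

            my_dev_handle_bio(dev, bio);
            return BLK_QC_T_NONE;
    }

After the split, *bio points at the front portion that fits the limits, and the chained remainder is accounted by blk_throtl_charge_bio_split() and resubmitted, which is why the merge helpers later in the listing only ever see bios that already respect max_sectors and the segment limits.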