Lines matching refs:bio (cross-reference hits for struct bio usage in the block layer core, block/blk-core.c)
245 static void req_bio_endio(struct request *rq, struct bio *bio, in req_bio_endio() argument
249 bio->bi_status = error; in req_bio_endio()
252 bio_set_flag(bio, BIO_QUIET); in req_bio_endio()
254 bio_advance(bio, nbytes); in req_bio_endio()
261 if (bio->bi_iter.bi_size) in req_bio_endio()
262 bio->bi_status = BLK_STS_IOERR; in req_bio_endio()
264 bio->bi_iter.bi_sector = rq->__sector; in req_bio_endio()
268 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) in req_bio_endio()
269 bio_endio(bio); in req_bio_endio()
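
req_bio_endio() is the request-to-bio completion bridge shown above: it copies the request's status into bio->bi_status, advances the bio by the completed byte count, and calls bio_endio() once bi_size reaches zero (bios that are part of a flush sequence are finished by the flush machinery instead). From the submitter's side that completion surfaces in the bio's bi_end_io callback. A minimal sketch of such a callback, assuming the bi_status/bi_private fields used in this kernel version; my_ctx and my_read_done are hypothetical names:

    #include <linux/bio.h>
    #include <linux/completion.h>

    struct my_ctx {                         /* hypothetical per-I/O context */
        struct completion done;
        blk_status_t status;
    };

    static void my_read_done(struct bio *bio)
    {
        struct my_ctx *ctx = bio->bi_private;

        /* req_bio_endio() has already propagated the request status. */
        ctx->status = bio->bi_status;
        complete(&ctx->done);
        bio_put(bio);
    }
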
282 rq->bio, rq->biotail, blk_rq_bytes(rq)); in blk_dump_rq_flags()
482 static inline int bio_queue_enter(struct bio *bio) in bio_queue_enter() argument
484 struct request_queue *q = bio->bi_disk->queue; in bio_queue_enter()
485 bool nowait = bio->bi_opf & REQ_NOWAIT; in bio_queue_enter()
491 bio_wouldblock_error(bio); in bio_queue_enter()
493 bio_io_error(bio); in bio_queue_enter()
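
bio_queue_enter() takes a queue usage reference before the bio is dispatched. With REQ_NOWAIT set it refuses to sleep and fails the bio through bio_wouldblock_error() (BLK_STS_AGAIN); a dying queue gets bio_io_error(). A submitter that sets REQ_NOWAIT therefore has to treat BLK_STS_AGAIN as "retry from a context that may block". A hedged sketch, with the my_* names being hypothetical:

    #include <linux/bio.h>

    static void my_nowait_done(struct bio *bio)
    {
        if (bio->bi_status == BLK_STS_AGAIN) {
            /*
             * Entering the queue would have blocked: arrange a retry
             * from task context instead of treating this as a hard
             * I/O error.
             */
        }
        bio_put(bio);
    }

    static void my_submit_nowait(struct bio *bio)
    {
        bio->bi_opf |= REQ_NOWAIT;          /* fail fast rather than sleep */
        bio->bi_end_io = my_nowait_done;
        submit_bio(bio);
    }
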
655 static void handle_bad_sector(struct bio *bio, sector_t maxsector) in handle_bad_sector() argument
661 bio_devname(bio, b), bio->bi_opf, in handle_bad_sector()
662 bio_end_sector(bio), maxsector); in handle_bad_sector()
700 static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part) in bio_check_ro() argument
702 const int op = bio_op(bio); in bio_check_ro()
707 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) in bio_check_ro()
712 bio_devname(bio, b), part->partno); in bio_check_ro()
720 static noinline int should_fail_bio(struct bio *bio) in should_fail_bio() argument
722 if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size)) in should_fail_bio()
733 static inline int bio_check_eod(struct bio *bio, sector_t maxsector) in bio_check_eod() argument
735 unsigned int nr_sectors = bio_sectors(bio); in bio_check_eod()
739 bio->bi_iter.bi_sector > maxsector - nr_sectors)) { in bio_check_eod()
740 handle_bad_sector(bio, maxsector); in bio_check_eod()
749 static inline int blk_partition_remap(struct bio *bio) in blk_partition_remap() argument
755 p = __disk_get_part(bio->bi_disk, bio->bi_partno); in blk_partition_remap()
758 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size))) in blk_partition_remap()
760 if (unlikely(bio_check_ro(bio, p))) in blk_partition_remap()
763 if (bio_sectors(bio)) { in blk_partition_remap()
764 if (bio_check_eod(bio, part_nr_sects_read(p))) in blk_partition_remap()
766 bio->bi_iter.bi_sector += p->start_sect; in blk_partition_remap()
767 trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p), in blk_partition_remap()
768 bio->bi_iter.bi_sector - p->start_sect); in blk_partition_remap()
770 bio->bi_partno = 0; in blk_partition_remap()
781 struct bio *bio) in blk_check_zone_append() argument
783 sector_t pos = bio->bi_iter.bi_sector; in blk_check_zone_append()
784 int nr_sectors = bio_sectors(bio); in blk_check_zone_append()
807 bio->bi_opf |= REQ_NOMERGE; in blk_check_zone_append()
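
blk_check_zone_append() validates REQ_OP_ZONE_APPEND bios and tags them REQ_NOMERGE so they are never merged; on completion, req_bio_endio() above copies rq->__sector back into bi_iter.bi_sector, i.e. the sector the device actually used. A hedged submission sketch under those assumptions; my_append_done and my_zone_append are hypothetical names:

    #include <linux/bio.h>

    static void my_append_done(struct bio *bio)
    {
        /* For a zone append, bi_sector now holds where the data landed. */
        sector_t written = bio->bi_iter.bi_sector;

        (void)written;                      /* record it in caller metadata */
        bio_put(bio);
    }

    static void my_zone_append(struct bio *bio, sector_t zone_start)
    {
        bio->bi_opf = REQ_OP_ZONE_APPEND;   /* device chooses the offset */
        bio->bi_iter.bi_sector = zone_start;/* target zone, not a position */
        bio->bi_end_io = my_append_done;
        submit_bio(bio);
    }
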
812 static noinline_for_stack bool submit_bio_checks(struct bio *bio) in submit_bio_checks() argument
814 struct request_queue *q = bio->bi_disk->queue; in submit_bio_checks()
820 plug = blk_mq_plug(q, bio); in submit_bio_checks()
822 bio->bi_opf |= REQ_NOWAIT; in submit_bio_checks()
828 if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q)) in submit_bio_checks()
831 if (should_fail_bio(bio)) in submit_bio_checks()
834 if (bio->bi_partno) { in submit_bio_checks()
835 if (unlikely(blk_partition_remap(bio))) in submit_bio_checks()
838 if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0))) in submit_bio_checks()
840 if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk)))) in submit_bio_checks()
848 if (op_is_flush(bio->bi_opf) && in submit_bio_checks()
850 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA); in submit_bio_checks()
851 if (!bio_sectors(bio)) { in submit_bio_checks()
858 bio->bi_opf &= ~REQ_HIPRI; in submit_bio_checks()
860 switch (bio_op(bio)) { in submit_bio_checks()
874 status = blk_check_zone_append(q, bio); in submit_bio_checks()
906 if (blk_throtl_bio(bio)) in submit_bio_checks()
909 blk_cgroup_bio_start(bio); in submit_bio_checks()
910 blkcg_bio_issue_init(bio); in submit_bio_checks()
912 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) { in submit_bio_checks()
913 trace_block_bio_queue(q, bio); in submit_bio_checks()
917 bio_set_flag(bio, BIO_TRACE_COMPLETION); in submit_bio_checks()
924 bio->bi_status = status; in submit_bio_checks()
925 bio_endio(bio); in submit_bio_checks()
929 static blk_qc_t __submit_bio(struct bio *bio) in __submit_bio() argument
931 struct gendisk *disk = bio->bi_disk; in __submit_bio()
934 if (blk_crypto_bio_prep(&bio)) { in __submit_bio()
936 return blk_mq_submit_bio(bio); in __submit_bio()
937 ret = disk->fops->submit_bio(bio); in __submit_bio()
962 static blk_qc_t __submit_bio_noacct(struct bio *bio) in __submit_bio_noacct() argument
967 BUG_ON(bio->bi_next); in __submit_bio_noacct()
973 struct request_queue *q = bio->bi_disk->queue; in __submit_bio_noacct()
976 if (unlikely(bio_queue_enter(bio) != 0)) in __submit_bio_noacct()
985 ret = __submit_bio(bio); in __submit_bio_noacct()
993 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) in __submit_bio_noacct()
994 if (q == bio->bi_disk->queue) in __submit_bio_noacct()
995 bio_list_add(&same, bio); in __submit_bio_noacct()
997 bio_list_add(&lower, bio); in __submit_bio_noacct()
1005 } while ((bio = bio_list_pop(&bio_list_on_stack[0]))); in __submit_bio_noacct()
1011 static blk_qc_t __submit_bio_noacct_mq(struct bio *bio) in __submit_bio_noacct_mq() argument
1019 struct gendisk *disk = bio->bi_disk; in __submit_bio_noacct_mq()
1021 if (unlikely(bio_queue_enter(bio) != 0)) in __submit_bio_noacct_mq()
1024 if (!blk_crypto_bio_prep(&bio)) { in __submit_bio_noacct_mq()
1030 ret = blk_mq_submit_bio(bio); in __submit_bio_noacct_mq()
1031 } while ((bio = bio_list_pop(&bio_list[0]))); in __submit_bio_noacct_mq()
1046 blk_qc_t submit_bio_noacct(struct bio *bio) in submit_bio_noacct() argument
1048 if (!submit_bio_checks(bio)) in submit_bio_noacct()
1058 bio_list_add(&current->bio_list[0], bio); in submit_bio_noacct()
1062 if (!bio->bi_disk->fops->submit_bio) in submit_bio_noacct()
1063 return __submit_bio_noacct_mq(bio); in submit_bio_noacct()
1064 return __submit_bio_noacct(bio); in submit_bio_noacct()
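
submit_bio_noacct() runs submit_bio_checks() and dispatches without charging I/O accounting a second time, and the current->bio_list test above turns recursive submissions into iteration so stacked devices cannot overflow the kernel stack. It is the entry point a bio-based stacking driver uses when remapping a bio to a lower device; a hedged sketch, where struct my_dev and my_submit_bio are hypothetical:

    #include <linux/blkdev.h>

    struct my_dev {                         /* hypothetical stacking-driver state */
        struct block_device *lower_bdev;
        sector_t lower_start;
    };

    static blk_qc_t my_submit_bio(struct bio *bio)
    {
        struct my_dev *dev = bio->bi_disk->private_data;

        /*
         * Redirect to the lower device and resubmit. Accounting already
         * happened when the bio first passed through submit_bio().
         */
        bio_set_dev(bio, dev->lower_bdev);
        bio->bi_iter.bi_sector += dev->lower_start;
        return submit_bio_noacct(bio);
    }
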
1081 blk_qc_t submit_bio(struct bio *bio) in submit_bio() argument
1083 if (blkcg_punt_bio_submit(bio)) in submit_bio()
1090 if (bio_has_data(bio)) { in submit_bio()
1093 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) in submit_bio()
1094 count = queue_logical_block_size(bio->bi_disk->queue) >> 9; in submit_bio()
1096 count = bio_sectors(bio); in submit_bio()
1098 if (op_is_write(bio_op(bio))) { in submit_bio()
1101 task_io_account_read(bio->bi_iter.bi_size); in submit_bio()
1109 op_is_write(bio_op(bio)) ? "WRITE" : "READ", in submit_bio()
1110 (unsigned long long)bio->bi_iter.bi_sector, in submit_bio()
1111 bio_devname(bio, b), count); in submit_bio()
1121 if (unlikely(bio_op(bio) == REQ_OP_READ && in submit_bio()
1122 bio_flagged(bio, BIO_WORKINGSET))) { in submit_bio()
1127 ret = submit_bio_noacct(bio); in submit_bio()
1133 return submit_bio_noacct(bio); in submit_bio()
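
submit_bio() is the top-level entry point: it performs the per-task accounting shown above (REQ_OP_WRITE_SAME is charged as a single logical block, everything else as bio_sectors()) and then hands off to submit_bio_noacct(). For reference, a typical synchronous read through this path, assuming the bio_alloc()/bio_add_page()/submit_bio_wait() helpers of this kernel version; my_read_page is a hypothetical name:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int my_read_page(struct block_device *bdev, sector_t sector,
                            struct page *page)
    {
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);
        int ret;

        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_opf = REQ_OP_READ;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        ret = submit_bio_wait(bio);         /* submit_bio() + wait for completion */
        bio_put(bio);
        return ret;
    }
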
1243 struct bio *bio; in blk_rq_err_bytes() local
1255 for (bio = rq->bio; bio; bio = bio->bi_next) { in blk_rq_err_bytes()
1256 if ((bio->bi_opf & ff) != ff) in blk_rq_err_bytes()
1258 bytes += bio->bi_iter.bi_size; in blk_rq_err_bytes()
1348 struct bio *bio) in part_start_io_acct() argument
1350 *part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector); in part_start_io_acct()
1352 return __part_start_io_acct(*part, bio_sectors(bio), bio_op(bio)); in part_start_io_acct()
1377 void part_end_io_acct(struct hd_struct *part, struct bio *bio, in part_end_io_acct() argument
1380 __part_end_io_acct(part, bio_op(bio), start_time); in part_end_io_acct()
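
part_start_io_acct()/part_end_io_acct() are the accounting hooks for bio-based drivers that complete bios themselves and never go through the request path: the start call maps the sector to an hd_struct and records a start time, the end call charges the elapsed time to that partition. A hedged usage sketch built on the signatures visible above; struct my_io and the my_account_* helpers are hypothetical:

    #include <linux/blkdev.h>
    #include <linux/genhd.h>

    struct my_io {                          /* hypothetical per-bio bookkeeping */
        struct hd_struct *part;
        unsigned long start_time;
    };

    /* Called when the driver accepts the bio. */
    static void my_account_start(struct gendisk *disk, struct bio *bio,
                                 struct my_io *io)
    {
        io->start_time = part_start_io_acct(disk, &io->part, bio);
    }

    /* Called from the driver's own completion path, before bio_endio(). */
    static void my_account_end(struct bio *bio, struct my_io *io)
    {
        part_end_io_acct(io->part, bio, io->start_time);
    }
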
1398 if (rq->bio) { in blk_steal_bios()
1400 list->tail->bi_next = rq->bio; in blk_steal_bios()
1402 list->head = rq->bio; in blk_steal_bios()
1405 rq->bio = NULL; in blk_steal_bios()
1446 if (!req->bio) in blk_update_request()
1462 while (req->bio) { in blk_update_request()
1463 struct bio *bio = req->bio; in blk_update_request() local
1464 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); in blk_update_request()
1466 if (bio_bytes == bio->bi_iter.bi_size) in blk_update_request()
1467 req->bio = bio->bi_next; in blk_update_request()
1470 bio_clear_flag(bio, BIO_TRACE_COMPLETION); in blk_update_request()
1471 req_bio_endio(req, bio, bio_bytes, error); in blk_update_request()
1483 if (!req->bio) { in blk_update_request()
1502 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK; in blk_update_request()
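
blk_update_request() walks the request's bio chain, completing each bio via req_bio_endio() for the bytes that actually finished, and returns true while the request still has bytes pending; the surviving head bio also refreshes the request's failfast flags. A hedged sketch of how a blk-mq driver's completion path might use it for partial completions; my_complete_rq is a hypothetical name and the requeue policy is just one option:

    #include <linux/blk-mq.h>

    static void my_complete_rq(struct request *rq, unsigned int done_bytes,
                               blk_status_t status)
    {
        if (blk_update_request(rq, status, done_bytes)) {
            /*
             * Bytes remain outstanding: have the block layer reissue
             * the rest of the request.
             */
            blk_mq_requeue_request(rq, true);
        } else {
            __blk_mq_end_request(rq, status);
        }
    }
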
1579 struct bio *bio; in blk_rq_unprep_clone() local
1581 while ((bio = rq->bio) != NULL) { in blk_rq_unprep_clone()
1582 rq->bio = bio->bi_next; in blk_rq_unprep_clone()
1584 bio_put(bio); in blk_rq_unprep_clone()
1608 int (*bio_ctr)(struct bio *, struct bio *, void *), in blk_rq_prep_clone() argument
1611 struct bio *bio, *bio_src; in blk_rq_prep_clone() local
1617 bio = bio_clone_fast(bio_src, gfp_mask, bs); in blk_rq_prep_clone()
1618 if (!bio) in blk_rq_prep_clone()
1621 if (bio_ctr && bio_ctr(bio, bio_src, data)) in blk_rq_prep_clone()
1624 if (rq->bio) { in blk_rq_prep_clone()
1625 rq->biotail->bi_next = bio; in blk_rq_prep_clone()
1626 rq->biotail = bio; in blk_rq_prep_clone()
1628 rq->bio = rq->biotail = bio; in blk_rq_prep_clone()
1630 bio = NULL; in blk_rq_prep_clone()
1643 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0) in blk_rq_prep_clone()
1649 if (bio) in blk_rq_prep_clone()
1650 bio_put(bio); in blk_rq_prep_clone()
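
blk_rq_prep_clone() fast-clones every bio of a source request onto a destination request, optionally running a per-bio constructor, and chains the clones onto rq->bio/rq->biotail; its error path above puts the stray clone and unwinds what was already attached. blk_rq_unprep_clone() is the matching teardown once the clone has completed. This is the request-based device-mapper pattern; a hedged sketch with hypothetical my_* names:

    #include <linux/blkdev.h>

    static int my_bio_ctr(struct bio *clone, struct bio *bio_src, void *data)
    {
        clone->bi_private = data;           /* point each clone at per-I/O state */
        return 0;
    }

    static int my_setup_clone(struct request *clone, struct request *rq_src,
                              struct bio_set *bs, void *per_io)
    {
        /* The failure path inside blk_rq_prep_clone() cleans up for us. */
        return blk_rq_prep_clone(clone, rq_src, bs, GFP_ATOMIC,
                                 my_bio_ctr, per_io);
    }

    static void my_free_clone(struct request *clone)
    {
        blk_rq_unprep_clone(clone);         /* put the cloned bios when done */
    }
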
1805 sizeof_field(struct bio, bi_opf)); in blk_dev_init()