Lines matching refs: bio

Cross-reference hits for the identifier bio in the NVMe target's block-device backend (drivers/nvme/target/io-cmd-bdev.c). Only matching lines are shown, so the file's line numbers, kept in the left column, are not contiguous. The bio_alloc()/bio_init() signatures and BIO_MAX_PAGES place the excerpt before Linux 5.12.
In nvmet_bio_done():

 162	static void nvmet_bio_done(struct bio *bio)
 164		struct nvmet_req *req = bio->bi_private;
 166		nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
 167		if (bio != &req->b.inline_bio)
 168			bio_put(bio);
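
The pattern at lines 162-168 is the standard bi_end_io completion hook: translate bi_status for the upper layer, then drop the bio unless it is the one embedded in the request. A minimal sketch of that shape against the same pre-5.12 block API; struct my_req, my_bio_done() and complete_my_request() are illustrative stand-ins, not nvmet names, and the later sketches below reuse them.

	#include <linux/bio.h>
	#include <linux/blkdev.h>
	#include <linux/scatterlist.h>

	/* Illustrative request context with an embedded bio for small I/Os. */
	struct my_req {
		struct bio	inline_bio;
		struct bio_vec	inline_bvec[8];
	};

	/* Hypothetical upper-layer completion helper. */
	static void complete_my_request(struct my_req *req, int error);

	static void my_bio_done(struct bio *bio)
	{
		struct my_req *req = bio->bi_private;

		/* Report the outcome first ... */
		complete_my_request(req, blk_status_to_errno(bio->bi_status));

		/*
		 * ... then release the bio.  The embedded bio was bio_init()ed,
		 * not allocated from a bio_set, so it must not be bio_put().
		 */
		if (bio != &req->inline_bio)
			bio_put(bio);
	}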
In nvmet_bdev_alloc_bip() (CONFIG_BLK_DEV_INTEGRITY version):

 172	static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
 187		bip = bio_integrity_alloc(bio, GFP_NOIO,
 194		bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
 196		bip_set_seed(bip, bio->bi_iter.bi_sector >>
 202		rc = bio_integrity_add_page(bio, miter->page, len,

In nvmet_bdev_alloc_bip() (stub used when CONFIG_BLK_DEV_INTEGRITY is not set):

 219	static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
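
Lines 172-202 attach a protection-information (T10 PI) buffer to the data bio: allocate a bio_integrity_payload, size it to cover exactly the data sectors the bio carries, seed the reference tag in integrity-interval units, and add the metadata pages. A sketch of the same sequence; pi_sg/pi_sg_cnt stand in for the caller's metadata scatterlist, and the real nvmet code walks it with an sg_mapping_iter and caps at the payload size instead.

	static int my_alloc_bip(struct block_device *bdev, struct bio *bio,
				struct scatterlist *pi_sg, int pi_sg_cnt)
	{
		struct blk_integrity *bi = bdev_get_integrity(bdev);
		struct bio_integrity_payload *bip;
		struct scatterlist *sg;
		int i;

		if (!bi)
			return -ENODEV;	/* device has no integrity profile */

		bip = bio_integrity_alloc(bio, GFP_NOIO,
				min_t(unsigned int, pi_sg_cnt, BIO_MAX_PAGES));
		if (IS_ERR(bip))
			return PTR_ERR(bip);

		/* PI bytes covering exactly the data sectors in this bio */
		bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
		/* reference-tag seed, in integrity-interval units (cf. line 196) */
		bip_set_seed(bip, bio->bi_iter.bi_sector >>
				  (bi->interval_exp - SECTOR_SHIFT));

		for_each_sg(pi_sg, sg, pi_sg_cnt, i)
			if (bio_integrity_add_page(bio, sg_page(sg), sg->length,
						   sg->offset) != sg->length)
				return -ENOMEM;
		return 0;
	}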
In nvmet_bdev_execute_rw() — bio setup:

 229	struct bio *bio;
 262		bio = &req->b.inline_bio;
 263		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
 265		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
 267	bio_set_dev(bio, req->ns->bdev);
 268	bio->bi_iter.bi_sector = sector;
 269	bio->bi_private = req;
 270	bio->bi_end_io = nvmet_bio_done;
 271	bio->bi_opf = op;
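
Lines 262-271 show the small-transfer optimization: a bio embedded in the request, backed by an inline bio_vec array, avoids an allocation on the fast path, while larger transfers fall back to bio_alloc(). A sketch of that decision, reusing my_req from the first sketch; nvmet actually selects the inline bio by transfer length, approximated here by segment count.

	static struct bio *my_start_bio(struct my_req *req, struct block_device *bdev,
					sector_t sector, int sg_cnt, unsigned int op)
	{
		struct bio *bio;

		if (sg_cnt <= ARRAY_SIZE(req->inline_bvec)) {
			/* fast path: no allocation, the bio lives in the request */
			bio = &req->inline_bio;
			bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
		} else {
			/* a single bio holds at most BIO_MAX_PAGES vectors */
			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
		}
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = sector;
		bio->bi_private = req;
		bio->bi_end_io = my_bio_done;	/* completion hook from above */
		bio->bi_opf = op;
		return bio;
	}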
In nvmet_bdev_execute_rw(), continued — the chaining loop and final submit:

 279		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
 281			struct bio *prev = bio;
 284				rc = nvmet_bdev_alloc_bip(req, bio,
 287					bio_io_error(bio);
 292			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
 293			bio_set_dev(bio, req->ns->bdev);
 294			bio->bi_iter.bi_sector = sector;
 295			bio->bi_opf = op;
 297			bio_chain(bio, prev);
 306		rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
 308			bio_io_error(bio);
 313	submit_bio(bio);
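
Lines 279-313 are the interesting part: when bio_add_page() cannot take the next scatterlist element, the current bio is chained to a freshly allocated successor with bio_chain() and submitted, so a transfer of any size becomes a chain of bios, and bi_end_io on the first bio fires only after every link completes. A sketch of that loop, without the metadata branches, reusing the helpers above:

	static void my_submit_sgl(struct my_req *req, struct block_device *bdev,
				  struct scatterlist *sgl, int sg_cnt,
				  sector_t sector, unsigned int op)
	{
		struct bio *bio = my_start_bio(req, bdev, sector, sg_cnt, op);
		struct scatterlist *sg;
		int i;

		for_each_sg(sgl, sg, sg_cnt, i) {
			/* retry the segment on a fresh bio until it fits */
			while (bio_add_page(bio, sg_page(sg), sg->length,
					    sg->offset) != sg->length) {
				struct bio *prev = bio;

				bio = bio_alloc(GFP_KERNEL,
						min(sg_cnt, BIO_MAX_PAGES));
				bio_set_dev(bio, bdev);
				bio->bi_iter.bi_sector = sector;
				bio->bi_opf = op;

				/* hold prev's completion until bio finishes too */
				bio_chain(bio, prev);
				submit_bio(prev);
			}
			sector += sg->length >> SECTOR_SHIFT;
		}
		submit_bio(bio);
	}

Submitting prev as soon as it fills keeps only one under-construction bio in hand and lets the device start on earlier segments while later ones are still being assembled.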
In nvmet_bdev_execute_flush():

 319	struct bio *bio = &req->b.inline_bio;
 324	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
 325	bio_set_dev(bio, req->ns->bdev);
 326	bio->bi_private = req;
 327	bio->bi_end_io = nvmet_bio_done;
 328	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 330	submit_bio(bio);
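
Lines 319-330 implement NVMe Flush: a bio carrying no data pages, marked REQ_OP_WRITE | REQ_PREFLUSH, asks the block layer to flush the device's volatile write cache. The same shape with the illustrative helpers; the synchronous convenience wrapper for this pattern is blkdev_issue_flush().

	static void my_execute_flush(struct my_req *req, struct block_device *bdev)
	{
		struct bio *bio = &req->inline_bio;

		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
		bio_set_dev(bio, bdev);
		bio->bi_private = req;
		bio->bi_end_io = my_bio_done;
		/* zero-length preflush == "flush the volatile write cache" */
		bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

		submit_bio(bio);
	}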
In nvmet_bdev_discard_range():

 341			struct nvme_dsm_range *range, struct bio **bio)
 349			GFP_KERNEL, 0, bio);

In nvmet_bdev_execute_discard():

 360	struct bio *bio = NULL;
 370		status = nvmet_bdev_discard_range(req, &range, &bio);
 375	if (bio) {
 376		bio->bi_private = req;
 377		bio->bi_end_io = nvmet_bio_done;
 379			bio_io_error(bio);
 381			submit_bio(bio);
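
Lines 341-381 show the __blkdev_issue_discard() idiom: the struct bio ** out-parameter accumulates a chain across calls, so each NVMe DSM range appends to the same chain and a single submit_bio() (or bio_io_error() on failure) completes the lot; if no bio was ever allocated, the request finishes synchronously. A sketch with an illustrative range array:

	struct my_range {
		sector_t	sector;
		sector_t	nr_sects;
	};

	static void my_execute_discard(struct my_req *req, struct block_device *bdev,
				       const struct my_range *ranges, int nr_ranges)
	{
		struct bio *bio = NULL;
		int i, ret = 0;

		for (i = 0; i < nr_ranges; i++) {
			/* each call appends to the chain rooted at *bio */
			ret = __blkdev_issue_discard(bdev, ranges[i].sector,
						     ranges[i].nr_sects,
						     GFP_KERNEL, 0, &bio);
			if (ret)
				break;
		}

		if (bio) {
			bio->bi_private = req;
			bio->bi_end_io = my_bio_done;
			if (ret)
				bio_io_error(bio);	/* fail the partial chain */
			else
				submit_bio(bio);
		} else {
			complete_my_request(req, ret);	/* nothing was queued */
		}
	}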
In nvmet_bdev_execute_write_zeroes():

 408	struct bio *bio = NULL;
 421			GFP_KERNEL, &bio, 0);
 422	if (bio) {
 423		bio->bi_private = req;
 424		bio->bi_end_io = nvmet_bio_done;
 425		submit_bio(bio);
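
Lines 408-425 use the same out-parameter idiom for Write Zeroes: __blkdev_issue_zeroout() builds a bio chain through its struct bio ** argument (a flags value of 0 lets the device deallocate and permits the zero-page-write fallback), and the caller attaches its completion only to the final bio. Sketch under the same assumptions:

	static void my_execute_write_zeroes(struct my_req *req,
					    struct block_device *bdev,
					    sector_t sector, sector_t nr_sects)
	{
		struct bio *bio = NULL;
		int ret;

		ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
					     &bio, 0);
		if (bio) {
			bio->bi_private = req;
			bio->bi_end_io = my_bio_done;
			submit_bio(bio);
		} else {
			/* nothing queued, or an early error */
			complete_my_request(req, ret);
		}
	}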