Lines matching refs: bio
46 static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter) in bio_copy_from_iter() argument
51 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_from_iter()
77 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) in bio_copy_to_iter() argument
82 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_to_iter()
107 static int bio_uncopy_user(struct bio *bio) in bio_uncopy_user() argument
109 struct bio_map_data *bmd = bio->bi_private; in bio_uncopy_user()
120 else if (bio_data_dir(bio) == READ) in bio_uncopy_user()
121 ret = bio_copy_to_iter(bio, bmd->iter); in bio_uncopy_user()
123 bio_free_pages(bio); in bio_uncopy_user()
126 bio_put(bio); in bio_uncopy_user()
135 struct bio *bio, *bounce_bio; in bio_copy_user_iov() local
158 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_copy_user_iov()
159 if (!bio) in bio_copy_user_iov()
161 bio->bi_opf |= req_op(rq); in bio_copy_user_iov()
193 if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) { in bio_copy_user_iov()
204 map_data->offset += bio->bi_iter.bi_size; in bio_copy_user_iov()
212 ret = bio_copy_from_iter(bio, iter); in bio_copy_user_iov()
217 zero_fill_bio(bio); in bio_copy_user_iov()
218 iov_iter_advance(iter, bio->bi_iter.bi_size); in bio_copy_user_iov()
221 bio->bi_private = bmd; in bio_copy_user_iov()
223 bounce_bio = bio; in bio_copy_user_iov()
236 bio_free_pages(bio); in bio_copy_user_iov()
237 bio_put(bio); in bio_copy_user_iov()
247 struct bio *bio, *bounce_bio; in bio_map_user_iov() local
254 bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES)); in bio_map_user_iov()
255 if (!bio) in bio_map_user_iov()
257 bio->bi_opf |= req_op(rq); in bio_map_user_iov()
285 if (!bio_add_hw_page(rq->q, bio, page, n, offs, in bio_map_user_iov()
314 bio_get(bio); in bio_map_user_iov()
316 bounce_bio = bio; in bio_map_user_iov()
329 bio_put(bio); in bio_map_user_iov()
331 bio_release_pages(bio, false); in bio_map_user_iov()
332 bio_put(bio); in bio_map_user_iov()
345 static void bio_unmap_user(struct bio *bio) in bio_unmap_user() argument
347 bio_release_pages(bio, bio_data_dir(bio) == READ); in bio_unmap_user()
348 bio_put(bio); in bio_unmap_user()
349 bio_put(bio); in bio_unmap_user()
352 static void bio_invalidate_vmalloc_pages(struct bio *bio) in bio_invalidate_vmalloc_pages() argument
355 if (bio->bi_private && !op_is_write(bio_op(bio))) { in bio_invalidate_vmalloc_pages()
358 for (i = 0; i < bio->bi_vcnt; i++) in bio_invalidate_vmalloc_pages()
359 len += bio->bi_io_vec[i].bv_len; in bio_invalidate_vmalloc_pages()
360 invalidate_kernel_vmap_range(bio->bi_private, len); in bio_invalidate_vmalloc_pages()
365 static void bio_map_kern_endio(struct bio *bio) in bio_map_kern_endio() argument
367 bio_invalidate_vmalloc_pages(bio); in bio_map_kern_endio()
368 bio_put(bio); in bio_map_kern_endio()
381 static struct bio *bio_map_kern(struct request_queue *q, void *data, in bio_map_kern()
391 struct bio *bio; in bio_map_kern() local
393 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_map_kern()
394 if (!bio) in bio_map_kern()
399 bio->bi_private = data; in bio_map_kern()
416 if (bio_add_pc_page(q, bio, page, bytes, in bio_map_kern()
419 bio_put(bio); in bio_map_kern()
428 bio->bi_end_io = bio_map_kern_endio; in bio_map_kern()
429 return bio; in bio_map_kern()
432 static void bio_copy_kern_endio(struct bio *bio) in bio_copy_kern_endio() argument
434 bio_free_pages(bio); in bio_copy_kern_endio()
435 bio_put(bio); in bio_copy_kern_endio()
438 static void bio_copy_kern_endio_read(struct bio *bio) in bio_copy_kern_endio_read() argument
440 char *p = bio->bi_private; in bio_copy_kern_endio_read()
444 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_kern_endio_read()
449 bio_copy_kern_endio(bio); in bio_copy_kern_endio_read()
463 static struct bio *bio_copy_kern(struct request_queue *q, void *data, in bio_copy_kern()
469 struct bio *bio; in bio_copy_kern() local
480 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_copy_kern()
481 if (!bio) in bio_copy_kern()
498 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) in bio_copy_kern()
506 bio->bi_end_io = bio_copy_kern_endio_read; in bio_copy_kern()
507 bio->bi_private = data; in bio_copy_kern()
509 bio->bi_end_io = bio_copy_kern_endio; in bio_copy_kern()
512 return bio; in bio_copy_kern()
515 bio_free_pages(bio); in bio_copy_kern()
516 bio_put(bio); in bio_copy_kern()
524 int blk_rq_append_bio(struct request *rq, struct bio **bio) in blk_rq_append_bio() argument
526 struct bio *orig_bio = *bio; in blk_rq_append_bio()
531 blk_queue_bounce(rq->q, bio); in blk_rq_append_bio()
533 bio_for_each_bvec(bv, *bio, iter) in blk_rq_append_bio()
536 if (!rq->bio) { in blk_rq_append_bio()
537 blk_rq_bio_prep(rq, *bio, nr_segs); in blk_rq_append_bio()
539 if (!ll_back_merge_fn(rq, *bio, nr_segs)) { in blk_rq_append_bio()
540 if (orig_bio != *bio) { in blk_rq_append_bio()
541 bio_put(*bio); in blk_rq_append_bio()
542 *bio = orig_bio; in blk_rq_append_bio()
547 rq->biotail->bi_next = *bio; in blk_rq_append_bio()
548 rq->biotail = *bio; in blk_rq_append_bio()
549 rq->__data_len += (*bio)->bi_iter.bi_size; in blk_rq_append_bio()
550 bio_crypt_free_ctx(*bio); in blk_rq_append_bio()
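As a caller-side illustration of the blk_rq_append_bio() matches above: a minimal sketch, assuming a driver that has already built a bio for a passthrough request. The function name example_append_bio is illustrative only; the double pointer exists because blk_queue_bounce() may substitute a bounce bio, as the listed lines show.

#include <linux/blkdev.h>
#include <linux/bio.h>

/*
 * Illustrative only: attach an already-built bio to a passthrough request.
 * blk_rq_append_bio() takes a struct bio ** because the bounce path may
 * replace the caller's bio; on the back-merge failure path the original
 * pointer is restored before returning an error.
 */
static int example_append_bio(struct request *rq, struct bio *bio)
{
	int ret;

	ret = blk_rq_append_bio(rq, &bio);
	if (ret)
		return ret;

	/* On success the (possibly bounced) bio is now part of rq. */
	return 0;
}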
584 struct bio *bio = NULL; in blk_rq_map_user_iov() local
606 if (!bio) in blk_rq_map_user_iov()
607 bio = rq->bio; in blk_rq_map_user_iov()
613 blk_rq_unmap_user(bio); in blk_rq_map_user_iov()
615 rq->bio = NULL; in blk_rq_map_user_iov()
644 int blk_rq_unmap_user(struct bio *bio) in blk_rq_unmap_user() argument
646 struct bio *mapped_bio; in blk_rq_unmap_user()
649 while (bio) { in blk_rq_unmap_user()
650 mapped_bio = bio; in blk_rq_unmap_user()
651 if (unlikely(bio_flagged(bio, BIO_BOUNCED))) in blk_rq_unmap_user()
652 mapped_bio = bio->bi_private; in blk_rq_unmap_user()
654 if (bio->bi_private) { in blk_rq_unmap_user()
662 mapped_bio = bio; in blk_rq_unmap_user()
663 bio = bio->bi_next; in blk_rq_unmap_user()
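A hedged sketch of the usual map/unmap pairing seen from a driver, assuming driver context and a passthrough read; example_user_read and its arguments are illustrative. The bio pointer is saved right after mapping, mirroring what the blk_rq_map_user_iov() matches above do, and blk_rq_unmap_user() is handed that saved chain. Submission is elided because the execute helper's signature varies across kernel versions.

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/err.h>

/*
 * Illustrative sketch: map a user buffer for a passthrough read, submit,
 * then unmap.  blk_rq_map_user() either pins the user pages
 * (bio_map_user_iov) or bounces them through kernel pages
 * (bio_copy_user_iov); blk_rq_unmap_user() undoes whichever path was taken.
 */
static int example_user_read(struct request_queue *q, void __user *ubuf,
			     unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_put;

	/* Keep the mapped bio chain; rq->bio is consumed as the request completes. */
	bio = rq->bio;

	/* ... submit rq and wait for completion here ... */

	/* Copies data back to user space on the bounce path, then drops the
	 * references taken at map time. */
	ret = blk_rq_unmap_user(bio);
out_put:
	blk_put_request(rq);
	return ret;
}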
689 struct bio *bio, *orig_bio; in blk_rq_map_kern() local
698 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); in blk_rq_map_kern()
700 bio = bio_map_kern(q, kbuf, len, gfp_mask); in blk_rq_map_kern()
702 if (IS_ERR(bio)) in blk_rq_map_kern()
703 return PTR_ERR(bio); in blk_rq_map_kern()
705 bio->bi_opf &= ~REQ_OP_MASK; in blk_rq_map_kern()
706 bio->bi_opf |= req_op(rq); in blk_rq_map_kern()
708 orig_bio = bio; in blk_rq_map_kern()
709 ret = blk_rq_append_bio(rq, &bio); in blk_rq_map_kern()
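For the kernel-buffer counterpart, a minimal sketch assuming an already-allocated passthrough request; example_kern_write is an illustrative name. As the matches above show, blk_rq_map_kern() itself decides whether to copy (bio_copy_kern) or map the buffer in place (bio_map_kern), so the caller just hands over the buffer.

#include <linux/blkdev.h>

/*
 * Illustrative only: attach a kernel buffer to a passthrough request.
 * Copy versus direct mapping is chosen inside blk_rq_map_kern(), not by
 * the caller.
 */
static int example_kern_write(struct request_queue *q, struct request *rq,
			      void *kbuf, unsigned int len)
{
	return blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
}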