Lines Matching full:clone
78 * One of these is allocated per clone bio.
88 struct bio clone; member
93 * It contains the first clone used for that original.
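For orientation, a condensed sketch of the two structures these matches belong to (some fields omitted). Everything below leans on one invariant: clone is the last member of struct dm_target_io, and tio is the last member of struct dm_io, so a clone bio always sits at the tail of its containing allocation and container_of() can walk back out.

/* Condensed sketch; unrelated fields omitted. */
struct dm_target_io {
	unsigned magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	bool inside_dm_io;	/* true for the first clone, embedded in dm_io */
	struct bio clone;	/* must stay last */
};

struct dm_io {
	unsigned magic;
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *orig_bio;
	struct dm_target_io tio;	/* must stay last; its last member is 'struct bio' */
};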
111 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in dm_per_bio_data()
113 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; in dm_per_bio_data()
114 return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size; in dm_per_bio_data()
122 return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone)); in dm_bio_from_per_bio_data()
124 return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone)); in dm_bio_from_per_bio_data()
130 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; in dm_bio_get_target_bio_nr()
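These three helpers are the target-facing side of that layout: per-target data is carved out of the bioset front pad immediately before the dm_target_io, so recovering it is pure pointer arithmetic in both directions. A hypothetical target might use them like this (my_per_bio_data and the function names are illustrative, not from this file):

/* Hypothetical target code; my_per_bio_data is illustrative only. */
struct my_per_bio_data {
	sector_t saved_sector;
};

/* In the ctr: ti->per_io_data_size = sizeof(struct my_per_bio_data); */

static int my_map(struct dm_target *ti, struct bio *bio)
{
	struct my_per_bio_data *pb = dm_per_bio_data(bio, sizeof(*pb));

	pb->saved_sector = bio->bi_iter.bi_sector;	/* stash state for end_io */
	/* ... remap bio to the underlying device ... */
	return DM_MAPIO_REMAPPED;
}

static int my_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
	struct my_per_bio_data *pb = dm_per_bio_data(bio, sizeof(*pb));

	/* pb->saved_sector is still valid here, and
	 * dm_bio_from_per_bio_data(pb, sizeof(*pb)) would return this same bio. */
	return DM_ENDIO_DONE;
}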
592 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in dm_start_time_ns_from_clone()
634 struct bio *clone; in alloc_io() local
636 clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs); in alloc_io()
637 if (!clone) in alloc_io()
640 tio = container_of(clone, struct dm_target_io, clone); in alloc_io()
659 bio_put(&io->tio.clone); in free_io()
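alloc_io() works because md->io_bs was initialized with a front pad large enough for an entire struct dm_io (see the front_pad math at line 2947 below): bio_alloc_bioset() returns a bio that is really the clone member at the tail of a dm_io, and two container_of() steps walk back out. A rough sketch with field setup and accounting trimmed:

/* Rough shape of alloc_io(); magic numbers, accounting, locking trimmed. */
static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;	/* this clone is embedded, not standalone */

	io = container_of(tio, struct dm_io, tio);
	io->md = md;
	io->orig_bio = bio;
	atomic_set(&io->io_count, 1);

	return io;
}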
671 struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs); in alloc_tio() local
672 if (!clone) in alloc_tio()
675 tio = container_of(clone, struct dm_target_io, clone); in alloc_tio()
691 bio_put(&tio->clone); in free_tio()
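free_tio() is the asymmetric counterpart: only clones allocated separately from md->bs are put here, while the first clone is embedded in the dm_io and is released via bio_put(&io->tio.clone) in free_io() at line 659. Roughly:

/* Rough shape of free_tio(). */
static void free_tio(struct dm_target_io *tio)
{
	if (tio->inside_dm_io)
		return;		/* embedded clone; freed with the dm_io itself */
	bio_put(&tio->clone);
}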
993 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in clone_endio()
1268 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in dm_accept_partial_bio()
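dm_accept_partial_bio() needs that same container_of() recovery because targets only ever hold the clone bio. It lets a map function shrink how much of the clone it accepts, after which device-mapper resplits and resubmits the remainder. A hypothetical map function that refuses to cross a fictional MY_CHUNK_SECTORS boundary:

/* Hypothetical usage; MY_CHUNK_SECTORS and my_ctx are illustrative only. */
#define MY_CHUNK_SECTORS 128

struct my_ctx {
	struct dm_dev *dev;
};

static int chunked_map(struct dm_target *ti, struct bio *bio)
{
	struct my_ctx *mc = ti->private;
	sector_t start = dm_target_offset(ti, bio->bi_iter.bi_sector);
	unsigned remaining = MY_CHUNK_SECTORS - (start & (MY_CHUNK_SECTORS - 1));

	if (bio_sectors(bio) > remaining)
		dm_accept_partial_bio(bio, remaining);	/* rest comes back as a new clone */

	bio_set_dev(bio, mc->dev->bdev);
	return DM_MAPIO_REMAPPED;
}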
1302 struct bio *clone = &tio->clone; in __map_bio() local
1307 clone->bi_end_io = clone_endio; in __map_bio()
1310 * Map the clone. If r == 0 we don't need to do in __map_bio()
1315 sector = clone->bi_iter.bi_sector; in __map_bio()
1317 if (unlikely(swap_bios_limit(ti, clone))) { in __map_bio()
1325 r = ti->type->map(ti, clone); in __map_bio()
1331 trace_block_bio_remap(clone->bi_disk->queue, clone, in __map_bio()
1333 ret = submit_bio_noacct(clone); in __map_bio()
1336 if (unlikely(swap_bios_limit(ti, clone))) { in __map_bio()
1344 if (unlikely(swap_bios_limit(ti, clone))) { in __map_bio()
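Those swap_bios_limit() checks bracket a switch on the map return value; condensed, with the swap-throttling semaphore handling elided, the dispatch in __map_bio() looks roughly like this:

/* Condensed dispatch on r = ti->type->map(ti, clone); throttling elided. */
switch (r) {
case DM_MAPIO_SUBMITTED:
	break;	/* target owns the clone now and submits it itself */
case DM_MAPIO_REMAPPED:
	/* target redirected the clone; trace it and hand it to the block layer */
	trace_block_bio_remap(clone->bi_disk->queue, clone,
			      bio_dev(io->orig_bio), sector);
	ret = submit_bio_noacct(clone);
	break;
case DM_MAPIO_KILL:
	free_tio(tio);
	dec_pending(io, BLK_STS_IOERR);
	break;
case DM_MAPIO_REQUEUE:
	free_tio(tio);
	dec_pending(io, BLK_STS_DM_REQUEUE);
	break;
default:
	BUG();
}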
1371 struct bio *clone = &tio->clone; in clone_bio() local
1374 __bio_clone_fast(clone, bio); in clone_bio()
1376 r = bio_crypt_clone(clone, bio, GFP_NOIO); in clone_bio()
1389 r = bio_integrity_clone(clone, bio, GFP_NOIO); in clone_bio()
1394 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); in clone_bio()
1395 clone->bi_iter.bi_size = to_bytes(len); in clone_bio()
1398 bio_integrity_trim(clone); in clone_bio()
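After __bio_clone_fast() the clone shares the original's bio_vec array, so narrowing it to this target's window is pure iterator math, with no page copying. With illustrative numbers: if the original starts at sector 0 and the clone should cover sectors [64, 128):

/* Illustrative numbers only. */
sector = 64;	/* first sector this clone covers  */
len    = 64;	/* number of sectors for the clone */

bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));	/* skip 32 KiB */
clone->bi_iter.bi_size = to_bytes(len);					/* keep 32 KiB */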
1414 bio_list_add(blist, &tio->clone); in alloc_multiple_bios()
1429 bio_list_add(blist, &tio->clone); in alloc_multiple_bios()
1437 tio = container_of(bio, struct dm_target_io, clone); in alloc_multiple_bios()
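Those three lines come from a two-pass allocation scheme: first try to get all num_bios clones with GFP_NOWAIT, and on any failure drain the list and retry with GFP_NOIO (the real function also takes a mutex on the second pass, elided here). Sketched:

/* Rough shape of the retry loop in alloc_multiple_bios(); locking elided. */
for (try = 0; try < 2; try++) {
	int bio_nr;
	struct bio *bio;

	for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
		tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
		if (!tio)
			break;
		bio_list_add(blist, &tio->clone);
	}
	if (bio_nr == num_bios)
		return;		/* got all of them */

	/* partial failure: give back every clone allocated so far, then retry */
	while ((bio = bio_list_pop(blist))) {
		tio = container_of(bio, struct dm_target_io, clone);
		free_tio(tio);
	}
}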
1446 struct bio *clone = &tio->clone; in __clone_and_map_simple_bio() local
1450 __bio_clone_fast(clone, ci->bio); in __clone_and_map_simple_bio()
1452 bio_setup_sector(clone, ci->sector, *len); in __clone_and_map_simple_bio()
1467 tio = container_of(bio, struct dm_target_io, clone); in __send_duplicate_bios()
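__send_duplicate_bios() ties the two pieces together: allocate all the clones up front, then pop each one off the list and map it. Roughly:

/* Rough shape of __send_duplicate_bios(). */
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				  unsigned num_bios, unsigned *len)
{
	struct bio_list blist = BIO_EMPTY_LIST;
	struct bio *bio;
	struct dm_target_io *tio;

	alloc_multiple_bios(&blist, ci, ti, num_bios);

	while ((bio = bio_list_pop(&blist))) {
		tio = container_of(bio, struct dm_target_io, clone);
		__clone_and_map_simple_bio(ci, tio, len);
	}
}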
1481 * the basis for the clone(s). in __send_empty_flush()
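That comment describes the on-stack bio in __send_empty_flush(): a zero-length preflush bio is built on the stack purely to seed the clones, one per target. Condensed:

/* Condensed __send_empty_flush(); the on-stack bio only seeds the clones. */
struct bio flush_bio;

bio_init(&flush_bio, NULL, 0);
flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
ci->bio = &flush_bio;
ci->sector_count = 0;

while ((ti = dm_table_get_target(ci->map, target_nr++)))
	__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);

bio_uninit(ci->bio);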
1660 * We take a clone of the original to store in in __split_and_process_bio()
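Here the clone is made with bio_split(): when part of the original still needs processing, the already-mapped front half is split off to act as ci.io->orig_bio for completion accounting, and the untouched remainder is chained and resubmitted. Roughly:

/* Condensed remainder handling in __split_and_process_bio(). */
struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
			  GFP_NOIO, &md->queue->bio_split);

ci.io->orig_bio = b;		/* completion and accounting now target the split */
bio_chain(b, bio);		/* remainder won't complete before the split does */
trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
ret = submit_bio_noacct(bio);	/* resubmit the untouched remainder */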
2947 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); in dm_alloc_md_mempools()
2957 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); in dm_alloc_md_mempools()
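These two front_pad values reserve the space that all the container_of() and offsetof() arithmetic above depends on. A worked example with illustrative sizes (real values depend on config and architecture):

/*
 * Illustrative math only; assume per_io_data_size = 20,
 * __alignof__(struct dm_target_io) = 8, and
 * offsetof(struct dm_target_io, clone) = 40.
 *
 *   front_pad = roundup(20, 8) + 40 = 24 + 40 = 64
 *
 * Every bio from md->bs is then preceded by 64 bytes:
 *
 *   | 4 pad | 20 per-bio data | 40 dm_target_io header | struct bio clone ...
 *           ^ dm_per_bio_data(bio, 20)                  ^ bio seen by the target
 *
 * md->io_bs adds another roundup plus offsetof(struct dm_io, tio) on top,
 * which is exactly the extra term subtracted at line 114 above.
 */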