Lines Matching refs:sctx
62 struct switch_ctx *sctx; in alloc_switch_ctx() local
64 sctx = kzalloc(struct_size(sctx, path_list, nr_paths), GFP_KERNEL); in alloc_switch_ctx()
65 if (!sctx) in alloc_switch_ctx()
68 sctx->ti = ti; in alloc_switch_ctx()
69 sctx->region_size = region_size; in alloc_switch_ctx()
71 ti->private = sctx; in alloc_switch_ctx()
73 return sctx; in alloc_switch_ctx()
78 struct switch_ctx *sctx = ti->private; in alloc_region_table() local
82 if (!(sctx->region_size & (sctx->region_size - 1))) in alloc_region_table()
83 sctx->region_size_bits = __ffs(sctx->region_size); in alloc_region_table()
85 sctx->region_size_bits = -1; in alloc_region_table()
87 sctx->region_table_entry_bits = 1; in alloc_region_table()
88 while (sctx->region_table_entry_bits < sizeof(region_table_slot_t) * 8 && in alloc_region_table()
89 (region_table_slot_t)1 << sctx->region_table_entry_bits < nr_paths) in alloc_region_table()
90 sctx->region_table_entry_bits++; in alloc_region_table()
92 sctx->region_entries_per_slot = (sizeof(region_table_slot_t) * 8) / sctx->region_table_entry_bits; in alloc_region_table()
93 if (!(sctx->region_entries_per_slot & (sctx->region_entries_per_slot - 1))) in alloc_region_table()
94 sctx->region_entries_per_slot_bits = __ffs(sctx->region_entries_per_slot); in alloc_region_table()
96 sctx->region_entries_per_slot_bits = -1; in alloc_region_table()
98 if (sector_div(nr_regions, sctx->region_size)) in alloc_region_table()
105 sctx->nr_regions = nr_regions; in alloc_region_table()
108 if (sector_div(nr_slots, sctx->region_entries_per_slot)) in alloc_region_table()
116 sctx->region_table = vmalloc(array_size(nr_slots, in alloc_region_table()
118 if (!sctx->region_table) { in alloc_region_table()
126 static void switch_get_position(struct switch_ctx *sctx, unsigned long region_nr, in switch_get_position() argument
129 if (sctx->region_entries_per_slot_bits >= 0) { in switch_get_position()
130 *region_index = region_nr >> sctx->region_entries_per_slot_bits; in switch_get_position()
131 *bit = region_nr & (sctx->region_entries_per_slot - 1); in switch_get_position()
133 *region_index = region_nr / sctx->region_entries_per_slot; in switch_get_position()
134 *bit = region_nr % sctx->region_entries_per_slot; in switch_get_position()
137 *bit *= sctx->region_table_entry_bits; in switch_get_position()
140 static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr) in switch_region_table_read() argument
145 switch_get_position(sctx, region_nr, &region_index, &bit); in switch_region_table_read()
147 return (READ_ONCE(sctx->region_table[region_index]) >> bit) & in switch_region_table_read()
148 ((1 << sctx->region_table_entry_bits) - 1); in switch_region_table_read()
154 static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset) in switch_get_path_nr() argument
160 if (sctx->region_size_bits >= 0) in switch_get_path_nr()
161 p >>= sctx->region_size_bits; in switch_get_path_nr()
163 sector_div(p, sctx->region_size); in switch_get_path_nr()
165 path_nr = switch_region_table_read(sctx, p); in switch_get_path_nr()
168 if (unlikely(path_nr >= sctx->nr_paths)) in switch_get_path_nr()
174 static void switch_region_table_write(struct switch_ctx *sctx, unsigned long region_nr, in switch_region_table_write() argument
181 switch_get_position(sctx, region_nr, &region_index, &bit); in switch_region_table_write()
183 pte = sctx->region_table[region_index]; in switch_region_table_write()
184 pte &= ~((((region_table_slot_t)1 << sctx->region_table_entry_bits) - 1) << bit); in switch_region_table_write()
186 sctx->region_table[region_index] = pte; in switch_region_table_write()
192 static void initialise_region_table(struct switch_ctx *sctx) in initialise_region_table() argument
197 for (region_nr = 0; region_nr < sctx->nr_regions; region_nr++) { in initialise_region_table()
198 switch_region_table_write(sctx, region_nr, path_nr); in initialise_region_table()
199 if (++path_nr >= sctx->nr_paths) in initialise_region_table()
206 struct switch_ctx *sctx = ti->private; in parse_path() local
211 &sctx->path_list[sctx->nr_paths].dmdev); in parse_path()
219 dm_put_device(ti, sctx->path_list[sctx->nr_paths].dmdev); in parse_path()
223 sctx->path_list[sctx->nr_paths].start = start; in parse_path()
225 sctx->nr_paths++; in parse_path()
235 struct switch_ctx *sctx = ti->private; in switch_dtr() local
237 while (sctx->nr_paths--) in switch_dtr()
238 dm_put_device(ti, sctx->path_list[sctx->nr_paths].dmdev); in switch_dtr()
240 vfree(sctx->region_table); in switch_dtr()
241 kfree(sctx); in switch_dtr()
260 struct switch_ctx *sctx; in switch_ctr() local
286 sctx = alloc_switch_ctx(ti, nr_paths, region_size); in switch_ctr()
287 if (!sctx) { in switch_ctr()
306 initialise_region_table(sctx); in switch_ctr()
321 struct switch_ctx *sctx = ti->private; in switch_map() local
323 unsigned path_nr = switch_get_path_nr(sctx, offset); in switch_map()
325 bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev); in switch_map()
326 bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; in switch_map()
373 static int process_set_region_mappings(struct switch_ctx *sctx, in process_set_region_mappings() argument
413 unlikely(region_index + num_write >= sctx->nr_regions)) { in process_set_region_mappings()
415 region_index, num_write, sctx->nr_regions); in process_set_region_mappings()
421 path_nr = switch_region_table_read(sctx, region_index - cycle_length); in process_set_region_mappings()
422 switch_region_table_write(sctx, region_index, path_nr); in process_set_region_mappings()
449 if (unlikely(region_index >= sctx->nr_regions)) { in process_set_region_mappings()
450 DMWARN("invalid set_region_mappings region number: %lu >= %lu", region_index, sctx->nr_regions); in process_set_region_mappings()
453 if (unlikely(path_nr >= sctx->nr_paths)) { in process_set_region_mappings()
454 DMWARN("invalid set_region_mappings device: %lu >= %u", path_nr, sctx->nr_paths); in process_set_region_mappings()
458 switch_region_table_write(sctx, region_index, path_nr); in process_set_region_mappings()
474 struct switch_ctx *sctx = ti->private; in switch_message() local
480 r = process_set_region_mappings(sctx, argc, argv); in switch_message()
492 struct switch_ctx *sctx = ti->private; in switch_status() local
502 DMEMIT("%u %u 0", sctx->nr_paths, sctx->region_size); in switch_status()
503 for (path_nr = 0; path_nr < sctx->nr_paths; path_nr++) in switch_status()
504 DMEMIT(" %s %llu", sctx->path_list[path_nr].dmdev->name, in switch_status()
505 (unsigned long long)sctx->path_list[path_nr].start); in switch_status()
517 struct switch_ctx *sctx = ti->private; in switch_prepare_ioctl() local
520 path_nr = switch_get_path_nr(sctx, 0); in switch_prepare_ioctl()
522 *bdev = sctx->path_list[path_nr].dmdev->bdev; in switch_prepare_ioctl()
527 if (ti->len + sctx->path_list[path_nr].start != in switch_prepare_ioctl()
536 struct switch_ctx *sctx = ti->private; in switch_iterate_devices() local
540 for (path_nr = 0; path_nr < sctx->nr_paths; path_nr++) { in switch_iterate_devices()
541 r = fn(ti, sctx->path_list[path_nr].dmdev, in switch_iterate_devices()
542 sctx->path_list[path_nr].start, ti->len, data); in switch_iterate_devices()