Lines Matching refs:sctx
62 struct switch_ctx *sctx; in alloc_switch_ctx() local
64 sctx = kzalloc(sizeof(struct switch_ctx) + nr_paths * sizeof(struct switch_path), in alloc_switch_ctx()
66 if (!sctx) in alloc_switch_ctx()
69 sctx->ti = ti; in alloc_switch_ctx()
70 sctx->region_size = region_size; in alloc_switch_ctx()
72 ti->private = sctx; in alloc_switch_ctx()
74 return sctx; in alloc_switch_ctx()
79 struct switch_ctx *sctx = ti->private; in alloc_region_table() local
83 if (!(sctx->region_size & (sctx->region_size - 1))) in alloc_region_table()
84 sctx->region_size_bits = __ffs(sctx->region_size); in alloc_region_table()
86 sctx->region_size_bits = -1; in alloc_region_table()
88 sctx->region_table_entry_bits = 1; in alloc_region_table()
89 while (sctx->region_table_entry_bits < sizeof(region_table_slot_t) * 8 && in alloc_region_table()
90 (region_table_slot_t)1 << sctx->region_table_entry_bits < nr_paths) in alloc_region_table()
91 sctx->region_table_entry_bits++; in alloc_region_table()
93 sctx->region_entries_per_slot = (sizeof(region_table_slot_t) * 8) / sctx->region_table_entry_bits; in alloc_region_table()
94 if (!(sctx->region_entries_per_slot & (sctx->region_entries_per_slot - 1))) in alloc_region_table()
95 sctx->region_entries_per_slot_bits = __ffs(sctx->region_entries_per_slot); in alloc_region_table()
97 sctx->region_entries_per_slot_bits = -1; in alloc_region_table()
99 if (sector_div(nr_regions, sctx->region_size)) in alloc_region_table()
106 sctx->nr_regions = nr_regions; in alloc_region_table()
109 if (sector_div(nr_slots, sctx->region_entries_per_slot)) in alloc_region_table()
117 sctx->region_table = vmalloc(nr_slots * sizeof(region_table_slot_t)); in alloc_region_table()
118 if (!sctx->region_table) { in alloc_region_table()
126 static void switch_get_position(struct switch_ctx *sctx, unsigned long region_nr, in switch_get_position() argument
129 if (sctx->region_entries_per_slot_bits >= 0) { in switch_get_position()
130 *region_index = region_nr >> sctx->region_entries_per_slot_bits; in switch_get_position()
131 *bit = region_nr & (sctx->region_entries_per_slot - 1); in switch_get_position()
133 *region_index = region_nr / sctx->region_entries_per_slot; in switch_get_position()
134 *bit = region_nr % sctx->region_entries_per_slot; in switch_get_position()
137 *bit *= sctx->region_table_entry_bits; in switch_get_position()
140 static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr) in switch_region_table_read() argument
145 switch_get_position(sctx, region_nr, &region_index, &bit); in switch_region_table_read()
147 return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) & in switch_region_table_read()
148 ((1 << sctx->region_table_entry_bits) - 1); in switch_region_table_read()
154 static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset) in switch_get_path_nr() argument
160 if (sctx->region_size_bits >= 0) in switch_get_path_nr()
161 p >>= sctx->region_size_bits; in switch_get_path_nr()
163 sector_div(p, sctx->region_size); in switch_get_path_nr()
165 path_nr = switch_region_table_read(sctx, p); in switch_get_path_nr()
168 if (unlikely(path_nr >= sctx->nr_paths)) in switch_get_path_nr()
174 static void switch_region_table_write(struct switch_ctx *sctx, unsigned long region_nr, in switch_region_table_write() argument
181 switch_get_position(sctx, region_nr, &region_index, &bit); in switch_region_table_write()
183 pte = sctx->region_table[region_index]; in switch_region_table_write()
184 pte &= ~((((region_table_slot_t)1 << sctx->region_table_entry_bits) - 1) << bit); in switch_region_table_write()
186 sctx->region_table[region_index] = pte; in switch_region_table_write()
192 static void initialise_region_table(struct switch_ctx *sctx) in initialise_region_table() argument
197 for (region_nr = 0; region_nr < sctx->nr_regions; region_nr++) { in initialise_region_table()
198 switch_region_table_write(sctx, region_nr, path_nr); in initialise_region_table()
199 if (++path_nr >= sctx->nr_paths) in initialise_region_table()
206 struct switch_ctx *sctx = ti->private; in parse_path() local
211 &sctx->path_list[sctx->nr_paths].dmdev); in parse_path()
219 dm_put_device(ti, sctx->path_list[sctx->nr_paths].dmdev); in parse_path()
223 sctx->path_list[sctx->nr_paths].start = start; in parse_path()
225 sctx->nr_paths++; in parse_path()
235 struct switch_ctx *sctx = ti->private; in switch_dtr() local
237 while (sctx->nr_paths--) in switch_dtr()
238 dm_put_device(ti, sctx->path_list[sctx->nr_paths].dmdev); in switch_dtr()
240 vfree(sctx->region_table); in switch_dtr()
241 kfree(sctx); in switch_dtr()
260 struct switch_ctx *sctx; in switch_ctr() local
286 sctx = alloc_switch_ctx(ti, nr_paths, region_size); in switch_ctr()
287 if (!sctx) { in switch_ctr()
306 initialise_region_table(sctx); in switch_ctr()
321 struct switch_ctx *sctx = ti->private; in switch_map() local
323 unsigned path_nr = switch_get_path_nr(sctx, offset); in switch_map()
325 bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; in switch_map()
326 bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; in switch_map()
373 static int process_set_region_mappings(struct switch_ctx *sctx, in process_set_region_mappings() argument
413 unlikely(region_index + num_write >= sctx->nr_regions)) { in process_set_region_mappings()
415 region_index, num_write, sctx->nr_regions); in process_set_region_mappings()
421 path_nr = switch_region_table_read(sctx, region_index - cycle_length); in process_set_region_mappings()
422 switch_region_table_write(sctx, region_index, path_nr); in process_set_region_mappings()
449 if (unlikely(region_index >= sctx->nr_regions)) { in process_set_region_mappings()
450 DMWARN("invalid set_region_mappings region number: %lu >= %lu", region_index, sctx->nr_regions); in process_set_region_mappings()
453 if (unlikely(path_nr >= sctx->nr_paths)) { in process_set_region_mappings()
454 DMWARN("invalid set_region_mappings device: %lu >= %u", path_nr, sctx->nr_paths); in process_set_region_mappings()
458 switch_region_table_write(sctx, region_index, path_nr); in process_set_region_mappings()
473 struct switch_ctx *sctx = ti->private; in switch_message() local
479 r = process_set_region_mappings(sctx, argc, argv); in switch_message()
491 struct switch_ctx *sctx = ti->private; in switch_status() local
501 DMEMIT("%u %u 0", sctx->nr_paths, sctx->region_size); in switch_status()
502 for (path_nr = 0; path_nr < sctx->nr_paths; path_nr++) in switch_status()
503 DMEMIT(" %s %llu", sctx->path_list[path_nr].dmdev->name, in switch_status()
504 (unsigned long long)sctx->path_list[path_nr].start); in switch_status()
517 struct switch_ctx *sctx = ti->private; in switch_prepare_ioctl() local
520 path_nr = switch_get_path_nr(sctx, 0); in switch_prepare_ioctl()
522 *bdev = sctx->path_list[path_nr].dmdev->bdev; in switch_prepare_ioctl()
523 *mode = sctx->path_list[path_nr].dmdev->mode; in switch_prepare_ioctl()
528 if (ti->len + sctx->path_list[path_nr].start != in switch_prepare_ioctl()
537 struct switch_ctx *sctx = ti->private; in switch_iterate_devices() local
541 for (path_nr = 0; path_nr < sctx->nr_paths; path_nr++) { in switch_iterate_devices()
542 r = fn(ti, sctx->path_list[path_nr].dmdev, in switch_iterate_devices()
543 sctx->path_list[path_nr].start, ti->len, data); in switch_iterate_devices()
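
The matches in switch_get_position(), switch_region_table_read() and switch_region_table_write() above implement a bit-packed region table: each region's path number occupies region_table_entry_bits bits of a region_table_slot_t, with region_entries_per_slot entries per slot. The following is a minimal standalone userspace sketch of that packing arithmetic, not the kernel code itself; the names (region_table, get_position, table_read, table_write) and the fixed 64-bit slot width are assumptions made for illustration, and the power-of-two fast paths from alloc_region_table() are omitted.

/*
 * Sketch of the bit-packed region table used by the lines above:
 * each region's path number is stored in entry_bits bits of a
 * 64-bit slot, mirroring switch_get_position(),
 * switch_region_table_read() and switch_region_table_write().
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t slot_t;                 /* stands in for region_table_slot_t */

struct region_table {
	slot_t *slots;
	unsigned entry_bits;             /* bits per region entry */
	unsigned entries_per_slot;       /* 64 / entry_bits       */
};

/* Locate the slot and bit offset holding a given region's entry. */
static void get_position(const struct region_table *t, unsigned long region_nr,
			 unsigned long *slot_index, unsigned *bit)
{
	*slot_index = region_nr / t->entries_per_slot;
	*bit = (region_nr % t->entries_per_slot) * t->entry_bits;
}

/* Extract the path number stored for region_nr. */
static unsigned table_read(const struct region_table *t, unsigned long region_nr)
{
	unsigned long slot_index;
	unsigned bit;

	get_position(t, region_nr, &slot_index, &bit);
	return (t->slots[slot_index] >> bit) & (((slot_t)1 << t->entry_bits) - 1);
}

/* Clear the old entry for region_nr, then store the new path number. */
static void table_write(struct region_table *t, unsigned long region_nr, unsigned value)
{
	unsigned long slot_index;
	unsigned bit;
	slot_t slot;

	get_position(t, region_nr, &slot_index, &bit);
	slot = t->slots[slot_index];
	slot &= ~((((slot_t)1 << t->entry_bits) - 1) << bit);
	slot |= (slot_t)value << bit;
	t->slots[slot_index] = slot;
}

int main(void)
{
	/* 3 paths need 2 bits per entry, so 32 entries fit in one 64-bit slot. */
	struct region_table t = { .entry_bits = 2, .entries_per_slot = 32 };
	t.slots = calloc(1, sizeof(slot_t));

	table_write(&t, 5, 2);                      /* map region 5 to path 2 */
	printf("region 5 -> path %u\n", table_read(&t, 5));
	free(t.slots);
	return 0;
}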