
Searched refs:ti (Results 1 – 25 of 112) sorted by relevance


/drivers/md/
dm-linear.c
29 int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) in dm_linear_ctr() argument
37 ti->error = "Invalid argument count"; in dm_linear_ctr()
43 ti->error = "Cannot allocate linear context"; in dm_linear_ctr()
49 ti->error = "Invalid device sector"; in dm_linear_ctr()
54 ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev); in dm_linear_ctr()
56 ti->error = "Device lookup failed"; in dm_linear_ctr()
60 ti->num_flush_bios = 1; in dm_linear_ctr()
61 ti->num_discard_bios = 1; in dm_linear_ctr()
62 ti->num_write_same_bios = 1; in dm_linear_ctr()
63 ti->num_write_zeroes_bios = 1; in dm_linear_ctr()
[all …]
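
Taken together, the dm-linear hits above trace the usual shape of a device-mapper target constructor: check the argument count, allocate a private context, resolve the backing device with dm_get_device(), report failures through ti->error, and advertise capabilities via the ti->num_*_bios counters. What follows is a minimal sketch of that shape, not code from the tree: the example_ctx struct, its fields, and the "<dev_path> <offset>" argument layout are invented for illustration; only the <linux/device-mapper.h> calls mirror what the results show.

/* Sketch only: a hypothetical two-argument constructor in the style of
 * dm_linear_ctr() above. */
#include <linux/device-mapper.h>
#include <linux/slab.h>

struct example_ctx {			/* hypothetical private context */
	struct dm_dev *dev;
	sector_t start;
};

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct example_ctx *ec;
	unsigned long long start;
	char dummy;
	int ret = -EINVAL;

	if (argc != 2) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ec = kmalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	/* Parse the offset; reject trailing junk, as dm-linear does. */
	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	ec->start = start;

	/* Take a reference on the backing device for the table's lifetime. */
	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_bios = 1;		/* pass flushes straight through */
	ti->private = ec;
	return 0;

bad:
	kfree(ec);
	return ret;
}

dm-delay and dm-log-writes in this listing follow the same outline, differing mainly in how many devices they look up and which ti-> limits they set.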
dm-flakey.c
48 struct dm_target *ti) in parse_features() argument
65 r = dm_read_arg_group(_args, as, &argc, &ti->error); in parse_features()
78 ti->error = "Feature drop_writes duplicated"; in parse_features()
81 ti->error = "Feature drop_writes conflicts with feature error_writes"; in parse_features()
93 ti->error = "Feature error_writes duplicated"; in parse_features()
97 ti->error = "Feature error_writes conflicts with feature drop_writes"; in parse_features()
109 ti->error = "Feature corrupt_bio_byte requires parameters"; in parse_features()
113 r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); in parse_features()
127 ti->error = "Invalid corrupt bio direction (r or w)"; in parse_features()
135 r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error); in parse_features()
[all …]
dm-table.c
282 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, in device_area_is_invalid() argument
303 dm_device_name(ti->table->md), bdevname(bdev, b), in device_area_is_invalid()
316 dm_device_name(ti->table->md), bdevname(bdev, b), in device_area_is_invalid()
332 dm_device_name(ti->table->md), in device_area_is_invalid()
349 dm_device_name(ti->table->md), in device_area_is_invalid()
362 dm_device_name(ti->table->md), in device_area_is_invalid()
371 dm_device_name(ti->table->md), in device_area_is_invalid()
429 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, in dm_get_device() argument
435 struct dm_table *t = ti->table; in dm_get_device()
469 static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, in dm_set_device_limits() argument
[all …]
dm-delay.c
128 static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) in delay_ctr() argument
136 ti->error = "Requires exactly 3 or 6 arguments"; in delay_ctr()
142 ti->error = "Cannot allocate context"; in delay_ctr()
150 ti->error = "Invalid device sector"; in delay_ctr()
156 ti->error = "Invalid delay"; in delay_ctr()
160 ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), in delay_ctr()
163 ti->error = "Device lookup failed"; in delay_ctr()
173 ti->error = "Invalid write device sector"; in delay_ctr()
179 ti->error = "Invalid write delay"; in delay_ctr()
183 ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), in delay_ctr()
[all …]
dm-switch.c
39 struct dm_target *ti; member
59 static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_paths, in alloc_switch_ctx() argument
69 sctx->ti = ti; in alloc_switch_ctx()
72 ti->private = sctx; in alloc_switch_ctx()
77 static int alloc_region_table(struct dm_target *ti, unsigned nr_paths) in alloc_region_table() argument
79 struct switch_ctx *sctx = ti->private; in alloc_region_table()
80 sector_t nr_regions = ti->len; in alloc_region_table()
103 ti->error = "Region table too large"; in alloc_region_table()
113 ti->error = "Region table too large"; in alloc_region_table()
119 ti->error = "Cannot allocate region table"; in alloc_region_table()
[all …]
dm-stripe.c
39 struct dm_target *ti; member
55 dm_table_event(sc->ti->table); in trigger_event()
74 static int get_stripe(struct dm_target *ti, struct stripe_c *sc, in get_stripe() argument
84 ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), in get_stripe()
98 static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) in stripe_ctr() argument
108 ti->error = "Not enough arguments"; in stripe_ctr()
113 ti->error = "Invalid stripe count"; in stripe_ctr()
118 ti->error = "Invalid chunk_size"; in stripe_ctr()
122 width = ti->len; in stripe_ctr()
124 ti->error = "Target length not divisible by " in stripe_ctr()
[all …]
dm-mpath.c
68 struct dm_target *ti; member
163 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) in free_pgpaths() argument
169 dm_put_device(ti, pgpath->path.dev); in free_pgpaths()
175 struct dm_target *ti) in free_priority_group() argument
184 free_pgpaths(&pg->pgpaths, ti); in free_priority_group()
188 static struct multipath *alloc_multipath(struct dm_target *ti) in alloc_multipath() argument
207 m->ti = ti; in alloc_multipath()
208 ti->private = m; in alloc_multipath()
214 static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m) in alloc_multipath_stage2() argument
220 if (dm_use_blk_mq(dm_table_get_md(ti->table))) in alloc_multipath_stage2()
[all …]
dm-verity-target.c
81 return v->data_start + dm_target_offset(v->ti, bi_sector); in verity_map_sector()
259 struct mapped_device *md = dm_table_get_md(v->ti->table); in verity_handle_err()
416 struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); in verity_for_io_block()
463 struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); in verity_for_bv_block()
504 struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); in verity_bv_skip_block()
587 struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); in verity_finish_io()
678 int verity_map(struct dm_target *ti, struct bio *bio) in verity_map() argument
680 struct dm_verity *v = ti->private; in verity_map()
701 io = dm_per_bio_data(bio, ti->per_io_data_size); in verity_map()
723 void verity_status(struct dm_target *ti, status_type_t type, in verity_status() argument
[all …]
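
The dm-verity hits also show the per-bio-data convention: the constructor sets ti->per_io_data_size, and the I/O paths recover their per-bio bookkeeping with dm_per_bio_data() (with dm_bio_from_per_bio_data() going the other way, as in verity_finish_io() above). A hedged sketch of that pairing follows; struct example_io and the functions are invented, only the helpers are the ones the results reference.

#include <linux/device-mapper.h>

struct example_io {			/* hypothetical per-bio bookkeeping */
	sector_t target_sector;
};

/* Called from the target's ctr: reserve per-bio room so no separate
 * allocation is needed on the I/O path. */
static void example_reserve_per_bio(struct dm_target *ti)
{
	ti->per_io_data_size = sizeof(struct example_io);
}

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_io *io = dm_per_bio_data(bio, ti->per_io_data_size);

	io->target_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	/* dm_bio_from_per_bio_data(io, ti->per_io_data_size) would give the
	 * bio back later, as the verity completion paths above do. */

	/* Actual remapping to the backing device is omitted from this sketch. */
	return DM_MAPIO_REMAPPED;
}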
dm-zoned-target.c
617 static int dmz_map(struct dm_target *ti, struct bio *bio) in dmz_map() argument
619 struct dmz_target *dmz = ti->private; in dmz_map()
681 static int dmz_get_zoned_device(struct dm_target *ti, char *path) in dmz_get_zoned_device() argument
683 struct dmz_target *dmz = ti->private; in dmz_get_zoned_device()
690 ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev); in dmz_get_zoned_device()
692 ti->error = "Get target device failed"; in dmz_get_zoned_device()
707 ti->error = "Not a zoned block device"; in dmz_get_zoned_device()
715 if (ti->begin || in dmz_get_zoned_device()
716 ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) { in dmz_get_zoned_device()
717 ti->error = "Partial mapping not supported"; in dmz_get_zoned_device()
[all …]
dm-raid1.c
52 struct dm_target *ti; member
254 static int mirror_flush(struct dm_target *ti) in mirror_flush() argument
256 struct mirror_set *ms = ti->private; in mirror_flush()
348 from.count = ms->ti->len & (region_size - 1); in recover()
413 dm_table_event(ms->ti->table); in do_recovery()
462 return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); in map_sector()
493 if (dm_noflush_suspending(ms->ti)) in hold_bio()
851 dm_table_event(ms->ti->table); in trigger_event()
885 struct dm_target *ti, in alloc_context() argument
895 ti->error = "Cannot allocate mirror context"; in alloc_context()
[all …]
dm-raid.c
226 struct dm_target *ti; member
510 rs->ti->error = "Invalid flags combination"; in rs_check_for_valid_flags()
698 struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table)); in rs_set_capacity()
732 static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type, in raid_set_alloc() argument
739 ti->error = "Insufficient number of devices"; in raid_set_alloc()
745 ti->error = "Cannot allocate raid context"; in raid_set_alloc()
754 rs->ti = ti; in raid_set_alloc()
786 dm_put_device(rs->ti, rs->journal_dev.dev); in raid_set_free()
791 dm_put_device(rs->ti, rs->dev[i].meta_dev); in raid_set_free()
794 dm_put_device(rs->ti, rs->dev[i].data_dev); in raid_set_free()
[all …]
dm-log-writes.c
444 static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) in log_writes_ctr() argument
455 ti->error = "Invalid argument count"; in log_writes_ctr()
461 ti->error = "Cannot allocate context"; in log_writes_ctr()
473 ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev); in log_writes_ctr()
475 ti->error = "Device lookup failed"; in log_writes_ctr()
480 ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table), in log_writes_ctr()
483 ti->error = "Log device lookup failed"; in log_writes_ctr()
484 dm_put_device(ti, lc->dev); in log_writes_ctr()
493 ti->error = "Couldn't alloc kthread"; in log_writes_ctr()
494 dm_put_device(ti, lc->dev); in log_writes_ctr()
[all …]
dm-snap.c
31 #define dm_target_is_snapshot_merge(ti) \ argument
32 ((ti)->type->name == dm_snapshot_merge_target_name)
55 struct dm_target *ti; member
322 struct dm_target *ti; member
453 if (dm_target_is_snapshot_merge(s->ti) && snap_merge) in __find_snapshots_sharing_cow()
488 snap->ti->error = "Snapshot cow pairing for exception " in __validate_exception_handover()
503 if (!dm_target_is_snapshot_merge(snap->ti)) in __validate_exception_handover()
510 snap->ti->error = "A snapshot is already merging."; in __validate_exception_handover()
516 snap->ti->error = "Snapshot exception store does not " in __validate_exception_handover()
1118 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) in snapshot_ctr() argument
[all …]
dm.c
847 dm_endio_fn endio = tio->ti->type->end_io; in clone_endio()
859 int r = endio(tio->ti, bio, &error); in clone_endio()
883 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) in max_io_len_target_boundary() argument
885 sector_t target_offset = dm_target_offset(ti, sector); in max_io_len_target_boundary()
887 return ti->len - target_offset; in max_io_len_target_boundary()
890 static sector_t max_io_len(sector_t sector, struct dm_target *ti) in max_io_len() argument
892 sector_t len = max_io_len_target_boundary(sector, ti); in max_io_len()
898 if (ti->max_io_len) { in max_io_len()
899 offset = dm_target_offset(ti, sector); in max_io_len()
900 if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) in max_io_len()
[all …]
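
The dm.c lines hint at how core DM bounds an I/O: it is clamped both to the end of the target (max_io_len_target_boundary) and, when the target sets ti->max_io_len, to the next multiple of that length, using a mask when the length is a power of two. A standalone userspace illustration of that arithmetic follows; the function names are local to the example and are not the kernel's.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Clamp an I/O starting at target_offset so it crosses neither the end of a
 * target that is target_len sectors long nor a max_io_len boundary. */
static sector_t clamp_io(sector_t target_offset, sector_t target_len,
			 sector_t max_io_len)
{
	sector_t len = target_len - target_offset;	/* room left in target */

	if (max_io_len) {
		sector_t done_in_chunk;

		if (max_io_len & (max_io_len - 1))	/* not a power of two */
			done_in_chunk = target_offset % max_io_len;
		else					/* power of two: cheap mask */
			done_in_chunk = target_offset & (max_io_len - 1);

		if (len > max_io_len - done_in_chunk)
			len = max_io_len - done_in_chunk;
	}
	return len;
}

int main(void)
{
	/* A 1000-sector target chunked every 128 sectors: an I/O at offset 250
	 * may span only 6 sectors before the next 128-sector boundary. */
	printf("%llu\n", (unsigned long long)clamp_io(250, 1000, 128));
	return 0;
}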
dm-android-verity.h
112 extern void dm_linear_dtr(struct dm_target *ti);
113 extern int dm_linear_map(struct dm_target *ti, struct bio *bio);
114 extern int dm_linear_end_io(struct dm_target *ti, struct bio *bio,
116 extern void dm_linear_status(struct dm_target *ti, status_type_t type,
118 extern int dm_linear_prepare_ioctl(struct dm_target *ti,
120 extern int dm_linear_iterate_devices(struct dm_target *ti,
122 extern int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv);
123 extern long dm_linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
126 extern size_t dm_linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
dm-crypt.c
90 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
375 struct dm_target *ti, in alloc_essiv_cipher() argument
385 ti->error = "Error allocating crypto tfm for ESSIV"; in alloc_essiv_cipher()
390 ti->error = "Block size of ESSIV cipher does " in alloc_essiv_cipher()
398 ti->error = "Failed to set key for ESSIV cipher"; in alloc_essiv_cipher()
425 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, in crypt_iv_essiv_ctr() argument
434 ti->error = "Digest algorithm missing for ESSIV mode"; in crypt_iv_essiv_ctr()
441 ti->error = "Error initializing ESSIV hash"; in crypt_iv_essiv_ctr()
448 ti->error = "Error kmallocing salt storage in ESSIV"; in crypt_iv_essiv_ctr()
456 essiv_tfm = alloc_essiv_cipher(cc, ti, salt, in crypt_iv_essiv_ctr()
[all …]
dm-verity-fec.c
567 dm_put_device(v->ti, f->dev); in verity_fec_dtr()
606 struct dm_target *ti = v->ti; in verity_fec_parse_opt_args() local
613 ti->error = "FEC feature arguments require a value"; in verity_fec_parse_opt_args()
621 r = dm_get_device(ti, arg_value, FMODE_READ, &v->fec->dev); in verity_fec_parse_opt_args()
623 ti->error = "FEC device lookup failed"; in verity_fec_parse_opt_args()
631 ti->error = "Invalid " DM_VERITY_OPT_FEC_BLOCKS; in verity_fec_parse_opt_args()
640 ti->error = "Invalid " DM_VERITY_OPT_FEC_START; in verity_fec_parse_opt_args()
649 ti->error = "Invalid " DM_VERITY_OPT_FEC_ROOTS; in verity_fec_parse_opt_args()
655 ti->error = "Unrecognized verity FEC feature request"; in verity_fec_parse_opt_args()
693 v->ti->error = "Cannot allocate FEC structure"; in verity_fec_ctr_alloc()
[all …]
dm-rq.c
222 tio->ti->type->release_clone_rq(clone); in dm_end_request()
273 tio->ti->type->release_clone_rq(tio->clone); in dm_requeue_original_request()
290 if (tio->ti) { in dm_done()
291 rq_end_io = tio->ti->type->rq_end_io; in dm_done()
294 r = rq_end_io(tio->ti, clone, error, &tio->info); in dm_done()
450 tio->ti = NULL; in init_tio()
475 struct dm_target *ti = tio->ti; in map_request() local
480 r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); in map_request()
488 ti->type->release_clone_rq(clone); in map_request()
626 struct dm_target *ti = md->immutable_target; in dm_old_request_fn() local
[all …]
dm-thin.c
231 struct dm_target *ti; /* Only set if a pool target is bound */ member
311 dm_table_event(pool->ti->table); in notify_of_pool_mode_change()
321 struct dm_target *ti; member
1532 dm_table_event(pool->ti->table); in check_low_water_mark()
2525 struct pool_c *pt = pool->ti->private; in set_discard_callbacks()
2539 struct pool_c *pt = pool->ti->private; in set_pool_mode()
2718 static int thin_bio_map(struct dm_target *ti, struct bio *bio) in thin_bio_map() argument
2721 struct thin_c *tc = ti->private; in thin_bio_map()
2879 static int bind_control_target(struct pool *pool, struct dm_target *ti) in bind_control_target() argument
2881 struct pool_c *pt = ti->private; in bind_control_target()
[all …]
/drivers/gpu/drm/gma500/
mdfld_tmd_vid.c
37 struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD; in tmd_vid_get_config_mode() local
45 mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo; in tmd_vid_get_config_mode()
46 mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo; in tmd_vid_get_config_mode()
48 ((ti->hsync_offset_hi << 8) | \ in tmd_vid_get_config_mode()
49 ti->hsync_offset_lo); in tmd_vid_get_config_mode()
51 ((ti->hsync_pulse_width_hi << 8) | \ in tmd_vid_get_config_mode()
52 ti->hsync_pulse_width_lo); in tmd_vid_get_config_mode()
53 mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \ in tmd_vid_get_config_mode()
54 ti->hblank_lo); in tmd_vid_get_config_mode()
56 mode->vdisplay + ((ti->vsync_offset_hi << 8) | \ in tmd_vid_get_config_mode()
[all …]
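
All three gma500 hits rebuild display mode timings from a DTD whose fields are stored as split high/low bytes. A tiny standalone illustration of that reassembly (the struct and values here are invented; only the shift-and-or pattern mirrors the snippets above):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical split-byte timing fields, in the hactive_hi/lo style of the
 * oaktrail_timing_info snippets above. */
struct split_timing {
	uint8_t hactive_hi, hactive_lo;
	uint8_t hblank_hi, hblank_lo;
};

int main(void)
{
	struct split_timing ti = { .hactive_hi = 0x07, .hactive_lo = 0x80,
				   .hblank_hi = 0x00, .hblank_lo = 0xa0 };

	/* (hi << 8) | lo, exactly the pattern in tmd_vid_get_config_mode(). */
	unsigned hdisplay = (ti.hactive_hi << 8) | ti.hactive_lo;
	unsigned htotal = hdisplay + ((ti.hblank_hi << 8) | ti.hblank_lo);

	printf("hdisplay=%u htotal=%u\n", hdisplay, htotal);	/* 1920, 2080 */
	return 0;
}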
mid_bios.c
232 struct gct_r10_timing_info *ti; in mid_get_vbt_data_r10() local
253 ti = &gct[vbt.primary_panel_idx].DTD; in mid_get_vbt_data_r10()
254 dp_ti->pixel_clock = ti->pixel_clock; in mid_get_vbt_data_r10()
255 dp_ti->hactive_hi = ti->hactive_hi; in mid_get_vbt_data_r10()
256 dp_ti->hactive_lo = ti->hactive_lo; in mid_get_vbt_data_r10()
257 dp_ti->hblank_hi = ti->hblank_hi; in mid_get_vbt_data_r10()
258 dp_ti->hblank_lo = ti->hblank_lo; in mid_get_vbt_data_r10()
259 dp_ti->hsync_offset_hi = ti->hsync_offset_hi; in mid_get_vbt_data_r10()
260 dp_ti->hsync_offset_lo = ti->hsync_offset_lo; in mid_get_vbt_data_r10()
261 dp_ti->hsync_pulse_width_hi = ti->hsync_pulse_width_hi; in mid_get_vbt_data_r10()
[all …]
oaktrail_lvds.c
228 struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD; in oaktrail_lvds_get_configuration_mode() local
238 mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo; in oaktrail_lvds_get_configuration_mode()
239 mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo; in oaktrail_lvds_get_configuration_mode()
241 ((ti->hsync_offset_hi << 8) | \ in oaktrail_lvds_get_configuration_mode()
242 ti->hsync_offset_lo); in oaktrail_lvds_get_configuration_mode()
244 ((ti->hsync_pulse_width_hi << 8) | \ in oaktrail_lvds_get_configuration_mode()
245 ti->hsync_pulse_width_lo); in oaktrail_lvds_get_configuration_mode()
246 mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \ in oaktrail_lvds_get_configuration_mode()
247 ti->hblank_lo); in oaktrail_lvds_get_configuration_mode()
249 mode->vdisplay + ((ti->vsync_offset_hi << 4) | \ in oaktrail_lvds_get_configuration_mode()
[all …]
/drivers/media/platform/ti-vpe/
Makefile
2 obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
3 obj-$(CONFIG_VIDEO_TI_VPDMA) += ti-vpdma.o
4 obj-$(CONFIG_VIDEO_TI_SC) += ti-sc.o
5 obj-$(CONFIG_VIDEO_TI_CSC) += ti-csc.o
7 ti-vpe-y := vpe.o
8 ti-vpdma-y := vpdma.o
9 ti-sc-y := sc.o
10 ti-csc-y := csc.o
14 obj-$(CONFIG_VIDEO_TI_CAL) += ti-cal.o
16 ti-cal-y := cal.o
/drivers/thermal/ti-soc-thermal/
Makefile
2 obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal.o
3 ti-soc-thermal-y := ti-bandgap.o
4 ti-soc-thermal-$(CONFIG_TI_THERMAL) += ti-thermal-common.o
5 ti-soc-thermal-$(CONFIG_DRA752_THERMAL) += dra752-thermal-data.o
6 ti-soc-thermal-$(CONFIG_OMAP3_THERMAL) += omap3-thermal-data.o
7 ti-soc-thermal-$(CONFIG_OMAP4_THERMAL) += omap4-thermal-data.o
8 ti-soc-thermal-$(CONFIG_OMAP5_THERMAL) += omap5-thermal-data.o
/drivers/staging/lustre/lustre/ptlrpc/
pinger.c
406 struct timeout_item *ti; in ptlrpc_new_timeout() local
408 ti = kzalloc(sizeof(*ti), GFP_NOFS); in ptlrpc_new_timeout()
409 if (!ti) in ptlrpc_new_timeout()
412 INIT_LIST_HEAD(&ti->ti_obd_list); in ptlrpc_new_timeout()
413 INIT_LIST_HEAD(&ti->ti_chain); in ptlrpc_new_timeout()
414 ti->ti_timeout = time; in ptlrpc_new_timeout()
415 ti->ti_event = event; in ptlrpc_new_timeout()
416 ti->ti_cb = cb; in ptlrpc_new_timeout()
417 ti->ti_cb_data = data; in ptlrpc_new_timeout()
419 return ti; in ptlrpc_new_timeout()
[all …]
