
Searched refs:t (Results 1 – 25 of 151) sorted by relevance


/include/linux/
units.h
44 static inline long milli_kelvin_to_millicelsius(long t) in milli_kelvin_to_millicelsius() argument
46 return t + ABSOLUTE_ZERO_MILLICELSIUS; in milli_kelvin_to_millicelsius()
49 static inline long millicelsius_to_milli_kelvin(long t) in millicelsius_to_milli_kelvin() argument
51 return t - ABSOLUTE_ZERO_MILLICELSIUS; in millicelsius_to_milli_kelvin()
57 static inline long kelvin_to_millicelsius(long t) in kelvin_to_millicelsius() argument
59 return milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DEGREE); in kelvin_to_millicelsius()
62 static inline long millicelsius_to_kelvin(long t) in millicelsius_to_kelvin() argument
64 t = millicelsius_to_milli_kelvin(t); in millicelsius_to_kelvin()
66 return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DEGREE); in millicelsius_to_kelvin()
69 static inline long deci_kelvin_to_celsius(long t) in deci_kelvin_to_celsius() argument
[all …]
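
For context, a minimal userspace sketch of the arithmetic these helpers perform (not part of the search output; the constant values are assumed to match <linux/units.h>):

    #include <stdio.h>

    /* Constants assumed to mirror <linux/units.h> */
    #define ABSOLUTE_ZERO_MILLICELSIUS (-273150L)
    #define MILLIDEGREE_PER_DEGREE     1000L

    static long milli_kelvin_to_millicelsius(long t) { return t + ABSOLUTE_ZERO_MILLICELSIUS; }
    static long kelvin_to_millicelsius(long t) { return milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DEGREE); }

    int main(void)
    {
        printf("%ld\n", kelvin_to_millicelsius(300)); /* 26850, i.e. 300 K = 26.85 degC */
        return 0;
    }
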
rseq.h
26 static inline void rseq_set_notify_resume(struct task_struct *t) in rseq_set_notify_resume() argument
28 if (t->rseq) in rseq_set_notify_resume()
29 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); in rseq_set_notify_resume()
51 static inline void rseq_preempt(struct task_struct *t) in rseq_preempt() argument
53 __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask); in rseq_preempt()
54 rseq_set_notify_resume(t); in rseq_preempt()
58 static inline void rseq_migrate(struct task_struct *t) in rseq_migrate() argument
60 __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask); in rseq_migrate()
61 rseq_set_notify_resume(t); in rseq_migrate()
68 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) in rseq_fork() argument
[all …]
user_events.h
30 extern void user_event_mm_dup(struct task_struct *t,
33 extern void user_event_mm_remove(struct task_struct *t);
35 static inline void user_events_fork(struct task_struct *t, in user_events_fork() argument
40 if (!t || !current->user_event_mm) in user_events_fork()
46 t->user_event_mm = old_mm; in user_events_fork()
51 user_event_mm_dup(t, old_mm); in user_events_fork()
54 static inline void user_events_execve(struct task_struct *t) in user_events_execve() argument
56 if (!t || !t->user_event_mm) in user_events_execve()
59 user_event_mm_remove(t); in user_events_execve()
62 static inline void user_events_exit(struct task_struct *t) in user_events_exit() argument
[all …]
btf.h
223 struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
226 bool btf_type_is_void(const struct btf_type *t);
238 const char *btf_type_str(const struct btf_type *t);
250 static inline bool btf_type_is_ptr(const struct btf_type *t) in btf_type_is_ptr() argument
252 return BTF_INFO_KIND(t->info) == BTF_KIND_PTR; in btf_type_is_ptr()
255 static inline bool btf_type_is_int(const struct btf_type *t) in btf_type_is_int() argument
257 return BTF_INFO_KIND(t->info) == BTF_KIND_INT; in btf_type_is_int()
260 static inline bool btf_type_is_small_int(const struct btf_type *t) in btf_type_is_small_int() argument
262 return btf_type_is_int(t) && t->size <= sizeof(u64); in btf_type_is_small_int()
265 static inline u8 btf_int_encoding(const struct btf_type *t) in btf_int_encoding() argument
[all …]
rcupdate_trace.h
34 void rcu_read_unlock_trace_special(struct task_struct *t);
50 struct task_struct *t = current; in rcu_read_lock_trace() local
52 WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1); in rcu_read_lock_trace()
55 t->trc_reader_special.b.need_mb) in rcu_read_lock_trace()
72 struct task_struct *t = current; in rcu_read_unlock_trace() local
75 nesting = READ_ONCE(t->trc_reader_nesting) - 1; in rcu_read_unlock_trace()
78 WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting); in rcu_read_unlock_trace()
79 if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) { in rcu_read_unlock_trace()
80 WRITE_ONCE(t->trc_reader_nesting, nesting); in rcu_read_unlock_trace()
84 rcu_read_unlock_trace_special(t); in rcu_read_unlock_trace()
kcov.h
30 void kcov_task_init(struct task_struct *t);
31 void kcov_task_exit(struct task_struct *t);
33 #define kcov_prepare_switch(t) \ argument
35 (t)->kcov_mode |= KCOV_IN_CTXSW; \
38 #define kcov_finish_switch(t) \ argument
40 (t)->kcov_mode &= ~KCOV_IN_CTXSW; \
96 static inline void kcov_task_init(struct task_struct *t) {} in kcov_task_init() argument
97 static inline void kcov_task_exit(struct task_struct *t) {} in kcov_task_exit() argument
98 static inline void kcov_prepare_switch(struct task_struct *t) {} in kcov_prepare_switch() argument
99 static inline void kcov_finish_switch(struct task_struct *t) {} in kcov_finish_switch() argument
cb710.h
51 #define CB710_PORT_ACCESSORS(t) \ argument
52 static inline void cb710_write_port_##t(struct cb710_slot *slot, \
53 unsigned port, u##t value) \
55 iowrite##t(value, slot->iobase + port); \
58 static inline u##t cb710_read_port_##t(struct cb710_slot *slot, \
61 return ioread##t(slot->iobase + port); \
64 static inline void cb710_modify_port_##t(struct cb710_slot *slot, \
65 unsigned port, u##t set, u##t clear) \
67 iowrite##t( \
68 (ioread##t(slot->iobase + port) & ~clear)|set, \
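
As an aside, a small sketch of the token-pasting pattern CB710_PORT_ACCESSORS(t) relies on (names such as DEFINE_READ and read_u32 are hypothetical; the kernel macro pastes onto iowrite##t/ioread##t instead):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Generates read_u8()/read_u16()/read_u32() by pasting the bit width into
     * both the function name and its return type. */
    #define DEFINE_READ(t) \
        static uint##t##_t read_u##t(const void *base, unsigned off) \
        { uint##t##_t v; memcpy(&v, (const char *)base + off, sizeof(v)); return v; }

    DEFINE_READ(8)
    DEFINE_READ(16)
    DEFINE_READ(32)

    int main(void)
    {
        uint32_t regs[2] = { 0x11223344u, 0xdeadbeefu };
        printf("%#x\n", (unsigned)read_u32(regs, 4)); /* prints 0xdeadbeef */
        return 0;
    }
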
win_minmax.h
13 u32 t; /* time measurement was taken */ member
27 static inline u32 minmax_reset(struct minmax *m, u32 t, u32 meas) in minmax_reset() argument
29 struct minmax_sample val = { .t = t, .v = meas }; in minmax_reset()
35 u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas);
36 u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas);
stackleak.h
72 static inline void stackleak_task_init(struct task_struct *t) in stackleak_task_init() argument
74 t->lowest_stack = stackleak_task_low_bound(t); in stackleak_task_init()
76 t->prev_lowest_stack = t->lowest_stack; in stackleak_task_init()
86 static inline void stackleak_task_init(struct task_struct *t) { } in stackleak_task_init() argument
damon.h
503 struct damon_target *t, struct damon_region *r,
506 struct damon_target *t, struct damon_region *r,
508 bool (*target_valid)(struct damon_target *t);
656 static inline struct damon_region *damon_last_region(struct damon_target *t) in damon_last_region() argument
658 return list_last_entry(&t->regions_list, struct damon_region, list); in damon_last_region()
661 static inline struct damon_region *damon_first_region(struct damon_target *t) in damon_first_region() argument
663 return list_first_entry(&t->regions_list, struct damon_region, list); in damon_first_region()
672 #define damon_for_each_region(r, t) \ argument
673 list_for_each_entry(r, &t->regions_list, list)
675 #define damon_for_each_region_from(r, t) \ argument
[all …]
interrupt.h
678 void (*callback)(struct tasklet_struct *t);
719 static inline int tasklet_trylock(struct tasklet_struct *t) in tasklet_trylock() argument
721 return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); in tasklet_trylock()
724 void tasklet_unlock(struct tasklet_struct *t);
725 void tasklet_unlock_wait(struct tasklet_struct *t);
726 void tasklet_unlock_spin_wait(struct tasklet_struct *t);
729 static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; } in tasklet_trylock() argument
730 static inline void tasklet_unlock(struct tasklet_struct *t) { } in tasklet_unlock() argument
731 static inline void tasklet_unlock_wait(struct tasklet_struct *t) { } in tasklet_unlock_wait() argument
732 static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { } in tasklet_unlock_spin_wait() argument
[all …]
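
For illustration, a trylock built on an atomic test-and-set, the same idea tasklet_trylock() uses with test_and_set_bit(TASKLET_STATE_RUN, ...); this is a userspace sketch with C11 atomics, not the kernel primitive:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_flag run_state = ATOMIC_FLAG_INIT;

    /* Returns true only if we were the ones to set the flag. */
    static bool my_trylock(void) { return !atomic_flag_test_and_set(&run_state); }
    static void my_unlock(void)  { atomic_flag_clear(&run_state); }

    int main(void)
    {
        printf("%d\n", my_trylock()); /* 1: acquired */
        printf("%d\n", my_trylock()); /* 0: already held */
        my_unlock();
        return 0;
    }
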
gunyah_qtvm.h
24 int gunyah_qtvm_register_resource_ticket(struct gunyah_vm_resource_ticket *t,
26 void gunyah_qtvm_unregister_resource_ticket(struct gunyah_vm_resource_ticket *t,
38 struct gunyah_vm_resource_ticket *t, u16 vmid) in gunyah_qtvm_register_resource_ticket() argument
43 struct gunyah_vm_resource_ticket *t, u16 vmid) in gunyah_qtvm_unregister_resource_ticket() argument
nls.h
62 static inline unsigned char nls_tolower(struct nls_table *t, unsigned char c) in nls_tolower() argument
64 unsigned char nc = t->charset2lower[c]; in nls_tolower()
69 static inline unsigned char nls_toupper(struct nls_table *t, unsigned char c) in nls_toupper() argument
71 unsigned char nc = t->charset2upper[c]; in nls_toupper()
76 static inline int nls_strnicmp(struct nls_table *t, const unsigned char *s1, in nls_strnicmp() argument
80 if (nls_tolower(t, *s1++) != nls_tolower(t, *s2++)) in nls_strnicmp()
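
A userspace stand-in for the comparison loop above (not part of the search output): the kernel indexes a per-charset charset2lower[] table, while plain tolower() plays that role here; my_strnicmp is a hypothetical name.

    #include <ctype.h>
    #include <stdio.h>

    /* Returns 0 on a case-insensitive match, 1 on the first mismatch,
     * mirroring the nls_strnicmp() convention. */
    static int my_strnicmp(const unsigned char *s1, const unsigned char *s2, int n)
    {
        while (n-- > 0)
            if (tolower(*s1++) != tolower(*s2++))
                return 1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", my_strnicmp((const unsigned char *)"FOO.TXT",
                                   (const unsigned char *)"foo.txt", 7)); /* 0 */
        return 0;
    }
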
thread_info.h
145 #define read_task_thread_flags(t) \ argument
146 read_ti_thread_flags(task_thread_info(t))
156 #define set_task_syscall_work(t, fl) \ argument
157 set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
158 #define test_task_syscall_work(t, fl) \ argument
159 test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
160 #define clear_task_syscall_work(t, fl) \ argument
161 clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
172 #define set_task_syscall_work(t, fl) \ argument
173 set_ti_thread_flag(task_thread_info(t), TIF_##fl)
[all …]
/include/asm-generic/
ioctl.h
8 #define _IOC_TYPECHECK(t) (sizeof(t)) argument
12 #define _IOC_TYPECHECK(t) \ argument
13 ((sizeof(t) == sizeof(t[1]) && \
14 sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
15 sizeof(t) : __invalid_size_argument_for_IOC)
/include/scsi/
scsi_transport.h
57 scsi_transport_reserve_target(struct scsi_transport_template * t, int space) in scsi_transport_reserve_target() argument
59 BUG_ON(t->target_private_offset != 0); in scsi_transport_reserve_target()
60 t->target_private_offset = ALIGN(t->target_size, sizeof(void *)); in scsi_transport_reserve_target()
61 t->target_size = t->target_private_offset + space; in scsi_transport_reserve_target()
64 scsi_transport_reserve_device(struct scsi_transport_template * t, int space) in scsi_transport_reserve_device() argument
66 BUG_ON(t->device_private_offset != 0); in scsi_transport_reserve_device()
67 t->device_private_offset = ALIGN(t->device_size, sizeof(void *)); in scsi_transport_reserve_device()
68 t->device_size = t->device_private_offset + space; in scsi_transport_reserve_device()
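
A small sketch of the offset/size arithmetic these reservation helpers perform (assumptions: ALIGN is redefined locally to mirror the kernel macro for a power-of-two alignment, and the 12-byte reservation is just an example value):

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        unsigned long size = 0, offset;

        /* Reserve 12 bytes of per-target private data: the private offset is
         * the old size rounded up to a pointer boundary. */
        offset = ALIGN(size, sizeof(void *));   /* 0  */
        size   = offset + 12;                   /* 12 */

        printf("offset=%lu size=%lu rounded=%lu\n",
               offset, size, ALIGN(size, sizeof(void *))); /* rounded = 16 on 64-bit */
        return 0;
    }
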
/include/net/netfilter/
nf_conntrack_tuple.h
85 static inline void nf_ct_dump_tuple_ip(const struct nf_conntrack_tuple *t) in nf_ct_dump_tuple_ip() argument
89 t, t->dst.protonum, in nf_ct_dump_tuple_ip()
90 &t->src.u3.ip, ntohs(t->src.u.all), in nf_ct_dump_tuple_ip()
91 &t->dst.u3.ip, ntohs(t->dst.u.all)); in nf_ct_dump_tuple_ip()
95 static inline void nf_ct_dump_tuple_ipv6(const struct nf_conntrack_tuple *t) in nf_ct_dump_tuple_ipv6() argument
99 t, t->dst.protonum, in nf_ct_dump_tuple_ipv6()
100 t->src.u3.all, ntohs(t->src.u.all), in nf_ct_dump_tuple_ipv6()
101 t->dst.u3.all, ntohs(t->dst.u.all)); in nf_ct_dump_tuple_ipv6()
105 static inline void nf_ct_dump_tuple(const struct nf_conntrack_tuple *t) in nf_ct_dump_tuple() argument
107 switch (t->src.l3num) { in nf_ct_dump_tuple()
[all …]
/include/linux/sunrpc/
sched.h
147 #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) argument
148 #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) argument
149 #define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT)) argument
150 #define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN) argument
151 #define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT) argument
152 #define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE) argument
163 #define rpc_test_and_set_running(t) \ argument
164 test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
165 #define rpc_clear_running(t) clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) argument
167 #define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) argument
[all …]
timer.h
29 int *t; in rpc_set_timeo() local
32 t = &rt->ntimeouts[timer-1]; in rpc_set_timeo()
33 if (ntimeo < *t) { in rpc_set_timeo()
34 if (*t > 0) in rpc_set_timeo()
35 (*t)--; in rpc_set_timeo()
39 *t = ntimeo; in rpc_set_timeo()
/include/linux/sched/
cputime.h
12 extern bool task_cputime(struct task_struct *t,
14 extern u64 task_gtime(struct task_struct *t);
16 static inline bool task_cputime(struct task_struct *t, in task_cputime() argument
19 *utime = t->utime; in task_cputime()
20 *stime = t->stime; in task_cputime()
24 static inline u64 task_gtime(struct task_struct *t) in task_gtime() argument
26 return t->gtime; in task_gtime()
31 static inline void task_cputime_scaled(struct task_struct *t, in task_cputime_scaled() argument
35 *utimescaled = t->utimescaled; in task_cputime_scaled()
36 *stimescaled = t->stimescaled; in task_cputime_scaled()
[all …]
task.h
121 static inline struct task_struct *get_task_struct(struct task_struct *t) in get_task_struct() argument
123 refcount_inc(&t->usage); in get_task_struct()
124 return t; in get_task_struct()
127 static inline struct task_struct *tryget_task_struct(struct task_struct *t) in tryget_task_struct() argument
129 return refcount_inc_not_zero(&t->usage) ? t : NULL; in tryget_task_struct()
132 extern void __put_task_struct(struct task_struct *t);
135 static inline void put_task_struct(struct task_struct *t) in put_task_struct() argument
137 if (!refcount_dec_and_test(&t->usage)) in put_task_struct()
148 __put_task_struct(t); in put_task_struct()
173 call_rcu(&t->rcu, __put_task_struct_rcu_cb); in put_task_struct()
[all …]
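
A minimal analogue of the get/put reference pattern shown above, as a hedged userspace sketch (struct obj, obj_get and obj_put are hypothetical names, and the RCU-deferred free the kernel uses via call_rcu is omitted):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj {
        atomic_uint usage;
        int payload;
    };

    /* Take an extra reference, like get_task_struct(). */
    static struct obj *obj_get(struct obj *o)
    {
        atomic_fetch_add(&o->usage, 1);
        return o;
    }

    /* Drop a reference and free on the last put, like put_task_struct(). */
    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->usage, 1) == 1)
            free(o);
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));
        if (!o)
            return 1;
        atomic_init(&o->usage, 1);
        obj_get(o);  /* usage: 2 */
        obj_put(o);  /* usage: 1 */
        obj_put(o);  /* usage: 0 -> freed */
        return 0;
    }
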
/include/net/sctp/
sctp.h
145 struct sctp_transport *t, __u32 pmtu);
150 struct sctp_transport *t);
153 int sctp_hash_transport(struct sctp_transport *t);
154 void sctp_unhash_transport(struct sctp_transport *t);
561 static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) in sctp_transport_dst_check() argument
563 if (t->dst && !dst_check(t->dst, t->dst_cookie)) in sctp_transport_dst_check()
564 sctp_transport_dst_release(t); in sctp_transport_dst_check()
566 return t->dst; in sctp_transport_dst_check()
573 const struct sctp_transport *t, in __sctp_mtu_payload() argument
580 if (sp->udp_port && (!t || t->encap_port)) in __sctp_mtu_payload()
[all …]
/include/trace/hooks/
binder.h
23 TP_PROTO(struct binder_transaction *t),
24 TP_ARGS(t));
26 TP_PROTO(struct binder_transaction *t, struct task_struct *task),
27 TP_ARGS(t, task));
29 TP_PROTO(struct binder_transaction *t, struct task_struct *task),
30 TP_ARGS(t, task));
41 TP_PROTO(struct binder_proc *proc, struct binder_transaction *t,
43 TP_ARGS(proc, t, binder_th_task, pending_async, sync));
49 TP_PROTO(struct binder_transaction_data *tr, struct binder_transaction *t,
51 TP_ARGS(tr, t, proc));
[all …]
/include/media/
v4l2-dv-timings.h
22 struct v4l2_fract v4l2_calc_timeperframe(const struct v4l2_dv_timings *t);
37 typedef bool v4l2_check_dv_timings_fnc(const struct v4l2_dv_timings *t, void *handle);
51 bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
72 int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
95 bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
109 bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic);
135 const struct v4l2_dv_timings *t, bool detailed);
208 struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t);
/include/crypto/
gf128mul.h
223 be128 t[256]; member
228 void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
229 void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
231 static inline void gf128mul_free_4k(struct gf128mul_4k *t) in gf128mul_free_4k() argument
233 kfree_sensitive(t); in gf128mul_free_4k()
240 struct gf128mul_4k *t[16]; member
249 void gf128mul_free_64k(struct gf128mul_64k *t);
250 void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t);
