/kernel/futex/ |
D | futex.h |
    105 u32 bitset;
    119 extern int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
    143 extern int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
    149 extern int fault_in_user_writeable(u32 __user *uaddr);
    150 extern int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval);
    151 extern int futex_get_value_locked(u32 *dest, u32 __user *from);
    223 extern int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
    233 extern int fixup_pi_owner(u32 __user *uaddr, struct futex_q *q, int locked);
    259 extern int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, u32
    260 val, ktime_t *abs_time, u32 bitset, u32 __user
    [all …]
|
D | syscalls.c |
    86 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, in do_futex()
    87 u32 __user *uaddr2, u32 val2, u32 val3) in do_futex()
    139 static __always_inline bool futex_cmd_has_timeout(u32 cmd) in futex_cmd_has_timeout()
    153 futex_init_timeout(u32 cmd, u32 op, struct timespec64 *ts, ktime_t *t) in futex_init_timeout()
    166 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, in SYSCALL_DEFINE6() argument
    168 u32 __user *, uaddr2, u32, val3) in SYSCALL_DEFINE6()
    361 SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val, in SYSCALL_DEFINE6() argument
    362 const struct old_timespec32 __user *, utime, u32 __user *, uaddr2, in SYSCALL_DEFINE6()
    363 u32, val3) in SYSCALL_DEFINE6()
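The uaddr/op/val/timeout/uaddr2/val3 layout above is the raw futex(2) interface that do_futex() dispatches on. As a rough illustration of how userspace drives the wait/wake pair (a minimal sketch, not code from this tree; error handling omitted):

    #include <linux/futex.h>
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Block while *uaddr still equals val; the kernel re-reads the word under
     * its hash-bucket lock, which is what futex_wait_setup() above implements. */
    static long futex_wait(uint32_t *uaddr, uint32_t val)
    {
            return syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE, val, NULL, NULL, 0);
    }

    /* Wake up to nr waiters blocked on uaddr (futex_wake() in waitwake.c). */
    static long futex_wake(uint32_t *uaddr, int nr)
    {
            return syscall(SYS_futex, uaddr, FUTEX_WAKE_PRIVATE, nr, NULL, NULL, 0);
    }

A waiter typically loads the word and calls futex_wait() only while it still holds the expected value; the waker stores the new value first and then calls futex_wake().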
|
D | pi.c |
    202 static int attach_to_pi_state(u32 __user *uaddr, u32 uval, in attach_to_pi_state()
    207 u32 uval2; in attach_to_pi_state()
    320 static int handle_exit_race(u32 __user *uaddr, u32 uval, in handle_exit_race()
    323 u32 uval2; in handle_exit_race()
    410 static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key, in attach_to_pi_owner()
    474 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) in lock_pi_update_atomic()
    477 u32 curval; in lock_pi_update_atomic()
    514 int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, in futex_lock_pi_atomic()
    521 u32 uval, newval, vpid = task_pid_vnr(task); in futex_lock_pi_atomic()
    613 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state) in wake_futex_pi()
    [all …]
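futex_lock_pi_atomic() and wake_futex_pi() are the kernel half of the PI-futex protocol, in which the futex word holds the owner's TID. A rough userspace sketch of the usual fast paths (illustrative only; no timeouts, robust-list, or requeue handling, and lock_word/pi_lock/pi_unlock are invented names):

    #include <linux/futex.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static _Atomic uint32_t lock_word;      /* 0 = unlocked, else owner TID (+ flag bits) */

    static void pi_lock(void)
    {
            uint32_t zero = 0;
            uint32_t tid = (uint32_t)syscall(SYS_gettid);

            /* Fast path: install our TID while the word is 0. */
            if (atomic_compare_exchange_strong(&lock_word, &zero, tid))
                    return;
            /* Slow path: the kernel queues us and priority-boosts the owner. */
            syscall(SYS_futex, &lock_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
    }

    static void pi_unlock(void)
    {
            uint32_t tid = (uint32_t)syscall(SYS_gettid);

            /* Fast path: release only if the kernel recorded no waiters. */
            if (atomic_compare_exchange_strong(&lock_word, &tid, 0))
                    return;
            syscall(SYS_futex, &lock_word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
    }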
|
D | waitwake.c |
    144 int futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) in futex_wake()
    193 static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr) in futex_atomic_op_inuser()
    243 int futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, in futex_wake_op()
    411 u32 uval; in futex_wait_multiple_setup()
    443 u32 __user *uaddr = (u32 __user *)(unsigned long)vs[i].w.uaddr; in futex_wait_multiple_setup()
    445 u32 val = (u32)vs[i].w.val; in futex_wait_multiple_setup()
    584 int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, in futex_wait_setup()
    587 u32 uval; in futex_wait_setup()
    639 int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, ktime_t *abs_time, u32 bitset) in futex_wait()
    705 u32 __user *uaddr = restart->futex.uaddr; in futex_wait_restart()
|
/kernel/bpf/ |
D | bloom_filter.c |
    17 u32 bitset_mask;
    18 u32 hash_seed;
    25 u32 aligned_u32_count;
    26 u32 nr_hash_funcs;
    30 static u32 hash(struct bpf_bloom_filter *bloom, void *value, in hash()
    31 u32 value_size, u32 index) in hash()
    33 u32 h; in hash()
    48 u32 i, h; in bloom_map_peek_elem()
    63 u32 i, h; in bloom_map_push_elem()
    93 u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits; in bloom_map_alloc()
    [all …]
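hash() above takes the probe index as a parameter: the kernel derives nr_hash_funcs different hash values from one seed by folding the index into hash_seed (jhash/jhash2 in the source) and masking with bitset_mask. A simplified userspace rendering of that scheme (a sketch under those assumptions; toy_bloom and mix32 are stand-ins, not the kernel's types or hash):

    #include <stdint.h>
    #include <stddef.h>

    #define NR_BITS (64 * 1024)                     /* power of two */

    struct toy_bloom {
            uint32_t bitset_mask;                   /* NR_BITS - 1 */
            uint32_t hash_seed;
            uint32_t nr_hash_funcs;
            unsigned long bitset[NR_BITS / (8 * sizeof(unsigned long))];
    };

    /* Stand-in for jhash(); any 32-bit mixer illustrates the construction. */
    static uint32_t mix32(const void *data, size_t len, uint32_t seed)
    {
            const unsigned char *p = data;
            uint32_t h = seed ^ 2166136261u;

            while (len--)
                    h = (h ^ *p++) * 16777619u;
            return h;
    }

    /* i-th probe: same value, seed offset by the probe index, folded into the bitset. */
    static uint32_t bloom_hash(const struct toy_bloom *b, const void *val, size_t len, uint32_t i)
    {
            return mix32(val, len, b->hash_seed + i) & b->bitset_mask;
    }

    static void bloom_push(struct toy_bloom *b, const void *val, size_t len)
    {
            for (uint32_t i = 0; i < b->nr_hash_funcs; i++) {
                    uint32_t h = bloom_hash(b, val, len, i);

                    b->bitset[h / (8 * sizeof(unsigned long))] |=
                            1UL << (h % (8 * sizeof(unsigned long)));
            }
    }

    /* 0 = definitely absent, 1 = possibly present (false positives allowed). */
    static int bloom_peek(const struct toy_bloom *b, const void *val, size_t len)
    {
            for (uint32_t i = 0; i < b->nr_hash_funcs; i++) {
                    uint32_t h = bloom_hash(b, val, len, i);

                    if (!(b->bitset[h / (8 * sizeof(unsigned long))] &
                          (1UL << (h % (8 * sizeof(unsigned long))))))
                            return 0;
            }
            return 1;
    }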
|
D | btf.c |
    221 u32 cnt;
    228 u32 *resolved_ids;
    229 u32 *resolved_sizes;
    233 u32 nr_types; /* includes VOID for base BTF */
    234 u32 types_size;
    235 u32 data_size;
    237 u32 id;
    244 u32 start_id; /* first type ID in this BTF (0 for base BTF) */
    245 u32 start_str_off; /* first string offset (0 for base BTF) */
    257 u32 type_id;
    [all …]
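The nr_types/start_id/start_str_off fields reflect how BTF addresses types by u32 ID and names by u32 string offset, with split BTF continuing the ID/offset space of its base. The consumer-side view through libbpf uses the same addressing; a minimal sketch (assumes a recent libbpf with btf__type_cnt() and a readable vmlinux BTF):

    #include <bpf/btf.h>
    #include <stdio.h>

    int main(void)
    {
            struct btf *btf = btf__load_vmlinux_btf();
            __u32 nr, id;

            if (!btf)
                    return 1;

            /* Types are addressed by u32 ID (0 is VOID); names are u32 string offsets. */
            nr = btf__type_cnt(btf);
            for (id = 1; id < nr && id < 16; id++) {
                    const struct btf_type *t = btf__type_by_id(btf, id);

                    printf("id %u: %s\n", id, btf__name_by_offset(btf, t->name_off));
            }
            btf__free(btf);
            return 0;
    }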
|
D | core.c |
    203 const u32 *insn_to_jit_off) in bpf_prog_fill_jited_linfo()
    205 u32 linfo_idx, insn_start, insn_end, nr_linfo, i; in bpf_prog_fill_jited_linfo()
    236 u32 pages; in bpf_prog_realloc()
    276 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64); in bpf_prog_calc_tag()
    277 u32 raw_size = bpf_prog_tag_scratch_size(fp); in bpf_prog_calc_tag()
    278 u32 digest[SHA1_DIGEST_WORDS]; in bpf_prog_calc_tag()
    279 u32 ws[SHA1_WORKSPACE_WORDS]; in bpf_prog_calc_tag()
    280 u32 i, bsize, psize, blocks; in bpf_prog_calc_tag()
    347 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old, in bpf_adj_delta_to_imm()
    365 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old, in bpf_adj_delta_to_off()
    [all …]
|
D | arraymap.c |
    84 u32 elem_size, index_mask, max_entries; in array_map_alloc()
    157 static void *array_map_elem_ptr(struct bpf_array* array, u32 index) in array_map_elem_ptr()
    166 u32 index = *(u32 *)key; in array_map_lookup_elem()
    175 u32 off) in array_map_direct_value_addr()
    189 u32 *off) in array_map_direct_value_meta()
    209 u32 elem_size = array->elem_size; in array_map_gen_lookup()
    241 u32 index = *(u32 *)key; in percpu_array_map_lookup_elem()
    249 static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) in percpu_array_map_lookup_percpu_elem()
    252 u32 index = *(u32 *)key; in percpu_array_map_lookup_percpu_elem()
    266 u32 index = *(u32 *)key; in bpf_percpu_array_copy()
    [all …]
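index_mask in array_map_alloc() rounds max_entries up to a power of two minus one and is applied to the u32 index on every lookup, largely so a mispredicted out-of-bounds index cannot speculatively read past the array (Spectre v1 hardening). The lookup has roughly this shape (simplified sketch, not the exact kernel code; toy_* names are invented):

    #include <stdint.h>
    #include <stddef.h>

    struct toy_array_map {
            uint32_t max_entries;
            uint32_t elem_size;
            uint32_t index_mask;    /* roundup_pow_of_two(max_entries) - 1 */
            char value[];           /* backing storage sized to cover masked indices */
    };

    static void *toy_array_lookup(struct toy_array_map *m, const void *key)
    {
            uint32_t index = *(const uint32_t *)key;

            if (index >= m->max_entries)
                    return NULL;

            /* The extra mask keeps the access in bounds even under misspeculation. */
            return m->value + (uint64_t)m->elem_size * (index & m->index_mask);
    }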
|
D | reuseport_array.c |
    43 if (attr->value_size != sizeof(u32) && in reuseport_array_alloc_check()
    53 u32 index = *(u32 *)key; in reuseport_array_lookup_elem()
    65 u32 index = *(u32 *)key; in reuseport_array_delete_elem()
    98 u32 i; in reuseport_array_free()
    195 u32 map_flags) in reuseport_array_update_check()
    241 u32 index = *(u32 *)key; in bpf_fd_reuseport_array_update_elem()
    323 u32 index = key ? *(u32 *)key : U32_MAX; in reuseport_array_get_next_key()
    324 u32 *next = (u32 *)next_key; in reuseport_array_get_next_key()
|
D | stackmap.c |
    21 u32 hash;
    22 u32 nr;
    30 u32 n_buckets;
    72 u32 value_size = attr->value_size; in stack_map_alloc()
    128 u64 *ips, u32 trace_nr, bool user) in stack_map_get_build_id_offset()
    177 get_callchain_entry_for_task(struct task_struct *task, u32 max_depth) in get_callchain_entry_for_task()
    218 u32 skip = flags & BPF_F_SKIP_FIELD_MASK; in __bpf_get_stackid()
    219 u32 hash, id, trace_nr, trace_len; in __bpf_get_stackid()
    231 hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0); in __bpf_get_stackid()
    286 u32 max_depth = map->value_size / stack_map_data_size(map); in BPF_CALL_3()
    [all …]
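__bpf_get_stackid() deduplicates stack traces: the captured instruction pointers are hashed as u32 words (the jhash2 call at line 231) and the hash picks one of n_buckets slots, whose index becomes the returned id. A self-contained userspace sketch of that idea (toy_* names and hash_words are stand-ins for the kernel's structures and jhash2, not the real code):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define N_BUCKETS 1024          /* power of two, like n_buckets above */
    #define MAX_DEPTH 64

    struct toy_bucket {
            uint32_t hash;
            uint32_t nr;
            uint64_t ips[MAX_DEPTH];
    };

    static struct toy_bucket *buckets[N_BUCKETS];

    /* Stand-in for jhash2(): any 32-bit mix over the u32 words of the trace. */
    static uint32_t hash_words(const uint32_t *w, size_t n, uint32_t seed)
    {
            uint32_t h = seed ^ 2166136261u;

            while (n--)
                    h = (h ^ *w++) * 16777619u;
            return h;
    }

    /* Map a trace ips[0..nr) to a small, stable id; identical traces share one id. */
    static long toy_get_stackid(const uint64_t *ips, uint32_t nr)
    {
            uint32_t hash, id;
            struct toy_bucket *b;

            if (nr > MAX_DEPTH)
                    return -1;

            hash = hash_words((const uint32_t *)ips, nr * 2, 0);
            id = hash & (N_BUCKETS - 1);
            b = buckets[id];

            if (b && b->hash == hash && b->nr == nr &&
                !memcmp(b->ips, ips, nr * sizeof(uint64_t)))
                    return id;      /* seen this exact trace before */

            if (!b) {
                    b = calloc(1, sizeof(*b));
                    if (!b)
                            return -1;
                    buckets[id] = b;
            }
            b->hash = hash;
            b->nr = nr;
            memcpy(b->ips, ips, nr * sizeof(uint64_t));
            return id;
    }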
|
D | hashtab.c |
    103 u32 n_buckets; /* number of hash buckets */
    104 u32 elem_size; /* size of each element in bytes */
    105 u32 hashrnd;
    127 u32 hash;
    150 struct bucket *b, u32 hash, in htab_lock_bucket()
    155 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); in htab_lock_bucket()
    173 struct bucket *b, u32 hash, in htab_unlock_bucket()
    176 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); in htab_unlock_bucket()
    197 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size, in htab_elem_set_ptr()
    203 static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size) in htab_elem_get_ptr()
    [all …]
|
D | bpf_lru_list.c |
    356 u32 hash) in __local_list_add_pending()
    358 *(u32 *)((void *)node + lru->hash_offset) = hash; in __local_list_add_pending()
    405 u32 hash) in bpf_percpu_lru_pop_free()
    426 *(u32 *)((void *)node + lru->hash_offset) = hash; in bpf_percpu_lru_pop_free()
    437 u32 hash) in bpf_common_lru_pop_free()
    499 struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash) in bpf_lru_pop_free()
    565 u32 node_offset, u32 elem_size, in bpf_common_lru_populate()
    566 u32 nr_elems) in bpf_common_lru_populate()
    569 u32 i; in bpf_common_lru_populate()
    583 u32 node_offset, u32 elem_size, in bpf_percpu_lru_populate()
    [all …]
|
D | helpers.c |
    126 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu) in BPF_CALL_3() argument
    253 BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size) in BPF_CALL_2() argument
    564 BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2) in BPF_CALL_3() argument
    579 struct bpf_pidns_info *, nsdata, u32, size) in BPF_CALL_4() argument
    647 BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size, in BPF_CALL_3() argument
    669 BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size, in BPF_CALL_5() argument
    702 BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu) in BPF_CALL_2() argument
    808 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, in bpf_bprintf_prepare()
    809 u32 num_args, struct bpf_bprintf_data *data) in bpf_bprintf_prepare()
    831 data->bin_args = (u32 *)tmp_buf; in bpf_bprintf_prepare()
    [all …]
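BPF_CALL_2(bpf_get_current_comm, ...) at line 253 is the kernel-side definition of a helper; BPF programs reach it through the uapi wrapper of the same name. A minimal caller (sketch; assumes a clang/libbpf build, and the tracepoint name is just an example):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    char LICENSE[] SEC("license") = "GPL";

    SEC("tracepoint/syscalls/sys_enter_execve")
    int log_comm(void *ctx)
    {
            char comm[16];

            /* The helper defined by BPF_CALL_2(bpf_get_current_comm, ...):
             * copies at most `size` bytes of the current task's comm. */
            bpf_get_current_comm(comm, sizeof(comm));
            bpf_printk("exec from %s", comm);
            return 0;
    }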
|
D | cpumap.c |
    56 u32 cpu; /* kthread CPU and map index */
    88 u32 value_size = attr->value_size; in cpu_map_alloc()
    187 u32 act; in cpu_map_bpf_prog_run_skb()
    234 u32 act; in cpu_map_bpf_prog_run_xdp()
    432 u32 cpu) in __cpu_map_entry_alloc()
    547 u32 key_cpu, struct bpf_cpu_map_entry *rcpu) in __cpu_map_entry_replace()
    562 u32 key_cpu = *(u32 *)key; in cpu_map_delete_elem()
    579 u32 key_cpu = *(u32 *)key; in cpu_map_update_elem()
    614 u32 i; in cpu_map_free()
    648 static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) in __cpu_map_lookup_elem()
    [all …]
|
D | devmap.c |
    84 u32 n_buckets;
    113 u32 valsize = attr->value_size; in dev_map_init_map()
    249 u32 index = key ? *(u32 *)key : U32_MAX; in dev_map_get_next_key()
    250 u32 *next = next_key; in dev_map_get_next_key()
    267 static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key) in __dev_map_hash_lookup_elem()
    285 u32 idx, *next = next_key; in dev_map_hash_get_next_key()
    293 idx = *(u32 *)key; in dev_map_hash_get_next_key()
    336 u32 act; in dev_map_bpf_prog_run()
    365 static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags) in bq_xmit_all()
    429 static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key) in __dev_map_lookup_elem()
    [all …]
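__dev_map_lookup_elem() and bq_xmit_all() are the kernel side of XDP redirection: a DEVMAP maps a u32 key to an egress netdevice and batches transmits. The program side that exercises it is small (sketch; tx_port is an invented map name and key 0 is assumed to have been populated with a target ifindex from userspace):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    char LICENSE[] SEC("license") = "GPL";

    struct {
            __uint(type, BPF_MAP_TYPE_DEVMAP);
            __uint(max_entries, 64);
            __type(key, __u32);
            __type(value, __u32);   /* target ifindex */
    } tx_port SEC(".maps");

    SEC("xdp")
    int xdp_redirect_map_prog(struct xdp_md *ctx)
    {
            /* Bounce every frame out of whatever device is stored at key 0. */
            return bpf_redirect_map(&tx_port, 0, 0);
    }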
|
D | bpf_lru_list.h |
    71 int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
    73 void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
    74 u32 elem_size, u32 nr_elems);
    76 struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash);
|
/kernel/locking/ |
D | qspinlock.c |
    116 static inline __pure u32 encode_tail(int cpu, int idx) in encode_tail()
    118 u32 tail; in encode_tail()
    126 static inline __pure struct mcs_spinlock *decode_tail(u32 tail) in decode_tail()
    177 static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) in xchg_tail()
    183 return (u32)xchg_relaxed(&lock->tail, in xchg_tail()
    221 static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) in xchg_tail()
    223 u32 old, new, val = atomic_read(&lock->val); in xchg_tail()
    250 static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock) in queued_fetch_set_pending_acquire()
    278 static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock, in __pv_wait_head_or_lock()
    316 void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) in queued_spin_lock_slowpath()
    [all …]
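encode_tail()/decode_tail() pack a waiter's CPU number and per-CPU MCS node index into the upper bits of the 32-bit lock word (the "tail"), so the queue can be walked without storing pointers. A self-contained sketch of that packing, using illustrative field offsets rather than the real _Q_TAIL_IDX_OFFSET/_Q_TAIL_CPU_OFFSET constants:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative layout: idx in bits 16-17, (cpu + 1) in the bits above.
     * The real offsets come from the kernel's _Q_TAIL_* definitions. */
    #define TAIL_IDX_OFFSET 16
    #define TAIL_IDX_BITS   2
    #define TAIL_CPU_OFFSET (TAIL_IDX_OFFSET + TAIL_IDX_BITS)

    static uint32_t encode_tail(int cpu, int idx)
    {
            /* cpu + 1, so that tail == 0 means "no waiters queued". */
            return ((uint32_t)(cpu + 1) << TAIL_CPU_OFFSET) |
                   ((uint32_t)idx << TAIL_IDX_OFFSET);
    }

    static void decode_tail(uint32_t tail, int *cpu, int *idx)
    {
            *cpu = (int)(tail >> TAIL_CPU_OFFSET) - 1;
            *idx = (tail >> TAIL_IDX_OFFSET) & ((1u << TAIL_IDX_BITS) - 1);
    }

    int main(void)
    {
            int cpu, idx;

            decode_tail(encode_tail(7, 1), &cpu, &idx);
            printf("cpu=%d idx=%d\n", cpu, idx);    /* cpu=7 idx=1 */
            return 0;
    }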
|
/kernel/ |
D | rseq.c |
    87 u32 cpu_id = raw_smp_processor_id(); in rseq_update_cpu_id()
    106 u32 cpu_id_start = 0, cpu_id = RSEQ_CPU_ID_UNINITIALIZED; in rseq_reset_rseq_cpu_id()
    127 u32 __user *usig; in rseq_get_rseq_cs()
    128 u32 sig; in rseq_get_rseq_cs()
    160 usig = (u32 __user *)(unsigned long)(rseq_cs->abort_ip - sizeof(u32)); in rseq_get_rseq_cs()
    174 static bool rseq_warn_flags(const char *str, u32 flags) in rseq_warn_flags()
    176 u32 test_flags; in rseq_warn_flags()
    189 static int rseq_need_restart(struct task_struct *t, u32 cs_flags) in rseq_need_restart()
    191 u32 flags, event_mask; in rseq_need_restart()
    336 SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, in SYSCALL_DEFINE4() argument
    [all …]
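SYSCALL_DEFINE4(rseq, ...) at line 336 is the registration entry point: a thread hands the kernel a struct rseq plus a 32-bit signature, and the same u32 must sit immediately before every abort handler (the usig check at line 160). A minimal registration from userspace (sketch; the RSEQ_SIG value shown is the one conventionally used by librseq/glibc and is an assumption here):

    #define _GNU_SOURCE
    #include <linux/rseq.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Signature checked against the u32 preceding each abort handler. */
    #define RSEQ_SIG 0x53053053

    static __thread struct rseq rseq_area __attribute__((aligned(32)));

    int main(void)
    {
            long ret = syscall(__NR_rseq, &rseq_area, sizeof(rseq_area), 0, RSEQ_SIG);

            if (ret) {
                    perror("rseq");         /* EBUSY if libc already registered one */
                    return 1;
            }
            printf("registered, cpu_id=%u\n", rseq_area.cpu_id);
            return 0;
    }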
|
D | audit.h |
    84 u32 osid;
    146 u32 target_sid;
    163 u32 osid;
    222 static inline int audit_hash_ino(u32 ino) in audit_hash_ino()
    231 extern int audit_comparator(const u32 left, const u32 op, const u32 right);
    232 extern int audit_uid_comparator(kuid_t left, u32 op, kuid_t right);
    233 extern int audit_gid_comparator(kgid_t left, u32 op, kgid_t right);
    270 u32 op);
    292 extern int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op);
|
D | audit.c |
    72 u32 audit_enabled = AUDIT_OFF;
    78 static u32 audit_default = AUDIT_OFF;
    81 static u32 audit_failure = AUDIT_FAIL_PRINTK;
    107 u32 portid;
    117 static u32 audit_rate_limit;
    121 static u32 audit_backlog_limit = 64;
    123 static u32 audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
    128 static u32 audit_sig_sid;
    384 static int audit_log_config_change(char *function_name, u32 new, u32 old, in audit_log_config_change()
    403 static int audit_do_config_change(char *function_name, u32 *to_change, u32 new) in audit_do_config_change()
    [all …]
|
D | user_namespace.c |
    237 u32 id; /* id to find */
    238 u32 count; /* == 0 unless used with map_id_range_down() */
    247 u32 first, last, id2; in cmp_map_id()
    276 map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count) in map_id_range_down_max()
    294 map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count) in map_id_range_down_base()
    297 u32 first, last, id2; in map_id_range_down_base()
    312 static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count) in map_id_range_down()
    327 id = (u32) -1; in map_id_range_down()
    332 static u32 map_id_down(struct uid_gid_map *map, u32 id) in map_id_down()
    343 map_id_up_base(unsigned extents, struct uid_gid_map *map, u32 id) in map_id_up_base()
    [all …]
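map_id_range_down()/map_id_up_base() translate IDs through an array of extents, where each extent maps [first, first + count) in one namespace onto a corresponding range starting at lower_first in the parent. A simplified version of the linear-scan ("base") downward lookup (sketch; toy_* names are invented, and the real code also has a binary-search variant for many extents):

    #include <stdint.h>

    struct toy_extent {
            uint32_t first;         /* start of the range in this namespace */
            uint32_t lower_first;   /* start of the matching parent-ns range */
            uint32_t count;
    };

    /* Translate id into the parent namespace; (uint32_t)-1 means "no mapping". */
    static uint32_t toy_map_id_down(const struct toy_extent *ext, unsigned nr, uint32_t id)
    {
            for (unsigned i = 0; i < nr; i++) {
                    uint32_t first = ext[i].first;
                    uint32_t last = first + ext[i].count - 1;

                    if (id >= first && id <= last)
                            return id - first + ext[i].lower_first;
            }
            return (uint32_t)-1;    /* same sentinel as line 327 above */
    }

For example, with a single extent {first = 0, lower_first = 100000, count = 65536} (a typical /etc/subuid layout), in-namespace ID 1000 translates to 101000 in the parent.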
|
/kernel/gcov/ |
D | clang.c |
    62 u32 checksum;
    70 u32 ident;
    71 u32 checksum;
    72 u32 cfg_checksum;
    74 u32 num_counters;
    105 void llvm_gcda_start_file(const char *orig_filename, u32 version, u32 checksum) in llvm_gcda_start_file()
    113 void llvm_gcda_emit_function(u32 ident, u32 func_checksum, u32 cfg_checksum) in llvm_gcda_emit_function()
    128 void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters) in llvm_gcda_emit_arcs()
    279 u32 i; in gcov_info_add()
    379 u32 i; in convert_to_gcda()
|
/kernel/trace/ |
D | trace_probe_tmpl.h |
    17 *(u32 *)buf = (u32)val; in fetch_store_raw()
    41 *(u32 *)buf <<= code->lshift; in fetch_apply_bitfield()
    42 *(u32 *)buf >>= code->rshift; in fetch_apply_bitfield()
    90 int maxlen = get_loc_len(*(u32 *)dest); in fetch_store_symstring()
    108 u32 loc = 0; in process_fetch_insn_bottom()
    162 loc = *(u32 *)dest; in process_fetch_insn_bottom()
    166 loc = *(u32 *)dest; in process_fetch_insn_bottom()
    170 loc = *(u32 *)dest; in process_fetch_insn_bottom()
    201 dest += sizeof(u32); in process_fetch_insn_bottom()
    202 *(u32 *)dest = update_data_loc(loc, ret); in process_fetch_insn_bottom()
    [all …]
|
D | bpf_trace.c |
    82 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
    163 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr) in bpf_probe_read_user_common()
    173 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size, in BPF_CALL_3() argument
    189 bpf_probe_read_user_str_common(void *dst, u32 size, in bpf_probe_read_user_str_common()
    210 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size, in BPF_CALL_3() argument
    226 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) in bpf_probe_read_kernel_common()
    236 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size, in BPF_CALL_3() argument
    252 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) in bpf_probe_read_kernel_str_common()
    271 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size, in BPF_CALL_3() argument
    287 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size, in BPF_CALL_3()
    [all …]
|
D | trace_probe.h |
    60 (((u32)(len) << 16) | ((u32)(offs) & 0xffff))
    61 #define get_loc_len(dl) ((u32)(dl) >> 16)
    62 #define get_loc_offs(dl) ((u32)(dl) & 0xffff)
    64 static nokprobe_inline void *get_loc_data(u32 *dl, void *ent) in get_loc_data()
    69 static nokprobe_inline u32 update_data_loc(u32 loc, int consumed) in update_data_loc()
    71 u32 maxlen = get_loc_len(loc); in update_data_loc()
    72 u32 offset = get_loc_offs(loc); in update_data_loc()
    145 typedef u32 string;
    146 typedef u32 string_size;
    158 DECLARE_BASIC_PRINT_TYPE_FUNC(u32);
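The macros at lines 60-62 pack a dynamic-array reference into a single u32 "data_loc": the length goes in the high 16 bits and the offset of the data within the trace entry in the low 16 bits (line 60 is the body of the packing macro, make_data_loc in the kernel tree). A tiny standalone demonstration of the round trip:

    #include <stdint.h>
    #include <stdio.h>

    /* Same packing as the kernel's make_data_loc()/get_loc_len()/get_loc_offs(). */
    #define make_data_loc(len, offs) ((((uint32_t)(len)) << 16) | ((uint32_t)(offs) & 0xffff))
    #define get_loc_len(dl)          ((uint32_t)(dl) >> 16)
    #define get_loc_offs(dl)         ((uint32_t)(dl) & 0xffff)

    int main(void)
    {
            /* A 13-byte string stored 24 bytes into the trace entry. */
            uint32_t dl = make_data_loc(13, 24);

            printf("data_loc=0x%08x len=%u offs=%u\n",
                   dl, get_loc_len(dl), get_loc_offs(dl));  /* 0x000d0018 13 24 */
            return 0;
    }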
|