Search: refs:key (results 1 – 25 of 75), sorted by relevance

/kernel/
jump_label.c:72 jea->key = jeb->key - delta; in jump_label_swap()
76 jeb->key = tmp.key + delta; in jump_label_swap()
93 static void jump_label_update(struct static_key *key);
104 int static_key_count(struct static_key *key) in static_key_count() argument
110 int n = atomic_read(&key->enabled); in static_key_count()
116 void static_key_slow_inc_cpuslocked(struct static_key *key) in static_key_slow_inc_cpuslocked() argument
120 STATIC_KEY_CHECK_USE(key); in static_key_slow_inc_cpuslocked()
135 for (v = atomic_read(&key->enabled); v > 0; v = v1) { in static_key_slow_inc_cpuslocked()
136 v1 = atomic_cmpxchg(&key->enabled, v, v + 1); in static_key_slow_inc_cpuslocked()
142 if (atomic_read(&key->enabled) == 0) { in static_key_slow_inc_cpuslocked()
[all …]
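
For orientation: the increments above implement reference-counted static keys. A minimal, hedged sketch of how that API is normally consumed elsewhere in the kernel (the key and function names below are illustrative, not taken from these hits):

    #include <linux/jump_label.h>

    static DEFINE_STATIC_KEY_FALSE(my_feature_key);     /* hypothetical key */

    static void do_extra_accounting(void)
    {
            /* placeholder for the rarely enabled work */
    }

    void my_fast_path(void)
    {
            /* becomes a patched jump/no-op, not a load-and-test of key->enabled */
            if (static_branch_unlikely(&my_feature_key))
                    do_extra_accounting();
    }

    void my_feature_get(void)
    {
            static_branch_inc(&my_feature_key);  /* lands in static_key_slow_inc() */
    }

    void my_feature_put(void)
    {
            static_branch_dec(&my_feature_key);
    }
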
static_call_inline.c:40 return (long)site->key + (long)&site->key; in __static_call_key()
61 site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) - in static_call_set_init()
62 (long)&site->key; in static_call_set_init()
89 a->key = b->key - delta; in static_call_site_swap()
92 b->key = tmp.key + delta; in static_call_site_swap()
102 static inline bool static_call_key_has_mods(struct static_call_key *key) in static_call_key_has_mods() argument
104 return !(key->type & 1); in static_call_key_has_mods()
107 static inline struct static_call_mod *static_call_key_next(struct static_call_key *key) in static_call_key_next() argument
109 if (!static_call_key_has_mods(key)) in static_call_key_next()
112 return key->mods; in static_call_key_next()
[all …]
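
Similarly hedged, a sketch of the static_call() API whose call-site keys the code above relocates and sorts; the call name and handlers are made up for illustration:

    #include <linux/static_call.h>

    static int default_handler(int x)            /* hypothetical handlers */
    {
            return x;
    }

    static int optimized_handler(int x)
    {
            return x * 2;
    }

    DEFINE_STATIC_CALL(my_handler, default_handler);    /* hypothetical call name */

    int my_dispatch(int x)
    {
            /* direct call whose target can be re-patched at runtime */
            return static_call(my_handler)(x);
    }

    void my_switch_to_optimized(void)
    {
            static_call_update(my_handler, optimized_handler);
    }
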
user_namespace.c:243 const struct idmap_key *key = k; in cmp_map_id() local
246 id2 = key->id + key->count - 1; in cmp_map_id()
249 if (key->map_up) in cmp_map_id()
256 if (key->id >= first && key->id <= last && in cmp_map_id()
260 if (key->id < first || id2 < first) in cmp_map_id()
273 struct idmap_key key; in map_id_range_down_max() local
275 key.map_up = false; in map_id_range_down_max()
276 key.count = count; in map_id_range_down_max()
277 key.id = id; in map_id_range_down_max()
279 return bsearch(&key, map->forward, extents, in map_id_range_down_max()
[all …]
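
The user_namespace.c hits build a search key and binary-search an array of ID extents. A hedged, self-contained sketch of the same pattern with the kernel's bsearch() (the extent struct here is illustrative, loosely modeled on struct uid_gid_extent):

    #include <linux/bsearch.h>
    #include <linux/types.h>

    struct id_extent {                   /* hypothetical extent type */
            u32 first;
            u32 count;
    };

    static int cmp_extent(const void *k, const void *e)
    {
            u32 id = *(const u32 *)k;
            const struct id_extent *ext = e;

            if (id < ext->first)
                    return -1;
            if (id >= ext->first + ext->count)
                    return 1;
            return 0;                    /* id falls inside this extent */
    }

    static struct id_extent *find_extent(struct id_extent *extents,
                                         unsigned int nr, u32 id)
    {
            return bsearch(&id, extents, nr, sizeof(*extents), cmp_extent);
    }
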
audit_tree.c:27 unsigned long key; member
219 static inline struct list_head *chunk_hash(unsigned long key) in chunk_hash() argument
221 unsigned long n = key / L1_CACHE_BYTES; in chunk_hash()
236 WARN_ON_ONCE(!chunk->key); in insert_hash()
237 list = chunk_hash(chunk->key); in insert_hash()
244 unsigned long key = inode_to_key(inode); in audit_tree_lookup() local
245 struct list_head *list = chunk_hash(key); in audit_tree_lookup()
253 if (READ_ONCE(p->key) == key) { in audit_tree_lookup()
298 new->key = old->key; in replace_chunk()
440 chunk->key = inode_to_key(inode); in create_chunk()
[all …]
tracepoint.c:330 if (tp->regfunc && !static_key_enabled(&tp->key)) { in tracepoint_add_func()
361 static_key_enable(&tp->key); in tracepoint_add_func()
414 if (tp->unregfunc && static_key_enabled(&tp->key)) in tracepoint_remove_func()
417 static_key_disable(&tp->key); in tracepoint_remove_func()
802 if (!static_key_enabled(&tp->key)) { in rvh_func_add()
811 if (!static_key_enabled(&tp->key)) in rvh_func_add()
826 if (tp->regfunc && !static_key_enabled(&tp->key)) { in android_rvh_add_func()
836 static_key_enable(&tp->key); in android_rvh_add_func()
855 if (WARN_ON(static_key_enabled(&tp->key) && data)) in android_rvh_probe_register()
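
The tracepoint.c hits flip a per-tracepoint static key when probes attach. A hedged sketch of the other side of that mechanism, assuming the DECLARE_TRACE/DEFINE_TRACE form used by recent kernels (the event name and probe are illustrative):

    #include <linux/tracepoint.h>
    #include <linux/printk.h>

    DECLARE_TRACE(my_event, TP_PROTO(int value), TP_ARGS(value));
    DEFINE_TRACE(my_event, TP_PROTO(int value), TP_ARGS(value));

    void my_hot_path(int v)
    {
            /* expands to a static_key-guarded call: a no-op until the
             * tracepoint's key is enabled by a registered probe */
            trace_my_event(v);
    }

    static void my_probe(void *data, int value)
    {
            pr_info("my_event fired: %d\n", value);
    }

    int my_attach(void)
    {
            /* tracepoint_add_func() enables tp->key once a probe exists */
            return register_trace_my_event(my_probe, NULL);
    }
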
/kernel/sched/
wait_bit.c:23 struct wait_bit_key *key = arg; in wake_bit_function() local
26 if (wait_bit->key.flags != key->flags || in wake_bit_function()
27 wait_bit->key.bit_nr != key->bit_nr || in wake_bit_function()
28 test_bit(key->bit_nr, key->flags)) in wake_bit_function()
31 return autoremove_wake_function(wq_entry, mode, sync, key); in wake_bit_function()
48 if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) in __wait_on_bit()
49 ret = (*action)(&wbq_entry->key, mode); in __wait_on_bit()
50 } while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret); in __wait_on_bit()
75 wq_entry.key.timeout = jiffies + timeout; in out_of_line_wait_on_bit_timeout()
89 if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) { in __wait_on_bit_lock()
[all …]
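
These wait_bit.c hits compare wait_bit_key contents (flag word, bit number) during wakeups. A hedged sketch of the caller-side pattern they serve (flag word and bit number are illustrative):

    #include <linux/wait_bit.h>
    #include <linux/bitops.h>
    #include <linux/atomic.h>
    #include <linux/sched.h>

    #define MY_FLAG_BUSY 0                       /* hypothetical bit number */

    static unsigned long my_flags;               /* hypothetical flag word */

    int my_wait_until_idle(void)
    {
            /* sleeps until the bit clears; the (word, bit) pair is the
             * wait_bit_key that wake_bit_function() matches on */
            return wait_on_bit(&my_flags, MY_FLAG_BUSY, TASK_INTERRUPTIBLE);
    }

    void my_mark_idle(void)
    {
            clear_bit(MY_FLAG_BUSY, &my_flags);
            smp_mb__after_atomic();              /* order the clear before the wakeup */
            wake_up_bit(&my_flags, MY_FLAG_BUSY);
    }
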
wait.c:10 …_init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key) in __init_waitqueue_head() argument
13 lockdep_set_class_and_name(&wq_head->lock, key, name); in __init_waitqueue_head()
83 int nr_exclusive, int wake_flags, void *key, in __wake_up_common() argument
109 ret = curr->func(curr, mode, wake_flags, key); in __wake_up_common()
127 int nr_exclusive, int wake_flags, void *key) in __wake_up_common_lock() argument
140 wake_flags, key, &bookmark); in __wake_up_common_lock()
156 int nr_exclusive, void *key) in __wake_up() argument
158 __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key); in __wake_up()
171 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key) in __wake_up_locked_key() argument
173 __wake_up_common(wq_head, mode, 1, 0, key, NULL); in __wake_up_locked_key()
[all …]
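
For the wait.c hits: __wake_up() and friends are usually reached through the high-level wait_event()/wake_up() pair. A minimal hedged sketch (queue, flag, and function names are illustrative):

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_wq);       /* hypothetical wait queue */
    static bool my_data_ready;

    int my_consumer(void)
    {
            /* sleeps on my_wq until the condition becomes true */
            return wait_event_interruptible(my_wq, my_data_ready);
    }

    void my_producer(void)
    {
            my_data_ready = true;
            /* ends up in __wake_up() -> __wake_up_common(..., key == NULL) */
            wake_up(&my_wq);
    }
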
/kernel/bpf/
hashtab.c:127 char key[] __aligned(8); member
226 *(void __percpu **)(l->key + key_size) = pptr; in htab_elem_set_ptr()
231 return *(void __percpu **)(l->key + key_size); in htab_elem_get_ptr()
236 return *(void **)(l->key + roundup(map->key_size, 8)); in fd_htab_map_get_ptr()
263 bpf_timer_cancel_and_free(elem->key + in htab_free_prealloced_timers()
300 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, in prealloc_lru_pop() argument
308 memcpy(l->key, key, htab->map.key_size); in prealloc_lru_pop()
575 static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd) in htab_map_hash() argument
577 return jhash(key, key_len, hashrnd); in htab_map_hash()
592 void *key, u32 key_size) in lookup_elem_raw() argument
[all …]
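
The hashtab.c hits are the kernel side of BPF_MAP_TYPE_HASH: keys are jhash()ed and compared raw. A hedged sketch of the BPF-program side that drives these paths (map and section names are illustrative; built with clang and libbpf):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_HASH);
            __uint(max_entries, 1024);
            __type(key, __u32);
            __type(value, __u64);
    } exec_counts SEC(".maps");

    SEC("tracepoint/syscalls/sys_enter_execve")
    int count_execve(void *ctx)
    {
            __u32 key = bpf_get_current_pid_tgid() >> 32;   /* tgid as the key */
            __u64 one = 1, *val;

            val = bpf_map_lookup_elem(&exec_counts, &key);  /* lookup_elem_raw() path */
            if (val)
                    __sync_fetch_and_add(val, 1);
            else
                    bpf_map_update_elem(&exec_counts, &key, &one, BPF_ANY);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
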
lpm_trie.c:166 const struct bpf_lpm_trie_key *key) in longest_prefix_match() argument
168 u32 limit = min(node->prefixlen, key->prefixlen); in longest_prefix_match()
181 *(__be64 *)key->data); in longest_prefix_match()
194 *(__be32 *)&key->data[i]); in longest_prefix_match()
206 *(__be16 *)&key->data[i]); in longest_prefix_match()
217 prefixlen += 8 - fls(node->data[i] ^ key->data[i]); in longest_prefix_match()
231 struct bpf_lpm_trie_key *key = _key; in trie_lookup_elem() local
233 if (key->prefixlen > trie->max_prefixlen) in trie_lookup_elem()
247 matchlen = longest_prefix_match(trie, node, key); in trie_lookup_elem()
270 next_bit = extract_bit(key->data, node->prefixlen); in trie_lookup_elem()
[all …]
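
For the lpm_trie.c hits: the key is a struct bpf_lpm_trie_key (a prefix length followed by raw data). A hedged userspace sketch for IPv4 prefixes, assuming an existing BPF_MAP_TYPE_LPM_TRIE file descriptor and the libbpf syscall wrappers:

    #include <arpa/inet.h>
    #include <linux/bpf.h>
    #include <bpf/bpf.h>

    struct ipv4_lpm_key {                /* mirrors struct bpf_lpm_trie_key */
            __u32 prefixlen;
            __u32 addr;                  /* key->data, network byte order */
    };

    int my_add_prefix(int map_fd, const char *addr, __u32 prefixlen, __u64 value)
    {
            struct ipv4_lpm_key key = { .prefixlen = prefixlen };

            if (inet_pton(AF_INET, addr, &key.addr) != 1)
                    return -1;
            return bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
    }

    int my_lookup(int map_fd, __u32 addr_be, __u64 *value)
    {
            struct ipv4_lpm_key key = { .prefixlen = 32, .addr = addr_be };

            /* longest_prefix_match() selects the most specific stored prefix */
            return bpf_map_lookup_elem(map_fd, &key, value);
    }
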
local_storage.c:67 void *key, bool locked) in cgroup_storage_lookup() argument
81 switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) { in cgroup_storage_lookup()
113 switch (bpf_cgroup_storage_key_cmp(map, &storage->key, &this->key)) { in cgroup_storage_insert()
131 static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *key) in cgroup_storage_lookup_elem() argument
136 storage = cgroup_storage_lookup(map, key, false); in cgroup_storage_lookup_elem()
143 static int cgroup_storage_update_elem(struct bpf_map *map, void *key, in cgroup_storage_update_elem() argument
157 key, false); in cgroup_storage_update_elem()
182 int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key, in bpf_percpu_cgroup_storage_copy() argument
191 storage = cgroup_storage_lookup(map, key, false); in bpf_percpu_cgroup_storage_copy()
211 int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key, in bpf_percpu_cgroup_storage_update() argument
[all …]
arraymap.c:159 static void *array_map_lookup_elem(struct bpf_map *map, void *key) in array_map_lookup_elem() argument
162 u32 index = *(u32 *)key; in array_map_lookup_elem()
234 static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key) in percpu_array_map_lookup_elem() argument
237 u32 index = *(u32 *)key; in percpu_array_map_lookup_elem()
245 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) in bpf_percpu_array_copy() argument
248 u32 index = *(u32 *)key; in bpf_percpu_array_copy()
272 static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key) in array_map_get_next_key() argument
275 u32 index = key ? *(u32 *)key : U32_MAX; in array_map_get_next_key()
297 static int array_map_update_elem(struct bpf_map *map, void *key, void *value, in array_map_update_elem() argument
301 u32 index = *(u32 *)key; in array_map_update_elem()
[all …]
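
For arraymap.c, the key is simply a u32 index bounds-checked against max_entries. A hedged userspace sketch against an existing BPF_MAP_TYPE_ARRAY fd, using the libbpf wrappers (names are illustrative):

    #include <bpf/bpf.h>
    #include <linux/types.h>

    int my_bump_slot(int map_fd, __u32 index)
    {
            __u64 value = 0;

            /* the key is just the element index, as in array_map_lookup_elem() */
            if (bpf_map_lookup_elem(map_fd, &index, &value))
                    return -1;

            value++;
            return bpf_map_update_elem(map_fd, &index, &value, BPF_ANY);
    }
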
devmap.c:245 static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key) in dev_map_get_next_key() argument
248 u32 index = key ? *(u32 *)key : U32_MAX; in dev_map_get_next_key()
266 static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key) in __dev_map_hash_lookup_elem() argument
269 struct hlist_head *head = dev_map_index_hash(dtab, key); in __dev_map_hash_lookup_elem()
274 if (dev->idx == key) in __dev_map_hash_lookup_elem()
280 static int dev_map_hash_get_next_key(struct bpf_map *map, void *key, in dev_map_hash_get_next_key() argument
289 if (!key) in dev_map_hash_get_next_key()
292 idx = *(u32 *)key; in dev_map_hash_get_next_key()
428 static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key) in __dev_map_lookup_elem() argument
433 if (key >= map->max_entries) in __dev_map_lookup_elem()
[all …]
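
The devmap.c hits resolve a u32 key to a target net_device for XDP redirects. A hedged sketch of the XDP-program side (map layout and names are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_DEVMAP);
            __uint(max_entries, 8);
            __type(key, __u32);          /* slot index, see __dev_map_lookup_elem() */
            __type(value, __u32);        /* target ifindex */
    } tx_ports SEC(".maps");

    SEC("xdp")
    int my_xdp_forward(struct xdp_md *ctx)
    {
            /* redirect via slot 0; XDP_PASS in the flags is the fallback
             * action if the lookup misses */
            return bpf_redirect_map(&tx_ports, 0, XDP_PASS);
    }

    char LICENSE[] SEC("license") = "GPL";
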
reuseport_array.c:52 static void *reuseport_array_lookup_elem(struct bpf_map *map, void *key) in reuseport_array_lookup_elem() argument
55 u32 index = *(u32 *)key; in reuseport_array_lookup_elem()
64 static int reuseport_array_delete_elem(struct bpf_map *map, void *key) in reuseport_array_delete_elem() argument
67 u32 index = *(u32 *)key; in reuseport_array_delete_elem()
174 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, in bpf_fd_reuseport_array_lookup_elem() argument
184 sk = reuseport_array_lookup_elem(map, key); in bpf_fd_reuseport_array_lookup_elem()
241 int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, in bpf_fd_reuseport_array_update_elem() argument
247 u32 index = *(u32 *)key; in bpf_fd_reuseport_array_update_elem()
325 static int reuseport_array_get_next_key(struct bpf_map *map, void *key, in reuseport_array_get_next_key() argument
329 u32 index = key ? *(u32 *)key : U32_MAX; in reuseport_array_get_next_key()
syscall.c:176 static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key, in bpf_map_update_value() argument
183 return bpf_map_offload_update_elem(map, key, value, flags); in bpf_map_update_value()
186 return map->ops->map_update_elem(map, key, value, flags); in bpf_map_update_value()
189 return sock_map_update_elem_sys(map, key, value, flags); in bpf_map_update_value()
191 return bpf_fd_array_map_update_elem(map, f.file, key, value, in bpf_map_update_value()
198 err = bpf_percpu_hash_update(map, key, value, flags); in bpf_map_update_value()
200 err = bpf_percpu_array_update(map, key, value, flags); in bpf_map_update_value()
202 err = bpf_percpu_cgroup_storage_update(map, key, value, in bpf_map_update_value()
206 err = bpf_fd_array_map_update_elem(map, f.file, key, value, in bpf_map_update_value()
211 err = bpf_fd_htab_map_update_elem(map, f.file, key, value, in bpf_map_update_value()
[all …]
bpf_inode_storage.c:103 static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key) in bpf_fd_inode_storage_lookup_elem() argument
109 fd = *(int *)key; in bpf_fd_inode_storage_lookup_elem()
119 static int bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key, in bpf_fd_inode_storage_update_elem() argument
126 fd = *(int *)key; in bpf_fd_inode_storage_update_elem()
155 static int bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key) in bpf_fd_inode_storage_delete_elem() argument
160 fd = *(int *)key; in bpf_fd_inode_storage_delete_elem()
216 static int notsupp_get_next_key(struct bpf_map *map, void *key, in notsupp_get_next_key() argument
bpf_task_storage.c:116 static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key) in bpf_pid_task_storage_lookup_elem() argument
124 fd = *(int *)key; in bpf_pid_task_storage_lookup_elem()
149 static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key, in bpf_pid_task_storage_update_elem() argument
158 fd = *(int *)key; in bpf_pid_task_storage_update_elem()
197 static int bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key) in bpf_pid_task_storage_delete_elem() argument
204 fd = *(int *)key; in bpf_pid_task_storage_delete_elem()
278 static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key) in notsupp_get_next_key() argument
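
bpf_task_storage.c keys these maps by a pid fd on the syscall path; BPF programs instead reach the same storage through the bpf_task_storage_get() helper. A hedged sketch assuming a kernel where that helper is available to tracing programs (the attach point, map, and names are illustrative):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    struct {
            __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
            __uint(map_flags, BPF_F_NO_PREALLOC);
            __type(key, int);
            __type(value, __u64);
    } open_counts SEC(".maps");

    SEC("fentry/security_file_open")
    int BPF_PROG(my_count_opens)
    {
            struct task_struct *task = bpf_get_current_task_btf();
            __u64 *cnt;

            cnt = bpf_task_storage_get(&open_counts, task, 0,
                                       BPF_LOCAL_STORAGE_GET_F_CREATE);
            if (cnt)
                    (*cnt)++;
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
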
inode.c:175 void *key; member
192 kfree(iter->key); in map_iter_free()
205 iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN); in map_iter_alloc()
206 if (!iter->key) in map_iter_alloc()
219 void *key = map_iter(m)->key; in map_seq_next() local
229 prev_key = key; in map_seq_next()
232 if (map->ops->map_get_next_key(map, prev_key, key)) { in map_seq_next()
234 key = NULL; in map_seq_next()
237 return key; in map_seq_next()
245 return *pos ? map_iter(m)->key : SEQ_START_TOKEN; in map_seq_start()
[all …]
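
inode.c's seq iterator walks a map with ->map_get_next_key(), passing NULL first and then the previous key. The same protocol from userspace, as a hedged sketch (assumes a map with __u32 keys and __u64 values):

    #include <bpf/bpf.h>
    #include <linux/types.h>
    #include <stdio.h>

    void my_dump_map(int map_fd)
    {
            __u32 key, next_key;
            __u64 value;
            void *prev = NULL;           /* NULL asks for the first key */

            while (bpf_map_get_next_key(map_fd, prev, &next_key) == 0) {
                    if (bpf_map_lookup_elem(map_fd, &next_key, &value) == 0)
                            printf("%u -> %llu\n", next_key,
                                   (unsigned long long)value);
                    key = next_key;
                    prev = &key;
            }
    }
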
bpf_struct_ops.c:232 static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key, in bpf_struct_ops_map_get_next_key() argument
235 if (key && *(u32 *)key == 0) in bpf_struct_ops_map_get_next_key()
242 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, in bpf_struct_ops_map_sys_lookup_elem() argument
249 if (unlikely(*(u32 *)key != 0)) in bpf_struct_ops_map_sys_lookup_elem()
271 static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key) in bpf_struct_ops_map_lookup_elem() argument
315 static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, in bpf_struct_ops_map_update_elem() argument
332 if (*(u32 *)key != 0) in bpf_struct_ops_map_update_elem()
486 static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key) in bpf_struct_ops_map_delete_elem() argument
512 static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key, in bpf_struct_ops_map_seq_show_elem() argument
522 err = bpf_struct_ops_map_sys_lookup_elem(map, key, value); in bpf_struct_ops_map_seq_show_elem()
cpumap.c:556 static int cpu_map_delete_elem(struct bpf_map *map, void *key) in cpu_map_delete_elem() argument
559 u32 key_cpu = *(u32 *)key; in cpu_map_delete_elem()
569 static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value, in cpu_map_update_elem() argument
576 u32 key_cpu = *(u32 *)key; in cpu_map_update_elem()
645 static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) in __cpu_map_lookup_elem() argument
650 if (key >= map->max_entries) in __cpu_map_lookup_elem()
653 rcpu = rcu_dereference_check(cmap->cpu_map[key], in __cpu_map_lookup_elem()
658 static void *cpu_map_lookup_elem(struct bpf_map *map, void *key) in cpu_map_lookup_elem() argument
661 __cpu_map_lookup_elem(map, *(u32 *)key); in cpu_map_lookup_elem()
666 static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key) in cpu_map_get_next_key() argument
[all …]
trampoline.c:61 static struct bpf_trampoline *bpf_trampoline_lookup(u64 key) in bpf_trampoline_lookup() argument
68 head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)]; in bpf_trampoline_lookup()
70 if (tr->key == key) { in bpf_trampoline_lookup()
79 tr->key = key; in bpf_trampoline_lookup()
293 static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx) in bpf_tramp_image_alloc() argument
319 snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx); in bpf_tramp_image_alloc()
353 im = bpf_tramp_image_alloc(tr->key, tr->selector); in bpf_trampoline_update()
487 struct bpf_trampoline *bpf_trampoline_get(u64 key, in bpf_trampoline_get() argument
492 tr = bpf_trampoline_lookup(key); in bpf_trampoline_get()
/kernel/trace/
tracing_map.c:399 kfree(elt->key); in tracing_map_elt_free()
414 elt->key = kzalloc(map->key_size, GFP_KERNEL); in tracing_map_elt_alloc()
415 if (!elt->key) { in tracing_map_elt_alloc()
505 static inline bool keys_match(void *key, void *test_key, unsigned key_size) in keys_match() argument
509 if (memcmp(key, test_key, key_size)) in keys_match()
516 __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only) in __tracing_map_insert() argument
523 key_hash = jhash(key, map->key_size, 0); in __tracing_map_insert()
531 test_key = entry->key; in __tracing_map_insert()
536 keys_match(key, val->key, map->key_size)) { in __tracing_map_insert()
566 if (!cmpxchg(&entry->key, 0, key_hash)) { in __tracing_map_insert()
[all …]
tracing_map.h:143 void *key; member
148 u32 key; member
158 void *key; member
257 tracing_map_insert(struct tracing_map *map, void *key);
259 tracing_map_lookup(struct tracing_map *map, void *key);
/kernel/debug/kdb/
kdb_io.c:134 int key; in kdb_getchar() local
143 key = (*f)(); in kdb_getchar()
144 if (key == -1) { in kdb_getchar()
164 *pbuf++ = key; in kdb_getchar()
165 key = kdb_handle_escape(buf, pbuf - buf); in kdb_getchar()
166 if (key < 0) /* no escape sequence; return best character */ in kdb_getchar()
168 if (key > 0) in kdb_getchar()
169 return key; in kdb_getchar()
211 int key, buf_size, ret; in kdb_read() local
228 key = kdb_getchar(); in kdb_read()
[all …]
/kernel/futex/
core.c:188 union futex_key key; member
220 union futex_key key; member
284 .key = FUTEX_KEY_INIT,
418 static struct futex_hash_bucket *hash_futex(union futex_key *key) in hash_futex() argument
420 u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4, in hash_futex()
421 key->both.offset); in hash_futex()
543 static int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key, in get_futex_key() argument
555 key->both.offset = address % PAGE_SIZE; in get_futex_key()
558 address -= key->both.offset; in get_futex_key()
581 key->private.mm = mm; in get_futex_key()
[all …]
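
get_futex_key() above turns a user address into the hash key for a bucket. A hedged userspace sketch of the two operations that exercise it, FUTEX_WAIT and FUTEX_WAKE on a single 32-bit word (wrapper and variable names are illustrative):

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stddef.h>

    static _Atomic uint32_t my_futex_word;   /* its address feeds get_futex_key() */

    static long my_futex(uint32_t *uaddr, int op, uint32_t val)
    {
            return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
    }

    void my_waiter(void)
    {
            /* sleep only while the word still holds 0 */
            while (atomic_load(&my_futex_word) == 0)
                    my_futex((uint32_t *)&my_futex_word, FUTEX_WAIT, 0);
    }

    void my_waker(void)
    {
            atomic_store(&my_futex_word, 1);
            my_futex((uint32_t *)&my_futex_word, FUTEX_WAKE, 1);   /* wake one waiter */
    }
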
/kernel/locking/
lockdep.c:376 #define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS) argument
377 #define classhashentry(key) (classhash_table + __classhashfn((key))) argument
413 static inline u64 iterate_chain_key(u64 key, u32 idx) in iterate_chain_key() argument
415 u32 k0 = key, k1 = key >> 32; in iterate_chain_key()
632 const char *__get_key_name(const struct lockdep_subclass_key *key, char *str) in __get_key_name() argument
634 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str); in __get_key_name()
688 name = __get_key_name(class->key, str); in __print_lock_name()
719 name = __get_key_name(lock->key->subkeys, str); in print_lockdep_cache()
834 if (new_class->key - new_class->subclass == class->key) in count_matching_names()
847 struct lockdep_subclass_key *key; in look_up_lock_class() local
[all …]
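
lockdep.c hashes lock_class_key pointers into class buckets. A hedged sketch of where such keys come from on the user side: giving two instances of the same lock type distinct classes so legitimate nesting is not flagged as self-deadlock (names are illustrative):

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    struct my_node {                     /* hypothetical structure */
            spinlock_t lock;
    };

    static struct lock_class_key my_parent_class;   /* hashed via __classhashfn() */
    static struct lock_class_key my_child_class;

    void my_nodes_init(struct my_node *parent, struct my_node *child)
    {
            spin_lock_init(&parent->lock);
            spin_lock_init(&child->lock);
            lockdep_set_class(&parent->lock, &my_parent_class);
            lockdep_set_class(&child->lock, &my_child_class);
    }
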
