/kernel/
D | kthread.c (k: function argument)
    in to_kthread():
          74  static inline struct kthread *to_kthread(struct task_struct *k)
          76          WARN_ON(!(k->flags & PF_KTHREAD));
          77          return (__force void *)k->set_child_tid;
    in free_kthread_struct():
          99  void free_kthread_struct(struct task_struct *k)
         107          kthread = to_kthread(k);
    in __kthread_should_park():
         127  bool __kthread_should_park(struct task_struct *k)
         129          return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
    in kthread_set_per_cpu():
         497  void kthread_set_per_cpu(struct task_struct *k, int cpu)
         499          struct kthread *kthread = to_kthread(k);
         503          WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
    [all …]
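The to_kthread() lines above show the trick the rest of the file leans on: for tasks flagged PF_KTHREAD, the otherwise-unused set_child_tid field of task_struct doubles as the pointer to the thread's struct kthread. A minimal userspace sketch of the same back-pointer pattern (the struct and flag names here are hypothetical stand-ins, not the kernel's):

    #include <assert.h>

    /* Hypothetical stand-ins for task_struct and struct kthread. */
    struct ktdata { unsigned long state; };
    struct task   { unsigned int flags; void *private_ptr; };

    #define TASK_KTHREAD 0x1u   /* plays the role of PF_KTHREAD */

    /* Same shape as to_kthread(): the cast is only safe because the
     * flag records which subsystem owns the pointer field. */
    static struct ktdata *task_to_ktdata(struct task *t)
    {
        assert(t->flags & TASK_KTHREAD);   /* mirrors the WARN_ON() */
        return t->private_ptr;
    }

Funneling every access through one accessor, as free_kthread_struct() and kthread_set_per_cpu() do, keeps the field aliasing in a single place.
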
D | range.c (k: local)
    in clean_sort_range():
         127          int i, j, k = az - 1, nr_range = az;
         129          for (i = 0; i < k; i++) {
         132                  for (j = k; j > i; j--) {
         134                                  k = j;
         140                  range[i].start = range[k].start;
         141                  range[i].end = range[k].end;
         142                  range[k].start = 0;
         143                  range[k].end = 0;
         144                  k--;
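What the fragment shows: clean_sort_range() fills every empty slot i by pulling in the right-most non-empty range, with k remembering how far the live tail reaches. A simplified standalone sketch of that compaction step (it omits the sort and exact return handling of the kernel version):

    struct range { unsigned long start, end; };

    static int compact_ranges(struct range *range, int az)
    {
        int i, j, k = az - 1;

        for (i = 0; i < k; i++) {
            if (range[i].end)               /* slot already occupied */
                continue;
            for (j = k; j > i; j--)         /* find right-most live entry */
                if (range[j].end) {
                    k = j;
                    break;
                }
            if (j == i)                     /* nothing left to move */
                break;
            range[i] = range[k];            /* pull it forward */
            range[k].start = range[k].end = 0;
            k--;
        }
        for (i = 0; i < az && range[i].end; i++)
            ;                               /* count surviving ranges */
        return i;
    }

Because k only ever shrinks, the two nested loops still do O(n) work in total.
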
D | audit.h (k: macro argument)
         301  #define audit_to_watch(k, p, l, o)   (-EINVAL)
         302  #define audit_add_watch(k, l)        (-EINVAL)
         303  #define audit_remove_watch_rule(k)   BUG()
         307  #define audit_alloc_mark(k, p, l)    (ERR_PTR(-EINVAL))
         310  #define audit_remove_mark_rule(k)
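These are compiled-out stubs: when the relevant audit config option is off, the watch/mark API collapses to constant error values (or BUG() for calls that must never happen), so callers need no #ifdef of their own. The pattern in isolation, with invented names (CONFIG_MY_FEATURE, my_feature_add):

    #include <errno.h>

    #ifdef CONFIG_MY_FEATURE
    int my_feature_add(void *key, int flags);   /* real implementation elsewhere */
    #else
    #define my_feature_add(k, f)  (-EINVAL)     /* feature compiled out */
    #endif

    /* A call site stays identical either way:
     *     if (my_feature_add(key, 0) < 0)
     *             handle_error();
     */
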
D | kexec_file.c (k: local)
    in kexec_purgatory_find_symbol():
        1053          int i, k;
        1072          for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
        1073                  if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)
        1076                  if (strcmp(strtab + syms[k].st_name, name) != 0)
        1079                  if (syms[k].st_shndx == SHN_UNDEF ||
        1080                      syms[k].st_shndx >= ehdr->e_shnum) {
        1082                          name, syms[k].st_shndx);
        1087          return &syms[k];
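The loop scans the purgatory's ELF symbol table for a global, defined symbol with the requested name. A userspace sketch of the same scan using <elf.h> (64-bit types chosen for concreteness; the kernel code is width-generic via Elf_Sym):

    #include <elf.h>
    #include <stddef.h>
    #include <string.h>

    static Elf64_Sym *find_global_symbol(Elf64_Sym *syms, size_t nsyms,
                                         const char *strtab, const char *name,
                                         unsigned int e_shnum)
    {
        for (size_t k = 0; k < nsyms; k++) {
            if (ELF64_ST_BIND(syms[k].st_info) != STB_GLOBAL)
                continue;               /* only globals are candidates */
            if (strcmp(strtab + syms[k].st_name, name) != 0)
                continue;               /* wrong name */
            if (syms[k].st_shndx == SHN_UNDEF || syms[k].st_shndx >= e_shnum)
                return NULL;            /* matched, but not defined here */
            return &syms[k];
        }
        return NULL;
    }
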
D | compat.c (k: local)
    in compat_get_user_cpu_mask():
         201          unsigned long *k;
         208          k = cpumask_bits(new_mask);
         209          return compat_get_bitmap(k, user_mask_ptr, len * 8);
D | seccomp.c (k: local)
    in seccomp_check_filter():
         191          u32 k = ftest->k;
         197                  if (k >= sizeof(struct seccomp_data) || k & 3)
         202                  ftest->k = sizeof(struct seccomp_data);
         206                  ftest->k = sizeof(struct seccomp_data);
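The check being performed: a classic-BPF absolute load may only read inside struct seccomp_data, at 4-byte-aligned offsets, since every field is a 32-bit cell from the filter's point of view; the later lines rewrite k for accesses the checker emulates. The guard in isolation (UAPI header assumed available):

    #include <stdbool.h>
    #include <stdint.h>
    #include <linux/seccomp.h>   /* UAPI definition of struct seccomp_data */

    static bool seccomp_offset_ok(uint32_t k)
    {
        /* in range for struct seccomp_data, and 4-byte aligned */
        return k < sizeof(struct seccomp_data) && !(k & 3);
    }
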
D | user_namespace.c (k: function argument)
    in cmp_map_id():
         223  static int cmp_map_id(const void *k, const void *e)
         226          const struct idmap_key *key = k;
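cmp_map_id() is a bsearch()-style comparator: k is the opaque search key and e the opaque array element, and the function's first job is to cast them back. A self-contained sketch of the shape (the structs here are simplified stand-ins; the kernel's struct idmap_key carries more fields):

    #include <stdlib.h>

    struct idmap_key    { unsigned int id; };
    struct idmap_extent { unsigned int first, count; };

    static int cmp_map_id(const void *k, const void *e)
    {
        const struct idmap_key    *key = k;
        const struct idmap_extent *el  = e;

        if (key->id < el->first)
            return -1;
        if (key->id >= el->first + el->count)
            return 1;
        return 0;   /* id falls inside this extent */
    }

Handed to bsearch(&key, extents, n, sizeof(*extents), cmp_map_id), the three-way result steers the search toward the extent containing key->id.
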
D | signal.c (k: local)
    in do_sigaction():
        3953          struct k_sigaction *k;
        3959          k = &p->sighand->action[sig-1];
        3963                  *oact = *k;
        3970                  *k = *act;
/kernel/power/
D | swap.c (k: struct member)
         100          unsigned int k;
    in get_swap_writer():
         429          handle->k = 0;
    in swap_write_page():
         452          handle->cur->entries[handle->k++] = offset;
         453          if (handle->k >= MAP_PAGE_ENTRIES) {
         463                  handle->k = 0;
    in get_swap_reader():
        1002          handle->k = 0;
    in swap_read_page():
        1016          offset = handle->cur->entries[handle->k];
        1022          if (++handle->k >= MAP_PAGE_ENTRIES) {
        1023                  handle->k = 0;
    in load_image_lzo():
        1300                      handle->cur->entries[handle->k]) {
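Here k is a cursor into a fixed-size page of swap offsets: the writer appends until the page is full, flushes it, and rewinds k; the reader walks the same layout forward. A sketch of the writer half (flush_page() and the MAP_PAGE_ENTRIES value are stand-ins for the kernel's swap write-out and its PAGE_SIZE-derived constant):

    #include <string.h>

    #define MAP_PAGE_ENTRIES 511   /* illustrative only */

    struct map_page    { unsigned long entries[MAP_PAGE_ENTRIES]; };
    struct swap_handle { struct map_page *cur; unsigned int k; };

    static int append_offset(struct swap_handle *h, unsigned long offset,
                             int (*flush_page)(struct map_page *))
    {
        h->cur->entries[h->k++] = offset;
        if (h->k >= MAP_PAGE_ENTRIES) {      /* index page full */
            int err = flush_page(h->cur);
            if (err)
                return err;
            memset(h->cur, 0, sizeof(*h->cur));
            h->k = 0;                        /* start a fresh page */
        }
        return 0;
    }
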
D | hibernate.c (k: local)
    in swsusp_show_speed():
         245          unsigned int k;
         253          k = nr_pages * (PAGE_SIZE / 1024);
         254          kps = (k * 100) / centisecs;
         256                  msg, k, centisecs / 100, centisecs % 100, kps / 1000,
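The arithmetic converts pages and centiseconds into an integer KiB/s rate with no floating point: k is the total KiB moved, and multiplying by 100 before dividing keeps the precision the centisecond timestamps offer. Worked through in a sketch (page_size is passed in for illustration):

    /* With page_size == 4096: 51200 pages -> k = 51200 * 4 = 204800 KiB;
     * over 250 centiseconds (2.50 s) that is 204800 * 100 / 250 = 81920
     * KiB/s, printed as 81.920 MB/s via kps / 1000 and kps % 1000. */
    static unsigned int kib_per_sec(unsigned int nr_pages,
                                    unsigned int centisecs,
                                    unsigned int page_size)
    {
        unsigned int k = nr_pages * (page_size / 1024);   /* total KiB moved */

        if (centisecs == 0)
            centisecs = 1;          /* avoid division by zero */
        return (k * 100) / centisecs;
    }
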
/kernel/bpf/
D | xskmap.c (k: local)
    in xsk_map_delete_elem():
         284          int k = *(u32 *)key;
         286          if (k >= map->max_entries)
         290          map_entry = &m->xsk_map[k];
D | devmap.c (k: local)
    in dev_map_delete_elem():
         545          int k = *(u32 *)key;
         547          if (k >= map->max_entries)
         558          old_dev = xchg(&dtab->netdev_map[k], NULL);
    in dev_map_hash_delete_elem():
         568          int k = *(u32 *)key;
         574          old_dev = __dev_map_hash_lookup_elem(map, k);
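Deletion here is a single atomic exchange: swapping the slot with NULL means exactly one caller observes the old pointer and becomes responsible for dropping it, with no lock held. The same idea in C11 atomics (release() stands in for the RCU-deferred put the kernel uses):

    #include <stdatomic.h>
    #include <stddef.h>

    struct dev_entry;   /* opaque; only pointer identity matters here */

    /* Whoever reads back a non-NULL old value is the unique owner of
     * the teardown. */
    static void map_delete_slot(_Atomic(struct dev_entry *) *slot,
                                void (*release)(struct dev_entry *))
    {
        struct dev_entry *old = atomic_exchange(slot, NULL);

        if (old)
            release(old);
    }
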
D | core.c (k: function argument)
    in bpf_internal_load_pointer_neg_helper():
          64  void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
          68          if (k >= SKF_NET_OFF) {
          69                  ptr = skb_network_header(skb) + k - SKF_NET_OFF;
          70          } else if (k >= SKF_LL_OFF) {
          73                  ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
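The "negative offset" k is really an encoding: classic BPF reserves deeply negative ranges, SKF_NET_OFF and SKF_LL_OFF, to mean "relative to the network header" and "relative to the link-layer header". Decoding it in isolation (the base pointers replace the skb header accessors):

    #include <stddef.h>

    #define SKF_NET_OFF (-0x100000)   /* loads relative to the network header */
    #define SKF_LL_OFF  (-0x200000)   /* loads relative to the link-layer header */

    static const unsigned char *decode_neg_offset(const unsigned char *net_hdr,
                                                  const unsigned char *mac_hdr,
                                                  int k)
    {
        if (k >= SKF_NET_OFF)
            return net_hdr + (k - SKF_NET_OFF);
        if (k >= SKF_LL_OFF)
            return mac_hdr + (k - SKF_LL_OFF);
        return NULL;   /* not one of the recognized magic ranges */
    }
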
/kernel/locking/
D | rtmutex.h (k: macro argument)
          21  #define debug_rt_mutex_init(m, n, k)   do { } while (0)
D | lockdep.c (k: local)
    in lockdep_register_key():
        1117          struct lock_class_key *k;
        1127          hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
        1128                  if (WARN_ON_ONCE(k == key))
    in is_dynamic_key():
        1143          struct lock_class_key *k;
        1160          hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
        1161                  if (k == key) {
    in lockdep_unregister_key():
        5296          struct lock_class_key *k;
        5311          hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
        5312                  if (k == key) {
        5313                          hlist_del_rcu(&k->hash_entry);
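All three functions walk one hash bucket comparing pointer identity: a lock_class_key is "registered" if its address is present in the table, and its contents never matter. A simplified sketch with a plain singly linked list standing in for the kernel's RCU hlist:

    #include <stdbool.h>
    #include <stdint.h>

    struct key { struct key *next; };

    #define KEYHASH_BITS 8
    #define KEYHASH_SIZE (1u << KEYHASH_BITS)

    static struct key *key_hash[KEYHASH_SIZE];

    static unsigned int bucket_of(const struct key *k)
    {
        return ((uintptr_t)k >> 4) & (KEYHASH_SIZE - 1);   /* crude pointer hash */
    }

    static bool key_is_registered(const struct key *key)
    {
        for (struct key *k = key_hash[bucket_of(key)]; k; k = k->next)
            if (k == key)
                return true;   /* identity, not contents, is the test */
        return false;
    }
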
/kernel/sched/
D | topology.c (k: local)
    in sched_init_numa():
        1633          int k;
        1640          for_each_node(k) {
        1641                  if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
        1644                  if (node_distance(j, k) > sched_domains_numa_distance[i])
        1647                  cpumask_or(mask, mask, cpumask_of_node(k));
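The loop builds one NUMA level's span: for anchor node j and a distance cutoff, it ORs in the CPUs of every node k no farther away (after sanity-checking that the distance table is symmetric). The same accumulation with plain bitmasks (dist[][] and node_cpus[] are hypothetical stand-ins for node_distance() and cpumask_of_node()):

    #define MAX_NODES 64

    /* One unsigned long doubles as a small cpumask here. */
    static unsigned long numa_level_span(int j, int cutoff,
                                         const int dist[MAX_NODES][MAX_NODES],
                                         const unsigned long node_cpus[MAX_NODES],
                                         int nr_nodes)
    {
        unsigned long mask = 0;

        for (int k = 0; k < nr_nodes; k++)
            if (dist[j][k] <= cutoff)     /* node k close enough to j */
                mask |= node_cpus[k];     /* OR in its CPUs */
        return mask;
    }
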
/kernel/cgroup/
D | cpuset.c (k: local)
    in generate_sched_domains():
         729          int i, j, k;    /* indices for partition finding loops */
         816          for (k = 0; k < csn; k++) {
         817                  struct cpuset *c = csa[k];
/kernel/printk/
D | printk.c (k: local)
    in boot_delay_msec():
        1261          unsigned long long k;
        1269          k = (unsigned long long)loops_per_msec * boot_delay;
        1272          while (k) {
        1273                  k--;
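boot_delay_msec() turns a calibrated loops-per-millisecond figure into a countdown busy-wait. A sketch of the core (volatile is added so the empty loop survives optimization; the kernel instead breaks out on a jiffies timeout and pokes the watchdog inside the loop):

    /* Accuracy depends entirely on the loops_per_msec calibration. */
    static void busy_delay(unsigned long long loops_per_msec,
                           unsigned int delay_msec)
    {
        volatile unsigned long long k = loops_per_msec * delay_msec;

        while (k)
            k--;
    }
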
/kernel/trace/
D | trace_events_hist.c (k: local)
    in create_sort_keys():
        4910          unsigned int i, j, k;
        4954          for (j = 1, k = 1; j < hist_data->n_fields; j++) {
        4961                          idx = k++;