/kernel/ |
D | kthread.c |
      73  static inline struct kthread *to_kthread(struct task_struct *k)  in to_kthread() argument
      75          WARN_ON(!(k->flags & PF_KTHREAD));  in to_kthread()
      76          return k->worker_private;  in to_kthread()
     129  void free_kthread_struct(struct task_struct *k)  in free_kthread_struct() argument
     136          kthread = to_kthread(k);  in free_kthread_struct()
     143          k->worker_private = NULL;  in free_kthread_struct()
     161  bool __kthread_should_park(struct task_struct *k)  in __kthread_should_park() argument
     163          return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);  in __kthread_should_park()
     598  void kthread_set_per_cpu(struct task_struct *k, int cpu)  in kthread_set_per_cpu() argument
     600          struct kthread *kthread = to_kthread(k);  in kthread_set_per_cpu()
     [all …]
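The hits above share one idiom: a kernel thread's task_struct carries an opaque worker_private pointer back to its struct kthread, and to_kthread() checks PF_KTHREAD before following it. A minimal userspace sketch of that back-pointer pattern (the struct layouts are simplified stand-ins, and assert() replaces WARN_ON()):

    #include <assert.h>
    #include <stdio.h>

    #define PF_KTHREAD 0x00200000           /* flag marking a kernel thread */

    struct kthread { int should_park; };

    struct task_struct {
            unsigned int flags;
            void *worker_private;           /* back-pointer to struct kthread */
    };

    static inline struct kthread *to_kthread(struct task_struct *k)
    {
            assert(k->flags & PF_KTHREAD);  /* WARN_ON() stand-in */
            return k->worker_private;
    }

    int main(void)
    {
            struct kthread kt = { .should_park = 1 };
            struct task_struct task = {
                    .flags = PF_KTHREAD,
                    .worker_private = &kt,
            };

            printf("should_park=%d\n", to_kthread(&task)->should_park);
            return 0;
    }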
|
D | range.c |
     128          int i, j, k = az - 1, nr_range = az;  in clean_sort_range() local
     130          for (i = 0; i < k; i++) {  in clean_sort_range()
     133                  for (j = k; j > i; j--) {  in clean_sort_range()
     135                          k = j;  in clean_sort_range()
     141                  range[i].start = range[k].start;  in clean_sort_range()
     142                  range[i].end = range[k].end;  in clean_sort_range()
     143                  range[k].start = 0;  in clean_sort_range()
     144                  range[k].end = 0;  in clean_sort_range()
     145                  k--;  in clean_sort_range()
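clean_sort_range() compacts its array in place: whenever slot i holds an empty range (end == 0), the last live entry is pulled forward into it and the tail index k shrinks. A userspace sketch of just that compaction step (the sorting the kernel function also performs is omitted, and struct range is a simplified stand-in):

    #include <stdio.h>

    struct range { unsigned long long start, end; };

    static int clean_range_array(struct range *range, int az)
    {
            int i, j, k = az - 1, nr;

            for (i = 0; i < k; i++) {
                    if (range[i].end)
                            continue;               /* slot i is live, keep it */
                    for (j = k; j > i; j--) {       /* find the last live entry */
                            if (range[j].end) {
                                    k = j;
                                    break;
                            }
                    }
                    if (j == i)
                            break;                  /* nothing live behind slot i */
                    range[i] = range[k];            /* pull it forward */
                    range[k].start = range[k].end = 0;
                    k--;
            }
            for (i = 0, nr = 0; i < az; i++)        /* count survivors */
                    if (range[i].end)
                            nr++;
            return nr;
    }

    int main(void)
    {
            struct range r[] = { {1, 2}, {0, 0}, {5, 9}, {0, 0}, {3, 4} };
            int i, n = clean_range_array(r, 5);

            for (i = 0; i < n; i++)
                    printf("[%llu, %llu) ", r[i].start, r[i].end);
            printf("-> %d live ranges\n", n);
            return 0;
    }

Because k only ever moves toward the front, the dead entries the inner scan skips are never revisited, so the compaction stays roughly linear.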
|
D | audit.h |
     309  #define audit_to_watch(k, p, l, o) (-EINVAL)  argument
     310  #define audit_add_watch(k, l) (-EINVAL)  argument
     311  #define audit_remove_watch_rule(k) BUG()  argument
     315  #define audit_alloc_mark(k, p, l) (ERR_PTR(-EINVAL))  argument
     318  #define audit_remove_mark_rule(k) do { } while (0)  argument
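These audit.h lines are compile-out stubs: when the corresponding watch/mark support is configured out, the calls collapse into macros that fail with -EINVAL, trap via BUG(), or expand to nothing. A sketch of the same idiom with invented names (FEATURE_FOO and the foo_*() calls are hypothetical, purely for illustration):

    #include <errno.h>
    #include <stdio.h>

    /* #define FEATURE_FOO */              /* flip to use the real calls */

    #ifdef FEATURE_FOO
    int foo_register(int id);              /* real implementation elsewhere */
    void foo_unregister(int id);
    #else
    /* Feature compiled out: calls fail cleanly or vanish entirely. */
    #define foo_register(id)    (-EINVAL)          /* value-returning stub */
    #define foo_unregister(id)  do { } while (0)   /* statement stub */
    #endif

    int main(void)
    {
            if (foo_register(1) == -EINVAL)
                    printf("feature compiled out\n");
            foo_unregister(1);
            return 0;
    }

The do { } while (0) form lets a stub sit anywhere a statement can, including an unbraced if/else branch, which is why the kernel prefers it over an empty macro body.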
|
D | seccomp.c |
     281          u32 k = ftest->k;  in seccomp_check_filter() local
     287          if (k >= sizeof(struct seccomp_data) || k & 3)  in seccomp_check_filter()
     292          ftest->k = sizeof(struct seccomp_data);  in seccomp_check_filter()
     296          ftest->k = sizeof(struct seccomp_data);  in seccomp_check_filter()
     733          u32 k = insn->k;  in seccomp_is_const_allow() local
     737          switch (k) {  in seccomp_is_const_allow()
     751                  return k == SECCOMP_RET_ALLOW;  in seccomp_is_const_allow()
     753                  pc += insn->k;  in seccomp_is_const_allow()
     761                  op_res = reg_value == k;  in seccomp_is_const_allow()
     764                  op_res = reg_value >= k;  in seccomp_is_const_allow()
     [all …]
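The check at line 287 is the heart of seccomp's classic-BPF validation: an absolute load is legal only if its constant offset k falls inside struct seccomp_data and is 4-byte aligned. A userspace sketch of that bounds-and-alignment test (the struct mirrors the UAPI layout; the harness around it is invented):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct seccomp_data {                   /* mirrors the UAPI layout, 64 bytes */
            int      nr;
            uint32_t arch;
            uint64_t instruction_pointer;
            uint64_t args[6];
    };

    static bool offset_ok(uint32_t k)
    {
            /* same test as line 287: in bounds and 4-byte aligned */
            return k < sizeof(struct seccomp_data) && !(k & 3);
    }

    int main(void)
    {
            printf("k=0  (nr):        %s\n", offset_ok(0)  ? "ok" : "rejected");
            printf("k=2  (unaligned): %s\n", offset_ok(2)  ? "ok" : "rejected");
            printf("k=56 (args[5]):   %s\n", offset_ok(56) ? "ok" : "rejected");
            printf("k=64 (past end):  %s\n", offset_ok(64) ? "ok" : "rejected");
            return 0;
    }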
|
D | kexec_file.c |
    1050          int i, k;  in kexec_purgatory_find_symbol() local
    1069          for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {  in kexec_purgatory_find_symbol()
    1070                  if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)  in kexec_purgatory_find_symbol()
    1073                  if (strcmp(strtab + syms[k].st_name, name) != 0)  in kexec_purgatory_find_symbol()
    1076                  if (syms[k].st_shndx == SHN_UNDEF ||  in kexec_purgatory_find_symbol()
    1077                      syms[k].st_shndx >= ehdr->e_shnum) {  in kexec_purgatory_find_symbol()
    1079                          name, syms[k].st_shndx);  in kexec_purgatory_find_symbol()
    1084          return &syms[k];  in kexec_purgatory_find_symbol()
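kexec_purgatory_find_symbol() is a linear scan over an ELF symbol table: skip anything that is not STB_GLOBAL, match by name through the string table, and refuse undefined or out-of-range section indices. A self-contained userspace sketch of the same scan (the two-symbol table and string offsets are hand-built for the demo rather than parsed from a real ELF file):

    #include <elf.h>
    #include <stdio.h>
    #include <string.h>

    static Elf64_Sym *find_symbol(Elf64_Sym *syms, size_t nsyms,
                                  const char *strtab, const char *name,
                                  unsigned int e_shnum)
    {
            size_t k;

            for (k = 0; k < nsyms; k++) {
                    if (ELF64_ST_BIND(syms[k].st_info) != STB_GLOBAL)
                            continue;       /* only global symbols qualify */
                    if (strcmp(strtab + syms[k].st_name, name) != 0)
                            continue;       /* name mismatch */
                    if (syms[k].st_shndx == SHN_UNDEF ||
                        syms[k].st_shndx >= e_shnum) {
                            fprintf(stderr, "%s: bad section index %u\n",
                                    name, (unsigned)syms[k].st_shndx);
                            return NULL;
                    }
                    return &syms[k];
            }
            return NULL;
    }

    int main(void)
    {
            const char strtab[] = "\0entry\0helper";   /* name offsets: 1, 7 */
            Elf64_Sym syms[2] = {
                    { .st_name = 7,
                      .st_info = ELF64_ST_INFO(STB_LOCAL, STT_FUNC),
                      .st_shndx = 1 },
                    { .st_name = 1,
                      .st_info = ELF64_ST_INFO(STB_GLOBAL, STT_FUNC),
                      .st_shndx = 1 },
            };

            printf("entry %s\n",
                   find_symbol(syms, 2, strtab, "entry", 4) ? "found" : "missing");
            return 0;
    }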
|
D | compat.c |
     113          unsigned long *k;  in compat_get_user_cpu_mask() local
     120          k = cpumask_bits(new_mask);  in compat_get_user_cpu_mask()
     121          return compat_get_bitmap(k, user_mask_ptr, len * 8);  in compat_get_user_cpu_mask()
|
D | smp.c |
     254          unsigned int i, j, k;  in cfd_seq_data_add() local
     270          for (k = *n_data; k > j; k--)  in cfd_seq_data_add()
     271                  data[k].val = data[k - 1].val;  in cfd_seq_data_add()
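Lines 270-271 are a textbook shift-insert: every element after slot j moves up one place so a new value can drop in while keeping the array ordered. A sketch of that pattern on a plain array (the element type and surrounding bookkeeping are simplified stand-ins):

    #include <stdio.h>

    static void insert_sorted(unsigned long *data, unsigned int *n_data,
                              unsigned long val)
    {
            unsigned int j, k;

            for (j = 0; j < *n_data; j++)
                    if (data[j] > val)
                            break;          /* insert before the first larger entry */
            for (k = *n_data; k > j; k--)   /* shift the tail up one slot */
                    data[k] = data[k - 1];
            data[j] = val;
            (*n_data)++;
    }

    int main(void)
    {
            unsigned long data[8] = { 1, 4, 9 };
            unsigned int i, n = 3;

            insert_sorted(data, &n, 5);
            for (i = 0; i < n; i++)
                    printf("%lu ", data[i]);        /* prints: 1 4 5 9 */
            printf("\n");
            return 0;
    }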
|
D | user_namespace.c |
     245  static int cmp_map_id(const void *k, const void *e)  in cmp_map_id() argument
     248          const struct idmap_key *key = k;  in cmp_map_id()
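cmp_map_id() is a bsearch() comparator: the first argument is always the key, the second the candidate array element, and the sign of the return value steers the search. A sketch with simplified extent structs, returning the same three-way result the kernel uses for its [first, first + count) id ranges:

    #include <stdio.h>
    #include <stdlib.h>

    struct idmap_key { unsigned int id; };
    struct extent    { unsigned int first, count; };

    static int cmp_map_id(const void *k, const void *e)
    {
            const struct idmap_key *key = k;
            const struct extent *el = e;

            if (key->id < el->first)
                    return -1;              /* keep searching to the left */
            if (key->id >= el->first + el->count)
                    return 1;               /* keep searching to the right */
            return 0;                       /* id falls inside this extent */
    }

    int main(void)
    {
            struct extent map[] = { { 0, 1000 }, { 5000, 10 }, { 100000, 65536 } };
            struct idmap_key key = { .id = 5004 };
            struct extent *hit;

            hit = bsearch(&key, map, 3, sizeof(map[0]), cmp_map_id);
            if (hit)
                    printf("id %u -> extent [%u, %u)\n",
                           key.id, hit->first, hit->first + hit->count);
            return 0;
    }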
|
D | signal.c |
    4112          struct k_sigaction *k;  in do_sigaction() local
    4118          k = &p->sighand->action[sig-1];  in do_sigaction()
    4121          if (k->sa.sa_flags & SA_IMMUTABLE) {  in do_sigaction()
    4126                  *oact = *k;  in do_sigaction()
    4149          *k = *act;  in do_sigaction()
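do_sigaction() is the kernel half of sigaction(2): it hands the old struct k_sigaction back through oact, refuses to replace SA_IMMUTABLE handlers, and copies the new action in. The same contract seen from userspace:

    #include <signal.h>
    #include <stdio.h>

    static volatile sig_atomic_t got_signal;

    static void handler(int sig)
    {
            got_signal = sig;
    }

    int main(void)
    {
            struct sigaction act = { 0 }, oact;

            act.sa_handler = handler;
            sigemptyset(&act.sa_mask);
            if (sigaction(SIGUSR1, &act, &oact) != 0) {
                    perror("sigaction");
                    return 1;
            }
            /* oact now holds whatever do_sigaction() copied out at line 4126 */
            printf("old handler was %s\n",
                   oact.sa_handler == SIG_DFL ? "SIG_DFL" : "something else");

            raise(SIGUSR1);
            printf("caught signal %d\n", (int)got_signal);
            return 0;
    }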
|
/kernel/power/ |
D | swap.c |
     102          unsigned int k;  member
     441          handle->k = 0;  in get_swap_writer()
     465          handle->cur->entries[handle->k++] = offset;  in swap_write_page()
     466          if (handle->k >= MAP_PAGE_ENTRIES) {  in swap_write_page()
     479                  handle->k = 0;  in swap_write_page()
    1020          handle->k = 0;  in get_swap_reader()
    1034          offset = handle->cur->entries[handle->k];  in swap_read_page()
    1040          if (++handle->k >= MAP_PAGE_ENTRIES) {  in swap_read_page()
    1041                  handle->k = 0;  in swap_read_page()
    1316                  handle->cur->entries[handle->k]) {  in load_image_lzo()
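The k member is a cursor into the current map page: swap_write_page() appends each swap offset at entries[k++] and, once k reaches MAP_PAGE_ENTRIES, flushes the page and wraps k back to 0; the reader walks the same structure forward. A small userspace sketch of that cursor, with a tiny page size and a flush that just prints (MAP_PAGE_ENTRIES is derived from PAGE_SIZE in the kernel, and the real flush writes the page to swap):

    #include <stdio.h>

    #define MAP_PAGE_ENTRIES 4              /* tiny, for demonstration only */

    struct swap_map_handle {
            unsigned long entries[MAP_PAGE_ENTRIES];
            unsigned int k;
    };

    static void flush_map_page(struct swap_map_handle *handle)
    {
            unsigned int i;

            printf("flush:");
            for (i = 0; i < handle->k; i++)
                    printf(" %lu", handle->entries[i]);
            printf("\n");
            handle->k = 0;                  /* wrap the cursor */
    }

    static void add_offset(struct swap_map_handle *handle, unsigned long offset)
    {
            handle->entries[handle->k++] = offset;
            if (handle->k >= MAP_PAGE_ENTRIES)
                    flush_map_page(handle);
    }

    int main(void)
    {
            struct swap_map_handle h = { .k = 0 };
            unsigned long off;

            for (off = 100; off < 110; off++)
                    add_offset(&h, off);
            if (h.k)                        /* flush the partial final page */
                    flush_map_page(&h);
            return 0;
    }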
|
D | hibernate.c |
     264          unsigned int k;  in swsusp_show_speed() local
     272          k = nr_pages * (PAGE_SIZE / 1024);  in swsusp_show_speed()
     273          kps = (k * 100) / centisecs;  in swsusp_show_speed()
     275                  msg, k, centisecs / 100, centisecs % 100, kps / 1000,  in swsusp_show_speed()
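The arithmetic in swsusp_show_speed() stays in integers throughout: pages become kibibytes, and multiplying by 100 before dividing by the centisecond count yields KiB/s without floating point. The same computation with concrete numbers (PAGE_SIZE and the sample inputs are assumptions for the demo):

    #include <stdio.h>

    #define PAGE_SIZE 4096u                 /* arch-dependent in the kernel */

    int main(void)
    {
            unsigned int nr_pages = 131072;     /* 512 MiB of 4 KiB pages */
            unsigned int centisecs = 250;       /* 2.50 s elapsed */

            unsigned int k   = nr_pages * (PAGE_SIZE / 1024);  /* total KiB */
            unsigned int kps = (k * 100) / centisecs;          /* KiB per second */

            printf("wrote %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n",
                   k, centisecs / 100, centisecs % 100,
                   kps / 1000, (kps % 1000) / 10);
            return 0;
    }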
|
/kernel/bpf/ |
D | devmap.c |
     806          int k = *(u32 *)key;  in dev_map_delete_elem() local
     808          if (k >= map->max_entries)  in dev_map_delete_elem()
     811          old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));  in dev_map_delete_elem()
     821          int k = *(u32 *)key;  in dev_map_hash_delete_elem() local
     827          old_dev = __dev_map_hash_lookup_elem(map, k);  in dev_map_hash_delete_elem()
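dev_map_delete_elem() bounds-checks the key, then uses an atomic exchange so that concurrent readers observe either the old device pointer or NULL, never a torn value; the old entry is released afterwards (via RCU in the kernel). A userspace sketch with C11 atomics and an immediate free() standing in for the RCU grace period:

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ENTRIES 8

    static _Atomic(void *) map[MAX_ENTRIES];

    static int map_delete_elem(unsigned int k)
    {
            void *old;

            if (k >= MAX_ENTRIES)
                    return -EINVAL;         /* reject out-of-range keys */
            old = atomic_exchange(&map[k], NULL);
            free(old);                      /* the kernel defers this via RCU */
            return 0;
    }

    int main(void)
    {
            atomic_store(&map[3], malloc(16));
            printf("delete 3 -> %d\n", map_delete_elem(3));   /* 0 */
            printf("delete 9 -> %d\n", map_delete_elem(9));   /* -EINVAL */
            return 0;
    }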
|
D | core.c |
      68  void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)  in bpf_internal_load_pointer_neg_helper() argument
      72          if (k >= SKF_NET_OFF) {  in bpf_internal_load_pointer_neg_helper()
      73                  ptr = skb_network_header(skb) + k - SKF_NET_OFF;  in bpf_internal_load_pointer_neg_helper()
      74          } else if (k >= SKF_LL_OFF) {  in bpf_internal_load_pointer_neg_helper()
      77                  ptr = skb_mac_header(skb) + k - SKF_LL_OFF;  in bpf_internal_load_pointer_neg_helper()
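Classic BPF encodes "load relative to the network header" and "load relative to the link-layer header" as large negative offsets, and this helper rebases k onto the right header pointer. A sketch of that decode (the SKF_* values match include/uapi/linux/filter.h, but the skb stand-in and frame layout are invented for the demo):

    #include <stdio.h>

    #define SKF_NET_OFF (-0x100000)        /* as in include/uapi/linux/filter.h */
    #define SKF_LL_OFF  (-0x200000)

    struct fake_skb {
            unsigned char *mac_header;     /* start of the ethernet header */
            unsigned char *network_header; /* start of the IP header */
    };

    static unsigned char *load_pointer(const struct fake_skb *skb, int k)
    {
            if (k >= SKF_NET_OFF)
                    return skb->network_header + k - SKF_NET_OFF;
            if (k >= SKF_LL_OFF)
                    return skb->mac_header + k - SKF_LL_OFF;
            return NULL;                   /* not a special offset */
    }

    int main(void)
    {
            unsigned char frame[64] = { 0 };
            struct fake_skb skb = {
                    .mac_header = frame,
                    .network_header = frame + 14,   /* after the ethernet header */
            };

            /* byte 9 of the IP header (the protocol field) */
            unsigned char *p = load_pointer(&skb, SKF_NET_OFF + 9);
            printf("offset into frame: %td\n", p - frame);  /* 23 */
            return 0;
    }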
|
/kernel/sched/ |
D | topology.c |
    1883          int k;  in sched_init_numa() local
    1890          for_each_cpu_node_but(k, offline_node) {  in sched_init_numa()
    1891                  if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))  in sched_init_numa()
    1894                  if (node_distance(j, k) > sched_domains_numa_distance[i])  in sched_init_numa()
    1897                  cpumask_or(mask, mask, cpumask_of_node(k));  in sched_init_numa()
|
/kernel/locking/ |
D | lockdep.c |
    1213          struct lock_class_key *k;  in lockdep_register_key() local
    1223          hlist_for_each_entry_rcu(k, hash_head, hash_entry) {  in lockdep_register_key()
    1224                  if (WARN_ON_ONCE(k == key))  in lockdep_register_key()
    1239          struct lock_class_key *k;  in is_dynamic_key() local
    1256          hlist_for_each_entry_rcu(k, hash_head, hash_entry) {  in is_dynamic_key()
    1257                  if (k == key) {  in is_dynamic_key()
    6325          struct lock_class_key *k;  in lockdep_unregister_key() local
    6338          hlist_for_each_entry_rcu(k, hash_head, hash_entry) {  in lockdep_unregister_key()
    6339                  if (k == key) {  in lockdep_unregister_key()
    6340                          hlist_del_rcu(&k->hash_entry);  in lockdep_unregister_key()
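The three lockdep excerpts are one life cycle on a hash list: registration warns on duplicates, is_dynamic_key() looks a key up by pointer identity, and unregistration unlinks it. A sketch of the same cycle on a single singly linked bucket (the kernel spreads keys across many RCU-protected hlist buckets; none of that locking is modeled here):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct key { struct key *next; };

    static struct key *hash_head;          /* one bucket, for simplicity */

    static void register_key(struct key *key)
    {
            struct key *k;

            for (k = hash_head; k; k = k->next)
                    assert(k != key);      /* WARN_ON_ONCE() stand-in */
            key->next = hash_head;
            hash_head = key;
    }

    static bool is_registered_key(const struct key *key)
    {
            struct key *k;

            for (k = hash_head; k; k = k->next)
                    if (k == key)          /* match by pointer identity */
                            return true;
            return false;
    }

    static void unregister_key(struct key *key)
    {
            struct key **pk;

            for (pk = &hash_head; *pk; pk = &(*pk)->next)
                    if (*pk == key) {
                            *pk = key->next;   /* hlist_del_rcu() analogue */
                            return;
                    }
    }

    int main(void)
    {
            struct key a, b;

            register_key(&a);
            register_key(&b);
            printf("a registered: %d\n", is_registered_key(&a));  /* 1 */
            unregister_key(&a);
            printf("a registered: %d\n", is_registered_key(&a));  /* 0 */
            return 0;
    }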
|
/kernel/cgroup/ |
D | cpuset.c |
     934          int i, j, k;    /* indices for partition finding loops */  in generate_sched_domains() local
    1021          for (k = 0; k < csn; k++) {  in generate_sched_domains()
    1022                  struct cpuset *c = csa[k];  in generate_sched_domains()
|
/kernel/printk/ |
D | printk.c |
    1234          unsigned long long k;  in boot_delay_msec() local
    1242          k = (unsigned long long)loops_per_msec * boot_delay;  in boot_delay_msec()
    1245          while (k) {  in boot_delay_msec()
    1246                  k--;  in boot_delay_msec()
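boot_delay_msec() burns time in a calibrated countdown: loops_per_msec iterations approximate one millisecond, so the product with the requested delay is simply decremented to zero. A sketch of that busy-wait (loops_per_msec is an arbitrary placeholder here, where the kernel calibrates it at boot, and the empty asm is a GCC/Clang idiom to keep the loop from being optimized away):

    #include <stdio.h>

    static unsigned long long loops_per_msec = 1000000ULL;  /* placeholder */

    static void boot_delay(unsigned int delay_ms)
    {
            unsigned long long k = loops_per_msec * delay_ms;

            while (k) {
                    k--;
                    __asm__ volatile("" ::: "memory");  /* keep the loop alive */
            }
    }

    int main(void)
    {
            boot_delay(10);
            printf("spun for ~10ms worth of loops\n");
            return 0;
    }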
|
/kernel/trace/ |
D | trace_events_hist.c |
    4633          unsigned int i, j, k;  in create_sort_keys() local
    4680          for (j = 1, k = 1; j < hist_data->n_fields; j++) {  in create_sort_keys()
    4687                  idx = k++;  in create_sort_keys()
|