/kernel/ |
D | crash_core.c |
    in parse_crashkernel_mem() [local]
        55  char *cur = cmdline, *tmp;
        71  start = memparse(cur, &tmp);
        72  if (cur == tmp) {
        76  cur = tmp;
        77  if (*cur != '-') {
        81  cur++;
        84  if (*cur != ':') {
        85  end = memparse(cur, &tmp);
        86  if (cur == tmp) {
        90  cur = tmp;
    [all …]
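The crash_core.c lines above show the memparse() cursor idiom: parse a number at cur, treat cur == tmp (no characters consumed) as a syntax error, then advance cur past the separator. Below is a minimal userspace sketch of the same idiom, with strtoull() standing in for memparse(); parse_range() and its parameters are illustrative, not kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Parse "<start>-<end>" from s; returns 0 on success, -1 on bad input. */
static int parse_range(const char *s, unsigned long long *start,
		       unsigned long long *end)
{
	const char *cur = s;
	char *tmp;

	*start = strtoull(cur, &tmp, 0);
	if (cur == tmp)		/* no digits consumed */
		return -1;
	cur = tmp;

	if (*cur != '-')	/* separator required */
		return -1;
	cur++;

	*end = strtoull(cur, &tmp, 0);
	if (cur == tmp)
		return -1;
	return 0;
}

int main(void)
{
	unsigned long long start, end;

	if (!parse_range("16-512", &start, &end))
		printf("start=%llu end=%llu\n", start, end);
	return 0;
}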
|
D | smpboot.c |
    in smpboot_create_threads() [local]
        215  struct smp_hotplug_thread *cur;
        219  list_for_each_entry(cur, &hotplug_threads, list) {
        220  ret = __smpboot_create_thread(cur, cpu);
    in smpboot_unpark_threads() [local]
        238  struct smp_hotplug_thread *cur;
        241  list_for_each_entry(cur, &hotplug_threads, list)
        242  smpboot_unpark_thread(cur, cpu);
    in smpboot_park_threads() [local]
        257  struct smp_hotplug_thread *cur;
        260  list_for_each_entry_reverse(cur, &hotplug_threads, list)
        261  smpboot_park_thread(cur, cpu);
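smpboot.c walks the global hotplug_threads list with a cur cursor, applying one operation per registered thread for a given CPU (and a reverse walk for parking). A small userspace sketch of the same registration-list walk, assuming a hand-rolled singly linked list; hp_thread, hp_register() and create_one() are invented names, not kernel symbols.

#include <stdio.h>

/* Stand-in for struct smp_hotplug_thread: one registration per subsystem. */
struct hp_thread {
	const char *name;
	struct hp_thread *next;
};

static struct hp_thread *hp_threads;	/* head of the registration list */

static void hp_register(struct hp_thread *t)
{
	t->next = hp_threads;
	hp_threads = t;
}

/* The cursor walk: apply one operation to every registration for @cpu. */
static void hp_for_each(void (*op)(struct hp_thread *, int), int cpu)
{
	struct hp_thread *cur;

	for (cur = hp_threads; cur; cur = cur->next)
		op(cur, cpu);
}

static void create_one(struct hp_thread *t, int cpu)
{
	printf("create %s/%d\n", t->name, cpu);
}

int main(void)
{
	static struct hp_thread softirq = { .name = "ksoftirqd" };
	static struct hp_thread stopper = { .name = "migration" };

	hp_register(&softirq);
	hp_register(&stopper);
	hp_for_each(create_one, 0);	/* like creating all threads for CPU 0 */
	return 0;
}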
|
D | seccomp.c |
    in find_notification() [local]
        1453  struct seccomp_knotif *cur;
        1457  list_for_each_entry(cur, &filter->notif->notifications, list) {
        1458  if (cur->id == id)
        1459  return cur;
    in seccomp_notify_recv() [local]
        1500  struct seccomp_knotif *knotif = NULL, *cur;
        1518  list_for_each_entry(cur, &filter->notif->notifications, list) {
        1519  if (cur->state == SECCOMP_NOTIFY_INIT) {
        1520  knotif = cur;
    in seccomp_notify_poll() [local]
        1805  struct seccomp_knotif *cur;
        1812  list_for_each_entry(cur, &filter->notif->notifications, list) {
    [all …]
|
D | resource.c |
    in merge_system_ram_resource() [local]
        1432  struct resource *cur;
        1441  cur = res->sibling;
        1442  if (cur && system_ram_resources_mergeable(res, cur)) {
        1443  res->end = cur->end;
        1444  res->sibling = cur->sibling;
        1445  free_resource(cur);
        1449  cur = res->parent->child;
        1450  while (cur && cur->sibling != res)
        1451  cur = cur->sibling;
        1452  if (cur && system_ram_resources_mergeable(cur, res)) {
    [all …]
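merge_system_ram_resource() first tries to fold the immediately following sibling into res, then scans the parent's child list for the predecessor and folds res into it. A sketch of the same two-step merge on a sorted singly linked list of inclusive [start, end] ranges; struct range, mergeable() and merge_range() are illustrative stand-ins, not the kernel resource API.

#include <stdio.h>
#include <stdlib.h>

struct range {
	unsigned long start, end;	/* inclusive, sorted, non-overlapping */
	struct range *sibling;
};

static int mergeable(const struct range *a, const struct range *b)
{
	return a->end + 1 == b->start;	/* b starts right after a ends */
}

/* Try to fold @res into its neighbours, mirroring the two-step merge above. */
static void merge_range(struct range **head, struct range *res)
{
	struct range *cur;

	/* Merge the following sibling into res. */
	cur = res->sibling;
	if (cur && mergeable(res, cur)) {
		res->end = cur->end;
		res->sibling = cur->sibling;
		free(cur);
	}

	/* Find the predecessor and merge res into it. */
	cur = *head;
	while (cur && cur->sibling != res)
		cur = cur->sibling;
	if (cur && mergeable(cur, res)) {
		cur->end = res->end;
		cur->sibling = res->sibling;
		free(res);
	}
}

int main(void)
{
	struct range *a = calloc(1, sizeof(*a));
	struct range *b = calloc(1, sizeof(*b));

	a->start = 0;      a->end = 0xfff;  a->sibling = b;
	b->start = 0x1000; b->end = 0x1fff;

	merge_range(&a, b);		/* folds b into a */
	printf("%#lx-%#lx\n", a->start, a->end);
	free(a);
	return 0;
}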
|
D | padata.c |
    in padata_works_free() [local]
        132  struct padata_work *cur, *next;
        138  list_for_each_entry_safe(cur, next, works, pw_list) {
        139  list_del(&cur->pw_list);
        140  padata_work_free(cur);
    in padata_do_serial() [local]
        400  struct padata_priv *cur;
        406  cur = list_entry(pos, struct padata_priv, list);
        407  if (cur->seq_nr < padata->seq_nr)
|
D | kprobes.c |
    in __kretprobe_find_ret_addr() [argument]
        1965  struct llist_node **cur)
        1968  struct llist_node *node = *cur;
        1978  *cur = node;
    in kretprobe_find_ret_addr() [argument]
        2003  struct llist_node **cur)
        2008  if (WARN_ON_ONCE(!cur))
        2012  ret = __kretprobe_find_ret_addr(tsk, cur);
        2015  ri = container_of(*cur, struct kretprobe_instance, llist);
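Both kretprobe helpers take a struct llist_node **cur so a caller can resume the search where the previous call stopped: the callee reads the cursor to pick a starting point and writes the new position back through it. A userspace sketch of that resumable-cursor convention; find_next_match() and struct node are made up for the example.

#include <stdio.h>
#include <stddef.h>

struct node {
	int key;
	struct node *next;
};

/*
 * Find the next node whose key matches, starting after *cur (or from head
 * when *cur is NULL).  On success, *cur is updated so the next call resumes
 * from the node just returned.
 */
static struct node *find_next_match(struct node *head, int key,
				    struct node **cur)
{
	struct node *node = *cur ? (*cur)->next : head;

	for (; node; node = node->next) {
		if (node->key == key) {
			*cur = node;
			return node;
		}
	}
	return NULL;
}

int main(void)
{
	struct node c = { 7, NULL }, b = { 3, &c }, a = { 7, &b };
	struct node *cursor = NULL;

	while (find_next_match(&a, 7, &cursor))
		printf("match at %p\n", (void *)cursor);
	return 0;
}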
|
D | cpu.c |
    in states_show() [local]
        2900  ssize_t cur, res = 0;
        2908  cur = sprintf(buf, "%3d: %s\n", i, sp->name);
        2909  buf += cur;
        2910  res += cur;
|
D | workqueue.c |
    in init_pod_type() [local]
        6769  int cur, pre, cpu, pod;
        6777  for_each_possible_cpu(cur) {
        6779  if (pre >= cur) {
        6780  pt->cpu_pod[cur] = pt->nr_pods++;
        6783  if (cpus_share_pod(cur, pre)) {
        6784  pt->cpu_pod[cur] = pt->cpu_pod[pre];
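init_pod_type() assigns each CPU to a pod by comparing it against every lower-numbered CPU: the first earlier CPU that shares a pod donates its pod id, otherwise a new pod id is allocated. The same O(n^2) grouping in plain C; cpus_share_pod() below is an arbitrary stand-in predicate, not the kernel helper.

#include <stdio.h>

#define NR_CPUS 8

/* Stand-in predicate: CPUs share a pod when they sit in the same pair. */
static int cpus_share_pod(int a, int b)
{
	return a / 2 == b / 2;
}

int main(void)
{
	int cpu_pod[NR_CPUS];
	int nr_pods = 0;
	int cur, pre;

	for (cur = 0; cur < NR_CPUS; cur++) {
		for (pre = 0; pre < NR_CPUS; pre++) {
			if (pre >= cur) {
				/* No earlier CPU matched: open a new pod. */
				cpu_pod[cur] = nr_pods++;
				break;
			}
			if (cpus_share_pod(cur, pre)) {
				cpu_pod[cur] = cpu_pod[pre];
				break;
			}
		}
	}

	for (cur = 0; cur < NR_CPUS; cur++)
		printf("cpu%d -> pod%d\n", cur, cpu_pod[cur]);
	printf("%d pods\n", nr_pods);
	return 0;
}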
|
/kernel/locking/ |
D | ww_mutex.h |
    in __ww_mutex_check_waiters() [local]
        357  struct MUTEX_WAITER *cur;
        361  for (cur = __ww_waiter_first(lock); cur;
        362  cur = __ww_waiter_next(lock, cur)) {
        364  if (!cur->ww_ctx)
        367  if (__ww_mutex_die(lock, cur, ww_ctx) ||
        368  __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
    in __ww_mutex_check_kill() [local]
        446  struct MUTEX_WAITER *cur;
        465  for (cur = __ww_waiter_prev(lock, waiter); cur;
        466  cur = __ww_waiter_prev(lock, cur)) {
        468  if (!cur->ww_ctx)
    [all …]
|
D | locktorture.c |
    in __torture_print_stats() [local]
        902  long cur;
        912  cur = data_race(statp[i].n_lock_acquired);
        913  sum += cur;
        914  if (max < cur)
        915  max = cur;
        916  if (min > cur)
        917  min = cur;
|
/kernel/power/ |
D | snapshot.c |
    [member]
        416  struct bm_position cur;   /* most recently used bit position */
    in memory_bm_position_reset()
        588  bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
        590  bm->cur.node = list_entry(bm->cur.zone->leaves.next,
        592  bm->cur.node_pfn = 0;
        593  bm->cur.cur_pfn = BM_END_OF_MAP;
        594  bm->cur.node_bit = 0;
    in create_mem_extents() [local]
        634  struct mem_extent *ext, *cur, *aux;
        665  cur = ext;
        666  list_for_each_entry_safe_continue(cur, aux, list, hook) {
        667  if (zone_end < cur->start)
    [all …]
|
D | swap.c |
    [member]
        97  struct swap_map_page *cur;
    in release_swap_writer()
        413  if (handle->cur)
        414  free_page((unsigned long)handle->cur);
        415  handle->cur = NULL;
    in get_swap_writer()
        428  handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
        429  if (!handle->cur) {
    in swap_write_page()
        455  if (!handle->cur)
        461  handle->cur->entries[handle->k++] = offset;
        466  handle->cur->next_swap = offset;
        467  error = write_page(handle->cur, handle->cur_swap, hb);
    [all …]
|
D | power.h |
    [member]
        129  unsigned int cur;   /* number of the block of PAGE_SIZE bytes the
|
/kernel/trace/ |
D | rethook.c |
    in __rethook_find_ret_addr() [argument]
        216  struct llist_node **cur)
        219  struct llist_node *node = *cur;
        229  *cur = node;
    in rethook_find_ret_addr() [argument]
        255  struct llist_node **cur)
        260  if (WARN_ON_ONCE(!cur))
        267  ret = __rethook_find_ret_addr(tsk, cur);
        270  rhn = container_of(*cur, struct rethook_node, llist);
|
/kernel/kcsan/ |
D | kcsan_test.c |
    in __report_matches() [local]
        164  char *cur;
        178  cur = expect[0];
        180  cur += scnprintf(cur, end - cur, "BUG: KCSAN: %s in ",
        190  cur += scnprintf(cur, end - cur, "%ps / %ps",
        194  scnprintf(cur, end - cur, "%pS", r->access[0].fn);
        196  cur = strchr(expect[0], '+');
        197  if (cur)
        198  *cur = '\0';
        202  cur = expect[1];
        205  cur += scnprintf(cur, end - cur, "race at unknown origin, with ");
    [all …]
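kcsan_test.c assembles the expected report text with the bounded-append cursor, cur += scnprintf(cur, end - cur, ...). Userspace snprintf() returns the length it would have written rather than the length it did write, so a scnprintf-like wrapper needs a clamp; append() below is an illustrative helper, not a libc function.

#include <stdio.h>
#include <stdarg.h>

/* scnprintf-style append: returns how much was actually written. */
static size_t append(char *cur, size_t space, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (!space)
		return 0;
	va_start(ap, fmt);
	n = vsnprintf(cur, space, fmt, ap);
	va_end(ap);
	if (n < 0)
		return 0;
	return (size_t)n >= space ? space - 1 : (size_t)n;
}

int main(void)
{
	char buf[64];
	char *cur = buf;
	const char *end = buf + sizeof(buf);

	cur += append(cur, end - cur, "BUG: %s in ", "data-race");
	cur += append(cur, end - cur, "%s / %s", "writer_fn", "reader_fn");
	puts(buf);
	return 0;
}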
|
D | report.c |
    in get_stack_skipnr() [local]
        280  char *cur;
        291  cur = strnstr(buf, "kcsan_", len);
        292  if (cur) {
        293  cur += strlen("kcsan_");
        294  if (!str_has_prefix(cur, "test"))
|
/kernel/bpf/ |
D | bpf_lru_list.c |
    in __bpf_lru_list_rotate_inactive() [local]
        175  struct list_head *cur, *last, *next = inactive;
        186  cur = l->next_inactive_rotation;
        188  if (cur == inactive) {
        189  cur = cur->prev;
        193  node = list_entry(cur, struct bpf_lru_node, list);
        194  next = cur->prev;
        197  if (cur == last)
        199  cur = next;
|
D | bpf_iter.c |
    [member]
        781  int cur;   /* current value, inclusive */
    in bpf_iter_num_new()
        802  s->cur = s->end = 0;
        808  s->cur = s->end = 0;
        816  s->cur = start - 1;
    in bpf_iter_num_next()
        831  if ((s64)(s->cur + 1) >= s->end) {
        832  s->cur = s->end = 0;
        836  s->cur++;
        838  return &s->cur;
    in bpf_iter_num_destroy()
        845  s->cur = s->end = 0;
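The bpf_iter.c numbers iterator keeps cur one below the next value to hand out and collapses to the cur == end == 0 state on bad input or exhaustion, so next() keeps returning NULL afterwards. A userspace sketch of the same new()/next()/destroy() contract; iter_num_* and struct num_iter are illustrative names, not the kernel implementation.

#include <stdio.h>

struct num_iter {
	int cur;	/* last value returned; next() returns cur + 1 */
	int end;	/* exclusive upper bound */
};

/* Initialise the iterator for [start, end); collapse to empty on bad input. */
static void iter_num_new(struct num_iter *it, int start, int end)
{
	if (start >= end) {
		it->cur = it->end = 0;	/* canonical "exhausted" state */
		return;
	}
	it->cur = start - 1;
	it->end = end;
}

/* Return a pointer to the next value, or NULL when the range is done. */
static int *iter_num_next(struct num_iter *it)
{
	if ((long long)it->cur + 1 >= it->end) {
		it->cur = it->end = 0;
		return NULL;
	}
	it->cur++;
	return &it->cur;
}

static void iter_num_destroy(struct num_iter *it)
{
	it->cur = it->end = 0;	/* nothing to free, just reset */
}

int main(void)
{
	struct num_iter it;
	int *v;

	iter_num_new(&it, 3, 6);
	while ((v = iter_num_next(&it)))
		printf("%d\n", *v);	/* prints 3 4 5 */
	iter_num_destroy(&it);
	return 0;
}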
|
D | verifier.c |
    in func() [local]
        687  struct bpf_verifier_state *cur = env->cur_state;
        689  return cur->frame[reg->frameno];
    in explored_state() [local]
        1810  struct bpf_verifier_state *cur = env->cur_state;
        1811  struct bpf_func_state *state = cur->frame[cur->curframe];
    in update_loop_entry() [argument]
        1969  static void update_loop_entry(struct bpf_verifier_state *cur, struct bpf_verifier_state *hdr)
        1973  cur1 = get_loop_entry(cur) ?: cur;
        1982  cur->loop_entry = hdr;
    in pop_stack() [local]
        2015  struct bpf_verifier_state *cur = env->cur_state;
        2022  if (cur) {
        2023  err = copy_verifier_state(cur, &head->st);
    [all …]
|
D | btf.c |
    in btf_check_all_metas() [local]
        4992  void *cur, *end;
        4995  cur = btf->nohdr_data + hdr->type_off;
        4996  end = cur + hdr->type_len;
        4999  while (cur < end) {
        5000  struct btf_type *t = cur;
        5003  meta_size = btf_check_meta(env, t, end - cur);
        5008  cur += meta_size;
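btf_check_all_metas() walks the raw type section with byte-granular cur/end pointers, asking a helper how large each record is and advancing by that amount, bailing out when a record would overrun the buffer. A sketch of the same walk over a packed buffer of length-prefixed records; the record layout here is invented for the example.

#include <stdio.h>
#include <stdint.h>

/* Each record: a 1-byte payload length followed by that many bytes. */
struct rec {
	uint8_t len;
	uint8_t data[];
};

/* Return the full record size, or 0 if it would run past the buffer. */
static size_t rec_size(const void *cur, size_t remaining)
{
	const struct rec *r = cur;

	if (remaining < sizeof(*r) || remaining < sizeof(*r) + r->len)
		return 0;
	return sizeof(*r) + r->len;
}

int main(void)
{
	uint8_t buf[] = { 2, 'h', 'i', 3, 'f', 'o', 'o' };
	const uint8_t *cur = buf;
	const uint8_t *end = buf + sizeof(buf);

	while (cur < end) {
		size_t sz = rec_size(cur, end - cur);

		if (!sz) {	/* malformed record: stop, like returning -EINVAL */
			fprintf(stderr, "truncated record\n");
			return 1;
		}
		printf("record of %zu bytes\n", sz);
		cur += sz;
	}
	return 0;
}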
|
/kernel/time/ |
D | tick-broadcast.c |
    in tick_install_broadcast_device() [local]
        165  struct clock_event_device *cur = tick_broadcast_device.evtdev;
        170  if (!tick_check_broadcast_device(cur, dev))
        176  clockevents_exchange_device(cur, dev);
        177  if (cur)
        178  cur->event_handler = clockevents_handle_noop;
|
/kernel/cgroup/ |
D | cpuset.c |
    in validate_change_legacy() [argument]
        698  static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
        708  cpuset_for_each_child(c, css, cur)
        714  par = parent_cs(cur);
    in validate_change() [argument]
        743  static int validate_change(struct cpuset *cur, struct cpuset *trial)
        752  ret = validate_change_legacy(cur, trial);
        757  if (cur == &top_cpuset)
        760  par = parent_cs(cur);
        767  if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
        768  if (!cpumask_empty(cur->cpus_allowed) &&
        771  if (!nodes_empty(cur->mems_allowed) &&
    [all …]
|
/kernel/sched/ |
D | fair.c |
    in task_numa_compare() [local]
        2188  struct task_struct *cur;
        2199  cur = rcu_dereference(dst_rq->curr);
        2200  if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
        2201  cur = NULL;
        2207  if (cur == env->p) {
        2212  if (!cur) {
        2220  if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
        2229  cur->numa_preferred_nid != env->src_nid) {
        2243  cur_ng = rcu_dereference(cur->numa_group);
        2254  imp = taskimp + task_weight(cur, env->src_nid, dist) -
    [all …]
|
D | topology.c |
    in dattrs_equal() [argument]
        2602  static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
        2608  if (!new && !cur)
        2613  return !memcmp(cur ? (cur + idx_cur) : &tmp,
|
D | cpufreq_schedutil.c |
    in get_next_freq()
        146  policy->cpuinfo.max_freq : policy->cur;
|